K8SPSMDB-1014: add upgrade-consistency-sharded-tls test #1384

Merged · Mar 26, 2024 · 14 commits
75 changes: 75 additions & 0 deletions e2e-tests/functions
@@ -1371,3 +1371,78 @@ deploy_pmm_server() {

retry 10 60 helm install monitoring --set imageTag=${IMAGE_PMM_SERVER#*:} --set imageRepo=${IMAGE_PMM_SERVER%:*} $additional_params https://percona-charts.storage.googleapis.com/pmm-server-$PMM_SERVER_VER.tgz
}

wait_certificate() {
    certificate="$1"

    # re-check readiness several times; each kubectl wait call blocks for up to 60s
    for i in {1..10}; do
        kubectl wait --for=condition=Ready "certificate/$certificate" --timeout=60s
        sleep 1
    done
}

renew_certificate() {
    certificate="$1"

    wait_certificate "$certificate"
    desc "renew $certificate"

    local pod_name
    pod_name=$(kubectl_bin get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}')

    local revision
    revision=$(kubectl_bin get certificate "$certificate" -o 'jsonpath={.status.revision}')

    kubectl_bin exec "$pod_name" -- /tmp/cmctl renew "$certificate"

    # wait for new revision
    for i in {1..10}; do
        local new_revision
        new_revision=$(kubectl_bin get certificate "$certificate" -o 'jsonpath={.status.revision}')
        if [ "$((revision + 1))" == "$new_revision" ]; then
            break
        fi
        sleep 1
    done
}

deploy_cmctl() {
    local service_account="cmctl"

    # reuse the operator RBAC for the cmctl service account and extend the
    # cert-manager.io rule with certificates/status, which cmctl renew updates
    $sed -e "s/percona-server-mongodb-operator/$service_account/g" "${src_dir}/deploy/rbac.yaml" \
        | yq '(select(.rules).rules[] | select(contains({"apiGroups": ["cert-manager.io"]}))).resources += "certificates/status"' \
        | kubectl_bin apply -f -
    kubectl_bin apply -f "$conf_dir/cmctl.yml"
}

stop_cluster() {
    local cluster_name=$1
    local max_wait_time=${2:-120}

    local passed_time=0
    local sleep_time=1
    kubectl_bin patch psmdb "${cluster_name}" --type json -p='[{"op":"add","path":"/spec/pause","value":true}]'
    set +x
    echo -n 'Waiting for cluster stop'
    until [[ $(kubectl_bin get psmdb "${cluster_name}" -o jsonpath='{.status.mongos.ready}') -le 0 ]] \
        && [[ $(kubectl_bin get deployment "${cluster_name}-mongos" -o jsonpath='{.status.replicas}') -le 0 ]] \
        && [[ $(kubectl_bin get psmdb "${cluster_name}" -o jsonpath='{.status.replsets.cfg.ready}') -le 0 ]] \
        && [[ $(kubectl_bin get psmdb "${cluster_name}" -o jsonpath='{.status.replsets.rs0.ready}') -le 0 ]]; do
        echo -n .
        passed_time=$((passed_time + sleep_time))
        sleep ${sleep_time}
        if [[ ${passed_time} -gt ${max_wait_time} ]]; then
            echo "We've been waiting for cluster stop for too long. Exiting..."
            exit 1
        fi
    done
    echo
    set -x
}

start_cluster() {
    local cluster_name=$1

    kubectl_bin patch psmdb "${cluster_name}" --type json -p='[{"op":"add","path":"/spec/pause","value":false}]'
    wait_cluster_consistency "${cluster_name}"
}
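
For reference, a minimal sketch of how these new helpers are meant to be combined (hypothetical snippet; the cluster and certificate names are only illustrative, and the real usage is in e2e-tests/upgrade-consistency-sharded-tls/run below):

# hypothetical usage sketch; assumes e2e-tests/functions is sourced and a PSMDB
# cluster named "some-name" is running with cert-manager-issued certificates
deploy_cmctl                                 # RBAC + cmctl pod used for renewals

stop_cluster "some-name"                     # pause the cluster and wait until it is down
start_cluster "some-name"                    # unpause and wait for consistency

renew_certificate "some-name-ssl"            # run cmctl renew and wait for the next revision
renew_certificate "some-name-ssl-internal"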
2 changes: 1 addition & 1 deletion e2e-tests/run-minikube.csv
@@ -16,6 +16,6 @@ security-context
 self-healing-chaos
 smart-update
 upgrade-consistency
-upgrade-consistency-sharded
+upgrade-consistency-sharded-tls
 users
 version-service
2 changes: 1 addition & 1 deletion e2e-tests/run-pr.csv
@@ -41,7 +41,7 @@ storage
 tls-issue-cert-manager
 upgrade
 upgrade-consistency
-upgrade-consistency-sharded
+upgrade-consistency-sharded-tls
 upgrade-sharded
 users
 version-service
2 changes: 1 addition & 1 deletion e2e-tests/run-release.csv
@@ -42,7 +42,7 @@ storage
 tls-issue-cert-manager
 upgrade
 upgrade-consistency
-upgrade-consistency-sharded
+upgrade-consistency-sharded-tls
 upgrade-sharded
 users
 version-service
37 changes: 2 additions & 35 deletions e2e-tests/tls-issue-cert-manager/run
@@ -6,30 +6,6 @@ test_dir=$(realpath $(dirname $0))
 . "${test_dir}/../functions"
 set_debug
 
-renew-certificate() {
-    certificate="$1"
-
-    desc "renew $certificate"
-
-    local pod_name
-    pod_name=$(kubectl_bin get pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}')
-
-    local revision
-    revision=$(kubectl_bin get certificate "$certificate" -o 'jsonpath={.status.revision}')
-
-    kubectl_bin exec "$pod_name" -- /tmp/cmctl renew "$certificate"
-
-    # wait for new revision
-    for i in {1..10}; do
-        local new_revision
-        new_revision=$(kubectl_bin get certificate "$certificate" -o 'jsonpath={.status.revision}')
-        if [ "$((revision + 1))" == "$new_revision" ]; then
-            break
-        fi
-        sleep 1
-    done
-}
-
 check_tls_secret() {
     local secret_name=$1
     check_secret_data_key "$secret_name" 'ca.crt'
@@ -48,15 +24,6 @@ check_secret_data_key() {
     fi
 }
 
-deploy_cmctl() {
-    local service_account="cmctl"
-
-    $sed -e "s/percona-server-mongodb-operator/$service_account/g" "${src_dir}/deploy/rbac.yaml" \
-        | yq '(select(.rules).rules[] | select(contains({"apiGroups": ["cert-manager.io"]}))).resources += "certificates/status"' \
-        | kubectl_bin apply -f -
-    kubectl_bin apply -f "$conf_dir/cmctl.yml"
-}
-
 main() {
     create_infra "$namespace"
     deploy_cert_manager
@@ -90,13 +57,13 @@ main() {
     desc 'check if internal certificate issued'
     compare_kubectl certificate/$cluster-ssl-internal
 
-    renew-certificate "some-name-ssl"
+    renew_certificate "some-name-ssl"
     sleep 10
     wait_for_running $cluster-rs0 3
     wait_for_running $cluster-cfg 3 "false"
     wait_for_running $cluster-mongos 3
 
-    renew-certificate "some-name-ssl-internal"
+    renew_certificate "some-name-ssl-internal"
     sleep 10
     wait_for_running $cluster-rs0 3
     wait_for_running $cluster-cfg 3 "false"
@@ -2,7 +2,7 @@ apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   annotations: {}
-  generation: 2
+  generation: 7
   labels:
     app.kubernetes.io/component: mongod
     app.kubernetes.io/instance: some-name
@@ -2,7 +2,7 @@ apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   annotations: {}
-  generation: 2
+  generation: 7
   labels:
     app.kubernetes.io/component: cfg
     app.kubernetes.io/instance: some-name
@@ -2,7 +2,7 @@ apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   annotations: {}
-  generation: 2
+  generation: 9
   labels:
     app.kubernetes.io/component: mongod
     app.kubernetes.io/instance: some-name
@@ -2,7 +2,7 @@ apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   annotations: {}
-  generation: 2
+  generation: 9
   labels:
     app.kubernetes.io/component: cfg
     app.kubernetes.io/instance: some-name
@@ -2,7 +2,7 @@ apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   annotations: {}
-  generation: 2
+  generation: 8
   labels:
     app.kubernetes.io/component: mongod
     app.kubernetes.io/instance: some-name
@@ -2,7 +2,7 @@ apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   annotations: {}
-  generation: 2
+  generation: 8
   labels:
     app.kubernetes.io/component: mongod
     app.kubernetes.io/instance: some-name
@@ -2,7 +2,7 @@ apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   annotations: {}
-  generation: 2
+  generation: 10
   labels:
     app.kubernetes.io/component: mongod
     app.kubernetes.io/instance: some-name
@@ -2,7 +2,7 @@ apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   annotations: {}
-  generation: 2
+  generation: 10
   labels:
     app.kubernetes.io/component: mongod
     app.kubernetes.io/instance: some-name
130 changes: 130 additions & 0 deletions e2e-tests/upgrade-consistency-sharded-tls/run
@@ -0,0 +1,130 @@
#!/bin/bash

set -o errexit

test_dir=$(realpath "$(dirname "$0")")
. "${test_dir}/../functions"
set_debug

CLUSTER='some-name'

wait_cluster() {
    wait_for_running ${CLUSTER}-rs0 3
    wait_for_running ${CLUSTER}-cfg 3
    wait_for_running ${CLUSTER}-mongos 3
    wait_cluster_consistency ${CLUSTER}
}

main() {
    create_infra "$namespace"
    deploy_cert_manager

    desc 'create secrets and start client'
    kubectl_bin apply -f "$conf_dir/secrets.yml"
    kubectl_bin apply -f "$conf_dir/client_with_tls.yml"
    deploy_cmctl

    desc "create first PSMDB cluster 1.14.0 $CLUSTER"
    apply_cluster "$test_dir/conf/${CLUSTER}.yml"

    desc 'check if Pod started'
    wait_cluster

    compare_generation "1" "statefulset" "${CLUSTER}-rs0"
    compare_generation "1" "statefulset" "${CLUSTER}-cfg"

    # TODO: uncomment when 1.14.0 is removed;
    # certificate renewal doesn't work on version 1.14.0
    #renew_certificate "some-name-ssl"
    #renew_certificate "some-name-ssl-internal"
    #wait_cluster

    desc 'check if service and statefulset created with expected config'
    compare_kubectl service/${CLUSTER}-rs0 "-1140"
    compare_kubectl service/${CLUSTER}-cfg "-1140"
    compare_kubectl statefulset/${CLUSTER}-rs0 "-1140"
    compare_kubectl statefulset/${CLUSTER}-cfg "-1140"

    desc 'test 1.15.0'
    # workaround to switch to the updated certificate structure,
    # more details: https://github.com/percona/percona-server-mongodb-operator/pull/1287
    # TODO: remove the workaround when 1.14.0 is removed
    stop_cluster $CLUSTER

    compare_generation "4" "statefulset" "${CLUSTER}-rs0"
    compare_generation "3" "statefulset" "${CLUSTER}-cfg"

    kubectl_bin patch psmdb "${CLUSTER}" --type=merge --patch '{
        "spec": {"crVersion":"1.15.0"}
    }'
    # Wait for at least one reconciliation
    sleep 20

    compare_generation "5" "statefulset" "${CLUSTER}-rs0"
    compare_generation "4" "statefulset" "${CLUSTER}-cfg"

    # delete the old certificates, issuer, and TLS secrets so they are
    # recreated with the updated structure
    kubectl_bin delete certificate "$CLUSTER"-ssl "$CLUSTER"-ssl-internal
    kubectl_bin delete issuer "$CLUSTER-psmdb-ca"
    kubectl_bin delete secret "$CLUSTER"-ssl "$CLUSTER"-ssl-internal
    start_cluster $CLUSTER
    compare_generation "6" "statefulset" "${CLUSTER}-rs0"
    compare_generation "5" "statefulset" "${CLUSTER}-cfg"

    # Wait for at least one reconciliation
    sleep 20
    desc 'check if Pod started'
    wait_cluster

    renew_certificate "some-name-ssl"
    sleep 20
    wait_cluster
    compare_generation "7" "statefulset" "${CLUSTER}-rs0"
    compare_generation "6" "statefulset" "${CLUSTER}-cfg"

    renew_certificate "some-name-ssl-internal"
    sleep 20
    wait_cluster
    compare_generation "8" "statefulset" "${CLUSTER}-rs0"
    compare_generation "7" "statefulset" "${CLUSTER}-cfg"

    desc 'check if service and statefulset created with expected config'
    compare_kubectl service/${CLUSTER}-rs0 "-1150"
    compare_kubectl service/${CLUSTER}-cfg "-1150"
    compare_kubectl statefulset/${CLUSTER}-rs0 "-1150"
    compare_kubectl statefulset/${CLUSTER}-cfg "-1150"

    desc 'test 1.16.0'
    kubectl_bin patch psmdb "${CLUSTER}" --type=merge --patch '{
        "spec": {"crVersion":"1.16.0"}
    }'
    # Wait for at least one reconciliation
    sleep 20
    desc 'check if Pod started'
    wait_cluster
    compare_generation "8" "statefulset" "${CLUSTER}-rs0"
    compare_generation "7" "statefulset" "${CLUSTER}-cfg"

    renew_certificate "some-name-ssl"
    sleep 20
    wait_cluster
    compare_generation "9" "statefulset" "${CLUSTER}-rs0"
    compare_generation "8" "statefulset" "${CLUSTER}-cfg"

    renew_certificate "some-name-ssl-internal"
    sleep 20
    wait_cluster
    compare_generation "10" "statefulset" "${CLUSTER}-rs0"
    compare_generation "9" "statefulset" "${CLUSTER}-cfg"

    desc 'check if service and statefulset created with expected config'
    compare_kubectl service/${CLUSTER}-rs0 "-1160"
    compare_kubectl service/${CLUSTER}-cfg "-1160"
    compare_kubectl statefulset/${CLUSTER}-rs0 "-1160"
    compare_kubectl statefulset/${CLUSTER}-cfg "-1160"

    destroy "$namespace"

    desc 'test passed'
}

main