diff --git a/mojaloop/iac/roles/cc_k8s/defaults/main.yaml b/mojaloop/iac/roles/cc_k8s/defaults/main.yaml
index 162d8ab8..15d6dfa6 100644
--- a/mojaloop/iac/roles/cc_k8s/defaults/main.yaml
+++ b/mojaloop/iac/roles/cc_k8s/defaults/main.yaml
@@ -44,8 +44,15 @@ argocd_default:
         rook_ceph:
           app_name: "rook-ceph"
           namespace: "rook-ceph"
-          helm_version: "1.14.5"
+          helm_version: "1.15.5"
+          image_version: "v18.2.4"
           rook_csi_kubelet_dir_path: "/var/lib/kubelet"
+          mon_volumes_storage_class: "gp3"
+          mon_volumes_size: "10Gi"
+          osd_volumes_storage_class: "gp3"
+          osd_count: "'3'"
+          volume_size_per_osd: "500Gi"
+          volumes_provider: "pvc"
           sync_wave: "'-10'"
           sub_app_enabled: "true"
         reflector:
diff --git a/mojaloop/iac/roles/cc_k8s/tasks/setup_facts.yaml b/mojaloop/iac/roles/cc_k8s/tasks/setup_facts.yaml
index 4481e7e4..c63974c4 100644
--- a/mojaloop/iac/roles/cc_k8s/tasks/setup_facts.yaml
+++ b/mojaloop/iac/roles/cc_k8s/tasks/setup_facts.yaml
@@ -27,6 +27,7 @@
     fact_kubernetes_oidc_groups_claim: "{{ fact_argo_merged_config.apps['security'].sub_apps['zitadel'].zitadel_grant_prefix }}"
     fact_netbird_management_url: "https://netbird.{{ fact_argo_merged_config.apps['dns_utils'].sub_apps['cr_config'].dns_public_subdomain }}:443"
     fact_environment_list: "{{ fact_argo_merged_config.apps['deploy_env'].sub_apps['config'].environment_list.split(',') }}"
+    fact_rook_ceph_namespace: "{{ fact_argo_merged_config.apps['utils'].sub_apps['rook_ceph'].namespace }}"
 
 - name: set gitlab domain to public
   set_fact:
diff --git a/mojaloop/iac/roles/cc_k8s/tasks/teardown.yaml b/mojaloop/iac/roles/cc_k8s/tasks/teardown.yaml
index b260e742..6a395671 100644
--- a/mojaloop/iac/roles/cc_k8s/tasks/teardown.yaml
+++ b/mojaloop/iac/roles/cc_k8s/tasks/teardown.yaml
@@ -1,3 +1,4 @@
+
 - name: Delete resources before infra teardown
   shell: |
     export KUBECONFIG={{ kubeconfig_location }}/kubeconfig
@@ -11,6 +12,30 @@
     kubectl delete dbinstanceclaim -n {{ fact_argo_merged_config.apps['security'].sub_apps['zitadel'].namespace }} --ignore-not-found=true --all
   args:
     executable: /bin/bash
+  ignore_errors: true
+
+- name: Tear down ceph cluster
+  ansible.builtin.shell: |
+    export KUBECONFIG={{ kubeconfig_location }}/kubeconfig
+    kubectl patch application -n {{ fact_argo_merged_config.namespace }} rook-ceph --type json --patch='[ { "op": "remove", "path": "/spec/syncPolicy/automated" } ]' || true
+    kubectl config set-context --current --namespace {{ fact_rook_ceph_namespace }}
+    kubectl -n {{ fact_rook_ceph_namespace }} scale deployment rook-ceph-operator --replicas=0
+    for CRD in $(kubectl get crd -n {{ fact_rook_ceph_namespace }} | awk '/ceph.rook.io/ {print $1}'); do kubectl get -n {{ fact_rook_ceph_namespace }} "$CRD" -o name | xargs -I {} kubectl patch -n {{ fact_rook_ceph_namespace }} {} --type merge -p '{"metadata":{"finalizers": []}}';done
+    kubectl delete cephobjectstore ceph-objectstore --ignore-not-found=true -n {{ fact_rook_ceph_namespace }}
+    kubectl delete cephfilesystem ceph-filesystem --ignore-not-found=true -n {{ fact_rook_ceph_namespace }}
+    kubectl delete cephblockpool ceph-blockpool --ignore-not-found=true -n {{ fact_rook_ceph_namespace }}
+    kubectl delete cephcluster rook-ceph --ignore-not-found=true -n {{ fact_rook_ceph_namespace }}
+    sleep 300 # Wait for 300 seconds
+  args:
+    executable: /bin/bash
+  ignore_errors: true
+
+# - name: Sleep for 300 seconds and continue with play
+#   ansible.builtin.wait_for:
+#     timeout: 300
+# - name: Pause to finish resources deletion
+#   ansible.builtin.pause:
+#     seconds: 300
 # - name: Wait for vault to be gone
 # shell: |
 #   export KUBECONFIG={{ kubeconfig_location }}/kubeconfig