diff --git a/.github/keys/mosipgpgkey_pub.gpg b/.github/keys/mosipgpgkey_pub.gpg deleted file mode 100644 index 4bdb1a9..0000000 Binary files a/.github/keys/mosipgpgkey_pub.gpg and /dev/null differ diff --git a/.github/keys/mosipgpgkey_sec.gpg b/.github/keys/mosipgpgkey_sec.gpg deleted file mode 100644 index 97de556..0000000 Binary files a/.github/keys/mosipgpgkey_sec.gpg and /dev/null differ diff --git a/.github/workflows/chart-lint-publish.yml b/.github/workflows/chart-lint-publish.yml deleted file mode 100644 index c8d6ba3..0000000 --- a/.github/workflows/chart-lint-publish.yml +++ /dev/null @@ -1,62 +0,0 @@ -name: Validate / Publish helm charts - -on: - release: - types: [published] - pull_request: - types: [opened, reopened, synchronize] - paths: - - 'helm/**' - workflow_dispatch: - inputs: - IGNORE_CHARTS: - description: 'Provide list of charts to be ignored separated by pipe(|)' - required: false - default: '""' - type: string - CHART_PUBLISH: - description: 'Chart publishing to gh-pages branch' - required: false - default: 'NO' - type: string - options: - - YES - - NO - INCLUDE_ALL_CHARTS: - description: 'Include all charts for Linting/Publishing (YES/NO)' - required: false - default: 'NO' - type: string - options: - - YES - - NO - push: - branches: - - '!release-branch' - - '!master' - - 1.* - - 0.* - - develop - - release* - paths: - - 'helm/**' - -jobs: - chart-lint-publish: - uses: mosip/kattu/.github/workflows/chart-lint-publish.yml@master - with: - CHARTS_DIR: ./helm - CHARTS_URL: https://mosip.github.io/mosip-helm - REPOSITORY: mosip-helm - BRANCH: gh-pages - INCLUDE_ALL_CHARTS: "${{ inputs.INCLUDE_ALL_CHARTS || 'NO' }}" - IGNORE_CHARTS: "${{ inputs.IGNORE_CHARTS || '\"\"' }}" - CHART_PUBLISH: "${{ inputs.CHART_PUBLISH || 'YES' }}" - LINTING_CHART_SCHEMA_YAML_URL: "https://raw.githubusercontent.com/mosip/kattu/master/.github/helm-lint-configs/chart-schema.yaml" - LINTING_LINTCONF_YAML_URL: "https://raw.githubusercontent.com/mosip/kattu/master/.github/helm-lint-configs/lintconf.yaml" - LINTING_CHART_TESTING_CONFIG_YAML_URL: "https://raw.githubusercontent.com/mosip/kattu/master/.github/helm-lint-configs/chart-testing-config.yaml" - LINTING_HEALTH_CHECK_SCHEMA_YAML_URL: "https://raw.githubusercontent.com/mosip/kattu/master/.github/helm-lint-configs/health-check-schema.yaml" - DEPENDENCIES: "mosip,https://mosip.github.io/mosip-helm;" - secrets: - TOKEN: ${{ secrets.ACTION_PAT }} - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK }} diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml new file mode 100644 index 0000000..0dc719a --- /dev/null +++ b/.github/workflows/docker-build.yml @@ -0,0 +1,95 @@ +name: Docker build and push + +on: + push: + branches: + - "**" + tags-ignore: + - "**" + paths: + - "consolidator/**" + - "hub/**" + - "kafka-admin-client/**" + - "*.Dockerfile" + pull_request: + branches: + - "**" + paths: + - "consolidator/**" + - "hub/**" + - "kafka-admin-client/**" + - "*.Dockerfile" + workflow_dispatch: + +jobs: + docker-build-consolidator: + name: Docker Build Consolidator + runs-on: ubuntu-latest + env: + NAMESPACE: ${{ secrets.docker_hub_organisation || 'mosipdev' }} + SERVICE_NAME: 'consolidator-websub-service' + steps: + - uses: actions/checkout@v3 + - name: Docker build + run: | + BRANCH_NAME=$(echo ${{ github.ref }} | sed -e 's,.*/\(.*\),\1,') + IMAGE_ID=$NAMESPACE/$SERVICE_NAME + + # Change all uppercase to lowercase + IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]') + VERSION=$BRANCH_NAME + if [[ $BRANCH_NAME == master || 
$BRANCH_NAME == main ]]; then + VERSION=develop + fi + echo IMAGE_ID=$IMAGE_ID + echo VERSION=$VERSION + echo IMAGE_ID=$IMAGE_ID >> $GITHUB_ENV + echo VERSION=$VERSION >> $GITHUB_ENV + + docker build . \ + --file consolidator.Dockerfile \ + --tag $IMAGE_ID:$VERSION + if [[ '${{ secrets.docker_hub_token }}' != '' && '${{ secrets.docker_hub_actor }}' != '' && '${{ github.event_name }}' != 'pull_request' ]]; then + echo DOCKER_PUSH=true >> $GITHUB_ENV + fi + - name: Docker Push + if: env.DOCKER_PUSH == 'true' + run: | + echo "${{ secrets.docker_hub_token }}" | docker login -u ${{ secrets.docker_hub_actor }} --password-stdin + docker push ${{ env.IMAGE_ID }}:${{ env.VERSION }} + + docker-build-websub: + name: Docker Build Websub + runs-on: ubuntu-latest + env: + NAMESPACE: ${{ secrets.docker_hub_organisation || 'mosipdev' }} + SERVICE_NAME: 'websub-service' + steps: + - uses: actions/checkout@v3 + - name: Docker build + run: | + BRANCH_NAME=$(echo ${{ github.ref }} | sed -e 's,.*/\(.*\),\1,') + IMAGE_ID=$NAMESPACE/$SERVICE_NAME + + # Change all uppercase to lowercase + IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]') + VERSION=$BRANCH_NAME + if [[ $BRANCH_NAME == master || $BRANCH_NAME == main ]]; then + VERSION=develop + fi + echo IMAGE_ID=$IMAGE_ID + echo VERSION=$VERSION + echo IMAGE_ID=$IMAGE_ID >> $GITHUB_ENV + echo VERSION=$VERSION >> $GITHUB_ENV + + docker build . \ + --file hub.Dockerfile \ + --tag $IMAGE_ID:$VERSION + if [[ '${{ secrets.docker_hub_token }}' != '' && '${{ secrets.docker_hub_actor }}' != '' && '${{ github.event_name }}' != 'pull_request' ]]; then + echo DOCKER_PUSH=true >> $GITHUB_ENV + fi + - name: Docker Push + if: env.DOCKER_PUSH == 'true' + run: | + echo "${{ secrets.docker_hub_token }}" | docker login -u ${{ secrets.docker_hub_actor }} --password-stdin + docker push ${{ env.IMAGE_ID }}:${{ env.VERSION }} diff --git a/.github/workflows/helm-charts-build.yml b/.github/workflows/helm-charts-build.yml new file mode 100644 index 0000000..f852e59 --- /dev/null +++ b/.github/workflows/helm-charts-build.yml @@ -0,0 +1,134 @@ +name: Build and Publish Helm charts + +on: + push: + tags-ignore: + - '**' + branches: + - 1.* + - develop + - main + workflow_dispatch: + inputs: + forcePublishCharts: + description: "Force publish Charts?" + default: "*" + type: string + +jobs: + generate-charts: + runs-on: ubuntu-latest + env: + SKIP: 'FALSE' + RANCHER_CHART_FILTER: "openg2p.org/add-to-rancher" + FORCE_PUBLISH_CHARTS: "${{ inputs.forcePublishCharts || '' }}" + defaults: + run: + shell: bash + steps: + - name: Checkout Repository + uses: actions/checkout@v3 + + - id: files + if: env.FORCE_PUBLISH_CHARTS == '' + uses: jitterbit/get-changed-files@v1 + + - name: save helm/charts to tmp.txt file + run: | + touch charts-list.txt + if [ -n "${FORCE_PUBLISH_CHARTS}" ]; then + for chart in charts/${FORCE_PUBLISH_CHARTS}/; do + chart="${chart#charts/}" + chart="${chart%/}" + echo "$chart" >> charts-list.txt + done + else + for changed_file in ${{ steps.files.outputs.all }}; do + if [[ ${changed_file} =~ ^charts ]]; then + chart_name=$(echo "${changed_file}" | awk -F/ '/^[charts]/{print $2}') + echo $chart_name >> charts-list.txt; + echo "Saved $chart_name chart to charts-list.txt" + fi + done + cat charts-list.txt | sort | uniq > charts-list-unique.txt + mv charts-list-unique.txt charts-list.txt + fi + echo "List of charts to be published"; + cat charts-list.txt + + - name: Generate tar files + run: | + if [[ ! 
-s charts-list.txt ]]; then + echo "::warning::No Charts to publish"; + echo "SKIP=TRUE" >> $GITHUB_ENV + else + for chartpath in charts/*/; do + if [ -f ${chartpath}Chart.yaml ]; then + helm dep up $chartpath + fi + done + RANCHER_CHARTS=() + while IFS= read -r chartpath; do + echo "chartpath: $chartpath" + chartname=$(basename "$chartpath") + if [ -f charts/${chartname}/Chart.yaml ]; then + echo "Chartname: $chartname" + helm package charts/$chartpath + is_rancher_chart=$(grep "$RANCHER_CHART_FILTER" charts/${chartpath%*/}/Chart.yaml || true) + if [ -n "$is_rancher_chart" ]; then + RANCHER_CHARTS+=("$chartname") + fi + fi + done < charts-list.txt + echo "RANCHER_CHARTS=${RANCHER_CHARTS[@]}" >> $GITHUB_ENV + rm charts-list.txt + fi + + shopt -s nocasematch + if [[ '${{ github.repository_owner }}' != 'OpenG2P' ]]; then + echo "SKIP=TRUE" >> $GITHUB_ENV + fi + - name: Upload tar as Artifact + uses: actions/upload-artifact@v4 + with: + name: charts + path: ./*.tgz + if: env.SKIP != 'TRUE' + + - name: Checkout branch for publishing + uses: actions/checkout@v3 + with: + repository: 'openg2p/openg2p-helm' + ref: gh-pages + token: ${{ secrets.OPENG2P_BOT_GITHUB_PAT }} + if: env.SKIP != 'TRUE' + + - name: Download tar from Artifacts + uses: actions/download-artifact@v4 + with: + name: charts + path: ./ + if: env.SKIP != 'TRUE' + + - name: Update index.yaml + run: | + helm repo index --url https://openg2p.github.io/openg2p-helm/ . + for chartname in $RANCHER_CHARTS; do + cp ${chartname}*.tgz rancher/ + done + helm repo index --url https://openg2p.github.io/openg2p-helm/ --merge rancher/index.yaml rancher + for chartname in $RANCHER_CHARTS; do + rm rancher/${chartname}*.tgz || true + done + if: env.SKIP != 'TRUE' + + - name: Commit Changes to repository + uses: EndBug/add-and-commit@v7 + with: + branch: gh-pages + author_name: openg2pbot + author_email: bot@openg2p.org + default_author: user_info + message: 'added common helm charts for publish openg2p/websub@${{ github.sha }}' + add: './*.tgz ./index.yaml rancher/index.yaml' + if: env.SKIP != 'TRUE' diff --git a/.github/workflows/push-trigger.yml b/.github/workflows/push-trigger.yml deleted file mode 100644 index 87c2bb0..0000000 --- a/.github/workflows/push-trigger.yml +++ /dev/null @@ -1,206 +0,0 @@ -name: Maven Package upon a push - -on: - release: - types: [published] - pull_request: - types: [opened, reopened, synchronize] - branches: - - '!release-branch' - - release-1* - - 1.* - - develop - - MOSIP* - workflow_dispatch: - inputs: - message: - description: 'Message for manually triggering' - required: false - default: 'Triggered for Updates' - type: string - push: - branches: - - '!release-branch' - - release-1* - - master - - 1.* - - develop - - MOSIP* - -jobs: - build-kafka-admin-client: - uses: mosip/kattu/.github/workflows/maven-build.yml@master - with: - SERVICE_LOCATION: ./kafka-admin-client - BUILD_ARTIFACT: kafka-admin-client - secrets: - OSSRH_USER: ${{ secrets.OSSRH_USER }} - OSSRH_SECRET: ${{ secrets.OSSRH_SECRET }} - OSSRH_TOKEN: ${{ secrets.OSSRH_TOKEN }} - GPG_SECRET: ${{ secrets.GPG_SECRET }} - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK }} - - publish-to-nexus-kafka-admin-client: - if: "${{ !contains(github.ref, 'master') && github.event_name != 'pull_request' && github.event_name != 'release' && github.event_name != 'prerelease' && github.event_name != 'publish' }}" - needs: build-kafka-admin-client - uses: mosip/kattu/.github/workflows/maven-publish-to-nexus.yml@master - with: - SERVICE_LOCATION: ./kafka-admin-client - 
secrets: - OSSRH_URL: ${{ secrets.RELEASE_URL }} - OSSRH_USER: ${{ secrets.OSSRH_USER }} - OSSRH_SECRET: ${{ secrets.OSSRH_SECRET }} - OSSRH_TOKEN: ${{ secrets.OSSRH_TOKEN }} - GPG_SECRET: ${{ secrets.GPG_SECRET }} - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK }} - - sonar-analysis-kafka-admin-client: - needs: build-kafka-admin-client - if: "${{ github.event_name != 'pull_request' }}" - uses: mosip/kattu/.github/workflows/maven-sonar-analysis.yml@master - with: - SERVICE_LOCATION: ./kafka-admin-client - PROJECT_KEY: 'mosip_kafka-admin-client' - secrets: - SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} - ORG_KEY: ${{ secrets.ORG_KEY }} - OSSRH_USER: ${{ secrets.OSSRH_USER }} - OSSRH_SECRET: ${{ secrets.OSSRH_SECRET }} - OSSRH_TOKEN: ${{ secrets.OSSRH_TOKEN }} - GPG_SECRET: ${{ secrets.GPG_SECRET }} - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK }} - - build-consolidator: - needs: build-kafka-admin-client - env: - SERVICE_LOCATION: 'consolidator' - BUILD_ARTIFACT: 'consolidator' - KAFKA_ADMIN_BUILD_ARTIFACT: 'kafka-admin-client' - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: AdoptOpenJDK/install-jdk@v1 - with: - version: '11' - - name: Download kafka-admin-client artifact - uses: actions/download-artifact@v1 - with: - name: ${{ env.KAFKA_ADMIN_BUILD_ARTIFACT }} - path: ./ - - name: Unzip the kafka-admin-client - run: | - unzip ${{ env.KAFKA_ADMIN_BUILD_ARTIFACT }}.zip -d ${{ env.SERVICE_LOCATION }} - - name: Setup branch and env - run: | - # Strip git ref prefix from version - echo "BRANCH_NAME=$(echo ${{ github.ref }} | sed -e 's,.*/\(.*\),\1,')" >> $GITHUB_ENV - echo "GPG_TTY=$(tty)" >> $GITHUB_ENV - - name: Install ballerina - run: | - wget -q --show-progress https://dist.ballerina.io/downloads/2201.0.0/ballerina-2201.0.0-swan-lake-linux-x64.deb -O ballerina-linux-installer-x64.deb - sudo dpkg -i ballerina-linux-installer-x64.deb - - name: Hub-service build - run: | - bal build ./${{ env.SERVICE_LOCATION }} - - name: Ready the springboot artifacts - if: ${{ !contains(github.ref, 'master') || !contains(github.ref, 'main') }} - run: | - ## FIND JARS & COPY ONLY EXECUTABLE JARs STORED UNDER TARGET DIRECTORY - cd ${{ env.SERVICE_LOCATION }} - find . -path '*/target/bin/*' -name '*.jar' -type f -exec zip ${{ env.BUILD_ARTIFACT }}.zip {} + - - name: Upload the springboot jars - if: ${{ !contains(github.ref, 'master') || !contains(github.ref, 'main') }} - uses: actions/upload-artifact@v3 - with: - name: ${{ env.BUILD_ARTIFACT }} - path: ./${{ env.SERVICE_LOCATION }}/${{ env.BUILD_ARTIFACT }}.zip - - - uses: 8398a7/action-slack@v3 - with: - status: ${{ job.status }} - fields: repo,message,author,commit,workflow,job # selectable (default: repo,message) - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} # required - if: failure() # Pick up events even if the job fails or is canceled. 
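The new docker-build.yml earlier in this diff replaces the Maven-driven versioning of this deleted workflow with tags derived from the git ref. A minimal local sketch of that tag logic (the ref and image name below are assumed examples, not taken from any CI run):

```
BRANCH_NAME=$(echo "refs/heads/develop" | sed -e 's,.*/\(.*\),\1,')  # strip "refs/heads/" prefix -> "develop"
IMAGE_ID=$(echo "mosipdev/websub-service" | tr '[A-Z]' '[a-z]')      # normalise repository name to lowercase
VERSION=$BRANCH_NAME
if [[ $BRANCH_NAME == master || $BRANCH_NAME == main ]]; then
  VERSION=develop   # pushes to master/main are published under the develop tag
fi
docker build . --file hub.Dockerfile --tag "$IMAGE_ID:$VERSION"
```

So a push to a 1.x branch yields mosipdev/websub-service:1.x, while master/main builds reuse the develop tag; the push step only runs when Docker Hub credentials are configured and the event is not a pull request.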
- - build-consolidator-dockers: - needs: build-consolidator - uses: mosip/kattu/.github/workflows/docker-build.yml@master - with: - SERVICE_LOCATION: 'consolidator' - SERVICE_NAME: 'consolidator-websub-service' - BUILD_ARTIFACT: 'consolidator' - ONLY_DOCKER: true - secrets: - DEV_NAMESPACE_DOCKER_HUB: ${{ secrets.DEV_NAMESPACE_DOCKER_HUB }} - ACTOR_DOCKER_HUB: ${{ secrets.ACTOR_DOCKER_HUB }} - RELEASE_DOCKER_HUB: ${{ secrets.RELEASE_DOCKER_HUB }} - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_DEVOPS }} - - - build-websub: - needs: build-kafka-admin-client - env: - SERVICE_LOCATION: 'hub' - BUILD_ARTIFACT: 'websub-service' - KAFKA_ADMIN_BUILD_ARTIFACT: 'kafka-admin-client' - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: AdoptOpenJDK/install-jdk@v1 - with: - version: '11' - - name: Download kafka-admin-client artifact - uses: actions/download-artifact@v1 - with: - name: ${{ env.KAFKA_ADMIN_BUILD_ARTIFACT }} - path: ./ - - name: Unzip the kafka-admin-client - run: | - unzip ${{ env.KAFKA_ADMIN_BUILD_ARTIFACT }}.zip -d ${{ env.SERVICE_LOCATION }} - - name: Setup branch and env - run: | - # Strip git ref prefix from version - echo "BRANCH_NAME=$(echo ${{ github.ref }} | sed -e 's,.*/\(.*\),\1,')" >> $GITHUB_ENV - echo "GPG_TTY=$(tty)" >> $GITHUB_ENV - - name: Install ballerina - run: | - wget -q --show-progress https://dist.ballerina.io/downloads/2201.0.0/ballerina-2201.0.0-swan-lake-linux-x64.deb -O ballerina-linux-installer-x64.deb - sudo dpkg -i ballerina-linux-installer-x64.deb - - name: Hub-service build - run: | - bal build ./${{ env.SERVICE_LOCATION }} - - name: Ready the springboot artifacts - if: ${{ !contains(github.ref, 'master') || !contains(github.ref, 'main') }} - run: | - ## FIND JARS & COPY ONLY EXECUTABLE JARs STORED UNDER TARGET DIRECTORY - cd ${{ env.SERVICE_LOCATION }} - find . -path '*/target/bin/*' -name '*.jar' -type f -exec zip ${{ env.BUILD_ARTIFACT }}.zip {} + - - name: Upload the springboot jars - if: ${{ !contains(github.ref, 'master') || !contains(github.ref, 'main') }} - uses: actions/upload-artifact@v3 - with: - name: ${{ env.BUILD_ARTIFACT }} - path: ./${{ env.SERVICE_LOCATION }}/${{ env.BUILD_ARTIFACT }}.zip - - - uses: 8398a7/action-slack@v3 - with: - status: ${{ job.status }} - fields: repo,message,author,commit,workflow,job # selectable (default: repo,message) - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} # required - if: failure() # Pick up events even if the job fails or is canceled. 
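The deleted build jobs above compile the services with the Ballerina toolchain before zipping the executable JARs. A condensed local equivalent of the hub build steps, using the same Swan Lake 2201.0.0 distribution the workflow downloads:

```
wget -q https://dist.ballerina.io/downloads/2201.0.0/ballerina-2201.0.0-swan-lake-linux-x64.deb -O ballerina-linux-installer-x64.deb
sudo dpkg -i ballerina-linux-installer-x64.deb   # install the Ballerina toolchain
bal build ./hub                                  # compile the hub service; JARs land under hub/target/bin/
cd hub && find . -path '*/target/bin/*' -name '*.jar' -type f -exec zip websub-service.zip {} +
```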
- - build-websub-dockers: - needs: build-websub - uses: mosip/kattu/.github/workflows/docker-build.yml@master - with: - SERVICE_LOCATION: 'hub' - SERVICE_NAME: 'websub-service' - BUILD_ARTIFACT: 'websub-service' - ONLY_DOCKER: true - secrets: - DEV_NAMESPACE_DOCKER_HUB: ${{ secrets.DEV_NAMESPACE_DOCKER_HUB }} - ACTOR_DOCKER_HUB: ${{ secrets.ACTOR_DOCKER_HUB }} - RELEASE_DOCKER_HUB: ${{ secrets.RELEASE_DOCKER_HUB }} - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_DEVOPS }} diff --git a/.gitignore b/.gitignore index d1c3c53..55cc674 100644 --- a/.gitignore +++ b/.gitignore @@ -5,4 +5,4 @@ ballerina-internal.log.lck .vscode/ hub/Dependencies.toml consolidator/Dependencies.toml -.idea \ No newline at end of file +.idea diff --git a/README.md b/README.md index 6516d55..0d62f22 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Websub -[![Ballerina Build and push](https://github.com/mosip/websub/actions/workflows/push_trigger.yml/badge.svg?branch=release-1.2.0.1)](https://github.com/mosip/websub/actions/workflows/push_trigger.yml) -[![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?branch=release-1.2.0.1&project=mosip_websub&metric=alert_status)](https://sonarcloud.io/dashboard?branch=release-1.2.0.1&id=mosip_websub) +[![Ballerina Build and push](https://github.com/mosip/websub/actions/workflows/push_trigger.yml/badge.svg?branch=develop)](https://github.com/mosip/websub/actions/workflows/push_trigger.yml) +[![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?branch=develop&project=mosip_websub&metric=alert_status)](https://sonarcloud.io/dashboard?branch=develop&id=mosip_websub) ## Overview For an overview of Websub refer [MOSIP Docs](https://nayakrounak.gitbook.io/mosip-docs/modules/websub). @@ -37,10 +37,10 @@ To integrate any kafka(either local or remote) with hub following properties nee - KAFKA_BOOTSTRAP_NODE ### Starting the IDP -After starting IDP and Auth service following properties need to updated in both websub service. +After starting the IDP and Auth services, the following properties need to be updated in both websub services. - - MOSIP_AUTH_BASE_URL - - MOSIP_AUTH_VALIDATE_TOKEN_URL + - SECURITY_JWT_ISSUER + - SECURITY_JWT_ISSUER_JWKS_URL ### Starting the Consolidator Service Once the previous servers are up and running, the Event Consolidator Service can be started.
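The charts/websub chart introduced below merges the old helm/websub and helm/websub-consolidator charts into a single release. A hypothetical install once the chart is published to the openg2p-helm repository (release name, namespace, and hostnames are illustrative assumptions):

```
helm repo add openg2p https://openg2p.github.io/openg2p-helm/
helm repo update
helm install websub openg2p/websub --namespace websub --create-namespace \
  --set hostname=websub.example.org \
  --set global.keycloakBaseUrl=https://keycloak.example.org
```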
@@ -57,6 +57,7 @@ NOTE: ballerina should be present in your local system [(Download ballerina)](ht - Build: ``` + cd consolidator/ bal build ``` diff --git a/charts/websub/.gitignore b/charts/websub/.gitignore new file mode 100644 index 0000000..9169e44 --- /dev/null +++ b/charts/websub/.gitignore @@ -0,0 +1,2 @@ +charts/*.tgz +Chart.lock diff --git a/helm/websub-consolidator/.helmignore b/charts/websub/.helmignore similarity index 95% rename from helm/websub-consolidator/.helmignore rename to charts/websub/.helmignore index f0c1319..0e8a0eb 100644 --- a/helm/websub-consolidator/.helmignore +++ b/charts/websub/.helmignore @@ -14,8 +14,10 @@ *.swp *.bak *.tmp +*.orig *~ # Various IDEs .project .idea/ *.tmproj +.vscode/ diff --git a/charts/websub/Chart.yaml b/charts/websub/Chart.yaml new file mode 100644 index 0000000..438de98 --- /dev/null +++ b/charts/websub/Chart.yaml @@ -0,0 +1,22 @@ +apiVersion: v2 +name: websub +description: A Helm chart for WebSub +type: application +version: 0.0.0-develop +dependencies: +- name: common + repository: oci://registry-1.docker.io/bitnamicharts + version: 2.x.x +- name: kafka + repository: oci://registry-1.docker.io/bitnamicharts + version: 29.x.x + condition: kafka.enabled +keywords: + - mosip + - websub +maintainers: + - email: info@openg2p.org + name: OpenG2P +icon: https://openg2p.github.io/openg2p-helm/openg2p-logo.png +annotations: + catalog.cattle.io/display-name: "WebSub" diff --git a/charts/websub/README.md b/charts/websub/README.md new file mode 100644 index 0000000..ff90cbc --- /dev/null +++ b/charts/websub/README.md @@ -0,0 +1,3 @@ +# WebSub + +Helm chart for installing WebSub module. diff --git a/charts/websub/app-readme.md b/charts/websub/app-readme.md new file mode 100644 index 0000000..1ead844 --- /dev/null +++ b/charts/websub/app-readme.md @@ -0,0 +1 @@ +WebSub Installation. diff --git a/charts/websub/questions.yaml b/charts/websub/questions.yaml new file mode 100644 index 0000000..96f96c5 --- /dev/null +++ b/charts/websub/questions.yaml @@ -0,0 +1,10 @@ +questions: +- variable: hostname + description: Hostname under which WebSub should be accessible + type: string + label: WebSub Hostname + +- variable: global.keycloakBaseUrl + description: Required for API auth validation. 
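+  # e.g. https://keycloak.example.org (illustrative value; the hub validates API tokens against this Keycloak)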
+ type: string + label: Keycloak Base URL diff --git a/charts/websub/templates/_helpers.tpl b/charts/websub/templates/_helpers.tpl new file mode 100644 index 0000000..5113f7a --- /dev/null +++ b/charts/websub/templates/_helpers.tpl @@ -0,0 +1,101 @@ +{{/* +Return the proper image name +*/}} +{{- define "websub.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{- define "websub.consolidator.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.consolidator.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "websub.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.consolidator.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "websub.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (printf "%s" (include "common.names.fullname" .)) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return podAnnotations +*/}} +{{- define "websub.podAnnotations" -}} +{{- if .Values.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) }} +{{- end }} +{{- end -}} + +{{- define "websub.consolidator.podAnnotations" -}} +{{- if .Values.consolidator.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.consolidator.podAnnotations "context" $) }} +{{- end }} +{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} +{{ include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) }} +{{- end }} +{{- end -}} + +{{/* +Render Env values section +*/}} +{{- define "websub.baseEnvVars" -}} +{{- $context := .context -}} +{{- range $k, $v := .envVars }} +- name: {{ $k }} +{{- if or (kindIs "int64" $v) (kindIs "float64" $v) (kindIs "bool" $v) }} + value: {{ $v | quote }} +{{- else if kindIs "string" $v }} + value: {{ include "common.tplvalues.render" ( dict "value" $v "context" $context ) | squote }} +{{- else }} + valueFrom: {{- include "common.tplvalues.render" ( dict "value" $v "context" $context ) | nindent 4}} +{{- end }} +{{- end }} +{{- end -}} + +{{- define "websub.envVars" -}} +{{- $envVars := merge (deepCopy .Values.envVars) (deepCopy .Values.envVarsFrom) -}} +{{- include "websub.baseEnvVars" (dict "envVars" $envVars "context" $) }} +{{- end -}} + +{{- define "websub.consolidator.envVars" -}} +{{- $envVars := merge (deepCopy .Values.consolidator.envVars) (deepCopy .Values.consolidator.envVarsFrom) -}} +{{- include "websub.baseEnvVars" (dict "envVars" $envVars "context" $) }} +{{- end -}} + +{{/* +Return command +*/}} +{{- define "websub.commandBase" -}} +{{- if or .command .args }} +{{- if .command }} +command: {{- include "common.tplvalues.render" (dict "value" .command "context" .context) }} +{{- end }} +{{- if .args }} +args: {{- include "common.tplvalues.render" (dict "value" .args "context" .context) }} +{{- end }} +{{- else if .startUpCommand }} +command: ["/startup.sh"] +args: [] +{{- end }} +{{- end -}} + +{{- define "websub.command" -}} +{{- include "websub.commandBase" (dict "command" .Values.command "args" 
.Values.args "startUpCommand" .Values.startUpCommand "context" $) }} +{{- end -}} + +{{- define "websub.consolidator.command" -}} +{{- include "websub.commandBase" (dict "command" .Values.consolidator.command "args" .Values.consolidator.args "startUpCommand" .Values.consolidator.startUpCommand "context" $) }} +{{- end -}} diff --git a/charts/websub/templates/configmap-confg.yaml b/charts/websub/templates/configmap-confg.yaml new file mode 100644 index 0000000..61e97a3 --- /dev/null +++ b/charts/websub/templates/configmap-confg.yaml @@ -0,0 +1,13 @@ +{{- if .Values.configToml }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }}-config + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + config.toml: |- + {{- include "common.tplvalues.render" (dict "value" .Values.configToml "context" $) | nindent 4 }} +{{- end }} diff --git a/charts/websub/templates/configmap-startup.yaml b/charts/websub/templates/configmap-startup.yaml new file mode 100644 index 0000000..bce096b --- /dev/null +++ b/charts/websub/templates/configmap-startup.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.startUpCommand (not (or .Values.command .Values.args)) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }}-startup + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + startup.sh: |- + {{- include "common.tplvalues.render" (dict "value" .Values.startUpCommand "context" $) | nindent 4 }} +{{- end }} diff --git a/charts/websub/templates/consolidator/configmap-config.yaml b/charts/websub/templates/consolidator/configmap-config.yaml new file mode 100644 index 0000000..f73a020 --- /dev/null +++ b/charts/websub/templates/consolidator/configmap-config.yaml @@ -0,0 +1,13 @@ +{{- if .Values.consolidator.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }}-consolidator-config + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + config.toml: |- + {{- include "common.tplvalues.render" (dict "value" .Values.consolidator.configToml "context" $) | nindent 4 }} +{{- end }} diff --git a/charts/websub/templates/consolidator/configmap-startup.yaml b/charts/websub/templates/consolidator/configmap-startup.yaml new file mode 100644 index 0000000..e367636 --- /dev/null +++ b/charts/websub/templates/consolidator/configmap-startup.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.consolidator.startUpCommand (not (or .Values.consolidator.command .Values.consolidator.args)) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . 
}}-consolidator-startup + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + startup.sh: |- + {{- include "common.tplvalues.render" (dict "value" .Values.consolidator.startUpCommand "context" $) | nindent 4 }} +{{- end }} diff --git a/charts/websub/templates/consolidator/deployment.yaml b/charts/websub/templates/consolidator/deployment.yaml new file mode 100644 index 0000000..c5046dd --- /dev/null +++ b/charts/websub/templates/consolidator/deployment.yaml @@ -0,0 +1,113 @@ +{{- if .Values.consolidator.enabled }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ include "common.names.fullname" . }}-consolidator + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.consolidator.replicaCount }} + {{- if .Values.consolidator.updateStrategy }} + strategy: {{- toYaml .Values.consolidator.updateStrategy | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" (dict "customLabels" .Values.consolidator.podLabels "context" $) | nindent 6 }} + template: + metadata: + {{- if or .Values.consolidator.podAnnotations .Values.metrics.enabled }} + annotations: {{- include "websub.consolidator.podAnnotations" . | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.consolidator.podLabels "context" $) | nindent 8 }} + spec: + serviceAccountName: {{ template "websub.serviceAccountName" . }} + {{- include "websub.imagePullSecrets" . | nindent 6 }} + {{- if .Values.consolidator.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.consolidator.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.consolidator.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.consolidator.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.consolidator.podAffinityPreset "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.consolidator.podAntiAffinityPreset "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.consolidator.nodeAffinityPreset.type "key" .Values.consolidator.nodeAffinityPreset.key "values" .Values.consolidator.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.consolidator.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.consolidator.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.consolidator.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.consolidator.tolerations "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.consolidator.priorityClassName }} + priorityClassName: {{ .Values.consolidator.priorityClassName | quote }} + {{- end }} + {{- if .Values.consolidator.podSecurityContext.enabled }} + securityContext: {{- omit .Values.consolidator.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.consolidator.initContainers }} + initContainers: + {{- include "common.tplvalues.render" (dict "value" .Values.consolidator.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: websub-consolidator + image: {{ template "websub.consolidator.image" . }} + imagePullPolicy: {{ .Values.consolidator.image.pullPolicy }} + {{- if .Values.consolidator.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.consolidator.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.consolidator.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.consolidator.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- include "websub.consolidator.command" . | nindent 10 }} + env: + {{- include "websub.consolidator.envVars" . | nindent 12 }} + ports: + - name: http + containerPort: {{ .Values.consolidator.containerPort }} + {{- if .Values.consolidator.resources }} + resources: {{- toYaml .Values.consolidator.resources | nindent 12 }} + {{- end }} + {{- if .Values.consolidator.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.consolidator.startupProbe "enabled") "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.consolidator.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.consolidator.livenessProbe "enabled") "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.consolidator.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.consolidator.readinessProbe "enabled") "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if and .Values.consolidator.startUpCommand (not (or .Values.consolidator.command .Values.consolidator.args)) }} + - name: startup-command + mountPath: /startup.sh + subPath: startup.sh + {{- end }} + {{- if .Values.consolidator.configToml }} + - name: config-toml + mountPath: {{ include "common.tplvalues.render" (dict "value" .Values.consolidator.configTomlMountPath "context" $) }} + subPath: config.toml + {{- end }} + {{- if .Values.consolidator.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.consolidator.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.consolidator.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.consolidator.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if and .Values.consolidator.startUpCommand (not (or .Values.consolidator.command .Values.consolidator.args)) }} + - name: startup-command + configMap: + name: {{ include "common.names.fullname" . }}-consolidator-startup + defaultMode: 0755 + {{- end }} + {{- if .Values.consolidator.configToml }} + - name: config-toml + configMap: + name: {{ include "common.names.fullname" . 
}}-consolidator-config + {{- end }} + {{- if .Values.consolidator.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.consolidator.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/websub/templates/consolidator/gateway.yaml b/charts/websub/templates/consolidator/gateway.yaml new file mode 100644 index 0000000..b0dc882 --- /dev/null +++ b/charts/websub/templates/consolidator/gateway.yaml @@ -0,0 +1,40 @@ +{{- if .Values.consolidator.enabled }} +{{- if .Values.consolidator.istio.enabled }} +{{- if .Values.consolidator.istio.gateway.enabled }} +apiVersion: networking.istio.io/v1beta1 +kind: Gateway +metadata: + name: {{ include "common.names.fullname" . }}-consolidator + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + {{ toYaml .Values.consolidator.istio.gateway.ingressController | nindent 4 }} + servers: + {{- if .Values.consolidator.istio.gateway.httpEnabled }} + - port: + name: http2 + number: 8080 + protocol: HTTP2 + hosts: + - {{ default .Values.hostname .Values.consolidator.istio.gateway.host | quote }} + {{- if .Values.consolidator.istio.gateway.httpTlsRedirect }} + tls: + httpsRedirect: true + {{- end }} + {{- end }} + {{- if .Values.consolidator.istio.gateway.httpsEnabled }} + - port: + name: https + number: 8443 + protocol: HTTPS + hosts: + - {{ default .Values.hostname .Values.consolidator.istio.gateway.host | quote }} + tls: + {{ toYaml (omit .Values.consolidator.istio.gateway.tls "enabled") | nindent 6 }} + {{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/websub/templates/consolidator/service.yaml b/charts/websub/templates/consolidator/service.yaml new file mode 100644 index 0000000..a2303b1 --- /dev/null +++ b/charts/websub/templates/consolidator/service.yaml @@ -0,0 +1,26 @@ +{{- if .Values.consolidator.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.names.fullname" . 
}}-consolidator + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.consolidator.service.type }} + {{- if (or (eq .Values.consolidator.service.type "LoadBalancer") (eq .Values.consolidator.service.type "NodePort")) }} + externalTrafficPolicy: {{ .Values.consolidator.service.externalTrafficPolicy | quote }} + {{- end }} + {{ if eq .Values.consolidator.service.type "LoadBalancer" }} + loadBalancerSourceRanges: {{ .Values.consolidator.service.loadBalancerSourceRanges }} + {{ end }} + {{- if (and (eq .Values.consolidator.service.type "LoadBalancer") (not (empty .Values.consolidator.service.loadBalancerIP))) }} + loadBalancerIP: {{ .Values.consolidator.service.loadBalancerIP }} + {{- end }} + ports: + - name: http + port: {{ .Values.consolidator.service.port }} + targetPort: {{ .Values.consolidator.containerPort }} + selector: {{- include "common.labels.matchLabels" (dict "customLabels" .Values.consolidator.podLabels "context" $) | nindent 4 }} +{{- end }} diff --git a/charts/websub/templates/consolidator/virtualservice.yaml b/charts/websub/templates/consolidator/virtualservice.yaml new file mode 100644 index 0000000..0a0c7c9 --- /dev/null +++ b/charts/websub/templates/consolidator/virtualservice.yaml @@ -0,0 +1,37 @@ +{{- if .Values.consolidator.enabled }} +{{- if .Values.consolidator.istio.enabled }} +{{- if .Values.consolidator.istio.virtualservice.enabled }} +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: {{ include "common.names.fullname" . }}-consolidator + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + hosts: + - {{ default .Values.hostname .Values.consolidator.istio.virtualservice.host | quote }} + gateways: + - {{ default (include "common.names.fullname" .) .Values.consolidator.istio.virtualservice.gateway }} + http: + - headers: + request: + set: + x-forwarded-host: {{ default .Values.hostname .Values.consolidator.istio.virtualservice.host | quote }} + x-forwarded-proto: https + match: + - uri: + prefix: {{ .Values.consolidator.istio.virtualservice.prefix }} + {{- if .Values.consolidator.istio.virtualservice.rewriteUri }} + rewrite: + uri: {{ .Values.consolidator.istio.virtualservice.rewriteUri }} + {{- end }} + route: + - destination: + host: {{ include "common.tplvalues.render" (dict "value" .Values.consolidator.istio.virtualservice.destination "context" $) }} + port: + number: {{ include "common.tplvalues.render" (dict "value" .Values.consolidator.istio.virtualservice.destinationPort "context" $) }} +{{- end }} +{{- end }} +{{- end }} diff --git a/helm/websub/templates/deployment.yaml b/charts/websub/templates/deployment.yaml similarity index 63% rename from helm/websub/templates/deployment.yaml rename to charts/websub/templates/deployment.yaml index 516de09..7940d79 100644 --- a/helm/websub/templates/deployment.yaml +++ b/charts/websub/templates/deployment.yaml @@ -1,15 +1,11 @@ apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} kind: Deployment metadata: - labels: {{- include "common.labels.standard" . 
| nindent 4 }} - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "common.names.fullname" . }} - annotations: - {{- if .Values.commonAnnotations }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} + name: {{ include "common.names.fullname" . }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} spec: replicas: {{ .Values.replicaCount }} {{- if .Values.updateStrategy }} @@ -19,15 +15,10 @@ spec: matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} template: metadata: - annotations: - {{- if or .Values.podAnnotations .Values.metrics.enabled }} - {{- include "websub.podAnnotations" . | nindent 8 }} - {{- end }} - - labels: {{- include "common.labels.standard" . | nindent 8 }} - {{- if .Values.podLabels }} - {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} - {{- end }} + {{- if or .Values.podAnnotations .Values.metrics.enabled }} + annotations: {{- include "websub.podAnnotations" . | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.podLabels "context" $) | nindent 8 }} spec: serviceAccountName: {{ template "websub.serviceAccountName" . }} {{- include "websub.imagePullSecrets" . | nindent 6 }} @@ -54,25 +45,10 @@ spec: {{- if .Values.podSecurityContext.enabled }} securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} {{- end }} + {{- if .Values.initContainers }} initContainers: - {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} - - name: volume-permissions - image: {{ include "websub.volumePermissions.image" . }} - imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} - command: - - %%commands%% - securityContext: - runAsUser: 0 - {{- if .Values.volumePermissions.resources }} - resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} - {{- end }} - volumeMounts: - - name: foo - mountPath: bar - {{- end }} - {{- if .Values.initContainers }} {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} - {{- end }} + {{- end }} containers: - name: websub image: {{ template "websub.image" . }} @@ -83,35 +59,12 @@ spec: {{- if .Values.containerSecurityContext.enabled }} securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} {{- end }} - {{- if .Values.command }} - command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} - {{- end }} - {{- if .Values.args }} - args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} - {{- end }} + {{- include "websub.command" . 
| nindent 10 }} env: - - name: container_user - value: {{ .Values.containerSecurityContext.runAsUser }} - - name: JDK_JAVA_OPTIONS - value: {{ .Values.additionalResources.javaOpts }} - {{- if .Values.extraEnvVars }} - {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} - {{- end }} - envFrom: - {{- if .Values.extraEnvVarsCM }} - {{- range .Values.extraEnvVarsCM }} - - configMapRef: - name: {{ . }} - {{- end }} - {{- end }} - {{- if .Values.extraEnvVarsSecret }} - - secretRef: - name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} - {{- end }} + {{- include "websub.envVars" . | nindent 12 }} ports: - - name: spring-service - containerPort: {{ .Values.websub.port }} - + - name: http + containerPort: {{ .Values.containerPort }} {{- if .Values.resources }} resources: {{- toYaml .Values.resources | nindent 12 }} {{- end }} @@ -130,6 +83,35 @@ spec: {{- else if .Values.customReadinessProbe }} readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} {{- end }} + volumeMounts: + {{- if and .Values.startUpCommand (not (or .Values.command .Values.args)) }} + - name: startup-command + mountPath: /startup.sh + subPath: startup.sh + {{- end }} + {{- if .Values.configToml }} + - name: config-toml + mountPath: {{ include "common.tplvalues.render" (dict "value" .Values.configTomlMountPath "context" $) }} + subPath: config.toml + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} {{- if .Values.sidecars }} {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $) | nindent 8 }} {{- end }} + volumes: + {{- if and .Values.startUpCommand (not (or .Values.command .Values.args)) }} + - name: startup-command + configMap: + name: {{ include "common.names.fullname" . }}-startup + defaultMode: 0755 + {{- end }} + {{- if .Values.configToml }} + - name: config-toml + configMap: + name: {{ include "common.names.fullname" . }}-config + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} diff --git a/helm/websub-consolidator/templates/extra-list.yaml b/charts/websub/templates/extra-list.yaml similarity index 100% rename from helm/websub-consolidator/templates/extra-list.yaml rename to charts/websub/templates/extra-list.yaml diff --git a/charts/websub/templates/gateway.yaml b/charts/websub/templates/gateway.yaml new file mode 100644 index 0000000..a1cd9b4 --- /dev/null +++ b/charts/websub/templates/gateway.yaml @@ -0,0 +1,38 @@ +{{- if .Values.istio.enabled }} +{{- if .Values.istio.gateway.enabled }} +apiVersion: networking.istio.io/v1beta1 +kind: Gateway +metadata: + name: {{ include "common.names.fullname" . 
}} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + {{ toYaml .Values.istio.gateway.ingressController | nindent 4 }} + servers: + {{- if .Values.istio.gateway.httpEnabled }} + - port: + name: http2 + number: 8080 + protocol: HTTP2 + hosts: + - {{ default .Values.hostname .Values.istio.gateway.host | quote }} + {{- if .Values.istio.gateway.httpTlsRedirect }} + tls: + httpsRedirect: true + {{- end }} + {{- end }} + {{- if .Values.istio.gateway.httpsEnabled }} + - port: + name: https + number: 8443 + protocol: HTTPS + hosts: + - {{ default .Values.hostname .Values.istio.gateway.host | quote }} + tls: + {{ toYaml (omit .Values.istio.gateway.tls "enabled") | nindent 6 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/charts/websub/templates/secret.yaml b/charts/websub/templates/secret.yaml new file mode 100644 index 0000000..b72c620 --- /dev/null +++ b/charts/websub/templates/secret.yaml @@ -0,0 +1,13 @@ +{{- if not .Values.existingSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + encryption-key: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "encryption-key" "length" 10 "providedValues" (list "encryptionKey") "context" $) }} +{{- end }} diff --git a/helm/websub/templates/service-account.yaml b/charts/websub/templates/service-account.yaml similarity index 51% rename from helm/websub/templates/service-account.yaml rename to charts/websub/templates/service-account.yaml index 990a553..6c6e063 100644 --- a/helm/websub/templates/service-account.yaml +++ b/charts/websub/templates/service-account.yaml @@ -1,12 +1,8 @@ apiVersion: v1 kind: ServiceAccount metadata: - labels: {{- include "common.labels.standard" . | nindent 4 }} - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} name: {{ template "websub.serviceAccountName" . }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} {{- if .Values.commonAnnotations }} annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} - namespace: {{ .Release.Namespace }} diff --git a/helm/websub/templates/service.yaml b/charts/websub/templates/service.yaml similarity index 57% rename from helm/websub/templates/service.yaml rename to charts/websub/templates/service.yaml index bd4d827..58ce32e 100644 --- a/helm/websub/templates/service.yaml +++ b/charts/websub/templates/service.yaml @@ -1,15 +1,11 @@ apiVersion: v1 kind: Service metadata: - labels: {{- include "common.labels.standard" . | nindent 4 }} - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "common.names.fullname" . 
}} - annotations: - {{- if .Values.commonAnnotations }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} + name: {{ include "common.names.fullname" . }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} spec: type: {{ .Values.service.type }} {{- if (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) }} @@ -23,6 +19,6 @@ spec: {{- end }} ports: - port: {{ .Values.service.port }} - protocol: TCP - targetPort: {{ .Values.websub.port }} + protocol: TCP + targetPort: {{ .Values.containerPort }} selector: {{- include "common.labels.matchLabels" . | nindent 4 }} diff --git a/helm/websub/templates/servicemonitor.yaml b/charts/websub/templates/servicemonitor.yaml similarity index 68% rename from helm/websub/templates/servicemonitor.yaml rename to charts/websub/templates/servicemonitor.yaml index 15f48fd..03f1d96 100644 --- a/helm/websub/templates/servicemonitor.yaml +++ b/charts/websub/templates/servicemonitor.yaml @@ -2,20 +2,18 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: - name: {{ template "common.names.fullname" . }} + name: {{ include "common.names.fullname" . }} {{- if .Values.metrics.serviceMonitor.namespace }} namespace: {{ .Values.metrics.serviceMonitor.namespace }} - {{- else }} - namespace: {{ .Release.Namespace | quote }} {{- end }} - labels: {{- include "common.labels.standard" . | nindent 4 }} - {{- if .Values.metrics.serviceMonitor.additionalLabels }} - {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} - {{- end }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} spec: endpoints: - - targetPort: {{ .Values.springServicePort }} - path: {{ .Values.metrics.endpointPath }} + - targetPort: {{ .Values.containerPort }} + path: {{ .Values.metrics.endpointPath }} {{- if .Values.metrics.serviceMonitor.interval }} interval: {{ .Values.metrics.serviceMonitor.interval }} {{- end }} diff --git a/charts/websub/templates/virtualservice.yaml b/charts/websub/templates/virtualservice.yaml new file mode 100644 index 0000000..f0bdb87 --- /dev/null +++ b/charts/websub/templates/virtualservice.yaml @@ -0,0 +1,35 @@ +{{- if .Values.istio.enabled }} +{{- if .Values.istio.virtualservice.enabled }} +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: {{ include "common.names.fullname" . }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + hosts: + - {{ default .Values.hostname .Values.istio.virtualservice.host | quote }} + gateways: + - {{ default (include "common.names.fullname" .) 
.Values.istio.virtualservice.gateway }} + http: + - headers: + request: + set: + x-forwarded-host: {{ default .Values.hostname .Values.istio.virtualservice.host | quote }} + x-forwarded-proto: https + match: + - uri: + prefix: {{ include "common.tplvalues.render" (dict "value" .Values.istio.virtualservice.prefix "context" $) }} + {{- if .Values.istio.virtualservice.rewriteUri }} + rewrite: + uri: {{ include "common.tplvalues.render" (dict "value" .Values.istio.virtualservice.rewriteUri "context" $) }} + {{- end }} + route: + - destination: + host: {{ include "common.tplvalues.render" (dict "value" .Values.istio.virtualservice.destination "context" $) }} + port: + number: {{ include "common.tplvalues.render" (dict "value" .Values.istio.virtualservice.destinationPort "context" $) }} +{{- end }} +{{- end }} diff --git a/charts/websub/values.yaml b/charts/websub/values.yaml new file mode 100644 index 0000000..2879d98 --- /dev/null +++ b/charts/websub/values.yaml @@ -0,0 +1,708 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass +global: + keycloakBaseUrl: 'https://keycloak.your.org' + +hostname: websub.openg2p.sandbox.net + +## Add labels to all the deployed resources +## +commonLabels: {} + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +## Extra objects to deploy (value evaluated as a template) +## +extraDeploy: [] + +## Number of nodes +## +replicaCount: 1 + +service: + type: ClusterIP + port: 80 + ## loadBalancerIP for the SuiteCRM Service (optional, cloud specific) + ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + ## loadBalancerIP: + ## + ## nodePorts: + ## http: + ## https: + ## + + nodePorts: + http: "" + https: "" + ## Enable client source IP preservation + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + +image: + registry: docker.io + repository: openg2p/websub-service + tag: develop + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## Port on which this particular spring service module is running. 
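+## (Note: the websub hub is a Ballerina service; the health probes below target /hub/actuator/health on this same port.)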
+containerPort: 9191 + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## +startupProbe: + enabled: true + httpGet: + path: /hub/actuator/health + port: http + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 30 + successThreshold: 1 + +livenessProbe: + enabled: true + httpGet: + path: /hub/actuator/health + port: http + initialDelaySeconds: 20 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + httpGet: + path: /hub/actuator/health + port: http + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Command and args for running the container (set to default if not set). Use array form +## +command: [] +args: [] + +## Deployment pod host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] + +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary. +# resources: +# limits: +# cpu: 500m +# memory: 2250Mi +# requests: +# cpu: 100m +# memory: 1500Mi +resources: {} + +## Specify any JAVA_OPTS string here. These typically will be specified in conjunction with above resources. Example: +# javaOpts: "-Xms500M -Xmx500M" +javaOpts: "" + +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Clamav container already runs as 'mosip' user, so we may not need to enable this +containerSecurityContext: + enabled: false + runAsUser: 1001 + runAsNonRoot: true + +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## +podSecurityContext: + enabled: false + fsGroup: 1001 + +## Pod affinity preset +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## Allowed values: soft, hard +## +podAffinityPreset: "" + +## Pod anti-affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## Allowed values: soft, hard +## +podAntiAffinityPreset: soft + +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## Allowed values: soft, hard +## +nodeAffinityPreset: + ## Node affinity type + ## Allowed values: soft, hard + ## + type: "" + ## Node label key to match + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## Node label values to match + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + +## Affinity for pod assignment. Evaluated as a template. +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Node labels for pod assignment. Evaluated as a template. +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment. Evaluated as a template. 
+## Tolerations for pod assignment. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Pod extra labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Annotations for server pods. +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## pods' priority. +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +# priorityClassName: "" + +## lifecycleHooks for the container to automate configuration before or after startup. +## +lifecycleHooks: {} + +## Custom liveness probe +## +customLivenessProbe: {} + +## Custom readiness probe +## +customReadinessProbe: {} + +## Update strategy - only really applicable for deployments with RWO PVs attached +## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the +## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will +## terminate the single previous pod, so that the new, incoming pod can attach to the PV +## +updateStrategy: + type: RollingUpdate + +## Add init containers to the pods. +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] + +## Add sidecars to the pods. +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] + +## Specifies whether a ServiceAccount should be created +## +serviceAccount: + create: true + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + ## + name: + +## Prometheus Metrics +## +metrics: + enabled: false + ## Prometheus pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + + endpointPath: /v1/websub/actuator/prometheus + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + ## If the operator is installed in your cluster, set to true to create a ServiceMonitor entry + ## + enabled: true + ## Specify the namespace in which the serviceMonitor resource will be created + ## + # namespace: "" + ## Specify the interval at which metrics should be scraped + ## + interval: 10s + ## Specify the timeout after which the scrape is ended + ## + # scrapeTimeout: 30s + ## Specify metric relabellings to add to the scrape endpoint + ## + # relabellings: + ## Specify the honorLabels parameter for the scrape endpoint + ## + honorLabels: false + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + additionalLabels: {} +
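With metrics.enabled set to true and the serviceMonitor entry above, a Prometheus Operator install scrapes endpointPath on the service. A hedged sanity check that the endpoint actually serves metrics (service name, local port, and namespace are illustrative):

    # Confirm the Prometheus endpoint responds before wiring up the operator.
    kubectl -n websub port-forward svc/websub 8080:80 &
    sleep 2  # give the port-forward a moment to establish
    curl -fsS http://localhost:8080/v1/websub/actuator/prometheus | head -n 5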
+ ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + enabled: false + additionalLabels: {} + namespace: '' + ## List of rules, used as template by Helm. + ## These are just example rules inspired by https://awesome-prometheus-alerts.grep.to/rules.html + # rules: + # - alert: RabbitmqDown + # expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0 + # for: 5m + # labels: + # severity: error + rules: [] + +## Extra volumes to add to the deployment +## +extraVolumes: [] + +## Extra volume mounts to add to the container +## +extraVolumeMounts: [] + +istio: + enabled: true + virtualservice: + enabled: true + host: "" + gateway: "internal" + destination: '{{ include "common.names.fullname" . }}' + destinationPort: '{{ .Values.service.port }}' + prefix: /hub + rewriteUri: "" + gateway: + enabled: false + host: "" + ingressController: + istio: ingressgateway + httpTlsRedirect: false + httpEnabled: true + httpsEnabled: false + tls: + mode: SIMPLE + credentialName: "" + +kafka: + enabled: true + nameOverride: websub-kafka + heapOpts: "" + extraConfig: |- + default.replication.factor=1 + offsets.topic.replication.factor=1 + transaction.state.log.replication.factor=1 + auto.create.topics.enable=true + delete.topic.enable=true + num.partitions=3 + controller: + heapOpts: "" + replicaCount: 1 + resourcesPreset: "none" + broker: + heapOpts: "" + persistence: + enabled: false + listeners: + client: + protocol: PLAINTEXT + controller: + protocol: PLAINTEXT + interbroker: + protocol: PLAINTEXT + external: + protocol: PLAINTEXT + service: + ports: + client: 9092 + +consolidator: + enabled: true + replicaCount: 1 + service: + type: ClusterIP + port: 80 + nodePorts: + http: "" + https: "" + externalTrafficPolicy: Cluster + image: + registry: docker.io + repository: openg2p/consolidator-websub-service + tag: develop + pullPolicy: Always + pullSecrets: [] + # - myRegistryKeySecretName + containerPort: 9192 + startupProbe: + enabled: true + httpGet: + path: /consolidator/actuator/health + port: http + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 30 + successThreshold: 1 + livenessProbe: + enabled: true + httpGet: + path: /consolidator/actuator/health + port: http + initialDelaySeconds: 20 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + readinessProbe: + enabled: true + httpGet: + path: /consolidator/actuator/health + port: http + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + command: [] + args: [] + hostAliases: [] + resources: {} + javaOpts: "" + containerSecurityContext: + enabled: false + runAsUser: 1001 + runAsNonRoot: true + podSecurityContext: + enabled: false + fsGroup: 1001 + podAffinityPreset: "" + podAntiAffinityPreset: soft + nodeAffinityPreset: + type: "" + key: "" + values: [] + affinity: {} + nodeSelector: {} + tolerations: [] + podLabels: + app.kubernetes.io/name: '{{ include "common.names.name" . }}-consolidator' + podAnnotations: {} + # priorityClassName: "" + lifecycleHooks: {} + updateStrategy: + type: RollingUpdate + initContainers: [] + sidecars: [] + extraVolumes: [] + extraVolumeMounts: []
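Further up, the kafka block configures a single KRaft node with replication factor 1, PLAINTEXT listeners, and persistence disabled, which is only appropriate for sandbox installs. Once the stack is up, one way to confirm that the hub and consolidator meta topics were created is to list topics from inside the broker pod; a sketch, where the pod name follows the usual naming for the websub-kafka override but is an assumption:

    # List topics on the bundled single-node Kafka (pod name is illustrative).
    kubectl -n websub exec websub-kafka-controller-0 -- \
      kafka-topics.sh --bootstrap-server localhost:9092 --list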
+ istio: + enabled: true + virtualservice: + enabled: true + host: "" + gateway: "internal" + destination: '{{ include "common.names.fullname" . }}-consolidator' + destinationPort: '{{ .Values.consolidator.service.port }}' + prefix: /consolidator + rewriteUri: "" + gateway: + enabled: false + host: "" + ingressController: + istio: ingressgateway + httpTlsRedirect: false + httpEnabled: true + httpsEnabled: false + tls: + mode: SIMPLE + credentialName: "" + envVars: + JDK_JAVA_OPTIONS: '{{ tpl .Values.consolidator.javaOpts $ }}' + CONSOLIDATOR_PORT: '{{ .Values.consolidator.containerPort }}' + KAFKA_BOOTSTRAP_HOSTNAME: '{{ tpl .Values.kafkaInstallationName $ }}' + KAFKA_PORT: '9092' + KAFKA_BOOTSTRAP_SERVER: '{{ tpl .Values.envVars.KAFKA_BOOTSTRAP_HOSTNAME $ }}:{{ tpl .Values.envVars.KAFKA_PORT $ }}' + + envVarsFrom: {} + + configTomlMountPath: /etc/config.template.toml + + configToml: |- + [consolidatorService.config] + # IP and Port of the Kafka bootstrap node + KAFKA_BOOTSTRAP_NODE = "${KAFKA_BOOTSTRAP_SERVER}" + + # Kafka topic which will get notified for websub topic registration/deregistration + # All the hubs must be pointed to the same Kafka topic to notify websub topic registration/deregistration + REGISTERED_WEBSUB_TOPICS_TOPIC = "registered-websub-topics" + + # Kafka topic which stores consolidated websub topics for the hub + CONSOLIDATED_WEBSUB_TOPICS_TOPIC = "consolidated-websub-topics" + + # Kafka topic which will get notified for websub subscription/unsubscription + # All the hubs must be pointed to the same Kafka topic to notify websub subscription/unsubscription + WEBSUB_SUBSCRIBERS_TOPIC = "registered-websub-subscribers" + + # Kafka topic which stores consolidated websub subscribers for this server + CONSOLIDATED_WEBSUB_SUBSCRIBERS_TOPIC = "consolidated-websub-subscribers" + + # The interval for which Kafka consumers wait for new messages + POLLING_INTERVAL = 10.0 + + # The period for which the Kafka close method waits to complete + GRACEFUL_CLOSE_PERIOD = 5.0 + + # The disk space threshold for healthcheck + DISK_SPACE_THRESHOLD = 10485760 + + # The port that is used to start the consolidator + CONSOLIDATOR_PORT = ${CONSOLIDATOR_PORT} + + # Consolidator health endpoint + CONSOLIDATOR_HEALTH_ENDPOINT = "/consolidator/actuator/health" + + [ballerina.http.accessLogConfig] + # Enable printing access logs in console + console = true # Default is false + + [ballerina.http.traceLogAdvancedConfig] + # Enable printing trace logs in console + console = false # Default is false + + startUpCommand: |- + #!/usr/bin/env bash + echo "==> Copying Configs" + envsubst < {{ tpl .Values.consolidator.configTomlMountPath $ }} > Config.toml + echo "==> Waiting for Kafka" + RETRY_COUNT=0 + while :; do + if nc -z ${KAFKA_BOOTSTRAP_HOSTNAME} ${KAFKA_PORT} ; then + break + elif [ $RETRY_COUNT -ne 200 ]; then + ((RETRY_COUNT++)) + sleep 1s + else + echo "Could not connect to Kafka even after retries..." + exit 1 + fi + done + java -jar ./consolidator.jar + +# If a hub secret already exists, give the secret name here. Otherwise one will be generated. +existingSecret: "" + +# Provide the hub encryption key here. If empty, it will be autogenerated. +encryptionKey: "" +
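existingSecret and encryptionKey above control where the hub's secret encryption key comes from. To manage the key yourself instead of letting the chart autogenerate it, a sketch (the secret name is illustrative; the key name encryption-key is what envVarsFrom below reads):

    # Create the secret, then set existingSecret: websub-hub-key in values.
    kubectl -n websub create secret generic websub-hub-key \
      --from-literal=encryption-key="$(openssl rand -base64 32)"

Since openssl rand -base64 emits base64 material, this pairs with HUB_SECRET_ENCRYPTION_KEY_FORMAT = "base64-encoded-bytes" rather than the alpha-numeric default further down.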
+envVars: + JDK_JAVA_OPTIONS: '{{ tpl .Values.javaOpts $ }}' + HUB_PORT: '{{ .Values.containerPort }}' + KAFKA_BOOTSTRAP_HOSTNAME: '{{ tpl .Values.kafkaInstallationName $ }}' + KAFKA_PORT: '9092' + KAFKA_BOOTSTRAP_SERVER: '{{ tpl .Values.envVars.KAFKA_BOOTSTRAP_HOSTNAME $ }}:{{ tpl .Values.envVars.KAFKA_PORT $ }}' + CONSOLIDATOR_URL: 'http://{{ include "common.names.fullname" . }}-consolidator' + CONSOLIDATOR_HEALTH_ENDPOINT: '/consolidator/actuator/health' + SECURITY_JWT_ISSUER: '{{ tpl .Values.global.keycloakBaseUrl $ }}/realms/master' + SECURITY_JWT_ISSUER_JWKS_URL: '{{ tpl .Values.envVars.SECURITY_JWT_ISSUER $ }}/protocol/openid-connect/certs' + +envVarsFrom: + hub_secret_encryption_key: + secretKeyRef: + name: '{{ (not .Values.existingSecret) | ternary (include "common.names.fullname" .) .Values.existingSecret }}' + key: encryption-key + +configTomlMountPath: /etc/config.template.toml +
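The configToml template below is not used verbatim: startUpCommand runs it through envsubst at container start, filling each ${...} placeholder from the environment built out of envVars above. The substitution can be reproduced locally (all values illustrative, and not every placeholder is shown):

    # Render the TOML template the same way the container entrypoint does.
    export HUB_PORT=9191 KAFKA_BOOTSTRAP_SERVER=localhost:9092
    export SECURITY_JWT_ISSUER="https://keycloak.example.org/realms/master"
    export SECURITY_JWT_ISSUER_JWKS_URL="${SECURITY_JWT_ISSUER}/protocol/openid-connect/certs"
    export hub_secret_encryption_key="changeme"
    envsubst < config.template.toml > Config.toml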
+configToml: |- + [kafkaHub.config] + # Flag to check whether to enable/disable security + SECURITY_ON = true + + # Security: JWT Issuer URL + SECURITY_JWT_ISSUER = "${SECURITY_JWT_ISSUER}" + + # Security: JWKS URL for the configured issuer + SECURITY_JWT_ISSUER_JWKS_URL = "${SECURITY_JWT_ISSUER_JWKS_URL}" + + # Server ID is used to uniquely identify each server + # Each server must have a unique ID + SERVER_ID = "server-1" + + # IP and Port of the Kafka bootstrap node + KAFKA_BOOTSTRAP_NODE = "${KAFKA_BOOTSTRAP_SERVER}" + + # Kafka topic which will get notified for websub topic registration/deregistration + # All the hubs must be pointed to the same Kafka topic to notify websub topic registration/deregistration + REGISTERED_WEBSUB_TOPICS_TOPIC = "registered-websub-topics" + + # Kafka topic which stores consolidated websub topics for the hub + CONSOLIDATED_WEBSUB_TOPICS_TOPIC = "consolidated-websub-topics" + + # Kafka topic which will get notified for websub subscription/unsubscription + # All the hubs must be pointed to the same Kafka topic to notify websub subscription/unsubscription + WEBSUB_SUBSCRIBERS_TOPIC = "registered-websub-subscribers" + + # Kafka topic which stores consolidated websub subscribers for this server + CONSOLIDATED_WEBSUB_SUBSCRIBERS_TOPIC = "consolidated-websub-subscribers" + + # The interval for which Kafka consumers wait for new messages + POLLING_INTERVAL = 10.0 + + # The period for which the Kafka close method waits to complete + GRACEFUL_CLOSE_PERIOD = 5.0 + + # The port that is used to start the hub + HUB_PORT = ${HUB_PORT} + + # The period between message delivery retries + MESSAGE_DELIVERY_RETRY_INTERVAL = 3.0 + + # The maximum message delivery retry count + MESSAGE_DELIVERY_COUNT = 3 + + # The message delivery timeout + MESSAGE_DELIVERY_TIMEOUT = 30.0 + + # The disk space threshold for healthcheck + DISK_SPACE_THRESHOLD = 10485760 + + # The prefix of partner service-account user IDs + PARTNER_USER_ID_PREFIX = "service-account-" + + # The period between intent verification retries + INTENT_VERIFICATION_RETRY_INTERVAL = 3.0 + + # The maximum intent verification retry count + INTENT_VERIFICATION_COUNT = 3 + + # The backoff factor applied between intent verification retries + INTENT_VERIFICATION_BACKOFF_FACTOR = 2.0 + + # The maximum interval between intent verification retries + INTENT_VERIFICATION_MAX_INTERVAL = 20.0 + + # The maximum number of records returned by a single consumer poll + KAFKA_CONSUMER_MAX_POLL_RECORDS = 3 + + # The maximum bytes fetched per consumer fetch request + KAFKA_CONSUMER_FETCH_MAX_BYTES = 3145728 + + # The maximum bytes fetched per partition per fetch request + KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES = 524288 + + # Comma-separated list of the hub's internal meta topics + META_TOPICS = "registered-websub-topics,consolidated-websub-topics,registered-websub-subscribers,consolidated-websub-subscribers" + + # Consolidator base URL + CONSOLIDATOR_BASE_URL = "${CONSOLIDATOR_URL}" + + # Consolidator health endpoint + CONSOLIDATOR_HEALTH_ENDPOINT = "/consolidator/actuator/health" + + # Encryption key that will be used to encrypt / decrypt the hub secret + HUB_SECRET_ENCRYPTION_KEY = "${hub_secret_encryption_key}" + + # The config below accepts base64-encoded-bytes / alpha-numeric. + # base64-encoded-bytes is recommended, since alpha-numeric is considered less secure; alpha-numeric is kept only for backward compatibility + HUB_SECRET_ENCRYPTION_KEY_FORMAT = "alpha-numeric" + + [ballerina.http.accessLogConfig] + # Enable printing access logs in console + console = true # Default is false + + [ballerina.http.traceLogAdvancedConfig] + # Enable printing trace logs in console + console = false # Default is false + +startUpCommand: |- + #!/usr/bin/env bash + echo "==> Copying Configs" + envsubst < {{ tpl .Values.configTomlMountPath $ }} > Config.toml + echo "==> Waiting for Kafka" + RETRY_COUNT=0 + while :; do + if nc -z ${KAFKA_BOOTSTRAP_HOSTNAME} ${KAFKA_PORT} ; then + break + elif [ $RETRY_COUNT -ne 200 ]; then + ((RETRY_COUNT++)) + sleep 1s + else + echo "Could not connect to Kafka even after retries..." + exit 1 + fi + done + echo "==> Waiting for consolidator" + if ! curl -I -s -o /dev/null -m 10 --retry 100 --retry-delay 10 --retry-all-errors "${CONSOLIDATOR_URL}${CONSOLIDATOR_HEALTH_ENDPOINT}"; then + echo "Failed connecting with consolidator after max retries..." + exit 1 + fi + java -jar ./hub.jar + +kafkaInstallationName: '{{ include "common.names.fullname" .Subcharts.kafka }}' diff --git a/consolidator.Dockerfile b/consolidator.Dockerfile new file mode 100644 index 0000000..1259b79 --- /dev/null +++ b/consolidator.Dockerfile @@ -0,0 +1,48 @@ +FROM maven:3.9.9-eclipse-temurin-17 AS builder + +ARG ballerina_version=2201.9.0 +ARG ballerina_download_url=https://dist.ballerina.io/downloads/${ballerina_version}/ballerina-${ballerina_version}-swan-lake-linux-x64.deb + +RUN wget -q --show-progress ${ballerina_download_url} -O ballerina-linux-installer-x64.deb && \ dpkg -i ballerina-linux-installer-x64.deb + +COPY kafka-admin-client /kafka-admin-client +RUN cd /kafka-admin-client && \ mvn install -DskipTests -Dgpg.skip + +COPY consolidator /consolidator +RUN bal build /consolidator + +FROM eclipse-temurin:17.0.13_11-jre-alpine + +RUN apk add bash wget curl gettext + +ARG container_user_id=1001 +ARG container_group_id=1001 +ARG container_user=mosip +ARG container_group=mosip + +RUN addgroup ${container_group} -g ${container_group_id} && \ adduser ${container_user} -G ${container_group} -u ${container_user_id} -s bash -D + +WORKDIR /home/${container_user} + +ARG SOURCE +ARG COMMIT_HASH +ARG COMMIT_ID +ARG BUILD_TIME +LABEL source=${SOURCE} +LABEL commit_hash=${COMMIT_HASH} +LABEL commit_id=${COMMIT_ID} +LABEL build_time=${BUILD_TIME} + +COPY --from=builder --chown=${container_user}:${container_group} /consolidator/target/bin/*.jar consolidator.jar + +ARG consolidator_config_url +ENV consolidator_config_file_url_env=${consolidator_config_url} + +USER ${container_user} +EXPOSE 9192 + +CMD wget -q --show-progress "${consolidator_config_file_url_env}" -O Config.toml; \ java -jar -Xms256m -Xmx2048m ./consolidator.jar diff --git a/consolidator/Ballerina.toml b/consolidator/Ballerina.toml index 660c151..e537617 100644 --- a/consolidator/Ballerina.toml +++ b/consolidator/Ballerina.toml @@ -1,15 +1,14 @@ [package] org = "mosip" name = "consolidatorService" -version = "1.2.0-patch.1" - +version = "0.0.0-SNAPSHOT" [build-options] observabilityIncluded = true -[[platform.java11.dependency]] +[[platform.java17.dependency]] modules = ["inittopic"] groupId = "io.mosip" artifactId = "kafka-admin-client" -version = "1.2.0.1" -path = "kafka-admin-client/target/kafka-admin-client-1.2.0.1.jar" +version = "0.0.0-SNAPSHOT" +path = 
"../kafka-admin-client/target/kafka-admin-client-0.0.0-SNAPSHOT.jar" diff --git a/consolidator/Dockerfile b/consolidator/Dockerfile deleted file mode 100644 index 1ca11e7..0000000 --- a/consolidator/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM openjdk:11 - -ARG SOURCE -ARG COMMIT_HASH -ARG COMMIT_ID -ARG BUILD_TIME -LABEL source=${SOURCE} -LABEL commit_hash=${COMMIT_HASH} -LABEL commit_id=${COMMIT_ID} -LABEL build_time=${BUILD_TIME} -ARG consolidator_config_url -ARG max_heap_size=2048m -ARG ballerina_download_url=https://dist.ballerina.io/downloads/2201.0.0/ballerina-2201.0.0-swan-lake-linux-x64.deb -ENV consolidator_config_file_url_env=${consolidator_config_url} -COPY ./target/bin/*.jar consolidator.jar -EXPOSE 9192 -RUN wget -q --show-progress ${ballerina_download_url} -O ballerina-linux-installer-x64.deb -RUN dpkg -i ballerina-linux-installer-x64.deb -#TODO Link to be parameterized instead of hardcoding -CMD wget -q --show-progress "${consolidator_config_file_url_env}" -O Config.toml;\ - java -jar -Xms256m -Xmx2048m ./consolidator.jar ;\ diff --git a/consolidator/consolidator_service.bal b/consolidator/consolidator_service.bal index 68219fa..818c3f1 100644 --- a/consolidator/consolidator_service.bal +++ b/consolidator/consolidator_service.bal @@ -92,8 +92,8 @@ isolated function getTotalSpace(handle fileObj) returns int = @java:Method { isolated function startConsolidator() returns error? { do { while true { - kafka:ConsumerRecord[] records = check conn:websubEventConsumer->poll(config:POLLING_INTERVAL); - foreach kafka:ConsumerRecord currentRecord in records { + kafka:BytesConsumerRecord[] records = check conn:websubEventConsumer->poll(config:POLLING_INTERVAL); + foreach kafka:BytesConsumerRecord currentRecord in records { string lastPersistedData = check string:fromBytes(currentRecord.value); log:printInfo("websub event received in consolidator",payload=lastPersistedData); error? result = processPersistedData(lastPersistedData); diff --git a/consolidator/init_consolidator.bal b/consolidator/init_consolidator.bal index f11c51c..44b8df8 100644 --- a/consolidator/init_consolidator.bal +++ b/consolidator/init_consolidator.bal @@ -64,9 +64,9 @@ isolated function syncRegsisteredTopicsCache() returns error? { } isolated function getPersistedTopics() returns websubhub:TopicRegistration[]|error? { - kafka:ConsumerRecord[] records = check conn:consolidatedTopicsConsumer->poll(config:POLLING_INTERVAL); + kafka:BytesConsumerRecord[] records = check conn:consolidatedTopicsConsumer->poll(config:POLLING_INTERVAL); if records.length() > 0 { - kafka:ConsumerRecord lastRecord = records.pop(); + kafka:BytesConsumerRecord lastRecord = records.pop(); string|error lastPersistedData = string:fromBytes(lastRecord.value); if lastPersistedData is string { return deSerializeTopicsMessage(lastPersistedData); @@ -110,9 +110,9 @@ isolated function syncSubscribersCache() returns error? { } isolated function getPersistedSubscribers() returns websubhub:VerifiedSubscription[]|error? 
{ - kafka:ConsumerRecord[] records = check conn:consolidatedSubscriberConsumer->poll(config:POLLING_INTERVAL); + kafka:BytesConsumerRecord[] records = check conn:consolidatedSubscriberConsumer->poll(config:POLLING_INTERVAL); if records.length() > 0 { - kafka:ConsumerRecord lastRecord = records.pop(); + kafka:BytesConsumerRecord lastRecord = records.pop(); string|error lastPersistedData = string:fromBytes(lastRecord.value); if lastPersistedData is string { return deSerializeSubscribersMessage(lastPersistedData); diff --git a/deploy/README.md b/deploy/README.md deleted file mode 100644 index 3bb7048..0000000 --- a/deploy/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# WebSub - -## Install -``` -./install.sh -``` diff --git a/deploy/copy_cm.sh b/deploy/copy_cm.sh deleted file mode 100755 index b97c8b2..0000000 --- a/deploy/copy_cm.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -# Copy configmaps from other namespaces -# DST_NS: Destination namespace - -function copying_cm() { - UTIL_URL=https://github.com/mosip/mosip-infra/blob/master/deployment/v3/utils/copy_cm_func.sh - COPY_UTIL=./copy_cm_func.sh - DST_NS=websub - - wget -q $UTIL_URL -O copy_cm_func.sh && chmod +x copy_cm_func.sh - - $COPY_UTIL configmap global default $DST_NS - $COPY_UTIL configmap config-server-share config-server $DST_NS - return 0 -} - -# set commands for error handling. -set -e -set -o errexit ## set -e : exit the script if any statement returns a non-true return value -set -o nounset ## set -u : exit the script if you try to use an uninitialised variable -set -o errtrace # trace ERR through 'time command' and other functions -set -o pipefail # trace ERR through pipes -copying_cm # calling function - - - diff --git a/deploy/delete.sh b/deploy/delete.sh deleted file mode 100755 index 9b6d56f..0000000 --- a/deploy/delete.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -# Uninstalls Websub -## Usage: ./delete.sh [kubeconfig] - -if [ $# -ge 1 ] ; then - export KUBECONFIG=$1 -fi - -function deleting_websub() { - NS=websub - while true; do - read -p "Are you sure you want to delete Websub helm chart?(Y/n) " yn - if [ $yn = "Y" ] - then - helm -n $NS delete websub-consolidator - helm -n $NS delete websub - break - else - break - fi - done - return 0 -} - -# set commands for error handling. 
-set -e -set -o errexit ## set -e : exit the script if any statement returns a non-true return value -set -o nounset ## set -u : exit the script if you try to use an uninitialised variable -set -o errtrace # trace ERR through 'time command' and other functions -set -o pipefail # trace ERR through pipes -deleting_websub # calling function \ No newline at end of file diff --git a/deploy/install.sh b/deploy/install.sh deleted file mode 100755 index d2e094a..0000000 --- a/deploy/install.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -# Installs websub -## Usage: ./install.sh [kubeconfig] - -if [ $# -ge 1 ] ; then - export KUBECONFIG=$1 -fi - -NS=websub -CHART_VERSION=12.0.1-develop - -echo Create $NS namespace -kubectl create ns $NS - -function installing_websub() { - echo Istio label - kubectl label ns $NS istio-injection=enabled --overwrite - helm repo update - - echo Copy configmaps - sed -i 's/\r$//' copy_cm.sh - ./copy_cm.sh - - echo Installing websub - helm -n $NS install websub-consolidator mosip/websub-consolidator --version $CHART_VERSION --wait - helm -n $NS install websub mosip/websub --version $CHART_VERSION - - kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status - echo Installed websub services - return 0 -} - -# set commands for error handling. -set -e -set -o errexit ## set -e : exit the script if any statement returns a non-true return value -set -o nounset ## set -u : exit the script if you try to use an uninitialised variable -set -o errtrace # trace ERR through 'time command' and other functions -set -o pipefail # trace ERR through pipes -installing_websub # calling function diff --git a/deploy/restart.sh b/deploy/restart.sh deleted file mode 100755 index c7ebb0f..0000000 --- a/deploy/restart.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -# Restart the websub service -## Usage: ./restart.sh [kubeconfig] - -if [ $# -ge 1 ] ; then - export KUBECONFIG=$1 -fi - - -function Restarting_websub() { - NS=websub - kubectl -n $NS rollout restart deploy - - kubectl -n $NS get deploy -o name | xargs -n1 -t kubectl -n $NS rollout status - echo Restarted websub services - return 0 -} - -# set commands for error handling. 
-set -e -set -o errexit ## set -e : exit the script if any statement returns a non-true return value -set -o nounset ## set -u : exit the script if you try to use an uninitialised variable -set -o errtrace # trace ERR through 'time command' and other functions -set -o pipefail # trace ERR through pipes -Restarting_websub # calling function \ No newline at end of file diff --git a/helm/websub-consolidator/.gitignore b/helm/websub-consolidator/.gitignore deleted file mode 100644 index b3c94bf..0000000 --- a/helm/websub-consolidator/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -charts/ -Charts.lock diff --git a/helm/websub-consolidator/Chart.yaml b/helm/websub-consolidator/Chart.yaml deleted file mode 100644 index bc95dfc..0000000 --- a/helm/websub-consolidator/Chart.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: v2 -name: websub-consolidator -description: A Helm chart for Websub Cconsolidator -type: application -version: 12.0.1-develop -appVersion: "" -dependencies: - - name: common - repository: https://charts.bitnami.com/bitnami - tags: - - bitnami-common - version: 1.x.x -home: https://mosip.io -keywords: - - mosip - - websub-consolidator - - websub -maintainers: - - email: info@mosip.io - name: MOSIP diff --git a/helm/websub-consolidator/README.md b/helm/websub-consolidator/README.md deleted file mode 100644 index 7a0312c..0000000 --- a/helm/websub-consolidator/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Websub Consolidator - -Helm chart for installing Websub Consolidator module. - -## TL;DR - -```console -$ helm repo add mosip https://mosip.github.io -$ helm install my-release mosip/websub-consolidator -``` - -## Service and VS -No service connects to this service, hence service.yaml and virtualservice.yaml both are not required. - diff --git a/helm/websub-consolidator/templates/NOTES.txt b/helm/websub-consolidator/templates/NOTES.txt deleted file mode 100644 index 8b13789..0000000 --- a/helm/websub-consolidator/templates/NOTES.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/helm/websub-consolidator/templates/_helpers.tpl b/helm/websub-consolidator/templates/_helpers.tpl deleted file mode 100644 index 07fe37a..0000000 --- a/helm/websub-consolidator/templates/_helpers.tpl +++ /dev/null @@ -1,60 +0,0 @@ -{{/* -Return the proper image name -*/}} -{{- define "websub-consolidator.image" -}} -{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} -{{- end -}} - -{{/* -Return the proper image name (for the init container volume-permissions image) -*/}} -{{- define "websub-consolidator.volumePermissions.image" -}} -{{- include "common.images.image" ( dict "imageRoot" .Values.volumePermissions.image "global" .Values.global ) -}} -{{- end -}} - -{{/* -Return the proper Docker Image Registry Secret Names -*/}} -{{- define "websub-consolidator.imagePullSecrets" -}} -{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}} -{{- end -}} - -{{/* -Create the name of the service account to use -*/}} -{{- define "websub-consolidator.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (printf "%s" (include "common.names.fullname" .)) .Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} - -{{/* -Compile all warnings into a single message. 
-*/}} -{{- define "websub-consolidator.validateValues" -}} -{{- $messages := list -}} -{{- $messages := append $messages (include "websub-consolidator.validateValues.foo" .) -}} -{{- $messages := append $messages (include "websub-consolidator.validateValues.bar" .) -}} -{{- $messages := without $messages "" -}} -{{- $message := join "\n" $messages -}} - -{{- if $message -}} -{{- printf "\nVALUES VALIDATION:\n%s" $message -}} -{{- end -}} -{{- end -}} - -{{/* -Return podAnnotations -*/}} -{{- define "websub-consolidator.podAnnotations" -}} -{{- if .Values.podAnnotations }} -{{ include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) }} -{{- end }} -{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} -{{ include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) }} -{{- end }} -{{- end -}} - - diff --git a/helm/websub-consolidator/templates/clusterrole.yaml b/helm/websub-consolidator/templates/clusterrole.yaml deleted file mode 100644 index 7ae563c..0000000 --- a/helm/websub-consolidator/templates/clusterrole.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} -kind: ClusterRole -metadata: - name: {{ include "common.names.fullname" . }}-{{ .Release.Namespace }} - namespace: {{ .Release.Namespace | quote }} - labels: {{- include "common.labels.standard" . | nindent 4 }} - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} - {{- end }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -rules: - - apiGroups: - - "*" - resources: - - endpoints - - pods - - nodes - - services - verbs: - - get - - list - diff --git a/helm/websub-consolidator/templates/clusterrolebinding.yaml b/helm/websub-consolidator/templates/clusterrolebinding.yaml deleted file mode 100644 index cb8ef67..0000000 --- a/helm/websub-consolidator/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,19 +0,0 @@ -kind: ClusterRoleBinding -apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} -metadata: - labels: {{- include "common.labels.standard" . | nindent 4 }} - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "common.names.fullname" . }}-{{ .Release.Namespace }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "common.names.fullname" . }}-{{ .Release.Namespace }} -subjects: - - kind: ServiceAccount - name: {{ template "websub-consolidator.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} diff --git a/helm/websub-consolidator/templates/deployment.yaml b/helm/websub-consolidator/templates/deployment.yaml deleted file mode 100644 index e083cde..0000000 --- a/helm/websub-consolidator/templates/deployment.yaml +++ /dev/null @@ -1,131 +0,0 @@ -apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} -kind: Deployment -metadata: - labels: {{- include "common.labels.standard" . 
| nindent 4 }} - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "common.names.fullname" . }} - annotations: - {{- if .Values.commonAnnotations }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -spec: - replicas: {{ .Values.replicaCount }} - {{- if .Values.updateStrategy }} - strategy: {{- toYaml .Values.updateStrategy | nindent 4 }} - {{- end }} - selector: - matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} - template: - metadata: - annotations: - {{- if or .Values.podAnnotations .Values.metrics.enabled }} - {{- include "websub-consolidator.podAnnotations" . | nindent 8 }} - {{- end }} - - labels: {{- include "common.labels.standard" . | nindent 8 }} - {{- if .Values.podLabels }} - {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} - {{- end }} - spec: - serviceAccountName: {{ template "websub-consolidator.serviceAccountName" . }} - {{- include "websub-consolidator.imagePullSecrets" . | nindent 6 }} - {{- if .Values.hostAliases }} - hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.affinity }} - affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.affinity "context" $) | nindent 8 }} - {{- else }} - affinity: - podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "context" $) | nindent 10 }} - podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "context" $) | nindent 10 }} - nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} - {{- end }} - {{- if .Values.nodeSelector }} - nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.nodeSelector "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.tolerations }} - tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" .) | nindent 8 }} - {{- end }} - {{- if .Values.priorityClassName }} - priorityClassName: {{ .Values.priorityClassName | quote }} - {{- end }} - {{- if .Values.podSecurityContext.enabled }} - securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} - {{- end }} - initContainers: - {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} - - name: volume-permissions - image: {{ include "websub-consolidator.volumePermissions.image" . }} - imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} - command: - - %%commands%% - securityContext: - runAsUser: 0 - {{- if .Values.volumePermissions.resources }} - resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} - {{- end }} - volumeMounts: - - name: foo - mountPath: bar - {{- end }} - {{- if .Values.initContainers }} - {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} - {{- end }} - containers: - - name: websub-consolidator - image: {{ template "websub-consolidator.image" . 
}} - imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- if .Values.lifecycleHooks }} - lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} - {{- end }} - {{- if .Values.containerSecurityContext.enabled }} - securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} - {{- end }} - {{- if .Values.command }} - command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} - {{- end }} - {{- if .Values.args }} - args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} - {{- end }} - env: - - name: container_user - value: {{ .Values.containerSecurityContext.runAsUser }} - - name: JDK_JAVA_OPTIONS - value: {{ .Values.additionalResources.javaOpts }} - {{- if .Values.extraEnvVars }} - {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} - {{- end }} - envFrom: - {{- if .Values.extraEnvVarsCM }} - {{- range .Values.extraEnvVarsCM }} - - configMapRef: - name: {{ . }} - {{- end }} - {{- end }} - {{- if .Values.extraEnvVarsSecret }} - - secretRef: - name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} - {{- end }} - {{- if .Values.resources }} - resources: {{- toYaml .Values.resources | nindent 12 }} - {{- end }} - {{- if .Values.startupProbe.enabled }} - startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} - {{- else if .Values.customStartupProbe }} - startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} - {{- end }} - {{- if .Values.livenessProbe.enabled }} - livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled") "context" $) | nindent 12 }} - {{- else if .Values.customLivenessProbe }} - livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} - {{- end }} - {{- if .Values.readinessProbe.enabled }} - readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled") "context" $) | nindent 12 }} - {{- else if .Values.customReadinessProbe }} - readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} - {{- end }} - {{- if .Values.sidecars }} - {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $) | nindent 8 }} - {{- end }} diff --git a/helm/websub-consolidator/templates/service-account.yaml b/helm/websub-consolidator/templates/service-account.yaml deleted file mode 100644 index 0a32e1f..0000000 --- a/helm/websub-consolidator/templates/service-account.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: {{- include "common.labels.standard" . | nindent 4 }} - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "websub-consolidator.serviceAccountName" . 
}} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} - namespace: {{ .Release.Namespace }} diff --git a/helm/websub-consolidator/templates/service.yaml b/helm/websub-consolidator/templates/service.yaml deleted file mode 100644 index 1425023..0000000 --- a/helm/websub-consolidator/templates/service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: {{- include "common.labels.standard" . | nindent 4 }} - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "common.names.fullname" . }} - annotations: - {{- if .Values.commonAnnotations }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -spec: - type: {{ .Values.service.type }} - {{- if (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) }} - externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} - {{- end }} - {{ if eq .Values.service.type "LoadBalancer" }} - loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }} - {{ end }} - {{- if (and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP))) }} - loadBalancerIP: {{ .Values.service.loadBalancerIP }} - {{- end }} - ports: - - port: {{ .Values.service.port }} - protocol: TCP - targetPort: {{ .Values.websub_consolidator.port }} - selector: {{- include "common.labels.matchLabels" . | nindent 4 }} diff --git a/helm/websub-consolidator/templates/servicemonitor.yaml b/helm/websub-consolidator/templates/servicemonitor.yaml deleted file mode 100644 index 15f48fd..0000000 --- a/helm/websub-consolidator/templates/servicemonitor.yaml +++ /dev/null @@ -1,36 +0,0 @@ -{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "common.names.fullname" . }} - {{- if .Values.metrics.serviceMonitor.namespace }} - namespace: {{ .Values.metrics.serviceMonitor.namespace }} - {{- else }} - namespace: {{ .Release.Namespace | quote }} - {{- end }} - labels: {{- include "common.labels.standard" . | nindent 4 }} - {{- if .Values.metrics.serviceMonitor.additionalLabels }} - {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} - {{- end }} -spec: - endpoints: - - targetPort: {{ .Values.springServicePort }} - path: {{ .Values.metrics.endpointPath }} - {{- if .Values.metrics.serviceMonitor.interval }} - interval: {{ .Values.metrics.serviceMonitor.interval }} - {{- end }} - {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} - scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} - {{- end }} - {{- if .Values.metrics.serviceMonitor.honorLabels }} - honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} - {{- end }} - {{- if .Values.metrics.serviceMonitor.relabellings }} - metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }} - {{- end }} - namespaceSelector: - matchNames: - - {{ .Release.Namespace | quote }} - selector: - matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} -{{- end }} diff --git a/helm/websub-consolidator/values.yaml b/helm/websub-consolidator/values.yaml deleted file mode 100644 index 439bfd0..0000000 --- a/helm/websub-consolidator/values.yaml +++ /dev/null @@ -1,416 +0,0 @@ -## Global Docker image parameters -## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry and imagePullSecrets -## -# global: -# imageRegistry: myRegistryName -# imagePullSecrets: -# - myRegistryKeySecretName -# storageClass: myStorageClass - -## Add labels to all the deployed resources -## -commonLabels: - app.kubernetes.io/component: mosip - -## Add annotations to all the deployed resources -## -commonAnnotations: {} - -## Kubernetes Cluster Domain -## -clusterDomain: cluster.local - -## Extra objects to deploy (value evaluated as a template) -## -extraDeploy: [] - -## Number of nodes -## -replicaCount: 1 - -service: - type: ClusterIP - port: 80 - ## loadBalancerIP for the SuiteCRM Service (optional, cloud specific) - ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer - ## - ## loadBalancerIP: - ## - ## nodePorts: - ## http: - ## https: - ## - - nodePorts: - http: "" - https: "" - ## Enable client source IP preservation - ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - -image: - registry: docker.io - repository: mosipid/consolidator-websub-service - tag: 1.2.0.1 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - -## Configure extra options for liveness and readiness probes -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes -## -## TODO: Ports here are not correct. Update them when available -startupProbe: - enabled: true - httpGet: - path: /consolidator/actuator/health - port: 9192 - initialDelaySeconds: 0 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 30 - successThreshold: 1 - -livenessProbe: - enabled: true - httpGet: - path: /consolidator/actuator/health - port: 9192 - initialDelaySeconds: 20 - periodSeconds: 30 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - -readinessProbe: - enabled: true - httpGet: - path: /consolidator/actuator/health - port: 9192 - initialDelaySeconds: 0 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - -## -# existingConfigmap: - -## Command and args for running the container (set to default if not set). Use array form -## -command: [] -args: [] - -## Deployment pod host aliases -## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ -## -hostAliases: [] - -## ref: http://kubernetes.io/docs/user-guide/compute-resources/ -## -resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. 
If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 200m - memory: 1500Mi - requests: - cpu: 200m - memory: 500Mi - -additionalResources: - ## Specify any JAVA_OPTS string here. These typically will be specified in conjunction with above resources - ## Example: java_opts: "-Xms500M -Xmx500M" - javaOpts: "-Xms750M -Xmx750M" - -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container -## Clamav container already runs as 'mosip' user, so we may not need to enable this -containerSecurityContext: - enabled: false - runAsUser: mosip - runAsNonRoot: true - -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod -## -podSecurityContext: - enabled: false - fsGroup: 1001 - -## Pod affinity preset -## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity -## Allowed values: soft, hard -## -podAffinityPreset: "" - -## Pod anti-affinity preset -## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity -## Allowed values: soft, hard -## -podAntiAffinityPreset: soft - -## Node affinity preset -## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity -## Allowed values: soft, hard -## -nodeAffinityPreset: - ## Node affinity type - ## Allowed values: soft, hard - ## - type: "" - ## Node label key to match - ## E.g. - ## key: "kubernetes.io/e2e-az-name" - ## - key: "" - ## Node label values to match - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - -## Affinity for pod assignment. Evaluated as a template. -## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## -affinity: {} - -## Node labels for pod assignment. Evaluated as a template. -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -## -nodeSelector: {} - -## Tolerations for pod assignment. Evaluated as a template. -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## -tolerations: [] - -## Pod extra labels -## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ -## -podLabels: {} - -## Annotations for server pods. -## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ -## -podAnnotations: {} - -## pods' priority. -## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ -## -# priorityClassName: "" - -## lifecycleHooks for the container to automate configuration before or after startup. -## -lifecycleHooks: {} - -## Custom Liveness probes for -## -customLivenessProbe: {} - -## Custom Rediness probes -## -customReadinessProbe: {} - -## Update strategy - only really applicable for deployments with RWO PVs attached -## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the -## PV, and the "incoming" pod can never start. 
Changing the strategy to "Recreate" will -## terminate the single previous pod, so that the new, incoming pod can attach to the PV -## -updateStrategy: - type: RollingUpdate - -## Additional environment variables to set -## Example: -## extraEnvVars: -## - name: FOO -## value: "bar" -## -extraEnvVars: [] - -## ConfigMap with extra environment variables that used -## -extraEnvVarsCM: - - global - - config-server-share - -## Secret with extra environment variables -## -extraEnvVarsSecret: - -## Extra volumes to add to the deployment -## -extraVolumes: [] - -## Extra volume mounts to add to the container -## -extraVolumeMounts: [] - -## Add init containers to the pods. -## Example: -## initContainers: -## - name: your-image-name -## image: your-image -## imagePullPolicy: Always -## ports: -## - name: portname -## containerPort: 1234 -## -initContainers: {} - -## Add sidecars to the pods. -## Example: -## sidecars: -## - name: your-image-name -## image: your-image -## imagePullPolicy: Always -## ports: -## - name: portname -## containerPort: 1234 -## -sidecars: {} - -persistence: - enabled: false - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack). - ## - # storageClass: "-" - ## - ## If you want to reuse an existing claim, you can pass the name of the PVC using - ## the existingClaim variable - # existingClaim: your-claim - ## ReadWriteMany not supported by AWS gp2 - storageClass: - accessModes: - - ReadWriteOnce - size: 10M - existingClaim: - # Dir where config and keys are written inside container - mountDir: - -## Init containers parameters: -## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. -## -volumePermissions: - enabled: false - image: - registry: docker.io - repository: bitnami/bitnami-shell - tag: "10" - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - pullSecrets: [] - ## - myRegistryKeySecretName - ## Init containers' resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: - ## We usually recommend not to specify default resources and to leave this as a conscious - ## choice for the user. This also increases chances charts run on environments with little - ## resources, such as Minikube. If you do want to specify resources, uncomment the following - ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. - ## - limits: {} - ## cpu: 100m - ## memory: 128Mi - ## - requests: {} - ## cpu: 100m - ## memory: 128Mi - ## - -## Specifies whether RBAC resources should be created -## -rbac: - create: true - -## Specifies whether a ServiceAccount should be created -## -serviceAccount: - create: true - ## The name of the ServiceAccount to use. 
- ## If not set and create is true, a name is generated using the fullname template - ## - name: - -## Prometheus Metrics -## -## TODO: Enable metrics when prometheus URL is available -metrics: - enabled: false - ## Prometheus pod annotations - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: - prometheus.io/scrape: "true" - - endpointPath: /v1/websub-consolidator-consolidator/actuator/prometheus - - ## Prometheus Service Monitor - ## ref: https://github.com/coreos/prometheus-operator - ## - serviceMonitor: - ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry - ## - enabled: true - ## Specify the namespace in which the serviceMonitor resource will be created - ## - # namespace: "" - ## Specify the interval at which metrics should be scraped - ## - interval: 10s - ## Specify the timeout after which the scrape is ended - ## - # scrapeTimeout: 30s - ## Specify Metric Relabellings to add to the scrape endpoint - ## - # relabellings: - ## Specify honorLabels parameter to add the scrape endpoint - ## - honorLabels: false - ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec - ## - additionalLabels: {} - - ## Custom PrometheusRule to be defined - ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart - ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions - ## - prometheusRule: - enabled: false - additionalLabels: {} - namespace: '' - ## List of rules, used as template by Helm. - ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html - # rules: - # - alert: RabbitmqDown - # expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0 - # for: 5m - # labels: - # severity: error - rules: [] - -# NOTE: No one connects to this service, so both service and virtualservice don't exist for this service. - -websub_consolidator: - port: 9192 diff --git a/helm/websub/.gitignore b/helm/websub/.gitignore deleted file mode 100644 index b3c94bf..0000000 --- a/helm/websub/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -charts/ -Charts.lock diff --git a/helm/websub/.helmignore b/helm/websub/.helmignore deleted file mode 100644 index f0c1319..0000000 --- a/helm/websub/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/helm/websub/Chart.yaml b/helm/websub/Chart.yaml deleted file mode 100644 index ff5554f..0000000 --- a/helm/websub/Chart.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v2 -name: websub -description: A Helm chart for Websub service -type: application -version: 12.0.1-develop -appVersion: "" -dependencies: - - name: common - repository: https://charts.bitnami.com/bitnami - tags: - - bitnami-common - version: 1.x.x -home: https://mosip.io -keywords: - - mosip - - websub -maintainers: - - email: info@mosip.io - name: MOSIP diff --git a/helm/websub/README.md b/helm/websub/README.md deleted file mode 100644 index 367ee6f..0000000 --- a/helm/websub/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# Websub - -Helm chart for installing Websub module. The module is generally external facing for other partners to connect a receive events. - -## TL;DR - -```console -$ helm repo add mosip https://mosip.github.io -$ helm install my-release mosip/websub -``` -## Prerequisites - -- Kubernetes 1.12+ -- Helm 3.1.0 -- PV provisioner support in the underlying infrastructure -- ReadWriteMany volumes for deployment scaling - diff --git a/helm/websub/templates/NOTES.txt b/helm/websub/templates/NOTES.txt deleted file mode 100644 index 8b13789..0000000 --- a/helm/websub/templates/NOTES.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/helm/websub/templates/_helpers.tpl b/helm/websub/templates/_helpers.tpl deleted file mode 100644 index 0d046e2..0000000 --- a/helm/websub/templates/_helpers.tpl +++ /dev/null @@ -1,60 +0,0 @@ -{{/* -Return the proper image name -*/}} -{{- define "websub.image" -}} -{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} -{{- end -}} - -{{/* -Return the proper image name (for the init container volume-permissions image) -*/}} -{{- define "websub.volumePermissions.image" -}} -{{- include "common.images.image" ( dict "imageRoot" .Values.volumePermissions.image "global" .Values.global ) -}} -{{- end -}} - -{{/* -Return the proper Docker Image Registry Secret Names -*/}} -{{- define "websub.imagePullSecrets" -}} -{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}} -{{- end -}} - -{{/* -Create the name of the service account to use -*/}} -{{- define "websub.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (printf "%s" (include "common.names.fullname" .)) .Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} - -{{/* -Compile all warnings into a single message. -*/}} -{{- define "websub.validateValues" -}} -{{- $messages := list -}} -{{- $messages := append $messages (include "websub.validateValues.foo" .) -}} -{{- $messages := append $messages (include "websub.validateValues.bar" .) 
-}} -{{- $messages := without $messages "" -}} -{{- $message := join "\n" $messages -}} - -{{- if $message -}} -{{- printf "\nVALUES VALIDATION:\n%s" $message -}} -{{- end -}} -{{- end -}} - -{{/* -Return podAnnotations -*/}} -{{- define "websub.podAnnotations" -}} -{{- if .Values.podAnnotations }} -{{ include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) }} -{{- end }} -{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} -{{ include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) }} -{{- end }} -{{- end -}} - - diff --git a/helm/websub/templates/clusterrole.yaml b/helm/websub/templates/clusterrole.yaml deleted file mode 100644 index 7ae563c..0000000 --- a/helm/websub/templates/clusterrole.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} -kind: ClusterRole -metadata: - name: {{ include "common.names.fullname" . }}-{{ .Release.Namespace }} - namespace: {{ .Release.Namespace | quote }} - labels: {{- include "common.labels.standard" . | nindent 4 }} - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} - {{- end }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -rules: - - apiGroups: - - "*" - resources: - - endpoints - - pods - - nodes - - services - verbs: - - get - - list - diff --git a/helm/websub/templates/clusterrolebinding.yaml b/helm/websub/templates/clusterrolebinding.yaml deleted file mode 100644 index 806b153..0000000 --- a/helm/websub/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,19 +0,0 @@ -kind: ClusterRoleBinding -apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} -metadata: - labels: {{- include "common.labels.standard" . | nindent 4 }} - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "common.names.fullname" . }}-{{ .Release.Namespace }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "common.names.fullname" . }}-{{ .Release.Namespace }} -subjects: - - kind: ServiceAccount - name: {{ template "websub.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} diff --git a/helm/websub/templates/extra-list.yaml b/helm/websub/templates/extra-list.yaml deleted file mode 100644 index 9ac65f9..0000000 --- a/helm/websub/templates/extra-list.yaml +++ /dev/null @@ -1,4 +0,0 @@ -{{- range .Values.extraDeploy }} ---- -{{ include "common.tplvalues.render" (dict "value" . "context" $) }} -{{- end }} diff --git a/helm/websub/templates/virtualservice.yaml b/helm/websub/templates/virtualservice.yaml deleted file mode 100644 index 37c614d..0000000 --- a/helm/websub/templates/virtualservice.yaml +++ /dev/null @@ -1,32 +0,0 @@ -{{- if .Values.istio.enabled }} -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - name: {{ template "common.names.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: {{- include "common.labels.standard" . 
| nindent 4 }} - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -spec: - hosts: - - "*" - gateways: - - {{ .Values.istio.gateway }} - http: - - match: - - uri: - prefix: {{ .Values.istio.prefix }} - route: - - destination: - host: {{ template "common.names.fullname" . }} - port: - number: {{ .Values.service.port }} - headers: - request: - set: - x-forwarded-proto: https -{{- end }} diff --git a/helm/websub/values.yaml b/helm/websub/values.yaml deleted file mode 100644 index 7d49ef5..0000000 --- a/helm/websub/values.yaml +++ /dev/null @@ -1,453 +0,0 @@ -## Global Docker image parameters -## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry and imagePullSecrets -## -# global: -# imageRegistry: myRegistryName -# imagePullSecrets: -# - myRegistryKeySecretName -# storageClass: myStorageClass - -## Add labels to all the deployed resources -## -commonLabels: - app.kubernetes.io/component: mosip - -## Add annotations to all the deployed resources -## -commonAnnotations: {} - -## Kubernetes Cluster Domain -## -clusterDomain: cluster.local - -## Extra objects to deploy (value evaluated as a template) -## -extraDeploy: [] - -## Number of nodes -## -replicaCount: 1 - -service: - type: ClusterIP - port: 80 - ## loadBalancerIP for the SuiteCRM Service (optional, cloud specific) - ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer - ## - ## loadBalancerIP: - ## - ## nodePorts: - ## http: - ## https: - ## - - nodePorts: - http: "" - https: "" - ## Enable client source IP preservation - ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - -image: - registry: docker.io - repository: mosipid/websub-service - tag: 1.2.0.1 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - -## Configure extra options for liveness and readiness probes -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes -## -## TODO: Better probes to be added. 
-startupProbe: - enabled: true - httpGet: - path: /hub/actuator/health - port: 9191 - initialDelaySeconds: 0 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 30 - successThreshold: 1 - -livenessProbe: - enabled: true - httpGet: - path: /hub/actuator/health - port: 9191 - initialDelaySeconds: 20 - periodSeconds: 30 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - -readinessProbe: - enabled: true - httpGet: - path: /hub/actuator/health - port: 9191 - initialDelaySeconds: 0 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - -## -# existingConfigmap: - -## Command and args for running the container (set to default if not set). Use array form -## -command: [] -args: [] - -## Deployment pod host aliases -## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ -## -hostAliases: [] - -## ref: http://kubernetes.io/docs/user-guide/compute-resources/ -## -resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 2000m - memory: 8000Mi - requests: - cpu: 200m - memory: 1000Mi - -additionalResources: - ## Specify any JAVA_OPTS string here. These typically will be specified in conjunction with above resources - ## Example: java_opts: "-Xms500M -Xmx500M" - javaOpts: "-Xms2000M -Xmx2000M" - -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container -## Clamav container already runs as 'mosip' user, so we may not need to enable this -containerSecurityContext: - enabled: false - runAsUser: mosip - runAsNonRoot: true - -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod -## -podSecurityContext: - enabled: false - fsGroup: 1001 - -## Pod affinity preset -## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity -## Allowed values: soft, hard -## -podAffinityPreset: "" - -## Pod anti-affinity preset -## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity -## Allowed values: soft, hard -## -podAntiAffinityPreset: soft - -## Node affinity preset -## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity -## Allowed values: soft, hard -## -nodeAffinityPreset: - ## Node affinity type - ## Allowed values: soft, hard - ## - type: "" - ## Node label key to match - ## E.g. - ## key: "kubernetes.io/e2e-az-name" - ## - key: "" - ## Node label values to match - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - -## Affinity for pod assignment. Evaluated as a template. -## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## -affinity: {} - -## Node labels for pod assignment. Evaluated as a template. -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -## -nodeSelector: {} - -## Tolerations for pod assignment. Evaluated as a template. 
-## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## -tolerations: [] - -## Pod extra labels -## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ -## -podLabels: {} - -## Annotations for server pods. -## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ -## -podAnnotations: {} - -## pods' priority. -## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ -## -# priorityClassName: "" - -## lifecycleHooks for the container to automate configuration before or after startup. -## -lifecycleHooks: {} - -## Custom Liveness probes for -## -customLivenessProbe: {} - -## Custom Rediness probes -## -customReadinessProbe: {} - -## Update strategy - only really applicable for deployments with RWO PVs attached -## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the -## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will -## terminate the single previous pod, so that the new, incoming pod can attach to the PV -## -updateStrategy: - type: RollingUpdate - -## Additional environment variables to set -## Example: -## extraEnvVars: -## - name: FOO -## value: "bar" -## -extraEnvVars: [] - -## ConfigMap with extra environment variables that used -## -extraEnvVarsCM: - - global - - config-server-share - -## Secret with extra environment variables -## -extraEnvVarsSecret: - -## Extra volumes to add to the deployment -## -extraVolumes: [] - -## Extra volume mounts to add to the container -## -extraVolumeMounts: [] - -## Add init containers to the pods. -## Example: -## initContainers: -## - name: your-image-name -## image: your-image -## imagePullPolicy: Always -## ports: -## - name: portname -## containerPort: 1234 -## -initContainers: - - command: - - /bin/bash - - -c - - | - i=1; - n=5 - while(($i <= $n)); do - echo "Trying to access websub consolidator health url : $i" - curl_output="$( curl -w %{http_code} -o /dev/null -s $WEBSUB_CONSOLIDATOR_URL )"; - if ! [ "$curl_output" = "200" ]; then - echo "$WEBSUB_CONSOLIDATOR_URL failed with status code $curl_output" >> /dev/stderr; - if [ "$i" = "$n" ]; then - echo "EXITING;"; - exit 1; - fi - sleep 10; - ((i++)); - continue; - fi; - echo "Got response from websub consolidator url $curl_output" - break; - done - env: - - name: WEBSUB_CONSOLIDATOR_URL - value: 'http://websub-consolidator.websub:80/consolidator/actuator/health' - image: docker.io/mosipdev/alpine:latest - imagePullPolicy: Always - name: consolidator-health-chk - resources: {} - securityContext: - runAsUser: 0 - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - -## Add sidecars to the pods. -## Example: -## sidecars: -## - name: your-image-name -## image: your-image -## imagePullPolicy: Always -## ports: -## - name: portname -## containerPort: 1234 -## -sidecars: {} - -persistence: - enabled: false - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack). 
- ## - # storageClass: "-" - ## - ## If you want to reuse an existing claim, you can pass the name of the PVC using - ## the existingClaim variable - # existingClaim: your-claim - ## ReadWriteMany not supported by AWS gp2 - storageClass: - accessModes: - - ReadWriteOnce - size: 10M - existingClaim: - # Dir where config and keys are written inside container - mountDir: - -## Init containers parameters: -## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. -## -volumePermissions: - enabled: false - image: - registry: docker.io - repository: bitnami/bitnami-shell - tag: "10" - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - pullSecrets: [] - ## - myRegistryKeySecretName - ## Init containers' resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: - ## We usually recommend not to specify default resources and to leave this as a conscious - ## choice for the user. This also increases chances charts run on environments with little - ## resources, such as Minikube. If you do want to specify resources, uncomment the following - ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. - ## - limits: {} - ## cpu: 100m - ## memory: 128Mi - ## - requests: {} - ## cpu: 100m - ## memory: 128Mi - ## - -## Specifies whether RBAC resources should be created -## -rbac: - create: true - -## Specifies whether a ServiceAccount should be created -## -serviceAccount: - create: true - ## The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the fullname template - ## - name: - -## Prometheus Metrics -## -## TODO: Enable metrics when prometheus URL is available -metrics: - enabled: false - ## Prometheus pod annotations - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: - prometheus.io/scrape: "true" - - endpointPath: /v1/websub/actuator/prometheus - - ## Prometheus Service Monitor - ## ref: https://github.com/coreos/prometheus-operator - ## - serviceMonitor: - ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry - ## - enabled: true - ## Specify the namespace in which the serviceMonitor resource will be created - ## - # namespace: "" - ## Specify the interval at which metrics should be scraped - ## - interval: 10s - ## Specify the timeout after which the scrape is ended - ## - # scrapeTimeout: 30s - ## Specify Metric Relabellings to add to the scrape endpoint - ## - # relabellings: - ## Specify honorLabels parameter to add the scrape endpoint - ## - honorLabels: false - ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec - ## - additionalLabels: {} - - ## Custom PrometheusRule to be defined - ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart - ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions - ## - prometheusRule: - enabled: false - additionalLabels: {} - namespace: '' - ## List of rules, used as template by Helm. 
- ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html - # rules: - # - alert: RabbitmqDown - # expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0 - # for: 5m - # labels: - # severity: error - rules: [] - -## Websub needs public access. -istio: - enabled: true - gateway: istio-system/internal - prefix: /hub - -websub: - port: 9191 diff --git a/hub.Dockerfile b/hub.Dockerfile new file mode 100644 index 0000000..61794c2 --- /dev/null +++ b/hub.Dockerfile @@ -0,0 +1,48 @@ +FROM maven:3.9.9-eclipse-temurin-17 AS builder + +ARG ballerina_version=2201.9.0 +ARG ballerina_download_url=https://dist.ballerina.io/downloads/${ballerina_version}/ballerina-${ballerina_version}-swan-lake-linux-x64.deb + +RUN wget -q --show-progress ${ballerina_download_url} -O ballerina-linux-installer-x64.deb && \ + dpkg -i ballerina-linux-installer-x64.deb + +COPY kafka-admin-client /kafka-admin-client +RUN cd /kafka-admin-client && \ + mvn install -DskipTests -Dgpg.skip + +COPY hub /hub +RUN bal build /hub + +FROM eclipse-temurin:17.0.13_11-jre-alpine + +RUN apk add bash wget curl gettext + +ARG container_user_id=1001 +ARG container_group_id=1001 +ARG container_user=mosip +ARG container_group=mosip + +RUN addgroup ${container_group} -g ${container_group_id} && \ + adduser ${container_user} -G ${container_group} -u ${container_user_id} -s bash -D + +WORKDIR /home/${container_user} + +ARG SOURCE +ARG COMMIT_HASH +ARG COMMIT_ID +ARG BUILD_TIME +LABEL source=${SOURCE} +LABEL commit_hash=${COMMIT_HASH} +LABEL commit_id=${COMMIT_ID} +LABEL build_time=${BUILD_TIME} + +COPY --from=builder --chown=${container_user}:${container_group} /hub/target/bin/*.jar hub.jar + +ARG hub_config_url +ENV hub_config_file_url_env=${hub_config_url} + +USER ${container_user} +EXPOSE 9191 + +CMD wget -q --show-progress "${hub_config_file_url_env}" -O Config.toml; \ + java -jar -Xms256m -Xmx2048m ./hub.jar diff --git a/hub/Ballerina.toml b/hub/Ballerina.toml index 208642a..7dc7e19 100644 --- a/hub/Ballerina.toml +++ b/hub/Ballerina.toml @@ -1,14 +1,14 @@ [package] org = "mosip" name = "kafkaHub" -version = "1.2.0-patch.1" +version = "0.0.0-SNAPSHOT" [build-options] observabilityIncluded = true -[[platform.java11.dependency]] +[[platform.java17.dependency]] modules = ["inittopic"] groupId = "io.mosip" artifactId = "kafka-admin-client" -version = "1.2.0.1" -path = "kafka-admin-client/target/kafka-admin-client-1.2.0.1.jar" +version = "0.0.0-SNAPSHOT" +path = "../kafka-admin-client/target/kafka-admin-client-0.0.0-SNAPSHOT.jar" diff --git a/hub/Dockerfile b/hub/Dockerfile deleted file mode 100644 index d17724e..0000000 --- a/hub/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM openjdk:11 - -ARG SOURCE -ARG COMMIT_HASH -ARG COMMIT_ID -ARG BUILD_TIME -LABEL source=${SOURCE} -LABEL commit_hash=${COMMIT_HASH} -LABEL commit_id=${COMMIT_ID} -LABEL build_time=${BUILD_TIME} -ARG hub_config_url -ARG max_heap_size=2048m -ARG ballerina_download_url=https://dist.ballerina.io/downloads/2201.0.0/ballerina-2201.0.0-swan-lake-linux-x64.deb -ENV hub_config_file_url_env=${hub_config_url} -COPY ./target/bin/*.jar hub.jar -EXPOSE 9191 -RUN wget -q --show-progress ${ballerina_download_url} -O ballerina-linux-installer-x64.deb -RUN dpkg -i ballerina-linux-installer-x64.deb -#TODO Link to be parameterized instead of hardcoding -CMD wget -q --show-progress "${hub_config_file_url_env}" -O Config.toml;\ - java -jar -Xms256m -Xmx2048m ./hub.jar ;\ diff --git a/hub/hub_service.bal b/hub/hub_service.bal index 
8da7e1f..542de81 100644 --- a/hub/hub_service.bal +++ b/hub/hub_service.bal @@ -91,7 +91,7 @@ isolated function getTotalSpace(handle fileObj) returns int = @java:Method { 'class: "java.io.File" } external; -websubhub:Service hubService = @websubhub:ServiceConfig { +websubhub:Service hubService = @websubhub:ServiceConfig { webHookConfig: { retryConfig: { interval: config:INTENT_VERIFICATION_RETRY_INTERVAL, @@ -123,7 +123,7 @@ service object { lock { if registeredTopicsCache.hasKey(topicName) { log:printError("Topic has already registered with the Hub", topic = topicName); - return error websubhub:TopicRegistrationError("Topic has already registered with the Hub"); + return error websubhub:TopicRegistrationError("Topic has already registered with the Hub", statusCode = http:STATUS_OK); } log:printDebug("Registering topic", topic = topicName); error? persistingResult = persist:addRegsiteredTopic(message.cloneReadOnly()); @@ -154,7 +154,7 @@ service object { string topicName = util:sanitizeTopicName(message.topic); lock { if !registeredTopicsCache.hasKey(topicName) { - return error websubhub:TopicDeregistrationError("Topic has not been registered in the Hub"); + return error websubhub:TopicDeregistrationError("Topic has not been registered in the Hub", statusCode = http:STATUS_OK); } log:printInfo("Running topic de-registration", payload = message); error? persistingResult = persist:removeRegsiteredTopic(message.cloneReadOnly()); @@ -192,7 +192,7 @@ service object { string topicName = util:sanitizeTopicName(message.hubTopic); error? topicRegistrationFailed = self.createTopicIFNotExist(topicName, message.hubCallback); if (topicRegistrationFailed is error) { - return error websubhub:SubscriptionDeniedError(topicRegistrationFailed.message()); + return error websubhub:SubscriptionDeniedError(topicRegistrationFailed.message(), statusCode = http:STATUS_OK); } string subscriberId = util:generateSubscriberId(message.hubTopic, message.hubCallback); boolean subscriberAvailable = false; @@ -201,7 +201,7 @@ service object { } if subscriberAvailable { log:printError("Subscriber has already registered with the Hub", topic = topicName, callback = message.hubCallback); - return error websubhub:SubscriptionDeniedError("Subscriber has already registered with the Hub"); + return error websubhub:SubscriptionDeniedError("Subscriber has already registered with the Hub", statusCode = http:STATUS_OK); } else { string hubSecret = message.hubSecret; message.hubSecret = (crypto:hashSha256(hubSecret.toBytes())).toBase64(); @@ -226,7 +226,7 @@ service object { foreach int i in 0...15 { initialVector[i] = <byte> (check random:createIntInRange(0, 255)); } - byte[] cipherText = check crypto:encryptAesGcm(hubSecret.toBytes(), encryptionKeyInBytes, initialVector); + byte[] cipherText = check crypto:encryptAesGcm(hubSecret.toBytes(), encryptionKeyInBytes, initialVector, crypto:NONE); cipherText.push(...initialVector); message.hubSecret = config:ENCRYPTED_SECRET_PREFIX + cipherText.toBase64() + config:ENCRYPTED_SECRET_SUFFIX; } @@ -265,7 +265,7 @@ service object { topicAvailable = registeredTopicsCache.hasKey(topicName); } if !topicAvailable { - return error websubhub:UnsubscriptionDeniedError("Topic [" + message.hubTopic + "] is not registered with the Hub"); + return error websubhub:UnsubscriptionDeniedError("Topic [" + message.hubTopic + "] is not registered with the Hub", statusCode = http:STATUS_OK); } else { string subscriberId = util:generateSubscriberId(message.hubTopic, message.hubCallback); lock { @@ -273,7 +273,7 @@ service
object { } if !subscriberAvailable { return error websubhub:UnsubscriptionDeniedError("Could not find a valid subscriber for Topic [" - + message.hubTopic + "] and Callback [" + message.hubCallback + "]"); + + message.hubTopic + "] and Callback [" + message.hubCallback + "]", statusCode = http:STATUS_OK); } } log:printInfo("Validation done for an incoming Unsubscription request", payload = message); @@ -287,7 +287,7 @@ service object { var persistingResult = persist:removeSubscription(message.cloneReadOnly()); if (persistingResult is error) { log:printError("Error occurred while persisting the unsubscription ", err = persistingResult.message()); - } + } } # Publishes content to the hub. @@ -310,9 +310,9 @@ service object { string topicName = util:sanitizeTopicName(msg.hubTopic); error? topicIFNotExist = self.createTopicIFNotExist(topicName, "null"); if (topicIFNotExist is error) { - return error websubhub:UpdateMessageError(topicIFNotExist.message()); + return error websubhub:UpdateMessageError(topicIFNotExist.message(), statusCode = http:STATUS_OK); } - log:printDebug("Received publish message", topic = msg.hubTopic, message = msg.cloneReadOnly()); + log:printDebug("Received publish message", topic = msg.hubTopic, updateMessage = msg.cloneReadOnly()); error? errorResponse = persist:addUpdateMessage(topicName, msg); // TODO: remove this condition if errorResponse is websubhub:UpdateMessageError { @@ -320,7 +320,7 @@ service object { return errorResponse; } else if errorResponse is error { log:printError("Error occurred while publishing the content ", errorMessage = errorResponse.message(), topic = topicName); - return error websubhub:UpdateMessageError(errorResponse.message()); + return error websubhub:UpdateMessageError(errorResponse.message(), statusCode = http:STATUS_OK); } } @@ -347,4 +347,3 @@ service object { } }; - diff --git a/hub/modules/config/configurations.bal b/hub/modules/config/configurations.bal index 76893e6..664bbe4 100644 --- a/hub/modules/config/configurations.bal +++ b/hub/modules/config/configurations.bal @@ -19,6 +19,22 @@ import kafkaHub.util; # Flag to check whether to enable/disable security public configurable boolean SECURITY_ON = true; +# Security: JWT Issuer URL +public configurable string SECURITY_JWT_ISSUER = ""; + +# Security: JWKS URL for the configured issuer +public configurable string SECURITY_JWT_ISSUER_JWKS_URL = ""; + +# Security: Maximum age of JWKs before they are fetched again +public configurable decimal SECURITY_JWT_ISSUER_JWKS_MAX_AGE = 86400; + +# Security: Name of the JWT payload field that carries the user ID +public configurable string SECURITY_JWT_USERID_FIELD = "preferred_username"; + +# Security: Name of the JWT payload field that carries the list of roles +public configurable string SECURITY_JWT_ROLES_FIELD = "groups"; + + # Server ID is used to uniquely identify each server # Each server must have a unique ID public configurable string SERVER_ID = "server-1"; @@ -58,12 +74,6 @@ public configurable int MESSAGE_DELIVERY_COUNT = 3; # The message delivery timeout public configurable decimal MESSAGE_DELIVERY_TIMEOUT = 10; -# The base URL of IDP -public configurable string MOSIP_AUTH_BASE_URL = "https://host/"; - -# The token validation URL of IDP -public configurable string MOSIP_AUTH_VALIDATE_TOKEN_URL = "https://host/oauth2/token"; - # The disk space threshold (in bytes) public configurable int DISK_SPACE_THRESHOLD = 10485760; @@ -116,4 +126,4 @@ public configurable string HUB_SECRET_ENCRYPTION_KEY_FORMAT = "base64-encoded-by public configurable string
ENCRYPTED_SECRET_PREFIX = "cipher{"; # Suffix to the encrypted hubsecret for backward compatibility -public configurable string ENCRYPTED_SECRET_SUFFIX = "}"; \ No newline at end of file +public configurable string ENCRYPTED_SECRET_SUFFIX = "}"; diff --git a/hub/modules/security/security.bal b/hub/modules/security/security.bal index 4dd2ffd..dc4d6e4 100644 --- a/hub/modules/security/security.bal +++ b/hub/modules/security/security.bal @@ -15,16 +15,15 @@ // under the License. import ballerina/http; -import ballerina/regex; import ballerina/log; +import ballerina/jwt; + import kafkaHub.config; const string SUFFIX_GENERAL = "_GENERAL"; const string SUFFIX_ALL_INDIVIDUAL = "_ALL_INDIVIDUAL"; const string SUFFIX_INDIVIDUAL = "_INDIVIDUAL"; -final http:Client clientEP = check new (config:MOSIP_AUTH_BASE_URL); - # Authorize the subscriber. # # + headers - `http:Headers` of request @@ -33,11 +32,10 @@ public isolated function authorizeSubscriber(http:Headers headers, string topic) returns error? { string token = check getToken(headers); log:printDebug("getting token for Subscriber from request", topic = topic); - json response = check getValidatedTokenResponse(token); - string roles = (check response?.response.role).toString(); - string[] rolesArr = regex:split(roles, ","); - string userId = (check response?.response.userId).toString(); - log:printDebug("received response for subscriber from auth service", userId = userId, roles = roles, topic = topic); + jwt:Payload response = check getValidatedTokenPayload(token); + json[] rolesArr = (response.hasKey(config:SECURITY_JWT_ROLES_FIELD) && response[config:SECURITY_JWT_ROLES_FIELD] is json[]) ? <json[]> response[config:SECURITY_JWT_ROLES_FIELD] : []; + string userId = <string> response[config:SECURITY_JWT_USERID_FIELD]; + log:printDebug("received response for subscriber from auth service", userId = userId, roles = rolesArr, topic = topic); if (userId.startsWith(config:PARTNER_USER_ID_PREFIX)) { userId = userId.substring(config:PARTNER_USER_ID_PREFIX.length(), userId.length()); } @@ -57,11 +55,10 @@ public isolated function authorizePublisher(http:Headers headers, string topic) returns error? { string token = check getToken(headers); log:printDebug("got token for publisher from request", topic = topic); - json response = check getValidatedTokenResponse(token); - string roles = (check response?.response.role).toString(); - string userId = (check response?.response.userId).toString(); - log:printDebug("received response for publisher from auth service", userId = userId, roles = roles, topic = topic); + jwt:Payload response = check getValidatedTokenPayload(token); + json[] rolesArr = (response.hasKey(config:SECURITY_JWT_ROLES_FIELD) && response[config:SECURITY_JWT_ROLES_FIELD] is json[]) ? <json[]> response[config:SECURITY_JWT_ROLES_FIELD] : []; + string? userId = response.hasKey(config:SECURITY_JWT_USERID_FIELD) && response[config:SECURITY_JWT_USERID_FIELD] is string ? <string> response[config:SECURITY_JWT_USERID_FIELD] : null; + log:printDebug("received response for publisher from auth service", userId = userId, roles = rolesArr, topic = topic); string?
partnerID = buildPartnerId(topic); string rolePrefix = buildRolePrefix(topic, "PUBLISH_"); boolean authorized = isPublisherAuthorized(partnerID, rolePrefix, rolesArr); @@ -70,13 +67,12 @@ public isolated function authorizePublisher(http:Headers headers, string topic) } } -// Token is extracted from the cookies header which has the key `Authorization` +// Token is extracted from the Authorization header isolated function getToken(http:Headers headers) returns string|error { - string cookieHeader = check headers.getHeader("Cookie"); - string[] values = regex:split(cookieHeader, "; "); - foreach string value in values { - if value.startsWith("Authorization=") { - return regex:split(value, "=")[1]; + string|error authHeader = check headers.getHeader("Authorization"); + if !(authHeader is error) { + if authHeader.startsWith("Bearer") { + return authHeader.substring("Bearer".length()).trim(); } } return error("Authorization token cannot be found"); @@ -96,26 +92,34 @@ isolated function buildPartnerId(string topic) returns string? { if index is int { return topic.substring(0, index); } + return null; } -isolated function getValidatedTokenResponse(string token) returns json|error { - map<string> headerMap = { - "Cookie": "Authorization=".concat(token) +isolated function getValidatedTokenPayload(string token) returns jwt:Payload|jwt:Error { + jwt:ValidatorConfig validatorConfig = { + issuer: config:SECURITY_JWT_ISSUER, + signatureConfig: { + jwksConfig: { + url: config:SECURITY_JWT_ISSUER_JWKS_URL, + cacheConfig: { + defaultMaxAge: config:SECURITY_JWT_ISSUER_JWKS_MAX_AGE + } + } + } }; - json response = check clientEP->get(config:MOSIP_AUTH_VALIDATE_TOKEN_URL, headers = headerMap); - return response; + return jwt:validate(token, validatorConfig); } -isolated function isPublisherAuthorized(string? partnerID, string rolePrefix, string[] rolesArr) returns boolean { +isolated function isPublisherAuthorized(string? partnerID, string rolePrefix, json[] rolesArr) returns boolean { if partnerID is string { - foreach string role in rolesArr { - if role == rolePrefix.concat(SUFFIX_ALL_INDIVIDUAL) { + foreach json role in rolesArr { + if role.toString() == rolePrefix.concat(SUFFIX_ALL_INDIVIDUAL) { return true; } } } else { - foreach string role in rolesArr { - if role == rolePrefix.concat(SUFFIX_GENERAL) { + foreach json role in rolesArr { + if role.toString() == rolePrefix.concat(SUFFIX_GENERAL) { return true; } } @@ -123,17 +127,17 @@ isolated function isPublisherAuthorized(string? partnerID, string rolePrefix, st return false; } -isolated function isSubscriberAuthorized(string? partnerID, string rolePrefix, string[] rolesArr, string userId) +isolated function isSubscriberAuthorized(string?
partnerID, string rolePrefix, json[] rolesArr, string userId) returns boolean { if partnerID is string { - foreach string role in rolesArr { - if role == rolePrefix.concat(SUFFIX_INDIVIDUAL) && partnerID == userId { + foreach json role in rolesArr { + if role.toString() == rolePrefix.concat(SUFFIX_INDIVIDUAL) && partnerID == userId { return true; } } } else { - foreach string role in rolesArr { - if role == rolePrefix.concat(SUFFIX_GENERAL) { + foreach json role in rolesArr { + if role.toString() == rolePrefix.concat(SUFFIX_GENERAL) { return true; } } diff --git a/hub/start_hub.bal b/hub/start_hub.bal index 22d1e32..46eb7be 100644 --- a/hub/start_hub.bal +++ b/hub/start_hub.bal @@ -26,6 +26,7 @@ import kafkaHub.config; import kafkaHub.internal_topic_helper as internalTopicHelper; import ballerina/lang.array; import ballerina/crypto; +import ballerina/lang.runtime; isolated map<websubhub:TopicRegistration> registeredTopicsCache = {}; isolated map<websubhub:VerifiedSubscription> subscribersCache = {}; @@ -50,18 +51,20 @@ public function main() returns error? { } // Initialize the Hub - _ = @strand {thread: "any"} start syncRegsisteredTopicsCache(); + syncRegsisteredTopicsCache(); _ = @strand {thread: "any"} start syncSubscribersCache(); // Start the Hub + log:printInfo("Hub initialization done and starting the hub..."); + http:Listener httpListener = check new (config:HUB_PORT); check httpListener.attach(healthCheckService, "hub/actuator/health"); websubhub:Listener hubListener = check new (httpListener); check hubListener.attach(hubService, "hub"); - check hubListener.'start(); + websubhub:Error? websubError = check hubListener.'start(); + runtime:registerListener(hubListener); } - function validateConfigs() returns boolean|error { if (config:HUB_SECRET_ENCRYPTION_KEY_FORMAT.equalsIgnoreCaseAscii("base64-encoded-bytes")){ byte[]|error decodedEncryptionKey = array:fromBase64(config:HUB_SECRET_ENCRYPTION_KEY); @@ -92,9 +95,9 @@ function syncRegsisteredTopicsCache() { } function getPersistedTopics() returns websubhub:TopicRegistration[]|error? { - kafka:ConsumerRecord[] records = check conn:registeredTopicsConsumer->poll(config:POLLING_INTERVAL); + kafka:BytesConsumerRecord[] records = check conn:registeredTopicsConsumer->poll(config:POLLING_INTERVAL); if records.length() > 0 { - kafka:ConsumerRecord lastRecord = records.pop(); + kafka:BytesConsumerRecord lastRecord = records.pop(); string|error lastPersistedData = string:fromBytes(lastRecord.value); if lastPersistedData is string { return deSerializeTopicsMessage(lastPersistedData); @@ -148,9 +151,9 @@ function syncSubscribersCache() { } function getPersistedSubscribers() returns websubhub:VerifiedSubscription[]|error?
{ - kafka:ConsumerRecord[] records = check conn:subscribersConsumer->poll(config:POLLING_INTERVAL); + kafka:BytesConsumerRecord[] records = check conn:subscribersConsumer->poll(config:POLLING_INTERVAL); if records.length() > 0 { - kafka:ConsumerRecord lastRecord = records.pop(); + kafka:BytesConsumerRecord lastRecord = records.pop(); string|error lastPersistedData = string:fromBytes(lastRecord.value); if lastPersistedData is string { return deSerializeSubscribersMessage(lastPersistedData); @@ -204,7 +207,7 @@ function startMissingSubscribers(websubhub:VerifiedSubscription[] persistedSubsc byte[] cipher = ivAppendedCipherText.slice(0, cipherLength-16); byte[] iv = ivAppendedCipherText.slice(cipherLength-16, cipherLength); string encryptionKey = config:HUB_SECRET_ENCRYPTION_KEY; - byte[] plainText = check crypto:decryptAesGcm(cipher, encryptionKey.toBytes(), iv); + byte[] plainText = check crypto:decryptAesGcm(cipher, encryptionKey.toBytes(), iv, crypto:NONE); subscriber.hubSecret = check string:fromBytes(plainText); log:printInfo("Decrypted the hubSecret", topic = subscriber.hubTopic); } @@ -226,7 +229,7 @@ isolated function pollForNewUpdates(websubhub:HubClient clientEp, kafka:Consumer do { log:printInfo("pollForNewUpdates operation - Thread started ", topic = topicName, callback = callback); while true { - kafka:ConsumerRecord[] records = check consumerEp->poll(config:POLLING_INTERVAL); + kafka:BytesConsumerRecord[] records = check consumerEp->poll(config:POLLING_INTERVAL); log:printDebug("pollForNewUpdates operation - records pull ", length = records.length(), subscriberId = subscriberId); if !isValidConsumer(topicName, subscriberId, callback) { fail error(string `Subscriber with Id ${subscriberId} or topic ${topicName} and ${callback} is invalid`); @@ -263,12 +266,12 @@ isolated function isValidConsumer(string topicName, string subscriberId, string return topicAvailable && subscriberAvailable; } -isolated function notifySubscribers(kafka:ConsumerRecord[] records, websubhub:HubClient clientEp, kafka:Consumer consumerEp, string topic, string callback) returns error? { - foreach kafka:ConsumerRecord kafkaRecord in records { +isolated function notifySubscribers(kafka:BytesConsumerRecord[] records, websubhub:HubClient clientEp, kafka:Consumer consumerEp, string topic, string callback) returns error? 
{ + foreach kafka:BytesConsumerRecord kafkaRecord in records { websubhub:ContentDistributionMessage|error message = deSerializeKafkaRecord(kafkaRecord); if (message is websubhub:ContentDistributionMessage) { - log:printDebug("notifying subscriber with message", message = message.cloneReadOnly(), topic = topic, callback = callback, offset = kafkaRecord.offset); + log:printDebug("notifying subscriber with message", contentDistributionMessage = message.cloneReadOnly(), topic = topic, callback = callback, offset = kafkaRecord.offset); websubhub:ContentDistributionSuccess|websubhub:SubscriptionDeletedError|websubhub:Error response = clientEp->notifyContentDistribution(message); if (response is websubhub:SubscriptionDeletedError) { log:printError("Subscription Deletion Error occurred while sending notification to subscriber ", topic = topic, callback = callback, offset = kafkaRecord.offset,response = response.cloneReadOnly().toString()); @@ -295,7 +298,7 @@ isolated function notifySubscribers(kafka:ConsumerRecord[] records, websubhub:Hu } } -isolated function deSerializeKafkaRecord(kafka:ConsumerRecord kafkaRecord) returns websubhub:ContentDistributionMessage|error { +isolated function deSerializeKafkaRecord(kafka:BytesConsumerRecord kafkaRecord) returns websubhub:ContentDistributionMessage|error { byte[] content = kafkaRecord.value; string|error message = check string:fromBytes(content); if (message is string) { diff --git a/kafka-admin-client/pom.xml b/kafka-admin-client/pom.xml index cdfa4cd..6e44bb6 100644 --- a/kafka-admin-client/pom.xml +++ b/kafka-admin-client/pom.xml @@ -1,17 +1,16 @@ + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 io.mosip kafka-admin-client - 1.2.0.1 + 0.0.0-SNAPSHOT jar kafka-admin-client http://maven.apache.org Project of MOSIP kafka admin client - + scm:git:git://github.com/mosip/websub.git scm:git:ssh://github.com:mosip/websub.git @@ -35,21 +34,46 @@ UTF-8 - 11 - 11 + 17 + 17 3.8.0 + 3.5.2 + 4.13.2 + 3.11.2 + 2.0.9 + 2.22.0 + 0.8.11 + 3.7.0.1746 org.apache.kafka kafka-clients - 2.8.0 + ${apache.kafka.version} junit junit - 3.8.1 + ${junit.version} + test + + + org.mockito + mockito-core + ${mockito.version} + test + + + org.powermock + powermock-module-junit4 + ${powermock.version} + test + + + org.powermock + powermock-api-mockito2 + ${powermock.version} test @@ -69,34 +93,65 @@ + false true - ${project.build.outputDirectory}/git.properties + + ${project.build.outputDirectory}/git.properties ^git.build.(time|version)$ ^git.commit.id.(abbrev|full)$ full ${project.basedir}/.git - - - org.apache.maven.plugins - maven-javadoc-plugin - 3.2.0 - - - attach-javadocs - - jar - - - - - none - - + org.apache.maven.plugins + maven-surefire-plugin + ${maven.surefire.plugin.version} + + ${skipTests} + false + + ${argLine} --add-opens java.xml/jdk.xml.internal=ALL-UNNAMED --add-opens java.base/java.lang=ALL-UNNAMED --add-opens java.base/java.util=ALL-UNNAMED --illegal-access=permit + + + + + org.jacoco + jacoco-maven-plugin + ${maven.jacoco.version} + + + + prepare-agent + + + + report + prepare-package + + report + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + 3.2.0 + + + attach-javadocs + + jar + + + + + none + + org.apache.maven.plugins maven-compiler-plugin @@ -107,39 +162,39 @@ - org.apache.maven.plugins - maven-source-plugin - true - 2.2.1 - - - attach-sources - - jar-no-fork - - - - - - org.apache.maven.plugins - maven-gpg-plugin - 1.5 - - - 
sign-artifacts - verify - - sign - - - - --pinentry-mode - loopback - - - - - + org.apache.maven.plugins + maven-source-plugin + true + 2.2.1 + + + attach-sources + + jar-no-fork + + + + + + org.apache.maven.plugins + maven-gpg-plugin + 1.5 + + + sign-artifacts + verify + + sign + + + + --pinentry-mode + loopback + + + + + org.apache.maven.plugins maven-assembly-plugin @@ -173,7 +228,8 @@ - + @@ -182,4 +238,35 @@ + + + sonar + + . + src/main/java/**,src/main/resources/** + ${sonar.coverage.exclusions} + https://sonarcloud.io + + + false + + + + + org.sonarsource.scanner.maven + sonar-maven-plugin + ${maven.sonar.plugin.version} + + + verify + + sonar + + + + + + + + diff --git a/kafka-admin-client/src/main/java/io/mosip/kafkaadminclient/MosipKafkaAdminClient.java b/kafka-admin-client/src/main/java/io/mosip/kafkaadminclient/MosipKafkaAdminClient.java index f3fa2bb..974578e 100644 --- a/kafka-admin-client/src/main/java/io/mosip/kafkaadminclient/MosipKafkaAdminClient.java +++ b/kafka-admin-client/src/main/java/io/mosip/kafkaadminclient/MosipKafkaAdminClient.java @@ -7,8 +7,6 @@ import java.util.Optional; import java.util.Properties; import java.util.Set; -import java.util.concurrent.ExecutionException; - import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.clients.admin.AdminClientConfig; import org.apache.kafka.clients.admin.CreateTopicsResult; @@ -17,8 +15,6 @@ import org.apache.kafka.clients.admin.NewTopic; import org.apache.kafka.clients.admin.TopicDescription; import org.apache.kafka.common.KafkaFuture; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public class MosipKafkaAdminClient { @@ -37,15 +33,14 @@ public void createTopic(String topicName) throws Exception { // get the async result for the new topic creation KafkaFuture<Void> future = result.values().get(topicName); // call get() to block until topic creation has completed or failed - future.get(); + future.get(); } } - public boolean isTopicsPresent(String topics) throws Exception { - List<String> topicsList = Arrays.asList(topics.split(",")); - Set<String> kafkaTopics = getAllTopics(); - return topicsList.stream().allMatch(kafkaTopics::contains); + List<String> topicsList = Arrays.asList(topics.split(",")); + Set<String> kafkaTopics = getAllTopics(); + return topicsList.stream().allMatch(kafkaTopics::contains); } public Set<String> getAllTopics() throws Exception { @@ -56,7 +51,6 @@ public Set<String> getAllTopics() throws Exception { } } - public Map<String, TopicDescription> describeTopic(String topic) throws Exception { try (Admin admin = Admin.create(properties)) { DescribeTopicsResult result = admin.describeTopics(Collections.singleton(topic)); diff --git a/kafka-admin-client/src/test/java/io/mosip/kafkaadminclient/MosipKafkaAdminClientTest.java b/kafka-admin-client/src/test/java/io/mosip/kafkaadminclient/MosipKafkaAdminClientTest.java new file mode 100644 index 0000000..3769ddb --- /dev/null +++ b/kafka-admin-client/src/test/java/io/mosip/kafkaadminclient/MosipKafkaAdminClientTest.java @@ -0,0 +1,175 @@ +package io.mosip.kafkaadminclient; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.ExecutionException; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.CreateTopicsResult; +import org.apache.kafka.clients.admin.DescribeTopicsResult; +import org.apache.kafka.clients.admin.KafkaAdminClient; +import
org.apache.kafka.clients.admin.ListTopicsResult; +import org.apache.kafka.clients.admin.TopicDescription; +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.KafkaFuture; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +@RunWith(PowerMockRunner.class) +@PrepareForTest({ Admin.class }) +public class MosipKafkaAdminClientTest { + + @SuppressWarnings("unchecked") + @Test + public void isTopicsPresentTest() throws Exception { + PowerMockito.mockStatic(Admin.class); + KafkaAdminClient adminMock = mock(KafkaAdminClient.class); + PowerMockito.when(Admin.create(any(Properties.class))).thenReturn(adminMock); + ListTopicsResult listTopicsResult = Mockito.mock(ListTopicsResult.class); + KafkaFuture<Set<String>> kafkaFuture = Mockito.mock(KafkaFuture.class); + Set<String> sampleStrings = new HashSet<>(); + sampleStrings.add("registered-websub-topics"); + sampleStrings.add("consolidated-websub-topics"); + Mockito.when(kafkaFuture.get()).thenReturn(sampleStrings); + Mockito.when(listTopicsResult.names()).thenReturn(kafkaFuture); + Mockito.when(adminMock.listTopics(Mockito.any())).thenReturn(listTopicsResult); + MosipKafkaAdminClient mosipKafkaAdminClient = new MosipKafkaAdminClient( + "kafka-0.kafka-headless.default.svc.cluster.local:9092"); + Assert.assertTrue(mosipKafkaAdminClient.isTopicsPresent("registered-websub-topics,consolidated-websub-topics")); + } + + @SuppressWarnings("unchecked") + @Test + public void isTopicsPresentTestWithOneMatch() throws Exception { + PowerMockito.mockStatic(Admin.class); + KafkaAdminClient adminMock = mock(KafkaAdminClient.class); + PowerMockito.when(Admin.create(any(Properties.class))).thenReturn(adminMock); + ListTopicsResult listTopicsResult = Mockito.mock(ListTopicsResult.class); + KafkaFuture<Set<String>> kafkaFuture = Mockito.mock(KafkaFuture.class); + Set<String> sampleStrings = new HashSet<>(); + sampleStrings.add("registered-websub-topics"); + sampleStrings.add("consolidated-websub-topics"); + Mockito.when(kafkaFuture.get()).thenReturn(sampleStrings); + Mockito.when(listTopicsResult.names()).thenReturn(kafkaFuture); + Mockito.when(adminMock.listTopics(Mockito.any())).thenReturn(listTopicsResult); + MosipKafkaAdminClient mosipKafkaAdminClient = new MosipKafkaAdminClient( + "kafka-0.kafka-headless.default.svc.cluster.local:9092"); + Assert.assertTrue(mosipKafkaAdminClient.isTopicsPresent("registered-websub-topics")); + } + + @SuppressWarnings("unchecked") + @Test + public void isTopicsPresentTestWithNoMatch() throws Exception { + PowerMockito.mockStatic(Admin.class); + KafkaAdminClient adminMock = mock(KafkaAdminClient.class); + PowerMockito.when(Admin.create(any(Properties.class))).thenReturn(adminMock); + ListTopicsResult listTopicsResult = Mockito.mock(ListTopicsResult.class); + KafkaFuture<Set<String>> kafkaFuture = Mockito.mock(KafkaFuture.class); + Set<String> sampleStrings = new HashSet<>(); + sampleStrings.add("test-topic"); + Mockito.when(kafkaFuture.get()).thenReturn(sampleStrings); + Mockito.when(listTopicsResult.names()).thenReturn(kafkaFuture); + Mockito.when(adminMock.listTopics(Mockito.any())).thenReturn(listTopicsResult); + MosipKafkaAdminClient mosipKafkaAdminClient = new MosipKafkaAdminClient( + "kafka-0.kafka-headless.default.svc.cluster.local:9092"); + Assert.assertFalse(
mosipKafkaAdminClient.isTopicsPresent("registered-websub-topics,consolidated-websub-topics")); + } + + @SuppressWarnings("unchecked") + @Test + public void getAllTopicsTest() throws Exception { + PowerMockito.mockStatic(Admin.class); + KafkaAdminClient adminMock = mock(KafkaAdminClient.class); + PowerMockito.when(Admin.create(any(Properties.class))).thenReturn(adminMock); + ListTopicsResult listTopicsResult = Mockito.mock(ListTopicsResult.class); + KafkaFuture<Set<String>> kafkaFuture = Mockito.mock(KafkaFuture.class); + Set<String> sampleStrings = new HashSet<>(); + sampleStrings.add("registered-websub-topics"); + sampleStrings.add("consolidated-websub-topics"); + Mockito.when(kafkaFuture.get()).thenReturn(sampleStrings); + Mockito.when(listTopicsResult.names()).thenReturn(kafkaFuture); + Mockito.when(adminMock.listTopics(Mockito.any())).thenReturn(listTopicsResult); + MosipKafkaAdminClient mosipKafkaAdminClient = new MosipKafkaAdminClient( + "kafka-0.kafka-headless.default.svc.cluster.local:9092"); + Assert.assertEquals(mosipKafkaAdminClient.getAllTopics().size(), 2); + } + + @SuppressWarnings("unchecked") + @Test + public void createTopicsTestWithException() throws InterruptedException, ExecutionException { + PowerMockito.mockStatic(Admin.class); + KafkaAdminClient adminMock = mock(KafkaAdminClient.class); + PowerMockito.when(Admin.create(any(Properties.class))).thenReturn(adminMock); + CreateTopicsResult createTopicsResult = Mockito.mock(CreateTopicsResult.class); + Map<String, KafkaFuture<Void>> testMap = new HashMap<>(); + KafkaFuture<Void> kafkaFuture = Mockito.mock(KafkaFuture.class); + testMap.put("registered-websub-topics", kafkaFuture); + Mockito.when(kafkaFuture.get()).thenThrow(ExecutionException.class); + Mockito.when(createTopicsResult.values()).thenReturn(testMap); + Mockito.when(adminMock.createTopics(Mockito.any())).thenReturn(createTopicsResult); + MosipKafkaAdminClient mosipKafkaAdminClient = new MosipKafkaAdminClient( + "kafka-0.kafka-headless.default.svc.cluster.local:9092"); + Assert.assertThrows(ExecutionException.class, + () -> mosipKafkaAdminClient.createTopic("registered-websub-topics")); + } + + @Test + public void createTopicsTestWithKafkaException() { + PowerMockito.mockStatic(Admin.class); + PowerMockito.when(Admin.create(any(Properties.class))).thenThrow(KafkaException.class); + MosipKafkaAdminClient mosipKafkaAdminClient = new MosipKafkaAdminClient( + "kafka-0.kafka-headless.default.svc.cluster.local:9092"); + Assert.assertThrows(KafkaException.class, () -> mosipKafkaAdminClient.createTopic("registered-websub-topics")); + } + + @SuppressWarnings("unchecked") + @Test + public void createTopicsTest() throws Exception { + PowerMockito.mockStatic(Admin.class); + KafkaAdminClient adminMock = mock(KafkaAdminClient.class); + PowerMockito.when(Admin.create(any(Properties.class))).thenReturn(adminMock); + CreateTopicsResult createTopicsResult = Mockito.mock(CreateTopicsResult.class); + Map<String, KafkaFuture<Void>> testMap = new HashMap<>(); + KafkaFuture<Void> kafkaFuture = Mockito.mock(KafkaFuture.class); + testMap.put("registered-websub-subscribers", kafkaFuture); + Mockito.when(kafkaFuture.get()).thenReturn(null); + Assert.assertEquals(kafkaFuture.get(), null); + Mockito.when(createTopicsResult.values()).thenReturn(testMap); + Mockito.when(adminMock.createTopics(Mockito.any())).thenReturn(createTopicsResult); + MosipKafkaAdminClient mosipKafkaAdminClient = new MosipKafkaAdminClient( + "kafka-0.kafka-headless.default.svc.cluster.local:9092"); + mosipKafkaAdminClient.createTopic("registered-websub-subscribers"); + } + 
@SuppressWarnings("unchecked") + @Test + public void describeTopicTest() throws Exception { + PowerMockito.mockStatic(Admin.class); + KafkaAdminClient adminMock = mock(KafkaAdminClient.class); + PowerMockito.when(Admin.create(any(Properties.class))).thenReturn(adminMock); + DescribeTopicsResult result = Mockito.mock(DescribeTopicsResult.class); + KafkaFuture<Map<String, TopicDescription>> kafkaFutureVal = Mockito.mock(KafkaFuture.class); + Map<String, TopicDescription> resultMap = new HashMap<>(); + resultMap.put("consolidated-websub-subscribers", + new TopicDescription("consolidated-websub-subscribers", false, null)); + Mockito.when(kafkaFutureVal.get()).thenReturn(resultMap); + Mockito.when(result.all()).thenReturn(kafkaFutureVal); + Mockito.when(adminMock.describeTopics(Mockito.anyCollection())).thenReturn(result); + MosipKafkaAdminClient mosipKafkaAdminClient = new MosipKafkaAdminClient( + "kafka-0.kafka-headless.default.svc.cluster.local:9092"); + Assert.assertEquals(mosipKafkaAdminClient.describeTopic("consolidated-websub-subscribers"), resultMap); + } + +} \ No newline at end of file
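For quick reference, here is a minimal sketch of how the kafka-admin-client changed above can be driven from plain Java. The bootstrap address and topic names are illustrative assumptions; only the constructor and the `createTopic`, `isTopicsPresent`, and `getAllTopics` methods shown in `MosipKafkaAdminClient.java` above are relied on.

```java
import java.util.Set;

import io.mosip.kafkaadminclient.MosipKafkaAdminClient;

public class AdminClientUsage {
    public static void main(String[] args) throws Exception {
        // Assumption: a reachable Kafka broker; replace with your bootstrap servers.
        MosipKafkaAdminClient client = new MosipKafkaAdminClient("localhost:9092");

        // Create the hub's persistence topic only if it is not already present.
        if (!client.isTopicsPresent("registered-websub-topics")) {
            client.createTopic("registered-websub-topics");
        }

        // List every topic the broker currently knows about.
        Set<String> topics = client.getAllTopics();
        topics.forEach(System.out::println);
    }
}
```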
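The hub changes above also fix the on-the-wire shape of an encrypted `hubSecret`: AES-GCM output, followed by the 16-byte random IV that the hub pushes onto the ciphertext, base64-encoded and wrapped in the `ENCRYPTED_SECRET_PREFIX`/`ENCRYPTED_SECRET_SUFFIX` pair (`cipher{...}`). The Java sketch below decodes that envelope for illustration only; `HubSecretCodec` and its parameter names are hypothetical, and the 128-bit GCM tag length is an assumption based on common AES-GCM defaults rather than anything stated in the diff.

```java
import java.util.Arrays;
import java.util.Base64;

import javax.crypto.Cipher;
import javax.crypto.spec.GCMParameterSpec;
import javax.crypto.spec.SecretKeySpec;

public class HubSecretCodec {
    // Decrypts a value of the form cipher{base64(ciphertext || 16-byte IV)}.
    public static String decrypt(String wrapped, byte[] key) throws Exception {
        // Strip the cipher{ ... } prefix/suffix kept for backward compatibility.
        String b64 = wrapped.substring("cipher{".length(), wrapped.length() - "}".length());
        byte[] blob = Base64.getDecoder().decode(b64);
        // The last 16 bytes are the IV appended by the hub; the rest is ciphertext (+ tag).
        byte[] cipherAndTag = Arrays.copyOfRange(blob, 0, blob.length - 16);
        byte[] iv = Arrays.copyOfRange(blob, blob.length - 16, blob.length);
        Cipher cipher = Cipher.getInstance("AES/GCM/NoPadding");
        // Assumption: 128-bit authentication tag.
        cipher.init(Cipher.DECRYPT_MODE, new SecretKeySpec(key, "AES"),
                new GCMParameterSpec(128, iv));
        return new String(cipher.doFinal(cipherAndTag));
    }
}
```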