From 525a52214186fc1efa0afb98a8954765778d23d7 Mon Sep 17 00:00:00 2001 From: Jim McVea Date: Tue, 23 Jan 2024 15:11:40 -0500 Subject: [PATCH 1/2] feat: 0.3.0 release --- README.md | 118 +- ....diff.patch => backstage_0.5.8.diff.patch} | 192 +- .../plugins/aws-apps-backend/README.md | 2 +- .../plugins/aws-apps-backend/package.json | 23 +- .../aws-apps-backend/src/api/AwsAppsApi.ts | 82 +- .../aws-apps-backend/src/api/aws-audit.ts | 4 +- .../aws-apps-backend/src/api/aws-auth.ts | 1 - .../aws-apps-backend/src/api/aws-platform.ts | 8 +- .../aws-apps-backend/src/service/router.ts | 90 +- .../plugins/aws-apps-common/package.json | 8 +- .../src/types/AWSUIInterfaces.ts | 42 +- .../src/types/AppPromoTypes.ts | 2 +- .../src/types/PlatformTypes.ts | 6 + .../plugins/aws-apps-demo/package.json | 24 +- .../plugins/aws-apps/package.json | 29 +- .../plugins/aws-apps/src/api/OPAApi.ts | 55 +- .../plugins/aws-apps/src/api/OPAApiClient.ts | 140 +- .../AppCatalogPage/AppCatalogPage.tsx | 15 +- .../components/AppCatalogPage/awsColumns.tsx | 57 +- .../components/AppPromoCard/AppPromoCard.tsx | 123 +- .../AppPromoCard/AwsEksEnvPromoDialog.tsx | 184 ++ .../AwsEnvironmentProviderCard.tsx | 27 +- .../DeleteComponentCard.tsx | 282 ++- .../DeleteEnvironmentCard.tsx | 2 +- .../DeleteProviderCard/DeleteProviderCard.tsx | 8 +- .../EnvironmentInfoCard.tsx | 8 +- .../GeneralInfoCard/GeneralInfoCard.tsx | 2 +- .../InfrastructureCard/InfrastructureCard.tsx | 6 + .../K8sAppStateCard/K8sAppStateCard.tsx | 806 ++++++++ .../ProviderInfoCard/ProviderInfoCard.tsx | 118 +- .../ResourceBindingCard/ResourceBinding.tsx | 20 +- .../ResourceSelectorDialog.tsx | 39 +- .../plugins/aws-apps/src/helpers/constants.ts | 5 +- .../plugins/aws-apps/src/helpers/util.ts | 8 + .../plugins/aws-apps/src/hooks/useAwsApp.tsx | 57 +- .../plugins/aws-apps/src/index.ts | 1 + .../src/pages/AwsAppPage/AwsAppPage.tsx | 42 +- .../src/pages/AwsEKSAppPage/AwsEKSAppPage.tsx | 43 + .../AwsEKSEnvironmentProviderPage.tsx | 59 + .../AwsEnvironmentProviderPage.tsx | 8 +- .../pages/AwsResourcePage/AwsResourcePage.tsx | 25 +- .../AwsS3ResourcePage/AwsS3ResourcePage.tsx | 39 + .../plugins/aws-apps/src/plugin.ts | 9 + .../README.md | 6 +- .../package.json | 24 +- .../get-env-providers/get-env-providers.ts | 72 +- .../get-platform-parameters.ts | 18 +- .../get-ssm-parameters/get-ssm-parameters.ts | 17 +- .../src/example/template.yaml | 15 +- .../src/types.ts | 2 + backstage-reference/.gitignore | 2 +- backstage-reference/common/aws_ecs/README.md | 3 +- .../common/aws_ecs/package.json | 4 +- .../common/aws_ecs/src/cdk-ecs-module-app.ts | 2 +- .../aws_ecs/src/cdk-ecs-module-stack.ts | 25 +- backstage-reference/common/aws_efs/README.md | 4 - .../common/aws_efs/src/cdk-efs-module-app.ts | 39 - .../aws_efs/src/cdk-efs-module-stack.ts | 75 - .../common/{aws_efs => aws_eks}/.gitignore | 0 .../common/{aws_efs => aws_eks}/.npmignore | 0 backstage-reference/common/aws_eks/README.md | 6 + backstage-reference/common/aws_eks/cdk.json | 41 + .../common/aws_eks/package.json | 24 + .../common/aws_eks/permissions/README.md | 10 + .../common/aws_eks/src/cdk-eks-module-app.ts | 20 + .../aws_eks/src/cdk-eks-module-stack.ts | 220 ++ .../eks-env-app-admin-role-construct.ts | 69 + .../common/aws_eks/src/eks-input.ts | 96 + .../common/{aws_efs => aws_eks}/tsconfig.json | 0 .../common/aws_rds/package.json | 6 +- backstage-reference/common/aws_s3/.gitignore | 8 + backstage-reference/common/aws_s3/.npmignore | 6 + backstage-reference/common/aws_s3/README.md | 4 + .../common/{aws_efs => 
aws_s3}/buildspec.yml | 2 +- .../common/{aws_efs => aws_s3}/cdk.json | 2 +- .../common/{aws_efs => aws_s3}/package.json | 9 +- .../common/aws_s3/src/cdk-s3-module-app.ts | 25 + .../common/aws_s3/src/cdk-s3-module-stack.ts | 91 + .../common/aws_s3/tsconfig.json | 30 + .../common/aws_serverless_api/package.json | 4 +- .../cicd/.gitlab-ci-aws-dind-spring-boot.yml | 14 +- .../common/cicd/.gitlab-ci-aws-iac-ecs.yml | 3 +- .../cicd/.gitlab-ci-aws-iac-eks-kubectl.yml | 249 +++ .../common/cicd/.gitlab-ci-aws-iac-eks.yml | 234 +++ .../common/cicd/.gitlab-ci-aws-iac-rds.yml | 3 +- .../common/cicd/.gitlab-ci-aws-iac-s3.yml | 39 + .../.gitlab-ci-aws-iac-serverless-api.yml | 25 +- .../common/cicd/.gitlab-ci-aws-iac-tf-ecs.yml | 3 +- .../cicd/.gitlab-ci-aws-image-deploy.yml | 50 + .../cicd/.gitlab-ci-aws-image-kaniko.yml | 7 +- .../cicd/.gitlab-ci-aws-provider-basic.yml | 46 + .../cicd/.gitlab-ci-aws-provider-ecs-ec2.yml | 70 + .../cicd/.gitlab-ci-aws-provider-ecs.yml | 40 +- .../cicd/.gitlab-ci-aws-provider-eks.yml | 148 ++ .../.gitlab-ci-aws-provider-serverless.yml | 24 +- .../cicd/.gitlab-ci-job-defaults-cdk.yml | 29 +- .../create-ci-stages.sh | 63 +- .../example-generic/create-ci-stages.sh | 8 + .../example-nodejs-rds/create-ci-stages.sh | 12 + .../example-nodejs/create-ci-stages.sh | 12 + .../create-ci-stages.sh | 185 ++ .../example-python-flask/create-ci-stages.sh | 20 + .../create-ci-stages.sh | 8 + .../create-ci-stages.sh | 176 ++ .../example-springboot/create-ci-stages.sh | 22 + .../example-tf-nodejs/create-ci-stages.sh | 17 +- .../k8s/add-role-to-aws-auth-configmap.sh | 143 ++ .../cicd/scripts/k8s/apply-k8s-lambda.sh | 43 + .../cicd/scripts/k8s/get-ingress-dns-name.sh | 35 + .../common/cicd/scripts/k8s/install-helm.sh | 12 + .../cicd/scripts/k8s/install-kubectl.sh | 13 + .../cicd/scripts/k8s/resolve-placeholders.sh | 58 + .../cicd/scripts/k8s/save-template-output.sh | 73 + .../cicd/scripts/terraform/destroy-tf.sh | 3 +- backstage-reference/common/tf_aws_ecs/main.tf | 4 +- .../templates/all-templates.yaml | 14 +- .../content/.backstage/catalog-info.yaml | 21 + .../content/.gitlab-ci.yml | 15 + .../content/stack-parameters.properties | 7 + .../template.yaml | 184 ++ .../content/.backstage/catalog-info.yaml | 27 + .../content/.gitlab-ci.yml | 15 + .../content/stack-parameters.properties | 10 + .../template.yaml | 233 +++ .../content/.backstage/catalog-info.yaml | 21 +- .../content/.gitlab-ci.yml | 9 +- .../content/stack-parameters.properties | 14 +- .../template.yaml | 99 +- .../content/.backstage/catalog-info.yaml | 33 + .../content/.gitlab-ci.yml | 16 + .../content/stack-parameters.properties | 24 + .../template.yaml | 275 +++ .../content/.backstage/catalog-info.yaml | 33 + .../content/.gitlab-ci.yml | 16 + .../content/stack-parameters.properties | 25 + .../template.yaml | 360 ++++ .../content/.backstage/catalog-info.yaml | 13 +- .../templates/aws-environment/template.yaml | 9 +- .../content/.backstage/catalog-info.yaml | 12 +- .../aws-rds-resource/content/.gitlab-ci.yml | 4 +- .../aws-rds-resource/content/queries.js | 50 +- .../templates/aws-rds-resource/template.yaml | 37 +- .../.gitignore | 0 .../content/.backstage/catalog-info.yaml | 24 + .../content/.editorconfig | 0 .../content/.gitignore | 0 .../aws-s3-resource/content/.gitlab-ci.yml | 16 + .../aws-s3-resource/content/package.json | 16 + .../templates/aws-s3-resource/template.yaml | 166 ++ .../content/.backstage/catalog-info.yaml | 11 +- .../content/.gitlab-ci.yml | 8 +- .../content/stack-parameters.properties | 13 +- .../template.yaml | 
96 +- .../example-eks-nodejs-rds-helm/.gitignore | 2 + .../content/.backstage/catalog-info.yaml | 26 + .../content/.dockerignore | 2 + .../content/.editorconfig | 36 + .../content/.gitignore | 64 + .../content/.gitlab-ci.yml | 20 + .../content/Dockerfile | 18 + .../content/README.md | 10 + .../content/k8s/.helmignore | 23 + .../content/k8s/Chart.yaml | 16 + .../content/k8s/templates/NOTES.txt | 8 + .../content/k8s/templates/_helpers.tpl | 34 + .../content/k8s/templates/deployment.yaml | 53 + .../k8s/templates/envVarsConfigMap.yaml | 9 + .../content/k8s/templates/ingress.yaml | 45 + .../content/k8s/templates/namespace.yaml | 6 + .../k8s/templates/nsAdminRoleBinding.yaml | 16 + .../k8s/templates/nsViewerRoleBinding.yaml | 14 + .../content/k8s/templates/service.yaml | 16 + .../content/k8s/templates/serviceAccount.yaml | 9 + .../content/k8s/values.yaml | 42 + .../content/src/index.js | 95 + .../content/src/package.json | 17 + .../example-eks-nodejs-rds-helm/template.yaml | 239 +++ .../.gitignore | 2 + .../content/.backstage/catalog-info.yaml | 26 + .../content/.dockerignore | 2 + .../content/.editorconfig | 36 + .../content/.gitignore | 64 + .../content/.gitlab-ci.yml | 20 + .../content/Dockerfile | 18 + .../content/README.md | 10 + .../content/k8s/base/deployment.yaml | 48 + .../content/k8s/base/ingress.yaml | 45 + .../content/k8s/base/kustomization.yaml | 9 + .../content/k8s/base/nsAdminRoleBinding.yaml | 15 + .../content/k8s/base/nsViewerRoleBinding.yaml | 15 + .../content/k8s/base/service.yaml | 16 + .../content/k8s/base/serviceAccount.yaml | 9 + .../k8s/new-env-template/deployment.yaml | 22 + .../content/k8s/new-env-template/ingress.yaml | 6 + .../k8s/new-env-template/kustomization.yaml | 22 + .../k8s/new-env-template/namespace.yaml | 6 + .../new-env-template/nsAdminRoleBinding.yaml | 8 + .../new-env-template/nsViewerRoleBinding.yaml | 7 + .../content/k8s/new-env-template/service.yaml | 9 + .../k8s/new-env-template/serviceAccount.yaml | 9 + .../content/src/index.js | 95 + .../content/src/package.json | 17 + .../template.yaml | 239 +++ .../content/.backstage/aws-catalog-info.yaml | 24 - .../content/.backstage/catalog-info.yaml | 32 - .../example-nodejs-efs/content/.gitlab-ci.yml | 49 - .../example-nodejs-efs/content/index.js | 120 -- .../example-nodejs-efs/template.yaml | 191 -- .../example-nodejs-microservice/.gitignore | 2 + .../content/.backstage/catalog-info.yaml | 22 + .../content/.dockerignore | 2 + .../content/.editorconfig | 36 + .../content/.gitignore | 64 + .../content/.gitlab-ci.yml | 19 + .../content/Dockerfile | 15 + .../content/src/index.js | 30 + .../content/src}/package.json | 2 +- .../example-nodejs-microservice/template.yaml | 190 ++ .../content/.backstage/catalog-info.yaml | 8 +- .../example-nodejs-rds/content/.gitlab-ci.yml | 7 +- .../example-nodejs-rds/content/Dockerfile | 7 +- .../example-nodejs-rds/content/src/index.js | 98 +- .../content/src/package.json | 2 +- .../example-nodejs-rds/template.yaml | 29 +- .../content/.backstage/catalog-info.yaml | 8 +- .../example-nodejs/content/.gitlab-ci.yml | 7 +- .../example-nodejs/content/Dockerfile | 2 +- .../example-nodejs/content/src/index.js | 2 +- .../templates/example-nodejs/template.yaml | 22 +- .../content/.backstage/catalog-info.yaml | 24 + .../content/.editorconfig | 36 + .../content/.gitignore | 18 + .../content/.gitlab-ci.yml | 20 + .../content/Dockerfile | 20 + .../example-python-flask-eks/content/Procfile | 1 + .../content/README.md | 14 + .../content/k8s/base/deployment.yaml | 48 + 
.../content/k8s/base/ingress.yaml | 45 + .../content/k8s/base/kustomization.yaml | 9 + .../content/k8s/base/nsAdminRoleBinding.yaml | 15 + .../content/k8s/base/nsViewerRoleBinding.yaml | 15 + .../content/k8s/base/service.yaml | 16 + .../content/k8s/base/serviceAccount.yaml | 9 + .../k8s/new-env-template/deployment.yaml | 22 + .../content/k8s/new-env-template/ingress.yaml | 6 + .../k8s/new-env-template/kustomization.yaml | 23 + .../k8s/new-env-template/namespace.yaml | 6 + .../new-env-template/nsAdminRoleBinding.yaml | 8 + .../new-env-template/nsViewerRoleBinding.yaml | 7 + .../content/k8s/new-env-template/service.yaml | 9 + .../k8s/new-env-template/serviceAccount.yaml | 9 + .../content/requirements.txt | 7 + .../content/server.py | 30 + .../content/templates/index.html | 14 + .../example-python-flask-eks/template.yaml | 240 +++ .../content/.backstage/catalog-info.yaml | 8 +- .../content/.gitlab-ci.yml | 7 +- .../example-python-flask/content/Dockerfile | 3 + .../content/requirements.txt | 4 +- .../content/templates/index.html | 13 +- .../example-python-flask/template.yaml | 40 +- .../content/.backstage/catalog-info.yaml | 9 +- .../content/.gitlab-ci.yml | 6 +- .../content/README.md | 4 +- .../content/template.yml | 4 +- .../example-serverless-rest-api/template.yaml | 22 +- .../example-springboot-eks/.gitignore | 2 + .../content/.backstage/catalog-info.yaml | 28 + .../content/.editorconfig | 36 + .../example-springboot-eks/content/.gitignore | 64 + .../content/.gitlab-ci.yml | 20 + .../example-springboot-eks/content/HELP.md | 22 + .../example-springboot-eks/content/README.md | 14 + .../content/k8s/base/deployment.yaml | 49 + .../content/k8s/base/ingress.yaml | 45 + .../content/k8s/base/kustomization.yaml | 9 + .../content/k8s/base/nsAdminRoleBinding.yaml | 15 + .../content/k8s/base/nsViewerRoleBinding.yaml | 15 + .../content/k8s/base/service.yaml | 16 + .../content/k8s/base/serviceAccount.yaml | 9 + .../k8s/new-env-template/deployment.yaml | 22 + .../content/k8s/new-env-template/ingress.yaml | 6 + .../k8s/new-env-template/kustomization.yaml | 22 + .../k8s/new-env-template/namespace.yaml | 6 + .../new-env-template/nsAdminRoleBinding.yaml | 8 + .../new-env-template/nsViewerRoleBinding.yaml | 7 + .../content/k8s/new-env-template/service.yaml | 9 + .../k8s/new-env-template/serviceAccount.yaml | 9 + .../example-springboot-eks/content/mvnw | 316 +++ .../example-springboot-eks/content/mvnw.cmd | 188 ++ .../example-springboot-eks/content/pom.xml | 53 + .../aws/pace/fsi/restservice/Greeting.java | 3 + .../fsi/restservice/GreetingController.java | 21 + .../restservice/RestServiceApplication.java | 13 + .../src/main/resources/application.properties | 1 + .../target/classes/application.properties | 1 + .../example-springboot-eks/template.yaml | 235 +++ .../content/.backstage/catalog-info.yaml | 8 +- .../example-springboot/content/.gitlab-ci.yml | 7 +- .../example-springboot/template.yaml | 34 +- .../content/.backstage/catalog-info.yaml | 8 +- .../example-tf-nodejs/content/.gitlab-ci.yml | 7 +- .../example-tf-nodejs/content/Dockerfile | 2 +- .../example-tf-nodejs/content/src/index.js | 2 +- .../templates/example-tf-nodejs/template.yaml | 24 +- build-script/backstage-install.sh | 16 +- build-script/gitlab-tools.sh | 49 +- config/app-config.aws-production.yaml | 9 +- config/aws-production.Dockerfile | 6 +- config/sample.env | 2 +- iac/roots/opa-basic-environment/.gitignore | 8 + iac/roots/opa-basic-environment/.npmignore | 6 + iac/roots/opa-basic-environment/README.md | 22 + 
iac/roots/opa-basic-environment/cdk.json | 54 + .../opa-basic-environment/jest.config.js | 8 + iac/roots/opa-basic-environment/package.json | 33 + .../basic-env-operations-role-construct.ts | 225 +++ .../basic-env-provisioning-role-construct.ts | 177 ++ .../src/opa-basic-env-app.ts | 50 + .../src/opa-basic-environment-stack.ts | 126 ++ iac/roots/opa-common-constructs/package.json | 7 +- .../src/ecs-cluster-construct.ts | 2 +- .../src/network-construct.ts | 278 ++- .../src/rds-construct.ts | 2 +- iac/roots/opa-ecs-ec2-environment/.gitignore | 8 + iac/roots/opa-ecs-ec2-environment/.npmignore | 6 + iac/roots/opa-ecs-ec2-environment/README.md | 14 + iac/roots/opa-ecs-ec2-environment/cdk.json | 55 + .../opa-ecs-ec2-environment/jest.config.js | 8 + .../opa-ecs-ec2-environment/package.json | 28 + .../ecs-env-operations-role-construct.ts | 225 +++ .../ecs-env-provisioning-role-construct.ts | 219 ++ .../src/opa-ecs-env-app.ts | 53 + .../src/opa-ecs-environment-stack.ts | 176 ++ iac/roots/opa-ecs-environment/README.md | 8 +- iac/roots/opa-ecs-environment/package.json | 11 +- .../ecs-env-operations-role-construct.ts | 4 +- .../ecs-env-provisioning-role-construct.ts | 4 +- .../opa-ecs-environment/src/ecs-input.ts | 60 + .../src/opa-ecs-env-app.ts | 38 +- .../src/opa-ecs-environment-stack.ts | 67 +- iac/roots/opa-eks-environment/.gitignore | 8 + iac/roots/opa-eks-environment/.npmignore | 6 + iac/roots/opa-eks-environment/README.md | 12 + iac/roots/opa-eks-environment/cdk.json | 54 + iac/roots/opa-eks-environment/jest.config.js | 8 + iac/roots/opa-eks-environment/package.json | 35 + .../eks-env-cluster-admin-role-construct.ts | 135 ++ .../constructs/eks-env-cluster-construct.ts | 260 +++ .../src/constructs/eks-env-cluster-fargate.ts | 119 ++ .../eks-env-cluster-managed-node.ts | 117 ++ .../eks-env-control-plane-role-construct.ts | 78 + ...env-fargate-fluent-bit-config-construct.ts | 57 + .../eks-env-fluent-bit-role-construct.ts | 59 + .../eks-env-kubectl-lambda-role-construct.ts | 98 + ...anaged-node-fluent-bit-config-construct.ts | 363 ++++ .../eks-env-operations-role-construct.ts | 260 +++ .../eks-env-pod-execution-role-construct.ts | 102 + .../eks-env-provisioning-role-construct.ts | 252 +++ .../opa-eks-environment/src/eks-input.ts | 170 ++ .../src/opa-eks-env-app.ts | 45 + .../src/opa-eks-environment-stack.ts | 285 +++ .../opa-eks-environment/src/tsconfig.json | 39 + iac/roots/opa-platform/package.json | 71 +- .../src/scripts/gitlab-runner-user-data.sh | 28 +- .../opa-platform/src/scripts/user-data.sh | 28 +- .../opa-serverless-environment/README.md | 28 +- .../opa-serverless-environment/package.json | 9 +- ...rless-api-env-operations-role-construct.ts | 2 +- ...ess-api-env-provisioning-role-construct.ts | 2 +- .../src/opa-serverless-env-app.ts | 25 +- .../src/opa-serverless-environment-stack.ts | 48 +- .../src/serverless-input.ts | 57 + iac/roots/package.json | 5 +- iac/roots/yarn.lock | 1767 +++++++++-------- website/docs/faq.md | 14 +- .../getting-started/deploy-the-platform.md | 6 +- website/docs/techdocs/entities.md | 35 +- website/docs/techdocs/security.md | 225 +++ website/docs/tests.md | 163 ++ website/docusaurus.config.ts | 14 +- website/package.json | 15 +- website/src/components/HomepageFeatures.tsx | 16 +- website/yarn.lock | 894 ++++----- 384 files changed, 17267 insertions(+), 3160 deletions(-) rename backstage-mods/{backstage_0.5.4.diff.patch => backstage_0.5.8.diff.patch} (84%) create mode 100644 backstage-plugins/plugins/aws-apps-common/src/types/PlatformTypes.ts create mode 100644 
backstage-plugins/plugins/aws-apps/src/components/AppPromoCard/AwsEksEnvPromoDialog.tsx create mode 100644 backstage-plugins/plugins/aws-apps/src/components/K8sAppStateCard/K8sAppStateCard.tsx create mode 100644 backstage-plugins/plugins/aws-apps/src/pages/AwsEKSAppPage/AwsEKSAppPage.tsx create mode 100644 backstage-plugins/plugins/aws-apps/src/pages/AwsEKSEnvironmentProviderPage/AwsEKSEnvironmentProviderPage.tsx create mode 100644 backstage-plugins/plugins/aws-apps/src/pages/AwsS3ResourcePage/AwsS3ResourcePage.tsx delete mode 100644 backstage-reference/common/aws_efs/README.md delete mode 100644 backstage-reference/common/aws_efs/src/cdk-efs-module-app.ts delete mode 100644 backstage-reference/common/aws_efs/src/cdk-efs-module-stack.ts rename backstage-reference/common/{aws_efs => aws_eks}/.gitignore (100%) rename backstage-reference/common/{aws_efs => aws_eks}/.npmignore (100%) create mode 100644 backstage-reference/common/aws_eks/README.md create mode 100644 backstage-reference/common/aws_eks/cdk.json create mode 100644 backstage-reference/common/aws_eks/package.json create mode 100644 backstage-reference/common/aws_eks/permissions/README.md create mode 100644 backstage-reference/common/aws_eks/src/cdk-eks-module-app.ts create mode 100644 backstage-reference/common/aws_eks/src/cdk-eks-module-stack.ts create mode 100644 backstage-reference/common/aws_eks/src/constructs/eks-env-app-admin-role-construct.ts create mode 100644 backstage-reference/common/aws_eks/src/eks-input.ts rename backstage-reference/common/{aws_efs => aws_eks}/tsconfig.json (100%) create mode 100644 backstage-reference/common/aws_s3/.gitignore create mode 100644 backstage-reference/common/aws_s3/.npmignore create mode 100644 backstage-reference/common/aws_s3/README.md rename backstage-reference/common/{aws_efs => aws_s3}/buildspec.yml (92%) rename backstage-reference/common/{aws_efs => aws_s3}/cdk.json (95%) rename backstage-reference/common/{aws_efs => aws_s3}/package.json (64%) create mode 100644 backstage-reference/common/aws_s3/src/cdk-s3-module-app.ts create mode 100644 backstage-reference/common/aws_s3/src/cdk-s3-module-stack.ts create mode 100644 backstage-reference/common/aws_s3/tsconfig.json create mode 100644 backstage-reference/common/cicd/.gitlab-ci-aws-iac-eks-kubectl.yml create mode 100644 backstage-reference/common/cicd/.gitlab-ci-aws-iac-eks.yml create mode 100644 backstage-reference/common/cicd/.gitlab-ci-aws-iac-s3.yml create mode 100644 backstage-reference/common/cicd/.gitlab-ci-aws-image-deploy.yml create mode 100644 backstage-reference/common/cicd/.gitlab-ci-aws-provider-basic.yml create mode 100644 backstage-reference/common/cicd/.gitlab-ci-aws-provider-ecs-ec2.yml create mode 100644 backstage-reference/common/cicd/.gitlab-ci-aws-provider-eks.yml rename backstage-reference/common/cicd/scripts/{example-eks-note-app => example-eks-nodejs-rds}/create-ci-stages.sh (70%) mode change 100644 => 100755 create mode 100755 backstage-reference/common/cicd/scripts/example-python-flask-eks/create-ci-stages.sh create mode 100755 backstage-reference/common/cicd/scripts/example-springboot-eks/create-ci-stages.sh create mode 100755 backstage-reference/common/cicd/scripts/k8s/add-role-to-aws-auth-configmap.sh create mode 100755 backstage-reference/common/cicd/scripts/k8s/apply-k8s-lambda.sh create mode 100755 backstage-reference/common/cicd/scripts/k8s/get-ingress-dns-name.sh create mode 100755 backstage-reference/common/cicd/scripts/k8s/install-helm.sh create mode 100755 
backstage-reference/common/cicd/scripts/k8s/install-kubectl.sh create mode 100755 backstage-reference/common/cicd/scripts/k8s/resolve-placeholders.sh create mode 100755 backstage-reference/common/cicd/scripts/k8s/save-template-output.sh create mode 100644 backstage-reference/templates/aws-basic-environment-provider/content/.backstage/catalog-info.yaml create mode 100644 backstage-reference/templates/aws-basic-environment-provider/content/.gitlab-ci.yml create mode 100644 backstage-reference/templates/aws-basic-environment-provider/content/stack-parameters.properties create mode 100644 backstage-reference/templates/aws-basic-environment-provider/template.yaml create mode 100644 backstage-reference/templates/aws-ecs-ec2-environment-provider/content/.backstage/catalog-info.yaml create mode 100644 backstage-reference/templates/aws-ecs-ec2-environment-provider/content/.gitlab-ci.yml create mode 100644 backstage-reference/templates/aws-ecs-ec2-environment-provider/content/stack-parameters.properties create mode 100644 backstage-reference/templates/aws-ecs-ec2-environment-provider/template.yaml create mode 100644 backstage-reference/templates/aws-eks-environment-existing-cluster-provider/content/.backstage/catalog-info.yaml create mode 100644 backstage-reference/templates/aws-eks-environment-existing-cluster-provider/content/.gitlab-ci.yml create mode 100644 backstage-reference/templates/aws-eks-environment-existing-cluster-provider/content/stack-parameters.properties create mode 100644 backstage-reference/templates/aws-eks-environment-existing-cluster-provider/template.yaml create mode 100644 backstage-reference/templates/aws-eks-environment-provider/content/.backstage/catalog-info.yaml create mode 100644 backstage-reference/templates/aws-eks-environment-provider/content/.gitlab-ci.yml create mode 100644 backstage-reference/templates/aws-eks-environment-provider/content/stack-parameters.properties create mode 100644 backstage-reference/templates/aws-eks-environment-provider/template.yaml rename backstage-reference/templates/{example-nodejs-efs => aws-s3-resource}/.gitignore (100%) create mode 100644 backstage-reference/templates/aws-s3-resource/content/.backstage/catalog-info.yaml rename backstage-reference/templates/{example-nodejs-efs => aws-s3-resource}/content/.editorconfig (100%) rename backstage-reference/templates/{example-nodejs-efs => aws-s3-resource}/content/.gitignore (100%) create mode 100644 backstage-reference/templates/aws-s3-resource/content/.gitlab-ci.yml create mode 100644 backstage-reference/templates/aws-s3-resource/content/package.json create mode 100644 backstage-reference/templates/aws-s3-resource/template.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/.gitignore create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/.backstage/catalog-info.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/.dockerignore create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/.editorconfig create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/.gitignore create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/.gitlab-ci.yml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/Dockerfile create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/README.md create mode 100644 
backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/.helmignore create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/Chart.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/NOTES.txt create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/_helpers.tpl create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/deployment.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/envVarsConfigMap.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/ingress.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/namespace.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/nsAdminRoleBinding.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/nsViewerRoleBinding.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/service.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/serviceAccount.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/values.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/src/index.js create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/content/src/package.json create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-helm/template.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/.gitignore create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/.backstage/catalog-info.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/.dockerignore create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/.editorconfig create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/.gitignore create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/.gitlab-ci.yml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/Dockerfile create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/README.md create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/deployment.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/ingress.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/kustomization.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/nsAdminRoleBinding.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/nsViewerRoleBinding.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/service.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/serviceAccount.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/deployment.yaml create mode 100644 
backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/ingress.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/kustomization.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/namespace.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/nsAdminRoleBinding.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/nsViewerRoleBinding.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/service.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/serviceAccount.yaml create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/src/index.js create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/src/package.json create mode 100644 backstage-reference/templates/example-eks-nodejs-rds-kustomize/template.yaml delete mode 100644 backstage-reference/templates/example-nodejs-efs/content/.backstage/aws-catalog-info.yaml delete mode 100644 backstage-reference/templates/example-nodejs-efs/content/.backstage/catalog-info.yaml delete mode 100644 backstage-reference/templates/example-nodejs-efs/content/.gitlab-ci.yml delete mode 100644 backstage-reference/templates/example-nodejs-efs/content/index.js delete mode 100644 backstage-reference/templates/example-nodejs-efs/template.yaml create mode 100644 backstage-reference/templates/example-nodejs-microservice/.gitignore create mode 100644 backstage-reference/templates/example-nodejs-microservice/content/.backstage/catalog-info.yaml create mode 100644 backstage-reference/templates/example-nodejs-microservice/content/.dockerignore create mode 100644 backstage-reference/templates/example-nodejs-microservice/content/.editorconfig create mode 100644 backstage-reference/templates/example-nodejs-microservice/content/.gitignore create mode 100644 backstage-reference/templates/example-nodejs-microservice/content/.gitlab-ci.yml create mode 100644 backstage-reference/templates/example-nodejs-microservice/content/Dockerfile create mode 100644 backstage-reference/templates/example-nodejs-microservice/content/src/index.js rename backstage-reference/templates/{example-nodejs-efs/content => example-nodejs-microservice/content/src}/package.json (92%) create mode 100644 backstage-reference/templates/example-nodejs-microservice/template.yaml create mode 100644 backstage-reference/templates/example-python-flask-eks/content/.backstage/catalog-info.yaml create mode 100644 backstage-reference/templates/example-python-flask-eks/content/.editorconfig create mode 100644 backstage-reference/templates/example-python-flask-eks/content/.gitignore create mode 100644 backstage-reference/templates/example-python-flask-eks/content/.gitlab-ci.yml create mode 100644 backstage-reference/templates/example-python-flask-eks/content/Dockerfile create mode 100644 backstage-reference/templates/example-python-flask-eks/content/Procfile create mode 100644 backstage-reference/templates/example-python-flask-eks/content/README.md create mode 100644 backstage-reference/templates/example-python-flask-eks/content/k8s/base/deployment.yaml create mode 100644 
backstage-reference/templates/example-python-flask-eks/content/k8s/base/ingress.yaml create mode 100644 backstage-reference/templates/example-python-flask-eks/content/k8s/base/kustomization.yaml create mode 100644 backstage-reference/templates/example-python-flask-eks/content/k8s/base/nsAdminRoleBinding.yaml create mode 100644 backstage-reference/templates/example-python-flask-eks/content/k8s/base/nsViewerRoleBinding.yaml create mode 100644 backstage-reference/templates/example-python-flask-eks/content/k8s/base/service.yaml create mode 100644 backstage-reference/templates/example-python-flask-eks/content/k8s/base/serviceAccount.yaml create mode 100644 backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/deployment.yaml create mode 100644 backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/ingress.yaml create mode 100644 backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/kustomization.yaml create mode 100644 backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/namespace.yaml create mode 100644 backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/nsAdminRoleBinding.yaml create mode 100644 backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/nsViewerRoleBinding.yaml create mode 100644 backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/service.yaml create mode 100644 backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/serviceAccount.yaml create mode 100644 backstage-reference/templates/example-python-flask-eks/content/requirements.txt create mode 100644 backstage-reference/templates/example-python-flask-eks/content/server.py create mode 100644 backstage-reference/templates/example-python-flask-eks/content/templates/index.html create mode 100644 backstage-reference/templates/example-python-flask-eks/template.yaml create mode 100644 backstage-reference/templates/example-springboot-eks/.gitignore create mode 100644 backstage-reference/templates/example-springboot-eks/content/.backstage/catalog-info.yaml create mode 100644 backstage-reference/templates/example-springboot-eks/content/.editorconfig create mode 100644 backstage-reference/templates/example-springboot-eks/content/.gitignore create mode 100644 backstage-reference/templates/example-springboot-eks/content/.gitlab-ci.yml create mode 100644 backstage-reference/templates/example-springboot-eks/content/HELP.md create mode 100644 backstage-reference/templates/example-springboot-eks/content/README.md create mode 100644 backstage-reference/templates/example-springboot-eks/content/k8s/base/deployment.yaml create mode 100644 backstage-reference/templates/example-springboot-eks/content/k8s/base/ingress.yaml create mode 100644 backstage-reference/templates/example-springboot-eks/content/k8s/base/kustomization.yaml create mode 100644 backstage-reference/templates/example-springboot-eks/content/k8s/base/nsAdminRoleBinding.yaml create mode 100644 backstage-reference/templates/example-springboot-eks/content/k8s/base/nsViewerRoleBinding.yaml create mode 100644 backstage-reference/templates/example-springboot-eks/content/k8s/base/service.yaml create mode 100644 backstage-reference/templates/example-springboot-eks/content/k8s/base/serviceAccount.yaml create mode 100644 backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/deployment.yaml create mode 100644 
backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/ingress.yaml create mode 100644 backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/kustomization.yaml create mode 100644 backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/namespace.yaml create mode 100644 backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/nsAdminRoleBinding.yaml create mode 100644 backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/nsViewerRoleBinding.yaml create mode 100644 backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/service.yaml create mode 100644 backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/serviceAccount.yaml create mode 100755 backstage-reference/templates/example-springboot-eks/content/mvnw create mode 100644 backstage-reference/templates/example-springboot-eks/content/mvnw.cmd create mode 100644 backstage-reference/templates/example-springboot-eks/content/pom.xml create mode 100644 backstage-reference/templates/example-springboot-eks/content/src/main/java/dev/aws/pace/fsi/restservice/Greeting.java create mode 100644 backstage-reference/templates/example-springboot-eks/content/src/main/java/dev/aws/pace/fsi/restservice/GreetingController.java create mode 100644 backstage-reference/templates/example-springboot-eks/content/src/main/java/dev/aws/pace/fsi/restservice/RestServiceApplication.java create mode 100644 backstage-reference/templates/example-springboot-eks/content/src/main/resources/application.properties create mode 100644 backstage-reference/templates/example-springboot-eks/content/target/classes/application.properties create mode 100644 backstage-reference/templates/example-springboot-eks/template.yaml create mode 100644 iac/roots/opa-basic-environment/.gitignore create mode 100644 iac/roots/opa-basic-environment/.npmignore create mode 100644 iac/roots/opa-basic-environment/README.md create mode 100644 iac/roots/opa-basic-environment/cdk.json create mode 100644 iac/roots/opa-basic-environment/jest.config.js create mode 100644 iac/roots/opa-basic-environment/package.json create mode 100644 iac/roots/opa-basic-environment/src/constructs/basic-env-operations-role-construct.ts create mode 100644 iac/roots/opa-basic-environment/src/constructs/basic-env-provisioning-role-construct.ts create mode 100644 iac/roots/opa-basic-environment/src/opa-basic-env-app.ts create mode 100644 iac/roots/opa-basic-environment/src/opa-basic-environment-stack.ts create mode 100644 iac/roots/opa-ecs-ec2-environment/.gitignore create mode 100644 iac/roots/opa-ecs-ec2-environment/.npmignore create mode 100644 iac/roots/opa-ecs-ec2-environment/README.md create mode 100644 iac/roots/opa-ecs-ec2-environment/cdk.json create mode 100644 iac/roots/opa-ecs-ec2-environment/jest.config.js create mode 100644 iac/roots/opa-ecs-ec2-environment/package.json create mode 100644 iac/roots/opa-ecs-ec2-environment/src/constructs/ecs-env-operations-role-construct.ts create mode 100644 iac/roots/opa-ecs-ec2-environment/src/constructs/ecs-env-provisioning-role-construct.ts create mode 100644 iac/roots/opa-ecs-ec2-environment/src/opa-ecs-env-app.ts create mode 100644 iac/roots/opa-ecs-ec2-environment/src/opa-ecs-environment-stack.ts create mode 100644 iac/roots/opa-ecs-environment/src/ecs-input.ts create mode 100644 iac/roots/opa-eks-environment/.gitignore create mode 100644 iac/roots/opa-eks-environment/.npmignore create mode 100644 
iac/roots/opa-eks-environment/README.md create mode 100644 iac/roots/opa-eks-environment/cdk.json create mode 100644 iac/roots/opa-eks-environment/jest.config.js create mode 100644 iac/roots/opa-eks-environment/package.json create mode 100644 iac/roots/opa-eks-environment/src/constructs/eks-env-cluster-admin-role-construct.ts create mode 100644 iac/roots/opa-eks-environment/src/constructs/eks-env-cluster-construct.ts create mode 100644 iac/roots/opa-eks-environment/src/constructs/eks-env-cluster-fargate.ts create mode 100644 iac/roots/opa-eks-environment/src/constructs/eks-env-cluster-managed-node.ts create mode 100644 iac/roots/opa-eks-environment/src/constructs/eks-env-control-plane-role-construct.ts create mode 100644 iac/roots/opa-eks-environment/src/constructs/eks-env-fargate-fluent-bit-config-construct.ts create mode 100644 iac/roots/opa-eks-environment/src/constructs/eks-env-fluent-bit-role-construct.ts create mode 100644 iac/roots/opa-eks-environment/src/constructs/eks-env-kubectl-lambda-role-construct.ts create mode 100644 iac/roots/opa-eks-environment/src/constructs/eks-env-managed-node-fluent-bit-config-construct.ts create mode 100644 iac/roots/opa-eks-environment/src/constructs/eks-env-operations-role-construct.ts create mode 100644 iac/roots/opa-eks-environment/src/constructs/eks-env-pod-execution-role-construct.ts create mode 100644 iac/roots/opa-eks-environment/src/constructs/eks-env-provisioning-role-construct.ts create mode 100644 iac/roots/opa-eks-environment/src/eks-input.ts create mode 100644 iac/roots/opa-eks-environment/src/opa-eks-env-app.ts create mode 100644 iac/roots/opa-eks-environment/src/opa-eks-environment-stack.ts create mode 100644 iac/roots/opa-eks-environment/src/tsconfig.json create mode 100644 iac/roots/opa-serverless-environment/src/serverless-input.ts create mode 100644 website/docs/techdocs/security.md create mode 100644 website/docs/tests.md diff --git a/README.md b/README.md index 179f187f..a981441d 100644 --- a/README.md +++ b/README.md @@ -8,24 +8,124 @@ Built on the [Backstage open platform](https://backstage.io), this solution make This solution leverages the flexibility and extensibility of the Backstage platform to provide customizable software templates, scaffolder actions, and deployment patterns. While this provides a lot of freedom in implementation, it can be overwhelming to get started. To help users get started, a reference repository is also provided with samples to show how to use the solution. -:clapper: **Demonstrations and Tutorials** +:clapper: **Demonstrations and Tutorials** +**COMING SOON!** Demonstration and tutorial videos. + -[OPA on AWS Website](https://opaonaws.io)
-[OPA on AWS Documentation](https://opaonaws.io/docs/intro)
-[Tutorial YouTube videos](https://www.youtube.com/playlist?list=PLhr1KZpdzukcf5e7vYOVkpw4h-rzy7Pn3)
 ## Getting Started
-Please see our [Getting started documentation](https://opaonaws.io/docs/getting-started/deploy-the-platform)
+The solution is composed of two parts: the solution platform and Backstage plugins. Combined, these provide the build, deployment, and security requirements to manage, provision, and operate your applications and runtime infrastructure in the AWS cloud:
+
+1. Solution Platform - The solution platform provides the infrastructure to run a Backstage instance and supporting resources on AWS.
+2. Backstage plugins - OPA plugins for Backstage contribute the user experience and actions to Backstage to support the creation and management of applications and environments.
+
+## Content
+
+1. Architecture overview
+2. Installation instructions
+3. FAQs
 ## 1. Architecture Overview
-See our [high level architecture](https://opaonaws.io/docs/techdocs/architecture)
-For details on the solution architecture lower level design.
-> [ARCHITECTURE.md](./docs/ARCHITECTURE.md)
+
+See [ARCHITECTURE.md](./docs/ARCHITECTURE.md) for details about the solution architecture.
+
+
+## Prerequisites
+
+### Software prerequisites
+
+The following software is required to perform the installation of the platform solution:
+- [node.js](https://nodejs.org/en/) - 18.16 or higher
+- [yarn](https://classic.yarnpkg.com/en/docs/install) - v1.x
+- [aws-cli](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html)
+- [aws-cdk](https://docs.aws.amazon.com/cdk/v2/guide/getting_started.html#getting_started_install)
+- [jq](https://stedolan.github.io/jq/)
+- [docker](https://www.docker.com/)
+- [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)
+
+
+> [!NOTE]
+> The installation instructions documented here were tested using the following versions:
+> - Backstage v1.17
+> - node v18.17
+> - npm 9.6.7
+> - cdk v2.95
+> - yarn 1.22.19
+
+### Solution Platform prerequisites
+
+Prior to installing the OPA solution platform, you will need to ensure that the following items are configured and available:
+
+* **AWS Account ID and region** - The solution will be installed into an AWS account and region. You will need the 12-digit account ID and must be able to log into the account with sufficient permissions to provision infrastructure resources.
+
+* **GitLab Community Edition EC2 AMI ID** - The solution will install a small GitLab instance where application source code will be stored. The AWS Marketplace provides a **free** community edition of GitLab used by the solution.
+  * You will need to subscribe to the marketplace offering. Search for "GitLab Community Edition" by GitLab or use a direct link: https://aws.amazon.com/marketplace/pp/prodview-w6ykryurkesjq
+  * Once your account is subscribed to the GitLab CE Marketplace offering, save the EC2 AMI ID for the appropriate region from the "Launch new instance" page as shown in the image below (_do not actually launch an instance as this will be done for you during installation_).
+    GitLab CE Marketplace launch
+    Alternatively, you can query for the AMI using the AWS CLI (substitute the appropriate region value for the `--region` option):
+    ```sh
+    aws ec2 describe-images --owners "aws-marketplace" --filters "Name=name,Values=*GitLab CE 16.4.0*" --query 'Images[].[ImageId]' --region us-west-2 --output text
+    ```
+
+* **GitLab Runner image** - The solution will set up an EC2 instance as a GitLab Runner to execute GitLab CI/CD pipelines. The Amazon-provided Ubuntu "Jammy" image will be used for the runner. Save the EC2 AMI ID for the appropriate region. The following CLI command will return the appropriate image ID; replace the value of `--region` to reflect your target region:
+    ```sh
+    aws ec2 describe-images --owners "amazon" --filters "Name=name,Values=*ubuntu-jammy-22.04-amd64-server-20230208*" --query 'Images[].[ImageId]' --region us-west-2 --output text
+    ```
+
+* **Route 53 Hosted Zone** - The solution will ensure secure communications and set up a certificate for your defined domain. Ensure that a public hosted zone is set up in your account. See the AWS documentation for [creating a public hosted zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingHostedZone.html).
+
+* **Okta authentication** - The solution uses Okta and RoadieHQ Backstage plugins for authentication of users and groups. You will need a client ID, client secret, and API key for configuration of the solution. If you wish to use Okta for authentication, you can [sign up for a free developer account](https://developer.okta.com/signup/).
+  * Once the account is set up, you will need to [configure an Okta API key](https://developer.okta.com/docs/guides/create-an-api-token/main/) for the [RoadieHQ backend catalog plugin](https://www.npmjs.com/package/@roadiehq/catalog-backend-module-okta).
+  * A client ID and secret are required to set up a Backstage Okta authentication provider. See the [Backstage Okta auth documentation](https://backstage.io/docs/auth/okta/provider) for more details.
+  * Other IdPs are supported and could be substituted using different plugins. Configuring alternative authentication is not covered in this README, but the [Backstage Authentication documentation](https://backstage.io/docs/auth/) provides details for other providers.
 ## 2. Installation
-Please see our [Installation instructions](https://opaonaws.io/docs/getting-started/deploy-the-platform)
+
+1. Clone the repository and change to the repository location
+   ```sh
+   git clone https://github.com/awslabs/app-development-for-backstage-io-on-aws.git
+   cd app-development-for-backstage-io-on-aws
+   ```
+
+2. Configure the solution
+   1. Copy the `config/sample.env` file to `config/.env`
+   2. Edit the `config/.env` file and provide values for all of the environment variables. The file is commented to explain the purpose of the variables and requires some of the information from the [Solution Platform Prerequisites](#solution-platform-prerequisites) section above.
+      :exclamation: The `SECRET_GITLAB_CONFIG_PROP_apiToken` variable **does not** need to be provided. This will be automatically configured during installation after the platform is deployed.
+
+3. Perform the installation
+   1. Run `make install`
+      After the installation completes, the application will start up. Open a browser and navigate to the 'OPA on AWS' endpoint using the Route 53 hosted zone name that you configured (e.g. `https://${R53_HOSTED_ZONE_NAME}`).
+ If any errors occur during installation, please review the `install_{datestamp}.log` file for details. + The Makefile target will automatically perform the following actions: + * Install and configure Backstage + * Install/update CDK + * Deploy the solution platform AWS infrastructure + * Update the configuration with GitLab information + * Push a sample repository to GitLab + * Build and deploy the Backstage image to AWS + ## 3. FAQs -Please see our [FAQs page](https://opaonaws.io/docs/faq) + +Q. I don't use Okta. Can i change the identity provider to another one? +A. Yes. Backstage [supports many IDPs](https://backstage.io/docs/auth/). Once you configure Backstage for your chosen IdP, make sure Backstage catalog is synced with the users and groups from your IDP. + +Q. I want to use another source control that is not GitLab. How can i do that? +A. Backstage supports multiple source control providers which can be integrated through the Backstage config. OPA uses GitLab for several usage scenarios which you will need to migrate to another source control provider: + +1. Storing application source code +2. Storing template source code +3. Storing pipelines jobs and orchestration +4. Update the Client API plugin that interacts with GitLab to the new source control provider + + +Q. I'm using Terraform, can I use this solution with Terraform to provision application resources? +A. Yes. We provide a Node.js Terraform application for demonstration. You may also write your own provider with Terraform. ## Security diff --git a/backstage-mods/backstage_0.5.4.diff.patch b/backstage-mods/backstage_0.5.8.diff.patch similarity index 84% rename from backstage-mods/backstage_0.5.4.diff.patch rename to backstage-mods/backstage_0.5.8.diff.patch index ab8ea463..243e14ac 100644 --- a/backstage-mods/backstage_0.5.4.diff.patch +++ b/backstage-mods/backstage_0.5.8.diff.patch @@ -1,6 +1,6 @@ -diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.4/app-config.yaml backstage/app-config.yaml ---- backstage_0.5.4/app-config.yaml 2023-09-27 17:14:52 -+++ backstage/app-config.yaml 2023-09-27 17:14:43 +diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.8/app-config.yaml backstage/app-config.yaml +--- backstage_0.5.8/app-config.yaml 2024-01-23 15:03:23 ++++ backstage/app-config.yaml 2024-01-23 15:03:06 @@ -1,9 +1,11 @@ app: - title: Scaffolded Backstage App @@ -186,17 +186,32 @@ diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json -- + schedule: + frequency: { minutes: 30 } + timeout: { minutes: 3 } -diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.4/backstage.json backstage/backstage.json ---- backstage_0.5.4/backstage.json 2023-09-27 17:14:52 -+++ backstage/backstage.json 2023-09-27 17:14:43 +diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist 
--exclude=.DS_Store backstage_0.5.8/backstage.json backstage/backstage.json +--- backstage_0.5.8/backstage.json 2024-01-23 15:03:23 ++++ backstage/backstage.json 2024-01-23 15:03:06 @@ -1,3 +1,3 @@ { -- "version": "1.17.0" -+ "version": "1.17.5" +- "version": "1.21.0" ++ "version": "1.21.1" } -diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.4/packages/app/src/App.tsx backstage/packages/app/src/App.tsx ---- backstage_0.5.4/packages/app/src/App.tsx 2023-09-27 17:14:52 -+++ backstage/packages/app/src/App.tsx 2023-09-27 17:14:43 +diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.8/packages/app/e2e-tests/app.test.ts backstage/packages/app/e2e-tests/app.test.ts +--- backstage_0.5.8/packages/app/e2e-tests/app.test.ts 2024-01-23 15:03:24 ++++ backstage/packages/app/e2e-tests/app.test.ts 2024-01-23 15:03:06 +@@ -13,11 +13,8 @@ + * See the License for the specific language governing permissions and + * limitations under the License. + */ +- + import { test, expect } from '@playwright/test'; +- + test('App should render the welcome page', async ({ page }) => { + await page.goto('/'); +- + await expect(page.getByText('My Company Catalog')).toBeVisible(); + }); +diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.8/packages/app/src/App.tsx backstage/packages/app/src/App.tsx +--- backstage_0.5.8/packages/app/src/App.tsx 2024-01-23 15:03:24 ++++ backstage/packages/app/src/App.tsx 2024-01-23 15:03:06 @@ -27,15 +27,38 @@ import { searchPage } from './components/search/SearchPage'; import { Root } from './components/Root'; @@ -303,7 +318,31 @@ diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json -- } /> + + +- } /> ++ ++ (entity?.metadata?.tags?.includes('environment-provider') || entity?.metadata?.tags?.includes('aws-environment')) ?? false, ++ }, ++ { ++ title: "AWS Resources", ++ filter: entity => ++ entity?.metadata?.tags?.includes('aws-resource') ?? 
false, ++ }, ++ ]} ++ /> ++ } /> + } /> + } /> } /> @@ -325,9 +364,9 @@ diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json -- ); -diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.4/packages/app/src/components/Root/Root.tsx backstage/packages/app/src/components/Root/Root.tsx ---- backstage_0.5.4/packages/app/src/components/Root/Root.tsx 2023-09-27 17:14:52 -+++ backstage/packages/app/src/components/Root/Root.tsx 2023-09-27 17:14:43 +diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.8/packages/app/src/components/Root/Root.tsx backstage/packages/app/src/components/Root/Root.tsx +--- backstage_0.5.8/packages/app/src/components/Root/Root.tsx 2024-01-23 15:03:24 ++++ backstage/packages/app/src/components/Root/Root.tsx 2024-01-23 15:03:06 @@ -23,9 +23,14 @@ SidebarSpace, useSidebarOpenState, @@ -411,9 +450,9 @@ diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json -- -diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.4/packages/app/src/components/catalog/EntityPage.tsx backstage/packages/app/src/components/catalog/EntityPage.tsx ---- backstage_0.5.4/packages/app/src/components/catalog/EntityPage.tsx 2023-09-27 17:14:52 -+++ backstage/packages/app/src/components/catalog/EntityPage.tsx 2023-09-27 17:14:43 +diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.8/packages/app/src/components/catalog/EntityPage.tsx backstage/packages/app/src/components/catalog/EntityPage.tsx +--- backstage_0.5.8/packages/app/src/components/catalog/EntityPage.tsx 2024-01-23 15:03:24 ++++ backstage/packages/app/src/components/catalog/EntityPage.tsx 2024-01-23 15:03:06 @@ -27,6 +27,7 @@ isOrphan, hasRelationWarnings, @@ -509,7 +548,7 @@ diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json -- {websiteEntityPage} -@@ -374,9 +405,34 @@ +@@ -377,9 +408,34 @@ ); @@ -544,19 +583,27 @@ diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json -- -diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.4/packages/backend/Dockerfile backstage/packages/backend/Dockerfile ---- backstage_0.5.4/packages/backend/Dockerfile 2023-09-27 17:14:52 -+++ backstage/packages/backend/Dockerfile 2023-09-27 17:14:43 -@@ -48,4 +48,4 @@ +diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.8/packages/backend/Dockerfile backstage/packages/backend/Dockerfile +--- backstage_0.5.8/packages/backend/Dockerfile 
2024-01-23 15:03:23 ++++ backstage/packages/backend/Dockerfile 2024-01-23 15:03:06 +@@ -22,7 +22,6 @@ + # in which case you should also move better-sqlite3 to "devDependencies" in package.json. + RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ +- apt-get update && \ + apt-get install -y --no-install-recommends libsqlite3-dev + + # From here on we use the least-privileged `node` user to run the backend. +@@ -49,4 +48,4 @@ COPY --chown=node:node packages/backend/dist/bundle.tar.gz app-config*.yaml ./ RUN tar xzf bundle.tar.gz && rm bundle.tar.gz -CMD ["node", "packages/backend", "--config", "app-config.yaml", "--config", "app-config.production.yaml"] +CMD ["node", "packages/backend", "--config", "app-config.yaml", "--config", "app-config.production.yaml"] \ No newline at end of file -diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.4/packages/backend/src/index.ts backstage/packages/backend/src/index.ts ---- backstage_0.5.4/packages/backend/src/index.ts 2023-09-27 17:14:52 -+++ backstage/packages/backend/src/index.ts 2023-09-27 17:14:43 +diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.8/packages/backend/src/index.ts backstage/packages/backend/src/index.ts +--- backstage_0.5.8/packages/backend/src/index.ts 2024-01-23 15:03:23 ++++ backstage/packages/backend/src/index.ts 2024-01-23 15:03:06 @@ -28,9 +28,12 @@ import proxy from './plugins/proxy'; import techdocs from './plugins/techdocs'; @@ -576,7 +623,7 @@ diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json -- const databaseManager = DatabaseManager.fromConfig(config, { logger: root }); - const tokenManager = ServerTokenManager.noop(); + const tokenManager = ServerTokenManager.fromConfig(config, { logger: root }); - const taskScheduler = TaskScheduler.fromConfig(config); + const taskScheduler = TaskScheduler.fromConfig(config, { databaseManager }); const identity = DefaultIdentityClient.create({ @@ -85,6 +88,9 @@ @@ -599,9 +646,9 @@ diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json -- // Add backends ABOVE this line; this 404 handler is the catch-all fallback apiRouter.use(notFoundHandler()); -diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.4/packages/backend/src/plugins/OpaSamplePermissionPolicy.ts backstage/packages/backend/src/plugins/OpaSamplePermissionPolicy.ts ---- backstage_0.5.4/packages/backend/src/plugins/OpaSamplePermissionPolicy.ts 1969-12-31 19:00:00 -+++ backstage/packages/backend/src/plugins/OpaSamplePermissionPolicy.ts 2023-09-27 17:14:43 +diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.8/packages/backend/src/plugins/OpaSamplePermissionPolicy.ts backstage/packages/backend/src/plugins/OpaSamplePermissionPolicy.ts +--- 
backstage_0.5.8/packages/backend/src/plugins/OpaSamplePermissionPolicy.ts 1969-12-31 19:00:00 ++++ backstage/packages/backend/src/plugins/OpaSamplePermissionPolicy.ts 2024-01-23 15:03:06 @@ -0,0 +1,94 @@ +import { readOpaAppAuditPermission } from '@aws/plugin-aws-apps-common-for-backstage'; +import { DEFAULT_NAMESPACE, stringifyEntityRef } from '@backstage/catalog-model'; @@ -697,9 +744,9 @@ diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json -- + return { result: AuthorizeResult.ALLOW }; + } +} -diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.4/packages/backend/src/plugins/auth.ts backstage/packages/backend/src/plugins/auth.ts ---- backstage_0.5.4/packages/backend/src/plugins/auth.ts 2023-09-27 17:14:52 -+++ backstage/packages/backend/src/plugins/auth.ts 2023-09-27 17:14:43 +diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.8/packages/backend/src/plugins/auth.ts backstage/packages/backend/src/plugins/auth.ts +--- backstage_0.5.8/packages/backend/src/plugins/auth.ts 2024-01-23 15:03:23 ++++ backstage/packages/backend/src/plugins/auth.ts 2024-01-23 15:03:06 @@ -49,6 +49,23 @@ // resolver: providers.github.resolvers.usernameMatchingUserEntityName(), }, @@ -724,9 +771,9 @@ diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json -- }, }); } -diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.4/packages/backend/src/plugins/awsApps.ts backstage/packages/backend/src/plugins/awsApps.ts ---- backstage_0.5.4/packages/backend/src/plugins/awsApps.ts 1969-12-31 19:00:00 -+++ backstage/packages/backend/src/plugins/awsApps.ts 2023-09-27 17:14:43 +diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.8/packages/backend/src/plugins/awsApps.ts backstage/packages/backend/src/plugins/awsApps.ts +--- backstage_0.5.8/packages/backend/src/plugins/awsApps.ts 1969-12-31 19:00:00 ++++ backstage/packages/backend/src/plugins/awsApps.ts 2024-01-23 15:03:06 @@ -0,0 +1,24 @@ +//awsApps.ts + @@ -753,11 +800,11 @@ diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json -- + }); +} \ No newline at end of file -diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.4/packages/backend/src/plugins/catalog.ts backstage/packages/backend/src/plugins/catalog.ts ---- backstage_0.5.4/packages/backend/src/plugins/catalog.ts 2023-09-27 17:14:52 -+++ backstage/packages/backend/src/plugins/catalog.ts 2023-09-27 17:14:43 +diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env 
--exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.8/packages/backend/src/plugins/catalog.ts backstage/packages/backend/src/plugins/catalog.ts +--- backstage_0.5.8/packages/backend/src/plugins/catalog.ts 2024-01-23 15:03:23 ++++ backstage/packages/backend/src/plugins/catalog.ts 2024-01-23 15:03:06 @@ -2,13 +2,37 @@ - import { ScaffolderEntitiesProcessor } from '@backstage/plugin-scaffolder-backend'; + import { ScaffolderEntitiesProcessor } from '@backstage/plugin-catalog-backend-module-scaffolder-entity-model'; import { Router } from 'express'; import { PluginEnvironment } from '../types'; +import { OktaOrgEntityProvider } from '@roadiehq/catalog-backend-module-okta'; @@ -794,9 +841,9 @@ diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json -- await processingEngine.start(); return router; } -diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.4/packages/backend/src/plugins/gitlab.ts backstage/packages/backend/src/plugins/gitlab.ts ---- backstage_0.5.4/packages/backend/src/plugins/gitlab.ts 1969-12-31 19:00:00 -+++ backstage/packages/backend/src/plugins/gitlab.ts 2023-09-27 17:14:43 +diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.8/packages/backend/src/plugins/gitlab.ts backstage/packages/backend/src/plugins/gitlab.ts +--- backstage_0.5.8/packages/backend/src/plugins/gitlab.ts 1969-12-31 19:00:00 ++++ backstage/packages/backend/src/plugins/gitlab.ts 2024-01-23 15:03:06 @@ -0,0 +1,12 @@ +import { PluginEnvironment } from '../types'; +import { Router } from 'express-serve-static-core'; @@ -810,9 +857,9 @@ diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json -- + config: env.config, + }); +} -diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.4/packages/backend/src/plugins/permission.ts backstage/packages/backend/src/plugins/permission.ts ---- backstage_0.5.4/packages/backend/src/plugins/permission.ts 1969-12-31 19:00:00 -+++ backstage/packages/backend/src/plugins/permission.ts 2023-09-27 17:14:43 +diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.8/packages/backend/src/plugins/permission.ts backstage/packages/backend/src/plugins/permission.ts +--- backstage_0.5.8/packages/backend/src/plugins/permission.ts 1969-12-31 19:00:00 ++++ backstage/packages/backend/src/plugins/permission.ts 2024-01-23 15:03:06 @@ -0,0 +1,15 @@ +import { createRouter } from '@backstage/plugin-permission-backend'; +import { Router } from 'express'; @@ -829,9 +876,9 @@ diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json -- + identity: env.identity, + }); +} -diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts 
--exclude=dist --exclude=.DS_Store backstage_0.5.4/packages/backend/src/plugins/scaffolder.ts backstage/packages/backend/src/plugins/scaffolder.ts ---- backstage_0.5.4/packages/backend/src/plugins/scaffolder.ts 2023-09-27 17:14:52 -+++ backstage/packages/backend/src/plugins/scaffolder.ts 2023-09-27 17:14:43 +diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.8/packages/backend/src/plugins/scaffolder.ts backstage/packages/backend/src/plugins/scaffolder.ts +--- backstage_0.5.8/packages/backend/src/plugins/scaffolder.ts 2024-01-23 15:03:23 ++++ backstage/packages/backend/src/plugins/scaffolder.ts 2024-01-23 15:03:06 @@ -1,7 +1,33 @@ import { CatalogClient } from '@backstage/catalog-client'; -import { createRouter } from '@backstage/plugin-scaffolder-backend'; @@ -914,3 +961,50 @@ diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json -- + actions }); } +diff -Naur --exclude=node_modules --exclude=*aws-apps* --exclude=package.json --exclude=yarn.lock --exclude=app-config.local.yaml --exclude=.git --exclude=dist-types --exclude=.env --exclude=*.d.ts --exclude=dist --exclude=.DS_Store backstage_0.5.8/playwright.config.ts backstage/playwright.config.ts +--- backstage_0.5.8/playwright.config.ts 2024-01-23 15:03:23 ++++ backstage/playwright.config.ts 2024-01-23 15:03:06 +@@ -13,20 +13,16 @@ + * See the License for the specific language governing permissions and + * limitations under the License. + */ +- + import { defineConfig } from '@playwright/test'; + import { generateProjects } from '@backstage/e2e-test-utils/playwright'; +- + /** + * See https://playwright.dev/docs/test-configuration. + */ + export default defineConfig({ + timeout: 60_000, +- + expect: { + timeout: 5_000, + }, +- + // Run your local dev server before starting the tests + webServer: process.env.CI + ? [] +@@ -38,13 +34,9 @@ + timeout: 60_000, + }, + ], +- + forbidOnly: !!process.env.CI, +- + retries: process.env.CI ? 
2 : 0, +- + reporter: [['html', { open: 'never', outputFolder: 'e2e-test-report' }]], +- + use: { + actionTimeout: 0, + baseURL: +@@ -53,8 +45,6 @@ + screenshot: 'only-on-failure', + trace: 'on-first-retry', + }, +- + outputDir: 'node_modules/.cache/e2e-test-results', +- + projects: generateProjects(), // Find all packages with e2e-test folders + }); diff --git a/backstage-plugins/plugins/aws-apps-backend/README.md b/backstage-plugins/plugins/aws-apps-backend/README.md index 92362bbc..1015200f 100644 --- a/backstage-plugins/plugins/aws-apps-backend/README.md +++ b/backstage-plugins/plugins/aws-apps-backend/README.md @@ -89,7 +89,7 @@ Add to the Backstage catalog so that it's aware of the processors for the AWSEnv // packages/backend/src/plugins/catalog.ts import { CatalogBuilder } from '@backstage/plugin-catalog-backend'; -import { ScaffolderEntitiesProcessor } from '@backstage/plugin-scaffolder-backend'; +import { ScaffolderEntitiesProcessor } from '@backstage/plugin-catalog-backend-module-scaffolder-entity-model'; import { Router } from 'express'; import { PluginEnvironment } from '../types'; + import { AWSEnvironmentEntitiesProcessor, AWSEnvironmentProviderEntitiesProcessor} from '@aws/plugin-aws-apps-backend-for-backstage'; diff --git a/backstage-plugins/plugins/aws-apps-backend/package.json b/backstage-plugins/plugins/aws-apps-backend/package.json index f308e74c..ce018872 100644 --- a/backstage-plugins/plugins/aws-apps-backend/package.json +++ b/backstage-plugins/plugins/aws-apps-backend/package.json @@ -1,7 +1,7 @@ { "name": "@aws/plugin-aws-apps-backend-for-backstage", "description": "App Development for Backstage.io on AWS Backend plugin", - "version": "0.2.0", + "version": "0.2.1", "main": "src/index.ts", "types": "src/index.ts", "license": "Apache-2.0", @@ -16,7 +16,7 @@ }, "repository": { "type": "git", - "url": "github:awslabs/app-development-for-backstage-io-on-aws", + "url": "git+https://github.com/awslabs/app-development-for-backstage-io-on-aws.git", "directory": "backstage-plugins/plugins/aws-apps-backend" }, "bugs": { @@ -42,19 +42,20 @@ "@aws-sdk/client-eks": "^3.405.0", "@aws-sdk/client-resource-groups": "^3.296.0", "@aws-sdk/client-s3": "^3.290.0", + "@aws-sdk/client-lambda": "^3.290.0", "@aws-sdk/client-secrets-manager": "^3.290.0", "@aws-sdk/client-ssm": "^3.290.0", "@aws-sdk/client-sts": "^3.290.0", "@aws-sdk/types": "^3.290.0", "@aws-sdk/util-arn-parser": "^3.310.0", "@aws/plugin-aws-apps-common-for-backstage": "^0.2.0", - "@backstage/backend-common": "^0.19.4", - "@backstage/catalog-model": "^1.4.1", - "@backstage/config": "^1.0.8", - "@backstage/plugin-auth-node": "^0.2.17", - "@backstage/plugin-catalog-common": "^1.0.15", - "@backstage/plugin-catalog-node": "^1.4.1", - "@backstage/types": "^1.1.0", + "@backstage/backend-common": "^0.20.0", + "@backstage/catalog-model": "^1.4.3", + "@backstage/config": "^1.1.1", + "@backstage/plugin-auth-node": "^0.4.2", + "@backstage/plugin-catalog-common": "^1.0.19", + "@backstage/plugin-catalog-node": "^1.6.0", + "@backstage/types": "^1.1.1", "@kubernetes/client-node": "^0.18.1", "@types/express": "*", "express": "^4.17.3", @@ -64,8 +65,8 @@ "yn": "^4.0.0" }, "devDependencies": { - "@backstage/cli": "^0.22.12", - "@backstage/plugin-scaffolder-common": "^1.4.0", + "@backstage/cli": "^0.25.0", + "@backstage/plugin-scaffolder-common": "^1.4.4", "@types/supertest": "^2.0.8", "msw": "^0.49.0", "supertest": "^6.2.4" diff --git a/backstage-plugins/plugins/aws-apps-backend/src/api/AwsAppsApi.ts 
b/backstage-plugins/plugins/aws-apps-backend/src/api/AwsAppsApi.ts index 886607d6..41945e0d 100644 --- a/backstage-plugins/plugins/aws-apps-backend/src/api/AwsAppsApi.ts +++ b/backstage-plugins/plugins/aws-apps-backend/src/api/AwsAppsApi.ts @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 import { + Capability, CloudFormationClient, CreateStackCommand, CreateStackCommandOutput, @@ -64,6 +65,12 @@ import { DescribeClusterCommandOutput, EKSClient, } from '@aws-sdk/client-eks'; +import { + InvokeCommand, + InvokeCommandInput, + InvokeCommandOutput, + LambdaClient +} from '@aws-sdk/client-lambda'; import { ListGroupResourcesCommand, ListGroupResourcesCommandInput, @@ -71,6 +78,7 @@ import { ResourceGroupsClient, } from '@aws-sdk/client-resource-groups'; import { + BucketLocationConstraint, CreateBucketCommand, CreateBucketCommandInput, CreateBucketCommandOutput, @@ -97,11 +105,10 @@ import { GetParameterCommandOutput, SSMClient, } from '@aws-sdk/client-ssm'; + import { AwsCredentialIdentity } from '@aws-sdk/types'; import { parse as parseArn } from '@aws-sdk/util-arn-parser'; import { AWSServiceResources } from '@aws/plugin-aws-apps-common-for-backstage'; -import { KubeConfig, AppsV1Api } from '@kubernetes/client-node'; -import { response } from 'express'; import { Logger } from 'winston'; export type DynamoDBTableData = { @@ -130,7 +137,7 @@ export class AwsAppsApi { private readonly awsRegion: string, private readonly awsAccount: string, ) { - this.logger.info('Instatiating AWS Apps API with:'); + this.logger.info('Instantiating AWS Apps API with:'); this.logger.info(`awsAccount: ${this.awsAccount}`); this.logger.info(`awsRegion: ${this.awsRegion}`); } @@ -346,7 +353,7 @@ export class AwsAppsApi { // See https://github.com/aws/aws-sdk-js/issues/3647 if (this.awsRegion !== 'us-east-1') { createInput.CreateBucketConfiguration = { - LocationConstraint: this.awsRegion, + LocationConstraint: BucketLocationConstraint[this.awsRegion as keyof typeof BucketLocationConstraint], }; } @@ -793,8 +800,8 @@ export class AwsAppsApi { TemplateURL: `https://${s3BucketName}.s3.amazonaws.com/${cfFileName}`, Parameters: parameters, Capabilities: [ - "CAPABILITY_NAMED_IAM", - "CAPABILITY_AUTO_EXPAND", + Capability.CAPABILITY_IAM, + Capability.CAPABILITY_AUTO_EXPAND, ], Tags: [ { @@ -845,8 +852,8 @@ export class AwsAppsApi { TemplateURL: `https://${s3BucketName}.s3.amazonaws.com/${cfFileName}`, Parameters: parameters, Capabilities: [ - "CAPABILITY_NAMED_IAM", - "CAPABILITY_AUTO_EXPAND", + Capability.CAPABILITY_NAMED_IAM, + Capability.CAPABILITY_AUTO_EXPAND, ], Tags: [ { @@ -915,48 +922,23 @@ public async getEksCluster(clusterName: string): Promise { - const kc = new KubeConfig(); - kc.loadFromDefault(); - - const k8sApi = kc.makeApiClient(AppsV1Api); - try { - const deployment = await k8sApi.readNamespacedDeployment(deploymentName, namespace); - - if (deployment && deployment.body.spec){ - // Set the replicas to 0 to pause the deployment - deployment.body.spec.replicas = replicaCount; - } - else{ - this.logger.info("error when scaling deployment. 
check deployment name, body or specs") - } - - const response = await k8sApi.replaceNamespacedDeployment(deploymentName, namespace, deployment.body); - - console.log(`Deployment ${deploymentName} scaled down.`); - console.log(response); - } catch (error) { - console.error(`Error scaling down deployment: ${error}`); - } - - // Perform the scaling down operation, e.g., using Kubernetes client or AWS SDK +public async callLambda(functionName: string, body: string) :Promise +{ + this.logger.info('Calling callLambda'); + const client = new LambdaClient({ + region: this.awsRegion, + credentials: this.awsCredentials, + }); - // Return the result of the scaling down operation + const params: InvokeCommandInput = { + FunctionName: functionName, + LogType: 'Tail', + Payload: Buffer.from(body), + InvocationType:'RequestResponse' + }; + const command = new InvokeCommand(params); + const response = await client.send(command); return response; - } +} + } diff --git a/backstage-plugins/plugins/aws-apps-backend/src/api/aws-audit.ts b/backstage-plugins/plugins/aws-apps-backend/src/api/aws-audit.ts index 599582f9..eaaa95f7 100644 --- a/backstage-plugins/plugins/aws-apps-backend/src/api/aws-audit.ts +++ b/backstage-plugins/plugins/aws-apps-backend/src/api/aws-audit.ts @@ -47,10 +47,10 @@ export async function createAuditRecord({ let tableNameResponse; try { - tableNameResponse = await apiClient.getSSMParameter(`/${envProviderPrefix}/${envProviderName}/${envProviderName}-audit`); + tableNameResponse = await apiClient.getSSMParameter(`/${envProviderPrefix.toLowerCase()}/${envProviderName.toLowerCase()}/${envProviderName.toLowerCase()}-audit`); } catch (err) { response.status = 'FAILED'; - response.message = "Audit failed - audit table name was set to FIXME."; + response.message = `Audit failed - audit table name was set to FIXME. 
${tableNameResponse}`; } if (tableNameResponse?.Parameter?.Value) { diff --git a/backstage-plugins/plugins/aws-apps-backend/src/api/aws-auth.ts b/backstage-plugins/plugins/aws-apps-backend/src/api/aws-auth.ts index b0dfc542..f2c5c7d4 100644 --- a/backstage-plugins/plugins/aws-apps-backend/src/api/aws-auth.ts +++ b/backstage-plugins/plugins/aws-apps-backend/src/api/aws-auth.ts @@ -165,7 +165,6 @@ export async function getAWSCredsWorkaround(accountId: string, region: string, p const userName = user?.metadata.name || "unknown"; //assemble the arn format to the desire destination environment - //arn:aws:iam::115272120974:role/opa-dev-p1-operations-role const roleArn = `arn:aws:iam::${accountId}:role/${prefix}-${providerName}-operations-role`; console.log(roleArn) diff --git a/backstage-plugins/plugins/aws-apps-backend/src/api/aws-platform.ts b/backstage-plugins/plugins/aws-apps-backend/src/api/aws-platform.ts index 41358847..c8eb61f5 100644 --- a/backstage-plugins/plugins/aws-apps-backend/src/api/aws-platform.ts +++ b/backstage-plugins/plugins/aws-apps-backend/src/api/aws-platform.ts @@ -329,7 +329,7 @@ export class AwsAppsPlatformApi { `ACCOUNT=${provider.awsAccount}\nREGION=${provider.awsRegion}\nTARGET_ENV_NAME=${provider.environmentName}\nPREFIX=${provider.prefix}\n` + `TARGET_ENV_PROVIDER_NAME=${provider.providerName}\nOPA_CI_ENVIRONMENT=${provider.environmentName}-${provider.providerName}\n` + `OPA_CI_ENVIRONMENT_MANUAL_APPROVAL=${input.envRequiresManualApproval}\n` + - `OPA_CI_REGISTRY_IMAGE=${provider.awsAccount}.dkr.ecr.${provider.awsRegion}.amazonaws.com/${input.appName}-${provider.providerName}\n` + + `OPA_CI_REGISTRY_IMAGE=${provider.awsAccount}.dkr.ecr.${provider.awsRegion}.amazonaws.com/${input.appName}-${input.envName}-${provider.providerName}\n` + `OPA_CI_REGISTRY=${provider.awsAccount}.dkr.ecr.${provider.awsRegion}.amazonaws.com\n`; Object.keys(provider.parameters).forEach(key => { @@ -474,7 +474,7 @@ export class AwsAppsPlatformApi { const commit = { branch: 'main', - commit_message: `Unbind Resource`, + commit_message: `UnBind Resource`, actions: actions, }; @@ -533,7 +533,7 @@ export class AwsAppsPlatformApi { if (action === 'add') { console.log(entityCatalog); const newDependencies = entityCatalog.spec.dependsOn as Array; - newDependencies.push(`awsenvironmentprovider:default/${provider.name}`); + newDependencies.push(`awsenvironmentprovider:default/${provider.name.toLowerCase()}`); entityCatalog.spec.dependsOn = newDependencies; const providerContent = YAML.stringify(entityCatalog); console.log(providerContent); @@ -548,7 +548,7 @@ export class AwsAppsPlatformApi { const dependencies = entityCatalog.spec.dependsOn as Array; let newDependencies = Array(); dependencies.forEach(p => { - const providerToRemove = `awsenvironmentprovider:default/${provider.name}`; + const providerToRemove = `awsenvironmentprovider:default/${provider.name.toLowerCase()}`; if (p != providerToRemove) { newDependencies.push(p); } diff --git a/backstage-plugins/plugins/aws-apps-backend/src/service/router.ts b/backstage-plugins/plugins/aws-apps-backend/src/service/router.ts index f9f057b8..eeb38cab 100644 --- a/backstage-plugins/plugins/aws-apps-backend/src/service/router.ts +++ b/backstage-plugins/plugins/aws-apps-backend/src/service/router.ts @@ -18,6 +18,7 @@ import { AwsAppsApi, getAWScreds } from '../api'; import { AwsAuditResponse, createAuditRecord } from '../api/aws-audit'; import { AwsAppsPlatformApi } from '../api/aws-platform'; import { Config } from '@backstage/config'; +import { 
PlatformSCMParams } from '@aws/plugin-aws-apps-common-for-backstage/src/types/PlatformTypes'; export interface RouterOptions { logger: Logger; @@ -30,9 +31,9 @@ export async function createRouter(options: RouterOptions): Promise { logger.info('router entry: /platform/delete-secret'); const apiPlatformClient = getAwsAppsPlatformApi(req); @@ -415,6 +416,24 @@ export async function createRouter(options: RouterOptions): Promise { + logger.info('router entry: /platform/fetch-eks-config'); + console.log(req.body) + const apiPlatformClient = getAwsAppsPlatformApi(req); + const secretName = req.body.gitAdminSecret?.toString(); + const envName = req.body.envName?.toString(); + const providerName = req.body.providerName; + const platformParams: PlatformSCMParams = req.body.platformSCMConfig; + + const filePath = encodeURIComponent(`k8s/${envName}-${providerName}/next-release.json`); + logger.info(`fetching environment entity file path is ${filePath}`); + // get the JSON file from the repo + const jsonResponse = await apiPlatformClient.getFileContentsFromGit({ gitHost: platformParams.host, gitProjectGroup: platformParams.projectGroup, gitRepoName: platformParams.repoName }, filePath, secretName); + const configJson = JSON.parse(atob(jsonResponse.content)) + res.status(200).json(configJson); + }); + //Route for getting resource router.post('/resource-group', async (req, res) => { logger.info('router entry: /resource-group'); @@ -458,16 +477,28 @@ export async function createRouter(options: RouterOptions): Promise { @@ -747,24 +778,33 @@ export async function createRouter(options: RouterOptions): Promise { - try { - logger.info('router entry: /kubernetes/scaleEKSDeployment'); - const { apiClient } = await getApiClient(req) - - const namespace = req.body.namespace; - const deploymentName = req.body.deploymentName; - const replicaCount = req.body.replicaCount - // Call the scaleEKSDeployment API client method here - const result = await apiClient.scaleEKSDeployment(deploymentName, namespace, replicaCount) - - // Respond with a successful result - res.status(200).json(result); - } catch (error) { - // Handle errors appropriately and respond with an error status code and message - console.error('Error in /kubernetes/scaleEKSDeployment:', error); - res.status(500).json({ message: 'Internal Server Error' }); + // Route for interacting with lambda + router.post('/lambda/invoke', async (req, res) => { + logger.info('router entry: /lambda/invoke'); + const { apiClient, apiClientConfig } = await getApiClient(req); + const functionName = req.body.functionName?.toString(); + const actionDescription = req.body.actionDescription?.toString() || ''; + const body = req.body.body?.toString(); + const lambdaOutput = await apiClient.callLambda(functionName, body); + + if (actionDescription) { + const auditResponse = await createRouterAuditRecord({ + actionType: 'Invoke Lambda', + actionName: actionDescription, + status: lambdaOutput.StatusCode == 200 ? 'SUCCESS' : 'FAILED', + apiClientConfig, + }); + if (auditResponse.status == 'FAILED') res.status(500).json({ message: 'auditing request FAILED.' 
}); + } + + if (lambdaOutput.StatusCode == 200) { + res.status(200).send(lambdaOutput); + } else { + res.status(400).send({ + error: `Error calling ${functionName}`, + }); } + }); return router; diff --git a/backstage-plugins/plugins/aws-apps-common/package.json b/backstage-plugins/plugins/aws-apps-common/package.json index da08a664..3ab57f6f 100644 --- a/backstage-plugins/plugins/aws-apps-common/package.json +++ b/backstage-plugins/plugins/aws-apps-common/package.json @@ -1,7 +1,7 @@ { "name": "@aws/plugin-aws-apps-common-for-backstage", "description": "Common functionalities for the aws-apps plugin", - "version": "0.2.0", + "version": "0.2.1", "main": "src/index.ts", "types": "src/index.ts", "license": "Apache-2.0", @@ -17,7 +17,7 @@ }, "repository": { "type": "git", - "url": "github:awslabs/app-development-for-backstage-io-on-aws", + "url": "git+https://github.com/awslabs/app-development-for-backstage-io-on-aws.git", "directory": "backstage-plugins/plugins/aws-apps-common" }, "bugs": { @@ -35,10 +35,10 @@ "postpack": "backstage-cli package postpack" }, "dependencies": { - "@backstage/plugin-permission-common": "^0.7.7" + "@backstage/plugin-permission-common": "^0.7.11" }, "devDependencies": { - "@backstage/cli": "^0.22.12" + "@backstage/cli": "^0.25.0" }, "files": [ "dist", diff --git a/backstage-plugins/plugins/aws-apps-common/src/types/AWSUIInterfaces.ts b/backstage-plugins/plugins/aws-apps-common/src/types/AWSUIInterfaces.ts index 16e211db..16f3d192 100644 --- a/backstage-plugins/plugins/aws-apps-common/src/types/AWSUIInterfaces.ts +++ b/backstage-plugins/plugins/aws-apps-common/src/types/AWSUIInterfaces.ts @@ -104,8 +104,7 @@ export function isAWSEKSAppDeploymentEnvironment(variable: any): variable is AWS "clusterName" in variable && "app" in variable && "ecrArn" in variable.app && - "pod" in variable.app && - "node" in variable.app && + "namespace" in variable.app && "resourceGroupArn" in variable.app && "logGroupName" in variable.app ); @@ -114,11 +113,9 @@ export function isAWSEKSAppDeploymentEnvironment(variable: any): variable is AWS export type AWSEKSAppDeploymentEnvironment = AWSDeploymentEnvironment & { clusterName: string; app: AWSDeploymentEnvironmentComponent & { + appAdminRoleArn: string; ecrArn: string; namespace: string; - deploymentName: string; - pod?: string[]; - node?: string; resourceGroupArn: string; logGroupName: string; } @@ -181,6 +178,7 @@ export enum AWSComponentType { export type AWSComponent = { componentName: string; componentType: AWSComponentType; + componentSubType: string; gitRepo: string; gitHost: string; iacType: string; @@ -201,3 +199,37 @@ export type AWSEnvironmentProviderRecord = { accountNumber: string; region: string; } + +export enum AppStateType { + RUNNING = "Running", + STOPPED = "Stopped", + UPDATING = "Updating", + PROVISIONING = "Provisioning" +} + +export type AppState = { + appID?: string; + appState?: AppStateType + deploymentIdentifier?: string; + runningCount?: number; + desiredCount?: number; + pendingCount?: number; + lastStateTimestamp?: Date; + stateObject?: any; + additionalInfo?: keyValue[]; +} + +export interface keyValue { + id: string; + key: string; + value: string; +} + +export interface keyValueDouble { + id: string; + key: string; + value: string; + key2: string; + value2: string; +} + diff --git a/backstage-plugins/plugins/aws-apps-common/src/types/AppPromoTypes.ts b/backstage-plugins/plugins/aws-apps-common/src/types/AppPromoTypes.ts index c301d53f..2bb15d26 100644 --- 
a/backstage-plugins/plugins/aws-apps-common/src/types/AppPromoTypes.ts +++ b/backstage-plugins/plugins/aws-apps-common/src/types/AppPromoTypes.ts @@ -23,5 +23,5 @@ export type AWSProviderParams = { envRequiresManualApproval: boolean; prefix: string; providerName: string; - parameters: { [key: string]: string } //Parameters key value map for provision the app on the designated provider + parameters: { [key: string]: string } //Parameters key value map for provisioning the app on the designated provider } diff --git a/backstage-plugins/plugins/aws-apps-common/src/types/PlatformTypes.ts b/backstage-plugins/plugins/aws-apps-common/src/types/PlatformTypes.ts new file mode 100644 index 00000000..5b832c90 --- /dev/null +++ b/backstage-plugins/plugins/aws-apps-common/src/types/PlatformTypes.ts @@ -0,0 +1,6 @@ +export type PlatformSCMParams = +{ + host: string + projectGroup: string; + repoName: string; +} \ No newline at end of file diff --git a/backstage-plugins/plugins/aws-apps-demo/package.json b/backstage-plugins/plugins/aws-apps-demo/package.json index dc360888..c119d43a 100644 --- a/backstage-plugins/plugins/aws-apps-demo/package.json +++ b/backstage-plugins/plugins/aws-apps-demo/package.json @@ -1,7 +1,7 @@ { "name": "@aws/plugin-aws-apps-demo-for-backstage", "description": "App Development for Backstage.io on AWS - home page and theme demo", - "version": "0.2.0", + "version": "0.2.1", "main": "src/index.ts", "types": "src/index.ts", "license": "Apache-2.0", @@ -16,7 +16,7 @@ }, "repository": { "type": "git", - "url": "github:awslabs/app-development-for-backstage-io-on-aws", + "url": "git+https://github.com/awslabs/app-development-for-backstage-io-on-aws.git", "directory": "backstage-plugins/plugins/aws-apps-demo" }, "bugs": { @@ -35,12 +35,12 @@ "postpack": "backstage-cli package postpack" }, "dependencies": { - "@backstage/core-components": "^0.13.4", - "@backstage/core-plugin-api": "^1.5.3", - "@backstage/plugin-catalog-react": "^1.8.3", - "@backstage/plugin-home": "^0.5.7", - "@backstage/plugin-search": "^1.3.6", - "@backstage/theme": "^0.4.1", + "@backstage/core-components": "^0.13.9", + "@backstage/core-plugin-api": "^1.8.1", + "@backstage/plugin-catalog-react": "^1.9.2", + "@backstage/plugin-home": "^0.6.0", + "@backstage/plugin-search": "^1.4.4", + "@backstage/theme": "^0.5.0", "@material-ui/core": "^4.12.2", "@material-ui/icons": "^4.9.1", "@material-ui/lab": "^4.0.0-alpha.57", @@ -50,10 +50,10 @@ "react": "^16.13.1 || ^17.0.0" }, "devDependencies": { - "@backstage/cli": "^0.22.12", - "@backstage/core-app-api": "^1.9.1", - "@backstage/dev-utils": "^1.0.20", - "@backstage/test-utils": "^1.4.2", + "@backstage/cli": "^0.25.0", + "@backstage/core-app-api": "^1.11.2", + "@backstage/dev-utils": "^1.0.25", + "@backstage/test-utils": "^1.4.6", "@testing-library/jest-dom": "^5.10.1", "@testing-library/react": "^12.1.3", "@testing-library/user-event": "^14.0.0", diff --git a/backstage-plugins/plugins/aws-apps/package.json b/backstage-plugins/plugins/aws-apps/package.json index 47afebb2..cc3cf24d 100644 --- a/backstage-plugins/plugins/aws-apps/package.json +++ b/backstage-plugins/plugins/aws-apps/package.json @@ -1,7 +1,7 @@ { "name": "@aws/plugin-aws-apps-for-backstage", "description": "App Development for Backstage.io on AWS Frontend plugin", - "version": "0.2.0", + "version": "0.2.1", "main": "src/index.ts", "types": "src/index.ts", "license": "Apache-2.0", @@ -16,7 +16,7 @@ }, "repository": { "type": "git", - "url": "github:awslabs/app-development-for-backstage-io-on-aws", + "url": 
"git+https://github.com/awslabs/app-development-for-backstage-io-on-aws.git", "directory": "backstage-plugins/plugins/aws-apps" }, "bugs": { @@ -41,18 +41,19 @@ "@aws-sdk/client-ecs": "^3.290.0", "@aws-sdk/client-eks": "^3.405.0", "@aws-sdk/client-s3": "^3.290.0", + "@aws-sdk/client-lambda": "^3.290.0", "@aws-sdk/client-secrets-manager": "^3.294.0", "@aws-sdk/client-ssm": "^3.290.0", "@aws-sdk/util-arn-parser": "^3.310.0", "@aws/plugin-aws-apps-common-for-backstage": "^0.2.0", - "@backstage/catalog-model": "^1.4.1", - "@backstage/core-components": "^0.13.4", - "@backstage/core-plugin-api": "^1.5.3", - "@backstage/errors": "^1.2.1", - "@backstage/plugin-catalog": "^1.12.4", - "@backstage/plugin-catalog-react": "^1.8.3", - "@backstage/plugin-permission-react": "^0.4.14", - "@backstage/theme": "^0.4.1", + "@backstage/catalog-model": "^1.4.3", + "@backstage/core-components": "^0.13.9", + "@backstage/core-plugin-api": "^1.8.1", + "@backstage/errors": "^1.2.3", + "@backstage/plugin-catalog": "^1.16.0", + "@backstage/plugin-catalog-react": "^1.9.2", + "@backstage/plugin-permission-react": "^0.4.18", + "@backstage/theme": "^0.5.0", "@emotion/react": "^11.10.5", "@emotion/styled": "^11.10.5", "@kubernetes/client-node": "^0.18.1", @@ -69,10 +70,10 @@ "react": "^16.13.1 || ^17.0.0" }, "devDependencies": { - "@backstage/cli": "^0.22.12", - "@backstage/core-app-api": "^1.9.1", - "@backstage/dev-utils": "^1.0.20", - "@backstage/test-utils": "^1.4.2", + "@backstage/cli": "^0.25.0", + "@backstage/core-app-api": "^1.11.2", + "@backstage/dev-utils": "^1.0.25", + "@backstage/test-utils": "^1.4.6", "@testing-library/jest-dom": "^5.10.1", "@testing-library/react": "^12.1.3", "@testing-library/user-event": "^14.0.0", diff --git a/backstage-plugins/plugins/aws-apps/src/api/OPAApi.ts b/backstage-plugins/plugins/aws-apps/src/api/OPAApi.ts index db3d19ff..608b3514 100644 --- a/backstage-plugins/plugins/aws-apps/src/api/OPAApi.ts +++ b/backstage-plugins/plugins/aws-apps/src/api/OPAApi.ts @@ -17,6 +17,8 @@ import { GetParameterCommandOutput } from '@aws-sdk/client-ssm'; import { AWSProviderParams, AWSServiceResources, BackendParams, BindResourceParams, AWSEnvironmentProviderRecord } from '@aws/plugin-aws-apps-common-for-backstage'; import { createApiRef } from '@backstage/core-plugin-api'; import { ContainerDetailsType } from '../types'; +import { InvokeCommandOutput } from "@aws-sdk/client-lambda"; +import { PlatformSCMParams } from "@aws/plugin-aws-apps-common-for-backstage/src/types/PlatformTypes"; export const opaApiRef = createApiRef({ id: 'plugin.opa.app', @@ -317,16 +319,51 @@ export interface OPAApi { providersData: AWSProviderParams[]; }): Promise; - scaleEKSDeployment({ - deploymentName, - namespace, - replicaCount, - backendParamsOverrides, + invokeLambda({ + functionName, + actionDescription, + body, + backendParamsOverrides }: { - deploymentName: string; - namespace: string, - replicaCount: number + functionName: string; + actionDescription: string; + body: string; backendParamsOverrides?: BackendParams; - }): Promise; + }): Promise; + + getEKSAppManifests({ + envName, + gitAdminSecret, + platformSCMConfig, + backendParamsOverrides + }: { + envName: string; + gitAdminSecret: string; + platformSCMConfig: PlatformSCMParams; + backendParamsOverrides?: BackendParams; + }): Promise + updateEKSApp({ + actionDescription, + envName, + cluster, + updateKey, + updateValue, + kubectlLambda, + lambdaRoleArn, + gitAdminSecret, + platformSCMConfig, + backendParamsOverrides + }: { + actionDescription: string; + 
envName: string; + cluster: string; + updateKey: string; + updateValue: string | number; + kubectlLambda: string; + lambdaRoleArn: string; + gitAdminSecret: string; + platformSCMConfig: PlatformSCMParams; + backendParamsOverrides?: BackendParams; + }): Promise; } diff --git a/backstage-plugins/plugins/aws-apps/src/api/OPAApiClient.ts b/backstage-plugins/plugins/aws-apps/src/api/OPAApiClient.ts index 5393127f..087132a0 100644 --- a/backstage-plugins/plugins/aws-apps/src/api/OPAApiClient.ts +++ b/backstage-plugins/plugins/aws-apps/src/api/OPAApiClient.ts @@ -20,6 +20,8 @@ import { ResponseError } from '@backstage/errors'; import { OPAApi } from '.'; import { HTTP } from '../helpers/constants'; import { ContainerDetailsType } from '../types'; +import { InvokeCommandOutput } from "@aws-sdk/client-lambda"; +import { PlatformSCMParams } from "@aws/plugin-aws-apps-common-for-backstage/src/types/PlatformTypes"; export class OPAApiClient implements OPAApi { @@ -216,7 +218,7 @@ export class OPAApiClient implements OPAApi { gitAdminSecret: string; backendParamsOverrides?: BackendParams }): Promise { - + const postBody = { ...this.backendParams, @@ -336,7 +338,7 @@ export class OPAApiClient implements OPAApi { gitProjectGroup: string; gitAdminSecret: string; envName: string; - }): Promise { + }): Promise { const beParams = this.getAppliedBackendParams(backendParamsOverrides); const postBody = { ...beParams, @@ -662,31 +664,131 @@ export class OPAApiClient implements OPAApi { return results; } - async scaleEKSDeployment({ - deploymentName, - namespace, - replicaCount, - backendParamsOverrides, + async invokeLambda({ + functionName, + actionDescription, + body, + backendParamsOverrides }: { - namespace: string; - deploymentName: string; - replicaCount: number; + functionName: string; + actionDescription: string; + body: string; + backendParamsOverrides?: BackendParams; + }): Promise { + const beParams = this.getAppliedBackendParams(backendParamsOverrides); + const postBody = { + ...beParams, + functionName, + actionDescription, + body + }; + // console.log(postBody) + const lambdaResult = await this.fetch('/lambda/invoke', HTTP.POST, postBody); + return lambdaResult; + } + + async getEKSAppManifests({ + envName, + gitAdminSecret, + platformSCMConfig, + backendParamsOverrides + }: { + envName: string; + gitAdminSecret: string; + platformSCMConfig: PlatformSCMParams; backendParamsOverrides?: BackendParams; }): Promise { const beParams = this.getAppliedBackendParams(backendParamsOverrides); + // Fetch current config const postBody = { ...beParams, - namespace: namespace, - deploymentName: deploymentName, - replicaCount: replicaCount + envName, + gitAdminSecret, + platformSCMConfig }; - try { - const cluster = await this.fetch('/kubernetes/scaleEKSDeployment', HTTP.POST, postBody); - return cluster; - } catch (error) { - console.error('Error:', error); - } + // console.log(postBody) + let configResult = await this.fetch('/platform/fetch-eks-config', HTTP.POST, postBody); + // console.log(configResult); + + return configResult; + } + + async updateEKSApp({ + actionDescription, + envName, + cluster, + updateKey, + updateValue, + kubectlLambda, + lambdaRoleArn, + gitAdminSecret, + platformSCMConfig, + backendParamsOverrides + }: { + actionDescription: string; + envName: string; + cluster: string; + updateKey: string; + updateValue: string | number; + kubectlLambda: string; + lambdaRoleArn: string; + gitAdminSecret: string; + platformSCMConfig: PlatformSCMParams; + backendParamsOverrides?: BackendParams; + }): 
Promise { + let configResult = await this.getEKSAppManifests({ + envName, + gitAdminSecret, + platformSCMConfig, + backendParamsOverrides + }); + + const updateKeyArray = updateKey.split('.'); + Object.values(configResult).forEach(value => { + let currObj: any = value; + for (let i = 0; i < updateKeyArray.length; i++) { + const currKey = updateKeyArray[i]; + if (currObj.hasOwnProperty(currKey)) { + // console.log(currKey) + // console.log(currObj) + if (i === updateKeyArray.length - 1) { + // console.log(`key ${currKey} found , current value ${currObj[currKey]}`) + currObj[currKey] = updateValue + // console.log(`Updating key ${currKey} found , current value ${currObj[currKey]}`) + + } else { + currObj = currObj[currKey] + } + } + else { + break + } + } + }) + // console.log(configResult) + + //make changes to config + const manifest = JSON.stringify(configResult) + const bodyParam = { + RequestType: "Update", + ResourceType: "Custom::AWSCDK-EKS-KubernetesResource", + ResourceProperties: { + TimeoutSeconds: "5", + ClusterName: cluster, + RoleArn: lambdaRoleArn, + InvocationType: 'RequestResponse', + Manifest: manifest, + } + }; + + const configUpdateResult = await this.invokeLambda({ + functionName: kubectlLambda, + actionDescription, + body: JSON.stringify(bodyParam) + }) + + return configUpdateResult; } private async fetch(path: string, method = HTTP.GET, data?: any): Promise { diff --git a/backstage-plugins/plugins/aws-apps/src/components/AppCatalogPage/AppCatalogPage.tsx b/backstage-plugins/plugins/aws-apps/src/components/AppCatalogPage/AppCatalogPage.tsx index fde8faf7..cd5db3f0 100644 --- a/backstage-plugins/plugins/aws-apps/src/components/AppCatalogPage/AppCatalogPage.tsx +++ b/backstage-plugins/plugins/aws-apps/src/components/AppCatalogPage/AppCatalogPage.tsx @@ -101,8 +101,21 @@ export function AppCatalogPage(props: AppCatalogPageProps) { initialKind = 'awsenvironmentprovider'; allowedTypesComponent = []; initiallySelectedFilter = 'all'; - } else if (kind === 'component') { + } else if (kind === 'component' && initialType === 'aws-app') { + const awsAppsColumns: TableColumn[] = [ + columnFactories.createTitleColumn({ hidden: true }), + columnFactories.createNameColumn({ defaultKind: initialKind }), + columnFactories.createMetadataDescriptionColumn(), + columnFactories.createComponentSubTypeColumn(), + columnFactories.createOwnerColumn(), + columnFactories.createSpecLifecycleColumn(), + columnFactories.createMetadataDescriptionColumn(), + columnFactories.createTagsColumn(), + ]; + columns=awsAppsColumns allowedKinds = ['Component']; + initiallySelectedFilter = 'all'; + } else if (kind === 'resource') { const awsResourcesColumns: TableColumn[] = [ columnFactories.createTitleColumn({ hidden: true }), diff --git a/backstage-plugins/plugins/aws-apps/src/components/AppCatalogPage/awsColumns.tsx b/backstage-plugins/plugins/aws-apps/src/components/AppCatalogPage/awsColumns.tsx index 46a677a2..e24fc673 100644 --- a/backstage-plugins/plugins/aws-apps/src/components/AppCatalogPage/awsColumns.tsx +++ b/backstage-plugins/plugins/aws-apps/src/components/AppCatalogPage/awsColumns.tsx @@ -32,7 +32,7 @@ export const columnFactories = Object.freeze({ return { title: 'Name', - field: 'metadata.name', + field: 'resolved.entityRef', highlight: true, customSort({ entity: entity1 }, { entity: entity2 }) { // TODO: We could implement this more efficiently by comparing field by field. 
@@ -46,7 +46,6 @@ export const columnFactories = Object.freeze({ ), @@ -69,6 +68,9 @@ export const columnFactories = Object.freeze({ return { title: 'Owner', field: 'resolved.ownedByRelationsTitle', + cellStyle: { + minWidth:'130px' + }, render: ({ resolved }) => ( { return { title: 'AWS Account', - field: 'entity.metadata["aws-account"]', + field: 'entity.metadata["awsAccount"]', cellStyle: { padding: '0px 16px 0px 20px', + minWidth:'150px' }, render: ({ entity }) => ( <> { - entity.metadata["aws-account"]?.toString() || "" + entity.metadata["awsAccount"]?.toString() || "" } ), @@ -149,14 +152,14 @@ export const columnFactories = Object.freeze({ createProviderRegionColumn(): TableColumn { return { title: 'AWS Region', - field: 'entity.metadata["aws-region"]', + field: 'entity.metadata["awsRegion"]', cellStyle: { padding: '0px 16px 0px 20px', }, render: ({ entity }) => ( <> { - entity.metadata["aws-region"]?.toString() || "" + entity.metadata["awsRegion"]?.toString() || "" } ), @@ -183,7 +186,7 @@ export const columnFactories = Object.freeze({ createAWSResourceTypeColumn(): TableColumn { return { title: 'Resource Type', - field: 'entity.metadata["resource-type"]', + field: 'entity.metadata["resourceType"]', cellStyle: { padding: '0px 16px 0px 20px', minWidth:'30px' @@ -191,7 +194,7 @@ export const columnFactories = Object.freeze({ render: ({ entity }) => ( <> { - entity.metadata["resource-type"]?.toString() || "" + entity.metadata["resourceType"]?.toString() || "" } ), @@ -202,14 +205,14 @@ export const columnFactories = Object.freeze({ createIACColumn(): TableColumn { return { title: 'IAC', - field: 'entity.metadata["iac-type"]', + field: 'entity.metadata["iacType"]', cellStyle: { padding: '0px 16px 0px 20px', }, render: ({ entity }) => ( <> { - entity.metadata["iac-type"]?.toString() || "" + entity.metadata["iacType"]?.toString() || "" } ), @@ -219,14 +222,14 @@ export const columnFactories = Object.freeze({ createEnvironmentTypeColumn(): TableColumn { return { title: 'Type', - field: 'entity.metadata["environment-type"]', + field: 'entity.metadata["environmentType"]', cellStyle: { padding: '0px 16px 0px 20px', }, render: ({ entity }) => ( <> { - entity.metadata["environment-type"]?.toString() || "" + entity.metadata["environmentType"]?.toString() || "" } ), @@ -304,14 +307,14 @@ export const columnFactories = Object.freeze({ createEnvironmentAccountTypeColumn(): TableColumn { return { title: 'Account Type', - field: 'entity.metadata["env-type-account"]', + field: 'entity.metadata["envTypeAccount"]', cellStyle: { padding: '0px 16px 0px 20px', }, render: ({ entity }) => ( <> { - entity.metadata["env-type-account"]?.toString() || "" + entity.metadata["envTypeAccount"]?.toString() || "" } ), @@ -321,14 +324,14 @@ export const columnFactories = Object.freeze({ createEnvironmentRegionTypeColumn(): TableColumn { return { title: 'Region Type', - field: 'entity.metadata["env-type-region"]', + field: 'entity.metadata["envTypeRegion"]', cellStyle: { padding: '0px 16px 0px 20px', }, render: ({ entity }) => ( <> { - entity.metadata["env-type-region"]?.toString() || "" + entity.metadata["envTypeRegion"]?.toString() || "" } ), @@ -338,14 +341,32 @@ export const columnFactories = Object.freeze({ createProviderTypeColumn(): TableColumn { return { title: 'Provider Type', - field: 'entity.metadata["env-type"]', + field: 'entity.metadata["envType"]', + cellStyle: { + padding: '0px 16px 0px 20px', + }, + render: ({ entity }) => ( + <> + { + entity.metadata["envType"]?.toString() || "" + } + + ), + width: 
'auto', + }; + }, + createComponentSubTypeColumn(): TableColumn { + return { + title: 'Sub Type', + field: 'entity.spec.subType', cellStyle: { padding: '0px 16px 0px 20px', + minWidth:'150px' }, render: ({ entity }) => ( <> { - entity.metadata["env-type"]?.toString() || "" + entity.spec?.subType?.toString() || "" } ), diff --git a/backstage-plugins/plugins/aws-apps/src/components/AppPromoCard/AppPromoCard.tsx b/backstage-plugins/plugins/aws-apps/src/components/AppPromoCard/AppPromoCard.tsx index d8a8e602..9871c454 100644 --- a/backstage-plugins/plugins/aws-apps/src/components/AppPromoCard/AppPromoCard.tsx +++ b/backstage-plugins/plugins/aws-apps/src/components/AppPromoCard/AppPromoCard.tsx @@ -3,7 +3,7 @@ import React, { ChangeEvent, useEffect, useState } from 'react'; import { EmptyState, InfoCard, } from '@backstage/core-components'; -import { CatalogApi } from '@backstage/plugin-catalog-react'; +import { CatalogApi, useEntity } from '@backstage/plugin-catalog-react'; import { Button, CardContent, FormControl, FormHelperText, Grid, InputLabel, LinearProgress, MenuItem, Select } from '@material-ui/core'; import { Alert, AlertTitle, Typography } from '@mui/material'; import { useAsyncAwsApp } from '../../hooks/useAwsApp'; @@ -19,22 +19,26 @@ import InfoIcon from '@mui/icons-material/Info'; import IconButton from '@mui/material/IconButton'; import Tooltip from '@mui/material/Tooltip'; import { ProviderType } from '../../helpers/constants'; +import { AwsEksEnvPromoDialog } from './AwsEksEnvPromoDialog'; const AppPromoCard = ({ - input: { awsComponent, catalogApi }, + input: { awsComponent, catalogApi, appEntity }, }: { - input: { awsComponent: AWSComponent; catalogApi: CatalogApi }; + input: { awsComponent: AWSComponent; catalogApi: CatalogApi, appEntity: Entity }; }) => { const [envChoices, setEnvChoices] = useState([]); const [selectedItem, setSelectedItem] = useState(""); const [disabled, setDisabled] = useState(false); const [spinning, setSpinning] = useState(false); + const [openEksDialog, setOpenEksDialog] = useState(false); const [isPromotionSuccessful, setIsPromotionSuccessful] = useState(false); const [promotedEnvName, setPromotedEnvName] = useState(""); const [promoteResultMessage, setPromoteResultMessage] = useState(""); + const [suggestedEksNamespace, setSuggestedEksNamespace] = useState(""); + const [suggestedIamRoleArn, setSuggestedIamRoleArn] = useState(""); const api = useApi(opaApiRef); - + function getHighestLevelEnvironment(currentEnvironments: AwsDeploymentEnvironments) { let highestLevel = 1; Object.keys(currentEnvironments).forEach(env => { @@ -59,7 +63,7 @@ const AppPromoCard = ({ return catalogEntities .filter(en => { return ( - en.metadata["environment-type"] === envType && + en.metadata["environmentType"] === envType && !currentEnvKeys.includes(en.metadata.name) && Number.parseInt(en.metadata["level"]?.toString()!) >= lowestEnvironmentLevel ) @@ -71,7 +75,7 @@ const AppPromoCard = ({ const filterExpression = { 'kind': "awsenvironment", - // 'metadata.environment-type': component.currentEnvironment.environment.envType, + 'metadata.environmentType': awsComponent.currentEnvironment.environment.envType // 'spec.system': component.currentEnvironment.environment.system, TODO: when system is implemented filter on similar system. 
}; @@ -96,18 +100,18 @@ const AppPromoCard = ({ const backendParamsOverrides = { appName: awsComponent.componentName, - awsAccount: envProviderEntity.metadata['aws-account']?.toString() || "", - awsRegion: envProviderEntity.metadata['aws-region']?.toString() || "", + awsAccount: envProviderEntity.metadata['awsAccount']?.toString() || "", + awsRegion: envProviderEntity.metadata['awsRegion']?.toString() || "", prefix: envProviderEntity.metadata['prefix']?.toString() || "", providerName: envProviderEntity.metadata.name }; - const envType = envProviderEntity.metadata['env-type']?.toString().toLowerCase(); + const envType = envProviderEntity.metadata['envType']?.toString().toLowerCase(); if (envType === ProviderType.ECS) { const metaVpc = "vpc"; - const metaRole = "provisioning-role"; - const metaCluster = "cluster-name"; + const metaRole = "provisioningRole"; + const metaCluster = "clusterName"; const metadataKeys = [metaVpc, metaCluster, metaRole]; const ssmValues = await Promise.all(metadataKeys.map(async (metaKey) => { @@ -123,14 +127,33 @@ const AppPromoCard = ({ // 'TARGET_ENV_AUDIT': auditTable }; return parametersMap; - } - else if (envType === ProviderType.EKS) { - throw new Error("TO BE IMPLEMENTED - eks"); // TODO: Implement EKS support for AppPromoCard.tsx - } - else if (envType === ProviderType.SERVERLESS) { + + } else if (envType === ProviderType.EKS) { const metaVpc = "vpc"; - const metaRole = "provisioning-role"; + const metaRole = "provisioningRole"; + const metaCluster = "clusterName"; + const metadataKeys = [metaVpc, metaCluster, metaRole]; + + const ssmValues = await Promise.all(metadataKeys.map(async (metaKey) => { + const paramKey = envProviderEntity.metadata[metaKey]?.toString() || metaKey; + const value = (await api.getSSMParameter({ ssmParamName: paramKey, backendParamsOverrides })).Parameter?.Value || ""; + return value; + })); + + let parametersMap = { + TARGET_VPCID: ssmValues[metadataKeys.indexOf(metaVpc)], + TARGET_EKS_CLUSTER_ARN: ssmValues[metadataKeys.indexOf(metaCluster)], + ENV_ROLE_ARN: ssmValues[metadataKeys.indexOf(metaRole)], + TARGET_KUBECTL_LAMBDA_ARN: envProviderEntity.metadata.kubectlLambdaArn as string, + TARGET_KUBECTL_LAMBDA_ROLE_ARN: envProviderEntity.metadata.clusterAdminRole as string, + }; + return parametersMap; + + } else if (envType === ProviderType.SERVERLESS) { + + const metaVpc = "vpc"; + const metaRole = "provisioningRole"; const vpcParam = envProviderEntity.metadata[metaVpc]?.toString() || ""; const metaPubNet = `${vpcParam}/public-subnets`; const metaPrivNet = `${vpcParam}/private-subnets`; @@ -166,7 +189,7 @@ const AppPromoCard = ({ const selectedEnv = await catalogApi.getEntities({ filter: { 'kind': "awsenvironment", 'metadata.name': selectedItem } }); const envEntity = selectedEnv.items[0]; - const envRequiresManualApproval = !!envEntity.metadata['deployment_requires_approval']; + const envRequiresManualApproval = !!envEntity.metadata['deploymentRequiresApproval']; const envProviderRefs: EntityRelation[] | undefined = envEntity.relations?.filter( relation => parseEntityRef(relation?.targetRef).kind === 'awsenvironmentprovider')!; @@ -183,10 +206,10 @@ const AppPromoCard = ({ environmentName: envEntity.metadata.name, envRequiresManualApproval, providerName: et?.metadata.name || '', - awsAccount: et?.metadata['aws-account']?.toString() || '', - awsRegion: et?.metadata['aws-region']?.toString() || '', + awsAccount: et?.metadata['awsAccount']?.toString() || '', + awsRegion: et?.metadata['awsRegion']?.toString() || '', prefix: 
et?.metadata['prefix']?.toString() || '', - assumedRoleArn: et?.metadata['provisioning-role']?.toString() || '', + assumedRoleArn: et?.metadata['provisioningRole']?.toString() || '', parameters: providerResolvedData }); })) @@ -198,12 +221,14 @@ const AppPromoCard = ({ setPromotedEnvName(""); }; - const handleClick = () => { - if (!selectedItem) { - alert('Select an Environment'); - return; - } + const closeEksDialog = () => setOpenEksDialog(false); + + const submitNewEksEnvironmentHandler = (namespace: string, iamRoleArn: string, roleBehavior: string) => { + // console.log(`CREATE ENV - namespace=${namespace} roleBehavior=${roleBehavior} iamRoleArn=${iamRoleArn}`); + createNewEnvironment({["NAMESPACE"]: namespace, ["APP_ADMIN_ROLE_ARN"]: iamRoleArn, ["K8S_IAM_ROLE_BINDING_TYPE"]: roleBehavior}); + }; + const createNewEnvironment = (extraParameters?: { [key: string]: string }) => { setSpinning(true); setPromotedEnvName(""); @@ -221,6 +246,14 @@ const AppPromoCard = ({ providersData: envProviders.providers }; + if (extraParameters) { + Object.keys(extraParameters).forEach(key => { + envProviders.providers.forEach(providerParams => { + providerParams.parameters[key] = extraParameters[key]; + }); + }); + } + // now call the API and submit the promo request api.promoteApp(promoBody).then(results => { setSpinning(false); @@ -255,7 +288,31 @@ const AppPromoCard = ({ }) }); + }; + + const handleClick = () => { + if (!selectedItem) { + alert('Select an Environment'); + return; + } + + const envType = awsComponent.currentEnvironment.environment.envType.toLowerCase(); + + // Show dialog asking user for additional EKS input + if (envType === ProviderType.EKS) { + + if (appEntity.metadata.appData && Object.keys(appEntity.metadata.appData).length) { + const firstEnv = Object.values(appEntity.metadata.appData)[0]; + const firstEnvProvider = Object.values(firstEnv)[0] as { Namespace: string; AppAdminRoleArn: string; }; + setSuggestedEksNamespace(`suggestions: "${appEntity.metadata.name}", "${appEntity.metadata.name}-${selectedItem}", "${firstEnvProvider.Namespace}"`); + setSuggestedIamRoleArn(`suggestions: "${firstEnvProvider.AppAdminRoleArn}"`); + } + + setOpenEksDialog(true); + return; + } + createNewEnvironment(); } return ( @@ -316,6 +373,16 @@ const AppPromoCard = ({ + + + theme.zIndex.drawer + 1 }} open={spinning} @@ -330,6 +397,7 @@ const AppPromoCard = ({ export const AppPromoWidget = () => { const awsAppLoadingStatus = useAsyncAwsApp(); const catalogApi = useApi(catalogApiRef); + const { entity } = useEntity(); if (awsAppLoadingStatus.loading) { return ; @@ -337,7 +405,8 @@ export const AppPromoWidget = () => { const component = awsAppLoadingStatus.component const input = { awsComponent: component, - catalogApi + catalogApi, + appEntity: entity }; return ; diff --git a/backstage-plugins/plugins/aws-apps/src/components/AppPromoCard/AwsEksEnvPromoDialog.tsx b/backstage-plugins/plugins/aws-apps/src/components/AppPromoCard/AwsEksEnvPromoDialog.tsx new file mode 100644 index 00000000..23c4c041 --- /dev/null +++ b/backstage-plugins/plugins/aws-apps/src/components/AppPromoCard/AwsEksEnvPromoDialog.tsx @@ -0,0 +1,184 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +import { + Button, + Dialog, + DialogActions, + DialogContent, + DialogTitle, Grid, + IconButton, InputLabel, MenuItem, Select, TextField, makeStyles +} from '@material-ui/core'; +import { Close } from '@mui/icons-material'; +import React, { useState } from 'react'; +import FormControl from '@mui/material/FormControl'; + +// Declare styles to use in the components +const useStyles = makeStyles(theme => ({ + container: { + 'min-width': 500, + 'min-height': 150, + }, + resourceTitle: { + 'font-weight': 'bold', + }, + closeButton: { + position: 'absolute', + right: theme.spacing(1), + top: theme.spacing(1), + color: theme.palette.grey[500], + }, + empty: { + padding: theme.spacing(2), + display: 'flex', + justifyContent: 'center', + }, +})); + +/** + * + * @param isOpen Boolean describing whether the dialog is displayed (open) or not (closed) + * @param closeDialogHandler the handler callback when the dialog is dismissed/cancelled + * @param submitHandler the handler callback when the dialog is submitted + * @param environmentName the name of the environment that will be added to the app's CICD pipeline + * @namespaceDefault suggestions on what the user could enter for a namespace + * @iamRoleArnDefault suggestions on what the user could enter for the IAM role ARN + * @returns + */ +export const AwsEksEnvPromoDialog = ({ + isOpen, + closeDialogHandler, + submitHandler, + environmentName, + namespaceDefault, + iamRoleArnDefault, +}: { + isOpen: boolean; + closeDialogHandler: () => void; + submitHandler: (namespace: string, iamRoleArn: string, roleBehavior: string) => void; + environmentName: string; + namespaceDefault: string; + iamRoleArnDefault: string; +}) => { + + const classes = useStyles(); + + const [namespace, setNamespace] = useState(""); + const [namespaceIsInvalid, setNamespaceIsInvalid] = useState(false); + const [namespaceDescription, setNamespaceDescription] = useState(`The k8s namespace to assign to application resources for the ${environmentName} environment`); + + const [iamRoleArn, setIamRoleArn] = useState(""); + const [iamRoleArnIsInvalid, setIamRoleArnIsInvalid] = useState(false); + const [iamRoleArnDescription, setIamRoleArnDescription] = useState("Existing IAM role to grant namespace privileges to"); + + const [roleBehavior, setRoleBehavior] = useState("create_new_k8s_namespace_admin_iam_role"); + + const submitNewEnvironmentHandler = () => { + if (roleBehavior === 'existing_new_k8s_namespace_admin_iam_role' && !iamRoleArn) { + checkIamRoleArn(); + return; + } + if (namespaceIsInvalid || (iamRoleArnIsInvalid && roleBehavior === 'existing_new_k8s_namespace_admin_iam_role')) { + return; + } + closeDialogHandler(); + submitHandler(namespace as string, iamRoleArn as string, roleBehavior as string); + }; + + const checkNamespace = () => { + if (!namespace) { + setNamespaceDescription("Cannot be Empty"); + setNamespaceIsInvalid(true); + } else { + setNamespaceDescription(`The k8s namespace to assign to application resources for the ${environmentName} environment`); + setNamespaceIsInvalid(false); + } + }; + + const checkIamRoleArn = () => { + if (!iamRoleArn) { + setIamRoleArnDescription("Cannot be Empty"); + setIamRoleArnIsInvalid(true); + } else { + setIamRoleArnDescription("Existing IAM role to grant namespace privileges to"); + setIamRoleArnIsInvalid(false); + } + }; + + return ( + + + Add Environment: {environmentName} + + + + + + + + K8s Namespace + ) => setNamespace(e.target.value)} + onBlur={() => checkNamespace()} + 
error={namespaceIsInvalid} + helperText={namespaceDescription} + placeholder={namespaceDefault} + required + autoFocus + > + + + + + Namespace-bound Kubectl Admin Access + + + + {roleBehavior === 'existing_new_k8s_namespace_admin_iam_role' && + + + IAM Role + ) => setIamRoleArn(e.target.value)} + onBlur={() => checkIamRoleArn()} + error={iamRoleArnIsInvalid} + helperText={iamRoleArnDescription} + placeholder={iamRoleArnDefault} + required + > + + + } + + + + + + + ); +}; + diff --git a/backstage-plugins/plugins/aws-apps/src/components/AwsEnvironmentProviderCard/AwsEnvironmentProviderCard.tsx b/backstage-plugins/plugins/aws-apps/src/components/AwsEnvironmentProviderCard/AwsEnvironmentProviderCard.tsx index 1be571d0..70a6e56d 100644 --- a/backstage-plugins/plugins/aws-apps/src/components/AwsEnvironmentProviderCard/AwsEnvironmentProviderCard.tsx +++ b/backstage-plugins/plugins/aws-apps/src/components/AwsEnvironmentProviderCard/AwsEnvironmentProviderCard.tsx @@ -55,18 +55,19 @@ const AwsEnvironmentProviderCard = ({ id: index.toString(), name: et?.metadata.name || '', prefix: et?.metadata['prefix']?.toString() || '', - providerType: et?.metadata['env-type']?.toString() || '', + providerType: et?.metadata['envType']?.toString() || '', description: et?.metadata['description']?.toString() || '', - accountNumber: et?.metadata['aws-account']?.toString() || '', - region: et?.metadata['aws-region']?.toString() || '' + accountNumber: et?.metadata['awsAccount']?.toString() || '', + region: et?.metadata['awsRegion']?.toString() || '' }) }) setItems(providers) let potentialProviders: AWSEnvironmentProviderRecord[] = []; let index = 0; + const envRuntimeType = entity.metadata.environmentType?.toString() || "" - catalog.getEntities({ filter: { 'kind': "awsenvironmentprovider" } }).then(entities => { + catalog.getEntities({ filter: { 'kind': "awsenvironmentprovider", 'metadata.envType':envRuntimeType } }).then(entities => { entities.items.forEach((et) => { if (providers.length > 0) { providers.forEach(existingP => { @@ -78,10 +79,10 @@ const AwsEnvironmentProviderCard = ({ id: index.toString(), name: et?.metadata.name || '', prefix: et?.metadata['prefix']?.toString() || '', - providerType: et?.metadata['env-type']?.toString() || '', + providerType: et?.metadata['envType']?.toString() || '', description: et?.metadata['description']?.toString() || '', - accountNumber: et?.metadata['aws-account']?.toString() || '', - region: et?.metadata['aws-region']?.toString() || '' + accountNumber: et?.metadata['awsAccount']?.toString() || '', + region: et?.metadata['awsRegion']?.toString() || '' }) } }) @@ -92,10 +93,10 @@ const AwsEnvironmentProviderCard = ({ id: index.toString(), name: et?.metadata.name || '', prefix: et?.metadata['prefix']?.toString() || '', - providerType: et?.metadata['env-type']?.toString() || '', + providerType: et?.metadata['envType']?.toString() || '', description: et?.metadata['description']?.toString() || '', - accountNumber: et?.metadata['aws-account']?.toString() || '', - region: et?.metadata['aws-region']?.toString() || '' + accountNumber: et?.metadata['awsAccount']?.toString() || '', + region: et?.metadata['awsRegion']?.toString() || '' }) } }) @@ -110,16 +111,16 @@ const AwsEnvironmentProviderCard = ({ awsAccount: item.accountNumber, awsRegion: item.region, prefix: item.prefix, - providerName: item.name + providerName: item.name.toLowerCase() }; const params = { gitHost: entity.metadata['repoUrl'] ? 
entity.metadata['repoUrl'].toString().split('?')[0] : "", - gitRepoName: entity.metadata.name, + gitRepoName: entity.metadata.repoUrl?.toString().split('repo=')[1].toLowerCase() || "", provider: item, gitProjectGroup: 'aws-environments', gitAdminSecret: 'opa-admin-gitlab-secrets', - envName: entity.metadata.name, + envName: entity.metadata.name.toLowerCase(), action, backendParamsOverrides } diff --git a/backstage-plugins/plugins/aws-apps/src/components/DeleteComponentCard/DeleteComponentCard.tsx b/backstage-plugins/plugins/aws-apps/src/components/DeleteComponentCard/DeleteComponentCard.tsx index 101c6d3c..27bb2569 100644 --- a/backstage-plugins/plugins/aws-apps/src/components/DeleteComponentCard/DeleteComponentCard.tsx +++ b/backstage-plugins/plugins/aws-apps/src/components/DeleteComponentCard/DeleteComponentCard.tsx @@ -3,7 +3,7 @@ import { Button, CardContent, Grid, LinearProgress } from "@material-ui/core"; import { useAsyncAwsApp } from "../../hooks/useAwsApp"; -import { AWSComponent, GenericAWSEnvironment } from "@aws/plugin-aws-apps-common-for-backstage"; +import { AWSComponent, AWSComponentType, AWSEKSAppDeploymentEnvironment, AWSResourceDeploymentEnvironment, GenericAWSEnvironment } from "@aws/plugin-aws-apps-common-for-backstage"; import { EmptyState, InfoCard } from "@backstage/core-components"; import React, { useState } from "react"; import { useApi } from "@backstage/core-plugin-api"; @@ -16,6 +16,7 @@ import Backdrop from '@mui/material/Backdrop'; import CircularProgress from '@mui/material/CircularProgress'; import { catalogApiRef } from '@backstage/plugin-catalog-react'; import { sleep } from "../../helpers/util"; +import { APP_SUBTYPE } from "../../helpers/constants"; const DeleteAppPanel = ({ input: { awsComponent, entity, catalogApi, api } @@ -27,8 +28,9 @@ const DeleteAppPanel = ({ const [deleteResultMessage, setDeleteResultMessage] = useState(""); const navigate = useNavigate(); - const appIACType=entity.metadata["iac-type"]?.toString(); - console.log(appIACType); + const appIACType = entity.metadata["iacType"]?.toString(); + const appSubtype = entity.spec?.["subType"]?.toString() || 'undefinedSubtype'; + // console.log(appIACType); const handleCloseAlert = () => { setDeleteResultMessage(""); @@ -40,8 +42,8 @@ const DeleteAppPanel = ({ gitProject: gitRepo.split('/')[0], gitRepoName: gitRepo.split('/')[1], gitAdminSecret: 'opa-admin-gitlab-secrets' - }).then(results => { - console.log(results); + }).then(_results => { + // console.log(_results); setDeleteResultMessage("Gitlab Repository deleted.") setIsDeleteSuccessful(true) }).catch(error => { @@ -53,7 +55,7 @@ const DeleteAppPanel = ({ } const deleteFromCatalog = async () => { - console.log("Deleting entity from backstage catalog") + // console.log("Deleting entity from backstage catalog"); setDeleteResultMessage("Deleting entity from backstage catalog") // The entity will be removed from the catalog along with the auto-generated Location kind entity // which references the catalog entity @@ -67,6 +69,82 @@ const DeleteAppPanel = ({ catalogApi.removeEntityByUid(uid); } + // Ensure that k8s objects are deleted in an appropriate order + const getK8sKindSortOrder = (k8sObject: any): number => { + let order; + switch (k8sObject.kind) { + case "Ingress": + order = 0; + break; + case "Service": + order = 1; + break; + case "Deployment": + order = 2; + break; + case "ConfigMap": + order = 4; + break; + case "RoleBinding": + order = 999; + break; + default: + order = 3; + } + return order; + }; + + const deleteK8sApp = async 
(env: AWSEKSAppDeploymentEnvironment) => { + + let k8sManifests = await api.getEKSAppManifests({ + envName: env.environment.name, + gitAdminSecret: 'opa-admin-gitlab-secrets', + platformSCMConfig: { + host: awsComponent.gitHost, + projectGroup: 'aws-app', + repoName: awsComponent.gitRepo.split('/')[1] + } + }); + + // Removing objects without a namespace set since the app admin role + // does not have permissions to delete them. + k8sManifests = (k8sManifests as any[]) + .filter(k8sObject => !!k8sObject.metadata?.namespace) + .sort((a, b) => getK8sKindSortOrder(a) - getK8sKindSortOrder(b)); + + if (!k8sManifests.length) { + return; + } + + const kubectlLambdaArn = env.entities.envProviderEntity?.metadata["kubectlLambdaArn"]?.toString() || ""; + const kubectlLambdaRoleArn = env.app.appAdminRoleArn; + + const clusterNameParam = await api.getSSMParameter({ ssmParamName: env.clusterName }); + const clusterName = clusterNameParam.Parameter?.Value?.toString().split('/')[1].toString() || ""; + + const bodyParamVariables = { + RequestType: "Delete", + ResourceType: "Custom::AWSCDK-EKS-KubernetesResource", + ResourceProperties: { + ClusterName: clusterName, + RoleArn: kubectlLambdaRoleArn, + Manifest: JSON.stringify(k8sManifests), + } + }; + + const invokeLambdaResponse = await api.invokeLambda({ + functionName: kubectlLambdaArn, + actionDescription: `Delete app from namespace ${env.app.namespace}`, + body: JSON.stringify(bodyParamVariables) + }); + + if (invokeLambdaResponse.FunctionError) { + throw new Error('Failed to delete app from Kubernetes cluster.'); + } + + return invokeLambdaResponse; + } + const deleteAppFromSingleProvider = async (appName: string, env: GenericAWSEnvironment) => { const backendParamsOverrides = { appName: appName, @@ -75,87 +153,99 @@ const DeleteAppPanel = ({ prefix: env.providerData.prefix, providerName: env.providerData.name }; + const accessRole = `arn:aws:iam::${env.providerData.accountNumber}:role/${env.providerData.prefix}-${env.providerData.name}-operations-role` - if (appIACType==="cdk") - { - const stackName = env.app.cloudFormationStackName; + if (appIACType === "cdk") { + let stackName = "" + if (awsComponent.componentType === AWSComponentType.AWSResource) { + const resourceEnv = env as AWSResourceDeploymentEnvironment + stackName = resourceEnv.resource.cloudFormationStackName + } + else if (awsComponent.componentType === AWSComponentType.AWSApp) { + stackName = env.app.cloudFormationStackName; + } + + // For EKS apps, we need to delete the application from the Kubernetes cluster + if (APP_SUBTYPE.EKS === appSubtype) { + await deleteK8sApp(awsComponent.currentEnvironment as AWSEKSAppDeploymentEnvironment); + } + const results = api.deleteProvider({ stackName, accessRole, backendParamsOverrides }); - return results - } - else if (appIACType==="terraform") - { + return results; + + } else if (appIACType === "terraform") { + + // For EKS apps, we need to delete the application from the Kubernetes cluster + if (APP_SUBTYPE.EKS === appSubtype) { + await deleteK8sApp(awsComponent.currentEnvironment as AWSEKSAppDeploymentEnvironment); + } + const gitHost = entity.metadata.annotations ? entity.metadata.annotations['gitlab.com/instance']?.toString() : ""; - const gitRepo = entity.metadata.annotations ? entity.metadata.annotations['gitlab.com/project-slug']?.toString() : ""; - const params ={ + const gitRepo = entity.metadata.annotations ? 
entity.metadata.annotations['gitlab.com/project-slug']?.toString() : ""; + const params = { backendParamsOverrides, gitHost, - gitRepoName:gitRepo.split('/')[1], - gitProjectGroup:gitRepo.split('/')[0], - gitAdminSecret:'opa-admin-gitlab-secrets', - envName:env.environment.name + gitRepoName: gitRepo.split('/')[1], + gitProjectGroup: gitRepo.split('/')[0], + gitAdminSecret: 'opa-admin-gitlab-secrets', + envName: env.environment.name } const results = api.deleteTFProvider(params); - return results + return results; } else { throw new Error(`deleteAppFromSingleProvider Not Yet implemented for ${appIACType}`) } - + } const deleteSecret = (secretName: string) => { - api.deletePlatformSecret({ secretName }).then(result => { - console.log(result) + api.deletePlatformSecret({ secretName }).then(_result => { + // console.log(_result); setDeleteResultMessage("Secret Deleted.") }).catch(error => { - setSpinning(false) - setIsDeleteSuccessful(false) - setDeleteResultMessage(error.toString()) + setSpinning(false); + setIsDeleteSuccessful(false); + setDeleteResultMessage(error.toString()); }) } const handleDeleteRepo = async () => { - // Delete the repo now. - const gitHost = entity.metadata.annotations ? entity.metadata.annotations['gitlab.com/instance']?.toString() : ""; - const gitRepo = entity.metadata.annotations ? entity.metadata.annotations['gitlab.com/project-slug']?.toString() : ""; - deleteRepo(gitHost, gitRepo) - setDeleteResultMessage("Redirect to home ....") - await sleep(4000); - navigate('/') + // Delete the repo now. + const gitHost = entity.metadata.annotations ? entity.metadata.annotations['gitlab.com/instance']?.toString() : ""; + const gitRepo = entity.metadata.annotations ? entity.metadata.annotations['gitlab.com/project-slug']?.toString() : ""; + deleteRepo(gitHost, gitRepo) + setDeleteResultMessage("Redirect to home ....") + await sleep(4000); + navigate('/') } const handleClickDelete = async () => { - const deployedEnvironments = Object.keys(awsComponent.environments).length; - if (deployedEnvironments === 1) { - handleClickDeleteAll(); - } - else { - if (confirm('Are you sure you want to delete this app?')) { - setSpinning(true); - deleteAppFromSingleProvider(awsComponent.componentName, awsComponent.currentEnvironment).then(async results => { - console.log(results) - setIsDeleteSuccessful(true); - setDeleteResultMessage("App delete initiated.") - //now update repo to remove environment - // api.InitiateGitDelete - await sleep(2000); - // awsComponent.currentEnvironment.providerData.name - - }).catch(error => { - console.log(error) - setSpinning(false) - setIsDeleteSuccessful(false) - setDeleteResultMessage(error.toString()) - return; - }) - + if (confirm('Are you sure you want to delete this app?')) { + setSpinning(true); + deleteAppFromSingleProvider(awsComponent.componentName, awsComponent.currentEnvironment).then(async _results => { + // console.log(_results) + setSpinning(false); + setIsDeleteSuccessful(true); + setDeleteResultMessage("App delete initiated.") + //now update repo to remove environment + // api.InitiateGitDelete + await sleep(2000); + // awsComponent.currentEnvironment.providerData.name + }).catch(error => { + console.log(error); + setSpinning(false); + setIsDeleteSuccessful(false); + setDeleteResultMessage(error.toString()); + return; + }) - } else { - // Do nothing! - } + } else { + // Do nothing! 
} + }; const handleClickDeleteAll = async () => { @@ -164,43 +254,43 @@ const DeleteAppPanel = ({ deployedEnvironments.forEach(env => { const environmentToRemove: GenericAWSEnvironment = awsComponent.environments[env]; // remove environment x - deleteAppFromSingleProvider(awsComponent.componentName, environmentToRemove).then(async results => { - console.log(results) + deleteAppFromSingleProvider(awsComponent.componentName, environmentToRemove).then(async _results => { + // console.log(_results) setIsDeleteSuccessful(true); setDeleteResultMessage(`CloudFormation delete stack on provider ${env} initiated.`) await sleep(2000); }).catch(error => { - console.log(error) - setSpinning(false) - setIsDeleteSuccessful(false) - setDeleteResultMessage(error.toString()) + console.log(error); + setSpinning(false); + setIsDeleteSuccessful(false); + setDeleteResultMessage(error.toString()); return; }) }) - if (appIACType==="cdk") - { + if (appIACType === "cdk") { // Delete the repo now. const gitHost = entity.metadata.annotations ? entity.metadata.annotations['gitlab.com/instance']?.toString() : ""; const gitRepo = entity.metadata.annotations ? entity.metadata.annotations['gitlab.com/project-slug']?.toString() : ""; - deleteRepo(gitHost, gitRepo) + deleteRepo(gitHost, gitRepo); await sleep(2000); - deleteSecret(entity.metadata['repo-secret-arn']?.toString() || "") - deleteFromCatalog() + if (awsComponent.componentType === AWSComponentType.AWSApp) { + deleteSecret(entity.metadata['repoSecretArn']?.toString() || ""); + } + deleteFromCatalog(); setSpinning(false); await sleep(2000); - setDeleteResultMessage("Redirect to home ....") - navigate('/') - setDisabled(false) + setDeleteResultMessage("Redirect to home ...."); + navigate('/'); + setDisabled(false); } - else if (appIACType==="terraform") - { + else if (appIACType === "terraform") { await sleep(2000); setSpinning(false); - setDisabled(false) - setDeleteResultMessage("Once the pipeline finish executing you may click Delete Repository") + setDisabled(false); + setDeleteResultMessage("Once the pipeline finish executing you may click Delete Repository"); } - + } else { // Do nothing! } @@ -233,31 +323,31 @@ const DeleteAppPanel = ({ { - (appIACType==="terraform")? - ( - - - Delete Repository + (appIACType === "terraform") ? + ( + + + Delete Repository + + + + +
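A note on the deletion flow introduced above: getK8sKindSortOrder is used so that Ingress and Service objects are removed before the Deployment, ConfigMaps after it, and the RoleBinding last (the namespace-bound app admin role must keep its permissions until everything else is gone). A minimal illustrative sketch of that ordering, not part of this patch and using placeholder manifests:

// Lower values are deleted first; unknown kinds default to 3, matching getK8sKindSortOrder above.
const kindOrder: Record<string, number> = {
  Ingress: 0,
  Service: 1,
  Deployment: 2,
  ConfigMap: 4,
  RoleBinding: 999,
};

const sortForDeletion = (manifests: { kind: string }[]) =>
  [...manifests].sort((a, b) => (kindOrder[a.kind] ?? 3) - (kindOrder[b.kind] ?? 3));

// Example: yields Ingress, Service, Deployment, ConfigMap, RoleBinding.
sortForDeletion([
  { kind: 'RoleBinding' },
  { kind: 'ConfigMap' },
  { kind: 'Deployment' },
  { kind: 'Service' },
  { kind: 'Ingress' },
]);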
*Delete the repo after terraform IAC delete pipeline is completed. +
+
- - - -
*Delete the repo after terraform IAC delete pipeline is completed. -
-
-
- ):
+ ) :
} {isDeleteSuccessful && deleteResultMessage && ( - + Success {entity.metadata.name} was successfully deleted! {!!deleteResultMessage && (<>

{deleteResultMessage})}
)} {!isDeleteSuccessful && deleteResultMessage && ( - + Error Failed to delete {entity.metadata.name} . {!!deleteResultMessage && (<>

{deleteResultMessage})} @@ -276,8 +366,6 @@ const DeleteAppPanel = ({ ); } - - export const DeleteComponentCard = () => { const awsAppLoadingStatus = useAsyncAwsApp(); const { entity } = useEntity(); @@ -287,7 +375,7 @@ export const DeleteComponentCard = () => { if (awsAppLoadingStatus.loading) { return } else if (awsAppLoadingStatus.component) { - console.log(awsAppLoadingStatus.component) + // console.log(awsAppLoadingStatus.component) const input = { awsComponent: awsAppLoadingStatus.component, entity, diff --git a/backstage-plugins/plugins/aws-apps/src/components/DeleteEnvironmentCard/DeleteEnvironmentCard.tsx b/backstage-plugins/plugins/aws-apps/src/components/DeleteEnvironmentCard/DeleteEnvironmentCard.tsx index 81bc6d63..6e45abb9 100644 --- a/backstage-plugins/plugins/aws-apps/src/components/DeleteEnvironmentCard/DeleteEnvironmentCard.tsx +++ b/backstage-plugins/plugins/aws-apps/src/components/DeleteEnvironmentCard/DeleteEnvironmentCard.tsx @@ -33,7 +33,7 @@ const DeleteEnvironmentPanel = ({ const deleteRepo = () => { const gitHost = entity.metadata['repoUrl'] ? entity.metadata['repoUrl'].toString().split("?")[0] : ""; - const gitRepoName = entity.metadata.name; + const gitRepoName = entity.metadata.repoUrl?.toString().split('repo=')[1].toLowerCase() || ""; api.deleteRepository({ gitHost, gitProject: 'aws-environments', diff --git a/backstage-plugins/plugins/aws-apps/src/components/DeleteProviderCard/DeleteProviderCard.tsx b/backstage-plugins/plugins/aws-apps/src/components/DeleteProviderCard/DeleteProviderCard.tsx index b613710a..41f7ff93 100644 --- a/backstage-plugins/plugins/aws-apps/src/components/DeleteProviderCard/DeleteProviderCard.tsx +++ b/backstage-plugins/plugins/aws-apps/src/components/DeleteProviderCard/DeleteProviderCard.tsx @@ -24,11 +24,11 @@ const DeleteProviderPanel = ({ const [deleteResultMessage, setDeleteResultMessage] = useState(""); const api = useApi(opaApiRef); const navigate = useNavigate(); - const stackName = entity.metadata['stack-name']?.toString() || ''; + const stackName = entity.metadata['stackName']?.toString() || ''; const prefix = entity.metadata['prefix']?.toString() || ''; - const accessRole = entity.metadata['environment_role']?.toString() || ''; - const awsAccount = entity.metadata['aws-account']?.toString() || ''; - const awsRegion = entity.metadata['aws-region']?.toString() || ''; + const accessRole = entity.metadata['environmentRole']?.toString() || ''; + const awsAccount = entity.metadata['awsAccount']?.toString() || ''; + const awsRegion = entity.metadata['awsRegion']?.toString() || ''; const backendParamsOverrides = { appName: '', awsAccount: awsAccount, diff --git a/backstage-plugins/plugins/aws-apps/src/components/EnvironmentInfoCard/EnvironmentInfoCard.tsx b/backstage-plugins/plugins/aws-apps/src/components/EnvironmentInfoCard/EnvironmentInfoCard.tsx index dca854de..cb39e5e6 100644 --- a/backstage-plugins/plugins/aws-apps/src/components/EnvironmentInfoCard/EnvironmentInfoCard.tsx +++ b/backstage-plugins/plugins/aws-apps/src/components/EnvironmentInfoCard/EnvironmentInfoCard.tsx @@ -44,21 +44,21 @@ const EnvironmentInfo = (props: ProviderInfoProps) => { items.push({ key: "Short Name", - value: metadata['short-name']?.toString() || "" + value: metadata['shortName']?.toString() || "" }); items.push({ key: "Environment Type", - value: metadata['environment-type']?.toString() || "" + value: metadata['environmentType']?.toString() || "" }); items.push({ key: "Account Type", - value: metadata['env-type-account']?.toString() || "" + value: 
metadata['envTypeAccount']?.toString() || "" }); items.push({ key: "Region Type", - value: metadata['env-type-region']?.toString() || "" + value: metadata['envTypeRegion']?.toString() || "" }); items.push({ key: "Category", diff --git a/backstage-plugins/plugins/aws-apps/src/components/GeneralInfoCard/GeneralInfoCard.tsx b/backstage-plugins/plugins/aws-apps/src/components/GeneralInfoCard/GeneralInfoCard.tsx index 9adb8624..090e4d52 100644 --- a/backstage-plugins/plugins/aws-apps/src/components/GeneralInfoCard/GeneralInfoCard.tsx +++ b/backstage-plugins/plugins/aws-apps/src/components/GeneralInfoCard/GeneralInfoCard.tsx @@ -134,7 +134,7 @@ export const GeneralInfoCard = ({ appPending }: { appPending: boolean }) => { region: '', gitApp: entity.metadata.annotations ? entity.metadata.annotations['gitlab.com/project-slug']?.toString() : "", gitHostUrl: '', - repoSecretArn: entity.metadata['repo-secret-arn']?.toString() || '', + repoSecretArn: entity.metadata['repoSecretArn']?.toString() || '', api, appPending }; diff --git a/backstage-plugins/plugins/aws-apps/src/components/InfrastructureCard/InfrastructureCard.tsx b/backstage-plugins/plugins/aws-apps/src/components/InfrastructureCard/InfrastructureCard.tsx index 145cb078..9e1e2b88 100644 --- a/backstage-plugins/plugins/aws-apps/src/components/InfrastructureCard/InfrastructureCard.tsx +++ b/backstage-plugins/plugins/aws-apps/src/components/InfrastructureCard/InfrastructureCard.tsx @@ -9,6 +9,7 @@ import React, { useEffect, useState } from 'react'; import { opaApiRef } from '../../api'; import { ServiceResourcesComponent } from './ServiceComponent'; import { useAsyncAwsApp } from '../../hooks/useAwsApp'; +import { ProviderType } from '../../helpers/constants'; const OpaAppInfraInfo = ({ input: { resourceGroupArn, awsComponent } @@ -36,6 +37,11 @@ const OpaAppInfraInfo = ({ 'SSM', ]; + if (ProviderType.EKS === awsComponent.currentEnvironment.providerData.providerType) { + defaultServiceFilter.push('ElasticLoadBalancingV2'); + defaultServiceFilter.push('ECR'); + } + async function getData() { // Validate the resource group annotation and extract the resource group name // so that we can build a deepLink to the resource group page in the AWS console diff --git a/backstage-plugins/plugins/aws-apps/src/components/K8sAppStateCard/K8sAppStateCard.tsx b/backstage-plugins/plugins/aws-apps/src/components/K8sAppStateCard/K8sAppStateCard.tsx new file mode 100644 index 00000000..d09bbff9 --- /dev/null +++ b/backstage-plugins/plugins/aws-apps/src/components/K8sAppStateCard/K8sAppStateCard.tsx @@ -0,0 +1,806 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +import { InfoCard, EmptyState } from '@backstage/core-components'; +import { TableBody, TableCell, TableRow, Table } from '@material-ui/core'; +import { useApi } from '@backstage/core-plugin-api'; +import { LinearProgress } from '@material-ui/core'; +import { Button, CardContent, Divider, Grid, Typography } from '@mui/material'; +import React, { useEffect, useState, useRef } from 'react'; +import { opaApiRef } from '../../api'; +import { useAsyncAwsApp } from '../../hooks/useAwsApp'; +import { AWSComponent, AWSEKSAppDeploymentEnvironment, AppState, AppStateType, keyValue } from '@aws/plugin-aws-apps-common-for-backstage'; +import { useEntity } from '@backstage/plugin-catalog-react'; +import { base64PayloadConvert } from '../../helpers/util'; +import { Entity } from '@backstage/catalog-model'; +import { Unstable_NumberInput as NumberInput } from '@mui/base/Unstable_NumberInput'; +import { styled } from '@mui/system'; +import RemoveIcon from '@mui/icons-material/Remove'; +import AddIcon from '@mui/icons-material/Add'; +import { useCancellablePromise } from '../../hooks/useCancellablePromise'; +import { GetParameterCommandOutput } from '@aws-sdk/client-ssm'; +import { InvokeCommandOutput } from '@aws-sdk/client-lambda'; + +const blue = { + 100: '#daecff', + 200: '#b6daff', + 300: '#66b2ff', + 400: '#3399ff', + 500: '#007fff', + 600: '#0072e5', + 700: '#0059B2', + 800: '#004c99', +}; + +const grey = { + 50: '#F3F6F9', + 100: '#E5EAF2', + 200: '#DAE2ED', + 300: '#C7D0DD', + 400: '#B0B8C4', + 500: '#9DA8B7', + 600: '#6B7A90', + 700: '#434D5B', + 800: '#303740', + 900: '#1C2025', +}; + +const StyledInputRoot = styled('div')( + ({ theme }) => ` + font-family: 'IBM Plex Sans', sans-serif; + font-weight: 400; + color: ${theme.palette.mode === 'dark' ? grey[300] : grey[500]}; + display: flex; + flex-flow: row nowrap; + justify-content: center; + align-items: center; +`, +); + +const StyledInput = styled('input')( + ({ theme }) => ` + font-size: 0.875rem; + font-family: inherit; + font-weight: 400; + line-height: 1.375; + color: ${theme.palette.mode === 'dark' ? grey[300] : grey[900]}; + background: ${theme.palette.mode === 'dark' ? grey[900] : '#fff'}; + border: 1px solid ${theme.palette.mode === 'dark' ? grey[700] : grey[200]}; + box-shadow: 0px 2px 4px ${theme.palette.mode === 'dark' ? 'rgba(0,0,0, 0.5)' : 'rgba(0,0,0, 0.05)' + }; + border-radius: 8px; + margin: 0 8px; + padding: 10px 12px; + outline: 0; + min-width: 0; + width: 4rem; + text-align: center; + + &:hover { + border-color: ${blue[400]}; + } + + &:focus { + border-color: ${blue[400]}; + box-shadow: 0 0 0 3px ${theme.palette.mode === 'dark' ? blue[700] : blue[200]}; + } + + &:focus-visible { + outline: 0; + } +`, +); + +const StyledButton = styled('button')( + ({ theme }) => ` + font-family: 'IBM Plex Sans', sans-serif; + font-size: 0.875rem; + box-sizing: border-box; + line-height: 1.5; + border: 1px solid; + border-radius: 999px; + border-color: ${theme.palette.mode === 'dark' ? grey[800] : grey[200]}; + background: ${theme.palette.mode === 'dark' ? grey[900] : grey[50]}; + color: ${theme.palette.mode === 'dark' ? grey[200] : grey[900]}; + width: 32px; + height: 32px; + display: flex; + flex-flow: row nowrap; + justify-content: center; + align-items: center; + transition-property: all; + transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); + transition-duration: 120ms; + + &:hover { + cursor: pointer; + background: ${theme.palette.mode === 'dark' ? 
blue[700] : blue[500]}; + border-color: ${theme.palette.mode === 'dark' ? blue[500] : blue[400]}; + color: ${grey[50]}; + } + + &:focus-visible { + outline: 0; + } + + &.increment { + order: 1; + } +`, +); + +const OpaAppStateOverview = ({ + input: { env, entity, awsComponent } +}: { + input: { + env: AWSEKSAppDeploymentEnvironment, + entity: Entity, + awsComponent: AWSComponent + } +}) => { + + const api = useApi(opaApiRef); + const [appStateData, setAppStateData] = useState([]); + const [variablesJson, setVariablesJson] = useState({}); + const [appStarted, setAppStarted] = useState(false); + const [appStopped, setAppStopped] = useState(false); + const [clusterNameState, setClusterNameState] = useState(""); + const [loading, setLoading] = useState(true); + const [error, setError] = useState<{ isError: boolean; errorMsg: string | null }>({ isError: false, errorMsg: null }); + const { cancellablePromise } = useCancellablePromise({ rejectOnCancel: true }); + const timerRef = useRef(null); + + // Namespace-bound application admin role (not cluster admin role) + const appAdminRoleArn = env.app.appAdminRoleArn; + + const kubectlLambdaArn = env.entities.envProviderEntity?.metadata["kubectlLambdaArn"]?.toString() || ""; + let clusterNameParam, clusterName: string; + + async function fetchAppConfig() { + if (!clusterName) { + // console.log(`getting cluster name`); + clusterNameParam = await cancellablePromise( + api.getSSMParameter({ ssmParamName: env.clusterName }) + ); + clusterName = clusterNameParam.Parameter?.Value?.toString().split('/')[1].toString() || ""; + } else { + // console.log(`clusterName was already cached when getting app config`); + } + + setClusterNameState(clusterName); + + const bodyParamVariables = { + RequestType: "Create", + ResourceType: "Custom::AWSCDK-EKS-KubernetesObjectValue", + ResourceProperties: { + TimeoutSeconds: "5", + ClusterName: clusterName, + RoleArn: appAdminRoleArn, + ObjectNamespace: env.app.namespace, + InvocationType: 'RequestResponse', + ObjectType: "configmaps", + ObjectLabels: `app.kubernetes.io/env=${env.environment.name},app.kubernetes.io/name=${entity.metadata.name}`, + JsonPath: "@" + } + }; + + // console.log(`calling lambda to get configs`); + const resultsVariables = await cancellablePromise( + api.invokeLambda({ + functionName: kubectlLambdaArn, + actionDescription: `Fetch app configs for namespace ${env.app.namespace}`, + body: JSON.stringify(bodyParamVariables) + }) + ); + // console.log(`got configs`); + + try { + if (resultsVariables?.Payload) { + const payloadVariablesString = base64PayloadConvert(resultsVariables.Payload as Object); + const payloadVariablesJson = JSON.parse(payloadVariablesString); + + if (payloadVariablesJson?.Data?.Value) { + const variablesJson = JSON.parse(payloadVariablesJson.Data.Value); + // console.log(variablesJson); + return variablesJson; + } else { + return {}; + } + + } + + } catch (err) { + console.log(err); + throw Error("Can't parse json response"); + } + + } + + async function fetchAppState() { + + if (!clusterName) { + // console.log(`getting cluster name`); + clusterNameParam = await cancellablePromise( + api.getSSMParameter({ ssmParamName: env.clusterName }) + ); + // console.log(`DONE getting cluster name`); + clusterName = clusterNameParam.Parameter?.Value?.toString().split('/')[1].toString() || ""; + // console.log(`clusterName is ${clusterName}`); + } + + const bodyParam = { + RequestType: "Create", + ResourceType: "Custom::AWSCDK-EKS-KubernetesObjectValue", + ResourceProperties: { + 
TimeoutSeconds: "5", + ClusterName: clusterName, + RoleArn: appAdminRoleArn, + ObjectNamespace: env.app.namespace, + InvocationType: 'RequestResponse', + ObjectType: "deployments", + ObjectLabels: `app.kubernetes.io/env=${env.environment.name},app.kubernetes.io/name=${entity.metadata.name}`, + JsonPath: "@" + } + }; + + // console.log(bodyParam) + // console.log(`calling lambda to get manifests`); + const results = await cancellablePromise( + api.invokeLambda({ + functionName: kubectlLambdaArn, + actionDescription: `Fetch deployments for namespace ${env.app.namespace}`, + body: JSON.stringify(bodyParam) + }) + ); + // console.log(`got manifests`); + + try { + if (results?.Payload) { + const payloadString = base64PayloadConvert(results.Payload as Object); + const payloadJson = JSON.parse(payloadString); + if (payloadJson?.Data?.Value) { + const deploymentJson = JSON.parse(payloadJson.Data.Value).items; + // console.log(deploymentJson); + return deploymentJson; + } else { + return {}; + } + } + + } catch (err) { + console.log(err); + throw Error("Can't parse json response"); + } + } + + const getDeploymentEnvVars = (deploymentName: string): keyValue[] => { + if (!appStateData || !variablesJson) { + return []; + } + + const configMapName = appStateData.filter(appState => appState.appID === deploymentName)[0].stateObject.spec.template.spec.containers[0]?.envFrom?.[0]?.configMapRef?.name; + + if (!configMapName) { + return []; + } + + const configMap = variablesJson.items.filter((candidateMap: any) => candidateMap.metadata.name === configMapName)?.[0]; + + if (!configMap) { + return []; + } + + const variables: keyValue[] = []; + Object.keys(configMap.data).forEach((key: string, index: number) => { + variables.push({ + id: `${index}`, + key: key.toString(), + value: configMap.data[key].toString() + }) + }); + return variables; + }; + + const parseState = (deploymentsJson: any): AppState[] => { + // parse response JSON + + let deploymentsState: AppState[] = [] + try { + + Object.keys(deploymentsJson).forEach(key => { + const deploymentJson = deploymentsJson[key]; + + const updatedReplicas = Number.parseInt(deploymentJson.status.updatedReplicas) || 0; + const appRunning = Number.parseInt(deploymentJson.status.readyReplicas) || 0; + + const pending = Math.abs(appRunning - updatedReplicas); + + let appStateDescription; + if (pending) { + appStateDescription = AppStateType.UPDATING; + } else { + appStateDescription = appRunning > 0 ? AppStateType.RUNNING : AppStateType.STOPPED; + } + + const appState: AppState = { + appID: deploymentJson.metadata.name, + appState: appStateDescription, + deploymentIdentifier: deploymentJson.metadata.uid, + desiredCount: Number.parseInt(deploymentJson.spec.replicas) || 0, + pendingCount: pending, + runningCount: appRunning, + lastStateTimestamp: new Date(deploymentJson.status.conditions[0].lastUpdateTime), + stateObject: deploymentJson + } + + deploymentsState.push(appState); + }) + + } catch (err) { + console.log(err); + } + return deploymentsState || []; + } + + async function getData(appStateResults?: any) { + + let isCanceled = false; + let isError = false; + let deploymentsJson; + let variablesJson; + try { + if (appStateResults) { + // console.log(`reusing appStateResults`); + } + + deploymentsJson = appStateResults ? 
appStateResults : await fetchAppState(); // returns array of deployments + variablesJson = await fetchAppConfig(); // return the configMaps for the app + } catch (e) { + if ((e as any).isCanceled) { + isCanceled = true; + // console.log(`got cancellation in getData`); + } else { + isError = true; + console.error(e); + setError({ isError: true, errorMsg: `Unexpected error occurred while retrieving event data: ${e}` }); + } + + } + + if (!isCanceled && !isError) { + const states = parseState(deploymentsJson); + + setAppStateData(states); + setVariablesJson(variablesJson); + } + + } + + useEffect(() => { + setAppStateData([]); // reset existing state + getData() + .then(() => { + setLoading(false); + setError({ isError: false, errorMsg: '' }); + }) + .catch(e => { + setLoading(false); + setError({ isError: true, errorMsg: `Unexpected error occurred while retrieving app data: ${e}` }); + }); + + return () => { + // prevent sleeping while-loops from continuing + setAppStarted(true); + setAppStopped(true); + + if (timerRef.current) { + clearTimeout(timerRef.current); + // console.log(`Clearing Timeout`); + } + + } + }, []); + + function sleep(ms: number) { + return new Promise(resolve => { + + const resolveHandler = () => { + clearTimeout(timerRef.current); + resolve(null); + } + timerRef.current = setTimeout(resolveHandler, ms); + }); + } + + const handleStartTask = async (appState: AppState) => { + + setLoading(true); + + // console.log(`calling lambda to set replicas to > 0, clusterNameState is ${clusterNameState}`); + + let isCanceled = false; + try { + await cancellablePromise( + api.updateEKSApp({ + actionDescription: `Starting app in environment ${env.environment.name}`, + envName: env.environment.name, + cluster: clusterNameState, + kubectlLambda: env.entities.envProviderEntity?.metadata["kubectlLambdaArn"]?.toString() || "", + lambdaRoleArn: appAdminRoleArn, + gitAdminSecret: 'opa-admin-gitlab-secrets', + updateKey: 'spec.replicas', + updateValue: appState.desiredCount || 1, + platformSCMConfig: { + host: awsComponent.gitHost, + projectGroup: 'aws-app', + repoName: awsComponent.gitRepo.split('/')[1] + } + }) + ); + // console.log(`DONE setting replicas to > 0`); + } catch (e) { + if ((e as any).isCanceled) { + isCanceled = true; + } else { + console.error(e); + setError({ isError: true, errorMsg: `Unexpected error occurred while starting app: ${e}` }); + setLoading(false); + } + } + + let deploymentsJson: any; + let count = 0; + let localAppStarted = false; + // console.log(`isCanceled is ${isCanceled} and localAppStarted is ${localAppStarted} and appStarted is ${appStarted}`); + while (!isCanceled && !appStarted && !localAppStarted) { + + // console.log(`sleeping waiting for app to be started, localAppStarted is ${localAppStarted} and appStarted is ${appStarted}`); + await sleep(5000); + // console.log(`awake ${count}, will now check app state`); + count++; + + try { + // console.log("start fetching app state"); + deploymentsJson = await fetchAppState(); + // console.log("DONE fetching app state"); + Object.keys(deploymentsJson).forEach(key => { + const currState = deploymentsJson[key]; + if (currState.metadata.uid === appState.deploymentIdentifier) { + if (Number.parseInt(currState.status.readyReplicas) > 0) { + setAppStateData([]); // reset existing state + setAppStarted(true); + localAppStarted = true; + // console.log(`setting appStarted to true`); + } + } + }); + + if (localAppStarted || appStarted) { + // console.log(`breaking from while loop since app was started`); + break; + } 
else { + // console.log(`not breaking from while loop since appStarted is falsy`); + } + + } catch (e) { + if ((e as any).isCanceled) { + isCanceled = true; + } else { + console.error(e); + setError({ isError: true, errorMsg: `Unexpected error occurred while retrieving app state: ${e}` }); + setAppStarted(true); + localAppStarted = true; + // console.log(`setting appStarted to true`); + setLoading(false); + } + break; + } + } + + if (!isCanceled && localAppStarted) { + await getData(deploymentsJson); + setLoading(false); + } + }; + + const handleStopTask = async (appState: AppState) => { + setLoading(true); + + // console.log(`calling lambda to set replicas to 0 when clusterNameState is ${clusterNameState}`); + + let isCanceled = false; + try { + await cancellablePromise( + api.updateEKSApp({ + actionDescription: `Stopping app in environment ${env.environment.name}`, + envName: env.environment.name, + cluster: clusterNameState, + kubectlLambda: env.entities.envProviderEntity?.metadata["kubectlLambdaArn"]?.toString() || "", + lambdaRoleArn: appAdminRoleArn, + gitAdminSecret: 'opa-admin-gitlab-secrets', + updateKey: 'spec.replicas', + updateValue: 0, + platformSCMConfig: { + host: awsComponent.gitHost, + projectGroup: 'aws-app', + repoName: awsComponent.gitRepo.split('/')[1] + } + }) + ); + // console.log(`DONE setting replicas to 0`); + } catch (e) { + if ((e as any).isCanceled) { + isCanceled = true; + } else { + console.error(e); + setError({ isError: true, errorMsg: `Unexpected error occurred while stopping app: ${e}` }); + setLoading(false); + } + } + + let count = 0; + let localAppStopped = false; + // console.log(`isCanceled is ${isCanceled} and localAppStopped is ${localAppStopped} and appStoped is ${appStopped}`); + while (!isCanceled && !appStopped && !localAppStopped) { + // console.log(`sleeping ${count} - waiting for app to be stopped, localAppStopped is ${localAppStopped} and appStoped is ${appStopped}`); + await sleep(7000); + // console.log(`DONE sleeping - app is stopped`); + count++; + + try { + // console.log("fetching app state"); + const deploymentsJson = await fetchAppState(); + // console.log("DONE - fetching app state"); + + Object.keys(deploymentsJson).forEach(key => { + const currState = deploymentsJson[key]; + if (currState.metadata.uid === appState.deploymentIdentifier) { + if (!currState.status.readyReplicas || Number.parseInt(currState.status.readyReplicas) === 0) { + appState.appState = AppStateType.STOPPED + appState.runningCount = 0 + localAppStopped = true; + setAppStopped(true); + setLoading(false); + // console.log(`setting appStopped to true and loading to false`); + } + } + }); + + if (localAppStopped || appStopped) { + // console.log(`breaking from while loop since app was stopped`); + break; + } + + } catch (e) { + if ((e as any).isCanceled) { + isCanceled = true; + } else { + console.error(e); + setError({ isError: true, errorMsg: `Unexpected error occurred while retrieving app state: ${e}` }); + setLoading(false); + setAppStopped(true); + localAppStopped = true; + // console.log(`setting appStopped to true and loading to false`); + } + break; + } + + } + }; + + const EnvVars = ({ appID }: { appID: string }) => { + + const envVarArr = getDeploymentEnvVars(appID); + + if (envVarArr && envVarArr.length) { + return ( + <> + {envVarArr.map((envVar) => ( + + {envVar.key} + {envVar.value} + + ))} + + ); + } else { + return ( + + None configured + + + ); + } + }; + + const DeploymentCard = ({ deploymentState, index, total }: { deploymentState: AppState, 
index: number, total: number }) => { + + return ( + <> + + + Deployment {index > 1 ? index : ""} +
+ { + deploymentState?.appState ? + ( + + + + + + + Name + {deploymentState.stateObject.metadata.name} + + + Status + {deploymentState?.appState ? deploymentState?.appState : 'Not Running'} + + + Pods + {deploymentState?.runningCount + "/" + deploymentState?.desiredCount}{deploymentState?.pendingCount ? ` (${deploymentState?.pendingCount} Pending)` : ''} + + + Last Updated + {deploymentState?.lastStateTimestamp ? deploymentState?.lastStateTimestamp.toString() : ''} + + +
+
+
+ ) : <> + } +
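For reference, the Status and Pods values rendered above come from parseState earlier in this file; a small sketch of that derivation, illustrative only and using the same AppStateType values the component already imports:

// Mirrors parseState: pending pods mean UPDATING, otherwise RUNNING or STOPPED
// depending on whether any replicas are ready.
const toDisplayState = (status: { readyReplicas?: string; updatedReplicas?: string }) => {
  const updated = Number.parseInt(status.updatedReplicas ?? '') || 0;
  const ready = Number.parseInt(status.readyReplicas ?? '') || 0;
  const pending = Math.abs(ready - updated);
  return pending ? AppStateType.UPDATING : ready > 0 ? AppStateType.RUNNING : AppStateType.STOPPED;
};

// e.g. readyReplicas "1" with updatedReplicas "2" -> one pod pending -> UPDATING.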
+
+ + + Environment Variables + + + + + + + +
+
+
+
+
+ + + { + index === total && deploymentState?.appState === AppStateType.STOPPED ? + ( +
+ deploymentState.desiredCount = val} + min={0} + max={10} + slots={{ + root: StyledInputRoot, + input: StyledInput, + incrementButton: StyledButton, + decrementButton: StyledButton, + }} + slotProps={{ + incrementButton: { + children: , + className: 'increment', + }, + decrementButton: { + children: , + }, + }} + /> +
) : + <> + } + { + index === total ? + ( + <> + + + **Changes to your application state will be applied directly to the cluster and not to the source code repository + ) : <> + } +
+
+ + ) + } + + if (loading) { + return ( + + + Loading current state... + + ); + } + if (error.isError) { + return {error.errorMsg}; + } + + return ( + + + + + Cluster Info + + + + + + + Cluster Name + {clusterNameState} + + + Namespace + {env.app.namespace} + + +
+
+
+
+
+ + { + appStateData.length ? + appStateData.map((state, index, array) => { + return () + }) : <>No Deployments Found + } + +
+
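For context, both fetchAppState and fetchAppConfig above drive the environment's kubectl Lambda with a CloudFormation-style custom resource request; a hedged sketch of that request body follows, with placeholder values standing in for the SSM-resolved cluster name and the namespace-bound app admin role:

// Illustrative only — mirrors the bodyParam objects built in fetchAppState/fetchAppConfig above.
const exampleKubectlRequest = {
  RequestType: 'Create',
  ResourceType: 'Custom::AWSCDK-EKS-KubernetesObjectValue',
  ResourceProperties: {
    TimeoutSeconds: '5',
    ClusterName: 'example-eks-cluster',        // resolved from SSM by the card
    RoleArn: 'arn:aws:iam::111111111111:role/example-app-admin-role', // placeholder
    ObjectNamespace: 'example-namespace',
    InvocationType: 'RequestResponse',
    ObjectType: 'deployments',                 // 'configmaps' when fetching app config
    ObjectLabels: 'app.kubernetes.io/env=dev,app.kubernetes.io/name=example-app',
    JsonPath: '@',
  },
};
// The object is JSON.stringify'd and passed as the body of api.invokeLambda(...).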
+ ); +}; + +export const K8sAppStateCard = () => { + const { entity } = useEntity(); + const awsAppLoadingStatus = useAsyncAwsApp(); + + if (awsAppLoadingStatus.loading) { + return + } else if (awsAppLoadingStatus.component) { + let input; + if (awsAppLoadingStatus.component.componentSubType === "aws-eks") { + const env = awsAppLoadingStatus.component.currentEnvironment as AWSEKSAppDeploymentEnvironment; + input = { + env, + entity, + awsComponent: awsAppLoadingStatus.component + }; + return + } else { + return + } + + } else { + return + } +}; diff --git a/backstage-plugins/plugins/aws-apps/src/components/ProviderInfoCard/ProviderInfoCard.tsx b/backstage-plugins/plugins/aws-apps/src/components/ProviderInfoCard/ProviderInfoCard.tsx index c60603ae..2e974a64 100644 --- a/backstage-plugins/plugins/aws-apps/src/components/ProviderInfoCard/ProviderInfoCard.tsx +++ b/backstage-plugins/plugins/aws-apps/src/components/ProviderInfoCard/ProviderInfoCard.tsx @@ -8,8 +8,8 @@ import { Entity } from '@backstage/catalog-model'; import { ProviderType } from '../../helpers/constants'; interface keyValue { - key: string; - value: string; + key: string; + value: string; } /** @public */ @@ -35,64 +35,88 @@ const ProviderInfo = (props: ProviderInfoProps) => { }, ]; - let items:keyValue[] = [] - items.push({ - key:"Prefix", - value: metadata['prefix']?.toString() || "" - }); - items.push({ - key:"Name", - value: metadata.name.toString() || "" - }); + let items: keyValue[] = [] + items.push({ + key: "Prefix", + value: metadata['prefix']?.toString() || "" + }); + items.push({ + key: "Name", + value: metadata.name.toString() || "" + }); - items.push({ - key:"AWS Account", - value: metadata['aws-account']?.toString() || "" - }); + items.push({ + key: "AWS Account", + value: metadata['awsAccount']?.toString() || "" + }); + items.push({ + key: "AWS Region", + value: metadata['awsRegion']?.toString() || "" + }); + items.push({ + key: "Runtime", + value: metadata['envType']?.toString() || "" + }); + items.push({ + key: "Audit Table", + value: metadata['auditTable']?.toString() || "" + }); + items.push({ + key: "VPC", + value: metadata['vpc']?.toString() || "" + }); + const envType = metadata['envType']?.toString() || ""; + if (envType === ProviderType.ECS || envType === ProviderType.EKS) { items.push({ - key:"AWS Region", - value: metadata['aws-region']?.toString() || "" - }); + key: "Cluster Name", + value: metadata['clusterName']?.toString() || "" + }); + } + if (envType === ProviderType.EKS) { items.push({ - key:"Runtime", - value: metadata['env-type']?.toString() || "" - }); + key: "Node Type", + value: metadata['nodeType']?.toString() || "" + }); + } + items.push({ + key: "Operation Role", + value: metadata['operationRole']?.toString() || "" + }); + items.push({ + key: "Provisioning Role", + value: metadata['provisioningRole']?.toString() || "" + }); + if (envType === ProviderType.EKS) { items.push({ - key:"Audit Table", - value: metadata['audit-table']?.toString() || "" - }); + key: "Cluster Admin Role ARN", + value: metadata['clusterAdminRole']?.toString() || "" + }); items.push({ - key:"VPC", - value: metadata['vpc']?.toString() || "" - }); - const envType = metadata['env-type']?.toString() || ""; - if (envType === ProviderType.ECS || envType === ProviderType.EKS) { - items.push({ - key:"Cluster Name", - value: metadata['cluster-name']?.toString() || "" - }); - } + key: "API Endpoint Access", + value: metadata['apiAccess']?.toString() || "" + }); items.push({ - key:"Operation Role", - value: 
metadata['operation-role']?.toString() || "" - }); + key: "Kubectl / Helm Lambda ARN", + value: metadata['kubectlLambdaArn']?.toString() || "" + }); items.push({ - key:"Provisioning Role", - value: metadata['provisioning-role']?.toString() || "" - }); - + key: "Kubectl / Helm Lambda Role ARN", + value: metadata['kubectlLambdaAssumeRoleArn']?.toString() || "" + }); + } + return ( - - +
{ }; export const ProviderInfoCard = () => { - const { entity } = useEntity(); - return ; + const { entity } = useEntity(); + return ; }; diff --git a/backstage-plugins/plugins/aws-apps/src/components/ResourceBindingCard/ResourceBinding.tsx b/backstage-plugins/plugins/aws-apps/src/components/ResourceBindingCard/ResourceBinding.tsx index afffbeaf..2b10d7ff 100644 --- a/backstage-plugins/plugins/aws-apps/src/components/ResourceBindingCard/ResourceBinding.tsx +++ b/backstage-plugins/plugins/aws-apps/src/components/ResourceBindingCard/ResourceBinding.tsx @@ -16,6 +16,7 @@ import { ResourceSelectorDialog } from './ResourceSelectorDialog'; import Backdrop from '@mui/material/Backdrop'; import CircularProgress from '@mui/material/CircularProgress'; +// TODO: Externalize policy templates to a repo path for easy updates and access control const RDS_POLICY = `{ "Effect": "Allow", "Action": ["rds:*"], @@ -28,6 +29,11 @@ const SECRET_POLICY = `{ "Resource": "@@@PLACEHOLDER@@@" }`; +const S3_POLICY = `{ + "Effect": "Allow", + "Action": ["s3:*"], + "Resource": [@@@PLACEHOLDER@@@] +}`; const ResourceBindingCard = ({ input: { awsComponent, entity, catalog }, @@ -74,7 +80,7 @@ const ResourceBindingCard = ({ const providers = Object.keys(envAppData) providers.forEach(p => { const providerAppData = envAppData[p] as any; - if (et!.metadata['resource-type'] === "aws-rds") { + if (et!.metadata['resourceType'] === "aws-rds") { const associatedRDSResources: AssociatedResources = { @@ -86,7 +92,7 @@ const ResourceBindingCard = ({ resources.push( { resourceName: et!.metadata.name, - resourceType: et!.metadata['resource-type']?.toString() || "", + resourceType: et!.metadata['resourceType']?.toString() || "", provider: p, resourceArn: providerAppData['Arn'], id: providerAppData['Arn'], @@ -98,7 +104,7 @@ const ResourceBindingCard = ({ resources.push( { resourceName: et!.metadata.name, - resourceType: et!.metadata['resource-type']?.toString() || "", + resourceType: et!.metadata['resourceType']?.toString() || "", provider: p, resourceArn: providerAppData['Arn'], id: providerAppData['Arn'], @@ -142,8 +148,16 @@ const ResourceBindingCard = ({ policyContent: secretPolicy, policyResource: item.resourceType }); + } else if (item.resourceType === "aws-s3") { + const s3Policy = S3_POLICY.replace("@@@PLACEHOLDER@@@",`"${item.resourceArn}","${item.resourceArn}/*"`); + policies.push({ + policyFileName: `statement-s3-${awsComponent.currentEnvironment.environment.name}-${item.provider}-${item.resourceName}`, + policyContent: s3Policy, + policyResource: item.resourceType + }); } + const params: BindResourceParams = { gitHost: awsComponent.gitHost, gitJobID: '', diff --git a/backstage-plugins/plugins/aws-apps/src/components/ResourceBindingCard/ResourceSelectorDialog.tsx b/backstage-plugins/plugins/aws-apps/src/components/ResourceBindingCard/ResourceSelectorDialog.tsx index 3e9e7925..b38232ac 100644 --- a/backstage-plugins/plugins/aws-apps/src/components/ResourceBindingCard/ResourceSelectorDialog.tsx +++ b/backstage-plugins/plugins/aws-apps/src/components/ResourceBindingCard/ResourceSelectorDialog.tsx @@ -148,13 +148,16 @@ export const ResourceSelectorDialog = ({ // search the catalog for resources within the same environment and provider const allResources = await catalog.getEntities({ filter: { 'kind': "resource", 'spec.type': 'aws-resource' } }); - const matchedResources = allResources.items.filter(entity => { const appData = entity.metadata["appData"] as any; return appData && appData[currentEnvironment] && entity.metadata.name 
}) matchedResources.forEach(et => { + const etNamespace = et.metadata.namespace || 'default'; + const etName = et.metadata.name; + const id = `resource:${etNamespace}/${etName}`; + const appData = et.metadata["appData"] as any; const envAppData = appData[currentEnvironment] as any; // find all providers - for multi providers @@ -164,37 +167,53 @@ export const ResourceSelectorDialog = ({ if (isResourceAlreadyBind(providerAppData['Arn'], associatedResources)) { return; } - if (et.metadata['resource-type'] === "aws-rds") { + if (et.metadata['resourceType'] === "aws-rds") { //Handler for aws-rds with associated resources const associatedRDSResources: AssociatedResources = { resourceArn: providerAppData['DbAdminSecretArn'], resourceType: "aws-db-secret", - resourceName: `${et.metadata.name}-secret` + resourceName: `${etName}-secret` } tableData.push( { - resourceName: et.metadata.name, - resourceType: et.metadata['resource-type']?.toString() || "", + resourceName: etName, + resourceType: et.metadata['resourceType']?.toString() || "", provider: p, resourceArn: providerAppData['Arn'], - id: `resource:default/${et.metadata.name}`, + id, associatedResources: [associatedRDSResources] }) } - else if (et.metadata['resource-type'] === "aws-s3") { + else if (et.metadata['resourceType'] === "aws-s3") { // Custom S3 bucket resource handler - add resource policy + const associatedS3Resources: AssociatedResources = + { + resourceArn: providerAppData['Arn'], + resourceType: "aws-s3", + resourceName: `${etName}-secret` + } + + tableData.push( + { + resourceName: etName, + resourceType: et.metadata['resourceType']?.toString() || "", + provider: p, + resourceArn: providerAppData['Arn'], + id, + associatedResources: [associatedS3Resources] + }) } else { // General AWS resource handler tableData.push( { - resourceName: et.metadata.name, - resourceType: et.metadata['resource-type']?.toString() || "", + resourceName: etName, + resourceType: et.metadata['resourceType']?.toString() || "", provider: p, resourceArn: providerAppData['Arn'], - id: `resource:default/${et.metadata.name}`, + id, }) } }) diff --git a/backstage-plugins/plugins/aws-apps/src/helpers/constants.ts b/backstage-plugins/plugins/aws-apps/src/helpers/constants.ts index 13e42a27..6895f37b 100644 --- a/backstage-plugins/plugins/aws-apps/src/helpers/constants.ts +++ b/backstage-plugins/plugins/aws-apps/src/helpers/constants.ts @@ -17,8 +17,9 @@ export enum ProviderType { export type DeployStackStatus = StackStatus | ExtraStackDeployStatus; export enum APP_SUBTYPE { - ECS = 'ecs', - SERVERLESS_REST_API = 'serverless-rest-api', + ECS = 'aws-ecs', + EKS = 'aws-eks', + SERVERLESS = 'aws-serverless', } export enum HTTP { diff --git a/backstage-plugins/plugins/aws-apps/src/helpers/util.ts b/backstage-plugins/plugins/aws-apps/src/helpers/util.ts index 60daf908..40e70492 100644 --- a/backstage-plugins/plugins/aws-apps/src/helpers/util.ts +++ b/backstage-plugins/plugins/aws-apps/src/helpers/util.ts @@ -2,3 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 export const sleep = (ms: number) => new Promise(r => setTimeout(r, ms)); + +export const base64PayloadConvert = (payload:Object) => { + let str = ""; + Object.values(payload).forEach(k=> { + str+=String.fromCharCode(k) + }) + return str; +} \ No newline at end of file diff --git a/backstage-plugins/plugins/aws-apps/src/hooks/useAwsApp.tsx b/backstage-plugins/plugins/aws-apps/src/hooks/useAwsApp.tsx index 117ca07b..c095232d 100644 --- a/backstage-plugins/plugins/aws-apps/src/hooks/useAwsApp.tsx +++ 
b/backstage-plugins/plugins/aws-apps/src/hooks/useAwsApp.tsx @@ -222,7 +222,7 @@ export const useAwsComponentFromContext = (): AwsComponentHookLoadingStatus => { } function populateEcsState(ecsAppDeployEnv: AWSECSAppDeploymentEnvironment, providerEntity: Entity): void { - ecsAppDeployEnv.clusterName = providerEntity.metadata['cluster-name']?.toString() || ""; + ecsAppDeployEnv.clusterName = providerEntity.metadata['clusterName']?.toString() || ""; const providerAppData = getProviderAppData(ecsAppDeployEnv); @@ -245,26 +245,30 @@ export const useAwsComponentFromContext = (): AwsComponentHookLoadingStatus => { } function populateEksState(eksAppDeployEnv: AWSEKSAppDeploymentEnvironment, providerEntity: Entity): void { - eksAppDeployEnv.clusterName = providerEntity.metadata['cluster-name']?.toString() || ""; + eksAppDeployEnv.clusterName = providerEntity.metadata['clusterName']?.toString() || ""; const providerAppData = getProviderAppData(eksAppDeployEnv); eksAppDeployEnv.app = { - deploymentName: providerAppData["DeploymentName"] as string || "", + appAdminRoleArn: providerAppData["AppAdminRoleArn"] as string || "", ecrArn: providerAppData["EcrRepositoryArn"] as string || "", namespace: providerAppData["Namespace"] as string || "", - logGroupName: providerAppData["TaskLogGroup"] || "", + + // Must match Fluent Bit configurations. See "log_group_template" setting in the EKS provider IaC. + logGroupName: providerAppData["LogGroup"] || `/aws/apps/${eksAppDeployEnv.providerData.prefix}-${eksAppDeployEnv.providerData.name}/${providerAppData["Namespace"]}`, + resourceGroupArn: providerAppData["AppResourceGroup"] || "", cloudFormationStackName: providerAppData["StackName"] || "", - links: [ - { - title: "Go to app", - url: providerAppData["AlbEndpoint"] || "", - // icon: 'kind:api' - } - ] + links: [] } + if (providerAppData["AlbEndpoint"]) { + eksAppDeployEnv.app.links.push({ + title: "Go to app", + url: providerAppData["AlbEndpoint"] || "", + // icon: 'kind:api' + }); + } } @@ -314,27 +318,27 @@ export const useAwsComponentFromContext = (): AwsComponentHookLoadingStatus => { const awsDeploymentEnvironment: AWSDeploymentEnvironment = { environment: { - accountType: envEntity.entity?.metadata['env-type-account']?.toString() || "", + accountType: envEntity.entity?.metadata['envTypeAccount']?.toString() || "", category: envEntity.entity?.metadata['category']?.toString() || "", classification: envEntity.entity?.metadata['classification']?.toString() || "", description: envEntity.entity?.metadata['description']?.toString() || "", - envType: envEntity.entity?.metadata['environment-type']?.toString() || "", + envType: envEntity.entity?.metadata['environmentType']?.toString() || "", level: parseInt(envEntity.entity?.metadata['level']?.toString() || "0", 10), name: envEntity.entity?.metadata['name'].toString() || "", - regionType: envEntity.entity?.metadata['env-type-region']?.toString() || "", + regionType: envEntity.entity?.metadata['envTypeRegion']?.toString() || "", }, providerData: { - accountNumber: envProvider.metadata['aws-account']?.toString() || "", - region: envProvider.metadata['aws-region']?.toString() || "", + accountNumber: envProvider.metadata['awsAccount']?.toString() || "", + region: envProvider.metadata['awsRegion']?.toString() || "", prefix: envProvider.metadata['prefix']?.toString() || "", - auditTable: envProvider.metadata['audit-table']?.toString() || "", + auditTable: envProvider.metadata['auditTable']?.toString() || "", description: envProvider.metadata['description']?.toString() || 
"", name: envProvider.metadata['name'], - operationRoleSsmKey: envProvider.metadata['operation-role']?.toString() || "", - provisioningRoleSsmKey: envProvider.metadata['provisioning-role']?.toString() || "", - providerType: envProvider.metadata['env-type']?.toString().toLowerCase() || "", + operationRoleSsmKey: envProvider.metadata['operationRole']?.toString() || "", + provisioningRoleSsmKey: envProvider.metadata['provisioningRole']?.toString() || "", + providerType: envProvider.metadata['envType']?.toString().toLowerCase() || "", vpcSsmKey: envProvider.metadata['vpc']?.toString() || "", - cloudFormationStackName: envProvider.metadata['stack-name']?.toString() || "" + cloudFormationStackName: envProvider.metadata['stackName']?.toString() || "" }, entities: { envEntity: envEntity.entity as AWSEnvironmentEntityV1, @@ -348,10 +352,10 @@ export const useAwsComponentFromContext = (): AwsComponentHookLoadingStatus => { } // now adjust more specific provider data types - const providerType = envEntity.entity?.metadata['environment-type']?.toString().toLowerCase() || "N/A"; + const providerType = envEntity.entity?.metadata['environmentType']?.toString().toLowerCase() || "N/A"; if (providerType === "N/A") { - throw new Error("Environment Entity not set properly - please configure environment-type"); + throw new Error("Environment Entity not set properly - please configure environmentType"); } if (providerType === ProviderType.ECS && componentType === "aws-app") { @@ -362,7 +366,7 @@ export const useAwsComponentFromContext = (): AwsComponentHookLoadingStatus => { // Must handle this later, since it will require async calls that cannot be made here } else if (componentType === "aws-resource") { populateResourceState(awsDeploymentEnvironment as AWSResourceDeploymentEnvironment); - if (entity.metadata["resource-type"] === "aws-rds") { + if (entity.metadata["resourceType"] === "aws-rds") { (awsDeploymentEnvironment as AWSResourceDeploymentEnvironment).resource.resourceType = "database"; } } @@ -398,8 +402,9 @@ export const useAwsComponentFromContext = (): AwsComponentHookLoadingStatus => { const awsComponent: AWSComponent = { componentName: entity.metadata['name'], componentType, - iacType: entity.metadata['iac-type']?.toString() || "", - repoSecretArn: entity.metadata['repo-secret-arn']?.toString() || "", + componentSubType: entity.spec? entity.spec['subType']!.toString(): "", + iacType: entity.metadata['iacType']?.toString() || "", + repoSecretArn: entity.metadata['repoSecretArn']?.toString() || "", gitHost: entity.metadata.annotations ? entity.metadata.annotations['gitlab.com/instance']?.toString() : "", gitRepo: entity.metadata.annotations ? 
entity.metadata.annotations['gitlab.com/project-slug']?.toString() : "", platformRegion: config.getString('backend.platformRegion'), diff --git a/backstage-plugins/plugins/aws-apps/src/index.ts b/backstage-plugins/plugins/aws-apps/src/index.ts index 6ec1e2df..981245bd 100644 --- a/backstage-plugins/plugins/aws-apps/src/index.ts +++ b/backstage-plugins/plugins/aws-apps/src/index.ts @@ -9,6 +9,7 @@ export { opaPlugin, EntityAnnotationTypeTable, EntityAppStateCard, + EntityK8sAppStateCard, EntityAppPromoCard, EntityAppStateCardCloudFormation, EntityLabelTable, diff --git a/backstage-plugins/plugins/aws-apps/src/pages/AwsAppPage/AwsAppPage.tsx b/backstage-plugins/plugins/aws-apps/src/pages/AwsAppPage/AwsAppPage.tsx index 27094d8b..2c1bdef4 100644 --- a/backstage-plugins/plugins/aws-apps/src/pages/AwsAppPage/AwsAppPage.tsx +++ b/backstage-plugins/plugins/aws-apps/src/pages/AwsAppPage/AwsAppPage.tsx @@ -21,6 +21,7 @@ import { import { AwsECSAppPage } from '../AwsECSAppPage/AwsECSAppPage'; import { AwsServerlessAppPage } from '../AwsServerlessAppPage/AwsServerlessAppPage'; import { CICDContent } from '../../components/CICDContent/CICDContent'; +import { AwsEKSAppPage } from '../AwsEKSAppPage/AwsEKSAppPage'; interface AwsAppPageProps { children: ReactNode; @@ -106,6 +107,33 @@ export function AwsAppPage(_props: AwsAppPageProps) { ); + const AwsEKSAppEntityPage = ( + <> + {_props.children} + + + + + + + + + {awsAppLogsContent} + + + {managementContent} + + {!loadingPermission && canReadAudit && ( + + }> + {auditContent} + + + )} + + + ); + const AwsServerlessAppEntityPage = ( <> {_props.children} @@ -122,9 +150,13 @@ export function AwsAppPage(_props: AwsAppPageProps) { {managementContent} - - {auditContent} - + {!loadingPermission && canReadAudit && ( + + }> + {auditContent} + + + )} ); @@ -136,7 +168,11 @@ export function AwsAppPage(_props: AwsAppPageProps) { return ( {AwsECSAppEntityPage} + {AwsEKSAppEntityPage} {AwsServerlessAppEntityPage} + +

+          Application Type "{env.providerData.providerType}" Is Not Supported At This Time
); } else { diff --git a/backstage-plugins/plugins/aws-apps/src/pages/AwsEKSAppPage/AwsEKSAppPage.tsx b/backstage-plugins/plugins/aws-apps/src/pages/AwsEKSAppPage/AwsEKSAppPage.tsx new file mode 100644 index 00000000..0f3475aa --- /dev/null +++ b/backstage-plugins/plugins/aws-apps/src/pages/AwsEKSAppPage/AwsEKSAppPage.tsx @@ -0,0 +1,43 @@ +import React from 'react'; +import { Grid } from '@material-ui/core'; +import { EntityAboutCard } from '@backstage/plugin-catalog'; +import { EntityGeneralInfoCard, EntityAppLinksCard, EntityInfrastructureInfoCard, EntityK8sAppStateCard } from '../../plugin'; +import { + EntityCatalogGraphCard +} from '@backstage/plugin-catalog-graph'; + +interface AwsEKSAppPageProps { + +} + +/** @public */ +export function AwsEKSAppPage(_props: AwsEKSAppPageProps) { + const awsEKSAppViewContent = ( + + + + + + + + + + + + + + + + + + + + + ); + + return ( + <> + {awsEKSAppViewContent} + + ); +} diff --git a/backstage-plugins/plugins/aws-apps/src/pages/AwsEKSEnvironmentProviderPage/AwsEKSEnvironmentProviderPage.tsx b/backstage-plugins/plugins/aws-apps/src/pages/AwsEKSEnvironmentProviderPage/AwsEKSEnvironmentProviderPage.tsx new file mode 100644 index 00000000..f690c3ae --- /dev/null +++ b/backstage-plugins/plugins/aws-apps/src/pages/AwsEKSEnvironmentProviderPage/AwsEKSEnvironmentProviderPage.tsx @@ -0,0 +1,59 @@ +import { Entity } from '@backstage/catalog-model'; +import { EntityAboutCard, EntityLayout, EntityLinksCard } from '@backstage/plugin-catalog'; +import { EntityCatalogGraphCard } from '@backstage/plugin-catalog-graph'; +import { isGithubActionsAvailable } from '@backstage/plugin-github-actions'; +import { EntityTechdocsContent } from '@backstage/plugin-techdocs'; +import { ReportIssue } from '@backstage/plugin-techdocs-module-addons-contrib'; +import { TechDocsAddons } from '@backstage/plugin-techdocs-react'; +import { isGitlabAvailable } from '@immobiliarelabs/backstage-plugin-gitlab'; +import { Grid } from '@material-ui/core'; +import React, { ReactNode } from 'react'; +import { CICDContent } from '../../components/CICDContent/CICDContent'; +import { EntityDeleteProviderCard, EntityProviderInfoCard } from '../../plugin'; + +export interface AwsEnvironmentProviderPageProps { + children?: ReactNode; +} + +const isCicdApplicable = (entity: Entity) => { + return isGitlabAvailable(entity) || isGithubActionsAvailable(entity); +}; + +/** @public */ +export function AwsEKSEnvironmentProviderPage(/* {children}: AwsEnvironmentProviderPageProps */) { + return ( + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ); +} diff --git a/backstage-plugins/plugins/aws-apps/src/pages/AwsEnvironmentProviderPage/AwsEnvironmentProviderPage.tsx b/backstage-plugins/plugins/aws-apps/src/pages/AwsEnvironmentProviderPage/AwsEnvironmentProviderPage.tsx index 44cdc215..d0dbe854 100644 --- a/backstage-plugins/plugins/aws-apps/src/pages/AwsEnvironmentProviderPage/AwsEnvironmentProviderPage.tsx +++ b/backstage-plugins/plugins/aws-apps/src/pages/AwsEnvironmentProviderPage/AwsEnvironmentProviderPage.tsx @@ -8,6 +8,7 @@ import { AwsServerlessEnvironmentProviderPage } from '../AwsServerlessEnvironmen import { useEntity } from '@backstage/plugin-catalog-react'; import { Entity } from '@backstage/catalog-model'; import { ProviderType } from '../../helpers/constants'; +import { AwsEKSEnvironmentProviderPage } from '../AwsEKSEnvironmentProviderPage/AwsEKSEnvironmentProviderPage'; export interface AwsEnvironmentProviderPageProps { children?: ReactNode @@ -15,7 +16,7 @@ export 
interface AwsEnvironmentProviderPageProps { export function isProviderType(providerType: string, entity: Entity): (entity: Entity) => boolean { return (): boolean => { - return entity.metadata["env-type"]?.toString().toLowerCase() === providerType; + return entity.metadata["envType"]?.toString().toLowerCase() === providerType; }; }; @@ -28,11 +29,14 @@ export function AwsEnvironmentProviderPage(/* {children}: AwsEnvironmentProvider + + + -

-          Environment Provider Type "{entity.metadata["env-type"]}" Is Not Supported At This Time
+          Environment Provider Type "{entity.metadata["envType"]}" Is Not Supported At This Time
); diff --git a/backstage-plugins/plugins/aws-apps/src/pages/AwsResourcePage/AwsResourcePage.tsx b/backstage-plugins/plugins/aws-apps/src/pages/AwsResourcePage/AwsResourcePage.tsx index 0710e2f0..c3e7ff4a 100644 --- a/backstage-plugins/plugins/aws-apps/src/pages/AwsResourcePage/AwsResourcePage.tsx +++ b/backstage-plugins/plugins/aws-apps/src/pages/AwsResourcePage/AwsResourcePage.tsx @@ -10,6 +10,7 @@ import React, { ReactNode } from 'react'; import { CICDContent } from '../../components/CICDContent/CICDContent'; import { EntityDeleteAppCard } from '../../plugin'; import { AwsRDSResourcePage } from '../AwsRDSResourcePage/AwsRDSResourcePage'; +import {AwsS3ResourcePage} from '../AwsS3ResourcePage/AwsS3ResourcePage' interface AwsResourcePageProps { children: ReactNode; @@ -18,7 +19,7 @@ interface AwsResourcePageProps { export function isResourceType(resourceType: string): (entity: Entity) => boolean { return (entity: Entity): boolean => { let subType = 'N/A'; - if (entity?.metadata?.['resource-type']) subType = entity?.metadata?.['resource-type'].toString(); + if (entity?.metadata?.['resourceType']) subType = entity?.metadata?.['resourceType'].toString(); return subType == resourceType; }; } @@ -54,12 +55,28 @@ export function AwsResourcePage(_props: AwsResourcePageProps) { ); + const AwsS3ResourceEntityPage = ( + <> + {_props.children} + + + + + + + + + {managementContent} + + + + ); + return ( {AwsRDSResourceEntityPage} - {/* - {AwsS3EntityPage} - + {AwsS3ResourceEntityPage} + {/* {AwsSQSEntityPage} */} diff --git a/backstage-plugins/plugins/aws-apps/src/pages/AwsS3ResourcePage/AwsS3ResourcePage.tsx b/backstage-plugins/plugins/aws-apps/src/pages/AwsS3ResourcePage/AwsS3ResourcePage.tsx new file mode 100644 index 00000000..092e7837 --- /dev/null +++ b/backstage-plugins/plugins/aws-apps/src/pages/AwsS3ResourcePage/AwsS3ResourcePage.tsx @@ -0,0 +1,39 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +import { Grid } from '@material-ui/core'; +import React, { ReactNode } from 'react'; +import { EntityAboutCard, EntityLinksCard } from '@backstage/plugin-catalog'; +import { + EntityCatalogGraphCard +} from '@backstage/plugin-catalog-graph'; +import { EntityInfrastructureInfoCard } from '../../plugin'; + +interface AwsS3ResourcePageProps { + children?: ReactNode +} + +/** @public */ +export function AwsS3ResourcePage(_props: AwsS3ResourcePageProps) { + const rdsContent = ( + + + + + + + + + + + + + + + ); + return ( + <> + {rdsContent} + + ); +} diff --git a/backstage-plugins/plugins/aws-apps/src/plugin.ts b/backstage-plugins/plugins/aws-apps/src/plugin.ts index c6d95372..b1f00d8c 100644 --- a/backstage-plugins/plugins/aws-apps/src/plugin.ts +++ b/backstage-plugins/plugins/aws-apps/src/plugin.ts @@ -75,6 +75,15 @@ export const EntityAppStateCard = opaPlugin.provide( }), ); +export const EntityK8sAppStateCard = opaPlugin.provide( + createComponentExtension({ + name: 'K8sAppStateCard', + component: { + lazy: () => import('./components/K8sAppStateCard/K8sAppStateCard').then(m => m.K8sAppStateCard), + }, + }), +); + export const EntityAppStateCardCloudFormation = opaPlugin.provide( createComponentExtension({ name: 'AppStateCardCloudFormation', diff --git a/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/README.md b/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/README.md index ec55d583..88727e7b 100644 --- a/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/README.md +++ b/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/README.md @@ -97,7 +97,7 @@ This action will generate a `awsSecretArn` output which can be referenced in sub action: opa:create-secret input: # The name of the SecretsManager secret - secretName: ${{ parameters.component_id }}-gitlab-access-token + secretName: ${{ parameters.component_id | lower }}-gitlab-access-token # The AWS region where the secret will be created region: ${{ steps['opaDeployECSBoilerplate'].output.region }} # The AWS account in which the secret will be created @@ -106,8 +106,8 @@ This action will generate a `awsSecretArn` output which can be referenced in sub description: "Gitlab repo access token" # AWS tags to apply to the Secret tags: - - Key: "aws-apps:${{ parameters.component_id }}" - Value: ${{ parameters.component_id }} + - Key: "aws-apps:${{ parameters.component_id | lower }}" + Value: ${{ parameters.component_id | lower }} ... 
``` diff --git a/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/package.json b/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/package.json index 80eaaf09..356d260c 100644 --- a/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/package.json +++ b/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/package.json @@ -1,7 +1,7 @@ { "name": "@aws/plugin-scaffolder-backend-aws-apps-for-backstage", "description": "App Development for Backstage.io on AWS - scaffolder actions", - "version": "0.2.0", + "version": "0.2.1", "main": "src/index.ts", "types": "src/index.ts", "license": "Apache-2.0", @@ -16,7 +16,7 @@ }, "repository": { "type": "git", - "url": "github:awslabs/app-development-for-backstage-io-on-aws", + "url": "git+https://github.com/awslabs/app-development-for-backstage-io-on-aws.git", "directory": "backstage-plugins/plugins/scaffolder-backend-module-aws-apps" }, "bugs": { @@ -40,22 +40,22 @@ "@aws-sdk/types": "^3.272.0", "@aws-sdk/client-secrets-manager": "^3.279.0", "@aws-sdk/util-arn-parser": "^3.310.0", - "@backstage/catalog-client": "^1.4.3", - "@backstage/catalog-model": "^1.4.1", - "@backstage/config": "^1.0.8", - "@backstage/plugin-scaffolder-backend": "^1.16.5", - "@backstage/plugin-scaffolder-node": "^0.2.2", - "@backstage/integration": "^1.6.2", - "@backstage/errors": "^1.2.1", - "@backstage/types": "^1.1.0", + "@backstage/catalog-client": "^1.5.1", + "@backstage/catalog-model": "^1.4.3", + "@backstage/config": "^1.1.1", + "@backstage/plugin-scaffolder-backend": "^1.19.2", + "@backstage/plugin-scaffolder-node": "^0.2.9", + "@backstage/integration": "^1.8.0", + "@backstage/errors": "^1.2.3", + "@backstage/types": "^1.1.1", "@aws/plugin-aws-apps-backend-for-backstage": "^0.2.0", "lodash": "^4.17.21", "winston": "^3.2.1", "yaml": "^2.2.1" }, "devDependencies": { - "@backstage/backend-common": "^0.19.4", - "@backstage/cli": "^0.22.12" + "@backstage/backend-common": "^0.20.0", + "@backstage/cli": "^0.25.0" }, "files": [ "dist" diff --git a/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/src/actions/get-env-providers/get-env-providers.ts b/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/src/actions/get-env-providers/get-env-providers.ts index 4c5320f5..fc3275dc 100644 --- a/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/src/actions/get-env-providers/get-env-providers.ts +++ b/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/src/actions/get-env-providers/get-env-providers.ts @@ -43,6 +43,8 @@ interface DeploymentParameters { ssmPublicSubnets: string; ssmPrivateSubnets: string; ssmPathCluster: string; + kubectlLambdaArn?: string; + kubectlLambdaRoleArn?: string; } export function getEnvProvidersAction(options: { catalogClient: CatalogApi }) { @@ -145,6 +147,14 @@ export function getEnvProvidersAction(options: { catalogClient: CatalogApi }) { title: 'The Arn of AWS IAM role that can be assumed to deploy resources to the environment provider', type: 'string', }, + kubectlLambdaArn: { + title: 'EKS Only - The Arn of the lambda function that that can execute kubectl commands against the provider\'s EKS cluster', + type: 'string', + }, + kubectlLambdaRoleArn: { + title: 'The Arn of the IAM role for the lambda function that that can execute kubectl commands against the provider\'s EKS cluster', + type: 'string', + }, } } }, @@ -159,8 +169,21 @@ export function getEnvProvidersAction(options: { catalogClient: CatalogApi }) { // Fail early if there is no user entity if (ctx.user?.entity === 
undefined) { - ctx.logger.info(`No user context provided for ${ID} action`); - return; + // Verify the automationKey value. If it matches, set an automation user in the context + if (ctx.secrets?.automationKey === process.env.AUTOMATION_KEY) { + console.log("Automation key provided to use automation user"); + ctx.user = { + entity: { + apiVersion: 'backstage.io/v1alpha1', + kind: 'User', + metadata: { name: 'automation' }, + spec: { profile: { displayName: "Automation User" } } + } + } + } else { + ctx.logger.info(`No user context provided for ${ID} action`); + throw new Error(`No user context provided for ${ID} action`); + } } const awsEnvEntity = await catalogClient.getEntityByRef(environmentRef, { token }); @@ -168,10 +191,10 @@ export function getEnvProvidersAction(options: { catalogClient: CatalogApi }) { throw new Error(`The environment entity "${environmentRef}" could not be located in the catalog.`); } - const envShortName = awsEnvEntity.metadata['short-name']?.toString() || ''; + const envShortName = awsEnvEntity.metadata['shortName']?.toString() || ''; ctx.output('envName', awsEnvEntity.metadata.name); ctx.output('envRef', environmentRef); - ctx.output('envDeployManualApproval', "true" === awsEnvEntity.metadata['deployment_requires_approval']?.toString() || '') + ctx.output('envDeployManualApproval', "true" === awsEnvEntity.metadata['deploymentRequiresApproval']?.toString() || '') ctx.output('envShortName', envShortName); const deploymentParametersArray = await getEnvDeploymentParameters(awsEnvEntity); @@ -183,7 +206,7 @@ export function getEnvProvidersAction(options: { catalogClient: CatalogApi }) { // looping over all providers of the selected environment for (const params of deploymentParametersArray) { const { accountId, region, ssmAssumeRoleArn, ssmPathVpc, ssmPublicSubnets, ssmPrivateSubnets, ssmPathCluster, - envProviderName, envProviderType, envProviderPrefix } = params; + envProviderName, envProviderType, envProviderPrefix, kubectlLambdaArn, kubectlLambdaRoleArn } = params; if (!accountId) { throw new Error(`accountId not configured for environment provider: ${envProviderName}. The provider IaC deployment may have failed.`); @@ -195,7 +218,11 @@ export function getEnvProvidersAction(options: { catalogClient: CatalogApi }) { throw new Error(`ssmAssumeRoleArn not configured for environment provider: ${envProviderName}. The provider IaC deployment may have failed.`); } if (!ssmPathVpc) { - throw new Error(`ssmPathVpc not configured for environment provider: ${envProviderName}. The provider IaC deployment may have failed.`); + if ((envProviderType === 'ecs' || envProviderType === 'eks')) { + throw new Error(`ssmPathVpc not configured for environment provider: ${envProviderName}. The provider IaC deployment may have failed.`); + } else { + ctx.logger.info('No VPC configured for the environment provider'); + } } // Get AWS credentials for the specific provider @@ -204,9 +231,9 @@ export function getEnvProvidersAction(options: { catalogClient: CatalogApi }) { const { credentials } = response; try { - const vpcId = await getSSMParameterValue(region, credentials, ssmPathVpc, ctx.logger); - const publicSubnets = await getSSMParameterValue(region, credentials, ssmPublicSubnets, ctx.logger); - const privateSubnets = await getSSMParameterValue(region, credentials, ssmPrivateSubnets, ctx.logger); + const vpcId = !!ssmPathVpc ? await getSSMParameterValue(region, credentials, ssmPathVpc, ctx.logger) : ''; + const publicSubnets = !!ssmPathVpc ? 
await getSSMParameterValue(region, credentials, ssmPublicSubnets, ctx.logger): ''; + const privateSubnets = !!ssmPathVpc ? await getSSMParameterValue(region, credentials, ssmPrivateSubnets, ctx.logger): ''; const clusterArn = (envProviderType === 'ecs' || envProviderType === 'eks') ? await getSSMParameterValue(region, credentials, ssmPathCluster, ctx.logger) : ''; const assumedRoleArn = await getSSMParameterValue(region, credentials, ssmAssumeRoleArn, ctx.logger); @@ -223,6 +250,13 @@ export function getEnvProvidersAction(options: { catalogClient: CatalogApi }) { assumedRoleArn, }; + if (kubectlLambdaArn) { + envProvider.kubectlLambdaArn = kubectlLambdaArn; + } + if (kubectlLambdaRoleArn) { + envProvider.kubectlLambdaRoleArn = kubectlLambdaRoleArn; + } + envProviderOutputArray.push(envProvider); } catch (err: any) { throw new Error(`Failed to populate environment provider ${envProviderName}. ${err.toString()}`) @@ -249,7 +283,7 @@ export function getEnvProvidersAction(options: { catalogClient: CatalogApi }) { .filter( entity => entity && - ['name', 'env-type', 'aws-account', 'aws-region', 'vpc'].every(key => key in entity.metadata), + ['name', 'envType', 'awsAccount', 'awsRegion', 'vpc'].every(key => key in entity.metadata), ) .map(entity => { const { metadata } = entity!; @@ -260,15 +294,23 @@ export function getEnvProvidersAction(options: { catalogClient: CatalogApi }) { envName: envEntity.metadata.name, envProviderName: metadata.name, envRef: environmentRef, - envProviderType: metadata['env-type']?.toString().toLowerCase() || '', - accountId: metadata['aws-account']?.toString() || '', - region: metadata['aws-region']?.toString() || '', - ssmAssumeRoleArn: metadata['provisioning-role']?.toString() || '', + envProviderType: metadata['envType']?.toString().toLowerCase() || '', + accountId: metadata['awsAccount']?.toString() || '', + region: metadata['awsRegion']?.toString() || '', + ssmAssumeRoleArn: metadata['provisioningRole']?.toString() || '', ssmPathVpc: vpc, ssmPrivateSubnets: `${vpc}/private-subnets`, ssmPublicSubnets: `${vpc}/public-subnets`, - ssmPathCluster: metadata['cluster-name']?.toString() || '', + ssmPathCluster: metadata['clusterName']?.toString() || '', }; + + if (metadata['kubectlLambdaArn']) { + deployParams.kubectlLambdaArn = metadata['kubectlLambdaArn'].toString(); + } + if (metadata['clusterAdminRole']) { + deployParams.kubectlLambdaRoleArn = metadata['clusterAdminRole'].toString(); + } + return deployParams; }); diff --git a/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/src/actions/get-platform-parameters/get-platform-parameters.ts b/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/src/actions/get-platform-parameters/get-platform-parameters.ts index eab48424..a7c01f3f 100644 --- a/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/src/actions/get-platform-parameters/get-platform-parameters.ts +++ b/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/src/actions/get-platform-parameters/get-platform-parameters.ts @@ -78,10 +78,24 @@ export function getPlatformParametersAction(options: { envConfig: Config }) { } ctx.logger.info(`paramKeys: ${JSON.stringify(paramKeys)}`); ctx.logger.info(`Region: ${region}`); + // Fail early if there is no user entity if (ctx.user?.entity === undefined) { - ctx.logger.info(`No user context provided for ${ID} action`); - return; + // Verify the automationKey value. 
If it matches, set an automation user in the context + if (ctx.secrets?.automationKey === process.env.AUTOMATION_KEY) { + console.log("Automation key provided to use automation user"); + ctx.user = { + entity: { + apiVersion: 'backstage.io/v1alpha1', + kind: 'User', + metadata: { name: 'automation' }, + spec: { profile: { displayName: "Automation User" } } + } + } + } else { + ctx.logger.info(`No user context provided for ${ID} action`); + throw new Error(`No user context provided for ${ID} action`); + } } // Get a key/value map of SSM parameters diff --git a/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/src/actions/get-ssm-parameters/get-ssm-parameters.ts b/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/src/actions/get-ssm-parameters/get-ssm-parameters.ts index 9b95ca0b..c24a9654 100644 --- a/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/src/actions/get-ssm-parameters/get-ssm-parameters.ts +++ b/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/src/actions/get-ssm-parameters/get-ssm-parameters.ts @@ -81,8 +81,21 @@ export function getSsmParametersAction() { // Fail early if there is no user entity if (ctx.user?.entity === undefined) { - ctx.logger.info(`No user context provided for ${ID} action`); - return; + // Verify the automationKey value. If it matches, set an automation user in the context + if (ctx.secrets?.automationKey === process.env.AUTOMATION_KEY) { + console.log("Automation key provided to use automation user"); + ctx.user = { + entity: { + apiVersion: 'backstage.io/v1alpha1', + kind: 'User', + metadata: { name: 'automation' }, + spec: { profile: { displayName: "Automation User" } } + } + } + } else { + ctx.logger.info(`No user context provided for ${ID} action`); + throw new Error(`No user context provided for ${ID} action`); + } } const providerConnect: EnvProviderConnectMap = diff --git a/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/src/example/template.yaml b/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/src/example/template.yaml index 934b7045..cbf20935 100644 --- a/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/src/example/template.yaml +++ b/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/src/example/template.yaml @@ -89,7 +89,7 @@ spec: PREFIX=${{ each.value.envProviderPrefix }} ENV_ROLE_ARN=${{ each.value.assumedRoleArn }} OPA_CI_ENVIRONMENT=${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} - OPA_CI_REGISTRY_IMAGE=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com/${{ parameters.component_id }}-${{ each.value.envProviderName }} + OPA_CI_REGISTRY_IMAGE=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com/${{ parameters.component_id | lower }}-${{ steps['opaGetAwsEnvProviders'].output.envName | lower }}-${{ each.value.envProviderName | lower }} OPA_CI_REGISTRY=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com OPA_CI_ENVIRONMENT_MANUAL_APPROVAL={% if steps['opaGetAwsEnvProviders'].output.envDeployManualApproval %}true{% else %}false{% endif %} @@ -111,8 +111,8 @@ spec: url: https://{{ gitlab_hostname }}/opa-admin/backstage-reference/-/tree/main/common/aws_ecs targetPath: ./.iac values: - component_id: ${{ parameters.component_id }} - app_env_plaintext: "" + component_id: ${{ parameters.component_id | lower }} + appEnvPlaintext: "" # Fetches the template code from the remote repo where this template resides # In a real template, the input values which 
start with "aws_" would be used to @@ -123,12 +123,13 @@ spec: input: url: ./content values: - component_id: ${{ parameters.component_id }} + component_id: ${{ parameters.component_id | lower }} + title: ${{ parameters.component_id }} description: ${{ parameters.description }} owner: ${{ parameters.owner }} - aws_environment: ${{ steps['opaGetAwsEnvProviders'].output.envRef }} - aws_environment_name: ${{ steps['opaGetAwsEnvProviders'].output.envName }} - aws_secret_repo_arn: ${{ steps['createSecretManager'].output.awsSecretArn }} + awsEnvironment: ${{ steps['opaGetAwsEnvProviders'].output.envRef }} + awsEnvironmentName: ${{ steps['opaGetAwsEnvProviders'].output.envName }} + awsSecretRepoArn: ${{ steps['createSecretManager'].output.awsSecretArn }} # Publishes the contents of the working directory to a new GitLab repo - id: publish diff --git a/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/src/types.ts b/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/src/types.ts index 5fe3c810..45144f8d 100644 --- a/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/src/types.ts +++ b/backstage-plugins/plugins/scaffolder-backend-module-aws-apps/src/types.ts @@ -21,4 +21,6 @@ export type EnvironmentProvider = { privateSubnets: string; clusterArn?: string; assumedRoleArn: string; + kubectlLambdaArn?: string; + kubectlLambdaRoleArn?: string; } diff --git a/backstage-reference/.gitignore b/backstage-reference/.gitignore index 140ce74a..c9241679 100644 --- a/backstage-reference/.gitignore +++ b/backstage-reference/.gitignore @@ -10,7 +10,7 @@ # Crash log files crash.log -*.zip +# *.zip *.pem # Ignore any .tfvars files that are generated automatically for each Terraform run. Most diff --git a/backstage-reference/common/aws_ecs/README.md b/backstage-reference/common/aws_ecs/README.md index d4b76812..02f1ad30 100644 --- a/backstage-reference/common/aws_ecs/README.md +++ b/backstage-reference/common/aws_ecs/README.md @@ -2,4 +2,5 @@ This project will create: - An ECR repository for the container image of an AWS application deployed to Fargate ECS -- A Fargate Task Definition for the AWS application +- A Fargate Task Definition and ECS Service (fronted by ALB) for the AWS application +- A resource group that helps identify all of the resources created for the application diff --git a/backstage-reference/common/aws_ecs/package.json b/backstage-reference/common/aws_ecs/package.json index ae3d5dfd..ad587d94 100644 --- a/backstage-reference/common/aws_ecs/package.json +++ b/backstage-reference/common/aws_ecs/package.json @@ -11,13 +11,13 @@ }, "devDependencies": { "@types/node": "18.11.15", - "aws-cdk": "2.88.0", + "aws-cdk": "2.120.0", "ts-node": "^10.9.1", "typescript": "~5.0.0" }, "dependencies": { "@aws-sdk/util-arn-parser": "^3.208.0", - "aws-cdk-lib": "2.88.0", + "aws-cdk-lib": "2.120.0", "constructs": "^10.0.0" } } diff --git a/backstage-reference/common/aws_ecs/src/cdk-ecs-module-app.ts b/backstage-reference/common/aws_ecs/src/cdk-ecs-module-app.ts index 2810e27f..748c4bac 100644 --- a/backstage-reference/common/aws_ecs/src/cdk-ecs-module-app.ts +++ b/backstage-reference/common/aws_ecs/src/cdk-ecs-module-app.ts @@ -9,7 +9,7 @@ const account = app.node.tryGetContext("account") || process.env.CDK_DEPLOY_ACCO const region = app.node.tryGetContext("region") || process.env.REGION || process.env.CDK_DEPLOY_REGION || process.env.CDK_DEFAULT_REGION || "us-east-1"; -console.log(`Selected region: ${region}`) +console.log(`Selected region: ${region}`); const env = { region, account }; 
const stackSufix = process.env.TARGET_ENV_PROVIDER_NAME ? `-${process.env.TARGET_ENV_PROVIDER_NAME}` : ''; diff --git a/backstage-reference/common/aws_ecs/src/cdk-ecs-module-stack.ts b/backstage-reference/common/aws_ecs/src/cdk-ecs-module-stack.ts index 789f8357..5bbcb5fd 100644 --- a/backstage-reference/common/aws_ecs/src/cdk-ecs-module-stack.ts +++ b/backstage-reference/common/aws_ecs/src/cdk-ecs-module-stack.ts @@ -14,8 +14,6 @@ import { Construct } from "constructs"; import * as fs from 'fs' import { PolicyStatement } from "aws-cdk-lib/aws-iam"; - - interface PermissionList { [key: string]: string[] // adjusting require this in order to some json data type } @@ -64,6 +62,12 @@ export class EcsResourcesStack extends Stack { if (!clusterArn) { throw new Error("Required environment variable: clusterArn was not provided."); } + if (!envName) { + throw new Error("Required environment variable: envName was not provided."); + } + if (!envProviderName) { + throw new Error("Required environment variable: envProviderName was not provided."); + } // Tag all resources so that they can be grouped together in a Resource Group // the prefix "aws-apps:" is a convention adopted for this implementation @@ -73,7 +77,6 @@ export class EcsResourcesStack extends Stack { // Search for the particular env/provider permissions to apply const readPermissionsPath = `./permissions/${envName}/${envProviderName}/` - // Add any tags passed as part of AWS_RESOURCE_TAGS input parameters const resourceTagsEnvVar = process.env.AWS_RESOURCE_TAGS; if (resourceTagsEnvVar) { @@ -99,11 +102,12 @@ export class EcsResourcesStack extends Stack { }, }); - // Create a key for encrypting the repository + // Create a key for encrypting the ECR repository const kmsKey = new kms.Key(this, "appKmsKey", { // alias: `${parameters.appShortName.valueAsString}-repo-key`, removalPolicy: RemovalPolicy.DESTROY, enableKeyRotation: true, + description: "Key used to encrypt ECS app repository" }); // TODO: ECR repositories cannot be automatically deleted when destroying the CDK stack. @@ -111,7 +115,7 @@ export class EcsResourcesStack extends Stack { // performed via SDK as part of any teardown/destroy actions // Create an ECR repository for the application container images const ecrRepository = new ecr.Repository(this, "ecr-repository", { - repositoryName: `${appShortName}-${envProviderName}`.toLowerCase(), + repositoryName: `${appShortName}-${envName}-${envProviderName}`.toLowerCase(), imageScanOnPush: true, encryption: ecr.RepositoryEncryption.KMS, encryptionKey: kmsKey, @@ -128,11 +132,11 @@ export class EcsResourcesStack extends Stack { securityGroups: [], // empty array required. See https://github.com/aws/aws-cdk/issues/11146 }); - // Get application plaintext env vars from the APP_ENV_PLAINTEXT env var + // Get application plaintext env vars from the appEnvPlaintext env var let plaintextEnvVars: Record> = {}; let environment: Record = {}; - {%- if values.app_env_plaintext %} - plaintextEnvVars = ${{values.app_env_plaintext | dump}} + {%- if values.appEnvPlaintext %} + plaintextEnvVars = ${{values.appEnvPlaintext | dump}} {%- endif %} // convert all values to strings. 
ECS container definition env vars require Record Object.keys(plaintextEnvVars).forEach(key => { @@ -186,6 +190,11 @@ export class EcsResourcesStack extends Stack { const cfnEcsService = loadBalancedEcsService.service.node.defaultChild as ecs.CfnService; cfnEcsService.desiredCount = 0; + // Add the health check + loadBalancedEcsService.targetGroup.configureHealthCheck({ + path: "${{values.app_health_endpoint or '/'}}", + }); + // ensure that the execution role can decrypt the key when pulling from the repo. kmsKey.grantDecrypt(loadBalancedEcsService.service.taskDefinition.executionRole!); kmsKey.grantDecrypt(loadBalancedEcsService.service.taskDefinition.taskRole!); diff --git a/backstage-reference/common/aws_efs/README.md b/backstage-reference/common/aws_efs/README.md deleted file mode 100644 index 4cf8d400..00000000 --- a/backstage-reference/common/aws_efs/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# CDK project for EFS resource - -This project will create: -- An AWS EFS instance diff --git a/backstage-reference/common/aws_efs/src/cdk-efs-module-app.ts b/backstage-reference/common/aws_efs/src/cdk-efs-module-app.ts deleted file mode 100644 index 9f633565..00000000 --- a/backstage-reference/common/aws_efs/src/cdk-efs-module-app.ts +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env node -import "source-map-support/register"; -import * as cdk from "aws-cdk-lib"; -import { randomUUID as uuid } from "crypto"; -import { CdkEfsModuleStack } from "./cdk-efs-module-stack"; - -const app = new cdk.App(); - -// If an application short name was provided, associate the AWS resources -// created in this app with it through tagging -if (process.env['APP_SHORT_NAME']) { - // Tag all resources so that they can be grouped together in a Resource Group - const appShortName = process.env['APP_SHORT_NAME']; - const tagKey = `aws-apps:${appShortName}`; - cdk.Tags.of(app).add(tagKey, appShortName); -} - -const account = app.node.tryGetContext("account") || process.env.CDK_DEPLOY_ACCOUNT || process.env.CDK_DEFAULT_ACCOUNT; - -const region = - app.node.tryGetContext("region") || process.env.CDK_DEPLOY_REGION || process.env.CDK_DEFAULT_REGION || "us-east-1"; - -const env = { region, account }; - -let stackName; -if (process.env.EFS_NAME) { - stackName = `${process.env.EFS_NAME}-efs-resource`; - } else if (process.env.APP_SHORT_NAME) { - stackName = `${process.env.APP_SHORT_NAME}-efs-resource` - } else { - stackName = `efs-resource-${uuid()}`; - } - -new CdkEfsModuleStack(app, stackName, { - vpcId: process.env.TARGET_VPCID || "missing", - efsName: process.env.EFS_NAME, - efsAccessPointPath: process.env.EFS_ACCESS_POINT_PATH, - env, -}); diff --git a/backstage-reference/common/aws_efs/src/cdk-efs-module-stack.ts b/backstage-reference/common/aws_efs/src/cdk-efs-module-stack.ts deleted file mode 100644 index 242a2792..00000000 --- a/backstage-reference/common/aws_efs/src/cdk-efs-module-stack.ts +++ /dev/null @@ -1,75 +0,0 @@ -import * as cdk from "aws-cdk-lib"; -import * as ec2 from "aws-cdk-lib/aws-ec2"; -import * as efs from "aws-cdk-lib/aws-efs"; -import { Construct } from "constructs"; - -export interface CdkBaseStackProps extends cdk.StackProps { - vpcId: string; - efsName?: string; - efsAccessPointPath?: string; -} - -export class CdkEfsModuleStack extends cdk.Stack { - public readonly vpc: ec2.IVpc; - public readonly efsInstance: efs.IFileSystem; - - constructor(scope: Construct, id: string, props: CdkBaseStackProps) { - super(scope, id, props); - - this.vpc = ec2.Vpc.fromLookup(this, "VPC", { vpcId: props.vpcId }); - - 
// Define the File System properties - const efsConfig: efs.FileSystemProps = { - vpc: this.vpc, - fileSystemName: props.efsName, - removalPolicy: cdk.RemovalPolicy.DESTROY, - performanceMode: efs.PerformanceMode.GENERAL_PURPOSE, // default, TODO: allow the performance mode to be passed in as a parameter - }; - - // create the EFS instance - this.efsInstance = new efs.FileSystem(this, props.efsName ?? "efsInstance", efsConfig); - - // Create an access point for the EFS instance - const accessPoint = new efs.AccessPoint(this, "efsAccessPoint", { - fileSystem: this.efsInstance, - path: props.efsAccessPointPath || "/data", - createAcl: { - ownerGid: "1000", - ownerUid: "1000", - permissions: "750", - }, - posixUser: { - uid: "1000", - gid: "1000", - }, - }); - - // allow connections from within the VPC - this.efsInstance.connections.allowDefaultPortFrom( - { - connections: new ec2.Connections({ - peer: ec2.Peer.ipv4(this.vpc.vpcCidrBlock), - }), - }, - `Allow connections from within VPC ${this.vpc.vpcId}` - ); - - // Provide CFN Output values relevant for the newly created File System - new cdk.CfnOutput(this, "opaEfsArn", { - description: "Arn for the EFS file system", - value: this.efsInstance.fileSystemArn, - }); - new cdk.CfnOutput(this, "opaEfsId", { - description: "ID of the EFS file system", - value: this.efsInstance.fileSystemId, - }); - new cdk.CfnOutput(this, "opaEfsSecurityGroupId", { - description: "Security Group for the EFS instance", - value: this.efsInstance.connections.securityGroups[0].securityGroupId, - }); - new cdk.CfnOutput(this, "opaEfsAccessPointId", { - description: "ID of the EFS access point", - value: accessPoint.accessPointId, - }); - } -} diff --git a/backstage-reference/common/aws_efs/.gitignore b/backstage-reference/common/aws_eks/.gitignore similarity index 100% rename from backstage-reference/common/aws_efs/.gitignore rename to backstage-reference/common/aws_eks/.gitignore diff --git a/backstage-reference/common/aws_efs/.npmignore b/backstage-reference/common/aws_eks/.npmignore similarity index 100% rename from backstage-reference/common/aws_efs/.npmignore rename to backstage-reference/common/aws_eks/.npmignore diff --git a/backstage-reference/common/aws_eks/README.md b/backstage-reference/common/aws_eks/README.md new file mode 100644 index 00000000..71cc1d6f --- /dev/null +++ b/backstage-reference/common/aws_eks/README.md @@ -0,0 +1,6 @@ +# CDK project for Backstage AWS Application EKS resources + +This project will create: +- An ECR repository for the container image of an AWS application deployed to an EKS cluster +- A resource group that helps identify all of the resources created for the application + diff --git a/backstage-reference/common/aws_eks/cdk.json b/backstage-reference/common/aws_eks/cdk.json new file mode 100644 index 00000000..0298b3d0 --- /dev/null +++ b/backstage-reference/common/aws_eks/cdk.json @@ -0,0 +1,41 @@ +{ + "app": "npx ts-node --prefer-ts-exts src/cdk-eks-module-app.ts", + "watch": { + "include": [ + ".src" + ], + "exclude": [ + "README.md", + "cdk*.json", + "**/*.d.ts", + "**/*.js", + "tsconfig.json", + "package*.json", + "yarn.lock", + "node_modules", + "test" + ] + }, + "context": { + "@aws-cdk/aws-lambda:recognizeLayerVersion": true, + "@aws-cdk/core:checkSecretUsage": true, + "@aws-cdk/core:target-partitions": [ + "aws", + "aws-cn" + ], + "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true, + "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true, + "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true, + 
"@aws-cdk/aws-iam:minimizePolicies": true, + "@aws-cdk/core:validateSnapshotRemovalPolicy": true, + "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true, + "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true, + "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true, + "@aws-cdk/aws-apigateway:disableCloudWatchRole": true, + "@aws-cdk/core:enablePartitionLiterals": true, + "@aws-cdk/aws-events:eventsTargetQueueSameAccount": true, + "@aws-cdk/aws-iam:standardizedServicePrincipals": true, + "@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true, + "@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true + } +} diff --git a/backstage-reference/common/aws_eks/package.json b/backstage-reference/common/aws_eks/package.json new file mode 100644 index 00000000..092d6906 --- /dev/null +++ b/backstage-reference/common/aws_eks/package.json @@ -0,0 +1,24 @@ +{ + "name": "aws_eks", + "version": "0.1.0", + "bin": { + "aws_eks": "src/cdk-eks-module-app.js" + }, + "scripts": { + "build": "tsc", + "watch": "tsc -w", + "cdk": "cdk" + }, + "devDependencies": { + "@types/node": "18.11.15", + "aws-cdk": "2.120.0", + "ts-node": "^10.9.1", + "typescript": "~5.0.0" + }, + "dependencies": { + "@aws-sdk/util-arn-parser": "^3.208.0", + "aws-cdk-lib": "2.120.0", + "constructs": "^10.0.0", + "cdk-nag": "^2.27.95" + } +} diff --git a/backstage-reference/common/aws_eks/permissions/README.md b/backstage-reference/common/aws_eks/permissions/README.md new file mode 100644 index 00000000..b4b601dc --- /dev/null +++ b/backstage-reference/common/aws_eks/permissions/README.md @@ -0,0 +1,10 @@ +Add AWS IAM policy statements here, each file with a particular statement that grants the pod access. + +Make sure the tasks are of the format - statement-xxx.json + +example: +{ + "Effect": "Allow", + "Action": ["rds:*"], + "Resource": "*" +} diff --git a/backstage-reference/common/aws_eks/src/cdk-eks-module-app.ts b/backstage-reference/common/aws_eks/src/cdk-eks-module-app.ts new file mode 100644 index 00000000..e95587da --- /dev/null +++ b/backstage-reference/common/aws_eks/src/cdk-eks-module-app.ts @@ -0,0 +1,20 @@ +#!/usr/bin/env node +import * as cdk from 'aws-cdk-lib'; +import { EksResourcesStack } from "./cdk-eks-module-stack"; +import { + getAccountId, + getRegion, + getStackName, +} from "./eks-input"; + +const app = new cdk.App(); + +const account = getAccountId(); +const region = getRegion(); +console.log(`Selected region: ${region}`); +const env = { region, account }; + +const stackName = getStackName(); + +new EksResourcesStack(app, stackName, { env }); + diff --git a/backstage-reference/common/aws_eks/src/cdk-eks-module-stack.ts b/backstage-reference/common/aws_eks/src/cdk-eks-module-stack.ts new file mode 100644 index 00000000..e29f66df --- /dev/null +++ b/backstage-reference/common/aws_eks/src/cdk-eks-module-stack.ts @@ -0,0 +1,220 @@ + +import { Construct } from "constructs"; +import * as cdk from "aws-cdk-lib"; +import * as ecr from "aws-cdk-lib/aws-ecr" +import * as kms from "aws-cdk-lib/aws-kms"; +import * as rg from "aws-cdk-lib/aws-resourcegroups"; +import * as iam from "aws-cdk-lib/aws-iam"; +import * as ssm from "aws-cdk-lib/aws-ssm"; +import * as fs from 'fs'; +import { Stack, StackProps, RemovalPolicy, Tags, CfnOutput } from "aws-cdk-lib/core"; +import { EKSAppAdminRoleConstruct } from "./constructs/eks-env-app-admin-role-construct" +import { + getAccountId, + getClusterName, + getAppAdminRoleArn, + getEnvironmentName, + getEnvironmentProviderName, + 
getK8sIamRoleBindingType, + getNamespace, + getClusterOidcProvider, + getPrefix, + getRegion, + getResourceTags, + validateEKSStackRequiredEnvVars, + OPAEnvironmentParams +} from "./eks-input"; + +interface PermissionList { + [key: string]: string[] +} + +// Read permissions files to augment a pod's IAM role +export function DeclareJSONStatements(readPermissionsPath: string): PermissionList { + const list: PermissionList = {} + if (fs.existsSync(readPermissionsPath)) { + const fileNames = fs.readdirSync(readPermissionsPath).filter(file => file.match(/\.json$/)) + fileNames.forEach((fileName: string) => { + let typeName = fileName.match(/(^.*?)\.json/) + if (typeName) { + list[typeName[1]] = JSON.parse(fs.readFileSync(readPermissionsPath + fileName, 'utf8').toString()) + } + }) + } + + return list +} + +export class EksResourcesStack extends Stack { + constructor(scope: Construct, id: string, props?: StackProps) { + super(scope, id, props); + + const envName = getEnvironmentName(); + const prefix = getPrefix(); + const awsRegion = getRegion(); + const awsAccount = getAccountId(); + + const opaEnvParams: OPAEnvironmentParams = { + envName, + awsRegion, + awsAccount, + prefix + } + + const envIdentifier = `${opaEnvParams.prefix.toLowerCase()}-${opaEnvParams.envName}`; + const envPathIdentifier = `/${opaEnvParams.prefix.toLowerCase()}/${opaEnvParams.envName.toLowerCase()}`; + const appAdminRoleArn = getAppAdminRoleArn(); + const k8sIamRoleBindingType = getK8sIamRoleBindingType(); + const appShortName = "${{ values.component_id }}"; + const clusterName = getClusterName(); + const envProviderName = getEnvironmentProviderName(); + const namespace = getNamespace(); + const clusterOIDCProvider = getClusterOidcProvider(); + + validateEKSStackRequiredEnvVars(); + + let appAdminRoleArnParam = null; + + // Tag all resources so that they can be grouped together in a Resource Group + // the prefix "aws-apps-" is a convention adopted for this implementation + const tagKey = `aws-apps-${appShortName}-${envName}-${envProviderName}`; + Tags.of(this).add(tagKey, appShortName); + + // Search for the particular env/provider permissions to apply + const readPermissionsPath = `./permissions/${envName}/${envProviderName}/` + + // Add any tags passed as part of AWS_RESOURCE_TAGS input parameters + const resourceTagsEnvVar = getResourceTags(); + if (resourceTagsEnvVar) { + const resourceTags = (JSON.parse(resourceTagsEnvVar) as Record[]); + resourceTags.forEach(tag => { + Tags.of(this).add(tag.Key, tag.Value); + }); + } + + const rscGroup = new rg.CfnGroup(this, `${appShortName}-resource-group`, { + name: `${appShortName}-${envName}-${envProviderName}-rg`, + description: `Resources related to ${appShortName} in the ${envName} environment`, + resourceQuery: { + type: "TAG_FILTERS_1_0", + query: { + resourceTypeFilters: ["AWS::AllSupported"], + tagFilters: [ + { + key: tagKey, + }, + ], + }, + }, + }); + + // Create a key for encrypting the ECR repository + const kmsKey = new kms.Key(this, "appKmsKey", { + // alias: `${parameters.appShortName.valueAsString}-repo-key`, + removalPolicy: RemovalPolicy.DESTROY, + enableKeyRotation: true, + description: `Key used to encrypt ECR for ${appShortName}-${envName}-${envProviderName}` + }); + + // TODO: ECR repositories cannot be automatically deleted when destroying the CDK stack. 
+ // Emptying the repository of all images and then deleting the repo will need to be + // performed via SDK as part of any teardown/destroy actions + // Create an ECR repository for the application container images + const ecrRepository = new ecr.Repository(this, "ecr-repository", { + repositoryName: `${appShortName}-${envName}-${envProviderName}`.toLowerCase(), + imageScanOnPush: true, + encryption: ecr.RepositoryEncryption.KMS, + encryptionKey: kmsKey, + removalPolicy: RemovalPolicy.DESTROY, + autoDeleteImages: true + }); + + // Create a new app admin IAM role if an existing IAM role was not provided by the user + // This IAM role will be mapped to the k8s Role for namespace-bound access + let appAdminRole; + if (('existing_new_k8s_namespace_admin_iam_role' === k8sIamRoleBindingType) && appAdminRoleArn) { + appAdminRole = iam.Role.fromRoleArn(this, 'role,', appAdminRoleArn); + } else if ('create_new_k8s_namespace_admin_iam_role' === k8sIamRoleBindingType) { + appAdminRole = new EKSAppAdminRoleConstruct(this, + `${appShortName}-admin-role`, + { + opaEnv: opaEnvParams, + eksClusterName: clusterName + } + ).iamRole; + } + + if (appAdminRole) { + // now save the app admin role ARN in SSM Param + const appAdminRoleNameParam = new ssm.StringParameter(this, `${envIdentifier}-${appShortName}-admin-role-param`, { + allowedPattern: ".*", + description: `The IAM role name mapped to the K8s Role for namespace-bound k8s API access to ${appShortName} resources`, + parameterName: `${envPathIdentifier}/${appShortName}-admin-role`, + stringValue: appAdminRole.roleName, + }); + + appAdminRoleArnParam = new ssm.StringParameter(this, `${envIdentifier}-${appShortName}-admin-role-arn-param`, { + allowedPattern: ".*", + description: `The IAM role ARN mapped to the K8s Role for namespace-bound k8s API access to ${appShortName} resources`, + parameterName: `${envPathIdentifier}/${appShortName}-admin-role-arn`, + stringValue: appAdminRole.roleArn, + }); + } + + const serviceAccountIamRole = new iam.Role(this, `${envIdentifier}-service-account-role`, { + description: `IAM role for ${appShortName} serviceaccount "${namespace}/${appShortName}-sa-${envName}"`, + assumedBy: new iam.FederatedPrincipal( + `arn:aws:iam::${awsAccount}:oidc-provider/${clusterOIDCProvider}`, + { + 'StringEquals': { + [`${clusterOIDCProvider}:aud`]: 'sts.amazonaws.com', + [`${clusterOIDCProvider}:sub`]: `system:serviceaccount:${namespace}:${appShortName}-sa-${envName}`, + } + }, + 'sts:AssumeRoleWithWebIdentity' + ), + // roleName: name, - let CDK generate the role name + }); + + // Add custom permissions to service account + const fileStatements = DeclareJSONStatements(readPermissionsPath); + Object.keys(fileStatements).forEach(key => { + console.log(key) + console.log(fileStatements[key]) + const statement: iam.PolicyStatement = iam.PolicyStatement.fromJson(fileStatements[key]); + serviceAccountIamRole.addToPrincipalPolicy(statement); + }); + + // Output parameters + new CfnOutput(this, "EcrRepositoryUri", { + description: `The ECR repository Uri for ${appShortName}`, + value: ecrRepository.repositoryUri, + }); + new CfnOutput(this, "EcrRepositoryArn", { + description: `The ECR repository Arn for ${appShortName}`, + value: ecrRepository.repositoryArn, + }); + + new CfnOutput(this, "AppResourceGroup", { + description: `The tag-based resource group to identify resources related to ${appShortName}`, + value: `${rscGroup.attrArn}`, + }); + + new CfnOutput(this, `StackName`, { + value: this.stackName, + description: "The EKS App CF Stack 
name", + }); + + new CfnOutput(this, "AppAdminRoleArn", { + value: appAdminRole ? appAdminRole.roleArn : '', + description: `The IAM mapped to the K8s Role for namespace-bound k8s API access to ${appShortName} resources`, + }); + + new CfnOutput(this, "ServiceAccountRoleArn", { + value: serviceAccountIamRole.roleArn, + description: `The IAM role for serviceaccount "${namespace}/${appShortName}"`, + }); + + } +} + diff --git a/backstage-reference/common/aws_eks/src/constructs/eks-env-app-admin-role-construct.ts b/backstage-reference/common/aws_eks/src/constructs/eks-env-app-admin-role-construct.ts new file mode 100644 index 00000000..2486b6e0 --- /dev/null +++ b/backstage-reference/common/aws_eks/src/constructs/eks-env-app-admin-role-construct.ts @@ -0,0 +1,69 @@ +import * as cdk from "aws-cdk-lib"; +import { Construct } from "constructs"; +import { NagSuppressions } from "cdk-nag"; +import * as iam from "aws-cdk-lib/aws-iam"; +import { OPAEnvironmentParams } from "../eks-input" + +export interface EKSAppAdminRoleConstructProps extends cdk.StackProps { + readonly opaEnv: OPAEnvironmentParams; + readonly eksClusterName: string; +} + +const defaultProps: Partial = {}; + +export class EKSAppAdminRoleConstruct extends Construct { + public iamRole: iam.Role; + + constructor(parent: Construct, name: string, props: EKSAppAdminRoleConstructProps) { + super(parent, name); + + /* eslint-disable @typescript-eslint/no-unused-vars */ + props = { ...defaultProps, ...props }; + + const envIdentifier = `${props.opaEnv.prefix.toLowerCase()}-${props.opaEnv.envName}`; + + // Create IAM role + this.iamRole = new iam.Role(this, `${envIdentifier}-app-admin-role`, { + assumedBy: new iam.AccountPrincipal(props.opaEnv.awsAccount), + // roleName: name, - let CDK generate the role name + maxSessionDuration: cdk.Duration.seconds(43200), + }); + NagSuppressions.addResourceSuppressions(this.iamRole, [ + { id: "AwsSolutions-IAM4", reason: "Assumed roles will use AWS managed policies for demonstration purposes. Customers will be advised/required to assess and apply custom policies based on their role requirements" }, + { id: "AwsSolutions-IAM5", reason: "Assumed roles will require permissions to perform multiple eks, ddb, and ec2 for demonstration purposes. 
Customers will be advised/required to assess and apply minimal permission based on role mappings to their idP groups" }, + ], true + ); + + // Add cluster-specific permissions + this.iamRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "eks:AccessKubernetesApi", + "eks:DescribeAddon", + "eks:DescribeAddonConfiguration", + "eks:DescribeAddonVersions", + "eks:DescribeCluster", + "eks:DescribeFargateProfile", + "eks:DescribeIdentityProviderConfig", + "eks:DescribeNodegroup", + "eks:DescribeUpdate", + "eks:ListAddons", + "eks:ListFargateProfiles", + "eks:ListIdentityProviderConfigs", + "eks:ListNodegroups", + "eks:ListTagsForResource", + "eks:ListUpdates", + ], + effect: iam.Effect.ALLOW, + resources: [ + `arn:aws:eks:${props.opaEnv.awsRegion}:${props.opaEnv.awsAccount}:addon/${props.eksClusterName}/*/*`, + `arn:aws:eks:${props.opaEnv.awsRegion}:${props.opaEnv.awsAccount}:cluster/${props.eksClusterName}`, + `arn:aws:eks:${props.opaEnv.awsRegion}:${props.opaEnv.awsAccount}:fargateprofile/${props.eksClusterName}/*/*`, + `arn:aws:eks:${props.opaEnv.awsRegion}:${props.opaEnv.awsAccount}:identityproviderconfig/${props.eksClusterName}/*/*/*`, + `arn:aws:eks:${props.opaEnv.awsRegion}:${props.opaEnv.awsAccount}:nodegroup/${props.eksClusterName}/*/*`, + ], + }) + ); + + } +} diff --git a/backstage-reference/common/aws_eks/src/eks-input.ts b/backstage-reference/common/aws_eks/src/eks-input.ts new file mode 100644 index 00000000..2d528e2a --- /dev/null +++ b/backstage-reference/common/aws_eks/src/eks-input.ts @@ -0,0 +1,96 @@ +#!/usr/bin/env node + +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { randomUUID as uuid } from 'crypto'; + +export interface OPAEnvironmentParams { + readonly awsAccount: string; + readonly awsRegion: string; + readonly envName: string; + readonly prefix: string; +} + +// Environment variables that can be passed in and used in this stack +// The env var names must match the values passed in from scaffolder action(s) building this stack +export enum STACK_EKS_ENV_VARS { + APP_SHORT_NAME = "APP_SHORT_NAME", + K8S_IAM_ROLE_BINDING_TYPE = "K8S_IAM_ROLE_BINDING_TYPE", + NAMESPACE = "NAMESPACE", + CLUSTER_OIDC_PROVIDER = "CLUSTER_OIDC_PROVIDER", + PREFIX = "PREFIX", + TARGET_EKS_CLUSTER_ARN = "TARGET_EKS_CLUSTER_ARN", + TARGET_ENV_NAME = "TARGET_ENV_NAME", + TARGET_ENV_PROVIDER_NAME = "TARGET_ENV_PROVIDER_NAME", +} + +export function validateEKSStackRequiredEnvVars() { + Object.values(STACK_EKS_ENV_VARS).forEach(val => { + if (!process.env[val]) { + throw new Error(`${val} Environment variable is missing and mandatory for EKS stack`); + } + }); +} + +export function getAccountId(): string { + return (process.env.CDK_DEPLOY_ACCOUNT || process.env.CDK_DEFAULT_ACCOUNT) as string +} + +export function getClusterArn(): string { + return process.env[STACK_EKS_ENV_VARS.TARGET_EKS_CLUSTER_ARN] as string; +} + +export function getClusterName(): string { + const clusterArn = getClusterArn(); + return clusterArn.substring(clusterArn.lastIndexOf('/') + 1); +} + +export function getK8sIamRoleBindingType(): string { + return process.env[STACK_EKS_ENV_VARS.K8S_IAM_ROLE_BINDING_TYPE] as string; +} + +export function getEnvironmentName(): string { + return process.env[STACK_EKS_ENV_VARS.TARGET_ENV_NAME] as string; +} + +export function getEnvironmentProviderName(): string { + return process.env[STACK_EKS_ENV_VARS.TARGET_ENV_PROVIDER_NAME] as string; +} + +export function getNamespace(): string { + return 
process.env[STACK_EKS_ENV_VARS.NAMESPACE] as string; +} + +export function getClusterOidcProvider(): string { + return process.env[STACK_EKS_ENV_VARS.CLUSTER_OIDC_PROVIDER] as string; +} + +export function getPrefix(): string { + return process.env[STACK_EKS_ENV_VARS.PREFIX] as string; +} + +export function getRegion(): string { + return (process.env.REGION || process.env.CDK_DEPLOY_REGION || process.env.CDK_DEFAULT_REGION || "us-east-1") as string; +} + +export function getResourceTags(): string { + return process.env.AWS_RESOURCE_TAGS as string; +} + +export function getStackName(): string { + return (process.env.APP_SHORT_NAME ? `${process.env.APP_SHORT_NAME}-eks-resources${getStackSuffix()}` : `eks-resources-${uuid()}`); +} + +export function getStackSuffix(): string { + return (getEnvironmentProviderName() ? `-${getEnvironmentName()}-${getEnvironmentProviderName()}` : '') as string; +} + +export function getAppAdminRoleArn(): string { + let appAdminRoleArn = process.env.APP_ADMIN_ROLE_ARN; + if (appAdminRoleArn) { + appAdminRoleArn = appAdminRoleArn.replace(/ /g, ''); // remove all whitespace + } + return appAdminRoleArn as string || ''; +} + diff --git a/backstage-reference/common/aws_efs/tsconfig.json b/backstage-reference/common/aws_eks/tsconfig.json similarity index 100% rename from backstage-reference/common/aws_efs/tsconfig.json rename to backstage-reference/common/aws_eks/tsconfig.json diff --git a/backstage-reference/common/aws_rds/package.json b/backstage-reference/common/aws_rds/package.json index 6c827e77..a0ff9447 100644 --- a/backstage-reference/common/aws_rds/package.json +++ b/backstage-reference/common/aws_rds/package.json @@ -11,13 +11,13 @@ }, "devDependencies": { "@types/node": "18.11.18", - "aws-cdk": "2.88.0", + "aws-cdk": "2.120.0", "ts-node": "^10.9.1", "typescript": "~5.0.0" }, "dependencies": { - "@aws-sdk/util-arn-parser": "^3.208.0", - "aws-cdk-lib": "2.88.0", + "@aws-sdk/util-arn-parser": "^3.208.0", + "aws-cdk-lib": "2.120.0", "constructs": "^10.0.0", "source-map-support": "^0.5.21" } diff --git a/backstage-reference/common/aws_s3/.gitignore b/backstage-reference/common/aws_s3/.gitignore new file mode 100644 index 00000000..f60797b6 --- /dev/null +++ b/backstage-reference/common/aws_s3/.gitignore @@ -0,0 +1,8 @@ +*.js +!jest.config.js +*.d.ts +node_modules + +# CDK asset staging directory +.cdk.staging +cdk.out diff --git a/backstage-reference/common/aws_s3/.npmignore b/backstage-reference/common/aws_s3/.npmignore new file mode 100644 index 00000000..c1d6d45d --- /dev/null +++ b/backstage-reference/common/aws_s3/.npmignore @@ -0,0 +1,6 @@ +*.ts +!*.d.ts + +# CDK asset staging directory +.cdk.staging +cdk.out diff --git a/backstage-reference/common/aws_s3/README.md b/backstage-reference/common/aws_s3/README.md new file mode 100644 index 00000000..3d860f31 --- /dev/null +++ b/backstage-reference/common/aws_s3/README.md @@ -0,0 +1,4 @@ +# CDK project for S3 resource + +This project will create: +- An S3 Bucket diff --git a/backstage-reference/common/aws_efs/buildspec.yml b/backstage-reference/common/aws_s3/buildspec.yml similarity index 92% rename from backstage-reference/common/aws_efs/buildspec.yml rename to backstage-reference/common/aws_s3/buildspec.yml index e0923098..36a07abb 100644 --- a/backstage-reference/common/aws_efs/buildspec.yml +++ b/backstage-reference/common/aws_s3/buildspec.yml @@ -20,7 +20,7 @@ phases: - echo "==== Starting the build phase $(date -u +%Y%m%d-%H%M) ====" - echo "AWS caller identity information:" - aws sts get-caller-identity 
--output table - - cdk deploy --no-color --require-approval never --method direct --tags "backstage-iac=aws_efs" --outputs-file "output/cdk-output.json" + - cdk deploy --no-color --require-approval never --method direct --tags "backstage-iac=aws_s3" --outputs-file "output/cdk-output.json" post_build: commands: - echo "==== Starting the post_build phase $(date -u +%Y%m%d-%H%M) ====" diff --git a/backstage-reference/common/aws_efs/cdk.json b/backstage-reference/common/aws_s3/cdk.json similarity index 95% rename from backstage-reference/common/aws_efs/cdk.json rename to backstage-reference/common/aws_s3/cdk.json index 4d12f2ff..8684c9af 100644 --- a/backstage-reference/common/aws_efs/cdk.json +++ b/backstage-reference/common/aws_s3/cdk.json @@ -1,5 +1,5 @@ { - "app": "npx ts-node --prefer-ts-exts src/cdk-efs-module-app.ts", + "app": "npx ts-node --prefer-ts-exts src/cdk-s3-module-app.ts", "watch": { "include": [ "**" diff --git a/backstage-reference/common/aws_efs/package.json b/backstage-reference/common/aws_s3/package.json similarity index 64% rename from backstage-reference/common/aws_efs/package.json rename to backstage-reference/common/aws_s3/package.json index 08e100eb..d8245590 100644 --- a/backstage-reference/common/aws_efs/package.json +++ b/backstage-reference/common/aws_s3/package.json @@ -1,8 +1,8 @@ { - "name": "cdk-efs-module", + "name": "cdk-s3-module", "version": "0.1.0", "bin": { - "cdk-efs-module": "src/cdk-efs-module-app.js" + "cdk-s3-module": "src/cdk-s3-module-app.js" }, "scripts": { "build": "tsc", @@ -11,12 +11,13 @@ }, "devDependencies": { "@types/node": "18.11.18", - "aws-cdk": "2.62.2", + "aws-cdk": "2.88.0", "ts-node": "^10.9.1", "typescript": "~5.0.0" }, "dependencies": { - "aws-cdk-lib": "2.62.2", + "@aws-sdk/util-arn-parser": "^3.208.0", + "aws-cdk-lib": "2.88.0", "constructs": "^10.0.0", "source-map-support": "^0.5.21" } diff --git a/backstage-reference/common/aws_s3/src/cdk-s3-module-app.ts b/backstage-reference/common/aws_s3/src/cdk-s3-module-app.ts new file mode 100644 index 00000000..bb25fca5 --- /dev/null +++ b/backstage-reference/common/aws_s3/src/cdk-s3-module-app.ts @@ -0,0 +1,25 @@ +#!/usr/bin/env node +import "source-map-support/register"; +import * as cdk from "aws-cdk-lib"; +import { randomUUID as uuid } from "crypto"; +import { CdkS3ModuleStack } from "./cdk-s3-module-stack"; + +const app = new cdk.App(); + +const account = app.node.tryGetContext("account") || process.env.CDK_DEPLOY_ACCOUNT || process.env.CDK_DEFAULT_ACCOUNT; + +const region = + app.node.tryGetContext("region") || process.env.REGION || process.env.CDK_DEPLOY_REGION || process.env.CDK_DEFAULT_REGION || "us-east-1"; +console.log(`Selected region: ${region}`) +const env = { region, account }; + +let stackName; +if (process.env.TARGET_BUCKET_NAME) { + stackName = `${process.env.TARGET_BUCKET_NAME}-s3-resource`; + } else { + stackName = `s3-resource-${uuid()}`; + } + +console.log(`Stack Name: ${stackName}`) + +new CdkS3ModuleStack(app, stackName, {env}); diff --git a/backstage-reference/common/aws_s3/src/cdk-s3-module-stack.ts b/backstage-reference/common/aws_s3/src/cdk-s3-module-stack.ts new file mode 100644 index 00000000..fa37a80e --- /dev/null +++ b/backstage-reference/common/aws_s3/src/cdk-s3-module-stack.ts @@ -0,0 +1,91 @@ +import { CfnOutput, Stack, StackProps, Tags } from "aws-cdk-lib"; +import * as ec2 from "aws-cdk-lib/aws-ec2"; +import * as s3 from "aws-cdk-lib/aws-s3"; +import * as rg from "aws-cdk-lib/aws-resourcegroups"; +import { Construct } from "constructs"; +import { 
RemovalPolicy } from 'aws-cdk-lib'; + +const StackVarNames = { + appShortName: "APP_SHORT_NAME", + envName: "TARGET_ENV_NAME", + envProviderName: "TARGET_ENV_PROVIDER_NAME", + BucketName: "TARGET_BUCKET_NAME" +}; + +export class CdkS3ModuleStack extends Stack { + + constructor(scope: Construct, id: string, props?: StackProps) { + super(scope, id, props); + + const appShortName = process.env[StackVarNames.appShortName]; + const bucketName = process.env[StackVarNames.BucketName]; + const envProviderName = process.env[StackVarNames.envProviderName]; + if (!appShortName) { + throw new Error("Required environment variable: APP_SHORT_NAME was not provided."); + } + if (!bucketName) { + throw new Error("Required environment variable: TARGET_BUCKET_NAME was not provided."); + } + + // Tag all resources so that they can be grouped together in a Resource Group + // the prefix "aws-resources:" is a convention adopted for this implementation + const tagKey = `aws-resources:${appShortName}-${envProviderName}`; + Tags.of(this).add(tagKey, appShortName); + + // Add any tags passed as part of AWS_RESOURCE_TAGS input parameters + const resourceTagsEnvVar = process.env.AWS_RESOURCE_TAGS; + if (resourceTagsEnvVar) { + const resourceTags = (JSON.parse(resourceTagsEnvVar) as Record<string, string>[]); + resourceTags.forEach(tag => { + Tags.of(this).add(tag.Key, tag.Value); + }); + } + + const rscGroup = new rg.CfnGroup(this, `${appShortName}-resource-group`, { + name: `${appShortName}-${envProviderName}-rg`, + description: `Resources related to ${appShortName}`, + resourceQuery: { + type: "TAG_FILTERS_1_0", + query: { + resourceTypeFilters: ["AWS::AllSupported"], + tagFilters: [ + { + key: tagKey, + }, + ], + }, + }, + }); + + // finally, let's create our bucket - change these settings to match your desired bucket configuration + const bucket = new s3.Bucket(this, 'Bucket', { + blockPublicAccess: s3.BlockPublicAccess.BLOCK_ALL, + encryption: s3.BucketEncryption.S3_MANAGED, + enforceSSL: true, + bucketName: bucketName, + versioned: true, + removalPolicy: RemovalPolicy.DESTROY, // CHANGE IN PROD + }); + + // Output the bucket details so they can be referenced after deployment 
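+ // These outputs are consumed by the S3 IaC pipeline (.gitlab-ci-aws-iac-s3.yml), which parses + // them from cdk-output.json into cdk-output.properties and records them on the Backstage + // entity in catalog-info.yaml for the target environment and provider.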
+ new CfnOutput(this, "BucketName", { + description: "Name of the S3 Bucket", + value: bucket.bucketName, + }); + + new CfnOutput(this, "BucketArn", { + description: "Arn of the S3 Bucket", + value: bucket.bucketArn, + }); + + new CfnOutput(this, "ResourceGroup", { + description: `The tag-based resource group to identify resources related to ${appShortName}`, + value: `${rscGroup.attrArn}`, + }); + // print the stack name as a Cloudformation output + new CfnOutput(this, `StackName`, { + value: this.stackName, + description: "The S3 Bucket CF Stack name", + }); + } +} diff --git a/backstage-reference/common/aws_s3/tsconfig.json b/backstage-reference/common/aws_s3/tsconfig.json new file mode 100644 index 00000000..fc44377a --- /dev/null +++ b/backstage-reference/common/aws_s3/tsconfig.json @@ -0,0 +1,30 @@ +{ + "compilerOptions": { + "target": "ES2020", + "module": "commonjs", + "lib": [ + "es2020" + ], + "declaration": true, + "strict": true, + "noImplicitAny": true, + "strictNullChecks": true, + "noImplicitThis": true, + "alwaysStrict": true, + "noUnusedLocals": false, + "noUnusedParameters": false, + "noImplicitReturns": true, + "noFallthroughCasesInSwitch": false, + "inlineSourceMap": true, + "inlineSources": true, + "experimentalDecorators": true, + "strictPropertyInitialization": false, + "typeRoots": [ + "./node_modules/@types" + ] + }, + "exclude": [ + "node_modules", + "cdk.out" + ] +} diff --git a/backstage-reference/common/aws_serverless_api/package.json b/backstage-reference/common/aws_serverless_api/package.json index 6e6a4e28..728b97ef 100644 --- a/backstage-reference/common/aws_serverless_api/package.json +++ b/backstage-reference/common/aws_serverless_api/package.json @@ -11,13 +11,13 @@ }, "devDependencies": { "@types/node": "18.11.15", - "aws-cdk": "2.88.0", + "aws-cdk": "2.120.0", "ts-node": "^10.9.1", "typescript": "~5.0.0" }, "dependencies": { "@aws-sdk/util-arn-parser": "^3.208.0", - "aws-cdk-lib": "2.88.0", + "aws-cdk-lib": "2.120.0", "constructs": "^10.0.0" } } diff --git a/backstage-reference/common/cicd/.gitlab-ci-aws-dind-spring-boot.yml b/backstage-reference/common/cicd/.gitlab-ci-aws-dind-spring-boot.yml index 6b2fa9a2..2cb860db 100644 --- a/backstage-reference/common/cicd/.gitlab-ci-aws-dind-spring-boot.yml +++ b/backstage-reference/common/cicd/.gitlab-ci-aws-dind-spring-boot.yml @@ -13,7 +13,8 @@ - apk add aws-cli - apk add jq - aws --version - - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "$CI_PROJECT_NAME-$CI_JOB_STAGE" --duration-second=3600 --output json) + - export ROLE_NAME=$CI_PROJECT_NAME-$CI_JOB_STAGE # store role session name so that a single env var can be truncated to allowed character length + - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "${ROLE_NAME:0:63}" --duration-second=3600 --output json) - export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') - export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SecretAccessKey') - export AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') @@ -28,10 +29,11 @@ # - (curl -sSL "https://github.com/buildpacks/pack/releases/download/v0.28.0/pack-v0.28.0-linux.tgz" | tar -C /usr/local/bin/ --no-same-owner -xzv pack) - set -a && source $PROVIDER_PROPS_FILE && set +a - TAG=$(date +%m-%d-%Y_%H-%M-%S) - - mvn spring-boot:build-image -Dspring-boot.build-image.imageName=$APP_SHORT_NAME:$TAG + - mvn spring-boot:build-image 
-Dspring-boot.build-image.imageName=$APP_SHORT_NAME:${CI_COMMIT_SHORT_SHA} # - pack build $APP_SHORT_NAME -t $APP_SHORT_NAME:$TAG --builder paketobuildpacks/builder:base - # - docker build -t $APP_SHORT_NAME:$TAG . - - docker tag $APP_SHORT_NAME:$TAG $OPA_CI_REGISTRY_IMAGE:$TAG - - docker tag $APP_SHORT_NAME:$TAG $OPA_CI_REGISTRY_IMAGE:latest + # - docker build -t $APP_SHORT_NAME:${CI_COMMIT_SHORT_SHA} . + - LC_TAG=`echo "${OPA_CI_REGISTRY_IMAGE}" | tr '[:upper:]' '[:lower:]'` + - docker tag $APP_SHORT_NAME:${CI_COMMIT_SHORT_SHA} $LC_TAG:${CI_COMMIT_SHORT_SHA} + - docker tag $APP_SHORT_NAME:${CI_COMMIT_SHORT_SHA} $LC_TAG:latest - echo Pushing Docker image to ECR... - - docker push $OPA_CI_REGISTRY_IMAGE --all-tags + - docker push $LC_TAG --all-tags diff --git a/backstage-reference/common/cicd/.gitlab-ci-aws-iac-ecs.yml b/backstage-reference/common/cicd/.gitlab-ci-aws-iac-ecs.yml index 892e560d..b78f20c8 100644 --- a/backstage-reference/common/cicd/.gitlab-ci-aws-iac-ecs.yml +++ b/backstage-reference/common/cicd/.gitlab-ci-aws-iac-ecs.yml @@ -4,7 +4,8 @@ # tell CDK where to deploy to, based on provider props file - export CDK_DEPLOY_ACCOUNT=$ACCOUNT - export CDK_DEPLOY_REGION=$REGION - - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "$CI_PROJECT_NAME-$CI_JOB_STAGE" --duration-second=3600 --output json) + - export ROLE_NAME=$CI_PROJECT_NAME-$CI_JOB_STAGE # store role session name so that a single env var can be truncated to allowed character length + - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "${ROLE_NAME:0:63}" --duration-second=3600 --output json) - export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') - export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SecretAccessKey') - export AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') diff --git a/backstage-reference/common/cicd/.gitlab-ci-aws-iac-eks-kubectl.yml b/backstage-reference/common/cicd/.gitlab-ci-aws-iac-eks-kubectl.yml new file mode 100644 index 00000000..e7b661f2 --- /dev/null +++ b/backstage-reference/common/cicd/.gitlab-ci-aws-iac-eks-kubectl.yml @@ -0,0 +1,249 @@ +.abstract-iac-deployment: + script: + - echo -e "\e[0Ksection_start:`date +%s`:set_provider_vars[collapsed=true]\r\e[0KSet Provider Variables" + - set -a && source $PROVIDER_PROPS_FILE && set +a + # tell CDK where to deploy to, based on provider props file + - export CDK_DEPLOY_ACCOUNT=$ACCOUNT + - export CDK_DEPLOY_REGION=$REGION + - echo -e "\e[0Ksection_end:`date +%s`:set_provider_vars\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:aws_identity[collapsed=true]\r\e[0KAssume AWS IAM Role" + - export ROLE_NAME=$CI_PROJECT_NAME-$CI_JOB_STAGE # store role session name so that a single env var can be truncated to allowed character length + - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "${ROLE_NAME:0:63}" --duration-second=3600 --output json) + - export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') + - export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SecretAccessKey') + - export AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') + - aws sts get-caller-identity + - echo -e "\e[0Ksection_end:`date +%s`:aws_identity\r\e[0K" + + - cd $CI_PROJECT_DIR/.iac/ + + - echo -e "\e[0Ksection_start:`date +%s`:yarn_install[collapsed=true]\r\e[0KYarn Install" + - yarn install + - echo -e "\e[0Ksection_end:`date 
+%s`:yarn_install\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:cdk_deploy[collapsed=true]\r\e[0KCDK Deploy" + - export CLUSTER_OIDC_PROVIDER=$(aws eks describe-cluster --name ${TARGET_EKS_CLUSTER_ARN#*/} --region $REGION --query "cluster.identity.oidc.issuer" --output text | sed -e "s/^https:\/\///") + - $CI_PROJECT_DIR/.iac/node_modules/.bin/cdk deploy --outputs-file cdk-output.json --require-approval never + - cat cdk-output.json + - echo -e "\e[0Ksection_end:`date +%s`:cdk_deploy\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:extract_cdk_output[collapsed=true]\r\e[0KExport CDK Output" + - cat cdk-output.json + - jq '.[] ' cdk-output.json | jq -r 'to_entries[]|"\(.key)=\"\(.value)\""' > cdk-output.properties + - cat cdk-output.properties + - set -a && source cdk-output.properties && set +a + - echo -e "\e[0Ksection_end:`date +%s`:extract_cdk_output\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:k8s-config-dir[collapsed=true]\r\e[0KGetting k8s config directory" + - export K8S_CONFIG_DIR=$(yq -r .metadata.k8sConfigDirName $CI_PROJECT_DIR/.backstage/catalog-info.yaml) + - echo K8S_CONFIG_DIR is $K8S_CONFIG_DIR + - echo -e "\e[0Ksection_end:`date +%s`:k8s-config-dir\r\e[0K" + + - | + if [[ -f "$CI_PROJECT_DIR/$K8S_CONFIG_DIR/Chart.yaml" ]]; then + $CI_PROJECT_DIR/cicd/scripts/k8s/install-helm.sh + fi + + - echo -e "\e[0Ksection_start:`date +%s`:namespace[collapsed=true]\r\e[0KDeploying k8s Namespace $NAMESPACE" + - | + if [[ -f "$CI_PROJECT_DIR/$K8S_CONFIG_DIR/Chart.yaml" ]]; then + cd $CI_PROJECT_DIR/$K8S_CONFIG_DIR + helm template -f $TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME/values.yaml -s templates/namespace.yaml . > resolved-namespace.yaml + else + cd $CI_PROJECT_DIR/$K8S_CONFIG_DIR/$TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME + cp namespace.yaml resolved-namespace.yaml + fi + - echo "Namespace YAML is:" + - cat resolved-namespace.yaml + - export clusterName=${TARGET_EKS_CLUSTER_ARN#*/} + - $CI_PROJECT_DIR/cicd/scripts/k8s/install-kubectl.sh + - aws eks update-kubeconfig --region $REGION --name $clusterName + - kubectl apply -f resolved-namespace.yaml || exit 1 + - rm resolved-namespace.yaml + - cd - + - echo -e "\e[0Ksection_end:`date +%s`:namespace\r\e[0K" + + cache: + - key: + files: + - $CI_PROJECT_DIR/.iac/yarn.lock + paths: + - $CI_PROJECT_DIR/.iac/node_modules + +.abstract-git-commit: + rules: + - if: $CI_COMMIT_TITLE =~ /generate CICD stages/ + when: never + - if: $CI_COMMIT_TITLE =~ /^Destroy TF Infrastructure/ + when: never + - if: ($CI_PIPELINE_SOURCE == "web" || $CI_PIPELINE_SOURCE == "push") && $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH + script: + # make access token available to after_script + - echo $ACCESS_TOKEN > $CI_PROJECT_DIR/gittoken + + - echo -e "\e[0Ksection_start:`date +%s`:set_provider_vars[collapsed=true]\r\e[0KSet Provider Variables" + - set -a && source $PROVIDER_PROPS_FILE && set +a + - echo -e "\e[0Ksection_end:`date +%s`:set_provider_vars\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:aws_identity[collapsed=true]\r\e[0KAssume AWS IAM Role" + - export ROLE_NAME=$CI_PROJECT_NAME-$CI_JOB_STAGE # store role session name so that a single env var can be truncated to allowed character length + - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "${ROLE_NAME:0:63}" --duration-second=3600 --output json) + - export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') + - export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SecretAccessKey') + - export 
AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') + - aws sts get-caller-identity + - echo -e "\e[0Ksection_end:`date +%s`:aws_identity\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:extract_cdk_output[collapsed=true]\r\e[0KExport CDK Output" + - export APP_SHORT_NAME=$(yq -r .metadata.name $CI_PROJECT_DIR/.backstage/catalog-info.yaml) + - echo "APP_SHORT_NAME is $APP_SHORT_NAME" + - aws cloudformation --region $REGION describe-stacks --stack-name $APP_SHORT_NAME-eks-resources-$TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME --query "Stacks[0].Outputs[]" | jq -r '. | to_entries | map(.value.OutputKey + "=" + (.value.OutputValue|tojson)) | .[]' > cdk-output.properties + - cat cdk-output.properties + - set -a && source cdk-output.properties && set +a + - echo -e "\e[0Ksection_end:`date +%s`:extract_cdk_output\r\e[0K" + + - export clusterName=${TARGET_EKS_CLUSTER_ARN#*/} + - $CI_PROJECT_DIR/cicd/scripts/k8s/install-kubectl.sh + - aws eks update-kubeconfig --region $REGION --name $clusterName + - echo -e "\e[0Ksection_start:`date +%s`:catalog[collapsed=true]\r\e[0KGetting the latest catalog-info.yaml contents" + # We need the latest catalog-info.yaml contents in case this pipeline modified this file in an earlier stage + - git checkout $CI_COMMIT_BRANCH + - git pull + - export LATEST_CATALOG_INFO="$(cat $CI_PROJECT_DIR/.backstage/catalog-info.yaml)" + - git checkout $CI_COMMIT_SHA # put our files back to where they were when the pipeline started + - echo "$LATEST_CATALOG_INFO" > $CI_PROJECT_DIR/.backstage/catalog-info.yaml + - echo -e "\e[0Ksection_end:`date +%s`:catalog\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:k8s-config-dir[collapsed=true]\r\e[0KGetting k8s config directory" + - export K8S_CONFIG_DIR=$(yq -r .metadata.k8sConfigDirName $CI_PROJECT_DIR/.backstage/catalog-info.yaml) + - echo K8S_CONFIG_DIR is $K8S_CONFIG_DIR + - echo -e "\e[0Ksection_end:`date +%s`:k8s-config-dir\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:app-admin[collapsed=true]\r\e[0KCheck for App Admin" + # if AppAdminRoleArn is blank then clear out the RoleBinding file so that its a no-op + - | + if [[ -z "$AppAdminRoleArn" ]]; then + echo "AppAdminRoleArn was blank, clearing k8s admin role binding manifest."; + echo '' > $CI_PROJECT_DIR/$K8S_CONFIG_DIR/base/nsAdminRoleBinding.yaml + fi + - echo -e "\e[0Ksection_end:`date +%s`:app-admin\r\e[0K" + + - | + if [[ -f "$CI_PROJECT_DIR/$K8S_CONFIG_DIR/Chart.yaml" ]]; then + $CI_PROJECT_DIR/cicd/scripts/k8s/install-helm.sh + fi + + - echo -e "\e[0Ksection_start:`date +%s`:save-template-output[collapsed=true]\r\e[0KSaving k8s Configs for Environment" + - $CI_PROJECT_DIR/cicd/scripts/k8s/save-template-output.sh "gitAddTemplateOutput" "$TARGET_ENV_NAME-${TARGET_ENV_PROVIDER_NAME}/" || exit 1 + # Restore current provider props env vars that could have been altered by save-template-output.sh + - set -a && source $PROVIDER_PROPS_FILE && set +a + - echo -e "\e[0Ksection_end:`date +%s`:save-template-output\r\e[0K" + + # Note: this section can be uncommented if you do NOT want the CICD pipeline + # to automatically deploy the app to the kubernetes cluster + # - echo -e "\e[0Ksection_start:`date +%s`:first_deployment[collapsed=true]\r\e[0KDetermine If This Is The First Stack Deployment Ever" + # - export appData=".metadata | try(.appData) | try (.[\"$TARGET_ENV_NAME\"]) | try(.[\"$TARGET_ENV_PROVIDER_NAME\"]) | try(.StackName)" + # # Determine if the stack has ever been deployed + # - export ENTITY_ENV_STACK_NAME=$(yq --arg appData 
"${appData}" "$appData" $CI_PROJECT_DIR/.backstage/catalog-info.yaml) + # - echo "ENTITY_ENV_STACK_NAME is $ENTITY_ENV_STACK_NAME" + # - echo -e "\e[0Ksection_end:`date +%s`:first_deployment\r\e[0K" + + # This line should be commented-out if the above section is executed + - export ENTITY_ENV_STACK_NAME="null" + + - | + if [[ "$ENTITY_ENV_STACK_NAME" == "null" ]]; then + echo -e "\e[0Ksection_start:`date +%s`:aws-auth-patch[collapsed=true]\r\e[0KUpdating aws-auth ConfigMap if necessary"; + export clusterName=${TARGET_EKS_CLUSTER_ARN#*/}; + if [[ -n "$AppAdminRoleArn" ]]; then + echo "User provided app admin role $AppAdminRoleArn" + kubectl get configmap/aws-auth -n kube-system -o=json > configmap.json + echo "Current configmap" + cat configmap.json + export currentRolesArray="$(cat configmap.json | jq -r '.data.mapRoles')" + if [[ "$currentRolesArray" == *"$AppAdminRoleArn"* ]]; then + echo "Skipping updating aws-auth ConfigMap since it already contains $AppAdminRoleArn." + else + export newRolesArray="${currentRolesArray::${#currentRolesArray}-1}, {\"rolearn\":\"$AppAdminRoleArn\",\"username\":\"$AppAdminRoleArn\",\"groups\":[]}]" + echo "$newRolesArray" + export parsedRolesArray="${newRolesArray//\"/\\\"}" + echo "$parsedRolesArray" + kubectl patch configmap/aws-auth -n kube-system -p "{\"data\": {\"mapRoles\": \"$parsedRolesArray\"}}" --type strategic + fi + fi + echo -e "\e[0Ksection_end:`date +%s`:aws-auth-patch\r\e[0K"; + echo -e "\e[0Ksection_start:`date +%s`:deploy_app[collapsed=true]\r\e[0KDeploying App to Kubernetes"; + echo "Deploying k8s app..."; + cd $CI_PROJECT_DIR/$K8S_CONFIG_DIR/$TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME; + export MANIFEST_JSON=$(cat next-release.json); + cat next-release.yaml; + kubectl apply -f next-release.yaml || exit 1; + if [[ "$MANIFEST_JSON" == *"alb.ingress.kubernetes.io/tags"* ]]; then + $CI_PROJECT_DIR/cicd/scripts/k8s/get-ingress-dns-name.sh "aws-apps-$APP_SHORT_NAME-$TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME" || exit 1; + if [[ -f "$CI_PROJECT_DIR/ingressDNS.txt" ]]; then + AlbEndpoint=$(cat $CI_PROJECT_DIR/ingressDNS.txt); + export AlbEndpoint="http://${AlbEndpoint}"; + echo "Set AlbEndpoint to $AlbEndpoint"; + rm $CI_PROJECT_DIR/ingressDNS.txt; + fi + else + echo "Skipping setting AlbEndpoint since application did not contain properly annotated Ingress."; + fi + cd -; + echo -e "\e[0Ksection_end:`date +%s`:deploy_app\r\e[0K"; + else + echo "The CDK stack has already been deployed to the $TARGET_ENV_PROVIDER_NAME provider."; + echo "Skipping deploying k8s app. 
It can still be deployed via kubectl or via the OPA UI."; + fi + + - echo -e "\e[0Ksection_start:`date +%s`:backstage_entity[collapsed=true]\r\e[0KUpdate Backstage Entity" + - cd $CI_PROJECT_DIR/.backstage + - yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"EcrRepositoryUri\"] =\"${EcrRepositoryUri}\"" catalog-info.yaml + - yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"EcrRepositoryArn\"] =\"${EcrRepositoryArn}\"" catalog-info.yaml + - yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"AppResourceGroup\"] =\"${AppResourceGroup}\"" catalog-info.yaml + - yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"StackName\"] =\"${StackName}\"" catalog-info.yaml + - yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"AppAdminRoleArn\"] =\"${AppAdminRoleArn}\"" catalog-info.yaml + - yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"ServiceAccountRoleArn\"] =\"${ServiceAccountRoleArn}\"" catalog-info.yaml + - ALREADY_DEPENDS_ON="$(grep "awsenvironment:default/$TARGET_ENV_NAME" catalog-info.yaml || true)" + - if [[ -z "$ALREADY_DEPENDS_ON" ]]; then yq -Yi ".spec.dependsOn += [\"awsenvironment:default/${TARGET_ENV_NAME}\"]" catalog-info.yaml; fi + - if [[ ! -z "$AlbEndpoint" ]]; then yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"AlbEndpoint\"] =\"${AlbEndpoint}\"" catalog-info.yaml; fi + - yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"Namespace\"] =\"${NAMESPACE}\"" catalog-info.yaml + - cat $CI_PROJECT_DIR/.backstage/catalog-info.yaml + - export LATEST_CATALOG_INFO="$(cat $CI_PROJECT_DIR/.backstage/catalog-info.yaml)" + - echo -e "\e[0Ksection_end:`date +%s`:backstage_entity\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:git[collapsed=true]\r\e[0KUpdate Git Repo" + # undo catalog-info.yaml changes to avoid potential merge conflicts + - git reset HEAD $CI_PROJECT_DIR/.backstage/catalog-info.yaml + - git checkout -- $CI_PROJECT_DIR/.backstage/catalog-info.yaml + # make sure that we are up to date so that our commit will succeed + - git checkout $CI_COMMIT_BRANCH + - git pull + - echo "$LATEST_CATALOG_INFO" > $CI_PROJECT_DIR/.backstage/catalog-info.yaml + - git add $CI_PROJECT_DIR/.backstage/catalog-info.yaml + - git add $CI_PROJECT_DIR/$K8S_CONFIG_DIR + - UPDATE_COUNT=$(git diff --cached --numstat | wc -l | sed 's/ *$//g') + - echo "The number of files that will be committed is $UPDATE_COUNT" + - git status + - if [[ "$UPDATE_COUNT" -gt "0" ]]; then git commit -m "updating entity details" --quiet; fi + - if [[ "$UPDATE_COUNT" -gt "0" ]]; then git push -o ci.skip https://oauth2:$ACCESS_TOKEN@$CI_SERVER_HOST/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME; fi + - git checkout $CI_COMMIT_SHA # put our git repo back to where it was when the pipeline started + - echo -e "\e[0Ksection_end:`date +%s`:git\r\e[0K" + + after_script: + - echo -e "\e[0Ksection_start:`date +%s`:delete-aws_creds[collapsed=true]\r\e[0KDelete AWS Creds" + - export ACCESS_TOKEN=$(cat $CI_PROJECT_DIR/gittoken) + - rm $CI_PROJECT_DIR/gittoken + - set -a && source $PROVIDER_PROPS_FILE && set +a + - export GL_API=https://$CI_SERVER_HOST/api/v4/projects/$CI_PROJECT_ID/variables + - > + curl --globoff --request DELETE --header "PRIVATE-TOKEN: ${ACCESS_TOKEN}" + "${GL_API}/AWS_ACCESS_KEY_ID?filter[environment_scope]=$OPA_CI_ENVIRONMENT" + - > + curl --globoff 
--request DELETE --header "PRIVATE-TOKEN: ${ACCESS_TOKEN}" + "${GL_API}/AWS_SECRET_ACCESS_KEY?filter[environment_scope]=$OPA_CI_ENVIRONMENT" + - > + curl --globoff --request DELETE --header "PRIVATE-TOKEN: ${ACCESS_TOKEN}" + "${GL_API}/AWS_SESSION_TOKEN?filter[environment_scope]=$OPA_CI_ENVIRONMENT" + - echo -e "\e[0Ksection_end:`date +%s`:delete-aws_creds\r\e[0K" diff --git a/backstage-reference/common/cicd/.gitlab-ci-aws-iac-eks.yml b/backstage-reference/common/cicd/.gitlab-ci-aws-iac-eks.yml new file mode 100644 index 00000000..0085baa2 --- /dev/null +++ b/backstage-reference/common/cicd/.gitlab-ci-aws-iac-eks.yml @@ -0,0 +1,234 @@ +.abstract-iac-deployment: + script: + - echo -e "\e[0Ksection_start:`date +%s`:set_provider_vars[collapsed=true]\r\e[0KSet Provider Variables" + - set -a && source $PROVIDER_PROPS_FILE && set +a + # tell CDK where to deploy to, based on provider props file + - export CDK_DEPLOY_ACCOUNT=$ACCOUNT + - export CDK_DEPLOY_REGION=$REGION + - echo -e "\e[0Ksection_end:`date +%s`:set_provider_vars\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:aws_identity[collapsed=true]\r\e[0KAssume AWS IAM Role" + - export ROLE_NAME=$CI_PROJECT_NAME-$CI_JOB_STAGE # store role session name so that a single env var can be truncated to allowed character length + - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "${ROLE_NAME:0:63}" --duration-second=3600 --output json) + - export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') + - export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SecretAccessKey') + - export AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') + - aws sts get-caller-identity + - echo -e "\e[0Ksection_end:`date +%s`:aws_identity\r\e[0K" + + - cd $CI_PROJECT_DIR/.iac/ + + - echo -e "\e[0Ksection_start:`date +%s`:yarn_install[collapsed=true]\r\e[0KYarn Install" + - yarn install + - echo -e "\e[0Ksection_end:`date +%s`:yarn_install\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:cdk_deploy[collapsed=true]\r\e[0KCDK Deploy" + - export CLUSTER_OIDC_PROVIDER=$(aws eks describe-cluster --name ${TARGET_EKS_CLUSTER_ARN#*/} --region $REGION --query "cluster.identity.oidc.issuer" --output text | sed -e "s/^https:\/\///") + - $CI_PROJECT_DIR/.iac/node_modules/.bin/cdk deploy --outputs-file cdk-output.json --require-approval never + - cat cdk-output.json + - echo -e "\e[0Ksection_end:`date +%s`:cdk_deploy\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:extract_cdk_output[collapsed=true]\r\e[0KExport CDK Output" + - cat cdk-output.json + - jq '.[] ' cdk-output.json | jq -r 'to_entries[]|"\(.key)=\"\(.value)\""' > cdk-output.properties + - cat cdk-output.properties + - set -a && source cdk-output.properties && set +a + - echo -e "\e[0Ksection_end:`date +%s`:extract_cdk_output\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:k8s-config-dir[collapsed=true]\r\e[0KGetting k8s config directory" + - export K8S_CONFIG_DIR=$(yq -r .metadata.k8sConfigDirName $CI_PROJECT_DIR/.backstage/catalog-info.yaml) + - echo K8S_CONFIG_DIR is $K8S_CONFIG_DIR + - echo -e "\e[0Ksection_end:`date +%s`:k8s-config-dir\r\e[0K" + + - | + if [[ -f "$CI_PROJECT_DIR/$K8S_CONFIG_DIR/Chart.yaml" ]]; then + $CI_PROJECT_DIR/cicd/scripts/k8s/install-helm.sh + fi + + - echo -e "\e[0Ksection_start:`date +%s`:namespace[collapsed=true]\r\e[0KDeploying k8s Namespace $NAMESPACE" + - | + if [[ -f "$CI_PROJECT_DIR/$K8S_CONFIG_DIR/Chart.yaml" ]]; then + cd $CI_PROJECT_DIR/$K8S_CONFIG_DIR + export 
MANIFEST_JSON=$(helm template -f $TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME/values.yaml -s templates/namespace.yaml . | yq -s .) + else + cd $CI_PROJECT_DIR/$K8S_CONFIG_DIR/$TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME + export MANIFEST_JSON=$(yq -s . namespace.yaml) + fi + - echo "Namespace MANIFEST_JSON is $MANIFEST_JSON" + - export clusterName=${TARGET_EKS_CLUSTER_ARN#*/} + # It is possible to just use kubectl here to deploy the namespace but it won't work if this pipeline + # cannot hit the EKS cluster API endpoint due to the cluster being created as private or with an IP + # allow-list that does not include this pipeline's IP address. We are using a lambda function here + # to deploy the namespace because it runs within the same VPC as the EKS cluster and will always be + # able to talk to the EKS cluster API endpoint. + - $CI_PROJECT_DIR/cicd/scripts/k8s/apply-k8s-lambda.sh || exit 1 + - cd - + - echo -e "\e[0Ksection_end:`date +%s`:namespace\r\e[0K" + + cache: + - key: + files: + - $CI_PROJECT_DIR/.iac/yarn.lock + paths: + - $CI_PROJECT_DIR/.iac/node_modules + +.abstract-git-commit: + rules: + - if: $CI_COMMIT_TITLE =~ /generate CICD stages/ + when: never + - if: $CI_COMMIT_TITLE =~ /^Destroy TF Infrastructure/ + when: never + - if: ($CI_PIPELINE_SOURCE == "web" || $CI_PIPELINE_SOURCE == "push") && $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH + script: + # make access token available to after_script + - echo $ACCESS_TOKEN > $CI_PROJECT_DIR/gittoken + + - echo -e "\e[0Ksection_start:`date +%s`:set_provider_vars[collapsed=true]\r\e[0KSet Provider Variables" + - set -a && source $PROVIDER_PROPS_FILE && set +a + - echo -e "\e[0Ksection_end:`date +%s`:set_provider_vars\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:aws_identity[collapsed=true]\r\e[0KAssume AWS IAM Role" + - export ROLE_NAME=$CI_PROJECT_NAME-$CI_JOB_STAGE # store role session name so that a single env var can be truncated to allowed character length + - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "${ROLE_NAME:0:63}" --duration-second=3600 --output json) + - export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') + - export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SecretAccessKey') + - export AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') + - aws sts get-caller-identity + - echo -e "\e[0Ksection_end:`date +%s`:aws_identity\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:extract_cdk_output[collapsed=true]\r\e[0KExport CDK Output" + - export APP_SHORT_NAME=$(yq -r .metadata.name $CI_PROJECT_DIR/.backstage/catalog-info.yaml) + - echo "APP_SHORT_NAME is $APP_SHORT_NAME" + - aws cloudformation --region $REGION describe-stacks --stack-name $APP_SHORT_NAME-eks-resources-$TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME --query "Stacks[0].Outputs[]" | jq -r '. 
| to_entries | map(.value.OutputKey + "=" + (.value.OutputValue|tojson)) | .[]' > cdk-output.properties + - cat cdk-output.properties + - set -a && source cdk-output.properties && set +a + - echo -e "\e[0Ksection_end:`date +%s`:extract_cdk_output\r\e[0K" + + - $CI_PROJECT_DIR/cicd/scripts/k8s/install-kubectl.sh + + - echo -e "\e[0Ksection_start:`date +%s`:catalog[collapsed=true]\r\e[0KGetting the latest catalog-info.yaml contents" + # We need the latest catalog-info.yaml contents in case this pipeline modified this file in an earlier stage + - git checkout $CI_COMMIT_BRANCH + - git pull + - export LATEST_CATALOG_INFO="$(cat $CI_PROJECT_DIR/.backstage/catalog-info.yaml)" + - git checkout $CI_COMMIT_SHA # put our files back to where they were when the pipeline started + - echo "$LATEST_CATALOG_INFO" > $CI_PROJECT_DIR/.backstage/catalog-info.yaml + - echo -e "\e[0Ksection_end:`date +%s`:catalog\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:k8s-config-dir[collapsed=true]\r\e[0KGetting k8s config directory" + - export K8S_CONFIG_DIR=$(yq -r .metadata.k8sConfigDirName $CI_PROJECT_DIR/.backstage/catalog-info.yaml) + - echo K8S_CONFIG_DIR is $K8S_CONFIG_DIR + - echo -e "\e[0Ksection_end:`date +%s`:k8s-config-dir\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:app-admin[collapsed=true]\r\e[0KCheck for App Admin" + # if AppAdminRoleArn is blank then clear out the RoleBinding file so that its a no-op + - | + if [[ -z "$AppAdminRoleArn" ]]; then + echo "AppAdminRoleArn was blank, clearing k8s admin role binding manifest."; + echo '' > $CI_PROJECT_DIR/$K8S_CONFIG_DIR/base/nsAdminRoleBinding.yaml + fi + - echo -e "\e[0Ksection_end:`date +%s`:app-admin\r\e[0K" + + - | + if [[ -f "$CI_PROJECT_DIR/$K8S_CONFIG_DIR/Chart.yaml" ]]; then + $CI_PROJECT_DIR/cicd/scripts/k8s/install-helm.sh + fi + + - echo -e "\e[0Ksection_start:`date +%s`:save-template-output[collapsed=true]\r\e[0KSaving k8s Configs for Environment" + - $CI_PROJECT_DIR/cicd/scripts/k8s/save-template-output.sh "gitAddTemplateOutput" "$TARGET_ENV_NAME-${TARGET_ENV_PROVIDER_NAME}/" || exit 1 + # Restore current provider props env vars that could have been altered by save-template-output.sh + - set -a && source $PROVIDER_PROPS_FILE && set +a + - echo -e "\e[0Ksection_end:`date +%s`:save-template-output\r\e[0K" + + # Note: this section can be uncommented if you do NOT want the CICD pipeline + # to automatically deploy the app to the kubernetes cluster + # - echo -e "\e[0Ksection_start:`date +%s`:first_deployment[collapsed=true]\r\e[0KDetermine If This Is The First Stack Deployment Ever" + # - export appData=".metadata | try(.appData) | try (.[\"$TARGET_ENV_NAME\"]) | try(.[\"$TARGET_ENV_PROVIDER_NAME\"]) | try(.StackName)" + # # Determine if the stack has ever been deployed + # - export ENTITY_ENV_STACK_NAME=$(yq --arg appData "${appData}" "$appData" $CI_PROJECT_DIR/.backstage/catalog-info.yaml) + # - echo "ENTITY_ENV_STACK_NAME is $ENTITY_ENV_STACK_NAME" + # - echo -e "\e[0Ksection_end:`date +%s`:first_deployment\r\e[0K" + + # This line should be commented-out if the above section is executed + - export ENTITY_ENV_STACK_NAME="null" + + - | + if [[ "$ENTITY_ENV_STACK_NAME" == "null" ]]; then + echo -e "\e[0Ksection_start:`date +%s`:aws-auth-patch[collapsed=true]\r\e[0KUpdating aws-auth ConfigMap if necessary"; + export clusterName=${TARGET_EKS_CLUSTER_ARN#*/}; + $CI_PROJECT_DIR/cicd/scripts/k8s/add-role-to-aws-auth-configmap.sh || exit 1; + echo -e "\e[0Ksection_end:`date +%s`:aws-auth-patch\r\e[0K"; + echo -e "\e[0Ksection_start:`date 
+%s`:deploy_app[collapsed=true]\r\e[0KDeploying App to Kubernetes"; + echo "Deploying k8s app..."; + cd $CI_PROJECT_DIR/$K8S_CONFIG_DIR/$TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME; + export MANIFEST_JSON=$(cat next-release.json) + $CI_PROJECT_DIR/cicd/scripts/k8s/apply-k8s-lambda.sh || exit 1 + if [[ "$MANIFEST_JSON" == *"alb.ingress.kubernetes.io/tags"* ]]; then + $CI_PROJECT_DIR/cicd/scripts/k8s/get-ingress-dns-name.sh "aws-apps-$APP_SHORT_NAME-$TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME" || exit 1 + if [[ -f "$CI_PROJECT_DIR/ingressDNS.txt" ]]; then + AlbEndpoint=$(cat $CI_PROJECT_DIR/ingressDNS.txt) + export AlbEndpoint="http://${AlbEndpoint}" + echo "Set AlbEndpoint to $AlbEndpoint" + rm $CI_PROJECT_DIR/ingressDNS.txt + fi + else + echo "Skipping setting AlbEndpoint since application did not contain properly annotated Ingress." + fi + cd -; + echo -e "\e[0Ksection_end:`date +%s`:deploy_app\r\e[0K"; + else + echo "The CDK stack has already been deployed to the $TARGET_ENV_PROVIDER_NAME provider."; + echo "Skipping deploying k8s app. It can still be deployed via kubectl or via the OPA UI."; + fi + + - echo -e "\e[0Ksection_start:`date +%s`:backstage_entity[collapsed=true]\r\e[0KUpdate Backstage Entity" + - cd $CI_PROJECT_DIR/.backstage + - yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"EcrRepositoryUri\"] =\"${EcrRepositoryUri}\"" catalog-info.yaml + - yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"EcrRepositoryArn\"] =\"${EcrRepositoryArn}\"" catalog-info.yaml + - yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"AppResourceGroup\"] =\"${AppResourceGroup}\"" catalog-info.yaml + - yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"StackName\"] =\"${StackName}\"" catalog-info.yaml + - yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"AppAdminRoleArn\"] =\"${AppAdminRoleArn}\"" catalog-info.yaml + - yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"ServiceAccountRoleArn\"] =\"${ServiceAccountRoleArn}\"" catalog-info.yaml + - ALREADY_DEPENDS_ON="$(grep "awsenvironment:default/$TARGET_ENV_NAME" catalog-info.yaml || true)" + - if [[ -z "$ALREADY_DEPENDS_ON" ]]; then yq -Yi ".spec.dependsOn += [\"awsenvironment:default/${TARGET_ENV_NAME}\"]" catalog-info.yaml; fi + - if [[ ! 
-z "$AlbEndpoint" ]]; then yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"AlbEndpoint\"] =\"${AlbEndpoint}\"" catalog-info.yaml; fi + - yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"Namespace\"] =\"${NAMESPACE}\"" catalog-info.yaml + - yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"LogGroup\"] =\"/aws/apps/${PREFIX}-${TARGET_ENV_PROVIDER_NAME}/${NAMESPACE}\"" catalog-info.yaml + - cat $CI_PROJECT_DIR/.backstage/catalog-info.yaml + - export LATEST_CATALOG_INFO="$(cat $CI_PROJECT_DIR/.backstage/catalog-info.yaml)" + - echo -e "\e[0Ksection_end:`date +%s`:backstage_entity\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:git[collapsed=true]\r\e[0KUpdate Git Repo" + # undo catalog-info.yaml changes to avoid potential merge conflicts + - git reset HEAD $CI_PROJECT_DIR/.backstage/catalog-info.yaml + - git checkout -- $CI_PROJECT_DIR/.backstage/catalog-info.yaml + # make sure that we are up to date so that our commit will succeed + - git checkout $CI_COMMIT_BRANCH + - git pull + - echo "$LATEST_CATALOG_INFO" > $CI_PROJECT_DIR/.backstage/catalog-info.yaml + - git add $CI_PROJECT_DIR/.backstage/catalog-info.yaml + - git add $CI_PROJECT_DIR/$K8S_CONFIG_DIR + - UPDATE_COUNT=$(git diff --cached --numstat | wc -l | sed 's/ *$//g') + - echo "The number of files that will be committed is $UPDATE_COUNT" + - git status + - if [[ "$UPDATE_COUNT" -gt "0" ]]; then git commit -m "updating entity details" --quiet; fi + - if [[ "$UPDATE_COUNT" -gt "0" ]]; then git push -o ci.skip https://oauth2:$ACCESS_TOKEN@$CI_SERVER_HOST/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME; fi + - git checkout $CI_COMMIT_SHA # put our git repo back to where it was when the pipeline started + - echo -e "\e[0Ksection_end:`date +%s`:git\r\e[0K" + + after_script: + - echo -e "\e[0Ksection_start:`date +%s`:delete-aws_creds[collapsed=true]\r\e[0KDelete AWS Creds" + - export ACCESS_TOKEN=$(cat $CI_PROJECT_DIR/gittoken) + - rm $CI_PROJECT_DIR/gittoken + - set -a && source $PROVIDER_PROPS_FILE && set +a + - export GL_API=https://$CI_SERVER_HOST/api/v4/projects/$CI_PROJECT_ID/variables + - > + curl --globoff --request DELETE --header "PRIVATE-TOKEN: ${ACCESS_TOKEN}" + "${GL_API}/AWS_ACCESS_KEY_ID?filter[environment_scope]=$OPA_CI_ENVIRONMENT" + - > + curl --globoff --request DELETE --header "PRIVATE-TOKEN: ${ACCESS_TOKEN}" + "${GL_API}/AWS_SECRET_ACCESS_KEY?filter[environment_scope]=$OPA_CI_ENVIRONMENT" + - > + curl --globoff --request DELETE --header "PRIVATE-TOKEN: ${ACCESS_TOKEN}" + "${GL_API}/AWS_SESSION_TOKEN?filter[environment_scope]=$OPA_CI_ENVIRONMENT" + - echo -e "\e[0Ksection_end:`date +%s`:delete-aws_creds\r\e[0K" diff --git a/backstage-reference/common/cicd/.gitlab-ci-aws-iac-rds.yml b/backstage-reference/common/cicd/.gitlab-ci-aws-iac-rds.yml index f9b913bf..d161a775 100644 --- a/backstage-reference/common/cicd/.gitlab-ci-aws-iac-rds.yml +++ b/backstage-reference/common/cicd/.gitlab-ci-aws-iac-rds.yml @@ -1,7 +1,8 @@ .abstract-iac-deployment: script: - set -a && source $PROVIDER_PROPS_FILE && set +a - - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "$CI_PROJECT_NAME-$CI_JOB_STAGE" --duration-second=3600 --output json) + - export ROLE_NAME=$CI_PROJECT_NAME-$CI_JOB_STAGE # store role session name so that a single env var can be truncated to allowed character length + - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "${ROLE_NAME:0:63}" 
--duration-second=3600 --output json) - export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') - export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SecretAccessKey') - export AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') diff --git a/backstage-reference/common/cicd/.gitlab-ci-aws-iac-s3.yml b/backstage-reference/common/cicd/.gitlab-ci-aws-iac-s3.yml new file mode 100644 index 00000000..652c4775 --- /dev/null +++ b/backstage-reference/common/cicd/.gitlab-ci-aws-iac-s3.yml @@ -0,0 +1,39 @@ +.abstract-iac-deployment: + script: + - set -a && source $PROVIDER_PROPS_FILE && set +a + - export ROLE_NAME=$CI_PROJECT_NAME-$CI_JOB_STAGE # store role session name so that a single env var can be truncated to allowed character length + - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "${ROLE_NAME:0:63}" --duration-second=3600 --output json) + - export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') + - export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SecretAccessKey') + - export AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') + - aws sts get-caller-identity + - cd $CI_PROJECT_DIR/.iac/ + - yarn install + - $CI_PROJECT_DIR/.iac/node_modules/.bin/cdk deploy --outputs-file cdk-output.json --require-approval never + - cat cdk-output.json + - jq '.[] ' cdk-output.json | jq -r 'to_entries[]|"\(.key)=\"\(.value)\""' > cdk-output.properties + - cat cdk-output.properties + - set -a && source cdk-output.properties && set +a + # alter entity details + - cd $CI_PROJECT_DIR/.backstage + - yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"Arn\"] =\"${BucketArn}\"" catalog-info.yaml + - yq -Yi ".metadata[\"awsArn\"]=\"${BucketArn}\"" catalog-info.yaml + - yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"BucketName\"] =\"${BucketName}\"" catalog-info.yaml + - yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"ResourceGroup\"] =\"${ResourceGroup}\"" catalog-info.yaml + - yq -Yi ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"StackName\"] =\"${StackName}\"" catalog-info.yaml + - ALREADY_DEPENDS_ON="$(grep "awsenvironment:default/$TARGET_ENV_NAME" catalog-info.yaml || true)" + - if [[ -z "$ALREADY_DEPENDS_ON" ]]; then yq -Yi ".spec.dependsOn += [\"awsenvironment:default/${TARGET_ENV_NAME}\"]" catalog-info.yaml; fi + - cat catalog-info.yaml + - git add $CI_PROJECT_DIR/.backstage/catalog-info.yaml + - git add catalog-info.yaml + - UPDATE_COUNT=$(git diff --cached --numstat | wc -l | sed 's/ *$//g') + - echo "The number of files that will be committed is $UPDATE_COUNT" + - git status + - if [[ "$UPDATE_COUNT" -gt "0" ]]; then git commit -m "updating entity details" --quiet; fi + - if [[ "$UPDATE_COUNT" -gt "0" ]]; then git push -o ci.skip https://oauth2:$ACCESS_TOKEN@$CI_SERVER_HOST/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME HEAD:main; fi + cache: + - key: + files: + - $CI_PROJECT_DIR/.iac/yarn.lock + paths: + - $CI_PROJECT_DIR/.iac/node_modules diff --git a/backstage-reference/common/cicd/.gitlab-ci-aws-iac-serverless-api.yml b/backstage-reference/common/cicd/.gitlab-ci-aws-iac-serverless-api.yml index 60a01102..d1942f0f 100644 --- a/backstage-reference/common/cicd/.gitlab-ci-aws-iac-serverless-api.yml +++ b/backstage-reference/common/cicd/.gitlab-ci-aws-iac-serverless-api.yml @@ -1,21 +1,36 @@ 
.abstract-iac-deployment: script: + - echo -e "\e[0Ksection_start:`date +%s`:set_provider_vars[collapsed=true]\r\e[0KSet Provider Variables" - set -a && source $PROVIDER_PROPS_FILE && set +a # tell CDK where to deploy to, based on provider props file - export CDK_DEPLOY_ACCOUNT=$ACCOUNT - export CDK_DEPLOY_REGION=$REGION - - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "$CI_PROJECT_NAME-$CI_JOB_STAGE" --duration-second=3600 --output json) + - echo -e "\e[0Ksection_end:`date +%s`:set_provider_vars\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:aws_identity[collapsed=true]\r\e[0KAssume AWS IAM Role" + - export ROLE_NAME=$CI_PROJECT_NAME-$CI_JOB_STAGE # store role session name so that a single env var can be truncated to allowed character length + - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "${ROLE_NAME:0:63}" --duration-second=3600 --output json) - export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') - export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SecretAccessKey') - export AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') - aws sts get-caller-identity + - echo -e "\e[0Ksection_end:`date +%s`:aws_identity\r\e[0K" + - cd $CI_PROJECT_DIR/.iac/ + + - echo -e "\e[0Ksection_start:`date +%s`:yarn_install[collapsed=true]\r\e[0KYarn Install" - yarn install + - echo -e "\e[0Ksection_end:`date +%s`:yarn_install\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:cdk_deploy[collapsed=true]\r\e[0KCDK Deploy" - $CI_PROJECT_DIR/.iac/node_modules/.bin/cdk deploy --outputs-file cdk-output.json --require-approval never - cat cdk-output.json - jq '.[] ' cdk-output.json | jq -r 'to_entries[]|"\(.key)=\"\(.value)\""' > cdk-output.properties - cat cdk-output.properties - set -a && source cdk-output.properties && set +a + - echo -e "\e[0Ksection_end:`date +%s`:cdk_deploy\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:backstage_entity[collapsed=true]\r\e[0KUpdate Backstage Entity" # alter entity details - cd $CI_PROJECT_DIR/.backstage - if [[ -z "$BuildBucketName" ]]; then BuildBucketName=$(cat $CI_PROJECT_DIR/.backstage/catalog-info.yaml | yq -r ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"BuildBucketName\"]"); fi @@ -26,12 +41,17 @@ - ALREADY_DEPENDS_ON="$(grep "awsenvironment:default/$TARGET_ENV_NAME" catalog-info.yaml || true)" - if [[ -z "$ALREADY_DEPENDS_ON" ]]; then yq -Yi ".spec.dependsOn += [\"awsenvironment:default/${TARGET_ENV_NAME}\"]" catalog-info.yaml; fi - cat catalog-info.yaml + - echo -e "\e[0Ksection_end:`date +%s`:backstage_entity\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:git[collapsed=true]\r\e[0KUpdate Git Repo" - git add $CI_PROJECT_DIR/.backstage/catalog-info.yaml - UPDATE_COUNT=$(git diff --cached --numstat | wc -l | sed 's/ *$//g') - echo "The number of files that will be committed is $UPDATE_COUNT" - git status - if [[ "$UPDATE_COUNT" -gt "0" ]]; then git commit -m "updating entity details" --quiet; fi - if [[ "$UPDATE_COUNT" -gt "0" ]]; then git push -o ci.skip https://oauth2:$ACCESS_TOKEN@$CI_SERVER_HOST/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME HEAD:main; fi + - echo -e "\e[0Ksection_end:`date +%s`:git\r\e[0K" + # Additional functionality for serverless - write file for CICD artifact output - echo "BuildBucketName=${BuildBucketName}" > $CI_PROJECT_DIR/serverless-ci-output-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties cache: @@ -48,7 +68,8 @@ - if [[ -z 
"$BuildBucketName" ]]; then BuildBucketName=$(cat $CI_PROJECT_DIR/.backstage/catalog-info.yaml | yq -r ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"BuildBucketName\"]"); fi # Source output artifact file if it exists from IaC job to get BuildBucketName - if [[ -f "$CI_PROJECT_DIR/serverless-ci-output-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" ]]; then set -a && source $CI_PROJECT_DIR/serverless-ci-output-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties && set +a; fi - - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "$CI_PROJECT_NAME-$CI_JOB_STAGE" --duration-second=3600 --output json) + - export ROLE_NAME=$CI_PROJECT_NAME-$CI_JOB_STAGE # store role session name so that a single env var can be truncated to allowed character length + - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "${ROLE_NAME:0:63}" --duration-second=3600 --output json) - export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') - export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SecretAccessKey') - export AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') diff --git a/backstage-reference/common/cicd/.gitlab-ci-aws-iac-tf-ecs.yml b/backstage-reference/common/cicd/.gitlab-ci-aws-iac-tf-ecs.yml index 8541a248..36cb5c40 100644 --- a/backstage-reference/common/cicd/.gitlab-ci-aws-iac-tf-ecs.yml +++ b/backstage-reference/common/cicd/.gitlab-ci-aws-iac-tf-ecs.yml @@ -4,7 +4,8 @@ - cat updated_props.properties - set -a && source updated_props.properties && set +a - set -a && source $PROVIDER_PROPS_FILE && set +a - - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "$CI_PROJECT_NAME-$CI_JOB_STAGE" --duration-second=3600 --output json) + - export ROLE_NAME=$CI_PROJECT_NAME-$CI_JOB_STAGE # store role session name so that a single env var can be truncated to allowed character length + - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "${ROLE_NAME:0:63}" --duration-second=3600 --output json) - export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') - export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SecretAccessKey') - export AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') diff --git a/backstage-reference/common/cicd/.gitlab-ci-aws-image-deploy.yml b/backstage-reference/common/cicd/.gitlab-ci-aws-image-deploy.yml new file mode 100644 index 00000000..ee12d3f6 --- /dev/null +++ b/backstage-reference/common/cicd/.gitlab-ci-aws-image-deploy.yml @@ -0,0 +1,50 @@ +.abstract-deploy-ecs-image: + script: + - echo -e "\e[0Ksection_start:`date +%s`:verify_env_info[collapsed=true]\r\e[0KVerify environment information" + - set -a && source $PROVIDER_PROPS_FILE && set +a + # Verify required utilities and filesare available and versions + - if command -v aws; then echo "aws-cli is available"; aws --version; else echo "aws-cli is not available"; fi + - if command -v yq; then echo "yq is available"; yq --version; else echo "yq is not available"; fi + - '[[ ! 
-z "$BACKSTAGE_ENTITY_FILE" ]] || { echo >&2 "BACKSTAGE_ENTITY_FILE must be set"; exit 1; }' + - '[[ -f $BACKSTAGE_ENTITY_FILE ]] || { echo >&2 "BACKSTAGE_ENTITY_FILE var does not point to a valid file (${BACKSTAGE_ENTITY_FILE})"; exit 1; }' + - cat $BACKSTAGE_ENTITY_FILE; + # Verify that the '.metadata.appData' key exists in the backstage entity file before continuing + - APPDATA=$(cat ${BACKSTAGE_ENTITY_FILE} | yq -r '.metadata | select(.appData)') + - '[[ ! -z "${APPDATA}" ]] || { echo >&2 "appData is not set in the ${BACKSTAGE_ENTITY_FILE} entity file. Exiting."; exit 0; }' + # + # Note: yq v3 wraps scalar values in quotes in the output. Remove the beginning and trailing quotes from each variable using 'sed' after setting them using yq + - ECS_SERVICE_ARN=$(yq -r ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"EcsServiceArn\"]" ${CI_PROJECT_DIR}/.backstage/catalog-info.yaml); + # - ECS_SERVICE_ARN=$(sed -e 's/^"//' -e 's/"$//' <<<"$ECS_SERVICE_ARN") + - ECS_TASK_DEF_ARN=$(yq -r ".metadata.appData[\"${TARGET_ENV_NAME}\"][\"${TARGET_ENV_PROVIDER_NAME}\"][\"EcsTaskDefinitionArn\"]" ${CI_PROJECT_DIR}/.backstage/catalog-info.yaml); + # - ECS_TASK_DEF_ARN=$(sed -e 's/^"//' -e 's/"$//' <<<"$ECS_TASK_DEF_ARN") + - TASK_DEF_REGION=$(echo "${ECS_TASK_DEF_ARN}" | cut -d':' -f4) + - echo -e "\e[0Ksection_end:`date +%s`:verify_env_info\r\e[0K" + # + # Deploy/Update ECS service as needed + - echo -e "\e[0Ksection_start:`date +%s`:deploy_ecs_image[collapsed=true]\r\e[0KDeploy ECS service image" + - aws sts get-caller-identity + # Assume the environment provisioning role to manage the ecs service and task definition + - export ROLE_NAME=$CI_PROJECT_NAME-$CI_JOB_STAGE # store role session name so that a single env var can be truncated to allowed character length + - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "${ROLE_NAME:0:63}" --duration-second=3600 --output json) + - export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') + - export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SecretAccessKey') + - export AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') + - aws sts get-caller-identity + # Determine if the deployed image is tagged as "latest" (or defaults to latest). Do not proceed with updating the deployment if the image uses an explicit tag identifier + - TASK_DEF_IMAGE=$(aws ecs describe-task-definition --task-definition ${ECS_TASK_DEF_ARN} --region ${TASK_DEF_REGION} --output text --query 'taskDefinition.containerDefinitions[0].image') + - TASK_DEF_IMAGE_TAG=$(echo "${TASK_DEF_IMAGE}" | cut -sd':' -f2) + - if [ -n "$TASK_DEF_IMAGE_TAG" ] && [ "$TASK_DEF_IMAGE_TAG" != "latest" ]; then echo >&2 "Task Definition image tag exists but doesn't equal 'latest'. Exiting"; fi + - echo "Using the \"latest\" image. Proceeding to check number of desired count." + # Determine if the ECS service desired count is greater than zero. If not, do not proceed with updating the deployment + - DESIRED_COUNT="$(aws ecs describe-services --cluster ${TARGET_ECS_CLUSTER_ARN} --service ${ECS_SERVICE_ARN} --region ${TASK_DEF_REGION} --query 'services[0].desiredCount' --output text)" + - '[[ "$DESIRED_COUNT" -ne "0" ]] || { echo >&2 "Desired count for the service is zero. 
No update will be deployed."; exit 0; }' + # Proceed with forcing a new update + - aws ecs update-service --cluster ${TARGET_ECS_CLUSTER_ARN} --service ${ECS_SERVICE_ARN} --force-new-deployment --region ${TASK_DEF_REGION} + - echo -e "\e[0Ksection_end:`date +%s`:deploy_ecs_image\r\e[0K" + rules: + - if: $CI_COMMIT_TITLE =~ /generate CICD stages/ + when: never + + after_script: + - echo "Unsetting the role assumption env vars" + - unset AWS_ACCESS_KEY_ID && unset AWS_SECRET_ACCESS_KEY && unset AWS_SESSION_TOKEN diff --git a/backstage-reference/common/cicd/.gitlab-ci-aws-image-kaniko.yml b/backstage-reference/common/cicd/.gitlab-ci-aws-image-kaniko.yml index 8352fb11..93b669fc 100644 --- a/backstage-reference/common/cicd/.gitlab-ci-aws-image-kaniko.yml +++ b/backstage-reference/common/cicd/.gitlab-ci-aws-image-kaniko.yml @@ -5,11 +5,13 @@ - if: ($CI_PIPELINE_SOURCE == "web" || $CI_PIPELINE_SOURCE == "push") && $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH script: - set -a && source $PROVIDER_PROPS_FILE && set +a - - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "$CI_PROJECT_NAME-$CI_JOB_STAGE" --duration-second=900 --output json) + - export ROLE_NAME=$CI_PROJECT_NAME-$CI_JOB_STAGE # store role session name so that a single env var can be truncated to allowed character length + - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "${ROLE_NAME:0:63}" --duration-second=900 --output json) - export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') - export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SecretAccessKey') - export AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') - export GL_API=https://$CI_SERVER_HOST/api/v4/projects/$CI_PROJECT_ID/variables + - echo "NOTE - if the next command fails, delete these CICD variables and restart the pipeline job - AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN" - > curl --silent --output /dev/null --show-error --fail --request POST --header "PRIVATE-TOKEN: ${ACCESS_TOKEN}" "${GL_API}" @@ -72,7 +74,8 @@ --context "${CI_PROJECT_DIR}" --dockerfile "Dockerfile" --destination "${LC_TAG}:latest" - + --destination "${LC_TAG}:${CI_COMMIT_SHORT_SHA}" + .abstract-delete-aws-creds: rules: - if: $CI_COMMIT_TITLE =~ /generate CICD stages/ diff --git a/backstage-reference/common/cicd/.gitlab-ci-aws-provider-basic.yml b/backstage-reference/common/cicd/.gitlab-ci-aws-provider-basic.yml new file mode 100644 index 00000000..079fe959 --- /dev/null +++ b/backstage-reference/common/cicd/.gitlab-ci-aws-provider-basic.yml @@ -0,0 +1,46 @@ +iac-deployment-env-provider: + stage: build + before_script: + - cat /etc/os-release + - apt-get update + - apt install nodejs npm git python3-pip yq jq -y + - export PIP_BREAK_SYSTEM_PACKAGES=1 + - pip3 install awscli --upgrade + - yarn global add aws-cli typescript@latest aws-cdk@2.88.0 + - yarn --version + - aws --version + - aws sts get-caller-identity + # Store the access token before assuming the environment provisioning role + - ACCESS_TOKEN=`aws secretsmanager get-secret-value --secret-id opa-admin-gitlab-secrets --region ${OPA_PLATFORM_REGION} | jq --raw-output '.SecretString' | jq -r .apiToken` + - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ROLE_ARN" --role-session-name "pipelineJob-$AWS_ACCOUNT" --duration-second=3600 --output json) + - export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') + - export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r 
'.Credentials.SecretAccessKey') + - export AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') + - aws sts get-caller-identity + script: + # Export environment variables + - set -a && source stack-parameters.properties && set +a + - cd .iac/opa-basic-environment + - yarn install + - cdk deploy --outputs-file cdk-output.json --require-approval never + # once CDK finished - extract output params + - jq '.[] ' cdk-output.json | jq -r 'to_entries[]|"\(.key)=\"\(.value)\""' > cdk-output.properties + # export the new variables + - cat cdk-output.properties + - set -a && source cdk-output.properties && set +a + # alter entity details + - cd $CI_PROJECT_DIR/.backstage + - yq -Yi ".metadata.vpc =\"${VPC}\"" catalog-info.yaml + - yq -Yi ".metadata[\"audit-table\"] = \"${AuditTable}\"" catalog-info.yaml + - yq -Yi ".metadata[\"operation-role\"] = \"${OperationsRoleARN}\"" catalog-info.yaml + - yq -Yi ".metadata[\"provisioning-role\"] = \"${ProvisioningRoleARN}\"" catalog-info.yaml + - yq -Yi ".metadata[\"stack-name\"] = \"${StackName}\"" catalog-info.yaml + - cat catalog-info.yaml + - git config --global user.email "fsi-pace-pe@amazon.com" + - git config --global user.name "OPA CICD User" + - git add catalog-info.yaml + - git status + - UPDATE_COUNT=$(git diff --cached --numstat | wc -l | sed 's/ *$//g') + - echo "The number of files that will be committed is $UPDATE_COUNT" + - if [[ "$UPDATE_COUNT" -gt "0" ]]; then git commit -m "updating entity details" --quiet; fi + - if [[ "$UPDATE_COUNT" -gt "0" ]]; then git push -o ci.skip https://oauth2:$ACCESS_TOKEN@$CI_SERVER_HOST/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME HEAD:main; fi diff --git a/backstage-reference/common/cicd/.gitlab-ci-aws-provider-ecs-ec2.yml b/backstage-reference/common/cicd/.gitlab-ci-aws-provider-ecs-ec2.yml new file mode 100644 index 00000000..53a114ca --- /dev/null +++ b/backstage-reference/common/cicd/.gitlab-ci-aws-provider-ecs-ec2.yml @@ -0,0 +1,70 @@ +iac-deployment-env-provider: + stage: build + before_script: + - echo -e "\e[0Ksection_start:`date +%s`:log_os[collapsed=true]\r\e[0KOS Info" + - cat /etc/os-release + - echo -e "\e[0Ksection_end:`date +%s`:log_os\r\e[0K" + - echo -e "\e[0Ksection_start:`date +%s`:get_tools[collapsed=true]\r\e[0KGet Tools" + - apt-get update + - apt install nodejs npm git python3-pip yq jq -y + - export PIP_BREAK_SYSTEM_PACKAGES=1 + - pip3 install awscli --upgrade + - yarn global add aws-cli typescript@latest aws-cdk@2.120.0 + - yarn --version + - aws --version + - echo -e "\e[0Ksection_end:`date +%s`:get_tools\r\e[0K" + # Export environment variables + - set -a && source stack-parameters.properties && set +a + - echo -e "\e[0Ksection_start:`date +%s`:aws_identity[collapsed=true]\r\e[0KAssume AWS IAM Role" + - aws sts get-caller-identity + # Store the access token before assuming the environment provisioning role + - ACCESS_TOKEN=`aws secretsmanager get-secret-value --secret-id opa-admin-gitlab-secrets --region ${OPA_PLATFORM_REGION} | jq --raw-output '.SecretString' | jq -r .apiToken` + - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ROLE_ARN" --role-session-name "pipelineJob-$AWS_ACCOUNT_ID" --duration-second=3600 --output json) + - export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') + - export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SecretAccessKey') + - export AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') + - aws sts get-caller-identity + - echo -e "\e[0Ksection_end:`date 
+%s`:aws_identity\r\e[0K" + script: + # Export environment variables + - set -a && source stack-parameters.properties && set +a + - cd .iac/opa-ecs-ec2-environment + + - echo -e "\e[0Ksection_start:`date +%s`:yarn_install[collapsed=true]\r\e[0KYarn Install" + - yarn install + - echo -e "\e[0Ksection_end:`date +%s`:yarn_install\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:cdk_deploy[collapsed=true]\r\e[0KCDK Deploy" + - cdk deploy --outputs-file cdk-output.json --require-approval never + - echo -e "\e[0Ksection_end:`date +%s`:cdk_deploy\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:extract_cdk_output[collapsed=true]\r\e[0KExport CDK Output" + # once CDK finished - extract output params + - jq '.[] ' cdk-output.json | jq -r 'to_entries[]|"\(.key)=\"\(.value)\""' > cdk-output.properties + # export the new variables + - cat cdk-output.properties + - set -a && source cdk-output.properties && set +a + - echo -e "\e[0Ksection_end:`date +%s`:extract_cdk_output\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:backstage_entity[collapsed=true]\r\e[0KUpdate Backstage Entity" + # alter entity details + - cd $CI_PROJECT_DIR/.backstage + - yq -Yi ".metadata.vpc =\"${VPC}\"" catalog-info.yaml + - yq -Yi ".metadata[\"clusterName\"] = \"${ClusterName}\"" catalog-info.yaml + - yq -Yi ".metadata[\"auditTable\"] = \"${AuditTable}\"" catalog-info.yaml + - yq -Yi ".metadata[\"operationRole\"] = \"${OperationsRoleARN}\"" catalog-info.yaml + - yq -Yi ".metadata[\"provisioningRole\"] = \"${ProvisioningRoleARN}\"" catalog-info.yaml + - yq -Yi ".metadata[\"stackName\"] = \"${StackName}\"" catalog-info.yaml + - cat catalog-info.yaml + - echo -e "\e[0Ksection_end:`date +%s`:backstage_entity\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:git[collapsed=true]\r\e[0KUpdate Git Repo" + - git config --global user.email "fsi-pace-pe@amazon.com" + - git config --global user.name "OPA CICD User" + - git add catalog-info.yaml + - git status + - UPDATE_COUNT=$(git diff --cached --numstat | wc -l | sed 's/ *$//g') + - echo "The number of files that will be committed is $UPDATE_COUNT" + - if [[ "$UPDATE_COUNT" -gt "0" ]]; then git commit -m "updating entity details" --quiet; fi + - if [[ "$UPDATE_COUNT" -gt "0" ]]; then git push -o ci.skip https://oauth2:$ACCESS_TOKEN@$CI_SERVER_HOST/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME HEAD:main; fi + - echo -e "\e[0Ksection_end:`date +%s`:git\r\e[0K" diff --git a/backstage-reference/common/cicd/.gitlab-ci-aws-provider-ecs.yml b/backstage-reference/common/cicd/.gitlab-ci-aws-provider-ecs.yml index 03302e2e..0ff7725a 100644 --- a/backstage-reference/common/cicd/.gitlab-ci-aws-provider-ecs.yml +++ b/backstage-reference/common/cicd/.gitlab-ci-aws-provider-ecs.yml @@ -1,42 +1,67 @@ iac-deployment-env-provider: stage: build before_script: + - echo -e "\e[0Ksection_start:`date +%s`:log_os[collapsed=true]\r\e[0KOS Info" - cat /etc/os-release + - echo -e "\e[0Ksection_end:`date +%s`:log_os\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:get_tools[collapsed=true]\r\e[0KGet Tools" - apt-get update - apt install nodejs npm git python3-pip yq jq -y - export PIP_BREAK_SYSTEM_PACKAGES=1 - pip3 install awscli --upgrade - - yarn global add aws-cli typescript@latest aws-cdk@2.88.0 + - yarn global add aws-cli typescript@latest aws-cdk@2.120.0 - yarn --version - aws --version + - echo -e "\e[0Ksection_end:`date +%s`:get_tools\r\e[0K" + + # Export environment variables + - set -a && source stack-parameters.properties && set +a + + - echo -e "\e[0Ksection_start:`date 
+%s`:aws_identity[collapsed=true]\r\e[0KAssume AWS IAM Role" - aws sts get-caller-identity # Store the access token before assuming the environment provisioning role - ACCESS_TOKEN=`aws secretsmanager get-secret-value --secret-id opa-admin-gitlab-secrets --region ${OPA_PLATFORM_REGION} | jq --raw-output '.SecretString' | jq -r .apiToken` - - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ROLE_ARN" --role-session-name "pipelineJob-$AWS_ACCOUNT" --duration-second=3600 --output json) + - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ROLE_ARN" --role-session-name "pipelineJob-$AWS_ACCOUNT_ID" --duration-second=3600 --output json) - export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') - export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SecretAccessKey') - export AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') - aws sts get-caller-identity + - echo -e "\e[0Ksection_end:`date +%s`:aws_identity\r\e[0K" script: # Export environment variables - set -a && source stack-parameters.properties && set +a - cd .iac/opa-ecs-environment + + - echo -e "\e[0Ksection_start:`date +%s`:yarn_install[collapsed=true]\r\e[0KYarn Install" - yarn install + - echo -e "\e[0Ksection_end:`date +%s`:yarn_install\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:cdk_deploy[collapsed=true]\r\e[0KCDK Deploy" - cdk deploy --outputs-file cdk-output.json --require-approval never + - echo -e "\e[0Ksection_end:`date +%s`:cdk_deploy\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:extract_cdk_output[collapsed=true]\r\e[0KExport CDK Output" # once CDK finished - extract output params - jq '.[] ' cdk-output.json | jq -r 'to_entries[]|"\(.key)=\"\(.value)\""' > cdk-output.properties # export the new variables - cat cdk-output.properties - set -a && source cdk-output.properties && set +a + - echo -e "\e[0Ksection_end:`date +%s`:extract_cdk_output\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:backstage_entity[collapsed=true]\r\e[0KUpdate Backstage Entity" # alter entity details - cd $CI_PROJECT_DIR/.backstage - yq -Yi ".metadata.vpc =\"${VPC}\"" catalog-info.yaml - - yq -Yi ".metadata[\"cluster-name\"] = \"${ClusterName}\"" catalog-info.yaml - - yq -Yi ".metadata[\"audit-table\"] = \"${AuditTable}\"" catalog-info.yaml - - yq -Yi ".metadata[\"operation-role\"] = \"${OperationsRoleARN}\"" catalog-info.yaml - - yq -Yi ".metadata[\"provisioning-role\"] = \"${ProvisioningRoleARN}\"" catalog-info.yaml - - yq -Yi ".metadata[\"stack-name\"] = \"${StackName}\"" catalog-info.yaml + - yq -Yi ".metadata[\"clusterName\"] = \"${ClusterName}\"" catalog-info.yaml + - yq -Yi ".metadata[\"auditTable\"] = \"${AuditTable}\"" catalog-info.yaml + - yq -Yi ".metadata[\"operationRole\"] = \"${OperationsRoleARN}\"" catalog-info.yaml + - yq -Yi ".metadata[\"provisioningRole\"] = \"${ProvisioningRoleARN}\"" catalog-info.yaml + - yq -Yi ".metadata[\"stackName\"] = \"${StackName}\"" catalog-info.yaml - cat catalog-info.yaml + - echo -e "\e[0Ksection_end:`date +%s`:backstage_entity\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:git[collapsed=true]\r\e[0KUpdate Git Repo" - git config --global user.email "fsi-pace-pe@amazon.com" - git config --global user.name "OPA CICD User" - git add catalog-info.yaml @@ -45,3 +70,4 @@ iac-deployment-env-provider: - echo "The number of files that will be committed is $UPDATE_COUNT" - if [[ "$UPDATE_COUNT" -gt "0" ]]; then git commit -m "updating entity details" --quiet; fi - if [[ "$UPDATE_COUNT" -gt "0" ]]; then git push -o ci.skip 
https://oauth2:$ACCESS_TOKEN@$CI_SERVER_HOST/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME HEAD:main; fi + - echo -e "\e[0Ksection_end:`date +%s`:git\r\e[0K" diff --git a/backstage-reference/common/cicd/.gitlab-ci-aws-provider-eks.yml b/backstage-reference/common/cicd/.gitlab-ci-aws-provider-eks.yml new file mode 100644 index 00000000..a3e64565 --- /dev/null +++ b/backstage-reference/common/cicd/.gitlab-ci-aws-provider-eks.yml @@ -0,0 +1,148 @@ +iac-deployment-env-provider: + stage: build + before_script: + - echo -e "\e[0Ksection_start:`date +%s`:log_os[collapsed=true]\r\e[0KOS Info" + - cat /etc/os-release + - echo -e "\e[0Ksection_end:`date +%s`:log_os\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:get_tools[collapsed=true]\r\e[0KGet Tools" + - apt-get update + - apt install nodejs npm git python3-pip yq jq -y + - export PIP_BREAK_SYSTEM_PACKAGES=1 + - pip3 install awscli --upgrade + - yarn global add aws-cli typescript@latest aws-cdk@2.120.0 + - yarn --version + - aws --version + - echo -e "\e[0Ksection_end:`date +%s`:get_tools\r\e[0K" + + # Export environment variables + - set -a && source stack-parameters.properties && set +a + + - echo -e "\e[0Ksection_start:`date +%s`:aws_identity[collapsed=true]\r\e[0KAssume AWS IAM Role" + - aws sts get-caller-identity + # Store the access token before assuming the environment provisioning role + - ACCESS_TOKEN=`aws secretsmanager get-secret-value --secret-id opa-admin-gitlab-secrets --region ${OPA_PLATFORM_REGION} | jq --raw-output '.SecretString' | jq -r .apiToken` + - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ROLE_ARN" --role-session-name "pipelineJob-$AWS_ACCOUNT_ID" --duration-second=3600 --output json) + - export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') + - export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SecretAccessKey') + - export AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') + - aws sts get-caller-identity + - echo -e "\e[0Ksection_end:`date +%s`:aws_identity\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:get_kubectl[collapsed=true]\r\e[0KInstall Kubectl" + - apt install sudo + - curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-archive-keyring.gpg + - echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list + - sudo apt update + - sudo apt install kubectl + - echo -e "\e[0Ksection_end:`date +%s`:get_kubectl\r\e[0K" + + script: + # Export environment variables + - set -a && source stack-parameters.properties && set +a + + - echo -e "\e[0Ksection_start:`date +%s`:existing_cluster[collapsed=true]\r\e[0KLookup Existing Cluster If Applicable" + - | + if [[ -z "$VPC_ID" && ! 
-z "$EXISTING_CLUSTER_NAME" ]]; then + export VPC_ID=$(aws eks describe-cluster --name $EXISTING_CLUSTER_NAME --region us-west-2 --query "cluster.resourcesVpcConfig.vpcId" --output text) + echo "Imported EKS Cluster VPC_ID is $VPC_ID" + echo "VPC_ID=$VPC_ID" >> stack-parameters.properties + git add stack-parameters.properties + fi + - echo -e "\e[0Ksection_end:`date +%s`:existing_cluster\r\e[0K" + + - cd .iac/opa-eks-environment + + - echo -e "\e[0Ksection_start:`date +%s`:yarn_install[collapsed=true]\r\e[0KYarn Install" + - yarn install + - echo -e "\e[0Ksection_end:`date +%s`:yarn_install\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:cdk_deploy[collapsed=true]\r\e[0KCDK Deploy" + - echo "Deploying the cluster and saving outputs in a file" + - cdk deploy --outputs-file cdk-output.json --require-approval never + - echo -e "\e[0Ksection_end:`date +%s`:cdk_deploy\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:extract_cdk_output[collapsed=true]\r\e[0KExport CDK Output" + # once CDK finished - extract output params + - jq '.[] ' cdk-output.json | jq -r 'to_entries[]|"\(.key)=\"\(.value)\""' > cdk-output.properties + # export the new variables + - cat cdk-output.properties + - set -a && source cdk-output.properties && set +a + - echo -e "\e[0Ksection_end:`date +%s`:extract_cdk_output\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:lambda_arn[collapsed=true]\r\e[0KGet kubectl lambda ARN" + # Look up the kubectl cluster creation role if we dont know it already + - | + if [[ -z "$KUBECTL_LAMBDA_ASSUME_ROLE_ARN" ]]; then + export KUBECTL_LAMBDA_ASSUME_ROLE_ARN=$(aws cloudformation describe-stack-resources --stack-name $StackName --query "StackResources[?ResourceType=='AWS::IAM::Role' && contains(LogicalResourceId, 'CreationRole')].PhysicalResourceId | [0]" --output text) + export KUBECTL_LAMBDA_ASSUME_ROLE_ARN="arn:aws:iam::$AWS_ACCOUNT_ID:role/$KUBECTL_LAMBDA_ASSUME_ROLE_ARN" + echo "KUBECTL_LAMBDA_ASSUME_ROLE_ARN is $KUBECTL_LAMBDA_ASSUME_ROLE_ARN" + else + export KUBECTL_LAMBDA_ASSUME_ROLE_ARN=$KUBECTL_LAMBDA_ASSUME_ROLE_ARN + fi + # Look up the kubectl lambda ARN if we dont know it already + - | + if [[ -z "$KUBECTL_LAMBDA_ARN" ]]; then + export KUBECTL_STACK_ARN=$(aws cloudformation describe-stack-resources --stack-name $StackName --query "StackResources[?ResourceType=='AWS::CloudFormation::Stack' && contains(LogicalResourceId, 'KubectlProvider')].PhysicalResourceId | [0]") + echo "KUBECTL_STACK_ARN is $KUBECTL_STACK_ARN" + export KUBECTL_STACK_NAME=$(echo "${KUBECTL_STACK_ARN%/*}") + export KUBECTL_STACK_NAME=$(echo "${KUBECTL_STACK_NAME##*/}") + echo "KUBECTL_STACK_NAME is $KUBECTL_STACK_NAME" + export KUBECTL_LAMBDA_ARN=$(aws cloudformation describe-stack-resources --stack-name $KUBECTL_STACK_NAME --query "StackResources[?ResourceType=='AWS::Lambda::Function' && starts_with(LogicalResourceId, 'Handler')].PhysicalResourceId | [0]" --output text) + export KUBECTL_LAMBDA_ARN="arn:aws:lambda:$AWS_DEFAULT_REGION:$AWS_ACCOUNT_ID:function:$KUBECTL_LAMBDA_ARN" + else + export KUBECTL_LAMBDA_ARN=$KUBECTL_LAMBDA_ARN + fi + - echo "KUBECTL_LAMBDA_ARN is $KUBECTL_LAMBDA_ARN" + - echo -e "\e[0Ksection_end:`date +%s`:lambda_arn\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:backstage_entity[collapsed=true]\r\e[0KUpdate Backstage Entity" + # alter entity details + - cd $CI_PROJECT_DIR/.backstage + - yq -Yi ".metadata.vpc =\"${VPC}\"" catalog-info.yaml + - yq -Yi ".metadata[\"clusterName\"] = \"${ClusterName}\"" catalog-info.yaml + - yq -Yi ".metadata[\"auditTable\"] = 
\"${AuditTable}\"" catalog-info.yaml + - yq -Yi ".metadata[\"operationRole\"] = \"${OperationsRoleARN}\"" catalog-info.yaml + - yq -Yi ".metadata[\"provisioningRole\"] = \"${ProvisioningRoleARN}\"" catalog-info.yaml + - yq -Yi ".metadata[\"clusterAdminRole\"] = \"${ClusterAdminRoleDirectARN}\"" catalog-info.yaml + - yq -Yi ".metadata[\"kubectlLambdaArn\"] = \"${KUBECTL_LAMBDA_ARN}\"" catalog-info.yaml + - yq -Yi ".metadata[\"kubectlLambdaAssumeRoleArn\"] = \"${KUBECTL_LAMBDA_ASSUME_ROLE_ARN}\"" catalog-info.yaml + - yq -Yi ".metadata[\"kubectlLambdaExecutionRoleArn\"] = \"${KubectlLambdaRoleDirectARN}\"" catalog-info.yaml + - yq -Yi ".metadata[\"stackName\"] = \"${StackName}\"" catalog-info.yaml + - cat catalog-info.yaml + - echo -e "\e[0Ksection_end:`date +%s`:backstage_entity\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:git[collapsed=true]\r\e[0KUpdate Git Repo" + - git config --global user.email "fsi-pace-pe@amazon.com" + - git config --global user.name "OPA CICD User" + - git add catalog-info.yaml + - git status + - UPDATE_COUNT=$(git diff --cached --numstat | wc -l | sed 's/ *$//g') + - echo "The number of files that will be committed is $UPDATE_COUNT" + - if [[ "$UPDATE_COUNT" -gt "0" ]]; then git commit -m "updating entity details" --quiet; fi + - if [[ "$UPDATE_COUNT" -gt "0" ]]; then git push -o ci.skip https://oauth2:$ACCESS_TOKEN@$CI_SERVER_HOST/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME HEAD:main; fi + - echo -e "\e[0Ksection_end:`date +%s`:git\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:lambda[collapsed=true]\r\e[0KUpdate Kubectl Lambda" + - cd $CI_PROJECT_DIR/.iac/opa-eks-environment + - echo "Updating ${KUBECTL_LAMBDA_ARN#*function:}" + - aws lambda update-function-code --function-name ${KUBECTL_LAMBDA_ARN#*function:} --zip-file fileb://kubectl-helm-lambda.zip + - echo -e "\e[0Ksection_end:`date +%s`:lambda\r\e[0K" + + # The below code can be uncommented if you need to run some kubectl commands after the provider is created + # - echo -e "\e[0Ksection_start:`date +%s`:kubectl[collapsed=true]\r\e[0KExecute kubectl commands" + # # Unset AWS_xxx env vars so that we will use the default EC2 instance role + # - unset AWS_ACCESS_KEY_ID && unset AWS_SECRET_ACCESS_KEY && unset AWS_SESSION_TOKEN + # - aws sts get-caller-identity + # # Now assume the newly-created provider provisioning role, which has K8s cluster access + # - export ROLE_NAME=$CI_PROJECT_NAME-$CI_JOB_STAGE # store role session name so that a single env var can be truncated to allowed character length + # - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "${ROLE_NAME:0:63}" --duration-second=3600 --output json) + # - export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') + # - export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SecretAccessKey') + # - export AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') + # - aws sts get-caller-identity + # # Get configs for a kubectl session + # - aws eks update-kubeconfig --region $AWS_DEFAULT_REGION --name opa-$ENV_NAME-cluster + # - echo "Performing rolling restart on coredns to ensure it uses the latest aws-logging ConfigMap" + # - kubectl rollout restart deployment/coredns -n kube-system + # - echo -e "\e[0Ksection_end:`date +%s`:kubectl\r\e[0K" diff --git a/backstage-reference/common/cicd/.gitlab-ci-aws-provider-serverless.yml b/backstage-reference/common/cicd/.gitlab-ci-aws-provider-serverless.yml index e53eba8f..3489156d 100644 --- 
a/backstage-reference/common/cicd/.gitlab-ci-aws-provider-serverless.yml +++ b/backstage-reference/common/cicd/.gitlab-ci-aws-provider-serverless.yml @@ -1,22 +1,34 @@ iac-deployment-env-provider: stage: build before_script: + - echo -e "\e[0Ksection_start:`date +%s`:log_os[collapsed=true]\r\e[0KOS Info" - cat /etc/os-release + - echo -e "\e[0Ksection_end:`date +%s`:log_os\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:get_tools[collapsed=true]\r\e[0KGet Tools" - apt-get update - apt install nodejs npm git python3-pip yq jq -y - export PIP_BREAK_SYSTEM_PACKAGES=1 - pip3 install awscli --upgrade - - yarn global add aws-cli typescript@latest aws-cdk@2.88.0 + - yarn global add aws-cli typescript@latest aws-cdk@2.120.0 - yarn --version - aws --version + - echo -e "\e[0Ksection_end:`date +%s`:get_tools\r\e[0K" + + # Export environment variables + - set -a && source stack-parameters.properties && set +a + + - echo -e "\e[0Ksection_start:`date +%s`:aws_identity[collapsed=true]\r\e[0KAssume AWS IAM Role" - aws sts get-caller-identity # Store the access token before assuming the environment provisioning role - ACCESS_TOKEN=`aws secretsmanager get-secret-value --secret-id opa-admin-gitlab-secrets --region ${OPA_PLATFORM_REGION} | jq --raw-output '.SecretString' | jq -r .apiToken` - - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ROLE_ARN" --role-session-name "pipelineJob-$AWS_ACCOUNT" --duration-second=3600 --output json) + - ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ROLE_ARN" --role-session-name "pipelineJob-$AWS_ACCOUNT_ID" --duration-second=3600 --output json) - export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') - export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SecretAccessKey') - export AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') - aws sts get-caller-identity + - echo -e "\e[0Ksection_end:`date +%s`:aws_identity\r\e[0K" + script: # Export environment variables - set -a && source stack-parameters.properties && set +a @@ -31,10 +43,10 @@ iac-deployment-env-provider: # alter entity details - cd $CI_PROJECT_DIR/.backstage - yq -Yi ".metadata.vpc =\"${VPC}\"" catalog-info.yaml - - yq -Yi ".metadata[\"audit-table\"] = \"${AuditTable}\"" catalog-info.yaml - - yq -Yi ".metadata[\"operation-role\"] = \"${OperationsRoleARN}\"" catalog-info.yaml - - yq -Yi ".metadata[\"provisioning-role\"] = \"${ProvisioningRoleARN}\"" catalog-info.yaml - - yq -Yi ".metadata[\"stack-name\"] = \"${StackName}\"" catalog-info.yaml + - yq -Yi ".metadata[\"auditTable\"] = \"${AuditTable}\"" catalog-info.yaml + - yq -Yi ".metadata[\"operationRole\"] = \"${OperationsRoleARN}\"" catalog-info.yaml + - yq -Yi ".metadata[\"provisioningRole\"] = \"${ProvisioningRoleARN}\"" catalog-info.yaml + - yq -Yi ".metadata[\"stackName\"] = \"${StackName}\"" catalog-info.yaml - cat catalog-info.yaml - git config --global user.email "fsi-pace-pe@amazon.com" - git config --global user.name "OPA CICD User" diff --git a/backstage-reference/common/cicd/.gitlab-ci-job-defaults-cdk.yml b/backstage-reference/common/cicd/.gitlab-ci-job-defaults-cdk.yml index cfb5a0bb..b140b6f9 100644 --- a/backstage-reference/common/cicd/.gitlab-ci-job-defaults-cdk.yml +++ b/backstage-reference/common/cicd/.gitlab-ci-job-defaults-cdk.yml @@ -2,21 +2,41 @@ default: image: name: node:18 before_script: + - echo -e "\e[0Ksection_start:`date +%s`:log_os[collapsed=true]\r\e[0KOS Info" - cat /etc/os-release + - echo -e "\e[0Ksection_end:`date +%s`:log_os\r\e[0K" + + - echo -e 
"\e[0Ksection_start:`date +%s`:get_tools[collapsed=true]\r\e[0KGet Tools" - apt-get update - - apt install nodejs npm git python3-pip yq jq -y - - export PIP_BREAK_SYSTEM_PACKAGES=1 - - pip3 install awscli --upgrade - - yarn global add aws-cli typescript@latest aws-cdk@2.88.0 + - apt install nodejs npm git python3-pip yq jq unzip -y + - yarn global add typescript@latest aws-cdk@2.120.0 - yarn --version + - echo -e "\e[0Ksection_end:`date +%s`:get_tools\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:get_aws_cli[collapsed=true]\r\e[0KGet AWS CLI v2" + # Explicitly install v2 of awscli + - curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" + - unzip -qq awscliv2.zip + - echo "Installing AWS CLI..." + - ./aws/install 1> /dev/null + - rm awscliv2.zip - aws --version + - echo -e "\e[0Ksection_end:`date +%s`:get_aws_cli\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:reset_aws_identity[collapsed=true]\r\e[0KReset AWS Caller Identity" # Unset AWS_xxx env vars so that we will use the default EC2 instance role - unset AWS_ACCESS_KEY_ID && unset AWS_SECRET_ACCESS_KEY && unset AWS_SESSION_TOKEN - aws sts get-caller-identity + - echo -e "\e[0Ksection_end:`date +%s`:reset_aws_identity\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:gitlab_access[collapsed=true]\r\e[0KConfigure GitLab Access" # Storing gitlab access token before changing role - export ACCESS_TOKEN=`aws secretsmanager get-secret-value --secret-id opa-admin-gitlab-secrets --region ${OPA_PLATFORM_REGION} | jq --raw-output '.SecretString' | jq -r .apiToken` - git config --global user.email "fsi-pace-pe@amazon.com" - git config --global user.name "OPA CICD User" + - echo -e "\e[0Ksection_end:`date +%s`:gitlab_access\r\e[0K" + + - echo -e "\e[0Ksection_start:`date +%s`:download_ref_repo_scripts[collapsed=true]\r\e[0KGet Reference Repo Common Files" # Clone reference repo so that apps can utilize shared files. This allows us to have # a single point of change for reusable files, instead of having to change them in # every application repository. 
@@ -25,3 +45,4 @@ default: - rm -rf $CI_PROJECT_DIR/backstage-reference - echo "Shared files are available in $CI_PROJECT_DIR/cicd" - ls -la $CI_PROJECT_DIR/cicd/scripts + - echo -e "\e[0Ksection_end:`date +%s`:download_ref_repo_scripts\r\e[0K" diff --git a/backstage-reference/common/cicd/scripts/example-eks-note-app/create-ci-stages.sh b/backstage-reference/common/cicd/scripts/example-eks-nodejs-rds/create-ci-stages.sh old mode 100644 new mode 100755 similarity index 70% rename from backstage-reference/common/cicd/scripts/example-eks-note-app/create-ci-stages.sh rename to backstage-reference/common/cicd/scripts/example-eks-nodejs-rds/create-ci-stages.sh index 3cb29a80..f6230ef5 --- a/backstage-reference/common/cicd/scripts/example-eks-note-app/create-ci-stages.sh +++ b/backstage-reference/common/cicd/scripts/example-eks-nodejs-rds/create-ci-stages.sh @@ -4,6 +4,8 @@ scriptDir="$CI_PROJECT_DIR/.awsdeployment" mkdir -p $scriptDir/jobs mkdir -p $scriptDir/providers +$CI_PROJECT_DIR/cicd/scripts/k8s/install-kubectl.sh + echo "looking for providers in $scriptDir/providers" ls -la $scriptDir/providers @@ -53,7 +55,6 @@ do rm $STAGE_FILE_PATH 2> /dev/null || true # regenerate all jobs files from scratch - # Run IaC Job echo "iac-deployment-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" > $STAGE_FILE_PATH echo " extends: .abstract-iac-deployment" >> $STAGE_FILE_PATH echo " rules:" >> $STAGE_FILE_PATH @@ -69,6 +70,14 @@ do if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then echo " when: manual" >> $STAGE_FILE_PATH fi + echo " - if: \"\$CI_COMMIT_TITLE =~ /^Bind Resource to env ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}/\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " when: manual" >> $STAGE_FILE_PATH + fi + echo " - if: \"\$CI_COMMIT_TITLE =~ /^unBind Resource to env ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}/\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " when: manual" >> $STAGE_FILE_PATH + fi echo " - if: \"\$CI_COMMIT_TITLE =~ /Added multiple environment stages/\"" >> $STAGE_FILE_PATH if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then echo " when: manual" >> $STAGE_FILE_PATH @@ -79,7 +88,6 @@ do echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH echo "" >> $STAGE_FILE_PATH - # Get AWS Creds Job echo "get-aws-creds-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" >> $STAGE_FILE_PATH if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then echo " extends: .abstract-get-aws-creds-manual" >> $STAGE_FILE_PATH @@ -92,7 +100,6 @@ do echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH echo "" >> $STAGE_FILE_PATH - # Build Container Image Job echo "build-image-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" >> $STAGE_FILE_PATH echo " extends: .abstract-build-image" >> $STAGE_FILE_PATH echo " needs:" >> $STAGE_FILE_PATH @@ -102,10 +109,9 @@ do echo " variables:" >> $STAGE_FILE_PATH echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH echo "" >> $STAGE_FILE_PATH - - # Delete AWS Creds Job - echo "delete-aws-creds-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" >> $STAGE_FILE_PATH - echo " extends: .abstract-delete-aws-creds" >> $STAGE_FILE_PATH + + echo "git-commit-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" >> $STAGE_FILE_PATH 
+ echo " extends: .abstract-git-commit" >> $STAGE_FILE_PATH echo " needs:" >> $STAGE_FILE_PATH echo " - build-image-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH echo " stage: ${TARGET_ENV_NAME}-stage" >> $STAGE_FILE_PATH @@ -113,12 +119,52 @@ do echo " variables:" >> $STAGE_FILE_PATH echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH echo "" >> $STAGE_FILE_PATH - + git add $CI_PROJECT_DIR/.gitlab-ci.yml git add $STAGE_FILE_PATH + + export K8S_CONFIG_DIR=$(yq -r .metadata.k8sConfigDirName $CI_PROJECT_DIR/.backstage/catalog-info.yaml) + + if [[ ! -d "$CI_PROJECT_DIR/$K8S_CONFIG_DIR/$TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME" ]]; then + + if [[ -f "$CI_PROJECT_DIR/$K8S_CONFIG_DIR/Chart.yaml" ]]; then + export IS_HELM="true" + export IS_KUSTOMIZE="false" + else + export IS_HELM="false" + export IS_KUSTOMIZE="true" + fi + + if [[ "$IS_HELM" == "true" ]]; then + $CI_PROJECT_DIR/cicd/scripts/k8s/install-helm.sh + fi + + echo -e "\e[0Ksection_start:`date +%s`:k8s-config[collapsed=true]\r\e[0KCreate k8s Configs for Environment" + newK8sEnvDir="$CI_PROJECT_DIR/$K8S_CONFIG_DIR/$TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME" + + if [[ "$IS_HELM" == "true" ]]; then + mkdir $newK8sEnvDir + cp $CI_PROJECT_DIR/$K8S_CONFIG_DIR/values.yaml $newK8sEnvDir + elif [[ "$IS_KUSTOMIZE" == "true" ]]; then + cp -r $CI_PROJECT_DIR/$K8S_CONFIG_DIR/new-env-template/ $newK8sEnvDir + fi + + $CI_PROJECT_DIR/cicd/scripts/k8s/save-template-output.sh "skipGitAddTemplateOutput" "$TARGET_ENV_NAME-${TARGET_ENV_PROVIDER_NAME}/" || exit 1 + git add $newK8sEnvDir + + # Do not add these files to git since they will contain unresolved variables + # that we cannot resolve until after the app image has been built + git reset $newK8sEnvDir/next-release.yaml + git reset $newK8sEnvDir/next-release.json + + echo -e "\e[0Ksection_end:`date +%s`:k8s-config\r\e[0K" + else + echo "Skipping creating k8s environment directory $CI_PROJECT_DIR/$K8S_CONFIG_DIR/$TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME since it already exists" + fi done +echo -e "\e[0Ksection_start:`date +%s`:git[collapsed=true]\r\e[0KUpdate Git Repo" UPDATE_COUNT=$(git diff --cached --numstat | wc -l | sed 's/ *$//g') echo "The number of files that will be committed is $UPDATE_COUNT" git status @@ -132,3 +178,4 @@ if [[ "$UPDATE_COUNT" -gt "0" ]]; then fi fi if [[ "$UPDATE_COUNT" -gt "0" ]]; then git push https://oauth2:$ACCESS_TOKEN@$CI_SERVER_HOST/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME HEAD:main; fi +echo -e "\e[0Ksection_end:`date +%s`:git\r\e[0K" diff --git a/backstage-reference/common/cicd/scripts/example-generic/create-ci-stages.sh b/backstage-reference/common/cicd/scripts/example-generic/create-ci-stages.sh index 1e248679..25d9ef79 100755 --- a/backstage-reference/common/cicd/scripts/example-generic/create-ci-stages.sh +++ b/backstage-reference/common/cicd/scripts/example-generic/create-ci-stages.sh @@ -68,6 +68,14 @@ do if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then echo " when: manual" >> $STAGE_FILE_PATH fi + echo " - if: \"\$CI_COMMIT_TITLE =~ /^Bind Resource to env ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}/\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " when: manual" >> $STAGE_FILE_PATH + fi + echo " - if: \"\$CI_COMMIT_TITLE =~ /^unBind Resource to env ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}/\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " 
when: manual" >> $STAGE_FILE_PATH + fi echo " - if: \"\$CI_COMMIT_TITLE =~ /Added multiple environment stages/\"" >> $STAGE_FILE_PATH if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then echo " when: manual" >> $STAGE_FILE_PATH diff --git a/backstage-reference/common/cicd/scripts/example-nodejs-rds/create-ci-stages.sh b/backstage-reference/common/cicd/scripts/example-nodejs-rds/create-ci-stages.sh index c99320a4..dd239e80 100755 --- a/backstage-reference/common/cicd/scripts/example-nodejs-rds/create-ci-stages.sh +++ b/backstage-reference/common/cicd/scripts/example-nodejs-rds/create-ci-stages.sh @@ -107,6 +107,18 @@ do echo " variables:" >> $STAGE_FILE_PATH echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH echo "" >> $STAGE_FILE_PATH + + # Deploy Container Image Job + echo "deploy-image-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" >> $STAGE_FILE_PATH + echo " extends: .abstract-deploy-ecs-image" >> $STAGE_FILE_PATH + echo " needs:" >> $STAGE_FILE_PATH + echo " - build-image-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH + echo " stage: ${TARGET_ENV_NAME}-stage" >> $STAGE_FILE_PATH + echo " environment: ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH + echo " variables:" >> $STAGE_FILE_PATH + echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH + echo " BACKSTAGE_ENTITY_FILE: .backstage/catalog-info.yaml" >> $STAGE_FILE_PATH + echo "" >> $STAGE_FILE_PATH echo "delete-aws-creds-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" >> $STAGE_FILE_PATH echo " extends: .abstract-delete-aws-creds" >> $STAGE_FILE_PATH diff --git a/backstage-reference/common/cicd/scripts/example-nodejs/create-ci-stages.sh b/backstage-reference/common/cicd/scripts/example-nodejs/create-ci-stages.sh index 84e33ab8..c23dd6d4 100755 --- a/backstage-reference/common/cicd/scripts/example-nodejs/create-ci-stages.sh +++ b/backstage-reference/common/cicd/scripts/example-nodejs/create-ci-stages.sh @@ -110,6 +110,18 @@ do echo " variables:" >> $STAGE_FILE_PATH echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH echo "" >> $STAGE_FILE_PATH + + # Deploy Container Image Job + echo "deploy-image-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" >> $STAGE_FILE_PATH + echo " extends: .abstract-deploy-ecs-image" >> $STAGE_FILE_PATH + echo " needs:" >> $STAGE_FILE_PATH + echo " - build-image-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH + echo " stage: ${TARGET_ENV_NAME}-stage" >> $STAGE_FILE_PATH + echo " environment: ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH + echo " variables:" >> $STAGE_FILE_PATH + echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH + echo " BACKSTAGE_ENTITY_FILE: .backstage/catalog-info.yaml" >> $STAGE_FILE_PATH + echo "" >> $STAGE_FILE_PATH # Delete AWS Creds Job echo "delete-aws-creds-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" >> $STAGE_FILE_PATH diff --git a/backstage-reference/common/cicd/scripts/example-python-flask-eks/create-ci-stages.sh b/backstage-reference/common/cicd/scripts/example-python-flask-eks/create-ci-stages.sh new file mode 100755 index 00000000..26111c96 --- /dev/null +++ b/backstage-reference/common/cicd/scripts/example-python-flask-eks/create-ci-stages.sh @@ -0,0 +1,185 @@ 
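>">
The `deploy-image-*` jobs appended above by the example-nodejs-rds and example-nodejs stage generators extend the new `.abstract-deploy-ecs-image` template and run after the image build. For orientation, the repeated `echo ... >> $STAGE_FILE_PATH` lines emit a job roughly like the sketch below; the heredoc form is shown only to make the resulting YAML easier to read (exact indentation is assumed) and is not how the generator scripts are written:

    # Illustrative equivalent of the echo sequence that appends the ECS deploy job.
    cat >> "$STAGE_FILE_PATH" <<EOF
    deploy-image-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:
      extends: .abstract-deploy-ecs-image
      needs:
        - build-image-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}
      stage: ${TARGET_ENV_NAME}-stage
      environment: ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}
      variables:
        PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties
        BACKSTAGE_ENTITY_FILE: .backstage/catalog-info.yaml
    EOF

Because the variables are expanded when the generator runs, the committed jobs file contains the concrete environment and provider names rather than placeholders.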
+#!/usr/bin/env bash + +scriptDir="$CI_PROJECT_DIR/.awsdeployment" +mkdir -p $scriptDir/jobs +mkdir -p $scriptDir/providers +mkdir -p $scriptDir/permissions + +$CI_PROJECT_DIR/cicd/scripts/k8s/install-kubectl.sh + +echo "looking for providers in $scriptDir/providers" +ls -la $scriptDir/providers + +for propsfile in $scriptDir/providers/*.properties +do + [ -e "$propsfile" ] || continue + set -a && source $propsfile && set +a + + echo "Processing Environment $TARGET_ENV_NAME, Provider $TARGET_ENV_PROVIDER_NAME" + + SIMPLE_STAGE_FILE_NAME=".gitlab-ci-$TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME.yml" + STAGE_FILE_PATH="$scriptDir/jobs/$SIMPLE_STAGE_FILE_NAME" + + # Include the new jobs file for the environment provider if it has not + # already been included + + LAST_NON_BLANK_LINE="$(tail -n 1 $CI_PROJECT_DIR/.gitlab-ci.yml)" + if [[ -z "$LAST_NON_BLANK_LINE" ]]; then LAST_NON_BLANK_LINE="$(tail -n 2 $CI_PROJECT_DIR/.gitlab-ci.yml)"; fi + + # This block can create a new include if it is not already there + # CURRENT_INCLUDE="$(echo "$LAST_NON_BLANK_LINE" | grep local:)" + # if [[ -z "$CURRENT_INCLUDE" ]]; then + # echo -e "\ninclude:" >> $CI_PROJECT_DIR/.gitlab-ci.yml + # fi + + INCLUDE_COUNT=$(grep -c "$SIMPLE_STAGE_FILE_NAME" $CI_PROJECT_DIR/.gitlab-ci.yml) + + if [[ "$INCLUDE_COUNT" -eq "0" ]]; then + echo " - local: .awsdeployment/jobs/$SIMPLE_STAGE_FILE_NAME" >> $CI_PROJECT_DIR/.gitlab-ci.yml + echo "Updated $CI_PROJECT_DIR/.gitlab-ci.yml to include jobs for provider: ${TARGET_ENV_PROVIDER_NAME}" + fi + + # Add pipeline stages for the environment if they have not already been set up + + ALREADY_HAS_STAGE="$(grep "prepare-$TARGET_ENV_NAME" $CI_PROJECT_DIR/.gitlab-ci.yml || true)" + if [[ -z "$ALREADY_HAS_STAGE" ]]; then yq -Yi ".stages += [\"prepare-${TARGET_ENV_NAME}-stage\", \"${TARGET_ENV_NAME}-stage\"]" $CI_PROJECT_DIR/.gitlab-ci.yml; fi + + # Add GitLab jobs YAML file for the environment provider + + if [[ ! 
-f "$STAGE_FILE_PATH" ]]; then + if [[ -z "$ADDED_STAGE" ]]; then + ADDED_STAGE="$TARGET_ENV_NAME" + elif [[ "$ADDED_STAGE" != "$TARGET_ENV_NAME" ]]; then + ADDED_STAGE="MULTIPLE" + fi + fi + + rm $STAGE_FILE_PATH 2> /dev/null || true # regenerate all jobs files from scratch + + # Run IaC Job + echo "iac-deployment-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" > $STAGE_FILE_PATH + echo " extends: .abstract-iac-deployment" >> $STAGE_FILE_PATH + echo " rules:" >> $STAGE_FILE_PATH + echo " - if: \"\$CI_COMMIT_TITLE =~ /generate CICD stages/\"" >> $STAGE_FILE_PATH + echo " when: never" >> $STAGE_FILE_PATH + echo " - if: \"\$CI_COMMIT_BRANCH == '$CI_DEFAULT_BRANCH'\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " when: manual" >> $STAGE_FILE_PATH + fi + echo " changes:" >> $STAGE_FILE_PATH + echo " - .iac/**/*" >> $STAGE_FILE_PATH + echo " - if: \"\$CI_COMMIT_TITLE =~ /Added CICD environment stage ${TARGET_ENV_NAME}/\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " when: manual" >> $STAGE_FILE_PATH + fi + echo " - if: \"\$CI_COMMIT_TITLE =~ /^Bind Resource to env ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}/\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " when: manual" >> $STAGE_FILE_PATH + fi + echo " - if: \"\$CI_COMMIT_TITLE =~ /^unBind Resource to env ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}/\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " when: manual" >> $STAGE_FILE_PATH + fi + echo " - if: \"\$CI_COMMIT_TITLE =~ /Added multiple environment stages/\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " when: manual" >> $STAGE_FILE_PATH + fi + echo " stage: prepare-${TARGET_ENV_NAME}-stage" >> $STAGE_FILE_PATH + echo " environment: ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH + echo " variables:" >> $STAGE_FILE_PATH + echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH + echo "" >> $STAGE_FILE_PATH + + # Get AWS Creds Job + echo "get-aws-creds-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " extends: .abstract-get-aws-creds-manual" >> $STAGE_FILE_PATH + else + echo " extends: .abstract-get-aws-creds-auto" >> $STAGE_FILE_PATH + fi + echo " stage: ${TARGET_ENV_NAME}-stage" >> $STAGE_FILE_PATH + echo " environment: ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH + echo " variables:" >> $STAGE_FILE_PATH + echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH + echo "" >> $STAGE_FILE_PATH + + # Build Container Image Job + echo "build-image-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" >> $STAGE_FILE_PATH + echo " extends: .abstract-build-image" >> $STAGE_FILE_PATH + echo " needs:" >> $STAGE_FILE_PATH + echo " - get-aws-creds-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH + echo " stage: ${TARGET_ENV_NAME}-stage" >> $STAGE_FILE_PATH + echo " environment: ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH + echo " variables:" >> $STAGE_FILE_PATH + echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH + echo "" >> $STAGE_FILE_PATH + + echo 
"git-commit-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" >> $STAGE_FILE_PATH + echo " extends: .abstract-git-commit" >> $STAGE_FILE_PATH + echo " needs:" >> $STAGE_FILE_PATH + echo " - build-image-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH + echo " stage: ${TARGET_ENV_NAME}-stage" >> $STAGE_FILE_PATH + echo " environment: ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH + echo " variables:" >> $STAGE_FILE_PATH + echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH + echo "" >> $STAGE_FILE_PATH + + git add $CI_PROJECT_DIR/.gitlab-ci.yml + git add $STAGE_FILE_PATH + + export K8S_CONFIG_DIR=$(yq -r .metadata.k8sConfigDirName $CI_PROJECT_DIR/.backstage/catalog-info.yaml) + + if [[ ! -d "$CI_PROJECT_DIR/$K8S_CONFIG_DIR/$TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME" ]]; then + + if [[ -f "$CI_PROJECT_DIR/$K8S_CONFIG_DIR/Chart.yaml" ]]; then + export IS_HELM="true" + export IS_KUSTOMIZE="false" + else + export IS_HELM="false" + export IS_KUSTOMIZE="true" + fi + + if [[ "$IS_HELM" == "true" ]]; then + $CI_PROJECT_DIR/cicd/scripts/k8s/install-helm.sh + fi + + echo -e "\e[0Ksection_start:`date +%s`:k8s-config[collapsed=true]\r\e[0KCreate k8s Configs for Environment" + newK8sEnvDir="$CI_PROJECT_DIR/$K8S_CONFIG_DIR/$TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME" + + if [[ "$IS_HELM" == "true" ]]; then + mkdir $newK8sEnvDir + cp $CI_PROJECT_DIR/$K8S_CONFIG_DIR/values.yaml $newK8sEnvDir + elif [[ "$IS_KUSTOMIZE" == "true" ]]; then + cp -r $CI_PROJECT_DIR/$K8S_CONFIG_DIR/new-env-template/ $newK8sEnvDir + fi + + $CI_PROJECT_DIR/cicd/scripts/k8s/save-template-output.sh "skipGitAddTemplateOutput" "$TARGET_ENV_NAME-${TARGET_ENV_PROVIDER_NAME}/" || exit 1 + git add $newK8sEnvDir + + # Do not add these files to git since they will contain unresolved variables + # that we cannot resolve until after the app image has been built + git reset $newK8sEnvDir/next-release.yaml + git reset $newK8sEnvDir/next-release.json + + echo -e "\e[0Ksection_end:`date +%s`:k8s-config\r\e[0K" + else + echo "Skipping creating k8s environment directory $CI_PROJECT_DIR/$K8S_CONFIG_DIR/$TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME since it already exists" + fi + +done + +echo -e "\e[0Ksection_start:`date +%s`:git[collapsed=true]\r\e[0KUpdate Git Repo" +UPDATE_COUNT=$(git diff --cached --numstat | wc -l | sed 's/ *$//g') +echo "The number of files that will be committed is $UPDATE_COUNT" +git status +if [[ "$UPDATE_COUNT" -gt "0" ]]; then + if [[ "$ADDED_STAGE" == "MULTIPLE" ]]; then + git commit -m "Added multiple environment stages" --quiet; + elif [[ ! -z "$ADDED_STAGE" ]]; then + git commit -m "Added CICD environment stage ${ADDED_STAGE}" --quiet; + else + git commit -m "Reprocessed CICD jobs. No new stages added." 
--quiet; + fi +fi +if [[ "$UPDATE_COUNT" -gt "0" ]]; then git push https://oauth2:$ACCESS_TOKEN@$CI_SERVER_HOST/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME HEAD:main; fi +echo -e "\e[0Ksection_end:`date +%s`:git\r\e[0K" diff --git a/backstage-reference/common/cicd/scripts/example-python-flask/create-ci-stages.sh b/backstage-reference/common/cicd/scripts/example-python-flask/create-ci-stages.sh index eff199d4..351d8073 100755 --- a/backstage-reference/common/cicd/scripts/example-python-flask/create-ci-stages.sh +++ b/backstage-reference/common/cicd/scripts/example-python-flask/create-ci-stages.sh @@ -70,6 +70,14 @@ do if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then echo " when: manual" >> $STAGE_FILE_PATH fi + echo " - if: \"\$CI_COMMIT_TITLE =~ /^Bind Resource to env ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}/\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " when: manual" >> $STAGE_FILE_PATH + fi + echo " - if: \"\$CI_COMMIT_TITLE =~ /^unBind Resource to env ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}/\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " when: manual" >> $STAGE_FILE_PATH + fi echo " - if: \"\$CI_COMMIT_TITLE =~ /Added multiple environment stages/\"" >> $STAGE_FILE_PATH if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then echo " when: manual" >> $STAGE_FILE_PATH @@ -103,6 +111,18 @@ do echo " variables:" >> $STAGE_FILE_PATH echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH echo "" >> $STAGE_FILE_PATH + + # Deploy Container Image Job + echo "deploy-image-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" >> $STAGE_FILE_PATH + echo " extends: .abstract-deploy-ecs-image" >> $STAGE_FILE_PATH + echo " needs:" >> $STAGE_FILE_PATH + echo " - build-image-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH + echo " stage: ${TARGET_ENV_NAME}-stage" >> $STAGE_FILE_PATH + echo " environment: ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH + echo " variables:" >> $STAGE_FILE_PATH + echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH + echo " BACKSTAGE_ENTITY_FILE: .backstage/catalog-info.yaml" >> $STAGE_FILE_PATH + echo "" >> $STAGE_FILE_PATH # Delete AWS Creds Job echo "delete-aws-creds-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" >> $STAGE_FILE_PATH diff --git a/backstage-reference/common/cicd/scripts/example-serverless-rest-api/create-ci-stages.sh b/backstage-reference/common/cicd/scripts/example-serverless-rest-api/create-ci-stages.sh index 08acebe9..8d13ec2e 100755 --- a/backstage-reference/common/cicd/scripts/example-serverless-rest-api/create-ci-stages.sh +++ b/backstage-reference/common/cicd/scripts/example-serverless-rest-api/create-ci-stages.sh @@ -71,6 +71,14 @@ do if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then echo " when: manual" >> $STAGE_FILE_PATH fi + echo " - if: \"\$CI_COMMIT_TITLE =~ /^Bind Resource to env ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}/\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " when: manual" >> $STAGE_FILE_PATH + fi + echo " - if: \"\$CI_COMMIT_TITLE =~ /^unBind Resource to env ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}/\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " when: manual" >> $STAGE_FILE_PATH + fi echo " - 
if: \"\$CI_COMMIT_TITLE =~ /Added multiple environment stages/\"" >> $STAGE_FILE_PATH if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then echo " when: manual" >> $STAGE_FILE_PATH diff --git a/backstage-reference/common/cicd/scripts/example-springboot-eks/create-ci-stages.sh b/backstage-reference/common/cicd/scripts/example-springboot-eks/create-ci-stages.sh new file mode 100755 index 00000000..37bda5b0 --- /dev/null +++ b/backstage-reference/common/cicd/scripts/example-springboot-eks/create-ci-stages.sh @@ -0,0 +1,176 @@ +#!/usr/bin/env bash + +scriptDir="$CI_PROJECT_DIR/.awsdeployment" +mkdir -p $scriptDir/jobs +mkdir -p $scriptDir/providers + +$CI_PROJECT_DIR/cicd/scripts/k8s/install-kubectl.sh + +echo "looking for providers in $scriptDir/providers" +ls -la $scriptDir/providers + +for propsfile in $scriptDir/providers/*.properties +do + [ -e "$propsfile" ] || continue + set -a && source $propsfile && set +a + + echo "Processing Environment $TARGET_ENV_NAME, Provider $TARGET_ENV_PROVIDER_NAME" + + SIMPLE_STAGE_FILE_NAME=".gitlab-ci-$TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME.yml" + STAGE_FILE_PATH="$scriptDir/jobs/$SIMPLE_STAGE_FILE_NAME" + + # Include the new jobs file for the environment provider if it has not + # already been included + + LAST_NON_BLANK_LINE="$(tail -n 1 $CI_PROJECT_DIR/.gitlab-ci.yml)" + if [[ -z "$LAST_NON_BLANK_LINE" ]]; then LAST_NON_BLANK_LINE="$(tail -n 2 $CI_PROJECT_DIR/.gitlab-ci.yml)"; fi + + # This block can create a new include if it is not already there + # CURRENT_INCLUDE="$(echo "$LAST_NON_BLANK_LINE" | grep local:)" + # if [[ -z "$CURRENT_INCLUDE" ]]; then + # echo -e "\ninclude:" >> $CI_PROJECT_DIR/.gitlab-ci.yml + # fi + + INCLUDE_COUNT=$(grep -c "$SIMPLE_STAGE_FILE_NAME" $CI_PROJECT_DIR/.gitlab-ci.yml) + + if [[ "$INCLUDE_COUNT" -eq "0" ]]; then + echo " - local: .awsdeployment/jobs/$SIMPLE_STAGE_FILE_NAME" >> $CI_PROJECT_DIR/.gitlab-ci.yml + echo "Updated $CI_PROJECT_DIR/.gitlab-ci.yml to include jobs for provider: ${TARGET_ENV_PROVIDER_NAME}" + fi + + # Add pipeline stages for the environment if they have not already been set up + + ALREADY_HAS_STAGE="$(grep "prepare-$TARGET_ENV_NAME" $CI_PROJECT_DIR/.gitlab-ci.yml || true)" + if [[ -z "$ALREADY_HAS_STAGE" ]]; then yq -Yi ".stages += [\"prepare-${TARGET_ENV_NAME}-stage\", \"${TARGET_ENV_NAME}-stage\"]" $CI_PROJECT_DIR/.gitlab-ci.yml; fi + + # Add GitLab jobs YAML file for the environment provider + + if [[ ! 
-f "$STAGE_FILE_PATH" ]]; then + if [[ -z "$ADDED_STAGE" ]]; then + ADDED_STAGE="$TARGET_ENV_NAME" + elif [[ "$ADDED_STAGE" != "$TARGET_ENV_NAME" ]]; then + ADDED_STAGE="MULTIPLE" + fi + fi + + rm $STAGE_FILE_PATH 2> /dev/null || true # regenerate all jobs files from scratch + + # Run IaC Job + echo "iac-deployment-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" > $STAGE_FILE_PATH + echo " extends: .abstract-iac-deployment" >> $STAGE_FILE_PATH + echo " rules:" >> $STAGE_FILE_PATH + echo " - if: \"\$CI_COMMIT_TITLE =~ /generate CICD stages/\"" >> $STAGE_FILE_PATH + echo " when: never" >> $STAGE_FILE_PATH + echo " - if: \"\$CI_COMMIT_BRANCH == '$CI_DEFAULT_BRANCH'\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " when: manual" >> $STAGE_FILE_PATH + fi + echo " changes:" >> $STAGE_FILE_PATH + echo " - .iac/**/*" >> $STAGE_FILE_PATH + echo " - if: \"\$CI_COMMIT_TITLE =~ /Added CICD environment stage ${TARGET_ENV_NAME}/\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " when: manual" >> $STAGE_FILE_PATH + fi + echo " - if: \"\$CI_COMMIT_TITLE =~ /^Bind Resource to env ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}/\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " when: manual" >> $STAGE_FILE_PATH + fi + echo " - if: \"\$CI_COMMIT_TITLE =~ /^unBind Resource to env ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}/\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " when: manual" >> $STAGE_FILE_PATH + fi + echo " - if: \"\$CI_COMMIT_TITLE =~ /Added multiple environment stages/\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " when: manual" >> $STAGE_FILE_PATH + fi + echo " stage: prepare-${TARGET_ENV_NAME}-stage" >> $STAGE_FILE_PATH + echo " environment: ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH + echo " variables:" >> $STAGE_FILE_PATH + echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH + echo "" >> $STAGE_FILE_PATH + + # Build Container Image Job + echo "build-image-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" >> $STAGE_FILE_PATH + echo " extends: .abstract-build-image" >> $STAGE_FILE_PATH + echo " rules:" >> $STAGE_FILE_PATH + echo " - if: \"\$CI_COMMIT_TITLE =~ /generate CICD stages/\"" >> $STAGE_FILE_PATH + echo " when: never" >> $STAGE_FILE_PATH + echo " - if: \"\$CI_COMMIT_BRANCH == '$CI_DEFAULT_BRANCH'\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " when: manual" >> $STAGE_FILE_PATH + fi + echo " stage: ${TARGET_ENV_NAME}-stage" >> $STAGE_FILE_PATH + echo " environment: ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH + echo " variables:" >> $STAGE_FILE_PATH + echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH + echo "" >> $STAGE_FILE_PATH + + echo "git-commit-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" >> $STAGE_FILE_PATH + echo " extends: .abstract-git-commit" >> $STAGE_FILE_PATH + echo " needs:" >> $STAGE_FILE_PATH + echo " - build-image-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH + echo " stage: ${TARGET_ENV_NAME}-stage" >> $STAGE_FILE_PATH + echo " environment: ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH + echo " variables:" >> $STAGE_FILE_PATH + 
echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH + echo "" >> $STAGE_FILE_PATH + + git add $CI_PROJECT_DIR/.gitlab-ci.yml + git add $STAGE_FILE_PATH + + export K8S_CONFIG_DIR=$(yq -r .metadata.k8sConfigDirName $CI_PROJECT_DIR/.backstage/catalog-info.yaml) + + if [[ ! -d "$CI_PROJECT_DIR/$K8S_CONFIG_DIR/$TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME" ]]; then + + if [[ -f "$CI_PROJECT_DIR/$K8S_CONFIG_DIR/Chart.yaml" ]]; then + export IS_HELM="true" + export IS_KUSTOMIZE="false" + else + export IS_HELM="false" + export IS_KUSTOMIZE="true" + fi + + if [[ "$IS_HELM" == "true" ]]; then + $CI_PROJECT_DIR/cicd/scripts/k8s/install-helm.sh + fi + + echo -e "\e[0Ksection_start:`date +%s`:k8s-config[collapsed=true]\r\e[0KCreate k8s Configs for Environment" + newK8sEnvDir="$CI_PROJECT_DIR/$K8S_CONFIG_DIR/$TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME" + + if [[ "$IS_HELM" == "true" ]]; then + mkdir $newK8sEnvDir + cp $CI_PROJECT_DIR/$K8S_CONFIG_DIR/values.yaml $newK8sEnvDir + elif [[ "$IS_KUSTOMIZE" == "true" ]]; then + cp -r $CI_PROJECT_DIR/$K8S_CONFIG_DIR/new-env-template/ $newK8sEnvDir + fi + + $CI_PROJECT_DIR/cicd/scripts/k8s/save-template-output.sh "skipGitAddTemplateOutput" "$TARGET_ENV_NAME-${TARGET_ENV_PROVIDER_NAME}/" || exit 1 + git add $newK8sEnvDir + + # Do not add these files to git since they will contain unresolved variables + # that we cannot resolve until after the app image has been built + git reset $newK8sEnvDir/next-release.yaml + git reset $newK8sEnvDir/next-release.json + + echo -e "\e[0Ksection_end:`date +%s`:k8s-config\r\e[0K" + else + echo "Skipping creating k8s environment directory $CI_PROJECT_DIR/$K8S_CONFIG_DIR/$TARGET_ENV_NAME-$TARGET_ENV_PROVIDER_NAME since it already exists" + fi + +done + +echo -e "\e[0Ksection_start:`date +%s`:git[collapsed=true]\r\e[0KUpdate Git Repo" +UPDATE_COUNT=$(git diff --cached --numstat | wc -l | sed 's/ *$//g') +echo "The number of files that will be committed is $UPDATE_COUNT" +git status +if [[ "$UPDATE_COUNT" -gt "0" ]]; then + if [[ "$ADDED_STAGE" == "MULTIPLE" ]]; then + git commit -m "Added multiple environment stages" --quiet; + elif [[ ! -z "$ADDED_STAGE" ]]; then + git commit -m "Added CICD environment stage ${ADDED_STAGE}" --quiet; + else + git commit -m "Reprocessed CICD jobs. No new stages added." 
--quiet; + fi +fi +if [[ "$UPDATE_COUNT" -gt "0" ]]; then git push https://oauth2:$ACCESS_TOKEN@$CI_SERVER_HOST/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME HEAD:main; fi +echo -e "\e[0Ksection_end:`date +%s`:git\r\e[0K" diff --git a/backstage-reference/common/cicd/scripts/example-springboot/create-ci-stages.sh b/backstage-reference/common/cicd/scripts/example-springboot/create-ci-stages.sh index fd1a61aa..9ac5bd05 100755 --- a/backstage-reference/common/cicd/scripts/example-springboot/create-ci-stages.sh +++ b/backstage-reference/common/cicd/scripts/example-springboot/create-ci-stages.sh @@ -69,6 +69,14 @@ do if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then echo " when: manual" >> $STAGE_FILE_PATH fi + echo " - if: \"\$CI_COMMIT_TITLE =~ /^Bind Resource to env ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}/\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " when: manual" >> $STAGE_FILE_PATH + fi + echo " - if: \"\$CI_COMMIT_TITLE =~ /^unBind Resource to env ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}/\"" >> $STAGE_FILE_PATH + if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then + echo " when: manual" >> $STAGE_FILE_PATH + fi echo " - if: \"\$CI_COMMIT_TITLE =~ /Added multiple environment stages/\"" >> $STAGE_FILE_PATH if [[ "$OPA_CI_ENVIRONMENT_MANUAL_APPROVAL" == "true" ]]; then echo " when: manual" >> $STAGE_FILE_PATH @@ -94,6 +102,20 @@ do echo " variables:" >> $STAGE_FILE_PATH echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH echo "" >> $STAGE_FILE_PATH + + # Deploy Container Image Job + echo "deploy-image-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" >> $STAGE_FILE_PATH + echo " extends: .abstract-deploy-ecs-image" >> $STAGE_FILE_PATH + echo " needs:" >> $STAGE_FILE_PATH + echo " - build-image-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH + echo " stage: ${TARGET_ENV_NAME}-stage" >> $STAGE_FILE_PATH + echo " environment: ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH + echo " variables:" >> $STAGE_FILE_PATH + echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH + echo " BACKSTAGE_ENTITY_FILE: .backstage/catalog-info.yaml" >> $STAGE_FILE_PATH + echo "" >> $STAGE_FILE_PATH + + git add $CI_PROJECT_DIR/.gitlab-ci.yml git add $STAGE_FILE_PATH diff --git a/backstage-reference/common/cicd/scripts/example-tf-nodejs/create-ci-stages.sh b/backstage-reference/common/cicd/scripts/example-tf-nodejs/create-ci-stages.sh index 41f77f8f..55c2d369 100755 --- a/backstage-reference/common/cicd/scripts/example-tf-nodejs/create-ci-stages.sh +++ b/backstage-reference/common/cicd/scripts/example-tf-nodejs/create-ci-stages.sh @@ -46,7 +46,8 @@ do aws s3api head-bucket --bucket $STATE_BUCKET_NAME || BUCKET_NOT_EXIST=true if [ $BUCKET_NOT_EXIST ]; then echo "State bucket does not exist. creating..." 
- ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "$CI_PROJECT_NAME-$CI_JOB_STAGE" --duration-second=3600 --output json) + export ROLE_NAME=$CI_PROJECT_NAME-$CI_JOB_STAGE # store role session name so that a single env var can be truncated to allowed character length + ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "${ROLE_NAME:0:63}" --duration-second=3600 --output json) export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SecretAccessKey') export AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') @@ -137,7 +138,19 @@ do echo " variables:" >> $STAGE_FILE_PATH echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH echo "" >> $STAGE_FILE_PATH - + + # Deploy Container Image Job + echo "deploy-image-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" >> $STAGE_FILE_PATH + echo " extends: .abstract-deploy-ecs-image" >> $STAGE_FILE_PATH + echo " needs:" >> $STAGE_FILE_PATH + echo " - build-image-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH + echo " stage: ${TARGET_ENV_NAME}-stage" >> $STAGE_FILE_PATH + echo " environment: ${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}" >> $STAGE_FILE_PATH + echo " variables:" >> $STAGE_FILE_PATH + echo " PROVIDER_PROPS_FILE: .awsdeployment/providers/${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}.properties" >> $STAGE_FILE_PATH + echo " BACKSTAGE_ENTITY_FILE: .backstage/catalog-info.yaml" >> $STAGE_FILE_PATH + echo "" >> $STAGE_FILE_PATH + echo "delete-aws-creds-${TARGET_ENV_NAME}-${TARGET_ENV_PROVIDER_NAME}:" >> $STAGE_FILE_PATH echo " extends: .abstract-delete-aws-creds" >> $STAGE_FILE_PATH echo " needs:" >> $STAGE_FILE_PATH diff --git a/backstage-reference/common/cicd/scripts/k8s/add-role-to-aws-auth-configmap.sh b/backstage-reference/common/cicd/scripts/k8s/add-role-to-aws-auth-configmap.sh new file mode 100755 index 00000000..5452052c --- /dev/null +++ b/backstage-reference/common/cicd/scripts/k8s/add-role-to-aws-auth-configmap.sh @@ -0,0 +1,143 @@ +#!/usr/bin/env bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + +# Adds an IAM role to the aws-auth configmap in the kube-system namespace + +if [[ -z "$AppAdminRoleArn" ]]; then + echo "AppAdminRoleArn not set. Skipping updating aws-auth ConfigMap." + return 0; +fi + +echo "AppAdminRoleArn detected. Updating aws-auth ConfigMap." 
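+
+# For illustration, a typical aws-auth data.mapRoles list looks roughly like the
+# following (ARNs are placeholders); this script appends one entry of the second
+# form for the app admin role:
+#
+#   [
+#     {"rolearn": "arn:aws:iam::111111111111:role/eks-node-role", "username": "system:node:{{EC2PrivateDNSName}}", "groups": ["system:bootstrappers", "system:nodes"]},
+#     {"rolearn": "<AppAdminRoleArn>", "username": "<AppAdminRoleArn>", "groups": []}
+#   ]
+#
+# The current value is read via the cluster's kubectl handler Lambda and the
+# extended list is written back with a strategic merge patch below.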
+
+FUNCTION_NAME=${TARGET_KUBECTL_LAMBDA_ARN##*:}
+
+GET_AWS_AUTH_LAMBDA_PAYLOAD=$(
+  jq --null-input \
+  --arg clusterName "${clusterName}" \
+  --arg manifest "${MANIFEST_JSON}" \
+  --arg roleArn "${TARGET_KUBECTL_LAMBDA_ROLE_ARN}" \
+  '{RequestType: "Create", ResourceType: "Custom::AWSCDK-EKS-KubernetesObjectValue", ResourceProperties: {TimeoutSeconds: "10", ClusterName: $clusterName, RoleArn: $roleArn, ObjectNamespace: "kube-system", ObjectType: "configmap", ObjectName: "aws-auth", JsonPath: "@"}}')
+
+echo -e "\nlambda payload to retrieve aws-auth configmap:"
+echo $GET_AWS_AUTH_LAMBDA_PAYLOAD
+
+# aws cli v1
+# getCMLambdaResult="$(aws lambda invoke --function-name $FUNCTION_NAME --region $REGION \
+#   --log-type Tail --output json \
+#   --payload "$GET_AWS_AUTH_LAMBDA_PAYLOAD" aws-auth-lambda_response.json)"
+
+# aws cli v2
+getCMLambdaResult="$(aws lambda invoke \
+  --function-name $FUNCTION_NAME \
+  --cli-binary-format raw-in-base64-out \
+  --log-type Tail \
+  --region $REGION \
+  --payload "$GET_AWS_AUTH_LAMBDA_PAYLOAD" aws-auth-lambda_response.json)"
+
+if [ $? -ne 0 ]; then
+  exit 1;
+fi
+
+echo "$getCMLambdaResult" | jq -r .LogResult | base64 -d
+isLambdaError=$(echo $getCMLambdaResult | jq 'has("FunctionError")')
+
+if [[ "true" == "$isLambdaError" ]]; then
+  echo "Lambda returned with an error"
+  exit 1
+fi
+
+currentConfigMapJson="$(jq -r '.Data.Value' aws-auth-lambda_response.json)"
+echo -e "\ncurrentConfigMapJson:"
+echo "$currentConfigMapJson"
+
+rm aws-auth-lambda_response.json
+
+currentRolesArray="$(echo "$currentConfigMapJson" | jq -r '.data.mapRoles')"
+firstChar=${currentRolesArray:0:1}
+
+# Check to see if the response is in YAML instead of JSON
+if [[ "$firstChar" != "[" && "$firstChar" != "{" ]]; then
+  echo "Converting YAML ConfigMap lookup response to JSON"
+  currentRolesArray="$(echo -e "$currentRolesArray" | yq .)"
+fi
+
+# Remove linebreaks from JSON
+currentRolesArray="$(echo $currentRolesArray)"
+
+echo -e "\ncurrentRolesArray:"
+echo "$currentRolesArray"
+
+if [[ "$currentRolesArray" == *"$AppAdminRoleArn"* ]]; then
+  echo "Skipping updating aws-auth ConfigMap since it already contains $AppAdminRoleArn."
+  exit 0
+else
+  echo "ConfigMap does not already contain $AppAdminRoleArn. Proceeding to add it."
+fi + +# remove "]" from the end of the array string +newRolesArray="${currentRolesArray::${#currentRolesArray}-1}" + +# Add App Admin role object to the array +newRolesArray="${newRolesArray}, {\"rolearn\":\"$AppAdminRoleArn\",\"username\":\"$AppAdminRoleArn\",\"groups\":[]}]" + +echo "newRolesArray:" +echo "$newRolesArray" + +# We now need to escape quotes +currentRolesArray="${currentRolesArray//\"/\\\"}" +newRolesArray="${newRolesArray//\"/\\\"}" + +echo "escaped currentRolesArray:" +echo "$currentRolesArray" + +echo "escaped newRolesArray:" +echo "$newRolesArray" + +applyPatchJson="{\"data\": {\"mapRoles\": \"$newRolesArray\"}}" +restorePatchJson="{\"data\": {\"mapRoles\": \"$currentRolesArray\"}}" + +# Add the new app admin role to the aws-auth ConfigMap by way of a patch + +PATCH_AWS_AUTH_LAMBDA_PAYLOAD=$( + jq --null-input \ + --arg clusterName "${clusterName}" \ + --arg roleArn "${TARGET_KUBECTL_LAMBDA_ROLE_ARN}" \ + --arg applyPatchJson "$applyPatchJson" \ + --arg restorePatchJson "$restorePatchJson" \ + '{RequestType: "Create", ResourceType: "Custom::AWSCDK-EKS-KubernetesPatch", ResourceProperties: {ClusterName: $clusterName, RoleArn: $roleArn, ResourceNamespace: "kube-system", ApplyPatchJson: $applyPatchJson, RestorePatchJson: $restorePatchJson, ResourceName: "configmap/aws-auth", PatchType: "strategic"}}') + +echo -e "\nlambda payload to patch aws-auth configmap:" +echo $PATCH_AWS_AUTH_LAMBDA_PAYLOAD + +# aws cli v1 +# patchCMLambdaResult="$(aws lambda invoke --function-name $FUNCTION_NAME --region $REGION \ +# --log-type Tail --output json \ +# --payload "$PATCH_AWS_AUTH_LAMBDA_PAYLOAD" patch-aws-auth-lambda_response.json)" + +# aws cli v2 +patchCMLambdaResult="$(aws lambda invoke \ + --function-name $FUNCTION_NAME \ + --cli-binary-format raw-in-base64-out \ + --log-type Tail \ + --region $REGION \ + --payload "$PATCH_AWS_AUTH_LAMBDA_PAYLOAD" patch-aws-auth-lambda_response.json)" + +if [ $? -ne 0 ]; then + exit 1; +fi + +echo $patchCMLambdaResult | jq -r .LogResult | base64 -d +isLambdaError=$(echo $patchCMLambdaResult | jq 'has("FunctionError")') + +if [[ "true" == "$isLambdaError" ]]; then + echo "Lambda returned with an error" + exit 1 +fi + +rm patch-aws-auth-lambda_response.json + +echo "Successfully updated aws-auth ConfigMap with IAM role: $AppAdminRoleArn" + + diff --git a/backstage-reference/common/cicd/scripts/k8s/apply-k8s-lambda.sh b/backstage-reference/common/cicd/scripts/k8s/apply-k8s-lambda.sh new file mode 100755 index 00000000..ffba20bd --- /dev/null +++ b/backstage-reference/common/cicd/scripts/k8s/apply-k8s-lambda.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
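+
+# Applies a Kubernetes manifest to the target EKS cluster by invoking the
+# CDK-provisioned kubectl handler Lambda rather than calling kubectl directly.
+# A minimal, illustrative invocation (all values below are placeholders) could
+# look like:
+#
+#   export REGION=us-east-1
+#   export clusterName=my-eks-cluster
+#   export TARGET_KUBECTL_LAMBDA_ARN=arn:aws:lambda:us-east-1:111111111111:function:kubectl-handler
+#   export TARGET_KUBECTL_LAMBDA_ROLE_ARN=arn:aws:iam::111111111111:role/kubectl-lambda-role
+#   export MANIFEST_JSON="$(cat next-release.json)"
+#   ./cicd/scripts/k8s/apply-k8s-lambda.sh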
+ +export FUNCTION_NAME=${TARGET_KUBECTL_LAMBDA_ARN##*:} + +export LAMBDA_PAYLOAD=$( + jq --null-input \ + --arg clusterName "${clusterName}" \ + --arg manifest "${MANIFEST_JSON}" \ + --arg roleArn "${TARGET_KUBECTL_LAMBDA_ROLE_ARN}" \ + '{RequestType: "Update", ResourceType: "Custom::AWSCDK-EKS-KubernetesResource", ResourceProperties: {ClusterName: $clusterName, Manifest: $manifest, RoleArn: $roleArn}}') + +echo "lambda payload:" +echo $LAMBDA_PAYLOAD + +# aws cli v1 +# lambdaResult="$(aws lambda invoke --function-name $FUNCTION_NAME --region $REGION \ +# --log-type Tail --output json \ +# --payload "$LAMBDA_PAYLOAD" lambda_response.json)" + +# aws cli v2 +lambdaResult="$(aws lambda invoke \ + --function-name $FUNCTION_NAME \ + --cli-binary-format raw-in-base64-out \ + --log-type Tail \ + --region $REGION \ + --payload "$LAMBDA_PAYLOAD" lambda_response.json)" + +if [ $? -ne 0 ]; then + exit 1; +fi + +cat lambda_response.json +rm lambda_response.json + +echo "$lambdaResult" | jq -r .LogResult | base64 -d +isLambdaError=$(echo $lambdaResult | jq 'has("FunctionError")') + +if [[ "true" == "$isLambdaError" ]]; then + echo "Lambda returned with an error" + exit 1 +fi diff --git a/backstage-reference/common/cicd/scripts/k8s/get-ingress-dns-name.sh b/backstage-reference/common/cicd/scripts/k8s/get-ingress-dns-name.sh new file mode 100755 index 00000000..843fc91d --- /dev/null +++ b/backstage-reference/common/cicd/scripts/k8s/get-ingress-dns-name.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + +# Search for DNS name of a load balancer that is created by a k8s Ingress. +# We will look for the load balancer based on a tag. + +tagKey="$1" +echo "Searching for load balancer tagged with key \"$tagKey\"" + +# loop 50 times, increment i by 1 +for i in {1..51..1} +do + for i in $(aws elbv2 describe-load-balancers --region $REGION | jq -r '.LoadBalancers[].LoadBalancerArn'); do aws elbv2 describe-tags --region $REGION --resource-arns "$i" | jq -ce --arg tagKey "$tagKey" ".TagDescriptions[].Tags[] | select( .Key == \"$tagKey\")" && INGRESS_ALB_ARN="$i";done + + if [[ -z "$INGRESS_ALB_ARN" ]]; then + echo "Ingress load balancer was not yet created." + echo "Sleeping for 5 seconds" + sleep 5 + else + + INGRESS_DNS=$(aws elbv2 describe-load-balancers --region $REGION --load-balancer-arns $INGRESS_ALB_ARN | jq -r -ce ".LoadBalancers[].DNSName") + echo "INGRESS_DNS is $INGRESS_DNS" + # Make ingress value available to CICD + echo "$INGRESS_DNS" > $CI_PROJECT_DIR/ingressDNS.txt + + # Stop the for loop + break + fi + +done + +if [[ -z "$INGRESS_DNS" ]]; then + echo "WARN: Ingress DNS could not be detected. Make sure your Ingress has the \"alb.ingress.kubernetes.io/tags\" annotation with a value of \"$tagKey\"" +fi diff --git a/backstage-reference/common/cicd/scripts/k8s/install-helm.sh b/backstage-reference/common/cicd/scripts/k8s/install-helm.sh new file mode 100755 index 00000000..61a981c8 --- /dev/null +++ b/backstage-reference/common/cicd/scripts/k8s/install-helm.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
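+
+# Installs the Helm 3 client (pinned to v3.13.3 below) using the official
+# get-helm-3 installer script. create-ci-stages.sh only runs this when the
+# app's k8s config directory contains a Chart.yaml; Kustomize-based apps
+# skip it.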
+
+echo -e "\e[0Ksection_start:`date +%s`:install_helm[collapsed=true]\r\e[0KInstalling Helm";
+curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
+chmod 700 get_helm.sh
+./get_helm.sh -v v3.13.3
+rm ./get_helm.sh
+echo "helm client version...";
+helm version;
+echo -e "\e[0Ksection_end:`date +%s`:install_helm\r\e[0K";
diff --git a/backstage-reference/common/cicd/scripts/k8s/install-kubectl.sh b/backstage-reference/common/cicd/scripts/k8s/install-kubectl.sh
new file mode 100755
index 00000000..cf53178c
--- /dev/null
+++ b/backstage-reference/common/cicd/scripts/k8s/install-kubectl.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+
+echo -e "\e[0Ksection_start:`date +%s`:install_kubectl[collapsed=true]\r\e[0KInstalling kubectl";
+curl -LO https://dl.k8s.io/release/v1.28.4/bin/linux/amd64/kubectl;
+curl -LO "https://dl.k8s.io/release/v1.28.4/bin/linux/amd64/kubectl.sha256";
+echo "$(cat kubectl.sha256)  kubectl" | sha256sum --check;
+apt install -y sudo;
+sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl;
+echo "kubectl client version...";
+kubectl version --client;
+echo -e "\e[0Ksection_end:`date +%s`:install_kubectl\r\e[0K";
diff --git a/backstage-reference/common/cicd/scripts/k8s/resolve-placeholders.sh b/backstage-reference/common/cicd/scripts/k8s/resolve-placeholders.sh
new file mode 100755
index 00000000..8ed4f4ef
--- /dev/null
+++ b/backstage-reference/common/cicd/scripts/k8s/resolve-placeholders.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+
+# loop over yaml files and replace placeholder values with environment variables
+
+echo ""
+echo "Provider Replacement Variables:"
+echo "  ACCT_PLACEHOLDER=\"$ACCOUNT\""
+echo "  NS_PLACEHOLDER=\"$NAMESPACE\""
+echo "  PREFIX_PLACEHOLDER=\"$PREFIX\""
+echo "  REGION_PLACEHOLDER=\"$REGION\""
+echo "  ENV_PLACEHOLDER=\"$TARGET_ENV_NAME\""
+echo "  ENV_PROVIDER_PLACEHOLDER=\"$TARGET_ENV_PROVIDER_NAME\""
+echo ""
+echo "App Replacement Variables:"
+echo "  SA_ROLE_PLACEHOLDER=\"$ServiceAccountRoleArn\""
+echo "  APP_ADMIN_ROLE_PLACEHOLDER=\"$AppAdminRoleArn\""
+echo ""
+
+echoerr() { echo "$@" 1>&2; }
+
+if [[ -z "$NAMESPACE" ]]; then
+  echoerr "ERROR: Missing environment variable value for NS_PLACEHOLDER"
+fi
+
+performReplacement () {
+  local k8sYamlDir=$1
+  for filename in $k8sYamlDir/*.yaml; do
+    echo "found yaml file in $k8sYamlDir: $filename"
+    sed -i "s|NS_PLACEHOLDER|$NAMESPACE|g" $filename
+    sed -i "s|ENV_PLACEHOLDER|$TARGET_ENV_NAME|g" $filename
+    sed -i "s|ENV_PROVIDER_PLACEHOLDER|$TARGET_ENV_PROVIDER_NAME|g" $filename
+    sed -i "s|PREFIX_PLACEHOLDER|$PREFIX|g" $filename
+    sed -i "s|ACCT_PLACEHOLDER|$ACCOUNT|g" $filename
+    sed -i "s|REGION_PLACEHOLDER|$REGION|g" $filename
+
+    # Conditional replacements. Leave the placeholder alone if we don't
+    # have a replacement value yet
+    if [[ ! -z "$ServiceAccountRoleArn" ]]; then
+      sed -i "s|SA_ROLE_PLACEHOLDER|$ServiceAccountRoleArn|g" $filename
+    fi
+    if [[ !
-z "$AppAdminRoleArn" ]]; then + sed -i "s|APP_ADMIN_ROLE_PLACEHOLDER|$AppAdminRoleArn|g" $filename + fi + + done +} + +performReplacement "$1" + +curDir=${1##*/} +if [[ "$curDir" != "base" && -d "$1/../base" ]]; then + cd "$1/../base" + baseDir="$(pwd)" + performReplacement "$baseDir" + cd - +fi diff --git a/backstage-reference/common/cicd/scripts/k8s/save-template-output.sh b/backstage-reference/common/cicd/scripts/k8s/save-template-output.sh new file mode 100755 index 00000000..de462501 --- /dev/null +++ b/backstage-reference/common/cicd/scripts/k8s/save-template-output.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + +# Loop over all current environments and resolve template output + +gitAddResolvedTemplateSetting="$1" +d="$2" + +echo "Saving Resolved Template Output" +echo "gitAddResolvedTemplateSetting set to $gitAddResolvedTemplateSetting" +echo "directory to process is $d" + +imageSearch="${EcrRepositoryUri/\//\\/}:.*" +imageReplace="${EcrRepositoryUri/\//\\/}:$CI_COMMIT_SHORT_SHA" + +cd $CI_PROJECT_DIR/$K8S_CONFIG_DIR + +# Loop over all current environments and save template engine output +# (for d in */ ; do +# if [[ -d "$d" && ! -L "$d" && "$d" != "base/" && "$d" != "new-env-template/" ]]; then + + providerPropsFile=$CI_PROJECT_DIR/.awsdeployment/providers/${d%%/*}.properties + echo "using provider props file: $providerPropsFile" + source $providerPropsFile + echo "performing variable replacement before running template engine" + $CI_PROJECT_DIR/cicd/scripts/k8s/resolve-placeholders.sh ${d%%/*} + + echo "creating resolved template output for ${d%%/*}" + + if [[ -f "$d/values.yaml" ]]; then + helm template -f $d/values.yaml . > $d/next-release.yaml + cat $d/next-release.yaml + cd $d + else + cd $d + kubectl kustomize > next-release.yaml + cat next-release.yaml + fi + + if [ $? -ne 0 ]; then + exit 1 + fi + if [[ "$gitAddResolvedTemplateSetting" != "skipGitAddTemplateOutput" ]]; then + # Updating the ECR image to use the latest Git short hash + echo "updating manifest's container image - from \"$imageSearch\" to \"$imageReplace\"" + sed -i "s/$imageSearch/$imageReplace/g" next-release.yaml + git add next-release.yaml + fi + echo "Converting next-release.yaml to next-release.json" + yq -s . next-release.yaml > next-release.json; + if [ $? -ne 0 ]; then + exit 1 + fi + if [[ "$gitAddResolvedTemplateSetting" != "skipGitAddTemplateOutput" ]]; then + git add next-release.json + fi + cd - + + # reset kustomize base files to prepare to process subsequent providers + if [[ -d "$CI_PROJECT_DIR/$K8S_CONFIG_DIR/base" ]]; then + git restore $CI_PROJECT_DIR/$K8S_CONFIG_DIR/base/* + fi + + +# fi; +# done) + +if [ $? 
-ne 0 ]; then + exit 1 +fi + +cd - diff --git a/backstage-reference/common/cicd/scripts/terraform/destroy-tf.sh b/backstage-reference/common/cicd/scripts/terraform/destroy-tf.sh index 46314c57..ef96512c 100755 --- a/backstage-reference/common/cicd/scripts/terraform/destroy-tf.sh +++ b/backstage-reference/common/cicd/scripts/terraform/destroy-tf.sh @@ -7,7 +7,8 @@ sed -e 's/^/TF_VAR_/' providers/$PROVIDER_FILE_TO_DELETE > updated_props.propert cat updated_props.properties set -a && source updated_props.properties && set +a cd $CI_PROJECT_DIR/.iac/ -ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "$CI_PROJECT_NAME-$CI_JOB_STAGE" --duration-second=3600 --output json) +export ROLE_NAME=$CI_PROJECT_NAME-$CI_JOB_STAGE # store role session name so that a single env var can be truncated to allowed character length +ROLE_OUTPUT=$(aws sts assume-role --role-arn "$ENV_ROLE_ARN" --role-session-name "${ROLE_NAME:0:63}" --duration-second=3600 --output json) export AWS_ACCESS_KEY_ID=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.AccessKeyId') export AWS_SECRET_ACCESS_KEY=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SecretAccessKey') export AWS_SESSION_TOKEN=$(echo ${ROLE_OUTPUT} | jq -r '.Credentials.SessionToken') diff --git a/backstage-reference/common/tf_aws_ecs/main.tf b/backstage-reference/common/tf_aws_ecs/main.tf index 20bfc45d..b2f4fed7 100644 --- a/backstage-reference/common/tf_aws_ecs/main.tf +++ b/backstage-reference/common/tf_aws_ecs/main.tf @@ -47,7 +47,7 @@ resource "aws_kms_alias" "keyAlias" { # Create ECR repo for the app image resource "aws_ecr_repository" "ecrRepository" { - name = lower("${var.APP_NAME}-${var.TARGET_ENV_PROVIDER_NAME}") + name = lower("${var.APP_NAME}-${var.TARGET_ENV_NAME}-${var.TARGET_ENV_PROVIDER_NAME}") image_tag_mutability = "MUTABLE" encryption_configuration { encryption_type = "KMS" @@ -359,4 +359,4 @@ output "AppResourceGroup" { output "TaskExecutionRoleArn" { value = aws_iam_role.ecsTaskExecutionRole.arn description = "The task execution role identify resources " -} \ No newline at end of file +} diff --git a/backstage-reference/templates/all-templates.yaml b/backstage-reference/templates/all-templates.yaml index a4987fa9..3674d850 100644 --- a/backstage-reference/templates/all-templates.yaml +++ b/backstage-reference/templates/all-templates.yaml @@ -8,19 +8,27 @@ spec: # APPS - ./example-nodejs/template.yaml - ./example-springboot/template.yaml + - ./example-springboot-eks/template.yaml # - ./example-eks-note-app/template.yaml - ./example-serverless-rest-api/template.yaml - ./example-nodejs-rds/template.yaml - # - ./example-nodejs-efs/template.yaml + - ./example-eks-nodejs-rds-kustomize/template.yaml + - ./example-eks-nodejs-rds-helm/template.yaml + - ./example-nodejs-microservice/template.yaml - ./example-python-flask/template.yaml + - ./example-python-flask-eks/template.yaml - ./example-tf-nodejs/template.yaml - # # Environments + # Environments - ./aws-environment/template.yaml - ./aws-ecs-environment-provider/template.yaml - # - ./aws-eks-environment-provider/template.yaml + - ./aws-ecs-ec2-environment-provider/template.yaml + - ./aws-eks-environment-provider/template.yaml + - ./aws-eks-environment-existing-cluster-provider/template.yaml - ./aws-serverless-environment-provider/template.yaml + # - ./aws-basic-environment-provider/template.yaml # AWS Resources - ./aws-rds-resource/template.yaml + - ./aws-s3-resource/template.yaml rules: - allow: [Template] diff --git 
a/backstage-reference/templates/aws-basic-environment-provider/content/.backstage/catalog-info.yaml b/backstage-reference/templates/aws-basic-environment-provider/content/.backstage/catalog-info.yaml new file mode 100644 index 00000000..ee8e6743 --- /dev/null +++ b/backstage-reference/templates/aws-basic-environment-provider/content/.backstage/catalog-info.yaml @@ -0,0 +1,21 @@ +apiVersion: aws.backstage.io/v1alpha +kind: AWSEnvironmentProvider +metadata: + name: ${{ values.name | dump }} + title: ${{ values.title | dump }} + prefix: ${{ values.prefix | dump }} + {%- if values.description %} + description: ${{values.description | dump}} + {%- endif %} + tags: + - aws + - aws-environment-provider + env-type: ${{values.env_type}} + aws-account: "${{values.aws_account}}" + aws-region: ${{values.aws_region}} + environment_role: "${{values.environment_role}}" + vpc: "" +spec: + type: environment-provider + lifecycle: experimental + owner: ${{values.owner | dump}} diff --git a/backstage-reference/templates/aws-basic-environment-provider/content/.gitlab-ci.yml b/backstage-reference/templates/aws-basic-environment-provider/content/.gitlab-ci.yml new file mode 100644 index 00000000..25e1c2a6 --- /dev/null +++ b/backstage-reference/templates/aws-basic-environment-provider/content/.gitlab-ci.yml @@ -0,0 +1,15 @@ +image: node:18 +variables: + AWS_ACCOUNT: "${{ values.aws_account }}" + AWS_DEFAULT_REGION: "${{ values.aws_region }}" + ROLE_ARN: "${{values.environment_role}}" + OPA_PLATFORM_REGION: "${{ values.platform_region }}" + +stages: + - build + +include: + - project: 'opa-admin/backstage-reference' + ref: main + file: + - 'common/cicd/.gitlab-ci-aws-provider-basic.yml' diff --git a/backstage-reference/templates/aws-basic-environment-provider/content/stack-parameters.properties b/backstage-reference/templates/aws-basic-environment-provider/content/stack-parameters.properties new file mode 100644 index 00000000..0195b598 --- /dev/null +++ b/backstage-reference/templates/aws-basic-environment-provider/content/stack-parameters.properties @@ -0,0 +1,7 @@ +PREFIX=${{ values.prefix }} +ENV_NAME=${{ values.name }} +AWS_ACCOUNT_ID=${{ values.aws_account }} +AWS_DEFAULT_REGION=${{ values.aws_region }} +PLATFORM_ROLE_ARN=${{ values.platform_role }} +PIPELINE_ROLE_ARN=${{ values.pipeline_role }} +PROVISIONING_ROLE_ARN=${{values.environment_role}} diff --git a/backstage-reference/templates/aws-basic-environment-provider/template.yaml b/backstage-reference/templates/aws-basic-environment-provider/template.yaml new file mode 100644 index 00000000..61cd9628 --- /dev/null +++ b/backstage-reference/templates/aws-basic-environment-provider/template.yaml @@ -0,0 +1,184 @@ +apiVersion: scaffolder.backstage.io/v1beta3 +kind: Template +metadata: + name: aws-basic-environment-provider + title: AWS Basic Environment Provider + description: Configure an AWS basic environment provider with no VPC + tags: + - aws + - environment-provider +spec: + owner: group:admins + type: aws-environment-provider + parameters: + - title: Fill in the environment provider details + required: + - name + - prefix + - env_description + - owner + - aws_account_id + - aws_region + - environment_role + properties: + name: + title: Name + type: string + description: Unique name for the environment provider + ui:autofocus: true + prefix: + title: Prefix + type: string + description: Prefix for environment - examples - payments, engineering, infra + default: opa + env_description: + title: Description + type: string + description: A description of 
the environment + owner: + title: Owner + type: string + description: Owner of the component + ui:field: OwnerPicker + ui:options: + catalogFilter: + kind: [Group] + aws_account_id: + title: AWS Account number + type: string + description: Enter an AWS account number + minLength: 12 + aws_region: + title: AWS Region + type: string + description: Select AWS region + default: us-east-1 + enum: + - us-east-1 + - us-east-2 + - us-west-1 + - us-west-2 + - af-south-1 + - ap-east-1 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - ap-south-1 + - ap-northeast-3 + - ap-northeast-2 + - ap-southeast-1 + - ap-southeast-2 + - ap-northeast-1 + - ca-central-1 + - eu-central-1 + - eu-west-1 + - eu-west-2 + - eu-south-1 + - eu-west-3 + - eu-south-2 + - eu-north-1 + - eu-central-2 + - me-south-1 + - me-central-1 + - sa-east-1 + enumNames: + - 'US East (N. Virginia)' + - 'US East (Ohio)' + - 'US West (N. California)' + - 'US West (Oregon)' + - 'Africa (Cape Town)' + - 'Asia Pacific (Hong Kong)' + - 'Asia Pacific (Hyderabad)' + - 'Asia Pacific (Jakarta)' + - 'Asia Pacific (Melbourne)' + - 'Asia Pacific (Mumbai)' + - 'Asia Pacific (Osaka)' + - 'Asia Pacific (Seoul)' + - 'Asia Pacific (Singapore)' + - 'Asia Pacific (Sydney)' + - 'Asia Pacific (Tokyo)' + - 'Canada (Central)' + - 'Europe (Frankfurt)' + - 'Europe (Ireland)' + - 'Europe (London)' + - 'Europe (Milan)' + - 'Europe (Paris)' + - 'Europe (Spain)' + - 'Europe (Stockholm)' + - 'Europe (Zurich)' + - 'Middle East (Bahrain)' + - 'Middle East (UAE)' + - 'South America (São Paulo)' + environment_role: + title: Environment Role arn + type: string + description: The role arn to assume in order to provision the new environment. + - title: Provide Repository information + required: + - repoUrl + properties: + repoUrl: + title: Repository Location + type: string + ui:field: RepoUrlPicker + ui:options: + allowedHosts: + - {{ gitlab_hostname }} + allowedOwners: + - aws-environment-providers + steps: + - id: opaGetPlatformInfo + name: Get OPA platform information + action: opa:get-platform-metadata + + - id: resolvePlatformVariables + name: Resolve Platform Variables + action: opa:get-platform-parameters + input: + paramKeys: + - '/opa/platform-role' + - '/opa/pipeline-role' + - id: fetchBase + name: Fetch Code Base + action: fetch:template + input: + url: ./content + values: + name: ${{ parameters.name | lower }} + title: ${{ parameters.name }} + description: ${{ parameters.env_description }} + owner: ${{ parameters.owner }} + env_type: serverless + platform_region: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} + aws_region: ${{ parameters.aws_region }} + aws_account: ${{ parameters.aws_account_id }} + environment_role: ${{ parameters.environment_role }} + platform_role: ${{steps['resolvePlatformVariables'].output.params['/opa/platform-role'] }} + pipeline_role: ${{steps['resolvePlatformVariables'].output.params['/opa/pipeline-role'] }} + prefix: ${{parameters.prefix}} + - id: fetchBaseIAC + name: Fetch IAC Base + action: fetch:plain + input: + targetPath: ./.iac + url: "https://{{gitlab_hostname}}/opa-admin/backstage-reference/-/tree/main/environments" + - id: publish + name: Publish + action: publish:gitlab + input: + repoUrl: ${{ parameters.repoUrl }} + repoVisibility: internal + defaultBranch: main + - id: register + name: Register + action: catalog:register + input: + repoContentsUrl: ${{ steps['publish'].output.repoContentsUrl }} + catalogInfoPath: "/.backstage/catalog-info.yaml" + output: + links: + - title: Repository + url: ${{ 
steps['publish'].output.remoteUrl }} + - title: Open in catalog + icon: catalog + entityRef: ${{ steps['register'].output.entityRef }} diff --git a/backstage-reference/templates/aws-ecs-ec2-environment-provider/content/.backstage/catalog-info.yaml b/backstage-reference/templates/aws-ecs-ec2-environment-provider/content/.backstage/catalog-info.yaml new file mode 100644 index 00000000..279ec903 --- /dev/null +++ b/backstage-reference/templates/aws-ecs-ec2-environment-provider/content/.backstage/catalog-info.yaml @@ -0,0 +1,27 @@ +apiVersion: aws.backstage.io/v1alpha +kind: AWSEnvironmentProvider +metadata: + name: ${{values.name | dump }} + title: ${{ values.title | dump }} + prefix: ${{values.prefix | dump}} + {%- if values.description %} + description: ${{values.description | dump}} + {%- endif %} + tags: + - aws + - aws-environment-provider + - ecs + envType: ${{values.envType}} + awsAccount: "${{values.awsAccount}}" + awsRegion: ${{values.awsRegion}} + environmentRole: "${{values.environmentRole}}" + vpc: "" + clusterName: "" + operationRole: "" + provisioningRole: "" + auditTable: "" + stackName: "" +spec: + type: environment-provider + lifecycle: experimental + owner: ${{values.owner | dump}} diff --git a/backstage-reference/templates/aws-ecs-ec2-environment-provider/content/.gitlab-ci.yml b/backstage-reference/templates/aws-ecs-ec2-environment-provider/content/.gitlab-ci.yml new file mode 100644 index 00000000..107c6456 --- /dev/null +++ b/backstage-reference/templates/aws-ecs-ec2-environment-provider/content/.gitlab-ci.yml @@ -0,0 +1,15 @@ +image: node:18 +variables: + AWS_ACCOUNT: "${{ values.awsAccount }}" + AWS_DEFAULT_REGION: "${{ values.awsRegion }}" + ROLE_ARN: "${{values.environmentRole}}" + OPA_PLATFORM_REGION: "${{ values.platformRegion }}" + +stages: + - build + +include: + - project: 'opa-admin/backstage-reference' + ref: main + file: + - 'common/cicd/.gitlab-ci-aws-provider-ecs-ec2.yml' diff --git a/backstage-reference/templates/aws-ecs-ec2-environment-provider/content/stack-parameters.properties b/backstage-reference/templates/aws-ecs-ec2-environment-provider/content/stack-parameters.properties new file mode 100644 index 00000000..640ebb3d --- /dev/null +++ b/backstage-reference/templates/aws-ecs-ec2-environment-provider/content/stack-parameters.properties @@ -0,0 +1,10 @@ +PREFIX=${{ values.prefix }} +ENV_NAME=${{ values.name }} +AWS_ACCOUNT_ID=${{ values.awsAccount }} +AWS_DEFAULT_REGION=${{ values.awsRegion }} +PLATFORM_ROLE_ARN=${{ values.platformRole }} +PIPELINE_ROLE_ARN=${{ values.pipelineRole }} +PROVISIONING_ROLE_ARN=${{values.environmentRole}} +ENV_CIDR=${{ values.cidr }} +EC2_INSTANCE_TYPE=${{ values.ec2InstanceType }} +EC2_MAX_CAPACITY=${{ values.ec2MaxCapacity }} \ No newline at end of file diff --git a/backstage-reference/templates/aws-ecs-ec2-environment-provider/template.yaml b/backstage-reference/templates/aws-ecs-ec2-environment-provider/template.yaml new file mode 100644 index 00000000..a4182234 --- /dev/null +++ b/backstage-reference/templates/aws-ecs-ec2-environment-provider/template.yaml @@ -0,0 +1,233 @@ +apiVersion: scaffolder.backstage.io/v1beta3 +kind: Template +metadata: + name: aws-ecs-ec2-environment-provider + title: AWS ECS EC2 Environment Provider + description: Configure an AWS ECS environment provider with EC2 launch type + tags: + - aws + - environment-provider + - ecs +spec: + owner: group:admins + type: aws-environment-provider + parameters: + - title: Fill in the environment provider details + required: + - name + - prefix + - 
env_description + - owner + - awsAccountID + - awsRegion + - environmentRole + - cidr + properties: + name: + title: Name + type: string + description: Unique name for the environment provider + ui:autofocus: true + prefix: + title: Prefix + type: string + description: Prefix for environment - examples - payments, engineering, infra + default: opa + env_description: + title: Description + type: string + description: A description of the environment + owner: + title: Owner + type: string + description: Owner of the component + ui:field: OwnerPicker + ui:options: + catalogFilter: + kind: [Group] + awsAccountID: + title: AWS Account number + type: string + description: Enter an AWS account number + minLength: 12 + awsRegion: + title: AWS Region + type: string + description: Select AWS region + default: us-east-1 + enum: + - us-east-1 + - us-east-2 + - us-west-1 + - us-west-2 + - af-south-1 + - ap-east-1 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - ap-south-1 + - ap-northeast-3 + - ap-northeast-2 + - ap-southeast-1 + - ap-southeast-2 + - ap-northeast-1 + - ca-central-1 + - eu-central-1 + - eu-west-1 + - eu-west-2 + - eu-south-1 + - eu-west-3 + - eu-south-2 + - eu-north-1 + - eu-central-2 + - me-south-1 + - me-central-1 + - sa-east-1 + enumNames: + - 'US East (N. Virginia)' + - 'US East (Ohio)' + - 'US West (N. California)' + - 'US West (Oregon)' + - 'Africa (Cape Town)' + - 'Asia Pacific (Hong Kong)' + - 'Asia Pacific (Hyderabad)' + - 'Asia Pacific (Jakarta)' + - 'Asia Pacific (Melbourne)' + - 'Asia Pacific (Mumbai)' + - 'Asia Pacific (Osaka)' + - 'Asia Pacific (Seoul)' + - 'Asia Pacific (Singapore)' + - 'Asia Pacific (Sydney)' + - 'Asia Pacific (Tokyo)' + - 'Canada (Central)' + - 'Europe (Frankfurt)' + - 'Europe (Ireland)' + - 'Europe (London)' + - 'Europe (Milan)' + - 'Europe (Paris)' + - 'Europe (Spain)' + - 'Europe (Stockholm)' + - 'Europe (Zurich)' + - 'Middle East (Bahrain)' + - 'Middle East (UAE)' + - 'South America (São Paulo)' + environmentRole: + title: Environment Role ARN + type: string + description: The IAM role ARN to assume in order to provision the new environment. 
+ cidr: + title: CIDR + type: string + description: The network CIDR of the environment provider + default: 10.0.0.0/24 + ec2InstanceType: + title: EC2 Instance Type + type: string + description: Select EC2 Type + default: p2 + enum: + - p2.xlarge + - p3.2xlarge + - g5g.xlarge + enumNames: + - "p2.xlarge" + - "p3.2xlarge" + - "g5g.xlarge" + ec2MaxCapacity: + title: EC2 Max Capacity + type: integer + description: Enter the maximum capacity (less than 10) + maximum: 9 + - title: Provide Repository information + required: + - repoUrl + properties: + repoUrl: + title: Repository Location + type: string + ui:field: RepoUrlPicker + ui:options: + allowedHosts: + - {{ gitlab_hostname }} + allowedOwners: + - aws-environment-providers + steps: + - id: opaGetPlatformInfo + name: Get OPA platform information + action: opa:get-platform-metadata + + - id: resolvePlatformVariables + name: Resolve Platform Variables + action: opa:get-platform-parameters + input: + paramKeys: + - '/opa/platform-role' + - '/opa/pipeline-role' + + - id: fetchBase + name: Fetch Code Base + action: fetch:template + input: + url: ./content + values: + name: ${{ parameters.name | lower }} + title: ${{ parameters.name }} + description: ${{ parameters.env_description }} + owner: ${{ parameters.owner }} + envType: ecs + awsRegion: ${{ parameters.awsRegion }} + awsAccount: ${{ parameters.awsAccountID }} + environmentRole: ${{ parameters.environmentRole }} + platformRole: ${{steps['resolvePlatformVariables'].output.params['/opa/platform-role'] }} + pipelineRole: ${{steps['resolvePlatformVariables'].output.params['/opa/pipeline-role'] }} + platformRegion: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} + prefix: ${{parameters.prefix}} + cidr: ${{parameters.cidr}} + ec2InstanceType: ${{parameters.ec2InstanceType}} + ec2MaxCapacity: ${{parameters.ec2MaxCapacity}} + - id: fetchBaseIAC + name: Fetch IAC Base + action: fetch:plain + input: + targetPath: ./.iac/opa-ecs-ec2-environment + url: "https://{{gitlab_hostname}}/opa-admin/backstage-reference/-/tree/main/environments/opa-ecs-ec2-environment" + - id: fetchIacCommonConstructs + name: Fetch IaC Common Constructs + action: fetch:plain + input: + targetPath: ./.iac/opa-common-constructs + url: "https://{{gitlab_hostname}}/opa-admin/backstage-reference/-/tree/main/environments/opa-common-constructs" + - id: createYarnPackageFile + name: Configure Yarn Package Manager + action: roadiehq:utils:fs:write + input: + path: ./.iac/package.json + content: | + { + "private": true, + "name": "aws-application-development", + "workspaces": [ + "opa-ecs-ec2-environment", + "opa-common-constructs" + ] + } + - id: publish + name: Publish + action: publish:gitlab + input: + repoUrl: ${{ parameters.repoUrl }} + repoVisibility: internal + defaultBranch: main + - id: register + name: Register + action: catalog:register + input: + repoContentsUrl: ${{ steps['publish'].output.repoContentsUrl }} + catalogInfoPath: "/.backstage/catalog-info.yaml" + output: + links: + - title: Repository + url: ${{ steps['publish'].output.remoteUrl }} + - title: Open in catalog + icon: catalog + entityRef: ${{ steps['register'].output.entityRef }} diff --git a/backstage-reference/templates/aws-ecs-environment-provider/content/.backstage/catalog-info.yaml b/backstage-reference/templates/aws-ecs-environment-provider/content/.backstage/catalog-info.yaml index d51744ab..279ec903 100644 --- a/backstage-reference/templates/aws-ecs-environment-provider/content/.backstage/catalog-info.yaml +++ 
b/backstage-reference/templates/aws-ecs-environment-provider/content/.backstage/catalog-info.yaml @@ -1,7 +1,8 @@ apiVersion: aws.backstage.io/v1alpha kind: AWSEnvironmentProvider metadata: - name: ${{values.name | dump}} + name: ${{values.name | dump }} + title: ${{ values.title | dump }} prefix: ${{values.prefix | dump}} {%- if values.description %} description: ${{values.description | dump}} @@ -10,16 +11,16 @@ metadata: - aws - aws-environment-provider - ecs - env-type: ${{values.env_type}} - aws-account: "${{values.aws_account}}" - aws-region: ${{values.aws_region}} - environment_role: "${{values.environment_role}}" + envType: ${{values.envType}} + awsAccount: "${{values.awsAccount}}" + awsRegion: ${{values.awsRegion}} + environmentRole: "${{values.environmentRole}}" vpc: "" - cluster-name: "" - operation-role: "" - provisioning-role: "" - audit-table: "" - stack-name: "" + clusterName: "" + operationRole: "" + provisioningRole: "" + auditTable: "" + stackName: "" spec: type: environment-provider lifecycle: experimental diff --git a/backstage-reference/templates/aws-ecs-environment-provider/content/.gitlab-ci.yml b/backstage-reference/templates/aws-ecs-environment-provider/content/.gitlab-ci.yml index 2e9665a9..8148f2da 100644 --- a/backstage-reference/templates/aws-ecs-environment-provider/content/.gitlab-ci.yml +++ b/backstage-reference/templates/aws-ecs-environment-provider/content/.gitlab-ci.yml @@ -1,9 +1,10 @@ image: node:18 + variables: - AWS_ACCOUNT: "${{ values.aws_account }}" - AWS_DEFAULT_REGION: "${{ values.aws_region }}" - ROLE_ARN: "${{values.environment_role}}" - OPA_PLATFORM_REGION: "${{ values.platform_region }}" + AWS_ACCOUNT: "${{ values.awsAccount }}" + AWS_DEFAULT_REGION: "${{ values.awsRegion }}" + ROLE_ARN: "${{ values.environmentRole }}" + OPA_PLATFORM_REGION: "${{ values.platformRegion }}" stages: - build diff --git a/backstage-reference/templates/aws-ecs-environment-provider/content/stack-parameters.properties b/backstage-reference/templates/aws-ecs-environment-provider/content/stack-parameters.properties index 88150149..1eaeac02 100644 --- a/backstage-reference/templates/aws-ecs-environment-provider/content/stack-parameters.properties +++ b/backstage-reference/templates/aws-ecs-environment-provider/content/stack-parameters.properties @@ -1,8 +1,10 @@ PREFIX=${{ values.prefix }} ENV_NAME=${{ values.name }} -AWS_ACCOUNT_ID=${{ values.aws_account }} -AWS_DEFAULT_REGION=${{ values.aws_region }} -PLATFORM_ROLE_ARN=${{ values.platform_role }} -PIPELINE_ROLE_ARN=${{ values.pipeline_role }} -PROVISIONING_ROLE_ARN=${{values.environment_role}} -ENV_CIDR=${{ values.cidr }} \ No newline at end of file +AWS_ACCOUNT_ID=${{ values.awsAccount }} +AWS_DEFAULT_REGION=${{ values.awsRegion }} +PLATFORM_ROLE_ARN=${{ values.platformRole }} +PIPELINE_ROLE_ARN=${{ values.pipelineRole }} +PROVISIONING_ROLE_ARN=${{ values.environmentRole }} +VPC_ID=${{ values.vpcid }} +ENV_CIDR=${{ values.cidr }} + diff --git a/backstage-reference/templates/aws-ecs-environment-provider/template.yaml b/backstage-reference/templates/aws-ecs-environment-provider/template.yaml index 44e34b0f..845f6497 100644 --- a/backstage-reference/templates/aws-ecs-environment-provider/template.yaml +++ b/backstage-reference/templates/aws-ecs-environment-provider/template.yaml @@ -18,9 +18,9 @@ spec: - prefix - env_description - owner - - aws_account_id - - aws_region - - environment_role + - awsAccountID + - awsRegion + - environmentRole - cidr properties: name: @@ -45,12 +45,12 @@ spec: ui:options: catalogFilter: 
kind: [Group] - aws_account_id: + awsAccountID: title: AWS Account number type: string description: Enter an AWS account number minLength: 12 - aws_region: + awsRegion: title: AWS Region type: string description: Select AWS region @@ -111,15 +111,44 @@ spec: - 'Middle East (Bahrain)' - 'Middle East (UAE)' - 'South America (São Paulo)' - environment_role: - title: Environment Role arn + environmentRole: + title: Environment Role ARN type: string - description: The role arn to assume in order to provision the new environment. - cidr: - title: CIDR + description: The IAM role ARN to assume in order to provision the new environment. + vpc_configuration: + title: VPC Configuration + description: Choose 'Use Existing VPC' to utilize an existing VPC, or continue with 'Create New VPC' for a new setup. type: string - description: The network cidr of the environment provider - default: 10.0.0.0/24 + default: create_new + enum: + - create_new + - use_existing + enumNames: + - 'Create New VPC' + - 'Use Existing VPC' + + # Only ask for the allow-list if user chose public API access + dependencies: + vpc_configuration: + oneOf: + - properties: + vpc_configuration: + enum: + - use_existing + vpc_id: + title: VPC ID + type: string + description: Specify the existing VPC ID + - properties: + vpc_configuration: + enum: + - create_new + cidr: + title: CIDR + type: string + description: Specify the CIDR block for the new VPC. Default is 10.0.0.0/24 + default: 10.0.0.0/24 + - title: Provide Repository information required: - repoUrl @@ -152,24 +181,48 @@ spec: input: url: ./content values: - name: ${{ parameters.name }} + name: ${{ parameters.name | lower }} + title: ${{ parameters.name }} description: ${{ parameters.env_description }} owner: ${{ parameters.owner }} - env_type: ecs - aws_region: ${{ parameters.aws_region }} - aws_account: ${{ parameters.aws_account_id }} - environment_role: ${{ parameters.environment_role }} - platform_role: ${{steps['resolvePlatformVariables'].output.params['/opa/platform-role'] }} - pipeline_role: ${{steps['resolvePlatformVariables'].output.params['/opa/pipeline-role'] }} - platform_region: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} + envType: ecs + awsRegion: ${{ parameters.awsRegion }} + awsAccount: ${{ parameters.awsAccountID }} + environmentRole: ${{ parameters.environmentRole }} + platformRole: ${{steps['resolvePlatformVariables'].output.params['/opa/platform-role'] }} + pipelineRole: ${{steps['resolvePlatformVariables'].output.params['/opa/pipeline-role'] }} + platformRegion: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} prefix: ${{parameters.prefix}} cidr: ${{parameters.cidr}} + vpcid: ${{parameters.vpc_id}} + + - id: fetchBaseIAC name: Fetch IAC Base action: fetch:plain input: - targetPath: ./.iac - url: "https://{{gitlab_hostname}}/opa-admin/backstage-reference/-/tree/main/environments" + targetPath: ./.iac/opa-ecs-environment + url: "https://{{gitlab_hostname}}/opa-admin/backstage-reference/-/tree/main/environments/opa-ecs-environment" + - id: fetchIacCommonConstructs + name: Fetch IaC Common Constructs + action: fetch:plain + input: + targetPath: ./.iac/opa-common-constructs + url: "https://{{gitlab_hostname}}/opa-admin/backstage-reference/-/tree/main/environments/opa-common-constructs" + - id: createYarnPackageFile + name: Configure Yarn Package Manager + action: roadiehq:utils:fs:write + input: + path: ./.iac/package.json + content: | + { + "private": true, + "name": "aws-application-development", + "workspaces": [ + 
"opa-ecs-environment", + "opa-common-constructs" + ] + } - id: publish name: Publish action: publish:gitlab @@ -177,12 +230,14 @@ spec: repoUrl: ${{ parameters.repoUrl }} repoVisibility: internal defaultBranch: main + - id: register name: Register action: catalog:register input: repoContentsUrl: ${{ steps['publish'].output.repoContentsUrl }} catalogInfoPath: "/.backstage/catalog-info.yaml" + output: links: - title: Repository diff --git a/backstage-reference/templates/aws-eks-environment-existing-cluster-provider/content/.backstage/catalog-info.yaml b/backstage-reference/templates/aws-eks-environment-existing-cluster-provider/content/.backstage/catalog-info.yaml new file mode 100644 index 00000000..8f75f99b --- /dev/null +++ b/backstage-reference/templates/aws-eks-environment-existing-cluster-provider/content/.backstage/catalog-info.yaml @@ -0,0 +1,33 @@ +apiVersion: aws.backstage.io/v1alpha +kind: AWSEnvironmentProvider +metadata: + name: ${{ values.name | dump }} + title: ${{ values.title | dump }} + prefix: ${{values.prefix | dump}} + {%- if values.description %} + description: ${{values.description | dump}} + {%- endif %} + tags: + - aws + - aws-environment-provider + - eks + envType: ${{values.envType}} + awsAccount: "${{values.awsAccount}}" + awsRegion: ${{values.awsRegion}} + environmentRole: "${{values.environmentRole}}" + apiAccess: "Unknown - Existing EKS Cluster was imported" + apiAccessCidrs: "Unknown - Existing EKS Cluster was imported" + nodeType: "Unknown - Existing EKS Cluster was imported" + vpc: "" + clusterName: "" + operationRole: "" + provisioningRole: "" + auditTable: "" + clusterAdminRole: "" + kubectlLambdaArn: "" + kubectlLambdaAssumeRoleArn: "" + +spec: + type: environment-provider + lifecycle: experimental + owner: ${{values.owner | dump}} diff --git a/backstage-reference/templates/aws-eks-environment-existing-cluster-provider/content/.gitlab-ci.yml b/backstage-reference/templates/aws-eks-environment-existing-cluster-provider/content/.gitlab-ci.yml new file mode 100644 index 00000000..c22db240 --- /dev/null +++ b/backstage-reference/templates/aws-eks-environment-existing-cluster-provider/content/.gitlab-ci.yml @@ -0,0 +1,16 @@ +image: node:18 + +variables: + AWS_ACCOUNT: "${{ values.aws_account }}" + AWS_DEFAULT_REGION: "${{ values.awsRegion }}" + ROLE_ARN: "${{ values.environmentRole }}" + OPA_PLATFORM_REGION: "${{ values.platformRegion }}" + +stages: + - build + +include: + - project: 'opa-admin/backstage-reference' + ref: main + file: + - 'common/cicd/.gitlab-ci-aws-provider-eks.yml' diff --git a/backstage-reference/templates/aws-eks-environment-existing-cluster-provider/content/stack-parameters.properties b/backstage-reference/templates/aws-eks-environment-existing-cluster-provider/content/stack-parameters.properties new file mode 100644 index 00000000..c39902d3 --- /dev/null +++ b/backstage-reference/templates/aws-eks-environment-existing-cluster-provider/content/stack-parameters.properties @@ -0,0 +1,24 @@ +PREFIX=${{ values.prefix }} +ENV_NAME=${{ values.name }} +AWS_ACCOUNT_ID=${{ values.awsAccount }} +AWS_DEFAULT_REGION=${{ values.awsRegion }} +PLATFORM_ROLE_ARN=${{ values.platformRole }} +PIPELINE_ROLE_ARN=${{ values.pipelineRole }} +PROVISIONING_ROLE_ARN=${{values.environmentRole}} +ENV_CIDR=na +CLUSTER_ADMIN_ROLE_ARN=${{ values.clusterAdminRole }} +API_ACCESS=na +API_ACCESS_CIDRS=na +NODE_TYPE=na +INSTANCE_TYPE=na +AMI_TYPE=na +NODE_GROUP_MIN_SIZE=na +NODE_GROUP_DESIRED_SIZE=na +NODE_GROUP_MAX_SIZE=na +NODE_GROUP_DISK_SIZE=na 
+EXISTING_CLUSTER_NAME=${{ values.existingClusterName }} +CREATE_K8S_OPA_RESOURCES=${{ values.createK8sOpaResources }} +KUBECTL_LAMBDA_ARN=${{ values.kubectlLambdaArn }} +KUBECTL_ON_EVENT_LAMBDA_ARN=${{ values.kubectlOnEventLambdaArn }} +EXISTING_KUBECTL_LAMBDA_EXECUTION_ROLE_ARN=${{ values.kubectlLambdaExecutionRoleArn }} +KUBECTL_LAMBDA_ASSUME_ROLE_ARN=na diff --git a/backstage-reference/templates/aws-eks-environment-existing-cluster-provider/template.yaml b/backstage-reference/templates/aws-eks-environment-existing-cluster-provider/template.yaml new file mode 100644 index 00000000..27d95975 --- /dev/null +++ b/backstage-reference/templates/aws-eks-environment-existing-cluster-provider/template.yaml @@ -0,0 +1,275 @@ +apiVersion: scaffolder.backstage.io/v1beta3 +kind: Template +metadata: + name: aws-eks-environment-existing-cluster-provider + title: AWS EKS Environment Provider From Existing Cluster + description: Configure an AWS EKS environment provider. Use an existing EKS cluster instead of creating a new one. + tags: + - aws + - environment-provider + - eks +spec: + owner: group:admins + type: aws-environment-provider + parameters: + - title: Fill in the environment provider details + required: + - name + - prefix + - env_description + - owner + - awsAccountID + - awsRegion + - environmentRole + properties: + name: + title: Name + type: string + description: Unique name for the environment provider + ui:autofocus: true + prefix: + title: Prefix + type: string + description: Prefix for environment - examples - payments, engineering, infra + default: opa + env_description: + title: Description + type: string + description: A description of the environment + owner: + title: Owner + type: string + description: Owner of the component + ui:field: OwnerPicker + ui:options: + catalogFilter: + kind: [Group] + awsAccountID: + title: AWS Account number + type: string + description: Enter the AWS account number where the existing EKS cluster was deployed + minLength: 12 + awsRegion: + title: AWS Region + type: string + description: Select AWS region where the existing EKS cluster was deployed + default: us-east-1 + enum: + - us-east-1 + - us-east-2 + - us-west-1 + - us-west-2 + - af-south-1 + - ap-east-1 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - ap-south-1 + - ap-northeast-3 + - ap-northeast-2 + - ap-southeast-1 + - ap-southeast-2 + - ap-northeast-1 + - ca-central-1 + - eu-central-1 + - eu-west-1 + - eu-west-2 + - eu-south-1 + - eu-west-3 + - eu-south-2 + - eu-north-1 + - eu-central-2 + - me-south-1 + - me-central-1 + - sa-east-1 + enumNames: + - 'US East (N. Virginia)' + - 'US East (Ohio)' + - 'US West (N. California)' + - 'US West (Oregon)' + - 'Africa (Cape Town)' + - 'Asia Pacific (Hong Kong)' + - 'Asia Pacific (Hyderabad)' + - 'Asia Pacific (Jakarta)' + - 'Asia Pacific (Melbourne)' + - 'Asia Pacific (Mumbai)' + - 'Asia Pacific (Osaka)' + - 'Asia Pacific (Seoul)' + - 'Asia Pacific (Singapore)' + - 'Asia Pacific (Sydney)' + - 'Asia Pacific (Tokyo)' + - 'Canada (Central)' + - 'Europe (Frankfurt)' + - 'Europe (Ireland)' + - 'Europe (London)' + - 'Europe (Milan)' + - 'Europe (Paris)' + - 'Europe (Spain)' + - 'Europe (Stockholm)' + - 'Europe (Zurich)' + - 'Middle East (Bahrain)' + - 'Middle East (UAE)' + - 'South America (São Paulo)' + environmentRole: + title: Environment Role ARN + type: string + description: The IAM role ARN to assume in order to provision the new environment. 
+ + - title: Configure EKS Cluster + required: + - existingClusterName + - clusterAdminRole + - createK8sOpaResources + - kubectlLambdaConfiguration + + properties: + existingClusterName: + title: Existing EKS Cluster Name + type: string + description: The name of an existing EKS cluster to use for this EKS provider. + clusterAdminRole: + title: Cluster Admin IAM Role ARN + type: string + description: The IAM role ARN that has already been granted cluster admin permissions in the EKS cluster. + createK8sOpaResources: + title: Create OPA Resources In EKS Cluster + description: Should OPA resources be created in the EKS cluster? This can only be done once per cluster. + type: boolean + ui:widget: radio + kubectlLambdaConfiguration: + title: Kubectl Lambda Configuration + description: Choose 'Use Existing Kubectl Lambda' to utilize an existing Kubectl lambda that has access to the EKS cluster, or continue with 'Create New Kubectl Lambda' for a new setup. + type: string + default: create_new + enum: + - create_new + - use_existing + enumNames: + - 'Create New Kubectl Lambda' + - 'Use Existing Kubectl Lambda' + + # Ask for different input based on whether the user wants to create a + # new Kubectl Lambda or use an existing one + dependencies: + kubectlLambdaConfiguration: + oneOf: + - properties: + kubectlLambdaConfiguration: + enum: + - use_existing + kubectlLambdaArn: + title: Existing Kubectl Lambda ARN + type: string + description: Specify the existing Kubectl Lambda ARN + kubectlOnEventLambdaArn: + title: Existing Kubectl Provider onEvent Lambda ARN + type: string + description: Specify the existing Kubectl Provider onEvent Lambda ARN + kubectlLambdaExecutionRoleArn: + title: Existing Kubectl Lambda Execution Role ARN + type: string + description: Specify the existing Kubectl Lambda Execution Role ARN + - properties: + kubectlLambdaConfiguration: + enum: + - create_new + + - title: Provide Repository Information + required: + - repoUrl + properties: + repoUrl: + title: Repository Location + type: string + ui:field: RepoUrlPicker + ui:options: + allowedHosts: + - {{ gitlab_hostname }} + allowedOwners: + - aws-environment-providers + steps: + - id: opaGetPlatformInfo + name: Get OPA platform information + action: opa:get-platform-metadata + + - id: resolvePlatformVariables + name: Resolve Platform Variables + action: opa:get-platform-parameters + input: + paramKeys: + - '/opa/platform-role' + - '/opa/pipeline-role' + + - id: fetchBase + name: Fetch Code Base + action: fetch:template + input: + url: ./content + values: + name: ${{ parameters.name | lower }} + title: ${{ parameters.name }} + description: ${{ parameters.env_description }} + owner: ${{ parameters.owner }} + envType: eks + awsRegion: ${{ parameters.awsRegion }} + awsAccount: ${{ parameters.awsAccountID }} + environmentRole: ${{ parameters.environmentRole }} + platformRole: ${{steps['resolvePlatformVariables'].output.params['/opa/platform-role'] }} + pipelineRole: ${{steps['resolvePlatformVariables'].output.params['/opa/pipeline-role'] }} + platformRegion: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} + prefix: ${{ parameters.prefix }} + existingClusterName: ${{parameters.existingClusterName }} + clusterAdminRole: ${{ parameters.clusterAdminRole }} + createK8sOpaResources: ${{ parameters.createK8sOpaResources }} + kubectlLambdaArn: ${{ parameters.kubectlLambdaArn }} + kubectlOnEventLambdaArn: ${{ parameters.kubectlOnEventLambdaArn }} + kubectlLambdaExecutionRoleArn: ${{ parameters.kubectlLambdaExecutionRoleArn }} + + - 
id: fetchBaseIAC + name: Fetch IAC Base + action: fetch:plain + input: + targetPath: ./.iac/opa-eks-environment + url: "https://{{gitlab_hostname}}/opa-admin/backstage-reference/-/tree/main/environments/opa-eks-environment" + - id: fetchIacCommonConstructs + name: Fetch IaC Common Constructs + action: fetch:plain + input: + targetPath: ./.iac/opa-common-constructs + url: "https://{{gitlab_hostname}}/opa-admin/backstage-reference/-/tree/main/environments/opa-common-constructs" + - id: createYarnPackageFile + name: Configure Yarn Package Manager + action: roadiehq:utils:fs:write + input: + path: ./.iac/package.json + content: | + { + "private": true, + "name": "aws-application-development", + "workspaces": [ + "opa-eks-environment", + "opa-common-constructs" + ] + } + - id: publish + name: Publish + action: publish:gitlab + input: + repoUrl: ${{ parameters.repoUrl }} + repoVisibility: internal + defaultBranch: main + + - id: register + name: Register + action: catalog:register + input: + repoContentsUrl: ${{ steps['publish'].output.repoContentsUrl }} + catalogInfoPath: "/.backstage/catalog-info.yaml" + + output: + links: + - title: Repository + url: ${{ steps['publish'].output.remoteUrl }} + - title: Open in catalog + icon: catalog + entityRef: ${{ steps['register'].output.entityRef }} diff --git a/backstage-reference/templates/aws-eks-environment-provider/content/.backstage/catalog-info.yaml b/backstage-reference/templates/aws-eks-environment-provider/content/.backstage/catalog-info.yaml new file mode 100644 index 00000000..21712ce0 --- /dev/null +++ b/backstage-reference/templates/aws-eks-environment-provider/content/.backstage/catalog-info.yaml @@ -0,0 +1,33 @@ +apiVersion: aws.backstage.io/v1alpha +kind: AWSEnvironmentProvider +metadata: + name: ${{ values.name | dump }} + title: ${{ values.title | dump }} + prefix: ${{values.prefix | dump}} + {%- if values.description %} + description: ${{values.description | dump}} + {%- endif %} + tags: + - aws + - aws-environment-provider + - eks + envType: ${{values.envType}} + awsAccount: "${{values.awsAccount}}" + awsRegion: ${{values.awsRegion}} + environmentRole: "${{values.environmentRole}}" + apiAccess: "${{values.apiAccess}}" + apiAccessCidrs: "${{values.apiAccessCidrs}}" + nodeType: "${{values.nodeType}}" + vpc: "" + clusterName: "" + operationRole: "" + provisioningRole: "" + auditTable: "" + clusterAdminRole: "" + kubectlLambdaArn: "" + kubectlLambdaAssumeRoleArn: "" + +spec: + type: environment-provider + lifecycle: experimental + owner: ${{values.owner | dump}} diff --git a/backstage-reference/templates/aws-eks-environment-provider/content/.gitlab-ci.yml b/backstage-reference/templates/aws-eks-environment-provider/content/.gitlab-ci.yml new file mode 100644 index 00000000..c22db240 --- /dev/null +++ b/backstage-reference/templates/aws-eks-environment-provider/content/.gitlab-ci.yml @@ -0,0 +1,16 @@ +image: node:18 + +variables: + AWS_ACCOUNT: "${{ values.aws_account }}" + AWS_DEFAULT_REGION: "${{ values.awsRegion }}" + ROLE_ARN: "${{ values.environmentRole }}" + OPA_PLATFORM_REGION: "${{ values.platformRegion }}" + +stages: + - build + +include: + - project: 'opa-admin/backstage-reference' + ref: main + file: + - 'common/cicd/.gitlab-ci-aws-provider-eks.yml' diff --git a/backstage-reference/templates/aws-eks-environment-provider/content/stack-parameters.properties b/backstage-reference/templates/aws-eks-environment-provider/content/stack-parameters.properties new file mode 100644 index 00000000..ec8b2df2 --- /dev/null +++ 
b/backstage-reference/templates/aws-eks-environment-provider/content/stack-parameters.properties @@ -0,0 +1,25 @@ +PREFIX=${{ values.prefix }} +ENV_NAME=${{ values.name }} +AWS_ACCOUNT_ID=${{ values.awsAccount }} +AWS_DEFAULT_REGION=${{ values.awsRegion }} +PLATFORM_ROLE_ARN=${{ values.platformRole }} +PIPELINE_ROLE_ARN=${{ values.pipelineRole }} +PROVISIONING_ROLE_ARN=${{values.environmentRole}} +VPC_ID=${{ values.vpcId }} +ENV_CIDR=${{ values.cidr }} +CLUSTER_ADMIN_ROLE_ARN=${{ values.clusterAdminRole }} +API_ACCESS=${{ values.apiAccess }} +API_ACCESS_CIDRS=${{ values.apiAccessCidrs }} +NODE_TYPE=${{ values.nodeType }} +INSTANCE_TYPE=${{ values.instanceType }} +AMI_TYPE=${{ values.amiType }} +NODE_GROUP_MIN_SIZE=${{ values.nodeGroupMinSize }} +NODE_GROUP_DESIRED_SIZE=${{ values.nodeGroupDesiredSize }} +NODE_GROUP_MAX_SIZE=${{ values.nodeGroupMaxSize }} +NODE_GROUP_DISK_SIZE=${{ values.nodeGroupDiskSize }} +EXISTING_CLUSTER_NAME= +CREATE_K8S_OPA_RESOURCES=true +KUBECTL_LAMBDA_ARN= +KUBECTL_ON_EVENT_LAMBDA_ARN= +EXISTING_KUBECTL_LAMBDA_EXECUTION_ROLE_ARN= +KUBECTL_LAMBDA_ASSUME_ROLE_ARN= diff --git a/backstage-reference/templates/aws-eks-environment-provider/template.yaml b/backstage-reference/templates/aws-eks-environment-provider/template.yaml new file mode 100644 index 00000000..42af80ca --- /dev/null +++ b/backstage-reference/templates/aws-eks-environment-provider/template.yaml @@ -0,0 +1,360 @@ +apiVersion: scaffolder.backstage.io/v1beta3 +kind: Template +metadata: + name: aws-eks-environment-provider + title: AWS EKS Environment Provider + description: Configure an AWS EKS environment provider. This will create a new EKS cluster. + tags: + - aws + - environment-provider + - eks +spec: + owner: group:admins + type: aws-environment-provider + parameters: + - title: Fill in the environment provider details + required: + - name + - prefix + - env_description + - owner + - awsAccountID + - awsRegion + - environmentRole + - vpcConfiguration + - cidr + properties: + name: + title: Name + type: string + description: Unique name for the environment provider + ui:autofocus: true + prefix: + title: Prefix + type: string + description: Prefix for environment - examples - payments, engineering, infra + default: opa + env_description: + title: Description + type: string + description: A description of the environment + owner: + title: Owner + type: string + description: Owner of the component + ui:field: OwnerPicker + ui:options: + catalogFilter: + kind: [Group] + awsAccountID: + title: AWS Account number + type: string + description: Enter an AWS account number + minLength: 12 + awsRegion: + title: AWS Region + type: string + description: Select AWS region + default: us-east-1 + enum: + - us-east-1 + - us-east-2 + - us-west-1 + - us-west-2 + - af-south-1 + - ap-east-1 + - ap-south-2 + - ap-southeast-3 + - ap-southeast-4 + - ap-south-1 + - ap-northeast-3 + - ap-northeast-2 + - ap-southeast-1 + - ap-southeast-2 + - ap-northeast-1 + - ca-central-1 + - eu-central-1 + - eu-west-1 + - eu-west-2 + - eu-south-1 + - eu-west-3 + - eu-south-2 + - eu-north-1 + - eu-central-2 + - me-south-1 + - me-central-1 + - sa-east-1 + enumNames: + - 'US East (N. Virginia)' + - 'US East (Ohio)' + - 'US West (N. 
California)' + - 'US West (Oregon)' + - 'Africa (Cape Town)' + - 'Asia Pacific (Hong Kong)' + - 'Asia Pacific (Hyderabad)' + - 'Asia Pacific (Jakarta)' + - 'Asia Pacific (Melbourne)' + - 'Asia Pacific (Mumbai)' + - 'Asia Pacific (Osaka)' + - 'Asia Pacific (Seoul)' + - 'Asia Pacific (Singapore)' + - 'Asia Pacific (Sydney)' + - 'Asia Pacific (Tokyo)' + - 'Canada (Central)' + - 'Europe (Frankfurt)' + - 'Europe (Ireland)' + - 'Europe (London)' + - 'Europe (Milan)' + - 'Europe (Paris)' + - 'Europe (Spain)' + - 'Europe (Stockholm)' + - 'Europe (Zurich)' + - 'Middle East (Bahrain)' + - 'Middle East (UAE)' + - 'South America (São Paulo)' + environmentRole: + title: Environment Role ARN + type: string + description: The IAM role ARN to assume in order to provision the new environment. + vpcConfiguration: + title: VPC Configuration + description: Choose 'Use Existing VPC' to utilize an existing VPC, or continue with 'Create New VPC' for a new setup. + type: string + default: create_new + enum: + - create_new + - use_existing + enumNames: + - 'Create New VPC' + - 'Use Existing VPC' + + # Ask for different input based on whether the user wants to create a + # new VPC or use an existing one + dependencies: + vpcConfiguration: + oneOf: + - properties: + vpcConfiguration: + enum: + - use_existing + vpcId: + title: VPC ID + type: string + description: Specify the existing VPC ID + - properties: + vpcConfiguration: + enum: + - create_new + cidr: + title: CIDR + type: string + description: Specify the CIDR block for the new VPC. Default is 10.0.0.0/24 + default: 10.0.0.0/24 + + - title: Configure EKS Cluster + required: + - apiAccess + properties: + clusterAdminRole: + title: Cluster Admin IAM Role ARN + type: string + description: The IAM role ARN to map to the "cluster-admin" K8s ClusterRole. If not supplied, a new role will be created. 
+ apiAccess: + title: Kubernetes API server endpoint access + description: Choose whether your API server should be available over the internet + type: string + default: public_and_private + enum: + - private + - public_and_private + enumNames: + - 'Private' + - 'Public and Private' + nodeType: + title: Worker Node Type + description: Choose between managed EC2 nodes and serverless (Fargate) + type: string + default: FARGATE + enum: + - MANAGED + - FARGATE + enumNames: + - 'Managed Node Group' + - 'Fargate (Serverless)' + + # Only ask for the allow-list if user chose public API access + dependencies: + apiAccess: + oneOf: + - properties: + apiAccess: + enum: + - public_and_private + apiAccessCidrs: + title: API server endpoint IP allow-list + type: string + description: Comma-delimited list of CIDR blocks that should be allowed access to the cluster API endpoint + default: 0.0.0.0/0 + - properties: + apiAccess: + enum: + - private + nodeType: + oneOf: + - properties: + nodeType: + enum: + - MANAGED + instanceType: + title: Worker node EC2 instance type + type: string + description: Choose family and size of EC2 instances + default: 'm5.large' + enum: + - 'm5.large' + - 't3.medium' + enumNames: + - 'm5.large' + - 't3.medium' + amiType: + title: Node Group AMI Type + type: string + description: Choose the Amazon Machine Image type for your node group + default: 'BOTTLEROCKET_x86_64' + enum: + - 'AL2_x86_64' + - 'BOTTLEROCKET_x86_64' + enumNames: + - 'Amazon Linux 2 (x86-64)' + - 'Bottlerocket (x86-64)' + nodeGroupMinSize: + title: Node Group Minimum Size + type: number + description: Minimum number of worker nodes that the managed node group can scale in to + default: 1 + nodeGroupDesiredSize: + title: Node Group Desired Size + type: number + description: Desired number of worker nodes that the group should launch with initially + default: 1 + nodeGroupMaxSize: + title: Node Group Maximum Size + type: number + description: Maximum number of worker nodes that the managed node group can scale out to + default: 2 + nodeGroupDiskSize: + title: Node Group Disk Size + type: number + description: Root device disk size (in GiB) for your node group instances + default: 20 + + - properties: + nodeType: + enum: + - FARGATE + + - title: Provide Repository Information + required: + - repoUrl + properties: + repoUrl: + title: Repository Location + type: string + ui:field: RepoUrlPicker + ui:options: + allowedHosts: + - {{ gitlab_hostname }} + allowedOwners: + - aws-environment-providers + steps: + - id: opaGetPlatformInfo + name: Get OPA platform information + action: opa:get-platform-metadata + + - id: resolvePlatformVariables + name: Resolve Platform Variables + action: opa:get-platform-parameters + input: + paramKeys: + - '/opa/platform-role' + - '/opa/pipeline-role' + + - id: fetchBase + name: Fetch Code Base + action: fetch:template + input: + url: ./content + values: + name: ${{ parameters.name | lower }} + title: ${{ parameters.name }} + description: ${{ parameters.env_description }} + owner: ${{ parameters.owner }} + envType: eks + awsRegion: ${{ parameters.awsRegion }} + awsAccount: ${{ parameters.awsAccountID }} + environmentRole: ${{ parameters.environmentRole }} + platformRole: ${{steps['resolvePlatformVariables'].output.params['/opa/platform-role'] }} + pipelineRole: ${{steps['resolvePlatformVariables'].output.params['/opa/pipeline-role'] }} + platformRegion: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} + prefix: ${{ parameters.prefix }} + cidr: ${{ parameters.cidr }} + vpcId: 
${{parameters.vpcId|default('', true)}} + clusterAdminRole: ${{ parameters.clusterAdminRole }} + apiAccess: ${{ parameters.apiAccess }} + apiAccessCidrs: ${{ parameters.apiAccessCidrs|default('', true) }} + nodeType: ${{ parameters.nodeType }} + instanceType: ${{ parameters.instanceType|default('', true) }} + amiType: ${{ parameters.amiType|default('', true) }} + nodeGroupMinSize: ${{ parameters.nodeGroupMinSize|default(0, true) }} + nodeGroupDesiredSize: ${{ parameters.nodeGroupDesiredSize|default(0, true) }} + nodeGroupMaxSize: ${{ parameters.nodeGroupMaxSize|default(0, true) }} + nodeGroupDiskSize: ${{ parameters.nodeGroupDiskSize|default(0, true) }} + + - id: fetchBaseIAC + name: Fetch IAC Base + action: fetch:plain + input: + targetPath: ./.iac/opa-eks-environment + url: "https://{{gitlab_hostname}}/opa-admin/backstage-reference/-/tree/main/environments/opa-eks-environment" + - id: fetchIacCommonConstructs + name: Fetch IaC Common Constructs + action: fetch:plain + input: + targetPath: ./.iac/opa-common-constructs + url: "https://{{gitlab_hostname}}/opa-admin/backstage-reference/-/tree/main/environments/opa-common-constructs" + - id: createYarnPackageFile + name: Configure Yarn Package Manager + action: roadiehq:utils:fs:write + input: + path: ./.iac/package.json + content: | + { + "private": true, + "name": "aws-application-development", + "workspaces": [ + "opa-eks-environment", + "opa-common-constructs" + ] + } + - id: publish + name: Publish + action: publish:gitlab + input: + repoUrl: ${{ parameters.repoUrl }} + repoVisibility: internal + defaultBranch: main + + - id: register + name: Register + action: catalog:register + input: + repoContentsUrl: ${{ steps['publish'].output.repoContentsUrl }} + catalogInfoPath: "/.backstage/catalog-info.yaml" + + output: + links: + - title: Repository + url: ${{ steps['publish'].output.remoteUrl }} + - title: Open in catalog + icon: catalog + entityRef: ${{ steps['register'].output.entityRef }} diff --git a/backstage-reference/templates/aws-environment/content/.backstage/catalog-info.yaml b/backstage-reference/templates/aws-environment/content/.backstage/catalog-info.yaml index e4c69ba0..b17cd80e 100644 --- a/backstage-reference/templates/aws-environment/content/.backstage/catalog-info.yaml +++ b/backstage-reference/templates/aws-environment/content/.backstage/catalog-info.yaml @@ -1,19 +1,20 @@ apiVersion: aws.backstage.io/v1alpha kind: AWSEnvironment metadata: - name: ${{values.name | dump}} - short-name: ${{values.short_name | dump}} + name: ${{ values.name | dump }} + title: ${{ values.title | dump }} + shortName: ${{values.short_name | dump}} {%- if values.description %} - environment-type: ${{values.environment_type | dump}} + environmentType: ${{values.environment_type | dump}} description: ${{values.description | dump}} {%- endif %} - env-type-account: ${{values.account_type | dump}} - env-type-region: ${{values.region_type | dump}} + envTypeAccount: ${{values.account_type | dump}} + envTypeRegion: ${{values.region_type | dump}} category: ${{values.category | dump}} classification: ${{values.classification | dump}} level: ${{values.level}} repoUrl: ${{values.repoUrl}} - deployment_requires_approval: ${{values.deployment_requires_approval}} + deploymentRequiresApproval: ${{values.deploymentRequiresApproval}} tags: - aws - aws-environment diff --git a/backstage-reference/templates/aws-environment/template.yaml b/backstage-reference/templates/aws-environment/template.yaml index dd0a1a6f..6a5a89ac 100644 --- 
a/backstage-reference/templates/aws-environment/template.yaml +++ b/backstage-reference/templates/aws-environment/template.yaml @@ -18,7 +18,7 @@ spec: - short_name - description - environment_type - - deployment_requires_approval + - deploymentRequiresApproval - owner - account_type - region_type @@ -54,7 +54,7 @@ spec: - AWS ECS - AWS EKS - AWS Serverless - deployment_requires_approval: + deploymentRequiresApproval: title: Deployment requires approval description: is approval required before deploying to this environment? type: boolean @@ -158,7 +158,8 @@ spec: input: url: ./content values: - name: ${{ parameters.name }} + name: ${{ parameters.name | lower }} + title: ${{ parameters.name }} short_name: ${{ parameters.short_name }} description: ${{ parameters.description }} environment_type: ${{parameters.environment_type}} @@ -171,7 +172,7 @@ spec: providers: ${{parameters.providers}} level: ${{parameters.level}} repoUrl: ${{ parameters.repoUrl }} - deployment_requires_approval: ${{parameters.deployment_requires_approval}} + deploymentRequiresApproval: ${{parameters.deploymentRequiresApproval}} - id: publish name: Publish action: publish:gitlab diff --git a/backstage-reference/templates/aws-rds-resource/content/.backstage/catalog-info.yaml b/backstage-reference/templates/aws-rds-resource/content/.backstage/catalog-info.yaml index a1e59968..130eaf41 100644 --- a/backstage-reference/templates/aws-rds-resource/content/.backstage/catalog-info.yaml +++ b/backstage-reference/templates/aws-rds-resource/content/.backstage/catalog-info.yaml @@ -2,6 +2,7 @@ apiVersion: backstage.io/v1alpha1 kind: Resource metadata: name: ${{ values.component_id | dump }} + title: ${{ values.title | dump }} {%- if values.description %} description: ${{values.description | dump}} {%- endif %} @@ -11,13 +12,14 @@ metadata: - aws-resource - database annotations: - iac-type: cdk - resource-type: "aws-rds" - db-name: ${{ values.db_name | dump }} - db-object-name: ${{ values.db_object_name | dump }} - aws-arn: "" + iacType: cdk + resourceType: "aws-rds" + dbName: ${{ values.dbName | dump }} + dbObjectName: ${{ values.dbObjectName | dump }} + awsArn: "" spec: type: aws-resource + subType: "arn:aws:rds" owner: ${{ values.owner | dump }} lifecycle: experimental dependsOn: [] diff --git a/backstage-reference/templates/aws-rds-resource/content/.gitlab-ci.yml b/backstage-reference/templates/aws-rds-resource/content/.gitlab-ci.yml index bf668093..2936c6c5 100644 --- a/backstage-reference/templates/aws-rds-resource/content/.gitlab-ci.yml +++ b/backstage-reference/templates/aws-rds-resource/content/.gitlab-ci.yml @@ -1,11 +1,11 @@ stages: - env-creation - - prepare-${{values.aws_environment_name}}-stage + - prepare-${{values.awsEnvironmentName}}-stage variables: APP_SHORT_NAME: "${{ values.component_id }}" APP_TEMPLATE_NAME: "example-generic" - OPA_PLATFORM_REGION: "${{ values.platform_region }}" + OPA_PLATFORM_REGION: "${{ values.platformRegion }}" include: - project: 'opa-admin/backstage-reference' diff --git a/backstage-reference/templates/aws-rds-resource/content/queries.js b/backstage-reference/templates/aws-rds-resource/content/queries.js index 8145d10e..798a72da 100644 --- a/backstage-reference/templates/aws-rds-resource/content/queries.js +++ b/backstage-reference/templates/aws-rds-resource/content/queries.js @@ -14,18 +14,18 @@ const Pool = require('pg').Pool; const pool = new Pool({ user, host, database, password, port }); const createTable = () => { - pool.query('CREATE TABLE IF NOT EXISTS ${{ values.db_name }} (ID 
SERIAL PRIMARY KEY, name VARCHAR(30), email VARCHAR(30) );', (error, results) => { + pool.query('CREATE TABLE IF NOT EXISTS ${{ values.dbName }} (ID SERIAL PRIMARY KEY, name VARCHAR(30), email VARCHAR(30) );', (error, results) => { if (error) { console.error('Error: ', error); } else { - console.log(`Created ${{ values.db_name }} table. Results: ${JSON.stringify(results)}`) + console.log(`Created ${{ values.dbName }} table. Results: ${JSON.stringify(results)}`) } }) } -// Get all ${{ values.db_object_name }}s -const get${{ values.db_object_name | capitalize }}s = (request, response) => { - pool.query('SELECT * FROM ${{ values.db_name }} ORDER BY id ASC', (error, results) => { +// Get all ${{ values.dbObjectName }}s +const get${{ values.dbObjectName | capitalize }}s = (request, response) => { + pool.query('SELECT * FROM ${{ values.dbName }} ORDER BY id ASC', (error, results) => { if (error) { console.error('Error: ', error); response.status(500).json({error: JSON.stringify(error)}); @@ -35,11 +35,11 @@ const get${{ values.db_object_name | capitalize }}s = (request, response) => { }) } -// Get a single ${{ values.db_object_name }} by id -const get${{ values.db_object_name | capitalize }}ById = (request, response) => { +// Get a single ${{ values.dbObjectName }} by id +const get${{ values.dbObjectName | capitalize }}ById = (request, response) => { const id = parseInt(request.params.id) - pool.query('SELECT * FROM ${{ values.db_name }} WHERE id = $1', [id], (error, results) => { + pool.query('SELECT * FROM ${{ values.dbName }} WHERE id = $1', [id], (error, results) => { if (error) { console.error('Error: ', error); response.status(500).json({error: JSON.stringify(error)}); @@ -49,58 +49,58 @@ const get${{ values.db_object_name | capitalize }}ById = (request, response) => }) } -// Create a new ${{ values.db_object_name }} -const create${{ values.db_object_name | capitalize }} = (request, response) => { +// Create a new ${{ values.dbObjectName }} +const create${{ values.dbObjectName | capitalize }} = (request, response) => { const { name, email } = request.body - pool.query('INSERT INTO ${{ values.db_name }} (name, email) VALUES ($1, $2) RETURNING *', [name, email], (error, results) => { + pool.query('INSERT INTO ${{ values.dbName }} (name, email) VALUES ($1, $2) RETURNING *', [name, email], (error, results) => { if (error) { console.error('Error: ', error); response.status(500).json({error: JSON.stringify(error)}); } else { - response.status(201).send(`${{ values.db_object_name | capitalize }} added with ID: ${results.rows[0].id}`) + response.status(201).send(`${{ values.dbObjectName | capitalize }} added with ID: ${results.rows[0].id}`) } }) } -// Update an existing ${{ values.db_object_name }} -const update${{ values.db_object_name | capitalize }} = (request, response) => { +// Update an existing ${{ values.dbObjectName }} +const update${{ values.dbObjectName | capitalize }} = (request, response) => { const id = parseInt(request.params.id) const { name, email } = request.body pool.query( - 'UPDATE ${{ values.db_name }} SET name = $1, email = $2 WHERE id = $3', + 'UPDATE ${{ values.dbName }} SET name = $1, email = $2 WHERE id = $3', [name, email, id], (error, results) => { if (error) { console.error('Error: ', error); response.status(500).json({error: JSON.stringify(error)}); } else { - response.status(200).send(`${{ values.db_object_name | capitalize }} modified with ID: ${id}`) + response.status(200).send(`${{ values.dbObjectName | capitalize }} modified with ID: ${id}`) } } ) } -// Delete a 
${{ values.db_object_name }} -const delete${{ values.db_object_name | capitalize }} = (request, response) => { +// Delete a ${{ values.dbObjectName }} +const delete${{ values.dbObjectName | capitalize }} = (request, response) => { const id = parseInt(request.params.id) - pool.query('DELETE FROM ${{ values.db_name }} WHERE id = $1', [id], (error, results) => { + pool.query('DELETE FROM ${{ values.dbName }} WHERE id = $1', [id], (error, results) => { if (error) { console.error('Error: ', error); response.status(500).json({error: JSON.stringify(error)}); } else { - response.status(200).send(`${{ values.db_object_name | capitalize }} deleted with ID: ${id}`) + response.status(200).send(`${{ values.dbObjectName | capitalize }} deleted with ID: ${id}`) } }) } module.exports = { createTable, - get${{ values.db_object_name | capitalize }}s, - get${{ values.db_object_name | capitalize }}ById, - create${{ values.db_object_name | capitalize }}, - update${{ values.db_object_name | capitalize }}, - delete${{ values.db_object_name | capitalize }}, + get${{ values.dbObjectName | capitalize }}s, + get${{ values.dbObjectName | capitalize }}ById, + create${{ values.dbObjectName | capitalize }}, + update${{ values.dbObjectName | capitalize }}, + delete${{ values.dbObjectName | capitalize }}, } \ No newline at end of file diff --git a/backstage-reference/templates/aws-rds-resource/template.yaml b/backstage-reference/templates/aws-rds-resource/template.yaml index 9e50c0b3..b28df640 100644 --- a/backstage-reference/templates/aws-rds-resource/template.yaml +++ b/backstage-reference/templates/aws-rds-resource/template.yaml @@ -53,17 +53,17 @@ spec: defaultKind: AWSEnvironment - title: Provide database configuration required: - - db_name - - db_object_name + - dbName + - dbObjectName - db_type - instance_size properties: - db_name: + dbName: title: Database name type: string description: The name of a default database to create in the RDS instance default: usersdb - db_object_name: + dbObjectName: title: Object name type: string description: >- @@ -140,11 +140,11 @@ spec: input: path: .awsdeployment/providers/${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName}}.properties content: | - APP_SHORT_NAME=${{ parameters.component_id }} + APP_SHORT_NAME=${{ parameters.component_id | lower }} TARGET_VPCID=${{ each.value.vpcId }} TARGET_ENV_NAME=${{ steps['opaGetAwsEnvProviders'].output.envName }} TARGET_ENV_PROVIDER_NAME=${{ each.value.envProviderName }} - TARGET_DB_NAME=${{ parameters.db_name }} + TARGET_DB_NAME=${{ parameters.dbName }} TARGET_DB_TYPE=${{ parameters.db_type }} TARGET_DB_SIZE=${{ parameters.instance_size }} ACCOUNT=${{ each.value.accountId }} @@ -160,28 +160,29 @@ spec: url: https://{{ gitlab_hostname }}/opa-admin/backstage-reference/-/tree/main/common/aws_rds targetPath: ./.iac values: - component_id: ${{ parameters.component_id }} - app_env_plaintext: "" + component_id: ${{ parameters.component_id | lower}} + appEnvPlaintext: "" - id: fetchBase name: Fetch Base action: fetch:template input: url: ./content values: - component_id: ${{ parameters.component_id }} + component_id: ${{ parameters.component_id | lower }} + title: ${{ parameters.component_id }} description: ${{ parameters.description }} owner: ${{ parameters.owner }} - platform_region: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} - aws_environment: ${{ steps['opaGetAwsEnvProviders'].output.envRef }} - aws_environment_name: ${{ steps['opaGetAwsEnvProviders'].output.envName }} - 
aws_environment_provider_name: ${{ steps['opaGetAwsEnvProviders'].output.envProviders[0].envProviderName }} - aws_environment_prefix: ${{ steps['opaGetAwsEnvProviders'].output.envProviders[0].envProviderPrefix }} - aws_region: ${{ steps['opaGetAwsEnvProviders'].output.envProviders[0].region }} - aws_account: ${{ steps['opaGetAwsEnvProviders'].output.envProviders[0].accountId }} + platformRegion: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} + awsEnvironment: ${{ steps['opaGetAwsEnvProviders'].output.envRef }} + awsEnvironmentName: ${{ steps['opaGetAwsEnvProviders'].output.envName }} + awsEnvironmentProviderName: ${{ steps['opaGetAwsEnvProviders'].output.envProviders[0].envProviderName }} + awsEnvironmentPrefix: ${{ steps['opaGetAwsEnvProviders'].output.envProviders[0].envProviderPrefix }} + awsRegion: ${{ steps['opaGetAwsEnvProviders'].output.envProviders[0].region }} + awsAccount: ${{ steps['opaGetAwsEnvProviders'].output.envProviders[0].accountId }} destination: ${{ parameters.repoUrl | parseRepoUrl }} assumedRoleArn: ${{ steps['opaGetAwsEnvProviders'].output.envProviders[0].assumedRoleArn }} - db_name: ${{ parameters.db_name }} - db_object_name: ${{ parameters.db_object_name }} + dbName: ${{ parameters.dbName }} + dbObjectName: ${{ parameters.dbObjectName }} - id: publish name: Publish action: publish:gitlab diff --git a/backstage-reference/templates/example-nodejs-efs/.gitignore b/backstage-reference/templates/aws-s3-resource/.gitignore similarity index 100% rename from backstage-reference/templates/example-nodejs-efs/.gitignore rename to backstage-reference/templates/aws-s3-resource/.gitignore diff --git a/backstage-reference/templates/aws-s3-resource/content/.backstage/catalog-info.yaml b/backstage-reference/templates/aws-s3-resource/content/.backstage/catalog-info.yaml new file mode 100644 index 00000000..7832b660 --- /dev/null +++ b/backstage-reference/templates/aws-s3-resource/content/.backstage/catalog-info.yaml @@ -0,0 +1,24 @@ +apiVersion: backstage.io/v1alpha1 +kind: Resource +metadata: + name: ${{ values.component_id | dump }} + title: ${{ values.title | dump }} + {%- if values.description %} + description: ${{values.description | dump}} + {%- endif %} + tags: + - aws + - bucket + - aws-resource + - s3 + annotations: + iacType: cdk + resourceType: "aws-s3" + bucketName: ${{ values.bucketName | dump }} + awsArn: "" +spec: + type: aws-resource + subType: "arn:aws:s3" + owner: ${{ values.owner | dump }} + lifecycle: experimental + dependsOn: [] diff --git a/backstage-reference/templates/example-nodejs-efs/content/.editorconfig b/backstage-reference/templates/aws-s3-resource/content/.editorconfig similarity index 100% rename from backstage-reference/templates/example-nodejs-efs/content/.editorconfig rename to backstage-reference/templates/aws-s3-resource/content/.editorconfig diff --git a/backstage-reference/templates/example-nodejs-efs/content/.gitignore b/backstage-reference/templates/aws-s3-resource/content/.gitignore similarity index 100% rename from backstage-reference/templates/example-nodejs-efs/content/.gitignore rename to backstage-reference/templates/aws-s3-resource/content/.gitignore diff --git a/backstage-reference/templates/aws-s3-resource/content/.gitlab-ci.yml b/backstage-reference/templates/aws-s3-resource/content/.gitlab-ci.yml new file mode 100644 index 00000000..1ae1cbe7 --- /dev/null +++ b/backstage-reference/templates/aws-s3-resource/content/.gitlab-ci.yml @@ -0,0 +1,16 @@ +stages: + - env-creation + - prepare-${{values.awsEnvironmentName}}-stage + 
+variables: + APP_SHORT_NAME: "${{ values.component_id }}" + APP_TEMPLATE_NAME: "example-generic" + OPA_PLATFORM_REGION: "${{ values.platformRegion }}" + +include: + - project: 'opa-admin/backstage-reference' + ref: main + file: + - 'common/cicd/.gitlab-ci-job-defaults-cdk.yml' + - 'common/cicd/.gitlab-ci-aws-base.yml' + - 'common/cicd/.gitlab-ci-aws-iac-s3.yml' diff --git a/backstage-reference/templates/aws-s3-resource/content/package.json b/backstage-reference/templates/aws-s3-resource/content/package.json new file mode 100644 index 00000000..68018210 --- /dev/null +++ b/backstage-reference/templates/aws-s3-resource/content/package.json @@ -0,0 +1,16 @@ +{ + "name": "${{ values.component_id }}", + "private": true, + "version": "0.1.0", + "description": "${{ values.description }}", + "main": "index.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "author": "${{ values.owner }}", + "license": "ISC", + "dependencies": { + "express": "^4.18.2", + "pg": "^8.10.0" + } +} diff --git a/backstage-reference/templates/aws-s3-resource/template.yaml b/backstage-reference/templates/aws-s3-resource/template.yaml new file mode 100644 index 00000000..5b5e63fd --- /dev/null +++ b/backstage-reference/templates/aws-s3-resource/template.yaml @@ -0,0 +1,166 @@ +apiVersion: scaffolder.backstage.io/v1beta3 +# https://backstage.io/docs/features/software-catalog/descriptor-format#kind-template +kind: Template +metadata: + name: aws-s3 + title: AWS S3 Bucket + description: >- + Create an Amazon S3 Bucket. + tags: + - aws-resource + - s3 + - aws +spec: + owner: group:admins + type: resource + parameters: + - title: Provide resource information + required: + - component_id + - owner + properties: + component_id: + title: Name + type: string + description: Unique name of the component + ui:field: EntityNamePicker + ui:autofocus: true + description: + title: Description + type: string + description: what this bucket will be used for + owner: + title: Owner + type: string + description: Owner of the resource + ui:field: OwnerPicker + ui:options: + catalogFilter: + kind: [Group] + - title: Provide deployment information for the resource + required: + - environment + properties: + environment: + title: AWS Environment + type: string + description: The AWS Environment where the bucket is created + ui:field: EntityPicker + ui:options: + allowedKinds: + - AWSEnvironment + defaultKind: AWSEnvironment + - title: Provide Bucket configuration + required: + - bucketName + properties: + bucketName: + title: Bucket name + type: string + description: The name of the bucket to create. + minLength: 3 + maxLength: 63 + pattern: '(?!(^xn--|.+-s3alias$|.+--ol-s3$))^[a-z0-9][a-z0-9-]{1,61}[a-z0-9]$' + ui:autofocus: true + ui:help: "The name must follow AWS s3 bucket naming rules" + - title: Choose a git repository location + required: + - repoUrl + properties: + repoUrl: + title: Repository Location + type: string + ui:field: RepoUrlPicker + ui:options: + allowedHosts: + - {{ gitlab_hostname }} + allowedOwners: + - aws-resources + # These steps are executed in the scaffolder backend, using data that we gathered + # via the parameters above. 
+ steps: + - id: opaGetPlatformInfo + name: Get OPA platform information + action: opa:get-platform-metadata + + - id: opaGetAwsEnvProviders + name: Get AWS Environment Providers + action: opa:get-env-providers + input: + environmentRef: ${{ parameters.environment }} + + - id: debugEnvironment + name: Print the environment entity info + action: debug:log + input: + message: ${{ steps['opaGetAwsEnvProviders'].output | dump }} + + - id: createProviderPropsFiles + each: ${{ steps['opaGetAwsEnvProviders'].output.envProviders }} + name: Store environment provider parameters + action: roadiehq:utils:fs:write + input: + path: .awsdeployment/providers/${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName}}.properties + content: | + APP_SHORT_NAME=${{ parameters.component_id | lower }} + TARGET_VPCID=${{ each.value.vpcId }} + TARGET_ENV_NAME=${{ steps['opaGetAwsEnvProviders'].output.envName }} + TARGET_ENV_PROVIDER_NAME=${{ each.value.envProviderName }} + TARGET_BUCKET_NAME=${{ parameters.bucketName }} + ACCOUNT=${{ each.value.accountId }} + REGION=${{ each.value.region }} + PREFIX=${{ each.value.envProviderPrefix }} + ENV_ROLE_ARN=${{ each.value.assumedRoleArn }} + OPA_CI_ENVIRONMENT=${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} + OPA_CI_ENVIRONMENT_MANUAL_APPROVAL={% if steps['opaGetAwsEnvProviders'].output.envDeployManualApproval %}true{% else %}false{% endif %} + - id: fetchIac + name: Fetch S3 Infrastructure as Code + action: fetch:template + input: + url: https://{{ gitlab_hostname }}/opa-admin/backstage-reference/-/tree/main/common/aws_s3 + targetPath: ./.iac + values: + component_id: ${{ parameters.component_id | lower}} + appEnvPlaintext: "" + - id: fetchBase + name: Fetch Base + action: fetch:template + input: + url: ./content + values: + component_id: ${{ parameters.component_id | lower }} + title: ${{ parameters.component_id }} + description: ${{ parameters.description }} + owner: ${{ parameters.owner }} + platformRegion: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} + awsEnvironment: ${{ steps['opaGetAwsEnvProviders'].output.envRef }} + awsEnvironmentName: ${{ steps['opaGetAwsEnvProviders'].output.envName }} + awsEnvironmentProviderName: ${{ steps['opaGetAwsEnvProviders'].output.envProviders[0].envProviderName }} + awsEnvironmentPrefix: ${{ steps['opaGetAwsEnvProviders'].output.envProviders[0].envProviderPrefix }} + awsRegion: ${{ steps['opaGetAwsEnvProviders'].output.envProviders[0].region }} + awsAccount: ${{ steps['opaGetAwsEnvProviders'].output.envProviders[0].accountId }} + destination: ${{ parameters.repoUrl | parseRepoUrl }} + assumedRoleArn: ${{ steps['opaGetAwsEnvProviders'].output.envProviders[0].assumedRoleArn }} + bucketName: ${{ parameters.bucketName }} + - id: publish + name: Publish + action: publish:gitlab + input: + repoUrl: ${{ parameters.repoUrl }} + repoVisibility: internal + defaultBranch: main + - id: register + name: Register + action: catalog:register + input: + repoContentsUrl: ${{ steps['publish'].output.repoContentsUrl }} + catalogInfoPath: "/.backstage/catalog-info.yaml" + # Outputs are displayed to the user after a successful execution of the template. 
+ output: + links: + - title: Repository + url: ${{ steps['publish'].output.remoteUrl }} + - title: Open in catalog + icon: catalog + entityRef: ${{ steps['register'].output.entityRef }} + diff --git a/backstage-reference/templates/aws-serverless-environment-provider/content/.backstage/catalog-info.yaml b/backstage-reference/templates/aws-serverless-environment-provider/content/.backstage/catalog-info.yaml index a577141b..6ce36ce3 100644 --- a/backstage-reference/templates/aws-serverless-environment-provider/content/.backstage/catalog-info.yaml +++ b/backstage-reference/templates/aws-serverless-environment-provider/content/.backstage/catalog-info.yaml @@ -1,7 +1,8 @@ apiVersion: aws.backstage.io/v1alpha kind: AWSEnvironmentProvider metadata: - name: ${{values.name | dump}} + name: ${{values.name | dump }} + title: ${{ values.title | dump }} prefix: ${{values.prefix | dump}} {%- if values.description %} description: ${{values.description | dump}} @@ -10,10 +11,10 @@ metadata: - aws - aws-environment-provider - serverless - env-type: ${{values.env_type}} - aws-account: "${{values.aws_account}}" - aws-region: ${{values.aws_region}} - environment_role: "${{values.environment_role}}" + envType: ${{values.envType}} + awsAccount: "${{values.awsAccount}}" + awsRegion: ${{values.awsRegion}} + environmentRole: "${{values.environmentRole}}" vpc: "" spec: type: environment-provider diff --git a/backstage-reference/templates/aws-serverless-environment-provider/content/.gitlab-ci.yml b/backstage-reference/templates/aws-serverless-environment-provider/content/.gitlab-ci.yml index 9dfce311..d612bf95 100644 --- a/backstage-reference/templates/aws-serverless-environment-provider/content/.gitlab-ci.yml +++ b/backstage-reference/templates/aws-serverless-environment-provider/content/.gitlab-ci.yml @@ -1,9 +1,9 @@ image: node:18 variables: - AWS_ACCOUNT: "${{ values.aws_account }}" - AWS_DEFAULT_REGION: "${{ values.aws_region }}" - ROLE_ARN: "${{values.environment_role}}" - OPA_PLATFORM_REGION: "${{ values.platform_region }}" + AWS_ACCOUNT: "${{ values.awsAccount }}" + AWS_DEFAULT_REGION: "${{ values.awsRegion }}" + ROLE_ARN: "${{values.environmentRole}}" + OPA_PLATFORM_REGION: "${{ values.platformRegion }}" stages: - build diff --git a/backstage-reference/templates/aws-serverless-environment-provider/content/stack-parameters.properties b/backstage-reference/templates/aws-serverless-environment-provider/content/stack-parameters.properties index 88150149..4d735c35 100644 --- a/backstage-reference/templates/aws-serverless-environment-provider/content/stack-parameters.properties +++ b/backstage-reference/templates/aws-serverless-environment-provider/content/stack-parameters.properties @@ -1,8 +1,9 @@ PREFIX=${{ values.prefix }} ENV_NAME=${{ values.name }} -AWS_ACCOUNT_ID=${{ values.aws_account }} -AWS_DEFAULT_REGION=${{ values.aws_region }} -PLATFORM_ROLE_ARN=${{ values.platform_role }} -PIPELINE_ROLE_ARN=${{ values.pipeline_role }} -PROVISIONING_ROLE_ARN=${{values.environment_role}} -ENV_CIDR=${{ values.cidr }} \ No newline at end of file +AWS_ACCOUNT_ID=${{ values.awsAccount }} +AWS_DEFAULT_REGION=${{ values.awsRegion }} +PLATFORM_ROLE_ARN=${{ values.platformRole }} +PIPELINE_ROLE_ARN=${{ values.pipelineRole }} +PROVISIONING_ROLE_ARN=${{values.environmentRole}} +VPC_ID=${{ values.vpcId }} +ENV_CIDR=${{ values.cidr }} diff --git a/backstage-reference/templates/aws-serverless-environment-provider/template.yaml b/backstage-reference/templates/aws-serverless-environment-provider/template.yaml index 
e878c7fe..9d7ed216 100644 --- a/backstage-reference/templates/aws-serverless-environment-provider/template.yaml +++ b/backstage-reference/templates/aws-serverless-environment-provider/template.yaml @@ -18,9 +18,10 @@ spec: - prefix - env_description - owner - - aws_account_id - - aws_region - - environment_role + - awsAccountID + - awsRegion + - environmentRole + - vpcConfiguration - cidr properties: name: @@ -45,12 +46,12 @@ spec: ui:options: catalogFilter: kind: [Group] - aws_account_id: + awsAccountID: title: AWS Account number type: string description: Enter an AWS account number minLength: 12 - aws_region: + awsRegion: title: AWS Region type: string description: Select AWS region @@ -111,15 +112,44 @@ spec: - 'Middle East (Bahrain)' - 'Middle East (UAE)' - 'South America (São Paulo)' - environment_role: - title: Environment Role arn + environmentRole: + title: Environment Role ARN type: string - description: The role arn to assume in order to provision the new environment. - cidr: - title: CIDR + description: The IAM role ARN to assume in order to provision the new environment. + vpcConfiguration: + title: VPC Configuration + description: Choose 'Use Existing VPC' to utilize an existing VPC, or continue with 'Create New VPC' for a new setup. type: string - description: The network cidr of the environment provider - default: 10.10.0.0/24 + default: create_new + enum: + - create_new + - use_existing + enumNames: + - 'Create New VPC' + - 'Use Existing VPC' + + # Ask for different input based on whether the user wants to create a new VPC or use an existing one + dependencies: + vpcConfiguration: + oneOf: + - properties: + vpcConfiguration: + enum: + - use_existing + vpcId: + title: VPC ID + type: string + description: Specify the existing VPC ID + - properties: + vpcConfiguration: + enum: + - create_new + cidr: + title: CIDR + type: string + description: Specify the CIDR block for the new VPC. 
Default is 10.0.0.0/24 + default: 10.0.0.0/24 + - title: Provide Repository information required: - repoUrl @@ -151,24 +181,46 @@ spec: input: url: ./content values: - name: ${{ parameters.name }} + name: ${{ parameters.name | lower }} + title: ${{ parameters.name }} description: ${{ parameters.env_description }} owner: ${{ parameters.owner }} - env_type: serverless - platform_region: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} - aws_region: ${{ parameters.aws_region }} - aws_account: ${{ parameters.aws_account_id }} - environment_role: ${{ parameters.environment_role }} - platform_role: ${{steps['resolvePlatformVariables'].output.params['/opa/platform-role'] }} - pipeline_role: ${{steps['resolvePlatformVariables'].output.params['/opa/pipeline-role'] }} + envType: serverless + platformRegion: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} + awsRegion: ${{ parameters.awsRegion }} + awsAccount: ${{ parameters.awsAccountID }} + environmentRole: ${{ parameters.environmentRole }} + platformRole: ${{steps['resolvePlatformVariables'].output.params['/opa/platform-role'] }} + pipelineRole: ${{steps['resolvePlatformVariables'].output.params['/opa/pipeline-role'] }} prefix: ${{parameters.prefix}} cidr: ${{parameters.cidr}} + vpcId: ${{parameters.vpcId|default('', true)}} - id: fetchBaseIAC name: Fetch IAC Base action: fetch:plain input: - targetPath: ./.iac - url: "https://{{gitlab_hostname}}/opa-admin/backstage-reference/-/tree/main/environments" + targetPath: ./.iac/opa-serverless-environment + url: "https://{{gitlab_hostname}}/opa-admin/backstage-reference/-/tree/main/environments/opa-serverless-environment" + - id: fetchIacCommonConstructs + name: Fetch IaC Common Constructs + action: fetch:plain + input: + targetPath: ./.iac/opa-common-constructs + url: "https://{{gitlab_hostname}}/opa-admin/backstage-reference/-/tree/main/environments/opa-common-constructs" + - id: createYarnPackageFile + name: Configure Yarn Package Manager + action: roadiehq:utils:fs:write + input: + path: ./.iac/package.json + content: | + { + "private": true, + "name": "aws-application-development", + "workspaces": [ + "opa-serverless-environment", + "opa-common-constructs" + ] + } - id: publish name: Publish action: publish:gitlab diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/.gitignore b/backstage-reference/templates/example-eks-nodejs-rds-helm/.gitignore new file mode 100644 index 00000000..fcc2fc6b --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/.gitignore @@ -0,0 +1,2 @@ +# be sure to include .js files +!**/*.js \ No newline at end of file diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/.backstage/catalog-info.yaml b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/.backstage/catalog-info.yaml new file mode 100644 index 00000000..b2b4b06c --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/.backstage/catalog-info.yaml @@ -0,0 +1,26 @@ +apiVersion: backstage.io/v1alpha1 +kind: Component +metadata: + name: ${{ values.component_id | dump }} + title: ${{ values.title | dump }} + {%- if values.description %} + description: ${{values.description | dump}} + {%- endif %} + tags: + - aws + - nodejs + - k8s + # links: + # - title: Example Title + # url: http://www.example.com + iacType: cdk + repoSecretArn: ${{ values.awsSecretRepoArn | dump }} + + # Configure where k8s configurations are within the project + k8sConfigDirName: k8s +spec: + type: aws-app + subType: aws-eks 
+ owner: ${{ values.owner | dump }} + lifecycle: experimental + dependsOn: [] diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/.dockerignore b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/.dockerignore new file mode 100644 index 00000000..93f13619 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/.dockerignore @@ -0,0 +1,2 @@ +node_modules +npm-debug.log diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/.editorconfig b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/.editorconfig new file mode 100644 index 00000000..81357d3e --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/.editorconfig @@ -0,0 +1,36 @@ +# EditorConfig is awesome: https://EditorConfig.org + +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +charset = utf-8 +indent_style = space +max_line_length = 120 + +[*.html] +indent_style = space +indent_size = 2 + +[*.{ts,json,js,tsx,jsx}] +indent_style = space +indent_size = 2 + +[*.md] +indent_size = 2 +indent_style = space + +[Dockerfile] +indent_style = space +indent_size = 2 + +[*.{yml,yaml}] +indent_size = 2 +indent_style = space + +[Makefile] +indent_size = 4 +indent_style = tab diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/.gitignore b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/.gitignore new file mode 100644 index 00000000..37fe0ff0 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/.gitignore @@ -0,0 +1,64 @@ +# macOS +.DS_Store + +# Intellij +.idea/ +*.iml + +# VS Code +.vscode/** +!.vscode/launch.json +!.vscode/tasks.json +.vsls.json +git-temp + +# logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +lerna-debug.log* + +# Coverage directory +coverage + +# Dependency directories +node_modules/ +**/.venv/ +**/__pycache__ + +# Yarn 3 files +.pnp.* +**/.yarn/* +!**/.yarn/patches +!**/.yarn/plugins +!**/.yarn/releases +!**/.yarn/sdks +!**/.yarn/versions + +# Node version directives +.nvmrc + +# Optional eslint cache +.eslintcache + +# dotenv environment variables file +.env +.env.* + +# Local configuration files +*.local.yaml + + +# transpiled JavaScript, Typings and test files +*.d.ts +!jest.config.js + +# CDK asset staging directory +.cdk.staging +cdk.out +cdk.context.json + +# Temp files +**/*.bak \ No newline at end of file diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/.gitlab-ci.yml b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/.gitlab-ci.yml new file mode 100644 index 00000000..4a0a5763 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/.gitlab-ci.yml @@ -0,0 +1,20 @@ +stages: + - env-creation + - prepare-${{values.awsEnvironmentName}}-stage + - ${{values.awsEnvironmentName}}-stage + +variables: + APP_SHORT_NAME: "${{ values.component_id }}" + APP_TEMPLATE_NAME: "example-eks-nodejs-rds" + OPA_PLATFORM_REGION: "${{ values.platformRegion }}" + +include: + - project: 'opa-admin/backstage-reference' + ref: main + file: + - 'common/cicd/.gitlab-ci-job-defaults-cdk.yml' + - 'common/cicd/.gitlab-ci-aws-base.yml' + - 'common/cicd/.gitlab-ci-aws-iac-eks.yml' + # If you want to call kubectl directly instead of going through lambda, comment above line and uncomment below line + # - 
'common/cicd/.gitlab-ci-aws-iac-eks-kubectl.yml' + - 'common/cicd/.gitlab-ci-aws-image-kaniko.yml' diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/Dockerfile b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/Dockerfile new file mode 100644 index 00000000..98df309f --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/Dockerfile @@ -0,0 +1,18 @@ +FROM node:20 + +# Create app directory +WORKDIR /usr/src/app + +# Install app dependencies +COPY "src/package.json" ./ + +RUN yarn install + +# Bundle app source +COPY . . + +# Specify a non-root user +USER nobody + +EXPOSE ${{ values.appPort }} +CMD [ "node", "src/index.js" ] diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/README.md b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/README.md new file mode 100644 index 00000000..d2e3cb02 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/README.md @@ -0,0 +1,10 @@ +# Instructions +To use this Node.js with RDS app, follow the steps below: + +1. Bind your app to an RDS database - use the binding feature under Management -> Bind Resource -> Add +2. Once the binding completes, you should see the RDS database listed in your "bound resources" +3. Go to the DB resource entity page and copy the secret name of the database +4. Within your app, set the two environment variables below using the environment variables shown on your Overview tab: + 1. DB_SECRET=YourDBSecretHere + 2. AWS_REGION=YourDBRegion - e.g. us-east-1 +5. Start your app; you should be able to connect to the database and execute queries. \ No newline at end of file diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/.helmignore b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/Chart.yaml b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/Chart.yaml new file mode 100644 index 00000000..9539fa5b --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/Chart.yaml @@ -0,0 +1,16 @@ +apiVersion: v2 +name: ${{ values.component_id }} +description: Example Node.js App With RDS + +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "0.1.0" diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/NOTES.txt b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/NOTES.txt new file mode 100644 index 00000000..6f54f187 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/NOTES.txt @@ -0,0 +1,8 @@ +Thank you for installing {{ .Chart.Name }}. + +Your release is named {{ .Release.Name }}. + +To learn more about the release, try: + + $ helm status {{ .Release.Name }} + $ helm get all {{ .Release.Name }} diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/_helpers.tpl b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/_helpers.tpl new file mode 100644 index 00000000..70c7efc1 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/_helpers.tpl @@ -0,0 +1,34 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "${{ values.component_id }}.name" -}} +{{ .Chart.Name | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "${{ values.component_id }}.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "${{ values.component_id }}.labels" -}} +helm.sh/chart: {{ include "${{ values.component_id }}.chart" . }} +{{ include "${{ values.component_id }}.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +See https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ +*/}} +{{- define "${{ values.component_id }}.selectorLabels" -}} +app.kubernetes.io/name: {{ include "${{ values.component_id }}.name" . }} +app.kubernetes.io/env: {{ .Values.opa.environmentName }} +{{- end }} diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/deployment.yaml b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/deployment.yaml new file mode 100644 index 00000000..d6f7b81c --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/deployment.yaml @@ -0,0 +1,53 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Values.deployment.name }} + namespace: {{ .Values.namespace }} + labels: + {{- include "${{ values.component_id }}.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.deployment.replicaCount }} + selector: + matchLabels: + {{- include "${{ values.component_id }}.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "${{ values.component_id }}.labels" . | nindent 8 }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/envVarsConfigMap.yaml") . 
| sha256sum }} + spec: + serviceAccountName: {{ .Values.serviceAccount.name }} + containers: + - name: webapp + image: {{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + envFrom: + - configMapRef: + name: {{ .Values.configMap.name }} + resources: + limits: + memory: 512Mi + cpu: "1" + requests: + cpu: "100m" + ports: + - containerPort: {{ .Values.image.port }} + livenessProbe: + httpGet: + path: / + port: {{ .Values.image.port }} + initialDelaySeconds: 2 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: / + port: {{ .Values.image.port }} + initialDelaySeconds: 2 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + + diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/envVarsConfigMap.yaml b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/envVarsConfigMap.yaml new file mode 100644 index 00000000..a068ed27 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/envVarsConfigMap.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.configMap.name }} + namespace: {{ .Values.namespace }} + labels: + {{- include "${{ values.component_id }}.labels" . | nindent 4 }} +data: + ENVIRONMENT_NAME: {{ .Values.opa.environmentName }} diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/ingress.yaml b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/ingress.yaml new file mode 100644 index 00000000..a652e493 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/ingress.yaml @@ -0,0 +1,45 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ .Values.ingress.name }} + namespace: {{ .Values.namespace }} + annotations: + kubernetes.io/ingress.class: "alb" + alb.ingress.kubernetes.io/scheme: "internet-facing" + alb.ingress.kubernetes.io/healthcheck-path: "/health" + alb.ingress.kubernetes.io/success-codes: "200,201,302" + alb.ingress.kubernetes.io/target-type: "ip" + + # Tag load balancer so that it shows up as an OPA application resource + # Note - this setting must be overridden per environment provider + alb.ingress.kubernetes.io/tags: {{ .Values.ingress.albTags }} + + # Allows multiple services to use the same ALB + alb.ingress.kubernetes.io/group.name: {{ .Values.ingress.groupName }} + + # Configure the load balancer name. Comment this out to have the name be auto-generated + # Load balancer name can only be up to 32 characters long + # alb.ingress.kubernetes.io/load-balancer-name: "" + + # To enable HTTPS, you need a valid SSL certificate + # Here are some example annotations to use for enabling an HTTPS listener for your load balancer: + # alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' + # alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}' + # alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:::certificate/ + + # To configure an IP allow-list for the load balancer + # alb.ingress.kubernetes.io/inbound-cidrs: 10.0.0.0/24 + + labels: + {{- include "${{ values.component_id }}.labels" . 
| nindent 4 }} +spec: + rules: + - http: + paths: + - path: /* + pathType: ImplementationSpecific + backend: + service: + name: {{ .Values.service.name }} + port: + number: 80 diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/namespace.yaml b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/namespace.yaml new file mode 100644 index 00000000..a6150cc9 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/namespace.yaml @@ -0,0 +1,6 @@ +kind: Namespace +apiVersion: v1 +metadata: + name: {{ .Values.namespace }} + labels: + name: {{ .Values.namespace }} diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/nsAdminRoleBinding.yaml b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/nsAdminRoleBinding.yaml new file mode 100644 index 00000000..0146312b --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/nsAdminRoleBinding.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Values.nsAdminRoleBinding.name }} + namespace: {{ .Values.namespace }} + labels: + {{- include "${{ values.component_id }}.labels" . | nindent 4 }} +subjects: +- kind: User + name: {{ .Values.appAdminRole }} + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io + diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/nsViewerRoleBinding.yaml b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/nsViewerRoleBinding.yaml new file mode 100644 index 00000000..ff3985a0 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/nsViewerRoleBinding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Values.nsViewerRoleBinding.name }} + labels: + {{- include "${{ values.component_id }}.labels" . | nindent 4 }} +subjects: +- kind: User + name: {{ .Values.appAdminRole }} + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: opa-namespace-viewer + apiGroup: rbac.authorization.k8s.io diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/service.yaml b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/service.yaml new file mode 100644 index 00000000..a8b85330 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.service.name }} + namespace: {{ .Values.namespace }} + labels: + {{- include "${{ values.component_id }}.labels" . | nindent 4 }} +spec: + selector: + {{- include "${{ values.component_id }}.selectorLabels" . 
| nindent 4 }} + ports: + - name: http + port: 80 + targetPort: {{ .Values.image.port }} + protocol: TCP + type: NodePort diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/serviceAccount.yaml b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/serviceAccount.yaml new file mode 100644 index 00000000..fcdb97b9 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/templates/serviceAccount.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name }} + namespace: {{ .Values.namespace }} + labels: + {{- include "${{ values.component_id }}.labels" . | nindent 4 }} + annotations: + eks.amazonaws.com/role-arn: {{ .Values.serviceAccount.roleArn }} diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/values.yaml b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/values.yaml new file mode 100644 index 00000000..4b96337c --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/k8s/values.yaml @@ -0,0 +1,42 @@ +# Default values for ${{ values.component_id }}. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +opa: + environmentName: ENV_PLACEHOLDER + +appAdminRole: APP_ADMIN_ROLE_PLACEHOLDER + +namespace: NS_PLACEHOLDER + +configMap: + name: ${{ values.component_id }}-env-vars-ENV_PLACEHOLDER + +deployment: + name: ${{ values.component_id }}-mainpod-ENV_PLACEHOLDER + replicaCount: 2 + +nsAdminRoleBinding: + name: ${{ values.component_id }}-admin-ENV_PLACEHOLDER + +nsViewerRoleBinding: + name: ${{ values.component_id }}-view-ns-ENV_PLACEHOLDER + +serviceAccount: + name: ${{ values.component_id }}-sa-ENV_PLACEHOLDER + roleArn: SA_ROLE_PLACEHOLDER + +ingress: + name: ${{ values.component_id }}-ingress-ENV_PLACEHOLDER + albTags: "aws-apps-${{ values.component_id }}-ENV_PLACEHOLDER-ENV_PROVIDER_PLACEHOLDER=${{ values.component_id }}" + groupName: ${{ values.component_id }}-ENV_PLACEHOLDER + +service: + name: ${{ values.component_id }}-service-ENV_PLACEHOLDER + +image: + repository: ACCT_PLACEHOLDER.dkr.ecr.REGION_PLACEHOLDER.amazonaws.com/${{ values.component_id }}-ENV_PLACEHOLDER-ENV_PROVIDER_PLACEHOLDER + pullPolicy: Always + # Overrides the image tag whose default is the chart appVersion. 
+ tag: "latest" + port: ${{ values.appPort }} diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/src/index.js b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/src/index.js new file mode 100644 index 00000000..e8c73337 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/src/index.js @@ -0,0 +1,95 @@ +const { SecretsManagerClient, GetSecretValueCommand } = require("@aws-sdk/client-secrets-manager"); +const express = require('express') +const app = express() +const port = process.env.PORT || 8080 + +const { Client } = require("pg") +const appName = "${{ values.component_id }}"; + +const ENVIRONMENT_NAME = process.env.ENVIRONMENT_NAME || 'Unknown Environment Name' +const DB_SECRET = process.env.DB_SECRET; +const AWS_REGION = process.env.AWS_REGION; +const config = { region: AWS_REGION || 'us-east-1' } +const secretsManagerClient = new SecretsManagerClient(config); + +app.get('/error', async (req, res) => { + throw new Error('Intentional error to demo seeing stack traces in logs'); +}) + +app.get('/health', async (req, res) => { + res.send('Success'); +}) + +app.get('/', async (req, res) => { + let apiResponse = `

${appName}
Environment: ${ENVIRONMENT_NAME}
Success
Note: the database connection has not yet been configured. (timestamp: ${new Date().toString()})`; + + if (DB_SECRET && AWS_REGION) { + const client = await connectDb(DB_SECRET, AWS_REGION); + console.log('Got DB connection!'); + + const queryResult = await queryDB(client, 'select now()'); + + if (queryResult) { + + let dbOutput = JSON.stringify(queryResult); + if (queryResult.rowCount === 1 && queryResult.rows) { + dbOutput = `According to the database, the current date/time is ${queryResult.rows[0].now}`; + } + apiResponse = `

${appName}
Environment: ${ENVIRONMENT_NAME}
Database Connection Successful!
${dbOutput}`; + + } else { + apiResponse = `

${appName}
Environment: ${ENVIRONMENT_NAME}
Database Connection Successful!
No response data was returned from query.`; + } + closeConnection(client); + } + + res.send(apiResponse); +}) + +app.listen(port, () => { + console.log(`Example RDS app listening on port ${port}`) +}) + +const getSecretValue = async (secretName, region) => { + console.log(`Retrieving secret: ${secretName} for region: ${region}`); + + let secret; + let secretValue = await secretsManagerClient.send( + new GetSecretValueCommand({ SecretId: secretName }) + ); + if (secretValue.SecretString) { + secret = secretValue.SecretString; + } + return secret ? JSON.parse(secret) : secret; +} + +const connectDb = async (secret, region) => { + const secretValues = await getSecretValue(secret, region); + console.log(`Will attempt to connect to database at ${secretValues.host}`) + try { + const client = new Client({ + user: secretValues.username, + host: secretValues.host, + database: secretValues.dbname, + password: secretValues.password, + port: secretValues.port + }) + console.log("Making a connection to db...") + await client.connect() + console.log("Connected to DB.") + return client; + + } catch (error) { + console.log(error) + } +} + +const closeConnection = async (client) => { + await client.end(); +} + +const queryDB = async (client, query) => { + const res = await client.query(query) + console.log(res) + return res; +} diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/content/src/package.json b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/src/package.json new file mode 100644 index 00000000..f3a4e53a --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/content/src/package.json @@ -0,0 +1,17 @@ +{ + "name": "${{ values.component_id }}", + "private": true, + "version": "0.1.0", + "description": "${{ values.description }}", + "main": "src/index.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "author": "${{ values.owner }}", + "license": "ISC", + "dependencies": { + "express": "^4.18.2", + "@aws-sdk/client-secrets-manager": "^3.480.0", + "pg": "^8.11.3" + } +} diff --git a/backstage-reference/templates/example-eks-nodejs-rds-helm/template.yaml b/backstage-reference/templates/example-eks-nodejs-rds-helm/template.yaml new file mode 100644 index 00000000..2dc164ae --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-helm/template.yaml @@ -0,0 +1,239 @@ +apiVersion: scaffolder.backstage.io/v1beta3 +# https://backstage.io/docs/features/software-catalog/descriptor-format#kind-template +kind: Template +metadata: + name: example-eks-nodejs2-rds-helm-template + title: Kubernetes - Node.js Express Web App With RDS + description: >- + Create a starter Node.js web application hosted by + an Express server, running on an Elastic Kubernetes Service cluster. + The application is configured using Helm and is for demonstrations only. + tags: + - nodejs + - aws + - rds + - eks + - kubernetes + - helm +spec: + owner: group:admins + type: website + + # These parameters are used to generate the input form in the frontend, and are + # used to gather input data for the execution of the template. 
+ parameters: + - title: Provide basic component information + required: + - component_id + - owner + properties: + component_id: + title: Name + type: string + description: Unique name of the component + ui:field: EntityNamePicker + ui:autofocus: true + description: + title: Description + type: string + description: Help others understand what this service is for + owner: + title: Owner + type: string + description: Owner of the component + ui:field: OwnerPicker + ui:options: + catalogFilter: + kind: [Group] + + - title: Provide environment information for the application + required: + - environment + - namespace + - k8sIAMRoleBindingType + properties: + environment: + title: AWS Environment + type: string + description: The AWS Environment where the application is created + ui:field: EntityPicker + ui:options: + allowedKinds: + - AWSEnvironment + catalogFilter: + - kind: AWSEnvironment + metadata.environmentType: eks + defaultKind: AWSEnvironment + namespace: + title: k8s Namespace + type: string + description: The k8s namespace to assign to application resources for the environment selected above + k8sIAMRoleBindingType: + title: Namespace-bound Kubectl Admin Access + description: Choose how to map an AWS IAM role with namespace-bound k8s admin access + type: string + default: create_new_k8s_namespace_admin_iam_role + enum: + - create_new_k8s_namespace_admin_iam_role + - existing_new_k8s_namespace_admin_iam_role + enumNames: + - 'Create a separate role for the K8s namespace' + - 'Import existing role and grant it access to the K8s namespace' + + # Only ask for the existing IAM role if user chose to use an existing role + dependencies: + k8sIAMRoleBindingType: + oneOf: + - properties: + k8sIAMRoleBindingType: + enum: + - existing_new_k8s_namespace_admin_iam_role + existingK8sNamespaceAdminRole: + title: Existing IAM role ARN + type: string + description: Existing IAM role to grant namespace privileges to + - properties: + k8sIAMRoleBindingType: + enum: + - create_new_k8s_namespace_admin_iam_role + + - title: Choose a git repository location + required: + - repoUrl + properties: + repoUrl: + title: Repository Location + type: string + ui:field: RepoUrlPicker + ui:options: + allowedHosts: + - {{ gitlab_hostname }} + allowedOwners: + - aws-app + + # These steps are executed in the scaffolder backend, using data that we gathered + # via the parameters above. 
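+  # Illustrative only: a minimal sketch of the kind of answers the form above might
+  # collect. Every value below is hypothetical and is not produced by this template.
+  #   component_id: my-eks-app
+  #   description: Demo Node.js app on EKS backed by RDS
+  #   owner: group:default/admins
+  #   environment: awsenvironment:default/eks-dev
+  #   namespace: my-eks-app-dev
+  #   k8sIAMRoleBindingType: create_new_k8s_namespace_admin_iam_role
+  #   repoUrl: {{ gitlab_hostname }}?owner=aws-app&repo=my-eks-app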
+ steps: + - id: opaGetPlatformInfo + name: Get OPA platform information + action: opa:get-platform-metadata + + - id: opaGetAwsEnvProviders + name: Get AWS Environment Providers + action: opa:get-env-providers + input: + environmentRef: ${{ parameters.environment }} + + - id: debugEnvironment + name: Print the environment entity info + action: debug:log + input: + message: ${{ steps['opaGetAwsEnvProviders'].output | dump }} + + - id: createProviderPropsFiles + each: ${{ steps['opaGetAwsEnvProviders'].output.envProviders }} + name: Store environment provider parameters + action: roadiehq:utils:fs:write + input: + path: .awsdeployment/providers/${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName}}.properties + content: | + TARGET_VPCID=${{ each.value.vpcId }} + TARGET_EKS_CLUSTER_ARN=${{ each.value.clusterArn }} + TARGET_ENV_NAME=${{ steps['opaGetAwsEnvProviders'].output.envName }} + TARGET_ENV_PROVIDER_NAME=${{ each.value.envProviderName }} + ACCOUNT=${{ each.value.accountId }} + REGION=${{ each.value.region }} + PREFIX=${{ each.value.envProviderPrefix }} + ENV_ROLE_ARN=${{ each.value.assumedRoleArn }} + OPA_CI_ENVIRONMENT=${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} + OPA_CI_REGISTRY_IMAGE=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com/${{ parameters.component_id | lower }}-${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} + OPA_CI_REGISTRY=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com + OPA_CI_ENVIRONMENT_MANUAL_APPROVAL={% if steps['opaGetAwsEnvProviders'].output.envDeployManualApproval %}true{% else %}false{% endif %} + TARGET_KUBECTL_LAMBDA_ARN=${{ each.value.kubectlLambdaArn }} + TARGET_KUBECTL_LAMBDA_ROLE_ARN=${{ each.value.kubectlLambdaRoleArn }} + NAMESPACE=${{ parameters.namespace }} + K8S_IAM_ROLE_BINDING_TYPE=${{ parameters.k8sIAMRoleBindingType }} + APP_ADMIN_ROLE_ARN=${{ parameters.existingK8sNamespaceAdminRole|default('', true) }} + + - id: createSecretManager + name: Create a Secret + action: opa:create-secret + input: + secretName: aws-apps-${{ (parameters.repoUrl | parseRepoUrl).repo | lower }}-access-token + + - id: fetchIac + name: Fetch EKS Infrastructure as Code + action: fetch:template + input: + url: https://{{ gitlab_hostname }}/opa-admin/backstage-reference/-/tree/main/common/aws_eks + targetPath: ./.iac + values: + component_id: ${{ parameters.component_id | lower }} + appEnvPlaintext: "" + + - id: fetchBase + name: Fetch Base + action: fetch:template + input: + url: ./content + values: + appPort: "8080" + component_id: ${{ parameters.component_id | lower}} + title: ${{ parameters.component_id }} + description: ${{ parameters.description }} + owner: ${{ parameters.owner }} + platformRegion: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} + awsEnvironment: ${{ steps['opaGetAwsEnvProviders'].output.envRef }} + awsEnvironmentName: ${{ steps['opaGetAwsEnvProviders'].output.envName }} + awsSecretRepoArn: ${{ steps['createSecretManager'].output.awsSecretArn }} + namespace: ${{ parameters.namespace }} + k8sIAMRoleBindingType: ${{ parameters.k8sIAMRoleBindingType }} + existingK8sNamespaceAdminRole: ${{ parameters.existingK8sNamespaceAdminRole|default('', true) }} + + - id: entityDetail + name: Get AWSEnvironment entity details + action: catalog:fetch + input: + entityRef: ${{ parameters.environment }} + + - id: debugEntity + name: Print the workspace + action: debug:log + input: + message: ${{ 
steps['entityDetail'].output.entity | dump }} + listWorkspace: true + + # This step publishes the contents of the working directory to GitLab. + - id: publish + name: Publish + action: publish:gitlab + input: + repoUrl: ${{ parameters.repoUrl }} + repoVisibility: internal + defaultBranch: main + + # Create a gitlab repository access token and store it in a SecretsManager secret + - id: createRepoToken + name: Create Repo Token + action: opa:createRepoAccessToken:gitlab + input: + repoUrl: ${{ parameters.repoUrl }} + projectId: ${{ steps['publish'].output.projectId }} + secretArn: ${{ steps['createSecretManager'].output.awsSecretArn }} + + # The final step is to register our new component in the catalog. + - id: register + name: Register + action: catalog:register + input: + repoContentsUrl: ${{ steps['publish'].output.repoContentsUrl }} + catalogInfoPath: "/.backstage/catalog-info.yaml" + + # Outputs are displayed to the user after a successful execution of the template. + output: + links: + - title: Repository + url: ${{ steps['publish'].output.remoteUrl }} + - title: Open in catalog + icon: catalog + entityRef: ${{ steps['register'].output.entityRef }} diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/.gitignore b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/.gitignore new file mode 100644 index 00000000..fcc2fc6b --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/.gitignore @@ -0,0 +1,2 @@ +# be sure to include .js files +!**/*.js \ No newline at end of file diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/.backstage/catalog-info.yaml b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/.backstage/catalog-info.yaml new file mode 100644 index 00000000..b2b4b06c --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/.backstage/catalog-info.yaml @@ -0,0 +1,26 @@ +apiVersion: backstage.io/v1alpha1 +kind: Component +metadata: + name: ${{ values.component_id | dump }} + title: ${{ values.title | dump }} + {%- if values.description %} + description: ${{values.description | dump}} + {%- endif %} + tags: + - aws + - nodejs + - k8s + # links: + # - title: Example Title + # url: http://www.example.com + iacType: cdk + repoSecretArn: ${{ values.awsSecretRepoArn | dump }} + + # Configure where k8s configurations are within the project + k8sConfigDirName: k8s +spec: + type: aws-app + subType: aws-eks + owner: ${{ values.owner | dump }} + lifecycle: experimental + dependsOn: [] diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/.dockerignore b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/.dockerignore new file mode 100644 index 00000000..93f13619 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/.dockerignore @@ -0,0 +1,2 @@ +node_modules +npm-debug.log diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/.editorconfig b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/.editorconfig new file mode 100644 index 00000000..81357d3e --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/.editorconfig @@ -0,0 +1,36 @@ +# EditorConfig is awesome: https://EditorConfig.org + +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +charset = utf-8 +indent_style = 
space +max_line_length = 120 + +[*.html] +indent_style = space +indent_size = 2 + +[*.{ts,json,js,tsx,jsx}] +indent_style = space +indent_size = 2 + +[*.md] +indent_size = 2 +indent_style = space + +[Dockerfile] +indent_style = space +indent_size = 2 + +[*.{yml,yaml}] +indent_size = 2 +indent_style = space + +[Makefile] +indent_size = 4 +indent_style = tab diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/.gitignore b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/.gitignore new file mode 100644 index 00000000..37fe0ff0 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/.gitignore @@ -0,0 +1,64 @@ +# macOS +.DS_Store + +# Intellij +.idea/ +*.iml + +# VS Code +.vscode/** +!.vscode/launch.json +!.vscode/tasks.json +.vsls.json +git-temp + +# logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +lerna-debug.log* + +# Coverage directory +coverage + +# Dependency directories +node_modules/ +**/.venv/ +**/__pycache__ + +# Yarn 3 files +.pnp.* +**/.yarn/* +!**/.yarn/patches +!**/.yarn/plugins +!**/.yarn/releases +!**/.yarn/sdks +!**/.yarn/versions + +# Node version directives +.nvmrc + +# Optional eslint cache +.eslintcache + +# dotenv environment variables file +.env +.env.* + +# Local configuration files +*.local.yaml + + +# transpiled JavaScript, Typings and test files +*.d.ts +!jest.config.js + +# CDK asset staging directory +.cdk.staging +cdk.out +cdk.context.json + +# Temp files +**/*.bak \ No newline at end of file diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/.gitlab-ci.yml b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/.gitlab-ci.yml new file mode 100644 index 00000000..4a0a5763 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/.gitlab-ci.yml @@ -0,0 +1,20 @@ +stages: + - env-creation + - prepare-${{values.awsEnvironmentName}}-stage + - ${{values.awsEnvironmentName}}-stage + +variables: + APP_SHORT_NAME: "${{ values.component_id }}" + APP_TEMPLATE_NAME: "example-eks-nodejs-rds" + OPA_PLATFORM_REGION: "${{ values.platformRegion }}" + +include: + - project: 'opa-admin/backstage-reference' + ref: main + file: + - 'common/cicd/.gitlab-ci-job-defaults-cdk.yml' + - 'common/cicd/.gitlab-ci-aws-base.yml' + - 'common/cicd/.gitlab-ci-aws-iac-eks.yml' + # If you want to call kubectl directly instead of going through lambda, comment above line and uncomment below line + # - 'common/cicd/.gitlab-ci-aws-iac-eks-kubectl.yml' + - 'common/cicd/.gitlab-ci-aws-image-kaniko.yml' diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/Dockerfile b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/Dockerfile new file mode 100644 index 00000000..98df309f --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/Dockerfile @@ -0,0 +1,18 @@ +FROM node:20 + +# Create app directory +WORKDIR /usr/src/app + +# Install app dependencies +COPY "src/package.json" ./ + +RUN yarn install + +# Bundle app source +COPY . . 
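+# The package.json is copied and installed before "COPY . ." above, presumably so the
+# yarn install layer stays cached when only application source changes; node_modules
+# and npm-debug.log are kept out of the build context by this template's .dockerignore.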
+ +# Specify a non-root user +USER nobody + +EXPOSE ${{ values.appPort }} +CMD [ "node", "src/index.js" ] diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/README.md b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/README.md new file mode 100644 index 00000000..d2e3cb02 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/README.md @@ -0,0 +1,10 @@ +# instructions +To use this Node.JS with RDS App follow the below steps + +1. Bind your app to an RDS Database - use the binding feature under Management-> Bind Resource-> Add +2. Once binding completed - you should see the rds database bound in your "bound resource" +3. Go to the DB Resource entity page and copy the secret name of the DB +4. Within your app - using the environment variables in your overview tab set the below two variables: + 1. DB_SECRET=YourDBSecretHere + 2. AWS_REGION=YourDBRegion - i.e: us-east-1 +5. Start your app, you should be able to connect to the database and execute queries. \ No newline at end of file diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/deployment.yaml b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/deployment.yaml new file mode 100644 index 00000000..ec6f65ce --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/deployment.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ${{ values.component_id }}-mainpod + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe + template: + metadata: + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe + spec: + serviceAccountName: overrideMe + containers: + - name: webapp + image: 12345678912.dkr.ecr.us-east-1.amazonaws.com/${{ values.component_id }}-overrideMe-overrideMe:latest + imagePullPolicy: Always + resources: + limits: + memory: 512Mi + cpu: "1" + requests: + cpu: "100m" + ports: + - containerPort: ${{ values.appPort }} + livenessProbe: + httpGet: + path: / + port: ${{ values.appPort }} + initialDelaySeconds: 2 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: / + port: ${{ values.appPort }} + initialDelaySeconds: 2 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/ingress.yaml b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/ingress.yaml new file mode 100644 index 00000000..faea5b8d --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/ingress.yaml @@ -0,0 +1,45 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ${{ values.component_id }}-ingress + annotations: + kubernetes.io/ingress.class: "alb" + alb.ingress.kubernetes.io/scheme: "internet-facing" + alb.ingress.kubernetes.io/healthcheck-path: "/health" + alb.ingress.kubernetes.io/success-codes: "200,201,302" + alb.ingress.kubernetes.io/target-type: "ip" + + # Tag load balancer so that it shows up as an OPA application resource + # Note - this setting must be overriden per environment provider + alb.ingress.kubernetes.io/tags: "aws-apps-${{ values.component_id 
}}-ENV_PLACEHOLDER-ENV_PROVIDER_PLACEHOLDER=${{ values.component_id }}" + + # Allows multiple services to use the same ALB + alb.ingress.kubernetes.io/group.name: ${{ values.component_id }}-ENV_PLACEHOLDER + + # Configure the load balancer name. Comment this out to have the name be auto-generated + # Load balancer name can only be up to 32 characters long + # alb.ingress.kubernetes.io/load-balancer-name: "${{ values.component_id }}-ENV_PLACEHOLDER-NS_PLACEHOLDER" + + # To enable HTTPS, you need a valid SSL certificate + # Here are some example annotations to use for enabling an HTTPS listener for your load balancer: + # alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' + # alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}' + # alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:::certificate/ + + # To configure an IP allow-list for the load balancer + # alb.ingress.kubernetes.io/inbound-cidrs: 10.0.0.0/24 + + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe +spec: + rules: + - http: + paths: + - path: /* + pathType: ImplementationSpecific + backend: + service: + name: ${{ values.component_id }}-service + port: + number: 80 diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/kustomization.yaml b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/kustomization.yaml new file mode 100644 index 00000000..af8ebbc8 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/kustomization.yaml @@ -0,0 +1,9 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - deployment.yaml + - ingress.yaml + - nsAdminRoleBinding.yaml + - nsViewerRoleBinding.yaml + - service.yaml + - serviceAccount.yaml diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/nsAdminRoleBinding.yaml b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/nsAdminRoleBinding.yaml new file mode 100644 index 00000000..c298d053 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/nsAdminRoleBinding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: ${{ values.component_id }}-admin + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe +subjects: +- kind: User + name: APP_ADMIN_ROLE_PLACEHOLDER + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/nsViewerRoleBinding.yaml b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/nsViewerRoleBinding.yaml new file mode 100644 index 00000000..18367e98 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/nsViewerRoleBinding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ${{ values.component_id }}-view-ns + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe +subjects: +- kind: User + name: APP_ADMIN_ROLE_PLACEHOLDER + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: opa-namespace-viewer + apiGroup: 
rbac.authorization.k8s.io diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/service.yaml b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/service.yaml new file mode 100644 index 00000000..89ecd5da --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: ${{ values.component_id }}-service + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe +spec: + selector: + app.kubernetes.io/name: ${{ values.component_id }} + ports: + - name: http + port: 80 + targetPort: ${{ values.appPort }} + protocol: TCP + type: NodePort diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/serviceAccount.yaml b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/serviceAccount.yaml new file mode 100644 index 00000000..d30a718e --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/base/serviceAccount.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ${{ values.component_id }}-sa + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe + annotations: + eks.amazonaws.com/role-arn: overrideMe diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/deployment.yaml b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/deployment.yaml new file mode 100644 index 00000000..cbb9fa6b --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/deployment.yaml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ${{ values.component_id }}-mainpod + labels: + app.kubernetes.io/env: ENV_PLACEHOLDER +spec: + selector: + matchLabels: + app.kubernetes.io/env: ENV_PLACEHOLDER + template: + metadata: + labels: + app.kubernetes.io/env: ENV_PLACEHOLDER + spec: + serviceAccountName: ${{ values.component_id }}-sa + containers: + - name: webapp + image: ACCT_PLACEHOLDER.dkr.ecr.REGION_PLACEHOLDER.amazonaws.com/${{ values.component_id }}-ENV_PLACEHOLDER-ENV_PROVIDER_PLACEHOLDER:latest + envFrom: + - configMapRef: + name: ${{ values.component_id }}-env-vars diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/ingress.yaml b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/ingress.yaml new file mode 100644 index 00000000..243436f7 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/ingress.yaml @@ -0,0 +1,6 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ${{ values.component_id }}-ingress + labels: + app.kubernetes.io/env: ENV_PLACEHOLDER diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/kustomization.yaml b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/kustomization.yaml new file mode 100644 index 00000000..a307a41b --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/kustomization.yaml @@ -0,0 +1,22 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: NS_PLACEHOLDER +nameSuffix: -ENV_PLACEHOLDER 
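+# The *_PLACEHOLDER tokens (NS_PLACEHOLDER, ENV_PLACEHOLDER, ACCT_PLACEHOLDER, ...) are
+# expected to be rewritten per environment provider by the CI placeholder-resolution
+# script (common/cicd/scripts/k8s/resolve-placeholders.sh); for a hypothetical
+# environment named "dev", this overlay would then suffix every resource name with "-dev".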
+configMapGenerator: + - name: ${{ values.component_id }}-env-vars + literals: + - ENVIRONMENT_NAME=ENV_PLACEHOLDER +generatorOptions: + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: ENV_PLACEHOLDER +resources: + - ../base + - ./namespace.yaml +patches: + - path: deployment.yaml + - path: ingress.yaml + - path: nsAdminRoleBinding.yaml + - path: nsViewerRoleBinding.yaml + - path: service.yaml + - path: serviceAccount.yaml diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/namespace.yaml b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/namespace.yaml new file mode 100644 index 00000000..17dfd4ef --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/namespace.yaml @@ -0,0 +1,6 @@ +kind: Namespace +apiVersion: v1 +metadata: + name: NS_PLACEHOLDER + labels: + name: NS_PLACEHOLDER diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/nsAdminRoleBinding.yaml b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/nsAdminRoleBinding.yaml new file mode 100644 index 00000000..876c1f02 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/nsAdminRoleBinding.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: ${{ values.component_id }}-admin + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: ENV_PLACEHOLDER + diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/nsViewerRoleBinding.yaml b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/nsViewerRoleBinding.yaml new file mode 100644 index 00000000..ba4f4ac3 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/nsViewerRoleBinding.yaml @@ -0,0 +1,7 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ${{ values.component_id }}-view-ns + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: ENV_PLACEHOLDER diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/service.yaml b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/service.yaml new file mode 100644 index 00000000..9dbbdcef --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/service.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Service +metadata: + name: ${{ values.component_id }}-service + labels: + app.kubernetes.io/env: ENV_PLACEHOLDER +spec: + selector: + app.kubernetes.io/env: ENV_PLACEHOLDER diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/serviceAccount.yaml b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/serviceAccount.yaml new file mode 100644 index 00000000..2a12df32 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/k8s/new-env-template/serviceAccount.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ${{ values.component_id }}-sa + labels: + app.kubernetes.io/env: ENV_PLACEHOLDER + annotations: + eks.amazonaws.com/role-arn: 
SA_ROLE_PLACEHOLDER + diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/src/index.js b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/src/index.js new file mode 100644 index 00000000..e8c73337 --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/src/index.js @@ -0,0 +1,95 @@ +const { SecretsManagerClient, GetSecretValueCommand } = require("@aws-sdk/client-secrets-manager"); +const express = require('express') +const app = express() +const port = process.env.PORT || 8080 + +const { Client } = require("pg") +const appName = "${{ values.component_id }}"; + +const ENVIRONMENT_NAME = process.env.ENVIRONMENT_NAME || 'Unknown Environment Name' +const DB_SECRET = process.env.DB_SECRET; +const AWS_REGION = process.env.AWS_REGION; +const config = { region: AWS_REGION || 'us-east-1' } +const secretsManagerClient = new SecretsManagerClient(config); + +app.get('/error', async (req, res) => { + throw new Error('Intentional error to demo seeing stack traces in logs'); +}) + +app.get('/health', async (req, res) => { + res.send('Success'); +}) + +app.get('/', async (req, res) => { + let apiResponse = `

${appName}
Environment: ${ENVIRONMENT_NAME}
Success
Note: the database connection has not yet been configured. (timestamp: ${new Date().toString()})`; + + if (DB_SECRET && AWS_REGION) { + const client = await connectDb(DB_SECRET, AWS_REGION); + console.log('Got DB connection!'); + + const queryResult = await queryDB(client, 'select now()'); + + if (queryResult) { + + let dbOutput = JSON.stringify(queryResult); + if (queryResult.rowCount === 1 && queryResult.rows) { + dbOutput = `According to the database, the current date/time is ${queryResult.rows[0].now}`; + } + apiResponse = `

${appName}
Environment: ${ENVIRONMENT_NAME}
Database Connection Successful!
${dbOutput}`; + + } else { + apiResponse = `

${appName}
Environment: ${ENVIRONMENT_NAME}
Database Connection Successful!
No response data was returned from query.`; + } + closeConnection(client); + } + + res.send(apiResponse); +}) + +app.listen(port, () => { + console.log(`Example RDS app listening on port ${port}`) +}) + +const getSecretValue = async (secretName, region) => { + console.log(`Retrieving secret: ${secretName} for region: ${region}`); + + let secret; + let secretValue = await secretsManagerClient.send( + new GetSecretValueCommand({ SecretId: secretName }) + ); + if (secretValue.SecretString) { + secret = secretValue.SecretString; + } + return secret ? JSON.parse(secret) : secret; +} + +const connectDb = async (secret, region) => { + const secretValues = await getSecretValue(secret, region); + console.log(`Will attempt to connect to database at ${secretValues.host}`) + try { + const client = new Client({ + user: secretValues.username, + host: secretValues.host, + database: secretValues.dbname, + password: secretValues.password, + port: secretValues.port + }) + console.log("Making a connection to db...") + await client.connect() + console.log("Connected to DB.") + return client; + + } catch (error) { + console.log(error) + } +} + +const closeConnection = async (client) => { + await client.end(); +} + +const queryDB = async (client, query) => { + const res = await client.query(query) + console.log(res) + return res; +} diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/src/package.json b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/src/package.json new file mode 100644 index 00000000..f3a4e53a --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/content/src/package.json @@ -0,0 +1,17 @@ +{ + "name": "${{ values.component_id }}", + "private": true, + "version": "0.1.0", + "description": "${{ values.description }}", + "main": "src/index.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "author": "${{ values.owner }}", + "license": "ISC", + "dependencies": { + "express": "^4.18.2", + "@aws-sdk/client-secrets-manager": "^3.480.0", + "pg": "^8.11.3" + } +} diff --git a/backstage-reference/templates/example-eks-nodejs-rds-kustomize/template.yaml b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/template.yaml new file mode 100644 index 00000000..98f9443f --- /dev/null +++ b/backstage-reference/templates/example-eks-nodejs-rds-kustomize/template.yaml @@ -0,0 +1,239 @@ +apiVersion: scaffolder.backstage.io/v1beta3 +# https://backstage.io/docs/features/software-catalog/descriptor-format#kind-template +kind: Template +metadata: + name: example-eks-nodejs2-rds-kustomize-template + title: Kubernetes - Node.js Express Web App With RDS + description: >- + Create a starter Node.js web application hosted by + an Express server, running on an Elastic Kubernetes Service cluster. + The application is configured using Kustomize and is for demonstrations only. + tags: + - nodejs + - aws + - rds + - eks + - kubernetes + - kustomize +spec: + owner: group:admins + type: website + + # These parameters are used to generate the input form in the frontend, and are + # used to gather input data for the execution of the template. 
+ parameters: + - title: Provide basic component information + required: + - component_id + - owner + properties: + component_id: + title: Name + type: string + description: Unique name of the component + ui:field: EntityNamePicker + ui:autofocus: true + description: + title: Description + type: string + description: Help others understand what this service is for + owner: + title: Owner + type: string + description: Owner of the component + ui:field: OwnerPicker + ui:options: + catalogFilter: + kind: [Group] + + - title: Provide environment information for the application + required: + - environment + - namespace + - k8sIAMRoleBindingType + properties: + environment: + title: AWS Environment + type: string + description: The AWS Environment where the application is created + ui:field: EntityPicker + ui:options: + allowedKinds: + - AWSEnvironment + catalogFilter: + - kind: AWSEnvironment + metadata.environmentType: eks + defaultKind: AWSEnvironment + namespace: + title: k8s Namespace + type: string + description: The k8s namespace to assign to application resources for the environment selected above + k8sIAMRoleBindingType: + title: Namespace-bound Kubectl Admin Access + description: Choose how to map an AWS IAM role with namespace-bound k8s admin access + type: string + default: create_new_k8s_namespace_admin_iam_role + enum: + - create_new_k8s_namespace_admin_iam_role + - existing_new_k8s_namespace_admin_iam_role + enumNames: + - 'Create a separate role for the K8s namespace' + - 'Import existing role and grant it access to the K8s namespace' + + # Only ask for the existing IAM role if user chose to use an existing role + dependencies: + k8sIAMRoleBindingType: + oneOf: + - properties: + k8sIAMRoleBindingType: + enum: + - existing_new_k8s_namespace_admin_iam_role + existingK8sNamespaceAdminRole: + title: Existing IAM role ARN + type: string + description: Existing IAM role to grant namespace privileges to + - properties: + k8sIAMRoleBindingType: + enum: + - create_new_k8s_namespace_admin_iam_role + + - title: Choose a git repository location + required: + - repoUrl + properties: + repoUrl: + title: Repository Location + type: string + ui:field: RepoUrlPicker + ui:options: + allowedHosts: + - {{ gitlab_hostname }} + allowedOwners: + - aws-app + + # These steps are executed in the scaffolder backend, using data that we gathered + # via the parameters above. 
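+  # For orientation (hypothetical names, not emitted by the scaffolder): the
+  # createProviderPropsFiles step below writes one file per environment provider, e.g.
+  #   .awsdeployment/providers/dev-myprovider.properties
+  # for an environment named "dev" with a provider named "myprovider", holding the
+  # TARGET_*, OPA_CI_* and NAMESPACE settings presumably consumed by the shared
+  # GitLab CI jobs included from common/cicd.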
+ steps: + - id: opaGetPlatformInfo + name: Get OPA platform information + action: opa:get-platform-metadata + + - id: opaGetAwsEnvProviders + name: Get AWS Environment Providers + action: opa:get-env-providers + input: + environmentRef: ${{ parameters.environment }} + + - id: debugEnvironment + name: Print the environment entity info + action: debug:log + input: + message: ${{ steps['opaGetAwsEnvProviders'].output | dump }} + + - id: createProviderPropsFiles + each: ${{ steps['opaGetAwsEnvProviders'].output.envProviders }} + name: Store environment provider parameters + action: roadiehq:utils:fs:write + input: + path: .awsdeployment/providers/${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName}}.properties + content: | + TARGET_VPCID=${{ each.value.vpcId }} + TARGET_EKS_CLUSTER_ARN=${{ each.value.clusterArn }} + TARGET_ENV_NAME=${{ steps['opaGetAwsEnvProviders'].output.envName }} + TARGET_ENV_PROVIDER_NAME=${{ each.value.envProviderName }} + ACCOUNT=${{ each.value.accountId }} + REGION=${{ each.value.region }} + PREFIX=${{ each.value.envProviderPrefix }} + ENV_ROLE_ARN=${{ each.value.assumedRoleArn }} + OPA_CI_ENVIRONMENT=${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} + OPA_CI_REGISTRY_IMAGE=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com/${{ parameters.component_id | lower }}-${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} + OPA_CI_REGISTRY=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com + OPA_CI_ENVIRONMENT_MANUAL_APPROVAL={% if steps['opaGetAwsEnvProviders'].output.envDeployManualApproval %}true{% else %}false{% endif %} + TARGET_KUBECTL_LAMBDA_ARN=${{ each.value.kubectlLambdaArn }} + TARGET_KUBECTL_LAMBDA_ROLE_ARN=${{ each.value.kubectlLambdaRoleArn }} + NAMESPACE=${{ parameters.namespace }} + K8S_IAM_ROLE_BINDING_TYPE=${{ parameters.k8sIAMRoleBindingType }} + APP_ADMIN_ROLE_ARN=${{ parameters.existingK8sNamespaceAdminRole|default('', true) }} + + - id: createSecretManager + name: Create a Secret + action: opa:create-secret + input: + secretName: aws-apps-${{ (parameters.repoUrl | parseRepoUrl).repo | lower }}-access-token + + - id: fetchIac + name: Fetch EKS Infrastructure as Code + action: fetch:template + input: + url: https://{{ gitlab_hostname }}/opa-admin/backstage-reference/-/tree/main/common/aws_eks + targetPath: ./.iac + values: + component_id: ${{ parameters.component_id | lower }} + appEnvPlaintext: "" + + - id: fetchBase + name: Fetch Base + action: fetch:template + input: + url: ./content + values: + appPort: "8080" + component_id: ${{ parameters.component_id | lower }} + title: ${{ parameters.component_id }} + description: ${{ parameters.description }} + owner: ${{ parameters.owner }} + platformRegion: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} + awsEnvironment: ${{ steps['opaGetAwsEnvProviders'].output.envRef }} + awsEnvironmentName: ${{ steps['opaGetAwsEnvProviders'].output.envName }} + awsSecretRepoArn: ${{ steps['createSecretManager'].output.awsSecretArn }} + namespace: ${{ parameters.namespace }} + k8sIAMRoleBindingType: ${{ parameters.k8sIAMRoleBindingType }} + existingK8sNamespaceAdminRole: ${{ parameters.existingK8sNamespaceAdminRole|default('', true) }} + + - id: entityDetail + name: Get AWSEnvironment entity details + action: catalog:fetch + input: + entityRef: ${{ parameters.environment }} + + - id: debugEntity + name: Print the workspace + action: debug:log + input: + message: ${{ 
steps['entityDetail'].output.entity | dump }} + listWorkspace: true + + # This step publishes the contents of the working directory to GitLab. + - id: publish + name: Publish + action: publish:gitlab + input: + repoUrl: ${{ parameters.repoUrl }} + repoVisibility: internal + defaultBranch: main + + # Create a gitlab repository access token and store it in a SecretsManager secret + - id: createRepoToken + name: Create Repo Token + action: opa:createRepoAccessToken:gitlab + input: + repoUrl: ${{ parameters.repoUrl }} + projectId: ${{ steps['publish'].output.projectId }} + secretArn: ${{ steps['createSecretManager'].output.awsSecretArn }} + + # The final step is to register our new component in the catalog. + - id: register + name: Register + action: catalog:register + input: + repoContentsUrl: ${{ steps['publish'].output.repoContentsUrl }} + catalogInfoPath: "/.backstage/catalog-info.yaml" + + # Outputs are displayed to the user after a successful execution of the template. + output: + links: + - title: Repository + url: ${{ steps['publish'].output.remoteUrl }} + - title: Open in catalog + icon: catalog + entityRef: ${{ steps['register'].output.entityRef }} diff --git a/backstage-reference/templates/example-nodejs-efs/content/.backstage/aws-catalog-info.yaml b/backstage-reference/templates/example-nodejs-efs/content/.backstage/aws-catalog-info.yaml deleted file mode 100644 index f8ca2c09..00000000 --- a/backstage-reference/templates/example-nodejs-efs/content/.backstage/aws-catalog-info.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- -apiVersion: backstage.io/v1alpha1 -kind: Resource -metadata: - name: ${{ values.efs_resource_id | dump }} - {%- if values.efs_description %} - description: ${{ values.efs_description | dump }} - {%- endif %} - tags: - - aws - - efs - annotations: - {%- for name, value in values.aws_efs_bp_outputs %} - aws.amazon.com/${{ name }}: ${{ value | dump }} - {%- endfor %} - labels: - aws-iacType: cdk -spec: - type: aws-resource - owner: ${{ values.owner | dump }} - dependencyOf: - - "component:${{ values.component_id }}" - dependsOn: - - ${{ values.aws_environment | dump }} diff --git a/backstage-reference/templates/example-nodejs-efs/content/.backstage/catalog-info.yaml b/backstage-reference/templates/example-nodejs-efs/content/.backstage/catalog-info.yaml deleted file mode 100644 index d4217ac9..00000000 --- a/backstage-reference/templates/example-nodejs-efs/content/.backstage/catalog-info.yaml +++ /dev/null @@ -1,32 +0,0 @@ -apiVersion: backstage.io/v1alpha1 -kind: Component -metadata: - name: ${{ values.component_id | dump }} - {%- if values.description %} - description: ${{values.description | dump}} - {%- endif %} - tags: - - aws - - nodejs - annotations: - aws.amazon.com/aws-stepfunctions-pipeline-arn: ${{ values.aws_pipeline_arn | dump }} - aws.amazon.com/opa-repo-secret-arn: ${{ values.aws_secret_repo_arn | dump }} - {%- for name, value in values.aws_ecs_bp_outputs %} - aws.amazon.com/${{ name }}: ${{ value | dump }} - {%- endfor %} - links: - - title: AWS provisioning pipeline - url: https://console.aws.amazon.com/states/home?#/v2/executions/details/${{ values.aws_pipeline_arn }} - {%- if values.aws_ecs_bp_outputs['opa-alb-endpoint'] %} - - title: ${{ values.component_id}} endpoint - url: ${{ values.aws_ecs_bp_outputs["opa-alb-endpoint"] | dump }} - {%- endif %} - labels: - aws-iacType: cdk -spec: - type: aws-app - owner: ${{ values.owner | dump }} - lifecycle: experimental - dependsOn: - - ${{ values.aws_environment | dump }} - - "resource:${{ values.efs_resource_id 
}}" diff --git a/backstage-reference/templates/example-nodejs-efs/content/.gitlab-ci.yml b/backstage-reference/templates/example-nodejs-efs/content/.gitlab-ci.yml deleted file mode 100644 index 096e1b9f..00000000 --- a/backstage-reference/templates/example-nodejs-efs/content/.gitlab-ci.yml +++ /dev/null @@ -1,49 +0,0 @@ -image: docker:20.10.16 -variables: - APP_SHORT_NAME: "${{ values.component_id }}" - APP_TEMPLATE_NAME: "example-nodejs-efs" - AWS_ACCOUNT: "${{ values.aws_account }}" - AWS_DEFAULT_REGION: "${{ values.aws_region }}" - ECR_REPO: "${{ values.component_id }}" - ECR_REGISTRY: "${{ values.aws_account }}.dkr.ecr.${{ values.aws_region }}.amazonaws.com" - OPA_PLATFORM_REGION: "${{ values.platform_region }}" - # When you use the dind service, you must instruct Docker to talk with - # the daemon started inside of the service. The daemon is available - # with a network connection instead of the default - # /var/run/docker.sock socket. Docker 19.03 does this automatically - # by setting the DOCKER_HOST in - # https://github.com/docker-library/docker/blob/d45051476babc297257df490d22cbd806f1b11e4/19.03/docker-entrypoint.sh#L23-L29 - # - # The 'docker' hostname is the alias of the service container as described at - # https://docs.gitlab.com/ee/ci/services/#accessing-the-services. - # - # Specify to Docker where to create the certificates. Docker - # creates them automatically on boot, and creates - # `/certs/client` to share between the service and job - # container, thanks to volume mount from config.toml - DOCKER_TLS_CERTDIR: "/certs" -services: - - docker:20.10.16-dind -stages: - - build -build-deploy-image: - stage: build - before_script: - - cat /etc/os-release - - docker info - - apk update - - apk add aws-cli - - aws --version - - aws sts get-caller-identity - - aws ecr get-login-password --region ${{ values.aws_region }} | docker login --username AWS --password-stdin $ECR_REGISTRY - - echo Successfully logged in to ECR! - - apk add curl - - apk add tar - script: - - (curl -sSL "https://github.com/buildpacks/pack/releases/download/v0.28.0/pack-v0.28.0-linux.tgz" | tar -C /usr/local/bin/ --no-same-owner -xzv pack) - - TAG=$(date +%m-%d-%Y_%H-%M-%S) - - pack build $ECR_REPO -t $ECR_REPO:$TAG --builder paketobuildpacks/builder:base - - docker tag $ECR_REPO:$TAG $ECR_REGISTRY/$ECR_REPO:$TAG - - docker tag $ECR_REPO:$TAG $ECR_REGISTRY/$ECR_REPO:latest - - echo Pushing Docker image to ECR... - - docker push $ECR_REGISTRY/$ECR_REPO --all-tags diff --git a/backstage-reference/templates/example-nodejs-efs/content/index.js b/backstage-reference/templates/example-nodejs-efs/content/index.js deleted file mode 100644 index 463dc30e..00000000 --- a/backstage-reference/templates/example-nodejs-efs/content/index.js +++ /dev/null @@ -1,120 +0,0 @@ -const express = require("express"); -const bodyParser = require("body-parser"); -const fs = require("fs"); - -const app = express(); -const port = process.env.PORT || 3000; -const serviceEndpoint = "/journals"; - -app.use(bodyParser.json()); -app.use( - bodyParser.urlencoded({ - extended: true, - }) -); - -const journalPath = process.env.NODE_ENV === "development" ? 
`${process.env.HOME}/journal` : "/data/journal"; - -// Create the journal directory if it does not exist -console.log(`Creating journal directory at ${journalPath}`); -fs.mkdir(journalPath, { recursive: true }, (err, data) => { - if (err) { - console.log(err); - } -}); - -// List journal files -app.get(serviceEndpoint, (req, res) => { - console.log(`listing journals at: ${journalPath}`); - - fs.readdir(journalPath, (err, files) => { - if (err) { - return res.status(500).send(err); // Return error if directory reading failed - } - res.send(files); // Return list of files - }); -}); - -// Read the contents of a specific journal file -app.get(`${serviceEndpoint}/:id`, (req, res) => { - const journalId = req.params.id; // Get file id from request parameters - console.log(`reading content from journalId: ${journalId}`); - fs.readFile(`${journalPath}/${journalId}`, "utf8", (err, data) => { - if (err) { - return res.status(500).send(err); // Return error if file reading failed - } - res.type("text/plain"); // Set content type to plain text - res.send(data); // Return file content - }); -}); - -// Create a new journal file -app.post(`${serviceEndpoint}/:id`, (req, res) => { - const journalId = req.params.id; - const filePath = `${journalPath}/${journalId}`; - console.log(`creating new journal at: ${filePath}`); - const fileContent = `Journal created: ${new Date().toISOString()}\n-----------------------------------------\n`; // Get file content from request body - fs.writeFile(filePath, fileContent, (err) => { - if (err) { - return res.status(500).send(err); // Return error if file writing failed - } - res.status(201).send(`File ${journalId} created successfully`); // Return success message - }); -}); - -// Append content to an existing journal file -app.put(`${serviceEndpoint}/:id`, (req, res) => { - const journalId = req.params.id; - const filePath = `${journalPath}/${journalId}`; - const textToAppend = req.body.content; - console.log(`appending content to journalId: ${journalId}`); - const fileContent = `${new Date().toISOString()}: ${textToAppend}\n`; - fs.appendFile(filePath, fileContent, (err) => { - if (err) { - return res.status(500).send(err); // Return error if file appending failed - } - res.send(`File ${journalId} appended successfully`); // Return success message - }); -}); - -// Delete a journal file -app.delete(`${serviceEndpoint}/:id`, (req, res) => { - const journalId = req.params.id; - const filePath = `${journalPath}/${journalId}`; - console.log(`deleting journalId: ${journalId}`); - fs.unlink(filePath, (err) => { - if (err) { - return res.status(500).send(err); // Return error if file deleting failed - } - res.send(`File ${journalId} deleted successfully`); // Return success message - }); -}); - -// Return basic information about the EFS resource and application, including the resource ID, region, account, and resource path. -// The resource path is the path to the EFS resource. For example, if the resource ID is "efs-journal" and the resource path is "/journal", then the resource path is "/journal". 
-app.get("/", (req, res) => { - const resObject = { - hostname: process.env.HOSTNAME, - efs_resource_id: ${{ values.aws_efs_bp_outputs['opa-efs-id'] | dump -}}, - region: ${{ values.aws_region | dump }}, - account: ${{ values.aws_account | dump }}, - resource_path: serviceEndpoint, - }; - -// check access to the user's home directory -// const homeAccess = new Promise((resolve, reject) => { -fs.access(journalPath, fs.constants.R_OK | fs.constants.W_OK, (err) => { - resObject.filesystemAccess = !err; - if (err) { - resObject.err = err.message; - console.log(err); - } - res.json(resObject); - -}); -}); - -app.listen(port, () => { - console.log(`Example nodejs service listening on port ${port}`); -}); diff --git a/backstage-reference/templates/example-nodejs-efs/template.yaml b/backstage-reference/templates/example-nodejs-efs/template.yaml deleted file mode 100644 index d3e292b9..00000000 --- a/backstage-reference/templates/example-nodejs-efs/template.yaml +++ /dev/null @@ -1,191 +0,0 @@ -apiVersion: scaffolder.backstage.io/v1beta3 -# https://backstage.io/docs/features/software-catalog/descriptor-format#kind-template -kind: Template -metadata: - name: example-nodejs-efs-template - title: Node.js microservice with EFS - description: >- - Create a sample journal application which persists - entries to an AWS EFS file system. - tags: - - nodejs - - efs - - aws -spec: - owner: group:admins - type: service - - # These parameters are used to generate the input form in the frontend, and are - # used to gather input data for the execution of the template. - parameters: - - title: Provide basic component information - required: - - component_id - - owner - properties: - component_id: - title: Name - type: string - description: Unique name of the component - ui:field: EntityNamePicker - ui:autofocus: true - description: - title: Description - type: string - description: Help others understand what this service is for - owner: - title: Owner - type: string - description: Owner of the component - ui:field: OwnerPicker - ui:options: - catalogFilter: - kind: [Group] - - title: Provide deployment information for the application - required: - - environment - properties: - environment: - title: AWS Environment - type: string - description: The AWS Environment where the application is created - ui:field: EntityPicker - ui:options: - allowedKinds: [AWSEnvironment] - defaultKind: AWSEnvironment - - - title: Provide basic file system resource information - required: - - efs_resource_id - properties: - efs_resource_id: - title: Resource identifier - type: string - description: Unique name of the file system resource - ui:field: EntityNamePicker - ui:autofocus: true - efs_description: - title: File system description - type: string - description: What's the purpose of this file system resource? - - title: Choose a git repository location - required: - - repoUrl - properties: - repoUrl: - title: Repository Location - type: string - ui:field: RepoUrlPicker - ui:options: - allowedHosts: - - {{ gitlab_hostname }} - allowedOwners: - - aws-app - - # These steps are executed in the scaffolder backend, using data that we gathered - # via the parameters above. - steps: - # Each step executes an action, in this case one templates files into the working directory. 
- - id: opaGetPlatformInfo - name: Get OPA platform information - action: opa:get-platform-metadata - - - id: opaDeployEFSBoilerplate - name: Deploy EFS resource - action: opa:deploy-boilerplate - input: - boilerplateRepositories: - - aws_efs - inputParameters: - APP_SHORT_NAME: ${{ parameters.component_id}} - EFS_NAME: ${{ parameters.efs_resource_id }} - EFS_ACCESS_POINT_PATH: "/data" - environmentRef: ${{ parameters.environment }} - actionType: "Create Resource" - - id: opaDeployECSBoilerplate - name: Deploy ECS Boilerplate - action: opa:deploy-boilerplate - input: - boilerplateRepositories: - - aws_ecs - inputParameters: - APP_SHORT_NAME: ${{ parameters.component_id}} - EFS_ID: ${{ steps['opaDeployEFSBoilerplate'].output.cfnOutputs['opa-efs-id'] }} - # EFS_MOUNT_PATH: "/data" - EFS_ACCESS_POINT_ID: ${{ steps['opaDeployEFSBoilerplate'].output.cfnOutputs['opa-efs-access-point-id'] }} - APP_ENV_PLAINTEXT: - PORT: "3001" - environmentRef: ${{ parameters.environment }} - actionType: "Create App" - - id: createSecretManager - name: Creates a Secret - action: opa:create-secret - input: - secretName: aws-apps-${{ (parameters.repoUrl | parseRepoUrl).repo | lower }}-access-token - region: ${{ steps['opaDeployECSBoilerplate'].output.region }} - accountId: ${{ steps['opaDeployECSBoilerplate'].output.account }} - description: "Gitlab repo access token" - tags: - - Key: "aws-apps:${{ parameters.component_id }}" - Value: ${{ parameters.component_id }} - - id: fetchBase - name: Fetch Base - action: fetch:template - input: - url: ./content - values: - component_id: ${{ parameters.component_id }} - owner: ${{ parameters.owner }} - description: ${{ parameters.description }} - platform_region: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} - aws_environment: ${{ parameters.environment }} - aws_region: ${{ steps['opaDeployECSBoilerplate'].output.region }} - aws_account: ${{ steps['opaDeployECSBoilerplate'].output.account }} - aws_ecs_bp_outputs: ${{ steps['opaDeployECSBoilerplate'].output.cfnOutputs }} - aws_efs_bp_outputs: ${{ steps['opaDeployEFSBoilerplate'].output.cfnOutputs }} - aws_secret_repo_arn: ${{ steps['createSecretManager'].output.awsSecretArn }} - aws_pipeline_arn: ${{ steps['opaDeployECSBoilerplate'].output.executionArn }} - efs_resource_id: ${{ parameters.efs_resource_id }} - efs_description: ${{ parameters.efs_description }} - app_port: "8080" - # This step publishes the contents of the working directory to GitLab. - - id: publish - name: Publish - action: publish:gitlab - input: - repoUrl: ${{ parameters.repoUrl }} - repoVisibility: internal - defaultBranch: main - - id: createRepoToken - name: Create Repo Token - action: opa:createRepoAccessToken:gitlab - input: - repoUrl: ${{ parameters.repoUrl }} - projectId: ${{ steps['publish'].output.projectId }} - secretArn: ${{ steps['createSecretManager'].output.awsSecretArn }} - accountId: ${{ steps['opaDeployECSBoilerplate'].output.account }} - # The final step is to register our new component in the catalog. - - id: registerApp - name: Register Component - action: catalog:register - input: - repoContentsUrl: ${{ steps['publish'].output.repoContentsUrl }} - catalogInfoPath: "/.backstage/catalog-info.yaml" - - id: registerEfs - name: Register EFS Resource - action: catalog:register - input: - repoContentsUrl: ${{ steps['publish'].output.repoContentsUrl }} - catalogInfoPath: "/.backstage/aws-catalog-info.yaml" - - # Outputs are displayed to the user after a successful execution of the template. 
- output: - links: - - title: Repository - url: ${{ steps['publish'].output.remoteUrl }} - - title: Open application component in catalog - icon: catalog - entityRef: ${{ steps['registerApp'].output.entityRef }} - - title: Open EFS resource in catalog - icon: resource - entityRef: ${{ steps['registerEfs'].output.entityRef }} diff --git a/backstage-reference/templates/example-nodejs-microservice/.gitignore b/backstage-reference/templates/example-nodejs-microservice/.gitignore new file mode 100644 index 00000000..fcc2fc6b --- /dev/null +++ b/backstage-reference/templates/example-nodejs-microservice/.gitignore @@ -0,0 +1,2 @@ +# be sure to include .js files +!**/*.js \ No newline at end of file diff --git a/backstage-reference/templates/example-nodejs-microservice/content/.backstage/catalog-info.yaml b/backstage-reference/templates/example-nodejs-microservice/content/.backstage/catalog-info.yaml new file mode 100644 index 00000000..8c4555d1 --- /dev/null +++ b/backstage-reference/templates/example-nodejs-microservice/content/.backstage/catalog-info.yaml @@ -0,0 +1,22 @@ +apiVersion: backstage.io/v1alpha1 +kind: Component +metadata: + name: ${{ values.component_id | dump }} + title: ${{ values.title | dump }} + {%- if values.description %} + description: ${{values.description | dump}} + {%- endif %} + tags: + - aws + - nodejs + # links: + # - title: Example Title + # url: http://www.example.com + iacType: cdk + repoSecretArn: ${{ values.awsSecretRepoArn | dump }} +spec: + type: aws-app + subType: aws-ecs + owner: ${{ values.owner | dump }} + lifecycle: experimental + dependsOn: [] diff --git a/backstage-reference/templates/example-nodejs-microservice/content/.dockerignore b/backstage-reference/templates/example-nodejs-microservice/content/.dockerignore new file mode 100644 index 00000000..93f13619 --- /dev/null +++ b/backstage-reference/templates/example-nodejs-microservice/content/.dockerignore @@ -0,0 +1,2 @@ +node_modules +npm-debug.log diff --git a/backstage-reference/templates/example-nodejs-microservice/content/.editorconfig b/backstage-reference/templates/example-nodejs-microservice/content/.editorconfig new file mode 100644 index 00000000..81357d3e --- /dev/null +++ b/backstage-reference/templates/example-nodejs-microservice/content/.editorconfig @@ -0,0 +1,36 @@ +# EditorConfig is awesome: https://EditorConfig.org + +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +charset = utf-8 +indent_style = space +max_line_length = 120 + +[*.html] +indent_style = space +indent_size = 2 + +[*.{ts,json,js,tsx,jsx}] +indent_style = space +indent_size = 2 + +[*.md] +indent_size = 2 +indent_style = space + +[Dockerfile] +indent_style = space +indent_size = 2 + +[*.{yml,yaml}] +indent_size = 2 +indent_style = space + +[Makefile] +indent_size = 4 +indent_style = tab diff --git a/backstage-reference/templates/example-nodejs-microservice/content/.gitignore b/backstage-reference/templates/example-nodejs-microservice/content/.gitignore new file mode 100644 index 00000000..37fe0ff0 --- /dev/null +++ b/backstage-reference/templates/example-nodejs-microservice/content/.gitignore @@ -0,0 +1,64 @@ +# macOS +.DS_Store + +# Intellij +.idea/ +*.iml + +# VS Code +.vscode/** +!.vscode/launch.json +!.vscode/tasks.json +.vsls.json +git-temp + +# logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +lerna-debug.log* + +# Coverage directory +coverage + +# Dependency directories +node_modules/ +**/.venv/ 
+**/__pycache__ + +# Yarn 3 files +.pnp.* +**/.yarn/* +!**/.yarn/patches +!**/.yarn/plugins +!**/.yarn/releases +!**/.yarn/sdks +!**/.yarn/versions + +# Node version directives +.nvmrc + +# Optional eslint cache +.eslintcache + +# dotenv environment variables file +.env +.env.* + +# Local configuration files +*.local.yaml + + +# transpiled JavaScript, Typings and test files +*.d.ts +!jest.config.js + +# CDK asset staging directory +.cdk.staging +cdk.out +cdk.context.json + +# Temp files +**/*.bak \ No newline at end of file diff --git a/backstage-reference/templates/example-nodejs-microservice/content/.gitlab-ci.yml b/backstage-reference/templates/example-nodejs-microservice/content/.gitlab-ci.yml new file mode 100644 index 00000000..bdd03639 --- /dev/null +++ b/backstage-reference/templates/example-nodejs-microservice/content/.gitlab-ci.yml @@ -0,0 +1,19 @@ +stages: + - env-creation + - prepare-${{values.awsEnvironmentName}}-stage + - ${{values.awsEnvironmentName}}-stage + +variables: + APP_SHORT_NAME: "${{ values.component_id }}" + APP_TEMPLATE_NAME: "example-nodejs" + OPA_PLATFORM_REGION: "${{ values.platformRegion }}" + +include: + - project: 'opa-admin/backstage-reference' + ref: main + file: + - 'common/cicd/.gitlab-ci-job-defaults-cdk.yml' + - 'common/cicd/.gitlab-ci-aws-base.yml' + - 'common/cicd/.gitlab-ci-aws-iac-ecs.yml' + - 'common/cicd/.gitlab-ci-aws-image-kaniko.yml' + - 'common/cicd/.gitlab-ci-aws-image-deploy.yml' diff --git a/backstage-reference/templates/example-nodejs-microservice/content/Dockerfile b/backstage-reference/templates/example-nodejs-microservice/content/Dockerfile new file mode 100644 index 00000000..5ac1bf25 --- /dev/null +++ b/backstage-reference/templates/example-nodejs-microservice/content/Dockerfile @@ -0,0 +1,15 @@ +FROM node:18 + +# Create app directory +WORKDIR /usr/src/app + +# Install app dependencies +COPY "src/package.json" ./ + +RUN yarn install + +# Bundle app source +COPY . . 
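+# The image exposes the templated application port and starts the Express
+# server directly with Node. ${{ values.appPort }} is resolved to "8080" by
+# the scaffolder's fetch:template step in template.yaml.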
+ +EXPOSE ${{ values.appPort }} +CMD [ "node", "src/index.js" ] diff --git a/backstage-reference/templates/example-nodejs-microservice/content/src/index.js b/backstage-reference/templates/example-nodejs-microservice/content/src/index.js new file mode 100644 index 00000000..1d19a753 --- /dev/null +++ b/backstage-reference/templates/example-nodejs-microservice/content/src/index.js @@ -0,0 +1,30 @@ +const express = require('express'); +const os = require('os'); +const app = express(); +const port = ${{ values.appPort }}; + +app.get('/', (req, res) => { + // TODO: build the response object to return to the user + res.status(200).send("OK"); +}); + +/** + * Health endpoint function + * + * @returns a json object with data points reflecting the health of the service including + * - service uptime + * - os uptime + * - a status indicator + */ +app.get('/health', (req, res) => { + res.json({ + "uptime": process.uptime(), + "os_uptime": os.uptime(), + "status": "OK" + }); +}) + + +app.listen(port, () => { + console.log(`Example microservice listening on port ${port}`) +}); diff --git a/backstage-reference/templates/example-nodejs-efs/content/package.json b/backstage-reference/templates/example-nodejs-microservice/content/src/package.json similarity index 92% rename from backstage-reference/templates/example-nodejs-efs/content/package.json rename to backstage-reference/templates/example-nodejs-microservice/content/src/package.json index 4969eb06..aff37246 100644 --- a/backstage-reference/templates/example-nodejs-efs/content/package.json +++ b/backstage-reference/templates/example-nodejs-microservice/content/src/package.json @@ -3,7 +3,7 @@ "private": true, "version": "0.1.0", "description": "${{ values.description }}", - "main": "index.js", + "main": "src/index.js", "scripts": { "test": "echo \"Error: no test specified\" && exit 1" }, diff --git a/backstage-reference/templates/example-nodejs-microservice/template.yaml b/backstage-reference/templates/example-nodejs-microservice/template.yaml new file mode 100644 index 00000000..c45a1c36 --- /dev/null +++ b/backstage-reference/templates/example-nodejs-microservice/template.yaml @@ -0,0 +1,190 @@ +apiVersion: scaffolder.backstage.io/v1beta3 +# https://backstage.io/docs/features/software-catalog/descriptor-format#kind-template +kind: Template +metadata: + name: example-nodejs-microservice-template + title: Node.js Microservice + description: >- + Create a starter Node.js microservice application running on + an Express server. + This is demonstration only. + tags: + - nodejs + - aws +spec: + owner: group:admins + type: service + + # These parameters are used to generate the input form in the frontend, and are + # used to gather input data for the execution of the template. 
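+  # Each entry in the parameters list below is rendered as one page of the
+  # scaffolder form; fields listed under "required" must be filled in before
+  # the steps can run.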
+ parameters: + - title: Provide application information + required: + - component_id + - owner + - environment + properties: + component_id: + title: Name + type: string + description: Unique name of the component + ui:field: EntityNamePicker + ui:autofocus: true + description: + title: Description + type: string + description: Help others understand what this service is for + owner: + title: Owner + type: string + description: Owner of the component + ui:field: OwnerPicker + ui:options: + catalogFilter: + kind: [Group] + environment: + title: AWS Environment + type: string + description: The AWS Environment where the application is created + ui:field: EntityPicker + ui:options: + allowedKinds: + - AWSEnvironment + catalogFilter: + - kind: AWSEnvironment + metadata.environmentType: ecs + defaultKind: AWSEnvironment + + - title: Choose a git repository location + required: + - repoUrl + properties: + repoUrl: + title: Repository Location + type: string + ui:field: RepoUrlPicker + ui:options: + allowedHosts: + - {{ gitlab_hostname }} + allowedOwners: + - aws-app + + # These steps are executed in the scaffolder backend, using data that we gathered + # via the parameters above. + steps: + - id: opaGetPlatformInfo + name: Get OPA platform information + action: opa:get-platform-metadata + + - id: opaGetAwsEnvProviders + name: Get AWS Environment Providers + action: opa:get-env-providers + input: + environmentRef: ${{ parameters.environment }} + + - id: debugEnvironment + name: Print the environment entity info + action: debug:log + input: + message: ${{ steps['opaGetAwsEnvProviders'].output | dump }} + + - id: createProviderPropsFiles + each: ${{ steps['opaGetAwsEnvProviders'].output.envProviders }} + name: Store environment provider parameters + action: roadiehq:utils:fs:write + input: + path: .awsdeployment/providers/${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName}}.properties + content: | + TARGET_VPCID=${{ each.value.vpcId }} + TARGET_ECS_CLUSTER_ARN=${{ each.value.clusterArn }} + TARGET_ENV_NAME=${{ steps['opaGetAwsEnvProviders'].output.envName }} + TARGET_ENV_PROVIDER_NAME=${{ each.value.envProviderName }} + ACCOUNT=${{ each.value.accountId }} + REGION=${{ each.value.region }} + PREFIX=${{ each.value.envProviderPrefix }} + ENV_ROLE_ARN=${{ each.value.assumedRoleArn }} + OPA_CI_ENVIRONMENT=${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} + OPA_CI_REGISTRY_IMAGE=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com/${{ parameters.component_id | lower }}-${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} + OPA_CI_REGISTRY=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com + OPA_CI_ENVIRONMENT_MANUAL_APPROVAL={% if steps['opaGetAwsEnvProviders'].output.envDeployManualApproval %}true{% else %}false{% endif %} + + - id: createSecretManager + name: Create a Secret + action: opa:create-secret + input: + secretName: aws-apps-${{ (parameters.repoUrl | parseRepoUrl).repo | lower }}-access-token + + - id: fetchIac + name: Fetch ECS Infrastructure as Code + action: fetch:template + input: + url: https://{{ gitlab_hostname }}/opa-admin/backstage-reference/-/tree/main/common/aws_ecs + targetPath: ./.iac + values: + component_id: ${{ parameters.component_id | lower }} + appEnvPlaintext: "" + app_health_endpoint: "/health" + + - id: fetchBase + name: Fetch Base + action: fetch:template + input: + url: ./content + values: + appPort: "8080" + 
component_id: ${{ parameters.component_id | lower }} + title: ${{ parameters.component_id }} + description: ${{ parameters.description }} + owner: ${{ parameters.owner }} + platformRegion: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} + awsEnvironment: ${{ steps['opaGetAwsEnvProviders'].output.envRef }} + awsEnvironmentName: ${{ steps['opaGetAwsEnvProviders'].output.envName }} + awsSecretRepoArn: ${{ steps['createSecretManager'].output.awsSecretArn }} + + - id: entityDetail + name: Get AWSEnvironment entity details + action: catalog:fetch + input: + entityRef: ${{ parameters.environment }} + + - id: debugEntity + name: Print the workspace + action: debug:log + input: + message: ${{ steps['entityDetail'].output.entity | dump }} + listWorkspace: true + + # This step publishes the contents of the working directory to GitLab. + - id: publish + name: Publish + action: publish:gitlab + input: + repoUrl: ${{ parameters.repoUrl }} + repoVisibility: internal + defaultBranch: main + + # Create a gitlab repository access token and store it in a SecretsManager secret + - id: createRepoToken + name: Create Repo Token + action: opa:createRepoAccessToken:gitlab + input: + repoUrl: ${{ parameters.repoUrl }} + projectId: ${{ steps['publish'].output.projectId }} + secretArn: ${{ steps['createSecretManager'].output.awsSecretArn }} + + # The final step is to register our new component in the catalog. + - id: register + name: Register + action: catalog:register + input: + repoContentsUrl: ${{ steps['publish'].output.repoContentsUrl }} + catalogInfoPath: "/.backstage/catalog-info.yaml" + + # Outputs are displayed to the user after a successful execution of the template. + output: + links: + - title: Repository + url: ${{ steps['publish'].output.remoteUrl }} + - title: Open in catalog + icon: catalog + entityRef: ${{ steps['register'].output.entityRef }} diff --git a/backstage-reference/templates/example-nodejs-rds/content/.backstage/catalog-info.yaml b/backstage-reference/templates/example-nodejs-rds/content/.backstage/catalog-info.yaml index b2d62c0f..8c4555d1 100644 --- a/backstage-reference/templates/example-nodejs-rds/content/.backstage/catalog-info.yaml +++ b/backstage-reference/templates/example-nodejs-rds/content/.backstage/catalog-info.yaml @@ -2,21 +2,21 @@ apiVersion: backstage.io/v1alpha1 kind: Component metadata: name: ${{ values.component_id | dump }} + title: ${{ values.title | dump }} {%- if values.description %} description: ${{values.description | dump}} {%- endif %} tags: - aws - nodejs - annotations: - aws.amazon.com/opa-repo-secret-arn: ${{ values.aws_secret_repo_arn | dump }} # links: # - title: Example Title # url: http://www.example.com - iac-type: cdk - repo-secret-arn: ${{ values.aws_secret_repo_arn | dump }} + iacType: cdk + repoSecretArn: ${{ values.awsSecretRepoArn | dump }} spec: type: aws-app + subType: aws-ecs owner: ${{ values.owner | dump }} lifecycle: experimental dependsOn: [] diff --git a/backstage-reference/templates/example-nodejs-rds/content/.gitlab-ci.yml b/backstage-reference/templates/example-nodejs-rds/content/.gitlab-ci.yml index 4ae6ac87..532e052b 100644 --- a/backstage-reference/templates/example-nodejs-rds/content/.gitlab-ci.yml +++ b/backstage-reference/templates/example-nodejs-rds/content/.gitlab-ci.yml @@ -1,12 +1,12 @@ stages: - env-creation - - prepare-${{values.aws_environment_name}}-stage - - ${{values.aws_environment_name}}-stage + - prepare-${{values.awsEnvironmentName}}-stage + - ${{values.awsEnvironmentName}}-stage variables: APP_SHORT_NAME: 
"${{ values.component_id }}" APP_TEMPLATE_NAME: "example-nodejs-rds" - OPA_PLATFORM_REGION: "${{ values.platform_region }}" + OPA_PLATFORM_REGION: "${{ values.platformRegion }}" include: - project: 'opa-admin/backstage-reference' @@ -16,3 +16,4 @@ include: - 'common/cicd/.gitlab-ci-aws-base.yml' - 'common/cicd/.gitlab-ci-aws-iac-ecs.yml' - 'common/cicd/.gitlab-ci-aws-image-kaniko.yml' + - 'common/cicd/.gitlab-ci-aws-image-deploy.yml' diff --git a/backstage-reference/templates/example-nodejs-rds/content/Dockerfile b/backstage-reference/templates/example-nodejs-rds/content/Dockerfile index 36e320d2..98df309f 100644 --- a/backstage-reference/templates/example-nodejs-rds/content/Dockerfile +++ b/backstage-reference/templates/example-nodejs-rds/content/Dockerfile @@ -1,4 +1,4 @@ -FROM node:18 +FROM node:20 # Create app directory WORKDIR /usr/src/app @@ -11,5 +11,8 @@ RUN yarn install # Bundle app source COPY . . -EXPOSE ${{ values.app_port }} +# Specify a non-root user +USER nobody + +EXPOSE ${{ values.appPort }} CMD [ "node", "src/index.js" ] diff --git a/backstage-reference/templates/example-nodejs-rds/content/src/index.js b/backstage-reference/templates/example-nodejs-rds/content/src/index.js index ebe5b005..e8c73337 100644 --- a/backstage-reference/templates/example-nodejs-rds/content/src/index.js +++ b/backstage-reference/templates/example-nodejs-rds/content/src/index.js @@ -1,39 +1,62 @@ -const AWS = require("aws-sdk") +const { SecretsManagerClient, GetSecretValueCommand } = require("@aws-sdk/client-secrets-manager"); const express = require('express') const app = express() const port = process.env.PORT || 8080 const { Client } = require("pg") +const appName = "${{ values.component_id }}"; + +const ENVIRONMENT_NAME = process.env.ENVIRONMENT_NAME || 'Unknown Environment Name' +const DB_SECRET = process.env.DB_SECRET; +const AWS_REGION = process.env.AWS_REGION; +const config = { region: AWS_REGION || 'us-east-1' } +const secretsManagerClient = new SecretsManagerClient(config); + +app.get('/error', async (req, res) => { + throw new Error('Intentional error to demo seeing stack traces in logs'); +}) + +app.get('/health', async (req, res) => { + res.send('Success'); +}) app.get('/', async (req, res) => { - const DB_SECRET = process.env.DB_SECRET; - const AWS_REGION = process.env.AWS_REGION; - const client = await connectDb(DB_SECRET, AWS_REGION) - console.log('Yes!') - - const queryResult =await queryDB(client, 'select now()') - if (queryResult) - { - res.send(queryResult) - } - else - { - res.send('Hello world!') + let apiResponse = `

${appName}
Environment: ${ENVIRONMENT_NAME}
Success

Note: the database connection has not yet been configured. (timestamp: ${new Date().toString()})`; + + if (DB_SECRET && AWS_REGION) { + const client = await connectDb(DB_SECRET, AWS_REGION); + console.log('Got DB connection!'); + + const queryResult = await queryDB(client, 'select now()'); + + if (queryResult) { + + let dbOutput = JSON.stringify(queryResult); + if (queryResult.rowCount === 1 && queryResult.rows) { + dbOutput = `According to the database, the current date/time is ${queryResult.rows[0].now}`; + } + apiResponse = `

${appName}

Environment: ${ENVIRONMENT_NAME}

Database Connection Successful!


${dbOutput}`; + + } else { + apiResponse = `

${appName}

Environment: ${ENVIRONMENT_NAME}

Database Connection Successful!


No response data was returned from query.`; + } + closeConnection(client); } - closeConnection(client); + + res.send(apiResponse); }) app.listen(port, () => { - console.log(`Example rds app listening on port ${port}`) + console.log(`Example RDS app listening on port ${port}`) }) const getSecretValue = async (secretName, region) => { - console.log(secretName) - console.log(region) - const config = { region : region } + console.log(`Retrieving secret: ${secretName} for region: ${region}`); + let secret; - let secretsManager = new AWS.SecretsManager(config); - let secretValue = await secretsManager.getSecretValue({SecretId: secretName}).promise(); + let secretValue = await secretsManagerClient.send( + new GetSecretValueCommand({ SecretId: secretName }) + ); if (secretValue.SecretString) { secret = secretValue.SecretString; } @@ -42,31 +65,30 @@ const getSecretValue = async (secretName, region) => { const connectDb = async (secret, region) => { const secretValues = await getSecretValue(secret, region); - console.log("inside") - console.log(secretValues) + console.log(`Will attempt to connect to database at ${secretValues.host}`) try { - const client = new Client({ - user: secretValues.username, - host: secretValues.host, - database: secretValues.dbname, - password: secretValues.password, - port: secretValues.port - }) - console.log("making a connection to db...") - await client.connect() - console.log("Connected to DB.") - return client; - + const client = new Client({ + user: secretValues.username, + host: secretValues.host, + database: secretValues.dbname, + password: secretValues.password, + port: secretValues.port + }) + console.log("Making a connection to db...") + await client.connect() + console.log("Connected to DB.") + return client; + } catch (error) { - console.log(error) + console.log(error) } } const closeConnection = async (client) => { -await client.end(); + await client.end(); } -const queryDB = async(client, query) => { +const queryDB = async (client, query) => { const res = await client.query(query) console.log(res) return res; diff --git a/backstage-reference/templates/example-nodejs-rds/content/src/package.json b/backstage-reference/templates/example-nodejs-rds/content/src/package.json index e995d01d..f3a4e53a 100644 --- a/backstage-reference/templates/example-nodejs-rds/content/src/package.json +++ b/backstage-reference/templates/example-nodejs-rds/content/src/package.json @@ -11,7 +11,7 @@ "license": "ISC", "dependencies": { "express": "^4.18.2", - "aws-sdk": "^2.1444.0", + "@aws-sdk/client-secrets-manager": "^3.480.0", "pg": "^8.11.3" } } diff --git a/backstage-reference/templates/example-nodejs-rds/template.yaml b/backstage-reference/templates/example-nodejs-rds/template.yaml index 41f24f13..5ff03cc5 100644 --- a/backstage-reference/templates/example-nodejs-rds/template.yaml +++ b/backstage-reference/templates/example-nodejs-rds/template.yaml @@ -3,15 +3,16 @@ apiVersion: scaffolder.backstage.io/v1beta3 kind: Template metadata: name: example-nodejs2-rds-template - title: Node.js Express Web App With RDS + title: ECS - Node.js Express Web App With RDS description: >- - Create a starter Node.js web application running on - an Express server. + Create a starter Node.js web application hosted by + an Express server, running on an AWS Elastic Container Service cluster. This is demonstration only. 
tags: - nodejs - aws - rds + - ecs spec: owner: group:admins type: website @@ -54,6 +55,9 @@ spec: ui:options: allowedKinds: - AWSEnvironment + catalogFilter: + - kind: AWSEnvironment + metadata.environmentType: ecs defaultKind: AWSEnvironment - title: Choose a git repository location @@ -105,7 +109,7 @@ spec: PREFIX=${{ each.value.envProviderPrefix }} ENV_ROLE_ARN=${{ each.value.assumedRoleArn }} OPA_CI_ENVIRONMENT=${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} - OPA_CI_REGISTRY_IMAGE=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com/${{ parameters.component_id }}-${{ each.value.envProviderName }} + OPA_CI_REGISTRY_IMAGE=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com/${{ parameters.component_id | lower }}-${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} OPA_CI_REGISTRY=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com OPA_CI_ENVIRONMENT_MANUAL_APPROVAL={% if steps['opaGetAwsEnvProviders'].output.envDeployManualApproval %}true{% else %}false{% endif %} @@ -122,8 +126,8 @@ spec: url: https://{{ gitlab_hostname }}/opa-admin/backstage-reference/-/tree/main/common/aws_ecs targetPath: ./.iac values: - component_id: ${{ parameters.component_id }} - app_env_plaintext: "" + component_id: ${{ parameters.component_id | lower }} + appEnvPlaintext: "" - id: fetchBase name: Fetch Base @@ -131,14 +135,15 @@ spec: input: url: ./content values: - app_port: "8080" - component_id: ${{ parameters.component_id }} + appPort: "8080" + component_id: ${{ parameters.component_id | lower }} + title: ${{ parameters.component_id }} description: ${{ parameters.description }} owner: ${{ parameters.owner }} - platform_region: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} - aws_environment: ${{ steps['opaGetAwsEnvProviders'].output.envRef }} - aws_environment_name: ${{ steps['opaGetAwsEnvProviders'].output.envName }} - aws_secret_repo_arn: ${{ steps['createSecretManager'].output.awsSecretArn }} + platformRegion: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} + awsEnvironment: ${{ steps['opaGetAwsEnvProviders'].output.envRef }} + awsEnvironmentName: ${{ steps['opaGetAwsEnvProviders'].output.envName }} + awsSecretRepoArn: ${{ steps['createSecretManager'].output.awsSecretArn }} - id: entityDetail name: Get AWSEnvironment entity details diff --git a/backstage-reference/templates/example-nodejs/content/.backstage/catalog-info.yaml b/backstage-reference/templates/example-nodejs/content/.backstage/catalog-info.yaml index b2d62c0f..8c4555d1 100644 --- a/backstage-reference/templates/example-nodejs/content/.backstage/catalog-info.yaml +++ b/backstage-reference/templates/example-nodejs/content/.backstage/catalog-info.yaml @@ -2,21 +2,21 @@ apiVersion: backstage.io/v1alpha1 kind: Component metadata: name: ${{ values.component_id | dump }} + title: ${{ values.title | dump }} {%- if values.description %} description: ${{values.description | dump}} {%- endif %} tags: - aws - nodejs - annotations: - aws.amazon.com/opa-repo-secret-arn: ${{ values.aws_secret_repo_arn | dump }} # links: # - title: Example Title # url: http://www.example.com - iac-type: cdk - repo-secret-arn: ${{ values.aws_secret_repo_arn | dump }} + iacType: cdk + repoSecretArn: ${{ values.awsSecretRepoArn | dump }} spec: type: aws-app + subType: aws-ecs owner: ${{ values.owner | dump }} lifecycle: experimental dependsOn: [] diff --git 
a/backstage-reference/templates/example-nodejs/content/.gitlab-ci.yml b/backstage-reference/templates/example-nodejs/content/.gitlab-ci.yml index 9d53e74b..bdd03639 100644 --- a/backstage-reference/templates/example-nodejs/content/.gitlab-ci.yml +++ b/backstage-reference/templates/example-nodejs/content/.gitlab-ci.yml @@ -1,12 +1,12 @@ stages: - env-creation - - prepare-${{values.aws_environment_name}}-stage - - ${{values.aws_environment_name}}-stage + - prepare-${{values.awsEnvironmentName}}-stage + - ${{values.awsEnvironmentName}}-stage variables: APP_SHORT_NAME: "${{ values.component_id }}" APP_TEMPLATE_NAME: "example-nodejs" - OPA_PLATFORM_REGION: "${{ values.platform_region }}" + OPA_PLATFORM_REGION: "${{ values.platformRegion }}" include: - project: 'opa-admin/backstage-reference' @@ -16,3 +16,4 @@ include: - 'common/cicd/.gitlab-ci-aws-base.yml' - 'common/cicd/.gitlab-ci-aws-iac-ecs.yml' - 'common/cicd/.gitlab-ci-aws-image-kaniko.yml' + - 'common/cicd/.gitlab-ci-aws-image-deploy.yml' diff --git a/backstage-reference/templates/example-nodejs/content/Dockerfile b/backstage-reference/templates/example-nodejs/content/Dockerfile index 36e320d2..5ac1bf25 100644 --- a/backstage-reference/templates/example-nodejs/content/Dockerfile +++ b/backstage-reference/templates/example-nodejs/content/Dockerfile @@ -11,5 +11,5 @@ RUN yarn install # Bundle app source COPY . . -EXPOSE ${{ values.app_port }} +EXPOSE ${{ values.appPort }} CMD [ "node", "src/index.js" ] diff --git a/backstage-reference/templates/example-nodejs/content/src/index.js b/backstage-reference/templates/example-nodejs/content/src/index.js index 7df7e3df..a9529484 100644 --- a/backstage-reference/templates/example-nodejs/content/src/index.js +++ b/backstage-reference/templates/example-nodejs/content/src/index.js @@ -1,6 +1,6 @@ const express = require('express'); const app = express(); -const port = ${{ values.app_port }} || '??port??'; +const port = ${{ values.appPort }} || '??port??'; app.get('/', (req, res) => { res.send('Hello ${{ values.component_id }}!'); diff --git a/backstage-reference/templates/example-nodejs/template.yaml b/backstage-reference/templates/example-nodejs/template.yaml index ce2b00fa..c1328a3f 100644 --- a/backstage-reference/templates/example-nodejs/template.yaml +++ b/backstage-reference/templates/example-nodejs/template.yaml @@ -53,6 +53,9 @@ spec: ui:options: allowedKinds: - AWSEnvironment + catalogFilter: + - kind: AWSEnvironment + metadata.environmentType: ecs defaultKind: AWSEnvironment - title: Choose a git repository location @@ -104,7 +107,7 @@ spec: PREFIX=${{ each.value.envProviderPrefix }} ENV_ROLE_ARN=${{ each.value.assumedRoleArn }} OPA_CI_ENVIRONMENT=${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} - OPA_CI_REGISTRY_IMAGE=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com/${{ parameters.component_id }}-${{ each.value.envProviderName }} + OPA_CI_REGISTRY_IMAGE=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com/${{ parameters.component_id | lower }}-${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} OPA_CI_REGISTRY=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com OPA_CI_ENVIRONMENT_MANUAL_APPROVAL={% if steps['opaGetAwsEnvProviders'].output.envDeployManualApproval %}true{% else %}false{% endif %} @@ -121,8 +124,8 @@ spec: url: https://{{ gitlab_hostname }}/opa-admin/backstage-reference/-/tree/main/common/aws_ecs targetPath: ./.iac values: - 
component_id: ${{ parameters.component_id }} - app_env_plaintext: "" + component_id: ${{ parameters.component_id | lower }} + appEnvPlaintext: "" - id: fetchBase name: Fetch Base @@ -130,14 +133,15 @@ spec: input: url: ./content values: - app_port: "8080" - component_id: ${{ parameters.component_id }} + appPort: "8080" + component_id: ${{ parameters.component_id | lower }} + title: ${{ parameters.component_id }} description: ${{ parameters.description }} owner: ${{ parameters.owner }} - platform_region: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} - aws_environment: ${{ steps['opaGetAwsEnvProviders'].output.envRef }} - aws_environment_name: ${{ steps['opaGetAwsEnvProviders'].output.envName }} - aws_secret_repo_arn: ${{ steps['createSecretManager'].output.awsSecretArn }} + platformRegion: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} + awsEnvironment: ${{ steps['opaGetAwsEnvProviders'].output.envRef }} + awsEnvironmentName: ${{ steps['opaGetAwsEnvProviders'].output.envName }} + awsSecretRepoArn: ${{ steps['createSecretManager'].output.awsSecretArn }} - id: entityDetail name: Get AWSEnvironment entity details diff --git a/backstage-reference/templates/example-python-flask-eks/content/.backstage/catalog-info.yaml b/backstage-reference/templates/example-python-flask-eks/content/.backstage/catalog-info.yaml new file mode 100644 index 00000000..7017abcb --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/.backstage/catalog-info.yaml @@ -0,0 +1,24 @@ +apiVersion: backstage.io/v1alpha1 +kind: Component +metadata: + name: ${{ values.component_id | dump }} + title: ${{ values.title | dump }} + {%- if values.description %} + description: ${{values.description | dump}} + {%- endif %} + tags: + - aws + - python + - flask + - k8s + iacType: cdk + repoSecretArn: ${{ values.awsSecretRepoArn | dump }} + + # Configure where k8s configurations are within the project + k8sConfigDirName: k8s +spec: + type: aws-app + subType: aws-eks + owner: ${{ values.owner | dump }} + lifecycle: experimental + dependsOn: [] diff --git a/backstage-reference/templates/example-python-flask-eks/content/.editorconfig b/backstage-reference/templates/example-python-flask-eks/content/.editorconfig new file mode 100644 index 00000000..81357d3e --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/.editorconfig @@ -0,0 +1,36 @@ +# EditorConfig is awesome: https://EditorConfig.org + +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +charset = utf-8 +indent_style = space +max_line_length = 120 + +[*.html] +indent_style = space +indent_size = 2 + +[*.{ts,json,js,tsx,jsx}] +indent_style = space +indent_size = 2 + +[*.md] +indent_size = 2 +indent_style = space + +[Dockerfile] +indent_style = space +indent_size = 2 + +[*.{yml,yaml}] +indent_size = 2 +indent_style = space + +[Makefile] +indent_size = 4 +indent_style = tab diff --git a/backstage-reference/templates/example-python-flask-eks/content/.gitignore b/backstage-reference/templates/example-python-flask-eks/content/.gitignore new file mode 100644 index 00000000..88ed9690 --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/.gitignore @@ -0,0 +1,18 @@ +# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
+ +# dependencies +.venv/ +__pycache__ + +# testing +/coverage + +# production +/build + +# misc +.DS_Store +.env.local +.env.development.local +.env.test.local +.env.production.local diff --git a/backstage-reference/templates/example-python-flask-eks/content/.gitlab-ci.yml b/backstage-reference/templates/example-python-flask-eks/content/.gitlab-ci.yml new file mode 100644 index 00000000..8e41df41 --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/.gitlab-ci.yml @@ -0,0 +1,20 @@ +stages: + - env-creation + - prepare-${{values.awsEnvironmentName}}-stage + - ${{values.awsEnvironmentName}}-stage + +variables: + APP_SHORT_NAME: "${{ values.component_id }}" + APP_TEMPLATE_NAME: "example-python-flask-eks" + OPA_PLATFORM_REGION: "${{ values.platformRegion }}" + +include: + - project: 'opa-admin/backstage-reference' + ref: main + file: + - 'common/cicd/.gitlab-ci-job-defaults-cdk.yml' + - 'common/cicd/.gitlab-ci-aws-base.yml' + - 'common/cicd/.gitlab-ci-aws-iac-eks.yml' + # If you want to call kubectl directly instead of going through lambda, comment above line and uncomment below line + # - 'common/cicd/.gitlab-ci-aws-iac-eks-kubectl.yml' + - 'common/cicd/.gitlab-ci-aws-image-kaniko.yml' diff --git a/backstage-reference/templates/example-python-flask-eks/content/Dockerfile b/backstage-reference/templates/example-python-flask-eks/content/Dockerfile new file mode 100644 index 00000000..e25c5e02 --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/Dockerfile @@ -0,0 +1,20 @@ +# This dockerfile builds an image for the python application +# It should be executed with the root of the repo as docker context. +# + +FROM --platform=linux/amd64 python:3.9 + +WORKDIR /app + +COPY requirements.txt requirements.txt + +RUN pip3 install -r requirements.txt + +COPY server.py . 
+COPY templates ./templates +ENV FLASK_APP server + +# Specify a non-root user +USER nobody + +CMD [ "python3", "-m" , "flask", "run", "--host=0.0.0.0"] diff --git a/backstage-reference/templates/example-python-flask-eks/content/Procfile b/backstage-reference/templates/example-python-flask-eks/content/Procfile new file mode 100644 index 00000000..f89c5363 --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/Procfile @@ -0,0 +1 @@ +web: gunicorn server:app \ No newline at end of file diff --git a/backstage-reference/templates/example-python-flask-eks/content/README.md b/backstage-reference/templates/example-python-flask-eks/content/README.md new file mode 100644 index 00000000..ad845d7d --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/README.md @@ -0,0 +1,14 @@ +# Simple Python Flask application + +This application is a starter skeleton for a Python Flask application + +## Dependencies +Install all dependencies with: +``` +pip install -r requirements.txt +``` + +## Running the application +Start the application with: +```py +FLASK_APP=app flask run \ No newline at end of file diff --git a/backstage-reference/templates/example-python-flask-eks/content/k8s/base/deployment.yaml b/backstage-reference/templates/example-python-flask-eks/content/k8s/base/deployment.yaml new file mode 100644 index 00000000..b7843112 --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/k8s/base/deployment.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ${{ values.component_id }}-mainpod + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe + template: + metadata: + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe + spec: + serviceAccountName: overrideMe + containers: + - name: webapp + image: 12345678912.dkr.ecr.us-east-1.amazonaws.com/${{ values.component_id }}-overrideMe-overrideMe:latest + imagePullPolicy: Always + resources: + limits: + memory: 512Mi + cpu: "1" + requests: + cpu: "100m" + ports: + - containerPort: ${{ values.appPort }} + livenessProbe: + httpGet: + path: / + port: ${{ values.appPort }} + initialDelaySeconds: 20 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: / + port: ${{ values.appPort }} + initialDelaySeconds: 20 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 diff --git a/backstage-reference/templates/example-python-flask-eks/content/k8s/base/ingress.yaml b/backstage-reference/templates/example-python-flask-eks/content/k8s/base/ingress.yaml new file mode 100644 index 00000000..faea5b8d --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/k8s/base/ingress.yaml @@ -0,0 +1,45 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ${{ values.component_id }}-ingress + annotations: + kubernetes.io/ingress.class: "alb" + alb.ingress.kubernetes.io/scheme: "internet-facing" + alb.ingress.kubernetes.io/healthcheck-path: "/health" + alb.ingress.kubernetes.io/success-codes: "200,201,302" + alb.ingress.kubernetes.io/target-type: "ip" + + # Tag load balancer so that it shows up as an OPA application resource + # Note - this setting must be overriden per environment provider + alb.ingress.kubernetes.io/tags: "aws-apps-${{ values.component_id 
}}-ENV_PLACEHOLDER-ENV_PROVIDER_PLACEHOLDER=${{ values.component_id }}" + + # Allows multiple services to use the same ALB + alb.ingress.kubernetes.io/group.name: ${{ values.component_id }}-ENV_PLACEHOLDER + + # Configure the load balancer name. Comment this out to have the name be auto-generated + # Load balancer name can only be up to 32 characters long + # alb.ingress.kubernetes.io/load-balancer-name: "${{ values.component_id }}-ENV_PLACEHOLDER-NS_PLACEHOLDER" + + # To enable HTTPS, you need a valid SSL certificate + # Here are some example annotations to use for enabling an HTTPS listener for your load balancer: + # alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' + # alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}' + # alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:::certificate/ + + # To configure an IP allow-list for the load balancer + # alb.ingress.kubernetes.io/inbound-cidrs: 10.0.0.0/24 + + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe +spec: + rules: + - http: + paths: + - path: /* + pathType: ImplementationSpecific + backend: + service: + name: ${{ values.component_id }}-service + port: + number: 80 diff --git a/backstage-reference/templates/example-python-flask-eks/content/k8s/base/kustomization.yaml b/backstage-reference/templates/example-python-flask-eks/content/k8s/base/kustomization.yaml new file mode 100644 index 00000000..af8ebbc8 --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/k8s/base/kustomization.yaml @@ -0,0 +1,9 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - deployment.yaml + - ingress.yaml + - nsAdminRoleBinding.yaml + - nsViewerRoleBinding.yaml + - service.yaml + - serviceAccount.yaml diff --git a/backstage-reference/templates/example-python-flask-eks/content/k8s/base/nsAdminRoleBinding.yaml b/backstage-reference/templates/example-python-flask-eks/content/k8s/base/nsAdminRoleBinding.yaml new file mode 100644 index 00000000..c298d053 --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/k8s/base/nsAdminRoleBinding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: ${{ values.component_id }}-admin + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe +subjects: +- kind: User + name: APP_ADMIN_ROLE_PLACEHOLDER + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io diff --git a/backstage-reference/templates/example-python-flask-eks/content/k8s/base/nsViewerRoleBinding.yaml b/backstage-reference/templates/example-python-flask-eks/content/k8s/base/nsViewerRoleBinding.yaml new file mode 100644 index 00000000..18367e98 --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/k8s/base/nsViewerRoleBinding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ${{ values.component_id }}-view-ns + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe +subjects: +- kind: User + name: APP_ADMIN_ROLE_PLACEHOLDER + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: opa-namespace-viewer + apiGroup: rbac.authorization.k8s.io diff --git 
a/backstage-reference/templates/example-python-flask-eks/content/k8s/base/service.yaml b/backstage-reference/templates/example-python-flask-eks/content/k8s/base/service.yaml new file mode 100644 index 00000000..89ecd5da --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/k8s/base/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: ${{ values.component_id }}-service + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe +spec: + selector: + app.kubernetes.io/name: ${{ values.component_id }} + ports: + - name: http + port: 80 + targetPort: ${{ values.appPort }} + protocol: TCP + type: NodePort diff --git a/backstage-reference/templates/example-python-flask-eks/content/k8s/base/serviceAccount.yaml b/backstage-reference/templates/example-python-flask-eks/content/k8s/base/serviceAccount.yaml new file mode 100644 index 00000000..d30a718e --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/k8s/base/serviceAccount.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ${{ values.component_id }}-sa + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe + annotations: + eks.amazonaws.com/role-arn: overrideMe diff --git a/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/deployment.yaml b/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/deployment.yaml new file mode 100644 index 00000000..cbb9fa6b --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/deployment.yaml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ${{ values.component_id }}-mainpod + labels: + app.kubernetes.io/env: ENV_PLACEHOLDER +spec: + selector: + matchLabels: + app.kubernetes.io/env: ENV_PLACEHOLDER + template: + metadata: + labels: + app.kubernetes.io/env: ENV_PLACEHOLDER + spec: + serviceAccountName: ${{ values.component_id }}-sa + containers: + - name: webapp + image: ACCT_PLACEHOLDER.dkr.ecr.REGION_PLACEHOLDER.amazonaws.com/${{ values.component_id }}-ENV_PLACEHOLDER-ENV_PROVIDER_PLACEHOLDER:latest + envFrom: + - configMapRef: + name: ${{ values.component_id }}-env-vars diff --git a/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/ingress.yaml b/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/ingress.yaml new file mode 100644 index 00000000..243436f7 --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/ingress.yaml @@ -0,0 +1,6 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ${{ values.component_id }}-ingress + labels: + app.kubernetes.io/env: ENV_PLACEHOLDER diff --git a/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/kustomization.yaml b/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/kustomization.yaml new file mode 100644 index 00000000..deea8ac3 --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/kustomization.yaml @@ -0,0 +1,23 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: NS_PLACEHOLDER +nameSuffix: -ENV_PLACEHOLDER +configMapGenerator: + - name: ${{ values.component_id }}-env-vars + literals: + - ENVIRONMENT_NAME=ENV_PLACEHOLDER + - FLASK_RUN_PORT=8080 +generatorOptions: + 
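+  # generatorOptions applies the labels below to the generated env-vars ConfigMap.
+  # Once the *_PLACEHOLDER tokens are replaced for a target environment provider,
+  # the overlay can be rendered locally with `kubectl kustomize` for inspection.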
labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: ENV_PLACEHOLDER +resources: + - ../base + - ./namespace.yaml +patches: + - path: deployment.yaml + - path: ingress.yaml + - path: nsAdminRoleBinding.yaml + - path: nsViewerRoleBinding.yaml + - path: service.yaml + - path: serviceAccount.yaml diff --git a/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/namespace.yaml b/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/namespace.yaml new file mode 100644 index 00000000..17dfd4ef --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/namespace.yaml @@ -0,0 +1,6 @@ +kind: Namespace +apiVersion: v1 +metadata: + name: NS_PLACEHOLDER + labels: + name: NS_PLACEHOLDER diff --git a/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/nsAdminRoleBinding.yaml b/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/nsAdminRoleBinding.yaml new file mode 100644 index 00000000..876c1f02 --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/nsAdminRoleBinding.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: ${{ values.component_id }}-admin + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: ENV_PLACEHOLDER + diff --git a/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/nsViewerRoleBinding.yaml b/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/nsViewerRoleBinding.yaml new file mode 100644 index 00000000..ba4f4ac3 --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/nsViewerRoleBinding.yaml @@ -0,0 +1,7 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ${{ values.component_id }}-view-ns + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: ENV_PLACEHOLDER diff --git a/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/service.yaml b/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/service.yaml new file mode 100644 index 00000000..9dbbdcef --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/service.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Service +metadata: + name: ${{ values.component_id }}-service + labels: + app.kubernetes.io/env: ENV_PLACEHOLDER +spec: + selector: + app.kubernetes.io/env: ENV_PLACEHOLDER diff --git a/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/serviceAccount.yaml b/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/serviceAccount.yaml new file mode 100644 index 00000000..2a12df32 --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/k8s/new-env-template/serviceAccount.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ${{ values.component_id }}-sa + labels: + app.kubernetes.io/env: ENV_PLACEHOLDER + annotations: + eks.amazonaws.com/role-arn: SA_ROLE_PLACEHOLDER + diff --git a/backstage-reference/templates/example-python-flask-eks/content/requirements.txt b/backstage-reference/templates/example-python-flask-eks/content/requirements.txt new file mode 100644 index 00000000..c708748a --- /dev/null +++ 
b/backstage-reference/templates/example-python-flask-eks/content/requirements.txt @@ -0,0 +1,7 @@ +click==8.1.3 +Flask==2.3.3 +itsdangerous==2.1.2 +Jinja2==3.1.2 +MarkupSafe==2.1.2 +Werkzeug==2.3.7 +gunicorn==20.1.0 diff --git a/backstage-reference/templates/example-python-flask-eks/content/server.py b/backstage-reference/templates/example-python-flask-eks/content/server.py new file mode 100644 index 00000000..a3a42933 --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/server.py @@ -0,0 +1,30 @@ +# Importing flask module in the project is mandatory +# An object of Flask class is our WSGI application. +from flask import Flask, render_template +import os +# Flask constructor takes the name of +# current module (__name__) as argument. +app = Flask(__name__) + +# The route() function of the Flask class is a decorator, +# which tells the application which URL should call +# the associated function. +@app.route('/hello') +# ‘/’ URL is bound with hello_world() function. +def hello_world(): + output = "" + for name, value in os.environ.items(): + output = output + "{0}: {1} \n".format(name, value) + + return output + +@app.route('/') +def index(): + return render_template('index.html') + +# main driver function +if __name__ == '__main__': + + # run() method of Flask class runs the application + # on the local development server. + app.run() \ No newline at end of file diff --git a/backstage-reference/templates/example-python-flask-eks/content/templates/index.html b/backstage-reference/templates/example-python-flask-eks/content/templates/index.html new file mode 100644 index 00000000..c5d6ed68 --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/content/templates/index.html @@ -0,0 +1,14 @@ + + + + + + My website + + + +

Welcome to my website!

+

Built with Flask
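The scaffolded Flask starter above (server.py together with the pinned requirements.txt) can be smoke-tested locally before the CI pipeline builds the container image. A minimal sketch, not part of the template output; the port and the choice of runner are assumptions noted in the comments:

```bash
# Hedged local smoke test for the generated Flask starter (sketch only).
# Assumes the working directory contains server.py and requirements.txt,
# and that port 8080 (the appPort value passed by the template) is free.
pip install -r requirements.txt
FLASK_APP=server FLASK_RUN_PORT=8080 flask run --host=0.0.0.0
# Alternatively, run it under the gunicorn version pinned in requirements.txt:
gunicorn --bind 0.0.0.0:8080 server:app
```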

+ + + diff --git a/backstage-reference/templates/example-python-flask-eks/template.yaml b/backstage-reference/templates/example-python-flask-eks/template.yaml new file mode 100644 index 00000000..0a19f060 --- /dev/null +++ b/backstage-reference/templates/example-python-flask-eks/template.yaml @@ -0,0 +1,240 @@ +apiVersion: scaffolder.backstage.io/v1beta3 +# https://backstage.io/docs/features/software-catalog/descriptor-format#kind-template +kind: Template +metadata: + name: example-python-flask-eks-template + title: Kubernetes - Python Flask starter application + description: >- + Create a 'Hello World' Python application + utilizing the Flask web app framework, + running on an Elastic Kubernetes Service cluster. + tags: + - python + - flask + - aws + - eks + - kubernetes + - kustomize +spec: + owner: group:admins + type: website + + # These parameters are used to generate the input form in the frontend, and are + # used to gather input data for the execution of the template. + parameters: + - title: Provide some basic component information + required: + - component_id + - owner + properties: + component_id: + title: Name + type: string + description: Unique name of the component + ui:field: EntityNamePicker + description: + title: Description + type: string + description: Help others understand what this website is for + owner: + title: Owner + type: string + description: Owner of the component + ui:field: OwnerPicker + ui:options: + allowedKinds: + - Group + - title: Provide environment information for the application + required: + - environment + - namespace + - k8sIAMRoleBindingType + properties: + environment: + title: AWS Environment + type: string + description: The AWS Environment where the database is created + ui:field: EntityPicker + ui:options: + allowedKinds: + - AWSEnvironment + catalogFilter: + - kind: AWSEnvironment + metadata.environmentType: eks + defaultKind: AWSEnvironment + namespace: + title: k8s Namespace + type: string + description: The k8s namespace to assign to application resources for the environment selected above + k8sIAMRoleBindingType: + title: Namespace-bound Kubectl Admin Access + description: Choose how to map an AWS IAM role with namespace-bound k8s admin access + type: string + default: create_new_k8s_namespace_admin_iam_role + enum: + - create_new_k8s_namespace_admin_iam_role + - existing_new_k8s_namespace_admin_iam_role + enumNames: + - 'Create a separate role for the K8s namespace' + - 'Import existing role and grant it access to the K8s namespace' + + # Only ask for the existing IAM role if user chose to use an existing role + dependencies: + k8sIAMRoleBindingType: + oneOf: + - properties: + k8sIAMRoleBindingType: + enum: + - existing_new_k8s_namespace_admin_iam_role + existingK8sNamespaceAdminRole: + title: Existing IAM role ARN + type: string + description: Existing IAM role to grant namespace privileges to + - properties: + k8sIAMRoleBindingType: + enum: + - create_new_k8s_namespace_admin_iam_role + + - title: Choose a git repository location + required: + - repoUrl + properties: + repoUrl: + title: Repository Location + type: string + ui:field: RepoUrlPicker + ui:options: + allowedHosts: + - {{ gitlab_hostname }} + allowedOwners: + - aws-app + + # These steps are executed in the scaffolder backend, using data that we gathered + # via the parameters above. + steps: + # Each step executes an action, in this case one templates files into the working directory. 
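+  # Illustrative note, not part of the original template: the createProviderPropsFiles step below
+  # writes one properties file per environment provider under .awsdeployment/providers/.
+  # With a hypothetical provider named "dev" in account 111122223333 and region us-east-1,
+  # the rendered file might look roughly like:
+  #   TARGET_ENV_NAME=dev-environment
+  #   TARGET_ENV_PROVIDER_NAME=dev
+  #   ACCOUNT=111122223333
+  #   REGION=us-east-1
+  #   NAMESPACE=my-app-namespace
+  #   K8S_IAM_ROLE_BINDING_TYPE=create_new_k8s_namespace_admin_iam_role
+  #   OPA_CI_ENVIRONMENT=dev-environment-dev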
+ - id: opaGetPlatformInfo + name: Get OPA platform information + action: opa:get-platform-metadata + + - id: opaGetAwsEnvProviders + name: Get AWS Environment Providers + action: opa:get-env-providers + input: + environmentRef: ${{ parameters.environment }} + + - id: debugEnvironment + name: Print the environment entity info + action: debug:log + input: + message: ${{ steps['opaGetAwsEnvProviders'].output | dump }} + + - id: createProviderPropsFiles + each: ${{ steps['opaGetAwsEnvProviders'].output.envProviders }} + name: Store environment provider parameters + action: roadiehq:utils:fs:write + input: + path: .awsdeployment/providers/${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName}}.properties + content: | + TARGET_VPCID=${{ each.value.vpcId }} + TARGET_EKS_CLUSTER_ARN=${{ each.value.clusterArn }} + TARGET_ENV_NAME=${{ steps['opaGetAwsEnvProviders'].output.envName }} + TARGET_ENV_PROVIDER_NAME=${{ each.value.envProviderName }} + ACCOUNT=${{ each.value.accountId }} + REGION=${{ each.value.region }} + PREFIX=${{ each.value.envProviderPrefix }} + ENV_ROLE_ARN=${{ each.value.assumedRoleArn }} + OPA_CI_ENVIRONMENT=${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} + OPA_CI_REGISTRY_IMAGE=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com/${{ parameters.component_id | lower }}-${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} + OPA_CI_REGISTRY=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com + OPA_CI_ENVIRONMENT_MANUAL_APPROVAL={% if steps['opaGetAwsEnvProviders'].output.envDeployManualApproval %}true{% else %}false{% endif %} + TARGET_KUBECTL_LAMBDA_ARN=${{ each.value.kubectlLambdaArn }} + TARGET_KUBECTL_LAMBDA_ROLE_ARN=${{ each.value.kubectlLambdaRoleArn }} + NAMESPACE=${{ parameters.namespace }} + K8S_IAM_ROLE_BINDING_TYPE=${{ parameters.k8sIAMRoleBindingType }} + APP_ADMIN_ROLE_ARN=${{ parameters.existingK8sNamespaceAdminRole|default('', true) }} + + - id: createSecretManager + name: Create a Secret + action: opa:create-secret + input: + secretName: aws-apps-${{ (parameters.repoUrl | parseRepoUrl).repo | lower }}-access-token + + - id: fetchIac + name: Fetch EKS Infrastructure as Code + action: fetch:template + input: + url: https://{{ gitlab_hostname }}/opa-admin/backstage-reference/-/tree/main/common/aws_eks + targetPath: ./.iac + values: + component_id: ${{ parameters.component_id | lower }} + + - id: fetchBase + name: Fetch Base + action: fetch:template + input: + url: ./content + values: + appPort: "8080" + component_id: ${{ parameters.component_id | lower }} + title: ${{ parameters.component_id }} + description: ${{ parameters.description }} + owner: ${{ parameters.owner }} + destination: ${{ parameters.repoUrl | parseRepoUrl }} + platformRegion: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} + awsEnvironment: ${{ parameters.environment }} + awsEnvironmentName: ${{ steps['opaGetAwsEnvProviders'].output.envName }} + awsSecretRepoArn: ${{ steps['createSecretManager'].output.awsSecretArn }} + awsRegion: ${{ steps['opaGetAwsEnvProviders'].output.region }} + awsAccount: ${{ steps['opaGetAwsEnvProviders'].output.account }} + namespace: ${{ parameters.namespace }} + k8sIAMRoleBindingType: ${{ parameters.k8sIAMRoleBindingType }} + existingK8sNamespaceAdminRole: ${{ parameters.existingK8sNamespaceAdminRole|default('', true) }} + + - id: entityDetail + name: Get AWSEnvironment entity details + action: catalog:fetch + input: + 
entityRef: ${{ parameters.environment }} + + - id: debugEntity + name: Print the workspace + action: debug:log + input: + message: ${{ steps['entityDetail'].output.entity | dump }} + listWorkspace: true + + # This step publishes the contents of the working directory to GitLab. + - id: publish + name: Publish + action: publish:gitlab + input: + repoUrl: ${{ parameters.repoUrl }} + repoVisibility: internal + defaultBranch: main + + # Create a gitlab repository access token and store it in a SecretsManager secret + - id: createRepoToken + name: Create Repo Access Token + action: opa:createRepoAccessToken:gitlab + input: + repoUrl: ${{ parameters.repoUrl }} + projectId: ${{ steps['publish'].output.projectId}} + secretArn: ${{ steps['createSecretManager'].output.awsSecretArn }} + + # The final step is to register our new component in the catalog. + - id: register + name: Register + action: catalog:register + input: + repoContentsUrl: ${{ steps['publish'].output.repoContentsUrl }} + catalogInfoPath: "/.backstage/catalog-info.yaml" + + # Outputs are displayed to the user after a successful execution of the template. + output: + links: + - title: Repository + url: ${{ steps['publish'].output.remoteUrl }} + - title: Open in catalog + icon: catalog + entityRef: ${{ steps['register'].output.entityRef }} diff --git a/backstage-reference/templates/example-python-flask/content/.backstage/catalog-info.yaml b/backstage-reference/templates/example-python-flask/content/.backstage/catalog-info.yaml index fce03048..47a0196b 100644 --- a/backstage-reference/templates/example-python-flask/content/.backstage/catalog-info.yaml +++ b/backstage-reference/templates/example-python-flask/content/.backstage/catalog-info.yaml @@ -2,6 +2,7 @@ apiVersion: backstage.io/v1alpha1 kind: Component metadata: name: ${{ values.component_id | dump }} + title: ${{ values.title | dump }} {%- if values.description %} description: ${{values.description | dump}} {%- endif %} @@ -9,12 +10,11 @@ metadata: - aws - python - flask - annotations: - aws.amazon.com/opa-repo-secret-arn: ${{ values.aws_secret_repo_arn | dump }} - iac-type: cdk - repo-secret-arn: ${{ values.aws_secret_repo_arn | dump }} + iacType: cdk + repoSecretArn: ${{ values.awsSecretRepoArn | dump }} spec: type: aws-app + subType: aws-ecs owner: ${{ values.owner | dump }} lifecycle: experimental dependsOn: [] diff --git a/backstage-reference/templates/example-python-flask/content/.gitlab-ci.yml b/backstage-reference/templates/example-python-flask/content/.gitlab-ci.yml index c5708124..6577324c 100644 --- a/backstage-reference/templates/example-python-flask/content/.gitlab-ci.yml +++ b/backstage-reference/templates/example-python-flask/content/.gitlab-ci.yml @@ -1,12 +1,12 @@ stages: - env-creation - - prepare-${{values.aws_environment_name}}-stage - - ${{values.aws_environment_name}}-stage + - prepare-${{values.awsEnvironmentName}}-stage + - ${{values.awsEnvironmentName}}-stage variables: APP_SHORT_NAME: "${{ values.component_id }}" APP_TEMPLATE_NAME: "example-python-flask" - OPA_PLATFORM_REGION: "${{ values.platform_region }}" + OPA_PLATFORM_REGION: "${{ values.platformRegion }}" include: - project: 'opa-admin/backstage-reference' @@ -16,3 +16,4 @@ include: - 'common/cicd/.gitlab-ci-aws-base.yml' - 'common/cicd/.gitlab-ci-aws-iac-ecs.yml' - 'common/cicd/.gitlab-ci-aws-image-kaniko.yml' + - 'common/cicd/.gitlab-ci-aws-image-deploy.yml' diff --git a/backstage-reference/templates/example-python-flask/content/Dockerfile 
b/backstage-reference/templates/example-python-flask/content/Dockerfile index 058ce7ba..e25c5e02 100644 --- a/backstage-reference/templates/example-python-flask/content/Dockerfile +++ b/backstage-reference/templates/example-python-flask/content/Dockerfile @@ -14,4 +14,7 @@ COPY server.py . COPY templates ./templates ENV FLASK_APP server +# Specify a non-root user +USER nobody + CMD [ "python3", "-m" , "flask", "run", "--host=0.0.0.0"] diff --git a/backstage-reference/templates/example-python-flask/content/requirements.txt b/backstage-reference/templates/example-python-flask/content/requirements.txt index ded8d331..c708748a 100644 --- a/backstage-reference/templates/example-python-flask/content/requirements.txt +++ b/backstage-reference/templates/example-python-flask/content/requirements.txt @@ -1,7 +1,7 @@ click==8.1.3 Flask==2.3.3 itsdangerous==2.1.2 -Jinja2==3.1.3 +Jinja2==3.1.2 MarkupSafe==2.1.2 -Werkzeug==3.0.1 +Werkzeug==2.3.7 gunicorn==20.1.0 diff --git a/backstage-reference/templates/example-python-flask/content/templates/index.html b/backstage-reference/templates/example-python-flask/content/templates/index.html index a5ecec44..c5d6ed68 100644 --- a/backstage-reference/templates/example-python-flask/content/templates/index.html +++ b/backstage-reference/templates/example-python-flask/content/templates/index.html @@ -1,11 +1,14 @@ + - - PACE team site + + My website + -

Welcome to the PACE team website

-

Built with Flask

+

Welcome to my website!

+

Built with Flask

- \ No newline at end of file + + diff --git a/backstage-reference/templates/example-python-flask/template.yaml b/backstage-reference/templates/example-python-flask/template.yaml index 0af8775b..44846c8c 100644 --- a/backstage-reference/templates/example-python-flask/template.yaml +++ b/backstage-reference/templates/example-python-flask/template.yaml @@ -6,11 +6,13 @@ metadata: title: Python Flask starter application description: >- Create a 'Hello World' Python application - utilizing the Flask web app framework + utilizing the Flask web app framework, + running on an AWS Elastic Container Service cluster. tags: - python - flask - aws + - ecs spec: owner: group:admins type: website @@ -52,8 +54,11 @@ spec: ui:options: allowedKinds: - AWSEnvironment + catalogFilter: + - kind: AWSEnvironment + metadata.environmentType: ecs defaultKind: AWSEnvironment - - title: Choose a location + - title: Choose a git repository location required: - repoUrl properties: @@ -81,6 +86,12 @@ spec: input: environmentRef: ${{ parameters.environment }} + - id: debugEnvironment + name: Print the environment entity info + action: debug:log + input: + message: ${{ steps['opaGetAwsEnvProviders'].output | dump }} + - id: createProviderPropsFiles each: ${{ steps['opaGetAwsEnvProviders'].output.envProviders }} name: Store environment provider parameters @@ -97,7 +108,7 @@ spec: PREFIX=${{ each.value.envProviderPrefix }} ENV_ROLE_ARN=${{ each.value.assumedRoleArn }} OPA_CI_ENVIRONMENT=${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} - OPA_CI_REGISTRY_IMAGE=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com/${{ parameters.component_id }}-${{ each.value.envProviderName }} + OPA_CI_REGISTRY_IMAGE=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com/${{ parameters.component_id | lower }}-${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} OPA_CI_REGISTRY=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com OPA_CI_ENVIRONMENT_MANUAL_APPROVAL={% if steps['opaGetAwsEnvProviders'].output.envDeployManualApproval %}true{% else %}false{% endif %} @@ -114,8 +125,8 @@ spec: url: https://{{ gitlab_hostname }}/opa-admin/backstage-reference/-/tree/main/common/aws_ecs targetPath: ./.iac values: - component_id: ${{ parameters.component_id }} - app_env_plaintext: + component_id: ${{ parameters.component_id | lower }} + appEnvPlaintext: # IaC code uses PORT for container port mapping, but Flask apps use FLASK_RUN_PORT. These values should match. 
PORT: 8081 FLASK_RUN_PORT: 8081 @@ -126,15 +137,15 @@ spec: input: url: ./content values: - component_id: ${{ parameters.component_id }} + component_id: ${{ parameters.component_id | lower }} + title: ${{ parameters.component_id }} description: ${{ parameters.description }} owner: ${{ parameters.owner }} destination: ${{ parameters.repoUrl | parseRepoUrl }} - platform_region: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} - aws_environment: ${{ parameters.environment }} - aws_region: ${{ steps['opaGetAwsEnvProviders'].output.region }} - aws_account: ${{ steps['opaGetAwsEnvProviders'].output.account }} - aws_secret_repo_arn: ${{ steps['createSecretManager'].output.awsSecretArn }} + platformRegion: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} + awsEnvironment: ${{ steps['opaGetAwsEnvProviders'].output.envRef }} + awsEnvironmentName: ${{ steps['opaGetAwsEnvProviders'].output.envName }} + awsSecretRepoArn: ${{ steps['createSecretManager'].output.awsSecretArn }} - id: entityDetail name: Get AWSEnvironment entity details @@ -142,6 +153,13 @@ spec: input: entityRef: ${{ parameters.environment }} + - id: debugEntity + name: Print the workspace + action: debug:log + input: + message: ${{ steps['entityDetail'].output.entity | dump }} + listWorkspace: true + # This step publishes the contents of the working directory to GitLab. - id: publish name: Publish diff --git a/backstage-reference/templates/example-serverless-rest-api/content/.backstage/catalog-info.yaml b/backstage-reference/templates/example-serverless-rest-api/content/.backstage/catalog-info.yaml index 1c1b6051..8acfe107 100644 --- a/backstage-reference/templates/example-serverless-rest-api/content/.backstage/catalog-info.yaml +++ b/backstage-reference/templates/example-serverless-rest-api/content/.backstage/catalog-info.yaml @@ -2,6 +2,7 @@ apiVersion: backstage.io/v1alpha1 kind: Component metadata: name: ${{ values.component_id | dump }} + title: ${{ values.title | dump }} kebabName: ${{ values.kebabName | dump }} {%- if values.description %} description: ${{values.description | dump}} @@ -13,16 +14,14 @@ metadata: - openapi - apigateway - serverless - annotations: - aws.amazon.com/opa-repo-secret-arn: ${{ values.aws_secret_repo_arn | dump }} - aws.amazon.com/opa-component-subtype: "serverless-rest-api" # links: # - title: Example Title # url: http://www.example.com - iac-type: cdk - repo-secret-arn: ${{ values.aws_secret_repo_arn | dump }} + iacType: cdk + repoSecretArn: ${{ values.awsSecretRepoArn | dump }} spec: type: aws-app + subType: aws-serverless owner: ${{ values.owner | dump }} lifecycle: experimental dependsOn: [] diff --git a/backstage-reference/templates/example-serverless-rest-api/content/.gitlab-ci.yml b/backstage-reference/templates/example-serverless-rest-api/content/.gitlab-ci.yml index b5fcfd3a..228a661a 100644 --- a/backstage-reference/templates/example-serverless-rest-api/content/.gitlab-ci.yml +++ b/backstage-reference/templates/example-serverless-rest-api/content/.gitlab-ci.yml @@ -1,13 +1,13 @@ stages: - env-creation - - prepare-${{values.aws_environment_name}}-stage - - ${{values.aws_environment_name}}-stage + - prepare-${{values.awsEnvironmentName}}-stage + - ${{values.awsEnvironmentName}}-stage variables: APP_KEBAB_NAME: "${{ values.kebabName }}" APP_SHORT_NAME: "${{ values.component_id }}" APP_TEMPLATE_NAME: "example-serverless-rest-api" - OPA_PLATFORM_REGION: "${{ values.platform_region }}" + OPA_PLATFORM_REGION: "${{ values.platformRegion }}" include: - project: 
'opa-admin/backstage-reference' diff --git a/backstage-reference/templates/example-serverless-rest-api/content/README.md b/backstage-reference/templates/example-serverless-rest-api/content/README.md index 074bcbcd..34712b80 100644 --- a/backstage-reference/templates/example-serverless-rest-api/content/README.md +++ b/backstage-reference/templates/example-serverless-rest-api/content/README.md @@ -113,7 +113,7 @@ To simplify troubleshooting, SAM CLI has a command called `sam logs`. `sam logs` `NOTE`: This command works for all AWS Lambda functions; not just the ones you deploy using SAM. ```bash -sam-sports-equipment-ts$ sam logs -n PersistDataFunction --stack-name sam-sports-equipment-ts --tail +sam-sports-equipment-ts$ sam logs -n PersistDataFunction --stackName sam-sports-equipment-ts --tail ``` You can find more information and examples about filtering Lambda function logs in the [SAM CLI Documentation](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-logging.html). @@ -133,7 +133,7 @@ sports-equipment$ npm run test To delete the sample application that you created, use the AWS CLI. Assuming you used your project name for the stack name, you can run the following: ```bash -aws cloudformation delete-stack --stack-name sam-sports-equipment-ts +aws cloudformation delete-stack --stackName sam-sports-equipment-ts ``` ## Resources diff --git a/backstage-reference/templates/example-serverless-rest-api/content/template.yml b/backstage-reference/templates/example-serverless-rest-api/content/template.yml index 7e42106e..f6134132 100644 --- a/backstage-reference/templates/example-serverless-rest-api/content/template.yml +++ b/backstage-reference/templates/example-serverless-rest-api/content/template.yml @@ -119,8 +119,8 @@ Resources: # Cant delete since it is attached to network interfaces managed by lambda service DeletionPolicy: Retain Properties: - GroupName: !Sub '${AppName}-lambda-sg' - GroupDescription: !Sub 'A Security Group for ${AppName} lambda functions' + GroupName: !Sub '${AppName}-lambda-sg-${Stage}' + GroupDescription: !Sub 'A Security Group for ${AppName} lambda functions in stage ${Stage}' VpcId: !Ref VpcId GetEquipmentFunctionLogGroup: diff --git a/backstage-reference/templates/example-serverless-rest-api/template.yaml b/backstage-reference/templates/example-serverless-rest-api/template.yaml index ea7f8f36..927d829b 100644 --- a/backstage-reference/templates/example-serverless-rest-api/template.yaml +++ b/backstage-reference/templates/example-serverless-rest-api/template.yaml @@ -62,6 +62,9 @@ spec: ui:options: allowedKinds: - AWSEnvironment + catalogFilter: + - kind: AWSEnvironment + metadata.environmentType: serverless defaultKind: AWSEnvironment - title: Choose a git repository location required: @@ -88,7 +91,7 @@ spec: name: Get Component Info action: opa:get-component-info input: - componentName: ${{ parameters.component_id }} + componentName: ${{ parameters.component_id | lower }} - id: opaGetAwsEnvProviders name: Get AWS Environment Providers @@ -134,8 +137,8 @@ spec: url: https://{{ gitlab_hostname }}/opa-admin/backstage-reference/-/tree/main/common/aws_serverless_api targetPath: ./.iac values: - component_id: ${{ parameters.component_id }} - app_env_plaintext: "" + component_id: ${{ parameters.component_id | lower }} + appEnvPlaintext: "" - id: fetchBase name: Fetch Base @@ -143,15 +146,16 @@ spec: input: url: ./content values: - component_id: ${{ parameters.component_id }} + component_id: ${{ parameters.component_id | lower 
}} + title: ${{ parameters.component_id }} description: ${{ parameters.description }} destination: ${{ parameters.repoUrl | parseRepoUrl }} owner: ${{ parameters.owner }} - platform_region: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} - aws_environment: ${{ steps['opaGetAwsEnvProviders'].output.envRef }} - aws_environment_name: ${{ steps['opaGetAwsEnvProviders'].output.envName }} - aws_environment_short_name: ${{ steps['opaGetAwsEnvProviders'].output.envShortName }} - aws_secret_repo_arn: ${{ steps['createSecretManager'].output.awsSecretArn }} + platformRegion: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} + awsEnvironment: ${{ steps['opaGetAwsEnvProviders'].output.envRef }} + awsEnvironmentName: ${{ steps['opaGetAwsEnvProviders'].output.envName }} + awsEnvironment_short_name: ${{ steps['opaGetAwsEnvProviders'].output.envShortName }} + awsSecretRepoArn: ${{ steps['createSecretManager'].output.awsSecretArn }} kebabName: ${{ steps['getComponentInfo'].output.kebabCaseComponentName }} - id: entityDetail diff --git a/backstage-reference/templates/example-springboot-eks/.gitignore b/backstage-reference/templates/example-springboot-eks/.gitignore new file mode 100644 index 00000000..fcc2fc6b --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/.gitignore @@ -0,0 +1,2 @@ +# be sure to include .js files +!**/*.js \ No newline at end of file diff --git a/backstage-reference/templates/example-springboot-eks/content/.backstage/catalog-info.yaml b/backstage-reference/templates/example-springboot-eks/content/.backstage/catalog-info.yaml new file mode 100644 index 00000000..a27b4b5f --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/.backstage/catalog-info.yaml @@ -0,0 +1,28 @@ +apiVersion: backstage.io/v1alpha1 +kind: Component +metadata: + name: ${{ values.component_id | dump }} + title: ${{ values.title | dump }} + {%- if values.description %} + description: ${{values.description | dump}} + {%- endif %} + tags: + - aws + - java + - springboot + - k8s + # links: + # - title: ${{ values.component_id}} endpoint + # url: ${{ values.aws_bp_outputs["opa-alb-endpoint"] | dump }} + iacType: cdk + repoSecretArn: ${{ values.awsSecretRepoArn | dump }} + + # Configure where k8s configurations are within the project + k8sConfigDirName: k8s + +spec: + type: aws-app + subType: aws-eks + owner: ${{ values.owner | dump }} + lifecycle: experimental + dependsOn: [] diff --git a/backstage-reference/templates/example-springboot-eks/content/.editorconfig b/backstage-reference/templates/example-springboot-eks/content/.editorconfig new file mode 100644 index 00000000..81357d3e --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/.editorconfig @@ -0,0 +1,36 @@ +# EditorConfig is awesome: https://EditorConfig.org + +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +charset = utf-8 +indent_style = space +max_line_length = 120 + +[*.html] +indent_style = space +indent_size = 2 + +[*.{ts,json,js,tsx,jsx}] +indent_style = space +indent_size = 2 + +[*.md] +indent_size = 2 +indent_style = space + +[Dockerfile] +indent_style = space +indent_size = 2 + +[*.{yml,yaml}] +indent_size = 2 +indent_style = space + +[Makefile] +indent_size = 4 +indent_style = tab diff --git a/backstage-reference/templates/example-springboot-eks/content/.gitignore b/backstage-reference/templates/example-springboot-eks/content/.gitignore new file mode 100644 
index 00000000..37fe0ff0 --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/.gitignore @@ -0,0 +1,64 @@ +# macOS +.DS_Store + +# Intellij +.idea/ +*.iml + +# VS Code +.vscode/** +!.vscode/launch.json +!.vscode/tasks.json +.vsls.json +git-temp + +# logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +lerna-debug.log* + +# Coverage directory +coverage + +# Dependency directories +node_modules/ +**/.venv/ +**/__pycache__ + +# Yarn 3 files +.pnp.* +**/.yarn/* +!**/.yarn/patches +!**/.yarn/plugins +!**/.yarn/releases +!**/.yarn/sdks +!**/.yarn/versions + +# Node version directives +.nvmrc + +# Optional eslint cache +.eslintcache + +# dotenv environment variables file +.env +.env.* + +# Local configuration files +*.local.yaml + + +# transpiled JavaScript, Typings and test files +*.d.ts +!jest.config.js + +# CDK asset staging directory +.cdk.staging +cdk.out +cdk.context.json + +# Temp files +**/*.bak \ No newline at end of file diff --git a/backstage-reference/templates/example-springboot-eks/content/.gitlab-ci.yml b/backstage-reference/templates/example-springboot-eks/content/.gitlab-ci.yml new file mode 100644 index 00000000..27632de6 --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/.gitlab-ci.yml @@ -0,0 +1,20 @@ +stages: + - env-creation + - prepare-${{values.awsEnvironmentName}}-stage + - ${{values.awsEnvironmentName}}-stage + +variables: + APP_SHORT_NAME: "${{ values.component_id }}" + APP_TEMPLATE_NAME: "example-springboot-eks" + OPA_PLATFORM_REGION: "${{ values.platformRegion }}" + +include: + - project: 'opa-admin/backstage-reference' + ref: main + file: + - 'common/cicd/.gitlab-ci-job-defaults-cdk.yml' + - 'common/cicd/.gitlab-ci-aws-base.yml' + - 'common/cicd/.gitlab-ci-aws-iac-eks.yml' + # If you want to call kubectl directly instead of going through lambda, comment above line and uncomment below line + # - 'common/cicd/.gitlab-ci-aws-iac-eks-kubectl.yml' + - 'common/cicd/.gitlab-ci-aws-dind-spring-boot.yml' diff --git a/backstage-reference/templates/example-springboot-eks/content/HELP.md b/backstage-reference/templates/example-springboot-eks/content/HELP.md new file mode 100644 index 00000000..cdbdcc6e --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/HELP.md @@ -0,0 +1,22 @@ +# Read Me First +The following was discovered as part of building this project: + +* The original package name 'dev.aws.pace.fsi.rest-service' is invalid and this project uses 'dev.aws.pace.fsi.restservice' instead. 
+ +# Getting Started + +### Reference Documentation +For further reference, please consider the following sections: + +* [Official Apache Maven documentation](https://maven.apache.org/guides/index.html) +* [Spring Boot Maven Plugin Reference Guide](https://docs.spring.io/spring-boot/docs/3.0.3/maven-plugin/reference/html/) +* [Create an OCI image](https://docs.spring.io/spring-boot/docs/3.0.3/maven-plugin/reference/html/#build-image) +* [Spring Web](https://docs.spring.io/spring-boot/docs/3.0.3/reference/htmlsingle/#web) + +### Guides +The following guides illustrate how to use some features concretely: + +* [Building a RESTful Web Service](https://spring.io/guides/gs/rest-service/) +* [Serving Web Content with Spring MVC](https://spring.io/guides/gs/serving-web-content/) +* [Building REST services with Spring](https://spring.io/guides/tutorials/rest/) + diff --git a/backstage-reference/templates/example-springboot-eks/content/README.md b/backstage-reference/templates/example-springboot-eks/content/README.md new file mode 100644 index 00000000..02f409ce --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/README.md @@ -0,0 +1,14 @@ +# Springboot REST service with RDS + +This application is a starter skeleton for a Springboot Java REST service with an RDS database + +## Common commands: +```sh +mvnw clean - Cleans the maven project by deleting the target directory +mvnw package - build the maven project and create JAR/WAR files +mvnw install - Build the maven project and install the package file (JAR, WAR, pom.xml, etc) to the local repository +mvnw validate - validate the project is correct and all necessary information is available +mvnw compile - compile the source code of the project +mvnw spring-boot:run - run the service +mvnw spring-boot:build-image - build a OCI image using paketo buildpacks +``` diff --git a/backstage-reference/templates/example-springboot-eks/content/k8s/base/deployment.yaml b/backstage-reference/templates/example-springboot-eks/content/k8s/base/deployment.yaml new file mode 100644 index 00000000..8cbe2821 --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/k8s/base/deployment.yaml @@ -0,0 +1,49 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ${{ values.component_id }}-mainpod + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe + template: + metadata: + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe + spec: + serviceAccountName: overrideMe + containers: + - name: api + image: 12345678912.dkr.ecr.us-east-1.amazonaws.com/${{ values.component_id }}-overrideMe-overrideMe:latest + imagePullPolicy: Always + resources: + limits: + memory: 600Mi + cpu: "1" + requests: + cpu: "100m" + ports: + - containerPort: ${{ values.appPort }} + livenessProbe: + httpGet: + path: / + port: ${{ values.appPort }} + initialDelaySeconds: 30 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: / + port: ${{ values.appPort }} + initialDelaySeconds: 30 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + diff --git a/backstage-reference/templates/example-springboot-eks/content/k8s/base/ingress.yaml b/backstage-reference/templates/example-springboot-eks/content/k8s/base/ingress.yaml new file mode 100644 index 00000000..faea5b8d --- 
/dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/k8s/base/ingress.yaml @@ -0,0 +1,45 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ${{ values.component_id }}-ingress + annotations: + kubernetes.io/ingress.class: "alb" + alb.ingress.kubernetes.io/scheme: "internet-facing" + alb.ingress.kubernetes.io/healthcheck-path: "/health" + alb.ingress.kubernetes.io/success-codes: "200,201,302" + alb.ingress.kubernetes.io/target-type: "ip" + + # Tag load balancer so that it shows up as an OPA application resource + # Note - this setting must be overriden per environment provider + alb.ingress.kubernetes.io/tags: "aws-apps-${{ values.component_id }}-ENV_PLACEHOLDER-ENV_PROVIDER_PLACEHOLDER=${{ values.component_id }}" + + # Allows multiple services to use the same ALB + alb.ingress.kubernetes.io/group.name: ${{ values.component_id }}-ENV_PLACEHOLDER + + # Configure the load balancer name. Comment this out to have the name be auto-generated + # Load balancer name can only be up to 32 characters long + # alb.ingress.kubernetes.io/load-balancer-name: "${{ values.component_id }}-ENV_PLACEHOLDER-NS_PLACEHOLDER" + + # To enable HTTPS, you need a valid SSL certificate + # Here are some example annotations to use for enabling an HTTPS listener for your load balancer: + # alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' + # alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}' + # alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:::certificate/ + + # To configure an IP allow-list for the load balancer + # alb.ingress.kubernetes.io/inbound-cidrs: 10.0.0.0/24 + + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe +spec: + rules: + - http: + paths: + - path: /* + pathType: ImplementationSpecific + backend: + service: + name: ${{ values.component_id }}-service + port: + number: 80 diff --git a/backstage-reference/templates/example-springboot-eks/content/k8s/base/kustomization.yaml b/backstage-reference/templates/example-springboot-eks/content/k8s/base/kustomization.yaml new file mode 100644 index 00000000..af8ebbc8 --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/k8s/base/kustomization.yaml @@ -0,0 +1,9 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - deployment.yaml + - ingress.yaml + - nsAdminRoleBinding.yaml + - nsViewerRoleBinding.yaml + - service.yaml + - serviceAccount.yaml diff --git a/backstage-reference/templates/example-springboot-eks/content/k8s/base/nsAdminRoleBinding.yaml b/backstage-reference/templates/example-springboot-eks/content/k8s/base/nsAdminRoleBinding.yaml new file mode 100644 index 00000000..c298d053 --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/k8s/base/nsAdminRoleBinding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: ${{ values.component_id }}-admin + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe +subjects: +- kind: User + name: APP_ADMIN_ROLE_PLACEHOLDER + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io diff --git a/backstage-reference/templates/example-springboot-eks/content/k8s/base/nsViewerRoleBinding.yaml 
b/backstage-reference/templates/example-springboot-eks/content/k8s/base/nsViewerRoleBinding.yaml new file mode 100644 index 00000000..18367e98 --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/k8s/base/nsViewerRoleBinding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ${{ values.component_id }}-view-ns + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe +subjects: +- kind: User + name: APP_ADMIN_ROLE_PLACEHOLDER + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: opa-namespace-viewer + apiGroup: rbac.authorization.k8s.io diff --git a/backstage-reference/templates/example-springboot-eks/content/k8s/base/service.yaml b/backstage-reference/templates/example-springboot-eks/content/k8s/base/service.yaml new file mode 100644 index 00000000..89ecd5da --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/k8s/base/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: ${{ values.component_id }}-service + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe +spec: + selector: + app.kubernetes.io/name: ${{ values.component_id }} + ports: + - name: http + port: 80 + targetPort: ${{ values.appPort }} + protocol: TCP + type: NodePort diff --git a/backstage-reference/templates/example-springboot-eks/content/k8s/base/serviceAccount.yaml b/backstage-reference/templates/example-springboot-eks/content/k8s/base/serviceAccount.yaml new file mode 100644 index 00000000..d30a718e --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/k8s/base/serviceAccount.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ${{ values.component_id }}-sa + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: overrideMe + annotations: + eks.amazonaws.com/role-arn: overrideMe diff --git a/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/deployment.yaml b/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/deployment.yaml new file mode 100644 index 00000000..b70b25fe --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/deployment.yaml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ${{ values.component_id }}-mainpod + labels: + app.kubernetes.io/env: ENV_PLACEHOLDER +spec: + selector: + matchLabels: + app.kubernetes.io/env: ENV_PLACEHOLDER + template: + metadata: + labels: + app.kubernetes.io/env: ENV_PLACEHOLDER + spec: + serviceAccountName: ${{ values.component_id }}-sa + containers: + - name: api + image: ACCT_PLACEHOLDER.dkr.ecr.REGION_PLACEHOLDER.amazonaws.com/${{ values.component_id }}-ENV_PLACEHOLDER-ENV_PROVIDER_PLACEHOLDER:latest + envFrom: + - configMapRef: + name: ${{ values.component_id }}-env-vars diff --git a/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/ingress.yaml b/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/ingress.yaml new file mode 100644 index 00000000..243436f7 --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/ingress.yaml @@ -0,0 +1,6 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ${{ values.component_id }}-ingress + labels: + app.kubernetes.io/env: ENV_PLACEHOLDER diff --git 
a/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/kustomization.yaml b/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/kustomization.yaml new file mode 100644 index 00000000..a307a41b --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/kustomization.yaml @@ -0,0 +1,22 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: NS_PLACEHOLDER +nameSuffix: -ENV_PLACEHOLDER +configMapGenerator: + - name: ${{ values.component_id }}-env-vars + literals: + - ENVIRONMENT_NAME=ENV_PLACEHOLDER +generatorOptions: + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: ENV_PLACEHOLDER +resources: + - ../base + - ./namespace.yaml +patches: + - path: deployment.yaml + - path: ingress.yaml + - path: nsAdminRoleBinding.yaml + - path: nsViewerRoleBinding.yaml + - path: service.yaml + - path: serviceAccount.yaml diff --git a/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/namespace.yaml b/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/namespace.yaml new file mode 100644 index 00000000..17dfd4ef --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/namespace.yaml @@ -0,0 +1,6 @@ +kind: Namespace +apiVersion: v1 +metadata: + name: NS_PLACEHOLDER + labels: + name: NS_PLACEHOLDER diff --git a/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/nsAdminRoleBinding.yaml b/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/nsAdminRoleBinding.yaml new file mode 100644 index 00000000..876c1f02 --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/nsAdminRoleBinding.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: ${{ values.component_id }}-admin + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: ENV_PLACEHOLDER + diff --git a/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/nsViewerRoleBinding.yaml b/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/nsViewerRoleBinding.yaml new file mode 100644 index 00000000..ba4f4ac3 --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/nsViewerRoleBinding.yaml @@ -0,0 +1,7 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ${{ values.component_id }}-view-ns + labels: + app.kubernetes.io/name: ${{ values.component_id }} + app.kubernetes.io/env: ENV_PLACEHOLDER diff --git a/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/service.yaml b/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/service.yaml new file mode 100644 index 00000000..9dbbdcef --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/service.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Service +metadata: + name: ${{ values.component_id }}-service + labels: + app.kubernetes.io/env: ENV_PLACEHOLDER +spec: + selector: + app.kubernetes.io/env: ENV_PLACEHOLDER diff --git a/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/serviceAccount.yaml b/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/serviceAccount.yaml new file mode 
100644 index 00000000..2a12df32 --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/k8s/new-env-template/serviceAccount.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ${{ values.component_id }}-sa + labels: + app.kubernetes.io/env: ENV_PLACEHOLDER + annotations: + eks.amazonaws.com/role-arn: SA_ROLE_PLACEHOLDER + diff --git a/backstage-reference/templates/example-springboot-eks/content/mvnw b/backstage-reference/templates/example-springboot-eks/content/mvnw new file mode 100755 index 00000000..8a8fb228 --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/mvnw @@ -0,0 +1,316 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Maven Start Up Batch script +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# M2_HOME - location of maven2's installed home dir +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ] ; then + + if [ -f /usr/local/etc/mavenrc ] ; then + . /usr/local/etc/mavenrc + fi + + if [ -f /etc/mavenrc ] ; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ] ; then + . "$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. +cygwin=false; +darwin=false; +mingw=false +case "`uname`" in + CYGWIN*) cygwin=true ;; + MINGW*) mingw=true;; + Darwin*) darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + export JAVA_HOME="`/usr/libexec/java_home`" + else + export JAVA_HOME="/Library/Java/Home" + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ] ; then + if [ -r /etc/gentoo-release ] ; then + JAVA_HOME=`java-config --jre-home` + fi +fi + +if [ -z "$M2_HOME" ] ; then + ## resolve links - $0 may be a link to maven's home + PRG="$0" + + # need this for relative symlinks + while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG="`dirname "$PRG"`/$link" + fi + done + + saveddir=`pwd` + + M2_HOME=`dirname "$PRG"`/.. 
+ + # make it fully qualified + M2_HOME=`cd "$M2_HOME" && pwd` + + cd "$saveddir" + # echo Using m2 at $M2_HOME +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin ; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --unix "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --unix "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --unix "$CLASSPATH"` +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw ; then + [ -n "$M2_HOME" ] && + M2_HOME="`(cd "$M2_HOME"; pwd)`" + [ -n "$JAVA_HOME" ] && + JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="`which javac`" + if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=`which readlink` + if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then + if $darwin ; then + javaHome="`dirname \"$javaExecutable\"`" + javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" + else + javaExecutable="`readlink -f \"$javaExecutable\"`" + fi + javaHome="`dirname \"$javaExecutable\"`" + javaHome=`expr "$javaHome" : '\(.*\)/bin'` + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ] ; then + if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="`\\unset -f command; \\command -v java`" + fi +fi + +if [ ! -x "$JAVACMD" ] ; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ] ; then + echo "Warning: JAVA_HOME environment variable is not set." +fi + +CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + + if [ -z "$1" ] + then + echo "Path not specified to find_maven_basedir" + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ] ; do + if [ -d "$wdir"/.mvn ] ; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=`cd "$wdir/.."; pwd` + fi + # end of workaround + done + echo "${basedir}" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + echo "$(tr -s '\n' ' ' < "$1")" + fi +} + +BASE_DIR=`find_maven_basedir "$(pwd)"` +if [ -z "$BASE_DIR" ]; then + exit 1; +fi + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit checking in binary data. +########################################################################################## +if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found .mvn/wrapper/maven-wrapper.jar" + fi +else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." 
+ fi + if [ -n "$MVNW_REPOURL" ]; then + jarUrl="$MVNW_REPOURL/org/apache/maven/wrapper/maven-wrapper/3.1.0/maven-wrapper-3.1.0.jar" + else + jarUrl="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.1.0/maven-wrapper-3.1.0.jar" + fi + while IFS="=" read key value; do + case "$key" in (wrapperUrl) jarUrl="$value"; break ;; + esac + done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" + if [ "$MVNW_VERBOSE" = true ]; then + echo "Downloading from: $jarUrl" + fi + wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" + if $cygwin; then + wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"` + fi + + if command -v wget > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found wget ... using wget" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + wget "$jarUrl" -O "$wrapperJarPath" || rm -f "$wrapperJarPath" + else + wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath" || rm -f "$wrapperJarPath" + fi + elif command -v curl > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found curl ... using curl" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + curl -o "$wrapperJarPath" "$jarUrl" -f + else + curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f + fi + + else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Falling back to using Java to download" + fi + javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" + # For Cygwin, switch paths to Windows format before running javac + if $cygwin; then + javaClass=`cygpath --path --windows "$javaClass"` + fi + if [ -e "$javaClass" ]; then + if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Compiling MavenWrapperDownloader.java ..." + fi + # Compiling the Java class + ("$JAVA_HOME/bin/javac" "$javaClass") + fi + if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + # Running the downloader + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Running MavenWrapperDownloader.java ..." + fi + ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} +if [ "$MVNW_VERBOSE" = true ]; then + echo $MAVEN_PROJECTBASEDIR +fi +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --path --windows "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --windows "$CLASSPATH"` + [ -n "$MAVEN_PROJECTBASEDIR" ] && + MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` +fi + +# Provide a "standardized" way to retrieve the CLI args that will +# work with both Windows and non-Windows executions. 
+MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" +export MAVEN_CMD_LINE_ARGS + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +exec "$JAVACMD" \ + $MAVEN_OPTS \ + $MAVEN_DEBUG_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.home=${M2_HOME}" \ + "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/backstage-reference/templates/example-springboot-eks/content/mvnw.cmd b/backstage-reference/templates/example-springboot-eks/content/mvnw.cmd new file mode 100644 index 00000000..1d8ab018 --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/mvnw.cmd @@ -0,0 +1,188 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM https://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Maven Start Up Batch script +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM M2_HOME - location of maven2's installed home dir +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%USERPROFILE%\mavenrc_pre.bat" call "%USERPROFILE%\mavenrc_pre.bat" %* +if exist "%USERPROFILE%\mavenrc_pre.cmd" call "%USERPROFILE%\mavenrc_pre.cmd" %* +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. 
>&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. +echo Error: JAVA_HOME is set to an invalid directory. >&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. + +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.1.0/maven-wrapper-3.1.0.jar" + +FOR /F "usebackq tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. +if exist %WRAPPER_JAR% ( + if "%MVNW_VERBOSE%" == "true" ( + echo Found %WRAPPER_JAR% + ) +) else ( + if not "%MVNW_REPOURL%" == "" ( + SET DOWNLOAD_URL="%MVNW_REPOURL%/org/apache/maven/wrapper/maven-wrapper/3.1.0/maven-wrapper-3.1.0.jar" + ) + if "%MVNW_VERBOSE%" == "true" ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... + echo Downloading from: %DOWNLOAD_URL% + ) + + powershell -Command "&{"^ + "$webclient = new-object System.Net.WebClient;"^ + "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ + "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ + "}"^ + "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^ + "}" + if "%MVNW_VERBOSE%" == "true" ( + echo Finished downloading %WRAPPER_JAR% + ) +) +@REM End of extension + +@REM Provide a "standardized" way to retrieve the CLI args that will +@REM work with both Windows and non-Windows executions. 
+set MAVEN_CMD_LINE_ARGS=%* + +%MAVEN_JAVA_EXE% ^ + %JVM_CONFIG_MAVEN_PROPS% ^ + %MAVEN_OPTS% ^ + %MAVEN_DEBUG_OPTS% ^ + -classpath %WRAPPER_JAR% ^ + "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" ^ + %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%"=="" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%USERPROFILE%\mavenrc_post.bat" call "%USERPROFILE%\mavenrc_post.bat" +if exist "%USERPROFILE%\mavenrc_post.cmd" call "%USERPROFILE%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%"=="on" pause + +if "%MAVEN_TERMINATE_CMD%"=="on" exit %ERROR_CODE% + +cmd /C exit /B %ERROR_CODE% diff --git a/backstage-reference/templates/example-springboot-eks/content/pom.xml b/backstage-reference/templates/example-springboot-eks/content/pom.xml new file mode 100644 index 00000000..564d1835 --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/pom.xml @@ -0,0 +1,53 @@ + + + 4.0.0 + + org.springframework.boot + spring-boot-starter-parent + 3.0.3 + + + dev.aws.pace.fsi + ${{ values.component_id }} + 0.0.1 + ${{ values.component_id }} + ${{ values.description }} + + 17 + + + + org.springframework.boot + spring-boot-starter-web + + + + org.postgresql + postgresql + runtime + + + org.springframework.boot + spring-boot-starter-test + test + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + 17 + + + + + + + + diff --git a/backstage-reference/templates/example-springboot-eks/content/src/main/java/dev/aws/pace/fsi/restservice/Greeting.java b/backstage-reference/templates/example-springboot-eks/content/src/main/java/dev/aws/pace/fsi/restservice/Greeting.java new file mode 100644 index 00000000..875ce524 --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/src/main/java/dev/aws/pace/fsi/restservice/Greeting.java @@ -0,0 +1,3 @@ +package dev.aws.pace.fsi.restservice; + +public record Greeting(long id, String content) { } diff --git a/backstage-reference/templates/example-springboot-eks/content/src/main/java/dev/aws/pace/fsi/restservice/GreetingController.java b/backstage-reference/templates/example-springboot-eks/content/src/main/java/dev/aws/pace/fsi/restservice/GreetingController.java new file mode 100644 index 00000000..89677a0b --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/src/main/java/dev/aws/pace/fsi/restservice/GreetingController.java @@ -0,0 +1,21 @@ +package dev.aws.pace.fsi.restservice; + +import java.util.concurrent.atomic.AtomicLong; + +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RequestParam; +import org.springframework.web.bind.annotation.RestController; + +@RestController +public class GreetingController { + + private static final String template = "Hello, %s!"; + private final AtomicLong counter = new AtomicLong(); + + // @GetMapping("/greeting") + @GetMapping("/") + public Greeting greeting(@RequestParam(value = "name", defaultValue = "${{ values.greetee }}") String name) { + return new Greeting(counter.incrementAndGet(), String.format(template, name)); + } + +} diff --git a/backstage-reference/templates/example-springboot-eks/content/src/main/java/dev/aws/pace/fsi/restservice/RestServiceApplication.java 
b/backstage-reference/templates/example-springboot-eks/content/src/main/java/dev/aws/pace/fsi/restservice/RestServiceApplication.java new file mode 100644 index 00000000..403aaa6d --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/src/main/java/dev/aws/pace/fsi/restservice/RestServiceApplication.java @@ -0,0 +1,13 @@ +package dev.aws.pace.fsi.restservice; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +public class RestServiceApplication { + + public static void main(String[] args) { + SpringApplication.run(RestServiceApplication.class, args); + } + +} diff --git a/backstage-reference/templates/example-springboot-eks/content/src/main/resources/application.properties b/backstage-reference/templates/example-springboot-eks/content/src/main/resources/application.properties new file mode 100644 index 00000000..faa129a5 --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/src/main/resources/application.properties @@ -0,0 +1 @@ +server.port=${{ values.port }} \ No newline at end of file diff --git a/backstage-reference/templates/example-springboot-eks/content/target/classes/application.properties b/backstage-reference/templates/example-springboot-eks/content/target/classes/application.properties new file mode 100644 index 00000000..faa129a5 --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/content/target/classes/application.properties @@ -0,0 +1 @@ +server.port=${{ values.port }} \ No newline at end of file diff --git a/backstage-reference/templates/example-springboot-eks/template.yaml b/backstage-reference/templates/example-springboot-eks/template.yaml new file mode 100644 index 00000000..192bef4d --- /dev/null +++ b/backstage-reference/templates/example-springboot-eks/template.yaml @@ -0,0 +1,235 @@ +apiVersion: scaffolder.backstage.io/v1beta3 +# https://backstage.io/docs/features/software-catalog/descriptor-format#kind-template +kind: Template +metadata: + name: example-springboot-eks-template + title: Kubernetes - Java Spring Boot Web Service + description: >- + Create sample Java Spring Boot web service, running on an Elastic Kubernetes Service cluster. + tags: + - java + - springboot + - aws + - eks + - kubernetes + - kustomize +spec: + owner: group:admins + type: service + + # These parameters are used to generate the input form in the frontend, and are + # used to gather input data for the execution of the template. 
+ parameters: + - title: Provide basic component information + required: + - component_id + properties: + component_id: + title: Name + type: string + description: Unique name of the component + ui:field: EntityNamePicker + ui:autofocus: true + description: + title: Description + type: string + description: Help others understand what this service is for + owner: + title: Owner + type: string + description: Owner of the component + ui:field: OwnerPicker + ui:options: + catalogFilter: + kind: [Group] + - title: Provide environment information for the application + required: + - environment + - namespace + properties: + environment: + title: AWS Environment + type: string + description: The AWS Environment where the application will be deployed + ui:field: EntityPicker + ui:options: + allowedKinds: + - AWSEnvironment + catalogFilter: + - kind: AWSEnvironment + metadata.environmentType: eks + defaultKind: AWSEnvironment + namespace: + title: k8s Namespace + type: string + description: The k8s namespace to assign to application resources for the environment selected above + k8sIAMRoleBindingType: + title: Namespace-bound Kubectl Admin Access + description: Choose how to map an AWS IAM role with namespace-bound k8s admin access + type: string + default: create_new_k8s_namespace_admin_iam_role + enum: + - create_new_k8s_namespace_admin_iam_role + - existing_new_k8s_namespace_admin_iam_role + enumNames: + - 'Create a separate role for the K8s namespace' + - 'Import existing role and grant it access to the K8s namespace' + + # Only ask for the existing IAM role if the user chose to use an existing role + dependencies: + k8sIAMRoleBindingType: + oneOf: + - properties: + k8sIAMRoleBindingType: + enum: + - existing_new_k8s_namespace_admin_iam_role + existingK8sNamespaceAdminRole: + title: Existing IAM role ARN + type: string + description: Existing IAM role to grant namespace privileges to + - properties: + k8sIAMRoleBindingType: + enum: + - create_new_k8s_namespace_admin_iam_role + + - title: Choose a git repository location + required: + - repoUrl + properties: + repoUrl: + title: Repository Location + type: string + ui:field: RepoUrlPicker + ui:options: + allowedHosts: + - {{ gitlab_hostname }} + allowedOwners: + - aws-app + + # These steps are executed in the scaffolder backend, using data that we gathered + # via the parameters above.
+ steps: + - id: opaGetPlatformInfo + name: Get OPA platform information + action: opa:get-platform-metadata + + - id: opaGetAwsEnvProviders + name: Get AWS Environment Providers + action: opa:get-env-providers + input: + environmentRef: ${{ parameters.environment }} + + - id: debugEnvironment + name: Print the environment entity info + action: debug:log + input: + message: ${{ steps['opaGetAwsEnvProviders'].output | dump }} + + - id: createProviderPropsFiles + each: ${{ steps['opaGetAwsEnvProviders'].output.envProviders }} + name: Store environment provider parameters + action: roadiehq:utils:fs:write + input: + path: .awsdeployment/providers/${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName}}.properties + content: | + TARGET_VPCID=${{ each.value.vpcId }} + TARGET_EKS_CLUSTER_ARN=${{ each.value.clusterArn }} + TARGET_ENV_NAME=${{ steps['opaGetAwsEnvProviders'].output.envName }} + TARGET_ENV_PROVIDER_NAME=${{ each.value.envProviderName }} + ACCOUNT=${{ each.value.accountId }} + REGION=${{ each.value.region }} + PREFIX=${{ each.value.envProviderPrefix }} + ENV_ROLE_ARN=${{ each.value.assumedRoleArn }} + OPA_CI_ENVIRONMENT=${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} + OPA_CI_REGISTRY_IMAGE=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com/${{ parameters.component_id | lower }}-${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} + OPA_CI_REGISTRY=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com + OPA_CI_ENVIRONMENT_MANUAL_APPROVAL={% if steps['opaGetAwsEnvProviders'].output.envDeployManualApproval %}true{% else %}false{% endif %} + TARGET_KUBECTL_LAMBDA_ARN=${{ each.value.kubectlLambdaArn }} + TARGET_KUBECTL_LAMBDA_ROLE_ARN=${{ each.value.kubectlLambdaRoleArn }} + NAMESPACE=${{ parameters.namespace }} + K8S_IAM_ROLE_BINDING_TYPE=${{ parameters.k8sIAMRoleBindingType }} + APP_ADMIN_ROLE_ARN=${{ parameters.existingK8sNamespaceAdminRole|default('', true) }} + + - id: createSecretManager + name: Create a Secret + action: opa:create-secret + input: + secretName: aws-apps-${{ (parameters.repoUrl | parseRepoUrl).repo | lower }}-access-token + + - id: fetchIac + name: Fetch EKS Infrastructure as Code + action: fetch:template + input: + url: https://{{ gitlab_hostname }}/opa-admin/backstage-reference/-/tree/main/common/aws_eks + targetPath: ./.iac + values: + component_id: ${{ parameters.component_id | lower }} + appEnvPlaintext: "" + + - id: fetchBase + name: Fetch Base + action: fetch:template + input: + url: ./content + values: + appPort: "8080" + component_id: ${{ parameters.component_id | lower }} + title: ${{ parameters.component_id }} + description: ${{ parameters.description }} + greetee: "World" + owner: ${{ parameters.owner }} + platformRegion: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} + awsEnvironment: ${{ steps['opaGetAwsEnvProviders'].output.envRef }} + awsEnvironmentName: ${{ steps['opaGetAwsEnvProviders'].output.envName }} + awsSecretRepoArn: ${{ steps['createSecretManager'].output.awsSecretArn }} + namespace: ${{ parameters.namespace }} + k8sIAMRoleBindingType: ${{ parameters.k8sIAMRoleBindingType }} + existingK8sNamespaceAdminRole: ${{ parameters.existingK8sNamespaceAdminRole|default('', true) }} + + - id: entityDetail + name: Get AWSEnvironment entity details + action: catalog:fetch + input: + entityRef: ${{ parameters.environment }} + + - id: debugEntity + name: Print the workspace + action: debug:log + 
input: + message: ${{ steps['entityDetail'].output.entity | dump }} + listWorkspace: true + + # This step publishes the contents of the working directory to GitLab. + - id: publish + name: Publish + action: publish:gitlab + input: + repoUrl: ${{ parameters.repoUrl }} + repoVisibility: internal + defaultBranch: main + + # Create a gitlab repository access token and store it in a SecretsManager secret + - id: createRepoToken + name: Create Repo Token + action: opa:createRepoAccessToken:gitlab + input: + repoUrl: ${{ parameters.repoUrl }} + projectId: ${{ steps['publish'].output.projectId }} + secretArn: ${{ steps['createSecretManager'].output.awsSecretArn }} + + # The final step is to register our new component in the catalog. + - id: register + name: Register + action: catalog:register + input: + repoContentsUrl: ${{ steps['publish'].output.repoContentsUrl }} + catalogInfoPath: "/.backstage/catalog-info.yaml" + + # Outputs are displayed to the user after a successful execution of the template. + output: + links: + - title: Repository + url: ${{ steps['publish'].output.remoteUrl }} + - title: Open in catalog + icon: catalog + entityRef: ${{ steps['register'].output.entityRef }} diff --git a/backstage-reference/templates/example-springboot/content/.backstage/catalog-info.yaml b/backstage-reference/templates/example-springboot/content/.backstage/catalog-info.yaml index 549d937b..168cd4f9 100644 --- a/backstage-reference/templates/example-springboot/content/.backstage/catalog-info.yaml +++ b/backstage-reference/templates/example-springboot/content/.backstage/catalog-info.yaml @@ -2,6 +2,7 @@ apiVersion: backstage.io/v1alpha1 kind: Component metadata: name: ${{ values.component_id | dump }} + title: ${{ values.title | dump }} {%- if values.description %} description: ${{values.description | dump}} {%- endif %} @@ -9,15 +10,14 @@ metadata: - aws - java - springboot - annotations: - aws.amazon.com/opa-repo-secret-arn: ${{ values.aws_secret_repo_arn | dump }} # links: # - title: ${{ values.component_id}} endpoint # url: ${{ values.aws_bp_outputs["opa-alb-endpoint"] | dump }} - iac-type: cdk - repo-secret-arn: ${{ values.aws_secret_repo_arn | dump }} + iacType: cdk + repoSecretArn: ${{ values.awsSecretRepoArn | dump }} spec: type: aws-app + subType: aws-ecs owner: ${{ values.owner | dump }} lifecycle: experimental dependsOn: [] diff --git a/backstage-reference/templates/example-springboot/content/.gitlab-ci.yml b/backstage-reference/templates/example-springboot/content/.gitlab-ci.yml index b7587d26..680032d6 100644 --- a/backstage-reference/templates/example-springboot/content/.gitlab-ci.yml +++ b/backstage-reference/templates/example-springboot/content/.gitlab-ci.yml @@ -1,12 +1,12 @@ stages: - env-creation - - prepare-${{values.aws_environment_name}}-stage - - ${{values.aws_environment_name}}-stage + - prepare-${{values.awsEnvironmentName}}-stage + - ${{values.awsEnvironmentName}}-stage variables: APP_SHORT_NAME: "${{ values.component_id }}" APP_TEMPLATE_NAME: "example-springboot" - OPA_PLATFORM_REGION: "${{ values.platform_region }}" + OPA_PLATFORM_REGION: "${{ values.platformRegion }}" include: - project: 'opa-admin/backstage-reference' @@ -16,3 +16,4 @@ include: - 'common/cicd/.gitlab-ci-aws-base.yml' - 'common/cicd/.gitlab-ci-aws-iac-ecs.yml' - 'common/cicd/.gitlab-ci-aws-dind-spring-boot.yml' + - 'common/cicd/.gitlab-ci-aws-image-deploy.yml' diff --git a/backstage-reference/templates/example-springboot/template.yaml b/backstage-reference/templates/example-springboot/template.yaml index 
63d10cf2..27e8b2a4 100644 --- a/backstage-reference/templates/example-springboot/template.yaml +++ b/backstage-reference/templates/example-springboot/template.yaml @@ -3,13 +3,14 @@ apiVersion: scaffolder.backstage.io/v1beta3 kind: Template metadata: name: example-springboot-template - title: Java Spring Boot Web Service + title: ECS - Java Spring Boot Web Service description: >- - Create sample Java Spring Boot web service. + Create sample Java Spring Boot web service, running on an AWS Elastic Container Service cluster. tags: - java - springboot - aws + - ecs spec: owner: group:admins type: service @@ -43,11 +44,6 @@ spec: required: - environment properties: - greetee: - title: Default Greetee - type: string - description: Who do you want to say hello to? - default: World environment: title: AWS Environment type: string @@ -56,6 +52,9 @@ spec: ui:options: allowedKinds: - AWSEnvironment + catalogFilter: + - kind: AWSEnvironment + metadata.environmentType: ecs defaultKind: AWSEnvironment - title: Choose a git repository location @@ -107,7 +106,7 @@ spec: PREFIX=${{ each.value.envProviderPrefix }} ENV_ROLE_ARN=${{ each.value.assumedRoleArn }} OPA_CI_ENVIRONMENT=${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} - OPA_CI_REGISTRY_IMAGE=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com/${{ parameters.component_id }}-${{ each.value.envProviderName }} + OPA_CI_REGISTRY_IMAGE=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com/${{ parameters.component_id | lower }}-${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} OPA_CI_REGISTRY=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com OPA_CI_ENVIRONMENT_MANUAL_APPROVAL={% if steps['opaGetAwsEnvProviders'].output.envDeployManualApproval %}true{% else %}false{% endif %} @@ -124,8 +123,8 @@ spec: url: https://{{ gitlab_hostname }}/opa-admin/backstage-reference/-/tree/main/common/aws_ecs targetPath: ./.iac values: - component_id: ${{ parameters.component_id }} - app_env_plaintext: "" + component_id: ${{ parameters.component_id | lower }} + appEnvPlaintext: "" - id: fetchBase name: Fetch Base @@ -133,15 +132,16 @@ spec: input: url: ./content values: - app_port: "8080" - component_id: ${{ parameters.component_id }} + appPort: "8080" + component_id: ${{ parameters.component_id | lower }} + title: ${{ parameters.component_id }} description: ${{ parameters.description }} - greetee: ${{ parameters.greetee }} + greetee: "World" owner: ${{ parameters.owner }} - platform_region: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} - aws_environment: ${{ steps['opaGetAwsEnvProviders'].output.envRef }} - aws_environment_name: ${{ steps['opaGetAwsEnvProviders'].output.envName }} - aws_secret_repo_arn: ${{ steps['createSecretManager'].output.awsSecretArn }} + platformRegion: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} + awsEnvironment: ${{ steps['opaGetAwsEnvProviders'].output.envRef }} + awsEnvironmentName: ${{ steps['opaGetAwsEnvProviders'].output.envName }} + awsSecretRepoArn: ${{ steps['createSecretManager'].output.awsSecretArn }} - id: entityDetail name: Get AWSEnvironment entity details diff --git a/backstage-reference/templates/example-tf-nodejs/content/.backstage/catalog-info.yaml b/backstage-reference/templates/example-tf-nodejs/content/.backstage/catalog-info.yaml index 624a1da2..f0f69f25 100644 --- a/backstage-reference/templates/example-tf-nodejs/content/.backstage/catalog-info.yaml +++ 
b/backstage-reference/templates/example-tf-nodejs/content/.backstage/catalog-info.yaml @@ -2,21 +2,21 @@ apiVersion: backstage.io/v1alpha1 kind: Component metadata: name: ${{ values.component_id | dump }} + title: ${{ values.title | dump }} {%- if values.description %} description: ${{values.description | dump}} {%- endif %} tags: - aws - nodejs - annotations: - aws.amazon.com/opa-repo-secret-arn: ${{ values.aws_secret_repo_arn | dump }} # links: # - title: Example Title # url: http://www.example.com - iac-type: terraform - repo-secret-arn: ${{ values.aws_secret_repo_arn | dump }} + iacType: terraform + repoSecretArn: ${{ values.awsSecretRepoArn | dump }} spec: type: aws-app + subType: aws-ecs owner: ${{ values.owner | dump }} lifecycle: experimental dependsOn: [] diff --git a/backstage-reference/templates/example-tf-nodejs/content/.gitlab-ci.yml b/backstage-reference/templates/example-tf-nodejs/content/.gitlab-ci.yml index 5c3175a8..7d5ca851 100644 --- a/backstage-reference/templates/example-tf-nodejs/content/.gitlab-ci.yml +++ b/backstage-reference/templates/example-tf-nodejs/content/.gitlab-ci.yml @@ -1,12 +1,12 @@ stages: - env-creation - - prepare-${{values.aws_environment_name}}-stage - - ${{values.aws_environment_name}}-stage + - prepare-${{values.awsEnvironmentName}}-stage + - ${{values.awsEnvironmentName}}-stage variables: APP_SHORT_NAME: "${{ values.component_id }}" APP_TEMPLATE_NAME: "example-tf-nodejs" - OPA_PLATFORM_REGION: "${{ values.platform_region }}" + OPA_PLATFORM_REGION: "${{ values.platformRegion }}" include: - project: 'opa-admin/backstage-reference' @@ -17,3 +17,4 @@ include: - 'common/cicd/.gitlab-ci-aws-tf-base.yml' - 'common/cicd/.gitlab-ci-aws-iac-tf-ecs.yml' - 'common/cicd/.gitlab-ci-aws-image-kaniko.yml' + - 'common/cicd/.gitlab-ci-aws-image-deploy.yml' diff --git a/backstage-reference/templates/example-tf-nodejs/content/Dockerfile b/backstage-reference/templates/example-tf-nodejs/content/Dockerfile index 36e320d2..5ac1bf25 100644 --- a/backstage-reference/templates/example-tf-nodejs/content/Dockerfile +++ b/backstage-reference/templates/example-tf-nodejs/content/Dockerfile @@ -11,5 +11,5 @@ RUN yarn install # Bundle app source COPY . . 
-EXPOSE ${{ values.app_port }} +EXPOSE ${{ values.appPort }} CMD [ "node", "src/index.js" ] diff --git a/backstage-reference/templates/example-tf-nodejs/content/src/index.js b/backstage-reference/templates/example-tf-nodejs/content/src/index.js index 40007955..03429348 100644 --- a/backstage-reference/templates/example-tf-nodejs/content/src/index.js +++ b/backstage-reference/templates/example-tf-nodejs/content/src/index.js @@ -1,6 +1,6 @@ const express = require('express'); const app = express(); -const port = ${{ values.app_port }} || '??port??'; +const port = ${{ values.appPort }} || '??port??'; console.log("Hello World Log") diff --git a/backstage-reference/templates/example-tf-nodejs/template.yaml b/backstage-reference/templates/example-tf-nodejs/template.yaml index 2e908820..f2a220c0 100644 --- a/backstage-reference/templates/example-tf-nodejs/template.yaml +++ b/backstage-reference/templates/example-tf-nodejs/template.yaml @@ -54,6 +54,9 @@ spec: ui:options: allowedKinds: - AWSEnvironment + catalogFilter: + - kind: AWSEnvironment + metadata.environmentType: ecs defaultKind: AWSEnvironment - title: Choose a git repository location @@ -103,10 +106,10 @@ spec: ACCOUNT=${{ each.value.accountId }} REGION=${{ each.value.region }} PREFIX=${{ each.value.envProviderPrefix }} - APP_NAME=${{ parameters.component_id }} + APP_NAME=${{ parameters.component_id | lower }} ENV_ROLE_ARN=${{ each.value.assumedRoleArn }} OPA_CI_ENVIRONMENT=${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} - OPA_CI_REGISTRY_IMAGE=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com/${{ parameters.component_id }}-${{ each.value.envProviderName }} + OPA_CI_REGISTRY_IMAGE=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com/${{ parameters.component_id | lower }}-${{ steps['opaGetAwsEnvProviders'].output.envName }}-${{ each.value.envProviderName }} OPA_CI_REGISTRY=${{ each.value.accountId }}.dkr.ecr.${{ each.value.region }}.amazonaws.com OPA_CI_ENVIRONMENT_MANUAL_APPROVAL={% if steps['opaGetAwsEnvProviders'].output.envDeployManualApproval %}true{% else %}false{% endif %} @@ -123,8 +126,8 @@ spec: url: https://{{ gitlab_hostname }}/opa-admin/backstage-reference/-/tree/main/common/tf_aws_ecs targetPath: ./.iac values: - component_id: ${{ parameters.component_id }} - app_env_plaintext: "" + component_id: ${{ parameters.component_id | lower }} + appEnvPlaintext: "" - id: fetchBase name: Fetch Base @@ -132,14 +135,15 @@ spec: input: url: ./content values: - app_port: "8080" - component_id: ${{ parameters.component_id }} + appPort: "8080" + component_id: ${{ parameters.component_id | lower }} + title: ${{ parameters.component_id }} description: ${{ parameters.description }} owner: ${{ parameters.owner }} - platform_region: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} - aws_environment: ${{ steps['opaGetAwsEnvProviders'].output.envRef }} - aws_environment_name: ${{ steps['opaGetAwsEnvProviders'].output.envName }} - aws_secret_repo_arn: ${{ steps['createSecretManager'].output.awsSecretArn }} + platformRegion: ${{ steps['opaGetPlatformInfo'].output.platformRegion }} + awsEnvironment: ${{ steps['opaGetAwsEnvProviders'].output.envRef }} + awsEnvironmentName: ${{ steps['opaGetAwsEnvProviders'].output.envName }} + awsSecretRepoArn: ${{ steps['createSecretManager'].output.awsSecretArn }} - id: entityDetail name: Get AWSEnvironment entity details diff --git a/build-script/backstage-install.sh b/build-script/backstage-install.sh index 
84bf001b..07648667 100755 --- a/build-script/backstage-install.sh +++ b/build-script/backstage-install.sh @@ -2,8 +2,8 @@ # The Create App version has a direct correlation with the version of Backstage # that is installed. -# 0.5.4 will install Backstage 0.17.5 -BACKSTAGE_CREATE_APP_VERSION="0.5.4" +# 0.5.7 will install Backstage 1.20.3 +BACKSTAGE_CREATE_APP_VERSION="0.5.8" # The OPA Backstage plugins can be installed in 1 of 2 ways. Firstly, the latest # published NPM packages can be used. Alternatively, the plugins can be installed @@ -65,11 +65,11 @@ cp $opaHomeDir/config/app-config.aws-production.yaml $backstageDir echo "" #intentional blank line echo "Installing backend dependencies" yarn --cwd packages/backend add \ - "@backstage/plugin-catalog-backend-module-gitlab@^0.2.6" \ - "@backstage/plugin-permission-backend@^0.5.25" \ - "@roadiehq/catalog-backend-module-okta@^0.8.5" \ - "@roadiehq/scaffolder-backend-module-utils@^1.10.1" \ - "@immobiliarelabs/backstage-plugin-gitlab-backend@^6.0.0" \ + "@backstage/plugin-catalog-backend-module-gitlab@^0.3.5" \ + "@backstage/plugin-permission-backend@^0.5.31" \ + "@roadiehq/catalog-backend-module-okta@^0.9.3" \ + "@roadiehq/scaffolder-backend-module-utils@^1.11.0" \ + "@immobiliarelabs/backstage-plugin-gitlab-backend@^6.4.0" \ "@aws/plugin-aws-apps-backend-for-backstage@${AWS_APPS_BACKEND_VERSION}" \ "@aws/plugin-scaffolder-backend-aws-apps-for-backstage@${AWS_APPS_SCAFFOLDER_VERSION}" @@ -77,7 +77,7 @@ yarn --cwd packages/backend add \ echo "" #intentional blank line echo "Installing frontend dependencies" yarn --cwd packages/app add \ - "@immobiliarelabs/backstage-plugin-gitlab@^6.0.0" \ + "@immobiliarelabs/backstage-plugin-gitlab@^6.4.0" \ "@aws/plugin-aws-apps-for-backstage@${AWS_APPS_VERSION}" \ "@backstage/plugin-home" \ "@aws/plugin-aws-apps-demo-for-backstage@${AWS_APPS_DEMO_VERSION}" diff --git a/build-script/gitlab-tools.sh b/build-script/gitlab-tools.sh index af93da46..6d25bdcd 100755 --- a/build-script/gitlab-tools.sh +++ b/build-script/gitlab-tools.sh @@ -16,6 +16,11 @@ GITLAB_TOKEN=$SECRET_GITLAB_CONFIG_PROP_apiToken # Try to create a new project if one doesn't exist (will fail through) curl -H "Content-Type:application/json" "https://$SSM_GITLAB_HOSTNAME/api/v4/projects?private_token=$GITLAB_TOKEN" -d "{ \"name\": \"backstage-reference\" , \"visibility\": \"internal\" }" +# Take backup of Git configs if they are present +if [ -f "$appDir/git-temp/backstage-reference/.git/config" ]; then + cp $appDir/git-temp/backstage-reference/.git/config $appDir/git-config-temp +fi + # Clean the temp directory if it exists to start from a blank slate if [ -d "$appDir/git-temp" ]; then rm -rf $appDir/git-temp @@ -25,10 +30,15 @@ mkdir -p $appDir/git-temp echo -e "\nCloning from https://$SSM_GITLAB_HOSTNAME/opa-admin/backstage-reference.git\n" git -C $appDir/git-temp clone -q "https://oauth2:$GITLAB_TOKEN@$SSM_GITLAB_HOSTNAME/opa-admin/backstage-reference.git" +# Reinstate Git configs if available +if [ -f "$appDir/git-config-temp" ]; then + mv $appDir/git-config-temp $appDir/git-temp/backstage-reference/.git/config +fi + # copy files to temp git repo rsync -a --delete --exclude='**/node_modules' --exclude='**/cdk.out' --exclude='**/.git' $appDir/backstage-reference/ $appDir/git-temp/backstage-reference -rsync -a --delete --exclude='**/node_modules' --exclude='**/cdk.out' $appDir/iac/roots/{opa-common-constructs,opa-ecs-environment,opa-serverless-environment} $appDir/git-temp/backstage-reference/environments +rsync -a --delete 
--exclude='**/node_modules' --exclude='**/cdk.out' $appDir/iac/roots/{opa-common-constructs,opa-ecs-environment,opa-eks-environment,opa-serverless-environment} $appDir/git-temp/backstage-reference/environments \cp $appDir/iac/roots/package.json $appDir/git-temp/backstage-reference/environments @@ -37,14 +47,15 @@ cd $appDir/git-temp/backstage-reference; # Replace variable placeholders with env specific information if [[ "$OSTYPE" == "darwin"* ]]; then find . -type f -name "*.yaml" -exec sed -i "" "s/{{ *gitlab_hostname *}}/$SSM_GITLAB_HOSTNAME/g" {} +; - find . -type f -name "*.yaml" -exec sed -i "" "s/{{ *aws-account *}}/$AWS_ACCOUNT_ID/g" {} +; + find . -type f -name "*.yaml" -exec sed -i "" "s/{{ *awsAccount *}}/$AWS_ACCOUNT_ID/g" {} +; else find . -type f -name "*.yaml" -exec sed -i "s/{{ *gitlab_hostname *}}/$SSM_GITLAB_HOSTNAME/g" {} +; - find . -type f -name "*.yaml" -exec sed -i "s/{{ *aws-account *}}/$AWS_ACCOUNT_ID/g" {} +; + find . -type f -name "*.yaml" -exec sed -i "s/{{ *awsAccount *}}/$AWS_ACCOUNT_ID/g" {} +; fi +IS_DEFENDER=$(type "git-defender" 2>/dev/null) # if the system is using git-defender and the repo is not configured, configure it -if command -v git-defender && ! grep -q "\[defender\]" .git/config ; then +if [[ ! -z "$IS_DEFENDER" ]] && ! grep -q "\[defender\]" .git/config ; then echo "Found git-defender, but repo is not configured. Proceeding to configure repo for git-defender" (sleep 1; echo -e "y\n"; sleep 1; echo -e "y\n";)|git defender --setup echo "" @@ -53,11 +64,37 @@ fi # Add and commit changes to repo if there are files to commit if [ -n "$(git status --porcelain=v1 2>/dev/null)" ]; then + echo "Changes found, committing to repo" git add --all + echo "Committing changes" git commit --no-verify -m "Reference Commit" - git push + + # set a variable to track retry attempts + retry_attempts=0 + # set a variable to track the number of retries + max_retries=5 + # set a variable to track the sleep time between retries + sleep_time=30 + # Push to git. If the command fails, retry up to 5 times with a sleep between retries + while ! git push; do + # uncomment the following code to print the git configuration for debugging purposes + # if [ $retry_attempts -eq 0 ]; then + # cat .git/config + # fi + # increment the retry attempts + retry_attempts=$((retry_attempts+1)) + # if the retry attempts are greater than the max retries, exit the script + if [ $retry_attempts -gt $max_retries ]; then + echo "Max retries exceeded, exiting" + exit 1 + # otherwise, sleep for 30 seconds and try again + else + echo "Push failed, retrying in $sleep_time seconds" + sleep $sleep_time + fi + done + else echo "No changes to commit." fi -cd - echo "Finished setting up the backstage reference repo." 
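One caveat with the placeholder substitution in gitlab-tools.sh above: the sed expressions only rewrite the {{ gitlab_hostname }} and {{ awsAccount }} tokens in *.yaml files, so any placeholder the patterns miss is committed and pushed silently. A minimal sketch of a guard that could run right after the substitution block (illustrative only, not part of this patch; it assumes the working directory is still $appDir/git-temp/backstage-reference, as in the surrounding script):

# Illustrative guard (not part of the patch): abort before committing if any of the
# known placeholders survived the sed pass above.
if grep -rn --include='*.yaml' -E '\{\{ *(gitlab_hostname|awsAccount) *\}\}' . ; then
  echo "Unresolved placeholders found in the reference repo; aborting." >&2
  exit 1
fi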
diff --git a/config/app-config.aws-production.yaml b/config/app-config.aws-production.yaml index 4b7aab38..cb7ee507 100644 --- a/config/app-config.aws-production.yaml +++ b/config/app-config.aws-production.yaml @@ -61,8 +61,9 @@ auth: clientId: ${OKTA_CLIENT_ID} clientSecret: ${OKTA_CLIENT_SECRET} audience: ${OKTA_ORG_URL} - # authServerId: - # idp: + # authServerId: ${AUTH_OKTA_AUTH_SERVER_ID} + # idp: ${AUTH_OKTA_IDP} + # additionalScopes: ${AUTH_OKTA_ADDITIONAL_SCOPES} integrations: gitlab: @@ -110,13 +111,15 @@ catalog: okta: - orgUrl: ${OKTA_ORG_URL} token: ${OKTA_API_TOKEN} + # userFilter: profile.department eq "engineering" + # groupFilter: profile.name eq "Everyone" gitlab: opa: host: ${SSM_GITLAB_HOSTNAME} branch: main fallbackBranch: main skipForkedRepos: false - # group: example-group # Optional. Group and subgroup (if needed) to look for repositories. If not present the whole instance will be scanned + # group: example-org/teams # Required for user ingestion. Group and subgroup (if needed) to look for users. entityFilename: .backstage/catalog-info.yaml projectPattern: '[\s\S]*' schedule: diff --git a/config/aws-production.Dockerfile b/config/aws-production.Dockerfile index 94efdb3d..a047b57e 100644 --- a/config/aws-production.Dockerfile +++ b/config/aws-production.Dockerfile @@ -34,7 +34,11 @@ ENV NODE_ENV production COPY --chown=node:node backstage/yarn.lock backstage/package.json backstage/packages/backend/dist/skeleton.tar.gz ./ RUN tar xzf skeleton.tar.gz && rm skeleton.tar.gz -RUN --mount=type=cache,target=/home/node/.cache/yarn,sharing=locked,uid=1000,gid=1000 \ +# Clearing Yarn cache to avoid issues with corrupted cache files +RUN yarn cache clean + +# Yarn install with a network timeout setting +RUN target=/home/node/.cache/yarn,sharing=locked,uid=1000,gid=1000 \ + yarn install --frozen-lockfile --production --network-timeout 300000 # Then copy the rest of the backend bundle, along with any other files we might want. diff --git a/config/sample.env b/config/sample.env index 1bdd907d..f3520da5 100644 --- a/config/sample.env +++ b/config/sample.env @@ -37,7 +37,7 @@ OKTA_API_TOKEN="TODO" # The org URL for your Okta domain (e.g. https://dev-12345678.okta.com) OKTA_AUDIENCE="https://TODO.okta.com" OKTA_AUTH_SERVER_ID="" -# Application need confifured as mentioned in [Backstage Authentication documentation](https://backstage.io/docs/auth/) +# Application needs to be configured as mentioned in [Backstage Authentication documentation](https://backstage.io/docs/auth/) OKTA_CLIENT_ID="TODO" OKTA_CLIENT_SECRET="TODO" OKTA_IDP="" diff --git a/iac/roots/opa-basic-environment/.gitignore b/iac/roots/opa-basic-environment/.gitignore new file mode 100644 index 00000000..f60797b6 --- /dev/null +++ b/iac/roots/opa-basic-environment/.gitignore @@ -0,0 +1,8 @@ +*.js +!jest.config.js +*.d.ts +node_modules + +# CDK asset staging directory +.cdk.staging +cdk.out diff --git a/iac/roots/opa-basic-environment/.npmignore b/iac/roots/opa-basic-environment/.npmignore new file mode 100644 index 00000000..c1d6d45d --- /dev/null +++ b/iac/roots/opa-basic-environment/.npmignore @@ -0,0 +1,6 @@ +*.ts +!*.d.ts + +# CDK asset staging directory +.cdk.staging +cdk.out diff --git a/iac/roots/opa-basic-environment/README.md b/iac/roots/opa-basic-environment/README.md new file mode 100644 index 00000000..6c7dbf7a --- /dev/null +++ b/iac/roots/opa-basic-environment/README.md @@ -0,0 +1,22 @@ +# OPA +## Intro +This CDK Stack deploys a new backend environment to support the OPA solution.
For further details, refer to: +1. [GitHub Repo](https://github.com/awslabs/app-development-for-backstage-io-on-aws) +2. [YouTube Channel](https://www.youtube.com/playlist?list=PLhr1KZpdzukemoBUAPNUMCgGk88pdURJB) + +## Installation + +1. Option 1 - CLI - using your favorite terminal, execute the make command below +```bash +make deploy +``` +2. Option 2 - CloudFormation - to deploy the stack, it must first be generated + 1. Synthesize the stack + ``` + cdk synth + ``` + 2. Use the generated CloudFormation template and provide the required parameters. [Additional Information](https://aws.amazon.com/blogs/infrastructure-and-automation/deploy-cloudformation-stacks-at-the-click-of-a-button/) + + +## What's included? + diff --git a/iac/roots/opa-basic-environment/cdk.json b/iac/roots/opa-basic-environment/cdk.json new file mode 100644 index 00000000..6c0c1fd7 --- /dev/null +++ b/iac/roots/opa-basic-environment/cdk.json @@ -0,0 +1,54 @@ +{ + "app": "npx ts-node --prefer-ts-exts src/opa-basic-env-app.ts", + "watch": { + "include": [ + "**" + ], + "exclude": [ + "README.md", + "cdk*.json", + "**/*.d.ts", + "**/*.js", + "tsconfig.json", + "package*.json", + "yarn.lock", + "node_modules", + "test" + ] + }, + "context": { + "@aws-cdk/aws-lambda:recognizeLayerVersion": true, + "@aws-cdk/core:checkSecretUsage": true, + "@aws-cdk/core:target-partitions": [ + "aws", + "aws-cn" + ], + "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true, + "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true, + "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true, + "@aws-cdk/aws-iam:minimizePolicies": true, + "@aws-cdk/core:validateSnapshotRemovalPolicy": true, + "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true, + "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true, + "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true, + "@aws-cdk/aws-apigateway:disableCloudWatchRole": true, + "@aws-cdk/core:enablePartitionLiterals": true, + "@aws-cdk/aws-events:eventsTargetQueueSameAccount": true, + "@aws-cdk/aws-iam:standardizedServicePrincipals": true, + "@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true, + "@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true, + "@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true, + "@aws-cdk/aws-route53-patters:useCertificate": true, + "@aws-cdk/customresources:installLatestAwsSdkDefault": false, + "@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true, + "@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true, + "@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true, + "@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true, + "@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true, + "@aws-cdk/aws-redshift:columnId": true, + "@aws-cdk/aws-stepfunctions-tasks:enableEmrServicePolicyV2": true, + "@aws-cdk/aws-ec2:restrictDefaultSecurityGroup": true, + "@aws-cdk/aws-apigateway:requestValidatorUniqueId": true, + "@aws-cdk/aws-kms:aliasNameRef": true + } +} diff --git a/iac/roots/opa-basic-environment/jest.config.js b/iac/roots/opa-basic-environment/jest.config.js new file mode 100644 index 00000000..08263b89 --- /dev/null +++ b/iac/roots/opa-basic-environment/jest.config.js @@ -0,0 +1,8 @@ +module.exports = { + testEnvironment: 'node', + roots: ['<rootDir>/test'], + testMatch: ['**/*.test.ts'], + transform: { + '^.+\\.tsx?$': 'ts-jest' + } +}; diff --git a/iac/roots/opa-basic-environment/package.json b/iac/roots/opa-basic-environment/package.json new
file mode 100644 index 00000000..840279c9 --- /dev/null +++ b/iac/roots/opa-basic-environment/package.json @@ -0,0 +1,33 @@ +{ + "name": "@aws/aws-app-development-basic-environment", + "version": "0.3.0", + "description": "A stack to create a Basic environment for OPA", + "main": "dist/index.js", + "types": "dist/index.d.ts", + "license": "Apache-2.0", + "files": [ + "/dist" + ], + "scripts": { + "build": "tsc", + "watch": "tsc -w", + "test": "jest", + "cdk": "cdk" + }, + "devDependencies": { + "@types/jest": "^29.5.1", + "@types/node": "^20.3.1", + "@types/prettier": "2.6.0", + "@types/source-map-support": "^0.5.6", + "aws-cdk": "2.88.0", + "jest": "^29.5.0", + "ts-jest": "^29.1.0", + "ts-node": "^10.9.1", + "typescript": "~5.0.4" + }, + "dependencies": { + "@aws/aws-app-development-common-constructs": "0.3.0", + "aws-cdk-lib": "2.88.0", + "constructs": "^10.0.0" + } +} diff --git a/iac/roots/opa-basic-environment/src/constructs/basic-env-operations-role-construct.ts b/iac/roots/opa-basic-environment/src/constructs/basic-env-operations-role-construct.ts new file mode 100644 index 00000000..f008aabf --- /dev/null +++ b/iac/roots/opa-basic-environment/src/constructs/basic-env-operations-role-construct.ts @@ -0,0 +1,225 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from "aws-cdk-lib"; +import * as iam from "aws-cdk-lib/aws-iam"; +import * as kms from "aws-cdk-lib/aws-kms"; +import * as ssm from "aws-cdk-lib/aws-ssm"; +import { Construct } from "constructs"; +import { OPAEnvironmentParams } from "@aws/aws-app-development-common-constructs"; + +/* eslint-disable @typescript-eslint/no-empty-interface */ +export interface BasicOperationsConstructProps extends cdk.StackProps { + readonly opaEnv: OPAEnvironmentParams; + KMSkey: kms.IKey; + assumedBy: string; + auditTable: string; +} + +const defaultProps: Partial = {}; + +export class BasicOperationsConstruct extends Construct { + public IAMRole: iam.Role; + public operationsRoleParam: ssm.StringParameter; + public operationsRoleArnParam: ssm.StringParameter; + + constructor(parent: Construct, name: string, props: BasicOperationsConstructProps) { + super(parent, name); + + /* eslint-disable @typescript-eslint/no-unused-vars */ + props = { ...defaultProps, ...props }; + + const envIdentifier = `${props.opaEnv.prefix.toLowerCase()}-${props.opaEnv.envName}`; + const envPathIdentifier = `/${props.opaEnv.prefix.toLowerCase()}/${props.opaEnv.envName.toLowerCase()}`; + + // Create Iam role + this.IAMRole = new iam.Role(this, `${envIdentifier}-role`, { + assumedBy: new iam.ArnPrincipal(props.assumedBy), + roleName: name, + managedPolicies: [ + iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonEC2ContainerRegistryFullAccess"), + iam.ManagedPolicy.fromAwsManagedPolicyName("CloudWatchFullAccess"), + ], + maxSessionDuration: cdk.Duration.seconds(43200), + }); + + // Add Secret and SSM access + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "secretsmanager:CreateSecret", + "secretsmanager:GetSecretValue", + "secretsmanager:PutSecretValue", + "secretsmanager:UpdateSecret", + "secretsmanager:TagResource", + ], + effect: iam.Effect.ALLOW, + resources: [`arn:aws:secretsmanager:*:${props.opaEnv.awsAccount}:secret:*`], + }) + ); + this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonSSMReadOnlyAccess")); + + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "s3:GetObject", + "s3:GetObjectAttributes" + ], + effect: 
iam.Effect.ALLOW, + resources: ["arn:aws:s3:::*/packaged.yaml"], + conditions: { + "StringEquals": { + "aws:ResourceAccount": props.opaEnv.awsAccount + } + } + }) + ); + + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "resource-groups:ListGroupResources" + ], + effect: iam.Effect.ALLOW, + resources: [`arn:aws:resource-groups:*:${props.opaEnv.awsAccount}:group/*`], + }) + ); + + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: ["tag:GetResources"], + effect: iam.Effect.ALLOW, + resources: ["*"], + }) + ); + + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "dynamodb:Scan", + "dynamodb:PutItem", + ], + effect: iam.Effect.ALLOW, + resources: [`arn:aws:dynamodb:*:${props.opaEnv.awsAccount}:table/*`], + }) + ); + + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: ["kms:Decrypt"], + effect: iam.Effect.ALLOW, + resources: [props.KMSkey.keyArn], + }) + ); + + // Write Audit access + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "dynamodb:List*", + "dynamodb:DescribeStream", + "dynamodb:DescribeTable", + "dynamodb:Put*", + ], + effect: iam.Effect.ALLOW, + resources: [`arn:aws:dynamodb:*:${props.opaEnv.awsAccount}:${props.auditTable}`], + }) + ); + + // allow creation of a Resource Group to track application resources via tags + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "resource-groups:CreateGroup" + ], + effect: iam.Effect.ALLOW, + resources: ["*"], // CreateGroup does not support resource-level permissions and requires a wildcard + }) + ); + + // allow SAM template conversion into standard CloudFormation + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "cloudformation:CreateChangeSet", + ], + effect: iam.Effect.ALLOW, + resources: [ + "arn:aws:cloudformation:*:aws:transform/Include", + "arn:aws:cloudformation:*:aws:transform/Serverless-2016-10-31" + ] + }) + ); + + // allow deploying a serverless application stack (such as a SAM template) + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "cloudformation:CreateChangeSet", + "cloudformation:CreateStack", + "cloudformation:DeleteStack", + "cloudformation:DescribeStacks", + "cloudformation:DescribeStackEvents", + "cloudformation:ListStackResources", + "cloudformation:UpdateStack", + ], + effect: iam.Effect.ALLOW, + resources: [`arn:aws:cloudformation:*:${props.opaEnv.awsAccount}:stack/*`], + conditions: { + "StringEquals": { + "aws:ResourceAccount": props.opaEnv.awsAccount + } + } + }) + ); + + // allow creating security groups for Lambda functions + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteSecurityGroup", + "ec2:DescribeSecurityGroups", + "ec2:ModifySecurityGroupRules", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress", + ], + effect: iam.Effect.ALLOW, + resources: [`*`], + }) + ); + + // Add managed role policies to support SAM template deployment for non-root roles + // + // In a production scenario, a customized IAM policy granting specific permissions should be created. 
+ // See https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-permissions-cloudformation.html + this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AWSCloudFormationFullAccess")); + this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("IAMFullAccess")); + this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AWSLambda_FullAccess")); + this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonAPIGatewayAdministrator")); + this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonS3ReadOnlyAccess")); + + // now save the VPC in SSM Param + const roleParam = new ssm.StringParameter(this, `${envIdentifier}-role-param`, { + allowedPattern: ".*", + description: `The Operations Role for OPA Solution: ${props.opaEnv.envName} Environment`, + parameterName: `${envPathIdentifier}/operations-role`, + stringValue: this.IAMRole.roleName, + }); + + const roleArnParam = new ssm.StringParameter(this, `${envIdentifier}-role-arn-param`, { + allowedPattern: ".*", + description: `The Operations Role Arn for OPA Solution: ${props.opaEnv.envName} Environment`, + parameterName: `${envPathIdentifier}/operations-role-arn`, + stringValue: this.IAMRole.roleArn, + }); + + this.operationsRoleParam = roleParam; + this.operationsRoleArnParam = roleArnParam; + + } + +} diff --git a/iac/roots/opa-basic-environment/src/constructs/basic-env-provisioning-role-construct.ts b/iac/roots/opa-basic-environment/src/constructs/basic-env-provisioning-role-construct.ts new file mode 100644 index 00000000..0f6a79d7 --- /dev/null +++ b/iac/roots/opa-basic-environment/src/constructs/basic-env-provisioning-role-construct.ts @@ -0,0 +1,177 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from "aws-cdk-lib"; +import * as iam from "aws-cdk-lib/aws-iam"; +import * as kms from "aws-cdk-lib/aws-kms"; +import * as ssm from "aws-cdk-lib/aws-ssm"; +import { Construct } from "constructs"; +import { OPAEnvironmentParams } from "@aws/aws-app-development-common-constructs"; + +/* eslint-disable @typescript-eslint/no-empty-interface */ +export interface BasicProvisioningConstructProps extends cdk.StackProps { + readonly opaEnv: OPAEnvironmentParams; + KMSkey: kms.IKey; + assumedBy: string; + auditTable: string; +} + +const defaultProps: Partial = {}; + +export class BasicProvisioningConstruct extends Construct { + public IAMRole: iam.Role; + public provisioningRoleParam: ssm.StringParameter; + public provisioningRoleArnParam: ssm.StringParameter; + constructor(parent: Construct, name: string, props: BasicProvisioningConstructProps) { + super(parent, name); + + /* eslint-disable @typescript-eslint/no-unused-vars */ + props = { ...defaultProps, ...props }; + + const envIdentifier = `${props.opaEnv.prefix.toLowerCase()}-${props.opaEnv.envName}`; + const envPathIdentifier = `/${props.opaEnv.prefix.toLowerCase()}/${props.opaEnv.envName.toLowerCase()}`; + + // Create Iam role + this.IAMRole = new iam.Role(this, `${envIdentifier}-role`, { + assumedBy: new iam.ArnPrincipal(props.assumedBy), + roleName: name, + managedPolicies: [ + // !FIXME: Need to scope down the role from PowerUserAccess. 
This workaround is to allow provisioning for sprint 2 demo + iam.ManagedPolicy.fromAwsManagedPolicyName("PowerUserAccess"), + iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonEC2ContainerRegistryFullAccess"), + iam.ManagedPolicy.fromAwsManagedPolicyName("CloudWatchFullAccess"), + ], + maxSessionDuration: cdk.Duration.seconds(43200), + }); + + // Add Secret and SSM access + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "secretsmanager:CreateSecret", + "secretsmanager:GetSecretValue", + "secretsmanager:PutSecretValue", + "secretsmanager:UpdateSecret", + "secretsmanager:TagResource", + ], + effect: iam.Effect.ALLOW, + resources: [`arn:aws:secretsmanager:*:${props.opaEnv.awsAccount}:secret:*`], + }) + ); + this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonSSMReadOnlyAccess")); + + // Bucket creation and tagging and reading for serverless deployments + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "s3:CreateBucket", + "s3:PutBucketTagging" + ], + effect: iam.Effect.ALLOW, + resources: ["*"], + conditions: { + "StringEquals": { + "aws:ResourceAccount": props.opaEnv.awsAccount + } + } + }) + ); + + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "s3:GetObject", + "s3:GetObjectAttributes" + ], + effect: iam.Effect.ALLOW, + resources: ["arn:aws:s3:::*/packaged.yaml"], + conditions: { + "StringEquals": { + "aws:ResourceAccount": props.opaEnv.awsAccount + } + } + }) + ); + + // Add resource group access + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "resource-groups:ListGroupResources", + "resource-groups:Tag", + "resource-groups:DeleteGroup" + ], + effect: iam.Effect.ALLOW, + resources: [`arn:aws:resource-groups:*:${props.opaEnv.awsAccount}:group/*`], + }) + ); + + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: ["tag:GetResources"], + effect: iam.Effect.ALLOW, + resources: ["*"], + }) + ); + + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: ["kms:Decrypt"], + effect: iam.Effect.ALLOW, + resources: [props.KMSkey.keyArn], + }) + ); + + // Write Audit access + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "dynamodb:List*", + "dynamodb:DescribeStream", + "dynamodb:DescribeTable", + "dynamodb:Put*", + ], + effect: iam.Effect.ALLOW, + resources: [`arn:aws:dynamodb:*:${props.opaEnv.awsAccount}:${props.auditTable}`], + }) + ); + + // allow creation of a Resource Group to track application resources via tags + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "resource-groups:CreateGroup" + ], + effect: iam.Effect.ALLOW, + resources: ["*"], // CreateGroup does not support resource-level permissions and requires a wildcard + }) + ); + + // Add managed role policies to support SAM template deployment for non-root roles + // + // In a production scenario, a customized IAM policy granting specific permissions should be created. 
+ // See https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-permissions-cloudformation.html + this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AWSCloudFormationFullAccess")); + this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("IAMFullAccess")); + this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonS3FullAccess")); + + // now save the VPC in SSM Param + const roleParam = new ssm.StringParameter(this, `${envIdentifier}-role-param`, { + allowedPattern: ".*", + description: `The Provisioning Role for OPA Solution: ${props.opaEnv.envName} Environment`, + parameterName: `${envPathIdentifier}/provisioning-role`, + stringValue: this.IAMRole.roleName, + }); + + const roleArnParam = new ssm.StringParameter(this, `${envIdentifier}-role-arn-param`, { + allowedPattern: ".*", + description: `The Provisioning Role Arn for OPA Solution: ${props.opaEnv.envName} Environment`, + parameterName: `${envPathIdentifier}/provisioning-role-arn`, + stringValue: this.IAMRole.roleArn, + }); + + this.provisioningRoleParam = roleParam; + this.provisioningRoleArnParam = roleArnParam; + } + +} diff --git a/iac/roots/opa-basic-environment/src/opa-basic-env-app.ts b/iac/roots/opa-basic-environment/src/opa-basic-env-app.ts new file mode 100644 index 00000000..9339ce6f --- /dev/null +++ b/iac/roots/opa-basic-environment/src/opa-basic-env-app.ts @@ -0,0 +1,50 @@ +#!/usr/bin/env node + +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from "aws-cdk-lib"; +import "source-map-support/register"; +import { OPABasicEnvStack } from "./opa-basic-environment-stack"; +import { makeRandom } from "@aws/aws-app-development-common-constructs"; + +function basicMandatory(propertyName: string) { + if (!process.env[propertyName]) + throw new Error(`${propertyName} Environment variable is missing and mandatory for Basic environment`); +} + +/** + * Main application function, make it async so it can call asnyc functions properly. + */ +async function main() { + const app = new cdk.App(); + + console.log("Loading Configurations for Basic Environment..."); + + const account = process.env.AWS_ACCOUNT_ID as string; + const region = process.env.AWS_DEFAULT_REGION as string; + + const env = { region, account }; + + basicMandatory("ENV_NAME"); + basicMandatory("AWS_ACCOUNT_ID"); + basicMandatory("PLATFORM_ROLE_ARN"); + basicMandatory("PIPELINE_ROLE_ARN"); + + // generate unique environment identifier + const envID = makeRandom(4); + console.log("Generating unique Environment identifier for Basic environment: " + envID) + + // scope: Construct, id: string, props: OPABasicEnvStackProps + new OPABasicEnvStack(app, `BASIC-ENV-${process.env.ENV_NAME}-Stack`, { + // stackName: `opa-basic-environment`, // Do not use stack name to get a generated stack name so multiple stacks can be created + description: `${envID} Basic Environment for OPA(AWS App Development)`, + uniqueEnvIdentifier: envID, + env, + }); + + + app.synth(); +} + +main(); diff --git a/iac/roots/opa-basic-environment/src/opa-basic-environment-stack.ts b/iac/roots/opa-basic-environment/src/opa-basic-environment-stack.ts new file mode 100644 index 00000000..9501d156 --- /dev/null +++ b/iac/roots/opa-basic-environment/src/opa-basic-environment-stack.ts @@ -0,0 +1,126 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from "aws-cdk-lib"; +import * as ssm from "aws-cdk-lib/aws-ssm"; +import * as kms from "aws-cdk-lib/aws-kms"; +import { Construct } from "constructs"; +import { OPAEnvironmentParams, DynamoDBConstruct, } from '@aws/aws-app-development-common-constructs' +import { BasicProvisioningConstruct } from './constructs/basic-env-provisioning-role-construct' +import { BasicOperationsConstruct } from "./constructs/basic-env-operations-role-construct"; + +export interface OPABasicEnvStackProps extends cdk.StackProps { + uniqueEnvIdentifier: string; +} + +export class OPABasicEnvStack extends cdk.Stack { + + constructor(scope: Construct, id: string, props: OPABasicEnvStackProps) { + super(scope, id, props); + + const prefix = process.env.PREFIX as string || "opa"; + const envName = process.env.ENV_NAME as string + const awsAccount = process.env.AWS_ACCOUNT_ID as string + const platformRoleArn = process.env.PLATFORM_ROLE_ARN as string + const pipelineRoleArn = process.env.PIPELINE_ROLE_ARN as string + const awsRegion = process.env.AWS_DEFAULT_REGION as string || "us-east-1" + const cidrInput = process.env.ENV_CIDR as string || "10.0.0.0/24" + + // Creating environment params object + + const opaEnvParams: OPAEnvironmentParams = { + envName: envName, + awsRegion: awsRegion, + awsAccount: awsAccount, + prefix: prefix + } + + const envIdentifier = opaEnvParams.envName; + const envPathIdentifier = `/${envIdentifier}` + + // Create encryption key for all data at rest encryption + const key = new kms.Key(this, `${envIdentifier}-key`, { + alias: `${envIdentifier}-key`, + enableKeyRotation: true, + removalPolicy: cdk.RemovalPolicy.DESTROY, + pendingWindow: cdk.Duration.days(8), + }); + + // Save KMS key arn in an SSM Parameter + new ssm.StringParameter(this, `${envIdentifier}-key-param`, { + allowedPattern: ".*", + description: `The KMS Key for Basic Solution: ${envIdentifier} Environment`, + parameterName: `${envPathIdentifier}/kms-key`, + stringValue: key.keyArn, + }); + + //create audit table + const auditTableConstruct = new DynamoDBConstruct(this, "audit-table", { + opaEnv: opaEnvParams, + tableName: `${envIdentifier}-audit`, + kmsKey: key, + }); + + // Create pipeline provisioning role for the environment + const provisioningRoleConstruct = new BasicProvisioningConstruct(this, `${opaEnvParams.prefix}-${envIdentifier}-provisioning-role`, { + opaEnv: opaEnvParams, + KMSkey: key, + assumedBy: pipelineRoleArn, + auditTable: auditTableConstruct.table.tableName + }); + + // Create operations role for the environment + const operationsRoleConstruct = new BasicOperationsConstruct(this, `${opaEnvParams.prefix}-${envIdentifier}-operations-role`, { + opaEnv: opaEnvParams, + KMSkey: key, + assumedBy: platformRoleArn, + auditTable: auditTableConstruct.table.tableName + }); + + // save the unique environment identifier + const uniqueEnvId = new ssm.StringParameter(this, `${envIdentifier}-unique-id-param`, { + allowedPattern: ".*", + description: `The Unique ID for: ${opaEnvParams.envName} Environment`, + parameterName: `${envPathIdentifier}/unique-id`, + stringValue: props.uniqueEnvIdentifier, + }); + + // Printing outputs + new cdk.CfnOutput(this, "Environment_Name", { + value: envName, + }); + + // Printing the unique environment ID + new cdk.CfnOutput(this, "Environment_ID", { + value: uniqueEnvId.stringValue, + }); + + // Printing audit table + new cdk.CfnOutput(this, "AuditTable", { + value: auditTableConstruct.tableParam.parameterName, + }); + + // Print 
role information + new cdk.CfnOutput(this, "Provisioning_Role", { + value: provisioningRoleConstruct.provisioningRoleParam.parameterName, + }); + + new cdk.CfnOutput(this, "Provisioning_Role_ARN", { + value: provisioningRoleConstruct.provisioningRoleArnParam.parameterName, + }); + + new cdk.CfnOutput(this, "Operations_Role", { + value: operationsRoleConstruct.operationsRoleParam.parameterName, + }); + + new cdk.CfnOutput(this, "Operations_Role_ARN", { + value: operationsRoleConstruct.operationsRoleArnParam.parameterName, + }); + + // print the stack name as a Cloudformation output + new cdk.CfnOutput(this, `StackName`, { + value: this.stackName, + description: "The Basic Environment Provider CF Stack name", + }); + } +} diff --git a/iac/roots/opa-common-constructs/package.json b/iac/roots/opa-common-constructs/package.json index 9ab022f9..b75457e2 100644 --- a/iac/roots/opa-common-constructs/package.json +++ b/iac/roots/opa-common-constructs/package.json @@ -1,10 +1,9 @@ { "name": "@aws/aws-app-development-common-constructs", - "version": "1.0.0", + "version": "0.3.0", "description": "Constructs collection for OPA", "main": "index.ts", "types": "index.d.ts", - "private": true, "files": [ "/dist" ], @@ -20,7 +19,7 @@ "@types/node": "^20.3.1", "@types/prettier": "2.7.3", "@types/source-map-support": "^0.5.6", - "aws-cdk": "2.88.0", + "aws-cdk": "2.120.0", "jest": "^29.5.0", "ts-jest": "^29.1.0", "ts-node": "^10.9.1", @@ -28,7 +27,7 @@ }, "dependencies": { "@aws-sdk/client-s3": "^3.370.0", - "aws-cdk-lib": "2.88.0", + "aws-cdk-lib": "2.120.0", "cdk-nag": "^2.27.95", "constructs": "^10.2.52", "source-map-support": "^0.5.21" diff --git a/iac/roots/opa-common-constructs/src/ecs-cluster-construct.ts b/iac/roots/opa-common-constructs/src/ecs-cluster-construct.ts index f5407f0e..359fc0b9 100644 --- a/iac/roots/opa-common-constructs/src/ecs-cluster-construct.ts +++ b/iac/roots/opa-common-constructs/src/ecs-cluster-construct.ts @@ -17,7 +17,7 @@ export interface EcsClusterConstructProps extends cdk.StackProps { /** * The VPC to launch the cluster in */ - vpc: vpc.Vpc; + vpc: vpc.IVpc; /** * Whether to launch a fargate launch-type cluster or EC2 launch-type * diff --git a/iac/roots/opa-common-constructs/src/network-construct.ts b/iac/roots/opa-common-constructs/src/network-construct.ts index 316e7c25..b0cdf433 100644 --- a/iac/roots/opa-common-constructs/src/network-construct.ts +++ b/iac/roots/opa-common-constructs/src/network-construct.ts @@ -3,12 +3,12 @@ import * as cdk from "aws-cdk-lib"; import * as ec2 from "aws-cdk-lib/aws-ec2"; -import * as ssm from "aws-cdk-lib/aws-ssm"; +import { SubnetType } from "aws-cdk-lib/aws-ec2"; import * as s3 from "aws-cdk-lib/aws-s3"; +import * as ssm from "aws-cdk-lib/aws-ssm"; +import { NagSuppressions } from "cdk-nag"; import { Construct } from "constructs"; -import { SubnetType } from "aws-cdk-lib/aws-ec2"; import { OPAEnvironmentParams } from "./opa-environment-params"; -import { NagSuppressions } from "cdk-nag"; /* eslint-disable @typescript-eslint/no-empty-interface */ export interface NetworkConstructProps extends cdk.StackProps { @@ -16,8 +16,9 @@ export interface NetworkConstructProps extends cdk.StackProps { readonly cidrRange: string; readonly isIsolated: boolean; readonly allowedIPs?: string[]; - readonly publicVpcNatGatewayCount: number - readonly vpcAzCount: number + readonly publicVpcNatGatewayCount: number; + readonly vpcAzCount: number; + readonly existingVpcId?: string; } const defaultProps: Partial = {}; @@ -26,7 +27,7 @@ const defaultProps: 
Partial = {}; * Deploys the VpcAllowedIps construct */ export class NetworkConstruct extends Construct { - public readonly vpc: ec2.Vpc; + public readonly vpc: ec2.IVpc; public readonly allowedIpsSg: ec2.SecurityGroup; public readonly publicEIPref!: string[]; public readonly logBucket: s3.IBucket; @@ -40,114 +41,125 @@ export class NetworkConstruct extends Construct { /* eslint-disable @typescript-eslint/no-unused-vars */ props = { ...defaultProps, ...props }; - let vpc = null; - let publicSubnetIds; - let privateSubnetIds; - - if (props.isIsolated) { - // Private VPC - vpc = new ec2.Vpc(this, `${envIdentifier}-VPC`, { - ipAddresses: ec2.IpAddresses.cidr(props.cidrRange), - enableDnsHostnames: true, - enableDnsSupport: true, - vpcName: name, - natGateways: 0, - subnetConfiguration: [{ cidrMask: 23, name: "Isolated", subnetType: ec2.SubnetType.PRIVATE_ISOLATED }], - maxAzs: props.vpcAzCount, + let vpc; + + if (props.existingVpcId) { + //Use the existing VPC + + vpc = ec2.Vpc.fromLookup(this, "ExistingVPC", { + vpcId: props.existingVpcId, }); - publicSubnetIds = "[]"; - privateSubnetIds = JSON.stringify( - vpc.selectSubnets({ subnetType: SubnetType.PRIVATE_ISOLATED }).subnets - .map(subnet => subnet.subnetId)); + // Retrieve the subnet IDs + + // Retrieve the subnet IDs + const publicsubnetIds = vpc.publicSubnets.length > 0 + ? vpc.publicSubnets.map(subnet => subnet.subnetId) + : [' ']; + const privateSubnetIds = vpc.privateSubnets.length > 0 + ? vpc.privateSubnets.map(subnet => subnet.subnetId) + : [' ']; + + // Store the public subnet IDs in AWS Systems Manager Parameter Store + new ssm.StringListParameter(this, `${envIdentifier}-pub-subnet-param`, { + allowedPattern: ".*", + description: `The VPC public subnetIds for OPA Solution: ${props.opaEnv.envName} Environment`, + parameterName: `${envPathIdentifier}/vpc/public-subnets`, + stringListValue: publicsubnetIds, + }); - } else { - // Public VPC - const allocationIds: string[] = []; - this.publicEIPref = []; + // Store the private subnet IDs in AWS Systems Manager Parameter Store + new ssm.StringListParameter(this, `${envIdentifier}-priv-subnet-param`, { + allowedPattern: ".*", + description: `The VPC Private subnetIds for OPA Solution: ${props.opaEnv.envName} Environment`, + parameterName: `${envPathIdentifier}/vpc/private-subnets`, + stringListValue: privateSubnetIds, + }); - // Create as many EIP as there are AZ/Subnets and store their allocIds & refs. - for (let i = 0; i < props.vpcAzCount; i++) { - const eip = new ec2.CfnEIP(this, `VPCPublicSubnet${i + 1}NATGatewayEIP${i}`, { - domain: "vpc", - tags: [ + + } else { + let publicSubnetIds; + let privateSubnetIds; + + if (props.isIsolated) { + // Private VPC + vpc = new ec2.Vpc(this, `${envIdentifier}-VPC`, { + ipAddresses: ec2.IpAddresses.cidr(props.cidrRange), + enableDnsHostnames: true, + enableDnsSupport: true, + vpcName: name, + natGateways: 0, + subnetConfiguration: [ { - key: "Name", - value: `${name}/VPC/PublicSubnet${i + 1}`, + cidrMask: 23, + name: "Isolated", + subnetType: ec2.SubnetType.PRIVATE_ISOLATED, }, ], + maxAzs: props.vpcAzCount, }); - allocationIds.push(eip.attrAllocationId); + publicSubnetIds = "[]"; + privateSubnetIds = JSON.stringify( + vpc.selectSubnets({ subnetType: SubnetType.PRIVATE_ISOLATED }).subnets.map((subnet) => subnet.subnetId) + ); + } else { + // Public VPC + const allocationIds: string[] = []; + this.publicEIPref = []; + + // Create as many EIP as there are AZ/Subnets and store their allocIds & refs. 
+ for (let i = 0; i < props.vpcAzCount; i++) { + const eip = new ec2.CfnEIP(this, `VPCPublicSubnet${i + 1}NATGatewayEIP${i}`, { + domain: "vpc", + tags: [ + { + key: "Name", + value: `${name}/VPC/PublicSubnet${i + 1}`, + }, + ], + }); + + allocationIds.push(eip.attrAllocationId); + + // Do whatever you need with your EIPs here, ie. store their ref for later use + this.publicEIPref.push(eip.ref); + } + vpc = new ec2.Vpc(this, `${envIdentifier}-VPC`, { + ipAddresses: ec2.IpAddresses.cidr(props.cidrRange), + enableDnsHostnames: true, + enableDnsSupport: true, + vpcName: name, + natGateways: props.publicVpcNatGatewayCount, + maxAzs: props.vpcAzCount, + natGatewayProvider: ec2.NatProvider.gateway({ + eipAllocationIds: allocationIds, + }), + defaultInstanceTenancy: ec2.DefaultInstanceTenancy.DEFAULT, + }); - // Do whatever you need with your EIPs here, ie. store their ref for later use - this.publicEIPref.push(eip.ref); + publicSubnetIds = JSON.stringify(vpc.publicSubnets.map((subnet) => subnet.subnetId)); + privateSubnetIds = JSON.stringify(vpc.privateSubnets.map((subnet) => subnet.subnetId)); } - vpc = new ec2.Vpc(this, `${envIdentifier}-VPC`, { - ipAddresses: ec2.IpAddresses.cidr(props.cidrRange), - enableDnsHostnames: true, - enableDnsSupport: true, - vpcName: name, - natGateways: props.publicVpcNatGatewayCount, - maxAzs: props.vpcAzCount, - natGatewayProvider: ec2.NatProvider.gateway({ eipAllocationIds: allocationIds }), - defaultInstanceTenancy: ec2.DefaultInstanceTenancy.DEFAULT, + // now save the VPC in SSM Param + + + const vpcPubSubnetParam = new ssm.StringListParameter(this, `${envIdentifier}-pub-subnet-param`, { + allowedPattern: ".*", + description: `The VPC public subnetIds for OPA Solution: ${props.opaEnv.envName} Environment`, + parameterName: `${envPathIdentifier}/vpc/public-subnets`, + stringListValue: JSON.parse(publicSubnetIds) as string[], }); - publicSubnetIds = JSON.stringify(vpc.publicSubnets.map(subnet => subnet.subnetId)); - privateSubnetIds = JSON.stringify(vpc.privateSubnets.map(subnet => subnet.subnetId)); - } - - // now save the VPC in SSM Param - const vpcParam = new ssm.StringParameter(this, `${envIdentifier}-vpc-param`, { - allowedPattern: ".*", - description: `The VPC ID for OPA Solution: ${props.opaEnv.envName} Environment`, - parameterName: `${envPathIdentifier}/vpc`, - stringValue: vpc.vpcId, - }); - - const vpcPubSubnetParam = new ssm.StringListParameter(this, `${envIdentifier}-pub-subnet-param`, { - allowedPattern: ".*", - description: `The VPC public subnetIds for OPA Solution: ${props.opaEnv.envName} Environment`, - parameterName: `${envPathIdentifier}/vpc/public-subnets`, - stringListValue: (JSON.parse(publicSubnetIds) as string[]), - }); - - const vpcPriSubnetParam = new ssm.StringListParameter(this, `${envIdentifier}-priv-subnet-param`, { - allowedPattern: ".*", - description: `The VPC Private subnetIds for OPA Solution: ${props.opaEnv.envName} Environment`, - parameterName: `${envPathIdentifier}/vpc/private-subnets`, - stringListValue: (JSON.parse(privateSubnetIds) as string[]), - }); - - // Create an S3 bucket to use for network logs - const accessLogBucket = new s3.Bucket(this, `${envIdentifier}-log-bucket`, { - // CDK does not reliably delete this S3 bucket since health check logs - // keep getting added and appear to create a race condition. - // This bucket will be deleted by direct commands instead. 
- removalPolicy: cdk.RemovalPolicy.RETAIN, - objectOwnership: s3.ObjectOwnership.OBJECT_WRITER, - autoDeleteObjects: false, - versioned: false, - enforceSSL: true, - encryption: s3.BucketEncryption.S3_MANAGED, - blockPublicAccess: s3.BlockPublicAccess.BLOCK_ALL, - // serverAccessLogsPrefix: 'serverAccessLogs', - }); - - NagSuppressions.addResourceSuppressions(accessLogBucket, [ - { id: "AwsSolutions-S1", reason: "Access log bucket will not record access since that will introduce cyclic behavior preventing deletion of the bucket" }, - ]); - - // TODO: enable S3 bucket loggingin PROD. For prototype, default logging to CloudWatch Logs will be sufficient - vpc.addFlowLog(`${envIdentifier}-FlowLogs`, { - // destination:ec2.FlowLogDestination.toS3(accessLogBucket), - // trafficType:ec2.FlowLogTrafficType.ALL - }); + const vpcPriSubnetParam = new ssm.StringListParameter(this, `${envIdentifier}-priv-subnet-param`, { + allowedPattern: ".*", + description: `The VPC Private subnetIds for OPA Solution: ${props.opaEnv.envName} Environment`, + parameterName: `${envPathIdentifier}/vpc/private-subnets`, + stringListValue: JSON.parse(privateSubnetIds) as string[], + }); - this.logBucket = accessLogBucket; + // Create VPC Endpoints to access the network - // Create VPC Endpoints to access the network vpc.addGatewayEndpoint("dynamoDBEndpoint", { service: ec2.GatewayVpcEndpointAwsService.DYNAMODB, }); @@ -195,6 +207,47 @@ export class NetworkConstruct extends Construct { vpc.addInterfaceEndpoint("ECRDockerEndpoint", { service: ec2.InterfaceVpcEndpointAwsService.ECR_DOCKER, }); + + } + + const vpcParam = new ssm.StringParameter(this, `${envIdentifier}-vpc-param`, { + allowedPattern: ".*", + description: `The VPC ID for OPA Solution: ${props.opaEnv.envName} Environment`, + parameterName: `${envPathIdentifier}/vpc`, + stringValue: vpc.vpcId, + }); + // Create an S3 bucket to use for network logs + const accessLogBucket = new s3.Bucket(this, `${envIdentifier}-log-bucket`, { + // CDK does not reliably delete this S3 bucket since health check logs + // keep getting added and appear to create a race condition. + // This bucket will be deleted by direct commands instead. + removalPolicy: cdk.RemovalPolicy.RETAIN, + objectOwnership: s3.ObjectOwnership.OBJECT_WRITER, + autoDeleteObjects: false, + versioned: false, + enforceSSL: true, + encryption: s3.BucketEncryption.S3_MANAGED, + blockPublicAccess: s3.BlockPublicAccess.BLOCK_ALL, + // serverAccessLogsPrefix: 'serverAccessLogs', + }); + + NagSuppressions.addResourceSuppressions(accessLogBucket, [ + { + id: "AwsSolutions-S1", + reason: + "Access log bucket will not record access since that will introduce cyclic behavior preventing deletion of the bucket", + }, + ]); + + // TODO: enable S3 bucket loggingin PROD. For prototype, default logging to CloudWatch Logs will be sufficient + vpc.addFlowLog(`${envIdentifier}-FlowLogs`, { + // destination:ec2.FlowLogDestination.toS3(accessLogBucket), + // trafficType:ec2.FlowLogTrafficType.ALL + }); + + this.logBucket = accessLogBucket; + + // *** ADD more as required kinesis, MSK etc. 
*** // Create security group with external specific access @@ -203,14 +256,32 @@ export class NetworkConstruct extends Construct { allowAllOutbound: true, description: "security group for allowed IPs", }); + NagSuppressions.addResourceSuppressions(allowedIpsSg, [ + { + id: "AwsSolutions-EC23", + reason: "Allow public access for training and workshop reasons", + }, + ]); if (props.allowedIPs) { for (const ip of props.allowedIPs) { - if (ip.startsWith('pl-')) { + if (ip.startsWith("pl-")) { // add the prefix list to the security group - allowedIpsSg.addIngressRule(ec2.Peer.prefixList(ip), ec2.Port.tcp(5432), "allow DB access from Allowed Prefix List"); - allowedIpsSg.addIngressRule(ec2.Peer.prefixList(ip), ec2.Port.tcp(80), "allow HTTP access from Allowed Prefix List"); - allowedIpsSg.addIngressRule(ec2.Peer.prefixList(ip), ec2.Port.tcp(443), "allow HTTPS access from Allowed Prefix List"); + allowedIpsSg.addIngressRule( + ec2.Peer.prefixList(ip), + ec2.Port.tcp(5432), + "allow DB access from Allowed Prefix List" + ); + allowedIpsSg.addIngressRule( + ec2.Peer.prefixList(ip), + ec2.Port.tcp(80), + "allow HTTP access from Allowed Prefix List" + ); + allowedIpsSg.addIngressRule( + ec2.Peer.prefixList(ip), + ec2.Port.tcp(443), + "allow HTTPS access from Allowed Prefix List" + ); } else { // add the ip to the security group allowedIpsSg.addIngressRule(ec2.Peer.ipv4(ip), ec2.Port.tcp(5432), "allow DB access from Allowed IP"); @@ -222,6 +293,5 @@ export class NetworkConstruct extends Construct { this.allowedIpsSg = allowedIpsSg; this.vpc = vpc; this.vpcParam = vpcParam; - } } diff --git a/iac/roots/opa-common-constructs/src/rds-construct.ts b/iac/roots/opa-common-constructs/src/rds-construct.ts index 74225188..f656cfb3 100644 --- a/iac/roots/opa-common-constructs/src/rds-construct.ts +++ b/iac/roots/opa-common-constructs/src/rds-construct.ts @@ -12,7 +12,7 @@ import { NagSuppressions } from "cdk-nag"; /* eslint-disable @typescript-eslint/no-empty-interface */ export interface RdsConstructProps extends cdk.StackProps { readonly opaEnv: OPAEnvironmentParams; - readonly vpc: cdk.aws_ec2.Vpc; + readonly vpc: cdk.aws_ec2.IVpc; readonly kmsKey: cdk.aws_kms.IKey; readonly instanceType: ec2.InstanceType; } diff --git a/iac/roots/opa-ecs-ec2-environment/.gitignore b/iac/roots/opa-ecs-ec2-environment/.gitignore new file mode 100644 index 00000000..60f6b2f9 --- /dev/null +++ b/iac/roots/opa-ecs-ec2-environment/.gitignore @@ -0,0 +1,8 @@ +*.js +!jest.config.js +*.d.ts +node_modules +output +# CDK asset staging directory +.cdk.staging +cdk.out diff --git a/iac/roots/opa-ecs-ec2-environment/.npmignore b/iac/roots/opa-ecs-ec2-environment/.npmignore new file mode 100644 index 00000000..c1d6d45d --- /dev/null +++ b/iac/roots/opa-ecs-ec2-environment/.npmignore @@ -0,0 +1,6 @@ +*.ts +!*.d.ts + +# CDK asset staging directory +.cdk.staging +cdk.out diff --git a/iac/roots/opa-ecs-ec2-environment/README.md b/iac/roots/opa-ecs-ec2-environment/README.md new file mode 100644 index 00000000..c38a1dc6 --- /dev/null +++ b/iac/roots/opa-ecs-ec2-environment/README.md @@ -0,0 +1,12 @@ +# OPA ECS IaC + +This folder contains CDK code that is used by Backstage when creating new ECS (with EC2) Providers.
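A note on configuration: this provider is driven entirely by environment variables that the CDK app in this folder reads at synth time (see src/opa-ecs-env-app.ts and src/opa-ecs-environment-stack.ts later in this patch). The sketch below only restates that contract; the `checkEnv` helper itself is hypothetical and not part of the repository.

```ts
// Sketch only: variable names are taken from the stack code added in this patch.
// ENV_NAME, AWS_ACCOUNT_ID, PLATFORM_ROLE_ARN and PIPELINE_ROLE_ARN are validated by the app entry point.
// PREFIX ("opa"), AWS_DEFAULT_REGION ("us-east-1"), ENV_CIDR ("10.0.0.0/24") and EC2_MAX_CAPACITY ("6") have defaults.
// EC2_INSTANCE_TYPE (for example "t3.medium") is read by the stack without a default.
const required = ["ENV_NAME", "AWS_ACCOUNT_ID", "PLATFORM_ROLE_ARN", "PIPELINE_ROLE_ARN"];

function checkEnv(): void {
  for (const name of required) {
    if (!process.env[name]) {
      throw new Error(`${name} environment variable is missing and mandatory`);
    }
  }
}

checkEnv();
```

With those variables exported, the standard CDK commands listed below are enough to synthesize and deploy the provider.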
+ +## CDK Useful commands + +* `npm run build` compile typescript to js +* `npm run watch` watch for changes and compile +* `npm run test` perform the jest unit tests +* `cdk deploy` deploy this stack to your default AWS account/region +* `cdk diff` compare deployed stack with current state +* `cdk synth` emits the synthesized CloudFormation template diff --git a/iac/roots/opa-ecs-ec2-environment/cdk.json b/iac/roots/opa-ecs-ec2-environment/cdk.json new file mode 100644 index 00000000..2b54805a --- /dev/null +++ b/iac/roots/opa-ecs-ec2-environment/cdk.json @@ -0,0 +1,55 @@ +{ + "app": "npx ts-node --prefer-ts-exts src/opa-ecs-env-app.ts", + "watch": { + "include": [ + "**" + ], + "exclude": [ + "README.md", + "cdk*.json", + "**/*.d.ts", + "**/*.js", + "tsconfig.json", + "package*.json", + "yarn.lock", + "node_modules", + "test" + ] + }, + "context": { + "@aws-cdk/aws-lambda:recognizeLayerVersion": true, + "@aws-cdk/core:checkSecretUsage": true, + "@aws-cdk/core:target-partitions": [ + "aws", + "aws-cn" + ], + "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true, + "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true, + "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true, + "@aws-cdk/aws-iam:minimizePolicies": true, + "@aws-cdk/core:validateSnapshotRemovalPolicy": true, + "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true, + "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true, + "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true, + "@aws-cdk/aws-apigateway:disableCloudWatchRole": true, + "@aws-cdk/core:enablePartitionLiterals": true, + "@aws-cdk/aws-events:eventsTargetQueueSameAccount": true, + "@aws-cdk/aws-iam:standardizedServicePrincipals": true, + "@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true, + "@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true, + "@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true, + "@aws-cdk/aws-route53-patters:useCertificate": true, + "@aws-cdk/customresources:installLatestAwsSdkDefault": false, + "@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true, + "@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true, + "@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true, + "@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true, + "@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true, + "@aws-cdk/aws-redshift:columnId": true, + "@aws-cdk/aws-stepfunctions-tasks:enableEmrServicePolicyV2": true, + "@aws-cdk/aws-ec2:restrictDefaultSecurityGroup": true, + "@aws-cdk/aws-apigateway:requestValidatorUniqueId": true, + "@aws-cdk/aws-kms:aliasNameRef": true, + "@aws-cdk/core:includePrefixInUniqueNameGeneration": true + } +} \ No newline at end of file diff --git a/iac/roots/opa-ecs-ec2-environment/jest.config.js b/iac/roots/opa-ecs-ec2-environment/jest.config.js new file mode 100644 index 00000000..08263b89 --- /dev/null +++ b/iac/roots/opa-ecs-ec2-environment/jest.config.js @@ -0,0 +1,8 @@ +module.exports = { + testEnvironment: 'node', + roots: ['<rootDir>/test'], + testMatch: ['**/*.test.ts'], + transform: { + '^.+\\.tsx?$': 'ts-jest' + } +}; diff --git a/iac/roots/opa-ecs-ec2-environment/package.json b/iac/roots/opa-ecs-ec2-environment/package.json new file mode 100644 index 00000000..891d5658 --- /dev/null +++ b/iac/roots/opa-ecs-ec2-environment/package.json @@ -0,0 +1,28 @@ +{ + "name": "@aws/aws-app-development-ecs-ec2-environment", + "version": "0.3.0", + "description": "A stack to create an ECS 
environment for OPA", + "license": "Apache-2.0", + "scripts": { + "build": "tsc", + "watch": "tsc -w", + "test": "jest", + "cdk": "cdk" + }, + "devDependencies": { + "@types/jest": "^29.5.2", + "@types/node": "^20.3.1", + "jest": "^29.5.0", + "ts-jest": "^29.1.0", + "aws-cdk": "2.120.0", + "ts-node": "^10.9.1", + "typescript": "~5.0.4", + "@aws/aws-app-development-common-constructs": "0.3.0" + }, + "dependencies": { + "aws-cdk-lib": "2.120.0", + "constructs": "^10.0.0", + "source-map-support": "^0.5.21", + "@aws/aws-app-development-common-constructs": "0.3.0" + } +} diff --git a/iac/roots/opa-ecs-ec2-environment/src/constructs/ecs-env-operations-role-construct.ts b/iac/roots/opa-ecs-ec2-environment/src/constructs/ecs-env-operations-role-construct.ts new file mode 100644 index 00000000..7391d38e --- /dev/null +++ b/iac/roots/opa-ecs-ec2-environment/src/constructs/ecs-env-operations-role-construct.ts @@ -0,0 +1,225 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from "aws-cdk-lib"; +import * as ec2 from "aws-cdk-lib/aws-ec2"; +import * as ecs from "aws-cdk-lib/aws-ecs"; +import * as iam from "aws-cdk-lib/aws-iam"; +import * as kms from "aws-cdk-lib/aws-kms"; +import * as ssm from "aws-cdk-lib/aws-ssm"; +import { Construct } from "constructs"; +import { OPAEnvironmentParams } from "@aws/aws-app-development-common-constructs"; + +/* eslint-disable @typescript-eslint/no-empty-interface */ +export interface ECSOperationsConstructProps extends cdk.StackProps { + readonly opaEnv: OPAEnvironmentParams; + KMSkey: kms.IKey; + vpcCollection: ec2.IVpc[]; + ecsCollection: ecs.ICluster[]; + assumedBy: string; + auditTable: string; +} + +const defaultProps: Partial = {}; + +export class ECSOperationsConstruct extends Construct { + public IAMRole: iam.Role; + public operationsRoleParam: ssm.StringParameter; + public operationsRoleArnParam: ssm.StringParameter; + + constructor(parent: Construct, name: string, props: ECSOperationsConstructProps) { + super(parent, name); + + /* eslint-disable @typescript-eslint/no-unused-vars */ + props = { ...defaultProps, ...props }; + + const envIdentifier = `${props.opaEnv.prefix.toLowerCase()}-${props.opaEnv.envName}`; + const envPathIdentifier = `/${props.opaEnv.prefix.toLowerCase()}/${props.opaEnv.envName.toLowerCase()}`; + + // Create Iam role + this.IAMRole = new iam.Role(this, `${envIdentifier}-role`, { + assumedBy: new iam.ArnPrincipal(props.assumedBy), + roleName: name, + managedPolicies: [ + iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonEC2ContainerRegistryFullAccess"), + iam.ManagedPolicy.fromAwsManagedPolicyName("CloudWatchFullAccess"), + ], + maxSessionDuration: cdk.Duration.seconds(43200), + }); + + // Add Secret and SSM access + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "secretsmanager:CreateSecret", + "secretsmanager:GetSecretValue", + "secretsmanager:PutSecretValue", + "secretsmanager:UpdateSecret", + "secretsmanager:TagResource", + ], + effect: iam.Effect.ALLOW, + resources: [`arn:aws:secretsmanager:*:${props.opaEnv.awsAccount}:secret:*`], + }) + ); + this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonSSMReadOnlyAccess")); + + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "s3:GetObject", + "s3:GetObjectAttributes" + ], + effect: iam.Effect.ALLOW, + resources: ["arn:aws:s3:::*/packaged.yaml"], + conditions: { + "StringEquals": { + "aws:ResourceAccount": props.opaEnv.awsAccount + } + } + }) + 
); + + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "resource-groups:ListGroupResources" + ], + effect: iam.Effect.ALLOW, + resources: [`arn:aws:resource-groups:*:${props.opaEnv.awsAccount}:group/*`], + }) + ); + + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: ["tag:GetResources"], + effect: iam.Effect.ALLOW, + resources: ["*"], + }) + ); + + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "dynamodb:Scan", + "dynamodb:PutItem", + ], + effect: iam.Effect.ALLOW, + resources: [`arn:aws:dynamodb:*:${props.opaEnv.awsAccount}:table/*`], + }) + ); + + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: ["kms:Decrypt"], + effect: iam.Effect.ALLOW, + resources: [props.KMSkey.keyArn], + }) + ); + + // Set access for vpc related resources + for (const vpc of props.vpcCollection) { + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: ["ec2:*"], + effect: iam.Effect.ALLOW, + resources: [`arn:aws:ec2:*:${props.opaEnv.awsAccount}:vpc/${vpc.vpcId}`], + }) + ); + } + + for (const ecs of props.ecsCollection) { + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: ["ecs:*"], + effect: iam.Effect.ALLOW, + resources: ["*"], // Fix for ecs cluster restriction doc https://docs.aws.amazon.com/AmazonECS/latest/userguide/security_iam_id-based-policy-examples.html + conditions: { + "ArnEquals": { + "ecs:cluster": ecs.clusterArn + } + } + }) + ); + } + + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: ["ecs:DescribeTaskDefinition", "ecs:RegisterTaskDefinition"], + effect: iam.Effect.ALLOW, + resources: ["*"], // Fix for ecs cluster restriction doc https://docs.aws.amazon.com/AmazonECS/latest/userguide/security_iam_id-based-policy-examples.html + }) + ); + + + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: ["iam:PassRole"], + effect: iam.Effect.ALLOW, + resources: [`arn:aws:iam::${props.opaEnv.awsAccount}:role/*`], + conditions: { + "StringEquals": { + "iam:PassedToService": "ecs-tasks.amazonaws.com" + } + } + }) + ); + + + // Write Audit access + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "dynamodb:List*", + "dynamodb:DescribeStream", + "dynamodb:DescribeTable", + "dynamodb:Put*", + ], + effect: iam.Effect.ALLOW, + resources: [`arn:aws:dynamodb:*:${props.opaEnv.awsAccount}:${props.auditTable}`], + }) + ); + + // allow creation of a Resource Group to track application resources via tags + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "resource-groups:CreateGroup" + ], + effect: iam.Effect.ALLOW, + resources: ["*"], // CreateGroup does not support resource-level permissions and requires a wildcard + }) + ); + + // Add managed role policies to support SAM template deployment for non-root roles + // + // In a production scenario, a customized IAM policy granting specific permissions should be created. 
+ // See https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-permissions-cloudformation.html + + // Required to remove stacks of Apps - delete App + this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AWSCloudFormationFullAccess")); + // this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("IAMFullAccess")); + // this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AWSLambda_FullAccess")); + // this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonAPIGatewayAdministrator")); + // this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonS3FullAccess")); + + // now save the Operations Role in SSM Param + const roleParam = new ssm.StringParameter(this, `${envIdentifier}-role-param`, { + allowedPattern: ".*", + description: `The Operations Role for OPA Solution: ${props.opaEnv.envName} Environment`, + parameterName: `${envPathIdentifier}/operations-role`, + stringValue: this.IAMRole.roleName, + }); + + const roleArnParam = new ssm.StringParameter(this, `${envIdentifier}-role-arn-param`, { + allowedPattern: ".*", + description: `The Operations Role Arn for OPA Solution: ${props.opaEnv.envName} Environment`, + parameterName: `${envPathIdentifier}/operations-role-arn`, + stringValue: this.IAMRole.roleArn, + }); + + this.operationsRoleParam = roleParam; + this.operationsRoleArnParam = roleArnParam; + + } + +} diff --git a/iac/roots/opa-ecs-ec2-environment/src/constructs/ecs-env-provisioning-role-construct.ts b/iac/roots/opa-ecs-ec2-environment/src/constructs/ecs-env-provisioning-role-construct.ts new file mode 100644 index 00000000..11c7ab3d --- /dev/null +++ b/iac/roots/opa-ecs-ec2-environment/src/constructs/ecs-env-provisioning-role-construct.ts @@ -0,0 +1,219 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from "aws-cdk-lib"; +import * as ec2 from "aws-cdk-lib/aws-ec2"; +import * as ecs from "aws-cdk-lib/aws-ecs"; +import * as iam from "aws-cdk-lib/aws-iam"; +import * as kms from "aws-cdk-lib/aws-kms"; +import * as ssm from "aws-cdk-lib/aws-ssm"; +import { Construct } from "constructs"; +import { OPAEnvironmentParams } from "@aws/aws-app-development-common-constructs"; + +/* eslint-disable @typescript-eslint/no-empty-interface */ +export interface ECSProvisioningConstructProps extends cdk.StackProps { + readonly opaEnv: OPAEnvironmentParams; + KMSkey: kms.IKey; + vpcCollection: ec2.IVpc[]; + ecsCollection: ecs.ICluster[]; + assumedBy: string; + auditTable: string; +} + +const defaultProps: Partial<ECSProvisioningConstructProps> = {}; + +export class ECSProvisioningConstruct extends Construct { + public IAMRole: iam.Role; + public provisioningRoleParam: ssm.StringParameter; + public provisioningRoleArnParam: ssm.StringParameter; + constructor(parent: Construct, name: string, props: ECSProvisioningConstructProps) { + super(parent, name); + + /* eslint-disable @typescript-eslint/no-unused-vars */ + props = { ...defaultProps, ...props }; + + const envIdentifier = `${props.opaEnv.prefix.toLowerCase()}-${props.opaEnv.envName}`; + const envPathIdentifier = `/${props.opaEnv.prefix.toLowerCase()}/${props.opaEnv.envName.toLowerCase()}`; + + // Create Iam role + this.IAMRole = new iam.Role(this, `${envIdentifier}-role`, { + assumedBy: new iam.ArnPrincipal(props.assumedBy), + roleName: name, + managedPolicies: [ + // !FIXME: Need to scope down the role from PowerUserAccess. 
This workaround is to allow provisioning for sprint 2 demo + iam.ManagedPolicy.fromAwsManagedPolicyName("PowerUserAccess"), + iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonEC2ContainerRegistryFullAccess"), + iam.ManagedPolicy.fromAwsManagedPolicyName("CloudWatchFullAccess"), + ], + maxSessionDuration: cdk.Duration.seconds(43200), + }); + + // Add Secret and SSM access + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "secretsmanager:CreateSecret", + "secretsmanager:GetSecretValue", + "secretsmanager:PutSecretValue", + "secretsmanager:UpdateSecret", + "secretsmanager:TagResource", + ], + effect: iam.Effect.ALLOW, + resources: [`arn:aws:secretsmanager:*:${props.opaEnv.awsAccount}:secret:*`], + }) + ); + this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonSSMReadOnlyAccess")); + + // Bucket creation and tagging and reading for serverless deployments + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "s3:CreateBucket", + "s3:PutBucketTagging" + ], + effect: iam.Effect.ALLOW, + resources: ["*"], + conditions: { + "StringEquals": { + "aws:ResourceAccount": props.opaEnv.awsAccount + } + } + }) + ); + + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "s3:GetObject", + "s3:GetObjectAttributes" + ], + effect: iam.Effect.ALLOW, + resources: ["arn:aws:s3:::*/packaged.yaml"], + conditions: { + "StringEquals": { + "aws:ResourceAccount": props.opaEnv.awsAccount + } + } + }) + ); + + // Add resource group access + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "resource-groups:ListGroupResources", + "resource-groups:Tag", + "resource-groups:DeleteGroup" + ], + effect: iam.Effect.ALLOW, + resources: [`arn:aws:resource-groups:*:${props.opaEnv.awsAccount}:group/*`], + }) + ); + + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: ["tag:GetResources"], + effect: iam.Effect.ALLOW, + resources: ["*"], + }) + ); + + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: ["kms:Decrypt"], + effect: iam.Effect.ALLOW, + resources: [props.KMSkey.keyArn], + }) + ); + + // Set access for vpc related resources + for (const vpc of props.vpcCollection) { + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: ["ec2:*"], + effect: iam.Effect.ALLOW, + resources: [`arn:aws:ec2:*:${props.opaEnv.awsAccount}:vpc/${vpc.vpcId}`], + }) + ); + } + + for (const ecs of props.ecsCollection) { + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: ["ecs:*"], + effect: iam.Effect.ALLOW, + // resources: [ecs.clusterArn, ecs.clusterArn + "/*"], + resources: ["*"], // Fix for ecs cluster restriction doc https://docs.aws.amazon.com/AmazonECS/latest/userguide/security_iam_id-based-policy-examples.html + conditions: { + "ArnEquals": { + "ecs:cluster": ecs.clusterArn + } + } + }) + ); + } + + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: ["ecs:DescribeTaskDefinition", "ecs:RegisterTaskDefinition"], + effect: iam.Effect.ALLOW, + // resources: [ecs.clusterArn, ecs.clusterArn + "/*"], + resources: ["*"], // Fix for ecs cluster restriction doc https://docs.aws.amazon.com/AmazonECS/latest/userguide/security_iam_id-based-policy-examples.html + }) + ); + + // Write Audit access + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "dynamodb:List*", + "dynamodb:DescribeStream", + "dynamodb:DescribeTable", + "dynamodb:Put*", + "dynamodb:CreateTable", // for Terraform state management + "dynamodb:Update*", + ], + effect: iam.Effect.ALLOW, + resources: 
[`arn:aws:dynamodb:*:${props.opaEnv.awsAccount}:${props.auditTable}`], + }) + ); + + // allow creation of a Resource Group to track application resources via tags + this.IAMRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "resource-groups:CreateGroup" + ], + effect: iam.Effect.ALLOW, + resources: ["*"], // CreateGroup does not support resource-level permissions and requires a wildcard + }) + ); + + // Add managed role policies to support SAM template deployment for non-root roles + // + // In a production scenario, a customized IAM policy granting specific permissions should be created. + // See https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-permissions-cloudformation.html + this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AWSCloudFormationFullAccess")); + this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("IAMFullAccess")); + this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonS3FullAccess")); + + // now save the VPC in SSM Param + const roleParam = new ssm.StringParameter(this, `${envIdentifier}-role-param`, { + allowedPattern: ".*", + description: `The Provisioning Role for OPA Solution: ${props.opaEnv.envName} Environment`, + parameterName: `${envPathIdentifier}/provisioning-role`, + stringValue: this.IAMRole.roleName, + }); + + const roleArnParam = new ssm.StringParameter(this, `${envIdentifier}-role-arn-param`, { + allowedPattern: ".*", + description: `The Provisioning Role Arn for OPA Solution: ${props.opaEnv.envName} Environment`, + parameterName: `${envPathIdentifier}/provisioning-role-arn`, + stringValue: this.IAMRole.roleArn, + }); + + this.provisioningRoleParam = roleParam; + this.provisioningRoleArnParam = roleArnParam; + } + +} diff --git a/iac/roots/opa-ecs-ec2-environment/src/opa-ecs-env-app.ts b/iac/roots/opa-ecs-ec2-environment/src/opa-ecs-env-app.ts new file mode 100644 index 00000000..f8066ad8 --- /dev/null +++ b/iac/roots/opa-ecs-ec2-environment/src/opa-ecs-env-app.ts @@ -0,0 +1,53 @@ +#!/usr/bin/env node + +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from "aws-cdk-lib"; +import "source-map-support/register"; +import { OPAECSEnvStack } from "./opa-ecs-environment-stack"; +import { makeRandom } from "@aws/aws-app-development-common-constructs"; + +/** + * Main application function, make it async so it can call asnyc functions properly. 
+ */ +async function main() { + const app = new cdk.App(); + + console.log("Loading Configurations..."); + + const account = process.env.AWS_ACCOUNT_ID as string; + const region = process.env.AWS_DEFAULT_REGION as string; + + const env = { region, account }; + + if (!process.env.ENV_NAME) + throw new Error("ENV_NAME Environment variable is missing and mandatory"); + + if (!process.env.AWS_ACCOUNT_ID) + throw new Error("AWS_ACCOUNT_ID Environment variable is missing and mandatory"); + + if (!process.env.PLATFORM_ROLE_ARN) + throw new Error("PLATFORM_ROLE_ARN Environment variable is missing and mandatory"); + + if (!process.env.PIPELINE_ROLE_ARN) + throw new Error("PIPELINE_ROLE_ARN Environment variable is missing and mandatory"); + + const prefix = process.env.PREFIX as string || "opa"; + // generate unique environment identifier + const envID = makeRandom(4); + console.log("Generating unique Environment identifier: " + envID) + + new OPAECSEnvStack(app, `${prefix}-${process.env.ENV_NAME}-Stack`, { + // stackName: `opa-ecs-environment`, // Do not use stack name to get a generated stack name so multiple stacks can be created + description: `${envID} ECS Environment for OPA(AWS App Development)`, + uniqueEnvIdentifier: envID, + env, + }); + + + app.synth(); +} + + +main(); diff --git a/iac/roots/opa-ecs-ec2-environment/src/opa-ecs-environment-stack.ts b/iac/roots/opa-ecs-ec2-environment/src/opa-ecs-environment-stack.ts new file mode 100644 index 00000000..a8403d07 --- /dev/null +++ b/iac/roots/opa-ecs-ec2-environment/src/opa-ecs-environment-stack.ts @@ -0,0 +1,176 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { + DynamoDBConstruct, + EcsClusterConstruct, + NetworkConstruct, + OPAEnvironmentParams, +} from "@aws/aws-app-development-common-constructs"; +import * as cdk from "aws-cdk-lib"; +import * as kms from "aws-cdk-lib/aws-kms"; +import * as ssm from "aws-cdk-lib/aws-ssm"; +import { Construct } from "constructs"; +import { ECSOperationsConstruct } from "./constructs/ecs-env-operations-role-construct"; +import { ECSProvisioningConstruct } from "./constructs/ecs-env-provisioning-role-construct"; +import * as ec2 from "aws-cdk-lib/aws-ec2"; + +export interface OPAECSEnvStackProps extends cdk.StackProps { + uniqueEnvIdentifier: string; +} + +export class OPAECSEnvStack extends cdk.Stack { + constructor(scope: Construct, id: string, props: OPAECSEnvStackProps) { + super(scope, id, props); + + const prefix = (process.env.PREFIX as string) || "opa"; + const envName = process.env.ENV_NAME as string; + const awsAccount = process.env.AWS_ACCOUNT_ID as string; + const platformRoleArn = process.env.PLATFORM_ROLE_ARN as string; + const pipelineRoleArn = process.env.PIPELINE_ROLE_ARN as string; + const awsRegion = (process.env.AWS_DEFAULT_REGION as string) || "us-east-1"; + const cidrInput = (process.env.ENV_CIDR as string) || "10.0.0.0/24"; + const ec2InstanceType = new ec2.InstanceType( + process.env.EC2_INSTANCE_TYPE as string + ); + const ec2MaxCapacity = parseInt(process.env.EC2_MAX_CAPACITY || "6", 10); + + // Creating environment params object + + const opaEnvParams: OPAEnvironmentParams = { + envName: envName.toLowerCase(), + awsRegion: awsRegion, + awsAccount: awsAccount, + prefix: prefix.toLowerCase(), + }; + + const envIdentifier = opaEnvParams.envName; + const envPathIdentifier = `/${envIdentifier}`; + + // Create encryption key for all data at rest encryption + const key = new kms.Key(this, `${envIdentifier}-key`, 
{ + alias: `${envIdentifier}-key`, + enableKeyRotation: true, + removalPolicy: cdk.RemovalPolicy.DESTROY, + pendingWindow: cdk.Duration.days(8), + }); + + // Save KMS key arn in an SSM Parameter + new ssm.StringParameter(this, `${envIdentifier}-key-param`, { + allowedPattern: ".*", + description: `The KMS Key for ECS Solution: ${envIdentifier} Environment`, + parameterName: `${envPathIdentifier}/kms-key`, + stringValue: key.keyArn, + }); + + // Create underlying network construct + const network = new NetworkConstruct(this, envIdentifier, { + opaEnv: opaEnvParams, + cidrRange: cidrInput, + isIsolated: false, + publicVpcNatGatewayCount: 3, + vpcAzCount: 3, + }); + + // Create ECS EC2 Cluster + const ecsAppCluster = new EcsClusterConstruct(this, `${envIdentifier}-app-runtime`, { + opaEnv: opaEnvParams, + vpc: network.vpc, + isFargateCluster: false, + ec2InstanceType, + ec2MaxCapacity, + encryptionKey: key, + }); + + //create audit table + const auditTableConstruct = new DynamoDBConstruct(this, "audit-table", { + opaEnv: opaEnvParams, + tableName: `${envIdentifier}-audit`, + kmsKey: key, + }); + + // Create pipeline provisioning role for the environment + const provisioningRoleConstruct = new ECSProvisioningConstruct( + this, + `${opaEnvParams.prefix}-${envIdentifier}-provisioning-role`, + { + opaEnv: opaEnvParams, + KMSkey: key, + vpcCollection: [network.vpc], + ecsCollection: [ecsAppCluster.cluster], + assumedBy: pipelineRoleArn, + auditTable: auditTableConstruct.table.tableName, + } + ); + + // Create operations role for the environment + const operationsRoleConstruct = new ECSOperationsConstruct( + this, + `${opaEnvParams.prefix}-${envIdentifier}-operations-role`, + { + opaEnv: opaEnvParams, + KMSkey: key, + vpcCollection: [network.vpc], + ecsCollection: [ecsAppCluster.cluster], + assumedBy: platformRoleArn, + auditTable: auditTableConstruct.table.tableName, + } + ); + + // save the unique environment identifier + const uniqueEnvId = new ssm.StringParameter(this, `${envIdentifier}-unique-id-param`, { + allowedPattern: ".*", + description: `The Unique ID for: ${opaEnvParams.envName} Environment`, + parameterName: `${envPathIdentifier}/unique-id`, + stringValue: props.uniqueEnvIdentifier, + }); + + // Printing outputs + new cdk.CfnOutput(this, "EnvironmentName", { + value: envName, + }); + + // Printing the unique environment ID + new cdk.CfnOutput(this, "EnvironmentID", { + value: uniqueEnvId.stringValue, + }); + + // Printing the unique environment ID + new cdk.CfnOutput(this, "VPC", { + value: network.vpcParam.parameterName, + }); + + // Printing the ECS Cluster name + new cdk.CfnOutput(this, "ClusterName", { + value: ecsAppCluster.clusterParam.parameterName, + }); + + // Printing audit table + new cdk.CfnOutput(this, "AuditTable", { + value: auditTableConstruct.tableParam.parameterName, + }); + + // Print role information + new cdk.CfnOutput(this, "ProvisioningRole", { + value: provisioningRoleConstruct.provisioningRoleParam.parameterName, + }); + + new cdk.CfnOutput(this, "ProvisioningRoleARN", { + value: provisioningRoleConstruct.provisioningRoleArnParam.parameterName, + }); + + new cdk.CfnOutput(this, "OperationsRole", { + value: operationsRoleConstruct.operationsRoleParam.parameterName, + }); + + new cdk.CfnOutput(this, "OperationsRoleARN", { + value: operationsRoleConstruct.operationsRoleArnParam.parameterName, + }); + + // print the stack name as a Cloudformation output + new cdk.CfnOutput(this, `StackName`, { + value: this.stackName, + description: "The ECS CF Stack name", + }); 
+ } +} diff --git a/iac/roots/opa-ecs-environment/README.md b/iac/roots/opa-ecs-environment/README.md index 320efc02..b7dced59 100644 --- a/iac/roots/opa-ecs-environment/README.md +++ b/iac/roots/opa-ecs-environment/README.md @@ -1,10 +1,8 @@ -# Welcome to your CDK TypeScript project +# OPA ECS IaC -This is a blank project for CDK development with TypeScript. +This folder contains CDK code that is used by Backstage when creating new ECS (Fargate) Providers. -The `cdk.json` file tells the CDK Toolkit how to execute your app. - -## Useful commands +## CDK Useful commands * `npm run build` compile typescript to js * `npm run watch` watch for changes and compile diff --git a/iac/roots/opa-ecs-environment/package.json b/iac/roots/opa-ecs-environment/package.json index f8aa46e4..ab698d94 100644 --- a/iac/roots/opa-ecs-environment/package.json +++ b/iac/roots/opa-ecs-environment/package.json @@ -1,8 +1,7 @@ { "name": "@aws/aws-app-development-ecs-environment", - "version": "0.1.0", + "version": "0.3.0", "description": "A stack to create an ECS environment for OPA", - "private": true, "license": "Apache-2.0", "scripts": { "build": "tsc", @@ -15,15 +14,15 @@ "@types/node": "^20.3.1", "jest": "^29.5.0", "ts-jest": "^29.1.0", - "aws-cdk": "2.88.0", + "aws-cdk": "2.120.0", "ts-node": "^10.9.1", "typescript": "~5.0.4", - "@aws/aws-app-development-common-constructs": "1.0.0" + "@aws/aws-app-development-common-constructs": "0.3.0" }, "dependencies": { - "aws-cdk-lib": "2.88.0", + "aws-cdk-lib": "2.120.0", "constructs": "^10.0.0", "source-map-support": "^0.5.21", - "@aws/aws-app-development-common-constructs": "1.0.0" + "@aws/aws-app-development-common-constructs": "0.3.0" } } diff --git a/iac/roots/opa-ecs-environment/src/constructs/ecs-env-operations-role-construct.ts b/iac/roots/opa-ecs-environment/src/constructs/ecs-env-operations-role-construct.ts index 0abd1a9a..9c90f638 100644 --- a/iac/roots/opa-ecs-environment/src/constructs/ecs-env-operations-role-construct.ts +++ b/iac/roots/opa-ecs-environment/src/constructs/ecs-env-operations-role-construct.ts @@ -14,7 +14,7 @@ import { OPAEnvironmentParams } from "@aws/aws-app-development-common-constructs export interface ECSOperationsConstructProps extends cdk.StackProps { readonly opaEnv: OPAEnvironmentParams; KMSkey: kms.IKey; - vpcCollection: ec2.Vpc[]; + vpcCollection: ec2.IVpc[]; ecsCollection: ecs.ICluster[]; assumedBy: string; auditTable: string; @@ -202,7 +202,7 @@ export class ECSOperationsConstruct extends Construct { // this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonAPIGatewayAdministrator")); // this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonS3FullAccess")); - // now save the VPC in SSM Param + // now save the Operations Role in SSM Param const roleParam = new ssm.StringParameter(this, `${envIdentifier}-role-param`, { allowedPattern: ".*", description: `The Operations Role for OPA Solution: ${props.opaEnv.envName} Environment`, diff --git a/iac/roots/opa-ecs-environment/src/constructs/ecs-env-provisioning-role-construct.ts b/iac/roots/opa-ecs-environment/src/constructs/ecs-env-provisioning-role-construct.ts index 141c00b5..2f158c11 100644 --- a/iac/roots/opa-ecs-environment/src/constructs/ecs-env-provisioning-role-construct.ts +++ b/iac/roots/opa-ecs-environment/src/constructs/ecs-env-provisioning-role-construct.ts @@ -14,7 +14,7 @@ import { OPAEnvironmentParams } from "@aws/aws-app-development-common-constructs export interface ECSProvisioningConstructProps extends 
cdk.StackProps { readonly opaEnv: OPAEnvironmentParams; KMSkey: kms.IKey; - vpcCollection: ec2.Vpc[]; + vpcCollection: ec2.IVpc[]; ecsCollection: ecs.ICluster[]; assumedBy: string; auditTable: string; @@ -197,7 +197,7 @@ export class ECSProvisioningConstruct extends Construct { this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("IAMFullAccess")); this.IAMRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonS3FullAccess")); - // now save the VPC in SSM Param + // now save the Provisioning Role in SSM Param const roleParam = new ssm.StringParameter(this, `${envIdentifier}-role-param`, { allowedPattern: ".*", description: `The Provisioning Role for OPA Solution: ${props.opaEnv.envName} Environment`, diff --git a/iac/roots/opa-ecs-environment/src/ecs-input.ts b/iac/roots/opa-ecs-environment/src/ecs-input.ts new file mode 100644 index 00000000..d0262cfc --- /dev/null +++ b/iac/roots/opa-ecs-environment/src/ecs-input.ts @@ -0,0 +1,60 @@ +#!/usr/bin/env node + +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +export enum ECS_ENV_VARS { + ENV_NAME = "ENV_NAME", + AWS_ACCOUNT_ID = "AWS_ACCOUNT_ID", + AWS_DEFAULT_REGION = "AWS_DEFAULT_REGION", + PLATFORM_ROLE_ARN = "PLATFORM_ROLE_ARN", + PIPELINE_ROLE_ARN = "PIPELINE_ROLE_ARN", + PREFIX = "PREFIX", +} + +export enum ECS_OPTIONAL_ENV_VARS { + VPC_ID = "VPC_ID", + ENV_CIDR = "ENV_CIDR" + +} + +export function validateECSRequiredEnvVars() { + Object.values(ECS_ENV_VARS).forEach(val => { + if (!process.env[val]) { + throw new Error(`${val} Environment variable is missing and mandatory for ECS environment`); + } + }); +} + + + +export function getAccountId(): string { + return process.env[ECS_ENV_VARS.AWS_ACCOUNT_ID] as string +} + +export function getEnvironmentName(): string { + return process.env[ECS_ENV_VARS.ENV_NAME] as string +} + +export function getRegion(): string { + return process.env[ECS_ENV_VARS.AWS_DEFAULT_REGION] as string +} + +export function getPrefix(): string { + return process.env[ECS_ENV_VARS.PREFIX] as string || "opa"; +} + +export function getVpcCIDR(): string { + return process.env[ECS_OPTIONAL_ENV_VARS.ENV_CIDR] as string || "10.0.0.0/24"; +} +export function getExistingVpcId(): string { + return process.env[ECS_OPTIONAL_ENV_VARS.VPC_ID] as string; +} + +export function getPlatformRoleArn(): string { + return process.env[ECS_ENV_VARS.PLATFORM_ROLE_ARN] as string ; +} + +export function getPipelineRoleArn(): string { + return process.env[ECS_ENV_VARS.PIPELINE_ROLE_ARN] as string; +} diff --git a/iac/roots/opa-ecs-environment/src/opa-ecs-env-app.ts b/iac/roots/opa-ecs-environment/src/opa-ecs-env-app.ts index 338c1485..62d61d58 100644 --- a/iac/roots/opa-ecs-environment/src/opa-ecs-env-app.ts +++ b/iac/roots/opa-ecs-environment/src/opa-ecs-env-app.ts @@ -5,8 +5,15 @@ import * as cdk from "aws-cdk-lib"; import "source-map-support/register"; -import { OPAECSEnvStack } from "./opa-ecs-environment-stack"; import { makeRandom } from "@aws/aws-app-development-common-constructs"; +import { OPAECSEnvStack } from "./opa-ecs-environment-stack"; +import { + getAccountId, + getRegion, + getPrefix, + getEnvironmentName, + validateECSRequiredEnvVars +} from "./ecs-input"; /** * Main application function, make it async so it can call asnyc functions properly. 
@@ -14,40 +21,25 @@ import { makeRandom } from "@aws/aws-app-development-common-constructs"; async function main() { const app = new cdk.App(); - console.log("Loading Configurations..."); - - const account = process.env.AWS_ACCOUNT_ID as string; - const region = process.env.AWS_DEFAULT_REGION as string; + console.log("Loading Configurations for ECS Environment..."); + validateECSRequiredEnvVars(); + const account = getAccountId(); + const region = getRegion(); const env = { region, account }; - if (!process.env.ENV_NAME) - throw new Error("ENV_NAME Environment variable is missing and mandatory"); - - if (!process.env.AWS_ACCOUNT_ID) - throw new Error("AWS_ACCOUNT_ID Environment variable is missing and mandatory"); - - if (!process.env.PLATFORM_ROLE_ARN) - throw new Error("PLATFORM_ROLE_ARN Environment variable is missing and mandatory"); - - if (!process.env.PIPELINE_ROLE_ARN) - throw new Error("PIPELINE_ROLE_ARN Environment variable is missing and mandatory"); - - const prefix = process.env.PREFIX as string || "opa"; // generate unique environment identifier const envID = makeRandom(4); - console.log("Generating unique Environment identifier: " + envID) + console.log(`Generating unique Environment identifier: ${envID}`); - new OPAECSEnvStack(app, `ECS-ENV-${prefix}-${process.env.ENV_NAME}-Stack`, { + new OPAECSEnvStack(app, `ECS-ENV-${getPrefix()}-${getEnvironmentName()}-Stack`, { // stackName: `opa-ecs-environment`, // Do not use stack name to get a generated stack name so multiple stacks can be created - description: `${envID} ECS Environment for OPA(AWS App Development)`, + description: `${envID} ECS Environment for OPA`, uniqueEnvIdentifier: envID, env, }); - app.synth(); } - main(); diff --git a/iac/roots/opa-ecs-environment/src/opa-ecs-environment-stack.ts b/iac/roots/opa-ecs-environment/src/opa-ecs-environment-stack.ts index a17bd6d5..b5a6b113 100644 --- a/iac/roots/opa-ecs-environment/src/opa-ecs-environment-stack.ts +++ b/iac/roots/opa-ecs-environment/src/opa-ecs-environment-stack.ts @@ -8,6 +8,16 @@ import { Construct } from "constructs"; import { NetworkConstruct, OPAEnvironmentParams, EcsClusterConstruct, DynamoDBConstruct, } from '@aws/aws-app-development-common-constructs' import { ECSProvisioningConstruct } from './constructs/ecs-env-provisioning-role-construct' import { ECSOperationsConstruct } from "./constructs/ecs-env-operations-role-construct"; +import { + getAccountId, + getRegion, + getPrefix, + getEnvironmentName, + getPlatformRoleArn, + getPipelineRoleArn, + getVpcCIDR, + getExistingVpcId, +} from "./ecs-input"; export interface OPAECSEnvStackProps extends cdk.StackProps { uniqueEnvIdentifier: string; @@ -18,13 +28,14 @@ export class OPAECSEnvStack extends cdk.Stack { constructor(scope: Construct, id: string, props: OPAECSEnvStackProps) { super(scope, id, props); - const prefix = process.env.PREFIX as string || "opa"; - const envName = process.env.ENV_NAME as string - const awsAccount = process.env.AWS_ACCOUNT_ID as string - const platformRoleArn = process.env.PLATFORM_ROLE_ARN as string - const pipelineRoleArn = process.env.PIPELINE_ROLE_ARN as string - const awsRegion = process.env.AWS_DEFAULT_REGION as string || "us-east-1" - const cidrInput = process.env.ENV_CIDR as string || "10.0.0.0/24" + const prefix = getPrefix().toLowerCase(); + const envName = getEnvironmentName().toLowerCase(); + const awsAccount = getAccountId(); + const platformRoleArn = getPlatformRoleArn(); + const pipelineRoleArn = getPipelineRoleArn(); + const awsRegion = getRegion(); + const 
cidrInput = getVpcCIDR(); + const existingVpcId = getExistingVpcId(); // Creating environment params object @@ -35,36 +46,36 @@ export class OPAECSEnvStack extends cdk.Stack { prefix: prefix } - const envIdentifier = opaEnvParams.envName; - const envPathIdentifier = `/${envIdentifier}` + const envPathIdentifier = `/${envName}` // Create encryption key for all data at rest encryption - const key = new kms.Key(this, `${envIdentifier}-key`, { - alias: `${envIdentifier}-key`, + const key = new kms.Key(this, `${envName}-key`, { + alias: `${envName}-key`, enableKeyRotation: true, removalPolicy: cdk.RemovalPolicy.DESTROY, pendingWindow: cdk.Duration.days(8), }); // Save KMS key arn in an SSM Parameter - new ssm.StringParameter(this, `${envIdentifier}-key-param`, { + new ssm.StringParameter(this, `${envName}-key-param`, { allowedPattern: ".*", - description: `The KMS Key for ECS Solution: ${envIdentifier} Environment`, + description: `The KMS Key for ECS Solution: ${envName} Environment`, parameterName: `${envPathIdentifier}/kms-key`, stringValue: key.keyArn, }); - // Create underline network construct - const network = new NetworkConstruct(this, envIdentifier, { + // Create underlying network construct + const network = new NetworkConstruct(this, envName, { opaEnv: opaEnvParams, cidrRange: cidrInput, isIsolated: false, publicVpcNatGatewayCount: 3, - vpcAzCount: 3 + vpcAzCount: 3, + existingVpcId, }) // Create ECS Cluster - const ecsAppCluster = new EcsClusterConstruct(this, `${envIdentifier}-app-runtime`, { + const ecsAppCluster = new EcsClusterConstruct(this, `${envName}-app-runtime`, { opaEnv: opaEnvParams, vpc: network.vpc, isFargateCluster: true, @@ -74,12 +85,12 @@ export class OPAECSEnvStack extends cdk.Stack { //create audit table const auditTableConstruct = new DynamoDBConstruct(this, "audit-table", { opaEnv: opaEnvParams, - tableName: `${envIdentifier}-audit`, + tableName: `${envName}-audit`, kmsKey: key, }); // Create pipeline provisioning role for the environment - const provisioningRoleConstruct = new ECSProvisioningConstruct(this, `${opaEnvParams.prefix}-${envIdentifier}-provisioning-role`, { + const provisioningRoleConstruct = new ECSProvisioningConstruct(this, `${opaEnvParams.prefix}-${envName}-provisioning-role`, { opaEnv: opaEnvParams, KMSkey: key, vpcCollection: [network.vpc], @@ -89,7 +100,7 @@ export class OPAECSEnvStack extends cdk.Stack { }); // Create operations role for the environment - const operationsRoleConstruct = new ECSOperationsConstruct(this, `${opaEnvParams.prefix}-${envIdentifier}-operations-role`, { + const operationsRoleConstruct = new ECSOperationsConstruct(this, `${opaEnvParams.prefix}-${envName}-operations-role`, { opaEnv: opaEnvParams, KMSkey: key, vpcCollection: [network.vpc], @@ -99,7 +110,7 @@ export class OPAECSEnvStack extends cdk.Stack { }); // save the unique environment identifier - const uniqueEnvId = new ssm.StringParameter(this, `${envIdentifier}-unique-id-param`, { + const uniqueEnvId = new ssm.StringParameter(this, `${envName}-unique-id-param`, { allowedPattern: ".*", description: `The Unique ID for: ${opaEnvParams.envName} Environment`, parameterName: `${envPathIdentifier}/unique-id`, @@ -107,12 +118,12 @@ export class OPAECSEnvStack extends cdk.Stack { }); // Printing outputs - new cdk.CfnOutput(this, "Environment_Name", { + new cdk.CfnOutput(this, "EnvironmentName", { value: envName, }); // Printing the unique environment ID - new cdk.CfnOutput(this, "Environment_ID", { + new cdk.CfnOutput(this, "EnvironmentID", { value: 
uniqueEnvId.stringValue, }); @@ -122,7 +133,7 @@ export class OPAECSEnvStack extends cdk.Stack { }); // Printing the ECS Cluster name - new cdk.CfnOutput(this, "Cluster_Name", { + new cdk.CfnOutput(this, "ClusterName", { value: ecsAppCluster.clusterParam.parameterName, }); @@ -132,19 +143,19 @@ export class OPAECSEnvStack extends cdk.Stack { }); // Print role information - new cdk.CfnOutput(this, "Provisioning_Role", { + new cdk.CfnOutput(this, "ProvisioningRole", { value: provisioningRoleConstruct.provisioningRoleParam.parameterName, }); - new cdk.CfnOutput(this, "Provisioning_Role_ARN", { + new cdk.CfnOutput(this, "ProvisioningRoleARN", { value: provisioningRoleConstruct.provisioningRoleArnParam.parameterName, }); - new cdk.CfnOutput(this, "Operations_Role", { + new cdk.CfnOutput(this, "OperationsRole", { value: operationsRoleConstruct.operationsRoleParam.parameterName, }); - new cdk.CfnOutput(this, "Operations_Role_ARN", { + new cdk.CfnOutput(this, "OperationsRoleARN", { value: operationsRoleConstruct.operationsRoleArnParam.parameterName, }); diff --git a/iac/roots/opa-eks-environment/.gitignore b/iac/roots/opa-eks-environment/.gitignore new file mode 100644 index 00000000..f60797b6 --- /dev/null +++ b/iac/roots/opa-eks-environment/.gitignore @@ -0,0 +1,8 @@ +*.js +!jest.config.js +*.d.ts +node_modules + +# CDK asset staging directory +.cdk.staging +cdk.out diff --git a/iac/roots/opa-eks-environment/.npmignore b/iac/roots/opa-eks-environment/.npmignore new file mode 100644 index 00000000..c1d6d45d --- /dev/null +++ b/iac/roots/opa-eks-environment/.npmignore @@ -0,0 +1,6 @@ +*.ts +!*.d.ts + +# CDK asset staging directory +.cdk.staging +cdk.out diff --git a/iac/roots/opa-eks-environment/README.md b/iac/roots/opa-eks-environment/README.md new file mode 100644 index 00000000..8f53b2b2 --- /dev/null +++ b/iac/roots/opa-eks-environment/README.md @@ -0,0 +1,12 @@ +# OPA EKS IaC + +This folder contains CDK code that is used by Backstage when creating new EKS (Fargate) Providers. 
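Like the ECS entrypoint updated earlier in this patch, the EKS entrypoint reads its configuration through getter helpers (`getPrefix()`, `getEnvironmentName()`, `getAccountId()`, `getRegion()`, `getVpcCIDR()`, and so on) exported from an input module rather than raw `process.env` lookups. The snippet below is a minimal illustrative sketch of that pattern, assuming helpers that simply wrap environment variables with the defaults and mandatory checks visible in the code this patch removes; it is not the actual `ecs-input.ts`/`eks-input.ts` shipped in this change.

```ts
// Hypothetical sketch only - the real implementations live in src/ecs-input.ts and src/eks-input.ts.
// Defaults and error messages mirror the inline process.env handling that this patch replaces.

function requiredEnvVar(name: string): string {
  const value = process.env[name];
  if (!value) {
    // Same failure mode as the old inline checks, e.g. "ENV_NAME Environment variable is missing and mandatory"
    throw new Error(`${name} Environment variable is missing and mandatory`);
  }
  return value;
}

export function getPrefix(): string {
  return process.env.PREFIX || "opa"; // default seen in the removed code
}

export function getEnvironmentName(): string {
  return requiredEnvVar("ENV_NAME");
}

export function getAccountId(): string {
  return requiredEnvVar("AWS_ACCOUNT_ID");
}

export function getRegion(): string {
  return process.env.AWS_DEFAULT_REGION || "us-east-1";
}

export function getPlatformRoleArn(): string {
  return requiredEnvVar("PLATFORM_ROLE_ARN");
}

export function getPipelineRoleArn(): string {
  return requiredEnvVar("PIPELINE_ROLE_ARN");
}

export function getVpcCIDR(): string {
  return process.env.ENV_CIDR || "10.0.0.0/24";
}
```

Centralizing these lookups keeps the validation messages and defaults in one place, so each stack file imports only the getters it needs.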
+
+## CDK Useful commands
+
+* `npm run build` compile typescript to js
+* `npm run watch` watch for changes and compile
+* `npm run test` perform the jest unit tests
+* `cdk deploy` deploy this stack to your default AWS account/region
+* `cdk diff` compare deployed stack with current state
+* `cdk synth` emits the synthesized CloudFormation template
diff --git a/iac/roots/opa-eks-environment/cdk.json b/iac/roots/opa-eks-environment/cdk.json
new file mode 100644
index 00000000..298a967b
--- /dev/null
+++ b/iac/roots/opa-eks-environment/cdk.json
@@ -0,0 +1,54 @@
+{
+  "app": "npx ts-node --prefer-ts-exts src/opa-eks-env-app.ts",
+  "watch": {
+    "include": [
+      "**"
+    ],
+    "exclude": [
+      "README.md",
+      "cdk*.json",
+      "**/*.d.ts",
+      "**/*.js",
+      "tsconfig.json",
+      "package*.json",
+      "yarn.lock",
+      "node_modules",
+      "test"
+    ]
+  },
+  "context": {
+    "@aws-cdk/aws-lambda:recognizeLayerVersion": true,
+    "@aws-cdk/core:checkSecretUsage": true,
+    "@aws-cdk/core:target-partitions": [
+      "aws",
+      "aws-cn"
+    ],
+    "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true,
+    "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true,
+    "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true,
+    "@aws-cdk/aws-iam:minimizePolicies": true,
+    "@aws-cdk/core:validateSnapshotRemovalPolicy": true,
+    "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true,
+    "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true,
+    "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true,
+    "@aws-cdk/aws-apigateway:disableCloudWatchRole": true,
+    "@aws-cdk/core:enablePartitionLiterals": true,
+    "@aws-cdk/aws-events:eventsTargetQueueSameAccount": true,
+    "@aws-cdk/aws-iam:standardizedServicePrincipals": true,
+    "@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true,
+    "@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true,
+    "@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true,
+    "@aws-cdk/aws-route53-patters:useCertificate": true,
+    "@aws-cdk/customresources:installLatestAwsSdkDefault": false,
+    "@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true,
+    "@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true,
+    "@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true,
+    "@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true,
+    "@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true,
+    "@aws-cdk/aws-redshift:columnId": true,
+    "@aws-cdk/aws-stepfunctions-tasks:enableEmrServicePolicyV2": true,
+    "@aws-cdk/aws-ec2:restrictDefaultSecurityGroup": true,
+    "@aws-cdk/aws-apigateway:requestValidatorUniqueId": true,
+    "@aws-cdk/aws-kms:aliasNameRef": true
+  }
+}
\ No newline at end of file
diff --git a/iac/roots/opa-eks-environment/jest.config.js b/iac/roots/opa-eks-environment/jest.config.js
new file mode 100644
index 00000000..08263b89
--- /dev/null
+++ b/iac/roots/opa-eks-environment/jest.config.js
@@ -0,0 +1,8 @@
+module.exports = {
+  testEnvironment: 'node',
+  roots: ['<rootDir>/test'],
+  testMatch: ['**/*.test.ts'],
+  transform: {
+    '^.+\\.tsx?$': 'ts-jest'
+  }
+};
diff --git a/iac/roots/opa-eks-environment/package.json b/iac/roots/opa-eks-environment/package.json
new file mode 100644
index 00000000..b191ce94
--- /dev/null
+++ b/iac/roots/opa-eks-environment/package.json
@@ -0,0 +1,35 @@
+{
+  "name": "@aws/aws-app-development-eks-environment",
+  "version": "0.3.0",
+  "description": "A stack to create an EKS environment for OPA",
+  "license": "Apache-2.0",
+  "main": "dist/index.js",
+  "types": "dist/index.d.ts",
+  "files": [
+    "/dist"
+  ],
+  "scripts": {
+    "build": "tsc",
+    "watch": "tsc -w",
+    "test": "jest",
+    "cdk": "cdk"
+  },
+  "devDependencies": {
+    "@types/jest": "^29.5.1",
+    "@types/js-yaml": "^4.0.5",
+    "@types/node": "^20.3.1",
+    "@types/prettier": "2.6.0",
+    "@types/source-map-support": "^0.5.6",
+    "aws-cdk": "2.120.0",
+    "jest": "^29.5.0",
+    "ts-jest": "^29.1.0",
+    "ts-node": "^10.9.1",
+    "typescript": "~5.0.4"
+  },
+  "dependencies": {
+    "@aws/aws-app-development-common-constructs": "0.3.0",
+    "@aws-cdk/lambda-layer-kubectl-v28": "^2.1.0",
+    "aws-cdk-lib": "2.120.0",
+    "constructs": "^10.0.0"
+  }
+}
diff --git a/iac/roots/opa-eks-environment/src/constructs/eks-env-cluster-admin-role-construct.ts b/iac/roots/opa-eks-environment/src/constructs/eks-env-cluster-admin-role-construct.ts
new file mode 100644
index 00000000..6ff871c2
--- /dev/null
+++ b/iac/roots/opa-eks-environment/src/constructs/eks-env-cluster-admin-role-construct.ts
@@ -0,0 +1,135 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+import * as cdk from "aws-cdk-lib";
+import { Construct } from "constructs";
+import { NagSuppressions } from "cdk-nag";
+import * as iam from "aws-cdk-lib/aws-iam";
+import * as kms from "aws-cdk-lib/aws-kms";
+import { OPAEnvironmentParams } from "@aws/aws-app-development-common-constructs";
+
+export interface EKSClusterAdminRoleConstructProps extends cdk.StackProps {
+  readonly opaEnv: OPAEnvironmentParams;
+  readonly eksClusterName: string;
+  readonly kmsKey: kms.IKey;
+  /**
+   * Scope for CfnOutput
+   */
+  cfnOutputScope: any
+}
+
+const defaultProps: Partial<EKSClusterAdminRoleConstructProps> = {};
+
+export class EKSClusterAdminRoleConstruct extends Construct {
+  public iamRole: iam.Role;
+
+  constructor(parent: Construct, name: string, props: EKSClusterAdminRoleConstructProps) {
+    super(parent, name);
+
+    /* eslint-disable @typescript-eslint/no-unused-vars */
+    props = { ...defaultProps, ...props };
+
+    const envIdentifier = `${props.opaEnv.prefix.toLowerCase()}-${props.opaEnv.envName}`;
+
+    // Create IAM role
+    this.iamRole = new iam.Role(this, `${envIdentifier}-cluster-admin-role`, {
+      assumedBy: new iam.AccountPrincipal(props.opaEnv.awsAccount),
+      // roleName: name, - let CDK generate the role name
+      maxSessionDuration: cdk.Duration.seconds(43200),
+    });
+    NagSuppressions.addResourceSuppressions(this.iamRole, [
+      { id: "AwsSolutions-IAM4", reason: "Assumed roles will use AWS managed policies for demonstration purposes. Customers will be advised/required to assess and apply custom policies based on their role requirements" },
+      { id: "AwsSolutions-IAM5", reason: "Assumed roles will require permissions to perform multiple eks, ddb, and ec2 for demonstration purposes. 
Customers will be advised/required to assess and apply minimal permission based on role mappings to their idP groups" }, + ], true + ); + + // Add read-only permissions for EKS + this.iamRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "eks:DescribeEksAnywhereSubscription", + "eks:DescribeCluster", + "eks:ListClusters", + "eks:ListTagsForResource", + ], + effect: iam.Effect.ALLOW, + resources: ["*"], + }) + ); + + // Add cluster-specific permissions + this.iamRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "eks:AccessKubernetesApi", + "eks:AssociateEncryptionConfig", + "eks:AssociateIdentityProviderConfig", + "eks:CreateAddon", + "eks:CreateFargateProfile", + "eks:CreateNodegroup", + "eks:DeleteAddon", + "eks:DeleteFargateProfile", + "eks:DeleteNodegroup", + "eks:DescribeAddon", + "eks:DescribeAddonConfiguration", + "eks:DescribeAddonVersions", + "eks:DescribeCluster", + "eks:DescribeFargateProfile", + "eks:DescribeIdentityProviderConfig", + "eks:DescribeNodegroup", + "eks:DescribeUpdate", + "eks:ListAddons", + "eks:ListFargateProfiles", + "eks:ListIdentityProviderConfigs", + "eks:ListNodegroups", + "eks:ListUpdates", + "eks:TagResource", + "eks:UntagResource", + "eks:UpdateAddon", + "eks:UpdateClusterConfig", + "eks:UpdateClusterVersion", + "eks:UpdateNodegroupConfig", + "eks:UpdateNodegroupVersion", + ], + effect: iam.Effect.ALLOW, + resources: [ + `arn:aws:eks:${props.opaEnv.awsRegion}:${props.opaEnv.awsAccount}:addon/${props.eksClusterName}/*/*`, + `arn:aws:eks:${props.opaEnv.awsRegion}:${props.opaEnv.awsAccount}:cluster/${props.eksClusterName}`, + `arn:aws:eks:${props.opaEnv.awsRegion}:${props.opaEnv.awsAccount}:fargateprofile/${props.eksClusterName}/*/*`, + `arn:aws:eks:${props.opaEnv.awsRegion}:${props.opaEnv.awsAccount}:identityproviderconfig/${props.eksClusterName}/*/*/*`, + `arn:aws:eks:${props.opaEnv.awsRegion}:${props.opaEnv.awsAccount}:nodegroup/${props.eksClusterName}/*/*`, + ], + }) + ); + + this.iamRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "kms:Decrypt", + ], + effect: iam.Effect.ALLOW, + resources: [props.kmsKey.keyArn], + }) + ); + + // allow calling kubectl lambda function (needed if cluster is private or IP-restricted) + // Function names for EKS kubectl Lambda handlers are dynamically generated and will truncate + // a portion of the environment identifier. Ensure that the resource identifier in the policy is no + // longer than 35 characters + const truncatedName = `EKS-ENV-${props.opaEnv.prefix}-${props.opaEnv.envName}`.substring(0,34); + this.iamRole.addToPolicy( + new iam.PolicyStatement({ + actions: [ + "lambda:InvokeFunction" + ], + effect: iam.Effect.ALLOW, + resources: [`arn:aws:lambda:${props.opaEnv.awsRegion}:${props.opaEnv.awsAccount}:function:${truncatedName}*`], + }) + ); + + this.iamRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonVPCReadOnlyAccess")); + this.iamRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AWSCloudFormationReadOnlyAccess")); + this.iamRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonSSMReadOnlyAccess")); + + } +} diff --git a/iac/roots/opa-eks-environment/src/constructs/eks-env-cluster-construct.ts b/iac/roots/opa-eks-environment/src/constructs/eks-env-cluster-construct.ts new file mode 100644 index 00000000..a8c6a207 --- /dev/null +++ b/iac/roots/opa-eks-environment/src/constructs/eks-env-cluster-construct.ts @@ -0,0 +1,260 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from "aws-cdk-lib"; +import { Construct } from "constructs"; +import * as kms from "aws-cdk-lib/aws-kms" +import * as eks from "aws-cdk-lib/aws-eks"; +import * as iam from "aws-cdk-lib/aws-iam"; +import * as ssm from "aws-cdk-lib/aws-ssm"; +import * as ec2 from "aws-cdk-lib/aws-ec2"; +import { KubectlV28Layer } from '@aws-cdk/lambda-layer-kubectl-v28'; +import { OPAEnvironmentParams } from "@aws/aws-app-development-common-constructs"; +import { + getCreateK8sOpaResources, + getExistingClusterName, + getExistingKubectlOnEventLambdaArn, + getNodeType, + NODE_TYPE, +} from "../eks-input"; +import { OPAEKSManagedNodeClusterConstruct } from "./eks-env-cluster-managed-node"; +import { OPAEKSFargateClusterConstruct } from "./eks-env-cluster-fargate"; +import { KubectlProvider } from "aws-cdk-lib/aws-eks"; + +// This class creates an EKS Cluster that uses either Fargate or a Managed Node Group, +// depending on the user selection during provider scaffolding + +export interface OPAEKSClusterConstructProps extends cdk.StackProps { + readonly opaEnv: OPAEnvironmentParams; + /** + * make cluster public or private + */ + privateCluster: boolean; + /** + * IP CIDR block allow-list if the EKS API Server Endpoint allows public access + */ + apiAllowList: string[] | undefined; + /** + * An IAM role that will be added to the system:masters Kubernetes RBAC group. + * This role is Highly privileged and can only be accesed in cloudtrail through the CreateCluster api + * upon cluster creation + */ + clusterMasterRole: iam.Role + /** + * Role that provides permissions for the Kubernetes control + * plane to make calls to AWS API operations on your behalf. + */ + controlPlaneRole: iam.Role | undefined + /** + * VPC where the cluster should reside + */ + vpc: ec2.IVpc + /** + * IAM role to set in the aws-auth ConfigMap as a cluster-admin + */ + clusterAdminRole: iam.IRole + /** + * IAM role to assign as pod execution role in the Fargate Profile + */ + podExecutionRole: iam.IRole | undefined + + /** + * IAM role to assign as the kubectl lambda's execution role + */ + lambdaExecutionRole: iam.IRole + + /** + * Scope for CfnOutput + */ + cfnOutputScope: any +} + +export class OPAEKSClusterConstruct extends Construct { + public readonly clusterParameter: ssm.StringParameter; + public readonly cluster: eks.Cluster; + + constructor(parent: Construct, name: string, props: OPAEKSClusterConstructProps) { + super(parent, name); + + const envIdentifier = `${props.opaEnv.prefix.toLowerCase()}-${props.opaEnv.envName}`; + const envPathIdentifier = `/${props.opaEnv.prefix.toLowerCase()}/${props.opaEnv.envName.toLowerCase()}`; + + // define if the cluster will be public or private + let endpointAccess = props.privateCluster ? 
eks.EndpointAccess.PRIVATE : eks.EndpointAccess.PUBLIC_AND_PRIVATE; + if (!props.privateCluster && props.apiAllowList && props.apiAllowList.length) { + endpointAccess = endpointAccess.onlyFrom(...props.apiAllowList); + } + + if (getExistingClusterName()) { + console.log("...Importing Existing EKS Cluster"); + } else { + console.log("...Creating EKS Cluster"); + } + + const clusterName = `${envIdentifier}-cluster`; + const clusterLogging = [ + eks.ClusterLoggingTypes.API, + eks.ClusterLoggingTypes.AUTHENTICATOR, + eks.ClusterLoggingTypes.SCHEDULER, + ]; + const kubectlLayer = new KubectlV28Layer(this, 'kubectl'); + const albControllerVersion = eks.AlbControllerVersion.V2_6_2; + const kubernetesVersion = eks.KubernetesVersion.V1_28; + + const clusterAdminK8sUsername = props.clusterAdminRole.roleArn; + + // These configs could be used if you only want to grant access to a specific user who has assumed + // the clusterAdmin role. It translates from the cluster admin role ARN to an equivalent "assumed-role" ARN + // const clusterAdminRoleParts = cdk.Fn.split(":", props.clusterAdminRole.roleArn); + // const clusterAdminRoleAccount = cdk.Fn.select(4, clusterAdminRoleParts); + // const clusterAdminRoleName = cdk.Fn.select(1, cdk.Fn.split("/", cdk.Fn.select(5, clusterAdminRoleParts))); + // const clusterAdminK8sUsername = cdk.Fn.join('', + // [ + // "arn:aws:sts::", + // clusterAdminRoleAccount, + // ":assumed-role/", + // clusterAdminRoleName, + // "/{{SessionName}}" + // ]); + + let clusterConstruct; + + const isCreateNewCluster = !getExistingClusterName(); + + if (isCreateNewCluster) { + + // define KMS key for secrets encryption + const clusterKmsKey = new kms.Key(this, `${envIdentifier}-cluster-key`, { + enableKeyRotation: true, + alias: cdk.Fn.join('', ['alias/', 'eks/', `${envIdentifier}-cluster-key-alias`]), + }); + + if (getNodeType() === NODE_TYPE.MANAGED) { + clusterConstruct = new OPAEKSManagedNodeClusterConstruct(this, `${envIdentifier}-app-runtime`, { + cfnOutputScope: props.cfnOutputScope, + opaEnv: props.opaEnv, + clusterMasterRole: props.clusterMasterRole, + controlPlaneRole: props.controlPlaneRole!, + vpc: props.vpc, + endpointAccess, + clusterName, + clusterLogging, + kubectlLayer, + kubernetesVersion, + albControllerVersion, + clusterKmsKey, + lambdaExecutionRole: props.lambdaExecutionRole, + }); + } else { + clusterConstruct = new OPAEKSFargateClusterConstruct(this, `${envIdentifier}-app-runtime`, { + podExecutionRole: props.podExecutionRole!, + cfnOutputScope: props.cfnOutputScope, + opaEnv: props.opaEnv, + clusterMasterRole: props.clusterMasterRole, + controlPlaneRole: props.controlPlaneRole!, + vpc: props.vpc, + endpointAccess, + clusterName, + clusterLogging, + kubectlLayer, + kubernetesVersion, + albControllerVersion, + clusterKmsKey, + lambdaExecutionRole: props.lambdaExecutionRole, + }); + } + + } else { + + const clusterAttributes: eks.ClusterAttributes = { + clusterName: getExistingClusterName(), + vpc: props.vpc, + kubectlRoleArn: props.clusterAdminRole.roleArn, + kubectlLambdaRole: props.lambdaExecutionRole, + kubectlLayer, + }; + + if (getExistingKubectlOnEventLambdaArn()) { + const kubectlProvider = eks.KubectlProvider.fromKubectlProviderAttributes(this, 'KubectlProvider', { + functionArn: getExistingKubectlOnEventLambdaArn(), + kubectlRoleArn: props.clusterAdminRole.roleArn, + handlerRole: props.lambdaExecutionRole, + }); + (clusterAttributes as any).kubectlProvider = kubectlProvider; + } + + const existingCluster = eks.Cluster.fromClusterAttributes(this, "EKS", 
clusterAttributes); + if (!existingCluster) { + throw new Error(`Failed to get EKS cluster with name ${clusterName} in vpc ${props.vpc}`); + } + clusterConstruct = { + cluster: (existingCluster as eks.Cluster) + }; + + } + + const cluster = clusterConstruct.cluster; + + if (getCreateK8sOpaResources()) { + + // Create a ClusterRoleBinding for the cluster admin IAM role + const clusterAdminClusterRoleBinding = cluster.addManifest(`cluster-admin-role-binding`, { + apiVersion: 'rbac.authorization.k8s.io/v1', + kind: 'ClusterRoleBinding', + metadata: { name: 'opa-cluster-admin' }, + subjects: [ + { + kind: 'User', + name: clusterAdminK8sUsername, + apiGroup: 'rbac.authorization.k8s.io' + } + ], + roleRef: { + kind: 'ClusterRole', + name: 'cluster-admin', + apiGroup: 'rbac.authorization.k8s.io' + } + }); + + // Create a ClusterRole that allows for viewing namespaces + const clusterNamespaceViewerRole = cluster.addManifest(`cluster-ns-viewer-role`, { + apiVersion: 'rbac.authorization.k8s.io/v1', + kind: 'ClusterRole', + metadata: { name: 'opa-namespace-viewer' }, + rules: [ + { + apiGroups: [''], + resources: ['namespaces'], + verbs: ['get', 'list', 'watch'], + } + ], + }); + } + + if (isCreateNewCluster) { + + // Get a reference to the aws-auth ConfigMap created by eks.FargateCluster or eks.Cluster construct + const awsAuth = cluster.awsAuth; + + // Add the cluster admin IAM role to the aws-auth ConfigMap so that it can be used with kubectl + // Note - the environment provider provisioning role already has cluster access since it is set in the systems:master group + awsAuth.addRoleMapping(props.clusterAdminRole, { username: clusterAdminK8sUsername, groups: [] }); + + } else if (!getExistingKubectlOnEventLambdaArn() && !getCreateK8sOpaResources()) { + + // OPA requires the KubeCtl Provider, which is typically a lambda function + // that can execute kubectl commands, even when the Kubernetes API server + // only allows private access. We only want to create this if it wasn't already created. + KubectlProvider.getOrCreate(this, cluster); + } + + const clusterParam = new ssm.StringParameter(this, `${name}-eks-cluster-param`, { + allowedPattern: ".*", + description: `The EKS Cluster for OPA Solution: ${props.opaEnv.envName} Environment`, + parameterName: `${envPathIdentifier}/eks-cluster`, + stringValue: cluster.clusterArn + }); + this.clusterParameter = clusterParam; + } + +} diff --git a/iac/roots/opa-eks-environment/src/constructs/eks-env-cluster-fargate.ts b/iac/roots/opa-eks-environment/src/constructs/eks-env-cluster-fargate.ts new file mode 100644 index 00000000..53291476 --- /dev/null +++ b/iac/roots/opa-eks-environment/src/constructs/eks-env-cluster-fargate.ts @@ -0,0 +1,119 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from "aws-cdk-lib"; +import { Construct } from "constructs"; +import * as kms from "aws-cdk-lib/aws-kms" +import * as eks from "aws-cdk-lib/aws-eks"; +import * as iam from "aws-cdk-lib/aws-iam"; +import * as ec2 from "aws-cdk-lib/aws-ec2"; +import { OPAEnvironmentParams } from "@aws/aws-app-development-common-constructs"; +import { OPAEKSFargateClusterFluentBitConstruct } from "./eks-env-fargate-fluent-bit-config-construct"; + +// This class creates an EKS Cluster that uses Fargate + +export interface OPAEKSFargateClusterConstructProps extends cdk.StackProps { + readonly opaEnv: OPAEnvironmentParams; + + /** + * An IAM role that will be added to the system:masters Kubernetes RBAC group. 
+ * This role is Highly privileged and can only be accesed in cloudtrail through the CreateCluster api + * upon cluster creation + */ + clusterMasterRole: iam.Role + /** + * Role that provides permissions for the Kubernetes control + * plane to make calls to AWS API operations on your behalf. + */ + controlPlaneRole: iam.Role + + vpc: ec2.IVpc + + /** + * IAM role to assign as pod execution role in the Fargate Profile + */ + podExecutionRole: iam.IRole + + /** + * Scope for CfnOutput + */ + cfnOutputScope: any + + clusterName: string + + clusterKmsKey: kms.Key + + endpointAccess: cdk.aws_eks.EndpointAccess + + kubernetesVersion: eks.KubernetesVersion + + kubectlLayer: cdk.aws_lambda.ILayerVersion + + albControllerVersion: eks.AlbControllerVersion + + clusterLogging: cdk.aws_eks.ClusterLoggingTypes[] | undefined + + /** + * IAM role to assign as the kubectl lambda's execution role + */ + lambdaExecutionRole: iam.IRole +} + +// FargateClusterProps has bug in cdk lib 2.118 where it is not exposing a kubectlLambdaRole property +interface ExtFargateClusterProps extends cdk.aws_eks.FargateClusterProps { kubectlLambdaRole?: cdk.aws_iam.IRole | undefined } + +export class OPAEKSFargateClusterConstruct extends Construct { + public readonly cluster: eks.Cluster + + constructor(parent: Construct, name: string, props: OPAEKSFargateClusterConstructProps) { + super(parent, name); + + const envIdentifier = `${props.opaEnv.prefix.toLowerCase()}-${props.opaEnv.envName}`; + + const cluster = new eks.FargateCluster(this, `${envIdentifier}-app-runtime`, { + version: props.kubernetesVersion, + clusterLogging: props.clusterLogging, + clusterName: props.clusterName, + endpointAccess: props.endpointAccess, + mastersRole: props.clusterMasterRole, + role: props.controlPlaneRole, + outputClusterName: true, + outputConfigCommand: true, + outputMastersRoleArn: true, + vpc: props.vpc, + // Ensure EKS helper lambdas are in private subnets + placeClusterHandlerInVpc: true, + secretsEncryptionKey: props.clusterKmsKey, + // Create a lambda function that can authenticate and make calls with kubectl and helm + // See https://github.com/cdklabs/awscdk-asset-kubectl#readme + kubectlLayer: props.kubectlLayer, + kubectlLambdaRole: props.lambdaExecutionRole, + albController: { + version: props.albControllerVersion, + }, + + defaultProfile: { + selectors: [ + { namespace: 'kube-system' }, + { namespace: 'default' }, + { namespace: '*' } + ], + podExecutionRole: props.podExecutionRole + }, + + /* + * Controls the "eks.amazonaws.com/compute-type" annotation in the CoreDNS configuration + * on your cluster to determine which compute type to use for CoreDNS. + */ + coreDnsComputeType: eks.CoreDnsComputeType.FARGATE + } as ExtFargateClusterProps); + this.cluster = cluster; + + new OPAEKSFargateClusterFluentBitConstruct(this, `${envIdentifier}-fluent-bit-config`, { + opaEnv: props.opaEnv, + cluster, + }); + + } + +} diff --git a/iac/roots/opa-eks-environment/src/constructs/eks-env-cluster-managed-node.ts b/iac/roots/opa-eks-environment/src/constructs/eks-env-cluster-managed-node.ts new file mode 100644 index 00000000..392104d0 --- /dev/null +++ b/iac/roots/opa-eks-environment/src/constructs/eks-env-cluster-managed-node.ts @@ -0,0 +1,117 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from "aws-cdk-lib"; +import { Construct } from "constructs"; +import * as kms from "aws-cdk-lib/aws-kms" +import * as eks from "aws-cdk-lib/aws-eks"; +import * as iam from "aws-cdk-lib/aws-iam"; +import * as ec2 from "aws-cdk-lib/aws-ec2"; +import { OPAEnvironmentParams } from "@aws/aws-app-development-common-constructs"; +import { OPAEKSManagedNodeClusterFluentBitConstruct } from "./eks-env-managed-node-fluent-bit-config-construct"; +import { + getAmiType, + getInstanceType, + getNodeGroupDesiredSize, + getNodeGroupDiskSize, + getNodeGroupMaxSize, + getNodeGroupMinSize, +} from "../eks-input"; + +// This class creates an EKS Cluster with a Managed Node Group + +export interface OPAEKSManagedNodeClusterConstructProps extends cdk.StackProps { + readonly opaEnv: OPAEnvironmentParams; + + /** + * An IAM role that will be added to the system:masters Kubernetes RBAC group. + * This role is Highly privileged and can only be accesed in cloudtrail through the CreateCluster api + * upon cluster creation + */ + clusterMasterRole: iam.Role + /** + * Role that provides permissions for the Kubernetes control + * plane to make calls to AWS API operations on your behalf. + */ + controlPlaneRole: iam.Role + + vpc: ec2.IVpc + + /** + * Scope for CfnOutput + */ + cfnOutputScope: any + + clusterName: string + + clusterKmsKey: kms.Key + + endpointAccess: cdk.aws_eks.EndpointAccess + + kubernetesVersion: eks.KubernetesVersion + + kubectlLayer: cdk.aws_lambda.ILayerVersion + + albControllerVersion: eks.AlbControllerVersion + + clusterLogging: cdk.aws_eks.ClusterLoggingTypes[] | undefined + + /** + * IAM role to assign as the kubectl lambda's execution role + */ + lambdaExecutionRole: iam.IRole +} + +export class OPAEKSManagedNodeClusterConstruct extends Construct { + public readonly cluster: eks.Cluster + + constructor(parent: Construct, name: string, props: OPAEKSManagedNodeClusterConstructProps) { + super(parent, name); + + const envIdentifier = `${props.opaEnv.prefix.toLowerCase()}-${props.opaEnv.envName}`; + + // See documentation here: https://docs.aws.amazon.com/cdk/api/v2/docs/aws-cdk-lib.aws_eks.Cluster.html + const cluster = new eks.Cluster(this, `${envIdentifier}-app-runtime`, { + version: props.kubernetesVersion, + defaultCapacity: 0, + clusterLogging: props.clusterLogging, + clusterName: props.clusterName, + endpointAccess: props.endpointAccess, + mastersRole: props.clusterMasterRole, + role: props.controlPlaneRole, + outputClusterName: true, + outputConfigCommand: true, + outputMastersRoleArn: true, + vpc: props.vpc, + // Ensure EKS helper lambdas are in private subnets + placeClusterHandlerInVpc: true, + secretsEncryptionKey: props.clusterKmsKey, + // Create a lambda function that can authenticate and make calls with kubectl and helm + // See https://github.com/cdklabs/awscdk-asset-kubectl#readme + kubectlLayer: props.kubectlLayer, + kubectlLambdaRole: props.lambdaExecutionRole, + albController: { + version: props.albControllerVersion, + }, + }); + this.cluster = cluster; + + cluster.addNodegroupCapacity('custom-node-group', { + instanceTypes: [new ec2.InstanceType(getInstanceType())], + minSize: getNodeGroupMinSize(), + desiredSize: getNodeGroupDesiredSize(), + maxSize: getNodeGroupMaxSize(), + diskSize: getNodeGroupDiskSize(), + amiType: eks.NodegroupAmiType[getAmiType() as keyof typeof eks.NodegroupAmiType], + }); + + new OPAEKSManagedNodeClusterFluentBitConstruct(this, `${envIdentifier}-fluent-bit-config`, { + opaEnv: props.opaEnv, + 
clusterName: props.clusterName,
+      cluster,
+      cfnOutputScope: props.cfnOutputScope
+    });
+
+  }
+
+}
diff --git a/iac/roots/opa-eks-environment/src/constructs/eks-env-control-plane-role-construct.ts b/iac/roots/opa-eks-environment/src/constructs/eks-env-control-plane-role-construct.ts
new file mode 100644
index 00000000..3029038f
--- /dev/null
+++ b/iac/roots/opa-eks-environment/src/constructs/eks-env-control-plane-role-construct.ts
@@ -0,0 +1,78 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+import * as cdk from "aws-cdk-lib";
+import { Construct } from "constructs";
+import { NagSuppressions } from "cdk-nag";
+import * as iam from "aws-cdk-lib/aws-iam";
+import * as ssm from "aws-cdk-lib/aws-ssm";
+import { OPAEnvironmentParams } from "@aws/aws-app-development-common-constructs";
+
+export interface EKSControlPlaneConstructProps extends cdk.StackProps {
+  readonly opaEnv: OPAEnvironmentParams;
+  /**
+   * Scope for CfnOutput
+   */
+  cfnOutputScope: any
+}
+
+const defaultProps: Partial<EKSControlPlaneConstructProps> = {};
+
+export class EKSControlPlaneRoleConstruct extends Construct {
+  public IAMRole: iam.Role;
+  public controlPlaneRoleParam: ssm.StringParameter;
+  public controlPlaneRoleArnParam: ssm.StringParameter;
+
+  constructor(parent: Construct, name: string, props: EKSControlPlaneConstructProps) {
+    super(parent, name);
+
+    /* eslint-disable @typescript-eslint/no-unused-vars */
+    props = { ...defaultProps, ...props };
+
+    const envIdentifier = `${props.opaEnv.prefix.toLowerCase()}-${props.opaEnv.envName}`;
+    const envPathIdentifier = `/${props.opaEnv.prefix.toLowerCase()}/${props.opaEnv.envName.toLowerCase()}`;
+
+    // Create IAM role
+    this.IAMRole = new iam.Role(this, `${envIdentifier}-role`, {
+      assumedBy: new iam.ServicePrincipal("eks.amazonaws.com"),
+      roleName: name,
+      managedPolicies: [
+        iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonEKSClusterPolicy"),
+      ],
+      maxSessionDuration: cdk.Duration.seconds(43200),
+    });
+    NagSuppressions.addResourceSuppressions(this.IAMRole, [
+      { id: "AwsSolutions-IAM4", reason: "Assumed roles will use AWS managed policies for demonstration purposes. Customers will be advised/required to assess and apply custom policies based on their role requirements" },
+      { id: "AwsSolutions-IAM5", reason: "Assumed roles will require permissions to perform multiple ecs, ddb, and ec2 for demonstration purposes. 
Customers will be advised/required to assess and apply minimal permission based on role mappings to their idP groups" }, + ], true + ); + + // now save the Role in SSM Param + const roleParam = new ssm.StringParameter(this, `${envIdentifier}-role-param`, { + allowedPattern: ".*", + description: `The EKS Control Plane Role for OPA Solution: ${props.opaEnv.envName} Environment`, + parameterName: `${envPathIdentifier}/eks-control-plane-role`, + stringValue: this.IAMRole.roleName, + }); + + const roleArnParam = new ssm.StringParameter(this, `${envIdentifier}-role-arn-param`, { + allowedPattern: ".*", + description: `The EKS Cluster Control Plane Role Arn for OPA Solution: ${props.opaEnv.envName} Environment`, + parameterName: `${envPathIdentifier}/eks-control-plane-role-arn`, + stringValue: this.IAMRole.roleArn, + }); + + // Post params to output + new cdk.CfnOutput(props.cfnOutputScope, "ContolPlaneRoleParam", { + value: roleParam.parameterName, + }); + + // Post params to output + new cdk.CfnOutput(props.cfnOutputScope, "ControlPlaneRoleArnParam", { + value: roleArnParam.parameterName, + }); + this.controlPlaneRoleParam = roleParam; + this.controlPlaneRoleArnParam = roleArnParam; + } + +} diff --git a/iac/roots/opa-eks-environment/src/constructs/eks-env-fargate-fluent-bit-config-construct.ts b/iac/roots/opa-eks-environment/src/constructs/eks-env-fargate-fluent-bit-config-construct.ts new file mode 100644 index 00000000..22ce7c28 --- /dev/null +++ b/iac/roots/opa-eks-environment/src/constructs/eks-env-fargate-fluent-bit-config-construct.ts @@ -0,0 +1,57 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +import * as cdk from "aws-cdk-lib"; +import { Construct } from "constructs"; +import * as eks from "aws-cdk-lib/aws-eks"; +import { OPAEnvironmentParams } from "@aws/aws-app-development-common-constructs"; + +export interface OPAEKSFargateClusterFluentBitConstructProps extends cdk.StackProps { + readonly opaEnv: OPAEnvironmentParams; + + cluster: eks.Cluster +} + +export class OPAEKSFargateClusterFluentBitConstruct extends Construct { + + constructor(parent: Construct, name: string, props: OPAEKSFargateClusterFluentBitConstructProps) { + super(parent, name); + + const stack = cdk.Stack.of(this); + const envIdentifier = `${props.opaEnv.prefix.toLowerCase()}-${props.opaEnv.envName}`; + + // See https://docs.aws.amazon.com/eks/latest/userguide/fargate-logging.html + // See https://docs.fluentbit.io/manual/pipeline/outputs/cloudwatch + + // creating observability namespace + const awsObservabilityManifest = props.cluster.addManifest(`${envIdentifier}-aws-observability-namespace`, { + apiVersion: 'v1', + kind: 'Namespace', + metadata: { name: 'aws-observability', labels: { 'aws-observability': 'enabled' } }, + }); + + // Define the ConfigMap manifest + const awsLoggingConfigMap = { + apiVersion: 'v1', + kind: 'ConfigMap', + metadata: { + name: 'aws-logging', + namespace: 'aws-observability', + }, + data: { + flb_log_cw: 'false', + 'filters.conf': `[FILTER]\n Name parser\n Match *\n Key_name log\n Parser crio\n[FILTER]\n Name kubernetes\n Match kube.*\n Merge_Log On\n Keep_Log Off\n Buffer_Size 0\n Kube_Meta_Cache_TTL 300s`, + 'output.conf': `[OUTPUT]\n Name cloudwatch_logs\n Match kube.*\n region ${stack.region}\n log_group_name /aws/apps/${envIdentifier}\n log_stream_prefix fluent-bit-fallback-\n log_group_template /aws/apps/${envIdentifier}/$kubernetes['namespace_name']\n log_stream_template 
$kubernetes['pod_name'].$kubernetes['container_name']\n log_retention_days 60\n auto_create_group true`, + 'parsers.conf': `[PARSER]\n Name crio\n Format Regex\n Regex ^(?