diff --git a/.github/workflows/aws-deploy.yml b/.github/workflows/aws-deploy.yml
index 5a3cdefc..0e458500 100644
--- a/.github/workflows/aws-deploy.yml
+++ b/.github/workflows/aws-deploy.yml
@@ -17,202 +17,14 @@ on:
       version:
         description: 'Version Number'
         required: false
-# push:
-#   branches:
-#     - main
-# pull_request:
-
-concurrency: ${{ github.event.inputs.environment }}
-
-env:
-  DOCKER_TAGS: ${{ secrets.DOCKER_TAGS }}
-  IMAGE_ID: ${{ secrets.AWS_ECR_URI }}
-  IMAGE_NAME: bcer-api
-  TFC_WORKSPACE: ${{ github.event.inputs.environment }}
-  TF_VERSION: 1.3.7
-  TG_SRC_PATH: Terraform
-  TG_VERSION: 0.44.5
-
-permissions:
-  id-token: write # This is required for requesting the JWT
-  contents: read  # This is required for actions/checkout
-
+      zap_scan:
+        description: 'Run ZAP Scan'
+        type: boolean
+        required: false
 jobs:
-  docker_push:
-    name: Docker Push
-    environment: ${{ github.event.inputs.environment }}
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-        with:
-          ref: FEATURE/aws
-
-      - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v2
-        with:
-          role-to-assume: ${{ secrets.TERRAFORM_DEPLOY_ROLE_ARN }}
-          aws-region: ca-central-1
-
-      - name: Amazon ECR Login
-        uses: aws-actions/amazon-ecr-login@v1
-        with:
-          mask-password: 'true'
-
-      - name: Cache
-        uses: actions/cache@v3
-        with:
-          path: /tmp/.buildx-cache
-          key: ${{ runner.os }}-buildx-${{ github.sha }}
-          restore-keys: |
-            ${{ runner.os }}-buildx-
-
-      - name: Docker Setup Buildx
-        uses: docker/setup-buildx-action@v2
-
-      - name: Build and push Docker images (run migrations)
-        if: inputs.migrations == true
-        uses: docker/build-push-action@v4
-        with:
-          builder: ${{ steps.buildx.outputs.name }}
-          cache-from: type=local,src=/tmp/.buildx-cache
-          cache-to: type=local,dest=/tmp/.buildx-cache
-          context: packages/bcer-api/app
-          file: packages/bcer-api/app/Dockerfile.aws.migrations
-          push: true
-          tags: ${{ env.DOCKER_TAGS }}
-
-      - name: Build and push Docker images
-        if: inputs.migrations == false
-        uses: docker/build-push-action@v4
-        with:
-          builder: ${{ steps.buildx.outputs.name }}
-          cache-from: type=local,src=/tmp/.buildx-cache
-          cache-to: type=local,dest=/tmp/.buildx-cache
-          context: packages/bcer-api/app
-          file: packages/bcer-api/app/Dockerfile.aws
-          push: true
-          tags: ${{ env.DOCKER_TAGS }}
-
-  terraform_apply:
-    name: Terraform Apply
-    environment: ${{ github.event.inputs.environment }}
-    runs-on: ubuntu-latest
-    needs: docker_push
-    steps:
-      - name: Set TF_VAR_TIMESTAMP
-        run: echo "TF_VAR_TIMESTAMP=$(date --rfc-3339=seconds)" >> $GITHUB_ENV
-
-      - name: Checkout
-        uses: actions/checkout@v3
-        with:
-          ref: ${{ github.event.workflow_run.head_branch }}
-
-      - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v2
-        with:
-          role-to-assume: ${{ secrets.TERRAFORM_DEPLOY_ROLE_ARN }}
-          aws-region: ca-central-1
-
-      - name: HashiCorp - Setup Terraform
-        uses: hashicorp/setup-terraform@v2
-        with:
-          terraform_version: ${{ env.TF_VERSION }}
-
-      - name: Terragrunt installer
-        uses: autero1/action-terragrunt@v1.3.2
-        with:
-          terragrunt_version: ${{ env.TG_VERSION }}
-
-      - name: Terragrunt Apply
-        working-directory: ${{ env.TG_SRC_PATH }}/${{ env.TFC_WORKSPACE }}
-        env:
-          app_image: ${{ env.IMAGE_ID }}:${{ github.sha }}
-          LICENSE_PLATE: ${{ secrets.MY_LICENSE_PLATE }}
-        run: |
-          terragrunt run-all apply --terragrunt-non-interactive
-
-  build_jobs:
-    name: Build Jobs
-    if: inputs.frontends == true
-    environment: ${{ github.event.inputs.environment }}
+  nothing:
+    name: nothing
     runs-on: ubuntu-latest
-    needs: terraform_apply
     steps:
       - name: Checkout
         uses: actions/checkout@v3
-        with:
-          ref: ${{ github.event.workflow_run.head_branch }}
-
-      - name: Use Node.js 14
-        uses: actions/setup-node@v3
-        with:
-          node-version: 14
-
-      - name: Build project shared components
-        run: |
-          cd packages/bcer-shared-components
-          npm install
-          npm run build
-
-      - name: Copy env file to retailer app
-        run: |
-          cd packages/bcer-retailer-app
-          cp .config/.env.aws.${{ github.event.inputs.environment }} app/.env
-
-      - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v2
-        with:
-          role-to-assume: ${{ secrets.TERRAFORM_DEPLOY_ROLE_ARN }}
-          aws-region: ca-central-1
-
-      - name: Build project retailer app
-        run: |
-          cd packages/bcer-retailer-app/app
-          npm install
-          npm run build
-
-      - name: Upload to S3 bucket retail app
-        run: |
-          cd packages/bcer-retailer-app/app/build
-          aws s3 sync . s3://bcer-${{ github.event.inputs.environment }}/retailer --delete
-        env:
-          AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }}
-          AWS_REGION: ca-central-1
-
-      - name: Copy env file to bcer-data-portal
-        run: |
-          cd packages/bcer-data-portal
-          cp .config/.env.aws.${{ github.event.inputs.environment }} app/.env
-
-      - name: Build project data portal frontend
-        run: |
-          cd packages/bcer-data-portal/app
-          npm install
-          npm run build
-
-      - name: Upload to S3 bucket data portal
-        run: |
-          cd packages/bcer-data-portal/app/build
-          aws s3 sync . s3://bcer-${{ github.event.inputs.environment }}/portal --delete
-        env:
-          AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }}
-          AWS_REGION: ca-central-1
-# ----------------------------Commenting out while trying to get rollback working-------------------------------
-#   bump_version:
-#     runs-on: ubuntu-latest
-#     permissions: write-all
-
-#     steps:
-#       - uses: actions/checkout@v3
-#         with:
-#           fetch-depth: '0'
-
-#       - name: Bump version and push tag
-#         uses: anothrNick/github-tag-action@1.59.0 # Don't use @master unless you're happy to test the latest version
-#         env:
-#           DEFAULT_BUMP: major
-#           DEFAULT_BRANCH: main
-#           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-#           RELEASE_BRANCHES : main
-#           WITH_V: true
diff --git a/.gitignore b/.gitignore
index 174c1e18..b0120692 100644
--- a/.gitignore
+++ b/.gitignore
@@ -36,8 +36,6 @@ cypress/videos/
 # .tfstate files
 *.tfstate
 *.tfstate.*
-.terraform.lock.hcl
-**/.terragrunt-cache/**
 
 # Crash log files
 crash.log
diff --git a/Database Scripts/aurora_create_db.sql b/Database Scripts/aurora_create_db.sql
deleted file mode 100644
index 825890ea..00000000
--- a/Database Scripts/aurora_create_db.sql
+++ /dev/null
@@ -1,11 +0,0 @@
---As Postgres User
-CREATE USER bcer_proxy WITH PASSWORD 'bcer';
---RDS handles the CREATE DATABASE bcer step
---CREATE DATABASE bcer WITH OWNER = bcer ENCODING = 'UTF8' CONNECTION LIMIT = -1 IS_TEMPLATE = False;
-GRANT ALL PRIVILEGES on database bcer to bcer_proxy;
-
---Switch to bcer_proxy user
---Use the BCER database
-CREATE SCHEMA bcer;
-GRANT ALL ON ALL TABLES IN SCHEMA bcer TO bcer_proxy;
-GRANT ALL ON SCHEMA bcer TO bcer_proxy;
\ No newline at end of file
diff --git a/Infrastructure/alb.tf b/Infrastructure/alb.tf
index a447208b..99b7b8e2 100644
--- a/Infrastructure/alb.tf
+++ b/Infrastructure/alb.tf
@@ -13,17 +13,18 @@ data "aws_alb_listener" "front_end" {
 }
 
 resource "aws_alb_target_group" "app" {
-  name                 = "${var.application}-${var.target_env}-target-group"
+  name                 = "bcer-dev-target-group"
   port                 = var.app_port
   protocol             = "HTTP"
   vpc_id               = data.aws_vpc.main.id
   target_type          = "ip"
   deregistration_delay = 30
 
   lifecycle {
-    ignore_changes = [name]
+    create_before_destroy = true
   }
 
   stickiness {
     type = "lb_cookie"
+
   }
 
   health_check {
@@ -36,7 +37,7 @@ resource "aws_alb_target_group" "app" {
     unhealthy_threshold = "2"
   }
 
-  tags = local.common_tags
+  tags = local.common_tags
 }
 
 resource "aws_lb_listener_rule" "host_based_weighted_routing" {
@@ -48,9 +49,10 @@ resource "aws_lb_listener_rule" "host_based_weighted_routing" {
     type             = "forward"
     target_group_arn = aws_alb_target_group.app.arn
   }
+  #figure out what to place here
   condition {
     path_pattern {
       values = ["/*"]
     }
   }
-}
+}
\ No newline at end of file
diff --git a/Infrastructure/api-gateway.tf b/Infrastructure/api-gateway.tf
deleted file mode 100644
index 8ad7d950..00000000
--- a/Infrastructure/api-gateway.tf
+++ /dev/null
@@ -1,51 +0,0 @@
-data "aws_acm_certificate" "bcer_api_certificate" {
-  domain      = "bcer-${var.target_env}.api.hlth.gov.bc.ca"
-  statuses    = ["ISSUED"]
-  most_recent = true
-}
-
-resource "aws_cloudwatch_log_group" "bcer_api_access_logs" {
-  name              = "bcer-${var.target_env}-api-gateway"
-  retention_in_days = 90
-}
-
-module "api_gateway" {
-  source  = "terraform-aws-modules/apigateway-v2/aws"
-  version = "2.2.2"
-
-  name                   = "${var.application}-http-api"
-  description            = "HTTP API Gateway"
-  protocol_type          = "HTTP"
-  create_api_domain_name = false
-
-  domain_name                              = "bcer-${var.target_env}.api.hlth.gov.bc.ca"
-  domain_name_certificate_arn              = data.aws_acm_certificate.bcer_api_certificate.arn
-  default_stage_access_log_destination_arn = aws_cloudwatch_log_group.bcer_api_access_logs.arn
-
-  # default_route_settings = {
-  #   detailed_metrics_enabled = true
-  #   throttling_burst_limit   = 100
-  #   throttling_rate_limit    = 100
-  # }
-
-  integrations = {
-    "ANY /{proxy+}" = {
-      connection_type    = "VPC_LINK"
-      vpc_link           = "bcer-vpc"
-      integration_uri    = data.aws_alb_listener.front_end.arn
-      integration_type   = "HTTP_PROXY"
-      integration_method = "ANY"
-    }
-  }
-  vpc_links = {
-    bcer-vpc = {
-      name               = "${var.application}-vpc-link"
-      security_group_ids = [data.aws_security_group.web.id]
-      subnet_ids         = data.aws_subnets.web.ids
-    }
-  }
-
-  # tags = {
-  #   Name = "dev-api-new"
-  # }
-}
diff --git a/Infrastructure/aurora-v2.tf b/Infrastructure/aurora-v2.tf
index fb110983..8b5bd200 100644
--- a/Infrastructure/aurora-v2.tf
+++ b/Infrastructure/aurora-v2.tf
@@ -3,6 +3,7 @@ resource "random_pet" "bcer_subnet_group_name" {
   length = 2
 }
 
+
 resource "random_password" "bcer_master_password" {
   length  = 16
   special = true
@@ -29,6 +30,7 @@ resource "aws_db_subnet_group" "bcer_subnet_group" {
   tags = {
     managed-by = "terraform"
   }
+
   tags_all = {
     managed-by = "terraform"
   }
@@ -40,8 +42,7 @@ data "aws_rds_engine_version" "postgresql" {
 }
 
 module "aurora_postgresql_v2" {
-  source  = "terraform-aws-modules/rds-aurora/aws"
-  version = "7.7.1"
+  source = "terraform-aws-modules/rds-aurora/aws"
 
   name           = "${var.bcer_cluster_name}-${var.target_env}"
   engine         = data.aws_rds_engine_version.postgresql.engine
@@ -103,10 +104,6 @@ resource "aws_rds_cluster_parameter_group" "bcer_postgresql13" {
   tags = {
     managed-by = "terraform"
   }
-  parameter {
-    name  = "timezone"
-    value = var.timezone
-  }
 }
 
 resource "random_pet" "master_creds_secret_name" {
@@ -116,6 +113,7 @@ resource "random_pet" "master_creds_secret_name" {
 
 resource "aws_secretsmanager_secret" "bcer_mastercreds_secret" {
   name = random_pet.master_creds_secret_name.id
+
   tags = {
     managed-by = "terraform"
   }
@@ -129,9 +127,6 @@ resource "aws_secretsmanager_secret_version" "bcer_mastercreds_secret_version" {
     "password": "${random_password.bcer_master_password.result}"
   }
 EOF
-  lifecycle {
-    ignore_changes = [secret_string]
-  }
 }
 
 resource "random_password" "bcer_api_password" {
@@ -147,6 +142,7 @@ variable "bcer_api_username" {
   sensitive = true
 }
 
+
 resource "random_pet" "api_creds_secret_name" {
   prefix = "bcer-api-creds"
   length = 2
@@ -154,6 +150,7 @@ resource "random_pet" "api_creds_secret_name" {
 
 resource "aws_secretsmanager_secret" "bcer_apicreds_secret" {
   name = random_pet.api_creds_secret_name.id
+
   tags = {
     managed-by = "terraform"
   }
@@ -167,7 +164,4 @@ resource "aws_secretsmanager_secret_version" "bcer_apicreds_secret_version" {
     "password": "${random_password.bcer_api_password.result}"
   }
 EOF
-  lifecycle {
-    ignore_changes = [secret_string]
-  }
 }
diff --git a/Infrastructure/cloudfront.tf b/Infrastructure/cloudfront.tf
new file mode 100644
index 00000000..0efea329
--- /dev/null
+++ b/Infrastructure/cloudfront.tf
@@ -0,0 +1,119 @@
+locals {
+  s3_origin_id = "s3-bcer.example.com"
+}
+
+resource "aws_cloudfront_origin_access_identity" "origin_access_identity" {
+  comment = "bcer.example.com"
+}
+
+resource "aws_cloudfront_distribution" "s3_distribution" {
+  origin {
+    domain_name = aws_s3_bucket.static.bucket_regional_domain_name
+    origin_id   = local.s3_origin_id
+
+    s3_origin_config {
+      origin_access_identity = aws_cloudfront_origin_access_identity.origin_access_identity.cloudfront_access_identity_path
+    }
+  }
+
+  enabled             = true
+  is_ipv6_enabled     = true
+  comment             = "my-cloudfront"
+  default_root_object = "/retailer/index.html"
+
+  # Configure logging here if required
+  #logging_config {
+  #  include_cookies = false
+  #  bucket          = "mylogs.s3.amazonaws.com"
+  #  prefix          = "myprefix"
+  #}
+
+  # If you have a domain configured, use it here
+  #aliases = ["mywebsite.example.com", "s3-static-web-dev.example.com"]
+
+  default_cache_behavior {
+    allowed_methods  = ["GET", "HEAD"]
+    cached_methods   = ["GET", "HEAD"]
+    target_origin_id = local.s3_origin_id
+
+    forwarded_values {
+      query_string = false
+
+      cookies {
+        forward = "none"
+      }
+    }
+
+    viewer_protocol_policy = "allow-all"
+    min_ttl                = 0
+    default_ttl            = 3600
+    max_ttl                = 86400
+  }
+
+  # Cache behavior with precedence 0
+  ordered_cache_behavior {
+    cache_policy_id  = "658327ea-f89d-4fab-a63d-7e88639e58f6"
+    path_pattern     = "*"
+    allowed_methods  = ["GET", "HEAD", "OPTIONS"]
+    cached_methods   = ["GET", "HEAD", "OPTIONS"]
+    target_origin_id = local.s3_origin_id
+
+    min_ttl                = 0
+    default_ttl            = 86400
+    max_ttl                = 31536000
+    compress               = true
+    viewer_protocol_policy = "redirect-to-https"
+  }
+
+  price_class = "PriceClass_100"
+
+  restrictions {
+    geo_restriction {
+      restriction_type = "whitelist"
+      locations        = ["CA"]
+    }
+  }
+
+#  tags = {
+#    Environment = "development"
+#    Name        = "my-tag"
+#  }
+
+  viewer_certificate {
+    cloudfront_default_certificate = true
+  }
+}
+
+# To get the CloudFront URL if a domain/alias is not configured
+output "cloudfront_domain_name" {
+  value = aws_cloudfront_distribution.s3_distribution.domain_name
+}
+
+data "aws_iam_policy_document" "s3_policy" {
+  statement {
+    actions   = ["s3:GetObject"]
+    resources = ["${aws_s3_bucket.static.arn}/*"]
+
+    principals {
+      type        = "AWS"
+      identifiers = [aws_cloudfront_origin_access_identity.origin_access_identity.iam_arn]
+    }
+  }
+}
+
+resource "aws_s3_bucket_policy" "mybucket" {
+  bucket = aws_s3_bucket.static.id
+  policy = data.aws_iam_policy_document.s3_policy.json
+}
+
+resource "aws_s3_bucket_public_access_block" "mybucket" {
+  bucket = aws_s3_bucket.static.id
+
+  block_public_acls   = true
+  block_public_policy = true
+  //ignore_public_acls = true
+  //restrict_public_buckets = true
+}
\ No newline at end of file
diff --git a/Infrastructure/cloudwatch_alarms.tf b/Infrastructure/cloudwatch_alarms.tf
index e56b0604..bcb03312 100644
--- a/Infrastructure/cloudwatch_alarms.tf
+++ b/Infrastructure/cloudwatch_alarms.tf
@@ -2,13 +2,12 @@ resource "aws_sns_topic" "alerts" {
   name = "cloudwatch_alarms"
 }
 
-
 ##########################################
 ######## CloudWatch Alarm for ECS ########
 ##########################################
 
-resource "aws_cloudwatch_metric_alarm" "ecs_cpu_utilization" {
-  alarm_name          = "ecs-cpu-utilization-${var.application}"
+resource "aws_cloudwatch_metric_alarm" "ecs_cpu_utilization_alarm" {
+  alarm_name          = "ecs-cpu-utilization-alarm"
   comparison_operator = "GreaterThanOrEqualToThreshold"
   evaluation_periods  = "2"
   metric_name         = "CPUUtilization"
@@ -19,9 +18,10 @@
   alarm_description   = "This metric checks the CPU utilization of the ECS service"
   tags                = local.common_tags
 
+
   dimensions = {
-    ClusterName = aws_ecs_cluster.bcer_cluster.name
-    ServiceName = aws_ecs_service.main.name
+    ClusterName = var.cluster_name
+    ServiceName = var.ecs_service_name
   }
 
   alarm_actions = [
@@ -29,8 +29,9 @@
   ]
 }
 
+
 resource "aws_cloudwatch_metric_alarm" "ecs_memory_utilization" {
-  alarm_name          = "ecs-memory-utilization-${var.application}"
+  alarm_name          = "ecs-memory-utilization-alarm"
   comparison_operator = "GreaterThanOrEqualToThreshold"
   evaluation_periods  = "2"
   metric_name         = "MemoryUtilization"
@@ -41,9 +42,10 @@
   alarm_description   = "Alarm for ECS memory utilization exceeding 80%"
   tags                = local.common_tags
 
-  dimensions = {
-    ClusterName = aws_ecs_cluster.bcer_cluster.name
-    ServiceName = aws_ecs_service.main.name
+
+  dimensions = {
+    ClusterName = var.cluster_name
+    ServiceName = var.ecs_service_name
   }
 
   alarm_actions = [
@@ -51,8 +53,9 @@
   ]
 }
 
+
 resource "aws_cloudwatch_metric_alarm" "ecs_service_status" {
-  alarm_name          = "ecs-service-status-${var.application}"
+  alarm_name          = "ecs-service-status"
   comparison_operator = "LessThanThreshold"
   evaluation_periods  = "1"
   metric_name         = "ServiceState"
@@ -62,20 +65,22 @@
   threshold           = "1"
   tags                = local.common_tags
 
-  dimensions = {
-    ClusterName = aws_ecs_cluster.bcer_cluster.name
-    ServiceName = aws_ecs_service.main.name
+
+  dimensions = {
+    ClusterName = var.cluster_name
+    ServiceName = var.ecs_service_name
   }
-
+
   alarm_description = "Alarm for Amazon ECS service status"
-
+
   alarm_actions = [
     aws_sns_topic.alerts.arn
   ]
 }
 
+
 resource "aws_cloudwatch_metric_alarm" "ecs_network_traffic" {
-  alarm_name          = "ecs-network-traffic-${var.application}"
+  alarm_name          = "ecs-network-traffic"
   comparison_operator = "GreaterThanThreshold"
   evaluation_periods  = "1"
   metric_name         = "NetworkIn"
@@ -85,20 +90,22 @@
   threshold           = "100000"
   tags                = local.common_tags
 
-  dimensions = {
-    ClusterName = aws_ecs_cluster.bcer_cluster.name
-    ServiceName = aws_ecs_service.main.name
+
+  dimensions = {
+    ClusterName = var.cluster_name
+    ServiceName = var.ecs_service_name
  }
-
+
   alarm_description = "Alarm for Amazon ECS Network Traffic"
-
+
   alarm_actions = [
     aws_sns_topic.alerts.arn
   ]
 }
 
+
 resource "aws_cloudwatch_metric_alarm" "ecs_disk_usage" {
"ecs-disk-usage-${var.application}" + alarm_name = "ecs-disk-usage" comparison_operator = "GreaterThanThreshold" evaluation_periods = "1" metric_name = "TaskFilesystemUtilization" @@ -108,20 +115,21 @@ resource "aws_cloudwatch_metric_alarm" "ecs_disk_usage" { threshold = "80" tags = local.common_tags - dimensions = { - ClusterName = aws_ecs_cluster.bcer_cluster.name - ServiceName = aws_ecs_service.main.name + + dimensions = { + ClusterName = var.cluster_name + ServiceName = var.ecs_service_name } - + alarm_description = "Alarm for Amazon ECS task filesystem utilization" - + alarm_actions = [ aws_sns_topic.alerts.arn ] } resource "aws_cloudwatch_metric_alarm" "ecs_task_failures" { - alarm_name = "ecs-task-failures-${var.application}" + alarm_name = "ecs-task-failures" comparison_operator = "GreaterThanThreshold" evaluation_periods = "1" metric_name = "TaskFailures" @@ -131,12 +139,13 @@ resource "aws_cloudwatch_metric_alarm" "ecs_task_failures" { threshold = "1" tags = local.common_tags + dimensions = { - ClusterName = aws_ecs_cluster.bcer_cluster.name + ClusterName = var.cluster_name } - + alarm_description = "Alarm for Amazon ECS task failures" - + alarm_actions = [ aws_sns_topic.alerts.arn ] @@ -147,8 +156,9 @@ resource "aws_cloudwatch_metric_alarm" "ecs_task_failures" { ###### CloudWatch Alarm for Aurora ####### ########################################## -resource "aws_cloudwatch_metric_alarm" "aurora_cpu_utilization" { - alarm_name = "aurora-cpu-utilization-${var.application}" + +resource "aws_cloudwatch_metric_alarm" "aurora_cpu_alarm" { + alarm_name = "aurora-cpu-utilization" comparison_operator = "GreaterThanThreshold" evaluation_periods = "3" metric_name = "CPUUtilization" @@ -162,13 +172,15 @@ resource "aws_cloudwatch_metric_alarm" "aurora_cpu_utilization" { aws_sns_topic.alerts.arn ] + dimensions = { - DBInstanceIdentifier = "${var.db_instance_identifier}-${var.target_env}" + DBInstanceIdentifier = "${var.db_instance_identifier}-${var.target_env}" } } -resource "aws_cloudwatch_metric_alarm" "aurora_db_connections" { - alarm_name = "aurora-db-connections-${var.application}" + +resource "aws_cloudwatch_metric_alarm" "db_connections_alarm" { + alarm_name = "aurora-db-connections-alarm" comparison_operator = "GreaterThanOrEqualToThreshold" evaluation_periods = 2 metric_name = "DatabaseConnections" @@ -177,18 +189,19 @@ resource "aws_cloudwatch_metric_alarm" "aurora_db_connections" { statistic = "Average" threshold = 100 alarm_description = "Alarm when the number of database connections exceeds 100 for 2 consecutive periods" - + alarm_actions = [ aws_sns_topic.alerts.arn ] dimensions = { - DBInstanceIdentifier = "${var.db_instance_identifier}-${var.target_env}" + DBInstanceIdentifier = "${var.db_instance_identifier}-${var.target_env}" } } -resource "aws_cloudwatch_metric_alarm" "aurora_disk_queue_depth" { - alarm_name = "aurora-disk-queue-depth-${var.application}" + +resource "aws_cloudwatch_metric_alarm" "disk_queue_depth_alarm" { + alarm_name = "aurora-disk-queue-depth-alarm" comparison_operator = "GreaterThanThreshold" evaluation_periods = 2 metric_name = "DiskQueueDepth" @@ -202,7 +215,7 @@ resource "aws_cloudwatch_metric_alarm" "aurora_disk_queue_depth" { ] dimensions = { - DBInstanceIdentifier = "${var.db_instance_identifier}-${var.target_env}" + DBInstanceIdentifier = "${var.db_instance_identifier}-${var.target_env}" } } @@ -211,18 +224,18 @@ resource "aws_cloudwatch_metric_alarm" "aurora_disk_queue_depth" { ###### CloudWatch Alarm for Billing ####### 
 ##########################################
 
-resource "aws_cloudwatch_metric_alarm" "billing" {
-  alarm_name          = "billing-${var.application}"
+resource "aws_cloudwatch_metric_alarm" "billing_alarm" {
+  alarm_name          = "Billing Alert"
   comparison_operator = "GreaterThanOrEqualToThreshold"
   evaluation_periods  = "1"
   metric_name         = "EstimatedCharges"
   namespace           = "AWS/Billing"
   period              = "86400" # 1 day (in seconds)
   statistic           = "Maximum"
-  threshold           = "375"
-
+  threshold           = "375"
+
   alarm_description = "This alarm will be triggered if the estimated charges for the account exceed $3000 CAD within a 1-month period."
-
+
   alarm_actions = [
     aws_sns_topic.alerts.arn
   ]
diff --git a/Infrastructure/ec2.tf b/Infrastructure/ec2.tf
new file mode 100644
index 00000000..001acf90
--- /dev/null
+++ b/Infrastructure/ec2.tf
@@ -0,0 +1,29 @@
+# data "aws_ami" "ec2" {
+#   most_recent = true
+
+#   filter {
+#     name   = "name"
+#     values = ["amzn2-ami-kernel-5.10-hvm*"]
+#   }
+
+#   filter {
+#     name   = "virtualization-type"
+#     values = ["hvm"]
+#   }
+
+#   owners = ["137112412989"]
+# }
+
+# resource "aws_instance" "db_access_ec2" {
+#   ami                  = data.aws_ami.ec2.id
+#   instance_type        = "t2.micro"
+#   iam_instance_profile = "EC2-Default-SSM-AD-Role-ip"
+#   #subject to change
+#   security_groups      = [data.aws_security_group.data.id]
+#   #look into making this dynamic
+#   subnet_id            = data.aws_subnets.data.ids[0]
+#   user_data            = <
-VERSION= ENVIRONMENT= make package-build
+VERSION= ENVIRONMENT= make package-app
 
 # Example
-VERSION=2.2.0 ENVIRONMENT=development make package-build
+VERSION=2.2.0 ENVIRONMENT=development make package-app
 ```
 
-## Notes
-- Currently, we only deploy to on-prem servers. The documentation in each readme will reflect as such.
-
-## Running application in local
-
-- Run the following command at the root directory to prepare project for local development.
-
-  ```sh
-  make setup-local
-  ```
-  This command builds the shared component library and creates proper .env files for front end applications
-
-- Create a `.env` file in `packages/bcer-api/app` folder and populate it with necessary values. The list of environment
-variable can be found below.
-
-- Run the following command at the root directory to start all the applications.
-
-  ```sh
-  make run-local
-  ```
-  Note: Docker Desktop is required to use this command.
-  This will spin up all the applications and they can be accessed on the following urls
-  1. API - bcer-api: `http://localhost:4000`
-  2. Retailer Application - bcer-retailer-app: `http://localhost:3000`
-  3. Data Portal - bcer-data-portal: `http://localhost:3001`
-
-## Running E2E tests
-
-- Run the following command at the root directory to prepare project for test environment.
-
-  ```sh
-  make setup-test
-  ```
-
-- Run the following to start the applications in test mode
-
-  ```sh
-  make run-test
-  ```
-
-- Once the application is running, run one of the following commands
-
-  ```sh
-  npm run test:open
-  ```
-  This will open the cypress interactive window where each test can be run individually and actual behavior can be monitored
-
-  ```sh
-  npm run test:run
-  ```
-  This will run the tests without interactive window and the results will be printed on the screen.
-
-> Tests are setup to run in a sequential order.
-
-## List of envs for bcer-api
-
-```
-CLOSE_LOCATION_CRON_TIME=
-CRON_JOB_NAMES=
-
-# Map
-
-GA_KEY=
-BC_DIRECTION_API_KEY=
-MAP_BOX_ACCESS_TOKEN=
-MAP_BOX_TILE_LAYER=
-MAP_BOX_ATTRIBUTION=
-MAP_BOX_ID=
-
-# Text Notification
-
-ENABLE_TEXT_MESSAGES= <`true` to enable text messaging functionality>
-ENABLE_SUBSCRIPTION= <`true` to enable subscription to text messaging>
-TEXT_API_KEY=
-TEXT_GENERIC_NOTIFICATION_TEMPLATE_ID=
-TEXT_REFERENCE=
-TEXT_API_PROXY=
-TEXT_API_PROXY_PORT=
-
-# Date Config
-
-SALES_REPORT_END_DATE=
-NOI_EXPIRY_DATE=
-REPORTING_YEAR_START=
-SALES_REPORT_START_DATE=
-NOI_VALID_TILL=
-CRON_TIME_ZONE=
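
The removed README section above asks developers to create a populated `packages/bcer-api/app/.env` before running `make run-local`. As an illustrative sketch only, the snippet below shows the expected shape of such a file; every value is a hypothetical placeholder invented for this example (formats and job names are assumptions, not values taken from this repository or any of its environments):

```sh
# packages/bcer-api/app/.env -- hypothetical placeholder values only
CLOSE_LOCATION_CRON_TIME=0 2 * * *     # assumed to be a cron expression
CRON_JOB_NAMES=CLOSE_LOCATION          # assumed job name
CRON_TIME_ZONE=America/Vancouver

# Text notification (safe to leave disabled for local development)
ENABLE_TEXT_MESSAGES=false
ENABLE_SUBSCRIPTION=false

# Date config (MM-DD format is an assumption)
REPORTING_YEAR_START=10-01
SALES_REPORT_START_DATE=10-01
SALES_REPORT_END_DATE=01-15
NOI_EXPIRY_DATE=01-15
```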