diff --git a/CHANGELOG.md b/CHANGELOG.md
index 65ae4c6..3a1b12f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,6 +20,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - fixed the failing regex error in the eks module when env var SEEDFARMER_PARAMETER_REPLICATED_ECR_IMAGES_METADATA_S3_PATH is not set
 - refactored docker images replication module
 - fixed `network/basic-cdk` module integration test failure
+- fixed `integration/fsx-lustre-on-eks` module to support EKS clusters with non-public endpoints
 
 ### **Removed**
 
diff --git a/modules/integration/fsx-lustre-on-eks/README.md b/modules/integration/fsx-lustre-on-eks/README.md
index 026b17d..d902463 100644
--- a/modules/integration/fsx-lustre-on-eks/README.md
+++ b/modules/integration/fsx-lustre-on-eks/README.md
@@ -2,7 +2,7 @@
 
 This module creates an integration between Amazon FSx Lustre and an existing Amazon EKS cluster.
 
-## Prerequisistes
+## Prerequisites
 
 The FSX Lustre should be deployed already with a Security Group associated. They should be in the same VPC as the EKS cluster. FSx CSI driver/addon should already be deployed on the EKS cluster. We do not support VPC-Peering at this time.
 
@@ -14,7 +14,7 @@ The FSX Lustre should be deployed already with a Security Group associated. They
 
 #### Required
 
-- `eks-cluster-admin-role-arn` - the role that has kubectl access / admin access to the EKS clsuter
+- `eks-cluster-admin-role-arn` - the role that has kubectl access / admin access to the EKS cluster
 - `eks-cluster-name` - the name of the EKS cluster
 - `eks-oidc-arn` - the OpenID provider ARN of the cluster
 - `eks-cluster-security-group-id` - the EKS cluster security group to allow ingress to FSX
@@ -29,17 +29,19 @@ The FSX Lustre should be deployed already with a Security Group associated. They
   - only one of `namespace`, `namespace_ssm` or `namespace_secret` can be used
 - `namespace_secret` - the name of the SSM parameter that has the string value to be used as the Namespace that the PVC will be created in
   - only one of `namespace`, `namespace_ssm` or `namespace_secret` can be used
-  - if using this parameter, the unique entry to AWS SecretsManager is required with the following JSON format (username representing the namespace):
-  ```json
-  {
-    "username": "user1"
-  }
-  ```
+- if using this parameter, a unique entry in AWS Secrets Manager is required with the following JSON format (username representing the namespace):
+```json
+  {
+    "username": "user1"
+  }
+```
 
 #### Optional
 
-- `fsx-storage-capacity`: the amount (in GB) of storage, **defaults to 1200**, with the following guidelines:
-  - valid values are 1200, 2400 , and increments of 3600
+- `fsx-storage-capacity` - the amount (in GB) of storage, **defaults to 1200**, with the following guidelines:
+  - valid values are 1200, 2400, and increments of 3600
+- `vpc-id` - the ID of the VPC in which the cluster was created. **Required if the EKS cluster endpoint is not publicly accessible**
+- `private-subnet-ids` - the private subnet IDs of that VPC. **Required if the EKS cluster endpoint is not publicly accessible**
 
 #### Input Example
 
@@ -86,6 +88,19 @@ parameters:
     value: fs-066f18902985fdba0.fsx.us-east-1.amazonaws.com
   - name: dra_export_path
     value: "/ray/export" # Must start with a `/`.
+  # If the EKS cluster endpoint is not publicly accessible, add the VpcId and PrivateSubnetIds properties. Example:
+  - name: VpcId
+    valueFrom:
+      moduleMetadata:
+        group: base
+        name: networking
+        key: VpcId
+  - name: PrivateSubnetIds
+    valueFrom:
+      moduleMetadata:
+        group: base
+        name: networking
+        key: PrivateSubnetIds
 ```
 
 ### Module Metadata Outputs
diff --git a/modules/integration/fsx-lustre-on-eks/app.py b/modules/integration/fsx-lustre-on-eks/app.py
index 94dacb6..484ae8b 100644
--- a/modules/integration/fsx-lustre-on-eks/app.py
+++ b/modules/integration/fsx-lustre-on-eks/app.py
@@ -1,8 +1,9 @@
 # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0
 
+import json
 import os
-from typing import cast
+from typing import List, Optional, cast
 
 import aws_cdk
 from aws_cdk import App, CfnOutput
@@ -35,6 +36,9 @@ def _param(name: str) -> str:
 # This gets set in the deployspec...NOTE no PARAMETER prefix
 eks_namespace = os.getenv("EKS_NAMESPACE")
 
+vpc_id = os.getenv(_param("VPC_ID"), None)
+private_subnet_ids = json.loads(os.getenv(_param("PRIVATE_SUBNET_IDS"), "[]"))
+
 if not eks_namespace:
     raise ValueError("No EKS Namespace defined...error")
 
@@ -63,6 +67,8 @@ def _param(name: str) -> str:
         eks_handler_role_arn=cast(str, eks_handler_role_arn),
         eks_cluster_security_group_id=cast(str, eks_cluster_sg_id),
         eks_namespace=eks_namespace,
+        vpc_id=vpc_id,
+        private_subnet_ids=cast(Optional[List[str]], private_subnet_ids),
         env=aws_cdk.Environment(
             account=os.environ["CDK_DEFAULT_ACCOUNT"],
             region=os.environ["CDK_DEFAULT_REGION"],
diff --git a/modules/integration/fsx-lustre-on-eks/stack_fsx_eks.py b/modules/integration/fsx-lustre-on-eks/stack_fsx_eks.py
index ae1c005..d49eafc 100644
--- a/modules/integration/fsx-lustre-on-eks/stack_fsx_eks.py
+++ b/modules/integration/fsx-lustre-on-eks/stack_fsx_eks.py
@@ -1,7 +1,7 @@
 # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0
 
-from typing import Any, cast
+from typing import Any, List, Optional, cast
 
 import cdk_nag
 from aws_cdk import Aspects, Stack, Tags
@@ -33,6 +33,8 @@ def __init__(
         eks_handler_role_arn: str,
         eks_cluster_security_group_id: str,
         dra_export_path: str,
+        vpc_id: Optional[str],
+        private_subnet_ids: Optional[List[str]],
         **kwargs: Any,
     ) -> None:
         super().__init__(
@@ -57,6 +59,17 @@ def __init__(
 
         handler_role = iam.Role.from_role_arn(self, "HandlerRole", eks_handler_role_arn)
 
+        # kubectl VPC settings; required if the EKS cluster endpoint is not publicly accessible
+        vpc_config = (
+            {
+                "vpc": ec2.Vpc.from_lookup(self, "VPC", vpc_id=vpc_id),
+                "kubectl_private_subnet_ids": private_subnet_ids,
+                "kubectl_security_group_id": eks_cluster_security_group_id,
+            }
+            if vpc_id
+            else {}
+        )
+
         eks_cluster = eks.Cluster.from_cluster_attributes(
             self,
             f"{dep_mod}-eks-cluster",
@@ -65,6 +78,7 @@ def __init__(
             open_id_connect_provider=provider,
             kubectl_lambda_role=handler_role,
             kubectl_layer=KubectlV29Layer(self, "Kubectlv29Layer"),
+            **vpc_config,  # type: ignore
         )
 
         fsx_security_group = ec2.SecurityGroup.from_security_group_id(self, "FSXSecurityGroup", fsx_security_group_id)
diff --git a/modules/integration/fsx-lustre-on-eks/tests/test_stack.py b/modules/integration/fsx-lustre-on-eks/tests/test_stack.py
index 21ca183..3fbbc80 100644
--- a/modules/integration/fsx-lustre-on-eks/tests/test_stack.py
+++ b/modules/integration/fsx-lustre-on-eks/tests/test_stack.py
@@ -46,6 +46,43 @@ def test_synthesize_stack(stack_defaults):
         eks_handler_role_arn="arn:aws:iam::123456789012:role/eks-handler-role",
         eks_namespace="service.example.com",
         dra_export_path="/test/path",
+        vpc_id=None,
+        private_subnet_ids=None,
+        env=cdk.Environment(
+            account=os.environ["CDK_DEFAULT_ACCOUNT"],
+            region=os.environ["CDK_DEFAULT_REGION"],
+        ),
+    )
+
+
+def test_synthesize_stack_vpc(stack_defaults):
+    import stack_fsx_eks
+
+    app = cdk.App()
+    proj_name = "addf"
+    dep_name = "test-deployment"
+    mod_name = "test-module"
+
+    stack_fsx_eks.FSXFileStorageOnEKS(
+        scope=app,
+        id=f"{proj_name}-{dep_name}-{mod_name}",
+        project_name=proj_name,
+        deployment_name=dep_name,
+        module_name=mod_name,
+        eks_cluster_name="myekscluster",
+        eks_admin_role_arn="arn:aws:iam::123456789012:role/eks-admin-role",
+        eks_oidc_arn="arn:aws:iam::123456789012:oidc-provider/server.example.com",
+        eks_cluster_security_group_id="sg-0123456",
+        fsx_file_system_id="foobar",
+        fsx_security_group_id="sg-0123456",
+        fsx_mount_name="foobar",
+        fsx_dns_name="example.com",
+        fsx_storage_capacity="1200Gi",
+        eks_handler_role_arn="arn:aws:iam::123456789012:role/eks-handler-role",
+        eks_namespace="service.example.com",
+        dra_export_path="/test/path",
+        vpc_id="vpc-123",
+        private_subnet_ids=["sub01", "sub02"],
         env=cdk.Environment(
             account=os.environ["CDK_DEFAULT_ACCOUNT"],
             region=os.environ["CDK_DEFAULT_REGION"],
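
How the two new inputs travel through the module: `app.py` reads them from seedfarmer's parameter environment variables (list values arrive JSON-encoded), and the stack unpacks the resulting optional kubectl settings into `eks.Cluster.from_cluster_attributes`. Below is a minimal, self-contained sketch of that flow, assuming the usual `SEEDFARMER_PARAMETER_` prefix produced by the module's `_param()` helper; the VPC and subnet values are hypothetical.

```python
# Sketch only (not part of the patch); values below are hypothetical.
import json
import os


def _param(name: str) -> str:
    # Mirrors app.py's helper: seedfarmer exposes module parameters as
    # SEEDFARMER_PARAMETER_<NAME> environment variables.
    return f"SEEDFARMER_PARAMETER_{name}"


# What the deployspec would set for a cluster with a non-public endpoint:
os.environ[_param("VPC_ID")] = "vpc-0123456789abcdef0"
os.environ[_param("PRIVATE_SUBNET_IDS")] = '["subnet-aaa", "subnet-bbb"]'

# List parameters arrive as JSON strings; the "[]" fallback keeps the module
# working when the parameter is omitted (public-endpoint clusters).
vpc_id = os.getenv(_param("VPC_ID"))
private_subnet_ids = json.loads(os.getenv(_param("PRIVATE_SUBNET_IDS"), "[]"))

# The stack uses the same conditional-kwargs pattern: build the kubectl VPC
# settings only when a VPC was supplied, then pass the (possibly empty) dict
# to eks.Cluster.from_cluster_attributes via **vpc_config.
vpc_config = {"kubectl_private_subnet_ids": private_subnet_ids} if vpc_id else {}

print(vpc_config)  # {'kubectl_private_subnet_ids': ['subnet-aaa', 'subnet-bbb']}
```

Because `**vpc_config` expands to nothing when `vpc-id` is not supplied, existing deployments against publicly accessible cluster endpoints synthesize exactly as before.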