diff --git a/.gitignore b/.gitignore
index f4e8521..78114dc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,6 +9,8 @@
 *.tfvars
 openshift_pull_secret.json
 .terraform.lock.hcl
+*.pem
+installer-files/**
 # OS X files
 .DS_Store
\ No newline at end of file
diff --git a/README.md b/README.md
index 8570140..04def0d 100644
--- a/README.md
+++ b/README.md
@@ -30,17 +30,6 @@ This project uses mainly Terraform as infrastructure management and installation
     git --version
     ```
 
-3. Install OpenShift command line `oc` cli:
-
-   ```bash
-   wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/openshift-client-linux-4.x.xx.tar.gz
-   tar -xvf openshift-client-linux-4.x.xx.tar.gz
-   chmod u+x oc kubectl
-   sudo mv oc /usr/local/bin
-   sudo mv kubectl /usr/local/bin
-   oc version
-   ```
-
 4. Install wget command:
 
    - MacOS:
@@ -54,8 +43,6 @@ This project uses mainly Terraform as infrastructure management and installation
     zypper install wget
     ```
 
-5. Install jq: see [https://stedolan.github.io/jq/download/](https://stedolan.github.io/jq/download/)
-
 6. Get the Terraform code
 
    ```bash
@@ -123,37 +110,33 @@ This project installs the OpenShift 4 in several stages where each stage automat
 cluster_name = "ocp4"
 base_domain = "example.com"
 openshift_pull_secret = "./openshift_pull_secret.json"
-openshift_installer_url = "https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.6.28"
+openshift_version = "4.6.28"
 
-aws_access_key_id = "AAAA"
-aws_secret_access_key = "AbcDefGhiJkl"
-aws_ami = "ami-06f85a7940faa3217"
 aws_extra_tags = {
     "owner" = "admin"
     }
-aws_azs = [
-    "us-east-1a",
-    "us-east-1b",
-    "us-east-1c"
-    ]
 aws_region = "us-east-1"
 aws_publish_strategy = "External"
 ```
 
 |name | required | description and value |
 |----------------|------------|--------------|
-| `cluster_name` | yes | The name of the OpenShift cluster you will install |
-| `base_domain` | yes | The domain that has been created in Route53 public hosted zone |
+| `cluster_name` | yes | The name of the OpenShift cluster you will install |
+| `base_domain` | yes | The domain that has been created in a Route53 public hosted zone |
 | `openshift_pull_secret` | no | The value refers to a file name that contain downloaded pull secret from https://cloud.redhat.com/openshift/pull-secret; the default name is `openshift_pull_secret.json` |
-| `openshift_installer_url` | no | The URL to the download site for Red Hat OpenShift installation and client codes. |
+| `openshift_version` | no | The OpenShift version to be installed; defaults to `4.6.28` |
 | `aws_region` | yes | AWS region that the VPC will be created in. By default, uses `us-east-2`. Note that for an HA installation, the AWS selected region should have at least 3 availability zones. |
 | `aws_extra_tags` | no | AWS tag to identify a resource for example owner:myname |
-| `aws_ami` | yes | Red Hat CoreOS ami for your region (see [here](https://docs.openshift.com/container-platform/4.6/installing/installing_aws/installing-aws-user-infra.html#installation-aws-user-infra-rhcos-ami_installing-aws-user-infra)). Other platforms images information can be found [here](https://github.com/openshift/installer/blob/master/data/data/rhcos.json) |
-| `aws_secret_access_key` | yes | adding aws_secret_access_key to the cluster |
-| `aws_access_key_id` | yes | adding aws_access_key_id to the cluster |
-| `aws_azs` | yes | list of availability zones to deploy VMs |
+| `aws_azs` | no | List of availability zones to deploy VMs into; defaults to zones [`a`, `b`, `c`] of the selected region |
+| `openshift_byo_dns` | no | Whether to skip creating DNS resources (you still need a public hosted zone defined) |
+| `openshift_ssh_key` | no | Path to a specific public SSH key to use for the installation |
+| `openshift_additional_trust_bundle` | no | Path to a file with additional CA certificates for accessing resources, e.g. a proxy or a mirror registry |
 | `aws_publish_strategy` | no | Whether to publish the API endpoint externally - Default: "External" |
 | `airgapped` | no | A map with enabled (true/false) and repository name - This must be used with `aws_publish_strategy` of `Internal` |
+| `proxy_config` | no | Not yet implemented |
+| `use_ipv4` | no | Not yet implemented |
+| `use_ipv6` | no | Not yet implemented |
+
 See [Terraform documentation](https://www.terraform.io/intro/getting-started/variables.html) for the format of this file.
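Together with the sample file above, the optional inputs from the table can be exercised in the same `terraform.tfvars`. A sketch, where every value is an illustrative placeholder rather than a default:

```hcl
# terraform.tfvars -- optional inputs; all values below are illustrative placeholders
aws_azs = ["us-east-1a", "us-east-1b", "us-east-1c"] # override the computed region a/b/c default

openshift_byo_dns                 = false                 # true = manage the DNS records yourself
openshift_ssh_key                 = "./openshift_rsa.pub" # public key to inject into the nodes
openshift_additional_trust_bundle = "./ca-bundle.pem"     # extra CA certs (proxy or mirror registry)
```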
diff --git a/config.tf b/config.tf
index 276a936..e9706a6 100644
--- a/config.tf
+++ b/config.tf
@@ -43,7 +43,56 @@ variable "openshift_pull_secret" {
   description = "File containing pull secret - get it from https://cloud.redhat.com/openshift/install/pull-secret"
 }
 
-variable "openshift_installer_url" {
-  type        = string
-  description = "URL of the appropriate OpenShift installer under https://mirror.openshift.com/pub/openshift-v4/clients/ocp/"
+variable "use_ipv4" {
+  type        = bool
+  default     = true
+  description = "Not implemented yet"
+}
+
+variable "use_ipv6" {
+  type        = bool
+  default     = false
+  description = "Not implemented yet"
+}
+
+variable "openshift_version" {
+  type    = string
+  default = "4.6.28"
+}
+
+variable "airgapped" {
+  type = map(string)
+  default = {
+    enabled    = false
+    repository = ""
+  }
+}
+
+variable "proxy_config" {
+  type        = map(string)
+  description = "Not implemented yet"
+  default = {
+    enabled    = false
+    httpProxy  = "http://user:password@ip:port"
+    httpsProxy = "http://user:password@ip:port"
+    noProxy    = "ip1,ip2,ip3,.example.com,cidr/mask"
+  }
+}
+
+variable "openshift_additional_trust_bundle" {
+  description = "Path to a file with all your additional CA certificates"
+  type        = string
+  default     = ""
+}
+
+variable "openshift_ssh_key" {
+  description = "Path to the SSH public key file to use for the OpenShift installation"
+  type        = string
+  default     = ""
+}
+
+variable "openshift_byo_dns" {
+  description = "Do not deploy any public or private DNS zone into Route53"
+  type        = bool
+  default     = false
 }
\ No newline at end of file
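Since `airgapped` is typed as `map(string)`, a disconnected install is switched on by overriding both keys in `terraform.tfvars`; per the README table it must be paired with an `Internal` publish strategy. A sketch (the mirror hostname is an illustrative placeholder):

```hcl
# terraform.tfvars -- air-gapped install against a pre-populated mirror
airgapped = {
  enabled    = true # stored as the string "true"; Terraform converts it back in conditionals
  repository = "mirror.example.com:5000/ocp4/openshift4"
}

aws_publish_strategy = "Internal"
```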
diff --git a/delocp.sh b/delocp.sh
deleted file mode 100755
index 6134c70..0000000
--- a/delocp.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/bash
-
-clusterId=$1
-
-if [ -z $clusterId ]; then
-  exit 99
-fi
-
-terraform destroy -auto-approve &
-
-sleep 10
-workers=$(aws ec2 describe-instances --filters Name="tag:kubernetes.io/cluster/${clusterId}",Values="owned" --query 'Reservations[].Instances[].[InstanceId, Tags[?Key==`Name`] | [0].Value]' --output text | grep worker | cut -d$'\t' -f1)
-
-aws ec2 terminate-instances --instance-ids ${workers}
-
-vpcid=$(grep vpc terraform.tfstate | grep vpc_id | grep vpc- | head -1 | cut -d"\"" -f4)
-elbname=$(aws elb describe-load-balancers --query 'LoadBalancerDescriptions[].[LoadBalancerName,VPCId]' --output text | cut -d$'\t' -f1)
-aws elb delete-load-balancer --load-balancer-name ${elbname}
-
-sleep 300
-
-sg=$(aws ec2 describe-security-groups --filters Name="tag:kubernetes.io/cluster/${clusterId}",Values="owned" --query 'SecurityGroups[].[GroupId,GroupName]' --output text | grep "k8s-elb" | cut -d$'\t' -f1)
-
-aws ec2 delete-security-group --group-id ${sg}
-
-sleep 60
-
-aws s3 ls | grep ${clusterId} | awk '{print "aws s3 rb —force s3://"$3}' | bash
-
-aws iam list-users --query 'Users[].[UserName,UserId]' --output text | grep ${clusterId}
-
-aws iam list-users --query 'Users[].[UserName,UserId]' --output text | grep ${clusterId} | awk '{print "aws iam delete-user-policy --user-name "$1" --policy-name "$1"-policy"}' | bash
-
-aws iam list-users --query 'Users[].[UserName,UserId]' --output text | grep ${clusterId} | awk '{print "aws iam delete-access-key --user-name "$1" --access-key-id $(aws iam list-access-keys --user-name "$1" --query 'AccessKeyMetadata[].AccessKeyId' --output text)"}' | bash
-
-aws iam list-users --query 'Users[].[UserName,UserId]' --output text | grep ${clusterId} | awk '{print "aws iam delete-user --user-name "$1}' | bash
-
-exit 0
diff --git a/helper.tf b/helper.tf
new file mode 100644
index 0000000..9b29b5d
--- /dev/null
+++ b/helper.tf
@@ -0,0 +1,12 @@
+locals {
+  major_version = join(".", slice(split(".", var.openshift_version), 0, 2))
+  aws_azs       = (var.aws_azs != null) ? var.aws_azs : tolist([join("", [var.aws_region, "a"]), join("", [var.aws_region, "b"]), join("", [var.aws_region, "c"])])
+  rhcos_image   = lookup(lookup(lookup(jsondecode(data.http.images.body), "amis"), var.aws_region), "hvm")
+}
+
+data "http" "images" {
+  url = "https://raw.githubusercontent.com/openshift/installer/release-${local.major_version}/data/data/rhcos.json"
+  request_headers = {
+    Accept = "application/json"
+  }
+}
\ No newline at end of file
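`helper.tf` is what replaces the removed `aws_ami` input: it derives the release branch from `openshift_version` and reads the installer's pinned `rhcos.json`, which maps regions to AMIs roughly as `{"amis": {"us-east-1": {"hvm": "ami-..."}}}` (AMI value illustrative). The nested `lookup` calls can also be written with index syntax; an equivalent sketch, not part of this diff:

```hcl
locals {
  # Same value as helper.tf's triple lookup: decode rhcos.json and index
  # straight into amis -> <region> -> hvm.
  rhcos_image_alt = jsondecode(data.http.images.body)["amis"][var.aws_region]["hvm"]
}
```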
"${var.infrastructure_id}" : "${var.clustername}-${random_id.clusterid.hex}"}" -# infrastructure_id = -#} - resource "null_resource" "openshift_installer" { provisioner "local-exec" { command = < ${path.module}/infraID" - } - - provisioner "local-exec" { - when = destroy - command = "rm -rf ${path.module}/infraID" - } -} - - -data "local_file" "infrastructureID" { - depends_on = [ - null_resource.extractInfrastructureID - ] - filename = "${path.module}/infraID" - -} - resource "null_resource" "delete_aws_resources" { depends_on = [ null_resource.cleanup @@ -244,6 +147,7 @@ resource "null_resource" "delete_aws_resources" { provisioner "local-exec" { when = destroy command = "${path.module}/aws_cleanup.sh" + #command = "${path.root}/installer-files//openshift-install --dir=${path.root}/installer-files/temp destroy cluster" } } @@ -255,22 +159,22 @@ resource "null_resource" "cleanup" { provisioner "local-exec" { when = destroy - command = "rm -rf ${path.module}/temp" + command = "rm -rf ${path.root}/installer-files//temp" } provisioner "local-exec" { when = destroy - command = "rm -f ${path.module}/openshift-install" + command = "rm -f ${path.root}/installer-files//openshift-install" } provisioner "local-exec" { when = destroy - command = "rm -f ${path.module}/oc" + command = "rm -f ${path.root}/installer-files//oc" } provisioner "local-exec" { when = destroy - command = "rm -f ${path.module}/kubectl" + command = "rm -f ${path.root}/installer-files//kubectl" } } @@ -279,7 +183,7 @@ data "local_file" "bootstrap_ign" { null_resource.generate_ignition_config ] - filename = "${path.module}/temp/bootstrap.ign" + filename = "${path.root}/installer-files//temp/bootstrap.ign" } data "local_file" "master_ign" { @@ -287,7 +191,7 @@ data "local_file" "master_ign" { null_resource.generate_ignition_config ] - filename = "${path.module}/temp/master.ign" + filename = "${path.root}/installer-files//temp/master.ign" } data "local_file" "worker_ign" { @@ -295,39 +199,17 @@ data "local_file" "worker_ign" { null_resource.generate_ignition_config ] - filename = "${path.module}/temp/worker.ign" + filename = "${path.root}/installer-files//temp/worker.ign" } resource "null_resource" "get_auth_config" { depends_on = [null_resource.generate_ignition_config] provisioner "local-exec" { when = create - command = "cp ${path.module}/temp/auth/* ${path.root}/ " + command = "cp ${path.root}/installer-files//temp/auth/* ${path.root}/ " } provisioner "local-exec" { when = destroy command = "rm ${path.root}/kubeconfig ${path.root}/kubeadmin-password " } } - -resource "local_file" "airgapped_registry_upgrades" { - count = var.airgapped["enabled"] ? 1 : 0 - filename = "${path.module}/temp/openshift/99_airgapped_registry_upgrades.yaml" - depends_on = [ - null_resource.generate_manifests, - ] - content = < (var.infra_count % length(var.aws_worker_availability_zones)) ? 0 : 1)] +} + +data "local_file" "cabundle" { + count = var.openshift_additional_trust_bundle == "" ? 
+
+data "local_file" "cabundle" {
+  count    = var.openshift_additional_trust_bundle == "" ? 0 : 1
+  filename = var.openshift_additional_trust_bundle
+}
+
+data "template_file" "install_config_yaml" {
+  template = <<-EOF
+apiVersion: v1
+baseDomain: ${var.domain}
+compute:
+- hyperthreading: Enabled
+  name: worker
+  replicas: 3
+  platform:
+    aws:
+      rootVolume:
+        iops: ${var.aws_worker_root_volume_iops}
+        size: ${var.aws_worker_root_volume_size}
+        type: ${var.aws_worker_root_volume_type}
+      type: ${var.aws_worker_instance_type}
+      zones:
+      %{ for zone in var.aws_worker_availability_zones}
+      - ${zone}%{ endfor }
+controlPlane:
+  hyperthreading: Enabled
+  name: master
+  replicas: ${var.master_count}
+metadata:
+  name: ${var.clustername}
+networking:
+  clusterNetworks:
+  - cidr: ${var.cluster_network_cidr}
+    hostPrefix: ${var.cluster_network_host_prefix}
+  machineCIDR: ${var.vpc_cidr_block}
+  networkType: OpenShiftSDN
+  serviceNetwork:
+  - ${var.service_network_cidr}
+platform:
+  aws:
+    region: ${var.aws_region}
+pullSecret: '${file(var.openshift_pull_secret)}'
+sshKey: '${local.public_ssh_key}'
+%{if var.airgapped["enabled"]}imageContentSources:
+- mirrors:
+  - ${var.airgapped["repository"]}
+  source: quay.io/openshift-release-dev/ocp-release
+- mirrors:
+  - ${var.airgapped["repository"]}
+  source: quay.io/openshift-release-dev/ocp-v4.0-art-dev%{endif}
+%{if var.proxy_config["enabled"]}proxy:
+  httpProxy: ${var.proxy_config["httpProxy"]}
+  httpsProxy: ${var.proxy_config["httpsProxy"]}
+  noProxy: ${var.proxy_config["noProxy"]}%{endif}
+%{if var.openshift_additional_trust_bundle != ""}additionalTrustBundle: |
+  ${indent(2, data.local_file.cabundle[0].content)}%{endif}
+EOF
+}
+
+resource "local_file" "install_config" {
+  content  = data.template_file.install_config_yaml.rendered
+  filename = "${path.root}/installer-files/install-config.yaml"
+}
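The `install-config.yaml` content is produced by Terraform's template engine: the `%{ for }` directive emits one YAML list item per zone, and the `%{ if }` directives keep the `imageContentSources`, `proxy`, and `additionalTrustBundle` stanzas out of the rendered file unless the matching variable is set. A minimal sketch of the loop mechanism (zone values illustrative):

```hcl
locals {
  zones = ["us-east-1a", "us-east-1b"]

  # Renders as:
  #   zones:
  #   - us-east-1a
  #   - us-east-1b
  zones_yaml = <<-EOF
    zones:
    %{~ for z in local.zones }
    - ${z}
    %{~ endfor }
  EOF
}
```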
+
+# when the subnets are provided, modify the worker machinesets
+resource "null_resource" "manifest_cleanup_worker_machineset" {
+  depends_on = [
+    null_resource.generate_manifests
+  ]
+  count = var.aws_private_subnets != null ? length(var.aws_private_subnets) : 0
+  provisioner "local-exec" {
+    command = "rm -f ${path.root}/installer-files/temp/openshift/99_openshift-cluster-api_worker-machineset-${count.index}.yaml"
+  }
+}
+
+resource "local_file" "create_worker_machineset" {
+  depends_on = [
+    null_resource.manifest_cleanup_worker_machineset
+  ]
+  count           = var.aws_private_subnets != null ? length(var.aws_private_subnets) : 0
+  file_permission = "0644"
+  filename        = "${path.root}/installer-files/temp/openshift/99_openshift-cluster-api_worker-machineset-${count.index}.yaml"
+  content = < ${path.root}/installer-files/infraID"
+  }
+
+  provisioner "local-exec" {
+    when    = destroy
+    command = "rm -rf ${path.root}/installer-files/infraID"
+  }
+}
+
+data "local_file" "infrastructureID" {
+  depends_on = [
+    null_resource.extractInfrastructureID
+  ]
+  filename = "${path.root}/installer-files/infraID"
+}
+
+resource "local_file" "airgapped_registry_upgrades" {
+  count    = var.airgapped["enabled"] ? 1 : 0
+  filename = "${path.root}/installer-files/temp/openshift/99_airgapped_registry_upgrades.yaml"
+  depends_on = [
+    null_resource.generate_manifests,
+  ]
+  content = < 0 ? length(var.aws_worker_availability_zones) : 0
+  file_permission = "0644"
+  filename        = "${path.root}/installer-files/temp/openshift/99_openshift-cluster-api_infra-machineset-${count.index}.yaml"
+  content = < 0 ? 1 : 0
+  content  = data.template_file.cluster-monitoring-configmap.rendered
+  filename = "${path.root}/installer-files/temp/openshift/99_cluster-monitoring-configmap.yml"
+  depends_on = [
+    null_resource.generate_manifests,
+  ]
+}
+
+data "template_file" "configure-image-registry-job-serviceaccount" {
+  template = < 0 ? 1 : 0
+  content  = data.template_file.configure-image-registry-job-serviceaccount.rendered
+  filename = "${path.root}/installer-files/temp/openshift/99_configure-image-registry-job-serviceaccount.yml"
+  depends_on = [
+    null_resource.generate_manifests,
+  ]
+}
+
+data "template_file" "configure-image-registry-job-clusterrole" {
+  template = < 0 ? 1 : 0
+  content  = data.template_file.configure-image-registry-job-clusterrole.rendered
+  filename = "${path.root}/installer-files/temp/openshift/99_configure-image-registry-job-clusterrole.yml"
+  depends_on = [
+    null_resource.generate_manifests,
+  ]
+}
+
+data "template_file" "configure-image-registry-job-clusterrolebinding" {
+  template = < 0 ? 1 : 0
+  content  = data.template_file.configure-image-registry-job-clusterrolebinding.rendered
+  filename = "${path.root}/installer-files/temp/openshift/99_configure-image-registry-job-clusterrolebinding.yml"
+  depends_on = [
+    null_resource.generate_manifests,
+  ]
+}
+
+data "template_file" "configure-image-registry-job" {
+  template = </dev/null 2>&1; do sleep 1;done;/usr/bin/oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{\"spec\": {\"nodeSelector\": {\"node-role.kubernetes.io/infra\": \"\"}}}'"]
+      restartPolicy: Never
+EOF
+}
+
+resource "local_file" "configure-image-registry-job" {
+  count    = var.infra_count > 0 ? 1 : 0
+  content  = data.template_file.configure-image-registry-job.rendered
+  filename = "${path.root}/installer-files/temp/openshift/99_configure-image-registry-job.yml"
+  depends_on = [
+    null_resource.generate_manifests,
+  ]
+}
+
+data "template_file" "configure-ingress-job-serviceaccount" {
+  template = < 0 ? 1 : 0
+  content  = data.template_file.configure-ingress-job-serviceaccount.rendered
+  filename = "${path.root}/installer-files/temp/openshift/99_configure-ingress-job-serviceaccount.yml"
+  depends_on = [
+    null_resource.generate_manifests,
+  ]
+}
+
+data "template_file" "configure-ingress-job-clusterrole" {
+  template = < 0 ? 1 : 0
+  content  = data.template_file.configure-ingress-job-clusterrole.rendered
+  filename = "${path.root}/installer-files/temp/openshift/99_configure-ingress-job-clusterrole.yml"
+  depends_on = [
+    null_resource.generate_manifests,
+  ]
+}
+
+data "template_file" "configure-ingress-job-clusterrolebinding" {
+  template = < 0 ? 1 : 0
+  content  = data.template_file.configure-ingress-job-clusterrolebinding.rendered
+  filename = "${path.root}/installer-files/temp/openshift/99_configure-ingress-job-clusterrolebinding.yml"
+  depends_on = [
+    null_resource.generate_manifests,
+  ]
+}
+
+data "template_file" "configure-ingress-job" {
+  template = </dev/null 2>&1; do sleep 1;done;/usr/bin/oc patch ingresscontrollers.operator.openshift.io default -n openshift-ingress-operator --type merge --patch '{\"spec\": {\"nodePlacement\": {\"nodeSelector\": {\"matchLabels\": {\"node-role.kubernetes.io/infra\": \"\"}}}}}'"]
+      restartPolicy: Never
+EOF
+}
+
+resource "local_file" "configure-ingress-job" {
+  count    = var.infra_count > 0 ? 1 : 0
+  content  = data.template_file.configure-ingress-job.rendered
+  filename = "${path.root}/installer-files/temp/openshift/99_configure-ingress-job.yml"
+  depends_on = [
+    null_resource.generate_manifests,
+  ]
+}
diff --git a/install/variables.tf b/install/variables.tf
index 4e0ac09..6fac988 100644
--- a/install/variables.tf
+++ b/install/variables.tf
@@ -58,13 +58,45 @@ EOF
 }
-
 variable "master_count" {
   type        = number
   description = "The number of master nodes."
   default     = 3
 }
 
+variable "infra_count" {
+  type        = number
+  description = "The number of infra nodes."
+  default     = 0
+}
+
+variable "aws_infra_instance_type" {
+  type        = string
+  description = "Instance type for the infra node(s). Example: `m4.large`."
+  default     = "m5.xlarge"
+}
+
+variable "aws_infra_root_volume_type" {
+  type        = string
+  description = "The type of volume for the root block device of infra nodes."
+}
+
+variable "aws_infra_root_volume_size" {
+  type        = string
+  description = "The size of the volume in gigabytes for the root block device of infra nodes."
+}
+
+variable "aws_infra_root_volume_iops" {
+  type = string
+
+  description = <