diff --git a/modules/aks/README.md b/modules/aks/README.md
index 0219b2e..1b76d7f 100644
--- a/modules/aks/README.md
+++ b/modules/aks/README.md
@@ -1,12 +1,12 @@
 ## Providers
 
-| Name       | Version   |
-| ---------- | --------- |
-| kubernetes | = 1.13.3  |
-| azuread    | >= 1.0.0  |
-| azurerm    | >= 2.33.0 |
-| null       | >= 3.0.0  |
-| random     | >= 3.0.0  |
+| Name       | Version  |
+| ---------- | -------- |
+| kubernetes | = 1.13.3 |
+| azuread    | = 1.4.0  |
+| azurerm    | = 2.48.0 |
+| null       | = 3.1.0  |
+| random     | = 3.1.0  |
 
 ## Inputs
 
@@ -14,7 +14,7 @@
 | ------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | ------- | :------: |
 | cluster_name | Unique cluster name. Used in multiple resources to identify your cluster resources | `string` | n/a | yes |
 | cluster_version | Kubernetes Cluster Version. Look at the cloud providers documentation to discover available versions. EKS example -> 1.16, GKE example -> 1.16.8-gke.9 | `string` | n/a | yes |
-| dmz_cidr_range | Network CIDR range from where cluster control plane will be accessible | `string` | n/a | yes |
+| dmz_cidr_range | Network CIDR range from where cluster control plane will be accessible | `string` or `list(string)` | n/a | yes |
 | network | Network where the Kubernetes cluster will be hosted | `string` | n/a | yes |
 | resource_group_name | Resource group name where every resource will be placed. Required only in AKS installer (*) | `string` | n/a | yes |
 | ssh_public_key | Cluster administrator public ssh key. Used to access cluster nodes with the operator_ssh_user | `string` | n/a | yes |
diff --git a/modules/aks/cluster.tf b/modules/aks/cluster.tf
index 6c6b9ae..99b7ecc 100644
--- a/modules/aks/cluster.tf
+++ b/modules/aks/cluster.tf
@@ -1,3 +1,71 @@
+# Control Plane subnet and security Groups
+data "azurerm_subnet" "aks" {
+  name                 = var.node_pools[0].subnetworks != null && length(var.node_pools[0].subnetworks) > 0 ? var.node_pools[0].subnetworks[0] : var.subnetworks[0]
+  virtual_network_name = var.network
+  resource_group_name  = data.azurerm_resource_group.aks.name
+}
+
+# Node Pool subnets and security Groups
+data "azurerm_subnet" "node_pools" {
+  count                = length(var.node_pools)
+  name                 = var.node_pools[count.index].subnetworks != null && length(var.node_pools[count.index].subnetworks) > 0 ? var.node_pools[count.index].subnetworks[0] : var.subnetworks[0]
+  virtual_network_name = var.network
+  resource_group_name  = data.azurerm_resource_group.aks.name
+}
+
+# Security Rule enabling local.parsed_dmz_cidr_range to access the control plane endpoint. Cloud Installers v1.5.0
+resource "azurerm_network_security_rule" "aks" {
+  name                          = "${var.cluster_name} - Control Plane"
+  priority                      = 200
+  direction                     = "Inbound"
+  access                        = "Allow"
+  protocol                      = "Tcp"
+  source_port_range             = "*"
+  destination_port_range        = "443" # Control plane
+  source_address_prefixes       = local.parsed_dmz_cidr_range
+  destination_address_prefixes  = data.azurerm_subnet.aks.address_prefixes
+  resource_group_name           = data.azurerm_resource_group.aks.name
+  network_security_group_name   = element(split("/", data.azurerm_subnet.aks.network_security_group_id), length(split("/", data.azurerm_subnet.aks.network_security_group_id)) - 1)
+}
+
+# Custom firewall rules v1.5.0 in the cloud installers
+locals {
+  azurerm_network_security_rules = flatten([
+    [for nodePool in var.node_pools : [
+      [for rule in nodePool.additional_firewall_rules : {
+        name                          = rule.name
+        priority                      = 300
+        direction                     = rule.direction == "ingress" ? "Inbound" : "Outbound"
+        access                        = "Allow"
+        protocol                      = rule.protocol
+        source_port_range             = "*"
+        destination_port_range        = rule.ports
+        source_address_prefixes       = rule.direction == "ingress" ? [rule.cidr_block] : element(data.azurerm_subnet.node_pools.*.address_prefixes, index(var.node_pools.*.name, nodePool.name))
+        destination_address_prefixes  = rule.direction == "egress" ? [rule.cidr_block] : element(data.azurerm_subnet.node_pools.*.address_prefixes, index(var.node_pools.*.name, nodePool.name))
+        resource_group_name           = data.azurerm_resource_group.aks.name
+        network_security_group_name   = element(split("/", element(data.azurerm_subnet.node_pools.*.network_security_group_id, index(var.node_pools.*.name, nodePool.name))), length(split("/", element(data.azurerm_subnet.node_pools.*.network_security_group_id, index(var.node_pools.*.name, nodePool.name)))) - 1)
+      }]
+      ]
+    ]
+  ])
+}
+
+resource "azurerm_network_security_rule" "node_pools" {
+  count = length(local.azurerm_network_security_rules)
+
+  name                          = local.azurerm_network_security_rules[count.index].name
+  priority                      = local.azurerm_network_security_rules[count.index].priority + count.index # Required because the priority is unique across rules in a security group
+  direction                     = local.azurerm_network_security_rules[count.index].direction
+  access                        = local.azurerm_network_security_rules[count.index].access
+  protocol                      = local.azurerm_network_security_rules[count.index].protocol
+  source_port_range             = local.azurerm_network_security_rules[count.index].source_port_range
+  destination_port_range        = local.azurerm_network_security_rules[count.index].destination_port_range
+  source_address_prefixes       = local.azurerm_network_security_rules[count.index].source_address_prefixes
+  destination_address_prefixes  = local.azurerm_network_security_rules[count.index].destination_address_prefixes
+  resource_group_name           = local.azurerm_network_security_rules[count.index].resource_group_name
+  network_security_group_name   = local.azurerm_network_security_rules[count.index].network_security_group_name
+}
+
 resource "azurerm_kubernetes_cluster" "aks" {
   name                = var.cluster_name
   kubernetes_version  = var.cluster_version
@@ -16,7 +84,7 @@ resource "azurerm_kubernetes_cluster" "aks" {
     node_taints          = var.node_pools[0].taints
     type                 = "VirtualMachineScaleSets"
     os_disk_size_gb      = var.node_pools[0].volume_size
-    vnet_subnet_id       = var.node_pools[0].subnetworks != null ? var.node_pools[0].subnetworks : data.azurerm_subnet.subnetwork.id
+    vnet_subnet_id       = element(data.azurerm_subnet.node_pools.*.id, index(var.node_pools.*.name, var.node_pools[0].name))
     min_count            = var.node_pools[0].min_size
     max_count            = var.node_pools[0].max_size
     tags                 = merge(var.tags, var.node_pools[0].tags)
@@ -48,7 +116,7 @@ resource "azurerm_kubernetes_cluster" "aks" {
   # api_server_authorized_ip_ranges is not compatible with private clusters.
   # Maybe we should consider to create some Security Groups around the
   # control-plane and the node_pools
-  # api_server_authorized_ip_ranges = [var.dmz_cidr_range]
+  # api_server_authorized_ip_ranges = [local.parsed_dmz_cidr_range]
 
   enable_pod_security_policy = false
 
@@ -112,7 +180,7 @@ resource "azurerm_kubernetes_cluster_node_pool" "aks" {
   max_pods              = element(var.node_pools, count.index + 1).max_pods != null ? element(var.node_pools, count.index + 1).max_pods : 250
   os_disk_size_gb       = element(var.node_pools, count.index + 1).volume_size
   os_type               = "Linux"
-  vnet_subnet_id        = element(var.node_pools, count.index + 1).subnetworks != null ? element(var.node_pools, count.index + 1).subnetworks : data.azurerm_subnet.subnetwork.id
+  vnet_subnet_id        = element(data.azurerm_subnet.node_pools.*.id, index(var.node_pools.*.name, var.node_pools[count.index + 1].name))
   enable_auto_scaling   = true
   min_count             = element(var.node_pools, count.index + 1).min_size
   max_count             = element(var.node_pools, count.index + 1).max_size
diff --git a/modules/aks/data.tf b/modules/aks/data.tf
index a0044ef..972a32d 100644
--- a/modules/aks/data.tf
+++ b/modules/aks/data.tf
@@ -4,9 +4,3 @@ data "azurerm_subscription" "current" {
 data "azurerm_resource_group" "aks" {
   name = var.resource_group_name
 }
-
-data "azurerm_subnet" "subnetwork" {
-  name                 = var.subnetworks[0]
-  virtual_network_name = var.network
-  resource_group_name  = var.resource_group_name
-}
diff --git a/modules/aks/main.tf b/modules/aks/main.tf
index 8c43274..5ce4979 100644
--- a/modules/aks/main.tf
+++ b/modules/aks/main.tf
@@ -2,9 +2,9 @@ terraform {
   required_version = ">= 0.12.0"
   required_providers {
     kubernetes = "= 1.13.3"
-    azuread    = ">= 1.0.0"
-    azurerm    = ">= 2.33.0"
-    random     = ">= 3.0.0"
-    null       = ">= 3.0.0"
+    azuread    = "= 1.4.0"
+    azurerm    = "= 2.48.0"
+    random     = "= 3.1.0"
+    null       = "= 3.1.0"
   }
 }
diff --git a/modules/aks/variables.tf b/modules/aks/variables.tf
index 8ee0c3c..bb6d439 100644
--- a/modules/aks/variables.tf
+++ b/modules/aks/variables.tf
@@ -19,10 +19,13 @@ variable "subnetworks" {
 }
 
 variable "dmz_cidr_range" {
-  type        = string
   description = "Network CIDR range from where cluster control plane will be accessible"
 }
 
+locals {
+  parsed_dmz_cidr_range = flatten([var.dmz_cidr_range])
+}
+
 variable "ssh_public_key" {
   type        = string
   description = "Cluster administrator public ssh key. Used to access cluster nodes with the operator_ssh_user"
@@ -42,6 +45,14 @@ variable "node_pools" {
     labels = map(string)
     taints = list(string)
     tags   = map(string)
+    additional_firewall_rules = list(object({
+      name       = string
+      direction  = string
+      cidr_block = string
+      protocol   = string
+      ports      = string
+      tags       = map(string)
+    }))
   }))
   default = []
 }
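Taken together, the patch changes two caller-facing inputs: `dmz_cidr_range` now accepts either a single CIDR string or a list of CIDRs, and every `node_pools` entry may carry an `additional_firewall_rules` list that is rendered into `azurerm_network_security_rule` resources (priority `300 + <rule index>`) on the NSG attached to that pool's subnet. Below is a minimal sketch of how a caller might set the two new inputs; the CIDRs, rule name and ports are placeholder values, and the unchanged node pool attributes are omitted:

```hcl
# Abridged caller fragments (placeholder values, other attributes omitted).

# dmz_cidr_range keeps working as a plain string, but can now also be a list:
dmz_cidr_range = ["10.0.0.0/16", "192.168.0.0/24"]

node_pools = [
  {
    # ... existing node pool attributes (name, min_size, max_size, ...) ...
    additional_firewall_rules = [
      {
        name       = "prometheus-node-exporter" # becomes the NSG rule name
        direction  = "ingress"                  # "ingress" or "egress"
        cidr_block = "10.1.0.0/16"              # source (ingress) or destination (egress)
        protocol   = "Tcp"
        ports      = "9100"
        tags       = {}
      }
    ]
  }
]
```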
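The new `parsed_dmz_cidr_range` local relies on Terraform's `flatten()` to normalize both accepted shapes of `dmz_cidr_range` into a list, which is what `source_address_prefixes` expects. The snippet below is not part of the patch, just an illustration of that behaviour with made-up values:

```hcl
# flatten([var.dmz_cidr_range]) yields a list in both cases (example values):
#
#   dmz_cidr_range = "10.0.0.0/16"
#   flatten(["10.0.0.0/16"])                      # => ["10.0.0.0/16"]
#
#   dmz_cidr_range = ["10.0.0.0/16", "192.168.0.0/24"]
#   flatten([["10.0.0.0/16", "192.168.0.0/24"]])  # => ["10.0.0.0/16", "192.168.0.0/24"]
locals {
  parsed_dmz_cidr_range = flatten([var.dmz_cidr_range])
}
```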