
Commit 2d6f210
Add v1.5.0 features to AKS SIGHUP Cloud installer
angelbarrera92 committed Mar 5, 2021
1 parent 2560e63 commit 2d6f210
Showing 5 changed files with 95 additions and 22 deletions.
16 changes: 8 additions & 8 deletions modules/aks/README.md
@@ -1,20 +1,20 @@
## Providers

-| Name       | Version   |
-| ---------- | --------- |
-| kubernetes | = 1.13.3  |
-| azuread    | >= 1.0.0  |
-| azurerm    | >= 2.33.0 |
-| null       | >= 3.0.0  |
-| random     | >= 3.0.0  |
+| Name       | Version  |
+| ---------- | -------- |
+| kubernetes | = 1.13.3 |
+| azuread    | = 1.4.0  |
+| azurerm    | = 2.48.0 |
+| null       | = 3.1.0  |
+| random     | = 3.1.0  |

## Inputs

| Name | Description | Type | Default | Required |
| ------------------- | ----------- | ---- | ------- | :------: |
| cluster_name | Unique cluster name. Used in multiple resources to identify your cluster resources | `string` | n/a | yes |
| cluster_version | Kubernetes Cluster Version. Look at the cloud providers documentation to discover available versions. EKS example -> 1.16, GKE example -> 1.16.8-gke.9 | `string` | n/a | yes |
-| dmz_cidr_range      | Network CIDR range from where cluster control plane will be accessible | `string`                   | n/a | yes |
+| dmz_cidr_range      | Network CIDR range from where cluster control plane will be accessible | `string` or `list(string)` | n/a | yes |
| network | Network where the Kubernetes cluster will be hosted | `string` | n/a | yes |
| resource_group_name | Resource group name where every resource will be placed. Required only in AKS installer (*) | `string` | n/a | yes |
| ssh_public_key | Cluster administrator public ssh key. Used to access cluster nodes with the operator_ssh_user | `string` | n/a | yes |
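With this change, `dmz_cidr_range` accepts either a single CIDR string or a list of CIDRs. A minimal sketch of a module call under that assumption — cluster name, VNet, resource group, and CIDR values are illustrative, and other required inputs are elided:

```hcl
module "aks" {
  source = "../../modules/aks"

  cluster_name        = "my-cluster"   # illustrative
  cluster_version     = "1.18.14"      # illustrative
  network             = "my-vnet"      # illustrative
  resource_group_name = "my-rg"        # illustrative
  ssh_public_key      = file("~/.ssh/id_rsa.pub")

  # Both forms are accepted after this commit:
  # dmz_cidr_range = "10.0.0.0/8"
  dmz_cidr_range = ["10.0.0.0/8", "192.168.1.0/24"]

  # ... remaining required inputs (subnetworks, node_pools, ...) omitted
}
```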
74 changes: 71 additions & 3 deletions modules/aks/cluster.tf
@@ -1,3 +1,71 @@
+# Control plane subnet and security groups
+data "azurerm_subnet" "aks" {
+  name                 = var.node_pools[0].subnetworks != null && length(var.node_pools[0].subnetworks) > 0 ? var.node_pools[0].subnetworks[0] : var.subnetworks[0]
+  virtual_network_name = var.network
+  resource_group_name  = data.azurerm_resource_group.aks.name
+}
+
+# Node pool subnets and security groups
+data "azurerm_subnet" "node_pools" {
+  count                = length(var.node_pools)
+  name                 = var.node_pools[count.index].subnetworks != null && length(var.node_pools[count.index].subnetworks) > 0 ? var.node_pools[count.index].subnetworks[0] : var.subnetworks[0]
+  virtual_network_name = var.network
+  resource_group_name  = data.azurerm_resource_group.aks.name
+}
+
+# Security rule enabling local.parsed_dmz_cidr_range to access the control plane endpoint. Cloud installers v1.5.0
+resource "azurerm_network_security_rule" "aks" {
+  name                         = "${var.cluster_name} - Control Plane"
+  priority                     = 200
+  direction                    = "Inbound"
+  access                       = "Allow"
+  protocol                     = "Tcp"
+  source_port_range            = "*"
+  destination_port_range       = "443" # Control plane
+  source_address_prefixes      = local.parsed_dmz_cidr_range
+  destination_address_prefixes = data.azurerm_subnet.aks.address_prefixes
+  resource_group_name          = data.azurerm_resource_group.aks.name
+  network_security_group_name  = element(split("/", data.azurerm_subnet.aks.network_security_group_id), length(split("/", data.azurerm_subnet.aks.network_security_group_id)) - 1)
+}
+
+# Custom firewall rules, introduced in v1.5.0 of the cloud installers
+locals {
+  azurerm_network_security_rules = flatten([
+    [for nodePool in var.node_pools : [
+      [for rule in nodePool.additional_firewall_rules : {
+        name                         = rule.name
+        priority                     = 300
+        direction                    = rule.direction == "ingress" ? "Inbound" : "Outbound"
+        access                       = "Allow"
+        protocol                     = rule.protocol
+        source_port_range            = "*"
+        destination_port_range       = rule.ports
+        source_address_prefixes      = rule.direction == "ingress" ? [rule.cidr_block] : element(data.azurerm_subnet.node_pools.*.address_prefixes, index(var.node_pools.*.name, nodePool.name))
+        destination_address_prefixes = rule.direction == "egress" ? [rule.cidr_block] : element(data.azurerm_subnet.node_pools.*.address_prefixes, index(var.node_pools.*.name, nodePool.name))
+        resource_group_name          = data.azurerm_resource_group.aks.name
+        network_security_group_name  = element(split("/", element(data.azurerm_subnet.node_pools.*.network_security_group_id, index(var.node_pools.*.name, nodePool.name))), length(split("/", element(data.azurerm_subnet.node_pools.*.network_security_group_id, index(var.node_pools.*.name, nodePool.name)))) - 1)
+      }]
+    ]]
+  ])
+}
+
+resource "azurerm_network_security_rule" "node_pools" {
+  count = length(local.azurerm_network_security_rules)
+
+  name                         = local.azurerm_network_security_rules[count.index].name
+  priority                     = local.azurerm_network_security_rules[count.index].priority + count.index # Required because priority must be unique across rules in a security group
+  direction                    = local.azurerm_network_security_rules[count.index].direction
+  access                       = local.azurerm_network_security_rules[count.index].access
+  protocol                     = local.azurerm_network_security_rules[count.index].protocol
+  source_port_range            = local.azurerm_network_security_rules[count.index].source_port_range
+  destination_port_range       = local.azurerm_network_security_rules[count.index].destination_port_range
+  source_address_prefixes      = local.azurerm_network_security_rules[count.index].source_address_prefixes
+  destination_address_prefixes = local.azurerm_network_security_rules[count.index].destination_address_prefixes
+  resource_group_name          = local.azurerm_network_security_rules[count.index].resource_group_name
+  network_security_group_name  = local.azurerm_network_security_rules[count.index].network_security_group_name
+}

resource "azurerm_kubernetes_cluster" "aks" {
name = var.cluster_name
kubernetes_version = var.cluster_version
@@ -16,7 +84,7 @@ resource "azurerm_kubernetes_cluster" "aks" {
    node_taints     = var.node_pools[0].taints
    type            = "VirtualMachineScaleSets"
    os_disk_size_gb = var.node_pools[0].volume_size
-   vnet_subnet_id  = var.node_pools[0].subnetworks != null ? var.node_pools[0].subnetworks : data.azurerm_subnet.subnetwork.id
+   vnet_subnet_id  = element(data.azurerm_subnet.node_pools.*.id, index(var.node_pools.*.name, var.node_pools[0].name))
    min_count       = var.node_pools[0].min_size
    max_count       = var.node_pools[0].max_size
    tags            = merge(var.tags, var.node_pools[0].tags)
@@ -48,7 +116,7 @@ resource "azurerm_kubernetes_cluster" "aks" {
  # api_server_authorized_ip_ranges is not compatible with private clusters.
  # Maybe we should consider creating some security groups around the
  # control-plane and the node_pools
- # api_server_authorized_ip_ranges = [var.dmz_cidr_range]
+ # api_server_authorized_ip_ranges = [local.parsed_dmz_cidr_range]

  enable_pod_security_policy = false

@@ -112,7 +180,7 @@ resource "azurerm_kubernetes_cluster_node_pool" "aks" {
  max_pods            = element(var.node_pools, count.index + 1).max_pods != null ? element(var.node_pools, count.index + 1).max_pods : 250
  os_disk_size_gb     = element(var.node_pools, count.index + 1).volume_size
  os_type             = "Linux"
- vnet_subnet_id      = element(var.node_pools, count.index + 1).subnetworks != null ? element(var.node_pools, count.index + 1).subnetworks : data.azurerm_subnet.subnetwork.id
+ vnet_subnet_id      = element(data.azurerm_subnet.node_pools.*.id, index(var.node_pools.*.name, var.node_pools[count.index + 1].name))
  enable_auto_scaling = true
  min_count           = element(var.node_pools, count.index + 1).min_size
  max_count           = element(var.node_pools, count.index + 1).max_size
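To make the new plumbing concrete: the locals/`count` pattern above turns each `additional_firewall_rules` entry into one `azurerm_network_security_rule`. A hedged sketch of what a single ingress rule should expand into — pool input, CIDRs, resource group, and NSG names below are hypothetical:

```hcl
# Hypothetical node pool input:
#   additional_firewall_rules = [{
#     name       = "allow-prometheus"
#     direction  = "ingress"
#     cidr_block = "10.1.0.0/16"
#     protocol   = "Tcp"
#     ports      = "9090-9100"
#     tags       = {}
#   }]
#
# Expected resulting rule (the first flattened rule gets priority 300 + 0):
resource "azurerm_network_security_rule" "example" {
  name                         = "allow-prometheus"
  priority                     = 300             # 300 + count.index in the real resource
  direction                    = "Inbound"       # "ingress" maps to Inbound, anything else to Outbound
  access                       = "Allow"
  protocol                     = "Tcp"
  source_port_range            = "*"
  destination_port_range       = "9090-9100"
  source_address_prefixes      = ["10.1.0.0/16"] # the rule's cidr_block, since direction is ingress
  destination_address_prefixes = ["10.0.1.0/24"] # the node pool subnet's address prefixes
  resource_group_name          = "my-rg"         # hypothetical
  network_security_group_name  = "my-nsg"        # last path segment of the subnet's network_security_group_id
}
```

The `element(split("/", ...), length(...) - 1)` expressions in the diff implement that last comment: they take the subnet's NSG resource ID and keep only its final path segment, the NSG name.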
6 changes: 0 additions & 6 deletions modules/aks/data.tf
@@ -4,9 +4,3 @@ data "azurerm_subscription" "current" {
data "azurerm_resource_group" "aks" {
name = var.resource_group_name
}

data "azurerm_subnet" "subnetwork" {
name = var.subnetworks[0]
virtual_network_name = var.network
resource_group_name = var.resource_group_name
}
8 changes: 4 additions & 4 deletions modules/aks/main.tf
@@ -2,9 +2,9 @@ terraform {
  required_version = ">= 0.12.0"
  required_providers {
    kubernetes = "= 1.13.3"
-   azuread    = ">= 1.0.0"
-   azurerm    = ">= 2.33.0"
-   random     = ">= 3.0.0"
-   null       = ">= 3.0.0"
+   azuread    = "= 1.4.0"
+   azurerm    = "= 2.48.0"
+   random     = "= 3.1.0"
+   null       = "= 3.1.0"
  }
}
13 changes: 12 additions & 1 deletion modules/aks/variables.tf
@@ -19,10 +19,13 @@ variable "subnetworks" {
}

variable "dmz_cidr_range" {
type = string
description = "Network CIDR range from where cluster control plane will be accessible"
}

locals {
parsed_dmz_cidr_range = flatten([var.dmz_cidr_range])
}

variable "ssh_public_key" {
type = string
description = "Cluster administrator public ssh key. Used to access cluster nodes with the operator_ssh_user"
@@ -42,6 +45,14 @@ variable "node_pools" {
    labels = map(string)
    taints = list(string)
    tags   = map(string)
+   additional_firewall_rules = list(object({
+     name       = string
+     direction  = string
+     cidr_block = string
+     protocol   = string
+     ports      = string
+     tags       = map(string)
+   }))
  }))
  default = []
}
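Dropping the declared `type` is what lets `dmz_cidr_range` be passed as either shape; the new `flatten([var.dmz_cidr_range])` local then normalizes both to a list of CIDRs. A quick sketch of that behavior (local names are illustrative):

```hcl
locals {
  # When the variable is a string: [var] wraps it in a list, flatten is a no-op.
  # flatten(["10.0.0.0/8"])                     => ["10.0.0.0/8"]
  from_string = flatten(["10.0.0.0/8"])

  # When the variable is a list: [var] nests it, flatten unwraps one level.
  # flatten([["10.0.0.0/8", "192.168.1.0/24"]]) => ["10.0.0.0/8", "192.168.1.0/24"]
  from_list = flatten([["10.0.0.0/8", "192.168.1.0/24"]])
}
```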
