feat: additional node pool settings, single auto scaler profile and api server access #10

Closed · wants to merge 7 commits
5 changes: 4 additions & 1 deletion locals.tf
@@ -8,7 +8,7 @@ locals {
max_count = try(pools.max_count, 0)
min_count = try(pools.min_count, 0)
max_surge = try(pools.max_surge, 50)
poolname = "aks${pools_key}"
poolname = try(pools.name, "aks${pools_key}")
aks_cluster_id = azurerm_kubernetes_cluster.aks.id

linux_os_config = try(pools.config.linux_os, {
@@ -48,6 +48,9 @@
enable_host_encryption = try(pools.enable.host_encryption, false)
availability_zones = try(pools.availability_zones, [])
vnet_subnet_id = try(pools.vnet_subnet_id, null)
os_disk_type = try(pools.os_disk_type, null)
os_disk_size_gb = try(pools.os_disk_size_gb, null)
orchestrator_version = try(pools.orchestrator_version, null)

custom_ca_trust = try(pools.custom_ca_trust, false)
tags = try(pools.tags, {})
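For reference, a minimal sketch of a pool definition that exercises the new pool-level settings, assuming node pools are passed as a map (for example under var.cluster.node_pools) that the locals above flatten; the pool key and every value below are illustrative:

locals {
  # illustrative pool map; the key "workers" and all values are placeholders
  node_pools = {
    workers = {
      name                 = "akslinux1"   # overrides the generated "aks${pools_key}" name
      os_disk_type         = "Ephemeral"
      os_disk_size_gb      = 128
      orchestrator_version = "1.28.5"
      min_count            = 1
      max_count            = 5
    }
  }
}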
75 changes: 49 additions & 26 deletions main.tf
@@ -80,28 +80,26 @@ resource "azurerm_kubernetes_cluster" "aks" {
}

dynamic "auto_scaler_profile" {
for_each = {
for k, v in try(var.cluster.auto_scaler_profile, {}) : k => v
}
for_each = try(var.cluster.auto_scaler_profile, null) != null ? [1] : []

content {
balance_similar_node_groups = try(auto_scaler_profile.value.balance_similar_node_groups, false)
expander = try(auto_scaler_profile.value.expander, null)
max_graceful_termination_sec = try(auto_scaler_profile.value.max_graceful_termination_sec, null)
max_node_provisioning_time = try(auto_scaler_profile.value.max_node_provisioning_time, null)
max_unready_nodes = try(auto_scaler_profile.value.max_unready_nodes, null)
max_unready_percentage = try(auto_scaler_profile.value.max_unready_percentage, null)
new_pod_scale_up_delay = try(auto_scaler_profile.value.new_pod_scale_up_delay, null)
scale_down_delay_after_add = try(auto_scaler_profile.value.scale_down_delay_after_add, null)
scale_down_delay_after_delete = try(auto_scaler_profile.value.scale_down_delay_after_delete, null)
scale_down_delay_after_failure = try(auto_scaler_profile.value.scale_down_delay_after_failure, null)
scan_interval = try(auto_scaler_profile.value.scan_interval, null)
scale_down_unneeded = try(auto_scaler_profile.value.scale_down_unneeded, null)
scale_down_unready = try(auto_scaler_profile.value.scale_down_unready, null)
scale_down_utilization_threshold = try(auto_scaler_profile.value.scale_down_utilization_threshold, null)
empty_bulk_delete_max = try(auto_scaler_profile.value.empty_bulk_delete_max, null)
skip_nodes_with_local_storage = try(auto_scaler_profile.value.skip_nodes_with_local_storage, null)
skip_nodes_with_system_pods = try(auto_scaler_profile.value.skip_nodes_with_system_pods, null)
balance_similar_node_groups = try(var.cluster.auto_scaler_profile.balance_similar_node_groups, false)
expander = try(var.cluster.auto_scaler_profile.expander, null)
max_graceful_termination_sec = try(var.cluster.auto_scaler_profile.max_graceful_termination_sec, null)
max_node_provisioning_time = try(var.cluster.auto_scaler_profile.max_node_provisioning_time, null)
max_unready_nodes = try(var.cluster.auto_scaler_profile.max_unready_nodes, null)
max_unready_percentage = try(var.cluster.auto_scaler_profile.max_unready_percentage, null)
new_pod_scale_up_delay = try(var.cluster.auto_scaler_profile.new_pod_scale_up_delay, null)
scale_down_delay_after_add = try(var.cluster.auto_scaler_profile.scale_down_delay_after_add, null)
scale_down_delay_after_delete = try(var.cluster.auto_scaler_profile.scale_down_delay_after_delete, null)
scale_down_delay_after_failure = try(var.cluster.auto_scaler_profile.scale_down_delay_after_failure, null)
scan_interval = try(var.cluster.auto_scaler_profile.scan_interval, null)
scale_down_unneeded = try(var.cluster.auto_scaler_profile.scale_down_unneeded, null)
scale_down_unready = try(var.cluster.auto_scaler_profile.scale_down_unready, null)
scale_down_utilization_threshold = try(var.cluster.auto_scaler_profile.scale_down_utilization_threshold, null)
empty_bulk_delete_max = try(var.cluster.auto_scaler_profile.empty_bulk_delete_max, null)
skip_nodes_with_local_storage = try(var.cluster.auto_scaler_profile.skip_nodes_with_local_storage, null)
skip_nodes_with_system_pods = try(var.cluster.auto_scaler_profile.skip_nodes_with_system_pods, null)
}
}

@@ -121,7 +119,7 @@ resource "azurerm_kubernetes_cluster" "aks" {

content {
log_analytics_workspace_id = try(var.cluster.workspace.id, null)
msi_auth_for_monitoring_enabled = try(oms_agent.value.msi_auth_for_monitoring_enabled, false)
msi_auth_for_monitoring_enabled = try(var.cluster.enable.msi_auth_for_monitoring_enabled, false)
}
}

@@ -170,9 +168,9 @@ resource "azurerm_kubernetes_cluster" "aks" {
for_each = var.cluster.profile == "linux" ? { "default" = {} } : {}

content {
admin_username = try(linux_profile.value.username, "nodeadmin")
admin_username = try(var.cluster.linux_admin_username, "nodeadmin")
ssh_key {
key_data = azurerm_key_vault_secret.tls_public_key_secret[linux_profile.key].value
key_data = var.ssh_public_key_provided ? data.azurerm_key_vault_secret.tls_public_key_secret["default"].value : azurerm_key_vault_secret.tls_public_key_secret[linux_profile.key].value
}
}
}
@@ -275,6 +273,11 @@ resource "azurerm_kubernetes_cluster" "aks" {
type = try(var.cluster.default_node_pool.type, "VirtualMachineScaleSets")
workload_runtime = try(var.cluster.default_node_pool.workload_runtime, null)

kubelet_disk_type = try(var.cluster.default_node_pool.kubelet_disk_type, null)
os_disk_type = try(var.cluster.default_node_pool.os_disk_type, null)
os_disk_size_gb = try(var.cluster.default_node_pool.os_disk_size_gb, null)
orchestrator_version = try(var.cluster.default_node_pool.orchestrator_version, null)

dynamic "upgrade_settings" {
for_each = {
for k, v in try(var.cluster.node_pools.upgrade_settings, {}) : k => v
@@ -351,32 +354,49 @@ resource "azurerm_kubernetes_cluster" "aks" {
identity {
type = "SystemAssigned"
}

# api-server access profile
dynamic "api_server_access_profile" {
for_each = try(var.cluster.api_server_access_profile, null) != null ? { "default" = var.cluster.api_server_access_profile } : {}
content {
authorized_ip_ranges = try(var.cluster.api_server_access_profile.authorized_ip_ranges, null)
subnet_id = try(var.cluster.api_server_access_profile.subnet_id, null)
vnet_integration_enabled = try(var.cluster.api_server_access_profile.vnet_integration_enabled, null)
}
}
}

# secrets
resource "tls_private_key" "tls_key" {
for_each = var.cluster.profile == "linux" ? { "default" = {} } : {}
for_each = var.cluster.profile == "linux" && !var.ssh_public_key_provided ? { "default" = {} } : {}

algorithm = "RSA"
rsa_bits = 4096
}

resource "azurerm_key_vault_secret" "tls_public_key_secret" {
for_each = var.cluster.profile == "linux" ? { "default" = {} } : {}
for_each = var.cluster.profile == "linux" && !var.ssh_public_key_provided ? { "default" = {} } : {}

name = format("%s-%s-%s", "kvs", var.cluster.name, "pub")
value = tls_private_key.tls_key[each.key].public_key_openssh
key_vault_id = var.keyvault
}

resource "azurerm_key_vault_secret" "tls_private_key_secret" {
for_each = var.cluster.profile == "linux" ? { "default" = {} } : {}
for_each = var.cluster.profile == "linux" && !var.ssh_public_key_provided ? { "default" = {} } : {}

name = format("%s-%s-%s", "kvs", var.cluster.name, "prv")
value = tls_private_key.tls_key[each.key].private_key_pem
key_vault_id = var.keyvault
}

data "azurerm_key_vault_secret" "tls_public_key_secret" {
for_each = var.cluster.profile == "linux" && var.ssh_public_key_provided ? { "default" = {} } : {}

name = var.ssh_public_key_keyvault_secret_name
key_vault_id = var.keyvault
}

# random password
resource "random_password" "password" {
for_each = var.cluster.profile == "windows" ? { "default" = {} } : {}
@@ -428,6 +448,9 @@ resource "azurerm_kubernetes_cluster_node_pool" "pools" {
snapshot_id = each.value.snapshot_id
workload_runtime = each.value.workload_runtime
vnet_subnet_id = each.value.vnet_subnet_id
os_disk_type = each.value.os_disk_type
os_disk_size_gb = each.value.os_disk_size_gb
orchestrator_version = each.value.orchestrator_version

dynamic "windows_profile" {
for_each = each.value.os_type == "windows" ? [1] : []
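Taken together, the main.tf changes turn auto_scaler_profile into a single optional object and add an optional api_server_access_profile. A hedged sketch of the matching var.cluster fragment, with attribute names inferred from the try() lookups above and all values illustrative:

locals {
  cluster = {
    name    = "demo-aks"
    profile = "linux"

    # a single object now, not a map of profiles
    auto_scaler_profile = {
      balance_similar_node_groups = true
      expander                    = "least-waste"
      scan_interval               = "30s"
    }

    # omit this attribute entirely to skip the api_server_access_profile block;
    # subnet_id and vnet_integration_enabled are also optional
    api_server_access_profile = {
      authorized_ip_ranges = ["203.0.113.0/24"]
    }
  }
}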
12 changes: 12 additions & 0 deletions variables.tf
@@ -25,3 +25,15 @@ variable "resourcegroup" {
type = string
default = null
}

variable "ssh_public_key_provided" {
description = "Decide whether the user brings their pre-existing ssh key"
type = bool
default = false
}

variable "ssh_public_key_keyvault_secret_name" {
description = "The secret name of the pre-existing ssh key"
type = string
default = null
}
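
For reference, a sketch of a module call using the new bring-your-own-key path; the module source, Key Vault ID, and secret name are placeholders, and resourcegroup, keyvault, and cluster follow the inputs this module already consumes:

module "aks" {
  source = "../.."   # placeholder path to this module

  resourcegroup = "rg-demo"
  keyvault      = "/subscriptions/<sub-id>/resourceGroups/rg-demo/providers/Microsoft.KeyVault/vaults/kv-demo"
  cluster       = local.cluster

  # reuse an SSH public key already stored in Key Vault instead of generating one
  ssh_public_key_provided             = true
  ssh_public_key_keyvault_secret_name = "aks-ssh-public-key"
}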