From e60d0f23e4ad5c352578b37b4d89980c25ad5dc7 Mon Sep 17 00:00:00 2001 From: Krishnadhas N K <108367225+githubofkrishnadhas@users.noreply.github.com> Date: Fri, 27 Dec 2024 22:19:41 +0530 Subject: [PATCH 1/3] Terraform module for azure kubernetes cluster (#13) * DEVOPS-292 data file * DEVOPS-301 added plan files to gitignore * DEVOPS-292 kubernetes terraform code * DEVOPS-300 output tf code * DEVOPS-300 providers and variables tf code * Update terraform tf files DEVOPS-301 DEVOPS-302 * remove role assignment resource block --- .gitignore | 2 + kubernetes-cluster/data.tf | 42 ++++ kubernetes-cluster/kubernetes.tf | 118 +++++++++++ kubernetes-cluster/output.tf | 9 + kubernetes-cluster/providers.tf | 12 ++ kubernetes-cluster/variables.tf | 346 +++++++++++++++++++++++++++++++ 6 files changed, 529 insertions(+) create mode 100644 kubernetes-cluster/data.tf create mode 100644 kubernetes-cluster/kubernetes.tf create mode 100644 kubernetes-cluster/output.tf create mode 100644 kubernetes-cluster/providers.tf create mode 100644 kubernetes-cluster/variables.tf diff --git a/.gitignore b/.gitignore index 34fdb7c..5c429cf 100644 --- a/.gitignore +++ b/.gitignore @@ -48,3 +48,5 @@ terraform.rc # Un-ignore subdirectories and their contents within .github # !.github/**/ # README./ + +*.out diff --git a/kubernetes-cluster/data.tf b/kubernetes-cluster/data.tf new file mode 100644 index 0000000..eefd7b2 --- /dev/null +++ b/kubernetes-cluster/data.tf @@ -0,0 +1,42 @@ +# current user / service principal details +data "azurerm_client_config" "current" { +} + +# get admin group details +data "azuread_groups" "cluster_admin" { + display_names = [for group in var.cluster_admin_group_names : group] +} + +# get sp app id from kv +data "azuread_service_principal" "sp" { + client_id = data.azurerm_key_vault_secret.appid.value +} + +# get subnet id for nodepool +data "azurerm_subnet" "vnet_subnet_cidr" { + name = var.subnet_name + virtual_network_name = var.virtual_network_name + 
resource_group_name = var.virtual_netwok_rg +} + +# get subscription id +data "azurerm_subscription" "primary" { +} + +# get kv details +data "azurerm_key_vault" "existing" { + name = var.keyvault_name + resource_group_name = var.keyvault_rg_name +} + +# get sp app id from kv +data "azurerm_key_vault_secret" "secret" { + name = "AKS-SP-PASSWORD" + key_vault_id = data.azurerm_key_vault.existing.id +} + +# get sp app secret from kv +data "azurerm_key_vault_secret" "appid" { + name = "AKS-SP-APPID" + key_vault_id = data.azurerm_key_vault.existing.id +} \ No newline at end of file diff --git a/kubernetes-cluster/kubernetes.tf b/kubernetes-cluster/kubernetes.tf new file mode 100644 index 0000000..c11fa3b --- /dev/null +++ b/kubernetes-cluster/kubernetes.tf @@ -0,0 +1,118 @@ +# Create k8s service resource group +resource "azurerm_resource_group" "rg" { + name = upper(var.resource_group_name) + location = var.location + tags = { + Environment = upper(var.environment) + Orchestrator = "Terraform" + DisplayName = upper(var.resource_group_name) + ApplicationName = lower(var.application_name) + Temporary = upper(var.temporary) + } +} + +# Create AKS cluster, nodepools, loadbalancer etc +resource "azurerm_kubernetes_cluster" "aks_cluster" { + name = upper(var.aks_cluster_name) + location = var.location + resource_group_name = azurerm_resource_group.rg.name + automatic_upgrade_channel = var.automatic_upgrade_channel + kubernetes_version = var.kubernetes_version + sku_tier = var.kubernetes_sku_tier + node_resource_group = var.aks_node_resource_group_name + + dns_prefix = var.aks_cluster_name + + azure_active_directory_role_based_access_control { + tenant_id = data.azurerm_client_config.current.tenant_id + admin_group_object_ids = data.azuread_groups.cluster_admin.object_ids + } + + network_profile { + network_plugin = var.network_plugin + network_plugin_mode = var.network_plugin_mode + network_policy = var.network_policy + pod_cidr = var.pod_cidr_range + service_cidr = 
var.service_cidr_subnet + load_balancer_sku = var.load_balancer_sku + dns_service_ip = cidrhost((var.service_cidr_subnet), 5) # 5th ip on service cidr subnet + } + + service_principal { + client_id = data.azurerm_key_vault_secret.appid.value + client_secret = data.azurerm_key_vault_secret.secret.value + } + + workload_identity_enabled = var.workload_identity_enabled + oidc_issuer_enabled = var.workload_identity_enabled ? true : false + + support_plan = var.support_plan + + storage_profile { + blob_driver_enabled = var.enable_blob_driver + disk_driver_enabled = var.enable_disk_driver + file_driver_enabled = var.enable_fileshare_driver + } + + default_node_pool { + name = var.default_nodepool_name + vm_size = var.default_nodepool_sku + auto_scaling_enabled = var.default_nodepool_autoscaling + type = "VirtualMachineScaleSets" + node_public_ip_enabled = var.node_public_ip_enabled + orchestrator_version = var.kubernetes_version + max_pods = var.max_pods_per_node + vnet_subnet_id = data.azurerm_subnet.vnet_subnet_cidr.id + os_sku = var.os_sku + max_count = var.default_nodepool_max_count + min_count = var.default_nodepool_min_count + tags = { + Environment = upper(var.environment) + DisplayName = upper(var.default_nodepool_name) + ApplicationName = lower(var.application_name) + Temporary = upper(var.temporary) + } + + } + + depends_on = [ azurerm_resource_group.rg ] + + key_vault_secrets_provider { + secret_rotation_enabled = var.key_vault_secrets_provider + secret_rotation_interval = var.secret_rotation_interval + } + + tags = { + Environment = upper(var.environment) + Orchestrator = "Terraform" + DisplayName = upper(var.aks_cluster_name) + ApplicationName = lower(var.application_name) + Temporary = upper(var.temporary) + } + +} + +# Create worker nodepool +resource "azurerm_kubernetes_cluster_node_pool" "workernodes" { + name = var.worker_nodepool_name + kubernetes_cluster_id = azurerm_kubernetes_cluster.aks_cluster.id + vm_size = var.worker_nodepool_sku + 
auto_scaling_enabled = var.worker_nodepool_autoscaling + node_public_ip_enabled = var.node_public_ip_enabled + priority = var.worker_node_spot ? "Spot" : "Regular" + scale_down_mode = var.worker_node_scaledown_mode + vnet_subnet_id = data.azurerm_subnet.vnet_subnet_cidr.id + min_count = var.worker_nodepool_min_count + max_count = var.worker_nodepool_max_count + eviction_policy = var.worker_nodepool_eviction_policy + os_sku = var.os_sku + os_disk_size_gb = var.os_disk_size_in_gb + orchestrator_version = var.kubernetes_version + max_pods = var.max_pods_per_node + tags = { + Environment = upper(var.environment) + DisplayName = upper(var.worker_nodepool_name) + ApplicationName = lower(var.application_name) + Temporary = upper(var.temporary) + } +} \ No newline at end of file diff --git a/kubernetes-cluster/output.tf b/kubernetes-cluster/output.tf new file mode 100644 index 0000000..602175e --- /dev/null +++ b/kubernetes-cluster/output.tf @@ -0,0 +1,9 @@ +output "aks_cluster_name" { + description = "Azure AKS name" + value = azurerm_kubernetes_cluster.aks_cluster.name +} + +output "aks_cluster_control_plane_url" { + description = "FQDN of the Azure Kubernetes Managed Cluster" + value = azurerm_kubernetes_cluster.aks_cluster.fqdn +} \ No newline at end of file diff --git a/kubernetes-cluster/providers.tf b/kubernetes-cluster/providers.tf new file mode 100644 index 0000000..5221fd2 --- /dev/null +++ b/kubernetes-cluster/providers.tf @@ -0,0 +1,12 @@ +terraform { + required_version = "~> 1.3" + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "<= 4.14.0" + } + } +} +provider "azurerm" { + features {} +} \ No newline at end of file diff --git a/kubernetes-cluster/variables.tf b/kubernetes-cluster/variables.tf new file mode 100644 index 0000000..6327f71 --- /dev/null +++ b/kubernetes-cluster/variables.tf @@ -0,0 +1,346 @@ +variable "resource_group_name" { + default = "" + description = "Azure resource group name to create aks cluster" + type = 
string +} + +variable "aks_node_resource_group_name" { + default = "" + description = "Azure node resource group name" + type = string +} + +variable "aks_cluster_name" { + default = "" + description = "Name of Aks cluster in Azure" + type = string +} + +variable "location" { + default = "" + description = "Azure location" + type = string +} + + +variable "application_name" { + default = "" + description = "Azure application name tag value" + type = string +} + +variable "environment" { + default = "" + description = "Environment tag value in Azure" + type = string + validation { + condition = contains(["DEV", "QA", "UAT", "PROD"], var.environment) + error_message = "Environment value should be one among DEV or QA or UAT or PROD." + } +} + +variable "temporary" { + default = "" + description = "Temporary tag value in Azure" + type = string + validation { + condition = contains(["TRUE", "FALSE"], upper(var.temporary)) + error_message = "The temporary tag value must be either 'TRUE' or 'FALSE'." + } + +} + +variable "default_nodepool_sku" { + default = "Standard_D2ps_v5" + description = "Aks system node pool" + type = string +} + +variable "default_nodepool_name" { + default = "agentpool" + description = "System nodepool for cluster" + type = string + +} + +variable "default_nodepool_autoscaling" { + default = true + description = "Enable auto scaling in default nodepool" + type = bool +} + +variable "default_nodepool_max_count" { + description = "Maximum number of nodes in default nodepool" + default = 1 + type = number + +} + +variable "default_nodepool_min_count" { + description = "Minimum number of nodes in default nodepool" + default = 1 + type = number +} + +variable "cluster_admin_group_names"{ + default = [""] + type = list(string) + description = "Name of Azure AD groups to provide cluster admin permissions." 
+} + + +variable "max_pods_per_node" { + default = 100 + description = "maximum number of pods that can be accumulated in a node" + type = number + validation { + condition = var.max_pods_per_node <= 250 && var.max_pods_per_node >= 10 + error_message = "The value must be between 10 and 250 (inclusive)." + } +} + +variable "key_vault_secrets_provider" { + default = true + description = "Should the secret store CSI driver on the AKS cluster be enabled" + type = bool +} + +variable "secret_rotation_interval" { + default = "1m" + type = string + description = "The interval to poll for secret rotation" +} + +variable "network_plugin" { + default = "azure" + description = "Azure CNI network plugin" + type = string +} + +variable "network_policy" { + default = "azure" + description = "Azure network policy to be used with CNI cluster" + type = string +} + +variable "network_plugin_mode" { + default = "overlay" + description = "Specifies the network plugin mode used for building the Kubernetes network" + type = string +} + +variable "pod_cidr_range" { + default = "172.0.0.0/16" + description = "CIDR range for pods" + type = string +} + +variable "virtual_network_name" { + default = "" + description = "Azure Vnet name" + type = string +} + +variable "subnet_name" { + default = "" + description = "Azure subnet name" + type = string +} + +variable "service_cidr_subnet" { + default = "192.168.0.0/16" + description = "Azure service cidr subnet" + type = string +} + +variable "virtual_netwok_rg" { + default = "" + description = "Azure Vnet resource group" + type = string +} + +variable "load_balancer_sku" { + default = "" + description = "Load balancer SKU" + type = string + + validation { + condition = contains(["basic", "standard"], var.load_balancer_sku) + error_message = "Load balancer SKU should be either 'basic' or 'standard' (lowercase)." 
+ } +} + +variable "os_sku" { + default = "Ubuntu" + description = "AKS node pool image" + type = string +} + +variable "os_disk_size_in_gb" { + default = 32 + description = "OS disk size" + type = number +} + +variable "keyvault_name" { + default = "" + description = "Azure Key Vault name" + type = string +} + +variable "keyvault_rg_name" { + default = "" + description = "Azure Key Vault rg name" + type = string +} + +variable "workload_identity_enabled" { + default = true + description = "Enable workload identity in azure or not" + type = bool +} + +variable "enable_blob_driver" { + default = true + description = "Enable Blob CSI driver enabled" + type = bool +} + +variable "enable_disk_driver" { + default = true + description = "Enable Disk CSI driver enabled" + type = bool +} + +variable "enable_fileshare_driver" { + default = true + description = "Enable Fileshare CSI driver enabled" + type = bool +} + +variable "kubernetes_sku_tier" { + description = "SKU Tier that should be used for this Kubernetes Cluster" + default = "" + type = string + validation { + condition = contains(["Free", "Standard", "Premium"], var.kubernetes_sku_tier) + error_message = "Kubernetes SKU should be One among Free, Standard Or Premium." + } +} + +variable "private_cluster" { + default = false + description = "Deploy AKS cluster without exposing publicly accessible endpoint" + type = bool + validation { + condition = var.private_cluster == true || var.private_cluster == false + error_message = "private_cluster value must either be true or false." + } +} + +variable "automatic_upgrade_channel" { + description = "The upgrade channel for this Kubernetes Cluster" + default = "" + type = string + validation { + condition = contains(["patch", "rapid", "node-image", "stable"], var.automatic_upgrade_channel) + error_message = "The upgrade channel for this Kubernetes Cluster should be One among Patch, Rapid, Node-Image Or Stable." 
+ } +} + +variable "support_plan" { + default = "KubernetesOfficial" + description = "Specifies the support plan which should be used for this Kubernetes Cluster" + type = string + validation { + condition = contains(["AKSLongTermSupport", "KubernetesOfficial"], var.support_plan) + error_message = "Possible values are KubernetesOfficial and AKSLongTermSupport." + } +} + +variable "worker_nodepool_name" { + default = "workernodes" + description = "Additional kubernetes node pool" + type = string +} + +variable "worker_nodepool_sku" { + default = "Standard_D2ads_v5" + description = "Azure VM Sku of worker node" + type = string +} + +variable "worker_node_spot" { + type = bool + default = true + description = "Worker nodes are spot or ondemand" + validation { + condition = var.worker_node_spot == true || var.worker_node_spot == false + error_message = "worker_node_spot should be true or false" + } +} + +variable "worker_nodepool_autoscaling" { + default = true + description = "Enable auto scaling in worker nodepool" + type = bool +} + +variable "node_public_ip_enabled" { + default = false + type = bool + description = "Should each node have a Public IP Address" +} + +variable "worker_node_scaledown_mode" { + default = "Deallocate" + description = "Specifies how the node pool should deal with scaled-down nodes" + type = string + validation { + condition = contains(["Delete", "Deallocate"], var.worker_node_scaledown_mode) + error_message = "This Value should be either Delete or Deallocate." 
+ } +} + +variable "worker_nodepool_max_count" { + description = "Maximum number of nodes in worker nodepool" + default = 5 + type = number + +} + +variable "worker_nodepool_min_count" { + description = "Minimum number of nodes in worker nodepool" + default = 0 + type = number +} + +variable "worker_nodepool_mode" { + type = string + default = "User" + description = "Should this Node Pool be used for System or User resources" + validation { + condition = contains(["User", "System"], var.worker_nodepool_mode) + error_message = "This Value should be either User or System." + } +} + +variable "worker_nodepool_eviction_policy" { + default = "Deallocate" + description = "Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool" + type = string + validation { + condition = contains(["Delete", "Deallocate"], var.worker_nodepool_eviction_policy) + error_message = "This Value should be either Delete or Deallocate." + } +} + +variable "kubernetes_version" { + type = string + default = "1.30.4" + description = "Kubernetes version in AKS cluster" + validation { + condition = can(regex("^[0-9]+\\.[0-9]+\\.[0-9]+$", var.kubernetes_version)) + error_message = "The version must be in the format 'major.minor.patch', where major, minor, and patch are non-negative integers." 
+ } +} \ No newline at end of file From 2cf23da53675ed27ecfbbb7ca1d272d8336de41a Mon Sep 17 00:00:00 2001 From: github-actions Date: Fri, 27 Dec 2024 16:49:53 +0000 Subject: [PATCH 2/3] Update documentation --- kubernetes-cluster/README.md | 143 +++++++++++++++++++++++ user-assigned-managed-identity/README.md | 2 +- 2 files changed, 144 insertions(+), 1 deletion(-) create mode 100644 kubernetes-cluster/README.md diff --git a/kubernetes-cluster/README.md b/kubernetes-cluster/README.md new file mode 100644 index 0000000..a99b773 --- /dev/null +++ b/kubernetes-cluster/README.md @@ -0,0 +1,143 @@ + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement_terraform) | ~> 1.3 | +| [azurerm](#requirement_azurerm) | <= 4.14.0 | +## Usage +Basic usage of this module is as follows: + ```hcl + module "example" { + source = "" + + # Optional variables + aks_cluster_name = "" + aks_node_resource_group_name = "" + application_name = "" + automatic_upgrade_channel = "" + cluster_admin_group_names = [ + "" +] + default_nodepool_autoscaling = true + default_nodepool_max_count = 1 + default_nodepool_min_count = 1 + default_nodepool_name = "agentpool" + default_nodepool_sku = "Standard_D2ps_v5" + enable_blob_driver = true + enable_disk_driver = true + enable_fileshare_driver = true + environment = "" + key_vault_secrets_provider = true + keyvault_name = "" + keyvault_rg_name = "" + kubernetes_sku_tier = "" + kubernetes_version = "1.30.4" + load_balancer_sku = "" + location = "" + max_pods_per_node = 100 + network_plugin = "azure" + network_plugin_mode = "overlay" + network_policy = "azure" + node_public_ip_enabled = false + os_disk_size_in_gb = 32 + os_sku = "Ubuntu" + pod_cidr_range = "172.0.0.0/16" + private_cluster = false + resource_group_name = "" + secret_rotation_interval = "1m" + service_cidr_subnet = "192.168.0.0/16" + subnet_name = "" + support_plan = "KubernetesOfficial" + temporary = "" + virtual_netwok_rg = "" + virtual_network_name = "" + 
worker_node_scaledown_mode = "Deallocate" + worker_node_spot = true + worker_nodepool_autoscaling = true + worker_nodepool_eviction_policy = "Deallocate" + worker_nodepool_max_count = 5 + worker_nodepool_min_count = 0 + worker_nodepool_mode = "User" + worker_nodepool_name = "workernodes" + worker_nodepool_sku = "Standard_D2ads_v5" + workload_identity_enabled = true + } + ``` + +## Resources + +| Name | Type | +|------|------| +| [azurerm_kubernetes_cluster.aks_cluster](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster) | resource | +| [azurerm_kubernetes_cluster_node_pool.workernodes](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster_node_pool) | resource | +| [azurerm_resource_group.rg](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/resource_group) | resource | +| [azuread_groups.cluster_admin](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/data-sources/groups) | data source | +| [azuread_service_principal.sp](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/data-sources/service_principal) | data source | +| [azurerm_client_config.current](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/client_config) | data source | +| [azurerm_key_vault.existing](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/key_vault) | data source | +| [azurerm_key_vault_secret.appid](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/key_vault_secret) | data source | +| [azurerm_key_vault_secret.secret](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/key_vault_secret) | data source | +| [azurerm_subnet.vnet_subnet_cidr](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/subnet) | data source | +| 
[azurerm_subscription.primary](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/subscription) | data source | + +## Inputs + +| Name | Description | Type | Required | +|------|-------------|------|:--------:| +| [aks_cluster_name](#input_aks_cluster_name) | Name of Aks cluster in Azure | `string` | no | +| [aks_node_resource_group_name](#input_aks_node_resource_group_name) | Azure node resource group name | `string` | no | +| [application_name](#input_application_name) | Azure application name tag value | `string` | no | +| [automatic_upgrade_channel](#input_automatic_upgrade_channel) | The upgrade channel for this Kubernetes Cluster | `string` | no | +| [cluster_admin_group_names](#input_cluster_admin_group_names) | Name of Azure AD groups to provide cluster admin permissions. | `list(string)` | no | +| [default_nodepool_autoscaling](#input_default_nodepool_autoscaling) | Enable auto scaling in default nodepool | `bool` | no | +| [default_nodepool_max_count](#input_default_nodepool_max_count) | Maximum number of nodes in default nodepool | `number` | no | +| [default_nodepool_min_count](#input_default_nodepool_min_count) | Minimum number of nodes in default nodepool | `number` | no | +| [default_nodepool_name](#input_default_nodepool_name) | System nodepool for cluster | `string` | no | +| [default_nodepool_sku](#input_default_nodepool_sku) | Aks system node pool | `string` | no | +| [enable_blob_driver](#input_enable_blob_driver) | Enable Blob CSI driver enabled | `bool` | no | +| [enable_disk_driver](#input_enable_disk_driver) | Enable Disk CSI driver enabled | `bool` | no | +| [enable_fileshare_driver](#input_enable_fileshare_driver) | Enable Fileshare CSI driver enabled | `bool` | no | +| [environment](#input_environment) | Environment tag value in Azure | `string` | no | +| [key_vault_secrets_provider](#input_key_vault_secrets_provider) | Should the secret store CSI driver on the AKS cluster be enabled | `bool` | no | +| 
[keyvault_name](#input_keyvault_name) | Azure Key Vault name | `string` | no | +| [keyvault_rg_name](#input_keyvault_rg_name) | Azure Key Vault rg name | `string` | no | +| [kubernetes_sku_tier](#input_kubernetes_sku_tier) | SKU Tier that should be used for this Kubernetes Cluster | `string` | no | +| [kubernetes_version](#input_kubernetes_version) | Kubernetes version in AKS cluster | `string` | no | +| [load_balancer_sku](#input_load_balancer_sku) | Load balancer SKu | `string` | no | +| [location](#input_location) | Azure location | `string` | no | +| [max_pods_per_node](#input_max_pods_per_node) | maximum number of pods that can be accumulated in a node | `number` | no | +| [network_plugin](#input_network_plugin) | Azure CNI network plugin | `string` | no | +| [network_plugin_mode](#input_network_plugin_mode) | Specifies the network plugin mode used for building the Kubernetes network | `string` | no | +| [network_policy](#input_network_policy) | Azure networ policy to be used with CNI cluster | `string` | no | +| [node_public_ip_enabled](#input_node_public_ip_enabled) | Should each node have a Public IP Address | `bool` | no | +| [os_disk_size_in_gb](#input_os_disk_size_in_gb) | OS disk size | `number` | no | +| [os_sku](#input_os_sku) | AKS node pool image | `string` | no | +| [pod_cidr_range](#input_pod_cidr_range) | CIDR range for pods | `string` | no | +| [private_cluster](#input_private_cluster) | Deploy AKS cluster without exposing publically accessible endpoint | `bool` | no | +| [resource_group_name](#input_resource_group_name) | Azure resource group name to create aks cluster | `string` | no | +| [secret_rotation_interval](#input_secret_rotation_interval) | The interval to poll for secret rotation | `string` | no | +| [service_cidr_subnet](#input_service_cidr_subnet) | Azure service cidr subnet | `string` | no | +| [subnet_name](#input_subnet_name) | Azure subnet name | `string` | no | +| [support_plan](#input_support_plan) | Specifies the support 
plan which should be used for this Kubernetes Cluster | `string` | no | +| [temporary](#input_temporary) | Temporary tag value in Azure | `string` | no | +| [virtual_netwok_rg](#input_virtual_netwok_rg) | Azure Vnet resource group | `string` | no | +| [virtual_network_name](#input_virtual_network_name) | Azure Vnet name | `string` | no | +| [worker_node_scaledown_mode](#input_worker_node_scaledown_mode) | Specifies how the node pool should deal with scaled-down nodes | `string` | no | +| [worker_node_spot](#input_worker_node_spot) | Worker nodes are spot or ondemand | `bool` | no | +| [worker_nodepool_autoscaling](#input_worker_nodepool_autoscaling) | Enable auto scaling in worker nodepool | `bool` | no | +| [worker_nodepool_eviction_policy](#input_worker_nodepool_eviction_policy) | Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool | `string` | no | +| [worker_nodepool_max_count](#input_worker_nodepool_max_count) | Maximum number of nodes in default nodepool | `number` | no | +| [worker_nodepool_min_count](#input_worker_nodepool_min_count) | Minimum number of nodes in default nodepool | `number` | no | +| [worker_nodepool_mode](#input_worker_nodepool_mode) | Should this Node Pool be used for System or User resources | `string` | no | +| [worker_nodepool_name](#input_worker_nodepool_name) | Additional kubernetes node pool | `string` | no | +| [worker_nodepool_sku](#input_worker_nodepool_sku) | Azure VM Sku of worker node | `string` | no | +| [workload_identity_enabled](#input_workload_identity_enabled) | Enable workload identity in azure or not | `bool` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [aks_cluster_control_plane_url](#output_aks_cluster_control_plane_url) | FQDN of the Azure Kubernetes Managed Cluster | +| [aks_cluster_name](#output_aks_cluster_name) | Azure AKS name | + \ No newline at end of file diff --git a/user-assigned-managed-identity/README.md 
b/user-assigned-managed-identity/README.md index 0208b27..3a301c8 100644 --- a/user-assigned-managed-identity/README.md +++ b/user-assigned-managed-identity/README.md @@ -17,7 +17,7 @@ Basic usage of this module is as follows: location = "" managed_identity_name = "" resource_group_name = "" - temporary = "" + temporary = "TRUE" } ``` From 7e529a8ec906e67cc93b1709497495bcb8b100cc Mon Sep 17 00:00:00 2001 From: Krishnadhas N K <108367225+githubofkrishnadhas@users.noreply.github.com> Date: Fri, 27 Dec 2024 22:27:50 +0530 Subject: [PATCH 3/3] Fix root readme github workflow (#14) * Fix root readme github workflow * Fix root readme github workflow --- .github/workflows/create-root-readme.yaml | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/.github/workflows/create-root-readme.yaml b/.github/workflows/create-root-readme.yaml index 0cd5084..bc15859 100644 --- a/.github/workflows/create-root-readme.yaml +++ b/.github/workflows/create-root-readme.yaml @@ -2,7 +2,7 @@ name: create-root-readme on: workflow_run: - workflows: ["generate-terraform-docs"] # Runs after completion of generate-terraform-docs workflow + workflows: [ "generate-terraform-docs" ] # Runs after completion of generate-terraform-docs workflow types: - completed @@ -16,6 +16,13 @@ jobs: - name: Check out the repository uses: actions/checkout@v4 + - name: Token generator + uses: githubofkrishnadhas/github-access-using-githubapp@v2 + id: token-generation + with: + github_app_id: ${{ secrets.TOKEN_GENERATOR_APPID }} + github_app_private_key: ${{ secrets.TOKEN_GENERATOR_PRIVATE_KEY }} + - name: Ensure tree command is installed run: sudo apt update && sudo apt-get install -y tree @@ -24,9 +31,12 @@ jobs: bash create-readme.sh - name: Commit and Push Changes + env: + GITHUB_TOKEN: ${{ steps.token-generation.outputs.token }} run: | git config user.name 'github-actions' git config user.email 'actions@github.com' git add . 
git commit -m "Update documentation" + git remote set-url origin https://x-access-token:${GITHUB_TOKEN}@github.com/devwithkrishna/azure-terraform-modules.git git push