From d92823c109899a625b96e35736f30b1b0bef4138 Mon Sep 17 00:00:00 2001
From: Viktor Ribchev
Date: Wed, 8 Nov 2023 15:58:53 +0200
Subject: [PATCH] Added script which creates and attaches volumes

Set TIER and DISK_SIZE_GB as Terraform variables
---
 .terraform.lock.hcl                    |  3 +
 main.tf                                |  3 +
 modules/configuration/main.tf          | 11 +++
 modules/vm/main.tf                     |  2 +
 modules/vm/templates/entrypoint.sh.tpl | 96 +++++++++++++++++++++++++-
 modules/vm/variables.tf                | 14 ++++
 variables.tf                           | 12 ++++
 7 files changed, 139 insertions(+), 2 deletions(-)

diff --git a/.terraform.lock.hcl b/.terraform.lock.hcl
index 7c6f4a1..7115337 100644
--- a/.terraform.lock.hcl
+++ b/.terraform.lock.hcl
@@ -6,6 +6,7 @@ provider "registry.terraform.io/hashicorp/azurerm" {
   constraints = ">= 3.71.0, >= 3.76.0"
   hashes = [
     "h1:DWJ+qB1AY68Is827deEJH4pV7BL4PhDmaaWLlYkhqLM=",
+    "h1:oXXa023punihP4XHVp1gUlkflXJ6Y/Oa3+tYIDz3HXM=",
     "zh:09a965d5a35ddf418c0cc0eda507f79ba65ce679faa1ffc636c965c22cd2da88",
     "zh:144523f78596df2843ccf9c4dfa53670c71c66ef1edb96853b4d06b8d2973e26",
     "zh:1b2bbd1b2a7a8715f1bc828a174fc8f6810831cfebf3bffef141638b59aa4589",
@@ -25,6 +26,7 @@ provider "registry.terraform.io/hashicorp/random" {
   version     = "3.5.1"
   constraints = "~> 3.0"
   hashes = [
+    "h1:3hjTP5tQBspPcFAJlfafnWrNrKnr7J4Cp0qB9jbqf30=",
     "h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=",
     "zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
     "zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
@@ -46,6 +48,7 @@ provider "registry.terraform.io/microsoft/azuredevops" {
   constraints = ">= 0.9.0"
   hashes = [
     "h1:GazdScTk4i4y9aIsvsO7GYkHYPYJBfaddaU+VlkLnZg=",
+    "h1:GwFmEDohB4JeBGMMvSOdSw7SHxh1xZqQUfko7eaW+l4=",
     "zh:07e596c045f8ee411c630e29e180e946d5e75af615e0223877de9c4718ff0265",
     "zh:18c07b7b610a85079b510117296da1fe2cd99da3664ece2a98390329dac2b58a",
     "zh:220949b1271420864d324f0494739b70ed79f66ad3d2928d9acb804bc04d1e75",
diff --git a/main.tf b/main.tf
index d33c365..3247a07 100644
--- a/main.tf
+++ b/main.tf
@@ -137,6 +137,9 @@ module "vm" {
   identity_name  = module.identity.identity_name
   key_vault_name = module.vault.key_vault_name
 
+  data_disk_performance_tier = var.data_disk_performance_tier
+  disk_size_gb               = var.disk_size_gb
+
   instance_type = var.instance_type
   image_id      = module.graphdb_image.image_id
   node_count    = var.node_count
diff --git a/modules/configuration/main.tf b/modules/configuration/main.tf
index d31c978..05686c5 100644
--- a/modules/configuration/main.tf
+++ b/modules/configuration/main.tf
@@ -1,3 +1,7 @@
+data "azurerm_resource_group" "graphdb" {
+  name = var.resource_group_name
+}
+
 data "azurerm_user_assigned_identity" "graphdb-instances" {
   name                = var.identity_name
   resource_group_name = var.resource_group_name
@@ -76,3 +80,10 @@ resource "azurerm_role_assignment" "graphdb-license-secret-reader" {
   scope                = data.azurerm_key_vault.graphdb.id
   role_definition_name = "Key Vault Secrets User"
 }
+
+# TODO: Should be moved to the vm module
+resource "azurerm_role_assignment" "rg-contributor-role" {
+  principal_id         = data.azurerm_user_assigned_identity.graphdb-instances.principal_id
+  scope                = data.azurerm_resource_group.graphdb.id
+  role_definition_name = "Contributor"
+}
diff --git a/modules/vm/main.tf b/modules/vm/main.tf
index 5243e15..8274358 100644
--- a/modules/vm/main.tf
+++ b/modules/vm/main.tf
@@ -72,6 +72,8 @@ locals {
   user_data_script = var.custom_user_data != null ? var.custom_user_data : templatefile("${path.module}/templates/entrypoint.sh.tpl", {
     load_balancer_fqdn : var.load_balancer_fqdn
     key_vault_name : var.key_vault_name
+    data_disk_performance_tier : var.data_disk_performance_tier
+    disk_size_gb : var.disk_size_gb
   })
 }
 
diff --git a/modules/vm/templates/entrypoint.sh.tpl b/modules/vm/templates/entrypoint.sh.tpl
index 6a56ad5..ba6a72a 100644
--- a/modules/vm/templates/entrypoint.sh.tpl
+++ b/modules/vm/templates/entrypoint.sh.tpl
@@ -15,8 +15,100 @@ done
 # Login in Azure CLI with managed identity (user or system assigned)
 az login --identity
 
-# TODO: Find/create/mount volumes
-# https://learn.microsoft.com/en-us/azure/virtual-machine-scale-sets/tutorial-use-disks-cli
+# Find/create/attach volumes
+INSTANCE_HOSTNAME=\'$(hostname)\'
+SUBSCRIPTION_ID=$(az account show --query "id" --output tsv)
+RESOURCE_GROUP=$(az vmss list --query "[0].resourceGroup" --output tsv)
+VMSS_NAME=$(az vmss list --query "[0].name" --output tsv)
+INSTANCE_ID=$(az vmss list-instances --resource-group $RESOURCE_GROUP --name $VMSS_NAME --query "[?contains(osProfile.computerName, $${INSTANCE_HOSTNAME})].instanceId" --output tsv)
+ZONE_ID=$(az vmss list-instances --resource-group $RESOURCE_GROUP --name $VMSS_NAME --query "[?contains(osProfile.computerName, $${INSTANCE_HOSTNAME})].zones" --output tsv)
+REGION_ID=$(az vmss list-instances --resource-group $RESOURCE_GROUP --name $VMSS_NAME --query "[?contains(osProfile.computerName, $${INSTANCE_HOSTNAME})].location" --output tsv)
+# Do NOT change the LUN. Based on this we find and mount the disk in the VM.
+LUN=2
+
+TIER=${data_disk_performance_tier}
+DISK_SIZE_GB=${disk_size_gb}
+
+# TODO: Define the disk name based on the hostname?
+diskName="Disk_$${VMSS_NAME}_$${INSTANCE_ID}"
+
+# Wait for an existing unattached disk in the VMSS to become visible
+for i in $(seq 1 6); do
+  existingUnattachedDisk=$(
+    az disk list --resource-group $RESOURCE_GROUP \
+      --query "[?diskState=='Unattached' && starts_with(name, 'Disk_$${VMSS_NAME}')].{Name:name}" \
+      --output tsv
+  )
+
+  if [ -z "$${existingUnattachedDisk:-}" ]; then
+    echo 'Disk not yet available'
+    sleep 10
+  else
+    break
+  fi
+done
+
+if [ -z "$existingUnattachedDisk" ]; then
+  echo "Creating a new managed disk"
+  az disk create --resource-group $RESOURCE_GROUP --name $diskName --size-gb $DISK_SIZE_GB --location $REGION_ID --sku Premium_LRS --zone $ZONE_ID --tier $TIER
+fi
+
+# Check if a managed disk is attached to the instance
+attachedDisk=$(az vmss list-instances --resource-group "$RESOURCE_GROUP" --name "$VMSS_NAME" --query "[?instanceId==\"$INSTANCE_ID\"].storageProfile.dataDisks[].name" --output tsv)
+
+if [ -z "$attachedDisk" ]; then
+  echo "No data disks attached for instance ID $INSTANCE_ID in VMSS $VMSS_NAME."
+  # Try to attach an existing managed disk
+  availableDisks=$(az disk list --resource-group $RESOURCE_GROUP --query "[?diskState=='Unattached' && starts_with(name, 'Disk_$${VMSS_NAME}') && zones[0]=='$${ZONE_ID}'].{Name:name}" --output tsv)
+  # Set the Internal Field Separator to newline to handle spaces in names
+  IFS=$'\n'
+  # Iterate through all available disks and attempt to attach one
+  for availableDisk in $availableDisks; do
+    echo "Attaching available disk $availableDisk."
+    az vmss disk attach --vmss-name $VMSS_NAME --resource-group $RESOURCE_GROUP --instance-id $INSTANCE_ID --lun $LUN --disk "$availableDisk" || true
+  done
+fi
+
+# Get the device name based on the LUN
+graphdb_device=$(lsscsi --scsi --size | awk '/\[1:.*:0:2\]/ {print $7}')
+
+# Check if the device is present after attaching the disk
+if [ -b "$graphdb_device" ]; then
+  echo "Device $graphdb_device is available."
+else
+  echo "Device $graphdb_device is not available. Something went wrong."
+  exit 1
+fi
+
+# Create a file system if there isn't one already
+if [ "$graphdb_device: data" = "$(file -s "$graphdb_device")" ]; then
+  mkfs -t ext4 "$graphdb_device"
+fi
+
+disk_mount_point="/var/opt/graphdb"
+mkdir -p "$disk_mount_point"
+
+# Check if the disk is already mounted
+if ! mount | grep -q "$graphdb_device"; then
+  echo "The disk at $graphdb_device is not mounted."
+
+  # Add an entry to the fstab file to automatically mount the disk
+  if ! grep -q "$graphdb_device" /etc/fstab; then
+    echo "$graphdb_device $disk_mount_point ext4 defaults 0 2" >> /etc/fstab
+  fi
+
+  # Mount the disk
+  mount "$disk_mount_point"
+  echo "The disk at $graphdb_device is now mounted at $disk_mount_point."
+else
+  echo "The disk at $graphdb_device is already mounted."
+fi
+
+# Recreate the GraphDB data folders if necessary and change their ownership
+mkdir -p /var/opt/graphdb/node /var/opt/graphdb/cluster-proxy
+# TODO: Research how to avoid using chown, as it would be a slow operation if data is present.
+chown -R graphdb:graphdb /var/opt/graphdb
 
 #
 # DNS hack
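
Note on the device lookup in the hunk above: with "lsscsi --scsi --size", the device node lands in the seventh whitespace-separated field only because the Azure disk model "Virtual Disk" spans two fields, and the awk pattern [1:.*:0:2] pins SCSI host 1 and LUN 2. A minimal sketch of the output the script assumes (device names and sizes are illustrative):

    $ lsscsi --scsi --size
    [0:0:0:0]    disk    Msft    Virtual Disk    1.0    /dev/sda    32.2GB
    [1:0:0:2]    disk    Msft    Virtual Disk    1.0    /dev/sdc     536GB

On images that ship the Azure udev rules, the same device is usually reachable through a stable symlink, which would be a less layout-sensitive alternative inside the template (a hedged suggestion, not part of the patch; $$ escapes Terraform interpolation):

    graphdb_device=$(readlink -f "/dev/disk/azure/scsi1/lun$${LUN}")
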
diff --git a/modules/vm/variables.tf b/modules/vm/variables.tf
index 623a2d5..5c00f0b 100644
--- a/modules/vm/variables.tf
+++ b/modules/vm/variables.tf
@@ -93,3 +93,17 @@ variable "custom_user_data" {
   type        = string
   default     = null
 }
+
+# Managed Data Disks
+
+variable "disk_size_gb" {
+  description = "Size of the managed data disk which will be created"
+  type        = number
+  default     = null
+}
+
+variable "data_disk_performance_tier" {
+  description = "Performance tier of the managed data disk"
+  type        = string
+  default     = null
+}
diff --git a/variables.tf b/variables.tf
index edc49cb..b6d4be2 100644
--- a/variables.tf
+++ b/variables.tf
@@ -111,3 +111,15 @@ variable "custom_graphdb_vm_user_data" {
   type        = string
   default     = null
 }
+
+variable "disk_size_gb" {
+  description = "Size of the managed data disk which will be created"
+  type        = number
+  default     = 500
+}
+
+variable "data_disk_performance_tier" {
+  description = "Performance tier of the managed data disk"
+  type        = string
+  default     = "P40"
+}
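
For reference, a minimal sketch of exercising the new root-module variables (the variable names come from variables.tf above; the values are illustrative):

    terraform apply \
      -var 'disk_size_gb=1024' \
      -var 'data_disk_performance_tier=P30'

Left unset, the root module defaults to a 500 GB disk at performance tier P40; the vm module defaults both variables to null, so they must be passed down explicitly, as main.tf now does.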