From 05594a1f92c3b96a0e9ffc149d0704bc5a9af7fa Mon Sep 17 00:00:00 2001 From: Carlos Lapao Date: Thu, 21 Nov 2024 09:55:56 +0000 Subject: [PATCH] Add always run scripts (#65) * Add always run scripts - Added the ability to set a post_processor_script to always run on update - Fixed some issues where in some cases the update would put the vm in the wrong state - Fixed an issue where some errors would cause a nil pointer dereference - Added an option to wait for the network to be up before querying the vm data source - Added a retry mechanism to attempt to get the internal ip on create/update * Fix the documentation --- docs/data-sources/vm.md | 1 + docs/resources/clone_vm.md | 2 + docs/resources/remote_vm.md | 2 + docs/resources/vagrant_box.md | 2 + internal/clone_vm/resource.go | 112 ++++++++++++----- internal/common/ensure_machine_running.go | 12 +- internal/common/post_processor_script.go | 6 + internal/common/specs.go | 4 +- internal/remoteimage/resource.go | 101 ++++++++++++---- .../post_processor_script.go | 1 + .../schemas/postprocessorscript/schemas.go | 4 + internal/telemetry/main.go | 1 + internal/telemetry/telemetry_item.go | 3 + internal/vagrantbox/resource.go | 113 +++++++++++++----- internal/virtualmachine/datasource.go | 68 +++++++---- .../models/datasource_models_v2.go | 32 +++++ .../schemas/datasource_schema_v2.go | 92 ++++++++++++++ main.go | 16 +-- 18 files changed, 456 insertions(+), 116 deletions(-) create mode 100644 internal/virtualmachine/models/datasource_models_v2.go create mode 100644 internal/virtualmachine/schemas/datasource_schema_v2.go diff --git a/docs/data-sources/vm.md b/docs/data-sources/vm.md index 9e19718..0f9d6ed 100644 --- a/docs/data-sources/vm.md +++ b/docs/data-sources/vm.md @@ -44,6 +44,7 @@ data "parallels-desktop_vm" "example" { - `filter` (Block, Optional) Filter block, this is used to filter data sources (see [below for nested schema](#nestedblock--filter)) - `host` (String) Parallels Desktop DevOps Host - `orchestrator` (String) Parallels Desktop DevOps Orchestrator +- `wait_for_network_up` (Boolean) Wait for network up ### Read-Only diff --git a/docs/resources/clone_vm.md b/docs/resources/clone_vm.md index b6ce282..d6b3883 100644 --- a/docs/resources/clone_vm.md +++ b/docs/resources/clone_vm.md @@ -189,6 +189,7 @@ Optional: Optional: +- `always_run_on_update` (Boolean) Always run on update - `environment_variables` (Map of String) Environment variables that can be used in the DevOps service, please see documentation to see which variables are available - `inline` (List of String) Inline script - `retry` (Block, Optional) Retry settings (see [below for nested schema](#nestedblock--on_destroy_script--retry)) @@ -223,6 +224,7 @@ Optional: Optional: +- `always_run_on_update` (Boolean) Always run on update - `environment_variables` (Map of String) Environment variables that can be used in the DevOps service, please see documentation to see which variables are available - `inline` (List of String) Inline script - `retry` (Block, Optional) Retry settings (see [below for nested schema](#nestedblock--post_processor_script--retry)) diff --git a/docs/resources/remote_vm.md b/docs/resources/remote_vm.md index 3a7ce66..18f4aba 100644 --- a/docs/resources/remote_vm.md +++ b/docs/resources/remote_vm.md @@ -197,6 +197,7 @@ Optional: Optional: +- `always_run_on_update` (Boolean) Always run on update - `environment_variables` (Map of String) Environment variables that can be used in the DevOps service, please see documentation to see which variables are available - `inline` (List of 
String) Inline script - `retry` (Block, Optional) Retry settings (see [below for nested schema](#nestedblock--on_destroy_script--retry)) @@ -231,6 +232,7 @@ Optional: Optional: +- `always_run_on_update` (Boolean) Always run on update - `environment_variables` (Map of String) Environment variables that can be used in the DevOps service, please see documentation to see which variables are available - `inline` (List of String) Inline script - `retry` (Block, Optional) Retry settings (see [below for nested schema](#nestedblock--post_processor_script--retry)) diff --git a/docs/resources/vagrant_box.md b/docs/resources/vagrant_box.md index 685c9e0..d738235 100644 --- a/docs/resources/vagrant_box.md +++ b/docs/resources/vagrant_box.md @@ -190,6 +190,7 @@ Optional: Optional: +- `always_run_on_update` (Boolean) Always run on update - `environment_variables` (Map of String) Environment variables that can be used in the DevOps service, please see documentation to see which variables are available - `inline` (List of String) Inline script - `retry` (Block, Optional) Retry settings (see [below for nested schema](#nestedblock--on_destroy_script--retry)) @@ -224,6 +225,7 @@ Optional: Optional: +- `always_run_on_update` (Boolean) Always run on update - `environment_variables` (Map of String) Environment variables that can be used in the DevOps service, please see documentation to see which variables are available - `inline` (List of String) Inline script - `retry` (Block, Optional) Retry settings (see [below for nested schema](#nestedblock--post_processor_script--retry)) diff --git a/internal/clone_vm/resource.go b/internal/clone_vm/resource.go index fe3166a..c28db52 100644 --- a/internal/clone_vm/resource.go +++ b/internal/clone_vm/resource.go @@ -134,9 +134,9 @@ func (r *CloneVmResource) Create(ctx context.Context, req resource.CreateRequest } // Checking if the name is already in use - existingVms, diag := apiclient.GetVms(ctx, hostConfig, "name", data.Name.ValueString()) - if diag.HasError() { - resp.Diagnostics.Append(diag...) + existingVms, existingVmDiag := apiclient.GetVms(ctx, hostConfig, "name", data.Name.ValueString()) + if existingVmDiag.HasError() { + resp.Diagnostics.Append(existingVmDiag...) return } @@ -146,9 +146,9 @@ func (r *CloneVmResource) Create(ctx context.Context, req resource.CreateRequest } // Checking if we can find the base vm to clone - vm, diag := apiclient.GetVm(ctx, hostConfig, data.BaseVmId.ValueString()) - if diag.HasError() { - diag.Append(diag...) + vm, getVmDiag := apiclient.GetVm(ctx, hostConfig, data.BaseVmId.ValueString()) + if getVmDiag.HasError() { + resp.Diagnostics.Append(getVmDiag...) return } @@ -177,9 +177,9 @@ func (r *CloneVmResource) Create(ctx context.Context, req resource.CreateRequest } // Checking if we can find the base vm to clone - createdVms, diag := apiclient.GetVms(ctx, hostConfig, "name", data.Name.ValueString()) - if diag.HasError() { - diag.Append(diag...) + createdVms, createVmDiag := apiclient.GetVms(ctx, hostConfig, "name", data.Name.ValueString()) + if createVmDiag.HasError() { + resp.Diagnostics.Append(createVmDiag...) 
return } if len(createdVms) != 1 { @@ -194,9 +194,9 @@ func (r *CloneVmResource) Create(ctx context.Context, req resource.CreateRequest // stopping the machine as it might need some operations where the machine needs to be stopped // add anything here in sequence that needs to be done before the machine is started // so we do not loose time waiting for the machine to stop - stoppedVm, diag := common.EnsureMachineStopped(ctx, hostConfig, &clonedVm) - if diag.HasError() { - resp.Diagnostics.Append(diag...) + stoppedVm, stoppedVmDiag := common.EnsureMachineStopped(ctx, hostConfig, &clonedVm) + if stoppedVmDiag.HasError() { + resp.Diagnostics.Append(stoppedVmDiag...) } // Applying the Specs block @@ -331,13 +331,24 @@ func (r *CloneVmResource) Create(ctx context.Context, req resource.CreateRequest externalIp := "" internalIp := "" - refreshVm, refreshDiag := apiclient.GetVm(ctx, hostConfig, vm.ID) - if refreshDiag.HasError() { - resp.Diagnostics.Append(refreshDiag...) - return - } else { - externalIp = refreshVm.HostExternalIpAddress - internalIp = refreshVm.InternalIpAddress + retryAttempts := 10 + var refreshVm *apimodels.VirtualMachine + var refreshDiag diag.Diagnostics + for { + refreshVm, refreshDiag = apiclient.GetVm(ctx, hostConfig, vm.ID) + if !refreshDiag.HasError() { + externalIp = refreshVm.HostExternalIpAddress + internalIp = refreshVm.InternalIpAddress + } + if internalIp != "" { + time.Sleep(5 * time.Second) + break + } + if retryAttempts == 0 { + internalIp = "-" + break + } + retryAttempts-- } data.ExternalIp = types.StringValue(externalIp) @@ -507,15 +518,16 @@ func (r *CloneVmResource) Update(ctx context.Context, req resource.UpdateRequest DisableTlsValidation: r.provider.DisableTlsValidation.ValueBool(), } - vm, diag := apiclient.GetVm(ctx, hostConfig, currentData.ID.ValueString()) - if diag.HasError() { - resp.Diagnostics.Append(diag...) + vm, getVmDiag := apiclient.GetVm(ctx, hostConfig, currentData.ID.ValueString()) + if getVmDiag.HasError() { + resp.Diagnostics.Append(getVmDiag...) return } if vm == nil { resp.State.RemoveResource(ctx) return } + currentVmState := vm.State nameChanges := apimodels.NewVmConfigRequest(vm.User) currentState := vm.State @@ -666,13 +678,24 @@ func (r *CloneVmResource) Update(ctx context.Context, req resource.UpdateRequest externalIp := "" internalIp := "" - refreshVm, refreshDiag := apiclient.GetVm(ctx, hostConfig, vm.ID) - if refreshDiag.HasError() { - resp.Diagnostics.Append(refreshDiag...) 
- return - } else { - externalIp = refreshVm.HostExternalIpAddress - internalIp = refreshVm.InternalIpAddress + retryAttempts := 10 + var refreshVm *apimodels.VirtualMachine + var refreshDiag diag.Diagnostics + for { + refreshVm, refreshDiag = apiclient.GetVm(ctx, hostConfig, vm.ID) + if !refreshDiag.HasError() { + externalIp = refreshVm.HostExternalIpAddress + internalIp = refreshVm.InternalIpAddress + } + if internalIp != "" { + time.Sleep(5 * time.Second) + break + } + if retryAttempts == 0 { + internalIp = "-" + break + } + retryAttempts-- } data.ID = types.StringValue(refreshVm.ID) @@ -705,6 +728,37 @@ func (r *CloneVmResource) Update(ctx context.Context, req resource.UpdateRequest } } + if currentVmState != refreshVm.State { + // If the vm state is out of sync we need to set it right + switch currentVmState { + case "running": + if refreshVm.State == "stopped" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpStart) + } + if refreshVm.State == "paused" || refreshVm.State == "suspended" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpResume) + } + case "stopped": + if refreshVm.State == "running" || refreshVm.State == "paused" || refreshVm.State == "suspended" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpStop) + } + case "paused": + if refreshVm.State == "running" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpResume) + } + if refreshVm.State == "stopped" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpStart) + } + case "suspended": + if refreshVm.State == "running" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpResume) + } + if refreshVm.State == "stopped" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpStart) + } + } + } + tflog.Info(ctx, "Updated vm with id "+data.ID.ValueString()+" and name "+data.Name.ValueString()) resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) diff --git a/internal/common/ensure_machine_running.go b/internal/common/ensure_machine_running.go index d5d082f..2b38236 100644 --- a/internal/common/ensure_machine_running.go +++ b/internal/common/ensure_machine_running.go @@ -21,20 +21,24 @@ func EnsureMachineRunning(ctx context.Context, hostConfig apiclient.HostConfig, return vm, diagnostics } - maxRetries := 10 + maxRetries := 20 retryCount := 0 for { diagnostics = diag.Diagnostics{} retryCount += 1 if returnVm.State != "running" { tflog.Info(ctx, "Machine "+returnVm.Name+" is not running, starting it"+fmt.Sprintf("[%v/%v]", retryCount, maxRetries)) - result, stateDiag := apiclient.SetMachineState(ctx, hostConfig, returnVm.ID, apiclient.MachineStateOpStart) + op := apiclient.MachineStateOpStart + if returnVm.State == "suspended" || returnVm.State == "paused" { + op = apiclient.MachineStateOpResume + } + result, stateDiag := apiclient.SetMachineState(ctx, hostConfig, returnVm.ID, op) if stateDiag.HasError() { diagnostics.Append(stateDiag...) 
} if !result { - diagnostics.AddError("error starting vm", "error starting vm") + diagnostics.AddError("Error starting vm", "Could not set the state of the machine to running") } tflog.Info(ctx, "Checking if "+returnVm.Name+" is running") @@ -62,7 +66,7 @@ func EnsureMachineRunning(ctx context.Context, hostConfig apiclient.HostConfig, // We have run out of retries, add an error and break out of the loop if retryCount >= maxRetries { - diagnostics.AddError("error starting vm", "error starting vm") + diagnostics.AddError("Error starting vm", "Could not verify the state of the machine after starting, retry count exceeded") break } diff --git a/internal/common/post_processor_script.go b/internal/common/post_processor_script.go index 9335b17..183ae30 100644 --- a/internal/common/post_processor_script.go +++ b/internal/common/post_processor_script.go @@ -75,7 +75,13 @@ func RunPostProcessorScript(ctx context.Context, hostConfig apiclient.HostConfig func PostProcessorHasChanges(ctx context.Context, planPostProcessorScript, statePostProcessorScript []*postprocessorscript.PostProcessorScript) bool { for i, script := range planPostProcessorScript { + if script.AlwaysRunOnUpdate.ValueBool() { + return true + } innerElements := script.Inline.Elements() + if len(innerElements) > 0 && len(statePostProcessorScript) == 0 { + return true + } if len(innerElements) != len(statePostProcessorScript[i].Inline.Elements()) { return true } diff --git a/internal/common/specs.go b/internal/common/specs.go index 0964823..b653963 100644 --- a/internal/common/specs.go +++ b/internal/common/specs.go @@ -62,7 +62,7 @@ func SpecsBlockOnUpdate(ctx context.Context, hostConfig apiclient.HostConfig, vm } if hardwareInfo.TotalAvailable.LogicalCpuCount-int64(updateValueInt) <= 0 { - diagnostics.AddError("not enough cpus", "not enough cpus") + diagnostics.AddError("Not enough cpus", "You requested more cpus than available, the machine will need "+updateValue+" cpus and we have "+fmt.Sprintf("%v", hardwareInfo.TotalAvailable.LogicalCpuCount)) return diagnostics } @@ -88,7 +88,7 @@ func SpecsBlockOnUpdate(ctx context.Context, hostConfig apiclient.HostConfig, vm } if hardwareInfo.TotalAvailable.MemorySize-float64(updateValueInt) <= 0 { - diagnostics.AddError("not enough memory", "not enough memory") + diagnostics.AddError("Not enough memory", "You requested more memory than available, the machine will need "+updateValue+" memory and we have "+fmt.Sprintf("%v", hardwareInfo.TotalAvailable.MemorySize)) return diagnostics } diff --git a/internal/remoteimage/resource.go b/internal/remoteimage/resource.go index f173d9b..9b1e912 100644 --- a/internal/remoteimage/resource.go +++ b/internal/remoteimage/resource.go @@ -369,13 +369,24 @@ func (r *RemoteVmResource) Create(ctx context.Context, req resource.CreateReques externalIp := "" internalIp := "" - refreshVm, refreshDiag := apiclient.GetVm(ctx, hostConfig, response.ID) - if refreshDiag.HasError() { - resp.Diagnostics.Append(refreshDiag...) 
- return - } else { - externalIp = refreshVm.HostExternalIpAddress - internalIp = refreshVm.InternalIpAddress + retryAttempts := 10 + var refreshVm *apimodels.VirtualMachine + var refreshDiag diag.Diagnostics + for { + refreshVm, refreshDiag = apiclient.GetVm(ctx, hostConfig, response.ID) + if !refreshDiag.HasError() { + externalIp = refreshVm.HostExternalIpAddress + internalIp = refreshVm.InternalIpAddress + } + if internalIp != "" { + time.Sleep(5 * time.Second) + break + } + if retryAttempts == 0 { + internalIp = "-" + break + } + retryAttempts-- } data.OsType = types.StringValue(createdVM.OS) @@ -557,15 +568,16 @@ func (r *RemoteVmResource) Update(ctx context.Context, req resource.UpdateReques DisableTlsValidation: r.provider.DisableTlsValidation.ValueBool(), } - vm, diag := apiclient.GetVm(ctx, hostConfig, currentData.ID.ValueString()) - if diag.HasError() { - resp.Diagnostics.Append(diag...) + vm, getVmDiag := apiclient.GetVm(ctx, hostConfig, currentData.ID.ValueString()) + if getVmDiag.HasError() { + resp.Diagnostics.Append(getVmDiag...) return } if vm == nil { resp.State.RemoveResource(ctx) return } + currentVmState := vm.State hostConfig.HostId = vm.HostId @@ -609,9 +621,9 @@ func (r *RemoteVmResource) Update(ctx context.Context, req resource.UpdateReques // Changing the name of the machine if nameChanges.HasChanges() { - _, diag := apiclient.ConfigureMachine(ctx, hostConfig, vm.ID, nameChanges) - if diag.HasError() { - resp.Diagnostics.Append(diag...) + _, nameChangeDiag := apiclient.ConfigureMachine(ctx, hostConfig, vm.ID, nameChanges) + if nameChangeDiag.HasError() { + resp.Diagnostics.Append(nameChangeDiag...) return } } @@ -624,8 +636,8 @@ func (r *RemoteVmResource) Update(ctx context.Context, req resource.UpdateReques return } - if diags := common.SpecsBlockOnUpdate(ctx, hostConfig, vm, data.Specs, currentData.Specs); diags.HasError() { - resp.Diagnostics.Append(diags...) + if specBlockDiag := common.SpecsBlockOnUpdate(ctx, hostConfig, vm, data.Specs, currentData.Specs); specBlockDiag.HasError() { + resp.Diagnostics.Append(specBlockDiag...) return } } @@ -734,13 +746,24 @@ func (r *RemoteVmResource) Update(ctx context.Context, req resource.UpdateReques externalIp := "" internalIp := "" - refreshVm, refreshDiag := apiclient.GetVm(ctx, hostConfig, vm.ID) - if refreshDiag.HasError() { - resp.Diagnostics.Append(refreshDiag...) 
- return - } else { - externalIp = refreshVm.HostExternalIpAddress - internalIp = refreshVm.InternalIpAddress + retryAttempts := 10 + var refreshVm *apimodels.VirtualMachine + var refreshDiag diag.Diagnostics + for { + refreshVm, refreshDiag = apiclient.GetVm(ctx, hostConfig, vm.ID) + if !refreshDiag.HasError() { + externalIp = refreshVm.HostExternalIpAddress + internalIp = refreshVm.InternalIpAddress + } + if internalIp != "" { + time.Sleep(5 * time.Second) + break + } + if retryAttempts == 0 { + internalIp = "-" + break + } + retryAttempts-- } data.ID = types.StringValue(vm.ID) @@ -775,7 +798,39 @@ func (r *RemoteVmResource) Update(ctx context.Context, req resource.UpdateReques } } - if (data.RunAfterCreate.ValueBool() || data.RunAfterCreate.IsUnknown() || data.RunAfterCreate.IsNull()) && (vm.State == "stopped") { + if currentVmState != refreshVm.State { + // If the vm state is out of sync we need to set it right + switch currentVmState { + case "running": + if refreshVm.State == "stopped" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpStart) + } + if refreshVm.State == "paused" || refreshVm.State == "suspended" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpResume) + } + case "stopped": + if refreshVm.State == "running" || refreshVm.State == "paused" || refreshVm.State == "suspended" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpStop) + } + case "paused": + if refreshVm.State == "running" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpResume) + } + if refreshVm.State == "stopped" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpStart) + } + case "suspended": + if refreshVm.State == "running" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpResume) + } + if refreshVm.State == "stopped" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpStart) + } + } + } + + if (data.RunAfterCreate.ValueBool() || data.KeepRunning.ValueBool() || (data.RunAfterCreate.IsUnknown() && data.KeepRunning.IsUnknown())) && + (vm.State == "stopped") { if _, diag := common.EnsureMachineRunning(ctx, hostConfig, vm); diag.HasError() { resp.Diagnostics.Append(diag...) 
return diff --git a/internal/schemas/postprocessorscript/post_processor_script.go b/internal/schemas/postprocessorscript/post_processor_script.go index b1600aa..ff275e1 100644 --- a/internal/schemas/postprocessorscript/post_processor_script.go +++ b/internal/schemas/postprocessorscript/post_processor_script.go @@ -18,6 +18,7 @@ import ( type PostProcessorScript struct { Inline types.List `tfsdk:"inline"` Retry *PostProcessorScriptRetry `tfsdk:"retry"` + AlwaysRunOnUpdate types.Bool `tfsdk:"always_run_on_update"` EnvironmentVariables map[string]types.String `tfsdk:"environment_variables"` Result basetypes.ListValue `tfsdk:"result"` } diff --git a/internal/schemas/postprocessorscript/schemas.go b/internal/schemas/postprocessorscript/schemas.go index 81222e5..a806073 100644 --- a/internal/schemas/postprocessorscript/schemas.go +++ b/internal/schemas/postprocessorscript/schemas.go @@ -38,6 +38,10 @@ var ( Optional: true, ElementType: types.StringType, }, + "always_run_on_update": schema.BoolAttribute{ + MarkdownDescription: "Always run on update", + Optional: true, + }, "result": schema.ListNestedAttribute{ MarkdownDescription: "Result of the script", Description: "Result of the script", diff --git a/internal/telemetry/main.go b/internal/telemetry/main.go index 489f320..5c3b66b 100644 --- a/internal/telemetry/main.go +++ b/internal/telemetry/main.go @@ -14,6 +14,7 @@ var ( globalTelemetryService *TelemetryService lock = &sync.Mutex{} AMPLITUDE_API_KEY string = "" + VERSION = "" ) func New(context context.Context) *TelemetryService { diff --git a/internal/telemetry/telemetry_item.go b/internal/telemetry/telemetry_item.go index 902ce83..f189924 100644 --- a/internal/telemetry/telemetry_item.go +++ b/internal/telemetry/telemetry_item.go @@ -32,6 +32,9 @@ func NewTelemetryItem(ctx context.Context, userId string, eventType TelemetryEve // Adding default properties item.Properties["os"] = runtime.GOOS item.Properties["architecture"] = runtime.GOARCH + if VERSION != "" { + item.Properties["version"] = VERSION + } hash := crypto.SHA256.New() hash.Write([]byte(userId)) diff --git a/internal/vagrantbox/resource.go b/internal/vagrantbox/resource.go index 9621e8a..c3be0e5 100644 --- a/internal/vagrantbox/resource.go +++ b/internal/vagrantbox/resource.go @@ -127,9 +127,9 @@ func (r *VagrantBoxResource) Create(ctx context.Context, req resource.CreateRequ } } - vm, diag := apiclient.GetVms(ctx, hostConfig, "Name", data.Name.String()) - if diag.HasError() { - diag.Append(diag...) + vm, getVmDiag := apiclient.GetVms(ctx, hostConfig, "Name", data.Name.String()) + if getVmDiag.HasError() { + resp.Diagnostics.Append(getVmDiag...) return } @@ -153,18 +153,18 @@ func (r *VagrantBoxResource) Create(ctx context.Context, req resource.CreateRequ createVmRequest.Owner = data.Owner.ValueString() } - response, diag := apiclient.CreateVm(ctx, hostConfig, createVmRequest) - if diag.HasError() { - resp.Diagnostics.Append(diag...) + response, createVmDiag := apiclient.CreateVm(ctx, hostConfig, createVmRequest) + if createVmDiag.HasError() { + resp.Diagnostics.Append(createVmDiag...) return } data.ID = types.StringValue(response.ID) tflog.Info(ctx, "Created vm with id "+data.ID.ValueString()) - createdVM, diag := apiclient.GetVm(ctx, hostConfig, response.ID) - if diag.HasError() { - resp.Diagnostics.Append(diag...) + createdVM, getVmDiag := apiclient.GetVm(ctx, hostConfig, response.ID) + if getVmDiag.HasError() { + resp.Diagnostics.Append(getVmDiag...) 
return } @@ -176,9 +176,9 @@ func (r *VagrantBoxResource) Create(ctx context.Context, req resource.CreateRequ // stopping the machine as it might need some operations where the machine needs to be stopped // add anything here in sequence that needs to be done before the machine is started // so we do not loose time waiting for the machine to stop - stoppedVm, diag := common.EnsureMachineStopped(ctx, hostConfig, createdVM) - if diag.HasError() { - resp.Diagnostics.Append(diag...) + stoppedVm, stoppedVmDiag := common.EnsureMachineStopped(ctx, hostConfig, createdVM) + if stoppedVmDiag.HasError() { + resp.Diagnostics.Append(stoppedVmDiag...) } // Applying the Specs block @@ -313,13 +313,24 @@ func (r *VagrantBoxResource) Create(ctx context.Context, req resource.CreateRequ externalIp := "" internalIp := "" - refreshVm, refreshDiag := apiclient.GetVm(ctx, hostConfig, response.ID) - if refreshDiag.HasError() { - resp.Diagnostics.Append(refreshDiag...) - return - } else { - externalIp = refreshVm.HostExternalIpAddress - internalIp = refreshVm.InternalIpAddress + retryAttempts := 10 + var refreshVm *apimodels.VirtualMachine + var refreshDiag diag.Diagnostics + for { + refreshVm, refreshDiag = apiclient.GetVm(ctx, hostConfig, response.ID) + if !refreshDiag.HasError() { + externalIp = refreshVm.HostExternalIpAddress + internalIp = refreshVm.InternalIpAddress + } + if internalIp != "" { + time.Sleep(5 * time.Second) + break + } + if retryAttempts == 0 { + internalIp = "-" + break + } + retryAttempts-- } data.ExternalIp = types.StringValue(externalIp) @@ -486,9 +497,9 @@ func (r *VagrantBoxResource) Update(ctx context.Context, req resource.UpdateRequ DisableTlsValidation: r.provider.DisableTlsValidation.ValueBool(), } - vm, diag := apiclient.GetVm(ctx, hostConfig, currentData.ID.ValueString()) - if diag.HasError() { - resp.Diagnostics.Append(diag...) + vm, getVmDiag := apiclient.GetVm(ctx, hostConfig, currentData.ID.ValueString()) + if getVmDiag.HasError() { + resp.Diagnostics.Append(getVmDiag...) return } if vm == nil { @@ -496,6 +507,8 @@ func (r *VagrantBoxResource) Update(ctx context.Context, req resource.UpdateRequ return } + currentVmState := vm.State + nameChanges := apimodels.NewVmConfigRequest(vm.User) currentState := vm.State needsRestart := false @@ -645,13 +658,24 @@ func (r *VagrantBoxResource) Update(ctx context.Context, req resource.UpdateRequ externalIp := "" internalIp := "" - refreshVm, refreshDiag := apiclient.GetVm(ctx, hostConfig, vm.ID) - if refreshDiag.HasError() { - resp.Diagnostics.Append(refreshDiag...) 
- return - } else { - externalIp = refreshVm.HostExternalIpAddress - internalIp = refreshVm.InternalIpAddress + retryAttempts := 10 + var refreshVm *apimodels.VirtualMachine + var refreshDiag diag.Diagnostics + for { + refreshVm, refreshDiag = apiclient.GetVm(ctx, hostConfig, vm.ID) + if !refreshDiag.HasError() { + externalIp = refreshVm.HostExternalIpAddress + internalIp = refreshVm.InternalIpAddress + } + if internalIp != "" { + time.Sleep(5 * time.Second) + break + } + if retryAttempts == 0 { + internalIp = "-" + break + } + retryAttempts-- } data.ExternalIp = types.StringValue(externalIp) @@ -684,6 +708,37 @@ func (r *VagrantBoxResource) Update(ctx context.Context, req resource.UpdateRequ } } + if currentVmState != refreshVm.State { + // If the vm state is out of sync we need to set it right + switch currentVmState { + case "running": + if refreshVm.State == "stopped" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpStart) + } + if refreshVm.State == "paused" || refreshVm.State == "suspended" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpResume) + } + case "stopped": + if refreshVm.State == "running" || refreshVm.State == "paused" || refreshVm.State == "suspended" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpStop) + } + case "paused": + if refreshVm.State == "running" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpResume) + } + if refreshVm.State == "stopped" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpStart) + } + case "suspended": + if refreshVm.State == "running" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpResume) + } + if refreshVm.State == "stopped" { + apiclient.SetMachineState(ctx, hostConfig, data.ID.ValueString(), apiclient.MachineStateOpStart) + } + } + } + tflog.Info(ctx, "Updated vm with id "+data.ID.ValueString()+" and name "+data.Name.ValueString()) resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) diff --git a/internal/virtualmachine/datasource.go b/internal/virtualmachine/datasource.go index a3ee11c..142b832 100644 --- a/internal/virtualmachine/datasource.go +++ b/internal/virtualmachine/datasource.go @@ -3,6 +3,7 @@ package virtualmachine import ( "context" "fmt" + "time" "terraform-provider-parallels-desktop/internal/apiclient" "terraform-provider-parallels-desktop/internal/models" @@ -48,11 +49,11 @@ func (d *VirtualMachinesDataSource) Metadata(_ context.Context, req datasource.M } func (d *VirtualMachinesDataSource) Schema(_ context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { - resp.Schema = schemas.VirtualMachineDataSourceSchemaV1 + resp.Schema = schemas.VirtualMachineDataSourceSchemaV2 } func (d *VirtualMachinesDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { - var data data_models.VirtualMachinesDataSourceModelV1 + var data data_models.VirtualMachinesDataSourceModelV2 resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return } @@ -82,31 +83,56 @@ func (d *VirtualMachinesDataSource) Read(ctx context.Context, req datasource.Rea DisableTlsValidation: d.provider.DisableTlsValidation.ValueBool(), } - vms, diag := apiclient.GetVms(ctx, hostConfig, data.Filter.FieldName.ValueString(), data.Filter.Value.ValueString()) - if diag.HasError() { - diag.Append(diag...) 
- return - } + retryAttempts := 10 + for { + needsRefresh := false + data.Machines = make([]data_models.VirtualMachineModelV2, 0) + vms, diag := apiclient.GetVms(ctx, hostConfig, data.Filter.FieldName.ValueString(), data.Filter.Value.ValueString()) + if diag.HasError() { + resp.Diagnostics.Append(diag...) + return + } + + for _, machine := range vms { + stateMachine := data_models.VirtualMachineModelV2{ + HostIP: types.StringValue("-"), + ID: types.StringValue(machine.ID), + Name: types.StringValue(machine.Name), + Description: types.StringValue(machine.Description), + OSType: types.StringValue(machine.OS), + State: types.StringValue(machine.State), + Home: types.StringValue(machine.Home), + ExternalIp: types.StringValue(machine.HostExternalIpAddress), + InternalIp: types.StringValue(machine.InternalIpAddress), + OrchestratorHostId: types.StringValue(machine.HostId), + } + if stateMachine.State.ValueString() == "running" && stateMachine.InternalIp.ValueString() == "" && data.WaitForNetworkUp.ValueBool() { + needsRefresh = true + time.Sleep(5 * time.Second) // wait for 5 seconds to give the network time to come up + break + } + + if stateMachine.InternalIp.ValueString() == "" { + stateMachine.InternalIp = types.StringValue("-") + } + + data.Machines = append(data.Machines, stateMachine) + } + + if !needsRefresh { + break + } - for _, machine := range vms { - stateMachine := data_models.VirtualMachineModelV1{ - HostIP: types.StringValue("-"), - ID: types.StringValue(machine.ID), - Name: types.StringValue(machine.Name), - Description: types.StringValue(machine.Description), - OSType: types.StringValue(machine.OS), - State: types.StringValue(machine.State), - Home: types.StringValue(machine.Home), - ExternalIp: types.StringValue(machine.HostExternalIpAddress), - InternalIp: types.StringValue(machine.InternalIpAddress), - OrchestratorHostId: types.StringValue(machine.HostId), + if retryAttempts == 0 { + resp.Diagnostics.AddError("Timeout waiting for network to be up", "Timed out waiting for the machine to report an internal IP address") + return } - data.Machines = append(data.Machines, stateMachine) + retryAttempts-- } if data.Machines == nil { - data.Machines = make([]data_models.VirtualMachineModelV1, 0) + data.Machines = make([]data_models.VirtualMachineModelV2, 0) } diags := resp.State.Set(ctx, &data) diff --git a/internal/virtualmachine/models/datasource_models_v2.go b/internal/virtualmachine/models/datasource_models_v2.go new file mode 100644 index 0000000..0b53e4d --- /dev/null +++ b/internal/virtualmachine/models/datasource_models_v2.go @@ -0,0 +1,32 @@ +package models + +import ( + "terraform-provider-parallels-desktop/internal/schemas/authenticator" + "terraform-provider-parallels-desktop/internal/schemas/filter" + + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// VirtualMachinesDataSourceModelV2 represents the data source schema for the virtual_machines data source. +type VirtualMachinesDataSourceModelV2 struct { + Authenticator *authenticator.Authentication `tfsdk:"authenticator"` + Host types.String `tfsdk:"host"` + Orchestrator types.String `tfsdk:"orchestrator"` + WaitForNetworkUp types.Bool `tfsdk:"wait_for_network_up"` + Filter *filter.Filter `tfsdk:"filter"` + Machines []VirtualMachineModelV2 `tfsdk:"machines"` +} + +// VirtualMachineModelV2 represents a virtual machine model with its properties. +type VirtualMachineModelV2 struct { + HostIP types.String `tfsdk:"host_ip"` // The IP address of the host machine. + ID types.String `tfsdk:"id"` // The unique identifier of the virtual machine. 
+ ExternalIp types.String `tfsdk:"external_ip"` // The external IP address of the virtual machine. + InternalIp types.String `tfsdk:"internal_ip"` // The internal IP address of the virtual machine. + OrchestratorHostId types.String `tfsdk:"orchestrator_host_id"` // The unique identifier of the orchestrator host. + Name types.String `tfsdk:"name"` // The name of the virtual machine. + Description types.String `tfsdk:"description"` // The description of the virtual machine. + OSType types.String `tfsdk:"os_type"` // The type of the operating system installed on the virtual machine. + State types.String `tfsdk:"state"` // The state of the virtual machine. + Home types.String `tfsdk:"home"` // The path to the virtual machine home directory. +} diff --git a/internal/virtualmachine/schemas/datasource_schema_v2.go b/internal/virtualmachine/schemas/datasource_schema_v2.go new file mode 100644 index 0000000..f95ef64 --- /dev/null +++ b/internal/virtualmachine/schemas/datasource_schema_v2.go @@ -0,0 +1,92 @@ +package schemas + +import ( + "terraform-provider-parallels-desktop/internal/schemas/authenticator" + "terraform-provider-parallels-desktop/internal/schemas/filter" + + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" +) + +var VirtualMachineDataSourceSchemaV2 = schema.Schema{ + MarkdownDescription: "Virtual Machine Data Source", + Blocks: map[string]schema.Block{ + authenticator.SchemaName: authenticator.SchemaBlock, + filter.SchemaName: filter.SchemaBlock, + }, + Attributes: map[string]schema.Attribute{ + "host": schema.StringAttribute{ + MarkdownDescription: "Parallels Desktop DevOps Host", + Optional: true, + Validators: []validator.String{ + stringvalidator.AtLeastOneOf(path.Expressions{ + path.MatchRoot("orchestrator"), + path.MatchRoot("host"), + }...), + }, + }, + "orchestrator": schema.StringAttribute{ + MarkdownDescription: "Parallels Desktop DevOps Orchestrator", + Optional: true, + Validators: []validator.String{ + stringvalidator.AtLeastOneOf(path.Expressions{ + path.MatchRoot("orchestrator"), + path.MatchRoot("host"), + }...), + }, + }, + "wait_for_network_up": schema.BoolAttribute{ + MarkdownDescription: "Wait for network up", + Optional: true, + }, + "machines": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "host_ip": schema.StringAttribute{ + MarkdownDescription: "The IP address of the host machine", + Computed: true, + }, + "id": schema.StringAttribute{ + MarkdownDescription: "The unique identifier of the virtual machine", + Computed: true, + }, + "name": schema.StringAttribute{ + MarkdownDescription: "The name of the virtual machine", + Computed: true, + }, + "description": schema.StringAttribute{ + MarkdownDescription: "The description of the virtual machine", + Computed: true, + }, + "os_type": schema.StringAttribute{ + MarkdownDescription: "The type of the operating system installed on the virtual machine", + Computed: true, + }, + "state": schema.StringAttribute{ + MarkdownDescription: "The state of the virtual machine", + Computed: true, + }, + "home": schema.StringAttribute{ + MarkdownDescription: "The path to the virtual machine home directory", + Computed: true, + }, + "orchestrator_host_id": schema.StringAttribute{ + MarkdownDescription: "Orchestrator Host 
Id if the VM is running in an orchestrator", + Computed: true, + }, + "external_ip": schema.StringAttribute{ + MarkdownDescription: "VM external IP address", + Computed: true, + }, + "internal_ip": schema.StringAttribute{ + MarkdownDescription: "VM internal IP address", + Computed: true, + }, + }, + }, + }, + }, +} diff --git a/main.go b/main.go index abb926c..ea66953 100644 --- a/main.go +++ b/main.go @@ -8,6 +8,8 @@ import ( "flag" "log" + "terraform-provider-parallels-desktop/internal/telemetry" + "github.com/hashicorp/terraform-plugin-framework/providerserver" "terraform-provider-parallels-desktop/internal/provider" @@ -23,17 +25,16 @@ import ( // can be customized. //go:generate go run github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs -var ( - // these will be set by the goreleaser configuration - // to appropriate values for the compiled binary. - version string = "dev" +// these will be set by the goreleaser configuration +// to appropriate values for the compiled binary. +var version string = "dev" - // goreleaser can pass other information to the main package, such as the specific commit - // https://goreleaser.com/cookbooks/using-main.version/ -) +// goreleaser can pass other information to the main package, such as the specific commit +// https://goreleaser.com/cookbooks/using-main.version/ func main() { var debug bool + telemetry.VERSION = version flag.BoolVar(&debug, "debug", false, "set to true to run the provider with support for debuggers like delve") flag.Parse() @@ -49,7 +50,6 @@ func main() { } err := providerserver.Serve(context.Background(), provider.New(version), opts) - if err != nil { log.Fatal(err.Error()) }
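
For illustration, a minimal configuration exercising the two new attributes introduced by this patch might look like the sketch below. Only `wait_for_network_up`, `always_run_on_update`, and the block/attribute names listed in the docs above come from the patch; the `parallels-desktop_clone_vm` resource type name, the host URL, the filter field names, and the `base_vm_id` value are assumptions made for the example.

```hcl
# Hypothetical usage sketch; values marked "assumed" are not taken from this patch.
data "parallels-desktop_vm" "example" {
  host                = "https://devops-host:8080" # assumed host URL
  wait_for_network_up = true                       # new: retry until running VMs report an internal IP

  filter {
    field_name = "name" # assumed filter field names
    value      = "my-vm"
  }
}

resource "parallels-desktop_clone_vm" "example" { # assumed resource type name
  host       = "https://devops-host:8080"         # assumed host URL
  name       = "my-cloned-vm"
  base_vm_id = "BASE-VM-ID"                       # placeholder

  post_processor_script {
    always_run_on_update = true # new: re-run this script on every update, even if inline is unchanged
    inline = [
      "echo 'provisioning step'",
    ]
  }
}
```

With `always_run_on_update` set, `PostProcessorHasChanges` reports a change on every plan, so the script is re-executed on each update; `wait_for_network_up` makes the data source retry (up to 10 attempts, 5 seconds apart) until running machines report an internal IP.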