diff --git a/client/vm.go b/client/vm.go
index 05955ba..a36b50c 100644
--- a/client/vm.go
+++ b/client/vm.go
@@ -125,11 +125,12 @@ type Vm struct {
 	ResourceSet *FlatResourceSet `json:"resourceSet"`
 	// TODO: (#145) Uncomment this once issues with secure_boot have been figured out
 	// SecureBoot bool `json:"secureBoot,omitempty"`
-	Tags       []string `json:"tags"`
-	Videoram   Videoram `json:"videoram,omitempty"`
-	Vga        string   `json:"vga,omitempty"`
-	StartDelay int      `json:startDelay,omitempty"`
-	Host       string   `json:"$container"`
+	Tags         []string               `json:"tags"`
+	Videoram     Videoram               `json:"videoram,omitempty"`
+	Vga          string                 `json:"vga,omitempty"`
+	StartDelay   int                    `json:"startDelay,omitempty"`
+	Host         string                 `json:"$container"`
+	XenstoreData map[string]interface{} `json:"xenStoreData,omitempty"`
 
 	// These fields are used for passing in disk inputs when
 	// creating Vms, however, this is not a real field as far
@@ -357,6 +358,17 @@ func (c *Client) CreateVm(vmReq Vm, createTime time.Duration) (*Vm, error) {
 		return nil, err
 	}
 
+	xsParams := map[string]interface{}{
+		"id":           vmId,
+		"xenStoreData": vmReq.XenstoreData,
+	}
+	var success bool
+	err = c.Call("vm.set", xsParams, &success)
+
+	if err != nil {
+		return nil, err
+	}
+
 	bootAfterCreate := params["bootAfterCreate"].(bool)
 	if !bootAfterCreate && vmReq.PowerState == RunningPowerState {
 		err = c.StartVm(vmId)
@@ -431,6 +443,10 @@ func (c *Client) UpdateVm(vmReq Vm) (*Vm, error) {
 		params["resourceSet"] = vmReq.ResourceSet
 	}
 
+	if len(vmReq.XenstoreData) > 0 {
+		params["xenStoreData"] = vmReq.XenstoreData
+	}
+
 	vga := vmReq.Vga
 	if vga != "" {
 		params["vga"] = vga
diff --git a/docs/data-sources/vms.md b/docs/data-sources/vms.md
index 49a8fb3..88f57ba 100644
--- a/docs/data-sources/vms.md
+++ b/docs/data-sources/vms.md
@@ -85,6 +85,7 @@ Read-Only:
 - `vga` (String)
 - `videoram` (Number)
 - `wait_for_ip` (Boolean)
+- `xenstore` (Map of String)
 
 
 ### Nested Schema for `vms.disk`
diff --git a/docs/index.md b/docs/index.md
index 7c0e66f..5f40d4d 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -46,11 +46,14 @@ provider "xenorchestra" {
 
 ## Schema
 
+### Required
+
+- `password` (String) Password for xoa api. Can be set via the XOA_PASSWORD environment variable.
+- `url` (String) Hostname of the xoa router. Can be set via the XOA_URL environment variable.
+- `username` (String) User account for xoa api. Can be set via the XOA_USER environment variable.
+
 ### Optional
 
 - `insecure` (Boolean) Whether SSL should be verified or not. Can be set via the XOA_INSECURE environment variable.
-- `password` (String) Password for xoa api. Can be set via the XOA_PASSWORD environment variable.
 - `retry_max_time` (String) If `retry_mode` is set, this specifies the duration for which the backoff method will continue retries. Can be set via the `XOA_RETRY_MAX_TIME` environment variable
 - `retry_mode` (String) Specifies if retries should be attempted for requests that require eventual consistency. Can be set via the XOA_RETRY_MODE environment variable.
-- `url` (String) Hostname of the xoa router. Can be set via the XOA_URL environment variable.
-- `username` (String) User account for xoa api. Can be set via the XOA_USER environment variable.
diff --git a/docs/resources/vm.md b/docs/resources/vm.md
index dc416bf..e979d35 100644
--- a/docs/resources/vm.md
+++ b/docs/resources/vm.md
@@ -89,6 +89,13 @@ resource "xenorchestra_vm" "bar" {
   timeouts {
     create = "20m"
   }
+
+  // Note: Xen Orchestra populates values within Xenstore and they will need to be
+  // ignored via lifecycle ignore_changes or modeled in your terraform code
+  xenstore = {
+    key1 = "val1"
+    key2 = "val2"
+  }
 }
 
 # vm resource that uses wait_for_ip
@@ -170,6 +177,7 @@ $ xo-cli xo.getAllObjects filter='json:{"id": "cf7b5d7d-3cd5-6b7c-5025-5c935c8cd
 - `vga` (String) The video adapter the VM should use. Possible values include std and cirrus.
 - `videoram` (Number) The videoram option the VM should use. Possible values include 1, 2, 4, 8, 16
 - `wait_for_ip` (Boolean) Whether terraform should wait until IP addresses are present on the VM's network interfaces before considering it created. This only works if guest-tools are installed in the VM. Defaults to false.
+- `xenstore` (Map of String) The key value pairs to be populated in xenstore.
 
 ### Read-Only
 
diff --git a/examples/resources/xenorchestra_vm/resource.tf b/examples/resources/xenorchestra_vm/resource.tf
index b878fe6..5ce62e7 100644
--- a/examples/resources/xenorchestra_vm/resource.tf
+++ b/examples/resources/xenorchestra_vm/resource.tf
@@ -58,6 +58,13 @@ resource "xenorchestra_vm" "bar" {
   timeouts {
     create = "20m"
   }
+
+  // Note: Xen Orchestra populates values within Xenstore and they will need to be
+  // ignored via lifecycle ignore_changes or modeled in your terraform code
+  xenstore = {
+    key1 = "val1"
+    key2 = "val2"
+  }
 }
 
 # vm resource that uses wait_for_ip
diff --git a/xoa/resource_xenorchestra_vm.go b/xoa/resource_xenorchestra_vm.go
index af0ba4a..e5f6be7 100644
--- a/xoa/resource_xenorchestra_vm.go
+++ b/xoa/resource_xenorchestra_vm.go
@@ -9,6 +9,7 @@ import (
 	"regexp"
 	"sort"
 	"strconv"
+	"strings"
 	"time"
 
 	"github.com/ddelnano/terraform-provider-xenorchestra/client"
@@ -408,6 +409,14 @@ $ xo-cli xo.getAllObjects filter='json:{"id": "cf7b5d7d-3cd5-6b7c-5025-5c935c8cd
 			},
 		},
 	},
+	"xenstore": &schema.Schema{
+		Type:        schema.TypeMap,
+		Optional:    true,
+		Description: "The key value pairs to be populated in xenstore.",
+		Elem: &schema.Schema{
+			Type: schema.TypeString,
+		},
+	},
 	"tags": resourceTags(),
 	}
 }
@@ -566,7 +575,8 @@ func resourceVmCreate(d *schema.ResourceData, m interface{}) error {
 		Videoram: client.Videoram{
 			Value: d.Get("videoram").(int),
 		},
-		Vga: d.Get("vga").(string),
+		XenstoreData: d.Get("xenstore").(map[string]interface{}),
+		Vga:          d.Get("vga").(string),
 	}
 
 	affinityHost := d.Get("affinity_host").(string)
@@ -927,6 +937,23 @@ func resourceVmUpdate(d *schema.ResourceData, m interface{}) error {
 		vmReq.AffinityHost = &affinityHost
 	}
 
+	if d.HasChange("xenstore") {
+		xenstoreParams := map[string]interface{}{}
+		o, n := d.GetChange("xenstore")
+		oXs := o.(map[string]interface{})
+		nXs := n.(map[string]interface{})
+
+		for k := range oXs {
+			xenstoreParams[k] = nil
+		}
+
+		for k, v := range nXs {
+			xenstoreParams[k] = v
+		}
+
+		vmReq.XenstoreData = xenstoreParams
+	}
+
 	haltPerformed := false
 
 	if haltForUpdates {
@@ -1185,10 +1212,30 @@ func recordToData(resource client.Vm, vifs []client.VIF, disks []client.Disk, cd
 			return err
 		}
 	}
+	if xenstore := d.Get("xenstore").(map[string]interface{}); len(xenstore) > 0 {
+		filtered := filterXenstoreDataToVmData(resource.XenstoreData)
+		if err := d.Set("xenstore", filtered); err != nil {
+			return err
+		}
+	}
 
 	return nil
 }
 
+func filterXenstoreDataToVmData(xenstore map[string]interface{}) map[string]interface{} {
+	filtered := map[string]interface{}{}
+	for key, value := range xenstore {
+		if strings.HasPrefix(key, "vm-data/") {
+			pieces := strings.SplitAfterN(key, "vm-data/", 2)
+			if len(pieces) != 2 {
+				continue
+			}
+			filtered[pieces[1]] = value
+		}
+	}
+	return filtered
+}
+
 func vmBlockedOperationsToList(v client.Vm) []string {
 	blockedOperations := []string{}
 	for k, _ := range v.BlockedOperations {
diff --git a/xoa/resource_xenorchestra_vm_test.go b/xoa/resource_xenorchestra_vm_test.go
index 7e9be7c..e389015 100644
--- a/xoa/resource_xenorchestra_vm_test.go
+++ b/xoa/resource_xenorchestra_vm_test.go
@@ -622,6 +622,46 @@ func TestAccXenorchestraVm_createWhenWaitingForIp(t *testing.T) {
 	})
 }
 
+func TestAccXenorchestraVm_createAndUpdateXenstoreData(t *testing.T) {
+	resourceName := "xenorchestra_vm.bar"
+	vmName := fmt.Sprintf("%s - %s", accTestPrefix, t.Name())
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckXenorchestraVmDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccVmConfigWithSingleXenstoreData(vmName),
+				Check: resource.ComposeAggregateTestCheckFunc(
+					testAccVmExists(resourceName),
+					resource.TestCheckResourceAttrSet(resourceName, "id"),
+					resource.TestCheckResourceAttr(resourceName, "xenstore.%", "2"),
+					resource.TestCheckResourceAttr(resourceName, "xenstore.first", "value"),
+				),
+			},
+			{
+				Config: testAccVmConfigWithMultipleXenstoreData(vmName),
+				Check: resource.ComposeAggregateTestCheckFunc(
+					testAccVmExists(resourceName),
+					resource.TestCheckResourceAttrSet(resourceName, "id"),
+					resource.TestCheckResourceAttr(resourceName, "xenstore.first", "value"),
+					resource.TestCheckResourceAttr(resourceName, "xenstore.second", "value"),
+				),
+			},
+			{
+				Config: testAccVmConfigWithSingleXenstoreData(vmName),
+				Check: resource.ComposeAggregateTestCheckFunc(
+					testAccVmExists(resourceName),
+					resource.TestCheckResourceAttrSet(resourceName, "id"),
+					resource.TestCheckResourceAttr(resourceName, "xenstore.%", "2"),
+					resource.TestCheckResourceAttr(resourceName, "xenstore.first", "value"),
+					resource.TestCheckNoResourceAttr(resourceName, "xenstore.second"),
+				),
+			},
+		},
+	})
+}
+
 func TestAccXenorchestraVm_ensureVmsInResourceSetsCanBeUpdatedByNonAdminUsers(t *testing.T) {
 	vmName := fmt.Sprintf("%s - %s", accTestPrefix, t.Name())
 	adminUser := os.Getenv("XOA_USER")
@@ -2261,6 +2301,83 @@ resource "xenorchestra_vm" "bar" {
 `, accDefaultNetwork.NameLabel, accTestPool.Id, vmName, accDefaultSr.Id)
 }
 
+func testAccVmConfigWithSingleXenstoreData(vmName string) string {
+	return testAccCloudConfigConfig(fmt.Sprintf("vm-template-%s", vmName), "template") + testAccTemplateConfig() + fmt.Sprintf(`
+data "xenorchestra_network" "network" {
+    name_label = "%s"
+    pool_id = "%s"
+}
+
+resource "xenorchestra_vm" "bar" {
+    memory_max = 4295000000
+    wait_for_ip = true
+    cpus = 1
+    cloud_config = xenorchestra_cloud_config.bar.template
+    name_label = "%s"
+    name_description = "description"
+    template = data.xenorchestra_template.template.id
+    network {
+	network_id = data.xenorchestra_network.network.id
+    }
+
+    disk {
+      sr_id = "%s"
+      name_label = "disk 1"
+      size = 10001317888
+    }
+
+    xenstore = {
+      first = "value"
+    }
+
+    lifecycle {
+      ignore_changes = [
+        xenstore["mmio-hole-size"],
+      ]
+    }
+}
+`, accDefaultNetwork.NameLabel, accTestPool.Id, vmName, accDefaultSr.Id)
+}
+
+func testAccVmConfigWithMultipleXenstoreData(vmName string) string {
+	return testAccCloudConfigConfig(fmt.Sprintf("vm-template-%s", vmName), "template") + testAccTemplateConfig() + fmt.Sprintf(`
+data "xenorchestra_network" "network" {
+    name_label = "%s"
+    pool_id = "%s"
+}
+
+resource "xenorchestra_vm" "bar" {
+    memory_max = 4295000000
+    wait_for_ip = true
+    cpus = 1
+    cloud_config = xenorchestra_cloud_config.bar.template
+    name_label = "%s"
+    name_description = "description"
+    template = data.xenorchestra_template.template.id
+    network {
+	network_id = data.xenorchestra_network.network.id
+    }
+
+    disk {
+      sr_id = "%s"
+      name_label = "disk 1"
+      size = 10001317888
+    }
+
+    xenstore = {
+      first = "value"
+      second = "value"
+    }
+
+    lifecycle {
+      ignore_changes = [
+        xenstore["mmio-hole-size"],
+      ]
+    }
+}
+`, accDefaultNetwork.NameLabel, accTestPool.Id, vmName, accDefaultSr.Id)
+}
+
 func testAccVmConfigWithDiskNameLabelAndNameDescription(vmName, nameLabel, description string) string {
 	return testAccCloudConfigConfig(fmt.Sprintf("vm-template-%s", vmName), "template") + testAccTemplateConfig() + fmt.Sprintf(`
 data "xenorchestra_network" "network" {
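Reviewer's note: the read path added above (recordToData calling filterXenstoreDataToVmData) is what keeps Terraform state consistent with what XAPI returns, since XAPI reports xenstore entries under their full path while the schema stores them relative to `vm-data/`. Below is a minimal, self-contained sketch of that filtering; the function body is copied verbatim from the diff, while the `main` wrapper and the sample key names are illustrative assumptions, not part of the change.

```go
package main

import (
	"fmt"
	"strings"
)

// Copied from the diff above: keep only keys under the vm-data/ prefix and
// strip that prefix, dropping everything else XAPI reports in xenstore.
func filterXenstoreDataToVmData(xenstore map[string]interface{}) map[string]interface{} {
	filtered := map[string]interface{}{}
	for key, value := range xenstore {
		if strings.HasPrefix(key, "vm-data/") {
			pieces := strings.SplitAfterN(key, "vm-data/", 2)
			if len(pieces) != 2 {
				continue
			}
			filtered[pieces[1]] = value
		}
	}
	return filtered
}

func main() {
	// Hypothetical XAPI response data; the key names are illustrative only.
	raw := map[string]interface{}{
		"vm-data/first":       "value",
		"cpu/number-of-vcpus": "1",
	}
	fmt.Println(filterXenstoreDataToVmData(raw)) // map[first:value]
}
```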