diff --git a/GNUmakefile b/GNUmakefile
index 7f1fef6..01b7b2a 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -6,7 +6,13 @@ default: testacc
# Run acceptance tests
.PHONY: testacc
testacc: provider ## make testacc
- source .env && TF_ACC=1 go test ./xenserver/ -v $(TESTARGS) -timeout 120m
+ source .env \
+ && TF_ACC=1 go test -v $(TESTARGS) -timeout 60m ./xenserver/ \
+ && TF_ACC=1 TEST_POOL=1 go test -v -run TestAccPoolResource -timeout 60m ./xenserver/
+
+testpool: provider
+ source .env \
+ && TF_ACC=1 TEST_POOL=1 go test -v -run TestAccPoolResource -timeout 60m ./xenserver/
doc: ## make doc for terraform provider documentation
go run github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs generate --provider-name xenserver
@@ -16,7 +22,7 @@ provider: go.mod ## make provider
rm -f $(GOBIN)/terraform-provider-xenserver
go mod tidy
go install .
- ls -l $(GOBIN)/terraform-provider-xenserver
+ md5sum $(GOBIN)/terraform-provider-xenserver
apply: .env provider ## make apply
cd $(WORKDIR) && \
@@ -53,4 +59,4 @@ destroy_vm:
$(MAKE) WORKDIR=examples/vm-main destroy
destroy_pool:
- $(MAKE) WORKDIR=examples/pool-main destroy
\ No newline at end of file
+ $(MAKE) WORKDIR=examples/pool-main destroy
diff --git a/docs/data-sources/host.md b/docs/data-sources/host.md
index af27fd5..9593d31 100644
--- a/docs/data-sources/host.md
+++ b/docs/data-sources/host.md
@@ -28,6 +28,7 @@ output "host_output" {
### Optional
- `address` (String) The address by which this host can be contacted from any other host in the pool.
+- `is_coordinator` (Boolean) If true, show only the pool coordinator; if false, show only pool supporters; if not set, show all hosts.
- `name_label` (String) The name of the host.
- `uuid` (String) The UUID of the host.
diff --git a/docs/resources/pif_configure.md b/docs/resources/pif_configure.md
index 1f020a9..e555b2b 100644
--- a/docs/resources/pif_configure.md
+++ b/docs/resources/pif_configure.md
@@ -3,12 +3,15 @@
page_title: "xenserver_pif_configure Resource - xenserver"
subcategory: ""
description: |-
- Provides an PIF configure resource to update the exist PIF parameters.
+ PIF configuration resource which is used to update the existing PIF parameters.
+ Note that no new PIF will be deployed when terraform apply is executed. Additionally, when it comes to terraform destroy, it actually has no effect on this resource.
---
# xenserver_pif_configure (Resource)
-Provides an PIF configure resource to update the exist PIF parameters.
+PIF configuration resource which is used to update the existing PIF parameters.
+
+ Note that no new PIF will be deployed when `terraform apply` is executed. Additionally, when it comes to `terraform destroy`, it actually has no effect on this resource.
## Example Usage
@@ -70,6 +73,7 @@ Optional:
- `dns` (String) Comma separated list of the IP addresses of the DNS servers to use.
- `gateway` (String) The IP gateway.
- `ip` (String) The IP address.
+- `name_label` (String) The name of the interface in IP Address Configuration.
- `netmask` (String) The IP netmask.
## Import
diff --git a/docs/resources/pool.md b/docs/resources/pool.md
index 1d713e5..c784190 100644
--- a/docs/resources/pool.md
+++ b/docs/resources/pool.md
@@ -3,36 +3,104 @@
page_title: "xenserver_pool Resource - xenserver"
subcategory: ""
description: |-
- Provides a pool resource.
+ This provides a pool resource. During the execution of terraform destroy for this particular resource, all of the hosts that are part of the pool will be separated and converted into standalone hosts.
---
# xenserver_pool (Resource)
-Provides a pool resource.
-
-
+This provides a pool resource. During the execution of `terraform destroy` for this particular resource, all of the hosts that are part of the pool will be separated and converted into standalone hosts.
+
+## Example Usage
+
+```terraform
+resource "xenserver_sr_nfs" "nfs" {
+ name_label = "NFS shared storage"
+ name_description = "A test NFS storage repository"
+ version = "3"
+ storage_location = format("%s:%s", local.env_vars["NFS_SERVER"], local.env_vars["NFS_SERVER_PATH"])
+}
+
+data "xenserver_pif" "pif" {
+ device = "eth0"
+}
+
+data "xenserver_pif" "pif1" {
+ device = "eth3"
+}
+
+locals {
+ pif1_data = tomap({for element in data.xenserver_pif.pif1.data_items: element.uuid => element})
+}
+
+resource "xenserver_pif_configure" "pif_update" {
+ for_each = local.pif1_data
+ uuid = each.key
+ interface = {
+ mode = "DHCP"
+ }
+}
+
+# Configure default SR and Management Network of the pool
+resource "xenserver_pool" "pool" {
+ name_label = "pool"
+ default_sr = xenserver_sr_nfs.nfs.uuid
+ management_network = data.xenserver_pif.pif.data_items[0].network
+}
+
+# Join supporter into the pool
+resource "xenserver_pool" "pool" {
+ name_label = "pool"
+ join_supporters = [
+ {
+ host = local.env_vars["SUPPORTER_HOST"]
+ username = local.env_vars["SUPPORTER_USERNAME"]
+ password = local.env_vars["SUPPORTER_PASSWORD"]
+ }
+ ]
+}
+
+# Eject supporter from the pool
+data "xenserver_host" "supporter" {
+ is_coordinator = false
+}
+
+resource "xenserver_pool" "pool" {
+ name_label = "pool"
+ eject_supporters = [ data.xenserver_host.supporter.data_items[1].uuid ]
+}
+```
## Schema
### Required
-- `default_sr` (String) The default SR UUID of the pool.
- `name_label` (String) The name of the pool.
### Optional
+- `default_sr` (String) The default SR UUID of the pool. This SR should be a shared SR.
+- `eject_supporters` (Set of String) The set of pool supporters which will be ejected from the pool.
+- `join_supporters` (Attributes Set) The set of pool supporters which will join the pool.
+
+-> **Note:**
+1. It would raise an error if a supporter is in both `join_supporters` and `eject_supporters`.
+2. The join operation would be performed only when the host, username, and password are provided. (see [below for nested schema](#nestedatt--join_supporters))
- `management_network` (String) The management network UUID of the pool.
+
+-> **Note:**
+1. The management network would be reconfigured only when the management network UUID is provided.
+2. All of the hosts in the pool should have the same management network and network configuration; you can set the network configuration via the `xenserver_pif_configure` resource.
+
- `name_description` (String) The description of the pool, default to be `""`.
-- `supporters` (Attributes Set) The set of pool supporters which will join the pool. (see [below for nested schema](#nestedatt--supporters))
### Read-Only
- `id` (String) The test ID of the pool.
- `uuid` (String) The UUID of the pool.
-
-### Nested Schema for `supporters`
+
+### Nested Schema for `join_supporters`
Optional:
@@ -40,6 +108,10 @@ Optional:
- `password` (String, Sensitive) The password of the host.
- `username` (String) The user name of the host.
-Read-Only:
+## Import
+
+Import is supported using the following syntax:
-- `uuid` (String) The UUID of the host.
+```shell
+terraform import xenserver_pool.pool 00000000-0000-0000-0000-000000000000
+```
diff --git a/docs/resources/snapshot.md b/docs/resources/snapshot.md
index 5a1714f..65c668b 100644
--- a/docs/resources/snapshot.md
+++ b/docs/resources/snapshot.md
@@ -91,9 +91,9 @@ resource "xenserver_snapshot" "snapshot" {
-> **Note:** When `revert` is true, the snapshot resource will be updated with new configuration first and then revert to VM.
~> **Warning:** After revert, the VM `hard_drive` will be updated. If snapshot revert to the VM resource defined in 'main.tf', it'll cause issue when continue execute terraform commands. There's a suggest solution to resolve this issue, follow the steps: 1. run `terraform state show xenserver_snapshot.`, get the revert VM's UUID 'vm_uuid' and revert VDIs' UUID 'vdi_uuid'. 2. run `terraform state rm xenserver_vm.` to remove the VM resource state. 3. run `terraform import xenserver_vm. ` to import the VM resource new state. 4. run `terraform state rm xenserver_vdi.` to remove the VDI resource state. Be careful, you only need to remove the VDI resource used in above VM resource. If there're multiple VDI resources, remove them all. 5. run `terraform import xenserver_vdi. ` to import the VDI resource new state. If there're multiple VDI resources, import them all.
-- `with_memory` (Boolean) True if snapshot with the VM's memory (VM must in running state), default to be `false`.
+- `with_memory` (Boolean) True if snapshot with the VM's memory, default to be `false`.
--> **Note:** `with_memory` is not allowed to be updated.
+-> **Note:** 1. The `with_memory` field is not allowed to be updated. 2. The VM must be in a running state and have the [XenServer VM Tool](https://www.xenserver.com/downloads) installed.
### Read-Only
diff --git a/examples/pool-main/main.tf b/examples/pool-main/main.tf
index 5045c70..dd3e6d9 100644
--- a/examples/pool-main/main.tf
+++ b/examples/pool-main/main.tf
@@ -16,12 +16,13 @@ provider "xenserver" {
password = local.env_vars["XENSERVER_PASSWORD"]
}
-data "xenserver_sr" "sr" {
- name_label = "Local storage"
+resource "xenserver_sr_nfs" "nfs" {
+ name_label = "NFS shared storage"
+ name_description = "A test NFS storage repository"
+ version = "3"
+ storage_location = format("%s:%s", local.env_vars["NFS_SERVER"], local.env_vars["NFS_SERVER_PATH"])
}
-data "xenserver_host" "host" {}
-
data "xenserver_pif" "pif" {
device = "eth0"
}
@@ -43,7 +44,24 @@ resource "xenserver_pif_configure" "pif_update" {
}
resource "xenserver_pool" "pool" {
- name_label = "pool-1"
- default_sr = data.xenserver_sr.sr.data_items[0].uuid
- management_network = data.xenserver_pif.pif.data_items[0].network
-}
\ No newline at end of file
+ name_label = "pool"
+ # default_sr = xenserver_sr_nfs.nfs.uuid
+ # management_network = data.xenserver_pif.pif.data_items[0].network
+ join_supporters = [
+ {
+ host = local.env_vars["SUPPORTER_HOST"]
+ username = local.env_vars["SUPPORTER_USERNAME"]
+ password = local.env_vars["SUPPORTER_PASSWORD"]
+ }
+ ]
+}
+
+# uncomment the following block (and remove the join_supporters block above) for the second run
+# data "xenserver_host" "supporter" {
+# is_coordinator = false
+# }
+
+# resource "xenserver_pool" "pool" {
+# name_label = "pool"
+# eject_supporters = [ data.xenserver_host.supporter.data_items[1].uuid ]
+# }
\ No newline at end of file
diff --git a/examples/resources/xenserver_pool/import.sh b/examples/resources/xenserver_pool/import.sh
new file mode 100644
index 0000000..4b0161d
--- /dev/null
+++ b/examples/resources/xenserver_pool/import.sh
@@ -0,0 +1 @@
+terraform import xenserver_pool.pool 00000000-0000-0000-0000-000000000000
\ No newline at end of file
diff --git a/examples/resources/xenserver_pool/resource.tf b/examples/resources/xenserver_pool/resource.tf
new file mode 100644
index 0000000..56a33d1
--- /dev/null
+++ b/examples/resources/xenserver_pool/resource.tf
@@ -0,0 +1,55 @@
+resource "xenserver_sr_nfs" "nfs" {
+ name_label = "NFS shared storage"
+ name_description = "A test NFS storage repository"
+ version = "3"
+ storage_location = format("%s:%s", local.env_vars["NFS_SERVER"], local.env_vars["NFS_SERVER_PATH"])
+}
+
+data "xenserver_pif" "pif" {
+ device = "eth0"
+}
+
+data "xenserver_pif" "pif1" {
+ device = "eth3"
+}
+
+locals {
+ pif1_data = tomap({for element in data.xenserver_pif.pif1.data_items: element.uuid => element})
+}
+
+resource "xenserver_pif_configure" "pif_update" {
+ for_each = local.pif1_data
+ uuid = each.key
+ interface = {
+ mode = "DHCP"
+ }
+}
+
+# Configure default SR and Management Network of the pool
+resource "xenserver_pool" "pool" {
+ name_label = "pool"
+ default_sr = xenserver_sr_nfs.nfs.uuid
+ management_network = data.xenserver_pif.pif.data_items[0].network
+}
+
+# Join supporter into the pool
+resource "xenserver_pool" "pool" {
+ name_label = "pool"
+ join_supporters = [
+ {
+ host = local.env_vars["SUPPORTER_HOST"]
+ username = local.env_vars["SUPPORTER_USERNAME"]
+ password = local.env_vars["SUPPORTER_PASSWORD"]
+ }
+ ]
+}
+
+# Eject supporter from the pool
+data "xenserver_host" "supporter" {
+ is_coordinator = false
+}
+
+resource "xenserver_pool" "pool" {
+ name_label = "pool"
+ eject_supporters = [ data.xenserver_host.supporter.data_items[1].uuid ]
+}
\ No newline at end of file
diff --git a/go.mod b/go.mod
index 26a45cb..a3d9a32 100644
--- a/go.mod
+++ b/go.mod
@@ -5,6 +5,7 @@ go 1.22.2
replace xenapi => ./goSDK
require (
+ github.com/cenkalti/backoff/v4 v4.3.0
github.com/hashicorp/terraform-plugin-docs v0.19.4
github.com/hashicorp/terraform-plugin-framework v1.12.0
github.com/hashicorp/terraform-plugin-framework-validators v0.14.0
diff --git a/go.sum b/go.sum
index 94a295b..728f761 100644
--- a/go.sum
+++ b/go.sum
@@ -28,6 +28,8 @@ github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwN
github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA=
github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cloudflare/circl v1.3.8 h1:j+V8jJt09PoeMFIu2uh5JUyEaIHTXVOHslFoLNAKqwI=
github.com/cloudflare/circl v1.3.8/go.mod h1:PDRU+oXvdD7KCtgKxW95M5Z8BpSCJXQORiZFnBQS5QU=
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
diff --git a/xenserver/host_data_source.go b/xenserver/host_data_source.go
index 74764b2..05c614a 100644
--- a/xenserver/host_data_source.go
+++ b/xenserver/host_data_source.go
@@ -48,6 +48,10 @@ func (d *hostDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, r
MarkdownDescription: "The address by which this host can be contacted from any other host in the pool.",
Optional: true,
},
+ "is_coordinator": schema.BoolAttribute{
+ MarkdownDescription: "If true, show only the pool coordinator; if false, show only pool supporters; if not set, show all hosts.",
+ Optional: true,
+ },
"data_items": schema.ListNestedAttribute{
MarkdownDescription: "The return items of host.",
Computed: true,
@@ -63,15 +67,15 @@ func (d *hostDataSource) Configure(_ context.Context, req datasource.ConfigureRe
if req.ProviderData == nil {
return
}
- session, ok := req.ProviderData.(*xenapi.Session)
+ providerData, ok := req.ProviderData.(*xsProvider)
if !ok {
resp.Diagnostics.AddError(
"Unexpected Data Source Configure Type",
- fmt.Sprintf("Expected *xenapi.Session, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ fmt.Sprintf("Expected *xenserver.xsProvider, got: %T. Please report this issue to the provider developers.", req.ProviderData),
)
return
}
- d.session = session
+ d.session = providerData.session
}
// Read refreshes the Terraform state with the latest data.
@@ -104,6 +108,27 @@ func (d *hostDataSource) Read(ctx context.Context, req datasource.ReadRequest, r
if !data.Address.IsNull() && hostRecord.Address != data.Address.ValueString() {
continue
}
+ if !data.IsCoordinator.IsNull() {
+ _, coordinatorUUID, err := getCoordinatorRef(d.session)
+ if err != nil {
+ resp.Diagnostics.AddError(
+ "Unable to get coordinator ref",
+ err.Error(),
+ )
+ return
+ }
+
+ isCoordinator := hostRecord.UUID == coordinatorUUID
+ if data.IsCoordinator.ValueBool() {
+ if !isCoordinator {
+ continue
+ }
+ } else {
+ if isCoordinator {
+ continue
+ }
+ }
+ }
var hostData hostRecordData
err = updateHostRecordData(ctx, d.session, hostRecord, &hostData)
diff --git a/xenserver/host_data_source_test.go b/xenserver/host_data_source_test.go
index 468d44f..748fde6 100644
--- a/xenserver/host_data_source_test.go
+++ b/xenserver/host_data_source_test.go
@@ -1,15 +1,18 @@
package xenserver
import (
+ "fmt"
"testing"
"github.com/hashicorp/terraform-plugin-testing/helper/resource"
)
-func testAccHostDataSourceConfig() string {
- return `
-data "xenserver_host" "test_host_data" {}
-`
+func testAccHostDataSourceConfig(extra_config string) string {
+ return fmt.Sprintf(`
+data "xenserver_host" "host_data" {
+ %s
+}
+`, extra_config)
}
func TestAccHostDataSource(t *testing.T) {
@@ -18,9 +21,15 @@ func TestAccHostDataSource(t *testing.T) {
Steps: []resource.TestStep{
// Read testing
{
- Config: providerConfig + testAccHostDataSourceConfig(),
+ Config: providerConfig + testAccHostDataSourceConfig(""),
+ Check: resource.ComposeAggregateTestCheckFunc(
+ resource.TestCheckResourceAttrSet("data.xenserver_host.host_data", "data_items.#"),
+ ),
+ },
+ {
+ Config: providerConfig + testAccHostDataSourceConfig("is_coordinator = true"),
Check: resource.ComposeAggregateTestCheckFunc(
- resource.TestCheckResourceAttrSet("data.xenserver_host.test_host_data", "data_items.#"),
+ resource.TestCheckResourceAttr("data.xenserver_host.host_data", "data_items.#", "1"),
),
},
},
diff --git a/xenserver/host_utils.go b/xenserver/host_utils.go
index 107a6fe..07d7e42 100644
--- a/xenserver/host_utils.go
+++ b/xenserver/host_utils.go
@@ -7,16 +7,18 @@ import (
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
"xenapi"
)
// pifDataSourceModel describes the data source data model.
type hostDataSourceModel struct {
- NameLabel types.String `tfsdk:"name_label"`
- UUID types.String `tfsdk:"uuid"`
- Address types.String `tfsdk:"address"`
- DataItems []hostRecordData `tfsdk:"data_items"`
+ NameLabel types.String `tfsdk:"name_label"`
+ UUID types.String `tfsdk:"uuid"`
+ Address types.String `tfsdk:"address"`
+ IsCoordinator types.Bool `tfsdk:"is_coordinator"`
+ DataItems []hostRecordData `tfsdk:"data_items"`
}
type hostRecordData struct {
@@ -59,6 +61,7 @@ func hostDataSchema() map[string]schema.Attribute {
}
func updateHostRecordData(ctx context.Context, session *xenapi.Session, record xenapi.HostRecord, data *hostRecordData) error {
+ tflog.Debug(ctx, "Found host data: "+record.NameLabel)
data.UUID = types.StringValue(record.UUID)
data.NameLabel = types.StringValue(record.NameLabel)
data.NameDescription = types.StringValue(record.NameDescription)
diff --git a/xenserver/network_data_source.go b/xenserver/network_data_source.go
index 6abbca9..18ecfd9 100644
--- a/xenserver/network_data_source.go
+++ b/xenserver/network_data_source.go
@@ -136,15 +136,15 @@ func (d *networkDataSource) Configure(_ context.Context, req datasource.Configur
if req.ProviderData == nil {
return
}
- session, ok := req.ProviderData.(*xenapi.Session)
+ providerData, ok := req.ProviderData.(*xsProvider)
if !ok {
resp.Diagnostics.AddError(
"Unexpected Data Source Configure Type",
- fmt.Sprintf("Expected *xenapi.Session, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ fmt.Sprintf("Expected *xenserver.xsProvider, got: %T. Please report this issue to the provider developers.", req.ProviderData),
)
return
}
- d.session = session
+ d.session = providerData.session
}
func (d *networkDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
diff --git a/xenserver/network_vlan_resource.go b/xenserver/network_vlan_resource.go
index 34bef8d..c0d40d4 100644
--- a/xenserver/network_vlan_resource.go
+++ b/xenserver/network_vlan_resource.go
@@ -121,15 +121,15 @@ func (r *vlanResource) Configure(_ context.Context, req resource.ConfigureReques
if req.ProviderData == nil {
return
}
- session, ok := req.ProviderData.(*xenapi.Session)
+ providerData, ok := req.ProviderData.(*xsProvider)
if !ok {
resp.Diagnostics.AddError(
"Unexpected Resource Configure Type",
- fmt.Sprintf("Expected *xenapi.Session, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ fmt.Sprintf("Expected *xenserver.xsProvider, got: %T. Please report this issue to the provider developers.", req.ProviderData),
)
return
}
- r.session = session
+ r.session = providerData.session
}
func (r *vlanResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
diff --git a/xenserver/nic_data_source.go b/xenserver/nic_data_source.go
index 6736857..ed515c2 100644
--- a/xenserver/nic_data_source.go
+++ b/xenserver/nic_data_source.go
@@ -56,15 +56,16 @@ func (d *nicDataSource) Configure(_ context.Context, req datasource.ConfigureReq
if req.ProviderData == nil {
return
}
- session, ok := req.ProviderData.(*xenapi.Session)
+
+ providerData, ok := req.ProviderData.(*xsProvider)
if !ok {
resp.Diagnostics.AddError(
"Unexpected Data Source Configure Type",
- fmt.Sprintf("Expected *xenapi.Session, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ fmt.Sprintf("Expected *xenserver.xsProvider, got: %T. Please report this issue to the provider developers.", req.ProviderData),
)
return
}
- d.session = session
+ d.session = providerData.session
}
func (d *nicDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
diff --git a/xenserver/pif_configure_resource.go b/xenserver/pif_configure_resource.go
index 7449adb..2d8ce80 100644
--- a/xenserver/pif_configure_resource.go
+++ b/xenserver/pif_configure_resource.go
@@ -38,7 +38,7 @@ func (r *pifConfigureResource) Metadata(_ context.Context, req resource.Metadata
func (r *pifConfigureResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
resp.Schema = schema.Schema{
- MarkdownDescription: "Provides an PIF configure resource to update the exist PIF parameters.",
+ MarkdownDescription: "PIF configuration resource which is used to update the existing PIF parameters. \n\n Note that no new PIF will be deployed when `terraform apply` is executed. Additionally, when it comes to `terraform destroy`, it actually has no effect on this resource.",
Attributes: map[string]schema.Attribute{
"uuid": schema.StringAttribute{
MarkdownDescription: "The UUID of the PIF.",
@@ -52,6 +52,10 @@ func (r *pifConfigureResource) Schema(_ context.Context, _ resource.SchemaReques
MarkdownDescription: "The IP interface of the PIF. Currently only support IPv4.",
Optional: true,
Attributes: map[string]schema.Attribute{
+ "name_label": schema.StringAttribute{
+ MarkdownDescription: "The name of the interface in IP Address Configuration.",
+ Optional: true,
+ },
"mode": schema.StringAttribute{
MarkdownDescription: "The protocol define the primary address of this PIF, for example, `\"None\"`, `\"DHCP\"`, `\"Static\"`.",
Required: true,
@@ -94,15 +98,15 @@ func (r *pifConfigureResource) Configure(_ context.Context, req resource.Configu
if req.ProviderData == nil {
return
}
- session, ok := req.ProviderData.(*xenapi.Session)
+ providerData, ok := req.ProviderData.(*xsProvider)
if !ok {
resp.Diagnostics.AddError(
"Unexpected Resource Configure Type",
- fmt.Sprintf("Expected *xenapi.Session, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ fmt.Sprintf("Expected *xenserver.xsProvider, got: %T. Please report this issue to the provider developers.", req.ProviderData),
)
return
}
- r.session = session
+ r.session = providerData.session
}
func (r *pifConfigureResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
diff --git a/xenserver/pif_data_source.go b/xenserver/pif_data_source.go
index 7234212..3bf879b 100644
--- a/xenserver/pif_data_source.go
+++ b/xenserver/pif_data_source.go
@@ -215,15 +215,15 @@ func (d *pifDataSource) Configure(_ context.Context, req datasource.ConfigureReq
if req.ProviderData == nil {
return
}
- session, ok := req.ProviderData.(*xenapi.Session)
+ providerData, ok := req.ProviderData.(*xsProvider)
if !ok {
resp.Diagnostics.AddError(
"Unexpected Data Source Configure Type",
- fmt.Sprintf("Expected *xenapi.Session, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ fmt.Sprintf("Expected *xenserver.xsProvider, got: %T. Please report this issue to the provider developers.", req.ProviderData),
)
return
}
- d.session = session
+ d.session = providerData.session
}
// Read refreshes the Terraform state with the latest data.
diff --git a/xenserver/pif_utils.go b/xenserver/pif_utils.go
index 8e0bc4c..35e5bcb 100644
--- a/xenserver/pif_utils.go
+++ b/xenserver/pif_utils.go
@@ -193,11 +193,12 @@ type pifConfigureResourceModel struct {
}
type InterfaceObject struct {
- Mode types.String `tfsdk:"mode"`
- IP types.String `tfsdk:"ip"`
- Gateway types.String `tfsdk:"gateway"`
- Netmask types.String `tfsdk:"netmask"`
- DNS types.String `tfsdk:"dns"`
+ NameLabel types.String `tfsdk:"name_label"`
+ Mode types.String `tfsdk:"mode"`
+ IP types.String `tfsdk:"ip"`
+ Gateway types.String `tfsdk:"gateway"`
+ Netmask types.String `tfsdk:"netmask"`
+ DNS types.String `tfsdk:"dns"`
}
func getIPConfigurationMode(mode string) xenapi.IPConfigurationMode {
@@ -228,23 +229,55 @@ func pifConfigureResourceModelUpdate(ctx context.Context, session *xenapi.Sessio
return errors.New(err.Error())
}
}
+
if !data.Interface.IsNull() {
+ pifMetricsRef, err := xenapi.PIF.GetMetrics(session, pifRef)
+ if err != nil {
+ return errors.New(err.Error())
+ }
+
+ isPIFConnected, err := xenapi.PIFMetrics.GetCarrier(session, pifMetricsRef)
+ if err != nil {
+ return errors.New(err.Error())
+ }
+
+ if !isPIFConnected {
+ return errors.New("the PIF with uuid " + data.UUID.ValueString() + " is not connected")
+ }
+
var interfaceObject InterfaceObject
diags := data.Interface.As(ctx, &interfaceObject, basetypes.ObjectAsOptions{})
if diags.HasError() {
return errors.New("unable to read PIF interface config")
}
+
+ if !interfaceObject.NameLabel.IsNull() {
+ oc, err := xenapi.PIF.GetOtherConfig(session, pifRef)
+ if err != nil {
+ return errors.New(err.Error())
+ }
+
+ oc["management_purpose"] = interfaceObject.NameLabel.ValueString()
+
+ err = xenapi.PIF.SetOtherConfig(session, pifRef, oc)
+ if err != nil {
+ return errors.New(err.Error())
+ }
+ }
+
mode := getIPConfigurationMode(interfaceObject.Mode.ValueString())
ip := interfaceObject.IP.ValueString()
netmask := interfaceObject.Netmask.ValueString()
gateway := interfaceObject.Gateway.ValueString()
dns := interfaceObject.DNS.ValueString()
- err := xenapi.PIF.ReconfigureIP(session, pifRef, mode, ip, netmask, gateway, dns)
+
+ tflog.Debug(ctx, "Reconfigure PIF IP with mode: "+string(mode)+", ip: "+ip+", netmask: "+netmask+", gateway: "+gateway+", dns: "+dns)
+ err = xenapi.PIF.ReconfigureIP(session, pifRef, mode, ip, netmask, gateway, dns)
if err != nil {
tflog.Error(ctx, "unable to update the PIF 'interface'")
return errors.New(err.Error())
}
- if mode == "DHCP" {
+ if string(mode) == "DHCP" {
err := checkPIFHasIP(ctx, session, pifRef)
if err != nil {
return err
@@ -261,7 +294,7 @@ func checkPIFHasIP(ctx context.Context, session *xenapi.Session, ref xenapi.PIFR
for {
select {
case <-timeoutChan:
- return errors.New("get PIF IP timeout in 60 seconds")
+ return errors.New("get PIF IP timeout in 60 seconds, please check if the interface is connected")
default:
ip, err := xenapi.PIF.GetIP(session, ref)
if err != nil {
@@ -269,8 +302,10 @@ func checkPIFHasIP(ctx context.Context, session *xenapi.Session, ref xenapi.PIFR
return errors.New(err.Error())
}
if isValidIpAddress(net.ParseIP(ip)) {
+ tflog.Debug(ctx, "PIF IP is available: "+ip)
return nil
}
+
tflog.Debug(ctx, "-----> Retry get PIF IP")
time.Sleep(5 * time.Second)
}
diff --git a/xenserver/pool_resource.go b/xenserver/pool_resource.go
index bb851d7..f6ec99d 100644
--- a/xenserver/pool_resource.go
+++ b/xenserver/pool_resource.go
@@ -25,8 +25,8 @@ func NewPoolResource() resource.Resource {
// poolResource defines the resource implementation.
type poolResource struct {
- session *xenapi.Session
- providerConfig *providerModel
+ session *xenapi.Session
+ coordinatorConf *coordinatorConf
}
func (r *poolResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
@@ -35,7 +35,7 @@ func (r *poolResource) Metadata(_ context.Context, req resource.MetadataRequest,
func (r *poolResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
resp.Schema = schema.Schema{
- MarkdownDescription: "Provides a pool resource.",
+ MarkdownDescription: "This provides a pool resource. During the execution of `terraform destroy` for this particular resource, all of the hosts that are part of the pool will be separated and converted into standalone hosts.",
Attributes: PoolSchema(),
}
}
@@ -46,15 +46,18 @@ func (r *poolResource) Configure(_ context.Context, req resource.ConfigureReques
if req.ProviderData == nil {
return
}
- session, ok := req.ProviderData.(*xenapi.Session)
+
+ providerData, ok := req.ProviderData.(*xsProvider)
if !ok {
resp.Diagnostics.AddError(
- "Unexpected Resource Configure Type",
- fmt.Sprintf("Expected *xenapi.Session, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ "Failed to get Provider Data in PoolResource",
+ fmt.Sprintf("Expected *xenserver.xsProvider, got: %T. Please report this issue to the provider developers.", req.ProviderData),
)
return
}
- r.session = session
+
+ r.session = providerData.session
+ r.coordinatorConf = &providerData.coordinatorConf
}
func (r *poolResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
@@ -65,48 +68,42 @@ func (r *poolResource) Create(ctx context.Context, req resource.CreateRequest, r
}
tflog.Debug(ctx, "Creating pool...")
- poolParams, err := getPoolParams(plan)
+ poolParams := getPoolParams(plan)
+
+ poolRef, err := getPoolRef(r.session)
if err != nil {
resp.Diagnostics.AddError(
- "Unable to get pool create params",
+ "Unable to get pool ref",
err.Error(),
)
return
}
- poolRef, err := getPoolRef(r.session)
+ err = poolJoin(ctx, r.session, r.coordinatorConf, plan)
if err != nil {
resp.Diagnostics.AddError(
- "Unable to get pool ref",
+ "Unable to join pool in Create stage",
err.Error(),
)
return
}
- err = setPool(r.session, poolRef, poolParams)
+ err = poolEject(ctx, r.session, plan)
if err != nil {
resp.Diagnostics.AddError(
- "Unable to set pool",
+ "Unable to eject pool in Create stage",
err.Error(),
)
-
- err = cleanupPoolResource(r.session, poolRef)
- if err != nil {
- resp.Diagnostics.AddError(
- "Unable to cleanup pool resource",
- err.Error(),
- )
- }
-
return
}
- err = poolJoin(r.providerConfig, poolParams)
+ err = setPool(r.session, poolRef, poolParams)
if err != nil {
resp.Diagnostics.AddError(
- "Unable to join pool",
+ "Unable to set pool in Create stage",
err.Error(),
)
+
return
}
@@ -122,7 +119,7 @@ func (r *poolResource) Create(ctx context.Context, req resource.CreateRequest, r
err = updatePoolResourceModelComputed(r.session, poolRecord, &plan)
if err != nil {
resp.Diagnostics.AddError(
- "Unable to update the computed fields of PoolResourceModel",
+ "Unable to update the computed fields of PoolResourceModel in Create stage",
err.Error(),
)
return
@@ -159,7 +156,7 @@ func (r *poolResource) Read(ctx context.Context, req resource.ReadRequest, resp
err = updatePoolResourceModel(r.session, poolRecord, &state)
if err != nil {
resp.Diagnostics.AddError(
- "Unable to update the computed fields of PoolResourceModel",
+ "Unable to update the computed fields of PoolResourceModel in Read stage",
err.Error(),
)
return
@@ -179,7 +176,9 @@ func (r *poolResource) Update(ctx context.Context, req resource.UpdateRequest, r
return
}
- poolRef, err := xenapi.Pool.GetByUUID(r.session, state.UUID.ValueString())
+ poolParams := getPoolParams(plan)
+
+ poolRef, err := getPoolRef(r.session)
if err != nil {
resp.Diagnostics.AddError(
"Unable to get pool ref",
@@ -188,12 +187,31 @@ func (r *poolResource) Update(ctx context.Context, req resource.UpdateRequest, r
return
}
- err = poolResourceModelUpdate(r.session, poolRef, plan)
+ err = poolJoin(ctx, r.session, r.coordinatorConf, plan)
+ if err != nil {
+ resp.Diagnostics.AddError(
+ "Unable to join pool in Update stage",
+ err.Error(),
+ )
+ return
+ }
+
+ err = poolEject(ctx, r.session, plan)
+ if err != nil {
+ resp.Diagnostics.AddError(
+ "Unable to eject pool in Update stage",
+ err.Error(),
+ )
+ return
+ }
+
+ err = setPool(r.session, poolRef, poolParams)
if err != nil {
resp.Diagnostics.AddError(
- "Unable to update pool resource model",
+ "Unable to set pool in Update stage",
err.Error(),
)
+
return
}
@@ -209,7 +227,7 @@ func (r *poolResource) Update(ctx context.Context, req resource.UpdateRequest, r
err = updatePoolResourceModelComputed(r.session, poolRecord, &plan)
if err != nil {
resp.Diagnostics.AddError(
- "Unable to update the computed fields of PoolResourceModel",
+ "Unable to update the computed fields of PoolResourceModel in Update stage",
err.Error(),
)
return
@@ -225,22 +243,16 @@ func (r *poolResource) Delete(ctx context.Context, req resource.DeleteRequest, r
return
}
+ tflog.Debug(ctx, "Deleting pool...")
poolRef, err := xenapi.Pool.GetByUUID(r.session, state.UUID.ValueString())
if err != nil {
- resp.Diagnostics.AddError(
- "Unable to get pool ref",
- err.Error(),
- )
+ resp.Diagnostics.AddError("Unable to get pool ref", err.Error())
return
}
- tflog.Debug(ctx, "Deleting pool...")
err = cleanupPoolResource(r.session, poolRef)
if err != nil {
- resp.Diagnostics.AddError(
- "Unable to cleanup pool resource",
- err.Error(),
- )
+ resp.Diagnostics.AddError("Unable to cleanup pool resource", err.Error())
return
}
diff --git a/xenserver/pool_resource_test.go b/xenserver/pool_resource_test.go
index 27eccdc..0893071 100644
--- a/xenserver/pool_resource_test.go
+++ b/xenserver/pool_resource_test.go
@@ -2,67 +2,129 @@ package xenserver
import (
"fmt"
+ "os"
"testing"
"time"
"github.com/hashicorp/terraform-plugin-testing/helper/resource"
)
-func updatePIFConfigure(eth_index string, mode string) string {
+func pifResource(eth_index string) string {
return fmt.Sprintf(`
// configure eth1 PIF IP
data "xenserver_pif" "pif_data" {
device = "eth%s"
}
-// For a pool with 2 hosts
-resource "xenserver_pif_configure" "pif_update" {
+resource "xenserver_pif_configure" "pif1" {
uuid = data.xenserver_pif.pif_data.data_items[0].uuid
interface = {
- mode = "%s"
+ mode = "DHCP"
}
}
-resource "xenserver_pif_configure" "pif_update1" {
+resource "xenserver_pif_configure" "pif2" {
uuid = data.xenserver_pif.pif_data.data_items[1].uuid
interface = {
- mode = "%s"
+ mode = "DHCP"
}
}
-`, eth_index, mode, mode)
-}
-func testAccPoolResourceConfig(name_label string, name_description string, sr_index string, eth_index string) string {
- return fmt.Sprintf(`
-data "xenserver_sr" "sr" {
- name_label = "Local storage"
+resource "xenserver_pif_configure" "pif3" {
+ uuid = data.xenserver_pif.pif_data.data_items[2].uuid
+ interface = {
+ mode = "DHCP"
+ }
}
data "xenserver_pif" "pif" {
device = "eth%s"
}
+`, eth_index, eth_index)
+}
+
+func managementNetwork(index string) string {
+ return fmt.Sprintf(`
+ management_network = data.xenserver_pif.pif.data_items[%s].network
+ `, index)
+}
+
+func testPoolResource(name_label string,
+ name_description string,
+ storage_location string,
+ management_network string,
+ supporter_params string,
+ eject_supporter string) string {
+ return fmt.Sprintf(`
+resource "xenserver_sr_nfs" "nfs" {
+ name_label = "NFS"
+ version = "3"
+ storage_location = "%s"
+}
+
+data "xenserver_host" "supporter" {
+ is_coordinator = false
+}
resource "xenserver_pool" "pool" {
name_label = "%s"
name_description = "%s"
- default_sr = data.xenserver_sr.sr.data_items[%s].uuid
- management_network = data.xenserver_pif.pif.data_items[0].network
+ default_sr = xenserver_sr_nfs.nfs.uuid
+ %s
+ %s
+ %s
}
-`, eth_index, name_label, name_description, sr_index)
+`, storage_location,
+ name_label,
+ name_description,
+ management_network,
+ supporter_params,
+ eject_supporter)
+}
+
+func testJoinSupporterParams(supporterHost string, supporterUsername string, supporterPassword string) string {
+	return fmt.Sprintf(`
+	join_supporters = [
+		{
+			host     = "%s"
+			username = "%s"
+			password = "%s"
+		}
+	]
+`, supporterHost, supporterUsername, supporterPassword)
+}
+
+func ejectSupporterParams(index string) string {
+ return fmt.Sprintf(`
+ eject_supporters = [
+ data.xenserver_host.supporter.data_items[%s].uuid
+ ]
+`, index)
}
func TestAccPoolResource(t *testing.T) {
+ // skip test if TEST_POOL is not set
+ if os.Getenv("TEST_POOL") == "" {
+ t.Skip("Skipping TestAccPoolResource test due to TEST_POOL not set")
+ }
+
+ storageLocation := os.Getenv("NFS_SERVER") + ":" + os.Getenv("NFS_SERVER_PATH")
+ joinSupporterParams := testJoinSupporterParams(os.Getenv("SUPPORTER_HOST"), os.Getenv("SUPPORTER_USERNAME"), os.Getenv("SUPPORTER_PASSWORD"))
resource.Test(t, resource.TestCase{
ProtoV6ProviderFactories: testAccProtoV6ProviderFactories,
Steps: []resource.TestStep{
- // Create and Read testing
+ // Create and Read testing for Default SR and Pool Join
{
- Config: providerConfig + updatePIFConfigure("1", "DHCP") + testAccPoolResourceConfig("Test Pool A", "Test Pool A Description", "0", "0"),
+ Config: providerConfig + testPoolResource("Test Pool A",
+ "Test Pool Join",
+ storageLocation,
+ "",
+ joinSupporterParams,
+ ""),
Check: resource.ComposeAggregateTestCheckFunc(
resource.TestCheckResourceAttr("xenserver_pool.pool", "name_label", "Test Pool A"),
- resource.TestCheckResourceAttr("xenserver_pool.pool", "name_description", "Test Pool A Description"),
+ resource.TestCheckResourceAttr("xenserver_pool.pool", "name_description", "Test Pool Join"),
resource.TestCheckResourceAttrSet("xenserver_pool.pool", "default_sr"),
- resource.TestCheckResourceAttrSet("xenserver_pool.pool", "management_network"),
),
},
// ImportState testing
@@ -70,25 +132,32 @@ func TestAccPoolResource(t *testing.T) {
ResourceName: "xenserver_pool.pool",
ImportState: true,
ImportStateVerify: true,
- ImportStateVerifyIgnore: []string{},
+ ImportStateVerifyIgnore: []string{"join_supporters"},
},
- // Update and Read testing
+		// Update and Read testing for Pool Eject Supporter
{
- Config: providerConfig + testAccPoolResourceConfig("Test Pool B", "Test Pool B Description", "1", "1"),
+ Config: providerConfig + testPoolResource("Test Pool B",
+ "Test Pool Eject",
+ storageLocation,
+ "",
+ "",
+ ejectSupporterParams("1")),
Check: resource.ComposeAggregateTestCheckFunc(
resource.TestCheckResourceAttr("xenserver_pool.pool", "name_label", "Test Pool B"),
- resource.TestCheckResourceAttr("xenserver_pool.pool", "name_description", "Test Pool B Description"),
- resource.TestCheckResourceAttrSet("xenserver_pool.pool", "default_sr"),
- resource.TestCheckResourceAttrSet("xenserver_pool.pool", "management_network"),
+ resource.TestCheckResourceAttr("xenserver_pool.pool", "name_description", "Test Pool Eject"),
),
},
- // Revert changes
+		// Update and Read testing for Pool Management Network
{
- Config: providerConfig + testAccPoolResourceConfig("Test Pool A", "Test Pool A Description", "0", "0"),
+ Config: providerConfig + pifResource("3") + testPoolResource("Test Pool C",
+ "Test Pool Management Network",
+ storageLocation,
+ managementNetwork("2"),
+ "",
+ ""),
Check: resource.ComposeAggregateTestCheckFunc(
- resource.TestCheckResourceAttr("xenserver_pool.pool", "name_label", "Test Pool A"),
- resource.TestCheckResourceAttr("xenserver_pool.pool", "name_description", "Test Pool A Description"),
- resource.TestCheckResourceAttrSet("xenserver_pool.pool", "default_sr"),
+ resource.TestCheckResourceAttr("xenserver_pool.pool", "name_label", "Test Pool C"),
+ resource.TestCheckResourceAttr("xenserver_pool.pool", "name_description", "Test Pool Management Network"),
resource.TestCheckResourceAttrSet("xenserver_pool.pool", "management_network"),
),
},
@@ -96,6 +165,6 @@ func TestAccPoolResource(t *testing.T) {
},
})
- // sleep 10s to wait for supporters back to enable
- time.Sleep(10 * time.Second)
+ // sleep 30s to wait for supporters and management network back to enable
+ time.Sleep(30 * time.Second)
}
diff --git a/xenserver/pool_utils.go b/xenserver/pool_utils.go
index 2f1d2cb..bebb13a 100644
--- a/xenserver/pool_utils.go
+++ b/xenserver/pool_utils.go
@@ -1,33 +1,39 @@
package xenserver
import (
+ "context"
"errors"
+ "regexp"
+ "slices"
+ "strings"
"time"
+ "github.com/cenkalti/backoff/v4"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
"xenapi"
)
type poolResourceModel struct {
- NameLabel types.String `tfsdk:"name_label"`
- NameDescription types.String `tfsdk:"name_description"`
- DefaultSRUUID types.String `tfsdk:"default_sr"`
- ManagementNetworkUUID types.String `tfsdk:"management_network"`
- Supporters []supporterResourceModel `tfsdk:"supporters"`
- UUID types.String `tfsdk:"uuid"`
- ID types.String `tfsdk:"id"`
+ NameLabel types.String `tfsdk:"name_label"`
+ NameDescription types.String `tfsdk:"name_description"`
+ DefaultSRUUID types.String `tfsdk:"default_sr"`
+ ManagementNetworkUUID types.String `tfsdk:"management_network"`
+ JoinSupporters types.Set `tfsdk:"join_supporters"`
+ EjectSupporters types.Set `tfsdk:"eject_supporters"`
+ UUID types.String `tfsdk:"uuid"`
+ ID types.String `tfsdk:"id"`
}
-type supporterResourceModel struct {
+type joinSupporterResourceModel struct {
Host types.String `tfsdk:"host"`
Username types.String `tfsdk:"username"`
Password types.String `tfsdk:"password"`
- UUID types.String `tfsdk:"uuid"`
}
type poolParams struct {
@@ -35,13 +41,6 @@ type poolParams struct {
NameDescription string
DefaultSRUUID string
ManagementNetworkUUID string
- Supporters []supporterParams
-}
-
-type supporterParams struct {
- Host string
- Username string
- Password string
}
func PoolSchema() map[string]schema.Attribute {
@@ -57,16 +56,22 @@ func PoolSchema() map[string]schema.Attribute {
Default: stringdefault.StaticString(""),
},
"default_sr": schema.StringAttribute{
- MarkdownDescription: "The default SR UUID of the pool.",
- Required: true,
- },
- "management_network": schema.StringAttribute{
- MarkdownDescription: "The management network UUID of the pool.",
+ MarkdownDescription: "The default SR UUID of the pool. this SR should be shared SR.",
Optional: true,
Computed: true,
},
- "supporters": schema.SetNestedAttribute{
- MarkdownDescription: "The set of pool supporters which will join the pool.",
+ "management_network": schema.StringAttribute{
+ MarkdownDescription: "The management network UUID of the pool." +
+ "\n\n-> **Note:** \n1. The management network would be reconfigured only when the management network UUID is provided. " +
+			"\n2. All hosts in the pool should use the same management network; its network configuration can be set with `resource pif_configure`. " +
+			"\n3. After the management network is reconfigured, the provider waits about 60 seconds for the toolstack to restart.",
+ Optional: true,
+ Computed: true,
+ },
+ "join_supporters": schema.SetNestedAttribute{
+ MarkdownDescription: "The set of pool supporters which will join the pool." +
+ "\n\n-> **Note:** \n1. It would raise error if a supporter is in both join_supporters and eject_supporters." +
+ "\n2. The join operation would be performed only when the host, username, and password are provided.",
NestedObject: schema.NestedAttributeObject{
Attributes: map[string]schema.Attribute{
"host": schema.StringAttribute{
@@ -82,17 +87,15 @@ func PoolSchema() map[string]schema.Attribute {
Optional: true,
Sensitive: true,
},
- "uuid": schema.StringAttribute{
- MarkdownDescription: "The UUID of the host.",
- Computed: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.UseStateForUnknown(),
- },
- },
},
},
Optional: true,
},
+ "eject_supporters": schema.SetAttribute{
+ MarkdownDescription: "The set of pool supporters which will be ejected from the pool.",
+ ElementType: types.StringType,
+ Optional: true,
+ },
"uuid": schema.StringAttribute{
MarkdownDescription: "The UUID of the pool.",
Computed: true,
@@ -110,7 +113,7 @@ func PoolSchema() map[string]schema.Attribute {
}
}
-func getPoolParams(plan poolResourceModel) (poolParams, error) {
+func getPoolParams(plan poolResourceModel) poolParams {
var params poolParams
params.NameLabel = plan.NameLabel.ValueString()
params.NameDescription = plan.NameDescription.ValueString()
@@ -119,44 +122,162 @@ func getPoolParams(plan poolResourceModel) (poolParams, error) {
params.ManagementNetworkUUID = plan.ManagementNetworkUUID.ValueString()
}
- for _, host := range plan.Supporters {
- hostParams, err := getSupporterParams(host)
+ return params
+}
+
+func poolJoin(ctx context.Context, coordinatorSession *xenapi.Session, coordinatorConf *coordinatorConf, plan poolResourceModel) error {
+ joinedSupporterUUIDs := []string{}
+ joinSupporters := make([]joinSupporterResourceModel, 0, len(plan.JoinSupporters.Elements()))
+ diags := plan.JoinSupporters.ElementsAs(ctx, &joinSupporters, false)
+ if diags.HasError() {
+ return errors.New("unable to access join supporters in config data")
+ }
+ for _, supporter := range joinSupporters {
+ supporterSession, err := loginServer(supporter.Host.ValueString(), supporter.Username.ValueString(), supporter.Password.ValueString())
+ if err != nil {
+ if strings.Contains(err.Error(), "HOST_IS_SLAVE") {
+ tflog.Debug(ctx, "Host is already in the pool, continue")
+ continue
+ }
+ return errors.New("Login Supporter Host Failed!\n" + err.Error() + ", host: " + supporter.Host.ValueString())
+ }
+
+ hostRefs, err := xenapi.Host.GetAll(supporterSession)
+ if err != nil {
+ return errors.New(err.Error())
+ }
+
+ if len(hostRefs) > 1 {
+ return errors.New("Supporter host " + supporter.Host.ValueString() + " is not a standalone host")
+ }
+
+ supporterRef := hostRefs[0]
+
+ // Check if the host is already in the pool, continue if it is
+ beforeJoinHostRefs, err := xenapi.Host.GetAll(coordinatorSession)
+ if err != nil {
+ return errors.New(err.Error())
+ }
+
+ if slices.Contains(beforeJoinHostRefs, supporterRef) {
+ continue
+ }
+
+ supporterUUID, err := xenapi.Host.GetUUID(supporterSession, supporterRef)
if err != nil {
- return params, err
+ return errors.New(err.Error() + ". \n\nunable to Get Host UUID with host: " + supporter.Host.ValueString())
+ }
+
+ ejectSupporters := make([]string, 0, len(plan.EjectSupporters.Elements()))
+ diags := plan.EjectSupporters.ElementsAs(ctx, &ejectSupporters, false)
+ if diags.HasError() {
+ return errors.New("unable to access eject supporters in config data")
}
- params.Supporters = append(params.Supporters, hostParams)
+
+ // Check if the host is in eject_supporters, return error if it is
+ if slices.Contains(ejectSupporters, supporterUUID) {
+ return errors.New("host " + supporter.Host.ValueString() + " with uuid " + supporterUUID + " is in eject_supporters, can't join the pool")
+ }
+
+ // if coordinator host has scheme, remove it
+ coordinatorIP := regexp.MustCompile(`^https?://`).ReplaceAllString(coordinatorConf.Host, "")
+ err = xenapi.Pool.Join(supporterSession, coordinatorIP, coordinatorConf.Username, coordinatorConf.Password)
+ if err != nil {
+ return errors.New(err.Error() + ". \n\nPool join failed with host uuid: " + supporterUUID)
+ }
+
+ joinedSupporterUUIDs = append(joinedSupporterUUIDs, supporterUUID)
}
- return params, nil
+
+ return waitAllSupportersLive(ctx, coordinatorSession, joinedSupporterUUIDs)
}
-func getSupporterParams(plan supporterResourceModel) (supporterParams, error) {
- var params supporterParams
- if plan.Host.IsUnknown() || plan.Username.IsUnknown() || plan.Password.IsUnknown() {
- return params, errors.New("host url, username, and password required when pool join")
+func waitAllSupportersLive(ctx context.Context, session *xenapi.Session, supporterUUIDs []string) error {
+ tflog.Debug(ctx, "Waiting for all supporters to join the pool...")
+ operation := func() error {
+ for _, supporterUUID := range supporterUUIDs {
+ hostRef, err := xenapi.Host.GetByUUID(session, supporterUUID)
+ if err != nil {
+ return errors.New("unable to Get Host by UUID " + supporterUUID + "!\n" + err.Error())
+ }
+
+ hostMetricsRef, err := xenapi.Host.GetMetrics(session, hostRef)
+ if err != nil {
+ return errors.New("unable to Get Host Metrics with UUID " + supporterUUID + "!\n" + err.Error())
+ }
+
+ hostIsLive, err := xenapi.HostMetrics.GetLive(session, hostMetricsRef)
+ if err != nil {
+ return errors.New("unable to Get Host Live Status with UUID " + supporterUUID + "!\n" + err.Error())
+ }
+
+ if hostIsLive {
+ tflog.Debug(ctx, "Host "+supporterUUID+" is live")
+ continue
+ } else {
+ tflog.Debug(ctx, "Host "+supporterUUID+" is not live, retrying...")
+ return errors.New("host " + supporterUUID + " is not live")
+ }
+ }
+ return nil
}
- params.Host = plan.Host.ValueString()
- params.Username = plan.Username.ValueString()
- params.Password = plan.Password.ValueString()
+ b := backoff.NewExponentialBackOff()
+ b.MaxInterval = 10 * time.Second
+ b.MaxElapsedTime = 5 * time.Minute
+ err := backoff.Retry(operation, b)
+ if err != nil {
+ return errors.New(err.Error())
+ }
- return params, nil
+ return nil
}
-func poolJoin(providerConfig *providerModel, poolParams poolParams) error {
- for _, supporter := range poolParams.Supporters {
- supporterSession, err := loginServer(supporter.Host, supporter.Username, supporter.Password)
- if err != nil {
- return err
+func poolEject(ctx context.Context, session *xenapi.Session, plan poolResourceModel) error {
+ ejectSupporters := make([]string, 0, len(plan.EjectSupporters.Elements()))
+ diags := plan.EjectSupporters.ElementsAs(ctx, &ejectSupporters, false)
+ if diags.HasError() {
+ return errors.New("unable to access eject supporters in config data")
+ }
+
+ for _, hostUUID := range ejectSupporters {
+ tflog.Debug(ctx, "Ejecting pool with host: "+hostUUID)
+
+ operation := func() error {
+ hostRef, err := xenapi.Host.GetByUUID(session, hostUUID)
+ if err != nil {
+ return errors.New(err.Error())
+ }
+ return xenapi.Pool.Eject(session, hostRef)
}
- err = xenapi.Pool.Join(supporterSession, providerConfig.Host.ValueString(), providerConfig.Username.ValueString(), providerConfig.Password.ValueString())
+ err := backoff.Retry(operation, backoff.NewExponentialBackOff())
if err != nil {
return errors.New(err.Error())
}
}
+
return nil
}
+func getCoordinatorRef(session *xenapi.Session) (xenapi.HostRef, string, error) {
+ var coordinatorRef xenapi.HostRef
+ var coordinatorUUID string
+ poolRef, err := getPoolRef(session)
+ if err != nil {
+ return coordinatorRef, coordinatorUUID, errors.New(err.Error())
+ }
+ coordinatorRef, err = xenapi.Pool.GetMaster(session, poolRef)
+ if err != nil {
+ return coordinatorRef, coordinatorUUID, errors.New(err.Error())
+ }
+ coordinatorUUID, err = xenapi.Host.GetUUID(session, coordinatorRef)
+ if err != nil {
+ return coordinatorRef, coordinatorUUID, errors.New(err.Error())
+ }
+ return coordinatorRef, coordinatorUUID, nil
+}
+
func getPoolRef(session *xenapi.Session) (xenapi.PoolRef, error) {
poolRefs, err := xenapi.Pool.GetAll(session)
if err != nil {
@@ -166,83 +287,87 @@ func getPoolRef(session *xenapi.Session) (xenapi.PoolRef, error) {
return poolRefs[0], nil
}
-func poolResourceModelUpdate(session *xenapi.Session, poolRef xenapi.PoolRef, plan poolResourceModel) error {
- err := xenapi.Pool.SetNameLabel(session, poolRef, plan.NameLabel.ValueString())
+func cleanupPoolResource(session *xenapi.Session, poolRef xenapi.PoolRef) error {
+ err := xenapi.Pool.SetNameLabel(session, poolRef, "")
if err != nil {
return errors.New(err.Error())
}
- err = xenapi.Pool.SetNameDescription(session, poolRef, plan.NameDescription.ValueString())
+	// get the coordinator ref so it can be skipped when ejecting supporters below
+ coordinatorRef, _, err := getCoordinatorRef(session)
if err != nil {
return errors.New(err.Error())
}
- srRef, err := xenapi.SR.GetByUUID(session, plan.DefaultSRUUID.ValueString())
+ // eject supporters
+ hostRefs, err := xenapi.Host.GetAll(session)
if err != nil {
return errors.New(err.Error())
}
- err = xenapi.Pool.SetDefaultSR(session, poolRef, srRef)
- if err != nil {
- return errors.New(err.Error())
- }
+ for _, hostRef := range hostRefs {
+ isCoordinator := hostRef == coordinatorRef
+ if isCoordinator {
+ continue
+ }
- if !plan.ManagementNetworkUUID.IsUnknown() {
- networkRef, err := xenapi.Network.GetByUUID(session, plan.ManagementNetworkUUID.ValueString())
- if err != nil {
- return errors.New(err.Error())
+ operation := func() error {
+ return xenapi.Pool.Eject(session, hostRef)
}
- err = xenapi.Pool.ManagementReconfigure(session, networkRef)
+ err = backoff.Retry(operation, backoff.NewExponentialBackOff())
if err != nil {
return errors.New(err.Error())
}
- // wait for toolstack restart
- time.Sleep(60 * time.Second)
}
return nil
}
-func cleanupPoolResource(session *xenapi.Session, poolRef xenapi.PoolRef) error {
- err := xenapi.Pool.SetNameLabel(session, poolRef, "")
- if err != nil {
- return errors.New(err.Error())
- }
- return nil
-}
-
func setPool(session *xenapi.Session, poolRef xenapi.PoolRef, poolParams poolParams) error {
err := xenapi.Pool.SetNameLabel(session, poolRef, poolParams.NameLabel)
if err != nil {
- return errors.New(err.Error())
+ return errors.New("unable to SetNameLabel!\n" + err.Error())
}
err = xenapi.Pool.SetNameDescription(session, poolRef, poolParams.NameDescription)
if err != nil {
- return errors.New(err.Error())
+ return errors.New("unable to SetNameDescription!\n" + err.Error())
}
- srRef, err := xenapi.SR.GetByUUID(session, poolParams.DefaultSRUUID)
- if err != nil {
- return errors.New(err.Error())
- }
+ if poolParams.DefaultSRUUID != "" {
+ srRef, err := xenapi.SR.GetByUUID(session, poolParams.DefaultSRUUID)
+ if err != nil {
+ return errors.New("unable to Get SR by UUID!\n" + err.Error() + ", uuid: " + poolParams.DefaultSRUUID)
+ }
- err = xenapi.Pool.SetDefaultSR(session, poolRef, srRef)
- if err != nil {
- return errors.New(err.Error())
+ // Check if the SR is non-shared, return error if it is
+ shared, err := xenapi.SR.GetShared(session, srRef)
+ if err != nil {
+ return errors.New("unable to Get SR shared status!\n" + err.Error())
+ }
+
+ if !shared {
+ return errors.New("SR with uuid " + poolParams.DefaultSRUUID + " is non-shared SR")
+ }
+
+ err = xenapi.Pool.SetDefaultSR(session, poolRef, srRef)
+ if err != nil {
+ return errors.New("unable to SetDefaultSR!\n" + err.Error())
+ }
}
if poolParams.ManagementNetworkUUID != "" {
networkRef, err := xenapi.Network.GetByUUID(session, poolParams.ManagementNetworkUUID)
if err != nil {
- return errors.New(err.Error() + ", uuid: " + poolParams.ManagementNetworkUUID)
+ return errors.New("unable to Get Network by UUID!\n" + err.Error() + ", uuid: " + poolParams.ManagementNetworkUUID)
}
err = xenapi.Pool.ManagementReconfigure(session, networkRef)
if err != nil {
- return errors.New(err.Error() + ", uuid: " + poolParams.ManagementNetworkUUID)
+ return errors.New("unable to ManagementReconfigure!\n" + err.Error() + ", uuid: " + poolParams.ManagementNetworkUUID)
}
+
// wait for toolstack restart
time.Sleep(60 * time.Second)
}
@@ -281,18 +406,21 @@ func getManagementNetworkUUID(session *xenapi.Session, coordinatorRef xenapi.Hos
func updatePoolResourceModel(session *xenapi.Session, record xenapi.PoolRecord, data *poolResourceModel) error {
data.NameLabel = types.StringValue(record.NameLabel)
- data.NameDescription = types.StringValue(record.NameDescription)
- srUUID, err := xenapi.SR.GetUUID(session, record.DefaultSR)
- if err != nil {
- return errors.New(err.Error())
- }
- data.DefaultSRUUID = types.StringValue(srUUID)
return updatePoolResourceModelComputed(session, record, data)
}
func updatePoolResourceModelComputed(session *xenapi.Session, record xenapi.PoolRecord, data *poolResourceModel) error {
data.UUID = types.StringValue(record.UUID)
data.ID = types.StringValue(record.UUID)
+ data.NameDescription = types.StringValue(record.NameDescription)
+
+ data.DefaultSRUUID = types.StringValue("")
+ if string(record.DefaultSR) != "OpaqueRef:NULL" {
+ srUUID, err := xenapi.SR.GetUUID(session, record.DefaultSR)
+ if err == nil {
+ data.DefaultSRUUID = types.StringValue(srUUID)
+ }
+ }
networkUUID, err := getManagementNetworkUUID(session, record.Master)
if err != nil {
diff --git a/xenserver/provider.go b/xenserver/provider.go
index f6dc6d0..b8235b9 100644
--- a/xenserver/provider.go
+++ b/xenserver/provider.go
@@ -27,8 +27,15 @@ type xsProvider struct {
// version is set to the provider version on release, "dev" when the
// provider is built and ran locally, and "test" when running acceptance
// testing.
- version string
- config *providerModel
+ version string
+ session *xenapi.Session
+ coordinatorConf coordinatorConf
+}
+
+type coordinatorConf struct {
+ Host string
+ Username string
+ Password string
}
func New(version string) func() provider.Provider {
@@ -83,7 +90,6 @@ func (p *xsProvider) Configure(ctx context.Context, req provider.ConfigureReques
if resp.Diagnostics.HasError() {
return
}
- p.config = &data
host := os.Getenv("XENSERVER_HOST")
username := os.Getenv("XENSERVER_USERNAME")
@@ -149,8 +155,14 @@ func (p *xsProvider) Configure(ctx context.Context, req provider.ConfigureReques
return
}
- resp.DataSourceData = session
- resp.ResourceData = session
+ p.coordinatorConf.Host = host
+ p.coordinatorConf.Username = username
+ p.coordinatorConf.Password = password
+ p.session = session
+
+ // the xsProvider type itself is made available for resources and data sources
+ resp.DataSourceData = p
+ resp.ResourceData = p
}
func loginServer(host string, username string, password string) (*xenapi.Session, error) {
diff --git a/xenserver/snapshot_resource.go b/xenserver/snapshot_resource.go
index e2001b5..8f737fa 100644
--- a/xenserver/snapshot_resource.go
+++ b/xenserver/snapshot_resource.go
@@ -49,8 +49,10 @@ func (r *snapshotResource) Schema(_ context.Context, _ resource.SchemaRequest, r
Required: true,
},
"with_memory": schema.BoolAttribute{
- MarkdownDescription: "True if snapshot with the VM's memory (VM must in running state), default to be `false`." +
- "\n\n-> **Note:** `with_memory` is not allowed to be updated.",
+ MarkdownDescription: "True if snapshot with the VM's memory, default to be `false`." +
+ "\n\n-> **Note:** " +
+ "1. `with_memory` field is not allowed to be updated. " +
+ "2. the VM must be in a running state and have the [XenServer VM Tool](https://www.xenserver.com/downloads) installed.",
Optional: true,
Computed: true,
Default: booldefault.StaticBool(false),
@@ -97,15 +99,16 @@ func (r *snapshotResource) Configure(_ context.Context, req resource.ConfigureRe
if req.ProviderData == nil {
return
}
- session, ok := req.ProviderData.(*xenapi.Session)
+
+ providerData, ok := req.ProviderData.(*xsProvider)
if !ok {
resp.Diagnostics.AddError(
"Unexpected Resource Configure Type",
- fmt.Sprintf("Expected *xenapi.Session, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ fmt.Sprintf("Expected *xenserver.xsProvider, got: %T. Please report this issue to the provider developers.", req.ProviderData),
)
return
}
- r.session = session
+ r.session = providerData.session
}
func (r *snapshotResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
diff --git a/xenserver/sr_data_source.go b/xenserver/sr_data_source.go
index a00c730..60ef0d7 100644
--- a/xenserver/sr_data_source.go
+++ b/xenserver/sr_data_source.go
@@ -155,15 +155,15 @@ func (d *srDataSource) Configure(_ context.Context, req datasource.ConfigureRequ
if req.ProviderData == nil {
return
}
- session, ok := req.ProviderData.(*xenapi.Session)
+ providerData, ok := req.ProviderData.(*xsProvider)
if !ok {
resp.Diagnostics.AddError(
"Unexpected Data Source Configure Type",
- fmt.Sprintf("Expected *xenapi.Session, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ fmt.Sprintf("Expected *xenserver.xsProvider, got: %T. Please report this issue to the provider developers.", req.ProviderData),
)
return
}
- d.session = session
+ d.session = providerData.session
}
// Read refreshes the Terraform state with the latest data.
diff --git a/xenserver/sr_nfs_resource.go b/xenserver/sr_nfs_resource.go
index 8768aed..2faf9c3 100644
--- a/xenserver/sr_nfs_resource.go
+++ b/xenserver/sr_nfs_resource.go
@@ -108,15 +108,15 @@ func (r *nfsResource) Configure(_ context.Context, req resource.ConfigureRequest
if req.ProviderData == nil {
return
}
- session, ok := req.ProviderData.(*xenapi.Session)
+ providerData, ok := req.ProviderData.(*xsProvider)
if !ok {
resp.Diagnostics.AddError(
"Unexpected Resource Configure Type",
- fmt.Sprintf("Expected *xenapi.Session, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ fmt.Sprintf("Expected *xenserver.xsProvider, got: %T. Please report this issue to the provider developers.", req.ProviderData),
)
return
}
- r.session = session
+ r.session = providerData.session
}
func (r *nfsResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
@@ -191,7 +191,7 @@ func (r *nfsResource) Read(ctx context.Context, req resource.ReadRequest, resp *
srRef, err := xenapi.SR.GetByUUID(r.session, data.UUID.ValueString())
if err != nil {
resp.Diagnostics.AddError(
- "Unable to get SR ref",
+ "Unable to get SR ref in Read stage",
err.Error(),
)
return
@@ -241,7 +241,7 @@ func (r *nfsResource) Update(ctx context.Context, req resource.UpdateRequest, re
srRef, err := xenapi.SR.GetByUUID(r.session, plan.UUID.ValueString())
if err != nil {
resp.Diagnostics.AddError(
- "Unable to get SR ref",
+ "Unable to get SR ref in Update stage",
err.Error(),
)
return
@@ -284,7 +284,7 @@ func (r *nfsResource) Delete(ctx context.Context, req resource.DeleteRequest, re
srRef, err := xenapi.SR.GetByUUID(r.session, data.UUID.ValueString())
if err != nil {
resp.Diagnostics.AddError(
- "Unable to get SR ref",
+ "Unable to get SR ref in Delete stage",
err.Error(),
)
return
diff --git a/xenserver/sr_resource.go b/xenserver/sr_resource.go
index 264b3ef..83cdf8f 100644
--- a/xenserver/sr_resource.go
+++ b/xenserver/sr_resource.go
@@ -119,15 +119,15 @@ func (r *srResource) Configure(_ context.Context, req resource.ConfigureRequest,
if req.ProviderData == nil {
return
}
- session, ok := req.ProviderData.(*xenapi.Session)
+ providerData, ok := req.ProviderData.(*xsProvider)
if !ok {
resp.Diagnostics.AddError(
"Unexpected Resource Configure Type",
- fmt.Sprintf("Expected *xenapi.Session, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ fmt.Sprintf("Expected *xenserver.xsProvider, got: %T. Please report this issue to the provider developers.", req.ProviderData),
)
return
}
- r.session = session
+ r.session = providerData.session
}
func (r *srResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
diff --git a/xenserver/sr_smb_resource.go b/xenserver/sr_smb_resource.go
index e9ebf3b..a28d768 100644
--- a/xenserver/sr_smb_resource.go
+++ b/xenserver/sr_smb_resource.go
@@ -102,15 +102,15 @@ func (r *smbResource) Configure(_ context.Context, req resource.ConfigureRequest
if req.ProviderData == nil {
return
}
- session, ok := req.ProviderData.(*xenapi.Session)
+ providerData, ok := req.ProviderData.(*xsProvider)
if !ok {
resp.Diagnostics.AddError(
"Unexpected Resource Configure Type",
- fmt.Sprintf("Expected *xenapi.Session, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ fmt.Sprintf("Expected *xenserver.xsProvider, got: %T. Please report this issue to the provider developers.", req.ProviderData),
)
return
}
- r.session = session
+ r.session = providerData.session
}
func (r *smbResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
diff --git a/xenserver/sr_utils.go b/xenserver/sr_utils.go
index 6bcc47d..47beff8 100644
--- a/xenserver/sr_utils.go
+++ b/xenserver/sr_utils.go
@@ -119,19 +119,6 @@ type srResourceModel struct {
ID types.String `tfsdk:"id"`
}
-func getPoolCoordinatorRef(session *xenapi.Session) (xenapi.HostRef, error) {
- var coordinatorRef xenapi.HostRef
- poolRefs, err := xenapi.Pool.GetAll(session)
- if err != nil {
- return coordinatorRef, errors.New(err.Error())
- }
- coordinatorRef, err = xenapi.Pool.GetMaster(session, poolRefs[0])
- if err != nil {
- return coordinatorRef, errors.New(err.Error())
- }
- return coordinatorRef, nil
-}
-
func getSRCreateParams(ctx context.Context, session *xenapi.Session, data srResourceModel) (srCreateParams, error) {
var params srCreateParams
params.NameLabel = data.NameLabel.ValueString()
@@ -147,7 +134,7 @@ func getSRCreateParams(ctx context.Context, session *xenapi.Session, data srReso
if diags.HasError() {
return params, errors.New("unable to access SR SM config data")
}
- coordinatorRef, err := getPoolCoordinatorRef(session)
+ coordinatorRef, _, err := getCoordinatorRef(session)
if err != nil {
return params, err
}
@@ -196,7 +183,7 @@ func updateSRResourceModelComputed(ctx context.Context, session *xenapi.Session,
if diags.HasError() {
return errors.New("unable to access SR SM config")
}
- hostRef, err := getPoolCoordinatorRef(session)
+ hostRef, _, err := getCoordinatorRef(session)
if err != nil {
return err
}
@@ -264,7 +251,7 @@ func unplugPBDs(session *xenapi.Session, pbdRefs []xenapi.PBDRef) error {
var allPBDRefsToNonCoordinator []xenapi.PBDRef
var allPBDRefsToCoordinator []xenapi.PBDRef
- coordinatorRef, err := getPoolCoordinatorRef(session)
+ coordinatorRef, _, err := getCoordinatorRef(session)
if err != nil {
return err
}
@@ -390,7 +377,7 @@ type nfsResourceModel struct {
func getNFSCreateParams(session *xenapi.Session, data nfsResourceModel) (srCreateParams, error) {
var params srCreateParams
- coordinatorRef, err := getPoolCoordinatorRef(session)
+ coordinatorRef, _, err := getCoordinatorRef(session)
if err != nil {
return params, err
}
@@ -502,7 +489,7 @@ type smbResourceModel struct {
func getSMBCreateParams(session *xenapi.Session, data smbResourceModel) (srCreateParams, error) {
var params srCreateParams
- coordinatorRef, err := getPoolCoordinatorRef(session)
+ coordinatorRef, _, err := getCoordinatorRef(session)
if err != nil {
return params, err
}
diff --git a/xenserver/vdi_resource.go b/xenserver/vdi_resource.go
index 6cfd49b..3f6fb08 100644
--- a/xenserver/vdi_resource.go
+++ b/xenserver/vdi_resource.go
@@ -45,15 +45,15 @@ func (r *vdiResource) Configure(_ context.Context, req resource.ConfigureRequest
if req.ProviderData == nil {
return
}
- session, ok := req.ProviderData.(*xenapi.Session)
+ providerData, ok := req.ProviderData.(*xsProvider)
if !ok {
resp.Diagnostics.AddError(
"Unexpected Resource Configure Type",
- fmt.Sprintf("Expected *xenapi.Session, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ fmt.Sprintf("Expected *xenserver.xsProvider, got: %T. Please report this issue to the provider developers.", req.ProviderData),
)
return
}
- r.session = session
+ r.session = providerData.session
}
func (r *vdiResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
diff --git a/xenserver/vm_data_source.go b/xenserver/vm_data_source.go
index a9a59e9..bb98e77 100644
--- a/xenserver/vm_data_source.go
+++ b/xenserver/vm_data_source.go
@@ -452,15 +452,15 @@ func (d *vmDataSource) Configure(_ context.Context, req datasource.ConfigureRequ
if req.ProviderData == nil {
return
}
- session, ok := req.ProviderData.(*xenapi.Session)
+ providerData, ok := req.ProviderData.(*xsProvider)
if !ok {
resp.Diagnostics.AddError(
"Unexpected Data Source Configure Type",
- fmt.Sprintf("Expected *xenapi.Session, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ fmt.Sprintf("Expected *xenserver.xsProvider, got: %T. Please report this issue to the provider developers.", req.ProviderData),
)
return
}
- d.session = session
+ d.session = providerData.session
}
// Read refreshes the Terraform state with the latest data.
diff --git a/xenserver/vm_resouce.go b/xenserver/vm_resouce.go
index 103150c..00fe838 100644
--- a/xenserver/vm_resouce.go
+++ b/xenserver/vm_resouce.go
@@ -41,15 +41,15 @@ func (r *vmResource) Configure(_ context.Context, req resource.ConfigureRequest,
if req.ProviderData == nil {
return
}
- session, ok := req.ProviderData.(*xenapi.Session)
+ providerData, ok := req.ProviderData.(*xsProvider)
if !ok {
resp.Diagnostics.AddError(
"Unexpected Resource Configure Type",
- fmt.Sprintf("Expected *xenapi.Session, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ fmt.Sprintf("Expected *xenserver.xsProvider, got: %T. Please report this issue to the provider developers.", req.ProviderData),
)
return
}
- r.session = session
+ r.session = providerData.session
}
func (r *vmResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {