diff --git a/go.mod b/go.mod index b56b1e484cf..d24c22a72f3 100644 --- a/go.mod +++ b/go.mod @@ -166,7 +166,7 @@ require ( github.com/containers/image/v5 v5.25.0 github.com/google/gnostic-models v0.6.8 github.com/rancher/rancher/pkg/apis v0.0.0-20240213233515-935d309ebad4 - github.com/rancher/shepherd v0.0.0-20240405212128-578908d4308a + github.com/rancher/shepherd v0.0.0-20240408151625-d0c3b8dbe5dd go.qase.io/client v0.0.0-20231114201952-65195ec001fa ) diff --git a/go.sum b/go.sum index 2ee850af3b9..73366dfff68 100644 --- a/go.sum +++ b/go.sum @@ -1641,8 +1641,8 @@ github.com/rancher/remotedialer v0.3.0 h1:y1EO8JCsgZo0RcqTUp6U8FXcBAv27R+TLnWRcp github.com/rancher/remotedialer v0.3.0/go.mod h1:BwwztuvViX2JrLLUwDlsYt5DiyUwHLlzynRwkZLAY0Q= github.com/rancher/rke v1.5.7 h1:pCVziDwgulQc2WgRkisY6sEo3DFGgu1StE66UbkuF2c= github.com/rancher/rke v1.5.7/go.mod h1:vojhOf8U8VCmw7y17OENWXSIfEFPEbXCMQcmI7xN7i8= -github.com/rancher/shepherd v0.0.0-20240405212128-578908d4308a h1:VV4AyNyCQrkPtvSVa1rQL69+A/gyWTYFgWSkjuR4JGQ= -github.com/rancher/shepherd v0.0.0-20240405212128-578908d4308a/go.mod h1:LNI7nH1BptYMvJmuqsLgmkMytGBBTpW4jk4vAHCxfF4= +github.com/rancher/shepherd v0.0.0-20240408151625-d0c3b8dbe5dd h1:Nog4ViMD04zg6GP+5IvsthOIWqps7m6mdMnhIwWkPxA= +github.com/rancher/shepherd v0.0.0-20240408151625-d0c3b8dbe5dd/go.mod h1:LNI7nH1BptYMvJmuqsLgmkMytGBBTpW4jk4vAHCxfF4= github.com/rancher/steve v0.0.0-20240305150728-3943409601f1 h1:6wNYy3q9jget45syTN6K2uOLSYaptLYCHscY2WRmhDI= github.com/rancher/steve v0.0.0-20240305150728-3943409601f1/go.mod h1:o4vLBzMTKbHHhIiAcbgOiaN3aK1vIjL6ZTgaGxQYpsY= github.com/rancher/system-upgrade-controller/pkg/apis v0.0.0-20210727200656-10b094e30007 h1:ru+mqGnxMmKeU0Q3XIDxkARvInDIqT1hH2amTcsjxI4= diff --git a/tests/v2/validation/deleting/delete_cluster_rke1_test.go b/tests/v2/validation/deleting/delete_cluster_rke1_test.go index e0acbd96c7f..89eaaa09a8d 100644 --- a/tests/v2/validation/deleting/delete_cluster_rke1_test.go +++ b/tests/v2/validation/deleting/delete_cluster_rke1_test.go @@ -34,11 +34,20 @@ func (c *RKE1ClusterDeleteTestSuite) SetupSuite() { } func (c *RKE1ClusterDeleteTestSuite) TestDeletingRKE1Cluster() { - clusterID, err := clusters.GetClusterIDByName(c.client, c.client.RancherConfig.ClusterName) - require.NoError(c.T(), err) - - clusters.DeleteRKE1Cluster(c.client, clusterID) - provisioning.VerifyDeleteRKE1Cluster(c.T(), c.client, clusterID) + tests := []struct { + name string + client *rancher.Client + }{ + {"Deleting cluster", c.client}, + } + + for _, tt := range tests { + clusterID, err := clusters.GetClusterIDByName(c.client, c.client.RancherConfig.ClusterName) + require.NoError(c.T(), err) + + clusters.DeleteRKE1Cluster(tt.client, clusterID) + provisioning.VerifyDeleteRKE1Cluster(c.T(), tt.client, clusterID) + } } // In order for 'go test' to run this suite, we need to create diff --git a/tests/v2/validation/deleting/delete_cluster_test.go b/tests/v2/validation/deleting/delete_cluster_test.go index 8d69b4199e7..d579207a8a0 100644 --- a/tests/v2/validation/deleting/delete_cluster_test.go +++ b/tests/v2/validation/deleting/delete_cluster_test.go @@ -3,9 +3,12 @@ package deleting import ( + "strings" "testing" + apisV1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" "github.com/rancher/shepherd/clients/rancher" + v1 "github.com/rancher/shepherd/clients/rancher/v1" "github.com/rancher/shepherd/extensions/clusters" "github.com/rancher/shepherd/extensions/provisioning" "github.com/rancher/shepherd/pkg/session" @@ -34,11 +37,35 @@ func (c 
*ClusterDeleteTestSuite) SetupSuite() { } func (c *ClusterDeleteTestSuite) TestDeletingCluster() { - clusterID, err := clusters.GetV1ProvisioningClusterByName(c.client, c.client.RancherConfig.ClusterName) - require.NoError(c.T(), err) + tests := []struct { + name string + client *rancher.Client + }{ + {"cluster", c.client}, + } + + for _, tt := range tests { + clusterID, err := clusters.GetV1ProvisioningClusterByName(c.client, c.client.RancherConfig.ClusterName) + require.NoError(c.T(), err) + + cluster, err := tt.client.Steve.SteveType(clusters.ProvisioningSteveResourceType).ByID(clusterID) + require.NoError(c.T(), err) + + updatedCluster := new(apisV1.Cluster) + err = v1.ConvertToK8sType(cluster, &updatedCluster) + require.NoError(c.T(), err) + + if strings.Contains(updatedCluster.Spec.KubernetesVersion, "rke2") { + tt.name = "Deleting RKE2 " + tt.name + } else { + tt.name = "Deleting K3S " + tt.name + } - clusters.DeleteK3SRKE2Cluster(c.client, clusterID) - provisioning.VerifyDeleteRKE2K3SCluster(c.T(), c.client, clusterID) + c.Run(tt.name, func() { + clusters.DeleteK3SRKE2Cluster(tt.client, clusterID) + provisioning.VerifyDeleteRKE2K3SCluster(c.T(), tt.client, clusterID) + }) + } } // In order for 'go test' to run this suite, we need to create diff --git a/tests/v2/validation/nodescaling/README.md b/tests/v2/validation/nodescaling/README.md index d4ebf5b2c1c..625ff9285b4 100644 --- a/tests/v2/validation/nodescaling/README.md +++ b/tests/v2/validation/nodescaling/README.md @@ -21,50 +21,42 @@ rancher: ``` ## Node Replacing -Node replacement tests require that the given pools have unique, distinct roles and more than 1 node per pool. You can run a subset of the tests, but still need more than 1 node for the role you would like to run the test for. i.e. for `-run ^TestScaleDownAndUp/TestWorkerScaleDownAndUp$` you would need at least 1 pool with 2 or more dedicaated workers in it. The last node in the pool will be replaced. - -Typically, a cluster with the following 3 pools is used for testing: +Node replacement tests require that the given pools have unique, distinct roles and more than 1 node per pool. Typically, a cluster with the following 3 pools is used for testing: ```yaml -{ - { - ControlPlane: true, - Quantity: 2, - }, - { - Etcd: true, - Quantity: 3, - }, - { - Worker: true, - Quantity: 2, - }, -} +provisioningInput: + nodePools: # nodePools is specific for RKE1 clusters. + - nodeRoles: + etcd: true + quantity: 3 + - nodeRoles: + controlplane: true + quantity: 2 + - nodeRoles: + worker: true + quantity: 3 + machinePools: # machinePools is specific for RKE2/K3s clusters. + - machinePoolConfig: + etcd: true + quantity: 3 + - machinePoolConfig: + controlplane: true + quantity: 2 + - machinePoolConfig: + worker: true + quantity: 3 ``` These tests utilize Go build tags.
Due to this, see the below examples on how to run the tests: ### RKE1 -`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE1NodeScaleDownAndUp/TestEtcdScaleDownAndUp"` \ -`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE1NodeScaleDownAndUp/TestControlPlaneScaleDownAndUp"` \ -`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE1NodeScaleDownAndUp/TestWorkerScaleDownAndUp"` +`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE1NodeReplacingTestSuite/TestReplacingRKE1Nodes"` ### RKE2 | K3S -`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestNodeScaleDownAndUp/TestEtcdScaleDownAndUp"` \ -`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestNodeScaleDownAndUp/TestControlPlaneScaleDownAndUp"` \ -`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestNodeScaleDownAndUp/TestWorkerScaleDownAndUp"` +`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestNodeReplacingTestSuite/TestReplacingNodes"` ## Scaling Existing Node Pools -Similar to the `provisioning` tests, the node scaling tests have static test cases as well as dynamicInput tests you can specify. In order to run the dynamicInput tests, you will need to define the `scalingInput` block in your config file. This block defines the quantity you would like the pool to be scaled up/down to. See an example below: +Similar to the `provisioning` tests, the node scaling tests have static test cases as well as dynamicInput tests you can specify. In order to run the dynamicInput tests, you will need to define the `scalingInput` block in your config file. This block defines the quantity you would like the pool to be scaled up/down to. See an example below that accounts for node drivers, custom clusters and hosted clusters: ```yaml -scalingInput: - nodePools: - nodeRoles: - worker: true - quantity: 2 - machinePools: - nodeRoles: - etcd: true - quantity: 1 scalingInput: nodeProvider: "ec2" nodePools: @@ -76,23 +68,29 @@ scalingInput: etcd: true quantity: 1 aksNodePool: - nodeCount: 1 + nodeCount: 3 eksNodePool: - desiredSize: 1 + desiredSize: 6 gkeNodePool: - initialNodeCount: 1 + initialNodeCount: 3 ``` -NOTE: When scaling AKS and EKS, you will need to make sure that the `maxCount` and `maxSize` parameter is greater than the desired scale amount, respectively. For example, if you wish to have 6 total EKS nodes, then the `maxSize` parameter needs to be at least 7. This is not a limitation of the automation, but rather how EkS specifically handles nodegroups. 
+NOTE: When scaling AKS and EKS, you will need to make sure that the `maxCount` and `maxSize` parameters are greater than the desired scale amounts, respectively. For example, if you wish to have 6 total EKS nodes, then the `maxSize` parameter needs to be at least 7. This is not a limitation of the automation, but rather how EKS specifically handles nodegroups. + +Additionally, for AKS, you must have `enableAutoScaling` set to true if you specify `maxCount` and `minCount`. These tests utilize Go build tags. Due to this, see the below examples on how to run the tests: ### RKE1 `gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE1NodeScalingTestSuite/TestScalingRKE1NodePools"` \ -`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE1NodeScalingTestSuite/TestScalingRKE1NodePoolsDynamicInput"` +`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE1NodeScalingTestSuite/TestScalingRKE1NodePoolsDynamicInput"` \ +`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE1CustomClusterNodeScalingTestSuite/TestScalingRKE1CustomClusterNodes"` \ +`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE1CustomClusterNodeScalingTestSuite/TestScalingRKE1CustomClusterNodesDynamicInput"` ### RKE2 | K3S -`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE2NodeScalingTestSuite/TestRKE2NodeScalingTestSuite"` \ -`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE2NodeScalingTestSuite/TestScalingRKE2NodePoolsDynamicInput"` +`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestNodeScalingTestSuite/TestScalingNodePools"` \ +`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestNodeScalingTestSuite/TestScalingNodePoolsDynamicInput"` \ +`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestCustomClusterNodeScalingTestSuite/TestScalingCustomClusterNodes"` \ +`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestCustomClusterNodeScalingTestSuite/TestScalingCustomClusterNodesDynamicInput"` ### AKS `gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestAKSNodeScalingTestSuite/TestScalingAKSNodePools"` \ diff --git
a/tests/v2/validation/nodescaling/scale_replace_rke1_test.go b/tests/v2/validation/nodescaling/scale_replace_rke1_test.go index 030671c56b8..96bbde0255d 100644 --- a/tests/v2/validation/nodescaling/scale_replace_rke1_test.go +++ b/tests/v2/validation/nodescaling/scale_replace_rke1_test.go @@ -7,13 +7,14 @@ import ( "github.com/rancher/shepherd/clients/rancher" "github.com/rancher/shepherd/extensions/provisioninginput" + nodepools "github.com/rancher/shepherd/extensions/rke1/nodepools" "github.com/rancher/shepherd/pkg/config" "github.com/rancher/shepherd/pkg/session" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" ) -type NodeScaleRKE1DownAndUp struct { +type RKE1NodeReplacingTestSuite struct { suite.Suite session *session.Session client *rancher.Client @@ -21,11 +22,11 @@ type NodeScaleRKE1DownAndUp struct { clustersConfig *provisioninginput.Config } -func (s *NodeScaleRKE1DownAndUp) TearDownSuite() { +func (s *RKE1NodeReplacingTestSuite) TearDownSuite() { s.session.Cleanup() } -func (s *NodeScaleRKE1DownAndUp) SetupSuite() { +func (s *RKE1NodeReplacingTestSuite) SetupSuite() { testSession := session.NewSession() s.session = testSession @@ -40,22 +41,42 @@ func (s *NodeScaleRKE1DownAndUp) SetupSuite() { s.client = client } -func (s *NodeScaleRKE1DownAndUp) TestEtcdScaleDownAndUp() { - s.Run("rke1-etcd-node-scale-down-and-up", func() { - ReplaceRKE1Nodes(s.T(), s.client, s.client.RancherConfig.ClusterName, true, false, false) - }) -} -func (s *NodeScaleRKE1DownAndUp) TestWorkerScaleDownAndUp() { - s.Run("rke1-worker-node-scale-down-and-up", func() { - ReplaceRKE1Nodes(s.T(), s.client, s.client.RancherConfig.ClusterName, false, false, true) - }) -} -func (s *NodeScaleRKE1DownAndUp) TestControlPlaneScaleDownAndUp() { - s.Run("rke1-controlplane-node-scale-down-and-up", func() { - ReplaceRKE1Nodes(s.T(), s.client, s.client.RancherConfig.ClusterName, false, true, false) - }) +func (s *RKE1NodeReplacingTestSuite) TestReplacingRKE1Nodes() { + nodeRolesEtcd := nodepools.NodeRoles{ + Etcd: true, + ControlPlane: false, + Worker: false, + } + + nodeRolesControlPlane := nodepools.NodeRoles{ + Etcd: false, + ControlPlane: true, + Worker: false, + } + + nodeRolesWorker := nodepools.NodeRoles{ + Etcd: false, + ControlPlane: false, + Worker: true, + } + + tests := []struct { + name string + nodeRoles nodepools.NodeRoles + client *rancher.Client + }{ + {"Replacing control plane nodes", nodeRolesControlPlane, s.client}, + {"Replacing etcd nodes", nodeRolesEtcd, s.client}, + {"Replacing worker nodes", nodeRolesWorker, s.client}, + } + + for _, tt := range tests { + s.Run(tt.name, func() { + ReplaceRKE1Nodes(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.nodeRoles.Etcd, tt.nodeRoles.ControlPlane, tt.nodeRoles.Worker) + }) + } } -func TestRKE1NodeScaleDownAndUp(t *testing.T) { - suite.Run(t, new(NodeScaleRKE1DownAndUp)) +func TestRKE1NodeReplacingTestSuite(t *testing.T) { + suite.Run(t, new(RKE1NodeReplacingTestSuite)) } diff --git a/tests/v2/validation/nodescaling/scale_replace_test.go b/tests/v2/validation/nodescaling/scale_replace_test.go index 3608b0c14ad..89d88649dc7 100644 --- a/tests/v2/validation/nodescaling/scale_replace_test.go +++ b/tests/v2/validation/nodescaling/scale_replace_test.go @@ -3,11 +3,14 @@ package nodescaling import ( + "strings" "testing" + apisV1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" "github.com/rancher/shepherd/clients/rancher" + v1 "github.com/rancher/shepherd/clients/rancher/v1" 
"github.com/rancher/shepherd/extensions/clusters" - "github.com/rancher/shepherd/extensions/provisioning" + "github.com/rancher/shepherd/extensions/machinepools" "github.com/rancher/shepherd/extensions/provisioninginput" "github.com/rancher/shepherd/pkg/config" "github.com/rancher/shepherd/pkg/session" @@ -15,7 +18,7 @@ import ( "github.com/stretchr/testify/suite" ) -type NodeScaleDownAndUp struct { +type NodeReplacingTestSuite struct { suite.Suite session *session.Session client *rancher.Client @@ -23,11 +26,11 @@ type NodeScaleDownAndUp struct { clustersConfig *provisioninginput.Config } -func (s *NodeScaleDownAndUp) TearDownSuite() { +func (s *NodeReplacingTestSuite) TearDownSuite() { s.session.Cleanup() } -func (s *NodeScaleDownAndUp) SetupSuite() { +func (s *NodeReplacingTestSuite) SetupSuite() { testSession := session.NewSession() s.session = testSession @@ -42,34 +45,58 @@ func (s *NodeScaleDownAndUp) SetupSuite() { s.client = client } -func (s *NodeScaleDownAndUp) TestEtcdScaleDownAndUp() { - s.Run("etcd-node-scale-down-and-up", func() { - ReplaceNodes(s.T(), s.client, s.client.RancherConfig.ClusterName, true, false, false) - }) -} - -func (s *NodeScaleDownAndUp) TestControlPlaneScaleDownAndUp() { - s.Run("controlplane-node-scale-down-and-up", func() { - ReplaceNodes(s.T(), s.client, s.client.RancherConfig.ClusterName, false, true, false) - }) -} +func (s *NodeReplacingTestSuite) TestReplacingNodes() { + nodeRolesEtcd := machinepools.NodeRoles{ + Etcd: true, + ControlPlane: false, + Worker: false, + } + + nodeRolesControlPlane := machinepools.NodeRoles{ + Etcd: false, + ControlPlane: true, + Worker: false, + } + + nodeRolesWorker := machinepools.NodeRoles{ + Etcd: false, + ControlPlane: false, + Worker: true, + } + + tests := []struct { + name string + nodeRoles machinepools.NodeRoles + client *rancher.Client + }{ + {"control plane nodes", nodeRolesControlPlane, s.client}, + {"etcd nodes", nodeRolesEtcd, s.client}, + {"worker nodes", nodeRolesWorker, s.client}, + } + + for _, tt := range tests { + clusterID, err := clusters.GetV1ProvisioningClusterByName(s.client, s.client.RancherConfig.ClusterName) + require.NoError(s.T(), err) -func (s *NodeScaleDownAndUp) TestWorkerScaleDownAndUp() { - s.Run("worker-node-scale-down-and-up", func() { - ReplaceNodes(s.T(), s.client, s.client.RancherConfig.ClusterName, false, false, true) - }) -} + cluster, err := tt.client.Steve.SteveType(ProvisioningSteveResourceType).ByID(clusterID) + require.NoError(s.T(), err) -func (s *NodeScaleDownAndUp) TestValidate() { - s.Run("rke2-validate", func() { - _, stevecluster, err := clusters.GetProvisioningClusterByName(s.client, s.client.RancherConfig.ClusterName, provisioninginput.Namespace) + updatedCluster := new(apisV1.Cluster) + err = v1.ConvertToK8sType(cluster, &updatedCluster) require.NoError(s.T(), err) - clusterConfig := clusters.ConvertConfigToClusterConfig(s.clustersConfig) - provisioning.VerifyCluster(s.T(), s.client, clusterConfig, stevecluster) - }) + if strings.Contains(updatedCluster.Spec.KubernetesVersion, "rke2") { + tt.name = "Replacing RKE2 " + tt.name + } else { + tt.name = "Replacing K3S " + tt.name + } + + s.Run(tt.name, func() { + ReplaceNodes(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.nodeRoles.Etcd, tt.nodeRoles.ControlPlane, tt.nodeRoles.Worker) + }) + } } -func TestScaleDownAndUp(t *testing.T) { - suite.Run(t, new(NodeScaleDownAndUp)) +func TestNodeReplacingTestSuite(t *testing.T) { + suite.Run(t, new(NodeReplacingTestSuite)) } diff --git 
a/tests/v2/validation/nodescaling/scaling_custom_cluster_rke1_test.go b/tests/v2/validation/nodescaling/scaling_custom_cluster_rke1_test.go index 6115f95d112..38ca0707897 100644 --- a/tests/v2/validation/nodescaling/scaling_custom_cluster_rke1_test.go +++ b/tests/v2/validation/nodescaling/scaling_custom_cluster_rke1_test.go @@ -71,11 +71,11 @@ func (s *RKE1CustomClusterNodeScalingTestSuite) TestScalingRKE1CustomClusterNode nodeRoles nodepools.NodeRoles client *rancher.Client }{ - {"Scaling custom RKE1 control plane by 1", nodeRolesControlPlane, s.client}, - {"Scaling custom RKE1 etcd by 1", nodeRolesEtcd, s.client}, - {"Scaling custom RKE1 etcd and control plane by 1", nodeRolesEtcdControlPlane, s.client}, - {"Scaling custom RKE1 worker by 1", nodeRolesWorker, s.client}, - {"Scaling custom RKE1 worker by 2", nodeRolesTwoWorkers, s.client}, + {"Scaling custom control plane by 1", nodeRolesControlPlane, s.client}, + {"Scaling custom etcd by 1", nodeRolesEtcd, s.client}, + {"Scaling custom etcd and control plane by 1", nodeRolesEtcdControlPlane, s.client}, + {"Scaling custom worker by 1", nodeRolesWorker, s.client}, + {"Scaling custom worker by 2", nodeRolesTwoWorkers, s.client}, } for _, tt := range tests { diff --git a/tests/v2/validation/nodescaling/scaling_node_driver_rke1_test.go b/tests/v2/validation/nodescaling/scaling_node_driver_rke1_test.go index 9e92c5abdb7..1ebfe67da36 100644 --- a/tests/v2/validation/nodescaling/scaling_node_driver_rke1_test.go +++ b/tests/v2/validation/nodescaling/scaling_node_driver_rke1_test.go @@ -65,10 +65,10 @@ func (s *RKE1NodeScalingTestSuite) TestScalingRKE1NodePools() { nodeRoles nodepools.NodeRoles client *rancher.Client }{ - {"Scaling RKE1 control plane by 1", nodeRolesControlPlane, s.client}, - {"Scaling RKE1 etcd node by 1", nodeRolesEtcd, s.client}, - {"Scaling RKE1 worker by 1", nodeRolesWorker, s.client}, - {"Scaling RKE1 worker node machine by 2", nodeRolesTwoWorkers, s.client}, + {"Scaling control plane by 1", nodeRolesControlPlane, s.client}, + {"Scaling etcd node by 1", nodeRolesEtcd, s.client}, + {"Scaling worker by 1", nodeRolesWorker, s.client}, + {"Scaling worker node machine by 2", nodeRolesTwoWorkers, s.client}, } for _, tt := range tests { diff --git a/tests/v2/validation/nodescaling/scaling_nodepools.go b/tests/v2/validation/nodescaling/scaling_nodepools.go index e42d16ed804..8c3a67d156c 100644 --- a/tests/v2/validation/nodescaling/scaling_nodepools.go +++ b/tests/v2/validation/nodescaling/scaling_nodepools.go @@ -30,10 +30,13 @@ func scalingRKE2K3SNodePools(t *testing.T, client *rancher.Client, clusterID str clusterResp, err := machinepools.ScaleMachinePoolNodes(client, cluster, nodeRoles) require.NoError(t, err) - pods.VerifyReadyDaemonsetPods(t, client, cluster) + pods.VerifyReadyDaemonsetPods(t, client, clusterResp) + + updatedCluster, err := client.Steve.SteveType(ProvisioningSteveResourceType).ByID(clusterID) + require.NoError(t, err) nodeRoles.Quantity = -nodeRoles.Quantity - scaledClusterResp, err := machinepools.ScaleMachinePoolNodes(client, clusterResp, nodeRoles) + scaledClusterResp, err := machinepools.ScaleMachinePoolNodes(client, updatedCluster, nodeRoles) require.NoError(t, err) pods.VerifyReadyDaemonsetPods(t, client, scaledClusterResp) diff --git a/tests/v2/validation/provisioning/rke2/custom_cluster_test.go b/tests/v2/validation/provisioning/rke2/custom_cluster_test.go index ecef846c6af..cc15725d7bc 100644 --- a/tests/v2/validation/provisioning/rke2/custom_cluster_test.go +++ 
b/tests/v2/validation/provisioning/rke2/custom_cluster_test.go @@ -122,10 +122,10 @@ func (c *CustomClusterProvisioningTestSuite) TestProvisioningRKE2CustomCluster() {"2 nodes - etcd|cp roles per 1 node " + provisioninginput.StandardClientName.String(), c.standardUserClient, nodeRolesShared, false, c.client.Flags.GetValue(environmentflag.Short) || c.client.Flags.GetValue(environmentflag.Long)}, {"3 nodes - 1 role per node " + provisioninginput.AdminClientName.String(), c.client, nodeRolesDedicated, false, c.client.Flags.GetValue(environmentflag.Long)}, {"3 nodes - 1 role per node " + provisioninginput.StandardClientName.String(), c.standardUserClient, nodeRolesDedicated, false, c.client.Flags.GetValue(environmentflag.Long)}, - {"4 nodes - 1 role per node + 1 windows worker" + provisioninginput.AdminClientName.String(), c.client, nodeRolesDedicatedWindows, true, c.client.Flags.GetValue(environmentflag.Long)}, - {"4 nodes - 1 role per node + 1 windows worker" + provisioninginput.StandardClientName.String(), c.standardUserClient, nodeRolesDedicatedWindows, true, c.client.Flags.GetValue(environmentflag.Long)}, - {"5 nodes - 1 role per node + 2 windows workers" + provisioninginput.AdminClientName.String(), c.client, nodeRolesDedicatedTwoWindows, true, c.client.Flags.GetValue(environmentflag.Long)}, - {"5 nodes - 1 role per node + 2 windows workers" + provisioninginput.StandardClientName.String(), c.standardUserClient, nodeRolesDedicatedTwoWindows, true, c.client.Flags.GetValue(environmentflag.Long)}, + {"4 nodes - 1 role per node + 1 windows worker " + provisioninginput.AdminClientName.String(), c.client, nodeRolesDedicatedWindows, true, c.client.Flags.GetValue(environmentflag.Long)}, + {"4 nodes - 1 role per node + 1 windows worker " + provisioninginput.StandardClientName.String(), c.standardUserClient, nodeRolesDedicatedWindows, true, c.client.Flags.GetValue(environmentflag.Long)}, + {"5 nodes - 1 role per node + 2 windows workers " + provisioninginput.AdminClientName.String(), c.client, nodeRolesDedicatedTwoWindows, true, c.client.Flags.GetValue(environmentflag.Long)}, + {"5 nodes - 1 role per node + 2 windows workers " + provisioninginput.StandardClientName.String(), c.standardUserClient, nodeRolesDedicatedTwoWindows, true, c.client.Flags.GetValue(environmentflag.Long)}, } for _, tt := range tests { if !tt.runFlag { diff --git a/tests/v2/validation/snapshot/snapshot_additional_test.go b/tests/v2/validation/snapshot/snapshot_additional_test.go index cb6c5a39eec..a3d2211a84e 100644 --- a/tests/v2/validation/snapshot/snapshot_additional_test.go +++ b/tests/v2/validation/snapshot/snapshot_additional_test.go @@ -3,9 +3,13 @@ package snapshot import ( + "strings" "testing" + apisV1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" "github.com/rancher/shepherd/clients/rancher" + v1 "github.com/rancher/shepherd/clients/rancher/v1" + "github.com/rancher/shepherd/extensions/clusters" "github.com/rancher/shepherd/extensions/etcdsnapshot" "github.com/rancher/shepherd/pkg/config" @@ -72,6 +76,24 @@ func (s *SnapshotAdditionalTestsTestSuite) TestSnapshotReplaceWorkerNode() { } for _, tt := range tests { + clusterID, err := clusters.GetV1ProvisioningClusterByName(s.client, s.client.RancherConfig.ClusterName) + require.NoError(s.T(), err) + + cluster, err := tt.client.Steve.SteveType(clusters.ProvisioningSteveResourceType).ByID(clusterID) + require.NoError(s.T(), err) + + updatedCluster := new(apisV1.Cluster) + err = v1.ConvertToK8sType(cluster, &updatedCluster) + require.NoError(s.T(), err) 
+ + if strings.Contains(updatedCluster.Spec.KubernetesVersion, "rke2") { + tt.name = "RKE2 " + tt.name + } else if strings.Contains(updatedCluster.Spec.KubernetesVersion, "k3s") { + tt.name = "K3S " + tt.name + } else { + tt.name = "RKE1 " + tt.name + } + s.Run(tt.name, func() { snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.etcdSnapshot) }) @@ -95,6 +117,24 @@ func (s *SnapshotAdditionalTestsTestSuite) TestSnapshotRecurringRestores() { } for _, tt := range tests { + clusterID, err := clusters.GetV1ProvisioningClusterByName(s.client, s.client.RancherConfig.ClusterName) + require.NoError(s.T(), err) + + cluster, err := tt.client.Steve.SteveType(clusters.ProvisioningSteveResourceType).ByID(clusterID) + require.NoError(s.T(), err) + + updatedCluster := new(apisV1.Cluster) + err = v1.ConvertToK8sType(cluster, &updatedCluster) + require.NoError(s.T(), err) + + if strings.Contains(updatedCluster.Spec.KubernetesVersion, "rke2") { + tt.name = "RKE2 " + tt.name + } else if strings.Contains(updatedCluster.Spec.KubernetesVersion, "k3s") { + tt.name = "K3S " + tt.name + } else { + tt.name = "RKE1 " + tt.name + } + s.Run(tt.name, func() { snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.etcdSnapshot) }) diff --git a/tests/v2/validation/snapshot/snapshot_restore_k8s_upgrade_test.go b/tests/v2/validation/snapshot/snapshot_restore_k8s_upgrade_test.go index 0bed7907ce0..af9117160f5 100644 --- a/tests/v2/validation/snapshot/snapshot_restore_k8s_upgrade_test.go +++ b/tests/v2/validation/snapshot/snapshot_restore_k8s_upgrade_test.go @@ -3,9 +3,13 @@ package snapshot import ( + "strings" "testing" + apisV1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" "github.com/rancher/shepherd/clients/rancher" + v1 "github.com/rancher/shepherd/clients/rancher/v1" + "github.com/rancher/shepherd/extensions/clusters" "github.com/rancher/shepherd/extensions/etcdsnapshot" "github.com/rancher/shepherd/pkg/config" @@ -64,6 +68,24 @@ func (s *SnapshotRestoreK8sUpgradeTestSuite) TestSnapshotRestoreK8sUpgrade() { } for _, tt := range tests { + clusterID, err := clusters.GetV1ProvisioningClusterByName(s.client, s.client.RancherConfig.ClusterName) + require.NoError(s.T(), err) + + cluster, err := tt.client.Steve.SteveType(clusters.ProvisioningSteveResourceType).ByID(clusterID) + require.NoError(s.T(), err) + + updatedCluster := new(apisV1.Cluster) + err = v1.ConvertToK8sType(cluster, &updatedCluster) + require.NoError(s.T(), err) + + if strings.Contains(updatedCluster.Spec.KubernetesVersion, "rke2") { + tt.name = "RKE2 " + tt.name + } else if strings.Contains(updatedCluster.Spec.KubernetesVersion, "k3s") { + tt.name = "K3S " + tt.name + } else { + tt.name = "RKE1 " + tt.name + } + s.Run(tt.name, func() { snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.etcdSnapshot) }) diff --git a/tests/v2/validation/snapshot/snapshot_restore_test.go b/tests/v2/validation/snapshot/snapshot_restore_test.go index b822eaf5a24..99f0af57fa9 100644 --- a/tests/v2/validation/snapshot/snapshot_restore_test.go +++ b/tests/v2/validation/snapshot/snapshot_restore_test.go @@ -3,9 +3,13 @@ package snapshot import ( + "strings" "testing" + apisV1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" "github.com/rancher/shepherd/clients/rancher" + v1 "github.com/rancher/shepherd/clients/rancher/v1" + "github.com/rancher/shepherd/extensions/clusters" "github.com/rancher/shepherd/extensions/etcdsnapshot" "github.com/rancher/shepherd/pkg/config" @@ -56,6 +60,24 @@ func (s 
*SnapshotRestoreTestSuite) TestSnapshotRestoreETCDOnly() { } for _, tt := range tests { + clusterID, err := clusters.GetV1ProvisioningClusterByName(s.client, s.client.RancherConfig.ClusterName) + require.NoError(s.T(), err) + + cluster, err := tt.client.Steve.SteveType(clusters.ProvisioningSteveResourceType).ByID(clusterID) + require.NoError(s.T(), err) + + updatedCluster := new(apisV1.Cluster) + err = v1.ConvertToK8sType(cluster, &updatedCluster) + require.NoError(s.T(), err) + + if strings.Contains(updatedCluster.Spec.KubernetesVersion, "rke2") { + tt.name = "RKE2 " + tt.name + } else if strings.Contains(updatedCluster.Spec.KubernetesVersion, "k3s") { + tt.name = "K3S " + tt.name + } else { + tt.name = "RKE1 " + tt.name + } + s.Run(tt.name, func() { snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.etcdSnapshot) }) diff --git a/tests/v2/validation/snapshot/snapshot_restore_upgrade_strategy_test.go b/tests/v2/validation/snapshot/snapshot_restore_upgrade_strategy_test.go index 3dfbc708549..1483f93ca1f 100644 --- a/tests/v2/validation/snapshot/snapshot_restore_upgrade_strategy_test.go +++ b/tests/v2/validation/snapshot/snapshot_restore_upgrade_strategy_test.go @@ -3,9 +3,13 @@ package snapshot import ( + "strings" "testing" + apisV1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" "github.com/rancher/shepherd/clients/rancher" + v1 "github.com/rancher/shepherd/clients/rancher/v1" + "github.com/rancher/shepherd/extensions/clusters" "github.com/rancher/shepherd/extensions/etcdsnapshot" "github.com/rancher/shepherd/pkg/config" @@ -72,6 +76,24 @@ func (s *SnapshotRestoreUpgradeStrategyTestSuite) TestSnapshotRestoreUpgradeStra } for _, tt := range tests { + clusterID, err := clusters.GetV1ProvisioningClusterByName(s.client, s.client.RancherConfig.ClusterName) + require.NoError(s.T(), err) + + cluster, err := tt.client.Steve.SteveType(clusters.ProvisioningSteveResourceType).ByID(clusterID) + require.NoError(s.T(), err) + + updatedCluster := new(apisV1.Cluster) + err = v1.ConvertToK8sType(cluster, &updatedCluster) + require.NoError(s.T(), err) + + if strings.Contains(updatedCluster.Spec.KubernetesVersion, "rke2") { + tt.name = "RKE2 " + tt.name + } else if strings.Contains(updatedCluster.Spec.KubernetesVersion, "k3s") { + tt.name = "K3S " + tt.name + } else { + tt.name = "RKE1 " + tt.name + } + s.Run(tt.name, func() { snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.etcdSnapshot) })
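The delete, node-replacement, and snapshot suites in this change all repeat the same inline steps to detect the downstream cluster's Kubernetes distribution: look up the provisioning cluster by name, fetch the Steve object, convert it with `v1.ConvertToK8sType`, and check `Spec.KubernetesVersion` for an `rke2` or `k3s` substring before prefixing the subtest name. Below is a minimal sketch of how that logic could be factored into one shared helper; it is not part of this diff, the function name and package placement are hypothetical, and it assumes only the shepherd calls already used in the hunks above.

```go
// Hypothetical helper (not part of this change); shown here in the snapshot
// package purely for illustration.
package snapshot

import (
	"strings"

	apisV1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1"
	"github.com/rancher/shepherd/clients/rancher"
	v1 "github.com/rancher/shepherd/clients/rancher/v1"
	"github.com/rancher/shepherd/extensions/clusters"
)

// kubernetesDistroPrefix looks up the downstream cluster by name, converts the
// Steve object to a provisioning.cattle.io/v1 Cluster, and returns "RKE2",
// "K3S", or "RKE1" based on Spec.KubernetesVersion, mirroring the inline
// checks repeated in the test suites above.
func kubernetesDistroPrefix(client *rancher.Client, clusterName string) (string, error) {
	clusterID, err := clusters.GetV1ProvisioningClusterByName(client, clusterName)
	if err != nil {
		return "", err
	}

	cluster, err := client.Steve.SteveType(clusters.ProvisioningSteveResourceType).ByID(clusterID)
	if err != nil {
		return "", err
	}

	// Same conversion pattern as the suites above.
	updatedCluster := new(apisV1.Cluster)
	if err := v1.ConvertToK8sType(cluster, &updatedCluster); err != nil {
		return "", err
	}

	switch {
	case strings.Contains(updatedCluster.Spec.KubernetesVersion, "rke2"):
		return "RKE2", nil
	case strings.Contains(updatedCluster.Spec.KubernetesVersion, "k3s"):
		return "K3S", nil
	default:
		return "RKE1", nil
	}
}
```

A suite could then build its subtest names with something like `prefix, err := kubernetesDistroPrefix(s.client, s.client.RancherConfig.ClusterName)` followed by `s.Run(prefix+" "+tt.name, ...)`, matching the naming convention used in the hunks above.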