Merge pull request rancher#45039 from markusewalker/markusv28/fix-release-issues

[v2.8] Fix flaky nodescaling test cases + specify cluster types
markusewalker authored Apr 8, 2024
2 parents 2fe170c + 3bf3d13 commit 90ba059
Showing 15 changed files with 305 additions and 114 deletions.
2 changes: 1 addition & 1 deletion go.mod
@@ -166,7 +166,7 @@ require (
github.com/containers/image/v5 v5.25.0
github.com/google/gnostic-models v0.6.8
github.com/rancher/rancher/pkg/apis v0.0.0-20240213233515-935d309ebad4
github.com/rancher/shepherd v0.0.0-20240405212128-578908d4308a
github.com/rancher/shepherd v0.0.0-20240408151625-d0c3b8dbe5dd
go.qase.io/client v0.0.0-20231114201952-65195ec001fa
)

4 changes: 2 additions & 2 deletions go.sum
@@ -1641,8 +1641,8 @@ github.com/rancher/remotedialer v0.3.0 h1:y1EO8JCsgZo0RcqTUp6U8FXcBAv27R+TLnWRcp
github.com/rancher/remotedialer v0.3.0/go.mod h1:BwwztuvViX2JrLLUwDlsYt5DiyUwHLlzynRwkZLAY0Q=
github.com/rancher/rke v1.5.7 h1:pCVziDwgulQc2WgRkisY6sEo3DFGgu1StE66UbkuF2c=
github.com/rancher/rke v1.5.7/go.mod h1:vojhOf8U8VCmw7y17OENWXSIfEFPEbXCMQcmI7xN7i8=
github.com/rancher/shepherd v0.0.0-20240405212128-578908d4308a h1:VV4AyNyCQrkPtvSVa1rQL69+A/gyWTYFgWSkjuR4JGQ=
github.com/rancher/shepherd v0.0.0-20240405212128-578908d4308a/go.mod h1:LNI7nH1BptYMvJmuqsLgmkMytGBBTpW4jk4vAHCxfF4=
github.com/rancher/shepherd v0.0.0-20240408151625-d0c3b8dbe5dd h1:Nog4ViMD04zg6GP+5IvsthOIWqps7m6mdMnhIwWkPxA=
github.com/rancher/shepherd v0.0.0-20240408151625-d0c3b8dbe5dd/go.mod h1:LNI7nH1BptYMvJmuqsLgmkMytGBBTpW4jk4vAHCxfF4=
github.com/rancher/steve v0.0.0-20240305150728-3943409601f1 h1:6wNYy3q9jget45syTN6K2uOLSYaptLYCHscY2WRmhDI=
github.com/rancher/steve v0.0.0-20240305150728-3943409601f1/go.mod h1:o4vLBzMTKbHHhIiAcbgOiaN3aK1vIjL6ZTgaGxQYpsY=
github.com/rancher/system-upgrade-controller/pkg/apis v0.0.0-20210727200656-10b094e30007 h1:ru+mqGnxMmKeU0Q3XIDxkARvInDIqT1hH2amTcsjxI4=
19 changes: 14 additions & 5 deletions tests/v2/validation/deleting/delete_cluster_rke1_test.go
@@ -34,11 +34,20 @@ func (c *RKE1ClusterDeleteTestSuite) SetupSuite() {
}

func (c *RKE1ClusterDeleteTestSuite) TestDeletingRKE1Cluster() {
clusterID, err := clusters.GetClusterIDByName(c.client, c.client.RancherConfig.ClusterName)
require.NoError(c.T(), err)

clusters.DeleteRKE1Cluster(c.client, clusterID)
provisioning.VerifyDeleteRKE1Cluster(c.T(), c.client, clusterID)
tests := []struct {
name string
client *rancher.Client
}{
{"Deleting cluster", c.client},
}

for _, tt := range tests {
clusterID, err := clusters.GetClusterIDByName(c.client, c.client.RancherConfig.ClusterName)
require.NoError(c.T(), err)

clusters.DeleteRKE1Cluster(tt.client, clusterID)
provisioning.VerifyDeleteRKE1Cluster(c.T(), tt.client, clusterID)
}
}

// In order for 'go test' to run this suite, we need to create
35 changes: 31 additions & 4 deletions tests/v2/validation/deleting/delete_cluster_test.go
@@ -3,9 +3,12 @@
package deleting

import (
"strings"
"testing"

apisV1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1"
"github.com/rancher/shepherd/clients/rancher"
v1 "github.com/rancher/shepherd/clients/rancher/v1"
"github.com/rancher/shepherd/extensions/clusters"
"github.com/rancher/shepherd/extensions/provisioning"
"github.com/rancher/shepherd/pkg/session"
Expand Down Expand Up @@ -34,11 +37,35 @@ func (c *ClusterDeleteTestSuite) SetupSuite() {
}

func (c *ClusterDeleteTestSuite) TestDeletingCluster() {
clusterID, err := clusters.GetV1ProvisioningClusterByName(c.client, c.client.RancherConfig.ClusterName)
require.NoError(c.T(), err)
tests := []struct {
name string
client *rancher.Client
}{
{"cluster", c.client},
}

for _, tt := range tests {
clusterID, err := clusters.GetV1ProvisioningClusterByName(c.client, c.client.RancherConfig.ClusterName)
require.NoError(c.T(), err)

cluster, err := tt.client.Steve.SteveType(clusters.ProvisioningSteveResourceType).ByID(clusterID)
require.NoError(c.T(), err)

updatedCluster := new(apisV1.Cluster)
err = v1.ConvertToK8sType(cluster, &updatedCluster)
require.NoError(c.T(), err)

if strings.Contains(updatedCluster.Spec.KubernetesVersion, "rke2") {
tt.name = "Deleting RKE2 " + tt.name
} else {
tt.name = "Deleting K3S " + tt.name
}

clusters.DeleteK3SRKE2Cluster(c.client, clusterID)
provisioning.VerifyDeleteRKE2K3SCluster(c.T(), c.client, clusterID)
c.Run(tt.name, func() {
clusters.DeleteK3SRKE2Cluster(tt.client, clusterID)
provisioning.VerifyDeleteRKE2K3SCluster(c.T(), tt.client, clusterID)
})
}
}

// In order for 'go test' to run this suite, we need to create
78 changes: 38 additions & 40 deletions tests/v2/validation/nodescaling/README.md
@@ -21,50 +21,42 @@ rancher:
```
## Node Replacing
Node replacement tests require that the given pools have unique, distinct roles and more than 1 node per pool. You can run a subset of the tests, but you still need more than 1 node for the role you want to test; for example, `-run ^TestScaleDownAndUp/TestWorkerScaleDownAndUp$` requires at least 1 pool with 2 or more dedicated workers in it. The last node in the pool will be replaced.

Typically, a cluster with the following 3 pools is used for testing:
```yaml
{
  {
    ControlPlane: true,
    Quantity: 2,
  },
  {
    Etcd: true,
    Quantity: 3,
  },
  {
    Worker: true,
    Quantity: 2,
  },
}
```

```yaml
provisioningInput:
  nodePools: # nodePools is specific for RKE1 clusters.
  - nodeRoles:
      etcd: true
      quantity: 3
  - nodeRoles:
      controlplane: true
      quantity: 2
  - nodeRoles:
      worker: true
      quantity: 3
  machinePools: # machinePools is specific for RKE2/K3s clusters.
  - machinePoolConfig:
      etcd: true
      quantity: 3
  - machinePoolConfig:
      controlplane: true
      quantity: 2
  - machinePoolConfig:
      worker: true
      quantity: 3
```
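For context on how the suites consume this block: each suite's `SetupSuite` loads `provisioningInput` through shepherd's config loader and then creates a Rancher client. The sketch below only illustrates that pattern — it assumes shepherd's `config.LoadConfig`, `provisioninginput.ConfigurationFileKey`, and `rancher.NewClient` helpers behave as they do in the other validation suites, and it is not a copy of the `SetupSuite` bodies touched by this PR.
```go
package nodescaling

import (
	"testing"

	"github.com/rancher/shepherd/clients/rancher"
	"github.com/rancher/shepherd/extensions/provisioninginput"
	"github.com/rancher/shepherd/pkg/config"
	"github.com/rancher/shepherd/pkg/session"
	"github.com/stretchr/testify/require"
)

// setupScalingClient sketches the SetupSuite pattern used by these suites:
// read the provisioningInput block (nodePools/machinePools shown above) from
// the test config file, then build a Rancher client bound to a test session.
func setupScalingClient(t *testing.T) (*rancher.Client, *provisioninginput.Config) {
	clustersConfig := new(provisioninginput.Config)
	config.LoadConfig(provisioninginput.ConfigurationFileKey, clustersConfig)

	testSession := session.NewSession()
	client, err := rancher.NewClient("", testSession)
	require.NoError(t, err)

	return client, clustersConfig
}
```
The table-driven suites in this PR (for example `TestReplacingRKE1Nodes` further down in the diff) build their role tables on top of a client created this way.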
These tests utilize Go build tags, so see the examples below for how to run them:
### RKE1
`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE1NodeScaleDownAndUp/TestEtcdScaleDownAndUp"` \
`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE1NodeScaleDownAndUp/TestControlPlaneScaleDownAndUp"` \
`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE1NodeScaleDownAndUp/TestWorkerScaleDownAndUp"`
`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE1NodeReplacingTestSuite/TestReplacingRKE1Nodes"`

### RKE2 | K3S
`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestNodeScaleDownAndUp/TestEtcdScaleDownAndUp"` \
`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestNodeScaleDownAndUp/TestControlPlaneScaleDownAndUp"` \
`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestNodeScaleDownAndUp/TestWorkerScaleDownAndUp"`
`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestNodeReplacingTestSuite/TestReplacingNodes"`

## Scaling Existing Node Pools
Similar to the `provisioning` tests, the node scaling tests have static test cases as well as dynamicInput tests you can specify. In order to run the dynamicInput tests, you will need to define the `scalingInput` block in your config file. This block defines the quantity you would like the pool to be scaled up/down to. See an example below that accounts for node drivers, custom clusters and hosted clusters:
```yaml
scalingInput:
  nodePools:
    nodeRoles:
      worker: true
      quantity: 2
  machinePools:
    nodeRoles:
      etcd: true
      quantity: 1
```

```yaml
scalingInput:
  nodeProvider: "ec2"
  nodePools:
@@ -76,23 +68,29 @@ scalingInput:
      etcd: true
      quantity: 1
  aksNodePool:
    nodeCount: 1
    nodeCount: 3
  eksNodePool:
    desiredSize: 1
    desiredSize: 6
  gkeNodePool:
    initialNodeCount: 1
    initialNodeCount: 3
```
NOTE: When scaling AKS and EKS, you will need to make sure that the `maxCount` and `maxSize` parameter is greater than the desired scale amount, respectively. For example, if you wish to have 6 total EKS nodes, then the `maxSize` parameter needs to be at least 7. This is not a limitation of the automation, but rather how EKS specifically handles nodegroups.

Additionally, for AKS, you must have `enableAutoScaling` set to true if you specify `maxCount` and `minCount`.
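The two notes above boil down to a simple invariant you can check before kicking off a hosted-cluster scaling run. The snippet below is purely illustrative — the structs and the `validateScaleTargets` helper are hypothetical stand-ins whose fields mirror the YAML keys above (`desiredSize`, `maxSize`, `nodeCount`, `minCount`, `maxCount`, `enableAutoScaling`); they are not shepherd's actual config types.
```go
package nodescaling

import "fmt"

// eksNodeGroupScale is a hypothetical mirror of the eksNodePool keys above.
type eksNodeGroupScale struct {
	DesiredSize int64 // desiredSize: total nodes wanted after scaling
	MaxSize     int64 // maxSize: upper bound on the EKS nodegroup
}

// aksNodePoolScale is a hypothetical mirror of the aksNodePool keys above.
type aksNodePoolScale struct {
	NodeCount         int64 // nodeCount: total nodes wanted after scaling
	MinCount          int64 // minCount
	MaxCount          int64 // maxCount
	EnableAutoScaling bool  // enableAutoScaling
}

// validateScaleTargets encodes the notes above: the pool's upper bound must be
// strictly greater than the desired node count (6 nodes need maxSize of at
// least 7), and AKS requires enableAutoScaling whenever minCount/maxCount are
// supplied.
func validateScaleTargets(eks eksNodeGroupScale, aks aksNodePoolScale) error {
	if eks.MaxSize <= eks.DesiredSize {
		return fmt.Errorf("EKS maxSize (%d) must be greater than desiredSize (%d)", eks.MaxSize, eks.DesiredSize)
	}
	if aks.MaxCount > 0 && aks.MaxCount <= aks.NodeCount {
		return fmt.Errorf("AKS maxCount (%d) must be greater than nodeCount (%d)", aks.MaxCount, aks.NodeCount)
	}
	if (aks.MinCount > 0 || aks.MaxCount > 0) && !aks.EnableAutoScaling {
		return fmt.Errorf("AKS enableAutoScaling must be true when minCount/maxCount are set")
	}
	return nil
}
```
Running a check like this (or eyeballing the same fields in your config) before the AKS/EKS suites helps surface configuration mistakes before a long-running scale test fails partway through.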

These tests utilize Go build tags, so see the examples below for how to run them:

### RKE1
`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE1NodeScalingTestSuite/TestScalingRKE1NodePools"` \
`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE1NodeScalingTestSuite/TestScalingRKE1NodePoolsDynamicInput"` \
`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE1CustomClusterNodeScalingTestSuite/TestScalingRKE1CustomClusterNodes"` \
`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE1CustomClusterNodeScalingTestSuite/TestScalingRKE1CustomClusterNodesDynamicInput"`

### RKE2 | K3S
`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE2NodeScalingTestSuite/TestRKE2NodeScalingTestSuite"` \
`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestRKE2NodeScalingTestSuite/TestScalingRKE2NodePoolsDynamicInput"`
`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestNodeScalingTestSuite/TestScalingNodePools"` \
`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestNodeScalingTestSuite/TestScalingNodePoolsDynamicInput"` \
`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestCustomClusterNodeScalingTestSuite/TestScalingCustomClusterNodes"` \
`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestCustomClusterNodeScalingTestSuite/TestScalingCustomClusterNodesDynamicInput"`

### AKS
`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/nodescaling --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestAKSNodeScalingTestSuite/TestScalingAKSNodePools"` \
59 changes: 40 additions & 19 deletions tests/v2/validation/nodescaling/scale_replace_rke1_test.go
@@ -7,25 +7,26 @@ import (

"github.com/rancher/shepherd/clients/rancher"
"github.com/rancher/shepherd/extensions/provisioninginput"
nodepools "github.com/rancher/shepherd/extensions/rke1/nodepools"
"github.com/rancher/shepherd/pkg/config"
"github.com/rancher/shepherd/pkg/session"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)

type NodeScaleRKE1DownAndUp struct {
type RKE1NodeReplacingTestSuite struct {
suite.Suite
session *session.Session
client *rancher.Client
ns string
clustersConfig *provisioninginput.Config
}

func (s *NodeScaleRKE1DownAndUp) TearDownSuite() {
func (s *RKE1NodeReplacingTestSuite) TearDownSuite() {
s.session.Cleanup()
}

func (s *NodeScaleRKE1DownAndUp) SetupSuite() {
func (s *RKE1NodeReplacingTestSuite) SetupSuite() {
testSession := session.NewSession()
s.session = testSession

@@ -40,22 +41,42 @@ func (s *NodeScaleRKE1DownAndUp) SetupSuite() {
s.client = client
}

func (s *NodeScaleRKE1DownAndUp) TestEtcdScaleDownAndUp() {
s.Run("rke1-etcd-node-scale-down-and-up", func() {
ReplaceRKE1Nodes(s.T(), s.client, s.client.RancherConfig.ClusterName, true, false, false)
})
}
func (s *NodeScaleRKE1DownAndUp) TestWorkerScaleDownAndUp() {
s.Run("rke1-worker-node-scale-down-and-up", func() {
ReplaceRKE1Nodes(s.T(), s.client, s.client.RancherConfig.ClusterName, false, false, true)
})
}
func (s *NodeScaleRKE1DownAndUp) TestControlPlaneScaleDownAndUp() {
s.Run("rke1-controlplane-node-scale-down-and-up", func() {
ReplaceRKE1Nodes(s.T(), s.client, s.client.RancherConfig.ClusterName, false, true, false)
})
func (s *RKE1NodeReplacingTestSuite) TestReplacingRKE1Nodes() {
nodeRolesEtcd := nodepools.NodeRoles{
Etcd: true,
ControlPlane: false,
Worker: false,
}

nodeRolesControlPlane := nodepools.NodeRoles{
Etcd: false,
ControlPlane: true,
Worker: false,
}

nodeRolesWorker := nodepools.NodeRoles{
Etcd: false,
ControlPlane: false,
Worker: true,
}

tests := []struct {
name string
nodeRoles nodepools.NodeRoles
client *rancher.Client
}{
{"Replacing control plane nodes", nodeRolesControlPlane, s.client},
{"Replacing etcd nodes", nodeRolesEtcd, s.client},
{"Replacing worker nodes", nodeRolesWorker, s.client},
}

for _, tt := range tests {
s.Run(tt.name, func() {
ReplaceRKE1Nodes(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.nodeRoles.Etcd, tt.nodeRoles.ControlPlane, tt.nodeRoles.Worker)
})
}
}

func TestRKE1NodeScaleDownAndUp(t *testing.T) {
suite.Run(t, new(NodeScaleRKE1DownAndUp))
func TestRKE1NodeReplacingTestSuite(t *testing.T) {
suite.Run(t, new(RKE1NodeReplacingTestSuite))
}