diff --git a/tests/e2e/dualstack/Vagrantfile b/tests/e2e/dualstack/Vagrantfile
index b81b06c263c3..b623937b6fc6 100644
--- a/tests/e2e/dualstack/Vagrantfile
+++ b/tests/e2e/dualstack/Vagrantfile
@@ -5,15 +5,16 @@ NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
   ['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
 GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
 RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
+GOCOVER = (ENV['E2E_GOCOVER'] || "")
 NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
 NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
 NETWORK4_PREFIX = "10.10.10"
 NETWORK6_PREFIX = "fd11:decf:c0ff:ee"
 install_type = ""
 
-def provision(vm, roles, role_num, node_num)
+def provision(vm, role, role_num, node_num)
   vm.box = NODE_BOXES[node_num]
-  vm.hostname = "#{roles[0]}-#{role_num}"
+  vm.hostname = role
   node_ip4 = "#{NETWORK4_PREFIX}.#{100+node_num}"
   node_ip6 = "#{NETWORK6_PREFIX}::#{10+node_num}"
   node_ip6_gw = "#{NETWORK6_PREFIX}::1"
@@ -30,13 +31,13 @@ def provision(vm, roles, role_num, node_num)
   load vagrant_defaults
 
   defaultOSConfigure(vm)
-
+  addCoverageDir(vm, role, GOCOVER)
   vm.provision "IPv6 Setup", type: "shell", path: scripts_location +"/ipv6.sh", args: [node_ip4, node_ip6, node_ip6_gw, vm.box.to_s]
 
   install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
   vm.provision "Ping Check", type: "shell", inline: "ping -c 2 k3s.io"
 
-  if roles.include?("server") && role_num == 0
+  if role.include?("server") && role_num == 0
     vm.provision :k3s, run: 'once' do |k3s|
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
       k3s.args = "server "
@@ -53,7 +54,7 @@ def provision(vm, roles, role_num, node_num)
       YAML
       k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
     end
-  elsif roles.include?("server") && role_num != 0
+  elsif role.include?("server") && role_num != 0
     vm.provision :k3s, run: 'once' do |k3s|
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
       k3s.args = "server "
@@ -70,7 +71,7 @@ def provision(vm, roles, role_num, node_num)
       k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
     end
   end
-  if roles.include?("agent")
+  if role.include?("agent")
     vm.provision :k3s, run: 'once' do |k3s|
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
       k3s.args = "agent "
@@ -105,10 +106,9 @@ Vagrant.configure("2") do |config|
   # over the node roles themselves
   NODE_ROLES.length.times do |i|
     name = NODE_ROLES[i]
+    role_num = name.split("-", -1).pop.to_i
     config.vm.define name do |node|
-      roles = name.split("-", -1)
-      role_num = roles.pop.to_i
-      provision(node.vm, roles, role_num, i)
+      provision(node.vm, name, role_num, i)
     end
   end
 end
diff --git a/tests/e2e/dualstack/dualstack_test.go b/tests/e2e/dualstack/dualstack_test.go
index daf8f212581e..36f33904445b 100644
--- a/tests/e2e/dualstack/dualstack_test.go
+++ b/tests/e2e/dualstack/dualstack_test.go
@@ -18,6 +18,7 @@ var serverCount = flag.Int("serverCount", 3, "number of server nodes")
 var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
 var hardened = flag.Bool("hardened", false, "true or false")
 var ci = flag.Bool("ci", false, "running on CI")
+var local = flag.Bool("local", false, "deploy a locally built K3s binary")
 
 func Test_E2EDualStack(t *testing.T) {
   flag.Parse()
@@ -38,7 +39,11 @@ var _ = Describe("Verify DualStack Configuration", Ordered, func() {
 
   It("Starts up with no issues", func() {
     var err error
-    serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
+    if *local {
+      serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
+    } else {
+      serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
+    }
     Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
     fmt.Println("CLUSTER CONFIG")
     fmt.Println("OS:", *nodeOS)
@@ -193,6 +198,7 @@ var _ = AfterSuite(func() {
   if failed && !*ci {
     fmt.Println("FAILED!")
   } else {
+    Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
     Expect(e2e.DestroyCluster()).To(Succeed())
     Expect(os.Remove(kubeConfigFile)).To(Succeed())
   }
diff --git a/tests/e2e/externalip/Vagrantfile b/tests/e2e/externalip/Vagrantfile
index 7c2e5d64994a..d10809157778 100644
--- a/tests/e2e/externalip/Vagrantfile
+++ b/tests/e2e/externalip/Vagrantfile
@@ -5,15 +5,16 @@ NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
   ['generic/ubuntu2004', 'generic/ubuntu2004'])
 GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
 RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
+GOCOVER = (ENV['E2E_GOCOVER'] || "")
 NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
 NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
 NETWORK4_PREFIX = "10.10.10"
 PUBLIC_NETWORK4_PREFIX = "10.100.100"
 install_type = ""
 
-def provision(vm, roles, role_num, node_num)
+def provision(vm, role, role_num, node_num)
   vm.box = NODE_BOXES[node_num]
-  vm.hostname = "#{roles[0]}-#{role_num}"
+  vm.hostname = role
   node_ip4 = "#{NETWORK4_PREFIX}.#{100+node_num}"
   node_ip4_public = "#{PUBLIC_NETWORK4_PREFIX}.#{100+node_num}"
   vm.network "private_network", :ip => node_ip4, :netmask => "255.255.255.0"
@@ -24,10 +25,10 @@ def provision(vm, roles, role_num, node_num)
   load vagrant_defaults
 
   defaultOSConfigure(vm)
-
+  addCoverageDir(vm, role, GOCOVER)
   install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
 
-  if roles.include?("server") && role_num == 0
+  if role.include?("server") && role_num == 0
     vm.provision :k3s, run: 'once' do |k3s|
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
       k3s.args = "server "
@@ -41,7 +42,7 @@ def provision(vm, roles, role_num, node_num)
       k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
     end
   end
-  if roles.include?("agent")
+  if role.include?("agent")
     vm.provision :k3s, run: 'once' do |k3s|
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
       k3s.args = "agent "
@@ -77,10 +78,9 @@ Vagrant.configure("2") do |config|
   # over the node roles themselves
   NODE_ROLES.length.times do |i|
     name = NODE_ROLES[i]
+    role_num = name.split("-", -1).pop.to_i
     config.vm.define name do |node|
-      roles = name.split("-", -1)
-      role_num = roles.pop.to_i
-      provision(node.vm, roles, role_num, i)
+      provision(node.vm, name, role_num, i)
     end
   end
 end
diff --git a/tests/e2e/externalip/externalip_test.go b/tests/e2e/externalip/externalip_test.go
index d3fccb4da0ff..a376f04e382f 100644
--- a/tests/e2e/externalip/externalip_test.go
+++ b/tests/e2e/externalip/externalip_test.go
@@ -23,6 +23,7 @@ var serverCount = flag.Int("serverCount", 1, "number of server nodes")
 var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
 var hardened = flag.Bool("hardened", false, "true or false")
 var ci = flag.Bool("ci", false, "running on CI")
+var local = flag.Bool("local", false, "deploy a locally built K3s binary")
 
 // getLBServiceIPs returns the externalIP configured for flannel
 func getExternalIPs(kubeConfigFile string) ([]string, error) {
@@ -66,7 +67,11 @@ var _ = Describe("Verify External-IP config", Ordered, func() {
 
   It("Starts up with no issues", func() {
     var err error
-    serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
+    if *local {
+      serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
+    } else {
+      serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
+    }
     Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
     fmt.Println("CLUSTER CONFIG")
     fmt.Println("OS:", *nodeOS)
@@ -163,6 +168,7 @@ var _ = AfterSuite(func() {
   if failed && !*ci {
     fmt.Println("FAILED!")
   } else {
+    Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
     Expect(e2e.DestroyCluster()).To(Succeed())
     Expect(os.Remove(kubeConfigFile)).To(Succeed())
   }
diff --git a/tests/e2e/multiclustercidr/Vagrantfile b/tests/e2e/multiclustercidr/Vagrantfile
index 832a40d81590..16ffa187f6e7 100644
--- a/tests/e2e/multiclustercidr/Vagrantfile
+++ b/tests/e2e/multiclustercidr/Vagrantfile
@@ -5,6 +5,7 @@ NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
   ['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
 GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
 RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
+GOCOVER = (ENV['E2E_GOCOVER'] || "")
 NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
 NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
 IP_FAMILY = (ENV['E2E_IP_FAMILY'] || "ipv4")
@@ -12,9 +13,9 @@ NETWORK4_PREFIX = "10.10.10"
 NETWORK6_PREFIX = "fd11:decf:c0ff:ee"
 install_type = ""
 
-def provision(vm, roles, role_num, node_num)
+def provision(vm, role, role_num, node_num)
   vm.box = NODE_BOXES[node_num]
-  vm.hostname = "#{roles[0]}-#{role_num}"
+  vm.hostname = role
   node_ip4 = "#{NETWORK4_PREFIX}.#{100+node_num}"
   node_ip6 = "#{NETWORK6_PREFIX}::#{10+node_num}"
   node_ip6_gw = "#{NETWORK6_PREFIX}::1"
@@ -31,13 +32,14 @@ def provision(vm, roles, role_num, node_num)
   load vagrant_defaults
 
   defaultOSConfigure(vm)
+  addCoverageDir(vm, role, GOCOVER)
 
   vm.provision "IPv6 Setup", type: "shell", path: scripts_location +"/ipv6.sh", args: [node_ip4, node_ip6, node_ip6_gw, vm.box.to_s]
 
   install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
   vm.provision "Ping Check", type: "shell", inline: "ping -c 2 k3s.io"
 
-  if roles.include?("server") && role_num == 0
+  if role.include?("server") && role_num == 0
     vm.provision :k3s, run: 'once' do |k3s|
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
       k3s.args = "server "
@@ -69,7 +71,7 @@ def provision(vm, roles, role_num, node_num)
       end
       k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
     end
-  elsif roles.include?("server") && role_num != 0
+  elsif role.include?("server") && role_num != 0
     vm.provision :k3s, run: 'once' do |k3s|
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
       k3s.args = "server "
@@ -100,7 +102,7 @@ def provision(vm, roles, role_num, node_num)
       k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
     end
   end
-  if roles.include?("agent")
+  if role.include?("agent")
     vm.provision :k3s, run: 'once' do |k3s|
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
       k3s.args = "agent "
@@ -144,10 +146,9 @@ Vagrant.configure("2") do |config|
   # over the node roles themselves
   NODE_ROLES.length.times do |i|
     name = NODE_ROLES[i]
+    role_num = name.split("-", -1).pop.to_i
     config.vm.define name do |node|
-      roles = name.split("-", -1)
-      role_num = roles.pop.to_i
-      provision(node.vm, roles, role_num, i)
+      provision(node.vm, name, role_num, i)
     end
   end
 end
diff --git a/tests/e2e/multiclustercidr/multiclustercidr_test.go b/tests/e2e/multiclustercidr/multiclustercidr_test.go
index 020c8f6ae04f..1b091dee1af4 100644
--- a/tests/e2e/multiclustercidr/multiclustercidr_test.go
+++ b/tests/e2e/multiclustercidr/multiclustercidr_test.go
@@ -18,6 +18,7 @@ var serverCount = flag.Int("serverCount", 3, "number of server nodes")
 var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
 var hardened = flag.Bool("hardened", false, "true or false")
 var ci = flag.Bool("ci", false, "running on CI")
+var local = flag.Bool("local", false, "deploy a locally built K3s binary")
 
 func Test_E2EMultiClusterCIDR(t *testing.T) {
   flag.Parse()
@@ -36,233 +37,249 @@ var _ = ReportAfterEach(e2e.GenReport)
 var _ = Describe("Verify MultiClusterCIDR Configuration", Ordered, func() {
 
-  It("Starts up IPv4 setup with no issues", func() {
-    var err error
-    os.Setenv("E2E_IP_FAMILY", "ipv4")
-    defer os.Unsetenv("E2E_IP_FAMILY")
-    serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
-    Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
-    fmt.Println("CLUSTER CONFIG")
-    fmt.Println("OS:", *nodeOS)
-    fmt.Println("Server Nodes:", serverNodeNames)
-    fmt.Println("Agent Nodes:", agentNodeNames)
-    kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
-    Expect(err).NotTo(HaveOccurred())
-  })
-
-  It("Checks Node Status", func() {
-    Eventually(func(g Gomega) {
-      nodes, err := e2e.ParseNodes(kubeConfigFile, false)
-      g.Expect(err).NotTo(HaveOccurred())
-      for _, node := range nodes {
-        g.Expect(node.Status).Should(Equal("Ready"))
+  When("Cluster with IPv4 only is created", func() {
+    It("Starts up IPv4 setup with no issues", func() {
+      var err error
+      os.Setenv("E2E_IP_FAMILY", "ipv4")
+      defer os.Unsetenv("E2E_IP_FAMILY")
+      if *local {
+        serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
+      } else {
+        serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
       }
-    }, "420s", "5s").Should(Succeed())
-    _, err := e2e.ParseNodes(kubeConfigFile, true)
-    Expect(err).NotTo(HaveOccurred())
-  })
-
-  It("Checks Pod Status", func() {
-    Eventually(func(g Gomega) {
-      pods, err := e2e.ParsePods(kubeConfigFile, false)
-      g.Expect(err).NotTo(HaveOccurred())
-      for _, pod := range pods {
-        if strings.Contains(pod.Name, "helm-install") {
-          g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
-        } else {
-          g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
+      Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
+      fmt.Println("CLUSTER CONFIG")
+      fmt.Println("OS:", *nodeOS)
+      fmt.Println("Server Nodes:", serverNodeNames)
+      fmt.Println("Agent Nodes:", agentNodeNames)
+      kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
+      Expect(err).NotTo(HaveOccurred())
+    })
+
+    It("Checks Node Status", func() {
+      Eventually(func(g Gomega) {
+        nodes, err := e2e.ParseNodes(kubeConfigFile, false)
+        g.Expect(err).NotTo(HaveOccurred())
+        for _, node := range nodes {
+          g.Expect(node.Status).Should(Equal("Ready"))
+        }
+      }, "420s", "5s").Should(Succeed())
+      _, err := e2e.ParseNodes(kubeConfigFile, true)
+      Expect(err).NotTo(HaveOccurred())
+    })
+
+    It("Checks Pod Status", func() {
+      Eventually(func(g Gomega) {
+        pods, err := e2e.ParsePods(kubeConfigFile, false)
+        g.Expect(err).NotTo(HaveOccurred())
+        for _, pod := range pods {
+          if strings.Contains(pod.Name, "helm-install") {
+            g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
+          } else {
+            g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
+          }
         }
+      }, "420s", "5s").Should(Succeed())
+      _, err := e2e.ParsePods(kubeConfigFile, true)
+      Expect(err).NotTo(HaveOccurred())
+    })
+
+    It("Verifies that each node has IPv4", func() {
+      nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile)
+      Expect(err).NotTo(HaveOccurred())
+      for _, node := range nodeIPs {
+        Expect(node.IPv4).Should(ContainSubstring("10.10.10"))
       }
-    }, "420s", "5s").Should(Succeed())
-    _, err := e2e.ParsePods(kubeConfigFile, true)
-    Expect(err).NotTo(HaveOccurred())
-  })
-
-  It("Verifies that each node has IPv4", func() {
-    nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile)
-    Expect(err).NotTo(HaveOccurred())
-    for _, node := range nodeIPs {
-      Expect(node.IPv4).Should(ContainSubstring("10.10.10"))
-    }
-  })
-
-  It("Verifies that each pod has IPv4", func() {
-    podIPs, err := e2e.GetPodIPs(kubeConfigFile)
-    Expect(err).NotTo(HaveOccurred())
-    for _, pod := range podIPs {
-      Expect(pod.IPv4).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.42.")), pod.Name)
-    }
-  })
+    })
 
-  It("Add new CIDR", func() {
-    _, err := e2e.DeployWorkload("cluster-cidr.yaml", kubeConfigFile, *hardened)
-    Expect(err).NotTo(HaveOccurred())
-    Eventually(func() (string, error) {
-      cmd := "kubectl get clustercidr new-cidr --kubeconfig=" + kubeConfigFile
-      return e2e.RunCommand(cmd)
-    }, "120s", "5s").Should(ContainSubstring("10.248.0.0"))
-
-  })
-
-  It("Restart agent-0", func() {
-    agents := []string{"agent-0"}
-    err := e2e.RestartCluster(agents)
-    Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
-  })
-
-  It("Checks Node Status", func() {
-    Eventually(func(g Gomega) {
-      nodes, err := e2e.ParseNodes(kubeConfigFile, false)
-      g.Expect(err).NotTo(HaveOccurred())
-      for _, node := range nodes {
-        g.Expect(node.Status).Should(Equal("Ready"))
+    It("Verifies that each pod has IPv4", func() {
+      podIPs, err := e2e.GetPodIPs(kubeConfigFile)
+      Expect(err).NotTo(HaveOccurred())
+      for _, pod := range podIPs {
+        Expect(pod.IPv4).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.42.")), pod.Name)
       }
-    }, "420s", "5s").Should(Succeed())
-    _, err := e2e.ParseNodes(kubeConfigFile, true)
-    Expect(err).NotTo(HaveOccurred())
-  })
+    })
+
+    It("Add new CIDR", func() {
+      _, err := e2e.DeployWorkload("cluster-cidr.yaml", kubeConfigFile, *hardened)
+      Expect(err).NotTo(HaveOccurred())
+      Eventually(func() (string, error) {
+        cmd := "kubectl get clustercidr new-cidr --kubeconfig=" + kubeConfigFile
+        return e2e.RunCommand(cmd)
+      }, "120s", "5s").Should(ContainSubstring("10.248.0.0"))
+
+    })
+
+    It("Restart agent-0", func() {
+      agents := []string{"agent-0"}
+      err := e2e.RestartCluster(agents)
+      Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
+    })
+
+    It("Checks Node Status", func() {
+      Eventually(func(g Gomega) {
+        nodes, err := e2e.ParseNodes(kubeConfigFile, false)
+        g.Expect(err).NotTo(HaveOccurred())
+        for _, node := range nodes {
+          g.Expect(node.Status).Should(Equal("Ready"))
+        }
+      }, "420s", "5s").Should(Succeed())
+      _, err := e2e.ParseNodes(kubeConfigFile, true)
+      Expect(err).NotTo(HaveOccurred())
+    })
+
+    It("Checks Pod Status", func() {
+      Eventually(func(g Gomega) {
+        pods, err := e2e.ParsePods(kubeConfigFile, false)
+        g.Expect(err).NotTo(HaveOccurred())
+        for _, pod := range pods {
+          if strings.Contains(pod.Name, "helm-install") {
+            g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
+          } else {
+            g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
+          }
+        }
+      }, "420s", "5s").Should(Succeed())
+      _, err := e2e.ParsePods(kubeConfigFile, true)
+      Expect(err).NotTo(HaveOccurred())
+    })
 
-  It("Checks Pod Status", func() {
-    Eventually(func(g Gomega) {
+    It("Verifies that each pod of agent-0 has IPv4 from the new CIDR", func() {
       pods, err := e2e.ParsePods(kubeConfigFile, false)
-      g.Expect(err).NotTo(HaveOccurred())
+      Expect(err).NotTo(HaveOccurred())
       for _, pod := range pods {
-        if strings.Contains(pod.Name, "helm-install") {
-          g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
-        } else {
-          g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
+        if pod.Node == "agent-0" {
+          Expect(pod.IP).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.248.")), pod.Name)
         }
       }
-    }, "420s", "5s").Should(Succeed())
-    _, err := e2e.ParsePods(kubeConfigFile, true)
-    Expect(err).NotTo(HaveOccurred())
-  })
+    })
 
-  It("Verifies that each pod of agent-0 has IPv4 from the new CIDR", func() {
-    pods, err := e2e.ParsePods(kubeConfigFile, false)
-    Expect(err).NotTo(HaveOccurred())
-    for _, pod := range pods {
-      if pod.Node == "agent-0" {
-        Expect(pod.IP).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.248.")), pod.Name)
+    It("Destroy Cluster", func() {
+      Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
+      if os.Getenv("E2E_GOCOVER") != "" {
+        Expect(os.Rename("coverage.out", "coverage-ipv4.out")).To(Succeed())
       }
-    }
-  })
-
-  It("Destroy Cluster", func() {
-    Expect(e2e.DestroyCluster()).To(Succeed())
-    Expect(os.Remove(kubeConfigFile)).To(Succeed())
+      Expect(e2e.DestroyCluster()).To(Succeed())
+      Expect(os.Remove(kubeConfigFile)).To(Succeed())
+    })
   })
 
-  It("Starts up IPv6 setup with no issues", func() {
-    var err error
-    os.Setenv("E2E_IP_FAMILY", "ipv6")
-    defer os.Unsetenv("E2E_IP_FAMILY")
-    serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
-    Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
-    fmt.Println("CLUSTER CONFIG")
-    fmt.Println("OS:", *nodeOS)
-    fmt.Println("Server Nodes:", serverNodeNames)
-    fmt.Println("Agent Nodes:", agentNodeNames)
-    kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
-    Expect(err).NotTo(HaveOccurred())
-  })
-
-  It("Checks Node Status", func() {
-    Eventually(func(g Gomega) {
-      nodes, err := e2e.ParseNodes(kubeConfigFile, false)
-      g.Expect(err).NotTo(HaveOccurred())
-      for _, node := range nodes {
-        g.Expect(node.Status).Should(Equal("Ready"))
+  When("Cluster with IPv6 only is created", func() {
+    It("Starts up IPv6 setup with no issues", func() {
+      var err error
+      os.Setenv("E2E_IP_FAMILY", "ipv6")
+      defer os.Unsetenv("E2E_IP_FAMILY")
+      if *local {
+        serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
+      } else {
+        serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
       }
+      Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
+      fmt.Println("CLUSTER CONFIG")
+      fmt.Println("OS:", *nodeOS)
+      fmt.Println("Server Nodes:", serverNodeNames)
+      fmt.Println("Agent Nodes:", agentNodeNames)
+      kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
+      Expect(err).NotTo(HaveOccurred())
+    })
+
+    It("Checks Node Status", func() {
+      Eventually(func(g Gomega) {
+        nodes, err := e2e.ParseNodes(kubeConfigFile, false)
+        g.Expect(err).NotTo(HaveOccurred())
+        for _, node := range nodes {
+          g.Expect(node.Status).Should(Equal("Ready"))
+        }
+      }, "420s", "5s").Should(Succeed())
+      _, err := e2e.ParseNodes(kubeConfigFile, true)
+      Expect(err).NotTo(HaveOccurred())
+    })
+
+    It("Checks Pod Status", func() {
+      Eventually(func(g Gomega) {
+        pods, err := e2e.ParsePods(kubeConfigFile, false)
+        g.Expect(err).NotTo(HaveOccurred())
+        for _, pod := range pods {
+          if strings.Contains(pod.Name, "helm-install") {
+            g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
+          } else {
+            g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
+          }
+        }
+      }, "420s", "5s").Should(Succeed())
+      _, err := e2e.ParsePods(kubeConfigFile, true)
+      Expect(err).NotTo(HaveOccurred())
+    })
+
+    It("Verifies that each node has IPv6", func() {
+      nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile)
+      Expect(err).NotTo(HaveOccurred())
+      for _, node := range nodeIPs {
+        Expect(node.IPv6).Should(ContainSubstring("fd11:decf:c0ff"))
       }
-    }, "420s", "5s").Should(Succeed())
-    _, err := e2e.ParseNodes(kubeConfigFile, true)
-    Expect(err).NotTo(HaveOccurred())
-  })
-
-  It("Checks Pod Status", func() {
-    Eventually(func(g Gomega) {
-      pods, err := e2e.ParsePods(kubeConfigFile, false)
-      g.Expect(err).NotTo(HaveOccurred())
-      for _, pod := range pods {
-        if strings.Contains(pod.Name, "helm-install") {
-          g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
-        } else {
-          g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
+    })
+
+    It("Verifies that each pod has IPv6", func() {
+      podIPs, err := e2e.GetPodIPs(kubeConfigFile)
+      Expect(err).NotTo(HaveOccurred())
+      for _, pod := range podIPs {
+        Expect(pod.IPv6).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:42")), pod.Name)
       }
-    }, "420s", "5s").Should(Succeed())
-    _, err := e2e.ParsePods(kubeConfigFile, true)
-    Expect(err).NotTo(HaveOccurred())
-  })
-
-  It("Verifies that each node has IPv6", func() {
-    nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile)
-    Expect(err).NotTo(HaveOccurred())
-    for _, node := range nodeIPs {
-      Expect(node.IPv6).Should(ContainSubstring("fd11:decf:c0ff"))
-    }
-  })
-
-  It("Verifies that each pod has IPv6", func() {
-    podIPs, err := e2e.GetPodIPs(kubeConfigFile)
-    Expect(err).NotTo(HaveOccurred())
-    for _, pod := range podIPs {
-      Expect(pod.IPv6).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:42")), pod.Name)
-    }
-  })
+    })
+
+    It("Add new CIDR", func() {
+      _, err := e2e.DeployWorkload("cluster-cidr-ipv6.yaml", kubeConfigFile, *hardened)
+      Expect(err).NotTo(HaveOccurred())
+      Eventually(func() (string, error) {
+        cmd := "kubectl get clustercidr new-cidr --kubeconfig=" + kubeConfigFile
+        return e2e.RunCommand(cmd)
+      }, "120s", "5s").Should(ContainSubstring("2001:cafe:248"))
+
+    })
+
+    It("Delete and restart agent-0", func() {
+      agents := []string{"agent-0"}
+      err := e2e.RestartCluster(agents)
+      Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
+    })
+
+    It("Checks Node Status", func() {
+      Eventually(func(g Gomega) {
+        nodes, err := e2e.ParseNodes(kubeConfigFile, false)
+        g.Expect(err).NotTo(HaveOccurred())
+        for _, node := range nodes {
+          g.Expect(node.Status).Should(Equal("Ready"))
+        }
+      }, "420s", "5s").Should(Succeed())
+      _, err := e2e.ParseNodes(kubeConfigFile, true)
+      Expect(err).NotTo(HaveOccurred())
+    })
+
+    It("Checks Pod Status", func() {
+      Eventually(func(g Gomega) {
+        pods, err := e2e.ParsePods(kubeConfigFile, false)
+        g.Expect(err).NotTo(HaveOccurred())
+        for _, pod := range pods {
+          if strings.Contains(pod.Name, "helm-install") {
+            g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
+          } else {
+            g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
+          }
+        }
+      }, "420s", "5s").Should(Succeed())
+      _, err := e2e.ParsePods(kubeConfigFile, true)
+      Expect(err).NotTo(HaveOccurred())
+    })
 
-  It("Add new CIDR", func() {
-    _, err := e2e.DeployWorkload("cluster-cidr-ipv6.yaml", kubeConfigFile, *hardened)
-    Expect(err).NotTo(HaveOccurred())
-    Eventually(func() (string, error) {
-      cmd := "kubectl get clustercidr new-cidr --kubeconfig=" + kubeConfigFile
-      return e2e.RunCommand(cmd)
-    }, "120s", "5s").Should(ContainSubstring("2001:cafe:248"))
-
-  })
-
-  It("Delete and restart agent-0", func() {
-    agents := []string{"agent-0"}
-    err := e2e.RestartCluster(agents)
-    Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
-  })
-
-  It("Checks Node Status", func() {
-    Eventually(func(g Gomega) {
-      nodes, err := e2e.ParseNodes(kubeConfigFile, false)
-      g.Expect(err).NotTo(HaveOccurred())
-      for _, node := range nodes {
-        g.Expect(node.Status).Should(Equal("Ready"))
+    It("Verifies that each pod of agent-0 has IPv6 from the new CIDR", func() {
+      pods, err := e2e.ParsePods(kubeConfigFile, false)
+      Expect(err).NotTo(HaveOccurred())
+      for _, pod := range pods {
+        if pod.Node == "agent-0" {
+          Expect(pod.IP).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:248")), pod.Name)
+        }
       }
-    }, "420s", "5s").Should(Succeed())
-    _, err := e2e.ParseNodes(kubeConfigFile, true)
-    Expect(err).NotTo(HaveOccurred())
-  })
-
-  It("Checks Pod Status", func() {
-    Eventually(func(g Gomega) {
-      pods, err := e2e.ParsePods(kubeConfigFile, false)
-      g.Expect(err).NotTo(HaveOccurred())
-      for _, pod := range pods {
-        if strings.Contains(pod.Name, "helm-install") {
-          g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
-        } else {
-          g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
-        }
-      }
-    }, "420s", "5s").Should(Succeed())
-    _, err := e2e.ParsePods(kubeConfigFile, true)
-    Expect(err).NotTo(HaveOccurred())
-  })
-
-  It("Verifies that each pod of agent-0 has IPv6 from the new CIDR", func() {
-    pods, err := e2e.ParsePods(kubeConfigFile, false)
-    Expect(err).NotTo(HaveOccurred())
-    for _, pod := range pods {
-      if pod.Node == "agent-0" {
-        Expect(pod.IP).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:248")), pod.Name)
-      }
-    }
+    })
   })
 })
@@ -275,6 +292,10 @@ var _ = AfterSuite(func() {
   if failed && !*ci {
     fmt.Println("FAILED!")
   } else {
+    Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
+    if os.Getenv("E2E_GOCOVER") != "" {
+      Expect(os.Rename("coverage.out", "coverage-ipv6.out")).To(Succeed())
+    }
     Expect(e2e.DestroyCluster()).To(Succeed())
     Expect(os.Remove(kubeConfigFile)).To(Succeed())
   }
diff --git a/tests/e2e/privateregistry/Vagrantfile b/tests/e2e/privateregistry/Vagrantfile
index c2120d74a8c6..b1ab630be5b0 100644
--- a/tests/e2e/privateregistry/Vagrantfile
+++ b/tests/e2e/privateregistry/Vagrantfile
@@ -5,7 +5,7 @@ NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
   ['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
 GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
 RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
-EXTERNAL_DB = (ENV['E2E_EXTERNAL_DB'] || "etcd")
+GOCOVER = (ENV['E2E_GOCOVER'] || "")
 NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
 NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
 # Virtualbox >= 6.1.28 require `/etc/vbox/network.conf` for expanded private networks
@@ -24,6 +24,7 @@ def provision(vm, role, role_num, node_num)
   load vagrant_defaults
 
   defaultOSConfigure(vm)
+  addCoverageDir(vm, role, GOCOVER)
   install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
 
   vm.provision "shell", inline: "ping -c 2 k3s.io"
@@ -93,9 +94,6 @@ def provision(vm, role, role_num, node_num)
   end
 
   if vm.box.to_s.include?("microos")
     vm.provision 'k3s-reload', type: 'reload', run: 'once'
-    if !EXTERNAL_DB.empty?
-      vm.provision "shell", inline: "docker start #{EXTERNAL_DB}"
-    end
   end
 end
diff --git a/tests/e2e/privateregistry/privateregistry_test.go b/tests/e2e/privateregistry/privateregistry_test.go
index 77ce4b9cb560..87957064f9cc 100644
--- a/tests/e2e/privateregistry/privateregistry_test.go
+++ b/tests/e2e/privateregistry/privateregistry_test.go
@@ -22,7 +22,6 @@ var ci = flag.Bool("ci", false, "running on CI")
 var local = flag.Bool("local", false, "deploy a locally built K3s binary")
 
 // Environment Variables Info:
-// E2E_EXTERNAL_DB: mysql, postgres, etcd (default: etcd)
 // E2E_RELEASE_VERSION=v1.23.1+k3s2 (default: latest commit from master)
 // E2E_REGISTRY: true/false (default: false)
 
@@ -85,39 +84,39 @@ var _ = Describe("Verify Create", Ordered, func() {
   })
 
   It("Create new private registry", func() {
-    registry, err := e2e.RunCmdOnNode("sudo docker run -d -p 5000:5000 --restart=always --name registry registry:2 ", serverNodeNames[0])
+    registry, err := e2e.RunCmdOnNode("docker run -d -p 5000:5000 --restart=always --name registry registry:2 ", serverNodeNames[0])
     fmt.Println(registry)
     Expect(err).NotTo(HaveOccurred())
   })
 
   It("ensures registry is working", func() {
-    a, err := e2e.RunCmdOnNode("sudo docker ps -a | grep registry\n", serverNodeNames[0])
+    a, err := e2e.RunCmdOnNode("docker ps -a | grep registry\n", serverNodeNames[0])
     fmt.Println(a)
     Expect(err).NotTo(HaveOccurred())
   })
 
   It("Should pull and image from dockerhub and send it to private registry", func() {
-    cmd := "sudo docker pull nginx"
+    cmd := "docker pull nginx"
     _, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
     Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
 
     nodeIP, err := e2e.FetchNodeExternalIP(serverNodeNames[0])
     Expect(err).NotTo(HaveOccurred())
 
-    cmd = "sudo docker tag nginx " + nodeIP + ":5000/my-webpage"
+    cmd = "docker tag nginx " + nodeIP + ":5000/my-webpage"
     _, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
     Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
 
-    cmd = "sudo docker push " + nodeIP + ":5000/my-webpage"
+    cmd = "docker push " + nodeIP + ":5000/my-webpage"
     _, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
     Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
 
-    cmd = "sudo docker image remove nginx " + nodeIP + ":5000/my-webpage"
+    cmd = "docker image remove nginx " + nodeIP + ":5000/my-webpage"
     _, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
     Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
   })
 
   It("Should create and validate deployment with private registry on", func() {
-    res, err := e2e.RunCmdOnNode("sudo kubectl create deployment my-webpage --image=my-registry.local/my-webpage", serverNodeNames[0])
+    res, err := e2e.RunCmdOnNode("kubectl create deployment my-webpage --image=my-registry.local/my-webpage", serverNodeNames[0])
     fmt.Println(res)
     Expect(err).NotTo(HaveOccurred())
 
@@ -131,7 +130,6 @@ var _ = Describe("Verify Create", Ordered, func() {
       }
       g.Expect(err).NotTo(HaveOccurred())
       g.Expect(pod.Status).Should(Equal("Running"))
-      g.Expect(pod.Node).Should(Equal(agentNodeNames[0]))
     }, "60s", "5s").Should(Succeed())
 
     cmd := "curl " + pod.IP
@@ -151,11 +149,12 @@ var _ = AfterSuite(func() {
   if failed && !*ci {
     fmt.Println("FAILED!")
   } else {
-    r1, err := e2e.RunCmdOnNode("sudo docker rm -f registry", serverNodeNames[0])
+    r1, err := e2e.RunCmdOnNode("docker rm -f registry", serverNodeNames[0])
     Expect(err).NotTo(HaveOccurred(), r1)
-    r2, err := e2e.RunCmdOnNode("sudo kubectl delete deployment my-webpage", serverNodeNames[0])
+    r2, err := e2e.RunCmdOnNode("kubectl delete deployment my-webpage", serverNodeNames[0])
     Expect(err).NotTo(HaveOccurred(), r2)
     Expect(err).NotTo(HaveOccurred())
+    Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
     Expect(e2e.DestroyCluster()).To(Succeed())
     Expect(os.Remove(kubeConfigFile)).To(Succeed())
   }
diff --git a/tests/e2e/rotateca/Vagrantfile b/tests/e2e/rotateca/Vagrantfile
index bb50599ca58f..54bb113f7fb8 100644
--- a/tests/e2e/rotateca/Vagrantfile
+++ b/tests/e2e/rotateca/Vagrantfile
@@ -5,6 +5,7 @@ NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
   ['generic/ubuntu2204', 'generic/ubuntu2204', 'generic/ubuntu2204', 'generic/ubuntu2204'])
 GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
 RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
+GOCOVER = (ENV['E2E_GOCOVER'] || "")
 NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
 NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
 # Virtualbox >= 6.1.28 require `/etc/vbox/network.conf` for expanded private networks
@@ -21,6 +22,7 @@ def provision(vm, role, role_num, node_num)
   load vagrant_defaults if File.exists?(vagrant_defaults)
 
   defaultOSConfigure(vm)
+  addCoverageDir(vm, role, GOCOVER)
   install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
 
   vm.provision "shell", inline: "ping -c 2 k3s.io"
diff --git a/tests/e2e/rotateca/rotateca_test.go b/tests/e2e/rotateca/rotateca_test.go
index 29e187e80273..0ba640a566ff 100644
--- a/tests/e2e/rotateca/rotateca_test.go
+++ b/tests/e2e/rotateca/rotateca_test.go
@@ -17,6 +17,7 @@ var nodeOS = flag.String("nodeOS", "generic/ubuntu2204", "VM operating system")
 var serverCount = flag.Int("serverCount", 3, "number of server nodes")
 var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
 var ci = flag.Bool("ci", false, "running on CI")
+var local = flag.Bool("local", false, "deploy a locally built K3s binary")
 
 // Environment Variables Info:
 // E2E_RELEASE_VERSION=v1.23.1+k3s2 or nil for latest commit from master
@@ -40,7 +41,11 @@ var _ = Describe("Verify Custom CA Rotation", Ordered, func() {
   Context("Custom CA is rotated:", func() {
     It("Starts up with no issues", func() {
       var err error
-      serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
+      if *local {
+        serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
+      } else {
+        serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
+      }
       Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
       fmt.Println("CLUSTER CONFIG")
       fmt.Println("OS:", *nodeOS)
@@ -136,6 +141,7 @@ var _ = AfterSuite(func() {
   if failed && !*ci {
     fmt.Println("FAILED!")
   } else {
+    Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
     Expect(e2e.DestroyCluster()).To(Succeed())
     Expect(os.Remove(kubeConfigFile)).To(Succeed())
   }
diff --git a/tests/e2e/s3/Vagrantfile b/tests/e2e/s3/Vagrantfile
index 968f146cf359..77d84b1ed1c9 100644
--- a/tests/e2e/s3/Vagrantfile
+++ b/tests/e2e/s3/Vagrantfile
@@ -5,6 +5,7 @@ NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
   ['generic/ubuntu2004'])
 GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
 RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
+GOCOVER = (ENV['E2E_GOCOVER'] || "")
 NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
 NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
 # Virtualbox >= 6.1.28 require `/etc/vbox/network.conf` for expanded private networks
@@ -23,8 +24,9 @@ def provision(vm, role, role_num, node_num)
   load vagrant_defaults
 
   defaultOSConfigure(vm)
+  addCoverageDir(vm, role, GOCOVER)
   install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
-  
+
   vm.provision "shell", inline: "ping -c 2 k3s.io"
 
   runS3mock = <<~'SCRIPT'
@@ -57,9 +59,6 @@ def provision(vm, role, role_num, node_num)
 
   if vm.box.to_s.include?("microos")
     vm.provision 'k3s-reload', type: 'reload', run: 'once'
-    if !EXTERNAL_DB.empty?
-      vm.provision "shell", inline: "docker start #{EXTERNAL_DB}"
-    end
   end
 end
diff --git a/tests/e2e/s3/s3_test.go b/tests/e2e/s3/s3_test.go
index a763c74ed4ec..859309693c38 100644
--- a/tests/e2e/s3/s3_test.go
+++ b/tests/e2e/s3/s3_test.go
@@ -20,7 +20,6 @@ var ci = flag.Bool("ci", false, "running on CI")
 var local = flag.Bool("local", false, "deploy a locally built K3s binary")
 
 // Environment Variables Info:
-// E2E_EXTERNAL_DB: mysql, postgres, etcd (default: etcd)
 // E2E_RELEASE_VERSION=v1.23.1+k3s2 (default: latest commit from master)
 // E2E_REGISTRY: true/false (default: false)
 
@@ -83,17 +82,44 @@ var _ = Describe("Verify Create", Ordered, func() {
   })
 
   It("ensures s3 mock is working", func() {
-    a, err := e2e.RunCmdOnNode("sudo docker ps -a | grep mock\n", serverNodeNames[0])
-    fmt.Println(a)
+    res, err := e2e.RunCmdOnNode("docker ps -a | grep mock\n", serverNodeNames[0])
+    fmt.Println(res)
     Expect(err).NotTo(HaveOccurred())
   })
 
   It("save s3 snapshot", func() {
-    a, err := e2e.RunCmdOnNode("sudo k3s etcd-snapshot save", serverNodeNames[0])
+    res, err := e2e.RunCmdOnNode("k3s etcd-snapshot save", serverNodeNames[0])
+    Expect(err).NotTo(HaveOccurred())
+    Expect(res).To(ContainSubstring("S3 bucket test exists"))
+    Expect(res).To(ContainSubstring("Uploading snapshot"))
+    Expect(res).To(ContainSubstring("S3 upload complete for"))
+  })
+  It("lists saved s3 snapshot", func() {
+    res, err := e2e.RunCmdOnNode("k3s etcd-snapshot list", serverNodeNames[0])
+    Expect(err).NotTo(HaveOccurred())
+    Expect(res).To(ContainSubstring("on-demand-server-0"))
+  })
+  It("save 3 more s3 snapshots", func() {
+    for _, i := range []string{"1", "2", "3"} {
+      res, err := e2e.RunCmdOnNode("k3s etcd-snapshot save --name special-"+i, serverNodeNames[0])
+      Expect(err).NotTo(HaveOccurred())
+      Expect(res).To(ContainSubstring("Uploading snapshot"))
+      Expect(res).To(ContainSubstring("S3 upload complete for special-" + i))
+    }
+  })
+  It("lists saved s3 snapshot", func() {
+    res, err := e2e.RunCmdOnNode("k3s etcd-snapshot list", serverNodeNames[0])
+    Expect(err).NotTo(HaveOccurred())
+    Expect(res).To(ContainSubstring("on-demand-server-0"))
+    Expect(res).To(ContainSubstring("special-1-server-0"))
+    Expect(res).To(ContainSubstring("special-2-server-0"))
+    Expect(res).To(ContainSubstring("special-3-server-0"))
+  })
+  // TODO, there is currently a bug that prevents pruning on s3 snapshots that are not prefixed with "on-demand"
+  // https://github.com/rancher/rke2/issues/3714
+  // Once fixed, ensure that the snapshots list are actually reduced to 2
+  It("prunes s3 snapshots", func() {
+    _, err := e2e.RunCmdOnNode("k3s etcd-snapshot prune --snapshot-retention 2", serverNodeNames[0])
     Expect(err).NotTo(HaveOccurred())
-    Expect(strings.Contains(a, "S3 bucket test exists")).Should(Equal(true))
-    Expect(strings.Contains(a, "Uploading snapshot")).Should(Equal(true))
-    Expect(strings.Contains(a, "S3 upload complete for")).Should(Equal(true))
-
   })
 })
 
@@ -108,6 +134,7 @@ var _ = AfterSuite(func() {
   if failed && !*ci {
     fmt.Println("FAILED!")
   } else {
+    Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
     Expect(e2e.DestroyCluster()).To(Succeed())
     Expect(os.Remove(kubeConfigFile)).To(Succeed())
   }
diff --git a/tests/e2e/secretsencryption/Vagrantfile b/tests/e2e/secretsencryption/Vagrantfile
index 9170a607a291..d93ddf3d13f3 100644
--- a/tests/e2e/secretsencryption/Vagrantfile
+++ b/tests/e2e/secretsencryption/Vagrantfile
@@ -22,13 +22,11 @@ def provision(vm, role, role_num, node_num)
   load vagrant_defaults if File.exists?(vagrant_defaults)
 
   defaultOSConfigure(vm)
+  addCoverageDir(vm, role, GOCOVER)
   install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
 
   vm.provision "shell", inline: "ping -c 2 k3s.io"
 
-  if !GOCOVER.empty?
-    addCoverageDir(vm, role)
-  end
   if role.include?("server") && role_num == 0
     vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
diff --git a/tests/e2e/secretsencryption/secretsencryption_test.go b/tests/e2e/secretsencryption/secretsencryption_test.go
index bee68ac9405d..a0b7d7c68e9e 100644
--- a/tests/e2e/secretsencryption/secretsencryption_test.go
+++ b/tests/e2e/secretsencryption/secretsencryption_test.go
@@ -306,12 +306,10 @@ var _ = AfterEach(func() {
 })
 
 var _ = AfterSuite(func() {
-  if os.Getenv("E2E_GOCOVER") != "" {
-    Expect(e2e.GetCoverageReport(serverNodeNames)).To(Succeed())
-  }
   if failed && !*ci {
     fmt.Println("FAILED!")
   } else {
+    Expect(e2e.GetCoverageReport(serverNodeNames)).To(Succeed())
     Expect(e2e.DestroyCluster()).To(Succeed())
     Expect(os.Remove(kubeConfigFile)).To(Succeed())
   }
diff --git a/tests/e2e/snapshotrestore/Vagrantfile b/tests/e2e/snapshotrestore/Vagrantfile
index 5ee814b624cd..a519abadcc0a 100644
--- a/tests/e2e/snapshotrestore/Vagrantfile
+++ b/tests/e2e/snapshotrestore/Vagrantfile
@@ -5,7 +5,7 @@ NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
   ['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
 GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
 RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
-EXTERNAL_DB = (ENV['E2E_EXTERNAL_DB'] || "etcd")
+GOCOVER = (ENV['E2E_GOCOVER'] || "")
 NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
 NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
 # Virtualbox >= 6.1.28 require `/etc/vbox/network.conf` for expanded private networks
@@ -24,6 +24,7 @@ def provision(vm, role, role_num, node_num)
   load vagrant_defaults
 
   defaultOSConfigure(vm)
+  addCoverageDir(vm, role, GOCOVER)
   install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
 
   vm.provision "shell", inline: "ping -c 2 k3s.io"
@@ -35,10 +36,10 @@ def provision(vm, role, role_num, node_num)
       k3s.args = "server "
       k3s.config = <<~YAML
         token: vagrant
+        cluster-init: true
        node-external-ip: #{NETWORK_PREFIX}.100
         flannel-iface: eth1
         tls-san: #{NETWORK_PREFIX}.100.nip.io
-        #{db_type}
       YAML
       k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
@@ -52,7 +53,6 @@ def provision(vm, role, role_num, node_num)
         token: vagrant
         node-external-ip: #{node_ip}
         flannel-iface: eth1
-        #{db_type}
       YAML
       k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
@@ -67,7 +67,6 @@ def provision(vm, role, role_num, node_num)
         token: vagrant
         node-external-ip: #{node_ip}
         flannel-iface: eth1
-        #{db_type}
       YAML
       k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
@@ -75,25 +74,10 @@ def provision(vm, role, role_num, node_num)
   end
   if vm.box.to_s.include?("microos")
     vm.provision 'k3s-reload', type: 'reload', run: 'once'
-    if !EXTERNAL_DB.empty?
-      vm.provision "shell", inline: "docker start #{EXTERNAL_DB}"
-    end
   end
   # This step does not run by default and is designed to be called by higher level tools
 end
 
-def getDBType(role, role_num, vm)
-  if ( EXTERNAL_DB == "" || EXTERNAL_DB == "etcd" )
-    if role.include?("server") && role_num == 0
-      return "cluster-init: true"
-    end
-  else
-    puts "Unknown EXTERNAL_DB: " + EXTERNAL_DB
-    abort
-  end
-  return ""
-end
-
 Vagrant.configure("2") do |config|
   config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"]
   # Default provider is libvirt, virtualbox is only provided as a backup
diff --git a/tests/e2e/snapshotrestore/snapshotrestore_test.go b/tests/e2e/snapshotrestore/snapshotrestore_test.go
index 9b1320b3ce3e..97b9c47de399 100644
--- a/tests/e2e/snapshotrestore/snapshotrestore_test.go
+++ b/tests/e2e/snapshotrestore/snapshotrestore_test.go
@@ -25,7 +25,6 @@ var ci = flag.Bool("ci", false, "running on CI")
 var local = flag.Bool("local", false, "deploy a locally built K3s binary")
 
 // Environment Variables Info:
-// E2E_EXTERNAL_DB: mysql, postgres, etcd (default: etcd)
 // E2E_RELEASE_VERSION=v1.23.1+k3s2 (default: latest commit from master)
 
 func Test_E2ESnapshotRestore(t *testing.T) {
@@ -308,6 +307,7 @@ var _ = AfterSuite(func() {
   if failed && !*ci {
     fmt.Println("FAILED!")
   } else {
+    Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
     Expect(e2e.DestroyCluster()).To(Succeed())
     Expect(os.Remove(kubeConfigFile)).To(Succeed())
   }
diff --git a/tests/e2e/startup/Vagrantfile b/tests/e2e/startup/Vagrantfile
index 68aa8cad8d3b..117bde72b72d 100644
--- a/tests/e2e/startup/Vagrantfile
+++ b/tests/e2e/startup/Vagrantfile
@@ -23,9 +23,12 @@ def provision(vm, role, role_num, node_num)
   defaultOSConfigure(vm)
   dockerInstall(vm)
+  addCoverageDir(vm, role, GOCOVER)
   install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
+  node_ip = "#{NETWORK_PREFIX}.#{100+node_num}"
+
   vm.provision "shell", inline: "ping -c 2 k3s.io"
 
   if role.include?("server")
@@ -53,9 +56,6 @@ def provision(vm, role, role_num, node_num)
     end
   end
 
-  if !GOCOVER.empty?
-    addCoverageDir(vm, role)
-  end
   if vm.box.to_s.include?("microos")
     vm.provision 'k3s-reload', type: 'reload', run: 'once'
diff --git a/tests/e2e/startup/startup_test.go b/tests/e2e/startup/startup_test.go
index fa1e5a3d2f92..88dcfadd8ec6 100644
--- a/tests/e2e/startup/startup_test.go
+++ b/tests/e2e/startup/startup_test.go
@@ -277,12 +277,10 @@ var _ = AfterEach(func() {
 })
 
 var _ = AfterSuite(func() {
-  if os.Getenv("E2E_GOCOVER") != "" {
-    Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
-  }
   if failed && !*ci {
     fmt.Println("FAILED!")
   } else {
+    Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
     Expect(e2e.DestroyCluster()).To(Succeed())
     Expect(os.Remove(kubeConfigFile)).To(Succeed())
   }
diff --git a/tests/e2e/tailscale/Vagrantfile b/tests/e2e/tailscale/Vagrantfile
index bced13d8067e..e7b21ae12698 100644
--- a/tests/e2e/tailscale/Vagrantfile
+++ b/tests/e2e/tailscale/Vagrantfile
@@ -5,6 +5,7 @@ NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
   ['generic/ubuntu2004', 'generic/ubuntu2004'])
 GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
 RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
+GOCOVER = (ENV['E2E_GOCOVER'] || "")
 NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
 NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
 # This key must be created using tailscale web
@@ -12,26 +13,26 @@ TAILSCALE_KEY = (ENV['E2E_TAILSCALE_KEY'] || "")
 NETWORK4_PREFIX = "10.10.10"
 install_type = ""
 
-def provision(node, roles, role_num, node_num)
-  node.vm.box = NODE_BOXES[node_num]
-  node.vm.hostname = "#{roles[0]}-#{role_num}"
+def provision(vm, roles, role_num, node_num)
+  vm.box = NODE_BOXES[node_num]
+  vm.hostname = "#{roles[0]}-#{role_num}"
   node_ip4 = "#{NETWORK4_PREFIX}.#{100+node_num}"
-  node.vm.network "private_network", ip: node_ip4, netmask: "255.255.255.0"
+  vm.network "private_network", ip: node_ip4, netmask: "255.255.255.0"
 
   scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts"
   vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
   load vagrant_defaults
 
-  defaultOSConfigure(node.vm)
+  defaultOSConfigure(vm)
+  addCoverageDir(vm, roles, GOCOVER)
+  install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
 
-  install_type = getInstallType(node.vm, RELEASE_VERSION, GITHUB_BRANCH)
-
-  node.vm.provision "Ping Check", type: "shell", inline: "ping -c 2 k3s.io"
-  node.vm.provision "Install tailscale", type: "shell", inline: "curl -fsSL https://tailscale.com/install.sh | sh"
+  vm.provision "Ping Check", type: "shell", inline: "ping -c 2 k3s.io"
+  vm.provision "Install tailscale", type: "shell", inline: "curl -fsSL https://tailscale.com/install.sh | sh"
 
   if roles.include?("server") && role_num == 0
     server_IP = nil
-    node.vm.provision :k3s, run: 'once' do |k3s|
+    vm.provision :k3s, run: 'once' do |k3s|
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
       k3s.args = "server "
       k3s.config = <<~YAML
@@ -43,7 +44,7 @@ def provision(node, roles, role_num, node_num)
     end
   end
   if roles.include?("agent")
-    node.vm.provision :k3s, run: 'once' do |k3s|
+    vm.provision :k3s, run: 'once' do |k3s|
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
       k3s.args = "agent "
       k3s.config = <<~YAML
@@ -77,7 +78,7 @@ Vagrant.configure("2") do |config|
     config.vm.define name do |node|
       roles = name.split("-", -1)
       role_num = roles.pop.to_i
-      provision(node, roles, role_num, i)
+      provision(node.vm, roles, role_num, i)
     end
   end
 end
diff --git a/tests/e2e/tailscale/tailscale_test.go b/tests/e2e/tailscale/tailscale_test.go
index e61244635d48..16078392cd36 100644
--- a/tests/e2e/tailscale/tailscale_test.go
+++ b/tests/e2e/tailscale/tailscale_test.go
@@ -120,6 +120,7 @@ var _ = AfterSuite(func() {
   if failed && !*ci {
     fmt.Println("FAILED!")
   } else {
+    Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
     Expect(e2e.DestroyCluster()).To(Succeed())
     Expect(os.Remove(kubeConfigFile)).To(Succeed())
   }
diff --git a/tests/e2e/testutils.go b/tests/e2e/testutils.go
index 0990ccaffa4a..19b79935d3bd 100644
--- a/tests/e2e/testutils.go
+++ b/tests/e2e/testutils.go
@@ -493,6 +493,9 @@ func UpgradeCluster(nodeNames []string, local bool) error {
 }
 
 func GetCoverageReport(nodeNames []string) error {
+  if os.Getenv("E2E_GOCOVER") == "" {
+    return nil
+  }
   covDirs := []string{}
   for _, nodeName := range nodeNames {
     covDir := nodeName + "-cov"
diff --git a/tests/e2e/upgradecluster/Vagrantfile b/tests/e2e/upgradecluster/Vagrantfile
index 35c8b6544f8c..a81565a4af90 100644
--- a/tests/e2e/upgradecluster/Vagrantfile
+++ b/tests/e2e/upgradecluster/Vagrantfile
@@ -46,10 +46,7 @@ def provision(vm, role, role_num, node_num)
   if !REGISTRY.empty?
     vm.provision "Set private registry", type: "shell", path: scripts_location + "/registry.sh", args: [ "#{NETWORK_PREFIX}.1" ]
   end
-
-  if !GOCOVER.empty?
-    addCoverageDir(vm, role)
-  end
+  addCoverageDir(vm, role, GOCOVER)
 
   if role.include?("server") && role_num == 0
     vm.provision 'k3s-primary-server', type: 'k3s', run: 'once' do |k3s|
diff --git a/tests/e2e/upgradecluster/upgradecluster_test.go b/tests/e2e/upgradecluster/upgradecluster_test.go
index 35ed556c7765..8c25fb9929b3 100644
--- a/tests/e2e/upgradecluster/upgradecluster_test.go
+++ b/tests/e2e/upgradecluster/upgradecluster_test.go
@@ -385,12 +385,10 @@ var _ = AfterEach(func() {
 })
 
 var _ = AfterSuite(func() {
-  if os.Getenv("E2E_GOCOVER") != "" {
-    Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
-  }
   if failed && !*ci {
     fmt.Println("FAILED!")
   } else {
+    Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
     Expect(e2e.DestroyCluster()).To(Succeed())
     Expect(os.Remove(kubeConfigFile)).To(Succeed())
   }
diff --git a/tests/e2e/vagrantdefaults.rb b/tests/e2e/vagrantdefaults.rb
index 960bbd60183d..6bdb03e001d4 100644
--- a/tests/e2e/vagrantdefaults.rb
+++ b/tests/e2e/vagrantdefaults.rb
@@ -34,7 +34,10 @@ def getInstallType(vm, release_version, branch)
   end
 end
 
-def addCoverageDir(vm, role)
+def addCoverageDir(vm, role, gocover)
+  if gocover.empty?
+    return
+  end
   service = role.include?("agent") ? "k3s-agent" : "k3s"
   script = <<~SHELL
     mkdir -p /tmp/k3scov
diff --git a/tests/e2e/validatecluster/Vagrantfile b/tests/e2e/validatecluster/Vagrantfile
index 0f369ebb1bca..4fd3b204ea57 100644
--- a/tests/e2e/validatecluster/Vagrantfile
+++ b/tests/e2e/validatecluster/Vagrantfile
@@ -39,10 +39,7 @@ def provision(vm, role, role_num, node_num)
   if !REGISTRY.empty?
     vm.provision "Set private registry", type: "shell", path: scripts_location + "/registry.sh", args: [ "#{NETWORK_PREFIX}.1" ]
   end
-
-  if !GOCOVER.empty?
-    addCoverageDir(vm, role)
-  end
+  addCoverageDir(vm, role, GOCOVER)
 
   if role.include?("server") && role_num == 0
     vm.provision 'k3s-primary-server', type: 'k3s', run: 'once' do |k3s|
diff --git a/tests/e2e/validatecluster/validatecluster_test.go b/tests/e2e/validatecluster/validatecluster_test.go
index 24b2af42fe90..abebe3f2c7c3 100644
--- a/tests/e2e/validatecluster/validatecluster_test.go
+++ b/tests/e2e/validatecluster/validatecluster_test.go
@@ -385,12 +385,10 @@ var _ = AfterEach(func() {
 })
 
 var _ = AfterSuite(func() {
-  if os.Getenv("E2E_GOCOVER") != "" {
-    Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
-  }
   if failed && !*ci {
     fmt.Println("FAILED!")
   } else {
+    Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
     Expect(e2e.DestroyCluster()).To(Succeed())
     Expect(os.Remove(kubeConfigFile)).To(Succeed())
   }