Skip to content

Commit

Permalink
Refactor common util functions for /test
Browse files Browse the repository at this point in the history
Prior to this commit, the util functions for e2e tests were distributed
over multiple test files in /test, and we had to tag specific test
files for unrelated test suites that need such util functions. For
example, init_test has the 'conformance' build tag on it, which is not
necessarily the case, as the conformance test suite does not need
init_test but needs the setup there.

This commit cleans up the util functions scattered across different test
files and puts them into /test/util so that we can tag the appropriate
test files for build only.
  • Loading branch information
JeromeJu authored and tekton-robot committed Dec 4, 2023
1 parent 0749de6 commit a179226
Show file tree
Hide file tree
Showing 5 changed files with 319 additions and 292 deletions.
21 changes: 0 additions & 21 deletions test/custom_task_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,6 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
knativetest "knative.dev/pkg/test"
Expand Down Expand Up @@ -696,26 +695,6 @@ func TestWaitCustomTask_V1_PipelineRun(t *testing.T) {
}
}

// updateConfigMap sets the given key/value pairs in the ConfigMap configName
// that lives in namespace name, creating the Data map if it is nil. We can't
// use the helper from knativetest because it assumes Data is already a non-nil
// map, and by default it isn't!
func updateConfigMap(ctx context.Context, client kubernetes.Interface, name string, configName string, values map[string]string) error {
	cmClient := client.CoreV1().ConfigMaps(name)
	cm, err := cmClient.Get(ctx, configName, metav1.GetOptions{})
	if err != nil {
		return err
	}

	// A ConfigMap fetched from the API server may have a nil Data map.
	if cm.Data == nil {
		cm.Data = map[string]string{}
	}
	for k, v := range values {
		cm.Data[k] = v
	}

	_, err = cmClient.Update(ctx, cm, metav1.UpdateOptions{})
	return err
}

func resetConfigMap(ctx context.Context, t *testing.T, c *clients, namespace, configName string, values map[string]string) {
t.Helper()
if err := updateConfigMap(ctx, c.KubeClient, namespace, configName, values); err != nil {
Expand Down
224 changes: 0 additions & 224 deletions test/init_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,165 +22,22 @@ limitations under the License.
package test

import (
"context"
"flag"
"fmt"
"os"
"strings"
"sync"
"testing"

"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/names"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // Mysteriously by k8s libs, or they fail to create `KubeClient`s when using oidc authentication. Apparently just importing it is enough. @_@ side effects @_@. https://github.com/kubernetes/client-go/issues/345
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
"knative.dev/pkg/system"
knativetest "knative.dev/pkg/test"
"knative.dev/pkg/test/logging" // Mysteriously by k8s libs, or they fail to create `KubeClient`s from config. Apparently just importing it is enough. @_@ side effects @_@. https://github.com/kubernetes/client-go/issues/242
"knative.dev/pkg/test/logstream"
"sigs.k8s.io/yaml"
)

var initMetrics sync.Once
var skipRootUserTests = false

func init() {
flag.BoolVar(&skipRootUserTests, "skipRootUserTests", false, "Skip tests that require root user")
}

// setup prepares an isolated e2e test environment: it generates a random
// "arendelle-..." namespace, initializes logging/metrics, streams controller
// logs into t.Log, builds the typed clients, creates the namespace, waits for
// its default ServiceAccount, and finally runs any caller-supplied hooks.
// It returns the clients and the namespace name; tearDown is the counterpart.
func setup(ctx context.Context, t *testing.T, fn ...func(context.Context, *testing.T, *clients, string)) (*clients, string) {
	// Fix: the original called t.Helper() twice; once is sufficient.
	t.Helper()
	skipIfExcluded(t)

	namespace := names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("arendelle")

	initializeLogsAndMetrics(t)

	// Inline controller logs from SYSTEM_NAMESPACE into the t.Log output.
	cancel := logstream.Start(t)
	t.Cleanup(cancel)

	c := newClients(t, knativetest.Flags.Kubeconfig, knativetest.Flags.Cluster, namespace)
	createNamespace(ctx, t, namespace, c.KubeClient)
	verifyServiceAccountExistence(ctx, t, namespace, c.KubeClient)

	// Optional per-test customization hooks run after the namespace is ready.
	for _, f := range fn {
		f(ctx, t, c, namespace)
	}

	return c, namespace
}

// header logs text decorated as "### text ###" between two bars of '#'
// characters sized to match, making section boundaries easy to spot in
// verbose test output.
func header(t *testing.T, text string) {
	t.Helper()
	left := "### "
	right := " ###"
	txt := left + text + right
	bar := strings.Repeat("#", len(txt))
	// Fix: use t.Log, not t.Logf — txt and bar are not format strings, and a
	// '%' inside text would otherwise be misinterpreted as a formatting verb
	// (go vet flags non-constant format strings for exactly this reason).
	t.Log(bar)
	t.Log(txt)
	t.Log(bar)
}

// tearDown is the counterpart to setup: on test failure it dumps the Tekton
// CRDs as YAML plus the logs of every TaskRun pod in the namespace, then
// deletes the namespace unless the test failed or TEST_KEEP_NAMESPACES is set
// (both cases keep it around for post-mortem inspection).
func tearDown(ctx context.Context, t *testing.T, cs *clients, namespace string) {
	t.Helper()
	if cs.KubeClient == nil {
		return
	}
	if t.Failed() {
		header(t, fmt.Sprintf("Dumping objects from %s", namespace))
		bs, err := getCRDYaml(ctx, cs, namespace)
		if err != nil {
			t.Error(err)
		} else {
			t.Log(string(bs))
		}
		header(t, fmt.Sprintf("Dumping logs from Pods in the %s", namespace))
		taskRuns, err := cs.V1TaskRunClient.List(ctx, metav1.ListOptions{})
		if err != nil {
			// Fix: the original fell through and ranged taskRuns.Items even on
			// error, where taskRuns is nil — a nil-pointer panic that would mask
			// the real test failure. Only walk the list when List succeeded.
			t.Errorf("Error listing TaskRuns: %s", err)
		} else {
			for _, tr := range taskRuns.Items {
				if tr.Status.PodName != "" {
					CollectPodLogs(ctx, cs, tr.Status.PodName, namespace, t.Logf)
				}
			}
		}
	}

	if os.Getenv("TEST_KEEP_NAMESPACES") == "" && !t.Failed() {
		t.Logf("Deleting namespace %s", namespace)
		if err := cs.KubeClient.CoreV1().Namespaces().Delete(ctx, namespace, metav1.DeleteOptions{}); err != nil {
			t.Errorf("Failed to delete namespace %s: %s", namespace, err)
		}
	} else {
		t.Logf("Not deleting namespace %s", namespace)
	}
}

// initializeLogsAndMetrics performs the once-per-binary log and metric setup
// (the underlying libraries use global state), guarded by initMetrics so that
// concurrent/repeated test setups only run it a single time.
func initializeLogsAndMetrics(t *testing.T) {
	t.Helper()
	initMetrics.Do(func() {
		flag.Parse()
		// Fix: flag.Set returns an error that was silently dropped; surface it
		// so a renamed/removed flag does not go unnoticed.
		if err := flag.Set("alsologtostderr", "true"); err != nil {
			t.Errorf("Failed to set 'alsologtostderr' flag: %s", err)
		}
		logging.InitializeLogger()

		// The metric exporter is initialized unconditionally; the old
		// knativetest.Flags.EmitMetrics gate was intentionally removed.
		logging.InitializeMetricExporter(t.Name())
	})
}

// createNamespace creates the given namespace, labeled tekton.dev/test-e2e=true
// so e2e-created namespaces are identifiable, and fails the test on error.
func createNamespace(ctx context.Context, t *testing.T, namespace string, kubeClient kubernetes.Interface) {
	t.Helper()
	t.Logf("Create namespace %s to deploy to", namespace)
	ns := &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: namespace,
			Labels: map[string]string{
				"tekton.dev/test-e2e": "true",
			},
		},
	}
	if _, err := kubeClient.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}); err != nil {
		t.Fatalf("Failed to create namespace %s for tests: %s", namespace, err)
	}
}

// getDefaultSA returns the default service account configured in the Tekton
// config-defaults ConfigMap (key "default-service-account"), falling back to
// "default" when the key is absent. It fails the test if the ConfigMap cannot
// be fetched.
// NOTE(review): the namespace parameter is currently unused — the lookup reads
// from system.Namespace(); confirm whether callers expect per-namespace config.
func getDefaultSA(ctx context.Context, t *testing.T, kubeClient kubernetes.Interface, namespace string) string {
	t.Helper()
	cm, err := kubeClient.CoreV1().ConfigMaps(system.Namespace()).Get(ctx, config.GetDefaultsConfigName(), metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to get ConfigMap `%s`: %s", config.GetDefaultsConfigName(), err)
	}
	if sa, ok := cm.Data["default-service-account"]; ok {
		return sa
	}
	return "default"
}

// verifyServiceAccountExistence polls until the namespace's default service
// account exists (it is created asynchronously by the service account
// controller), failing the test if it does not appear within the timeout.
func verifyServiceAccountExistence(ctx context.Context, t *testing.T, namespace string, kubeClient kubernetes.Interface) {
	t.Helper()
	defaultSA := getDefaultSA(ctx, t, kubeClient, namespace)
	t.Logf("Verify SA %q is created in namespace %q", defaultSA, namespace)

	saExists := func() (bool, error) {
		_, err := kubeClient.CoreV1().ServiceAccounts(namespace).Get(ctx, defaultSA, metav1.GetOptions{})
		if errors.IsNotFound(err) {
			// Not created yet — keep polling.
			return false, nil
		}
		// Found (err == nil) or an unexpected error: stop polling either way.
		return true, err
	}
	if err := wait.PollImmediate(interval, timeout, saExists); err != nil {
		t.Fatalf("Failed to get SA %q in namespace %q for tests: %s", defaultSA, namespace, err)
	}
}

// TestMain initializes anything global needed by the tests. Right now this is just log and metric
// setup since the log and metric libs we're using use global state :(
func TestMain(m *testing.M) {
Expand All @@ -189,84 +46,3 @@ func TestMain(m *testing.M) {
fmt.Fprintf(os.Stderr, "Using kubeconfig at `%s` with cluster `%s`\n", knativetest.Flags.Kubeconfig, knativetest.Flags.Cluster)
os.Exit(c)
}

// getCRDYaml renders the Tekton resources (v1beta1 ClusterTasks and
// CustomRuns; v1 Tasks, TaskRuns, Pipelines and PipelineRuns) plus the Pods in
// namespace ns as a single multi-document YAML dump, for debugging failed
// tests. Managed fields are stripped to reduce noise; objects that fail to
// marshal are silently skipped.
func getCRDYaml(ctx context.Context, cs *clients, ns string) ([]byte, error) {
	var out []byte
	appendYaml := func(obj interface{}) {
		bs, err := yaml.Marshal(obj)
		if err != nil {
			// Best-effort dump: skip anything that cannot be marshaled.
			return
		}
		out = append(out, []byte("\n---\n")...)
		out = append(out, bs...)
	}

	v1beta1ClusterTasks, err := cs.V1beta1ClusterTaskClient.List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("could not get v1beta1 clustertasks: %w", err)
	}
	for idx := range v1beta1ClusterTasks.Items {
		item := v1beta1ClusterTasks.Items[idx]
		item.SetManagedFields(nil)
		appendYaml(item)
	}

	v1Tasks, err := cs.V1TaskClient.List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("could not get v1 tasks: %w", err)
	}
	for idx := range v1Tasks.Items {
		item := v1Tasks.Items[idx]
		item.SetManagedFields(nil)
		appendYaml(item)
	}

	v1TaskRuns, err := cs.V1TaskRunClient.List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("could not get v1 taskruns: %w", err)
	}
	for idx := range v1TaskRuns.Items {
		item := v1TaskRuns.Items[idx]
		item.SetManagedFields(nil)
		appendYaml(item)
	}

	v1Pipelines, err := cs.V1PipelineClient.List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("could not get v1 pipeline: %w", err)
	}
	for idx := range v1Pipelines.Items {
		item := v1Pipelines.Items[idx]
		item.SetManagedFields(nil)
		appendYaml(item)
	}

	v1PipelineRuns, err := cs.V1PipelineRunClient.List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("could not get v1 pipelinerun: %w", err)
	}
	for idx := range v1PipelineRuns.Items {
		item := v1PipelineRuns.Items[idx]
		item.SetManagedFields(nil)
		appendYaml(item)
	}

	v1beta1CustomRuns, err := cs.V1beta1CustomRunClient.List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("could not get v1beta1 customruns: %w", err)
	}
	for idx := range v1beta1CustomRuns.Items {
		item := v1beta1CustomRuns.Items[idx]
		item.SetManagedFields(nil)
		appendYaml(item)
	}

	pods, err := cs.KubeClient.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("could not get pods: %w", err)
	}
	for idx := range pods.Items {
		pod := pods.Items[idx]
		// Ignore gitea pods for SCM resolver test
		if strings.HasPrefix(pod.Name, "gitea-") {
			continue
		}
		pod.SetManagedFields(nil)
		appendYaml(pod)
	}

	return out, nil
}
28 changes: 0 additions & 28 deletions test/pipelinefinally_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,11 +30,8 @@ import (
"github.com/tektoncd/pipeline/test/diff"
"github.com/tektoncd/pipeline/test/parse"
jsonpatch "gomodules.xyz/jsonpatch/v2"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
knativetest "knative.dev/pkg/test"
"knative.dev/pkg/test/helpers"
)
Expand Down Expand Up @@ -691,31 +688,6 @@ spec:
}
}

// isSuccessful reports whether conds contains a Succeeded condition, and marks
// the test failed (without returning false) if that condition's status is not
// True. Absence of any Succeeded condition also fails the test and returns
// false.
func isSuccessful(t *testing.T, taskRunName string, conds duckv1.Conditions) bool {
	t.Helper()
	for _, cond := range conds {
		if cond.Type != apis.ConditionSucceeded {
			continue
		}
		if cond.Status != corev1.ConditionTrue {
			t.Errorf("TaskRun status %q is not succeeded, got %q", taskRunName, cond.Status)
		}
		return true
	}
	t.Errorf("TaskRun status %q had no Succeeded condition", taskRunName)
	return false
}

// isCancelled reports whether conds contains a Succeeded condition at all,
// failing the test and returning false when none is present.
// NOTE(review): unlike isSuccessful/isFailed, this does not inspect the
// condition's status or reason, so a run that merely completed would also
// return true — confirm callers only rely on the condition's presence.
func isCancelled(t *testing.T, taskRunName string, conds duckv1.Conditions) bool {
	t.Helper()
	for idx := range conds {
		if conds[idx].Type == apis.ConditionSucceeded {
			return true
		}
	}
	t.Errorf("TaskRun status %q had no Succeeded condition", taskRunName)
	return false
}

func getSuccessTask(t *testing.T, namespace string) *v1.Task {
t.Helper()
return parse.MustParseV1Task(t, fmt.Sprintf(`
Expand Down
19 changes: 0 additions & 19 deletions test/retry_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,10 +26,7 @@ import (
"time"

"github.com/tektoncd/pipeline/test/parse"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
knativetest "knative.dev/pkg/test"
"knative.dev/pkg/test/helpers"
)
Expand Down Expand Up @@ -139,19 +136,3 @@ spec:
}
}
}

// isFailed reports whether conds contains a Succeeded condition, and marks the
// test failed (without returning false) if that condition's status is not
// False. This method is necessary because PipelineRunTaskRunStatus and
// TaskRunStatus don't have an IsFailed method.
func isFailed(t *testing.T, taskRunName string, conds duckv1.Conditions) bool {
	t.Helper()
	for _, cond := range conds {
		if cond.Type != apis.ConditionSucceeded {
			continue
		}
		if cond.Status != corev1.ConditionFalse {
			t.Errorf("TaskRun status %q is not failed, got %q", taskRunName, cond.Status)
		}
		return true
	}
	t.Errorf("TaskRun status %q had no Succeeded condition", taskRunName)
	return false
}
Loading

0 comments on commit a179226

Please sign in to comment.