diff --git a/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml
new file mode 100644
index 0000000000..736dc53f2f
--- /dev/null
+++ b/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml
@@ -0,0 +1,84 @@
+parameters:
+  name: ""
+  displayName: ""
+  clusterType: ""
+  clusterName: ""
+  vmSize: ""
+  k8sVersion: ""
+  dependsOn: ""
+  nodePoolName: ""
+  os: ""
+  dummyClusterName: ""
+  dummyClusterType: ""
+  dummyClusterDisplayName: ""
+
+stages:
+  - stage: ${{ parameters.clusterName }}
+    displayName: Create Cluster - ${{ parameters.displayName }}
+    dependsOn:
+      - ${{ parameters.dependsOn }}
+      - setup
+    pool:
+      name: $(BUILD_POOL_NAME_DEFAULT)
+    variables:
+      commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
+    jobs:
+      - template: ../templates/create-cluster-swiftv2.yaml
+        parameters:
+          name: ${{ parameters.name }}
+          displayName: ${{ parameters.displayName }}
+          clusterType: ${{ parameters.clusterType }}
+          clusterName: ${{ parameters.clusterName }}-$(commitID)
+          vmSize: ${{ parameters.vmSize }}
+          k8sVersion: ${{ parameters.k8sVersion }}
+          dependsOn: ${{ parameters.dependsOn }}
+          region: $(REGION_SWIFTV2_CLUSTER_TEST) # Swiftv2 has specific region requirements
+
+  - stage: ${{ parameters.name }}
+    # condition: and( succeeded(), not(eq(dependencies.dualstackoverlaye2e.result,'SucceededWithIssues')) ) # Can't use parameters in dependencies
+    displayName: E2E - ${{ parameters.displayName }}
+    dependsOn:
+      - setup
+      - publish
+      - ${{ parameters.clusterName }}
+    variables:
+      GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path
+      GOBIN: "$(GOPATH)/bin" # Go binaries path
+      modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking"
+      commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
+    pool:
+      name: $(BUILD_POOL_NAME_DEFAULT)
+    jobs:
+      - job: ${{ parameters.name }}
+        displayName: Swiftv2 Multitenancy E2E Test Suite - (${{ parameters.name }})
+        pool:
+          name: $(BUILD_POOL_NAME_DEFAULT)
+          demands:
+            - agent.os -equals Linux
+            - Role -equals $(CUSTOM_E2E_ROLE)
+        steps:
+          - template: swiftv2-e2e-step-template.yaml
+            parameters:
+              name: ${{ parameters.name }}
+              clusterName: ${{ parameters.clusterName }}-$(commitID)
+              os: linux
+
+      - template: ../cni/k8s-e2e/k8s-e2e-job-template.yaml
+        parameters:
+          sub: $(ACN_TEST_SERVICE_CONNECTION)
+          clusterName: ${{ parameters.clusterName }}-$(commitID)
+          os: ${{ parameters.os }}
+          dependsOn: ${{ parameters.name }}
+          datapath: true
+          dns: true
+          portforward: true
+          hostport: true
+          service: true
+
+      - job: failedE2ELogs
+        displayName: "Failure Logs"
+        dependsOn:
+          - ${{ parameters.name }}
+        condition: failed()
+        steps:
+          - template: ../templates/log-template.yaml
+            parameters:
+              clusterName: ${{ parameters.clusterName }}-$(commitID)
+              os: linux
+              cni: cniv2
diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
new file mode 100644
index 0000000000..86cb37bc55
--- /dev/null
+++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
@@ -0,0 +1,75 @@
+parameters:
+  name: ""
+  clusterName: ""
+  cni: cniv2
+  os: ""
+
+steps:
+  - bash: |
+      go version
+      go env
+      mkdir -p '$(GOBIN)'
+      mkdir -p '$(GOPATH)/pkg'
+      mkdir -p '$(modulePath)'
+      echo '##vso[task.prependpath]$(GOBIN)'
+      echo '##vso[task.prependpath]$(GOROOT)/bin'
+    name: "GoEnv"
+    displayName: "Set up the Go environment"
+
+  - task: KubectlInstaller@0
+    inputs:
+      kubectlVersion: latest
+
+  - task: AzureCLI@1
+    inputs:
+      azureSubscription: $(ACN_TEST_SERVICE_CONNECTION)
+      scriptLocation: "inlineScript"
"inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + ls -lah + pwd + kubectl cluster-info + kubectl get po -owide -A + echo "Apply the pod network yaml to start the delegation" + less test/integration/manifests/swiftv2/podnetwork.yaml + envsubst '${SUBNET_TOKEN},${SUBNET_GUID},${SUBNET_RESOURCE_ID},${VNET_GUID}' < test/integration/manifests/swiftv2/podnetwork.yaml | kubectl apply -f - + echo "Check the podnetwork yaml file" + less test/integration/manifests/swiftv2/podnetwork.yaml + kubectl get pn + kubectl describe pn + echo "Apply the pod network instance yaml to reserve IP" + kubectl apply -f test/integration/manifests/swiftv2/pni.yaml + kubectl get pni + kubectl describe pni + export NODE_NAME_0="$(kubectl get nodes -o json | jq -r .items[0].metadata.name)" + echo $NODE_NAME_0 + echo "Start the first pod using the reserved IP" + envsubst '$NODE_NAME_0' < test/integration/manifests/swiftv2/mtpod0.yaml | kubectl apply -f - + export NODE_NAME_1="$(kubectl get nodes -o json | jq -r .items[1].metadata.name)" + echo $NODE_NAME_1 + echo "Start another pod using the reserved IP" + envsubst '$NODE_NAME_1' < test/integration/manifests/swiftv2/mtpod1.yaml | kubectl apply -f - + sleep 2m + kubectl get pod -o wide -A + sleep 2m + echo "Check pods after 4 minutes" + kubectl get po -owide -A + kubectl describe pni + name: "start_swiftv2_pods" + displayName: "Start Swiftv2 Pods" + env: + SUBNET_TOKEN: $(SUBNET_TOKEN) + + - script: | + set -e + kubectl get po -owide -A + cd test/integration/swiftv2 + echo "TestSwiftv2PodToPod and will run it after migration from scripts." + go test -count=1 swiftv2_test.go -timeout 3m -tags swiftv2 -run ^TestSwiftv2PodToPod$ -tags=swiftv2,integration -v + retryCountOnTaskFailure: 3 + name: "Swiftv2_Tests_future_version" + displayName: "Swiftv2 Tests through code" + diff --git a/.pipelines/pipeline.yaml b/.pipelines/pipeline.yaml index 12b74394ae..2cb68aba4a 100644 --- a/.pipelines/pipeline.yaml +++ b/.pipelines/pipeline.yaml @@ -504,6 +504,21 @@ stages: vmSize: Standard_B2ms dependsOn: "test" + # Swiftv2 E2E tests with multitenancy cluster start up + - template: multitenancy/swiftv2-e2e-job-template.yaml + parameters: + name: "swiftv2_e2e" + displayName: Swiftv2 Multitenancy + os: linux + clusterType: swiftv2-multitenancy-cluster-up + clusterName: "mtacluster" + nodePoolName: "mtapool" + vmSize: Standard_D4_v2 + dependsOn: "test" + dummyClusterName: "swiftv2dummy" + dummyClusterType: "swiftv2-dummy-cluster-up" + dummyClusterDisplayName: Swiftv2 Multitenancy Dummy Cluster + - stage: delete displayName: Delete Clusters condition: always() @@ -518,6 +533,7 @@ stages: - aks_windows_22_e2e - dualstackoverlay_e2e - cilium_dualstackoverlay_e2e + - swiftv2_e2e variables: commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] jobs: @@ -566,6 +582,12 @@ stages: cilium_dualstackoverlay_e2e: name: cilium_dualstackoverlay_e2e clusterName: "cildsovere2e" + swiftv2_e2e: + name: swiftv2_e2e + clusterName: "mtcluster" + swiftv2_dummy_e2e: + name: swiftv2_dummy_e2e + clusterName: "swiftv2dummy" steps: - template: templates/delete-cluster.yaml parameters: diff --git a/.pipelines/templates/create-cluster-swiftv2.yaml b/.pipelines/templates/create-cluster-swiftv2.yaml new file mode 100644 index 0000000000..4bf79fac9d --- /dev/null +++ b/.pipelines/templates/create-cluster-swiftv2.yaml @@ -0,0 +1,34 @@ +parameters: + os: linux + +jobs: + - 
+  - job: ${{ parameters.name }}
+    displayName: Cluster - ${{ parameters.name }}
+    steps:
+      - task: AzureCLI@1
+        inputs:
+          azureSubscription: $(ACN_TEST_SERVICE_CONNECTION)
+          scriptLocation: "inlineScript"
+          scriptType: "bash"
+          addSpnToEnvironment: true
+          inlineScript: |
+            set -e
+            echo "Check az version"
+            az version
+            if ${{ lower(contains(parameters.clusterType, 'dualstack')) }}
+            then
+              echo "Install az cli extension preview"
+              az extension add --name aks-preview
+              az extension update --name aks-preview
+            fi
+            mkdir -p ~/.kube/
+            make -C ./hack/aks azcfg AZCLI=az REGION=${{ parameters.region }}
+
+            make -C ./hack/aks ${{ parameters.clusterType }} \
+              AZCLI=az REGION=${{ parameters.region }} SUB=$(SUB_AZURE_NETWORK_AGENT_TEST) \
+              CLUSTER=${{ parameters.clusterName }} \
+              VM_SIZE=${{ parameters.vmSize }} OS=${{ parameters.os }}
+
+            echo "Cluster successfully created"
+        displayName: Cluster - ${{ parameters.clusterType }}
+        continueOnError: ${{ contains(parameters.clusterType, 'dualstack') }}
diff --git a/hack/aks/Makefile b/hack/aks/Makefile
index 0eef49717b..54b39d712f 100644
--- a/hack/aks/Makefile
+++ b/hack/aks/Makefile
@@ -233,6 +233,28 @@ swift-up: rg-up swift-net-up ## Bring up a SWIFT AzCNI cluster
 		--yes
 	@$(MAKE) set-kubeconf
 
+swiftv2-multitenancy-cluster-up: rg-up ## Bring up a SwiftV2 multitenancy overlay cluster
+	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
+		--network-plugin azure \
+		--network-plugin-mode overlay \
+		--kubernetes-version 1.28 \
+		--nodepool-name "mtapool" \
+		--node-vm-size Standard_D4_v2 \
+		--node-count 2 \
+		--nodepool-tags fastpathenabled=true \
+		--no-ssh-key \
+		--yes
+	@$(MAKE) set-kubeconf
+
+swiftv2-dummy-cluster-up: rg-up swift-net-up ## Bring up a SwiftV2 dummy cluster on the swift network
+	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
+		--network-plugin azure \
+		--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
+		--pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
+		--no-ssh-key \
+		--yes
+	@$(MAKE) set-kubeconf
+
 # The below Vnet Scale clusters are currently only in private preview and available with Kubernetes 1.28
 # These AKS clusters can only be created in a limited subscription listed here:
 # https://dev.azure.com/msazure/CloudNativeCompute/_git/aks-rp?path=/resourceprovider/server/microsoft.com/containerservice/flags/network_flags.go&version=GBmaster&line=134&lineEnd=135&lineStartColumn=1&lineEndColumn=1&lineStyle=plain&_a=contents
diff --git a/test/integration/manifests/swiftv2/mtpod0.yaml b/test/integration/manifests/swiftv2/mtpod0.yaml
new file mode 100644
index 0000000000..357c240582
--- /dev/null
+++ b/test/integration/manifests/swiftv2/mtpod0.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    kubernetes.azure.com/pod-network: aksswiftvnetv20425
+    kubernetes.azure.com/pod-network-instance: pni1
+  name: mtpod0
+  namespace: default
+spec:
+  containers:
+    - image: nicolaka/netshoot:latest
+      imagePullPolicy: Always
+      name: mtpod0
+      command: ["/bin/bash"]
+      args: ["-c", "while true; do ping localhost; sleep 60; done"]
+      securityContext:
+        privileged: true
+      ports:
+        - containerPort: 80
+          protocol: TCP
+  nodeSelector:
+    kubernetes.io/hostname: $NODE_NAME_0
\ No newline at end of file
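The two kubernetes.azure.com labels on the pod above are what bind it to the PodNetwork (aksswiftvnetv20425) and PodNetworkInstance (pni1) manifests added further down in this diff. As a quick sanity check once the pods are up, something like the following should show the binding (a sketch; the pn/pni short names are the ones the step template already uses, and the multitenantpodnetworkconfigs resource name comes from swiftv2_test.go):

    kubectl get pod mtpod0 -o jsonpath='{.metadata.labels}'
    kubectl describe pni pni1
    kubectl get multitenantpodnetworkconfigs -A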
diff --git a/test/integration/manifests/swiftv2/mtpod1.yaml b/test/integration/manifests/swiftv2/mtpod1.yaml
new file mode 100644
index 0000000000..4894ef8eed
--- /dev/null
+++ b/test/integration/manifests/swiftv2/mtpod1.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    kubernetes.azure.com/pod-network: aksswiftvnetv20425
+    kubernetes.azure.com/pod-network-instance: pni1
+  name: mtpod1
+  namespace: default
+spec:
+  containers:
+    - image: nicolaka/netshoot:latest
+      imagePullPolicy: Always
+      name: mtpod1
+      command: ["/bin/bash"]
+      args: ["-c", "while true; do ping localhost; sleep 60; done"]
+      securityContext:
+        privileged: true
+      ports:
+        - containerPort: 80
+          protocol: TCP
+  nodeSelector:
+    kubernetes.io/hostname: $NODE_NAME_1
\ No newline at end of file
diff --git a/test/integration/manifests/swiftv2/pni.yaml b/test/integration/manifests/swiftv2/pni.yaml
new file mode 100644
index 0000000000..04cd0847b5
--- /dev/null
+++ b/test/integration/manifests/swiftv2/pni.yaml
@@ -0,0 +1,7 @@
+apiVersion: multitenancy.acn.azure.com/v1alpha1
+kind: PodNetworkInstance
+metadata:
+  name: pni1
+spec:
+  podnetwork: aksswiftvnetv20425
+  podIPReservationSize: 2
diff --git a/test/integration/manifests/swiftv2/podnetwork.yaml b/test/integration/manifests/swiftv2/podnetwork.yaml
new file mode 100644
index 0000000000..ef289bb315
--- /dev/null
+++ b/test/integration/manifests/swiftv2/podnetwork.yaml
@@ -0,0 +1,10 @@
+apiVersion: multitenancy.acn.azure.com/v1alpha1
+kind: PodNetwork
+metadata:
+  labels:
+    kubernetes.azure.com/override-subnet-token: $SUBNET_TOKEN
+  name: aksswiftvnetv20425
+spec:
+  subnetGUID: $SUBNET_GUID
+  subnetResourceID: $SUBNET_RESOURCE_ID
+  vnetGUID: $VNET_GUID
\ No newline at end of file
diff --git a/test/integration/swiftv2/swiftv2_test.go b/test/integration/swiftv2/swiftv2_test.go
new file mode 100644
index 0000000000..3c2b1dda7e
--- /dev/null
+++ b/test/integration/swiftv2/swiftv2_test.go
@@ -0,0 +1,173 @@
+//go:build swiftv2
+
+package swiftv2
+
+import (
+	"context"
+	"flag"
+	"strings"
+	"testing"
+
+	"github.com/Azure/azure-container-networking/crd/multitenancy/api/v1alpha1"
+	"github.com/Azure/azure-container-networking/test/internal/kubernetes"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kuberneteslib "k8s.io/client-go/kubernetes"
+	restclient "k8s.io/client-go/rest"
+)
+
+const (
+	pniKey                   = "kubernetes.azure.com/pod-network-instance"
+	podCount                 = 2
+	nodepoolKey              = "agentpool"
+	podNetworkYaml           = "../manifests/swiftv2/podnetwork.yaml"
+	mtpodYaml                = "../manifests/swiftv2/mtpod0.yaml"
+	pniYaml                  = "../manifests/swiftv2/pni.yaml"
+	maxRetryDelaySeconds     = 10
+	defaultTimeoutSeconds    = 120
+	defaultRetryDelaySeconds = 1
+	IpsInAnotherCluster      = "172.25.0.7" // IP in another cluster on the same network, used for cross-cluster reachability
+	namespace                = "default"
+)
+
+var (
+	podPrefix        = flag.String("podnetworkinstance", "pni1", "name of the PodNetworkInstance used by the test pods")
+	podNamespace     = flag.String("namespace", "default", "namespace for the test pods")
+	nodepoolSelector = flag.String("nodelabel", "mtapool", "value of the agentpool node label used to select test nodes")
+)
+
+/*
+This test assumes that the current credentials in your default kubeconfig point to a
+k8s cluster with a Linux nodepool consisting of at least 2 Linux nodes.
+*** The expected nodepool name is mtapool; if the nodepool has a different name, override the selector with:
+	-nodelabel="yournodepoolname"
+
+This test checks pod-to-pod, pod-to-node, and pod-to-Internet connectivity.
+
+The test timeout is controlled by the -timeout flag.
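+
+Example invocation (a sketch; assumes the kubeconfig already points at the SwiftV2
+cluster and the pods created by the pipeline step template are running):
+
+	cd test/integration/swiftv2
+	go test -count=1 -tags=swiftv2,integration -timeout 3m -run '^TestSwiftv2PodToPod$' -v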
+*/
+
+func setupLinuxEnvironment(t *testing.T) {
+	ctx := context.Background()
+
+	t.Log("Create Clientset")
+	clientset := kubernetes.MustGetClientset()
+
+	t.Log("Create Label Selectors")
+	podLabelSelector := kubernetes.CreateLabelSelector(pniKey, podPrefix)
+	nodeLabelSelector := kubernetes.CreateLabelSelector(nodepoolKey, nodepoolSelector)
+
+	t.Log("Get Nodes")
+	nodes, err := kubernetes.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector)
+	if err != nil {
+		t.Fatalf("could not get k8s node list: %v", err)
+	}
+
+	t.Log("Waiting for pods to reach Running state")
+	err = kubernetes.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
+	if err != nil {
+		t.Fatalf("pods did not reach Running state: %+v", err)
+	}
+
+	t.Log("Successfully created customer Linux pods")
+
+	t.Log("Checking the number of swiftv2 multitenant pods")
+	for _, node := range nodes.Items {
+		pods, err := kubernetes.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name)
+		if err != nil {
+			t.Fatalf("could not get pods on node %s: %v", node.Name, err)
+		}
+		if len(pods.Items) < 1 {
+			t.Fatalf("no pod on node: %v", node.Name)
+		}
+	}
+
+	t.Log("Linux test environment ready")
+}
+
+func GetMultitenantPodNetworkConfig(t *testing.T, ctx context.Context, namespace, name string) v1alpha1.MultitenantPodNetworkConfig {
+	config := kubernetes.MustGetRestConfig()
+	t.Logf("rest config host is %s", config.Host)
+	crdClient, err := kubernetes.GetRESTClientForMultitenantCRDFromConfig(config)
+	if err != nil {
+		t.Fatalf("failed to get multitenant crd rest client: %s", err)
+	}
+	var mtpnc v1alpha1.MultitenantPodNetworkConfig
+	err = crdClient.Get().Namespace(namespace).Resource("multitenantpodnetworkconfigs").Name(name).Do(ctx).Into(&mtpnc)
+	if err != nil {
+		t.Errorf("failed to retrieve multitenantpodnetworkconfig: error: %s", err)
+	}
+	if mtpnc.Status.MacAddress == "" || mtpnc.Status.PrimaryIP == "" {
+		t.Errorf("mtpnc.Status.MacAddress is %q and mtpnc.Status.PrimaryIP is %q; neither may be empty",
+			mtpnc.Status.MacAddress, mtpnc.Status.PrimaryIP)
+	}
+	return mtpnc
+}
+
+func TestSwiftv2PodToPod(t *testing.T) {
+	kubeconfigPath := *kubernetes.GetKubeconfig()
+	t.Logf("TestSwiftv2PodToPod kubeconfig is %v", kubeconfigPath)
+
+	ctx := context.Background()
+
+	t.Log("Create Clientset")
+	clientset := kubernetes.MustGetClientset()
+	t.Log("Get Clientset config")
+	restConfig := kubernetes.MustGetRestConfig()
+	t.Logf("rest config host is %s", restConfig.Host)
+
+	t.Log("Create Label Selectors")
+	podLabelSelector := kubernetes.CreateLabelSelector(pniKey, podPrefix)
+
+	t.Log("Checking swiftv2 multitenant pods and collecting their IPs")
+	ipsToPing := make([]string, 0, podCount+1)
+
+	podsClient := clientset.CoreV1().Pods(namespace)
+	allPods, err := podsClient.List(ctx, metav1.ListOptions{LabelSelector: podLabelSelector})
+	if err != nil {
+		t.Fatalf("could not get pods from clientset: %v", err)
+	}
+	for _, pod := range allPods.Items {
+		t.Logf("Pod name is %s", pod.Name)
+		mtpnc := GetMultitenantPodNetworkConfig(t, ctx, pod.Namespace, pod.Name)
+		if len(pod.Status.PodIPs) != 1 {
+			t.Fatalf("expected the pod to have exactly one IP, got %d", len(pod.Status.PodIPs))
+		}
+		// strip the /32 suffix from PrimaryIP
+		splitcidr := strings.Split(mtpnc.Status.PrimaryIP, "/")
+		if len(splitcidr) != 2 {
+			t.Fatalf("failed to split the pod's primary IP from its CIDR suffix: %s", mtpnc.Status.PrimaryIP)
+		}
+		ipsToPing = append(ipsToPing, splitcidr[0])
+	}
+	ipsToPing = append(ipsToPing, IpsInAnotherCluster)
+	t.Log("Linux test environment ready")
+
+	for _, pod := range allPods.Items {
+		for _, ip := range ipsToPing {
+			t.Logf("ping from pod %q to %q", pod.Name, ip)
+			result := podTest(t, ctx, clientset, pod, []string{"ping", "-c", "3", ip}, restConfig)
+			if result != nil {
+				t.Errorf("ping %q failed: error: %s", ip, result)
+			}
+		}
+	}
+}
+
+func podTest(t *testing.T, ctx context.Context, clientset *kuberneteslib.Clientset, srcPod v1.Pod, cmd []string, rc *restclient.Config) error {
+	output, err := kubernetes.ExecCmdOnPod(ctx, clientset, srcPod.Namespace, srcPod.Name, cmd, rc)
+	t.Log(string(output))
+	if err != nil {
+		t.Errorf("failed to execute command on pod: %v", srcPod.Name)
+	}
+	return err
+}
diff --git a/test/internal/kubernetes/utils.go b/test/internal/kubernetes/utils.go
index aa9122d154..f6584ab974 100644
--- a/test/internal/kubernetes/utils.go
+++ b/test/internal/kubernetes/utils.go
@@ -11,6 +11,7 @@ import (
 	"path/filepath"
 	"time"
 
+	"github.com/Azure/azure-container-networking/crd/multitenancy/api/v1alpha1"
 	"github.com/Azure/azure-container-networking/test/internal/retry"
 	"github.com/pkg/errors"
 	appsv1 "k8s.io/api/apps/v1"
@@ -18,6 +19,8 @@ import (
 	v1 "k8s.io/api/rbac/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
 	"k8s.io/apimachinery/pkg/util/yaml"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
@@ -47,6 +50,10 @@ const (
 
 var Kubeconfig = flag.String("test-kubeconfig", filepath.Join(homedir.HomeDir(), ".kube", "config"), "(optional) absolute path to the kubeconfig file")
 
+// GetKubeconfig returns the kubeconfig path flag registered by this package.
+func GetKubeconfig() *string {
+	return Kubeconfig
+}
+
 func MustGetClientset() *kubernetes.Clientset {
 	config, err := clientcmd.BuildConfigFromFlags("", *Kubeconfig)
 	if err != nil {
@@ -67,6 +74,47 @@ func MustGetRestConfig() *rest.Config {
 	return config
 }
 
+// GetRESTClientForMultitenantCRDFromConfig returns a REST client for the
+// multitenancy.acn.azure.com/v1alpha1 API group built from the given rest.Config.
+func GetRESTClientForMultitenantCRDFromConfig(config *rest.Config) (*rest.RESTClient, error) {
+	schemeLocal := runtime.NewScheme()
+	err := v1alpha1.AddToScheme(schemeLocal)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to AddToScheme")
+	}
+	config = rest.CopyConfig(config) // avoid mutating the caller's config
+	config.ContentConfig.GroupVersion = &v1alpha1.GroupVersion
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.NewCodecFactory(schemeLocal)
+	config.UserAgent = rest.DefaultKubernetesUserAgent()
+	client, err := rest.UnversionedRESTClientFor(config)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to UnversionedRESTClientFor config")
+	}
+	return client, nil
+}
+
+// GetRESTClientForMultitenantCRD builds the same client from raw kubeconfig contents.
+func GetRESTClientForMultitenantCRD(kubeconfig string) (*rest.RESTClient, error) {
+	schemeLocal := runtime.NewScheme()
+	err := v1alpha1.AddToScheme(schemeLocal)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to AddToScheme")
+	}
+
+	restConfig, err := clientcmd.RESTConfigFromKubeConfig([]byte(kubeconfig))
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get RESTConfigFromKubeConfig")
+	}
+
+	restConfig.ContentConfig.GroupVersion = &v1alpha1.GroupVersion
+	restConfig.APIPath = "/apis"
+	restConfig.NegotiatedSerializer = serializer.NewCodecFactory(schemeLocal)
+	restConfig.UserAgent = rest.DefaultKubernetesUserAgent()
+
+	client, err := rest.UnversionedRESTClientFor(restConfig)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to UnversionedRESTClientFor config")
+	}
+	return client, nil
+}
+
 func mustParseResource(path string, out interface{}) {
 	f, err := os.Open(path)
 	if err != nil {
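GetRESTClientForMultitenantCRD (the raw-kubeconfig variant above) is added here but not yet called anywhere in this change. A minimal sketch of how it could be exercised from a test; the test name, the "mtpod0" resource name, and the extra "os" import are assumptions, while the flag and resource usage mirror swiftv2_test.go:

	// Hypothetical test exercising the raw-kubeconfig client variant.
	func TestMultitenantCRDClientFromRawKubeconfig(t *testing.T) {
		ctx := context.Background()
		// RESTConfigFromKubeConfig expects the kubeconfig contents, not a path, so read the file first.
		raw, err := os.ReadFile(*kubernetes.GetKubeconfig())
		if err != nil {
			t.Fatalf("failed to read kubeconfig: %v", err)
		}
		crdClient, err := kubernetes.GetRESTClientForMultitenantCRD(string(raw))
		if err != nil {
			t.Fatalf("failed to build multitenancy CRD client: %v", err)
		}
		var mtpnc v1alpha1.MultitenantPodNetworkConfig
		if err := crdClient.Get().Namespace("default").Resource("multitenantpodnetworkconfigs").Name("mtpod0").Do(ctx).Into(&mtpnc); err != nil {
			t.Fatalf("failed to retrieve mtpnc: %v", err)
		}
		t.Logf("mtpnc primary IP: %s", mtpnc.Status.PrimaryIP)
	}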