From 304b2c4ef1af6692e19d2d9b56117bc351c26602 Mon Sep 17 00:00:00 2001 From: Antonin Bas Date: Thu, 29 Aug 2024 11:00:27 -0700 Subject: [PATCH 1/3] Upgrade Go to 1.23 * Update some for loops in response to the following change introduced in Go 1.22: each iteration of the loop creates new variables, to avoid accidental sharing bugs. As a result of this change, a few existing loop variables had become heap-allocated, so we updated those loops to avoid the extra allocation. Other loops that were affected by the change (compiling differently), but for which the loop variable was stack-allocated, were reviewed individually and when it made sense, the loop was refactored to avoid the extra variable copy. The `go test -gcflags=-d=loopvar=2` command was used to find affected loops. * The antrea/codegen image was updated to use Go 1.23. No changes in generated files. * golangci-lint was updated to the latest available version (v1.60.3) - govet reported some invalid usages of printf, which were addressed; this led to some refactoring of the `antctl check` code - a new gosec rule (G115), which looks for potential integer overflows when converting between integer types, caused a lot of errors to be reported in the codebase. There were too many errors to be addressed as part of this PR, so we only handled a few of them for which the fix was straightforward. When the rule implementation is improved (so that it reports fewer false positives), we should review all errors and address them as needed. Fixes #6644 Signed-off-by: Antonin Bas --- .golangci.yml | 18 ++++-- Makefile | 2 +- build/images/codegen/README.md | 1 + build/images/deps/go-version | 2 +- go.mod | 2 +- hack/.spelling_failures | 1 + hack/update-codegen.sh | 2 +- .../leader/resourceexport_controller.go | 2 +- .../member/labelidentity_controller.go | 10 ++-- .../multicluster/member/node_controller.go | 3 +- .../member/resourceimport_controller_test.go | 12 ++-- .../member/serviceexport_controller.go | 3 +- multicluster/hack/update-codegen.sh | 2 +- multicluster/test/e2e/antreapolicy_test.go | 2 +- multicluster/test/e2e/framework.go | 8 +-- .../controller/networkpolicy/audit_logging.go | 4 +- .../networkpolicy/l7engine/reconciler.go | 28 ++++----- .../networkpolicy/node_reconciler_linux.go | 3 +- .../networkpolicy/pod_reconciler.go | 9 ++- .../externalnode/external_node_controller.go | 2 +- pkg/agent/nodeportlocal/npl_agent_test.go | 4 +- .../nodeportlocal/testing/annotations.go | 5 +- pkg/agent/openflow/network_policy.go | 4 +- pkg/agent/route/route_linux.go | 2 +- pkg/antctl/command_list.go | 3 +- pkg/antctl/raw/check/cluster/command.go | 20 +------ pkg/antctl/raw/check/installation/command.go | 24 ++------ .../check/installation/test_podtoservice.go | 3 +- pkg/antctl/raw/check/logger.go | 59 +++++++++++++++++++ pkg/antctl/raw/check/util.go | 11 ++-- pkg/apis/crd/v1beta1/util.go | 5 +- pkg/apiserver/certificate/certificate_test.go | 1 + pkg/apiserver/handlers/webhook/convert_crd.go | 6 +- pkg/controller/externalippool/controller.go | 3 +- pkg/controller/networkpolicy/clustergroup.go | 3 +- .../networkpolicy/clusternetworkpolicy.go | 8 ++- pkg/controller/stats/aggregator.go | 10 ++-- .../clickhouseclient/clickhouseclient_test.go | 4 +- pkg/flowaggregator/s3uploader/s3uploader.go | 2 +- .../s3uploader/s3uploader_test.go | 20 +++---- pkg/ipam/poolallocator/allocator.go | 2 +- pkg/ovs/ovsctl/ovsctl_test.go | 4 +- pkg/util/ip/ip.go | 6 +- pkg/util/sets/int32_test.go | 2 +- test/e2e/framework.go | 3 +- test/integration/ovs/ofctrl_test.go | 2 +- 46 
files changed, 196 insertions(+), 136 deletions(-) create mode 100644 pkg/antctl/raw/check/logger.go diff --git a/.golangci.yml b/.golangci.yml index d903a5a074a..a1bd13766ee 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -2,13 +2,23 @@ run: tests: true timeout: 15m - skip-files: - - ".*\\.pb\\.go" - skip-dirs-use-default: true linters-settings: goimports: local-prefixes: antrea.io/antrea + gosec: + excludes: + # At the time of writing this, the G115 rule is not even part of an + # official release of gosec. This rule causes a lot of errors to be + # reported in the codebase. While some of the reported errors should be + # addressed, a lot can also be ignored and there are also some clear false + # positives that should not be flagged by gosec in the first place (see + # https://github.com/securego/gosec/issues/1187). We will re-enable this + # rule in the future when it becomes more accurate. + - G115 # Potential integer overflow when converting between integer types + misspell: + ignore-words: + - creater revive: ignore-generated-header: false severity: warning @@ -30,6 +40,6 @@ linters: - staticcheck - gosec - goimports - - vet + - govet - revive - loggercheck diff --git a/Makefile b/Makefile index a1b688ced5a..01e5c474345 100644 --- a/Makefile +++ b/Makefile @@ -23,7 +23,7 @@ GIT_HOOKS := $(shell find hack/git_client_side_hooks -type f -print) DOCKER_NETWORK ?= default TRIVY_TARGET_IMAGE ?= -GOLANGCI_LINT_VERSION := v1.54.0 +GOLANGCI_LINT_VERSION := v1.60.3 GOLANGCI_LINT_BINDIR := $(CURDIR)/.golangci-bin GOLANGCI_LINT_BIN := $(GOLANGCI_LINT_BINDIR)/$(GOLANGCI_LINT_VERSION)/golangci-lint diff --git a/build/images/codegen/README.md b/build/images/codegen/README.md index 32c8c3c5011..58bf1cc973d 100644 --- a/build/images/codegen/README.md +++ b/build/images/codegen/README.md @@ -22,6 +22,7 @@ Here is the table of codegen images that have been uploaded: | Tag | Change | | :------------------------ | ----------------------------------------------------------------------------- | +| kubernetes-1.29.2-build.3 | Upgraded Go to v1.23 | | kubernetes-1.29.2-build.2 | Upgraded go.uber.org/mock/mockgen to v0.4.0 | | kubernetes-1.29.2-build.1 | Upgraded controller-gen to v0.14.0 | | kubernetes-1.29.2-build.0 | Upgraded protoc (v26.0), protoc-gen-go (v1.33.0), protoc-gen-go-grpc (v1.3.0) | diff --git a/build/images/deps/go-version b/build/images/deps/go-version index d2ab029d32c..a1b6e17d61f 100644 --- a/build/images/deps/go-version +++ b/build/images/deps/go-version @@ -1 +1 @@ -1.21 +1.23 diff --git a/go.mod b/go.mod index 140357545c3..64ef5a631ba 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module antrea.io/antrea -go 1.21.0 +go 1.23.0 require ( antrea.io/libOpenflow v0.14.0 diff --git a/hack/.spelling_failures b/hack/.spelling_failures index c725af6578c..978cf500d68 100644 --- a/hack/.spelling_failures +++ b/hack/.spelling_failures @@ -9,3 +9,4 @@ generated_expansion generated.pb.go generated.proto zz_generated +.golangci.yml diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index ecf52f320bd..65e15efc21a 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -22,7 +22,7 @@ function echoerr { } ANTREA_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../" && pwd )" -IMAGE_NAME="antrea/codegen:kubernetes-1.29.2-build.2" +IMAGE_NAME="antrea/codegen:kubernetes-1.29.2-build.3" # We will use git clone to make a working copy of the repository into a # temporary directory. 
This requires that all changes have been committed diff --git a/multicluster/controllers/multicluster/leader/resourceexport_controller.go b/multicluster/controllers/multicluster/leader/resourceexport_controller.go index c24590f8466..42ecbee1460 100644 --- a/multicluster/controllers/multicluster/leader/resourceexport_controller.go +++ b/multicluster/controllers/multicluster/leader/resourceexport_controller.go @@ -352,7 +352,7 @@ func (r *ResourceExportReconciler) refreshEndpointsResourceImport( svcResExport := &mcsv1alpha1.ResourceExport{} err := r.Client.Get(context.Background(), svcResExportName, svcResExport) if err != nil && apierrors.IsNotFound(err) { - return newResImport, false, fmt.Errorf("failed to get corresponding Service type of ResourceExport: " + svcResExportName.String()) + return newResImport, false, fmt.Errorf("failed to get ResourceExport %s: %w", svcResExportName.String(), err) } if len(svcResExport.Status.Conditions) > 0 { if svcResExport.Status.Conditions[0].Status != corev1.ConditionTrue { diff --git a/multicluster/controllers/multicluster/member/labelidentity_controller.go b/multicluster/controllers/multicluster/member/labelidentity_controller.go index 254287d5af3..6d25d4f5c3e 100644 --- a/multicluster/controllers/multicluster/member/labelidentity_controller.go +++ b/multicluster/controllers/multicluster/member/labelidentity_controller.go @@ -158,12 +158,13 @@ func (r *LabelIdentityReconciler) clusterSetMapFunc(ctx context.Context, a clien podList := &v1.PodList{} r.Client.List(ctx, podList) requests = make([]reconcile.Request, len(podList.Items)) - for i, pod := range podList.Items { + for idx := range podList.Items { + pod := &podList.Items[idx] podNamespacedName := types.NamespacedName{ Name: pod.GetName(), Namespace: pod.GetNamespace(), } - requests[i] = reconcile.Request{ + requests[idx] = reconcile.Request{ NamespacedName: podNamespacedName, } } @@ -183,8 +184,9 @@ func (r *LabelIdentityReconciler) namespaceMapFunc(ctx context.Context, ns clien podList := &v1.PodList{} r.Client.List(context.TODO(), podList, client.InNamespace(ns.GetName())) requests := make([]reconcile.Request, len(podList.Items)) - for i, pod := range podList.Items { - requests[i] = reconcile.Request{ + for idx := range podList.Items { + pod := &podList.Items[idx] + requests[idx] = reconcile.Request{ NamespacedName: types.NamespacedName{ Name: pod.GetName(), Namespace: pod.GetNamespace(), diff --git a/multicluster/controllers/multicluster/member/node_controller.go b/multicluster/controllers/multicluster/member/node_controller.go index 2a883943b67..ec9115737a8 100644 --- a/multicluster/controllers/multicluster/member/node_controller.go +++ b/multicluster/controllers/multicluster/member/node_controller.go @@ -392,7 +392,8 @@ func (r *NodeReconciler) clusterSetMapFunc(ctx context.Context, a client.Object) if len(clusterSet.Status.Conditions) > 0 && clusterSet.Status.Conditions[0].Status == corev1.ConditionTrue { nodeList := &corev1.NodeList{} r.Client.List(ctx, nodeList) - for _, n := range nodeList.Items { + for idx := range nodeList.Items { + n := &nodeList.Items[idx] if _, ok := n.Annotations[common.GatewayAnnotation]; ok { requests = append(requests, reconcile.Request{ NamespacedName: types.NamespacedName{ diff --git a/multicluster/controllers/multicluster/member/resourceimport_controller_test.go b/multicluster/controllers/multicluster/member/resourceimport_controller_test.go index 744105aeead..a8c44798bd3 100644 --- 
a/multicluster/controllers/multicluster/member/resourceimport_controller_test.go +++ b/multicluster/controllers/multicluster/member/resourceimport_controller_test.go @@ -18,8 +18,8 @@ package member import ( "context" + "fmt" "reflect" - "strconv" "strings" "testing" "time" @@ -562,17 +562,17 @@ func TestStaleControllerNoRaceWithResourceImportReconciler(t *testing.T) { stopCh := make(chan struct{}) defer close(stopCh) q := workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter()) - numInitialResImp := 50 - for i := 1; i <= numInitialResImp; i++ { + const numInitialResImp = 50 + for i := uint32(1); i <= numInitialResImp; i++ { resImp := &mcv1alpha1.ResourceImport{ ObjectMeta: metav1.ObjectMeta{ - Name: "label-identity-" + strconv.Itoa(i), + Name: fmt.Sprintf("label-identity-%d", i), Namespace: "antrea-mcs", }, Spec: mcv1alpha1.ResourceImportSpec{ LabelIdentity: &mcv1alpha1.LabelIdentitySpec{ - Label: "ns:kubernetes.io/metadata.name=ns&pod:seq=" + strconv.Itoa(i), - ID: uint32(i), + Label: fmt.Sprintf("ns:kubernetes.io/metadata.name=ns&pod:seq=%d", i), + ID: i, }, }, } diff --git a/multicluster/controllers/multicluster/member/serviceexport_controller.go b/multicluster/controllers/multicluster/member/serviceexport_controller.go index 2232f3a1056..94c3927b861 100644 --- a/multicluster/controllers/multicluster/member/serviceexport_controller.go +++ b/multicluster/controllers/multicluster/member/serviceexport_controller.go @@ -540,7 +540,8 @@ func (r *ServiceExportReconciler) clusterSetMapFunc(ctx context.Context, a clien if len(clusterSet.Status.Conditions) > 0 && clusterSet.Status.Conditions[0].Status == corev1.ConditionTrue { svcExports := &k8smcsv1alpha1.ServiceExportList{} r.Client.List(ctx, svcExports) - for _, svcExport := range svcExports.Items { + for idx := range svcExports.Items { + svcExport := &svcExports.Items[idx] namespacedName := types.NamespacedName{ Name: svcExport.GetName(), Namespace: svcExport.GetNamespace(), diff --git a/multicluster/hack/update-codegen.sh b/multicluster/hack/update-codegen.sh index 49c19f34625..5d0db866d0e 100755 --- a/multicluster/hack/update-codegen.sh +++ b/multicluster/hack/update-codegen.sh @@ -22,7 +22,7 @@ function echoerr { } ANTREA_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../" && pwd )" -IMAGE_NAME="antrea/codegen:kubernetes-1.29.2-build.2" +IMAGE_NAME="antrea/codegen:kubernetes-1.29.2-build.3" # We will use git clone to make a working copy of the repository into a # temporary directory. 
This requires that all changes have been committed diff --git a/multicluster/test/e2e/antreapolicy_test.go b/multicluster/test/e2e/antreapolicy_test.go index 9bb2f5ad79b..64873881cbd 100644 --- a/multicluster/test/e2e/antreapolicy_test.go +++ b/multicluster/test/e2e/antreapolicy_test.go @@ -70,7 +70,7 @@ func initializeForPolicyTest(t *testing.T, data *MCTestData) { } for clusterName := range data.clusterTestDataMap { d := data.clusterTestDataMap[clusterName] - k8sUtils, err := antreae2e.NewKubernetesUtils(&d) + k8sUtils, err := antreae2e.NewKubernetesUtils(d) failOnError(err, t) if clusterName != leaderCluster { _, err = k8sUtils.Bootstrap(perClusterNamespaces, perNamespacePods, true, nil, nil) diff --git a/multicluster/test/e2e/framework.go b/multicluster/test/e2e/framework.go index 6480640b6ce..6be3af8982b 100644 --- a/multicluster/test/e2e/framework.go +++ b/multicluster/test/e2e/framework.go @@ -69,7 +69,7 @@ var testOptions TestOptions type MCTestData struct { clusters []string - clusterTestDataMap map[string]antreae2e.TestData + clusterTestDataMap map[string]*antreae2e.TestData controlPlaneNames map[string]string logsDirForTestCase string clusterGateways map[string]string @@ -87,9 +87,9 @@ func (data *MCTestData) createClients() error { data.clusters = []string{ leaderCluster, eastCluster, westCluster, } - data.clusterTestDataMap = map[string]antreae2e.TestData{} + data.clusterTestDataMap = make(map[string]*antreae2e.TestData) for i, cluster := range data.clusters { - testData := antreae2e.TestData{ClusterName: cluster} + testData := &antreae2e.TestData{ClusterName: cluster} if err := testData.CreateClient(kubeConfigPaths[i]); err != nil { return fmt.Errorf("error initializing clients for cluster %s: %v", cluster, err) } @@ -193,7 +193,7 @@ func (data *MCTestData) createPod(clusterName, name, nodeName, namespace, ctrNam WithCommand(command).WithArgs(args). WithEnv(env).WithPorts(ports).WithHostNetwork(hostNetwork). WithMutateFunc(mutateFunc). - Create(&d) + Create(d) } return fmt.Errorf("clusterName %s not found", clusterName) } diff --git a/pkg/agent/controller/networkpolicy/audit_logging.go b/pkg/agent/controller/networkpolicy/audit_logging.go index 3dd3a82695b..26dce3b746a 100644 --- a/pkg/agent/controller/networkpolicy/audit_logging.go +++ b/pkg/agent/controller/networkpolicy/audit_logging.go @@ -110,7 +110,7 @@ func (l *AuditLogger) terminateLogKey(logMsg string) { defer l.logDeduplication.logMutex.Unlock() logRecord := l.logDeduplication.logMap[logMsg] if logRecord.count == 1 { - l.npLogger.Printf(logMsg) + l.npLogger.Print(logMsg) } else { l.npLogger.Printf("%s [%d packets in %s]", logMsg, logRecord.count, time.Since(logRecord.initTime)) } @@ -155,7 +155,7 @@ func (l *AuditLogger) LogDedupPacket(ob *logInfo) { // Deduplicate non-Allow packet log. logMsg := buildLogMsg(ob) if ob.disposition == openflow.DispositionToString[openflow.DispositionAllow] { - l.npLogger.Printf(logMsg) + l.npLogger.Print(logMsg) } else { // Increase count if duplicated within 1 sec, create buffer otherwise. 
exists := l.updateLogKey(logMsg, l.bufferLength) diff --git a/pkg/agent/controller/networkpolicy/l7engine/reconciler.go b/pkg/agent/controller/networkpolicy/l7engine/reconciler.go index 56523773662..eb2d24fa289 100644 --- a/pkg/agent/controller/networkpolicy/l7engine/reconciler.go +++ b/pkg/agent/controller/networkpolicy/l7engine/reconciler.go @@ -123,27 +123,27 @@ multi-detect: `, config.L7SuricataSocketPath, config.L7RedirectTargetPortName, config.L7RedirectReturnPortName) ) -type threadSafeInt32Set struct { +type threadSafeSet[T comparable] struct { sync.RWMutex - cached sets.Set[int32] + cached sets.Set[T] } -func (g *threadSafeInt32Set) has(key uint32) bool { +func (g *threadSafeSet[T]) has(key T) bool { g.RLock() defer g.RUnlock() - return g.cached.Has(int32(key)) + return g.cached.Has(key) } -func (g *threadSafeInt32Set) insert(key uint32) { +func (g *threadSafeSet[T]) insert(key T) { g.Lock() defer g.Unlock() - g.cached.Insert(int32(key)) + g.cached.Insert(key) } -func (g *threadSafeInt32Set) delete(key uint32) { +func (g *threadSafeSet[T]) delete(key T) { g.Lock() defer g.Unlock() - g.cached.Delete(int32(key)) + g.cached.Delete(key) } type Reconciler struct { @@ -151,8 +151,8 @@ type Reconciler struct { startSuricataFn func() suricataScFn func(scCmd string) (*scCmdRet, error) - suricataTenantCache *threadSafeInt32Set - suricataTenantHandlerCache *threadSafeInt32Set + suricataTenantCache *threadSafeSet[uint32] + suricataTenantHandlerCache *threadSafeSet[uint32] once sync.Once } @@ -161,11 +161,11 @@ func NewReconciler() *Reconciler { return &Reconciler{ suricataScFn: suricataSc, startSuricataFn: startSuricata, - suricataTenantCache: &threadSafeInt32Set{ - cached: sets.New[int32](), + suricataTenantCache: &threadSafeSet[uint32]{ + cached: sets.New[uint32](), }, - suricataTenantHandlerCache: &threadSafeInt32Set{ - cached: sets.New[int32](), + suricataTenantHandlerCache: &threadSafeSet[uint32]{ + cached: sets.New[uint32](), }, } } diff --git a/pkg/agent/controller/networkpolicy/node_reconciler_linux.go b/pkg/agent/controller/networkpolicy/node_reconciler_linux.go index f3c2d3982ee..725c290a7c5 100644 --- a/pkg/agent/controller/networkpolicy/node_reconciler_linux.go +++ b/pkg/agent/controller/networkpolicy/node_reconciler_linux.go @@ -571,7 +571,8 @@ func groupMembersToIPNets(groups v1beta2.GroupMemberSet, isIPv6 bool) sets.Set[s func ipBlocksToIPNets(ipBlocks []v1beta2.IPBlock, isIPv6 bool) []string { var ipnets []string - for _, b := range ipBlocks { + for idx := range ipBlocks { + b := &ipBlocks[idx] blockCIDR := ip.IPNetToNetIPNet(&b.CIDR) if isIPv6 != utilnet.IsIPv6CIDR(blockCIDR) { continue diff --git a/pkg/agent/controller/networkpolicy/pod_reconciler.go b/pkg/agent/controller/networkpolicy/pod_reconciler.go index 1bf64e4a84e..cac8ad1d0dd 100644 --- a/pkg/agent/controller/networkpolicy/pod_reconciler.go +++ b/pkg/agent/controller/networkpolicy/pod_reconciler.go @@ -337,7 +337,8 @@ func (r *podReconciler) getRuleType(rule *CompletedRule) ruleType { } } - for _, ipBlock := range rule.To.IPBlocks { + for idx := range rule.To.IPBlocks { + ipBlock := &rule.To.IPBlocks[idx] ipAddr := ip.IPNetToNetIPNet(&ipBlock.CIDR) if ipAddr.IP.IsMulticast() { return multicast @@ -1197,7 +1198,8 @@ func groupMembersToOFAddresses(groupMemberSet v1beta2.GroupMemberSet) []types.Ad func ipBlocksToOFAddresses(ipBlocks []v1beta2.IPBlock, ipv4Enabled, ipv6Enabled, ctMatch bool) []types.Address { // Must not return nil as it means not restricted by addresses in Openflow implementation. 
addresses := make([]types.Address, 0) - for _, b := range ipBlocks { + for idx := range ipBlocks { + b := &ipBlocks[idx] blockCIDR := ip.IPNetToNetIPNet(&b.CIDR) if !isIPNetSupportedByAF(blockCIDR, ipv4Enabled, ipv6Enabled) { // This is part of normal operations: "allow all" in a policy is represented @@ -1285,7 +1287,8 @@ func resolveService(service *v1beta2.Service, member *v1beta2.GroupMember) *v1be if service.Port == nil || service.Port.Type == intstr.Int { return service } - for _, port := range member.Ports { + for idx := range member.Ports { + port := &member.Ports[idx] // For K8s NetworkPolicy and Antrea-native policies, the Service.Protocol field will never be nil since TCP // will be filled as default. However, for AdminNetworkPolicy and BaselineAdminNetworkPolicy, the Protocol // field will be nil for ports written as named ports. In that case, the member port will match as long diff --git a/pkg/agent/externalnode/external_node_controller.go b/pkg/agent/externalnode/external_node_controller.go index 559e9bbfcd6..e14c782e7aa 100644 --- a/pkg/agent/externalnode/external_node_controller.go +++ b/pkg/agent/externalnode/external_node_controller.go @@ -202,7 +202,7 @@ func (c *ExternalNodeController) reconcilePolicyBypassFlows() error { klog.V(2).InfoS("Installing policy bypass flows", "protocol", rule.Protocol, "CIDR", rule.CIDR, "port", rule.Port, "direction", rule.Direction) protocol := parseProtocol(rule.Protocol) _, ipNet, _ := net.ParseCIDR(rule.CIDR) - if err := c.ofClient.InstallPolicyBypassFlows(protocol, ipNet, uint16(rule.Port), rule.Direction == "ingress"); err != nil { + if err := c.ofClient.InstallPolicyBypassFlows(protocol, ipNet, util.PortToUint16(rule.Port), rule.Direction == "ingress"); err != nil { return err } } diff --git a/pkg/agent/nodeportlocal/npl_agent_test.go b/pkg/agent/nodeportlocal/npl_agent_test.go index e5bb5b1234a..267231e03db 100644 --- a/pkg/agent/nodeportlocal/npl_agent_test.go +++ b/pkg/agent/nodeportlocal/npl_agent_test.go @@ -514,8 +514,8 @@ func TestPodDelete(t *testing.T) { // It verifies that the Pod's NPL annotation and the local port table are updated with both ports. // It then updates the Service to remove one of the target ports. 
func TestAddMultiPortPodSvc(t *testing.T) { - newPort := 90 - testSvc := getTestSvc(defaultPort, int32(newPort)) + const newPort = 90 + testSvc := getTestSvc(defaultPort, newPort) testPod := getTestPod() testData := setUp(t, newTestConfig(), testSvc, testPod) defer testData.tearDown() diff --git a/pkg/agent/nodeportlocal/testing/annotations.go b/pkg/agent/nodeportlocal/testing/annotations.go index e98559dcac3..2c01f38f5b5 100644 --- a/pkg/agent/nodeportlocal/testing/annotations.go +++ b/pkg/agent/nodeportlocal/testing/annotations.go @@ -38,9 +38,10 @@ func NewExpectedNPLAnnotations(nodeIP *string, nplStartPort, nplEndPort int) *Ex } func (a *ExpectedNPLAnnotations) find(podPort int, protocol string) *types.NPLAnnotation { - for _, annotation := range a.annotations { + for idx := range a.annotations { + annotation := &a.annotations[idx] if annotation.PodPort == podPort && annotation.Protocol == protocol { - return &annotation + return annotation } } return nil diff --git a/pkg/agent/openflow/network_policy.go b/pkg/agent/openflow/network_policy.go index 5b56e6f8cb8..95e61760eb7 100644 --- a/pkg/agent/openflow/network_policy.go +++ b/pkg/agent/openflow/network_policy.go @@ -388,8 +388,8 @@ func (m *matchPair) KeyString() string { func (m *conjunctiveMatch) generateGlobalMapKey() string { var priorityStr string var matchPairStrList []string - for _, eachMatchPair := range m.matchPairs { - matchPairStrList = append(matchPairStrList, eachMatchPair.KeyString()) + for idx := range m.matchPairs { + matchPairStrList = append(matchPairStrList, m.matchPairs[idx].KeyString()) } if m.priority == nil { priorityStr = strconv.Itoa(int(priorityNormal)) diff --git a/pkg/agent/route/route_linux.go b/pkg/agent/route/route_linux.go index e4a57ac9986..050060e6844 100644 --- a/pkg/agent/route/route_linux.go +++ b/pkg/agent/route/route_linux.go @@ -2099,7 +2099,7 @@ func (c *Client) DeleteRouteForLink(cidr *net.IPNet, linkIndex int) error { func (c *Client) ClearConntrackEntryForService(svcIP net.IP, svcPort uint16, endpointIP net.IP, protocol binding.Protocol) error { var protoVar uint8 - var ipFamilyVar int + var ipFamilyVar uint8 var zone uint16 switch protocol { case binding.ProtocolTCP: diff --git a/pkg/antctl/command_list.go b/pkg/antctl/command_list.go index d780d9e697f..2de722130be 100644 --- a/pkg/antctl/command_list.go +++ b/pkg/antctl/command_list.go @@ -105,7 +105,8 @@ func (cl *commandList) validate() []error { if len(cl.definitions) == 0 { return []error{fmt.Errorf("no command found in the command list")} } - for i, c := range cl.definitions { + for i := range cl.definitions { + c := &cl.definitions[i] for _, err := range c.validate() { errs = append(errs, fmt.Errorf("#%d command<%s>: %w", i, c.use, err)) } diff --git a/pkg/antctl/raw/check/cluster/command.go b/pkg/antctl/raw/check/cluster/command.go index e5a993ef1ba..a89bf8ed390 100644 --- a/pkg/antctl/raw/check/cluster/command.go +++ b/pkg/antctl/raw/check/cluster/command.go @@ -18,10 +18,8 @@ import ( "context" "errors" "fmt" - "os" "time" - "github.com/fatih/color" "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -85,6 +83,7 @@ func RegisterTest(name string, test Test) { } type testContext struct { + check.Logger client kubernetes.Interface config *rest.Config clusterName string @@ -213,6 +212,7 @@ func (t *testContext) setup(ctx context.Context) error { func NewTestContext(client kubernetes.Interface, config *rest.Config, clusterName, testImage string) *testContext { return &testContext{ + 
Logger: check.NewLogger(fmt.Sprintf("[%s] ", clusterName)), client: client, config: config, clusterName: clusterName, @@ -221,22 +221,6 @@ func NewTestContext(client kubernetes.Interface, config *rest.Config, clusterNam } } -func (t *testContext) Log(format string, a ...interface{}) { - fmt.Fprintf(os.Stdout, fmt.Sprintf("[%s] ", t.clusterName)+format+"\n", a...) -} - -func (t *testContext) Success(format string, a ...interface{}) { - fmt.Fprintf(os.Stdout, fmt.Sprintf("[%s] ", t.clusterName)+color.GreenString(format, a...)+"\n") -} - -func (t *testContext) Fail(format string, a ...interface{}) { - fmt.Fprintf(os.Stdout, fmt.Sprintf("[%s] ", t.clusterName)+color.RedString(format, a...)+"\n") -} - -func (t *testContext) Warning(format string, a ...interface{}) { - fmt.Fprintf(os.Stdout, fmt.Sprintf("[%s] ", t.clusterName)+color.YellowString(format, a...)+"\n") -} - func (t *testContext) Header(format string, a ...interface{}) { t.Log("-------------------------------------------------------------------------------------------") t.Log(format, a...) diff --git a/pkg/antctl/raw/check/installation/command.go b/pkg/antctl/raw/check/installation/command.go index 1b9e43a28be..72e9121e940 100644 --- a/pkg/antctl/raw/check/installation/command.go +++ b/pkg/antctl/raw/check/installation/command.go @@ -18,12 +18,10 @@ import ( "context" "errors" "fmt" - "os" "regexp" "strings" "time" - "github.com/fatih/color" "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -99,6 +97,7 @@ func RegisterTest(name string, test Test) { } type testContext struct { + check.Logger client kubernetes.Interface config *rest.Config clusterName string @@ -169,7 +168,7 @@ func tcpServerCommand(port int) []string { return []string{"nc", "-l", fmt.Sprint(port), "-k"} } -func newService(name string, selector map[string]string, port int) *corev1.Service { +func newService(name string, selector map[string]string, port int32) *corev1.Service { return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -177,7 +176,7 @@ func newService(name string, selector map[string]string, port int) *corev1.Servi Spec: corev1.ServiceSpec{ Type: corev1.ServiceTypeClusterIP, Ports: []corev1.ServicePort{ - {Name: name, Port: int32(port)}, + {Name: name, Port: port}, }, Selector: selector, IPFamilyPolicy: ptr.To(corev1.IPFamilyPolicyPreferDualStack), @@ -194,6 +193,7 @@ func NewTestContext( testImage string, ) *testContext { return &testContext{ + Logger: check.NewLogger(fmt.Sprintf("[%s] ", clusterName)), client: client, config: config, clusterName: clusterName, @@ -378,22 +378,6 @@ func (t *testContext) tcpProbe(ctx context.Context, clientPodName string, contai return err } -func (t *testContext) Log(format string, a ...interface{}) { - fmt.Fprintf(os.Stdout, fmt.Sprintf("[%s] ", t.clusterName)+format+"\n", a...) 
-} - -func (t *testContext) Success(format string, a ...interface{}) { - fmt.Fprintf(os.Stdout, fmt.Sprintf("[%s] ", t.clusterName)+color.GreenString(format, a...)+"\n") -} - -func (t *testContext) Fail(format string, a ...interface{}) { - fmt.Fprintf(os.Stdout, fmt.Sprintf("[%s] ", t.clusterName)+color.RedString(format, a...)+"\n") -} - -func (t *testContext) Warning(format string, a ...interface{}) { - fmt.Fprintf(os.Stdout, fmt.Sprintf("[%s] ", t.clusterName)+color.YellowString(format, a...)+"\n") -} - func (t *testContext) Header(format string, a ...interface{}) { t.Log("-------------------------------------------------------------------------------------------") t.Log(format, a...) diff --git a/pkg/antctl/raw/check/installation/test_podtoservice.go b/pkg/antctl/raw/check/installation/test_podtoservice.go index 1d45af42386..df61d0e16bb 100644 --- a/pkg/antctl/raw/check/installation/test_podtoservice.go +++ b/pkg/antctl/raw/check/installation/test_podtoservice.go @@ -48,7 +48,8 @@ func (t *PodToServiceConnectivityTest) Run(ctx context.Context, testContext *tes if err != nil { return err } - for _, clientPod := range testContext.clientPods { + for idx := range testContext.clientPods { + clientPod := &testContext.clientPods[idx] testContext.Log("Validating from Pod %s to Service %s in Namespace %s...", clientPod.Name, service.Name, testContext.namespace) for _, clusterIP := range service.Spec.ClusterIPs { // Service is realized asynchronously, retry a few times. diff --git a/pkg/antctl/raw/check/logger.go b/pkg/antctl/raw/check/logger.go new file mode 100644 index 00000000000..6ee6f2e8429 --- /dev/null +++ b/pkg/antctl/raw/check/logger.go @@ -0,0 +1,59 @@ +// Copyright 2024 Antrea Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package check + +import ( + "fmt" + "os" + + "github.com/fatih/color" +) + +type Logger struct { + prefix string +} + +func NewLogger(prefix string) Logger { + return Logger{ + prefix: prefix, + } +} + +type stringFormatterFunc func(format string, a ...interface{}) string + +func noopStringFormatter(format string, a ...interface{}) string { + return fmt.Sprintf(format, a...) +} + +func (l Logger) log(stringFormatter stringFormatterFunc, format string, a ...interface{}) { + fmt.Fprint(os.Stdout, l.prefix) + fmt.Fprintln(os.Stdout, stringFormatter(format, a...)) +} + +func (l Logger) Log(format string, a ...interface{}) { + l.log(noopStringFormatter, format, a...) +} + +func (l Logger) Success(format string, a ...interface{}) { + l.log(color.GreenString, format, a...) +} + +func (l Logger) Fail(format string, a ...interface{}) { + l.log(color.RedString, format, a...) +} + +func (l Logger) Warning(format string, a ...interface{}) { + l.log(color.YellowString, format, a...) 
+} diff --git a/pkg/antctl/raw/check/util.go b/pkg/antctl/raw/check/util.go index d17d9b0530f..e9a4f2f5fae 100644 --- a/pkg/antctl/raw/check/util.go +++ b/pkg/antctl/raw/check/util.go @@ -216,13 +216,14 @@ func GenerateRandomNamespace(baseName string) string { } func Teardown(ctx context.Context, client kubernetes.Interface, clusterName string, namespace string) { - fmt.Fprintf(os.Stdout, fmt.Sprintf("[%s] ", clusterName)+"Deleting installation tests setup...\n") + logger := NewLogger(fmt.Sprintf("[%s] ", clusterName)) + logger.Log("Deleting installation tests setup...") err := client.CoreV1().Namespaces().Delete(ctx, namespace, metav1.DeleteOptions{}) if err != nil { - fmt.Fprintf(os.Stdout, "Namespace %s deletion failed: %v", namespace, err) + logger.Fail("Namespace %s deletion failed: %v", namespace, err) return } - fmt.Fprintf(os.Stdout, fmt.Sprintf("[%s] ", clusterName)+"Waiting for Namespace %s to be deleted\n", namespace) + logger.Log("Waiting for Namespace %s to be deleted", namespace) err = wait.PollUntilContextTimeout(ctx, 2*time.Second, 1*time.Minute, true, func(ctx context.Context) (bool, error) { _, err := client.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{}) if err != nil { @@ -233,8 +234,8 @@ func Teardown(ctx context.Context, client kubernetes.Interface, clusterName stri return false, nil }) if err != nil { - fmt.Fprintf(os.Stdout, fmt.Sprintf("[%s] ", clusterName)+"Setup deletion failed\n") + logger.Success("Setup deletion failed") } else { - fmt.Fprintf(os.Stdout, fmt.Sprintf("[%s] ", clusterName)+"Setup deletion successful\n") + logger.Fail("Setup deletion successful") } } diff --git a/pkg/apis/crd/v1beta1/util.go b/pkg/apis/crd/v1beta1/util.go index 615db552e2c..87434a1c4f9 100644 --- a/pkg/apis/crd/v1beta1/util.go +++ b/pkg/apis/crd/v1beta1/util.go @@ -15,9 +15,10 @@ package v1beta1 func GetEgressCondition(conditions []EgressCondition, conditionType EgressConditionType) *EgressCondition { - for _, c := range conditions { + for idx := range conditions { + c := &conditions[idx] if c.Type == conditionType { - return &c + return c } } return nil diff --git a/pkg/apiserver/certificate/certificate_test.go b/pkg/apiserver/certificate/certificate_test.go index 56884acf17b..d84c004ffa9 100644 --- a/pkg/apiserver/certificate/certificate_test.go +++ b/pkg/apiserver/certificate/certificate_test.go @@ -47,6 +47,7 @@ AQsFAAOBgQCpN27uh/LjUVCaBK7Noko25iih/JSSoWzlvc8CaipvSPofNWyGx3Vu OdcSwNGYX/pp4ZoAzFij/Y5u0vKTVLkWXATeTMVmlPvhmpYjj9gPkCSY6j/SiKlY kGy0xr+0M5UQkMBcfIh9oAp9um1fZHVWAJAGP/ikZgkcUey0LmBn8w== -----END CERTIFICATE-----` + // #nosec G101 fakeTLSKey = `-----BEGIN RSA PRIVATE KEY----- MIICWwIBAAKBgQDkGXXSm6Yun5o3Jlmx45rItcQ2pmnoDk4eZfl0rmPa674s2pfY o3KywkXQ1Fp3BC8GUgzPLSfJ8xXya9Lg1Wo8sHrDln0iRg5HXxGuuFNhRBvj2S0s diff --git a/pkg/apiserver/handlers/webhook/convert_crd.go b/pkg/apiserver/handlers/webhook/convert_crd.go index eec3d16baa1..a8e6d427ced 100644 --- a/pkg/apiserver/handlers/webhook/convert_crd.go +++ b/pkg/apiserver/handlers/webhook/convert_crd.go @@ -138,7 +138,7 @@ func HandleCRDConversion(crdConvertFunc convertFunc) http.HandlerFunc { convertReview, ok := obj.(*v1beta1.ConversionReview) if !ok { msg := fmt.Sprintf("Expected v1beta1.ConversionReview but got: %T", obj) - klog.Errorf(msg) + klog.Error(msg) http.Error(w, html.EscapeString(msg), http.StatusBadRequest) return } @@ -153,7 +153,7 @@ func HandleCRDConversion(crdConvertFunc convertFunc) http.HandlerFunc { convertReview, ok := obj.(*v1.ConversionReview) if !ok { msg := fmt.Sprintf("Expected 
v1.ConversionReview but got: %T", obj) - klog.Errorf(msg) + klog.Error(msg) http.Error(w, html.EscapeString(msg), http.StatusBadRequest) return } @@ -175,7 +175,7 @@ func HandleCRDConversion(crdConvertFunc convertFunc) http.HandlerFunc { outSerializer := getOutputSerializer(accept) if outSerializer == nil { msg := fmt.Sprintf("invalid accept header `%s`", accept) - klog.Errorf(msg) + klog.Error(msg) http.Error(w, html.EscapeString(msg), http.StatusBadRequest) return } diff --git a/pkg/controller/externalippool/controller.go b/pkg/controller/externalippool/controller.go index b51f09cb84e..ae54ce5b46a 100644 --- a/pkg/controller/externalippool/controller.go +++ b/pkg/controller/externalippool/controller.go @@ -204,7 +204,8 @@ func (c *ExternalIPPoolController) createOrUpdateIPAllocator(ipPool *antreacrds. existingIPRanges.Insert(multiIPAllocator.Names()...) } - for _, ipRange := range ipPool.Spec.IPRanges { + for idx := range ipPool.Spec.IPRanges { + ipRange := &ipPool.Spec.IPRanges[idx] ipAllocator, err := func() (*ipallocator.SingleIPAllocator, error) { if ipRange.CIDR != "" { _, ipNet, err := net.ParseCIDR(ipRange.CIDR) diff --git a/pkg/controller/networkpolicy/clustergroup.go b/pkg/controller/networkpolicy/clustergroup.go index 000759ff70d..6e99618f973 100644 --- a/pkg/controller/networkpolicy/clustergroup.go +++ b/pkg/controller/networkpolicy/clustergroup.go @@ -439,7 +439,8 @@ func (c *NetworkPolicyController) GetAssociatedIPBlockGroups(ip net.IP) []antrea var matchedGroups []antreatypes.Group for _, obj := range ipBlockGroupObjs { group := obj.(*antreatypes.Group) - for _, ipNet := range group.IPNets { + for idx := range group.IPNets { + ipNet := &group.IPNets[idx] if ipNet.Contains(ip) { matchedGroups = append(matchedGroups, *group) // Append all parent groups to matchedGroups diff --git a/pkg/controller/networkpolicy/clusternetworkpolicy.go b/pkg/controller/networkpolicy/clusternetworkpolicy.go index 6e83f2a2f8c..ab7edddcd5f 100644 --- a/pkg/controller/networkpolicy/clusternetworkpolicy.go +++ b/pkg/controller/networkpolicy/clusternetworkpolicy.go @@ -401,16 +401,18 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *crdv1beta1.Cl } var rules []controlplane.NetworkPolicyRule processRules := func(cnpRules []crdv1beta1.Rule, direction controlplane.Direction) { - for idx, cnpRule := range cnpRules { + for idx := range cnpRules { + cnpRule := &cnpRules[idx] services, namedPortExists := toAntreaServicesForCRD(cnpRule.Ports, cnpRule.Protocols) clusterPeers, perNSPeers, nsLabelPeers := splitPeersByScope(cnpRule, direction) + priority := int32(idx) addRule := func(peer *controlplane.NetworkPolicyPeer, ruleAddressGroups []*antreatypes.AddressGroup, dir controlplane.Direction, ruleAppliedTos []*antreatypes.AppliedToGroup) { rule := controlplane.NetworkPolicyRule{ Direction: dir, Services: services, Name: cnpRule.Name, Action: cnpRule.Action, - Priority: int32(idx), + Priority: priority, EnableLogging: cnpRule.EnableLogging, AppliedToGroups: getAppliedToGroupNames(ruleAppliedTos), L7Protocols: toAntreaL7ProtocolsForCRD(cnpRule.L7Protocols), @@ -719,7 +721,7 @@ func (n *NetworkPolicyController) processClusterAppliedTo(appliedTo []crdv1beta1 // splitPeersByScope splits the ClusterNetworkPolicy peers in the rule by whether the peer // is cluster-scoped or per-namespace. 
-func splitPeersByScope(rule crdv1beta1.Rule, dir controlplane.Direction) ([]crdv1beta1.NetworkPolicyPeer, []crdv1beta1.NetworkPolicyPeer, []crdv1beta1.NetworkPolicyPeer) { +func splitPeersByScope(rule *crdv1beta1.Rule, dir controlplane.Direction) ([]crdv1beta1.NetworkPolicyPeer, []crdv1beta1.NetworkPolicyPeer, []crdv1beta1.NetworkPolicyPeer) { var clusterPeers, perNSPeers, nsLabelPeers []crdv1beta1.NetworkPolicyPeer peers := rule.From if dir == controlplane.DirectionOut { diff --git a/pkg/controller/stats/aggregator.go b/pkg/controller/stats/aggregator.go index f551104f062..51697408e0e 100644 --- a/pkg/controller/stats/aggregator.go +++ b/pkg/controller/stats/aggregator.go @@ -371,7 +371,8 @@ func (a *Aggregator) Run(stopCh <-chan struct{}) { } func (a *Aggregator) doCollect(summary *controlplane.NodeStatsSummary) { - for _, stats := range summary.NetworkPolicies { + for idx := range summary.NetworkPolicies { + stats := &summary.NetworkPolicies[idx] // The policy might have been removed, skip processing it if missing. objs, _ := a.networkPolicyStats.ByIndex(uidIndex, string(stats.NetworkPolicy.UID)) if len(objs) > 0 { @@ -410,7 +411,8 @@ func (a *Aggregator) doCollect(summary *controlplane.NodeStatsSummary) { a.groupNodePodsMapMutex.Unlock() } if features.DefaultFeatureGate.Enabled(features.AntreaPolicy) { - for _, stats := range summary.AntreaClusterNetworkPolicies { + for idx := range summary.AntreaClusterNetworkPolicies { + stats := &summary.AntreaClusterNetworkPolicies[idx] // The policy have might been removed, skip processing it if missing. objs, _ := a.antreaClusterNetworkPolicyStats.ByIndex(uidIndex, string(stats.NetworkPolicy.UID)) if len(objs) > 0 { @@ -426,10 +428,10 @@ func (a *Aggregator) doCollect(summary *controlplane.NodeStatsSummary) { } } - for _, stats := range summary.AntreaNetworkPolicies { + for idx := range summary.AntreaNetworkPolicies { + stats := &summary.AntreaNetworkPolicies[idx] // The policy have might been removed, skip processing it if missing. objs, _ := a.antreaNetworkPolicyStats.ByIndex(uidIndex, string(stats.NetworkPolicy.UID)) - if len(objs) > 0 { // The object returned by cache is supposed to be read only, create a new object and update it. 
curStats := objs[0].(*statsv1alpha1.AntreaNetworkPolicyStats).DeepCopy() diff --git a/pkg/flowaggregator/clickhouseclient/clickhouseclient_test.go b/pkg/flowaggregator/clickhouseclient/clickhouseclient_test.go index 164a55d09b9..0cb8ae94bf2 100644 --- a/pkg/flowaggregator/clickhouseclient/clickhouseclient_test.go +++ b/pkg/flowaggregator/clickhouseclient/clickhouseclient_test.go @@ -218,8 +218,8 @@ func TestPushRecordsToFrontOfQueue(t *testing.T) { // init deque [0] records := make([]*flowrecord.FlowRecord, 5) - for i := 0; i < 5; i++ { - records[i] = &flowrecord.FlowRecord{SourceTransportPort: uint16(i)} + for i := range uint16(5) { + records[i] = &flowrecord.FlowRecord{SourceTransportPort: i} } chExportProc.deque.PushBack(records[0]) diff --git a/pkg/flowaggregator/s3uploader/s3uploader.go b/pkg/flowaggregator/s3uploader/s3uploader.go index e2dfd8611e0..a588cf357b9 100644 --- a/pkg/flowaggregator/s3uploader/s3uploader.go +++ b/pkg/flowaggregator/s3uploader/s3uploader.go @@ -69,7 +69,7 @@ type S3UploadProcess struct { // currentBuffer caches flow record currentBuffer *bytes.Buffer // cachedRecordCount keeps track of the number of flow records written into currentBuffer - cachedRecordCount int + cachedRecordCount int32 // bufferQueue caches currentBuffer when it is full bufferQueue []*bytes.Buffer // buffersToUpload stores all the buffers to be uploaded for the current uploadFile() call diff --git a/pkg/flowaggregator/s3uploader/s3uploader_test.go b/pkg/flowaggregator/s3uploader/s3uploader_test.go index 82d5a81000e..999a2373f9e 100644 --- a/pkg/flowaggregator/s3uploader/s3uploader_test.go +++ b/pkg/flowaggregator/s3uploader/s3uploader_test.go @@ -80,7 +80,7 @@ func TestCacheRecord(t *testing.T) { mockRecord := ipfixentitiestesting.NewMockRecord(ctrl) flowaggregatortesting.PrepareMockIpfixRecord(mockRecord, true) s3UploadProc.CacheRecord(mockRecord) - assert.Equal(t, 1, s3UploadProc.cachedRecordCount) + assert.Equal(t, int32(1), s3UploadProc.cachedRecordCount) currentBuffer := strings.TrimRight(s3UploadProc.currentBuffer.String(), "\n") assert.Equal(t, strings.Split(currentBuffer, ",")[:50], strings.Split(recordStrIPv4, ",")[:50]) assert.Equal(t, strings.Split(currentBuffer, ",")[51:], strings.Split(recordStrIPv4, ",")[51:]) @@ -94,7 +94,7 @@ func TestCacheRecord(t *testing.T) { currentBuf := strings.TrimRight(strings.Split(buf.String(), "\n")[1], "\n") assert.Equal(t, strings.Split(currentBuf, ",")[:50], strings.Split(recordStrIPv6, ",")[:50]) assert.Equal(t, strings.Split(currentBuf, ",")[51:], strings.Split(recordStrIPv6, ",")[51:]) - assert.Equal(t, 0, s3UploadProc.cachedRecordCount) + assert.EqualValues(t, 0, s3UploadProc.cachedRecordCount) assert.Equal(t, "", s3UploadProc.currentBuffer.String()) } @@ -118,14 +118,14 @@ func TestBatchUploadAll(t *testing.T) { mockRecord := ipfixentitiestesting.NewMockRecord(ctrl) flowaggregatortesting.PrepareMockIpfixRecord(mockRecord, true) s3UploadProc.CacheRecord(mockRecord) - assert.Equal(t, 1, s3UploadProc.cachedRecordCount) + assert.EqualValues(t, 1, s3UploadProc.cachedRecordCount) err := s3UploadProc.batchUploadAll(ctx) assert.NoError(t, err) assert.Equal(t, 0, len(s3UploadProc.bufferQueue)) assert.Equal(t, 0, len(s3UploadProc.buffersToUpload)) assert.Equal(t, "", s3UploadProc.currentBuffer.String()) - assert.Equal(t, 0, s3UploadProc.cachedRecordCount) + assert.EqualValues(t, 0, s3UploadProc.cachedRecordCount) } func TestBatchUploadAllPartialSuccess(t *testing.T) { @@ -184,7 +184,7 @@ func TestBatchUploadAllError(t *testing.T) { mockRecord := 
ipfixentitiestesting.NewMockRecord(ctrl) flowaggregatortesting.PrepareMockIpfixRecord(mockRecord, true) s3UploadProc.CacheRecord(mockRecord) - assert.Equal(t, 1, s3UploadProc.cachedRecordCount) + assert.EqualValues(t, 1, s3UploadProc.cachedRecordCount) // It is expected to fail when calling uploadFile, as the correct S3 bucket // configuration is not provided. @@ -192,7 +192,7 @@ func TestBatchUploadAllError(t *testing.T) { assert.Equal(t, 1, len(s3UploadProc.buffersToUpload)) assert.Equal(t, 0, len(s3UploadProc.bufferQueue)) assert.Equal(t, "", s3UploadProc.currentBuffer.String()) - assert.Equal(t, 0, s3UploadProc.cachedRecordCount) + assert.EqualValues(t, 0, s3UploadProc.cachedRecordCount) expectedErrMsg := "error when uploading file to S3: operation error S3: PutObject, https response error StatusCode: 301" assert.Contains(t, err.Error(), expectedErrMsg) } @@ -224,7 +224,7 @@ func TestFlowRecordPeriodicCommit(t *testing.T) { mockRecord := ipfixentitiestesting.NewMockRecord(ctrl) flowaggregatortesting.PrepareMockIpfixRecord(mockRecord, true) s3UploadProc.CacheRecord(mockRecord) - assert.Equal(t, 1, s3UploadProc.cachedRecordCount) + assert.EqualValues(t, 1, s3UploadProc.cachedRecordCount) s3UploadProc.startExportProcess() assert.Eventually(t, func() bool { @@ -241,7 +241,7 @@ func TestFlowRecordPeriodicCommit(t *testing.T) { assert.Equal(t, 0, len(s3UploadProc.bufferQueue)) assert.Equal(t, 0, len(s3UploadProc.buffersToUpload)) assert.Equal(t, "", s3UploadProc.currentBuffer.String()) - assert.Equal(t, 0, s3UploadProc.cachedRecordCount) + assert.EqualValues(t, 0, s3UploadProc.cachedRecordCount) } func TestFlushCacheOnStop(t *testing.T) { @@ -264,12 +264,12 @@ func TestFlushCacheOnStop(t *testing.T) { mockRecord := ipfixentitiestesting.NewMockRecord(ctrl) flowaggregatortesting.PrepareMockIpfixRecord(mockRecord, true) s3UploadProc.CacheRecord(mockRecord) - assert.Equal(t, 1, s3UploadProc.cachedRecordCount) + assert.EqualValues(t, 1, s3UploadProc.cachedRecordCount) s3UploadProc.startExportProcess() s3UploadProc.stopExportProcess(true) assert.Equal(t, 0, len(s3UploadProc.bufferQueue)) assert.Equal(t, 0, len(s3UploadProc.buffersToUpload)) assert.Equal(t, "", s3UploadProc.currentBuffer.String()) - assert.Equal(t, 0, s3UploadProc.cachedRecordCount) + assert.EqualValues(t, 0, s3UploadProc.cachedRecordCount) } diff --git a/pkg/ipam/poolallocator/allocator.go b/pkg/ipam/poolallocator/allocator.go index 036a3778802..f1efae3acab 100644 --- a/pkg/ipam/poolallocator/allocator.go +++ b/pkg/ipam/poolallocator/allocator.go @@ -91,7 +91,7 @@ func (a *IPPoolAllocator) initIPAllocators(ipPool *v1beta1.IPPool) (ipallocator. 
} size, bits := ipNet.Mask.Size() - if int32(size) == ipPool.Spec.SubnetInfo.PrefixLength && bits == 32 { + if size == int(ipPool.Spec.SubnetInfo.PrefixLength) && bits == 32 { // Allocation CIDR covers entire subnet, thus we need // to reserve broadcast IP as well for IPv4 reservedIPs = append(reservedIPs, iputil.GetLocalBroadcastIP(ipNet)) diff --git a/pkg/ovs/ovsctl/ovsctl_test.go b/pkg/ovs/ovsctl/ovsctl_test.go index 960a66e7613..65d0e76a32e 100644 --- a/pkg/ovs/ovsctl/ovsctl_test.go +++ b/pkg/ovs/ovsctl/ovsctl_test.go @@ -402,7 +402,7 @@ func TestOfCtl(t *testing.T) { bridge: "br-int", ovsOfctlRunner: mockOVSOfctlRunner, } - groupID := 3 + groupID := uint32(3) DumpGroup := func(id string) ([]byte, error) { var x = []byte{} for i := 0; i < len(testDumpGroups); i++ { @@ -415,7 +415,7 @@ func TestOfCtl(t *testing.T) { return x, nil } mockOVSOfctlRunner.EXPECT().RunOfctlCmd("dump-groups", strconv.FormatUint(uint64(groupID), 10)).Return(DumpGroup(fmt.Sprintf("%d", groupID))) - out, err := client.DumpGroup(uint32(groupID)) + out, err := client.DumpGroup(groupID) require.NoError(err) expectedGroup := "group_id=3,type=select,bucket=bucket_id:1,output:1,bucket=bucket_id:2,output:2,bucket=bucket_id:3,output:3,bucket=bucket_id:4,output:4" assert.Equal(expectedGroup, out) diff --git a/pkg/util/ip/ip.go b/pkg/util/ip/ip.go index 7c5c7254041..5694bada94c 100644 --- a/pkg/util/ip/ip.go +++ b/pkg/util/ip/ip.go @@ -96,7 +96,7 @@ func diffFromCIDR(allowCIDR, exceptCIDR *net.IPNet) []*net.IPNet { remainingCIDRs := make([]*net.IPNet, 0, exceptPrefix-allowPrefix) for i := allowPrefix + 1; i <= exceptPrefix; i++ { // Flip the (ipBitLen - i)th bit from LSB in exceptCIDR to get the IP which is not in exceptCIDR - ipOfNewCIDR := flipSingleBit(&exceptStartIP, uint8(bits-i)) + ipOfNewCIDR := flipSingleBit(&exceptStartIP, bits-i) newCIDRMask := net.CIDRMask(i, bits) for j := range allowStartIP { ipOfNewCIDR[j] = allowStartIP[j] | ipOfNewCIDR[j] @@ -108,10 +108,10 @@ func diffFromCIDR(allowCIDR, exceptCIDR *net.IPNet) []*net.IPNet { return remainingCIDRs } -func flipSingleBit(ip *net.IP, bitIndex uint8) net.IP { +func flipSingleBit(ip *net.IP, bitIndex int) net.IP { newIP := make(net.IP, len(*ip)) copy(newIP, *ip) - byteIndex := uint8(len(newIP)) - (bitIndex / 8) - 1 + byteIndex := len(newIP) - (bitIndex / 8) - 1 // XOR bit operation to flip newIP[byteIndex] = newIP[byteIndex] ^ (1 << (bitIndex % 8)) return newIP diff --git a/pkg/util/sets/int32_test.go b/pkg/util/sets/int32_test.go index 6504fff3f1d..c4dc6b267cc 100644 --- a/pkg/util/sets/int32_test.go +++ b/pkg/util/sets/int32_test.go @@ -21,7 +21,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" ) -func getInt32Sets(start, end int) sets.Set[int32] { +func getInt32Sets(start, end int32) sets.Set[int32] { s := sets.New[int32]() for i := start; i < end; i++ { s.Insert(int32(i)) diff --git a/test/e2e/framework.go b/test/e2e/framework.go index 218aead4859..27d82df258f 100644 --- a/test/e2e/framework.go +++ b/test/e2e/framework.go @@ -583,7 +583,8 @@ func (data *TestData) collectClusterInfo() error { workerIdx := 1 clusterInfo.nodes = make(map[int]*ClusterNode) clusterInfo.nodesOS = make(map[string]string) - for _, node := range nodes.Items { + for idx := range nodes.Items { + node := &nodes.Items[idx] isControlPlaneNode := func() bool { _, ok := node.Labels[labelNodeRoleControlPlane()] return ok diff --git a/test/integration/ovs/ofctrl_test.go b/test/integration/ovs/ofctrl_test.go index 4b00fe5cbac..cf37f01a330 100644 --- a/test/integration/ovs/ofctrl_test.go +++ 
b/test/integration/ovs/ofctrl_test.go @@ -214,7 +214,7 @@ func TestOFctrlFlow(t *testing.T) { // Test: DumpTableStatus for _, tableStates := range bridge.DumpTableStatus() { if tableStates.ID == uint(myTable.GetID()) { - if int(tableStates.FlowCount) != len(flowList) { + if tableStates.FlowCount != uint(len(flowList)) { t.Errorf("Flow count of table %d in the cache is incorrect, expect: %d, actual %d", dumpTable, len(flowList), tableStates.FlowCount) } } From c1a75b1acaad5b132dadd5be887b445034b49205 Mon Sep 17 00:00:00 2001 From: Antonin Bas Date: Fri, 30 Aug 2024 18:29:47 -0700 Subject: [PATCH 2/3] Update metrics doc Signed-off-by: Antonin Bas --- docs/prometheus-integration.md | 96 ++++++++++++++++++++++++++++------ 1 file changed, 79 insertions(+), 17 deletions(-) diff --git a/docs/prometheus-integration.md b/docs/prometheus-integration.md index 47a4daf7a3b..af40f6cc11f 100644 --- a/docs/prometheus-integration.md +++ b/docs/prometheus-integration.md @@ -345,13 +345,14 @@ could not otherwise find a use for. This should be subtracted from the total GC CPU time to obtain a measure of compulsory GC CPU time. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. -- **go_cpu_classes_gc_pause_cpu_seconds_total:** Estimated total CPU time spent -with the application paused by the GC. Even if only one thread is running -during the pause, this is computed as GOMAXPROCS times the pause latency -because nothing else can be executing. This is the exact sum of samples in -/gc/pause:seconds if each sample is multiplied by GOMAXPROCS at the time -it is taken. This metric is an overestimate, and not directly comparable to -system CPU time measurements. Compare only with other /cpu/classes metrics. +- **go_cpu_classes_gc_pause_cpu_seconds_total:** Estimated total CPU time +spent with the application paused by the GC. Even if only one thread is +running during the pause, this is computed as GOMAXPROCS times the pause +latency because nothing else can be executing. This is the exact sum of +samples in /sched/pauses/total/gc:seconds if each sample is multiplied by +GOMAXPROCS at the time it is taken. This metric is an overestimate, and not +directly comparable to system CPU time measurements. Compare only with other +/cpu/classes metrics. - **go_cpu_classes_gc_total_cpu_seconds_total:** Estimated total CPU time spent performing GC tasks. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other @@ -434,8 +435,8 @@ of an out-of-memory error, because the limiter trades memory for CPU time when the GC's CPU time gets too high. This is most likely to occur with use of SetMemoryLimit. The first GC cycle is cycle 1, so a value of 0 indicates that it was never enabled. -- **go_gc_pauses_seconds:** Distribution of individual GC-related -stop-the-world pause latencies. Bucket counts increase monotonically. +- **go_gc_pauses_seconds:** Deprecated. Prefer the identical +/sched/pauses/total/gc:seconds. - **go_gc_scan_globals_bytes:** The total amount of global variable space that is scannable. - **go_gc_scan_heap_bytes:** The total amount of heap space that is scannable. @@ -444,6 +445,9 @@ last GC cycle. - **go_gc_scan_total_bytes:** The total amount space that is scannable. Sum of all metrics in /gc/scan. - **go_gc_stack_starting_size_bytes:** The stack size of new goroutines. 
+- **go_godebug_non_default_behavior_asynctimerchan_events_total:** The number +of non-default behaviors executed by the time package due to a non-default +GODEBUG=asynctimerchan=... setting. - **go_godebug_non_default_behavior_execerrdot_events_total:** The number of non-default behaviors executed by the os/exec package due to a non-default GODEBUG=execerrdot=... setting. @@ -456,18 +460,27 @@ GODEBUG=gocachetest=... setting. - **go_godebug_non_default_behavior_gocacheverify_events_total:** The number of non-default behaviors executed by the cmd/go package due to a non-default GODEBUG=gocacheverify=... setting. +- **go_godebug_non_default_behavior_gotypesalias_events_total:** The number +of non-default behaviors executed by the go/types package due to a non-default +GODEBUG=gotypesalias=... setting. - **go_godebug_non_default_behavior_http2client_events_total:** The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=http2client=... setting. - **go_godebug_non_default_behavior_http2server_events_total:** The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=http2server=... setting. +- **go_godebug_non_default_behavior_httplaxcontentlength_events_total:** +The number of non-default behaviors executed by the net/http package due to +a non-default GODEBUG=httplaxcontentlength=... setting. +- **go_godebug_non_default_behavior_httpmuxgo121_events_total:** The number +of non-default behaviors executed by the net/http package due to a non-default +GODEBUG=httpmuxgo121=... setting. +- **go_godebug_non_default_behavior_httpservecontentkeepheaders_events_total:** +The number of non-default behaviors executed by the net/http package due to +a non-default GODEBUG=httpservecontentkeepheaders=... setting. - **go_godebug_non_default_behavior_installgoroot_events_total:** The number of non-default behaviors executed by the go/build package due to a non-default GODEBUG=installgoroot=... setting. -- **go_godebug_non_default_behavior_jstmpllitinterp_events_total:** The -number of non-default behaviors executed by the html/template package due -to a non-default GODEBUG=jstmpllitinterp=... setting. - **go_godebug_non_default_behavior_multipartmaxheaders_events_total:** The number of non-default behaviors executed by the mime/multipart package due to a non-default GODEBUG=multipartmaxheaders=... setting. @@ -489,15 +502,42 @@ GODEBUG=randautoseed=... setting. - **go_godebug_non_default_behavior_tarinsecurepath_events_total:** The number of non-default behaviors executed by the archive/tar package due to a non-default GODEBUG=tarinsecurepath=... setting. +- **go_godebug_non_default_behavior_tls10server_events_total:** The number of +non-default behaviors executed by the crypto/tls package due to a non-default +GODEBUG=tls10server=... setting. +- **go_godebug_non_default_behavior_tls3des_events_total:** The number of +non-default behaviors executed by the crypto/tls package due to a non-default +GODEBUG=tls3des=... setting. - **go_godebug_non_default_behavior_tlsmaxrsasize_events_total:** The number of non-default behaviors executed by the crypto/tls package due to a non-default GODEBUG=tlsmaxrsasize=... setting. +- **go_godebug_non_default_behavior_tlsrsakex_events_total:** The number of +non-default behaviors executed by the crypto/tls package due to a non-default +GODEBUG=tlsrsakex=... setting. 
+- **go_godebug_non_default_behavior_tlsunsafeekm_events_total:** The number of +non-default behaviors executed by the crypto/tls package due to a non-default +GODEBUG=tlsunsafeekm=... setting. +- **go_godebug_non_default_behavior_winreadlinkvolume_events_total:** +The number of non-default behaviors executed by the os package due to a +non-default GODEBUG=winreadlinkvolume=... setting. +- **go_godebug_non_default_behavior_winsymlink_events_total:** The number +of non-default behaviors executed by the os package due to a non-default +GODEBUG=winsymlink=... setting. +- **go_godebug_non_default_behavior_x509keypairleaf_events_total:** The +number of non-default behaviors executed by the crypto/tls package due to +a non-default GODEBUG=x509keypairleaf=... setting. +- **go_godebug_non_default_behavior_x509negativeserial_events_total:** The +number of non-default behaviors executed by the crypto/x509 package due to +a non-default GODEBUG=x509negativeserial=... setting. - **go_godebug_non_default_behavior_x509sha1_events_total:** The number of non-default behaviors executed by the crypto/x509 package due to a non-default GODEBUG=x509sha1=... setting. - **go_godebug_non_default_behavior_x509usefallbackroots_events_total:** The number of non-default behaviors executed by the crypto/x509 package due to a non-default GODEBUG=x509usefallbackroots=... setting. +- **go_godebug_non_default_behavior_x509usepolicies_events_total:** The +number of non-default behaviors executed by the crypto/x509 package due to +a non-default GODEBUG=x509usepolicies=... setting. - **go_godebug_non_default_behavior_zipinsecurepath_events_total:** The number of non-default behaviors executed by the archive/zip package due to a non-default GODEBUG=zipinsecurepath=... setting. @@ -588,11 +628,33 @@ code simultaneously. - **go_sched_latencies_seconds:** Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. Bucket counts increase monotonically. -- **go_sync_mutex_wait_total_seconds_total:** Approximate cumulative time -goroutines have spent blocked on a sync.Mutex or sync.RWMutex. This metric -is useful for identifying global changes in lock contention. Collect a -mutex or block profile using the runtime/pprof package for more detailed -contention data. +- **go_sched_pauses_stopping_gc_seconds:** Distribution of individual +GC-related stop-the-world stopping latencies. This is the time it takes from +deciding to stop the world until all Ps are stopped. This is a subset of the +total GC-related stop-the-world time (/sched/pauses/total/gc:seconds). During +this time, some threads may be executing. Bucket counts increase monotonically. +- **go_sched_pauses_stopping_other_seconds:** Distribution of +individual non-GC-related stop-the-world stopping latencies. This +is the time it takes from deciding to stop the world until all Ps are +stopped. This is a subset of the total non-GC-related stop-the-world time +(/sched/pauses/total/other:seconds). During this time, some threads may be +executing. Bucket counts increase monotonically. +- **go_sched_pauses_total_gc_seconds:** Distribution of individual +GC-related stop-the-world pause latencies. This is the time from deciding +to stop the world until the world is started again. Some of this time +is spent getting all threads to stop (this is measured directly in +/sched/pauses/stopping/gc:seconds), during which some threads may still be +running. Bucket counts increase monotonically. 
+- **go_sched_pauses_total_other_seconds:** Distribution of individual +non-GC-related stop-the-world pause latencies. This is the time from +deciding to stop the world until the world is started again. Some of +this time is spent getting all threads to stop (measured directly in +/sched/pauses/stopping/other:seconds). Bucket counts increase monotonically. +- **go_sync_mutex_wait_total_seconds_total:** Approximate cumulative +time goroutines have spent blocked on a sync.Mutex, sync.RWMutex, or +runtime-internal lock. This metric is useful for identifying global changes +in lock contention. Collect a mutex or block profile using the runtime/pprof +package for more detailed contention data. - **go_threads:** Number of OS threads created. #### Hidden Metrics From cfc45c858336b4ebf976645849c342d727275b9e Mon Sep 17 00:00:00 2001 From: Antonin Bas Date: Tue, 3 Sep 2024 10:32:23 -0700 Subject: [PATCH 3/3] Address review comments Signed-off-by: Antonin Bas --- pkg/antctl/raw/check/cluster/command.go | 2 +- pkg/antctl/raw/check/installation/command.go | 2 +- pkg/antctl/raw/check/util.go | 7 +++---- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/pkg/antctl/raw/check/cluster/command.go b/pkg/antctl/raw/check/cluster/command.go index a89bf8ed390..cf0b5ac231a 100644 --- a/pkg/antctl/raw/check/cluster/command.go +++ b/pkg/antctl/raw/check/cluster/command.go @@ -100,7 +100,7 @@ func Run(o *options) error { } ctx := context.Background() testContext := NewTestContext(client, config, clusterName, o.testImage) - defer check.Teardown(ctx, testContext.client, testContext.clusterName, testContext.namespace) + defer check.Teardown(ctx, testContext.Logger, testContext.client, testContext.namespace) if err := testContext.setup(ctx); err != nil { return err } diff --git a/pkg/antctl/raw/check/installation/command.go b/pkg/antctl/raw/check/installation/command.go index 72e9121e940..788830c3184 100644 --- a/pkg/antctl/raw/check/installation/command.go +++ b/pkg/antctl/raw/check/installation/command.go @@ -147,7 +147,7 @@ func Run(o *options) error { } ctx := context.Background() testContext := NewTestContext(client, config, clusterName, o.antreaNamespace, runFilterRegex, o.testImage) - defer check.Teardown(ctx, testContext.client, testContext.clusterName, testContext.namespace) + defer check.Teardown(ctx, testContext.Logger, testContext.client, testContext.namespace) if err := testContext.setup(ctx); err != nil { return err } diff --git a/pkg/antctl/raw/check/util.go b/pkg/antctl/raw/check/util.go index e9a4f2f5fae..2e058e352ce 100644 --- a/pkg/antctl/raw/check/util.go +++ b/pkg/antctl/raw/check/util.go @@ -215,8 +215,7 @@ func GenerateRandomNamespace(baseName string) string { return fmt.Sprintf("%s-%s", baseName, string(bytes)) } -func Teardown(ctx context.Context, client kubernetes.Interface, clusterName string, namespace string) { - logger := NewLogger(fmt.Sprintf("[%s] ", clusterName)) +func Teardown(ctx context.Context, logger Logger, client kubernetes.Interface, namespace string) { logger.Log("Deleting installation tests setup...") err := client.CoreV1().Namespaces().Delete(ctx, namespace, metav1.DeleteOptions{}) if err != nil { @@ -234,8 +233,8 @@ func Teardown(ctx context.Context, client kubernetes.Interface, clusterName stri return false, nil }) if err != nil { - logger.Success("Setup deletion failed") + logger.Fail("Setup deletion failed") } else { - logger.Fail("Setup deletion successful") + logger.Success("Setup deletion successful") } }
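
Reviewer note on PATCH 3/3: Teardown no longer builds its own Logger from
clusterName; callers now pass in the Logger they already hold (testContext.Logger
in the hunks above). A minimal sketch of the resulting call pattern, assuming
NewLogger in the new pkg/antctl/raw/check/logger.go keeps the prefix-based
signature that the removed line used and returns a value satisfying Logger:

    package main

    import (
        "context"
        "fmt"

        "k8s.io/client-go/kubernetes"

        "antrea.io/antrea/pkg/antctl/raw/check"
    )

    // runChecks shows how a caller wires one shared logger into the new
    // Teardown signature instead of passing the cluster name.
    func runChecks(ctx context.Context, client kubernetes.Interface, clusterName, namespace string) {
        // Assumed constructor; the old Teardown built its logger the same way.
        logger := check.NewLogger(fmt.Sprintf("[%s] ", clusterName))
        defer check.Teardown(ctx, logger, client, namespace)
        // ... run the individual checks, logging through the same logger ...
    }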
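
Reviewer note on PATCH 2/3: most of the docs churn comes from Go renaming the
GC pause histogram; go_gc_pauses_seconds is now documented as deprecated in
favor of /sched/pauses/total/gc:seconds, with the new stopping/other pause
variants added. An illustrative sketch of reading the new histogram through the
upstream runtime/metrics API (not code used by Antrea, only the API the doc
text refers to; requires a Go release that exports this metric):

    package main

    import (
        "fmt"
        "runtime/metrics"
    )

    func main() {
        // The histogram that replaces the deprecated go_gc_pauses_seconds source metric.
        samples := []metrics.Sample{{Name: "/sched/pauses/total/gc:seconds"}}
        metrics.Read(samples)
        if samples[0].Value.Kind() == metrics.KindFloat64Histogram {
            hist := samples[0].Value.Float64Histogram()
            fmt.Printf("GC pause histogram has %d buckets\n", len(hist.Counts))
        }
    }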