diff --git a/build/charts/antrea/conf/antrea-agent.conf b/build/charts/antrea/conf/antrea-agent.conf index 7be006976a2..b565034f28f 100644 --- a/build/charts/antrea/conf/antrea-agent.conf +++ b/build/charts/antrea/conf/antrea-agent.conf @@ -85,6 +85,9 @@ featureGates: # Allow users to apply ClusterNetworkPolicy to Kubernetes Nodes. {{- include "featureGate" (dict "featureGates" .Values.featureGates "name" "NodeNetworkPolicy" "default" false) }} +# Enable L7FlowExporter on Pods and Namespaces to export the application layer flows such as HTTP flows. +{{- include "featureGate" (dict "featureGates" .Values.featureGates "name" "L7FlowExporter" "default" false) }} + # Name of the OpenVSwitch bridge antrea-agent will create and use. # Make sure it doesn't conflict with your existing OpenVSwitch bridges. ovsBridge: {{ .Values.ovs.bridgeName | quote }} diff --git a/build/yamls/antrea-aks.yml b/build/yamls/antrea-aks.yml index 937f92e7145..7c09defd871 100644 --- a/build/yamls/antrea-aks.yml +++ b/build/yamls/antrea-aks.yml @@ -5628,6 +5628,9 @@ data: # Allow users to apply ClusterNetworkPolicy to Kubernetes Nodes. # NodeNetworkPolicy: false + # Enable L7FlowExporter on Pods and Namespaces to export the application layer flows such as HTTP flows. + # L7FlowExporter: false + # Name of the OpenVSwitch bridge antrea-agent will create and use. # Make sure it doesn't conflict with your existing OpenVSwitch bridges. ovsBridge: "br-int" @@ -6928,7 +6931,7 @@ spec: kubectl.kubernetes.io/default-container: antrea-agent # Automatically restart Pods with a RollingUpdate if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: f4ad8910666191c02982d1b7b202e3c4bd20fb4a8179dcb5696119f3b1490a72 + checksum/config: 30843b57762c91dfcffb560917191e3bc7e662c06552759bac2a173bc060b82c labels: app: antrea component: antrea-agent @@ -7166,7 +7169,7 @@ spec: annotations: # Automatically restart Pod if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: f4ad8910666191c02982d1b7b202e3c4bd20fb4a8179dcb5696119f3b1490a72 + checksum/config: 30843b57762c91dfcffb560917191e3bc7e662c06552759bac2a173bc060b82c labels: app: antrea component: antrea-controller diff --git a/build/yamls/antrea-eks.yml b/build/yamls/antrea-eks.yml index 8d83fc62773..bb63cda8f94 100644 --- a/build/yamls/antrea-eks.yml +++ b/build/yamls/antrea-eks.yml @@ -5628,6 +5628,9 @@ data: # Allow users to apply ClusterNetworkPolicy to Kubernetes Nodes. # NodeNetworkPolicy: false + # Enable L7FlowExporter on Pods and Namespaces to export the application layer flows such as HTTP flows. + # L7FlowExporter: false + # Name of the OpenVSwitch bridge antrea-agent will create and use. # Make sure it doesn't conflict with your existing OpenVSwitch bridges. 
ovsBridge: "br-int" @@ -6928,7 +6931,7 @@ spec: kubectl.kubernetes.io/default-container: antrea-agent # Automatically restart Pods with a RollingUpdate if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: f4ad8910666191c02982d1b7b202e3c4bd20fb4a8179dcb5696119f3b1490a72 + checksum/config: 30843b57762c91dfcffb560917191e3bc7e662c06552759bac2a173bc060b82c labels: app: antrea component: antrea-agent @@ -7167,7 +7170,7 @@ spec: annotations: # Automatically restart Pod if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: f4ad8910666191c02982d1b7b202e3c4bd20fb4a8179dcb5696119f3b1490a72 + checksum/config: 30843b57762c91dfcffb560917191e3bc7e662c06552759bac2a173bc060b82c labels: app: antrea component: antrea-controller diff --git a/build/yamls/antrea-gke.yml b/build/yamls/antrea-gke.yml index a91213568da..5280fdece64 100644 --- a/build/yamls/antrea-gke.yml +++ b/build/yamls/antrea-gke.yml @@ -5628,6 +5628,9 @@ data: # Allow users to apply ClusterNetworkPolicy to Kubernetes Nodes. # NodeNetworkPolicy: false + # Enable L7FlowExporter on Pods and Namespaces to export the application layer flows such as HTTP flows. + # L7FlowExporter: false + # Name of the OpenVSwitch bridge antrea-agent will create and use. # Make sure it doesn't conflict with your existing OpenVSwitch bridges. ovsBridge: "br-int" @@ -6928,7 +6931,7 @@ spec: kubectl.kubernetes.io/default-container: antrea-agent # Automatically restart Pods with a RollingUpdate if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: a54768c79d693083be554386f268c93bbbd0fdf5b334edd9aff31c13151c4e29 + checksum/config: d5cdb5356795c44a69c66fad1b4d67f7c00cdcbe837f3b3b50260e4d9dfd1e7e labels: app: antrea component: antrea-agent @@ -7164,7 +7167,7 @@ spec: annotations: # Automatically restart Pod if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: a54768c79d693083be554386f268c93bbbd0fdf5b334edd9aff31c13151c4e29 + checksum/config: d5cdb5356795c44a69c66fad1b4d67f7c00cdcbe837f3b3b50260e4d9dfd1e7e labels: app: antrea component: antrea-controller diff --git a/build/yamls/antrea-ipsec.yml b/build/yamls/antrea-ipsec.yml index dafd8b040ce..2edcb4f2658 100644 --- a/build/yamls/antrea-ipsec.yml +++ b/build/yamls/antrea-ipsec.yml @@ -5641,6 +5641,9 @@ data: # Allow users to apply ClusterNetworkPolicy to Kubernetes Nodes. # NodeNetworkPolicy: false + # Enable L7FlowExporter on Pods and Namespaces to export the application layer flows such as HTTP flows. + # L7FlowExporter: false + # Name of the OpenVSwitch bridge antrea-agent will create and use. # Make sure it doesn't conflict with your existing OpenVSwitch bridges. 
ovsBridge: "br-int" @@ -6941,7 +6944,7 @@ spec: kubectl.kubernetes.io/default-container: antrea-agent # Automatically restart Pods with a RollingUpdate if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: 7ce7d85bc08079d1cef3b1d44f31e2139961f9ae49f71d79ff3b28e7e9ad6325 + checksum/config: 50f2864cf09e4732327b963130bd59a9fc06c560784b161c94e813c000367615 checksum/ipsec-secret: d0eb9c52d0cd4311b6d252a951126bf9bea27ec05590bed8a394f0f792dcb2a4 labels: app: antrea @@ -7223,7 +7226,7 @@ spec: annotations: # Automatically restart Pod if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: 7ce7d85bc08079d1cef3b1d44f31e2139961f9ae49f71d79ff3b28e7e9ad6325 + checksum/config: 50f2864cf09e4732327b963130bd59a9fc06c560784b161c94e813c000367615 labels: app: antrea component: antrea-controller diff --git a/build/yamls/antrea.yml b/build/yamls/antrea.yml index 17858eb7007..d3c980a7ff8 100644 --- a/build/yamls/antrea.yml +++ b/build/yamls/antrea.yml @@ -5628,6 +5628,9 @@ data: # Allow users to apply ClusterNetworkPolicy to Kubernetes Nodes. # NodeNetworkPolicy: false + # Enable L7FlowExporter on Pods and Namespaces to export the application layer flows such as HTTP flows. + # L7FlowExporter: false + # Name of the OpenVSwitch bridge antrea-agent will create and use. # Make sure it doesn't conflict with your existing OpenVSwitch bridges. ovsBridge: "br-int" @@ -6928,7 +6931,7 @@ spec: kubectl.kubernetes.io/default-container: antrea-agent # Automatically restart Pods with a RollingUpdate if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: 290f0c748863a7dad1e9d53d62c74f8108a44c5cc803306d351c108062cc1378 + checksum/config: ac3c14eed7ca0dc28bf2d659cd2c4e4a39d55278fb9a8759c30ea12eff89e518 labels: app: antrea component: antrea-agent @@ -7164,7 +7167,7 @@ spec: annotations: # Automatically restart Pod if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: 290f0c748863a7dad1e9d53d62c74f8108a44c5cc803306d351c108062cc1378 + checksum/config: ac3c14eed7ca0dc28bf2d659cd2c4e4a39d55278fb9a8759c30ea12eff89e518 labels: app: antrea component: antrea-controller diff --git a/ci/kind/test-e2e-kind.sh b/ci/kind/test-e2e-kind.sh index 951d16cfb77..60bef8dbabe 100755 --- a/ci/kind/test-e2e-kind.sh +++ b/ci/kind/test-e2e-kind.sh @@ -205,7 +205,7 @@ if $multicast; then manifest_args="$manifest_args --multicast" fi if $flow_visibility; then - manifest_args="$manifest_args --feature-gates FlowExporter=true --extra-helm-values-file $FLOW_VISIBILITY_HELM_VALUES" + manifest_args="$manifest_args --feature-gates FlowExporter=true,L7FlowExporter=true --extra-helm-values-file $FLOW_VISIBILITY_HELM_VALUES" fi COMMON_IMAGES_LIST=("registry.k8s.io/e2e-test-images/agnhost:2.29" \ diff --git a/cmd/antrea-agent/agent.go b/cmd/antrea-agent/agent.go index a0c1807278e..fb851a43a78 100644 --- a/cmd/antrea-agent/agent.go +++ b/cmd/antrea-agent/agent.go @@ -39,7 +39,9 @@ import ( "antrea.io/antrea/pkg/agent/config" "antrea.io/antrea/pkg/agent/controller/egress" "antrea.io/antrea/pkg/agent/controller/ipseccertificate" + "antrea.io/antrea/pkg/agent/controller/l7flowexporter" "antrea.io/antrea/pkg/agent/controller/networkpolicy" + "antrea.io/antrea/pkg/agent/controller/networkpolicy/l7engine" "antrea.io/antrea/pkg/agent/controller/noderoute" 
"antrea.io/antrea/pkg/agent/controller/serviceexternalip" "antrea.io/antrea/pkg/agent/controller/traceflow" @@ -141,6 +143,7 @@ func run(o *Options) error { enableBridgingMode := enableAntreaIPAM && o.config.EnableBridgingMode l7NetworkPolicyEnabled := features.DefaultFeatureGate.Enabled(features.L7NetworkPolicy) nodeNetworkPolicyEnabled := features.DefaultFeatureGate.Enabled(features.NodeNetworkPolicy) + l7FlowExporterEnabled := features.DefaultFeatureGate.Enabled(features.L7FlowExporter) enableMulticlusterGW := features.DefaultFeatureGate.Enabled(features.Multicluster) && o.config.Multicluster.EnableGateway enableMulticlusterNP := features.DefaultFeatureGate.Enabled(features.Multicluster) && o.config.Multicluster.EnableStretchedNetworkPolicy enableFlowExporter := features.DefaultFeatureGate.Enabled(features.FlowExporter) && o.config.FlowExporter.Enable @@ -170,6 +173,7 @@ func run(o *Options) error { connectUplinkToBridge, multicastEnabled, features.DefaultFeatureGate.Enabled(features.TrafficControl), + l7FlowExporterEnabled, enableMulticlusterGW, groupIDAllocator, *o.config.EnablePrometheusMetrics, @@ -292,7 +296,8 @@ func run(o *Options) error { o.config.ExternalNode.ExternalNodeNamespace, connectUplinkToBridge, o.enableAntreaProxy, - l7NetworkPolicyEnabled) + l7NetworkPolicyEnabled, + l7FlowExporterEnabled) err = agentInitializer.Initialize() if err != nil { return fmt.Errorf("error initializing agent: %v", err) @@ -466,6 +471,10 @@ func run(o *Options) error { if o.nodeType == config.ExternalNode { nodeKey = k8s.NamespacedName(o.config.ExternalNode.ExternalNodeNamespace, nodeKey) } + var l7Reconciler *l7engine.Reconciler + if l7NetworkPolicyEnabled || l7FlowExporterEnabled { + l7Reconciler = l7engine.NewReconciler() + } networkPolicyController, err := networkpolicy.NewNetworkPolicyController( antreaClientProvider, ofClient, @@ -493,10 +502,22 @@ func run(o *Options) error { tunPort, nodeConfig, podNetworkWait, + l7Reconciler, ) if err != nil { return fmt.Errorf("error creating new NetworkPolicy controller: %v", err) } + var l7FlowExporterController *l7flowexporter.L7FlowExporterController + if l7FlowExporterEnabled { + l7FlowExporterController = l7flowexporter.NewL7FlowExporterController( + ofClient, + ifaceStore, + localPodInformer.Get(), + namespaceInformer, + l7Reconciler, + ) + go l7FlowExporterController.Run(stopCh) + } var egressController *egress.EgressController @@ -650,7 +671,8 @@ func run(o *Options) error { o.enableAntreaProxy, networkPolicyController, flowExporterOptions, - egressController) + egressController, + l7FlowExporterController) if err != nil { return fmt.Errorf("error when creating IPFIX flow exporter: %v", err) } diff --git a/docs/antrea-l7-network-policy.md b/docs/antrea-l7-network-policy.md index 82d0cf78b7c..3b1aaa2c9a6 100644 --- a/docs/antrea-l7-network-policy.md +++ b/docs/antrea-l7-network-policy.md @@ -72,7 +72,7 @@ forwarded to an application-aware engine for protocol detection and rule enforce the layer 7 criteria is also matched, otherwise it will be dropped. Therefore, any rules after a layer 7 rule will not be enforced for the traffic that match the layer 7 rule's layer 3/4 criteria. -As of now, the only supported layer 7 protocol is HTTP. More protocols will be supported in the near future, and we +As of now, the only supported layer 7 protocol is HTTP. Support for more protocols may be added in the future and we welcome feature requests for protocols that you are interested in. 
### HTTP diff --git a/docs/feature-gates.md b/docs/feature-gates.md index 1fcdc0eff6e..79008d60430 100644 --- a/docs/feature-gates.md +++ b/docs/feature-gates.md @@ -58,6 +58,7 @@ edit the Agent configuration in the | `EgressTrafficShaping` | Agent | `false` | Alpha | v1.14 | N/A | N/A | Yes | OVS meters should be supported | | `EgressSeparateSubnet` | Agent | `false` | Alpha | v1.15 | N/A | N/A | No | | | `NodeNetworkPolicy` | Agent | `false` | Alpha | v1.15 | N/A | N/A | Yes | | +| `L7FlowExporter` | Agent | `false` | Alpha | v1.15 | N/A | N/A | Yes | | ## Description and Requirements of Features @@ -428,3 +429,12 @@ to be supported in the datapath. `EgressSeparateSubnet` allows users to allocate Egress IPs from a different subnet from the default Node subnet. Refer to this [document](egress.md#subnetinfo) for more information. + +### L7FlowExporter + +`L7FlowExporter` enables users to export application-layer flow data using Pod or Namespace annotations. +Refer to this [document](network-flow-visibility.md#layer-7-network-flow-exporter) for more information. + +#### Requirements for this Feature + +- Linux Nodes only. diff --git a/docs/network-flow-visibility.md b/docs/network-flow-visibility.md index 911e021d05c..21926016d20 100644 --- a/docs/network-flow-visibility.md +++ b/docs/network-flow-visibility.md @@ -35,6 +35,9 @@ - [Output Flow Records](#output-flow-records) - [Grafana Flow Collector (migrated)](#grafana-flow-collector-migrated) - [ELK Flow Collector (removed)](#elk-flow-collector-removed) +- [Layer 7 Network Flow Exporter](#layer-7-network-flow-exporter) + - [Prerequisites](#prerequisites) + - [Usage](#usage) ## Overview @@ -610,3 +613,59 @@ and other Theia features, please refer to the **Starting with Antrea v1.7, support for the ELK Flow Collector has been removed.** Please consider using the [Grafana Flow Collector](#grafana-flow-collector-migrated) instead, which is actively maintained. + +## Layer 7 Network Flow Exporter + +In addition to layer 4 network visibility, Antrea adds layer 7 network flow +export. + +### Prerequisites + +To achieve L7 (Layer 7) network flow export, the `L7FlowExporter` feature gate +must be enabled. + +### Usage + +To export layer 7 flows of a Pod or a Namespace, users can annotate Pods or +Namespaces with the annotation key `visibility.antrea.io/l7-export` and set the +value to indicate the traffic flow direction, which can be `ingress`, `egress` +or `both`. + +For example, to enable L7 flow export in the ingress direction on +Pod test-pod in the default Namespace, you can use: + +```bash +kubectl annotate pod test-pod visibility.antrea.io/l7-export=ingress +``` + +Based on the annotation, the Flow Exporter will export the L7 flow data to the +Flow Aggregator or the configured IPFIX collector using the fields `appProtocolName` +and `httpVals`. + +* The `appProtocolName` field indicates the application layer protocol +name (e.g. http) and will be empty if application layer data is not exported. +* `httpVals` stores a serialized JSON dictionary with every HTTP request for +a connection mapped to a unique transaction ID. This format lets us group all +the HTTP transactions pertaining to the same connection into the same exported +record.
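Since `httpVals` is itself a serialized JSON string keyed by transaction ID, a consumer of the exported records can decode it with standard JSON tooling. Below is a minimal Go sketch; the struct fields mirror the example record shown next and are illustrative assumptions, not a published schema.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// httpTransaction mirrors the fields seen in the example httpVals record
// below; the exact field set is an assumption for illustration.
type httpTransaction struct {
	Hostname        string `json:"hostname"`
	URL             string `json:"url"`
	HTTPUserAgent   string `json:"http_user_agent"`
	HTTPContentType string `json:"http_content_type"`
	HTTPMethod      string `json:"http_method"`
	Protocol        string `json:"protocol"`
	Status          int    `json:"status"`
	Length          int    `json:"length"`
}

func main() {
	// httpVals as exported: a JSON object mapping transaction IDs to HTTP requests.
	httpVals := `{"0":{"hostname":"10.10.0.1","url":"/public/","http_user_agent":"curl/7.74.0","http_content_type":"text/html","http_method":"GET","protocol":"HTTP/1.1","status":200,"length":153}}`
	transactions := map[string]httpTransaction{}
	if err := json.Unmarshal([]byte(httpVals), &transactions); err != nil {
		panic(err)
	}
	for id, tx := range transactions {
		fmt.Printf("transaction %s: %s %s -> %d\n", id, tx.HTTPMethod, tx.URL, tx.Status)
	}
}
```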
+ +An example of `httpVals` is: + +`"{\"0\":{\"hostname\":\"10.10.0.1\",\"url\":\"/public/\",\"http_user_agent\":\"curl/7.74.0\",\"http_content_type\":\"text/html\",\"http_method\":\"GET\",\"protocol\":\"HTTP/1.1\",\"status\":200,\"length\":153}}"` + +HTTP fields in `httpVals` are: + +| HTTP field | Description | |-------------------|--------------------------------------------------------| | hostname | IP address of the sender | | URL | URL requested on the server | | http_user_agent | application used for HTTP | | http_content_type | type of content being returned by the server | | http_method | HTTP method used for the request | | protocol | HTTP protocol version used for the request or response | | status | HTTP status code | | length | size of the response body | + +As of now, the only supported layer 7 protocol is `HTTP/1.1`. Support for more +protocols may be added in the future. Antrea supports the L7FlowExporter feature only +on Linux Nodes. Windows Nodes are not supported yet. diff --git a/pkg/agent/agent.go b/pkg/agent/agent.go index 791e18b890f..f89ef58783a 100644 --- a/pkg/agent/agent.go +++ b/pkg/agent/agent.go @@ -119,6 +119,7 @@ type Initializer struct { serviceConfig *config.ServiceConfig l7NetworkPolicyConfig *config.L7NetworkPolicyConfig enableL7NetworkPolicy bool + enableL7FlowExporter bool connectUplinkToBridge bool enableAntreaProxy bool // podNetworkWait should be decremented once the Node's network is ready. @@ -151,6 +152,7 @@ func NewInitializer( connectUplinkToBridge bool, enableAntreaProxy bool, enableL7NetworkPolicy bool, + enableL7FlowExporter bool, ) *Initializer { return &Initializer{ ovsBridgeClient: ovsBridgeClient, @@ -175,6 +177,7 @@ func NewInitializer( connectUplinkToBridge: connectUplinkToBridge, enableAntreaProxy: enableAntreaProxy, enableL7NetworkPolicy: enableL7NetworkPolicy, + enableL7FlowExporter: enableL7FlowExporter, } } @@ -423,9 +426,9 @@ func (i *Initializer) Initialize() error { return err } - if i.enableL7NetworkPolicy { - // prepareL7NetworkPolicyInterfaces must be executed after setupOVSBridge since it requires interfaceStore. - if err := i.prepareL7NetworkPolicyInterfaces(); err != nil { + if i.enableL7NetworkPolicy || i.enableL7FlowExporter { - // prepareL7EngineInterfaces must be executed after setupOVSBridge since it requires interfaceStore. + if err := i.prepareL7EngineInterfaces(); err != nil { return err } } diff --git a/pkg/agent/agent_linux.go b/pkg/agent/agent_linux.go index 98f57bc3d10..cc150fa5f24 100644 --- a/pkg/agent/agent_linux.go +++ b/pkg/agent/agent_linux.go @@ -343,19 +343,18 @@ func (i *Initializer) installVMInitialFlows() error { return nil } -// prepareL7NetworkPolicyInterfaces creates two OVS internal ports. An application-aware engine will connect to OVS +// prepareL7EngineInterfaces creates two OVS internal ports. An application-aware engine will connect to OVS // through these two ports.
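+// Both the L7 NetworkPolicy engine and the L7 Flow Exporter rely on these ports, so they are created when either feature is enabled.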
-func (i *Initializer) prepareL7NetworkPolicyInterfaces() error { +func (i *Initializer) prepareL7EngineInterfaces() error { trafficControlPortExternalIDs := map[string]interface{}{ interfacestore.AntreaInterfaceTypeKey: interfacestore.AntreaTrafficControl, } - for _, portName := range []string{config.L7NetworkPolicyTargetPortName, config.L7NetworkPolicyReturnPortName} { + for _, portName := range []string{config.L7RedirectTargetPortName, config.L7RedirectReturnPortName} { _, exists := i.ifaceStore.GetInterface(portName) if exists { continue } - portUUID, err := i.ovsBridgeClient.CreateInternalPort(portName, 0, "", trafficControlPortExternalIDs) if err != nil { return err @@ -382,22 +381,22 @@ func (i *Initializer) prepareL7NetworkPolicyInterfaces() error { i.ifaceStore.AddInterface(itf) } - targetPort, _ := i.ifaceStore.GetInterfaceByName(config.L7NetworkPolicyTargetPortName) - returnPort, _ := i.ifaceStore.GetInterfaceByName(config.L7NetworkPolicyReturnPortName) + targetPort, _ := i.ifaceStore.GetInterfaceByName(config.L7RedirectTargetPortName) + returnPort, _ := i.ifaceStore.GetInterfaceByName(config.L7RedirectReturnPortName) i.l7NetworkPolicyConfig.TargetOFPort = uint32(targetPort.OFPort) i.l7NetworkPolicyConfig.ReturnOFPort = uint32(returnPort.OFPort) // Set the ports with no-flood to reject ARP flood packets at every startup. if err := i.ovsCtlClient.SetPortNoFlood(int(targetPort.OFPort)); err != nil { - return fmt.Errorf("failed to set port %s with no-flood config: %w", config.L7NetworkPolicyTargetPortName, err) + return fmt.Errorf("failed to set port %s with no-flood config: %w", config.L7RedirectTargetPortName, err) } if err := i.ovsCtlClient.SetPortNoFlood(int(returnPort.OFPort)); err != nil { - return fmt.Errorf("failed to set port %s with no-flood config: %w", config.L7NetworkPolicyReturnPortName, err) + return fmt.Errorf("failed to set port %s with no-flood config: %w", config.L7RedirectReturnPortName, err) } // Set MTU of the ports to the calculated MTU value at every startup. 
- if err := i.setInterfaceMTU(config.L7NetworkPolicyTargetPortName, i.networkConfig.InterfaceMTU); err != nil { + if err := i.setInterfaceMTU(config.L7RedirectTargetPortName, i.networkConfig.InterfaceMTU); err != nil { return err } - if err := i.setInterfaceMTU(config.L7NetworkPolicyReturnPortName, i.networkConfig.InterfaceMTU); err != nil { + if err := i.setInterfaceMTU(config.L7RedirectReturnPortName, i.networkConfig.InterfaceMTU); err != nil { return err } // Currently, the maximum of MTU supported by L7 NetworkPolicy engine Suricata is 32678 (assuming that the page size @@ -405,6 +404,5 @@ func (i *Initializer) prepareL7NetworkPolicyInterfaces() error { if i.networkConfig.InterfaceMTU > maxMTUSupportedBySuricata { klog.ErrorS(nil, "L7 NetworkPolicy engine Suricata may fail to start since the interface MTU is greater than the maximum MTU supported by Suricata", "interfaceMTU", i.networkConfig.InterfaceMTU, "maximumMTU", maxMTUSupportedBySuricata) } - return nil } diff --git a/pkg/agent/agent_windows.go b/pkg/agent/agent_windows.go index e7724979053..cde3d6d62d8 100644 --- a/pkg/agent/agent_windows.go +++ b/pkg/agent/agent_windows.go @@ -476,6 +476,6 @@ func (i *Initializer) installVMInitialFlows() error { return nil } -func (i *Initializer) prepareL7NetworkPolicyInterfaces() error { +func (i *Initializer) prepareL7EngineInterfaces() error { return nil } diff --git a/pkg/agent/config/node_config.go b/pkg/agent/config/node_config.go index 2908c1e2724..9079aa55eea 100644 --- a/pkg/agent/config/node_config.go +++ b/pkg/agent/config/node_config.go @@ -46,8 +46,9 @@ const ( ) const ( - L7NetworkPolicyTargetPortName = "antrea-l7-tap0" - L7NetworkPolicyReturnPortName = "antrea-l7-tap1" + L7RedirectTargetPortName = "antrea-l7-tap0" + L7RedirectReturnPortName = "antrea-l7-tap1" + L7SuricataSocketPath = "/var/run/suricata/suricata_eve.socket" ) const ( diff --git a/pkg/agent/controller/l7flowexporter/l7_flow_export_controller.go b/pkg/agent/controller/l7flowexporter/l7_flow_export_controller.go new file mode 100644 index 00000000000..a0c8a016bc8 --- /dev/null +++ b/pkg/agent/controller/l7flowexporter/l7_flow_export_controller.go @@ -0,0 +1,412 @@ +// Copyright 2023 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
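+// Package l7flowexporter implements an agent controller that watches the visibility.antrea.io/l7-export annotation on Pods and Namespaces and installs traffic control mark flows to mirror the selected Pods' traffic to the L7 engine for application-layer flow export.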
+ +package l7flowexporter + +import ( + "fmt" + "strings" + "sync" + "time" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/wait" + coreinformers "k8s.io/client-go/informers/core/v1" + corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + "antrea.io/antrea/pkg/agent/config" + "antrea.io/antrea/pkg/agent/controller/networkpolicy/l7engine" + "antrea.io/antrea/pkg/agent/interfacestore" + "antrea.io/antrea/pkg/agent/openflow" + "antrea.io/antrea/pkg/agent/types" + "antrea.io/antrea/pkg/apis/crd/v1alpha2" + "antrea.io/antrea/pkg/util/k8s" +) + +const ( + controllerName = "L7FlowExporterController" + resyncPeriod time.Duration = 0 * time.Second + minRetryDelay = 5 * time.Second + maxRetryDelay = 300 * time.Second + defaultWorkers = 4 +) + +var ( + errInvalidAnnotation = fmt.Errorf("annotation key %s can only have values (Ingress/Egress/Both)", types.L7FlowExporterAnnotationKey) + errPodInterfaceNotFound = fmt.Errorf("interface of Pod not found") +) + +type L7FlowExporterController struct { + ofClient openflow.Client + interfaceStore interfacestore.InterfaceStore + + podInformer cache.SharedIndexInformer + podLister corelisters.PodLister + podListerSynced cache.InformerSynced + + namespaceInformer cache.SharedIndexInformer + namespaceLister corelisters.NamespaceLister + namespaceListerSynced cache.InformerSynced + + l7Reconciler *l7engine.Reconciler + podToDirectionMap map[string]v1alpha2.Direction + podToDirectionMapMutex sync.RWMutex + + targetPort uint32 + + queue workqueue.RateLimitingInterface +} + +func NewL7FlowExporterController( + ofClient openflow.Client, + interfaceStore interfacestore.InterfaceStore, + podInformer cache.SharedIndexInformer, + namespaceInformer coreinformers.NamespaceInformer, + l7Reconciler *l7engine.Reconciler) *L7FlowExporterController { + l7c := &L7FlowExporterController{ + ofClient: ofClient, + interfaceStore: interfaceStore, + podInformer: podInformer, + podLister: corelisters.NewPodLister(podInformer.GetIndexer()), + podListerSynced: podInformer.HasSynced, + namespaceInformer: namespaceInformer.Informer(), + namespaceLister: namespaceInformer.Lister(), + namespaceListerSynced: namespaceInformer.Informer().HasSynced, + l7Reconciler: l7Reconciler, + podToDirectionMap: make(map[string]v1alpha2.Direction), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "L7FlowExporterController"), + } + l7c.podInformer.AddEventHandlerWithResyncPeriod( + cache.ResourceEventHandlerFuncs{ + AddFunc: l7c.addPod, + UpdateFunc: l7c.updatePod, + DeleteFunc: l7c.deletePod, + }, + resyncPeriod, + ) + l7c.namespaceInformer.AddEventHandlerWithResyncPeriod( + cache.ResourceEventHandlerFuncs{ + AddFunc: l7c.addNamespace, + UpdateFunc: l7c.updateNamespace, + }, + resyncPeriod, + ) + return l7c +} + +func (l7c *L7FlowExporterController) Run(stopCh <-chan struct{}) { + defer l7c.queue.ShutDown() + klog.InfoS("Starting", "Controller", controllerName) + defer klog.InfoS("Shutting down", "Controller", controllerName) + + if !cache.WaitForNamedCacheSync(controllerName, stopCh, l7c.podListerSynced, l7c.namespaceListerSynced) { + return + } + // Interface is expected to be present as it is created during Antrea agent initialization. 
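+	// The OF port of this interface (antrea-l7-tap0) is used as the mirror destination for the traffic control mark flows installed by syncPod.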
+ if intf, ok := l7c.interfaceStore.GetInterfaceByName(config.L7RedirectTargetPortName); ok { + l7c.targetPort = uint32(intf.OFPort) + } + + for i := 0; i < defaultWorkers; i++ { + go wait.Until(l7c.worker, time.Second, stopCh) + } + <-stopCh +} + +func (l7c *L7FlowExporterController) worker() { + for l7c.processNextWorkItem() { + } +} + +func (l7c *L7FlowExporterController) processNextWorkItem() bool { + obj, quit := l7c.queue.Get() + if quit { + return false + } + defer l7c.queue.Done(obj) + + if key, ok := obj.(string); !ok { + // As the item in the work queue is actually invalid, we call Forget here else we'd + // go into a loop of attempting to process a work item that is invalid. + // This should not happen. + l7c.queue.Forget(key) + klog.ErrorS(nil, "Expected string in work queue but got", "key", obj) + return true + } else if err := l7c.syncPod(key); err == nil { + // If no error occurs we Forget this item, so it does not get queued again until + // another change happens. + l7c.queue.Forget(key) + } else if err == errInvalidAnnotation { + // Handle errors + // Do not add key again to the queue if annotation is incorrect + klog.ErrorS(err, "Syncing Pod object for L7FlowExporter failed", "Pod", key) + l7c.queue.Forget(key) + } else { + // Put the item back on the work queue to handle any transient errors. + l7c.queue.AddRateLimited(key) + klog.ErrorS(err, "Syncing Pod object for L7FlowExporter failed, requeue", "Pod", key) + } + return true +} + +func (l7c *L7FlowExporterController) addPod(obj interface{}) { + pod := obj.(*v1.Pod) + if !isValidPod(pod) { + return + } + podNS, err := l7c.namespaceLister.Get(pod.Namespace) + if err != nil { + return + } + // Both Pod and Namespace are not annotated, return + _, podOK := pod.Annotations[types.L7FlowExporterAnnotationKey] + _, nsOK := podNS.Annotations[types.L7FlowExporterAnnotationKey] + if !podOK && !nsOK { + return + } + + klog.V(2).InfoS("Processing Pod ADD event", "Pod", klog.KObj(pod)) + podNN := k8s.NamespacedName(pod.Namespace, pod.Name) + l7c.queue.Add(podNN) +} + +func (l7c *L7FlowExporterController) updatePod(oldObj interface{}, obj interface{}) { + oldPod := oldObj.(*v1.Pod) + updatedPod := obj.(*v1.Pod) + if !isValidPod(updatedPod) { + return + } + oldAnnotation := oldPod.Annotations[types.L7FlowExporterAnnotationKey] + updatedAnnotation, updatedAnnotationOk := updatedPod.Annotations[types.L7FlowExporterAnnotationKey] + if oldAnnotation == updatedAnnotation { + if !updatedAnnotationOk { + return + } + if oldPod.Status.PodIP == updatedPod.Status.PodIP { + return + } + } + + klog.V(2).InfoS("Processing Pod UPDATE event", "Pod", klog.KObj(updatedPod)) + podNN := k8s.NamespacedName(updatedPod.Namespace, updatedPod.Name) + l7c.queue.Add(podNN) +} + +func (l7c *L7FlowExporterController) deletePod(obj interface{}) { + pod, ok := obj.(*v1.Pod) + if !ok { + deletedState, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + klog.ErrorS(nil, "Received unexpected object", "object", obj) + return + } + pod, ok = deletedState.Obj.(*v1.Pod) + if !ok { + klog.ErrorS(nil, "DeletedFinalStateUnknown contains non-Pod object", "object", deletedState.Obj) + return + } + } + if _, ok := pod.Annotations[types.L7FlowExporterAnnotationKey]; !ok { + if !l7c.namespaceAnnotationExists(pod) { + return + } + } + + klog.V(2).InfoS("Processing Pod DELETE event", "Pod", klog.KObj(pod)) + podNN := k8s.NamespacedName(pod.Namespace, pod.Name) + l7c.queue.Add(podNN) +} + +func (l7c *L7FlowExporterController) namespaceAnnotationExists(pod *v1.Pod) bool { + 
podNamespace, err := l7c.namespaceLister.Get(pod.Namespace) + if err != nil { + return false + } + _, ok := podNamespace.Annotations[types.L7FlowExporterAnnotationKey] + return ok +} + +func isValidPod(pod *v1.Pod) bool { + return pod.Status.PodIP != "" && !pod.Spec.HostNetwork +} + +func (l7c *L7FlowExporterController) addNamespace(obj interface{}) { + ns := obj.(*v1.Namespace) + if _, ok := ns.Annotations[types.L7FlowExporterAnnotationKey]; !ok { + return + } + klog.V(2).InfoS("Processing Namespace ADD event", "Namespace", klog.KObj(ns)) + affectedPods := l7c.getNonAnnotatedPodsFromNamespace(ns) + for _, pod := range affectedPods { + podNN := k8s.NamespacedName(pod.Namespace, pod.Name) + l7c.queue.Add(podNN) + } +} + +func (l7c *L7FlowExporterController) updateNamespace(oldObj, obj interface{}) { + oldNS := oldObj.(*v1.Namespace) + updatedNS := obj.(*v1.Namespace) + oldAnnotation := oldNS.GetAnnotations()[types.L7FlowExporterAnnotationKey] + updatedAnnotation := updatedNS.GetAnnotations()[types.L7FlowExporterAnnotationKey] + if oldAnnotation == updatedAnnotation { + return + } + + klog.V(2).InfoS("Processing Namespace UPDATE event", "Namespace", klog.KObj(updatedNS)) + + affectedPods := l7c.getNonAnnotatedPodsFromNamespace(updatedNS) + for _, pod := range affectedPods { + podNN := k8s.NamespacedName(pod.Namespace, pod.Name) + l7c.queue.Add(podNN) + } +} + +func (l7c *L7FlowExporterController) getNonAnnotatedPodsFromNamespace(ns *v1.Namespace) []*v1.Pod { + var nonAnnotatedPods []*v1.Pod + pods, _ := l7c.podLister.Pods(ns.Name).List(labels.Everything()) + + // Only select the non annotated Pods, as annotated Pods are handled separately + for _, pod := range pods { + _, ok := pod.Annotations[types.L7FlowExporterAnnotationKey] + if !ok { + nonAnnotatedPods = append(nonAnnotatedPods, pod) + } + } + return nonAnnotatedPods +} + +func (l7c *L7FlowExporterController) syncPod(podNN string) error { + podNamespace, podName := k8s.SplitNamespacedName(podNN) + pod, err := l7c.podLister.Pods(podNamespace).Get(podName) + if err != nil { + // Remove the TC flows if the Pod has been deleted + return l7c.removeTCFlow(podNN) + } + if !isValidPod(pod) { + return nil + } + annotationValue, ok := pod.Annotations[types.L7FlowExporterAnnotationKey] + if !ok { + podNS, err := l7c.namespaceLister.Get(pod.Namespace) + if err != nil { + // Remove TC flows if Namespace has been deleted + return l7c.removeTCFlow(podNN) + } + // Both Pod and Namespace are not annotated, remove the TC Mark flow + annotationValue, ok = podNS.Annotations[types.L7FlowExporterAnnotationKey] + if !ok { + return l7c.removeTCFlow(podNN) + } + } + + // Check if the annotation value is one of the specified values + direction, err := checkIfAnnotationCorrect(annotationValue) + if err != nil { + return err + } + podInterfaces := l7c.interfaceStore.GetContainerInterfacesByPod(pod.Name, pod.Namespace) + if len(podInterfaces) == 0 { + return errPodInterfaceNotFound + } + sourceOfPort := []uint32{uint32(podInterfaces[0].OFPort)} + + // Start Suricata before starting traffic control mark flows + l7c.l7Reconciler.StartSuricataOnce() + + oldDirection, exists := l7c.getMirroredDirection(podNN) + if exists { + if oldDirection == direction { + return nil + } + if err := l7c.removeTCFlow(podNN); err != nil { + return err + } + } + tcName := l7c.generateTCName(podNN) + if err := l7c.ofClient.InstallTrafficControlMarkFlows(tcName, sourceOfPort, l7c.targetPort, direction, v1alpha2.ActionMirror, types.TrafficControlFlowPriorityLow); err != nil { + return err + 
} + l7c.updateMirroredDirection(podNN, direction) + return nil +} + +func (l7c *L7FlowExporterController) updateMirroredDirection(podNN string, direction v1alpha2.Direction) { + l7c.podToDirectionMapMutex.Lock() + defer l7c.podToDirectionMapMutex.Unlock() + l7c.podToDirectionMap[podNN] = direction +} + +func (l7c *L7FlowExporterController) deleteMirroredDirection(podNN string) { + l7c.podToDirectionMapMutex.Lock() + defer l7c.podToDirectionMapMutex.Unlock() + delete(l7c.podToDirectionMap, podNN) +} + +func (l7c *L7FlowExporterController) getMirroredDirection(podNN string) (v1alpha2.Direction, bool) { + l7c.podToDirectionMapMutex.RLock() + defer l7c.podToDirectionMapMutex.RUnlock() + direction, ok := l7c.podToDirectionMap[podNN] + return direction, ok +} + +func (l7c *L7FlowExporterController) IsL7FlowExporterRequested(podNN string, ingress bool) bool { + l7c.podToDirectionMapMutex.RLock() + defer l7c.podToDirectionMapMutex.RUnlock() + if direction, ok := l7c.podToDirectionMap[podNN]; ok { + switch direction { + case v1alpha2.DirectionIngress: + return ingress + case v1alpha2.DirectionEgress: + return !ingress + case v1alpha2.DirectionBoth: + return true + } + } + return false +} + +func (l7c *L7FlowExporterController) removeTCFlow(podNN string) error { + if _, exists := l7c.getMirroredDirection(podNN); !exists { + return nil + } + if err := l7c.ofClient.UninstallTrafficControlMarkFlows(l7c.generateTCName(podNN)); err != nil { + return err + } + l7c.deleteMirroredDirection(podNN) + return nil +} + +func (l7c *L7FlowExporterController) generateTCName(podNN string) string { + return fmt.Sprintf("tcl7:%s", podNN) +} + +func checkIfAnnotationCorrect(annotationValue string) (v1alpha2.Direction, error) { + var direction v1alpha2.Direction + annotationValue = strings.ToLower(annotationValue) + switch annotationValue { + case "ingress": + direction = v1alpha2.DirectionIngress + case "egress": + direction = v1alpha2.DirectionEgress + case "both": + direction = v1alpha2.DirectionBoth + default: + return direction, errInvalidAnnotation + } + return direction, nil +} diff --git a/pkg/agent/controller/l7flowexporter/l7_flow_export_controller_test.go b/pkg/agent/controller/l7flowexporter/l7_flow_export_controller_test.go new file mode 100644 index 00000000000..27429df4a0a --- /dev/null +++ b/pkg/agent/controller/l7flowexporter/l7_flow_export_controller_test.go @@ -0,0 +1,539 @@ +// Copyright 2023 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package l7flowexporter + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + coreinformers "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" + + "antrea.io/antrea/pkg/agent/controller/networkpolicy/l7engine" + "antrea.io/antrea/pkg/agent/interfacestore" + openflowtest "antrea.io/antrea/pkg/agent/openflow/testing" + "antrea.io/antrea/pkg/agent/types" + "antrea.io/antrea/pkg/agent/util" + "antrea.io/antrea/pkg/apis/crd/v1alpha2" + "antrea.io/antrea/pkg/util/k8s" +) + +var ( + annotationsEmpty = map[string]string{} + annotationsDifferent = map[string]string{"annotation.antrea.io": "mockVal1"} + annotationsIncorrect = map[string]string{types.L7FlowExporterAnnotationKey: "mockVal2"} + annotationsCorrectIngress = map[string]string{types.L7FlowExporterAnnotationKey: "ingress"} + annotationsCorrectEgress = map[string]string{types.L7FlowExporterAnnotationKey: "egress"} + annotationsCorrectBoth = map[string]string{types.L7FlowExporterAnnotationKey: "both"} + + pod1NN = k8s.NamespacedName("test-ns1", "test-pod1") + pod2NN = k8s.NamespacedName("test-ns1", "test-pod2") + pod3NN = k8s.NamespacedName("test-ns2", "test-pod3") + pod4NN = k8s.NamespacedName("test-ns2", "test-pod4") + podInterface1 = newPodInterface("test-pod1", "test-ns1", int32(pod1OFPort)) + podInterface2 = newPodInterface("test-pod2", "test-ns1", int32(pod2OFPort)) + podInterface3 = newPodInterface("test-pod3", "test-ns2", int32(pod3OFPort)) + podInterface4 = newPodInterface("test-pod4", "test-ns2", int32(pod4OFPort)) + + ctx = context.Background() +) + +const ( + pod1OFPort = uint32(1) + pod2OFPort = uint32(2) + pod3OFPort = uint32(3) + pod4OFPort = uint32(4) +) + +type fakeController struct { + *L7FlowExporterController + mockOFClient *openflowtest.MockClient + client *fake.Clientset + informerFactory informers.SharedInformerFactory + localPodInformer cache.SharedIndexInformer +} + +func (c *fakeController) startInformers(stopCh chan struct{}) { + c.informerFactory.Start(stopCh) + c.informerFactory.WaitForCacheSync(stopCh) + go c.localPodInformer.Run(stopCh) + go c.namespaceInformer.Run(stopCh) + cache.WaitForCacheSync(stopCh, c.localPodInformer.HasSynced, c.namespaceInformer.HasSynced) +} + +func newFakeControllerAndWatcher(t *testing.T, objects []runtime.Object, interfaces []*interfacestore.InterfaceConfig) *fakeController { + controller := gomock.NewController(t) + mockOFClient := openflowtest.NewMockClient(controller) + + client := fake.NewSimpleClientset(objects...) 
+ informerFactory := informers.NewSharedInformerFactory(client, 0) + nsInformer := informerFactory.Core().V1().Namespaces() + + localPodInformer := coreinformers.NewFilteredPodInformer( + client, + metav1.NamespaceAll, + 0, + cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, + func(options *metav1.ListOptions) { + options.FieldSelector = fields.OneTermEqualSelector("spec.nodeName", "fakeNode").String() + }, + ) + + ifaceStore := interfacestore.NewInterfaceStore() + for _, itf := range interfaces { + ifaceStore.AddInterface(itf) + } + + l7Reconciler := l7engine.NewReconciler() + l7w := NewL7FlowExporterController(mockOFClient, ifaceStore, localPodInformer, nsInformer, l7Reconciler) + + return &fakeController{ + L7FlowExporterController: l7w, + mockOFClient: mockOFClient, + client: client, + informerFactory: informerFactory, + localPodInformer: localPodInformer, + } +} + +func newPodObject(name, namespace string, annotations map[string]string) *v1.Pod { + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: v1.PodSpec{ + NodeName: "fakeNode", + }, + Status: v1.PodStatus{ + PodIP: "10.0.0.1", + }, + } +} + +func newNamespaceObject(name string, annotations map[string]string) *v1.Namespace { + return &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Annotations: annotations, + Labels: map[string]string{ + "fakeKey": "fakeValue", + }, + }, + } +} + +func newPodInterface(podName, podNamespace string, ofPort int32) *interfacestore.InterfaceConfig { + containerName := k8s.NamespacedName(podNamespace, podName) + return &interfacestore.InterfaceConfig{ + InterfaceName: util.GenerateContainerInterfaceName(podName, podNamespace, containerName), + ContainerInterfaceConfig: &interfacestore.ContainerInterfaceConfig{PodName: podName, PodNamespace: podNamespace, ContainerID: containerName}, + OVSPortConfig: &interfacestore.OVSPortConfig{OFPort: ofPort}, + } +} + +func waitEvents(t *testing.T, expectedEvents int, c *fakeController) { + require.NoError(t, wait.Poll(10*time.Millisecond, 5*time.Second, func() (done bool, err error) { + return c.queue.Len() == expectedEvents, nil + })) +} + +func TestPodAdd(t *testing.T) { + var targetPort uint32 + testNS1 := newNamespaceObject("test-ns1", annotationsEmpty) + pod1 := newPodObject("test-pod1", "test-ns1", annotationsCorrectIngress) + pod2 := newPodObject("test-pod2", "test-ns1", annotationsIncorrect) + interfaces := []*interfacestore.InterfaceConfig{ + podInterface1, + podInterface2, + } + testcases := []struct { + name string + addedPod *v1.Pod + expectedPodToDirectionMap map[string]v1alpha2.Direction + expectedCalls func(mockOFClient *openflowtest.MockClient) + expectedError error + }{ + { + name: "Add pod with correct annotations", + addedPod: pod1, + expectedPodToDirectionMap: map[string]v1alpha2.Direction{ + pod1NN: v1alpha2.DirectionIngress, + }, + expectedCalls: func(mockOFClient *openflowtest.MockClient) { + mockOFClient.EXPECT().InstallTrafficControlMarkFlows(fmt.Sprintf("tcl7:%s", pod1NN), []uint32{uint32(podInterface1.OFPort)}, targetPort, v1alpha2.DirectionIngress, v1alpha2.ActionMirror, types.TrafficControlFlowPriorityLow) + }, + }, { + name: "Add pod with incorrect annotations", + addedPod: pod2, + expectedPodToDirectionMap: map[string]v1alpha2.Direction{}, + expectedCalls: func(mockOFClient *openflowtest.MockClient) {}, + expectedError: errInvalidAnnotation, + }, + } + for _, tt := range testcases { + t.Run(tt.name, func(t *testing.T) { + c := 
newFakeControllerAndWatcher(t, []runtime.Object{tt.addedPod, testNS1}, interfaces) + stopCh := make(chan struct{}) + defer close(stopCh) + + c.startInformers(stopCh) + assert.Eventuallyf(t, func() bool { + ns, _ := c.namespaceLister.List(labels.Everything()) + return len(c.localPodInformer.GetIndexer().List()) == 1 && len(ns) == 1 + }, 1*time.Second, 10*time.Millisecond, "Pod should be added to Informers") + waitEvents(t, 1, c) + item, _ := c.queue.Get() + tt.expectedCalls(c.mockOFClient) + err := c.syncPod(item.(string)) + if tt.expectedError != nil { + assert.ErrorContains(t, err, tt.expectedError.Error()) + } else { + assert.Equal(t, tt.expectedPodToDirectionMap, c.podToDirectionMap) + } + c.queue.Done(item) + }) + } +} + +func TestPodUpdate(t *testing.T) { + var targetPort uint32 + testNS1 := newNamespaceObject("test-ns1", annotationsEmpty) + testNS2 := newNamespaceObject("test-ns2", annotationsEmpty) + pod1 := newPodObject("test-pod1", "test-ns1", annotationsDifferent) + pod2 := newPodObject("test-pod2", "test-ns1", annotationsIncorrect) + pod3 := newPodObject("test-pod3", "test-ns2", annotationsEmpty) + pod4 := newPodObject("test-pod4", "test-ns2", annotationsCorrectIngress) + interfaces := []*interfacestore.InterfaceConfig{ + podInterface1, + podInterface2, + podInterface3, + podInterface4, + } + testcases := []struct { + name string + updatedPod *v1.Pod + expectedPodToDirectionMap map[string]v1alpha2.Direction + expectedCalls func(mockOFClient *openflowtest.MockClient) + }{ + { + name: "Update Pod with different annotation to correct annotation", + updatedPod: newPodObject("test-pod1", "test-ns1", annotationsCorrectEgress), + expectedPodToDirectionMap: map[string]v1alpha2.Direction{ + pod1NN: v1alpha2.DirectionEgress, + }, + expectedCalls: func(mockOFClient *openflowtest.MockClient) { + mockOFClient.EXPECT().InstallTrafficControlMarkFlows(fmt.Sprintf("tcl7:%s", pod1NN), []uint32{uint32(podInterface1.OFPort)}, targetPort, v1alpha2.DirectionEgress, v1alpha2.ActionMirror, types.TrafficControlFlowPriorityLow) + }, + }, { + name: "Update Pod with Incorrect annotation to correct annotation", + updatedPod: newPodObject("test-pod2", "test-ns1", annotationsCorrectBoth), + expectedPodToDirectionMap: map[string]v1alpha2.Direction{ + pod2NN: v1alpha2.DirectionBoth, + }, + expectedCalls: func(mockOFClient *openflowtest.MockClient) { + mockOFClient.EXPECT().InstallTrafficControlMarkFlows(fmt.Sprintf("tcl7:%s", pod2NN), []uint32{uint32(podInterface2.OFPort)}, targetPort, v1alpha2.DirectionBoth, v1alpha2.ActionMirror, types.TrafficControlFlowPriorityLow) + }, + }, { + name: "Update Pod with no annotation to correct annotation", + updatedPod: newPodObject("test-pod3", "test-ns2", annotationsCorrectIngress), + expectedPodToDirectionMap: map[string]v1alpha2.Direction{ + pod3NN: v1alpha2.DirectionIngress, + }, + expectedCalls: func(mockOFClient *openflowtest.MockClient) { + mockOFClient.EXPECT().InstallTrafficControlMarkFlows(fmt.Sprintf("tcl7:%s", pod3NN), []uint32{uint32(podInterface3.OFPort)}, targetPort, v1alpha2.DirectionIngress, v1alpha2.ActionMirror, types.TrafficControlFlowPriorityLow) + }, + }, { + name: "Update Pod with ingress annotation to egress annotation", + updatedPod: newPodObject("test-pod4", "test-ns2", annotationsCorrectEgress), + expectedPodToDirectionMap: map[string]v1alpha2.Direction{ + pod4NN: v1alpha2.DirectionEgress, + }, + expectedCalls: func(mockOFClient *openflowtest.MockClient) { + mockOFClient.EXPECT().InstallTrafficControlMarkFlows(fmt.Sprintf("tcl7:%s", pod4NN), 
[]uint32{uint32(podInterface4.OFPort)}, targetPort, v1alpha2.DirectionEgress, v1alpha2.ActionMirror, types.TrafficControlFlowPriorityLow) + }, + }, + } + for _, tt := range testcases { + t.Run(tt.name, func(t *testing.T) { + c := newFakeControllerAndWatcher(t, []runtime.Object{pod1, pod2, pod3, pod4, testNS1, testNS2}, interfaces) + stopCh := make(chan struct{}) + defer close(stopCh) + + c.startInformers(stopCh) + + assert.Eventuallyf(t, func() bool { + ns, _ := c.namespaceLister.List(labels.Everything()) + return len(c.localPodInformer.GetIndexer().List()) == 4 && len(ns) == 2 + }, 1*time.Second, 10*time.Millisecond, "Pods should be added to Informers") + + // Pod2 has the correction annotation key (but an invalid annotation value) and Pod4 has the correct + // annotation item, so they will be queued once for the ADD event. We ignore these events. + waitEvents(t, 2, c) + for i := 0; i < 2; i++ { + item, _ := c.queue.Get() + c.queue.Done(item) + } + + tt.expectedCalls(c.mockOFClient) + + // Update Pods with new annotations + _, err := c.client.CoreV1().Pods(tt.updatedPod.Namespace).Update(ctx, tt.updatedPod, metav1.UpdateOptions{}) + require.NoError(t, err) + + waitEvents(t, 1, c) + item, _ := c.queue.Get() + require.NoError(t, c.syncPod(item.(string))) + assert.Equal(t, tt.expectedPodToDirectionMap, c.podToDirectionMap) + c.queue.Done(item) + }) + } +} + +func TestPodUpdateRemoveFlows(t *testing.T) { + var targetPort uint32 + testNS1 := newNamespaceObject("test-ns1", annotationsEmpty) + pod1 := newPodObject("test-pod1", "test-ns1", annotationsCorrectIngress) + pod2 := newPodObject("test-pod2", "test-ns1", annotationsCorrectIngress) + interfaces := []*interfacestore.InterfaceConfig{ + podInterface1, + podInterface2, + } + expectedInstallCalls := func(mockOFClient *openflowtest.MockClient) { + mockOFClient.EXPECT().InstallTrafficControlMarkFlows(fmt.Sprintf("tcl7:%s", pod1NN), []uint32{uint32(podInterface1.OFPort)}, targetPort, v1alpha2.DirectionIngress, v1alpha2.ActionMirror, types.TrafficControlFlowPriorityLow) + mockOFClient.EXPECT().InstallTrafficControlMarkFlows(fmt.Sprintf("tcl7:%s", pod2NN), []uint32{uint32(podInterface2.OFPort)}, targetPort, v1alpha2.DirectionIngress, v1alpha2.ActionMirror, types.TrafficControlFlowPriorityLow) + } + testcases := []struct { + name string + pod *v1.Pod + deletePod bool + expectedL7PodNNDirAfterFlowRemoved map[string]v1alpha2.Direction + expectedUninstallCalls func(mockOFClient *openflowtest.MockClient) + }{ + { + name: "Remove flows for annotation removed", + pod: newPodObject("test-pod1", "test-ns1", annotationsEmpty), + deletePod: false, + expectedL7PodNNDirAfterFlowRemoved: map[string]v1alpha2.Direction{pod2NN: v1alpha2.DirectionIngress}, + expectedUninstallCalls: func(mockOFClient *openflowtest.MockClient) { + mockOFClient.EXPECT().UninstallTrafficControlMarkFlows(fmt.Sprintf("tcl7:%s", pod1NN)) + }, + }, { + name: "Remove flows for deletedPod", + pod: newPodObject("test-pod2", "test-ns1", annotationsCorrectIngress), + deletePod: true, + expectedL7PodNNDirAfterFlowRemoved: map[string]v1alpha2.Direction{pod1NN: v1alpha2.DirectionIngress}, + expectedUninstallCalls: func(mockOFClient *openflowtest.MockClient) { + mockOFClient.EXPECT().UninstallTrafficControlMarkFlows(fmt.Sprintf("tcl7:%s", pod2NN)) + }, + }, + } + for _, tt := range testcases { + t.Run(tt.name, func(t *testing.T) { + c := newFakeControllerAndWatcher(t, []runtime.Object{pod1, pod2, testNS1}, interfaces) + stopCh := make(chan struct{}) + defer close(stopCh) + c.startInformers(stopCh) + 
assert.Eventuallyf(t, func() bool { + ns, _ := c.namespaceLister.List(labels.Everything()) + return len(c.localPodInformer.GetIndexer().List()) == 2 && len(ns) == 1 + }, 1*time.Second, 10*time.Millisecond, "Pods should be added to Informers") + expectedInstallCalls(c.mockOFClient) + waitEvents(t, 2, c) + for i := 0; i < 2; i++ { + item, _ := c.queue.Get() + require.NoError(t, c.syncPod(item.(string))) + c.queue.Done(item) + } + if tt.deletePod { + //Delete Pod + err := c.client.CoreV1().Pods(tt.pod.Namespace).Delete(ctx, tt.pod.Name, metav1.DeleteOptions{}) + require.NoError(t, err) + } else { + // Update Pods with no annotations + _, err := c.client.CoreV1().Pods(tt.pod.Namespace).Update(ctx, tt.pod, metav1.UpdateOptions{}) + require.NoError(t, err) + } + + tt.expectedUninstallCalls(c.mockOFClient) + + waitEvents(t, 1, c) + item, _ := c.queue.Get() + require.NoError(t, c.syncPod(item.(string))) + assert.Equal(t, tt.expectedL7PodNNDirAfterFlowRemoved, c.podToDirectionMap) + c.queue.Done(item) + }) + } +} + +func TestNamespaceUpdate(t *testing.T) { + var targetPort uint32 + testNS1 := newNamespaceObject("test-ns1", annotationsEmpty) + testNS2 := newNamespaceObject("test-ns2", annotationsEmpty) + pod1 := newPodObject("test-pod1", "test-ns1", annotationsEmpty) + pod2 := newPodObject("test-pod2", "test-ns1", annotationsEmpty) + pod3 := newPodObject("test-pod3", "test-ns2", annotationsEmpty) + pod4 := newPodObject("test-pod4", "test-ns2", annotationsCorrectIngress) + interfaces := []*interfacestore.InterfaceConfig{ + podInterface1, + podInterface2, + podInterface3, + podInterface4, + } + testcases := []struct { + name string + updatedNS *v1.Namespace + expectedCalls func(mockOFClient *openflowtest.MockClient) + expectedPodToDirectionMap map[string]v1alpha2.Direction + expectedPodsCount int + }{ + { + name: "Update namespace to have annotations", + updatedNS: newNamespaceObject("test-ns1", annotationsCorrectEgress), + expectedCalls: func(mockOFClient *openflowtest.MockClient) { + mockOFClient.EXPECT().InstallTrafficControlMarkFlows(fmt.Sprintf("tcl7:%s", pod1NN), []uint32{uint32(podInterface1.OFPort)}, targetPort, v1alpha2.DirectionEgress, v1alpha2.ActionMirror, types.TrafficControlFlowPriorityLow) + mockOFClient.EXPECT().InstallTrafficControlMarkFlows(fmt.Sprintf("tcl7:%s", pod2NN), []uint32{uint32(podInterface2.OFPort)}, targetPort, v1alpha2.DirectionEgress, v1alpha2.ActionMirror, types.TrafficControlFlowPriorityLow) + }, + expectedPodToDirectionMap: map[string]v1alpha2.Direction{ + pod1NN: v1alpha2.DirectionEgress, + pod2NN: v1alpha2.DirectionEgress, + }, + expectedPodsCount: 2, + }, { + name: "Update namespace to have annotations containing pod with annotation", + updatedNS: newNamespaceObject("test-ns2", annotationsCorrectEgress), + expectedCalls: func(mockOFClient *openflowtest.MockClient) { + mockOFClient.EXPECT().InstallTrafficControlMarkFlows(fmt.Sprintf("tcl7:%s", pod3NN), []uint32{uint32(podInterface3.OFPort)}, targetPort, v1alpha2.DirectionEgress, v1alpha2.ActionMirror, types.TrafficControlFlowPriorityLow) + }, + expectedPodToDirectionMap: map[string]v1alpha2.Direction{ + pod3NN: v1alpha2.DirectionEgress, + }, + expectedPodsCount: 1, + }, + } + for _, tt := range testcases { + t.Run(tt.name, func(t *testing.T) { + c := newFakeControllerAndWatcher(t, []runtime.Object{testNS1, testNS2, pod1, pod2, pod3, pod4}, interfaces) + stopCh := make(chan struct{}) + defer close(stopCh) + + c.startInformers(stopCh) + // Ignore Pod4 as that will be enqueued by addPod func + waitEvents(t, 1, c) + 
for i := 0; i < 1; i++ { + item, _ := c.queue.Get() + c.queue.Done(item) + } + + // Update NS object + _, err := c.client.CoreV1().Namespaces().Update(ctx, tt.updatedNS, metav1.UpdateOptions{}) + require.NoError(t, err) + tt.expectedCalls(c.mockOFClient) + waitEvents(t, tt.expectedPodsCount, c) + for i := 0; i < tt.expectedPodsCount; i++ { + item, _ := c.queue.Get() + require.NoError(t, c.syncPod(item.(string))) + c.queue.Done(item) + } + assert.Equal(t, tt.expectedPodToDirectionMap, c.podToDirectionMap) + }) + } +} + +func TestNSUpdateRemoveFlows(t *testing.T) { + var targetPort uint32 + testNS1 := newNamespaceObject("test-ns1", annotationsCorrectIngress) + pod1 := newPodObject("test-pod1", "test-ns1", annotationsEmpty) + pod2 := newPodObject("test-pod2", "test-ns1", annotationsCorrectIngress) + interfaces := []*interfacestore.InterfaceConfig{ + podInterface1, + podInterface2, + } + testcases := []struct { + name string + Namespace *v1.Namespace + expectedL7PodNNDirMapAfterFlowRemoved map[string]v1alpha2.Direction + expectedInstallCalls func(mockOFClient *openflowtest.MockClient) + expectedUninstallCalls func(mockOFClient *openflowtest.MockClient) + expectedQueueLen int + }{ + { + name: "Remove flows for annotation removed", + Namespace: newNamespaceObject("test-ns1", map[string]string{}), + expectedInstallCalls: func(mockOFClient *openflowtest.MockClient) { + mockOFClient.EXPECT().InstallTrafficControlMarkFlows(fmt.Sprintf("tcl7:%s", pod1NN), []uint32{uint32(podInterface1.OFPort)}, targetPort, v1alpha2.DirectionIngress, v1alpha2.ActionMirror, types.TrafficControlFlowPriorityLow) + mockOFClient.EXPECT().InstallTrafficControlMarkFlows(fmt.Sprintf("tcl7:%s", pod2NN), []uint32{uint32(podInterface2.OFPort)}, targetPort, v1alpha2.DirectionIngress, v1alpha2.ActionMirror, types.TrafficControlFlowPriorityLow) + }, + expectedL7PodNNDirMapAfterFlowRemoved: map[string]v1alpha2.Direction{ + pod2NN: v1alpha2.DirectionIngress, + }, + expectedUninstallCalls: func(mockOFClient *openflowtest.MockClient) { + mockOFClient.EXPECT().UninstallTrafficControlMarkFlows(fmt.Sprintf("tcl7:%s", pod1NN)) + }, + expectedQueueLen: 1, + }, + } + for _, tt := range testcases { + t.Run(tt.name, func(t *testing.T) { + c := newFakeControllerAndWatcher(t, []runtime.Object{testNS1}, interfaces) + stopCh := make(chan struct{}) + defer close(stopCh) + + c.startInformers(stopCh) + _, err := c.client.CoreV1().Pods(pod1.Namespace).Create(ctx, pod1, metav1.CreateOptions{}) + require.NoError(t, err) + _, err = c.client.CoreV1().Pods(pod1.Namespace).Create(ctx, pod2, metav1.CreateOptions{}) + require.NoError(t, err) + assert.Eventuallyf(t, func() bool { + ns, _ := c.namespaceLister.List(labels.Everything()) + return len(c.localPodInformer.GetIndexer().List()) == 2 && len(ns) == 1 + }, 1*time.Second, 10*time.Millisecond, "Pods and Namespaces should be added to Informers") + + tt.expectedInstallCalls(c.mockOFClient) + waitEvents(t, 2, c) + for i := 0; i < 2; i++ { + item, _ := c.queue.Get() + require.NoError(t, c.syncPod(item.(string))) + c.queue.Done(item) + } + // Update Pods with no annotations + _, err = c.client.CoreV1().Namespaces().Update(ctx, tt.Namespace, metav1.UpdateOptions{}) + require.NoError(t, err) + + tt.expectedUninstallCalls(c.mockOFClient) + waitEvents(t, tt.expectedQueueLen, c) + for i := 0; i < tt.expectedQueueLen; i++ { + item, _ := c.queue.Get() + require.NoError(t, c.syncPod(item.(string))) + c.queue.Done(item) + } + assert.Equal(t, tt.expectedL7PodNNDirMapAfterFlowRemoved, c.podToDirectionMap) + }) + } +} diff 
--git a/pkg/agent/controller/networkpolicy/l7engine/reconciler.go b/pkg/agent/controller/networkpolicy/l7engine/reconciler.go index b669cfadb80..2ca94205570 100644 --- a/pkg/agent/controller/networkpolicy/l7engine/reconciler.go +++ b/pkg/agent/controller/networkpolicy/l7engine/reconciler.go @@ -57,6 +57,62 @@ type scCmdRet struct { var ( // Declared as a variable for testing. defaultFS = afero.NewOsFs() + + // Create the config file /etc/suricata/antrea.yaml for Antrea which will be included in the default Suricata config file + // /etc/suricata/suricata.yaml. Two event logs in the config serve alert logging and http event logging purposes respectively. + suricataAntreaConfigData = fmt.Sprintf(`%%YAML 1.1 +--- +outputs: + - eve-log: + enabled: yes + filetype: regular + filename: eve-%%Y-%%m-%%d.json + rotate-interval: day + pcap-file: false + community-id: false + community-id-seed: 0 + xff: + enabled: no + types: + - alert: + tagged-packets: yes + - eve-log: + enabled: yes + filetype: unix_stream + filename: %[1]s + pcap-file: false + community-id: false + community-id-seed: 0 + xff: + enabled: no + types: + - http: + extended: yes +af-packet: + - interface: %[2]s + threads: auto + cluster-id: 80 + cluster-type: cluster_flow + defrag: no + use-mmap: yes + tpacket-v2: yes + checksum-checks: no + copy-mode: ips + copy-iface: %[3]s + - interface: %[3]s + threads: auto + cluster-id: 81 + cluster-type: cluster_flow + defrag: no + use-mmap: yes + tpacket-v2: yes + checksum-checks: no + copy-mode: ips + copy-iface: %[2]s +multi-detect: + enabled: yes + selector: vlan +`, config.L7SuricataSocketPath, config.L7RedirectTargetPortName, config.L7RedirectReturnPortName) ) type threadSafeInt32Set struct { @@ -204,15 +260,19 @@ func convertProtocolTLS(tls *v1beta.TLSProtocol) string { return strings.Join(keywords, " ") } +func (r *Reconciler) StartSuricataOnce() { + r.once.Do(func() { + r.startSuricata() + }) +} + func (r *Reconciler) AddRule(ruleID, policyName string, vlanID uint32, l7Protocols []v1beta.L7Protocol, enableLogging bool) error { start := time.Now() defer func() { klog.V(5).Infof("AddRule took %v", time.Since(start)) }() - r.once.Do(func() { - r.startSuricata() - }) + r.StartSuricataOnce() // Generate the keyword part used in Suricata rules. protoKeywords := make(map[string]sets.Set[string]) @@ -238,14 +298,13 @@ func (r *Reconciler) AddRule(ruleID, policyName string, vlanID uint32, l7Protoco rulesPath := generateTenantRulesPath(vlanID) rulesData := generateTenantRulesData(policyName, protoKeywords, enableLogging) if err := writeConfigFile(rulesPath, rulesData); err != nil { - return fmt.Errorf("failed to write Suricata rules data to file %s for L7 rule %s of %s", rulesPath, ruleID, policyName) + return fmt.Errorf("failed to write Suricata rules data to file %s for L7 rule %s of %s, err: %w", rulesPath, ruleID, policyName, err) } // Add a Suricata tenant. if err := r.addBindingSuricataTenant(vlanID, rulesPath); err != nil { return fmt.Errorf("failed to add Suricata tenant for L7 rule %s of %s: %w", ruleID, policyName, err) } - return nil } @@ -403,49 +462,6 @@ func (r *Reconciler) unregisterSuricataTenantHandler(tenantID, vlanID uint32) (* } func (r *Reconciler) startSuricata() { - // Create the config file /etc/suricata/antrea.yaml for Antrea which will be included in the default Suricata config file - // /etc/suricata/suricata.yaml.
- suricataAntreaConfigData := fmt.Sprintf(`%%YAML 1.1 ---- -outputs: - - eve-log: - enabled: yes - filetype: regular - filename: eve-%%Y-%%m-%%d.json - rotate-interval: day - pcap-file: false - community-id: false - community-id-seed: 0 - xff: - enabled: no - types: - - alert: - tagged-packets: yes -af-packet: - - interface: %[1]s - threads: auto - cluster-id: 80 - cluster-type: cluster_flow - defrag: no - use-mmap: yes - tpacket-v2: yes - checksum-checks: no - copy-mode: ips - copy-iface: %[2]s - - interface: %[2]s - threads: auto - cluster-id: 81 - cluster-type: cluster_flow - defrag: no - use-mmap: yes - tpacket-v2: yes - checksum-checks: no - copy-mode: ips - copy-iface: %[1]s -multi-detect: - enabled: yes - selector: vlan -`, config.L7NetworkPolicyTargetPortName, config.L7NetworkPolicyReturnPortName) f, err := defaultFS.Create(antreaSuricataConfigPath) if err != nil { klog.ErrorS(err, "Failed to create Suricata config file", "FilePath", antreaSuricataConfigPath) diff --git a/pkg/agent/controller/networkpolicy/l7engine/reconciler_test.go b/pkg/agent/controller/networkpolicy/l7engine/reconciler_test.go index f5b960a1233..340547cfba1 100644 --- a/pkg/agent/controller/networkpolicy/l7engine/reconciler_test.go +++ b/pkg/agent/controller/networkpolicy/l7engine/reconciler_test.go @@ -131,45 +131,7 @@ func TestStartSuricata(t *testing.T) { fe.startSuricata() - ok, err := afero.FileContainsBytes(defaultFS, antreaSuricataConfigPath, []byte(`--- -outputs: - - eve-log: - enabled: yes - filetype: regular - filename: eve-%Y-%m-%d.json - rotate-interval: day - pcap-file: false - community-id: false - community-id-seed: 0 - xff: - enabled: no - types: - - alert: - tagged-packets: yes -af-packet: - - interface: antrea-l7-tap0 - threads: auto - cluster-id: 80 - cluster-type: cluster_flow - defrag: no - use-mmap: yes - tpacket-v2: yes - checksum-checks: no - copy-mode: ips - copy-iface: antrea-l7-tap1 - - interface: antrea-l7-tap1 - threads: auto - cluster-id: 81 - cluster-type: cluster_flow - defrag: no - use-mmap: yes - tpacket-v2: yes - checksum-checks: no - copy-mode: ips - copy-iface: antrea-l7-tap0 -multi-detect: - enabled: yes - selector: vlan`)) + ok, err := afero.FileContainsBytes(defaultFS, antreaSuricataConfigPath, []byte(suricataAntreaConfigData)) assert.NoError(t, err) assert.True(t, ok) diff --git a/pkg/agent/controller/networkpolicy/networkpolicy_controller.go b/pkg/agent/controller/networkpolicy/networkpolicy_controller.go index 655ea959804..5565dad1f2a 100644 --- a/pkg/agent/controller/networkpolicy/networkpolicy_controller.go +++ b/pkg/agent/controller/networkpolicy/networkpolicy_controller.go @@ -191,7 +191,8 @@ func NewNetworkPolicyController(antreaClientGetter agent.AntreaClientProvider, v6Enabled bool, gwPort, tunPort uint32, nodeConfig *config.NodeConfig, - podNetworkWait *utilwait.Group) (*Controller, error) { + podNetworkWait *utilwait.Group, + l7Reconciler *l7engine.Reconciler) (*Controller, error) { idAllocator := newIDAllocator(asyncRuleDeleteInterval, dnsInterceptRuleID) c := &Controller{ antreaClientProvider: antreaClientGetter, @@ -211,7 +212,7 @@ func NewNetworkPolicyController(antreaClientGetter agent.AntreaClientProvider, } if l7NetworkPolicyEnabled { - c.l7RuleReconciler = l7engine.NewReconciler() + c.l7RuleReconciler = l7Reconciler c.l7VlanIDAllocator = newL7VlanIDAllocator() } diff --git a/pkg/agent/controller/networkpolicy/networkpolicy_controller_test.go b/pkg/agent/controller/networkpolicy/networkpolicy_controller_test.go index 82012854bee..6256403bc6d 100644 --- 
a/pkg/agent/controller/networkpolicy/networkpolicy_controller_test.go +++ b/pkg/agent/controller/networkpolicy/networkpolicy_controller_test.go @@ -37,6 +37,7 @@ import ( "k8s.io/component-base/metrics/legacyregistry" "antrea.io/antrea/pkg/agent/config" + "antrea.io/antrea/pkg/agent/controller/networkpolicy/l7engine" "antrea.io/antrea/pkg/agent/metrics" "antrea.io/antrea/pkg/agent/openflow" proxytypes "antrea.io/antrea/pkg/agent/proxy/types" @@ -77,6 +78,7 @@ func newTestController() (*Controller, *fake.Clientset, *mockReconciler) { groupIDAllocator := openflow.NewGroupAllocator() groupCounters := []proxytypes.GroupCounter{proxytypes.NewGroupCounter(groupIDAllocator, ch2)} fs := afero.NewMemMapFs() + l7reconciler := l7engine.NewReconciler() controller, _ := NewNetworkPolicyController(&antreaClientGetter{clientset}, nil, nil, @@ -102,7 +104,8 @@ func newTestController() (*Controller, *fake.Clientset, *mockReconciler) { config.HostGatewayOFPort, config.DefaultTunOFPort, &config.NodeConfig{}, - wait.NewGroup()) + wait.NewGroup(), + l7reconciler) reconciler := newMockReconciler() controller.podReconciler = reconciler controller.auditLogger = nil diff --git a/pkg/agent/controller/trafficcontrol/controller_test.go b/pkg/agent/controller/trafficcontrol/controller_test.go index 477b88268eb..0c5e67bbe1a 100644 --- a/pkg/agent/controller/trafficcontrol/controller_test.go +++ b/pkg/agent/controller/trafficcontrol/controller_test.go @@ -288,7 +288,7 @@ func generateTrafficControlState(direction v1alpha2.Direction, } func waitEvents(t *testing.T, expectedEvents int, c *fakeController) { - require.NoError(t, wait.PollImmediate(10*time.Millisecond, 5*time.Second, func() (done bool, err error) { + require.NoError(t, wait.Poll(10*time.Millisecond, 5*time.Second, func() (done bool, err error) { return c.queue.Len() == expectedEvents, nil })) } diff --git a/pkg/agent/flowexporter/connections/connections.go b/pkg/agent/flowexporter/connections/connections.go index 1f61abc5eb9..a8f2be05a3c 100644 --- a/pkg/agent/flowexporter/connections/connections.go +++ b/pkg/agent/flowexporter/connections/connections.go @@ -150,10 +150,13 @@ func (cs *connectionStore) ReleaseConnStoreLock() { // UpdateConnAndQueue deletes the inactive connection from keyToItem map, // without adding it back to the PQ. In this way, we can avoid to reset the // item's expire time every time we encounter it in the PQ. The method also -// updates active connection's stats fields and adds it back to the PQ. +// updates active connection's stats fields and adds it back to the PQ. Layer 7 +// fields should be set to default to prevent from re-exporting same values. 
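The doc comment above describes the reset-after-export behavior added in this hunk: once a connection's record has been queued for export, its L7 fields are cleared so the same HTTP metadata is not exported again in the next cycle. A minimal standalone sketch of that idea, using a simplified struct rather than the exporter's actual Connection type:

package main

import "fmt"

// conn is a simplified stand-in for the exporter's connection record; only the
// fields relevant to the reset-after-export idea are shown.
type conn struct {
	AppProtocolName string
	HttpVals        string
	Bytes           uint64
}

// exportAndReset simulates one export cycle: emit the current values, then
// clear the L7 fields so the next cycle only carries newly observed events.
func exportAndReset(c *conn) string {
	record := fmt.Sprintf("proto=%q http=%q bytes=%d", c.AppProtocolName, c.HttpVals, c.Bytes)
	c.AppProtocolName = ""
	c.HttpVals = ""
	return record
}

func main() {
	c := &conn{AppProtocolName: "http", HttpVals: `{"0":{"url":"/public/"}}`, Bytes: 1024}
	fmt.Println(exportAndReset(c)) // first export carries the HTTP metadata
	fmt.Println(exportAndReset(c)) // second export carries empty L7 fields unless new events arrived
}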
func (cs *connectionStore) UpdateConnAndQueue(pqItem *flowexporter.ItemToExpire, currTime time.Time) { conn := pqItem.Conn conn.LastExportTime = currTime + conn.AppProtocolName = "" + conn.HttpVals = "" if conn.ReadyToDelete || !conn.IsActive { cs.expirePriorityQueue.RemoveItemFromMap(conn) } else { diff --git a/pkg/agent/flowexporter/connections/connections_test.go b/pkg/agent/flowexporter/connections/connections_test.go index 3ab693bda3c..129e62d7070 100644 --- a/pkg/agent/flowexporter/connections/connections_test.go +++ b/pkg/agent/flowexporter/connections/connections_test.go @@ -123,7 +123,7 @@ func TestConnectionStore_DeleteConnWithoutLock(t *testing.T) { // test on conntrack connection store mockConnDumper := connectionstest.NewMockConnTrackDumper(ctrl) - conntrackConnStore := NewConntrackConnectionStore(mockConnDumper, true, false, nil, mockPodStore, nil, testFlowExporterOptions) + conntrackConnStore := NewConntrackConnectionStore(mockConnDumper, true, false, nil, mockPodStore, nil, nil, testFlowExporterOptions) conntrackConnStore.connections[connKey] = conn metrics.TotalAntreaConnectionsInConnTrackTable.Set(1) diff --git a/pkg/agent/flowexporter/connections/conntrack_connections.go b/pkg/agent/flowexporter/connections/conntrack_connections.go index 1d4bf619e5d..56a22fe99cb 100644 --- a/pkg/agent/flowexporter/connections/conntrack_connections.go +++ b/pkg/agent/flowexporter/connections/conntrack_connections.go @@ -16,6 +16,7 @@ package connections import ( "encoding/binary" + "encoding/json" "fmt" "time" @@ -45,9 +46,14 @@ type ConntrackConnectionStore struct { networkPolicyQuerier querier.AgentNetworkPolicyInfoQuerier pollInterval time.Duration connectUplinkToBridge bool + l7EventMapGetter L7EventMapGetter connectionStore } +type L7EventMapGetter interface { + ConsumeL7EventMap() map[flowexporter.ConnectionKey]L7ProtocolFields +} + func NewConntrackConnectionStore( connTrackDumper ConnTrackDumper, v4Enabled bool, @@ -55,6 +61,7 @@ func NewConntrackConnectionStore( npQuerier querier.AgentNetworkPolicyInfoQuerier, podStore podstore.Interface, proxier proxy.Proxier, + l7EventMapGetterFunc L7EventMapGetter, o *flowexporter.FlowExporterOptions, ) *ConntrackConnectionStore { return &ConntrackConnectionStore{ @@ -65,6 +72,7 @@ func NewConntrackConnectionStore( pollInterval: o.PollInterval, connectionStore: NewConnectionStore(podStore, proxier, o), connectUplinkToBridge: o.ConnectUplinkToBridge, + l7EventMapGetter: l7EventMapGetterFunc, } } @@ -96,6 +104,8 @@ func (cs *ConntrackConnectionStore) Run(stopCh <-chan struct{}) { // TODO: As optimization, only poll invalid/closed connections during every poll, and poll the established connections right before the export. 
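Poll (below) takes one snapshot of the listener's buffered L7 events per cycle via ConsumeL7EventMap, so each event is matched against the conntrack connections exactly once. The underlying pattern is a swap-under-lock; a standalone sketch with simplified string keys instead of the exporter's connection-key type:

package main

import (
	"fmt"
	"sync"
)

// eventBuffer accumulates per-connection events between poll cycles.
type eventBuffer struct {
	mu     sync.Mutex
	events map[string][]string // keyed by a connection identifier
}

func newEventBuffer() *eventBuffer {
	return &eventBuffer{events: make(map[string][]string)}
}

// add is called by the producer (the socket listener) for every parsed event.
func (b *eventBuffer) add(connKey, event string) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.events[connKey] = append(b.events[connKey], event)
}

// consume swaps the map out under the lock so each buffered event is handed to
// exactly one poll cycle; the producer keeps writing into the fresh map.
func (b *eventBuffer) consume() map[string][]string {
	b.mu.Lock()
	defer b.mu.Unlock()
	out := b.events
	b.events = make(map[string][]string)
	return out
}

func main() {
	b := newEventBuffer()
	b.add("10.10.0.1:59920->10.10.0.2:80/tcp", "GET /public/1")
	fmt.Println(len(b.consume())) // 1: this poll cycle takes the buffered events
	fmt.Println(len(b.consume())) // 0: nothing new has been buffered since
}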
func (cs *ConntrackConnectionStore) Poll() ([]int, error) { klog.V(2).Infof("Polling conntrack") + // DeepCopy the L7EventMap before polling the conntrack table to match corresponding L4 connection with L7 events and avoid missing the L7 events for corresponding L4 connection + l7EventMap := cs.l7EventMapGetter.ConsumeL7EventMap() var zones []uint16 var connsLens []int @@ -163,7 +173,7 @@ func (cs *ConntrackConnectionStore) Poll() ([]int, error) { for _, conn := range filteredConnsList { cs.AddOrUpdateConn(conn) } - + cs.fillL7EventInfo(l7EventMap) cs.ReleaseConnStoreLock() metrics.TotalConnectionsInConnTrackTable.Set(float64(totalConns)) @@ -325,3 +335,27 @@ func (cs *ConntrackConnectionStore) deleteConnWithoutLock(connKey flowexporter.C func (cs *ConntrackConnectionStore) GetPriorityQueue() *priorityqueue.ExpirePriorityQueue { return cs.connectionStore.expirePriorityQueue } + +func (cs *ConntrackConnectionStore) fillL7EventInfo(l7EventMap map[flowexporter.Tuple]L7ProtocolFields) { + for connKey, conn := range cs.connections { + l7event, ok := l7EventMap[connKey] + if ok { + if len(l7event.http) > 0 { + jsonBytes, err := json.Marshal(l7event.http) + if err != nil { + klog.ErrorS(err, "Converting l7Event http failed") + } + conn.HttpVals += string(jsonBytes) + conn.AppProtocolName = "http" + } + // In case L7 event is received after the last planned export of the TCP connection, add + // the event back to the queue to be exported in next export cycle. In case the L7 event + // is received later than the connkey become unavailable in the cs.connection, we will + // discard that event + _, exists := cs.expirePriorityQueue.KeyToItem[connKey] + if !exists { + cs.expirePriorityQueue.WriteItemToQueue(connKey, conn) + } + } + } +} diff --git a/pkg/agent/flowexporter/connections/conntrack_connections_perf_test.go b/pkg/agent/flowexporter/connections/conntrack_connections_perf_test.go index 8bfc97a0d12..7ae53d175cf 100644 --- a/pkg/agent/flowexporter/connections/conntrack_connections_perf_test.go +++ b/pkg/agent/flowexporter/connections/conntrack_connections_perf_test.go @@ -147,7 +147,8 @@ func setupConntrackConnStore(b *testing.B) (*ConntrackConnectionStore, *connecti mockProxier.EXPECT().GetServiceByIP(serviceStr).Return(servicePortName, true).AnyTimes() npQuerier := queriertest.NewMockAgentNetworkPolicyInfoQuerier(ctrl) - return NewConntrackConnectionStore(mockConnDumper, true, false, npQuerier, mockPodStore, nil, testFlowExporterOptions), mockConnDumper + l7Listener := NewL7Listener(nil, mockPodStore) + return NewConntrackConnectionStore(mockConnDumper, true, false, npQuerier, mockPodStore, nil, l7Listener, testFlowExporterOptions), mockConnDumper } func generateConns() []*flowexporter.Connection { diff --git a/pkg/agent/flowexporter/connections/conntrack_connections_test.go b/pkg/agent/flowexporter/connections/conntrack_connections_test.go index 3a63e5fa49f..9bc896a8846 100644 --- a/pkg/agent/flowexporter/connections/conntrack_connections_test.go +++ b/pkg/agent/flowexporter/connections/conntrack_connections_test.go @@ -91,6 +91,13 @@ var ( } ) +type fakeL7Listener struct{} + +func (fll *fakeL7Listener) ConsumeL7EventMap() map[flowexporter.ConnectionKey]L7ProtocolFields { + l7EventsMap := make(map[flowexporter.ConnectionKey]L7ProtocolFields) + return l7EventsMap +} + func TestConntrackConnectionStore_AddOrUpdateConn(t *testing.T) { ctrl := gomock.NewController(t) refTime := time.Now() @@ -216,7 +223,7 @@ func TestConntrackConnectionStore_AddOrUpdateConn(t *testing.T) { mockProxier := 
proxytest.NewMockProxier(ctrl) mockConnDumper := connectionstest.NewMockConnTrackDumper(ctrl) npQuerier := queriertest.NewMockAgentNetworkPolicyInfoQuerier(ctrl) - conntrackConnStore := NewConntrackConnectionStore(mockConnDumper, true, false, npQuerier, mockPodStore, mockProxier, testFlowExporterOptions) + conntrackConnStore := NewConntrackConnectionStore(mockConnDumper, true, false, npQuerier, mockPodStore, mockProxier, nil, testFlowExporterOptions) for _, c := range tc { t.Run(c.name, func(t *testing.T) { @@ -297,7 +304,7 @@ func TestConnectionStore_DeleteConnectionByKey(t *testing.T) { metrics.TotalAntreaConnectionsInConnTrackTable.Set(float64(len(testFlows))) // Create connectionStore mockPodStore := podstoretest.NewMockInterface(ctrl) - connStore := NewConntrackConnectionStore(nil, true, false, nil, mockPodStore, nil, testFlowExporterOptions) + connStore := NewConntrackConnectionStore(nil, true, false, nil, mockPodStore, nil, nil, testFlowExporterOptions) // Add flows to the connection store. for i, flow := range testFlows { connStore.connections[*testFlowKeys[i]] = flow @@ -319,7 +326,7 @@ func TestConnectionStore_MetricSettingInPoll(t *testing.T) { // Create connectionStore mockPodStore := podstoretest.NewMockInterface(ctrl) mockConnDumper := connectionstest.NewMockConnTrackDumper(ctrl) - conntrackConnStore := NewConntrackConnectionStore(mockConnDumper, true, false, nil, mockPodStore, nil, testFlowExporterOptions) + conntrackConnStore := NewConntrackConnectionStore(mockConnDumper, true, false, nil, mockPodStore, nil, &fakeL7Listener{}, testFlowExporterOptions) // Hard-coded conntrack occupancy metrics for test TotalConnections := 0 MaxConnections := 300000 diff --git a/pkg/agent/flowexporter/connections/conntrack_ovs.go b/pkg/agent/flowexporter/connections/conntrack_ovs.go index efea78c31e6..d3e769d2dac 100644 --- a/pkg/agent/flowexporter/connections/conntrack_ovs.go +++ b/pkg/agent/flowexporter/connections/conntrack_ovs.go @@ -32,13 +32,6 @@ import ( // Following map is for converting protocol name (string) to protocol identifier var ( - protocols = map[string]uint8{ - "icmp": 1, - "igmp": 2, - "tcp": 6, - "udp": 17, - "ipv6-icmp": 58, - } // Mapping is defined at https://github.com/torvalds/linux/blob/v5.9/include/uapi/linux/netfilter/nf_conntrack_common.h#L42 conntrackStatusMap = map[string]uint32{ "EXPECTED": uint32(1), @@ -140,7 +133,7 @@ func flowStringToAntreaConnection(flow string, zoneFilter uint16) (*flowexporter switch { case hasAnyProto(fs): // Proto identifier - proto, err := lookupProtocolMap(fs) + proto, err := flowexporter.LookupProtocolMap(fs) if err != nil { return nil, err } @@ -299,7 +292,7 @@ func flowStringToAntreaConnection(flow string, zoneFilter uint16) (*flowexporter } func hasAnyProto(text string) bool { - for proto := range protocols { + for proto := range flowexporter.Protocols { if strings.Contains(strings.ToLower(text), proto) { return true } @@ -307,17 +300,6 @@ func hasAnyProto(text string) bool { return false } -// lookupProtocolMap returns protocol identifier given protocol name -func lookupProtocolMap(name string) (uint8, error) { - name = strings.TrimSpace(name) - lowerCaseStr := strings.ToLower(name) - proto, found := protocols[lowerCaseStr] - if !found { - return 0, fmt.Errorf("unknown IP protocol specified: %s", name) - } - return proto, nil -} - func (ct *connTrackOvsCtl) GetMaxConnections() (int, error) { cmdOutput, execErr := ct.ovsctlClient.RunAppctlCmd("dpctl/ct-get-maxconns", false) if execErr != nil { diff --git 
a/pkg/agent/flowexporter/connections/l7_listener.go b/pkg/agent/flowexporter/connections/l7_listener.go new file mode 100644 index 00000000000..4902abc4da4 --- /dev/null +++ b/pkg/agent/flowexporter/connections/l7_listener.go @@ -0,0 +1,211 @@ +// Copyright 2023 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connections + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "net" + "net/netip" + "os" + "path/filepath" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" + + "antrea.io/antrea/pkg/agent/config" + "antrea.io/antrea/pkg/agent/flowexporter" + k8sutil "antrea.io/antrea/pkg/util/k8s" + "antrea.io/antrea/pkg/util/podstore" +) + +type PodL7FlowExporterAttrGetter interface { + IsL7FlowExporterRequested(podNN string, ingress bool) bool +} + +// L7ProtocolFields holds layer 7 protocols supported +type L7ProtocolFields struct { + http map[int32]*Http +} + +// Http holds the L7 HTTP flow JSON values. +type Http struct { + Hostname string `json:"hostname"` + URL string `json:"url"` + UserAgent string `json:"http_user_agent"` + ContentType string `json:"http_content_type"` + Method string `json:"http_method"` + Protocol string `json:"protocol"` + Status int32 `json:"status"` + ContentLength int32 `json:"length"` +} + +// JsonToEvent holds Suricata event JSON values. 
+// See https://docs.suricata.io/en/latest/output/eve/eve-json-format.html?highlight=HTTP%20event#event-types +type JsonToEvent struct { + Timestamp string `json:"timestamp"` + FlowID int64 `json:"flow_id"` + InInterface string `json:"in_iface"` + EventType string `json:"event_type"` + VLAN []int32 `json:"vlan"` + SrcIP netip.Addr `json:"src_ip"` + SrcPort int32 `json:"src_port"` + DestIP netip.Addr `json:"dest_ip"` + DestPort int32 `json:"dest_port"` + Proto string `json:"proto"` + TxID int32 `json:"tx_id"` + HTTP *Http `json:"http"` +} + +type L7Listener struct { + l7Events map[flowexporter.ConnectionKey]L7ProtocolFields + l7mut sync.Mutex + suricataEventSocketPath string + podL7FlowExporterAttrGetter PodL7FlowExporterAttrGetter + podStore podstore.Interface +} + +func NewL7Listener( + podL7FlowExporterAttrGetter PodL7FlowExporterAttrGetter, + podStore podstore.Interface) *L7Listener { + return &L7Listener{ + l7Events: make(map[flowexporter.ConnectionKey]L7ProtocolFields), + suricataEventSocketPath: config.L7SuricataSocketPath, + podL7FlowExporterAttrGetter: podL7FlowExporterAttrGetter, + podStore: podStore, + } +} + +func (l *L7Listener) Run(stopCh <-chan struct{}) { + go wait.Until(l.listenAndAcceptConn, 5*time.Second, stopCh) + <-stopCh +} + +func (l *L7Listener) listenAndAcceptConn() { + // Remove stale connections + if err := os.Remove(l.suricataEventSocketPath); err != nil && !os.IsNotExist(err) { + klog.V(2).ErrorS(err, "failed to remove stale socket") + } + if err := os.MkdirAll(filepath.Dir(l.suricataEventSocketPath), 0750); err != nil { + klog.ErrorS(err, "Failed to create directory %s", filepath.Dir(l.suricataEventSocketPath)) + } + listener, err := net.Listen("unix", l.suricataEventSocketPath) + if err != nil { + klog.ErrorS(err, "Failed to listen on Suricata socket") + return + } + defer listener.Close() + klog.InfoS("L7 Listener Server started. 
Listening for connections...") + for { + conn, err := listener.Accept() + if err != nil { + klog.ErrorS(err, "Error accepting Suricata connection") + return + } + go l.handleClientConnection(conn) + } +} + +func (l *L7Listener) handleClientConnection(conn net.Conn) { + defer conn.Close() + reader := bufio.NewReader(conn) + for { + buffer, err := reader.ReadBytes('\n') + if err != nil && err != io.EOF { + klog.ErrorS(err, "Error reading data", "buffer", buffer) + return + } + err = l.processLog(buffer) + if err != nil { + klog.ErrorS(err, "Error while processing L7 data") + return + } + } +} + +func (l *L7Listener) processLog(data []byte) error { + var event JsonToEvent + err := json.Unmarshal(data, &event) + if err != nil { + return fmt.Errorf("error parsing JSON data %v", data) + } + if event.EventType != "http" { + return nil + } + if err = l.addOrUpdateL7EventMap(&event); err != nil { + return fmt.Errorf("error while adding or updating L7 event map %v", err) + } + return nil +} + +func (l *L7Listener) addOrUpdateL7EventMap(event *JsonToEvent) error { + protocol, err := flowexporter.LookupProtocolMap(event.Proto) + if err != nil { + return fmt.Errorf("invalid protocol type, err: %v", err) + } + conn := flowexporter.Connection{ + FlowKey: flowexporter.Tuple{ + SourceAddress: event.SrcIP, + DestinationAddress: event.DestIP, + Protocol: protocol, + SourcePort: uint16(event.SrcPort), + DestinationPort: uint16(event.DestPort), + }, + } + connKey := flowexporter.NewConnectionKey(&conn) + srcIP := conn.FlowKey.SourceAddress.String() + dstIP := conn.FlowKey.DestinationAddress.String() + startTime, _ := time.Parse(time.RFC3339Nano, event.Timestamp) + srcPod, srcFound := l.podStore.GetPodByIPAndTime(srcIP, startTime) + dstPod, dstFound := l.podStore.GetPodByIPAndTime(dstIP, startTime) + if !srcFound && !dstFound { + klog.ErrorS(nil, "Cannot map any of the IPs to a local Pod", "srcIP", srcIP, "dstIP", dstIP) + return nil + } + var sourcePodNN, destinationPodNN string + if srcFound { + sourcePodNN = k8sutil.NamespacedName(srcPod.Namespace, srcPod.Name) + } + if dstFound { + destinationPodNN = k8sutil.NamespacedName(dstPod.Namespace, dstPod.Name) + } + l.l7mut.Lock() + defer l.l7mut.Unlock() + switch event.EventType { + case "http": + if l.podL7FlowExporterAttrGetter.IsL7FlowExporterRequested(sourcePodNN, false) || l.podL7FlowExporterAttrGetter.IsL7FlowExporterRequested(destinationPodNN, true) { + _, ok := l.l7Events[connKey] + if !ok { + l.l7Events[connKey] = L7ProtocolFields{ + http: make(map[int32]*Http), + } + } + l.l7Events[connKey].http[event.TxID] = event.HTTP + } + } + return nil +} + +func (l *L7Listener) ConsumeL7EventMap() map[flowexporter.ConnectionKey]L7ProtocolFields { + l.l7mut.Lock() + defer l.l7mut.Unlock() + l7EventsMap := l.l7Events + l.l7Events = make(map[flowexporter.ConnectionKey]L7ProtocolFields) + return l7EventsMap +} diff --git a/pkg/agent/flowexporter/connections/l7_listener_test.go b/pkg/agent/flowexporter/connections/l7_listener_test.go new file mode 100644 index 00000000000..23a931ae1c2 --- /dev/null +++ b/pkg/agent/flowexporter/connections/l7_listener_test.go @@ -0,0 +1,301 @@ +// Copyright 2023 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package connections + +import ( + "bufio" + "encoding/json" + "net" + "net/netip" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "antrea.io/antrea/pkg/agent/flowexporter" + "antrea.io/antrea/pkg/apis/crd/v1alpha2" + podstoretest "antrea.io/antrea/pkg/util/podstore/testing" +) + +var ( + fakeDestPod = &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePod", + Namespace: "fakeNS", + }, + } + fakeSrcPod = &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fakePod", + Namespace: "fakeNS", + }, + } +) + +type fakePodL7FlowExporterAttrGetter struct{} + +func (fl *fakePodL7FlowExporterAttrGetter) IsL7FlowExporterRequested(podNN string, ingress bool) bool { + podToDirectionMap := map[string]v1alpha2.Direction{ + "destPodNNDirIngress": v1alpha2.DirectionIngress, + "destPodNNDirEgress": v1alpha2.DirectionEgress, + "destPodNNDirBoth": v1alpha2.DirectionBoth, + "srcPodNNDirIngress": v1alpha2.DirectionIngress, + "srcPodNNDirEgress": v1alpha2.DirectionEgress, + "srcPodNNDirBoth": v1alpha2.DirectionBoth, + "fakeNS/fakePod": v1alpha2.DirectionIngress, + } + + if direction, ok := podToDirectionMap[podNN]; ok { + switch direction { + case v1alpha2.DirectionIngress: + return ingress + case v1alpha2.DirectionEgress: + return !ingress + case v1alpha2.DirectionBoth: + return true + } + } + return false +} + +func newFakeL7Listener(podStore *podstoretest.MockInterface) *L7Listener { + return &L7Listener{ + l7Events: make(map[flowexporter.ConnectionKey]L7ProtocolFields), + suricataEventSocketPath: "suricata_Test.socket", + podL7FlowExporterAttrGetter: &fakePodL7FlowExporterAttrGetter{}, + podStore: podStore, + } +} + +func TestFlowExporterL7ListenerHttp(t *testing.T) { + ctrl := gomock.NewController(t) + mockPodStore := podstoretest.NewMockInterface(ctrl) + l := newFakeL7Listener(mockPodStore) + + stopCh := make(chan struct{}) + defer func() { + close(stopCh) + os.RemoveAll(l.suricataEventSocketPath) + }() + go l.Run(stopCh) + <-time.After(100 * time.Millisecond) + + testCases := []struct { + name string + input []JsonToEvent + eventPresent bool + expectedErr error + expectedEvents L7ProtocolFields + }{ + { + name: "Invalid eventType", + input: []JsonToEvent{ + { + Timestamp: time.Now().String(), + FlowID: 1, + InInterface: "mock_interface", + EventType: "mock_event1", + VLAN: []int32{1}, + SrcIP: netip.MustParseAddr("10.10.0.1"), + SrcPort: 59921, + DestIP: netip.MustParseAddr("10.10.0.2"), + DestPort: 80, + Proto: "TCP", + TxID: 0, + HTTP: &Http{ + Hostname: "10.10.0.1", + URL: "/public/", + UserAgent: "curl/7.74.0", + ContentType: "text/html", + Method: "GET", + Protocol: "HTTP/1.1", + Status: 200, + ContentLength: 153, + }, + }, + }, + eventPresent: false, + expectedEvents: L7ProtocolFields{}, + }, { + name: "Valid case", + input: []JsonToEvent{ + { + Timestamp: "0001-01-01 00:00:00 +0000 UTC", + FlowID: 1, + InInterface: "mock_interface", + EventType: "http", + VLAN: []int32{1}, + SrcIP: netip.MustParseAddr("10.10.0.1"), + SrcPort: 59920, + DestIP: 
netip.MustParseAddr("10.10.0.2"), + DestPort: 80, + Proto: "TCP", + TxID: 0, + HTTP: &Http{ + Hostname: "10.10.0.1", + URL: "/public/1", + UserAgent: "curl/7.74.0", + ContentType: "text/html", + Method: "GET", + Protocol: "HTTP/1.1", + Status: 200, + ContentLength: 153, + }, + }, + }, + eventPresent: true, + expectedEvents: L7ProtocolFields{ + http: map[int32]*Http{ + 0: { + Hostname: "10.10.0.1", + URL: "/public/1", + UserAgent: "curl/7.74.0", + ContentType: "text/html", + Method: "GET", + Protocol: "HTTP/1.1", + Status: 200, + ContentLength: 153, + }, + }, + }, + }, { + name: "Valid case for persistent http", + input: []JsonToEvent{ + { + Timestamp: time.Now().String(), + FlowID: 1, + InInterface: "mock_interface", + EventType: "http", + VLAN: []int32{1}, + SrcIP: netip.MustParseAddr("10.10.0.1"), + SrcPort: 59920, + DestIP: netip.MustParseAddr("10.10.0.2"), + DestPort: 80, + Proto: "TCP", + TxID: 0, + HTTP: &Http{ + Hostname: "10.10.0.1", + URL: "/public/2", + UserAgent: "curl/7.74.0", + ContentType: "text/html", + Method: "GET", + Protocol: "HTTP/1.1", + Status: 200, + ContentLength: 153, + }, + }, { + Timestamp: time.Now().String(), + FlowID: 1, + InInterface: "mock_interface", + EventType: "http", + VLAN: []int32{1}, + SrcIP: netip.MustParseAddr("10.10.0.1"), + SrcPort: 59920, + DestIP: netip.MustParseAddr("10.10.0.2"), + DestPort: 80, + Proto: "TCP", + TxID: 1, + HTTP: &Http{ + Hostname: "10.10.0.1", + URL: "/public/3", + UserAgent: "curl/7.74.0", + ContentType: "text/html", + Method: "GET", + Protocol: "HTTP/1.1", + Status: 201, + ContentLength: 154, + }, + }, + }, + eventPresent: true, + expectedEvents: L7ProtocolFields{ + http: map[int32]*Http{ + 0: { + Hostname: "10.10.0.1", + URL: "/public/2", + UserAgent: "curl/7.74.0", + ContentType: "text/html", + Method: "GET", + Protocol: "HTTP/1.1", + Status: 200, + ContentLength: 153, + }, + 1: { + Hostname: "10.10.0.1", + URL: "/public/3", + UserAgent: "curl/7.74.0", + ContentType: "text/html", + Method: "GET", + Protocol: "HTTP/1.1", + Status: 201, + ContentLength: 154, + }, + }, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + socketConn, err := net.Dial("unix", l.suricataEventSocketPath) + if err != nil { + t.Fatalf("Failed to connect to server: %s", err) + } + defer socketConn.Close() + writer := bufio.NewWriter(socketConn) + timeNow, _ := time.Parse(time.RFC3339Nano, tc.input[0].Timestamp) + mockPodStore.EXPECT().GetPodByIPAndTime("10.10.0.1", timeNow).Return(fakeSrcPod, true) + mockPodStore.EXPECT().GetPodByIPAndTime("10.10.0.2", timeNow).Return(fakeDestPod, true) + for _, msg := range tc.input { + jsonData, err := json.Marshal(msg) + if err != nil { + t.Errorf("Error Marshaling data: %v\n", err) + } + writer.Write(jsonData) + if err != nil { + t.Errorf("Error writing event data: %v\n", err) + } + _, err = writer.Write([]byte("\n")) + if err != nil { + t.Errorf("Error writing newline: %v\n", err) + } + } + writer.Flush() + socketConn.Close() + <-time.After(100 * time.Millisecond) + protocol, _ := flowexporter.LookupProtocolMap(tc.input[0].Proto) + // Get 5-tuple information + tuple := flowexporter.Tuple{ + SourceAddress: tc.input[0].SrcIP, + DestinationAddress: tc.input[0].DestIP, + Protocol: protocol, + SourcePort: uint16(tc.input[0].SrcPort), + DestinationPort: uint16(tc.input[0].DestPort), + } + conn := flowexporter.Connection{} + conn.FlowKey = tuple + connKey := flowexporter.NewConnectionKey(&conn) + allL7Events := l.ConsumeL7EventMap() + existingEvent, exists := allL7Events[connKey] + 
assert.Equal(t, tc.eventPresent, exists) + if exists { + assert.Equal(t, tc.expectedEvents.http, existingEvent.http) + } + }) + } +} diff --git a/pkg/agent/flowexporter/exporter/exporter.go b/pkg/agent/flowexporter/exporter/exporter.go index b9f3140440f..4d4436e47f5 100644 --- a/pkg/agent/flowexporter/exporter/exporter.go +++ b/pkg/agent/flowexporter/exporter/exporter.go @@ -36,6 +36,7 @@ import ( "antrea.io/antrea/pkg/agent/metrics" "antrea.io/antrea/pkg/agent/openflow" "antrea.io/antrea/pkg/agent/proxy" + "antrea.io/antrea/pkg/features" "antrea.io/antrea/pkg/ipfix" "antrea.io/antrea/pkg/ovs/ovsconfig" "antrea.io/antrea/pkg/querier" @@ -101,6 +102,8 @@ var ( "flowType", "egressName", "egressIP", + "appProtocolName", + "httpVals", } AntreaInfoElementsIPv4 = append(antreaInfoElementsCommon, []string{"destinationClusterIPv4"}...) AntreaInfoElementsIPv6 = append(antreaInfoElementsCommon, []string{"destinationClusterIPv6"}...) @@ -130,6 +133,7 @@ type FlowExporter struct { expiredConns []flowexporter.Connection egressQuerier querier.EgressQuerier podStore podstore.Interface + l7Listener *connections.L7Listener } func genObservationID(nodeName string) uint32 { @@ -157,7 +161,7 @@ func prepareExporterInputArgs(collectorProto, nodeName string) exporter.Exporter func NewFlowExporter(podStore podstore.Interface, proxier proxy.Proxier, k8sClient kubernetes.Interface, nodeRouteController *noderoute.Controller, trafficEncapMode config.TrafficEncapModeType, nodeConfig *config.NodeConfig, v4Enabled, v6Enabled bool, serviceCIDRNet, serviceCIDRNetv6 *net.IPNet, ovsDatapathType ovsconfig.OVSDatapathType, proxyEnabled bool, npQuerier querier.AgentNetworkPolicyInfoQuerier, o *flowexporter.FlowExporterOptions, - egressQuerier querier.EgressQuerier) (*FlowExporter, error) { + egressQuerier querier.EgressQuerier, podL7FlowExporterAttrGetter connections.PodL7FlowExporterAttrGetter) (*FlowExporter, error) { // Initialize IPFIX registry registry := ipfix.NewIPFIXRegistry() registry.LoadRegistry() @@ -168,10 +172,11 @@ func NewFlowExporter(podStore podstore.Interface, proxier proxy.Proxier, k8sClie return nil, err } expInput := prepareExporterInputArgs(o.FlowCollectorProto, nodeName) + l7Listener := connections.NewL7Listener(podL7FlowExporterAttrGetter, podStore) connTrackDumper := connections.InitializeConnTrackDumper(nodeConfig, serviceCIDRNet, serviceCIDRNetv6, ovsDatapathType, proxyEnabled) denyConnStore := connections.NewDenyConnectionStore(podStore, proxier, o) - conntrackConnStore := connections.NewConntrackConnectionStore(connTrackDumper, v4Enabled, v6Enabled, npQuerier, podStore, proxier, o) + conntrackConnStore := connections.NewConntrackConnectionStore(connTrackDumper, v4Enabled, v6Enabled, npQuerier, podStore, proxier, l7Listener, o) if nodeRouteController == nil { klog.InfoS("NodeRouteController is nil, will not be able to determine flow type for connections") @@ -195,6 +200,7 @@ func NewFlowExporter(podStore podstore.Interface, proxier proxy.Proxier, k8sClie expiredConns: make([]flowexporter.Connection, 0, maxConnsToExport*2), egressQuerier: egressQuerier, podStore: podStore, + l7Listener: l7Listener, }, nil } @@ -204,6 +210,10 @@ func (exp *FlowExporter) GetDenyConnStore() *connections.DenyConnectionStore { func (exp *FlowExporter) Run(stopCh <-chan struct{}) { go exp.podStore.Run(stopCh) + // Start L7 connection flow socket + if features.DefaultFeatureGate.Enabled(features.L7FlowExporter) { + go exp.l7Listener.Run(stopCh) + } // Start the goroutine to periodically delete stale deny connections. 
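The Run hunk above only starts the L7 listener when the L7FlowExporter feature gate is enabled; the gate itself is registered as Alpha and disabled by default further down in pkg/features. A small sketch of how such a gate behaves with k8s.io/component-base/featuregate, using a locally constructed gate instead of Antrea's DefaultFeatureGate:

package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

const L7FlowExporter featuregate.Feature = "L7FlowExporter"

func main() {
	gate := featuregate.NewFeatureGate()
	// Register the gate as Alpha and disabled by default, mirroring how the
	// agent feature gates are declared.
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		L7FlowExporter: {Default: false, PreRelease: featuregate.Alpha},
	}); err != nil {
		panic(err)
	}
	fmt.Println(gate.Enabled(L7FlowExporter)) // false

	// Enabling it (in Antrea's case via the antrea-agent ConfigMap) flips the
	// check that guards starting the L7 listener goroutine.
	if err := gate.Set("L7FlowExporter=true"); err != nil {
		panic(err)
	}
	fmt.Println(gate.Enabled(L7FlowExporter)) // true
}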
go exp.denyConnStore.RunPeriodicDeletion(stopCh) @@ -577,6 +587,10 @@ func (exp *FlowExporter) addConnToSet(conn *flowexporter.Connection) error { ie.SetStringValue(conn.EgressName) case "egressIP": ie.SetStringValue(conn.EgressIP) + case "appProtocolName": + ie.SetStringValue(conn.AppProtocolName) + case "httpVals": + ie.SetStringValue(conn.HttpVals) } } err := exp.ipfixSet.AddRecord(eL, templateID) diff --git a/pkg/agent/flowexporter/exporter/exporter_perf_test.go b/pkg/agent/flowexporter/exporter/exporter_perf_test.go index 48df3418ba9..dfbbd00eadc 100644 --- a/pkg/agent/flowexporter/exporter/exporter_perf_test.go +++ b/pkg/agent/flowexporter/exporter/exporter_perf_test.go @@ -169,8 +169,9 @@ func NewFlowExporterForTest(o *flowexporter.FlowExporterOptions) *FlowExporter { v4Enabled := !testWithIPv6 v6Enabled := testWithIPv6 + l7Listener := connections.NewL7Listener(nil, nil) denyConnStore := connections.NewDenyConnectionStore(nil, nil, o) - conntrackConnStore := connections.NewConntrackConnectionStore(nil, v4Enabled, v6Enabled, nil, nil, nil, o) + conntrackConnStore := connections.NewConntrackConnectionStore(nil, v4Enabled, v6Enabled, nil, nil, nil, l7Listener, o) return &FlowExporter{ collectorAddr: o.FlowCollectorAddr, @@ -188,6 +189,7 @@ func NewFlowExporterForTest(o *flowexporter.FlowExporterOptions) *FlowExporter { conntrackPriorityQueue: conntrackConnStore.GetPriorityQueue(), denyPriorityQueue: denyConnStore.GetPriorityQueue(), expiredConns: make([]flowexporter.Connection, 0, maxConnsToExport*2), + l7Listener: l7Listener, } } diff --git a/pkg/agent/flowexporter/exporter/exporter_test.go b/pkg/agent/flowexporter/exporter/exporter_test.go index fc824dd63c7..ea7fc42cd17 100644 --- a/pkg/agent/flowexporter/exporter/exporter_test.go +++ b/pkg/agent/flowexporter/exporter/exporter_test.go @@ -663,7 +663,7 @@ func runSendFlowRecordTests(t *testing.T, flowExp *FlowExporter, isIPv6 bool) { IdleFlowTimeout: testIdleFlowTimeout, StaleConnectionTimeout: 1, PollInterval: 1} - flowExp.conntrackConnStore = connections.NewConntrackConnectionStore(mockConnDumper, !isIPv6, isIPv6, nil, nil, nil, o) + flowExp.conntrackConnStore = connections.NewConntrackConnectionStore(mockConnDumper, !isIPv6, isIPv6, nil, nil, nil, nil, o) flowExp.denyConnStore = connections.NewDenyConnectionStore(nil, nil, o) flowExp.conntrackPriorityQueue = flowExp.conntrackConnStore.GetPriorityQueue() flowExp.denyPriorityQueue = flowExp.denyConnStore.GetPriorityQueue() diff --git a/pkg/agent/flowexporter/types.go b/pkg/agent/flowexporter/types.go index 825fc5a4d7b..f091fe7f3f2 100644 --- a/pkg/agent/flowexporter/types.go +++ b/pkg/agent/flowexporter/types.go @@ -84,6 +84,8 @@ type Connection struct { FlowType uint8 EgressName string EgressIP string + AppProtocolName string + HttpVals string } type ItemToExpire struct { diff --git a/pkg/agent/flowexporter/utils.go b/pkg/agent/flowexporter/utils.go index 100891500fc..3d8669b103e 100644 --- a/pkg/agent/flowexporter/utils.go +++ b/pkg/agent/flowexporter/utils.go @@ -15,6 +15,9 @@ package flowexporter import ( + "fmt" + "strings" + "github.com/vmware/go-ipfix/pkg/registry" "antrea.io/antrea/pkg/apis/controlplane/v1beta2" @@ -24,6 +27,16 @@ const ( connectionDyingFlag = uint32(1 << 9) ) +var ( + Protocols = map[string]uint8{ + "icmp": 1, + "igmp": 2, + "tcp": 6, + "udp": 17, + "ipv6-icmp": 58, + } +) + // NewConnectionKey creates 5-tuple of flow as connection key func NewConnectionKey(conn *Connection) ConnectionKey { return conn.FlowKey @@ -84,3 +97,14 @@ func 
PolicyTypeToUint8(policyType v1beta2.NetworkPolicyType) uint8 { return registry.PolicyTypeK8sNetworkPolicy } } + +// LookupProtocolMap returns protocol identifier given protocol name +func LookupProtocolMap(name string) (uint8, error) { + name = strings.TrimSpace(name) + lowerCaseStr := strings.ToLower(name) + proto, found := Protocols[lowerCaseStr] + if !found { + return 0, fmt.Errorf("unknown IP protocol specified: %s", name) + } + return proto, nil +} diff --git a/pkg/agent/flowexporter/utils_test.go b/pkg/agent/flowexporter/utils_test.go index b5b788337f0..d7069ad3644 100644 --- a/pkg/agent/flowexporter/utils_test.go +++ b/pkg/agent/flowexporter/utils_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "antrea.io/antrea/pkg/apis/controlplane/v1beta2" ) @@ -96,3 +97,26 @@ func TestPolicyTypeToUint8(t *testing.T) { assert.Equal(t, tc.expectedResult, result) } } + +func TestLookupProtocolMap(t *testing.T) { + for _, tc := range []struct { + protocol string + expectedResult uint8 + }{ + {"icmp", 1}, + {"igmp", 2}, + {"tcp", 6}, + {"udp", 17}, + {"ipv6-icmp", 58}, + {"IPV6-ICMP", 58}, + {"mockProtocol", 0}, + } { + proto, err := LookupProtocolMap(tc.protocol) + if tc.expectedResult == 0 { + assert.ErrorContains(t, err, "unknown IP protocol specified") + } else { + require.NoError(t, err) + assert.Equal(t, tc.expectedResult, proto) + } + } +} diff --git a/pkg/agent/openflow/client.go b/pkg/agent/openflow/client.go index 209e1049c4e..fe46c981b40 100644 --- a/pkg/agent/openflow/client.go +++ b/pkg/agent/openflow/client.go @@ -924,7 +924,8 @@ func (c *client) generatePipelines() { c.enableMulticast, c.proxyAll, c.enableDSR, - c.enableTrafficControl) + c.enableTrafficControl, + c.enableL7FlowExporter) c.activatedFeatures = append(c.activatedFeatures, c.featurePodConnectivity) c.traceableFeatures = append(c.traceableFeatures, c.featurePodConnectivity) diff --git a/pkg/agent/openflow/client_test.go b/pkg/agent/openflow/client_test.go index 8a4167c199a..0dd6f5ec878 100644 --- a/pkg/agent/openflow/client_test.go +++ b/pkg/agent/openflow/client_test.go @@ -93,6 +93,7 @@ type clientOptions struct { enableTrafficControl bool enableMulticluster bool enableL7NetworkPolicy bool + enableL7FlowExporter bool } type clientOptionsFn func(*clientOptions) @@ -407,6 +408,7 @@ func newFakeClientWithBridge( o.connectUplinkToBridge, o.enableMulticast, o.enableTrafficControl, + o.enableL7FlowExporter, o.enableMulticluster, NewGroupAllocator(), false, @@ -2017,7 +2019,7 @@ func Test_client_setBasePacketOutBuilder(t *testing.T) { } func prepareSetBasePacketOutBuilder(ctrl *gomock.Controller, success bool) *client { - ofClient := NewClient(bridgeName, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, true, false, false, false, false, false, false, false, false, false, false, nil, false, defaultPacketInRate) + ofClient := NewClient(bridgeName, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, true, false, false, false, false, false, false, false, false, false, false, false, nil, false, defaultPacketInRate) m := ovsoftest.NewMockBridge(ctrl) ofClient.bridge = m bridge := binding.OFBridge{} diff --git a/pkg/agent/openflow/framework.go b/pkg/agent/openflow/framework.go index 26320404a48..d8c353ee2a2 100644 --- a/pkg/agent/openflow/framework.go +++ b/pkg/agent/openflow/framework.go @@ -203,7 +203,7 @@ func (f *featurePodConnectivity) getRequiredTables() []*Table { } } } - if f.enableTrafficControl { + if f.enableTrafficControl || 
f.enableL7FlowExporter { tables = append(tables, TrafficControlTable) } diff --git a/pkg/agent/openflow/pipeline.go b/pkg/agent/openflow/pipeline.go index 7f77b81362b..d6247594ce4 100644 --- a/pkg/agent/openflow/pipeline.go +++ b/pkg/agent/openflow/pipeline.go @@ -430,6 +430,7 @@ type client struct { enableEgressTrafficShaping bool enableMulticast bool enableTrafficControl bool + enableL7FlowExporter bool enableMulticluster bool enablePrometheusMetrics bool connectUplinkToBridge bool @@ -2938,6 +2939,7 @@ func NewClient(bridgeName string, connectUplinkToBridge bool, enableMulticast bool, enableTrafficControl bool, + enableL7FlowExporter bool, enableMulticluster bool, groupIDAllocator GroupAllocator, enablePrometheusMetrics bool, @@ -2957,6 +2959,7 @@ func NewClient(bridgeName string, enableEgressTrafficShaping: enableEgressTrafficShaping, enableMulticast: enableMulticast, enableTrafficControl: enableTrafficControl, + enableL7FlowExporter: enableL7FlowExporter, enableMulticluster: enableMulticluster, enablePrometheusMetrics: enablePrometheusMetrics, connectUplinkToBridge: connectUplinkToBridge, diff --git a/pkg/agent/openflow/pod_connectivity.go b/pkg/agent/openflow/pod_connectivity.go index ec9b366e65c..856ca571785 100644 --- a/pkg/agent/openflow/pod_connectivity.go +++ b/pkg/agent/openflow/pod_connectivity.go @@ -52,6 +52,7 @@ type featurePodConnectivity struct { proxyAll bool enableDSR bool enableTrafficControl bool + enableL7FlowExporter bool category cookie.Category } @@ -69,7 +70,8 @@ func newFeaturePodConnectivity( enableMulticast bool, proxyAll bool, enableDSR bool, - enableTrafficControl bool) *featurePodConnectivity { + enableTrafficControl bool, + enableL7FlowExporter bool) *featurePodConnectivity { ctZones := make(map[binding.Protocol]int) gatewayIPs := make(map[binding.Protocol]net.IP) localCIDRs := make(map[binding.Protocol]net.IPNet) @@ -122,6 +124,7 @@ func newFeaturePodConnectivity( networkConfig: networkConfig, connectUplinkToBridge: connectUplinkToBridge, enableTrafficControl: enableTrafficControl, + enableL7FlowExporter: enableL7FlowExporter, ipCtZoneTypeRegMarks: ipCtZoneTypeRegMarks, ctZoneSrcField: getZoneSrcField(connectUplinkToBridge), enableMulticast: enableMulticast, @@ -182,7 +185,7 @@ func (f *featurePodConnectivity) initFlows() []*openflow15.FlowMod { // Pod IP will take care of routing the traffic to destination Pod. flows = append(flows, f.l3FwdFlowToLocalPodCIDR()...) } - if f.enableTrafficControl { + if f.enableTrafficControl || f.enableL7FlowExporter { flows = append(flows, f.trafficControlCommonFlows()...) } return GetFlowModMessages(flows, binding.AddMessage) diff --git a/pkg/agent/types/annotations.go b/pkg/agent/types/annotations.go index 08660f72618..ca860d751d2 100644 --- a/pkg/agent/types/annotations.go +++ b/pkg/agent/types/annotations.go @@ -32,4 +32,7 @@ const ( // ServiceLoadBalancerModeAnnotationKey is the key of the Service annotation that specifies the Service's load balancer mode. ServiceLoadBalancerModeAnnotationKey string = "service.antrea.io/load-balancer-mode" + + // L7FlowExporterAnnotationKey is the key of the L7 network flow export annotation that enables L7 network flow export for annotated Pod or Namespace based on the value of annotation which is direction of traffic. 
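For illustration, this is roughly how a consumer of the annotation key declared on the next line could interpret a Pod-level value ("ingress", "egress" or "both"); a simplified sketch, not the agent's actual controller logic, which also considers Namespace annotations:

package main

import (
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// l7ExportAnnotation mirrors the key documented above.
const l7ExportAnnotation = "visibility.antrea.io/l7-export"

// directionFromPod reads the annotation and returns the requested traffic
// direction, if any. Validation here is intentionally minimal.
func directionFromPod(pod *v1.Pod) (string, bool) {
	v, ok := pod.Annotations[l7ExportAnnotation]
	if !ok {
		return "", false
	}
	switch d := strings.ToLower(v); d {
	case "ingress", "egress", "both":
		return d, true
	default:
		return "", false
	}
}

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:        "test-l7-flow-exporter",
		Namespace:   "default",
		Annotations: map[string]string{l7ExportAnnotation: "both"},
	}}
	if dir, ok := directionFromPod(pod); ok {
		fmt.Printf("export L7 flows for %s/%s, direction %s\n", pod.Namespace, pod.Name, dir)
	}
}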
+ L7FlowExporterAnnotationKey string = "visibility.antrea.io/l7-export" ) diff --git a/pkg/apiserver/handlers/featuregates/handler_test.go b/pkg/apiserver/handlers/featuregates/handler_test.go index 0cdfabdd3fb..b39ef17485e 100644 --- a/pkg/apiserver/handlers/featuregates/handler_test.go +++ b/pkg/apiserver/handlers/featuregates/handler_test.go @@ -62,6 +62,7 @@ func Test_getGatesResponse(t *testing.T) { {Component: "agent", Name: "ExternalNode", Status: "Disabled", Version: "ALPHA"}, {Component: "agent", Name: "FlowExporter", Status: "Disabled", Version: "ALPHA"}, {Component: "agent", Name: "IPsecCertAuth", Status: "Disabled", Version: "ALPHA"}, + {Component: "agent", Name: "L7FlowExporter", Status: "Disabled", Version: "ALPHA"}, {Component: "agent", Name: "L7NetworkPolicy", Status: "Disabled", Version: "ALPHA"}, {Component: "agent", Name: "LoadBalancerModeDSR", Status: "Disabled", Version: "ALPHA"}, {Component: "agent", Name: "Multicast", Status: multicastStatus, Version: "BETA"}, diff --git a/pkg/features/antrea_features.go b/pkg/features/antrea_features.go index f24a440a3cf..36fb997ffdc 100644 --- a/pkg/features/antrea_features.go +++ b/pkg/features/antrea_features.go @@ -154,6 +154,10 @@ const ( // alpha: v1.15 // Allows users to apply ClusterNetworkPolicy to Kubernetes Nodes. NodeNetworkPolicy featuregate.Feature = "NodeNetworkPolicy" + + // alpha: v1.15 + // Enable layer 7 flow export on Pods and Namespaces + L7FlowExporter featuregate.Feature = "L7FlowExporter" ) var ( @@ -194,6 +198,7 @@ var ( EgressTrafficShaping: {Default: false, PreRelease: featuregate.Alpha}, EgressSeparateSubnet: {Default: false, PreRelease: featuregate.Alpha}, NodeNetworkPolicy: {Default: false, PreRelease: featuregate.Alpha}, + L7FlowExporter: {Default: false, PreRelease: featuregate.Alpha}, } // AgentGates consists of all known feature gates for the Antrea Agent. @@ -223,6 +228,7 @@ var ( EgressTrafficShaping, EgressSeparateSubnet, NodeNetworkPolicy, + L7FlowExporter, ) // ControllerGates consists of all known feature gates for the Antrea Controller. @@ -269,6 +275,7 @@ var ( EgressTrafficShaping: {}, EgressSeparateSubnet: {}, NodeNetworkPolicy: {}, + L7FlowExporter: {}, } // supportedFeaturesOnExternalNode records the features supported on an external // Node. 
Antrea Agent checks the enabled features if it is running on an diff --git a/pkg/flowaggregator/clickhouseclient/clickhouseclient.go b/pkg/flowaggregator/clickhouseclient/clickhouseclient.go index 0cfea52cbf5..989061bafba 100644 --- a/pkg/flowaggregator/clickhouseclient/clickhouseclient.go +++ b/pkg/flowaggregator/clickhouseclient/clickhouseclient.go @@ -87,9 +87,12 @@ const ( reverseThroughputFromDestinationNode, clusterUUID, egressName, - egressIP) + egressIP, + appProtocolName, + httpVals) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, - ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)` + ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, + ?, ?, ?, ?)` ) // PrepareClickHouseConnection is used for unit testing @@ -329,6 +332,8 @@ func (ch *ClickHouseExportProcess) batchCommitAll(ctx context.Context) (int, err ch.clusterUUID, record.EgressName, record.EgressIP, + record.AppProtocolName, + record.HttpVals, ) if err != nil { diff --git a/pkg/flowaggregator/clickhouseclient/clickhouseclient_test.go b/pkg/flowaggregator/clickhouseclient/clickhouseclient_test.go index 7d415dd4f6c..2f601454f86 100644 --- a/pkg/flowaggregator/clickhouseclient/clickhouseclient_test.go +++ b/pkg/flowaggregator/clickhouseclient/clickhouseclient_test.go @@ -135,7 +135,9 @@ func TestBatchCommitAll(t *testing.T) { 12381346, fakeClusterUUID, "test-egress", - "172.18.0.1"). + "172.18.0.1", + "http", + "mockHttpString"). WillReturnResult(sqlmock.NewResult(0, 1)) mock.ExpectCommit() diff --git a/pkg/flowaggregator/flowlogger/logger.go b/pkg/flowaggregator/flowlogger/logger.go index 798589e7d65..fca7495ee75 100644 --- a/pkg/flowaggregator/flowlogger/logger.go +++ b/pkg/flowaggregator/flowlogger/logger.go @@ -116,6 +116,8 @@ func (fl *FlowLogger) WriteRecord(r *flowrecord.FlowRecord, prettyPrint bool) er egressNetworkPolicyType, r.EgressName, r.EgressIP, + r.AppProtocolName, + r.HttpVals, } str := strings.Join(fields, ",") diff --git a/pkg/flowaggregator/flowlogger/logger_test.go b/pkg/flowaggregator/flowlogger/logger_test.go index de682828c8b..d0c7ef40c29 100644 --- a/pkg/flowaggregator/flowlogger/logger_test.go +++ b/pkg/flowaggregator/flowlogger/logger_test.go @@ -70,11 +70,11 @@ func TestWriteRecord(t *testing.T) { }{ { prettyPrint: true, - expected: "1637706961,1637706973,10.10.0.79,10.10.0.80,44752,5201,TCP,perftest-a,antrea-test,k8s-node-control-plane,perftest-b,antrea-test-b,k8s-node-control-plane-b,10.10.1.10,5202,perftest,test-flow-aggregator-networkpolicy-ingress-allow,antrea-test-ns,test-flow-aggregator-networkpolicy-rule,Drop,K8sNetworkPolicy,test-flow-aggregator-networkpolicy-egress-allow,antrea-test-ns-e,test-flow-aggregator-networkpolicy-rule-e,Invalid,Invalid,test-egress,172.18.0.1", + expected: "1637706961,1637706973,10.10.0.79,10.10.0.80,44752,5201,TCP,perftest-a,antrea-test,k8s-node-control-plane,perftest-b,antrea-test-b,k8s-node-control-plane-b,10.10.1.10,5202,perftest,test-flow-aggregator-networkpolicy-ingress-allow,antrea-test-ns,test-flow-aggregator-networkpolicy-rule,Drop,K8sNetworkPolicy,test-flow-aggregator-networkpolicy-egress-allow,antrea-test-ns-e,test-flow-aggregator-networkpolicy-rule-e,Invalid,Invalid,test-egress,172.18.0.1,http,mockHttpString", }, { prettyPrint: false, - expected: 
"1637706961,1637706973,10.10.0.79,10.10.0.80,44752,5201,6,perftest-a,antrea-test,k8s-node-control-plane,perftest-b,antrea-test-b,k8s-node-control-plane-b,10.10.1.10,5202,perftest,test-flow-aggregator-networkpolicy-ingress-allow,antrea-test-ns,test-flow-aggregator-networkpolicy-rule,2,1,test-flow-aggregator-networkpolicy-egress-allow,antrea-test-ns-e,test-flow-aggregator-networkpolicy-rule-e,5,4,test-egress,172.18.0.1", + expected: "1637706961,1637706973,10.10.0.79,10.10.0.80,44752,5201,6,perftest-a,antrea-test,k8s-node-control-plane,perftest-b,antrea-test-b,k8s-node-control-plane-b,10.10.1.10,5202,perftest,test-flow-aggregator-networkpolicy-ingress-allow,antrea-test-ns,test-flow-aggregator-networkpolicy-rule,2,1,test-flow-aggregator-networkpolicy-egress-allow,antrea-test-ns-e,test-flow-aggregator-networkpolicy-rule-e,5,4,test-egress,172.18.0.1,http,mockHttpString", }, } diff --git a/pkg/flowaggregator/flowrecord/record.go b/pkg/flowaggregator/flowrecord/record.go index ed14f09f283..2213d682258 100644 --- a/pkg/flowaggregator/flowrecord/record.go +++ b/pkg/flowaggregator/flowrecord/record.go @@ -70,6 +70,8 @@ type FlowRecord struct { ReverseThroughputFromDestinationNode uint64 EgressName string EgressIP string + AppProtocolName string + HttpVals string } // GetFlowRecord converts ipfixentities.Record to FlowRecord @@ -228,6 +230,12 @@ func GetFlowRecord(record ipfixentities.Record) *FlowRecord { if egressIP, _, ok := record.GetInfoElementWithValue("egressIP"); ok { r.EgressIP = egressIP.GetStringValue() } + if appProtocolName, _, ok := record.GetInfoElementWithValue("appProtocolName"); ok { + r.AppProtocolName = appProtocolName.GetStringValue() + } + if httpVals, _, ok := record.GetInfoElementWithValue("httpVals"); ok { + r.HttpVals = httpVals.GetStringValue() + } return r } diff --git a/pkg/flowaggregator/flowrecord/record_test.go b/pkg/flowaggregator/flowrecord/record_test.go index dd3140f559a..9b512996551 100644 --- a/pkg/flowaggregator/flowrecord/record_test.go +++ b/pkg/flowaggregator/flowrecord/record_test.go @@ -88,6 +88,10 @@ func TestGetFlowRecord(t *testing.T) { assert.Equal(t, uint64(15902813474), flowRecord.ThroughputFromDestinationNode) assert.Equal(t, uint64(12381345), flowRecord.ReverseThroughputFromSourceNode) assert.Equal(t, uint64(12381346), flowRecord.ReverseThroughputFromDestinationNode) + assert.Equal(t, "test-egress", flowRecord.EgressName) + assert.Equal(t, "172.18.0.1", flowRecord.EgressIP) + assert.Equal(t, "http", flowRecord.AppProtocolName) + assert.Equal(t, "mockHttpString", flowRecord.HttpVals) if tc.isIPv4 { assert.Equal(t, "10.10.0.79", flowRecord.SourceIP) diff --git a/pkg/flowaggregator/flowrecord/testing/util.go b/pkg/flowaggregator/flowrecord/testing/util.go index 21eacd60e2b..e06ae31e164 100644 --- a/pkg/flowaggregator/flowrecord/testing/util.go +++ b/pkg/flowaggregator/flowrecord/testing/util.go @@ -72,5 +72,7 @@ func PrepareTestFlowRecord() *flowrecord.FlowRecord { ReverseThroughputFromDestinationNode: 12381346, EgressName: "test-egress", EgressIP: "172.18.0.1", + AppProtocolName: "http", + HttpVals: "mockHttpString", } } diff --git a/pkg/flowaggregator/infoelements/elements.go b/pkg/flowaggregator/infoelements/elements.go index f93f422398b..7cd851fe6f1 100644 --- a/pkg/flowaggregator/infoelements/elements.go +++ b/pkg/flowaggregator/infoelements/elements.go @@ -59,6 +59,8 @@ var ( "flowType", "egressName", "egressIP", + "appProtocolName", + "httpVals", } AntreaInfoElementsIPv4 = append(AntreaInfoElementsCommon, []string{"destinationClusterIPv4"}...) 
AntreaInfoElementsIPv6 = append(AntreaInfoElementsCommon, []string{"destinationClusterIPv6"}...) @@ -67,6 +69,7 @@ var ( "flowEndSeconds", "flowEndReason", "tcpState", + "httpVals", } StatsElementList = []string{ "octetDeltaCount", diff --git a/pkg/flowaggregator/s3uploader/s3uploader.go b/pkg/flowaggregator/s3uploader/s3uploader.go index 68431b10153..4239dcc6a6b 100644 --- a/pkg/flowaggregator/s3uploader/s3uploader.go +++ b/pkg/flowaggregator/s3uploader/s3uploader.go @@ -484,4 +484,8 @@ func writeRecord(w io.Writer, r *flowrecord.FlowRecord, clusterUUID string) { io.WriteString(w, r.EgressName) io.WriteString(w, ",") io.WriteString(w, r.EgressIP) + io.WriteString(w, ",") + io.WriteString(w, r.AppProtocolName) + io.WriteString(w, ",") + io.WriteString(w, r.HttpVals) } diff --git a/pkg/flowaggregator/s3uploader/s3uploader_test.go b/pkg/flowaggregator/s3uploader/s3uploader_test.go index f6a62760aa4..b452ca09d38 100644 --- a/pkg/flowaggregator/s3uploader/s3uploader_test.go +++ b/pkg/flowaggregator/s3uploader/s3uploader_test.go @@ -19,6 +19,7 @@ import ( "context" "fmt" "math/rand" + "strings" "testing" "time" @@ -37,8 +38,8 @@ import ( var ( fakeClusterUUID = uuid.New().String() - recordStrIPv4 = "1637706961,1637706973,1637706974,1637706975,3,10.10.0.79,10.10.0.80,44752,5201,6,823188,30472817041,241333,8982624938,471111,24500996,136211,7083284,perftest-a,antrea-test,k8s-node-control-plane,perftest-b,antrea-test-b,k8s-node-control-plane-b,10.10.1.10,5202,perftest,test-flow-aggregator-networkpolicy-ingress-allow,antrea-test-ns,test-flow-aggregator-networkpolicy-rule,2,1,test-flow-aggregator-networkpolicy-egress-allow,antrea-test-ns-e,test-flow-aggregator-networkpolicy-rule-e,5,4,TIME_WAIT,11,'{\"antrea-e2e\":\"perftest-a\",\"app\":\"iperf\"}','{\"antrea-e2e\":\"perftest-b\",\"app\":\"iperf\"}',15902813472,12381344,15902813473,15902813474,12381345,12381346," + fakeClusterUUID - recordStrIPv6 = "1637706961,1637706973,1637706974,1637706975,3,2001:0:3238:dfe1:63::fefb,2001:0:3238:dfe1:63::fefc,44752,5201,6,823188,30472817041,241333,8982624938,471111,24500996,136211,7083284,perftest-a,antrea-test,k8s-node-control-plane,perftest-b,antrea-test-b,k8s-node-control-plane-b,2001:0:3238:dfe1:64::a,5202,perftest,test-flow-aggregator-networkpolicy-ingress-allow,antrea-test-ns,test-flow-aggregator-networkpolicy-rule,2,1,test-flow-aggregator-networkpolicy-egress-allow,antrea-test-ns-e,test-flow-aggregator-networkpolicy-rule-e,5,4,TIME_WAIT,11,'{\"antrea-e2e\":\"perftest-a\",\"app\":\"iperf\"}','{\"antrea-e2e\":\"perftest-b\",\"app\":\"iperf\"}',15902813472,12381344,15902813473,15902813474,12381345,12381346," + fakeClusterUUID + recordStrIPv4 = "1637706961,1637706973,1637706974,1637706975,3,10.10.0.79,10.10.0.80,44752,5201,6,823188,30472817041,241333,8982624938,471111,24500996,136211,7083284,perftest-a,antrea-test,k8s-node-control-plane,perftest-b,antrea-test-b,k8s-node-control-plane-b,10.10.1.10,5202,perftest,test-flow-aggregator-networkpolicy-ingress-allow,antrea-test-ns,test-flow-aggregator-networkpolicy-rule,2,1,test-flow-aggregator-networkpolicy-egress-allow,antrea-test-ns-e,test-flow-aggregator-networkpolicy-rule-e,5,4,TIME_WAIT,11,'{\"antrea-e2e\":\"perftest-a\",\"app\":\"iperf\"}','{\"antrea-e2e\":\"perftest-b\",\"app\":\"iperf\"}',15902813472,12381344,15902813473,15902813474,12381345,12381346," + fakeClusterUUID + "," + fmt.Sprintf("%d", time.Now().Unix()) + ",test-egress,172.18.0.1,http,mockHttpString" + recordStrIPv6 = 
"1637706961,1637706973,1637706974,1637706975,3,2001:0:3238:dfe1:63::fefb,2001:0:3238:dfe1:63::fefc,44752,5201,6,823188,30472817041,241333,8982624938,471111,24500996,136211,7083284,perftest-a,antrea-test,k8s-node-control-plane,perftest-b,antrea-test-b,k8s-node-control-plane-b,2001:0:3238:dfe1:64::a,5202,perftest,test-flow-aggregator-networkpolicy-ingress-allow,antrea-test-ns,test-flow-aggregator-networkpolicy-rule,2,1,test-flow-aggregator-networkpolicy-egress-allow,antrea-test-ns-e,test-flow-aggregator-networkpolicy-rule-e,5,4,TIME_WAIT,11,'{\"antrea-e2e\":\"perftest-a\",\"app\":\"iperf\"}','{\"antrea-e2e\":\"perftest-b\",\"app\":\"iperf\"}',15902813472,12381344,15902813473,15902813474,12381345,12381346," + fakeClusterUUID + "," + fmt.Sprintf("%d", time.Now().Unix()) + ",test-egress,172.18.0.1,http,mockHttpString" ) const seed = 1 @@ -80,7 +81,9 @@ func TestCacheRecord(t *testing.T) { flowaggregatortesting.PrepareMockIpfixRecord(mockRecord, true) s3UploadProc.CacheRecord(mockRecord) assert.Equal(t, 1, s3UploadProc.cachedRecordCount) - assert.Contains(t, s3UploadProc.currentBuffer.String(), recordStrIPv4) + currentBuffer := strings.TrimRight(s3UploadProc.currentBuffer.String(), "\n") + assert.Equal(t, strings.Split(currentBuffer, ",")[:50], strings.Split(recordStrIPv4, ",")[:50]) + assert.Equal(t, strings.Split(currentBuffer, ",")[51:], strings.Split(recordStrIPv4, ",")[51:]) // Second call, reach currentBuffer max size, add the currentBuffer to bufferQueue. mockRecord = ipfixentitiestesting.NewMockRecord(ctrl) @@ -88,7 +91,9 @@ func TestCacheRecord(t *testing.T) { s3UploadProc.CacheRecord(mockRecord) assert.Equal(t, 1, len(s3UploadProc.bufferQueue)) buf := s3UploadProc.bufferQueue[0] - assert.Contains(t, buf.String(), recordStrIPv6) + currentBuf := strings.TrimRight(strings.Split(buf.String(), "\n")[1], "\n") + assert.Equal(t, strings.Split(currentBuf, ",")[:50], strings.Split(recordStrIPv6, ",")[:50]) + assert.Equal(t, strings.Split(currentBuf, ",")[51:], strings.Split(recordStrIPv6, ",")[51:]) assert.Equal(t, 0, s3UploadProc.cachedRecordCount) assert.Equal(t, "", s3UploadProc.currentBuffer.String()) } diff --git a/pkg/flowaggregator/testing/util.go b/pkg/flowaggregator/testing/util.go index 2aa9dd714f3..af73695c87d 100644 --- a/pkg/flowaggregator/testing/util.go +++ b/pkg/flowaggregator/testing/util.go @@ -215,6 +215,14 @@ func PrepareMockIpfixRecord(mockRecord *ipfixentitiestesting.MockRecord, isIPv4 egressIPElem.SetStringValue("172.18.0.1") mockRecord.EXPECT().GetInfoElementWithValue("egressIP").Return(egressIPElem, 0, true) + appProtocolNameElem := createElement("appProtocolName", ipfixregistry.AntreaEnterpriseID) + appProtocolNameElem.SetStringValue("http") + mockRecord.EXPECT().GetInfoElementWithValue("appProtocolName").Return(appProtocolNameElem, 0, true) + + httpValsElem := createElement("httpVals", ipfixregistry.AntreaEnterpriseID) + httpValsElem.SetStringValue("mockHttpString") + mockRecord.EXPECT().GetInfoElementWithValue("httpVals").Return(httpValsElem, 0, true) + if isIPv4 { sourceIPv4Elem := createElement("sourceIPv4Address", ipfixregistry.IANAEnterpriseID) sourceIPv4Elem.SetIPAddressValue(net.ParseIP("10.10.0.79")) diff --git a/test/e2e/charts/flow-visibility/templates/configmap.yaml b/test/e2e/charts/flow-visibility/templates/configmap.yaml index 611a2488a2a..5dc812926b6 100644 --- a/test/e2e/charts/flow-visibility/templates/configmap.yaml +++ b/test/e2e/charts/flow-visibility/templates/configmap.yaml @@ -76,7 +76,9 @@ data: clusterUUID String, trusted UInt8 DEFAULT 0, 
egressName String, - egressIP String + egressIP String, + appProtocolName String, + httpVals String ) engine=MergeTree ORDER BY (timeInserted, flowEndSeconds) TTL timeInserted + INTERVAL 1 HOUR diff --git a/test/e2e/flowaggregator_test.go b/test/e2e/flowaggregator_test.go index f5d48d7174b..877b71a0131 100644 --- a/test/e2e/flowaggregator_test.go +++ b/test/e2e/flowaggregator_test.go @@ -36,9 +36,11 @@ import ( "antrea.io/antrea/pkg/agent/config" "antrea.io/antrea/pkg/agent/openflow" + antreaagenttypes "antrea.io/antrea/pkg/agent/types" "antrea.io/antrea/pkg/antctl" "antrea.io/antrea/pkg/antctl/runtime" secv1beta1 "antrea.io/antrea/pkg/apis/crd/v1beta1" + "antrea.io/antrea/pkg/features" "antrea.io/antrea/pkg/flowaggregator/apiserver/handlers/recordmetrics" "antrea.io/antrea/test/e2e/utils" ) @@ -88,6 +90,8 @@ DATA SET: flowType: 1 egressName: test-egressbkclk egressIP: 172.18.0.2 + appProtocolName: http + httpVals: mockHttpString destinationClusterIPv4: 0.0.0.0 octetDeltaCountFromSourceNode: 8982624938 octetDeltaCountFromDestinationNode: 8982624938 @@ -214,11 +218,6 @@ func TestFlowAggregatorSecureConnection(t *testing.T) { if err != nil { t.Fatalf("Error when setting up test: %v", err) } - // Check recordmetrics of Flow Aggregator to make sure Antrea-agent Pods/ClickHouse/IPFIX collector and Flow Aggregator - // are correctly connected - if err := getAndCheckFlowAggregatorMetrics(t, data); err != nil { - t.Fatalf("Error when checking metrics of Flow Aggregator: %v", err) - } t.Run(o.name, func(t *testing.T) { defer func() { teardownTest(t, data) @@ -278,6 +277,10 @@ func TestFlowAggregator(t *testing.T) { t.Run("IPv6", func(t *testing.T) { testHelper(t, data, true) }) } + t.Run("L7FlowExporterController", func(t *testing.T) { + testL7FlowExporterControllerRun(t, data) + }) + } func checkIntraNodeFlows(t *testing.T, data *TestData, podAIPs, podBIPs *PodIPs, isIPv6 bool, labelFilter string) { @@ -1393,6 +1396,15 @@ func checkEgressInfoClickHouse(t *testing.T, record *ClickHouseFullRow, egressNa assert.Equal(t, egressIP, record.EgressIP, "Record does not have correct egressIP") } +func checkL7FlowExporterData(t *testing.T, record, appProtocolName string) { + assert.Containsf(t, record, fmt.Sprintf("appProtocolName: %s", appProtocolName), "Record does not have correct Layer 7 protocol Name") +} + +func checkL7FlowExporterDataClickHouse(t *testing.T, record *ClickHouseFullRow, appProtocolName string) { + assert.Equal(t, record.AppProtocolName, appProtocolName, "Record does not have correct Layer 7 protocol Name") + assert.NotEmpty(t, record.HttpVals, "Record does not have httpVals") +} + func getUint64FieldFromRecord(t *testing.T, record string, field string) uint64 { if strings.Contains(record, "TEMPLATE SET") { return 0 @@ -1865,6 +1877,60 @@ func getAndCheckFlowAggregatorMetrics(t *testing.T, data *TestData) error { return nil } +func testL7FlowExporterControllerRun(t *testing.T, data *TestData) { + skipIfFeatureDisabled(t, features.L7FlowExporter, true, false) + + clientPodName := "test-l7-flow-exporter" + clientPodLabels := map[string]string{"test-l7-flow-exporter-e2e": "true"} + clientPodAnnotations := map[string]string{antreaagenttypes.L7FlowExporterAnnotationKey: "both"} + cmd := []string{"sleep", "3600"} + + // Create a client Pod which will be selected by test L7 NetworkPolices. 
+ require.NoError(t, NewPodBuilder(clientPodName, data.testNamespace, toolboxImage).OnNode(nodeName(0)).WithCommand(cmd).WithLabels(clientPodLabels).WithAnnotations(clientPodAnnotations).Create(data)) + clientPodIP, err := data.podWaitForIPs(defaultTimeout, clientPodName, data.testNamespace) + require.NoErrorf(t, err, "Error when waiting for IP for Pod '%s': %v", clientPodName, err) + require.NoError(t, data.podWaitForRunning(defaultTimeout, clientPodName, data.testNamespace)) + + serverIPs := createToExternalTestServer(t, data) + srcIP := clientPodIP.IPv4.String() + dstIP := serverIPs.IPv4.String() + + // checkRecordsForToExternalFlows(t, data, nodeName(0), clientPodName, clientPodIP.ipv4.String(), serverIPs.ipv4.String(), serverPodPort, isIPv6, "", "") + cmd = []string{ + "curl", + fmt.Sprintf("http://%s:%d", serverIPs.IPv4.String(), serverPodPort), + } + stdout, stderr, err := data.RunCommandFromPod(data.testNamespace, clientPodName, toolboxContainerName, cmd) + require.NoErrorf(t, err, "Error when running curl command, stdout: %s, stderr: %s", stdout, stderr) + _, recordSlices := getCollectorOutput(t, srcIP, dstIP, "", false, false, false, data, "") + for _, record := range recordSlices { + if strings.Contains(record, srcIP) && strings.Contains(record, dstIP) { + // checkPodAndNodeData(t, record, clientPodName, nodeName(0), "", "", data.testNamespace) + assert := assert.New(t) + assert.Contains(record, clientPodName, "Record with srcIP does not have Pod name: %s", clientPodName) + assert.Contains(record, fmt.Sprintf("sourcePodNamespace: %s", data.testNamespace), "Record does not have correct sourcePodNamespace: %s", data.testNamespace) + assert.Contains(record, fmt.Sprintf("sourceNodeName: %s", nodeName(0)), "Record does not have correct sourceNodeName: %s", nodeName(0)) + assert.Contains(record, fmt.Sprintf("\"test-l7-flow-exporter-e2e\":\"true\""), "Record does not have correct label for source Pod") + + checkFlowType(t, record, ipfixregistry.FlowTypeToExternal) + checkL7FlowExporterData(t, record, "http") + } + } + + clickHouseRecords := getClickHouseOutput(t, data, srcIP, dstIP, "", false, false, "") + for _, record := range clickHouseRecords { + assert := assert.New(t) + assert.Equal(record.SourcePodName, clientPodName, "Record with srcIP does not have Pod name: %s", clientPodName) + assert.Equal(record.SourcePodNamespace, data.testNamespace, "Record does not have correct sourcePodNamespace: %s", data.testNamespace) + assert.Equal(record.SourceNodeName, nodeName(0), "Record does not have correct sourceNodeName: %s", nodeName(0)) + assert.Contains(record.SourcePodLabels, fmt.Sprintf("\"test-l7-flow-exporter-e2e\":\"true\""), "Record does not have correct label for source Pod") + + checkFlowTypeClickHouse(t, record, ipfixregistry.FlowTypeToExternal) + checkL7FlowExporterDataClickHouse(t, record, "http") + } + +} + type ClickHouseFullRow struct { TimeInserted time.Time `json:"timeInserted"` FlowStartSeconds time.Time `json:"flowStartSeconds"` @@ -1918,4 +1984,6 @@ type ClickHouseFullRow struct { Trusted uint8 `json:"trusted"` EgressName string `json:"egressName"` EgressIP string `json:"egressIP"` + AppProtocolName string `json:"appProtocolName"` + HttpVals string `json:"httpVals"` } diff --git a/test/integration/agent/flowexporter_test.go b/test/integration/agent/flowexporter_test.go index 1ce705bb6ba..f048b18f246 100644 --- a/test/integration/agent/flowexporter_test.go +++ b/test/integration/agent/flowexporter_test.go @@ -46,6 +46,13 @@ const ( testStaleConnectionTimeout = 5 * 
time.Minute ) +type fakel7EventMapGetter struct{} + +func (fll *fakel7EventMapGetter) ConsumeL7EventMap() map[flowexporter.ConnectionKey]connections.L7ProtocolFields { + l7EventsMap := make(map[flowexporter.ConnectionKey]connections.L7ProtocolFields) + return l7EventsMap +} + func createConnsForTest() ([]*flowexporter.Connection, []*flowexporter.ConnectionKey) { // Reference for flow timestamp refTime := time.Now() @@ -125,7 +132,7 @@ func TestConnectionStoreAndFlowRecords(t *testing.T) { IdleFlowTimeout: testIdleFlowTimeout, StaleConnectionTimeout: testStaleConnectionTimeout, PollInterval: testPollInterval} - conntrackConnStore := connections.NewConntrackConnectionStore(connDumperMock, true, false, npQuerier, mockPodStore, nil, o) + conntrackConnStore := connections.NewConntrackConnectionStore(connDumperMock, true, false, npQuerier, mockPodStore, nil, &fakel7EventMapGetter{}, o) // Expect calls for connStore.poll and other callees connDumperMock.EXPECT().DumpFlows(uint16(openflow.CtZone)).Return(testConns, 0, nil) connDumperMock.EXPECT().GetMaxConnections().Return(0, nil) diff --git a/test/integration/agent/openflow_test.go b/test/integration/agent/openflow_test.go index feed0be4863..57f48cf619b 100644 --- a/test/integration/agent/openflow_test.go +++ b/test/integration/agent/openflow_test.go @@ -120,7 +120,7 @@ func TestConnectivityFlows(t *testing.T) { antrearuntime.WindowsOS = runtime.GOOS } - c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, true, true, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) + c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, true, true, false, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge: %v", err)) defer func() { @@ -176,7 +176,7 @@ func TestAntreaFlexibleIPAMConnectivityFlows(t *testing.T) { legacyregistry.Reset() metrics.InitializeOVSMetrics() - c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, true, true, false, false, false, true, false, false, false, groupIDAllocator, false, defaultPacketInRate) + c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, true, true, false, false, false, true, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge: %v", err)) defer func() { @@ -239,7 +239,7 @@ func TestReplayFlowsConnectivityFlows(t *testing.T) { legacyregistry.Reset() metrics.InitializeOVSMetrics() - c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, true, true, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) + c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, true, true, false, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge: %v", err)) @@ -281,7 +281,7 @@ func TestReplayFlowsNetworkPolicyFlows(t *testing.T) { legacyregistry.Reset() metrics.InitializeOVSMetrics() - c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, false, 
false, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) + c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, false, false, false, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge: %v", err)) @@ -466,7 +466,7 @@ func TestNetworkPolicyFlows(t *testing.T) { legacyregistry.Reset() metrics.InitializeOVSMetrics() - c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, false, false, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) + c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, false, false, false, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge %s", br)) @@ -580,7 +580,7 @@ func TestIPv6ConnectivityFlows(t *testing.T) { legacyregistry.Reset() metrics.InitializeOVSMetrics() - c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, true, true, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) + c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, true, true, false, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge: %v", err)) @@ -621,7 +621,7 @@ func TestProxyServiceFlowsAntreaPolicyDisabled(t *testing.T) { legacyregistry.Reset() metrics.InitializeOVSMetrics() - c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, false, false, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) + c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, false, false, false, false, false, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge %s", br)) @@ -711,7 +711,7 @@ func TestProxyServiceFlowsAntreaPoilcyEnabled(t *testing.T) { legacyregistry.Reset() metrics.InitializeOVSMetrics() - c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, true, false, false, false, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) + c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), true, true, false, false, false, false, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge %s", br)) @@ -1793,7 +1793,7 @@ func testEgressMarkFlows(t *testing.T, trafficShaping bool) { legacyregistry.Reset() metrics.InitializeOVSMetrics() - c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), false, false, false, true, trafficShaping, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) + c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), false, false, false, true, 
trafficShaping, false, false, false, false, false, false, false, false, groupIDAllocator, false, defaultPacketInRate) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge %s", br)) @@ -1850,7 +1850,7 @@ func TestTrafficControlFlows(t *testing.T) { legacyregistry.Reset() metrics.InitializeOVSMetrics() - c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), false, false, false, false, false, false, false, false, false, false, true, false, groupIDAllocator, false, defaultPacketInRate) + c = ofClient.NewClient(br, bridgeMgmtAddr, nodeiptest.NewFakeNodeIPChecker(), false, false, false, false, false, false, false, false, false, false, true, false, false, groupIDAllocator, false, defaultPacketInRate) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge %s", br))
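For readers of this patch, a minimal sketch (assuming the antrea.io/antrea module is importable) of how a collector-side consumer could pick up the two new FlowRecord fields that GetFlowRecord now populates from the "appProtocolName" and "httpVals" information elements. The field values reuse the "http"/"mockHttpString" fixtures from the tests, and describeL7 is an illustrative helper, not code from the patch.

package main

import (
	"fmt"

	"antrea.io/antrea/pkg/flowaggregator/flowrecord"
)

// describeL7 summarizes the application-layer portion of an aggregated flow
// record. AppProtocolName and HttpVals are the string fields added by this
// change; they stay empty when no L7 data was exported for the flow.
func describeL7(r *flowrecord.FlowRecord) string {
	if r.AppProtocolName == "" {
		return "no L7 data exported for this flow"
	}
	return fmt.Sprintf("appProtocolName=%s httpVals=%s", r.AppProtocolName, r.HttpVals)
}

func main() {
	r := &flowrecord.FlowRecord{AppProtocolName: "http", HttpVals: "mockHttpString"}
	fmt.Println(describeL7(r))
}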
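The updated TestCacheRecord compares the buffered CSV row column-by-column while skipping index 50, the time-of-export column that changes on every run. Below is a small, self-contained sketch of the same technique, assuming (as the test fixtures do) that no field embeds a comma; equalExceptColumn and the sample rows are illustrative, not code from the patch.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// equalExceptColumn reports whether two comma-separated rows match in every
// column except the one at index skip (e.g. a per-run export timestamp).
func equalExceptColumn(got, want string, skip int) bool {
	g := strings.Split(got, ",")
	w := strings.Split(want, ",")
	if len(g) != len(w) || skip < 0 || skip >= len(g) {
		return false
	}
	return reflect.DeepEqual(g[:skip], w[:skip]) && reflect.DeepEqual(g[skip+1:], w[skip+1:])
}

func main() {
	got := "perftest-a,http,mockHttpString,1700000000"
	want := "perftest-a,http,mockHttpString,1699999999"
	// Only the timestamp (column 3) differs, so the rows compare equal.
	fmt.Println(equalExceptColumn(got, want, 3))
}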
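The new e2e test opts its client Pod into L7 flow export by setting the antreaagenttypes.L7FlowExporterAnnotationKey annotation to "both". The following is a minimal sketch, assuming a standard client-go setup, of applying the same annotation to an existing Pod; the kubeconfig path, Namespace, and Pod name are hypothetical placeholders.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"

	antreaagenttypes "antrea.io/antrea/pkg/agent/types"
)

func main() {
	// Hypothetical kubeconfig path, Namespace, and Pod name, for illustration only.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	pod, err := client.CoreV1().Pods("default").Get(context.TODO(), "my-app", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	if pod.Annotations == nil {
		pod.Annotations = map[string]string{}
	}
	// "both" is the value the e2e test uses for a Pod whose ingress and egress
	// flows should both carry application-layer data.
	pod.Annotations[antreaagenttypes.L7FlowExporterAnnotationKey] = "both"
	if _, err := client.CoreV1().Pods("default").Update(context.TODO(), pod, metav1.UpdateOptions{}); err != nil {
		panic(err)
	}
	fmt.Println("Pod annotated for L7 flow export")
}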