diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 38e13bc46..a27c46332 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -110,6 +110,9 @@ export IMAGE=quay.io/$USER/network-observability-operator:test export BUNDLE_IMAGE=quay.io/$USER/network-observability-operator-bundle:v0.0.0-test make images make bundle bundle-build bundle-push + +# or, alternatively: +BUNDLE_VERSION=0.0.0-test VERSION=test make images bundle bundle-build bundle-push ``` Optionally, you might validate the bundle: diff --git a/Makefile b/Makefile index 534425576..55c62ed68 100644 --- a/Makefile +++ b/Makefile @@ -266,24 +266,24 @@ doc: crdoc ## Generate markdown documentation $(CRDOC) --resources config/crd/bases/flows.netobserv.io_flowcollectors.yaml --output docs/FlowCollector.md generate-go-conversions: $(CONVERSION_GEN) ## Run all generate-go-conversions - $(MAKE) clean-generated-conversions SRC_DIRS="./apis/flowcollector/v1alpha1,./apis/flowcollector/v1beta1" + $(MAKE) clean-generated-conversions SRC_DIRS="./apis/flowcollector/v1beta1" $(CONVERSION_GEN) \ - --input-dirs=./apis/flowcollector/v1alpha1 \ --input-dirs=./apis/flowcollector/v1beta1 \ --build-tag=ignore_autogenerated_core \ --output-file-base=zz_generated.conversion \ $(CONVERSION_GEN_OUTPUT_BASE) \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt -.PHONY: hack-crd-for-test -hack-crd-for-test: YQ - cat ./config/crd/bases/flows.netobserv.io_flowcollectors.yaml \ - | $(YQ) eval-all \ - '(.spec.versions.[]|select(.name != "v1beta2").storage) = false,(.spec.versions.[]|select(.name == "v1beta2").storage) = true' \ - > ./hack/cloned.flows.netobserv.io_flowcollectors.yaml - cp ./config/crd/bases/flows.netobserv.io_flowmetrics.yaml ./hack/cloned.flows.netobserv.io_flowmetrics.yaml +# Hack to reintroduce when the API stored version != latest version; see also envtest.go (CRD path config) +# .PHONY: hack-crd-for-test +# hack-crd-for-test: YQ +# cat ./config/crd/bases/flows.netobserv.io_flowcollectors.yaml \ +# | $(YQ) eval-all \ +# '(.spec.versions.[]|select(.name != "v1beta2").storage) = false,(.spec.versions.[]|select(.name == "v1beta2").storage) = true' \ +# > ./hack/cloned.flows.netobserv.io_flowcollectors.yaml +# cp ./config/crd/bases/flows.netobserv.io_flowmetrics.yaml ./hack/cloned.flows.netobserv.io_flowmetrics.yaml -generate: gencode manifests hack-crd-for-test doc generate-go-conversions ## Run all code/file generators +generate: gencode manifests doc generate-go-conversions ## Run all code/file generators .PHONY: clean-generated-conversions clean-generated-conversions: ## Remove files generated by conversion-gen from the mentioned dirs diff --git a/PROJECT b/PROJECT index ce0de7cb3..3de0f51e9 100644 --- a/PROJECT +++ b/PROJECT @@ -28,6 +28,6 @@ resources: domain: netobserv.io group: flows kind: FlowMetric - path: github.com/netobserv/network-observability-operator/apis/flowcollector/v1alpha1 + path: github.com/netobserv/network-observability-operator/apis/flowmetrics/v1alpha1 version: v1alpha1 version: "3" diff --git a/apis/flowcollector/v1alpha1/doc.go b/apis/flowcollector/v1alpha1/doc.go deleted file mode 100644 index c76a4c855..000000000 --- a/apis/flowcollector/v1alpha1/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1aplha1 contains the v1alpha1 API implementation. -// +k8s:conversion-gen=github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2 -// -// Deprecated: This package will be removed in one of the next releases. -package v1alpha1 diff --git a/apis/flowcollector/v1alpha1/flowcollector_types.go b/apis/flowcollector/v1alpha1/flowcollector_types.go deleted file mode 100644 index c9af1bbad..000000000 --- a/apis/flowcollector/v1alpha1/flowcollector_types.go +++ /dev/null @@ -1,762 +0,0 @@ -/* -Copyright 2021. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package v1alpha1 - -import ( - ascv2 "k8s.io/api/autoscaling/v2" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - -const ( - AgentIPFIX = "IPFIX" - AgentEBPF = "EBPF" - DeploymentModelDirect = "DIRECT" - DeploymentModelKafka = "KAFKA" -) - -// Please notice that the FlowCollectorSpec's properties MUST redefine one of the default -// values to force the definition of the section when it is not provided by the manifest. -// This will cause that the remaining default fields will be set according to their definition. -// Otherwise, omitting the sections in the manifest would lead to zero-valued properties. -// This is a workaround for the related issue: -// https://github.com/kubernetes-sigs/controller-tools/issues/622 - -// FlowCollectorSpec defines the desired state of FlowCollector -type FlowCollectorSpec struct { - // Important: Run "make generate" to regenerate code after modifying this file - - // namespace where NetObserv pods are deployed. - // If empty, the namespace of the operator is going to be used. - // +optional - Namespace string `json:"namespace,omitempty"` - - // agent for flows extraction. - // +kubebuilder:default:={type:"EBPF"} - Agent FlowCollectorAgent `json:"agent"` - - // processor defines the settings of the component that receives the flows from the agent, - // enriches them, and forwards them to the Loki persistence layer. - Processor FlowCollectorFLP `json:"processor,omitempty"` - - // loki, the flow store, client settings. - Loki FlowCollectorLoki `json:"loki,omitempty"` - - // consolePlugin defines the settings related to the OpenShift Console plugin, when available. - ConsolePlugin FlowCollectorConsolePlugin `json:"consolePlugin,omitempty"` - - // deploymentModel defines the desired type of deployment for flow processing. 
Possible values are "DIRECT" (default) to make - // the flow processor listening directly from the agents, or "KAFKA" to make flows sent to a Kafka pipeline before consumption - // by the processor. - // Kafka can provide better scalability, resiliency and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka). - // +unionDiscriminator - // +kubebuilder:validation:Enum:="DIRECT";"KAFKA" - // +kubebuilder:validation:Required - // +kubebuilder:default:=DIRECT - DeploymentModel string `json:"deploymentModel"` - - // kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the "spec.deploymentModel" is "KAFKA". - // +optional - Kafka FlowCollectorKafka `json:"kafka,omitempty"` - - // exporters defines additional optional exporters for custom consumption or storage. This is an experimental feature. Currently, only KAFKA exporter is available. - // +optional - // +k8s:conversion-gen=false - Exporters []*FlowCollectorExporter `json:"exporters"` -} - -// FlowCollectorAgent is a discriminated union that allows to select either ipfix or ebpf, but does not -// allow defining both fields. -// +union -type FlowCollectorAgent struct { - // type selects the flows tracing agent. Possible values are "EBPF" (default) to use NetObserv eBPF agent, - // "IPFIX" to use the legacy IPFIX collector. "EBPF" is recommended in most cases as it offers better - // performances and should work regardless of the CNI installed on the cluster. - // "IPFIX" works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, - // but they would require manual configuration). - // +unionDiscriminator - // +kubebuilder:validation:Enum:="EBPF";"IPFIX" - // +kubebuilder:validation:Required - // +kubebuilder:default:=EBPF - Type string `json:"type"` - - // ipfix describes the settings related to the IPFIX-based flow reporter when the "agent.type" - // property is set to "IPFIX". - // +optional - IPFIX FlowCollectorIPFIX `json:"ipfix,omitempty"` - - // ebpf describes the settings related to the eBPF-based flow reporter when the "agent.type" - // property is set to "EBPF". - // +optional - EBPF FlowCollectorEBPF `json:"ebpf,omitempty"` -} - -// FlowCollectorIPFIX defines a FlowCollector that uses IPFIX on OVN-Kubernetes to collect the -// flows information -type FlowCollectorIPFIX struct { - // Important: Run "make generate" to regenerate code after modifying this file - - //+kubebuilder:validation:Pattern:=^\d+(ns|ms|s|m)?$ - //+kubebuilder:default:="20s" - // cacheActiveTimeout is the max period during which the reporter will aggregate flows before sending - CacheActiveTimeout string `json:"cacheActiveTimeout,omitempty" mapstructure:"cacheActiveTimeout,omitempty"` - - //+kubebuilder:validation:Minimum=0 - //+kubebuilder:default:=400 - // cacheMaxFlows is the max number of flows in an aggregate; when reached, the reporter sends the flows - CacheMaxFlows int32 `json:"cacheMaxFlows,omitempty" mapstructure:"cacheMaxFlows,omitempty"` - - //+kubebuilder:validation:Minimum=2 - //+kubebuilder:default:=400 - // sampling is the sampling rate on the reporter. 100 means one flow on 100 is sent. - // To ensure cluster stability, it is not possible to set a value below 2. - // If you really want to sample every packet, which might impact the cluster stability, - // refer to "forceSampleAll". Alternatively, you can use the eBPF Agent instead of IPFIX. 
- Sampling int32 `json:"sampling,omitempty" mapstructure:"sampling,omitempty"` - - //+kubebuilder:default:=false - // forceSampleAll allows disabling sampling in the IPFIX-based flow reporter. - // It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. - // If you REALLY want to do that, set this flag to true. Use at your own risk. - // When it is set to true, the value of "sampling" is ignored. - ForceSampleAll bool `json:"forceSampleAll,omitempty" mapstructure:"-"` - - // clusterNetworkOperator defines the settings related to the OpenShift Cluster Network Operator, when available. - ClusterNetworkOperator ClusterNetworkOperatorConfig `json:"clusterNetworkOperator,omitempty" mapstructure:"-"` - - // ovnKubernetes defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead. - OVNKubernetes OVNKubernetesConfig `json:"ovnKubernetes,omitempty" mapstructure:"-"` -} - -// FlowCollectorEBPF defines a FlowCollector that uses eBPF to collect the flows information -type FlowCollectorEBPF struct { - // Important: Run "make generate" to regenerate code after modifying this file - - //+kubebuilder:validation:Enum=IfNotPresent;Always;Never - //+kubebuilder:default:=IfNotPresent - // imagePullPolicy is the Kubernetes pull policy for the image defined above - ImagePullPolicy string `json:"imagePullPolicy,omitempty"` - - //+kubebuilder:default:={requests:{memory:"50Mi",cpu:"100m"},limits:{memory:"800Mi"}} - // resources are the compute resources required by this container. - // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - // +optional - Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` - - // sampling rate of the flow reporter. 100 means one flow on 100 is sent. 0 or 1 means all flows are sampled. - //+kubebuilder:validation:Minimum=0 - //+kubebuilder:default:=50 - //+optional - Sampling *int32 `json:"sampling,omitempty"` - - // cacheActiveTimeout is the max period during which the reporter will aggregate flows before sending. - // Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, - // however you can expect higher memory consumption and an increased latency in the flow collection. - //+kubebuilder:validation:Pattern:=^\d+(ns|ms|s|m)?$ - //+kubebuilder:default:="5s" - CacheActiveTimeout string `json:"cacheActiveTimeout,omitempty"` - - // cacheMaxFlows is the max number of flows in an aggregate; when reached, the reporter sends the flows. - // Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, - // however you can expect higher memory consumption and an increased latency in the flow collection. - //+kubebuilder:validation:Minimum=1 - //+kubebuilder:default:=100000 - CacheMaxFlows int32 `json:"cacheMaxFlows,omitempty"` - - // interfaces contains the interface names from where flows will be collected. If empty, the agent - // will fetch all the interfaces in the system, excepting the ones listed in ExcludeInterfaces. - // If an entry is enclosed by slashes (such as `/br-/`), it will match as regular expression, - // otherwise it will be matched as a case-sensitive string. 
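
For illustration only, the matching rule documented above for `interfaces` and `excludeInterfaces` boils down to this minimal sketch (the `matchesInterface` helper and its behavior on invalid patterns are assumptions, not the agent's actual code):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// matchesInterface applies the documented rule: entries wrapped in slashes
// (such as "/br-/") are treated as regular expressions, anything else is a
// case-sensitive literal comparison.
func matchesInterface(entry, ifaceName string) bool {
	if len(entry) > 1 && strings.HasPrefix(entry, "/") && strings.HasSuffix(entry, "/") {
		re, err := regexp.Compile(entry[1 : len(entry)-1])
		if err != nil {
			return false // assumption: an invalid pattern matches nothing
		}
		return re.MatchString(ifaceName)
	}
	return entry == ifaceName
}

func main() {
	fmt.Println(matchesInterface("/br-/", "br-ex")) // true: regexp match
	fmt.Println(matchesInterface("eth0", "eth0"))   // true: literal match
	fmt.Println(matchesInterface("eth0", "ETH0"))   // false: case-sensitive
}
```
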
- //+optional - Interfaces []string `json:"interfaces,omitempty"` - - // excludeInterfaces contains the interface names that will be excluded from flow tracing. - // If an entry is enclosed by slashes (such as `/br-/`), it will match as regular expression, - // otherwise it will be matched as a case-sensitive string. - //+kubebuilder:default:=lo; - ExcludeInterfaces []string `json:"excludeInterfaces,omitempty"` - - //+kubebuilder:validation:Enum=trace;debug;info;warn;error;fatal;panic - //+kubebuilder:default:=info - // logLevel defines the log level for the NetObserv eBPF Agent - LogLevel string `json:"logLevel,omitempty"` - - // privileged mode for the eBPF Agent container. In general this setting can be ignored or set to false: - // in that case, the operator will set granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) - // to the container, to enable its correct operation. - // If for some reason these capabilities cannot be set (for example old kernel version not knowing CAP_BPF) - // then you can turn on this mode for more global privileges. - // +optional - Privileged bool `json:"privileged,omitempty"` - - //+kubebuilder:default:=1048576 - // +optional - // kafkaBatchSize limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 1MB. - KafkaBatchSize int `json:"kafkaBatchSize"` - - // Debug allows setting some aspects of the internal configuration of the eBPF agent. - // This section is aimed exclusively for debugging and fine-grained performance optimizations - // (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk. - // +optional - Debug DebugConfig `json:"debug,omitempty"` -} - -// FlowCollectorKafka defines the desired Kafka config of FlowCollector -type FlowCollectorKafka struct { - // Important: Run "make generate" to regenerate code after modifying this file - - //+kubebuilder:default:="" - // address of the Kafka server - Address string `json:"address"` - - //+kubebuilder:default:="" - // kafka topic to use. It must exist, NetObserv will not create it. - Topic string `json:"topic"` - - // tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. - // Note that, when eBPF agents are used, Kafka certificate needs to be copied in the agent namespace (by default it's netobserv-privileged). - // +optional - TLS ClientTLS `json:"tls"` - - // SASL authentication configuration. [Unsupported (*)]. - // +optional - SASL SASLConfig `json:"sasl"` -} - -type FlowCollectorIPFIXReceiver struct { - //+kubebuilder:default:="" - // Address of the IPFIX external receiver - TargetHost string `json:"targetHost"` - - // Port for the IPFIX external receiver - TargetPort int `json:"targetPort"` - - // Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`. 
- // +unionDiscriminator - // +kubebuilder:validation:Enum:="TCP";"UDP" - // +optional - Transport string `json:"transport,omitempty"` -} - -const ( - ServerTLSDisabled = "DISABLED" - ServerTLSProvided = "PROVIDED" - ServerTLSAuto = "AUTO" -) - -type ServerTLSConfigType string - -// ServerTLS define the TLS configuration, server side -type ServerTLS struct { - // Select the type of TLS configuration - // "DISABLED" (default) to not configure TLS for the endpoint, "PROVIDED" to manually provide cert file and a key file, - // and "AUTO" to use OpenShift auto generated certificate using annotations - // +unionDiscriminator - // +kubebuilder:validation:Enum:="DISABLED";"PROVIDED";"AUTO" - // +kubebuilder:validation:Required - //+kubebuilder:default:="DISABLED" - Type ServerTLSConfigType `json:"type,omitempty"` - - // TLS configuration. - // +optional - Provided *CertificateReference `json:"provided"` -} - -// MetricsServerConfig define the metrics server endpoint configuration for Prometheus scraper -type MetricsServerConfig struct { - - //+kubebuilder:validation:Minimum=1 - //+kubebuilder:validation:Maximum=65535 - //+kubebuilder:default:=9102 - // the prometheus HTTP port - Port int32 `json:"port,omitempty"` - - // TLS configuration. - // +optional - TLS ServerTLS `json:"tls"` -} - -// FLPMetrics define the desired FLP configuration regarding metrics -type FLPMetrics struct { - // metricsServer endpoint configuration for Prometheus scraper - // +optional - Server MetricsServerConfig `json:"server,omitempty"` - - // ignoreTags is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . 
- // Available tags are: egress, ingress, flows, bytes, packets, namespaces, nodes, workloads - //+kubebuilder:default:={"egress","packets"} - IgnoreTags []string `json:"ignoreTags,omitempty"` -} - -// FlowCollectorFLP defines the desired flowlogs-pipeline state of FlowCollector -type FlowCollectorFLP struct { - // Important: Run "make generate" to regenerate code after modifying this file - - //+kubebuilder:validation:Minimum=1025 - //+kubebuilder:validation:Maximum=65535 - //+kubebuilder:default:=2055 - // port of the flow collector (host port) - // By conventions, some value are not authorized port must not be below 1024 and must not equal this values: - // 4789,6081,500, and 4500 - Port int32 `json:"port,omitempty"` - - //+kubebuilder:validation:Minimum=1 - //+kubebuilder:validation:Maximum=65535 - //+kubebuilder:default:=8080 - // healthPort is a collector HTTP port in the Pod that exposes the health check API - HealthPort int32 `json:"healthPort,omitempty"` - - //+kubebuilder:validation:Minimum=0 - //+kubebuilder:validation:Maximum=65535 - //+optional - // profilePort allows setting up a Go pprof profiler listening to this port - ProfilePort int32 `json:"profilePort,omitempty"` - - //+kubebuilder:validation:Enum=IfNotPresent;Always;Never - //+kubebuilder:default:=IfNotPresent - // imagePullPolicy is the Kubernetes pull policy for the image defined above - ImagePullPolicy string `json:"imagePullPolicy,omitempty"` - - // Metrics define the processor configuration regarding metrics - Metrics FLPMetrics `json:"metrics,omitempty"` - - //+kubebuilder:validation:Enum=trace;debug;info;warn;error;fatal;panic - //+kubebuilder:default:=info - // logLevel of the collector runtime - LogLevel string `json:"logLevel,omitempty"` - - //+kubebuilder:default:={requests:{memory:"100Mi",cpu:"100m"},limits:{memory:"800Mi"}} - // resources are the compute resources required by this container. - // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - // +optional - Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` - - //+kubebuilder:default:=true - // enableKubeProbes is a flag to enable or disable Kubernetes liveness and readiness probes - EnableKubeProbes bool `json:"enableKubeProbes,omitempty"` - - //+kubebuilder:default:=true - // dropUnusedFields allows, when set to true, to drop fields that are known to be unused by OVS, in order to save storage space. - DropUnusedFields bool `json:"dropUnusedFields,omitempty"` - - //+kubebuilder:validation:Minimum=0 - //+kubebuilder:default:=3 - // kafkaConsumerReplicas defines the number of replicas (pods) to start for flowlogs-pipeline-transformer, which consumes Kafka messages. - // This setting is ignored when Kafka is disabled. - KafkaConsumerReplicas int32 `json:"kafkaConsumerReplicas,omitempty"` - - // kafkaConsumerAutoscaler spec of a horizontal pod autoscaler to set up for flowlogs-pipeline-transformer, which consumes Kafka messages. - // This setting is ignored when Kafka is disabled. - // +optional - KafkaConsumerAutoscaler FlowCollectorHPA `json:"kafkaConsumerAutoscaler,omitempty"` - - //+kubebuilder:default:=1000 - // +optional - // kafkaConsumerQueueCapacity defines the capacity of the internal message queue used in the Kafka consumer client. Ignored when not using Kafka. 
- KafkaConsumerQueueCapacity int `json:"kafkaConsumerQueueCapacity"` - - //+kubebuilder:default:=10485760 - // +optional - // kafkaConsumerBatchSize indicates to the broker the maximum batch size, in bytes, that the consumer will accept. Ignored when not using Kafka. Default: 10MB. - KafkaConsumerBatchSize int `json:"kafkaConsumerBatchSize"` - - // Debug allows setting some aspects of the internal configuration of the flow processor. - // This section is aimed exclusively for debugging and fine-grained performance optimizations - // (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk. - // +optional - Debug DebugConfig `json:"debug,omitempty"` -} - -const ( - HPAStatusDisabled = "DISABLED" - HPAStatusEnabled = "ENABLED" -) - -type FlowCollectorHPA struct { - // +kubebuilder:validation:Enum:=DISABLED;ENABLED - // +kubebuilder:default:=DISABLED - // Status describe the desired status regarding deploying an horizontal pod autoscaler - // DISABLED will not deploy an horizontal pod autoscaler - // ENABLED will deploy an horizontal pod autoscaler - Status string `json:"status,omitempty"` - - // minReplicas is the lower limit for the number of replicas to which the autoscaler - // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the - // alpha feature gate HPAScaleToZero is enabled and at least one Object or External - // metric is configured. Scaling is active as long as at least one metric value is - // available. - // +optional - MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` - // maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. - // +kubebuilder:default:=3 - // +optional - MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` - // metrics used by the pod autoscaler - // +optional - Metrics []ascv2.MetricSpec `json:"metrics"` -} - -const ( - LokiAuthDisabled = "DISABLED" - LokiAuthUseHostToken = "HOST" - LokiAuthForwardUserToken = "FORWARD" -) - -// FlowCollectorLoki defines the desired state for FlowCollector's Loki client. -type FlowCollectorLoki struct { - //+kubebuilder:default:="http://loki:3100/" - // url is the address of an existing Loki service to push the flows to. When using the Loki Operator, - // set it to the Loki gateway service with the `network` tenant set in path, for example - // https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network. - URL string `json:"url,omitempty"` - - //+kubebuilder:validation:optional - // querierURL specifies the address of the Loki querier service, in case it is different from the - // Loki ingester URL. If empty, the URL value will be used (assuming that the Loki ingester - // and querier are in the same server). When using the Loki Operator, do not set it, since - // ingestion and queries use the Loki gateway. - QuerierURL string `json:"querierUrl,omitempty"` - - //+kubebuilder:validation:optional - // statusURL specifies the address of the Loki /ready /metrics /config endpoints, in case it is different from the - // Loki querier URL. If empty, the QuerierURL value will be used. - // This is useful to show error messages and some context in the frontend. - // When using the Loki Operator, set it to the Loki HTTP query frontend service, for example - // https://loki-query-frontend-http.netobserv.svc:3100/. 
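
The fallback behavior spelled out in the `url`, `querierUrl` and `statusUrl` comments above can be condensed into a small sketch; `resolveLokiURLs` is a hypothetical helper illustrating the documented defaults, not operator code:

```go
package main

import "fmt"

// resolveLokiURLs mirrors the documented defaults: querierUrl falls back to url,
// and statusUrl falls back to querierUrl.
func resolveLokiURLs(url, querierURL, statusURL string) (ingester, querier, status string) {
	ingester = url
	querier = querierURL
	if querier == "" {
		querier = ingester
	}
	status = statusURL
	if status == "" {
		status = querier
	}
	return
}

func main() {
	i, q, s := resolveLokiURLs("http://loki:3100/", "", "")
	fmt.Println(i, q, s) // all three resolve to http://loki:3100/
}
```
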
- StatusURL string `json:"statusUrl,omitempty"` - - //+kubebuilder:default:="netobserv" - // tenantID is the Loki X-Scope-OrgID that identifies the tenant for each request. - // When using the Loki Operator, set it to `network`, which corresponds to a special tenant mode. - TenantID string `json:"tenantID,omitempty"` - - // +kubebuilder:validation:Enum:="DISABLED";"HOST";"FORWARD" - //+kubebuilder:default:="DISABLED" - // AuthToken describe the way to get a token to authenticate to Loki. - // DISABLED will not send any token with the request. - // HOST will use the local pod service account to authenticate to Loki. - // FORWARD will forward user token, in this mode, pod that are not receiving user request like the processor will use the local pod service account. Similar to HOST mode. - // When using the Loki Operator, set it to `HOST` or `FORWARD`. - AuthToken string `json:"authToken,omitempty"` - - //+kubebuilder:default:="1s" - // batchWait is max time to wait before sending a batch. - BatchWait metav1.Duration `json:"batchWait,omitempty"` - - //+kubebuilder:validation:Minimum=1 - //+kubebuilder:default:=102400 - // batchSize is max batch size (in bytes) of logs to accumulate before sending. - BatchSize int64 `json:"batchSize,omitempty"` - - //+kubebuilder:default:="10s" - // timeout is the maximum time connection / request limit. - // A Timeout of zero means no timeout. - Timeout metav1.Duration `json:"timeout,omitempty"` - - //+kubebuilder:default:="1s" - // minBackoff is the initial backoff time for client connection between retries. - MinBackoff metav1.Duration `json:"minBackoff,omitempty"` - - //+kubebuilder:default:="5s" - // maxBackoff is the maximum backoff time for client connection between retries. - MaxBackoff metav1.Duration `json:"maxBackoff,omitempty"` - - //+kubebuilder:validation:Minimum=0 - //+kubebuilder:default:=2 - // maxRetries is the maximum number of retries for client connections. - MaxRetries int32 `json:"maxRetries,omitempty"` - - //+kubebuilder:default:={"app":"netobserv-flowcollector"} - // staticLabels is a map of common labels to set on each flow. - StaticLabels map[string]string `json:"staticLabels,omitempty"` - - // tls client configuration. - // +optional - TLS ClientTLS `json:"tls"` -} - -// FlowCollectorConsolePlugin defines the desired ConsolePlugin state of FlowCollector -type FlowCollectorConsolePlugin struct { - // Important: Run "make generate" to regenerate code after modifying this file - - //+kubebuilder:default:=true - // register allows, when set to true, to automatically register the provided console plugin with the OpenShift Console operator. - // When set to false, you can still register it manually by editing console.operator.openshift.io/cluster. - // E.g: oc patch console.operator.openshift.io cluster --type='json' -p '[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]' - Register bool `json:"register"` - - //+kubebuilder:validation:Minimum=0 - //+kubebuilder:default:=1 - // replicas defines the number of replicas (pods) to start. 
- Replicas int32 `json:"replicas,omitempty"` - - //+kubebuilder:validation:Minimum=1 - //+kubebuilder:validation:Maximum=65535 - //+kubebuilder:default:=9001 - // port is the plugin service port - Port int32 `json:"port,omitempty"` - - //+kubebuilder:validation:Enum=IfNotPresent;Always;Never - //+kubebuilder:default:=IfNotPresent - // imagePullPolicy is the Kubernetes pull policy for the image defined above - ImagePullPolicy string `json:"imagePullPolicy,omitempty"` - - //+kubebuilder:default:={requests:{memory:"50Mi",cpu:"100m"},limits:{memory:"100Mi"}} - // resources, in terms of compute resources, required by this container. - // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - // +optional - Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` - - //+kubebuilder:validation:Enum=trace;debug;info;warn;error;fatal;panic - //+kubebuilder:default:=info - // logLevel for the console plugin backend - LogLevel string `json:"logLevel,omitempty"` - - // autoscaler spec of a horizontal pod autoscaler to set up for the plugin Deployment. - // +optional - Autoscaler FlowCollectorHPA `json:"autoscaler,omitempty"` - - //+kubebuilder:default:={enable:true} - // portNaming defines the configuration of the port-to-service name translation - PortNaming ConsolePluginPortConfig `json:"portNaming,omitempty"` - - //+kubebuilder:default:={{name:"Applications",filter:{"src_namespace!":"openshift-,netobserv","dst_namespace!":"openshift-,netobserv"},default:true},{name:"Infrastructure",filter:{"src_namespace":"openshift-,netobserv","dst_namespace":"openshift-,netobserv"}},{name:"Pods network",filter:{"src_kind":"Pod","dst_kind":"Pod"},default:true},{name:"Services network",filter:{"dst_kind":"Service"}}} - // quickFilters configures quick filter presets for the Console plugin - QuickFilters []QuickFilter `json:"quickFilters,omitempty"` -} - -// Configuration of the port to service name translation feature of the console plugin -type ConsolePluginPortConfig struct { - //+kubebuilder:default:=true - // enable the console plugin port-to-service name translation - Enable bool `json:"enable,omitempty"` - - // portNames defines additional port names to use in the console. - // Example: portNames: {"3100": "loki"} - // +optional - PortNames map[string]string `json:"portNames,omitempty" yaml:"portNames,omitempty"` -} - -// QuickFilter defines preset configuration for Console's quick filters -type QuickFilter struct { - // name of the filter, that will be displayed in Console - // +kubebuilder:MinLength:=1 - Name string `json:"name"` - // filter is a set of keys and values to be set when this filter is selected. Each key can relate to a list of values using a coma-separated string. - // Example: filter: {"src_namespace": "namespace1,namespace2"} - // +kubebuilder:MinProperties:=1 - Filter map[string]string `json:"filter"` - // default defines whether this filter should be active by default or not - // +optional - Default bool `json:"default,omitempty"` -} - -// ClusterNetworkOperatorConfig defines the desired configuration related to the Cluster Network Configuration -type ClusterNetworkOperatorConfig struct { - // Important: Run "make generate" to regenerate code after modifying this file - - //+kubebuilder:default:=openshift-network-operator - // namespace where the config map is going to be deployed. 
- Namespace string `json:"namespace,omitempty"` -} - -// OVNKubernetesConfig defines the desired configuration related to the OVN-Kubernetes network provider, when Cluster Network Operator isn't installed. -type OVNKubernetesConfig struct { - // Important: Run "make generate" to regenerate code after modifying this file - - //+kubebuilder:default:=ovn-kubernetes - // namespace where OVN-Kubernetes pods are deployed. - Namespace string `json:"namespace,omitempty"` - - //+kubebuilder:default:=ovnkube-node - // daemonSetName defines the name of the DaemonSet controlling the OVN-Kubernetes pods. - DaemonSetName string `json:"daemonSetName,omitempty"` - - //+kubebuilder:default:=ovnkube-node - // containerName defines the name of the container to configure for IPFIX. - ContainerName string `json:"containerName,omitempty"` -} - -type MountableType string - -const ( - CertRefTypeSecret MountableType = "secret" - CertRefTypeConfigMap MountableType = "configmap" -) - -type FileReference struct { - //+kubebuilder:validation:Enum=configmap;secret - // Type for the file reference: "configmap" or "secret" - Type MountableType `json:"type,omitempty"` - - // Name of the config map or secret containing the file - Name string `json:"name,omitempty"` - - // Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. - // If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - // +optional - //+kubebuilder:default:="" - Namespace string `json:"namespace,omitempty"` - - // File name within the config map or secret - File string `json:"file,omitempty"` -} - -type CertificateReference struct { - //+kubebuilder:validation:Enum=configmap;secret - // type for the certificate reference: "configmap" or "secret" - Type MountableType `json:"type,omitempty"` - - // name of the config map or secret containing certificates - Name string `json:"name,omitempty"` - - // certFile defines the path to the certificate file name within the config map or secret - CertFile string `json:"certFile,omitempty"` - - // certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - // +optional - CertKey string `json:"certKey,omitempty"` - - // namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. - // If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. 
- // +optional - //+kubebuilder:default:="" - Namespace string `json:"namespace,omitempty"` -} - -// ClientTLS defines TLS client configuration -type ClientTLS struct { - //+kubebuilder:default:=false - // enable TLS - Enable bool `json:"enable,omitempty"` - - //+kubebuilder:default:=false - // insecureSkipVerify allows skipping client-side verification of the server certificate - // If set to true, CACert field will be ignored - InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"` - - // caCert defines the reference of the certificate for the Certificate Authority - CACert CertificateReference `json:"caCert,omitempty"` - - // userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS) - // +optional - UserCert CertificateReference `json:"userCert,omitempty"` -} - -type SASLType string - -const ( - SASLDisabled SASLType = "DISABLED" - SASLPlain SASLType = "PLAIN" - SASLScramSHA512 SASLType = "SCRAM-SHA512" -) - -// `SASLConfig` defines SASL configuration -type SASLConfig struct { - //+kubebuilder:validation:Enum=DISABLED;PLAIN;SCRAM-SHA512 - //+kubebuilder:default:=DISABLED - // Type of SASL authentication to use, or `DISABLED` if SASL is not used - Type SASLType `json:"type,omitempty"` - - // Reference to the secret or config map containing the client ID - ClientIDReference FileReference `json:"clientIDReference,omitempty"` - - // Reference to the secret or config map containing the client secret - ClientSecretReference FileReference `json:"clientSecretReference,omitempty"` -} - -// DebugConfig allows tweaking some aspects of the internal configuration of the agent and FLP. -// They are aimed exclusively for debugging. Users setting these values do it at their own risk. -type DebugConfig struct { - // env allows passing custom environment variables to the NetObserv Agent. Useful for passing - // some very concrete performance-tuning options (such as GOGC, GOMAXPROCS) that shouldn't be - // publicly exposed as part of the FlowCollector descriptor, as they are only useful - // in edge debug and support scenarios. - //+optional - Env map[string]string `json:"env,omitempty"` -} - -// Add more exporter types below -type ExporterType string - -const ( - KafkaExporter ExporterType = "KAFKA" -) - -// FlowCollectorExporter defines an additional exporter to send enriched flows to -type FlowCollectorExporter struct { - // `type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`. - // +unionDiscriminator - // +kubebuilder:validation:Enum:="KAFKA";"IPFIX" - // +kubebuilder:validation:Required - Type ExporterType `json:"type"` - - // kafka configuration, such as address or topic, to send enriched flows to. - // +optional - Kafka FlowCollectorKafka `json:"kafka,omitempty"` - - // IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. - // +optional - IPFIX FlowCollectorIPFIXReceiver `json:"ipfix,omitempty"` -} - -// FlowCollectorStatus defines the observed state of FlowCollector -type FlowCollectorStatus struct { - // Important: Run "make" to regenerate code after modifying this file - - // conditions represent the latest available observations of an object's state - Conditions []metav1.Condition `json:"conditions"` - - // namespace where console plugin and flowlogs-pipeline have been deployed. 
- Namespace string `json:"namespace,omitempty"` -} - -// +kubebuilder:deprecatedversion -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:resource:scope=Cluster -// +kubebuilder:printcolumn:name="Agent",type="string",JSONPath=`.spec.agent.type` -// +kubebuilder:printcolumn:name="Sampling (EBPF)",type="string",JSONPath=`.spec.agent.ebpf.sampling` -// +kubebuilder:printcolumn:name="Deployment Model",type="string",JSONPath=`.spec.deploymentModel` -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type=="Ready")].reason` - -// FlowCollector is the Schema for the flowcollectors API, which pilots and configures netflow collection. -// -// Deprecated: This package will be removed in one of the next releases. -type FlowCollector struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec FlowCollectorSpec `json:"spec,omitempty"` - Status FlowCollectorStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true - -// FlowCollectorList contains a list of FlowCollector -// -// Deprecated: This package will be removed in one of the next releases. -type FlowCollectorList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []FlowCollector `json:"items"` -} - -func init() { - SchemeBuilder.Register(&FlowCollector{}, &FlowCollectorList{}) -} diff --git a/apis/flowcollector/v1alpha1/flowcollector_webhook.go b/apis/flowcollector/v1alpha1/flowcollector_webhook.go deleted file mode 100644 index 924f4e9dc..000000000 --- a/apis/flowcollector/v1alpha1/flowcollector_webhook.go +++ /dev/null @@ -1,395 +0,0 @@ -/* -Copyright 2021. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "fmt" - - "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2" - utilconversion "github.com/netobserv/network-observability-operator/pkg/conversion" - "github.com/netobserv/network-observability-operator/pkg/helper" - "github.com/netobserv/network-observability-operator/pkg/metrics" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - apiconversion "k8s.io/apimachinery/pkg/conversion" - "sigs.k8s.io/controller-runtime/pkg/conversion" -) - -// ConvertTo converts this v1alpha1 FlowCollector to its v1beta2 equivalent (the conversion Hub) -// https://book.kubebuilder.io/multiversion-tutorial/conversion.html -func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta2.FlowCollector) - - if err := Convert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(r, dst, nil); err != nil { - return fmt.Errorf("copying v1alpha1.FlowCollector into v1beta2.FlowCollector: %w", err) - } - dst.Status.Conditions = make([]v1.Condition, len(r.Status.Conditions)) - copy(dst.Status.Conditions, r.Status.Conditions) - - // Manually restore data. 
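// The restore sequence below is the lossless round-trip pattern for spoke versions:
// ConvertFrom (further down) preserves the full v1beta2 object via utilconversion.MarshalData
// (typically stored as an annotation on the down-converted resource), and UnmarshalData
// reloads it here so that fields which only exist in v1beta2 (eBPF features, log types,
// advanced conversation timeouts, the metrics include list, and so on) survive a
// v1beta2 -> v1alpha1 -> v1beta2 round trip.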
- restored := &v1beta2.FlowCollector{} - if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok { - return err - } - - // Agent - if restored.Spec.Agent.EBPF.Features != nil { - dst.Spec.Agent.EBPF.Features = make([]v1beta2.AgentFeature, len(restored.Spec.Agent.EBPF.Features)) - copy(dst.Spec.Agent.EBPF.Features, restored.Spec.Agent.EBPF.Features) - } - - // Processor - dst.Spec.Processor.LogTypes = restored.Spec.Processor.LogTypes - if restored.Spec.Processor.Advanced.ConversationHeartbeatInterval != nil { - dst.Spec.Processor.Advanced.ConversationHeartbeatInterval = restored.Spec.Processor.Advanced.ConversationHeartbeatInterval - } - if restored.Spec.Processor.Advanced.ConversationEndTimeout != nil { - dst.Spec.Processor.Advanced.ConversationEndTimeout = restored.Spec.Processor.Advanced.ConversationEndTimeout - } - if restored.Spec.Processor.Advanced.ConversationTerminatingTimeout != nil { - dst.Spec.Processor.Advanced.ConversationTerminatingTimeout = restored.Spec.Processor.Advanced.ConversationTerminatingTimeout - } - if restored.Spec.Processor.Metrics.DisableAlerts != nil { - dst.Spec.Processor.Metrics.DisableAlerts = restored.Spec.Processor.Metrics.DisableAlerts - } - if restored.Spec.Processor.ClusterName != "" { - dst.Spec.Processor.ClusterName = restored.Spec.Processor.ClusterName - } - dst.Spec.Processor.AddZone = restored.Spec.Processor.AddZone - if restored.Spec.Processor.MultiClusterDeployment != nil { - dst.Spec.Processor.MultiClusterDeployment = restored.Spec.Processor.MultiClusterDeployment - } - - dst.Spec.Processor.Metrics.Server.TLS.InsecureSkipVerify = restored.Spec.Processor.Metrics.Server.TLS.InsecureSkipVerify - dst.Spec.Processor.Metrics.Server.TLS.ProvidedCaFile = restored.Spec.Processor.Metrics.Server.TLS.ProvidedCaFile - - // Kafka - dst.Spec.Kafka.SASL = restored.Spec.Kafka.SASL - - // Loki - dst.Spec.Loki.Enable = restored.Spec.Loki.Enable - - if restored.Spec.Processor.Metrics.IncludeList != nil { - list := make([]v1beta2.FLPMetric, len(*restored.Spec.Processor.Metrics.IncludeList)) - copy(list, *restored.Spec.Processor.Metrics.IncludeList) - dst.Spec.Processor.Metrics.IncludeList = &list - } - - return nil -} - -// ConvertFrom converts the hub version v1beta2 FlowCollector object to v1alpha1 -func (r *FlowCollector) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta2.FlowCollector) - - if err := Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(src, r, nil); err != nil { - return fmt.Errorf("copying v1beta2.FlowCollector into v1alpha1.FlowCollector: %w", err) - } - r.Status.Conditions = make([]v1.Condition, len(src.Status.Conditions)) - copy(r.Status.Conditions, src.Status.Conditions) - - // Preserve Hub data on down-conversion except for metadata - return utilconversion.MarshalData(src, r) -} - -func (r *FlowCollectorList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta2.FlowCollectorList) - return Convert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList(r, dst, nil) -} - -func (r *FlowCollectorList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta2.FlowCollectorList) - return Convert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList(src, r, nil) -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in *v1beta2.FlowCollectorFLP, out 
*FlowCollectorFLP, s apiconversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in, out, s) -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(in *v1beta2.FLPMetrics, out *FLPMetrics, s apiconversion.Scope) error { - return autoConvert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(in, out, s) -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in *v1beta2.FlowCollectorLoki, out *FlowCollectorLoki, s apiconversion.Scope) error { - // Note that, despite we loose namespace info here, this isn't an issue because it's going to be restored from annotations - manual := helper.NewLokiConfig(in, "") - out.URL = manual.IngesterURL - out.QuerierURL = manual.QuerierURL - out.StatusURL = manual.StatusURL - out.TenantID = manual.TenantID - out.AuthToken = utilconversion.PascalToUpper(string(manual.AuthToken), '_') - if err := Convert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(&manual.TLS, &out.TLS, nil); err != nil { - return fmt.Errorf("copying v1beta2.Loki.TLS into v1alpha1.Loki.TLS: %w", err) - } - return autoConvert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in, out, s) -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in *FlowCollectorLoki, out *v1beta2.FlowCollectorLoki, s apiconversion.Scope) error { - out.Mode = v1beta2.LokiModeManual - out.Manual = v1beta2.LokiManualParams{ - IngesterURL: in.URL, - QuerierURL: in.QuerierURL, - StatusURL: in.StatusURL, - TenantID: in.TenantID, - AuthToken: v1beta2.LokiAuthToken(utilconversion.UpperToPascal(in.AuthToken)), - } - // fallback on ingester url if querier is not set - if len(out.Manual.QuerierURL) == 0 { - out.Manual.QuerierURL = out.Manual.IngesterURL - } - if err := Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(&in.TLS, &out.Manual.TLS, nil); err != nil { - return fmt.Errorf("copying v1alpha1.Loki.TLS into v1beta2.Loki.Manual.TLS: %w", err) - } - return autoConvert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in, out, s) -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in *v1beta2.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s apiconversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in, out, s) -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s apiconversion.Scope) error { - return 
autoConvert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in, out, s) -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in *FlowCollectorSpec, out *v1beta2.FlowCollectorSpec, s apiconversion.Scope) error { - if err := autoConvert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in, out, s); err != nil { - return err - } - out.DeploymentModel = v1beta2.FlowCollectorDeploymentModel(utilconversion.UpperToPascal(in.DeploymentModel)) - out.Exporters = []*v1beta2.FlowCollectorExporter{} - for _, inExporter := range in.Exporters { - outExporter := &v1beta2.FlowCollectorExporter{} - if err := Convert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(inExporter, outExporter, s); err != nil { - return err - } - out.Exporters = append(out.Exporters, outExporter) - } - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in *v1beta2.FlowCollectorSpec, out *FlowCollectorSpec, s apiconversion.Scope) error { - if err := autoConvert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in, out, s); err != nil { - return err - } - out.DeploymentModel = utilconversion.PascalToUpper(string(in.DeploymentModel), '_') - out.Exporters = []*FlowCollectorExporter{} - for _, inExporter := range in.Exporters { - outExporter := &FlowCollectorExporter{} - if err := Convert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(inExporter, outExporter, s); err != nil { - return err - } - out.Exporters = append(out.Exporters, outExporter) - } - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s apiconversion.Scope) error { - if err := autoConvert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in, out, s); err != nil { - return err - } - out.Type = v1beta2.FlowCollectorAgentType(utilconversion.UpperToPascal(in.Type)) - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s apiconversion.Scope) error { - if err := autoConvert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in, out, s); err != nil { - return err - } - out.Type = utilconversion.PascalToUpper(string(in.Type), '_') - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1beta2.ServerTLS, s apiconversion.Scope) error { - if err := 
autoConvert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(in, out, s); err != nil { - return err - } - out.Type = v1beta2.ServerTLSConfigType(utilconversion.UpperToPascal(string(in.Type))) - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s apiconversion.Scope) error { - if err := autoConvert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in, out, s); err != nil { - return err - } - out.Type = ServerTLSConfigType(utilconversion.PascalToUpper(string(in.Type), '_')) - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta2.FlowCollectorHPA, s apiconversion.Scope) error { - if err := autoConvert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in, out, s); err != nil { - return err - } - out.Status = v1beta2.HPAStatus(utilconversion.UpperToPascal(in.Status)) - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in *v1beta2.FlowCollectorHPA, out *FlowCollectorHPA, s apiconversion.Scope) error { - if err := autoConvert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in, out, s); err != nil { - return err - } - out.Status = utilconversion.PascalToUpper(string(in.Status), '_') - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_SASLConfig_To_v1beta2_SASLConfig(in *SASLConfig, out *v1beta2.SASLConfig, s apiconversion.Scope) error { - if err := autoConvert_v1alpha1_SASLConfig_To_v1beta2_SASLConfig(in, out, s); err != nil { - return err - } - out.Type = v1beta2.SASLType(utilconversion.UpperToPascal(string(in.Type))) - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_SASLConfig_To_v1alpha1_SASLConfig(in *v1beta2.SASLConfig, out *SASLConfig, s apiconversion.Scope) error { - if err := autoConvert_v1beta2_SASLConfig_To_v1alpha1_SASLConfig(in, out, s); err != nil { - return err - } - out.Type = SASLType(utilconversion.PascalToUpper(string(in.Type), '_')) - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s apiconversion.Scope) error { - if err := autoConvert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in, out, s); err != nil { - return err - } - 
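// Like the other discriminated unions above, the exporter type needs this explicit mapping
// on top of the generated autoConvert call because the enum casing changed between
// versions: v1alpha1 stores upper-snake values ("KAFKA", "DISABLED", ...) while v1beta2
// uses their PascalCase counterparts (see utilconversion.UpperToPascal / PascalToUpper).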
out.Type = v1beta2.ExporterType(utilconversion.UpperToPascal(string(in.Type))) - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in *v1beta2.FlowCollectorExporter, out *FlowCollectorExporter, s apiconversion.Scope) error { - if err := autoConvert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in, out, s); err != nil { - return err - } - out.Type = ExporterType(utilconversion.PascalToUpper(string(in.Type), '_')) - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1beta1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out *v1beta2.FLPMetrics, s apiconversion.Scope) error { - out.IncludeList = metrics.GetAsIncludeList(in.IgnoreTags, nil) - return autoConvert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(in, out, s) -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta2.FlowCollectorConsolePlugin, s apiconversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in, out, s) -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s apiconversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in, out, s) -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_DebugConfig_To_v1beta2_AdvancedAgentConfig(in *DebugConfig, out *v1beta2.AdvancedAgentConfig, s apiconversion.Scope) error { - out.Env = in.Env - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_AdvancedAgentConfig_To_v1alpha1_DebugConfig(in *v1beta2.AdvancedAgentConfig, out *DebugConfig, s apiconversion.Scope) error { - out.Env = in.Env - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_DebugConfig_To_v1beta2_AdvancedProcessorConfig(in *DebugConfig, out *v1beta2.AdvancedProcessorConfig, s apiconversion.Scope) error { - out.Env = in.Env - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func 
Convert_v1beta2_AdvancedProcessorConfig_To_v1alpha1_DebugConfig(in *v1beta2.AdvancedProcessorConfig, out *DebugConfig, s apiconversion.Scope) error { - out.Env = in.Env - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s apiconversion.Scope) error { - out.Advanced = &v1beta2.AdvancedAgentConfig{ - Env: in.Debug.Env, - } - return autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in, out, s) -} diff --git a/apis/flowcollector/v1alpha1/groupversion_info.go b/apis/flowcollector/v1alpha1/groupversion_info.go deleted file mode 100644 index 913c57b7f..000000000 --- a/apis/flowcollector/v1alpha1/groupversion_info.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2021. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1alpha1 contains API Schema definitions for the flows v1alpha1 API group -// +kubebuilder:object:generate=true -// +groupName=flows.netobserv.io -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" -) - -var ( - // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: "flows.netobserv.io", Version: "v1alpha1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} - - // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme - localSchemeBuilder = SchemeBuilder.SchemeBuilder -) diff --git a/apis/flowcollector/v1alpha1/zz_generated.conversion.go b/apis/flowcollector/v1alpha1/zz_generated.conversion.go deleted file mode 100644 index 899103749..000000000 --- a/apis/flowcollector/v1alpha1/zz_generated.conversion.go +++ /dev/null @@ -1,1017 +0,0 @@ -//go:build !ignore_autogenerated_core -// +build !ignore_autogenerated_core - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - unsafe "unsafe" - - v1beta2 "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2" - v2 "k8s.io/api/autoscaling/v2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*CertificateReference)(nil), (*v1beta2.CertificateReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(a.(*CertificateReference), b.(*v1beta2.CertificateReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.CertificateReference)(nil), (*CertificateReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(a.(*v1beta2.CertificateReference), b.(*CertificateReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ClientTLS)(nil), (*v1beta2.ClientTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(a.(*ClientTLS), b.(*v1beta2.ClientTLS), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.ClientTLS)(nil), (*ClientTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(a.(*v1beta2.ClientTLS), b.(*ClientTLS), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ClusterNetworkOperatorConfig)(nil), (*v1beta2.ClusterNetworkOperatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(a.(*ClusterNetworkOperatorConfig), b.(*v1beta2.ClusterNetworkOperatorConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.ClusterNetworkOperatorConfig)(nil), (*ClusterNetworkOperatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(a.(*v1beta2.ClusterNetworkOperatorConfig), b.(*ClusterNetworkOperatorConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ConsolePluginPortConfig)(nil), (*v1beta2.ConsolePluginPortConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(a.(*ConsolePluginPortConfig), b.(*v1beta2.ConsolePluginPortConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.ConsolePluginPortConfig)(nil), (*ConsolePluginPortConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(a.(*v1beta2.ConsolePluginPortConfig), b.(*ConsolePluginPortConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*FileReference)(nil), (*v1beta2.FileReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha1_FileReference_To_v1beta2_FileReference(a.(*FileReference), b.(*v1beta2.FileReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FileReference)(nil), (*FileReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FileReference_To_v1alpha1_FileReference(a.(*v1beta2.FileReference), b.(*FileReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*FlowCollector)(nil), (*v1beta2.FlowCollector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(a.(*FlowCollector), b.(*v1beta2.FlowCollector), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollector)(nil), (*FlowCollector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(a.(*v1beta2.FlowCollector), b.(*FlowCollector), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIX)(nil), (*v1beta2.FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(a.(*FlowCollectorIPFIX), b.(*v1beta2.FlowCollectorIPFIX), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIX)(nil), (*FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(a.(*v1beta2.FlowCollectorIPFIX), b.(*FlowCollectorIPFIX), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIXReceiver)(nil), (*v1beta2.FlowCollectorIPFIXReceiver)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(a.(*FlowCollectorIPFIXReceiver), b.(*v1beta2.FlowCollectorIPFIXReceiver), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIXReceiver)(nil), (*FlowCollectorIPFIXReceiver)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(a.(*v1beta2.FlowCollectorIPFIXReceiver), b.(*FlowCollectorIPFIXReceiver), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*FlowCollectorKafka)(nil), (*v1beta2.FlowCollectorKafka)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(a.(*FlowCollectorKafka), b.(*v1beta2.FlowCollectorKafka), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorKafka)(nil), (*FlowCollectorKafka)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(a.(*v1beta2.FlowCollectorKafka), b.(*FlowCollectorKafka), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*FlowCollectorList)(nil), (*v1beta2.FlowCollectorList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList(a.(*FlowCollectorList), b.(*v1beta2.FlowCollectorList), scope) - }); err != nil { - return err - } - if err := 
s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorList)(nil), (*FlowCollectorList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList(a.(*v1beta2.FlowCollectorList), b.(*FlowCollectorList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*FlowCollectorStatus)(nil), (*v1beta2.FlowCollectorStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(a.(*FlowCollectorStatus), b.(*v1beta2.FlowCollectorStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorStatus)(nil), (*FlowCollectorStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(a.(*v1beta2.FlowCollectorStatus), b.(*FlowCollectorStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*MetricsServerConfig)(nil), (*v1beta2.MetricsServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(a.(*MetricsServerConfig), b.(*v1beta2.MetricsServerConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.MetricsServerConfig)(nil), (*MetricsServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(a.(*v1beta2.MetricsServerConfig), b.(*MetricsServerConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*OVNKubernetesConfig)(nil), (*v1beta2.OVNKubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(a.(*OVNKubernetesConfig), b.(*v1beta2.OVNKubernetesConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.OVNKubernetesConfig)(nil), (*OVNKubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(a.(*v1beta2.OVNKubernetesConfig), b.(*OVNKubernetesConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*QuickFilter)(nil), (*v1beta2.QuickFilter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_QuickFilter_To_v1beta2_QuickFilter(a.(*QuickFilter), b.(*v1beta2.QuickFilter), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.QuickFilter)(nil), (*QuickFilter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_QuickFilter_To_v1alpha1_QuickFilter(a.(*v1beta2.QuickFilter), b.(*QuickFilter), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*DebugConfig)(nil), (*v1beta2.AdvancedAgentConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_DebugConfig_To_v1beta2_AdvancedAgentConfig(a.(*DebugConfig), b.(*v1beta2.AdvancedAgentConfig), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*DebugConfig)(nil), (*v1beta2.AdvancedProcessorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_DebugConfig_To_v1beta2_AdvancedProcessorConfig(a.(*DebugConfig), b.(*v1beta2.AdvancedProcessorConfig), scope) - 
}); err != nil { - return err - } - if err := s.AddConversionFunc((*FLPMetrics)(nil), (*v1beta2.FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(a.(*FLPMetrics), b.(*v1beta2.FLPMetrics), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*FlowCollectorAgent)(nil), (*v1beta2.FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(a.(*FlowCollectorAgent), b.(*v1beta2.FlowCollectorAgent), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*FlowCollectorConsolePlugin)(nil), (*v1beta2.FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(a.(*FlowCollectorConsolePlugin), b.(*v1beta2.FlowCollectorConsolePlugin), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*FlowCollectorEBPF)(nil), (*v1beta2.FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(a.(*FlowCollectorEBPF), b.(*v1beta2.FlowCollectorEBPF), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*FlowCollectorExporter)(nil), (*v1beta2.FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(a.(*FlowCollectorExporter), b.(*v1beta2.FlowCollectorExporter), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*FlowCollectorFLP)(nil), (*v1beta2.FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(a.(*FlowCollectorFLP), b.(*v1beta2.FlowCollectorFLP), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*FlowCollectorHPA)(nil), (*v1beta2.FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(a.(*FlowCollectorHPA), b.(*v1beta2.FlowCollectorHPA), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*FlowCollectorLoki)(nil), (*v1beta2.FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(a.(*FlowCollectorLoki), b.(*v1beta2.FlowCollectorLoki), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*FlowCollectorSpec)(nil), (*v1beta2.FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(a.(*FlowCollectorSpec), b.(*v1beta2.FlowCollectorSpec), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*SASLConfig)(nil), (*v1beta2.SASLConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_SASLConfig_To_v1beta2_SASLConfig(a.(*SASLConfig), b.(*v1beta2.SASLConfig), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*ServerTLS)(nil), (*v1beta2.ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(a.(*ServerTLS), b.(*v1beta2.ServerTLS), scope) - }); err != nil { - return err - } - if err := 
s.AddConversionFunc((*v1beta2.AdvancedAgentConfig)(nil), (*DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_AdvancedAgentConfig_To_v1alpha1_DebugConfig(a.(*v1beta2.AdvancedAgentConfig), b.(*DebugConfig), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.AdvancedProcessorConfig)(nil), (*DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_AdvancedProcessorConfig_To_v1alpha1_DebugConfig(a.(*v1beta2.AdvancedProcessorConfig), b.(*DebugConfig), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.FLPMetrics)(nil), (*FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(a.(*v1beta2.FLPMetrics), b.(*FLPMetrics), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.FlowCollectorAgent)(nil), (*FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(a.(*v1beta2.FlowCollectorAgent), b.(*FlowCollectorAgent), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.FlowCollectorConsolePlugin)(nil), (*FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(a.(*v1beta2.FlowCollectorConsolePlugin), b.(*FlowCollectorConsolePlugin), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.FlowCollectorEBPF)(nil), (*FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(a.(*v1beta2.FlowCollectorEBPF), b.(*FlowCollectorEBPF), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.FlowCollectorExporter)(nil), (*FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(a.(*v1beta2.FlowCollectorExporter), b.(*FlowCollectorExporter), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.FlowCollectorFLP)(nil), (*FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(a.(*v1beta2.FlowCollectorFLP), b.(*FlowCollectorFLP), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.FlowCollectorHPA)(nil), (*FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(a.(*v1beta2.FlowCollectorHPA), b.(*FlowCollectorHPA), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.FlowCollectorLoki)(nil), (*FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(a.(*v1beta2.FlowCollectorLoki), b.(*FlowCollectorLoki), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.FlowCollectorSpec)(nil), (*FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(a.(*v1beta2.FlowCollectorSpec), b.(*FlowCollectorSpec), scope) - }); err != nil { - return err - } - if err := 
s.AddConversionFunc((*v1beta2.SASLConfig)(nil), (*SASLConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_SASLConfig_To_v1alpha1_SASLConfig(a.(*v1beta2.SASLConfig), b.(*SASLConfig), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.ServerTLS)(nil), (*ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(a.(*v1beta2.ServerTLS), b.(*ServerTLS), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(in *CertificateReference, out *v1beta2.CertificateReference, s conversion.Scope) error { - out.Type = v1beta2.MountableType(in.Type) - out.Name = in.Name - out.CertFile = in.CertFile - out.CertKey = in.CertKey - out.Namespace = in.Namespace - return nil -} - -// Convert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference is an autogenerated conversion function. -func Convert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(in *CertificateReference, out *v1beta2.CertificateReference, s conversion.Scope) error { - return autoConvert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(in, out, s) -} - -func autoConvert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(in *v1beta2.CertificateReference, out *CertificateReference, s conversion.Scope) error { - out.Type = MountableType(in.Type) - out.Name = in.Name - out.Namespace = in.Namespace - out.CertFile = in.CertFile - out.CertKey = in.CertKey - return nil -} - -// Convert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference is an autogenerated conversion function. -func Convert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(in *v1beta2.CertificateReference, out *CertificateReference, s conversion.Scope) error { - return autoConvert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(in, out, s) -} - -func autoConvert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(in *ClientTLS, out *v1beta2.ClientTLS, s conversion.Scope) error { - out.Enable = in.Enable - out.InsecureSkipVerify = in.InsecureSkipVerify - if err := Convert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(&in.CACert, &out.CACert, s); err != nil { - return err - } - if err := Convert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(&in.UserCert, &out.UserCert, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS is an autogenerated conversion function. -func Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(in *ClientTLS, out *v1beta2.ClientTLS, s conversion.Scope) error { - return autoConvert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(in, out, s) -} - -func autoConvert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(in *v1beta2.ClientTLS, out *ClientTLS, s conversion.Scope) error { - out.Enable = in.Enable - out.InsecureSkipVerify = in.InsecureSkipVerify - if err := Convert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(&in.CACert, &out.CACert, s); err != nil { - return err - } - if err := Convert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(&in.UserCert, &out.UserCert, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS is an autogenerated conversion function. 
-func Convert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(in *v1beta2.ClientTLS, out *ClientTLS, s conversion.Scope) error { - return autoConvert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(in, out, s) -} - -func autoConvert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(in *ClusterNetworkOperatorConfig, out *v1beta2.ClusterNetworkOperatorConfig, s conversion.Scope) error { - out.Namespace = in.Namespace - return nil -} - -// Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig is an autogenerated conversion function. -func Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(in *ClusterNetworkOperatorConfig, out *v1beta2.ClusterNetworkOperatorConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(in, out, s) -} - -func autoConvert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(in *v1beta2.ClusterNetworkOperatorConfig, out *ClusterNetworkOperatorConfig, s conversion.Scope) error { - out.Namespace = in.Namespace - return nil -} - -// Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig is an autogenerated conversion function. -func Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(in *v1beta2.ClusterNetworkOperatorConfig, out *ClusterNetworkOperatorConfig, s conversion.Scope) error { - return autoConvert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(in, out, s) -} - -func autoConvert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(in *ConsolePluginPortConfig, out *v1beta2.ConsolePluginPortConfig, s conversion.Scope) error { - if err := v1.Convert_bool_To_Pointer_bool(&in.Enable, &out.Enable, s); err != nil { - return err - } - out.PortNames = *(*map[string]string)(unsafe.Pointer(&in.PortNames)) - return nil -} - -// Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig is an autogenerated conversion function. -func Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(in *ConsolePluginPortConfig, out *v1beta2.ConsolePluginPortConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(in, out, s) -} - -func autoConvert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(in *v1beta2.ConsolePluginPortConfig, out *ConsolePluginPortConfig, s conversion.Scope) error { - if err := v1.Convert_Pointer_bool_To_bool(&in.Enable, &out.Enable, s); err != nil { - return err - } - out.PortNames = *(*map[string]string)(unsafe.Pointer(&in.PortNames)) - return nil -} - -// Convert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig is an autogenerated conversion function. 
-func Convert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(in *v1beta2.ConsolePluginPortConfig, out *ConsolePluginPortConfig, s conversion.Scope) error { - return autoConvert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(in, out, s) -} - -func autoConvert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out *v1beta2.FLPMetrics, s conversion.Scope) error { - if err := Convert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(&in.Server, &out.Server, s); err != nil { - return err - } - // WARNING: in.IgnoreTags requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(in *v1beta2.FLPMetrics, out *FLPMetrics, s conversion.Scope) error { - if err := Convert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(&in.Server, &out.Server, s); err != nil { - return err - } - // WARNING: in.IncludeList requires manual conversion: does not exist in peer-type - // WARNING: in.DisableAlerts requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1alpha1_FileReference_To_v1beta2_FileReference(in *FileReference, out *v1beta2.FileReference, s conversion.Scope) error { - out.Type = v1beta2.MountableType(in.Type) - out.Name = in.Name - out.Namespace = in.Namespace - out.File = in.File - return nil -} - -// Convert_v1alpha1_FileReference_To_v1beta2_FileReference is an autogenerated conversion function. -func Convert_v1alpha1_FileReference_To_v1beta2_FileReference(in *FileReference, out *v1beta2.FileReference, s conversion.Scope) error { - return autoConvert_v1alpha1_FileReference_To_v1beta2_FileReference(in, out, s) -} - -func autoConvert_v1beta2_FileReference_To_v1alpha1_FileReference(in *v1beta2.FileReference, out *FileReference, s conversion.Scope) error { - out.Type = MountableType(in.Type) - out.Name = in.Name - out.Namespace = in.Namespace - out.File = in.File - return nil -} - -// Convert_v1beta2_FileReference_To_v1alpha1_FileReference is an autogenerated conversion function. -func Convert_v1beta2_FileReference_To_v1alpha1_FileReference(in *v1beta2.FileReference, out *FileReference, s conversion.Scope) error { - return autoConvert_v1beta2_FileReference_To_v1alpha1_FileReference(in, out, s) -} - -func autoConvert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(in *FlowCollector, out *v1beta2.FlowCollector, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector is an autogenerated conversion function. 
-func Convert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(in *FlowCollector, out *v1beta2.FlowCollector, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(in, out, s) -} - -func autoConvert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(in *v1beta2.FlowCollector, out *FlowCollector, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector is an autogenerated conversion function. -func Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(in *v1beta2.FlowCollector, out *FlowCollector, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(in, out, s) -} - -func autoConvert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s conversion.Scope) error { - out.Type = v1beta2.FlowCollectorAgentType(in.Type) - if err := Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { - return err - } - if err := Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { - out.Type = string(in.Type) - if err := Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { - return err - } - if err := Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta2.FlowCollectorConsolePlugin, s conversion.Scope) error { - // WARNING: in.Register requires manual conversion: does not exist in peer-type - if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { - return err - } - // WARNING: in.Port requires manual conversion: does not exist in peer-type - out.ImagePullPolicy = in.ImagePullPolicy - out.Resources = in.Resources - out.LogLevel = in.LogLevel - if err := Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(&in.Autoscaler, &out.Autoscaler, s); err != nil { - return err - } - if err := Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(&in.PortNaming, &out.PortNaming, s); err != nil { - return err - } - out.QuickFilters = *(*[]v1beta2.QuickFilter)(unsafe.Pointer(&in.QuickFilters)) - return nil -} - -func autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in *v1beta2.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s conversion.Scope) error { - // WARNING: in.Enable requires manual conversion: does not exist in peer-type - if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { - return err - } - out.ImagePullPolicy = in.ImagePullPolicy - out.Resources = in.Resources - out.LogLevel = in.LogLevel - if err := Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(&in.Autoscaler, &out.Autoscaler, s); err != nil { - 
return err - } - if err := Convert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(&in.PortNaming, &out.PortNaming, s); err != nil { - return err - } - out.QuickFilters = *(*[]QuickFilter)(unsafe.Pointer(&in.QuickFilters)) - // WARNING: in.Advanced requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { - out.ImagePullPolicy = in.ImagePullPolicy - out.Resources = in.Resources - out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Interfaces = *(*[]string)(unsafe.Pointer(&in.Interfaces)) - out.ExcludeInterfaces = *(*[]string)(unsafe.Pointer(&in.ExcludeInterfaces)) - out.LogLevel = in.LogLevel - out.Privileged = in.Privileged - out.KafkaBatchSize = in.KafkaBatchSize - // WARNING: in.Debug requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { - out.ImagePullPolicy = in.ImagePullPolicy - out.Resources = in.Resources - out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Interfaces = *(*[]string)(unsafe.Pointer(&in.Interfaces)) - out.ExcludeInterfaces = *(*[]string)(unsafe.Pointer(&in.ExcludeInterfaces)) - out.LogLevel = in.LogLevel - out.Privileged = in.Privileged - out.KafkaBatchSize = in.KafkaBatchSize - // WARNING: in.Advanced requires manual conversion: does not exist in peer-type - // WARNING: in.Features requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s conversion.Scope) error { - out.Type = v1beta2.ExporterType(in.Type) - if err := Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { - return err - } - if err := Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in *v1beta2.FlowCollectorExporter, out *FlowCollectorExporter, s conversion.Scope) error { - out.Type = ExporterType(in.Type) - if err := Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { - return err - } - if err := Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s conversion.Scope) error { - // WARNING: in.Port requires manual conversion: does not exist in peer-type - // WARNING: in.HealthPort requires manual conversion: does not exist in peer-type - // WARNING: in.ProfilePort requires manual conversion: does not exist in peer-type - out.ImagePullPolicy = in.ImagePullPolicy - if err := Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(&in.Metrics, &out.Metrics, s); err != nil { - return err - } - out.LogLevel = in.LogLevel - out.Resources = in.Resources - // WARNING: in.EnableKubeProbes 
requires manual conversion: does not exist in peer-type - // WARNING: in.DropUnusedFields requires manual conversion: does not exist in peer-type - if err := v1.Convert_int32_To_Pointer_int32(&in.KafkaConsumerReplicas, &out.KafkaConsumerReplicas, s); err != nil { - return err - } - if err := Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(&in.KafkaConsumerAutoscaler, &out.KafkaConsumerAutoscaler, s); err != nil { - return err - } - out.KafkaConsumerQueueCapacity = in.KafkaConsumerQueueCapacity - out.KafkaConsumerBatchSize = in.KafkaConsumerBatchSize - // WARNING: in.Debug requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in *v1beta2.FlowCollectorFLP, out *FlowCollectorFLP, s conversion.Scope) error { - out.ImagePullPolicy = in.ImagePullPolicy - if err := Convert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(&in.Metrics, &out.Metrics, s); err != nil { - return err - } - out.LogLevel = in.LogLevel - out.Resources = in.Resources - if err := v1.Convert_Pointer_int32_To_int32(&in.KafkaConsumerReplicas, &out.KafkaConsumerReplicas, s); err != nil { - return err - } - if err := Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(&in.KafkaConsumerAutoscaler, &out.KafkaConsumerAutoscaler, s); err != nil { - return err - } - out.KafkaConsumerQueueCapacity = in.KafkaConsumerQueueCapacity - out.KafkaConsumerBatchSize = in.KafkaConsumerBatchSize - // WARNING: in.LogTypes requires manual conversion: does not exist in peer-type - // WARNING: in.ClusterName requires manual conversion: does not exist in peer-type - // WARNING: in.MultiClusterDeployment requires manual conversion: does not exist in peer-type - // WARNING: in.AddZone requires manual conversion: does not exist in peer-type - // WARNING: in.Advanced requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta2.FlowCollectorHPA, s conversion.Scope) error { - out.Status = v1beta2.HPAStatus(in.Status) - out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) - out.MaxReplicas = in.MaxReplicas - out.Metrics = *(*[]v2.MetricSpec)(unsafe.Pointer(&in.Metrics)) - return nil -} - -func autoConvert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in *v1beta2.FlowCollectorHPA, out *FlowCollectorHPA, s conversion.Scope) error { - out.Status = string(in.Status) - out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) - out.MaxReplicas = in.MaxReplicas - out.Metrics = *(*[]v2.MetricSpec)(unsafe.Pointer(&in.Metrics)) - return nil -} - -func autoConvert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Sampling = in.Sampling - out.ForceSampleAll = in.ForceSampleAll - if err := Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { - return err - } - if err := Convert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX is an autogenerated conversion function. 
-func Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Sampling = in.Sampling - out.ForceSampleAll = in.ForceSampleAll - if err := Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { - return err - } - if err := Convert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX is an autogenerated conversion function. -func Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in, out, s) -} - -func autoConvert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIPFIXReceiver, s conversion.Scope) error { - out.TargetHost = in.TargetHost - out.TargetPort = in.TargetPort - out.Transport = in.Transport - return nil -} - -// Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIPFIXReceiver, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(in *v1beta2.FlowCollectorIPFIXReceiver, out *FlowCollectorIPFIXReceiver, s conversion.Scope) error { - out.TargetHost = in.TargetHost - out.TargetPort = in.TargetPort - out.Transport = in.Transport - return nil -} - -// Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver is an autogenerated conversion function. -func Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(in *v1beta2.FlowCollectorIPFIXReceiver, out *FlowCollectorIPFIXReceiver, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(in, out, s) -} - -func autoConvert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta2.FlowCollectorKafka, s conversion.Scope) error { - out.Address = in.Address - out.Topic = in.Topic - if err := Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(&in.TLS, &out.TLS, s); err != nil { - return err - } - if err := Convert_v1alpha1_SASLConfig_To_v1beta2_SASLConfig(&in.SASL, &out.SASL, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka is an autogenerated conversion function. 
-func Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta2.FlowCollectorKafka, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(in *v1beta2.FlowCollectorKafka, out *FlowCollectorKafka, s conversion.Scope) error { - out.Address = in.Address - out.Topic = in.Topic - if err := Convert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(&in.TLS, &out.TLS, s); err != nil { - return err - } - if err := Convert_v1beta2_SASLConfig_To_v1alpha1_SASLConfig(&in.SASL, &out.SASL, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka is an autogenerated conversion function. -func Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(in *v1beta2.FlowCollectorKafka, out *FlowCollectorKafka, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(in, out, s) -} - -func autoConvert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList(in *FlowCollectorList, out *v1beta2.FlowCollectorList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1beta2.FlowCollector, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList(in *FlowCollectorList, out *v1beta2.FlowCollectorList, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList(in *v1beta2.FlowCollectorList, out *FlowCollectorList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]FlowCollector, len(*in)) - for i := range *in { - if err := Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList is an autogenerated conversion function. 
-func Convert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList(in *v1beta2.FlowCollectorList, out *FlowCollectorList, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList(in, out, s) -} - -func autoConvert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in *FlowCollectorLoki, out *v1beta2.FlowCollectorLoki, s conversion.Scope) error { - // WARNING: in.URL requires manual conversion: does not exist in peer-type - // WARNING: in.QuerierURL requires manual conversion: does not exist in peer-type - // WARNING: in.StatusURL requires manual conversion: does not exist in peer-type - // WARNING: in.TenantID requires manual conversion: does not exist in peer-type - // WARNING: in.AuthToken requires manual conversion: does not exist in peer-type - // WARNING: in.BatchWait requires manual conversion: does not exist in peer-type - // WARNING: in.BatchSize requires manual conversion: does not exist in peer-type - // WARNING: in.Timeout requires manual conversion: does not exist in peer-type - // WARNING: in.MinBackoff requires manual conversion: does not exist in peer-type - // WARNING: in.MaxBackoff requires manual conversion: does not exist in peer-type - // WARNING: in.MaxRetries requires manual conversion: does not exist in peer-type - // WARNING: in.StaticLabels requires manual conversion: does not exist in peer-type - // WARNING: in.TLS requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in *v1beta2.FlowCollectorLoki, out *FlowCollectorLoki, s conversion.Scope) error { - // WARNING: in.Enable requires manual conversion: does not exist in peer-type - // WARNING: in.Mode requires manual conversion: does not exist in peer-type - // WARNING: in.Manual requires manual conversion: does not exist in peer-type - // WARNING: in.Microservices requires manual conversion: does not exist in peer-type - // WARNING: in.Monolithic requires manual conversion: does not exist in peer-type - // WARNING: in.LokiStack requires manual conversion: does not exist in peer-type - // WARNING: in.ReadTimeout requires manual conversion: does not exist in peer-type - // WARNING: in.WriteTimeout requires manual conversion: does not exist in peer-type - // WARNING: in.WriteBatchWait requires manual conversion: does not exist in peer-type - // WARNING: in.WriteBatchSize requires manual conversion: does not exist in peer-type - // WARNING: in.Advanced requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in *FlowCollectorSpec, out *v1beta2.FlowCollectorSpec, s conversion.Scope) error { - out.Namespace = in.Namespace - if err := Convert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(&in.Agent, &out.Agent, s); err != nil { - return err - } - if err := Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(&in.Processor, &out.Processor, s); err != nil { - return err - } - if err := Convert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(&in.Loki, &out.Loki, s); err != nil { - return err - } - if err := Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil { - return err - } - out.DeploymentModel = v1beta2.FlowCollectorDeploymentModel(in.DeploymentModel) - if err := Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { - 
return err - } - // INFO: in.Exporters opted out of conversion generation - return nil -} - -func autoConvert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in *v1beta2.FlowCollectorSpec, out *FlowCollectorSpec, s conversion.Scope) error { - out.Namespace = in.Namespace - if err := Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(&in.Agent, &out.Agent, s); err != nil { - return err - } - if err := Convert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(&in.Processor, &out.Processor, s); err != nil { - return err - } - if err := Convert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(&in.Loki, &out.Loki, s); err != nil { - return err - } - if err := Convert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil { - return err - } - out.DeploymentModel = string(in.DeploymentModel) - if err := Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { - return err - } - // INFO: in.Exporters opted out of conversion generation - return nil -} - -func autoConvert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(in *FlowCollectorStatus, out *v1beta2.FlowCollectorStatus, s conversion.Scope) error { - out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions)) - out.Namespace = in.Namespace - return nil -} - -// Convert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(in *FlowCollectorStatus, out *v1beta2.FlowCollectorStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(in *v1beta2.FlowCollectorStatus, out *FlowCollectorStatus, s conversion.Scope) error { - out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions)) - out.Namespace = in.Namespace - return nil -} - -// Convert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus is an autogenerated conversion function. -func Convert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(in *v1beta2.FlowCollectorStatus, out *FlowCollectorStatus, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(in, out, s) -} - -func autoConvert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(in *MetricsServerConfig, out *v1beta2.MetricsServerConfig, s conversion.Scope) error { - out.Port = in.Port - if err := Convert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(&in.TLS, &out.TLS, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig is an autogenerated conversion function. -func Convert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(in *MetricsServerConfig, out *v1beta2.MetricsServerConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(in, out, s) -} - -func autoConvert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(in *v1beta2.MetricsServerConfig, out *MetricsServerConfig, s conversion.Scope) error { - out.Port = in.Port - if err := Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(&in.TLS, &out.TLS, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig is an autogenerated conversion function. 
-func Convert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(in *v1beta2.MetricsServerConfig, out *MetricsServerConfig, s conversion.Scope) error { - return autoConvert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(in, out, s) -} - -func autoConvert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(in *OVNKubernetesConfig, out *v1beta2.OVNKubernetesConfig, s conversion.Scope) error { - out.Namespace = in.Namespace - out.DaemonSetName = in.DaemonSetName - out.ContainerName = in.ContainerName - return nil -} - -// Convert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig is an autogenerated conversion function. -func Convert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(in *OVNKubernetesConfig, out *v1beta2.OVNKubernetesConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(in, out, s) -} - -func autoConvert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(in *v1beta2.OVNKubernetesConfig, out *OVNKubernetesConfig, s conversion.Scope) error { - out.Namespace = in.Namespace - out.DaemonSetName = in.DaemonSetName - out.ContainerName = in.ContainerName - return nil -} - -// Convert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig is an autogenerated conversion function. -func Convert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(in *v1beta2.OVNKubernetesConfig, out *OVNKubernetesConfig, s conversion.Scope) error { - return autoConvert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(in, out, s) -} - -func autoConvert_v1alpha1_QuickFilter_To_v1beta2_QuickFilter(in *QuickFilter, out *v1beta2.QuickFilter, s conversion.Scope) error { - out.Name = in.Name - out.Filter = *(*map[string]string)(unsafe.Pointer(&in.Filter)) - out.Default = in.Default - return nil -} - -// Convert_v1alpha1_QuickFilter_To_v1beta2_QuickFilter is an autogenerated conversion function. -func Convert_v1alpha1_QuickFilter_To_v1beta2_QuickFilter(in *QuickFilter, out *v1beta2.QuickFilter, s conversion.Scope) error { - return autoConvert_v1alpha1_QuickFilter_To_v1beta2_QuickFilter(in, out, s) -} - -func autoConvert_v1beta2_QuickFilter_To_v1alpha1_QuickFilter(in *v1beta2.QuickFilter, out *QuickFilter, s conversion.Scope) error { - out.Name = in.Name - out.Filter = *(*map[string]string)(unsafe.Pointer(&in.Filter)) - out.Default = in.Default - return nil -} - -// Convert_v1beta2_QuickFilter_To_v1alpha1_QuickFilter is an autogenerated conversion function. 
-func Convert_v1beta2_QuickFilter_To_v1alpha1_QuickFilter(in *v1beta2.QuickFilter, out *QuickFilter, s conversion.Scope) error { - return autoConvert_v1beta2_QuickFilter_To_v1alpha1_QuickFilter(in, out, s) -} - -func autoConvert_v1alpha1_SASLConfig_To_v1beta2_SASLConfig(in *SASLConfig, out *v1beta2.SASLConfig, s conversion.Scope) error { - out.Type = v1beta2.SASLType(in.Type) - if err := Convert_v1alpha1_FileReference_To_v1beta2_FileReference(&in.ClientIDReference, &out.ClientIDReference, s); err != nil { - return err - } - if err := Convert_v1alpha1_FileReference_To_v1beta2_FileReference(&in.ClientSecretReference, &out.ClientSecretReference, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta2_SASLConfig_To_v1alpha1_SASLConfig(in *v1beta2.SASLConfig, out *SASLConfig, s conversion.Scope) error { - out.Type = SASLType(in.Type) - if err := Convert_v1beta2_FileReference_To_v1alpha1_FileReference(&in.ClientIDReference, &out.ClientIDReference, s); err != nil { - return err - } - if err := Convert_v1beta2_FileReference_To_v1alpha1_FileReference(&in.ClientSecretReference, &out.ClientSecretReference, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1beta2.ServerTLS, s conversion.Scope) error { - out.Type = v1beta2.ServerTLSConfigType(in.Type) - out.Provided = (*v1beta2.CertificateReference)(unsafe.Pointer(in.Provided)) - return nil -} - -func autoConvert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s conversion.Scope) error { - out.Type = ServerTLSConfigType(in.Type) - out.Provided = (*CertificateReference)(unsafe.Pointer(in.Provided)) - // WARNING: in.InsecureSkipVerify requires manual conversion: does not exist in peer-type - // WARNING: in.ProvidedCaFile requires manual conversion: does not exist in peer-type - return nil -} diff --git a/apis/flowcollector/v1alpha1/zz_generated.deepcopy.go b/apis/flowcollector/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 921d44bf7..000000000 --- a/apis/flowcollector/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,570 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright 2021. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CertificateReference) DeepCopyInto(out *CertificateReference) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateReference. 
-func (in *CertificateReference) DeepCopy() *CertificateReference { - if in == nil { - return nil - } - out := new(CertificateReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClientTLS) DeepCopyInto(out *ClientTLS) { - *out = *in - out.CACert = in.CACert - out.UserCert = in.UserCert -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientTLS. -func (in *ClientTLS) DeepCopy() *ClientTLS { - if in == nil { - return nil - } - out := new(ClientTLS) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterNetworkOperatorConfig) DeepCopyInto(out *ClusterNetworkOperatorConfig) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkOperatorConfig. -func (in *ClusterNetworkOperatorConfig) DeepCopy() *ClusterNetworkOperatorConfig { - if in == nil { - return nil - } - out := new(ClusterNetworkOperatorConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConsolePluginPortConfig) DeepCopyInto(out *ConsolePluginPortConfig) { - *out = *in - if in.PortNames != nil { - in, out := &in.PortNames, &out.PortNames - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePluginPortConfig. -func (in *ConsolePluginPortConfig) DeepCopy() *ConsolePluginPortConfig { - if in == nil { - return nil - } - out := new(ConsolePluginPortConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DebugConfig) DeepCopyInto(out *DebugConfig) { - *out = *in - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DebugConfig. -func (in *DebugConfig) DeepCopy() *DebugConfig { - if in == nil { - return nil - } - out := new(DebugConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FLPMetrics) DeepCopyInto(out *FLPMetrics) { - *out = *in - in.Server.DeepCopyInto(&out.Server) - if in.IgnoreTags != nil { - in, out := &in.IgnoreTags, &out.IgnoreTags - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FLPMetrics. -func (in *FLPMetrics) DeepCopy() *FLPMetrics { - if in == nil { - return nil - } - out := new(FLPMetrics) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FileReference) DeepCopyInto(out *FileReference) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileReference. 
-func (in *FileReference) DeepCopy() *FileReference { - if in == nil { - return nil - } - out := new(FileReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollector) DeepCopyInto(out *FlowCollector) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollector. -func (in *FlowCollector) DeepCopy() *FlowCollector { - if in == nil { - return nil - } - out := new(FlowCollector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *FlowCollector) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorAgent) DeepCopyInto(out *FlowCollectorAgent) { - *out = *in - out.IPFIX = in.IPFIX - in.EBPF.DeepCopyInto(&out.EBPF) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorAgent. -func (in *FlowCollectorAgent) DeepCopy() *FlowCollectorAgent { - if in == nil { - return nil - } - out := new(FlowCollectorAgent) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorConsolePlugin) DeepCopyInto(out *FlowCollectorConsolePlugin) { - *out = *in - in.Resources.DeepCopyInto(&out.Resources) - in.Autoscaler.DeepCopyInto(&out.Autoscaler) - in.PortNaming.DeepCopyInto(&out.PortNaming) - if in.QuickFilters != nil { - in, out := &in.QuickFilters, &out.QuickFilters - *out = make([]QuickFilter, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorConsolePlugin. -func (in *FlowCollectorConsolePlugin) DeepCopy() *FlowCollectorConsolePlugin { - if in == nil { - return nil - } - out := new(FlowCollectorConsolePlugin) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorEBPF) DeepCopyInto(out *FlowCollectorEBPF) { - *out = *in - in.Resources.DeepCopyInto(&out.Resources) - if in.Sampling != nil { - in, out := &in.Sampling, &out.Sampling - *out = new(int32) - **out = **in - } - if in.Interfaces != nil { - in, out := &in.Interfaces, &out.Interfaces - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ExcludeInterfaces != nil { - in, out := &in.ExcludeInterfaces, &out.ExcludeInterfaces - *out = make([]string, len(*in)) - copy(*out, *in) - } - in.Debug.DeepCopyInto(&out.Debug) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorEBPF. -func (in *FlowCollectorEBPF) DeepCopy() *FlowCollectorEBPF { - if in == nil { - return nil - } - out := new(FlowCollectorEBPF) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *FlowCollectorExporter) DeepCopyInto(out *FlowCollectorExporter) { - *out = *in - out.Kafka = in.Kafka - out.IPFIX = in.IPFIX -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorExporter. -func (in *FlowCollectorExporter) DeepCopy() *FlowCollectorExporter { - if in == nil { - return nil - } - out := new(FlowCollectorExporter) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorFLP) DeepCopyInto(out *FlowCollectorFLP) { - *out = *in - in.Metrics.DeepCopyInto(&out.Metrics) - in.Resources.DeepCopyInto(&out.Resources) - in.KafkaConsumerAutoscaler.DeepCopyInto(&out.KafkaConsumerAutoscaler) - in.Debug.DeepCopyInto(&out.Debug) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorFLP. -func (in *FlowCollectorFLP) DeepCopy() *FlowCollectorFLP { - if in == nil { - return nil - } - out := new(FlowCollectorFLP) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorHPA) DeepCopyInto(out *FlowCollectorHPA) { - *out = *in - if in.MinReplicas != nil { - in, out := &in.MinReplicas, &out.MinReplicas - *out = new(int32) - **out = **in - } - if in.Metrics != nil { - in, out := &in.Metrics, &out.Metrics - *out = make([]v2.MetricSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorHPA. -func (in *FlowCollectorHPA) DeepCopy() *FlowCollectorHPA { - if in == nil { - return nil - } - out := new(FlowCollectorHPA) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorIPFIX) DeepCopyInto(out *FlowCollectorIPFIX) { - *out = *in - out.ClusterNetworkOperator = in.ClusterNetworkOperator - out.OVNKubernetes = in.OVNKubernetes -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorIPFIX. -func (in *FlowCollectorIPFIX) DeepCopy() *FlowCollectorIPFIX { - if in == nil { - return nil - } - out := new(FlowCollectorIPFIX) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorIPFIXReceiver) DeepCopyInto(out *FlowCollectorIPFIXReceiver) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorIPFIXReceiver. -func (in *FlowCollectorIPFIXReceiver) DeepCopy() *FlowCollectorIPFIXReceiver { - if in == nil { - return nil - } - out := new(FlowCollectorIPFIXReceiver) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorKafka) DeepCopyInto(out *FlowCollectorKafka) { - *out = *in - out.TLS = in.TLS - out.SASL = in.SASL -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorKafka. 
-func (in *FlowCollectorKafka) DeepCopy() *FlowCollectorKafka { - if in == nil { - return nil - } - out := new(FlowCollectorKafka) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorList) DeepCopyInto(out *FlowCollectorList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]FlowCollector, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorList. -func (in *FlowCollectorList) DeepCopy() *FlowCollectorList { - if in == nil { - return nil - } - out := new(FlowCollectorList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *FlowCollectorList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorLoki) DeepCopyInto(out *FlowCollectorLoki) { - *out = *in - out.BatchWait = in.BatchWait - out.Timeout = in.Timeout - out.MinBackoff = in.MinBackoff - out.MaxBackoff = in.MaxBackoff - if in.StaticLabels != nil { - in, out := &in.StaticLabels, &out.StaticLabels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - out.TLS = in.TLS -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorLoki. -func (in *FlowCollectorLoki) DeepCopy() *FlowCollectorLoki { - if in == nil { - return nil - } - out := new(FlowCollectorLoki) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorSpec) DeepCopyInto(out *FlowCollectorSpec) { - *out = *in - in.Agent.DeepCopyInto(&out.Agent) - in.Processor.DeepCopyInto(&out.Processor) - in.Loki.DeepCopyInto(&out.Loki) - in.ConsolePlugin.DeepCopyInto(&out.ConsolePlugin) - out.Kafka = in.Kafka - if in.Exporters != nil { - in, out := &in.Exporters, &out.Exporters - *out = make([]*FlowCollectorExporter, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(FlowCollectorExporter) - **out = **in - } - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorSpec. -func (in *FlowCollectorSpec) DeepCopy() *FlowCollectorSpec { - if in == nil { - return nil - } - out := new(FlowCollectorSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorStatus) DeepCopyInto(out *FlowCollectorStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorStatus. 
-func (in *FlowCollectorStatus) DeepCopy() *FlowCollectorStatus { - if in == nil { - return nil - } - out := new(FlowCollectorStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MetricsServerConfig) DeepCopyInto(out *MetricsServerConfig) { - *out = *in - in.TLS.DeepCopyInto(&out.TLS) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsServerConfig. -func (in *MetricsServerConfig) DeepCopy() *MetricsServerConfig { - if in == nil { - return nil - } - out := new(MetricsServerConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OVNKubernetesConfig) DeepCopyInto(out *OVNKubernetesConfig) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OVNKubernetesConfig. -func (in *OVNKubernetesConfig) DeepCopy() *OVNKubernetesConfig { - if in == nil { - return nil - } - out := new(OVNKubernetesConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *QuickFilter) DeepCopyInto(out *QuickFilter) { - *out = *in - if in.Filter != nil { - in, out := &in.Filter, &out.Filter - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuickFilter. -func (in *QuickFilter) DeepCopy() *QuickFilter { - if in == nil { - return nil - } - out := new(QuickFilter) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SASLConfig) DeepCopyInto(out *SASLConfig) { - *out = *in - out.ClientIDReference = in.ClientIDReference - out.ClientSecretReference = in.ClientSecretReference -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SASLConfig. -func (in *SASLConfig) DeepCopy() *SASLConfig { - if in == nil { - return nil - } - out := new(SASLConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServerTLS) DeepCopyInto(out *ServerTLS) { - *out = *in - if in.Provided != nil { - in, out := &in.Provided, &out.Provided - *out = new(CertificateReference) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerTLS. -func (in *ServerTLS) DeepCopy() *ServerTLS { - if in == nil { - return nil - } - out := new(ServerTLS) - in.DeepCopyInto(out) - return out -} diff --git a/apis/flowcollector/v1beta1/doc.go b/apis/flowcollector/v1beta1/doc.go index 2bb94b776..42ca5cfc4 100644 --- a/apis/flowcollector/v1beta1/doc.go +++ b/apis/flowcollector/v1beta1/doc.go @@ -13,4 +13,6 @@ limitations under the License. // Package v1beta1 contains the v1beta1 API implementation. // +k8s:conversion-gen=github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2 +// +// Deprecated: This package will be removed in one of the next releases. 
package v1beta1 diff --git a/apis/flowcollector/v1beta1/flowcollector_types.go b/apis/flowcollector/v1beta1/flowcollector_types.go index 2ccee38a9..a53e39399 100644 --- a/apis/flowcollector/v1beta1/flowcollector_types.go +++ b/apis/flowcollector/v1beta1/flowcollector_types.go @@ -875,7 +875,7 @@ type FlowCollectorStatus struct { // +kubebuilder:printcolumn:name="Sampling (EBPF)",type="string",JSONPath=`.spec.agent.ebpf.sampling` // +kubebuilder:printcolumn:name="Deployment Model",type="string",JSONPath=`.spec.deploymentModel` // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type=="Ready")].reason` -// +kubebuilder:storageversion +// +kubebuilder:deprecatedversion // `FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments. type FlowCollector struct { metav1.TypeMeta `json:",inline"` diff --git a/apis/flowcollector/v1beta2/flowcollector_types.go b/apis/flowcollector/v1beta2/flowcollector_types.go index 1b03c3ce4..00f4f24a1 100644 --- a/apis/flowcollector/v1beta2/flowcollector_types.go +++ b/apis/flowcollector/v1beta2/flowcollector_types.go @@ -1027,6 +1027,7 @@ type FlowCollectorStatus struct { // +kubebuilder:printcolumn:name="Sampling (EBPF)",type="string",JSONPath=`.spec.agent.ebpf.sampling` // +kubebuilder:printcolumn:name="Deployment Model",type="string",JSONPath=`.spec.deploymentModel` // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type=="Ready")].reason` +// +kubebuilder:storageversion // `FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments. type FlowCollector struct { metav1.TypeMeta `json:",inline"` diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml index d7b0df627..9d4e478dc 100644 --- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml +++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml @@ -15,7 +15,6 @@ spec: namespace: netobserv path: /convert conversionReviewVersions: - - v1alpha1 - v1beta1 - v1beta2 group: flows.netobserv.io @@ -40,2429 +39,6 @@ spec: name: Status type: string deprecated: true - name: v1alpha1 - schema: - openAPIV3Schema: - description: "FlowCollector is the Schema for the flowcollectors API, which - pilots and configures netflow collection. \n Deprecated: This package will - be removed in one of the next releases." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - properties: - name: - pattern: ^cluster$ - type: string - type: object - spec: - description: FlowCollectorSpec defines the desired state of FlowCollector - properties: - agent: - default: - type: EBPF - description: agent for flows extraction. 
- properties: - ebpf: - description: ebpf describes the settings related to the eBPF-based - flow reporter when the "agent.type" property is set to "EBPF". - properties: - cacheActiveTimeout: - default: 5s - description: cacheActiveTimeout is the max period during which - the reporter will aggregate flows before sending. Increasing - `cacheMaxFlows` and `cacheActiveTimeout` can decrease the - network traffic overhead and the CPU load, however you can - expect higher memory consumption and an increased latency - in the flow collection. - pattern: ^\d+(ns|ms|s|m)?$ - type: string - cacheMaxFlows: - default: 100000 - description: cacheMaxFlows is the max number of flows in an - aggregate; when reached, the reporter sends the flows. Increasing - `cacheMaxFlows` and `cacheActiveTimeout` can decrease the - network traffic overhead and the CPU load, however you can - expect higher memory consumption and an increased latency - in the flow collection. - format: int32 - minimum: 1 - type: integer - debug: - description: Debug allows setting some aspects of the internal - configuration of the eBPF agent. This section is aimed exclusively - for debugging and fine-grained performance optimizations - (for example GOGC, GOMAXPROCS env vars). Users setting its - values do it at their own risk. - properties: - env: - additionalProperties: - type: string - description: env allows passing custom environment variables - to the NetObserv Agent. Useful for passing some very - concrete performance-tuning options (such as GOGC, GOMAXPROCS) - that shouldn't be publicly exposed as part of the FlowCollector - descriptor, as they are only useful in edge debug and - support scenarios. - type: object - type: object - excludeInterfaces: - default: - - lo - description: excludeInterfaces contains the interface names - that will be excluded from flow tracing. If an entry is - enclosed by slashes (such as `/br-/`), it will match as - regular expression, otherwise it will be matched as a case-sensitive - string. - items: - type: string - type: array - imagePullPolicy: - default: IfNotPresent - description: imagePullPolicy is the Kubernetes pull policy - for the image defined above - enum: - - IfNotPresent - - Always - - Never - type: string - interfaces: - description: interfaces contains the interface names from - where flows will be collected. If empty, the agent will - fetch all the interfaces in the system, excepting the ones - listed in ExcludeInterfaces. If an entry is enclosed by - slashes (such as `/br-/`), it will match as regular expression, - otherwise it will be matched as a case-sensitive string. - items: - type: string - type: array - kafkaBatchSize: - default: 1048576 - description: 'kafkaBatchSize limits the maximum size of a - request in bytes before being sent to a partition. Ignored - when not using Kafka. Default: 1MB.' - type: integer - logLevel: - default: info - description: logLevel defines the log level for the NetObserv - eBPF Agent - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - privileged: - description: 'privileged mode for the eBPF Agent container. - In general this setting can be ignored or set to false: - in that case, the operator will set granular capabilities - (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, - to enable its correct operation. If for some reason these - capabilities cannot be set (for example old kernel version - not knowing CAP_BPF) then you can turn on this mode for - more global privileges.' 
- type: boolean - resources: - default: - limits: - memory: 800Mi - requests: - cpu: 100m - memory: 50Mi - description: 'resources are the compute resources required - by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. \n This field - is immutable. It can only be set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry - in pod.spec.resourceClaims of the Pod where this - field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - sampling: - default: 50 - description: sampling rate of the flow reporter. 100 means - one flow on 100 is sent. 0 or 1 means all flows are sampled. - format: int32 - minimum: 0 - type: integer - type: object - ipfix: - description: ipfix describes the settings related to the IPFIX-based - flow reporter when the "agent.type" property is set to "IPFIX". - properties: - cacheActiveTimeout: - default: 20s - description: cacheActiveTimeout is the max period during which - the reporter will aggregate flows before sending - pattern: ^\d+(ns|ms|s|m)?$ - type: string - cacheMaxFlows: - default: 400 - description: cacheMaxFlows is the max number of flows in an - aggregate; when reached, the reporter sends the flows - format: int32 - minimum: 0 - type: integer - clusterNetworkOperator: - description: clusterNetworkOperator defines the settings related - to the OpenShift Cluster Network Operator, when available. - properties: - namespace: - default: openshift-network-operator - description: namespace where the config map is going - to be deployed. - type: string - type: object - forceSampleAll: - default: false - description: forceSampleAll allows disabling sampling in the - IPFIX-based flow reporter. It is not recommended to sample - all the traffic with IPFIX, as it might generate cluster - instability. If you REALLY want to do that, set this flag - to true. Use at your own risk. When it is set to true, the - value of "sampling" is ignored. 
- type: boolean - ovnKubernetes: - description: ovnKubernetes defines the settings of the OVN-Kubernetes - CNI, when available. This configuration is used when using - OVN's IPFIX exports, without OpenShift. When using OpenShift, - refer to the `clusterNetworkOperator` property instead. - properties: - containerName: - default: ovnkube-node - description: containerName defines the name of the container - to configure for IPFIX. - type: string - daemonSetName: - default: ovnkube-node - description: daemonSetName defines the name of the DaemonSet - controlling the OVN-Kubernetes pods. - type: string - namespace: - default: ovn-kubernetes - description: namespace where OVN-Kubernetes pods are deployed. - type: string - type: object - sampling: - default: 400 - description: sampling is the sampling rate on the reporter. - 100 means one flow on 100 is sent. To ensure cluster stability, - it is not possible to set a value below 2. If you really - want to sample every packet, which might impact the cluster - stability, refer to "forceSampleAll". Alternatively, you - can use the eBPF Agent instead of IPFIX. - format: int32 - minimum: 2 - type: integer - type: object - type: - default: EBPF - description: type selects the flows tracing agent. Possible values - are "EBPF" (default) to use NetObserv eBPF agent, "IPFIX" to - use the legacy IPFIX collector. "EBPF" is recommended in most - cases as it offers better performances and should work regardless - of the CNI installed on the cluster. "IPFIX" works with OVN-Kubernetes - CNI (other CNIs could work if they support exporting IPFIX, - but they would require manual configuration). - enum: - - EBPF - - IPFIX - type: string - required: - - type - type: object - consolePlugin: - description: consolePlugin defines the settings related to the OpenShift - Console plugin, when available. - properties: - autoscaler: - description: autoscaler spec of a horizontal pod autoscaler to - set up for the plugin Deployment. - properties: - maxReplicas: - default: 3 - description: maxReplicas is the upper limit for the number - of pods that can be set by the autoscaler; cannot be smaller - than MinReplicas. - format: int32 - type: integer - metrics: - description: metrics used by the pod autoscaler - items: - description: MetricSpec specifies how to scale based on - a single metric (only `type` and one other matching field - should be set at once). - properties: - containerResource: - description: containerResource refers to a resource - metric (such as those specified in requests and limits) - known to Kubernetes describing a single container - in each pod of the current scale target (e.g. CPU - or memory). Such metrics are built in to Kubernetes, - and have special scaling options on top of those available - to normal per-pod metrics using the "pods" source. - This is an alpha feature and can be enabled by the - HPAContainerMetrics feature flag. - properties: - container: - description: container is the name of the container - in the pods of the scaling target - type: string - name: - description: name is the name of the resource in - question. - type: string - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. 
Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - container - - name - - target - type: object - external: - description: external refers to a global metric that - is not associated with any Kubernetes object. It allows - autoscaling based on information coming from components - running outside of cluster (for example length of - queue in cloud messaging service, or QPS from loadbalancer - running outside of cluster). - properties: - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. 
Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - object: - description: object refers to a metric describing a - single kubernetes object (for example, hits-per-second - on an Ingress object). - properties: - describedObject: - description: describedObject specifies the descriptions - of a object,such as kind,name apiVersion - properties: - apiVersion: - description: apiVersion is the API version of - the referent - type: string - kind: - description: 'kind is the kind of the referent; - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'name is the name of the referent; - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - kind - - name - type: object - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. 
- type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - describedObject - - metric - - target - type: object - pods: - description: pods refers to a metric describing each - pod in the current scale target (for example, transactions-processed-per-second). The - values will be averaged together before being compared - to the target value. - properties: - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. 
- type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - resource: - description: resource refers to a resource metric (such - as those specified in requests and limits) known to - Kubernetes describing each pod in the current scale - target (e.g. CPU or memory). Such metrics are built - in to Kubernetes, and have special scaling options - on top of those available to normal per-pod metrics - using the "pods" source. - properties: - name: - description: name is the name of the resource in - question. - type: string - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - name - - target - type: object - type: - description: 'type is the type of metric source. It - should be one of "ContainerResource", "External", - "Object", "Pods" or "Resource", each mapping to a - matching field in the object. Note: "ContainerResource" - type is available on when the feature-gate HPAContainerMetrics - is enabled' - type: string - required: - - type - type: object - type: array - minReplicas: - description: minReplicas is the lower limit for the number - of replicas to which the autoscaler can scale down. It - defaults to 1 pod. 
minReplicas is allowed to be 0 if the - alpha feature gate HPAScaleToZero is enabled and at least - one Object or External metric is configured. Scaling is - active as long as at least one metric value is available. - format: int32 - type: integer - status: - default: DISABLED - description: Status describe the desired status regarding - deploying an horizontal pod autoscaler DISABLED will not - deploy an horizontal pod autoscaler ENABLED will deploy - an horizontal pod autoscaler - enum: - - DISABLED - - ENABLED - type: string - type: object - imagePullPolicy: - default: IfNotPresent - description: imagePullPolicy is the Kubernetes pull policy for - the image defined above - enum: - - IfNotPresent - - Always - - Never - type: string - logLevel: - default: info - description: logLevel for the console plugin backend - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - port: - default: 9001 - description: port is the plugin service port - format: int32 - maximum: 65535 - minimum: 1 - type: integer - portNaming: - default: - enable: true - description: portNaming defines the configuration of the port-to-service - name translation - properties: - enable: - default: true - description: enable the console plugin port-to-service name - translation - type: boolean - portNames: - additionalProperties: - type: string - description: 'portNames defines additional port names to use - in the console. Example: portNames: {"3100": "loki"}' - type: object - type: object - quickFilters: - default: - - default: true - filter: - dst_namespace!: openshift-,netobserv - src_namespace!: openshift-,netobserv - name: Applications - - filter: - dst_namespace: openshift-,netobserv - src_namespace: openshift-,netobserv - name: Infrastructure - - default: true - filter: - dst_kind: Pod - src_kind: Pod - name: Pods network - - filter: - dst_kind: Service - name: Services network - description: quickFilters configures quick filter presets for - the Console plugin - items: - description: QuickFilter defines preset configuration for Console's - quick filters - properties: - default: - description: default defines whether this filter should - be active by default or not - type: boolean - filter: - additionalProperties: - type: string - description: 'filter is a set of keys and values to be set - when this filter is selected. Each key can relate to a - list of values using a coma-separated string. Example: - filter: {"src_namespace": "namespace1,namespace2"}' - type: object - name: - description: name of the filter, that will be displayed - in Console - type: string - required: - - filter - - name - type: object - type: array - register: - default: true - description: 'register allows, when set to true, to automatically - register the provided console plugin with the OpenShift Console - operator. When set to false, you can still register it manually - by editing console.operator.openshift.io/cluster. E.g: oc patch - console.operator.openshift.io cluster --type=''json'' -p ''[{"op": - "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]''' - type: boolean - replicas: - default: 1 - description: replicas defines the number of replicas (pods) to - start. - format: int32 - minimum: 0 - type: integer - resources: - default: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 50Mi - description: 'resources, in terms of compute resources, required - by this container. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only be - set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in - pod.spec.resourceClaims of the Pod where this field - is used. It makes that resource available inside a - container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed - Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - required: - - register - type: object - deploymentModel: - default: DIRECT - description: deploymentModel defines the desired type of deployment - for flow processing. Possible values are "DIRECT" (default) to make - the flow processor listening directly from the agents, or "KAFKA" - to make flows sent to a Kafka pipeline before consumption by the - processor. Kafka can provide better scalability, resiliency and - high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka). - enum: - - DIRECT - - KAFKA - type: string - exporters: - description: exporters defines additional optional exporters for custom - consumption or storage. This is an experimental feature. Currently, - only KAFKA exporter is available. - items: - description: FlowCollectorExporter defines an additional exporter - to send enriched flows to - properties: - ipfix: - description: IPFIX configuration, such as the IP address and - port to send enriched IPFIX flows to. - properties: - targetHost: - default: "" - description: Address of the IPFIX external receiver - type: string - targetPort: - description: Port for the IPFIX external receiver - type: integer - transport: - description: Transport protocol (`TCP` or `UDP`) to be used - for the IPFIX connection, defaults to `TCP`. - enum: - - TCP - - UDP - type: string - required: - - targetHost - - targetPort - type: object - kafka: - description: kafka configuration, such as address or topic, - to send enriched flows to. - properties: - address: - default: "" - description: address of the Kafka server - type: string - sasl: - description: SASL authentication configuration. [Unsupported - (*)]. 
- properties: - clientIDReference: - description: Reference to the secret or config map containing - the client ID - properties: - file: - description: File name within the config map or - secret - type: string - name: - description: Name of the config map or secret containing - the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret - containing the file. If omitted, the default is - to use the same namespace as where NetObserv is - deployed. If the namespace is different, the config - map or the secret is copied so that it can be - mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - clientSecretReference: - description: Reference to the secret or config map containing - the client secret - properties: - file: - description: File name within the config map or - secret - type: string - name: - description: Name of the config map or secret containing - the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret - containing the file. If omitted, the default is - to use the same namespace as where NetObserv is - deployed. If the namespace is different, the config - map or the secret is copied so that it can be - mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: DISABLED - description: Type of SASL authentication to use, or - `DISABLED` if SASL is not used - enum: - - DISABLED - - PLAIN - - SCRAM-SHA512 - type: string - type: object - tls: - description: tls client configuration. When using TLS, verify - that the address matches the Kafka port used for TLS, - generally 9093. Note that, when eBPF agents are used, - Kafka certificate needs to be copied in the agent namespace - (by default it's netobserv-privileged). - properties: - caCert: - description: caCert defines the reference of the certificate - for the Certificate Authority - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or - secret. Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret - containing certificates. If omitted, assumes same - namespace as where NetObserv is deployed. If the - namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. 
- type: string - type: - description: 'type for the certificate reference: - "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: enable TLS - type: boolean - insecureSkipVerify: - default: false - description: insecureSkipVerify allows skipping client-side - verification of the server certificate If set to true, - CACert field will be ignored - type: boolean - userCert: - description: userCert defines the user certificate reference, - used for mTLS (you can ignore it when using regular, - one-way TLS) - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or - secret. Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret - containing certificates. If omitted, assumes same - namespace as where NetObserv is deployed. If the - namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. - type: string - type: - description: 'type for the certificate reference: - "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: object - topic: - default: "" - description: kafka topic to use. It must exist, NetObserv - will not create it. - type: string - required: - - address - - topic - type: object - type: - description: '`type` selects the type of exporters. The available - options are `KAFKA` and `IPFIX`.' - enum: - - KAFKA - - IPFIX - type: string - required: - - type - type: object - type: array - kafka: - description: kafka configuration, allowing to use Kafka as a broker - as part of the flow collection pipeline. Available when the "spec.deploymentModel" - is "KAFKA". - properties: - address: - default: "" - description: address of the Kafka server - type: string - sasl: - description: SASL authentication configuration. [Unsupported (*)]. - properties: - clientIDReference: - description: Reference to the secret or config map containing - the client ID - properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing - the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing - the file. If omitted, the default is to use the same - namespace as where NetObserv is deployed. If the namespace - is different, the config map or the secret is copied - so that it can be mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - clientSecretReference: - description: Reference to the secret or config map containing - the client secret - properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing - the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing - the file. If omitted, the default is to use the same - namespace as where NetObserv is deployed. 
If the namespace - is different, the config map or the secret is copied - so that it can be mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: DISABLED - description: Type of SASL authentication to use, or `DISABLED` - if SASL is not used - enum: - - DISABLED - - PLAIN - - SCRAM-SHA512 - type: string - type: object - tls: - description: tls client configuration. When using TLS, verify - that the address matches the Kafka port used for TLS, generally - 9093. Note that, when eBPF agents are used, Kafka certificate - needs to be copied in the agent namespace (by default it's netobserv-privileged). - properties: - caCert: - description: caCert defines the reference of the certificate - for the Certificate Authority - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or secret. - Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing - certificates. If omitted, assumes same namespace as - where NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. - type: string - type: - description: 'type for the certificate reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: enable TLS - type: boolean - insecureSkipVerify: - default: false - description: insecureSkipVerify allows skipping client-side - verification of the server certificate If set to true, CACert - field will be ignored - type: boolean - userCert: - description: userCert defines the user certificate reference, - used for mTLS (you can ignore it when using regular, one-way - TLS) - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or secret. - Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing - certificates. If omitted, assumes same namespace as - where NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. - type: string - type: - description: 'type for the certificate reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: object - topic: - default: "" - description: kafka topic to use. It must exist, NetObserv will - not create it. - type: string - required: - - address - - topic - type: object - loki: - description: loki, the flow store, client settings. - properties: - authToken: - default: DISABLED - description: AuthToken describe the way to get a token to authenticate - to Loki. DISABLED will not send any token with the request. - HOST will use the local pod service account to authenticate - to Loki. 
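Similarly, a hedged sketch of the internal Kafka broker settings described above, which only apply when `spec.deploymentModel` is `KAFKA`; the address, topic, and config map name are placeholders:

```yaml
spec:
  deploymentModel: KAFKA
  kafka:
    address: "kafka-bootstrap.netobserv:9093"   # placeholder; use the TLS listener port (generally 9093) when tls.enable is true
    topic: "network-flows"                      # must already exist; NetObserv does not create it
    tls:
      enable: true
      insecureSkipVerify: false
      caCert:
        type: configmap
        name: kafka-ca-bundle                   # hypothetical config map holding the CA certificate
        certFile: ca.crt
```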
FORWARD will forward user token, in this mode, pod - that are not receiving user request like the processor will - use the local pod service account. Similar to HOST mode. When - using the Loki Operator, set it to `HOST` or `FORWARD`. - enum: - - DISABLED - - HOST - - FORWARD - type: string - batchSize: - default: 102400 - description: batchSize is max batch size (in bytes) of logs to - accumulate before sending. - format: int64 - minimum: 1 - type: integer - batchWait: - default: 1s - description: batchWait is max time to wait before sending a batch. - type: string - maxBackoff: - default: 5s - description: maxBackoff is the maximum backoff time for client - connection between retries. - type: string - maxRetries: - default: 2 - description: maxRetries is the maximum number of retries for client - connections. - format: int32 - minimum: 0 - type: integer - minBackoff: - default: 1s - description: minBackoff is the initial backoff time for client - connection between retries. - type: string - querierUrl: - description: querierURL specifies the address of the Loki querier - service, in case it is different from the Loki ingester URL. - If empty, the URL value will be used (assuming that the Loki - ingester and querier are in the same server). When using the - Loki Operator, do not set it, since ingestion and queries use - the Loki gateway. - type: string - staticLabels: - additionalProperties: - type: string - default: - app: netobserv-flowcollector - description: staticLabels is a map of common labels to set on - each flow. - type: object - statusUrl: - description: statusURL specifies the address of the Loki /ready - /metrics /config endpoints, in case it is different from the - Loki querier URL. If empty, the QuerierURL value will be used. - This is useful to show error messages and some context in the - frontend. When using the Loki Operator, set it to the Loki HTTP - query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/. - type: string - tenantID: - default: netobserv - description: tenantID is the Loki X-Scope-OrgID that identifies - the tenant for each request. When using the Loki Operator, set - it to `network`, which corresponds to a special tenant mode. - type: string - timeout: - default: 10s - description: timeout is the maximum time connection / request - limit. A Timeout of zero means no timeout. - type: string - tls: - description: tls client configuration. - properties: - caCert: - description: caCert defines the reference of the certificate - for the Certificate Authority - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or secret. - Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing - certificates. If omitted, assumes same namespace as - where NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. 
- type: string - type: - description: 'type for the certificate reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: enable TLS - type: boolean - insecureSkipVerify: - default: false - description: insecureSkipVerify allows skipping client-side - verification of the server certificate If set to true, CACert - field will be ignored - type: boolean - userCert: - description: userCert defines the user certificate reference, - used for mTLS (you can ignore it when using regular, one-way - TLS) - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or secret. - Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing - certificates. If omitted, assumes same namespace as - where NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. - type: string - type: - description: 'type for the certificate reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: object - url: - default: http://loki:3100/ - description: url is the address of an existing Loki service to - push the flows to. When using the Loki Operator, set it to the - Loki gateway service with the `network` tenant set in path, - for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network. - type: string - type: object - namespace: - description: namespace where NetObserv pods are deployed. If empty, - the namespace of the operator is going to be used. - type: string - processor: - description: processor defines the settings of the component that - receives the flows from the agent, enriches them, and forwards them - to the Loki persistence layer. - properties: - debug: - description: Debug allows setting some aspects of the internal - configuration of the flow processor. This section is aimed exclusively - for debugging and fine-grained performance optimizations (for - example GOGC, GOMAXPROCS env vars). Users setting its values - do it at their own risk. - properties: - env: - additionalProperties: - type: string - description: env allows passing custom environment variables - to the NetObserv Agent. Useful for passing some very concrete - performance-tuning options (such as GOGC, GOMAXPROCS) that - shouldn't be publicly exposed as part of the FlowCollector - descriptor, as they are only useful in edge debug and support - scenarios. - type: object - type: object - dropUnusedFields: - default: true - description: dropUnusedFields allows, when set to true, to drop - fields that are known to be unused by OVS, in order to save - storage space. 
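As a reading aid, a sketch of the `loki` client section above, shown with its documented defaults; the commented alternatives are the Loki Operator values mentioned in the field descriptions:

```yaml
spec:
  loki:
    url: "http://loki:3100/"        # default; with the Loki Operator, point to the gateway, e.g.
                                    # https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network
    authToken: DISABLED             # set to HOST or FORWARD with the Loki Operator
    tenantID: netobserv             # set to `network` with the Loki Operator
    batchWait: 1s
    batchSize: 102400
    minBackoff: 1s
    maxBackoff: 5s
    maxRetries: 2
    timeout: 10s
    staticLabels:
      app: netobserv-flowcollector
    tls:
      enable: false
      insecureSkipVerify: false
```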
- type: boolean - enableKubeProbes: - default: true - description: enableKubeProbes is a flag to enable or disable Kubernetes - liveness and readiness probes - type: boolean - healthPort: - default: 8080 - description: healthPort is a collector HTTP port in the Pod that - exposes the health check API - format: int32 - maximum: 65535 - minimum: 1 - type: integer - imagePullPolicy: - default: IfNotPresent - description: imagePullPolicy is the Kubernetes pull policy for - the image defined above - enum: - - IfNotPresent - - Always - - Never - type: string - kafkaConsumerAutoscaler: - description: kafkaConsumerAutoscaler spec of a horizontal pod - autoscaler to set up for flowlogs-pipeline-transformer, which - consumes Kafka messages. This setting is ignored when Kafka - is disabled. - properties: - maxReplicas: - default: 3 - description: maxReplicas is the upper limit for the number - of pods that can be set by the autoscaler; cannot be smaller - than MinReplicas. - format: int32 - type: integer - metrics: - description: metrics used by the pod autoscaler - items: - description: MetricSpec specifies how to scale based on - a single metric (only `type` and one other matching field - should be set at once). - properties: - containerResource: - description: containerResource refers to a resource - metric (such as those specified in requests and limits) - known to Kubernetes describing a single container - in each pod of the current scale target (e.g. CPU - or memory). Such metrics are built in to Kubernetes, - and have special scaling options on top of those available - to normal per-pod metrics using the "pods" source. - This is an alpha feature and can be enabled by the - HPAContainerMetrics feature flag. - properties: - container: - description: container is the name of the container - in the pods of the scaling target - type: string - name: - description: name is the name of the resource in - question. - type: string - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - container - - name - - target - type: object - external: - description: external refers to a global metric that - is not associated with any Kubernetes object. It allows - autoscaling based on information coming from components - running outside of cluster (for example length of - queue in cloud messaging service, or QPS from loadbalancer - running outside of cluster). 
- properties: - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - object: - description: object refers to a metric describing a - single kubernetes object (for example, hits-per-second - on an Ingress object). 
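To make the autoscaler `metrics` schema above more concrete, here is a hedged sketch of a single `External` entry; the metric name, label, and target value are invented placeholders:

```yaml
- type: External
  external:
    metric:
      name: kafka_consumergroup_lag   # hypothetical metric exposed by an external metrics provider
      selector:
        matchLabels:
          topic: network-flows        # placeholder label
    target:
      type: AverageValue
      averageValue: "100"             # placeholder target
```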
- properties: - describedObject: - description: describedObject specifies the descriptions - of a object,such as kind,name apiVersion - properties: - apiVersion: - description: apiVersion is the API version of - the referent - type: string - kind: - description: 'kind is the kind of the referent; - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'name is the name of the referent; - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - kind - - name - type: object - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). 
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - describedObject - - metric - - target - type: object - pods: - description: pods refers to a metric describing each - pod in the current scale target (for example, transactions-processed-per-second). The - values will be averaged together before being compared - to the target value. - properties: - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). 
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - resource: - description: resource refers to a resource metric (such - as those specified in requests and limits) known to - Kubernetes describing each pod in the current scale - target (e.g. CPU or memory). Such metrics are built - in to Kubernetes, and have special scaling options - on top of those available to normal per-pod metrics - using the "pods" source. - properties: - name: - description: name is the name of the resource in - question. - type: string - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - name - - target - type: object - type: - description: 'type is the type of metric source. It - should be one of "ContainerResource", "External", - "Object", "Pods" or "Resource", each mapping to a - matching field in the object. Note: "ContainerResource" - type is available on when the feature-gate HPAContainerMetrics - is enabled' - type: string - required: - - type - type: object - type: array - minReplicas: - description: minReplicas is the lower limit for the number - of replicas to which the autoscaler can scale down. It - defaults to 1 pod. minReplicas is allowed to be 0 if the - alpha feature gate HPAScaleToZero is enabled and at least - one Object or External metric is configured. Scaling is - active as long as at least one metric value is available. - format: int32 - type: integer - status: - default: DISABLED - description: Status describe the desired status regarding - deploying an horizontal pod autoscaler DISABLED will not - deploy an horizontal pod autoscaler ENABLED will deploy - an horizontal pod autoscaler - enum: - - DISABLED - - ENABLED - type: string - type: object - kafkaConsumerBatchSize: - default: 10485760 - description: 'kafkaConsumerBatchSize indicates to the broker the - maximum batch size, in bytes, that the consumer will accept. - Ignored when not using Kafka. Default: 10MB.' - type: integer - kafkaConsumerQueueCapacity: - default: 1000 - description: kafkaConsumerQueueCapacity defines the capacity of - the internal message queue used in the Kafka consumer client. - Ignored when not using Kafka. 
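Putting the `kafkaConsumerAutoscaler` fields above together, a minimal sketch that enables the autoscaler with a standard CPU `Resource` metric; the utilization threshold is a placeholder:

```yaml
spec:
  processor:
    kafkaConsumerAutoscaler:
      status: ENABLED
      minReplicas: 1
      maxReplicas: 3
      metrics:
        - type: Resource
          resource:
            name: cpu
            target:
              type: Utilization
              averageUtilization: 75   # placeholder threshold
```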
- type: integer - kafkaConsumerReplicas: - default: 3 - description: kafkaConsumerReplicas defines the number of replicas - (pods) to start for flowlogs-pipeline-transformer, which consumes - Kafka messages. This setting is ignored when Kafka is disabled. - format: int32 - minimum: 0 - type: integer - logLevel: - default: info - description: logLevel of the collector runtime - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - metrics: - description: Metrics define the processor configuration regarding - metrics - properties: - ignoreTags: - default: - - egress - - packets - description: 'ignoreTags is a list of tags to specify which - metrics to ignore. Each metric is associated with a list - of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions - . Available tags are: egress, ingress, flows, bytes, packets, - namespaces, nodes, workloads' - items: - type: string - type: array - server: - description: metricsServer endpoint configuration for Prometheus - scraper - properties: - port: - default: 9102 - description: the prometheus HTTP port - format: int32 - maximum: 65535 - minimum: 1 - type: integer - tls: - description: TLS configuration. - properties: - provided: - description: TLS configuration. - properties: - certFile: - description: certFile defines the path to the - certificate file name within the config map - or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map - or secret. Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret - containing certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret - containing certificates. If omitted, assumes - same namespace as where NetObserv is deployed. - If the namespace is different, the config map - or the secret will be copied so that it can - be mounted as required. - type: string - type: - description: 'type for the certificate reference: - "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: DISABLED - description: Select the type of TLS configuration - "DISABLED" (default) to not configure TLS for the - endpoint, "PROVIDED" to manually provide cert file - and a key file, and "AUTO" to use OpenShift auto - generated certificate using annotations - enum: - - DISABLED - - PROVIDED - - AUTO - type: string - type: object - type: object - type: object - port: - default: 2055 - description: 'port of the flow collector (host port) By conventions, - some value are not authorized port must not be below 1024 and - must not equal this values: 4789,6081,500, and 4500' - format: int32 - maximum: 65535 - minimum: 1025 - type: integer - profilePort: - description: profilePort allows setting up a Go pprof profiler - listening to this port - format: int32 - maximum: 65535 - minimum: 0 - type: integer - resources: - default: - limits: - memory: 800Mi - requests: - cpu: 100m - memory: 100Mi - description: 'resources are the compute resources required by - this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. 
- \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only be - set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in - pod.spec.resourceClaims of the Pod where this field - is used. It makes that resource available inside a - container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed - Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - type: object - required: - - agent - - deploymentModel - type: object - status: - description: FlowCollectorStatus defines the observed state of FlowCollector - properties: - conditions: - description: conditions represent the latest available observations - of an object's state - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n \ttype FooStatus struct{ \t // Represents the observations - of a foo's current state. \t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\" \t // - +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map - \t // +listMapKey=type \t Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields - \t}" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. 
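For reference, a sketch of the `processor` section using the defaults documented above; the metrics server uses the `AUTO` TLS mode described for OpenShift auto-generated certificates:

```yaml
spec:
  processor:
    port: 2055
    healthPort: 8080
    logLevel: info
    imagePullPolicy: IfNotPresent
    dropUnusedFields: true
    enableKubeProbes: true
    kafkaConsumerReplicas: 3          # ignored when Kafka is disabled
    metrics:
      ignoreTags:
        - egress
        - packets
      server:
        port: 9102
        tls:
          type: AUTO
    resources:
      requests:
        cpu: 100m
        memory: 100Mi
      limits:
        memory: 800Mi
```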
- format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - namespace: - description: namespace where console plugin and flowlogs-pipeline - have been deployed. - type: string - required: - - conditions - type: object - type: object - served: true - storage: false - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .spec.agent.type - name: Agent - type: string - - jsonPath: .spec.agent.ebpf.sampling - name: Sampling (EBPF) - type: string - - jsonPath: .spec.deploymentModel - name: Deployment Model - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].reason - name: Status - type: string name: v1beta1 schema: openAPIV3Schema: @@ -2480,6 +56,10 @@ spec: submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: + properties: + name: + pattern: ^cluster$ + type: string type: object spec: description: 'Defines the desired state of the FlowCollector resource. @@ -5142,7 +2722,7 @@ spec: type: object type: object served: true - storage: true + storage: false subresources: status: {} - additionalPrinterColumns: @@ -8096,7 +5676,7 @@ spec: type: object type: object served: true - storage: false + storage: true subresources: status: {} status: diff --git a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml index 8814b9729..3f24a6d0c 100644 --- a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml +++ b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml @@ -4,16 +4,6 @@ metadata: annotations: alm-examples: |- [ - { - "apiVersion": "flows.netobserv.io/v1alpha1", - "kind": "FlowCollector", - "metadata": { - "name": "cluster" - }, - "spec": { - "deploymentModel": "DIRECT" - } - }, { "apiVersion": "flows.netobserv.io/v1alpha1", "kind": "FlowMetric", @@ -427,12 +417,6 @@ spec: apiservicedefinitions: {} customresourcedefinitions: owned: - - description: FlowCollector is the Schema for the flowcollectors API, which pilots - and configures netflow collection. 
- displayName: Flow Collector - kind: FlowCollector - name: flowcollectors.flows.netobserv.io - version: v1alpha1 - description: '`FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments.' displayName: Flow Collector @@ -1274,7 +1258,6 @@ spec: version: 1.0.5 webhookdefinitions: - admissionReviewVersions: - - v1alpha1 - v1beta1 - v1beta2 containerPort: 443 diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index d81633097..6a942d9aa 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -30,2425 +30,6 @@ spec: name: Status type: string deprecated: true - name: v1alpha1 - schema: - openAPIV3Schema: - description: "FlowCollector is the Schema for the flowcollectors API, which - pilots and configures netflow collection. \n Deprecated: This package will - be removed in one of the next releases." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: FlowCollectorSpec defines the desired state of FlowCollector - properties: - agent: - default: - type: EBPF - description: agent for flows extraction. - properties: - ebpf: - description: ebpf describes the settings related to the eBPF-based - flow reporter when the "agent.type" property is set to "EBPF". - properties: - cacheActiveTimeout: - default: 5s - description: cacheActiveTimeout is the max period during which - the reporter will aggregate flows before sending. Increasing - `cacheMaxFlows` and `cacheActiveTimeout` can decrease the - network traffic overhead and the CPU load, however you can - expect higher memory consumption and an increased latency - in the flow collection. - pattern: ^\d+(ns|ms|s|m)?$ - type: string - cacheMaxFlows: - default: 100000 - description: cacheMaxFlows is the max number of flows in an - aggregate; when reached, the reporter sends the flows. Increasing - `cacheMaxFlows` and `cacheActiveTimeout` can decrease the - network traffic overhead and the CPU load, however you can - expect higher memory consumption and an increased latency - in the flow collection. - format: int32 - minimum: 1 - type: integer - debug: - description: Debug allows setting some aspects of the internal - configuration of the eBPF agent. This section is aimed exclusively - for debugging and fine-grained performance optimizations - (for example GOGC, GOMAXPROCS env vars). Users setting its - values do it at their own risk. - properties: - env: - additionalProperties: - type: string - description: env allows passing custom environment variables - to the NetObserv Agent. 
Useful for passing some very - concrete performance-tuning options (such as GOGC, GOMAXPROCS) - that shouldn't be publicly exposed as part of the FlowCollector - descriptor, as they are only useful in edge debug and - support scenarios. - type: object - type: object - excludeInterfaces: - default: - - lo - description: excludeInterfaces contains the interface names - that will be excluded from flow tracing. If an entry is - enclosed by slashes (such as `/br-/`), it will match as - regular expression, otherwise it will be matched as a case-sensitive - string. - items: - type: string - type: array - imagePullPolicy: - default: IfNotPresent - description: imagePullPolicy is the Kubernetes pull policy - for the image defined above - enum: - - IfNotPresent - - Always - - Never - type: string - interfaces: - description: interfaces contains the interface names from - where flows will be collected. If empty, the agent will - fetch all the interfaces in the system, excepting the ones - listed in ExcludeInterfaces. If an entry is enclosed by - slashes (such as `/br-/`), it will match as regular expression, - otherwise it will be matched as a case-sensitive string. - items: - type: string - type: array - kafkaBatchSize: - default: 1048576 - description: 'kafkaBatchSize limits the maximum size of a - request in bytes before being sent to a partition. Ignored - when not using Kafka. Default: 1MB.' - type: integer - logLevel: - default: info - description: logLevel defines the log level for the NetObserv - eBPF Agent - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - privileged: - description: 'privileged mode for the eBPF Agent container. - In general this setting can be ignored or set to false: - in that case, the operator will set granular capabilities - (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, - to enable its correct operation. If for some reason these - capabilities cannot be set (for example old kernel version - not knowing CAP_BPF) then you can turn on this mode for - more global privileges.' - type: boolean - resources: - default: - limits: - memory: 800Mi - requests: - cpu: 100m - memory: 50Mi - description: 'resources are the compute resources required - by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. \n This field - is immutable. It can only be set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry - in pod.spec.resourceClaims of the Pod where this - field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - sampling: - default: 50 - description: sampling rate of the flow reporter. 100 means - one flow on 100 is sent. 0 or 1 means all flows are sampled. - format: int32 - minimum: 0 - type: integer - type: object - ipfix: - description: ipfix describes the settings related to the IPFIX-based - flow reporter when the "agent.type" property is set to "IPFIX". - properties: - cacheActiveTimeout: - default: 20s - description: cacheActiveTimeout is the max period during which - the reporter will aggregate flows before sending - pattern: ^\d+(ns|ms|s|m)?$ - type: string - cacheMaxFlows: - default: 400 - description: cacheMaxFlows is the max number of flows in an - aggregate; when reached, the reporter sends the flows - format: int32 - minimum: 0 - type: integer - clusterNetworkOperator: - description: clusterNetworkOperator defines the settings related - to the OpenShift Cluster Network Operator, when available. - properties: - namespace: - default: openshift-network-operator - description: namespace where the config map is going - to be deployed. - type: string - type: object - forceSampleAll: - default: false - description: forceSampleAll allows disabling sampling in the - IPFIX-based flow reporter. It is not recommended to sample - all the traffic with IPFIX, as it might generate cluster - instability. If you REALLY want to do that, set this flag - to true. Use at your own risk. When it is set to true, the - value of "sampling" is ignored. - type: boolean - ovnKubernetes: - description: ovnKubernetes defines the settings of the OVN-Kubernetes - CNI, when available. This configuration is used when using - OVN's IPFIX exports, without OpenShift. When using OpenShift, - refer to the `clusterNetworkOperator` property instead. - properties: - containerName: - default: ovnkube-node - description: containerName defines the name of the container - to configure for IPFIX. - type: string - daemonSetName: - default: ovnkube-node - description: daemonSetName defines the name of the DaemonSet - controlling the OVN-Kubernetes pods. - type: string - namespace: - default: ovn-kubernetes - description: namespace where OVN-Kubernetes pods are deployed. - type: string - type: object - sampling: - default: 400 - description: sampling is the sampling rate on the reporter. - 100 means one flow on 100 is sent. To ensure cluster stability, - it is not possible to set a value below 2. If you really - want to sample every packet, which might impact the cluster - stability, refer to "forceSampleAll". Alternatively, you - can use the eBPF Agent instead of IPFIX. - format: int32 - minimum: 2 - type: integer - type: object - type: - default: EBPF - description: type selects the flows tracing agent. 
Possible values - are "EBPF" (default) to use NetObserv eBPF agent, "IPFIX" to - use the legacy IPFIX collector. "EBPF" is recommended in most - cases as it offers better performances and should work regardless - of the CNI installed on the cluster. "IPFIX" works with OVN-Kubernetes - CNI (other CNIs could work if they support exporting IPFIX, - but they would require manual configuration). - enum: - - EBPF - - IPFIX - type: string - required: - - type - type: object - consolePlugin: - description: consolePlugin defines the settings related to the OpenShift - Console plugin, when available. - properties: - autoscaler: - description: autoscaler spec of a horizontal pod autoscaler to - set up for the plugin Deployment. - properties: - maxReplicas: - default: 3 - description: maxReplicas is the upper limit for the number - of pods that can be set by the autoscaler; cannot be smaller - than MinReplicas. - format: int32 - type: integer - metrics: - description: metrics used by the pod autoscaler - items: - description: MetricSpec specifies how to scale based on - a single metric (only `type` and one other matching field - should be set at once). - properties: - containerResource: - description: containerResource refers to a resource - metric (such as those specified in requests and limits) - known to Kubernetes describing a single container - in each pod of the current scale target (e.g. CPU - or memory). Such metrics are built in to Kubernetes, - and have special scaling options on top of those available - to normal per-pod metrics using the "pods" source. - This is an alpha feature and can be enabled by the - HPAContainerMetrics feature flag. - properties: - container: - description: container is the name of the container - in the pods of the scaling target - type: string - name: - description: name is the name of the resource in - question. - type: string - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - container - - name - - target - type: object - external: - description: external refers to a global metric that - is not associated with any Kubernetes object. It allows - autoscaling based on information coming from components - running outside of cluster (for example length of - queue in cloud messaging service, or QPS from loadbalancer - running outside of cluster). 
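A hedged sketch of the `agent` section of this deprecated v1alpha1 schema, shown with the eBPF defaults documented above; `IPFIX` is the legacy alternative and, per the description, only works out of the box with OVN-Kubernetes:

```yaml
spec:
  agent:
    type: EBPF
    ebpf:
      sampling: 50
      cacheActiveTimeout: 5s
      cacheMaxFlows: 100000
      logLevel: info
      privileged: false        # granular capabilities are set instead, unless an old kernel requires privileged mode
      excludeInterfaces:
        - lo
      resources:
        requests:
          cpu: 100m
          memory: 50Mi
        limits:
          memory: 800Mi
```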
- properties: - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - object: - description: object refers to a metric describing a - single kubernetes object (for example, hits-per-second - on an Ingress object). 
- properties: - describedObject: - description: describedObject specifies the descriptions - of a object,such as kind,name apiVersion - properties: - apiVersion: - description: apiVersion is the API version of - the referent - type: string - kind: - description: 'kind is the kind of the referent; - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'name is the name of the referent; - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - kind - - name - type: object - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). 
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - describedObject - - metric - - target - type: object - pods: - description: pods refers to a metric describing each - pod in the current scale target (for example, transactions-processed-per-second). The - values will be averaged together before being compared - to the target value. - properties: - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). 
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - resource: - description: resource refers to a resource metric (such - as those specified in requests and limits) known to - Kubernetes describing each pod in the current scale - target (e.g. CPU or memory). Such metrics are built - in to Kubernetes, and have special scaling options - on top of those available to normal per-pod metrics - using the "pods" source. - properties: - name: - description: name is the name of the resource in - question. - type: string - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - name - - target - type: object - type: - description: 'type is the type of metric source. It - should be one of "ContainerResource", "External", - "Object", "Pods" or "Resource", each mapping to a - matching field in the object. Note: "ContainerResource" - type is available on when the feature-gate HPAContainerMetrics - is enabled' - type: string - required: - - type - type: object - type: array - minReplicas: - description: minReplicas is the lower limit for the number - of replicas to which the autoscaler can scale down. It - defaults to 1 pod. minReplicas is allowed to be 0 if the - alpha feature gate HPAScaleToZero is enabled and at least - one Object or External metric is configured. Scaling is - active as long as at least one metric value is available. 
- format: int32 - type: integer - status: - default: DISABLED - description: Status describe the desired status regarding - deploying an horizontal pod autoscaler DISABLED will not - deploy an horizontal pod autoscaler ENABLED will deploy - an horizontal pod autoscaler - enum: - - DISABLED - - ENABLED - type: string - type: object - imagePullPolicy: - default: IfNotPresent - description: imagePullPolicy is the Kubernetes pull policy for - the image defined above - enum: - - IfNotPresent - - Always - - Never - type: string - logLevel: - default: info - description: logLevel for the console plugin backend - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - port: - default: 9001 - description: port is the plugin service port - format: int32 - maximum: 65535 - minimum: 1 - type: integer - portNaming: - default: - enable: true - description: portNaming defines the configuration of the port-to-service - name translation - properties: - enable: - default: true - description: enable the console plugin port-to-service name - translation - type: boolean - portNames: - additionalProperties: - type: string - description: 'portNames defines additional port names to use - in the console. Example: portNames: {"3100": "loki"}' - type: object - type: object - quickFilters: - default: - - default: true - filter: - dst_namespace!: openshift-,netobserv - src_namespace!: openshift-,netobserv - name: Applications - - filter: - dst_namespace: openshift-,netobserv - src_namespace: openshift-,netobserv - name: Infrastructure - - default: true - filter: - dst_kind: Pod - src_kind: Pod - name: Pods network - - filter: - dst_kind: Service - name: Services network - description: quickFilters configures quick filter presets for - the Console plugin - items: - description: QuickFilter defines preset configuration for Console's - quick filters - properties: - default: - description: default defines whether this filter should - be active by default or not - type: boolean - filter: - additionalProperties: - type: string - description: 'filter is a set of keys and values to be set - when this filter is selected. Each key can relate to a - list of values using a coma-separated string. Example: - filter: {"src_namespace": "namespace1,namespace2"}' - type: object - name: - description: name of the filter, that will be displayed - in Console - type: string - required: - - filter - - name - type: object - type: array - register: - default: true - description: 'register allows, when set to true, to automatically - register the provided console plugin with the OpenShift Console - operator. When set to false, you can still register it manually - by editing console.operator.openshift.io/cluster. E.g: oc patch - console.operator.openshift.io cluster --type=''json'' -p ''[{"op": - "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]''' - type: boolean - replicas: - default: 1 - description: replicas defines the number of replicas (pods) to - start. - format: int32 - minimum: 0 - type: integer - resources: - default: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 50Mi - description: 'resources, in terms of compute resources, required - by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. 
- \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only be - set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in - pod.spec.resourceClaims of the Pod where this field - is used. It makes that resource available inside a - container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed - Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - required: - - register - type: object - deploymentModel: - default: DIRECT - description: deploymentModel defines the desired type of deployment - for flow processing. Possible values are "DIRECT" (default) to make - the flow processor listening directly from the agents, or "KAFKA" - to make flows sent to a Kafka pipeline before consumption by the - processor. Kafka can provide better scalability, resiliency and - high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka). - enum: - - DIRECT - - KAFKA - type: string - exporters: - description: exporters defines additional optional exporters for custom - consumption or storage. This is an experimental feature. Currently, - only KAFKA exporter is available. - items: - description: FlowCollectorExporter defines an additional exporter - to send enriched flows to - properties: - ipfix: - description: IPFIX configuration, such as the IP address and - port to send enriched IPFIX flows to. - properties: - targetHost: - default: "" - description: Address of the IPFIX external receiver - type: string - targetPort: - description: Port for the IPFIX external receiver - type: integer - transport: - description: Transport protocol (`TCP` or `UDP`) to be used - for the IPFIX connection, defaults to `TCP`. - enum: - - TCP - - UDP - type: string - required: - - targetHost - - targetPort - type: object - kafka: - description: kafka configuration, such as address or topic, - to send enriched flows to. - properties: - address: - default: "" - description: address of the Kafka server - type: string - sasl: - description: SASL authentication configuration. [Unsupported - (*)]. 
- properties: - clientIDReference: - description: Reference to the secret or config map containing - the client ID - properties: - file: - description: File name within the config map or - secret - type: string - name: - description: Name of the config map or secret containing - the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret - containing the file. If omitted, the default is - to use the same namespace as where NetObserv is - deployed. If the namespace is different, the config - map or the secret is copied so that it can be - mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - clientSecretReference: - description: Reference to the secret or config map containing - the client secret - properties: - file: - description: File name within the config map or - secret - type: string - name: - description: Name of the config map or secret containing - the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret - containing the file. If omitted, the default is - to use the same namespace as where NetObserv is - deployed. If the namespace is different, the config - map or the secret is copied so that it can be - mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: DISABLED - description: Type of SASL authentication to use, or - `DISABLED` if SASL is not used - enum: - - DISABLED - - PLAIN - - SCRAM-SHA512 - type: string - type: object - tls: - description: tls client configuration. When using TLS, verify - that the address matches the Kafka port used for TLS, - generally 9093. Note that, when eBPF agents are used, - Kafka certificate needs to be copied in the agent namespace - (by default it's netobserv-privileged). - properties: - caCert: - description: caCert defines the reference of the certificate - for the Certificate Authority - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or - secret. Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret - containing certificates. If omitted, assumes same - namespace as where NetObserv is deployed. If the - namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. 
- type: string - type: - description: 'type for the certificate reference: - "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: enable TLS - type: boolean - insecureSkipVerify: - default: false - description: insecureSkipVerify allows skipping client-side - verification of the server certificate If set to true, - CACert field will be ignored - type: boolean - userCert: - description: userCert defines the user certificate reference, - used for mTLS (you can ignore it when using regular, - one-way TLS) - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or - secret. Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret - containing certificates. If omitted, assumes same - namespace as where NetObserv is deployed. If the - namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. - type: string - type: - description: 'type for the certificate reference: - "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: object - topic: - default: "" - description: kafka topic to use. It must exist, NetObserv - will not create it. - type: string - required: - - address - - topic - type: object - type: - description: '`type` selects the type of exporters. The available - options are `KAFKA` and `IPFIX`.' - enum: - - KAFKA - - IPFIX - type: string - required: - - type - type: object - type: array - kafka: - description: kafka configuration, allowing to use Kafka as a broker - as part of the flow collection pipeline. Available when the "spec.deploymentModel" - is "KAFKA". - properties: - address: - default: "" - description: address of the Kafka server - type: string - sasl: - description: SASL authentication configuration. [Unsupported (*)]. - properties: - clientIDReference: - description: Reference to the secret or config map containing - the client ID - properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing - the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing - the file. If omitted, the default is to use the same - namespace as where NetObserv is deployed. If the namespace - is different, the config map or the secret is copied - so that it can be mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - clientSecretReference: - description: Reference to the secret or config map containing - the client secret - properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing - the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing - the file. If omitted, the default is to use the same - namespace as where NetObserv is deployed. 
If the namespace - is different, the config map or the secret is copied - so that it can be mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: DISABLED - description: Type of SASL authentication to use, or `DISABLED` - if SASL is not used - enum: - - DISABLED - - PLAIN - - SCRAM-SHA512 - type: string - type: object - tls: - description: tls client configuration. When using TLS, verify - that the address matches the Kafka port used for TLS, generally - 9093. Note that, when eBPF agents are used, Kafka certificate - needs to be copied in the agent namespace (by default it's netobserv-privileged). - properties: - caCert: - description: caCert defines the reference of the certificate - for the Certificate Authority - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or secret. - Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing - certificates. If omitted, assumes same namespace as - where NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. - type: string - type: - description: 'type for the certificate reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: enable TLS - type: boolean - insecureSkipVerify: - default: false - description: insecureSkipVerify allows skipping client-side - verification of the server certificate If set to true, CACert - field will be ignored - type: boolean - userCert: - description: userCert defines the user certificate reference, - used for mTLS (you can ignore it when using regular, one-way - TLS) - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or secret. - Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing - certificates. If omitted, assumes same namespace as - where NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. - type: string - type: - description: 'type for the certificate reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: object - topic: - default: "" - description: kafka topic to use. It must exist, NetObserv will - not create it. - type: string - required: - - address - - topic - type: object - loki: - description: loki, the flow store, client settings. - properties: - authToken: - default: DISABLED - description: AuthToken describe the way to get a token to authenticate - to Loki. DISABLED will not send any token with the request. - HOST will use the local pod service account to authenticate - to Loki. 
FORWARD will forward user token, in this mode, pod - that are not receiving user request like the processor will - use the local pod service account. Similar to HOST mode. When - using the Loki Operator, set it to `HOST` or `FORWARD`. - enum: - - DISABLED - - HOST - - FORWARD - type: string - batchSize: - default: 102400 - description: batchSize is max batch size (in bytes) of logs to - accumulate before sending. - format: int64 - minimum: 1 - type: integer - batchWait: - default: 1s - description: batchWait is max time to wait before sending a batch. - type: string - maxBackoff: - default: 5s - description: maxBackoff is the maximum backoff time for client - connection between retries. - type: string - maxRetries: - default: 2 - description: maxRetries is the maximum number of retries for client - connections. - format: int32 - minimum: 0 - type: integer - minBackoff: - default: 1s - description: minBackoff is the initial backoff time for client - connection between retries. - type: string - querierUrl: - description: querierURL specifies the address of the Loki querier - service, in case it is different from the Loki ingester URL. - If empty, the URL value will be used (assuming that the Loki - ingester and querier are in the same server). When using the - Loki Operator, do not set it, since ingestion and queries use - the Loki gateway. - type: string - staticLabels: - additionalProperties: - type: string - default: - app: netobserv-flowcollector - description: staticLabels is a map of common labels to set on - each flow. - type: object - statusUrl: - description: statusURL specifies the address of the Loki /ready - /metrics /config endpoints, in case it is different from the - Loki querier URL. If empty, the QuerierURL value will be used. - This is useful to show error messages and some context in the - frontend. When using the Loki Operator, set it to the Loki HTTP - query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/. - type: string - tenantID: - default: netobserv - description: tenantID is the Loki X-Scope-OrgID that identifies - the tenant for each request. When using the Loki Operator, set - it to `network`, which corresponds to a special tenant mode. - type: string - timeout: - default: 10s - description: timeout is the maximum time connection / request - limit. A Timeout of zero means no timeout. - type: string - tls: - description: tls client configuration. - properties: - caCert: - description: caCert defines the reference of the certificate - for the Certificate Authority - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or secret. - Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing - certificates. If omitted, assumes same namespace as - where NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. 
- type: string - type: - description: 'type for the certificate reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: enable TLS - type: boolean - insecureSkipVerify: - default: false - description: insecureSkipVerify allows skipping client-side - verification of the server certificate If set to true, CACert - field will be ignored - type: boolean - userCert: - description: userCert defines the user certificate reference, - used for mTLS (you can ignore it when using regular, one-way - TLS) - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or secret. - Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing - certificates. If omitted, assumes same namespace as - where NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. - type: string - type: - description: 'type for the certificate reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: object - url: - default: http://loki:3100/ - description: url is the address of an existing Loki service to - push the flows to. When using the Loki Operator, set it to the - Loki gateway service with the `network` tenant set in path, - for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network. - type: string - type: object - namespace: - description: namespace where NetObserv pods are deployed. If empty, - the namespace of the operator is going to be used. - type: string - processor: - description: processor defines the settings of the component that - receives the flows from the agent, enriches them, and forwards them - to the Loki persistence layer. - properties: - debug: - description: Debug allows setting some aspects of the internal - configuration of the flow processor. This section is aimed exclusively - for debugging and fine-grained performance optimizations (for - example GOGC, GOMAXPROCS env vars). Users setting its values - do it at their own risk. - properties: - env: - additionalProperties: - type: string - description: env allows passing custom environment variables - to the NetObserv Agent. Useful for passing some very concrete - performance-tuning options (such as GOGC, GOMAXPROCS) that - shouldn't be publicly exposed as part of the FlowCollector - descriptor, as they are only useful in edge debug and support - scenarios. - type: object - type: object - dropUnusedFields: - default: true - description: dropUnusedFields allows, when set to true, to drop - fields that are known to be unused by OVS, in order to save - storage space. 
- type: boolean - enableKubeProbes: - default: true - description: enableKubeProbes is a flag to enable or disable Kubernetes - liveness and readiness probes - type: boolean - healthPort: - default: 8080 - description: healthPort is a collector HTTP port in the Pod that - exposes the health check API - format: int32 - maximum: 65535 - minimum: 1 - type: integer - imagePullPolicy: - default: IfNotPresent - description: imagePullPolicy is the Kubernetes pull policy for - the image defined above - enum: - - IfNotPresent - - Always - - Never - type: string - kafkaConsumerAutoscaler: - description: kafkaConsumerAutoscaler spec of a horizontal pod - autoscaler to set up for flowlogs-pipeline-transformer, which - consumes Kafka messages. This setting is ignored when Kafka - is disabled. - properties: - maxReplicas: - default: 3 - description: maxReplicas is the upper limit for the number - of pods that can be set by the autoscaler; cannot be smaller - than MinReplicas. - format: int32 - type: integer - metrics: - description: metrics used by the pod autoscaler - items: - description: MetricSpec specifies how to scale based on - a single metric (only `type` and one other matching field - should be set at once). - properties: - containerResource: - description: containerResource refers to a resource - metric (such as those specified in requests and limits) - known to Kubernetes describing a single container - in each pod of the current scale target (e.g. CPU - or memory). Such metrics are built in to Kubernetes, - and have special scaling options on top of those available - to normal per-pod metrics using the "pods" source. - This is an alpha feature and can be enabled by the - HPAContainerMetrics feature flag. - properties: - container: - description: container is the name of the container - in the pods of the scaling target - type: string - name: - description: name is the name of the resource in - question. - type: string - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - container - - name - - target - type: object - external: - description: external refers to a global metric that - is not associated with any Kubernetes object. It allows - autoscaling based on information coming from components - running outside of cluster (for example length of - queue in cloud messaging service, or QPS from loadbalancer - running outside of cluster). 
- properties: - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - object: - description: object refers to a metric describing a - single kubernetes object (for example, hits-per-second - on an Ingress object). 
- properties: - describedObject: - description: describedObject specifies the descriptions - of a object,such as kind,name apiVersion - properties: - apiVersion: - description: apiVersion is the API version of - the referent - type: string - kind: - description: 'kind is the kind of the referent; - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'name is the name of the referent; - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - kind - - name - type: object - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). 
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - describedObject - - metric - - target - type: object - pods: - description: pods refers to a metric describing each - pod in the current scale target (for example, transactions-processed-per-second). The - values will be averaged together before being compared - to the target value. - properties: - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). 
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - resource: - description: resource refers to a resource metric (such - as those specified in requests and limits) known to - Kubernetes describing each pod in the current scale - target (e.g. CPU or memory). Such metrics are built - in to Kubernetes, and have special scaling options - on top of those available to normal per-pod metrics - using the "pods" source. - properties: - name: - description: name is the name of the resource in - question. - type: string - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - name - - target - type: object - type: - description: 'type is the type of metric source. It - should be one of "ContainerResource", "External", - "Object", "Pods" or "Resource", each mapping to a - matching field in the object. Note: "ContainerResource" - type is available on when the feature-gate HPAContainerMetrics - is enabled' - type: string - required: - - type - type: object - type: array - minReplicas: - description: minReplicas is the lower limit for the number - of replicas to which the autoscaler can scale down. It - defaults to 1 pod. minReplicas is allowed to be 0 if the - alpha feature gate HPAScaleToZero is enabled and at least - one Object or External metric is configured. Scaling is - active as long as at least one metric value is available. - format: int32 - type: integer - status: - default: DISABLED - description: Status describe the desired status regarding - deploying an horizontal pod autoscaler DISABLED will not - deploy an horizontal pod autoscaler ENABLED will deploy - an horizontal pod autoscaler - enum: - - DISABLED - - ENABLED - type: string - type: object - kafkaConsumerBatchSize: - default: 10485760 - description: 'kafkaConsumerBatchSize indicates to the broker the - maximum batch size, in bytes, that the consumer will accept. - Ignored when not using Kafka. Default: 10MB.' - type: integer - kafkaConsumerQueueCapacity: - default: 1000 - description: kafkaConsumerQueueCapacity defines the capacity of - the internal message queue used in the Kafka consumer client. - Ignored when not using Kafka. 
- type: integer - kafkaConsumerReplicas: - default: 3 - description: kafkaConsumerReplicas defines the number of replicas - (pods) to start for flowlogs-pipeline-transformer, which consumes - Kafka messages. This setting is ignored when Kafka is disabled. - format: int32 - minimum: 0 - type: integer - logLevel: - default: info - description: logLevel of the collector runtime - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - metrics: - description: Metrics define the processor configuration regarding - metrics - properties: - ignoreTags: - default: - - egress - - packets - description: 'ignoreTags is a list of tags to specify which - metrics to ignore. Each metric is associated with a list - of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions - . Available tags are: egress, ingress, flows, bytes, packets, - namespaces, nodes, workloads' - items: - type: string - type: array - server: - description: metricsServer endpoint configuration for Prometheus - scraper - properties: - port: - default: 9102 - description: the prometheus HTTP port - format: int32 - maximum: 65535 - minimum: 1 - type: integer - tls: - description: TLS configuration. - properties: - provided: - description: TLS configuration. - properties: - certFile: - description: certFile defines the path to the - certificate file name within the config map - or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map - or secret. Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret - containing certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret - containing certificates. If omitted, assumes - same namespace as where NetObserv is deployed. - If the namespace is different, the config map - or the secret will be copied so that it can - be mounted as required. - type: string - type: - description: 'type for the certificate reference: - "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: DISABLED - description: Select the type of TLS configuration - "DISABLED" (default) to not configure TLS for the - endpoint, "PROVIDED" to manually provide cert file - and a key file, and "AUTO" to use OpenShift auto - generated certificate using annotations - enum: - - DISABLED - - PROVIDED - - AUTO - type: string - type: object - type: object - type: object - port: - default: 2055 - description: 'port of the flow collector (host port) By conventions, - some value are not authorized port must not be below 1024 and - must not equal this values: 4789,6081,500, and 4500' - format: int32 - maximum: 65535 - minimum: 1025 - type: integer - profilePort: - description: profilePort allows setting up a Go pprof profiler - listening to this port - format: int32 - maximum: 65535 - minimum: 0 - type: integer - resources: - default: - limits: - memory: 800Mi - requests: - cpu: 100m - memory: 100Mi - description: 'resources are the compute resources required by - this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. 
- \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only be - set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in - pod.spec.resourceClaims of the Pod where this field - is used. It makes that resource available inside a - container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed - Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - type: object - required: - - agent - - deploymentModel - type: object - status: - description: FlowCollectorStatus defines the observed state of FlowCollector - properties: - conditions: - description: conditions represent the latest available observations - of an object's state - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n \ttype FooStatus struct{ \t // Represents the observations - of a foo's current state. \t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\" \t // - +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map - \t // +listMapKey=type \t Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields - \t}" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. 
- format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - namespace: - description: namespace where console plugin and flowlogs-pipeline - have been deployed. - type: string - required: - - conditions - type: object - type: object - served: true - storage: false - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .spec.agent.type - name: Agent - type: string - - jsonPath: .spec.agent.ebpf.sampling - name: Sampling (EBPF) - type: string - - jsonPath: .spec.deploymentModel - name: Deployment Model - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].reason - name: Status - type: string name: v1beta1 schema: openAPIV3Schema: @@ -5128,7 +2709,7 @@ spec: type: object type: object served: true - storage: true + storage: false subresources: status: {} - additionalPrinterColumns: @@ -8082,7 +5663,7 @@ spec: type: object type: object served: true - storage: false + storage: true subresources: status: {} status: diff --git a/config/crd/patches/webhook_in_flowcollectors.yaml b/config/crd/patches/webhook_in_flowcollectors.yaml index fa0d9bee0..04b0caf57 100644 --- a/config/crd/patches/webhook_in_flowcollectors.yaml +++ b/config/crd/patches/webhook_in_flowcollectors.yaml @@ -13,6 +13,5 @@ spec: name: webhook-service path: /convert conversionReviewVersions: - - v1alpha1 - v1beta1 - v1beta2 diff --git a/config/csv/bases/netobserv-operator.clusterserviceversion.yaml b/config/csv/bases/netobserv-operator.clusterserviceversion.yaml index ff04f3687..c7fc74589 100644 --- a/config/csv/bases/netobserv-operator.clusterserviceversion.yaml +++ b/config/csv/bases/netobserv-operator.clusterserviceversion.yaml @@ -25,12 +25,6 @@ spec: apiservicedefinitions: {} customresourcedefinitions: owned: - - description: FlowCollector is the Schema for the flowcollectors API, which pilots - and configures netflow collection. - displayName: Flow Collector - kind: FlowCollector - name: flowcollectors.flows.netobserv.io - version: v1alpha1 - description: '`FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments.' 
displayName: Flow Collector diff --git a/config/samples/flows_v1alpha1_flowcollector.yaml b/config/samples/flows_v1alpha1_flowcollector.yaml deleted file mode 100644 index 0f49d5a54..000000000 --- a/config/samples/flows_v1alpha1_flowcollector.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: flows.netobserv.io/v1alpha1 -kind: FlowCollector -metadata: - name: cluster -spec: - deploymentModel: DIRECT diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 7b8b38139..af8c16ab8 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -1,6 +1,5 @@ ## Append samples you want in your CSV to this file as resources ## resources: -- flows_v1alpha1_flowcollector.yaml - flows_v1beta1_flowcollector.yaml - flows_v1beta2_flowcollector.yaml - flows_v1alpha1_flowmetric.yaml diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index 2e3e75fc9..74667b520 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -2,4316 +2,9 @@ Packages: -- [flows.netobserv.io/v1alpha1](#flowsnetobserviov1alpha1) - [flows.netobserv.io/v1beta1](#flowsnetobserviov1beta1) - [flows.netobserv.io/v1beta2](#flowsnetobserviov1beta2) -# flows.netobserv.io/v1alpha1 - -Resource Types: - -- [FlowCollector](#flowcollector) - - - - -## FlowCollector -[↩ Parent](#flowsnetobserviov1alpha1 ) - - - - - - -FlowCollector is the Schema for the flowcollectors API, which pilots and configures netflow collection. - Deprecated: This package will be removed in one of the next releases. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
apiVersionstringflows.netobserv.io/v1alpha1true
kindstringFlowCollectortrue
metadataobjectRefer to the Kubernetes API documentation for the fields of the `metadata` field.true
specobject - FlowCollectorSpec defines the desired state of FlowCollector
-
false
statusobject - FlowCollectorStatus defines the observed state of FlowCollector
-
false
- - -### FlowCollector.spec -[↩ Parent](#flowcollector) - - - -FlowCollectorSpec defines the desired state of FlowCollector - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
agentobject - agent for flows extraction.
-
- Default: map[type:EBPF]
-
true
deploymentModelenum - deploymentModel defines the desired type of deployment for flow processing. Possible values are "DIRECT" (default) to make the flow processor listening directly from the agents, or "KAFKA" to make flows sent to a Kafka pipeline before consumption by the processor. Kafka can provide better scalability, resiliency and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).
-
- Enum: DIRECT, KAFKA
- Default: DIRECT
-
true
consolePluginobject - consolePlugin defines the settings related to the OpenShift Console plugin, when available.
-
false
exporters[]object - exporters defines additional optional exporters for custom consumption or storage. This is an experimental feature. Currently, only KAFKA exporter is available.
-
false
kafkaobject - kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the "spec.deploymentModel" is "KAFKA".
-
false
lokiobject - loki, the flow store, client settings.
-
false
namespacestring - namespace where NetObserv pods are deployed. If empty, the namespace of the operator is going to be used.
-
false
processorobject - processor defines the settings of the component that receives the flows from the agent, enriches them, and forwards them to the Loki persistence layer.
-
false
- - -### FlowCollector.spec.agent -[↩ Parent](#flowcollectorspec) - - - -agent for flows extraction. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
typeenum - type selects the flows tracing agent. Possible values are "EBPF" (default) to use NetObserv eBPF agent, "IPFIX" to use the legacy IPFIX collector. "EBPF" is recommended in most cases as it offers better performances and should work regardless of the CNI installed on the cluster. "IPFIX" works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).
-
- Enum: EBPF, IPFIX
- Default: EBPF
-
true
ebpfobject - ebpf describes the settings related to the eBPF-based flow reporter when the "agent.type" property is set to "EBPF".
-
false
ipfixobject - ipfix describes the settings related to the IPFIX-based flow reporter when the "agent.type" property is set to "IPFIX".
-
false
- - -### FlowCollector.spec.agent.ebpf -[↩ Parent](#flowcollectorspecagent) - - - -ebpf describes the settings related to the eBPF-based flow reporter when the "agent.type" property is set to "EBPF". - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
cacheActiveTimeoutstring - cacheActiveTimeout is the max period during which the reporter will aggregate flows before sending. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection.
-
- Default: 5s
-
false
cacheMaxFlowsinteger - cacheMaxFlows is the max number of flows in an aggregate; when reached, the reporter sends the flows. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection.
-
- Format: int32
- Default: 100000
- Minimum: 1
-
false
debugobject - Debug allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk.
-
false
excludeInterfaces[]string - excludeInterfaces contains the interface names that will be excluded from flow tracing. If an entry is enclosed by slashes (such as `/br-/`), it will match as regular expression, otherwise it will be matched as a case-sensitive string.
-
- Default: [lo]
-
false
imagePullPolicyenum - imagePullPolicy is the Kubernetes pull policy for the image defined above
-
- Enum: IfNotPresent, Always, Never
- Default: IfNotPresent
-
false
interfaces[]string - interfaces contains the interface names from where flows will be collected. If empty, the agent will fetch all the interfaces in the system, excepting the ones listed in ExcludeInterfaces. If an entry is enclosed by slashes (such as `/br-/`), it will match as regular expression, otherwise it will be matched as a case-sensitive string.
-
false
kafkaBatchSizeinteger - kafkaBatchSize limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 1MB.
-
- Default: 1048576
-
false
logLevelenum - logLevel defines the log level for the NetObserv eBPF Agent
-
- Enum: trace, debug, info, warn, error, fatal, panic
- Default: info
-
false
privilegedboolean - privileged mode for the eBPF Agent container. In general this setting can be ignored or set to false: in that case, the operator will set granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, to enable its correct operation. If for some reason these capabilities cannot be set (for example old kernel version not knowing CAP_BPF) then you can turn on this mode for more global privileges.
-
false
resourcesobject - resources are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
-
- Default: map[limits:map[memory:800Mi] requests:map[cpu:100m memory:50Mi]]
-
false
samplinginteger - sampling rate of the flow reporter. 100 means one flow on 100 is sent. 0 or 1 means all flows are sampled.
-
- Format: int32
- Default: 50
- Minimum: 0
-
false
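
For orientation only, since the tables above document the `v1alpha1` schema that this change removes: a minimal sketch of how those eBPF agent fields were expressed in a `FlowCollector` manifest. The values mirror the documented defaults and are illustrative, not a recommendation.

```yaml
apiVersion: flows.netobserv.io/v1alpha1   # deprecated API version, removed by this change
kind: FlowCollector
metadata:
  name: cluster
spec:
  agent:
    type: EBPF                  # default agent type
    ebpf:
      sampling: 50              # one flow out of 50; 0 or 1 samples everything
      cacheMaxFlows: 100000     # flush the aggregate when this many flows are cached...
      cacheActiveTimeout: 5s    # ...or after this period, whichever comes first
      excludeInterfaces: ["lo", "/br-/"]   # plain names, or regexes between slashes
      privileged: false         # false relies on granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE)
```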
- - -### FlowCollector.spec.agent.ebpf.debug -[↩ Parent](#flowcollectorspecagentebpf) - - - -Debug allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk. - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
envmap[string]string - env allows passing custom environment variables to the NetObserv Agent. Useful for passing some very concrete performance-tuning options (such as GOGC, GOMAXPROCS) that shouldn't be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug and support scenarios.
-
false
- - -### FlowCollector.spec.agent.ebpf.resources -[↩ Parent](#flowcollectorspecagentebpf) - - - -resources are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
claims[]object - Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers.
-
false
limitsmap[string]int or string - Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
-
false
requestsmap[string]int or string - Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
-
false
- - -### FlowCollector.spec.agent.ebpf.resources.claims[index] -[↩ Parent](#flowcollectorspecagentebpfresources) - - - -ResourceClaim references one entry in PodSpec.ResourceClaims. - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
namestring - Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
-
true
- - -### FlowCollector.spec.agent.ipfix -[↩ Parent](#flowcollectorspecagent) - - - -ipfix describes the settings related to the IPFIX-based flow reporter when the "agent.type" property is set to "IPFIX". - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
cacheActiveTimeoutstring - cacheActiveTimeout is the max period during which the reporter will aggregate flows before sending
-
- Default: 20s
-
false
cacheMaxFlowsinteger - cacheMaxFlows is the max number of flows in an aggregate; when reached, the reporter sends the flows
-
- Format: int32
- Default: 400
- Minimum: 0
-
false
clusterNetworkOperatorobject - clusterNetworkOperator defines the settings related to the OpenShift Cluster Network Operator, when available.
-
false
forceSampleAllboolean - forceSampleAll allows disabling sampling in the IPFIX-based flow reporter. It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to true. Use at your own risk. When it is set to true, the value of "sampling" is ignored.
-
- Default: false
-
false
ovnKubernetesobject - ovnKubernetes defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.
-
false
samplinginteger - sampling is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to "forceSampleAll". Alternatively, you can use the eBPF Agent instead of IPFIX.
-
- Format: int32
- Default: 400
- Minimum: 2
-
false
- - -### FlowCollector.spec.agent.ipfix.clusterNetworkOperator -[↩ Parent](#flowcollectorspecagentipfix) - - - -clusterNetworkOperator defines the settings related to the OpenShift Cluster Network Operator, when available. - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
namespacestring - namespace where the config map is going to be deployed.
-
- Default: openshift-network-operator
-
false
- - -### FlowCollector.spec.agent.ipfix.ovnKubernetes -[↩ Parent](#flowcollectorspecagentipfix) - - - -ovnKubernetes defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
containerNamestring - containerName defines the name of the container to configure for IPFIX.
-
- Default: ovnkube-node
-
false
daemonSetNamestring - daemonSetName defines the name of the DaemonSet controlling the OVN-Kubernetes pods.
-
- Default: ovnkube-node
-
false
namespacestring - namespace where OVN-Kubernetes pods are deployed.
-
- Default: ovn-kubernetes
-
false
- - -### FlowCollector.spec.consolePlugin -[↩ Parent](#flowcollectorspec) - - - -consolePlugin defines the settings related to the OpenShift Console plugin, when available. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
registerboolean - register allows, when set to true, to automatically register the provided console plugin with the OpenShift Console operator. When set to false, you can still register it manually by editing console.operator.openshift.io/cluster. E.g: oc patch console.operator.openshift.io cluster --type='json' -p '[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]'
-
- Default: true
-
true
autoscalerobject - autoscaler spec of a horizontal pod autoscaler to set up for the plugin Deployment.
-
false
imagePullPolicyenum - imagePullPolicy is the Kubernetes pull policy for the image defined above
-
- Enum: IfNotPresent, Always, Never
- Default: IfNotPresent
-
false
logLevelenum - logLevel for the console plugin backend
-
- Enum: trace, debug, info, warn, error, fatal, panic
- Default: info
-
false
portinteger - port is the plugin service port
-
- Format: int32
- Default: 9001
- Minimum: 1
- Maximum: 65535
-
false
portNamingobject - portNaming defines the configuration of the port-to-service name translation
-
- Default: map[enable:true]
-
false
quickFilters[]object - quickFilters configures quick filter presets for the Console plugin
-
- Default: [map[default:true filter:map[dst_namespace!:openshift-,netobserv src_namespace!:openshift-,netobserv] name:Applications] map[filter:map[dst_namespace:openshift-,netobserv src_namespace:openshift-,netobserv] name:Infrastructure] map[default:true filter:map[dst_kind:Pod src_kind:Pod] name:Pods network] map[filter:map[dst_kind:Service] name:Services network]]
-
false
replicasinteger - replicas defines the number of replicas (pods) to start.
-
- Format: int32
- Default: 1
- Minimum: 0
-
false
resourcesobject - resources, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
-
- Default: map[limits:map[memory:100Mi] requests:map[cpu:100m memory:50Mi]]
-
false
- - -### FlowCollector.spec.consolePlugin.autoscaler -[↩ Parent](#flowcollectorspecconsoleplugin) - - - -autoscaler spec of a horizontal pod autoscaler to set up for the plugin Deployment. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
maxReplicasinteger - maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
-
- Format: int32
- Default: 3
-
false
metrics[]object - metrics used by the pod autoscaler
-
false
minReplicasinteger - minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.
-
- Format: int32
-
false
statusenum - Status describe the desired status regarding deploying an horizontal pod autoscaler DISABLED will not deploy an horizontal pod autoscaler ENABLED will deploy an horizontal pod autoscaler
-
- Enum: DISABLED, ENABLED
- Default: DISABLED
-
false
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index] -[↩ Parent](#flowcollectorspecconsolepluginautoscaler) - - - -MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once). - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
typestring - type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available on when the feature-gate HPAContainerMetrics is enabled
-
true
containerResourceobject - containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
-
false
externalobject - external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).
-
false
objectobject - object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
-
false
podsobject - pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
-
false
resourceobject - resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
-
false
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].containerResource -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex) - - - -containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
containerstring - container is the name of the container in the pods of the scaling target
-
true
namestring - name is the name of the resource in question.
-
true
targetobject - target specifies the target value for the given metric
-
true
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].containerResource.target -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexcontainerresource) - - - -target specifies the target value for the given metric - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
typestring - type represents whether the metric type is Utilization, Value, or AverageValue
-
true
averageUtilizationinteger - averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
-
- Format: int32
-
false
averageValueint or string - averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
-
false
valueint or string - value is the target value of the metric (as a quantity).
-
false
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].external -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex) - - - -external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
metricobject - metric identifies the target metric by name and selector
-
true
targetobject - target specifies the target value for the given metric
-
true
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].external.metric -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexexternal) - - - -metric identifies the target metric by name and selector - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
namestring - name is the name of the given metric
-
true
selectorobject - selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
-
false
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].external.metric.selector -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexexternalmetric) - - - -selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]object - matchExpressions is a list of label selector requirements. The requirements are ANDed.
-
false
matchLabelsmap[string]string - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
-
false
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].external.metric.selector.matchExpressions[index] -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexexternalmetricselector) - - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
keystring - key is the label key that the selector applies to.
-
true
operatorstring - operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
-
true
values[]string - values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
-
false
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].external.target -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexexternal) - - - -target specifies the target value for the given metric - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
typestring - type represents whether the metric type is Utilization, Value, or AverageValue
-
true
averageUtilizationinteger - averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
-
- Format: int32
-
false
averageValueint or string - averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
-
false
valueint or string - value is the target value of the metric (as a quantity).
-
false
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex) - - - -object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object). - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
describedObjectobject - describedObject specifies the descriptions of a object,such as kind,name apiVersion
-
true
metricobject - metric identifies the target metric by name and selector
-
true
targetobject - target specifies the target value for the given metric
-
true
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object.describedObject -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobject) - - - -describedObject specifies the descriptions of a object,such as kind,name apiVersion - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
kindstring - kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
-
true
namestring - name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-
true
apiVersionstring - apiVersion is the API version of the referent
-
false
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object.metric -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobject) - - - -metric identifies the target metric by name and selector - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
namestring - name is the name of the given metric
-
true
selectorobject - selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
-
false
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object.metric.selector -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobjectmetric) - - - -selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]object - matchExpressions is a list of label selector requirements. The requirements are ANDed.
-
false
matchLabelsmap[string]string - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
-
false
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object.metric.selector.matchExpressions[index] -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobjectmetricselector) - - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
keystring - key is the label key that the selector applies to.
-
true
operatorstring - operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
-
true
values[]string - values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
-
false
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object.target -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobject) - - - -target specifies the target value for the given metric - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
typestring - type represents whether the metric type is Utilization, Value, or AverageValue
-
true
averageUtilizationinteger - averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
-
- Format: int32
-
false
averageValueint or string - averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
-
false
valueint or string - value is the target value of the metric (as a quantity).
-
false
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].pods -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex) - - - -pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
metricobject - metric identifies the target metric by name and selector
-
true
targetobject - target specifies the target value for the given metric
-
true
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].pods.metric -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexpods) - - - -metric identifies the target metric by name and selector - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
namestring - name is the name of the given metric
-
true
selectorobject - selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
-
false
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].pods.metric.selector -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexpodsmetric) - - - -selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
matchExpressions[]object - matchExpressions is a list of label selector requirements. The requirements are ANDed.
-
false
matchLabelsmap[string]string - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
-
false
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].pods.metric.selector.matchExpressions[index] -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexpodsmetricselector) - - - -A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
keystring - key is the label key that the selector applies to.
-
true
operatorstring - operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
-
true
values[]string - values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
-
false
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].pods.target -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexpods) - - - -target specifies the target value for the given metric - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
typestring - type represents whether the metric type is Utilization, Value, or AverageValue
-
true
averageUtilizationinteger - averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
-
- Format: int32
-
false
averageValueint or string - averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
-
false
valueint or string - value is the target value of the metric (as a quantity).
-
false
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].resource -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex) - - - -resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
namestring - name is the name of the resource in question.
-
true
targetobject - target specifies the target value for the given metric
-
true
- - -### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].resource.target -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexresource) - - - -target specifies the target value for the given metric - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
typestring - type represents whether the metric type is Utilization, Value, or AverageValue
-
true
averageUtilizationinteger - averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
-
- Format: int32
-
false
averageValueint or string - averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
-
false
valueint or string - value is the target value of the metric (as a quantity).
-
false
- - -### FlowCollector.spec.consolePlugin.portNaming -[↩ Parent](#flowcollectorspecconsoleplugin) - - - -portNaming defines the configuration of the port-to-service name translation - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
enableboolean - enable the console plugin port-to-service name translation
-
- Default: true
-
false
portNamesmap[string]string - portNames defines additional port names to use in the console. Example: portNames: {"3100": "loki"}
-
false
- - -### FlowCollector.spec.consolePlugin.quickFilters[index] -[↩ Parent](#flowcollectorspecconsoleplugin) - - - -QuickFilter defines preset configuration for Console's quick filters - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
filtermap[string]string - filter is a set of keys and values to be set when this filter is selected. Each key can relate to a list of values using a coma-separated string. Example: filter: {"src_namespace": "namespace1,namespace2"}
-
true
namestring - name of the filter, that will be displayed in Console
-
true
defaultboolean - default defines whether this filter should be active by default or not
-
false
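
To make the quick filter fields above concrete, a sketch in the same removed `v1alpha1` schema, reproducing two of the documented default presets; each key in `filter` maps to a comma-separated list of values, and the `!`-suffixed keys appear exactly as in the documented defaults.

```yaml
spec:
  consolePlugin:
    quickFilters:
    - name: Applications                        # label shown in the Console
      default: true                             # active unless the user clears it
      filter:                                   # each key maps to a comma-separated value list
        src_namespace!: "openshift-,netobserv"
        dst_namespace!: "openshift-,netobserv"
    - name: Infrastructure
      filter:
        src_namespace: "openshift-,netobserv"
        dst_namespace: "openshift-,netobserv"
```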
- - -### FlowCollector.spec.consolePlugin.resources -[↩ Parent](#flowcollectorspecconsoleplugin) - - - -resources, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
claims[]object - Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers.
-
false
limitsmap[string]int or string - Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
-
false
requestsmap[string]int or string - Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
-
false
- - -### FlowCollector.spec.consolePlugin.resources.claims[index] -[↩ Parent](#flowcollectorspecconsolepluginresources) - - - -ResourceClaim references one entry in PodSpec.ResourceClaims. - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
namestring - Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
-
true
- - -### FlowCollector.spec.exporters[index] -[↩ Parent](#flowcollectorspec) - - - -FlowCollectorExporter defines an additional exporter to send enriched flows to - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
typeenum - `type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`.
-
- Enum: KAFKA, IPFIX
-
true
ipfixobject - IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to.
-
false
kafkaobject - kafka configuration, such as address or topic, to send enriched flows to.
-
false
- - -### FlowCollector.spec.exporters[index].ipfix -[↩ Parent](#flowcollectorspecexportersindex) - - - -IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
targetHoststring - Address of the IPFIX external receiver
-
- Default:
-
true
targetPortinteger - Port for the IPFIX external receiver
-
true
transportenum - Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`.
-
- Enum: TCP, UDP
-
false
- - -### FlowCollector.spec.exporters[index].kafka -[↩ Parent](#flowcollectorspecexportersindex) - - - -kafka configuration, such as address or topic, to send enriched flows to. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
addressstring - address of the Kafka server
-
- Default:
-
true
topicstring - kafka topic to use. It must exist, NetObserv will not create it.
-
- Default:
-
true
saslobject - SASL authentication configuration. [Unsupported (*)].
-
false
tlsobject - tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, Kafka certificate needs to be copied in the agent namespace (by default it's netobserv-privileged).
-
false
- - -### FlowCollector.spec.exporters[index].kafka.sasl -[↩ Parent](#flowcollectorspecexportersindexkafka) - - - -SASL authentication configuration. [Unsupported (*)]. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
clientIDReferenceobject - Reference to the secret or config map containing the client ID
-
false
clientSecretReferenceobject - Reference to the secret or config map containing the client secret
-
false
typeenum - Type of SASL authentication to use, or `DISABLED` if SASL is not used
-
- Enum: DISABLED, PLAIN, SCRAM-SHA512
- Default: DISABLED
-
false
- - -### FlowCollector.spec.exporters[index].kafka.sasl.clientIDReference -[↩ Parent](#flowcollectorspecexportersindexkafkasasl) - - - -Reference to the secret or config map containing the client ID - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
filestring - File name within the config map or secret
-
false
namestring - Name of the config map or secret containing the file
-
false
namespacestring - Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
-
- Default:
-
false
typeenum - Type for the file reference: "configmap" or "secret"
-
- Enum: configmap, secret
-
false
- - -### FlowCollector.spec.exporters[index].kafka.sasl.clientSecretReference -[↩ Parent](#flowcollectorspecexportersindexkafkasasl) - - - -Reference to the secret or config map containing the client secret - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
filestring - File name within the config map or secret
-
false
namestring - Name of the config map or secret containing the file
-
false
namespacestring - Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
-
- Default:
-
false
typeenum - Type for the file reference: "configmap" or "secret"
-
- Enum: configmap, secret
-
false
- - -### FlowCollector.spec.exporters[index].kafka.tls -[↩ Parent](#flowcollectorspecexportersindexkafka) - - - -tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, Kafka certificate needs to be copied in the agent namespace (by default it's netobserv-privileged). - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
caCertobject - caCert defines the reference of the certificate for the Certificate Authority
-
false
enableboolean - enable TLS
-
- Default: false
-
false
insecureSkipVerifyboolean - insecureSkipVerify allows skipping client-side verification of the server certificate If set to true, CACert field will be ignored
-
- Default: false
-
false
userCertobject - userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS)
-
false
- - -### FlowCollector.spec.exporters[index].kafka.tls.caCert -[↩ Parent](#flowcollectorspecexportersindexkafkatls) - - - -caCert defines the reference of the certificate for the Certificate Authority - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
certFilestring - certFile defines the path to the certificate file name within the config map or secret
-
false
certKeystring - certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
-
false
namestring - name of the config map or secret containing certificates
-
false
namespacestring - namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
-
- Default:
-
false
typeenum - type for the certificate reference: "configmap" or "secret"
-
- Enum: configmap, secret
-
false
- - -### FlowCollector.spec.exporters[index].kafka.tls.userCert -[↩ Parent](#flowcollectorspecexportersindexkafkatls) - - - -userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
certFilestring - certFile defines the path to the certificate file name within the config map or secret
-
false
certKeystring - certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
-
false
namestring - name of the config map or secret containing certificates
-
false
namespacestring - namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
-
- Default:
-
false
typeenum - type for the certificate reference: "configmap" or "secret"
-
- Enum: configmap, secret
-
false
- - -### FlowCollector.spec.kafka -[↩ Parent](#flowcollectorspec) - - - -kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the "spec.deploymentModel" is "KAFKA". - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
addressstring - address of the Kafka server
-
- Default:
-
true
topicstring - kafka topic to use. It must exist, NetObserv will not create it.
-
- Default:
-
true
saslobject - SASL authentication configuration. [Unsupported (*)].
-
false
tlsobject - tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, Kafka certificate needs to be copied in the agent namespace (by default it's netobserv-privileged).
-
false
- - -### FlowCollector.spec.kafka.sasl -[↩ Parent](#flowcollectorspeckafka) - - - -SASL authentication configuration. [Unsupported (*)]. - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
clientIDReferenceobject - Reference to the secret or config map containing the client ID
-
false
clientSecretReferenceobject - Reference to the secret or config map containing the client secret
-
false
typeenum - Type of SASL authentication to use, or `DISABLED` if SASL is not used
-
- Enum: DISABLED, PLAIN, SCRAM-SHA512
- Default: DISABLED
-
false
- - -### FlowCollector.spec.kafka.sasl.clientIDReference -[↩ Parent](#flowcollectorspeckafkasasl) - - - -Reference to the secret or config map containing the client ID - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
filestring - File name within the config map or secret
-
false
namestring - Name of the config map or secret containing the file
-
false
namespacestring - Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
-
- Default:
-
false
typeenum - Type for the file reference: "configmap" or "secret"
-
- Enum: configmap, secret
-
false
- - -### FlowCollector.spec.kafka.sasl.clientSecretReference -[↩ Parent](#flowcollectorspeckafkasasl) - - - -Reference to the secret or config map containing the client secret - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
filestring - File name within the config map or secret
-
false
namestring - Name of the config map or secret containing the file
-
false
namespacestring - Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
-
- Default:
-
false
typeenum - Type for the file reference: "configmap" or "secret"
-
- Enum: configmap, secret
-
false
- - -### FlowCollector.spec.kafka.tls -[↩ Parent](#flowcollectorspeckafka) - - - -tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, Kafka certificate needs to be copied in the agent namespace (by default it's netobserv-privileged). - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
caCertobject - caCert defines the reference of the certificate for the Certificate Authority
-
false
enableboolean - enable TLS
-
- Default: false
-
false
insecureSkipVerifyboolean - insecureSkipVerify allows skipping client-side verification of the server certificate If set to true, CACert field will be ignored
-
- Default: false
-
false
userCertobject - userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS)
-
false
- - -### FlowCollector.spec.kafka.tls.caCert -[↩ Parent](#flowcollectorspeckafkatls) - - - -caCert defines the reference of the certificate for the Certificate Authority - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
certFilestring - certFile defines the path to the certificate file name within the config map or secret
-
false
certKeystring - certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
-
false
namestring - name of the config map or secret containing certificates
-
false
namespacestring - namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
-
- Default:
-
false
typeenum - type for the certificate reference: "configmap" or "secret"
-
- Enum: configmap, secret
-
false
- - -### FlowCollector.spec.kafka.tls.userCert -[↩ Parent](#flowcollectorspeckafkatls) - - - -userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
certFilestring - certFile defines the path to the certificate file name within the config map or secret
-
false
certKeystring - certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
-
false
namestring - name of the config map or secret containing certificates
-
false
namespacestring - namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
-
- Default:
-
false
typeenum - type for the certificate reference: "configmap" or "secret"
-
- Enum: configmap, secret
-
false
- - -### FlowCollector.spec.loki -[↩ Parent](#flowcollectorspec) - - - -loki, the flow store, client settings. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
authTokenenum - AuthToken describe the way to get a token to authenticate to Loki. DISABLED will not send any token with the request. HOST will use the local pod service account to authenticate to Loki. FORWARD will forward user token, in this mode, pod that are not receiving user request like the processor will use the local pod service account. Similar to HOST mode. When using the Loki Operator, set it to `HOST` or `FORWARD`.
-
- Enum: DISABLED, HOST, FORWARD
- Default: DISABLED
-
false
batchSizeinteger - batchSize is max batch size (in bytes) of logs to accumulate before sending.
-
- Format: int64
- Default: 102400
- Minimum: 1
-
false
batchWaitstring - batchWait is max time to wait before sending a batch.
-
- Default: 1s
-
false
maxBackoffstring - maxBackoff is the maximum backoff time for client connection between retries.
-
- Default: 5s
-
false
maxRetriesinteger - maxRetries is the maximum number of retries for client connections.
-
- Format: int32
- Default: 2
- Minimum: 0
-
false
minBackoffstring - minBackoff is the initial backoff time for client connection between retries.
-
- Default: 1s
-
false
querierUrlstring - querierURL specifies the address of the Loki querier service, in case it is different from the Loki ingester URL. If empty, the URL value will be used (assuming that the Loki ingester and querier are in the same server). When using the Loki Operator, do not set it, since ingestion and queries use the Loki gateway.
-
false
staticLabelsmap[string]string - staticLabels is a map of common labels to set on each flow.
-
- Default: map[app:netobserv-flowcollector]
-
false
statusUrlstring - statusURL specifies the address of the Loki /ready /metrics /config endpoints, in case it is different from the Loki querier URL. If empty, the QuerierURL value will be used. This is useful to show error messages and some context in the frontend. When using the Loki Operator, set it to the Loki HTTP query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/.
-
false
tenantIDstring - tenantID is the Loki X-Scope-OrgID that identifies the tenant for each request. When using the Loki Operator, set it to `network`, which corresponds to a special tenant mode.
-
- Default: netobserv
-
false
timeoutstring - timeout is the maximum time connection / request limit. A Timeout of zero means no timeout.
-
- Default: 10s
-
false
tlsobject - tls client configuration.
-
false
urlstring - url is the address of an existing Loki service to push the flows to. When using the Loki Operator, set it to the Loki gateway service with the `network` tenant set in path, for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network.
-
- Default: http://loki:3100/
-
false
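
The Loki fields above are easiest to read together, so here is a sketch of a Loki-Operator-oriented configuration (removed `v1alpha1` schema), using the endpoint and tenant values quoted in the field descriptions. The certificate reference names are placeholders, not values taken from this repository.

```yaml
spec:
  loki:
    # Loki Operator setup, as described above: gateway URL with the `network` tenant in the path
    url: https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network
    statusUrl: https://loki-query-frontend-http.netobserv.svc:3100/
    tenantID: network           # special tenant mode when using the Loki Operator
    authToken: HOST             # or FORWARD; DISABLED sends no token at all
    batchWait: 1s               # documented default
    batchSize: 102400           # bytes accumulated before a push (documented default)
    tls:
      enable: true
      caCert:                   # placeholder names for the CA bundle reference
        type: configmap
        name: loki-ca-bundle
        certFile: service-ca.crt
```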
- - -### FlowCollector.spec.loki.tls -[↩ Parent](#flowcollectorspecloki) - - - -tls client configuration. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
caCertobject - caCert defines the reference of the certificate for the Certificate Authority
-
false
enableboolean - enable TLS
-
- Default: false
-
false
insecureSkipVerifyboolean - insecureSkipVerify allows skipping client-side verification of the server certificate If set to true, CACert field will be ignored
-
- Default: false
-
false
userCertobject - userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS)
-
false
- - -### FlowCollector.spec.loki.tls.caCert -[↩ Parent](#flowcollectorspeclokitls) - - - -caCert defines the reference of the certificate for the Certificate Authority - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
certFilestring - certFile defines the path to the certificate file name within the config map or secret
-
false
certKeystring - certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
-
false
namestring - name of the config map or secret containing certificates
-
false
namespacestring - namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
-
- Default:
-
false
typeenum - type for the certificate reference: "configmap" or "secret"
-
- Enum: configmap, secret
-
false
- - -### FlowCollector.spec.loki.tls.userCert -[↩ Parent](#flowcollectorspeclokitls) - - - -userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
certFilestring - certFile defines the path to the certificate file name within the config map or secret
-
false
certKeystring - certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
-
false
namestring - name of the config map or secret containing certificates
-
false
namespacestring - namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
-
- Default:
-
false
typeenum - type for the certificate reference: "configmap" or "secret"
-
- Enum: configmap, secret
-
false
### FlowCollector.spec.processor
[↩ Parent](#flowcollectorspec)

processor defines the settings of the component that receives the flows from the agent, enriches them, and forwards them to the Loki persistence layer.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| debug | object | Debug allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk. | false |
| dropUnusedFields | boolean | dropUnusedFields allows, when set to true, to drop fields that are known to be unused by OVS, in order to save storage space. Default: true | false |
| enableKubeProbes | boolean | enableKubeProbes is a flag to enable or disable Kubernetes liveness and readiness probes. Default: true | false |
| healthPort | integer | healthPort is a collector HTTP port in the Pod that exposes the health check API. Format: int32. Default: 8080. Minimum: 1. Maximum: 65535 | false |
| imagePullPolicy | enum | imagePullPolicy is the Kubernetes pull policy for the image defined above. Enum: IfNotPresent, Always, Never. Default: IfNotPresent | false |
| kafkaConsumerAutoscaler | object | kafkaConsumerAutoscaler is the spec of a horizontal pod autoscaler to set up for flowlogs-pipeline-transformer, which consumes Kafka messages. This setting is ignored when Kafka is disabled. | false |
| kafkaConsumerBatchSize | integer | kafkaConsumerBatchSize indicates to the broker the maximum batch size, in bytes, that the consumer will accept. Ignored when not using Kafka. Default: 10MB (10485760). | false |
| kafkaConsumerQueueCapacity | integer | kafkaConsumerQueueCapacity defines the capacity of the internal message queue used in the Kafka consumer client. Ignored when not using Kafka. Default: 1000 | false |
| kafkaConsumerReplicas | integer | kafkaConsumerReplicas defines the number of replicas (pods) to start for flowlogs-pipeline-transformer, which consumes Kafka messages. This setting is ignored when Kafka is disabled. Format: int32. Default: 3. Minimum: 0 | false |
| logLevel | enum | logLevel of the collector runtime. Enum: trace, debug, info, warn, error, fatal, panic. Default: info | false |
| metrics | object | Metrics define the processor configuration regarding metrics | false |
| port | integer | port of the flow collector (host port). By convention, some values are not authorized: the port must not be below 1024 and must not equal these values: 4789, 6081, 500, and 4500. Format: int32. Default: 2055. Minimum: 1025. Maximum: 65535 | false |
| profilePort | integer | profilePort allows setting up a Go pprof profiler listening to this port. Format: int32. Minimum: 0. Maximum: 65535 | false |
| resources | object | resources are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ Default: map[limits:map[memory:800Mi] requests:map[cpu:100m memory:100Mi]] | false |

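
As a sketch, the most common processor settings described above can be set as follows; the values shown simply mirror the documented defaults:

```yaml
# Illustrative snippet: values mirror the documented defaults.
spec:
  processor:
    port: 2055
    healthPort: 8080
    logLevel: info
    imagePullPolicy: IfNotPresent
    dropUnusedFields: true
    kafkaConsumerReplicas: 3
```
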
### FlowCollector.spec.processor.debug
[↩ Parent](#flowcollectorspecprocessor)

Debug allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| env | map[string]string | env allows passing custom environment variables to the NetObserv Agent. Useful for passing some very concrete performance-tuning options (such as GOGC, GOMAXPROCS) that shouldn't be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug and support scenarios. | false |

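
A minimal sketch of this section, assuming `GOGC` and `GOMAXPROCS` are the variables being tuned; the values are arbitrary examples, not recommendations:

```yaml
# Illustrative values only: GOGC/GOMAXPROCS settings are examples, not recommendations.
spec:
  processor:
    debug:
      env:
        GOGC: "400"
        GOMAXPROCS: "4"
```
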
### FlowCollector.spec.processor.kafkaConsumerAutoscaler
[↩ Parent](#flowcollectorspecprocessor)

kafkaConsumerAutoscaler is the spec of a horizontal pod autoscaler to set up for flowlogs-pipeline-transformer, which consumes Kafka messages. This setting is ignored when Kafka is disabled.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| maxReplicas | integer | maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. Format: int32. Default: 3 | false |
| metrics | []object | metrics used by the pod autoscaler | false |
| minReplicas | integer | minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available. Format: int32 | false |
| status | enum | Status describes the desired status regarding deploying a horizontal pod autoscaler: DISABLED will not deploy a horizontal pod autoscaler; ENABLED will deploy one. Enum: DISABLED, ENABLED. Default: DISABLED | false |

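
For illustration, enabling the autoscaler (note that `ENABLED` is not the default) with illustrative replica bounds could look like the following; the metric entries themselves are described in the next sections:

```yaml
# Illustrative values only; metrics entries are shown in the sections below.
spec:
  processor:
    kafkaConsumerAutoscaler:
      status: ENABLED
      minReplicas: 1
      maxReplicas: 3
```
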
### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index]
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscaler)

MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| type | string | type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: the "ContainerResource" type is available only when the feature-gate HPAContainerMetrics is enabled. | true |
| containerResource | object | containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. | false |
| external | object | external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). | false |
| object | object | object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object). | false |
| pods | object | pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value. | false |
| resource | object | resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. | false |

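
As a sketch, a `Resource`-type entry under `spec.processor.kafkaConsumerAutoscaler.metrics` targeting average CPU utilization could look like the following; the 80% target is an arbitrary example:

```yaml
# Illustrative entry: the 80% utilization target is an arbitrary example.
metrics:
- type: Resource
  resource:
    name: cpu
    target:
      type: Utilization
      averageUtilization: 80
```
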
### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].containerResource
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex)

containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| container | string | container is the name of the container in the pods of the scaling target | true |
| name | string | name is the name of the resource in question. | true |
| target | object | target specifies the target value for the given metric | true |

### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].containerResource.target
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexcontainerresource)

target specifies the target value for the given metric.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| type | string | type represents whether the metric type is Utilization, Value, or AverageValue | true |
| averageUtilization | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type. Format: int32 | false |
| averageValue | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity) | false |
| value | int or string | value is the target value of the metric (as a quantity). | false |

### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].external
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex)

external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| metric | object | metric identifies the target metric by name and selector | true |
| target | object | target specifies the target value for the given metric | true |

### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].external.metric
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexexternal)

metric identifies the target metric by name and selector.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| name | string | name is the name of the given metric | true |
| selector | object | selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. | false |

### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].external.metric.selector
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexexternalmetric)

selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].external.metric.selector.matchExpressions[index]
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexexternalmetricselector)

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].external.target
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexexternal)

target specifies the target value for the given metric.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| type | string | type represents whether the metric type is Utilization, Value, or AverageValue | true |
| averageUtilization | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type. Format: int32 | false |
| averageValue | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity) | false |
| value | int or string | value is the target value of the metric (as a quantity). | false |

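
For illustration, an `External`-type entry might look like the following sketch; the metric name, labels and target value are placeholders for whatever your external metrics server actually exposes:

```yaml
# Illustrative entry: metric name, labels and target are placeholders.
metrics:
- type: External
  external:
    metric:
      name: kafka_consumergroup_lag
      selector:
        matchLabels:
          topic: network-flows
    target:
      type: AverageValue
      averageValue: "100"
```
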
### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex)

object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| describedObject | object | describedObject specifies the description of the referenced object, such as kind, name, apiVersion | true |
| metric | object | metric identifies the target metric by name and selector | true |
| target | object | target specifies the target value for the given metric | true |

### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object.describedObject
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobject)

describedObject specifies the description of the referenced object, such as kind, name, apiVersion.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| kind | string | kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | true |
| name | string | name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | true |
| apiVersion | string | apiVersion is the API version of the referent | false |

### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object.metric
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobject)

metric identifies the target metric by name and selector.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| name | string | name is the name of the given metric | true |
| selector | object | selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. | false |

### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object.metric.selector
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobjectmetric)

selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object.metric.selector.matchExpressions[index]
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobjectmetricselector)

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object.target
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobject)

target specifies the target value for the given metric.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| type | string | type represents whether the metric type is Utilization, Value, or AverageValue | true |
| averageUtilization | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type. Format: int32 | false |
| averageValue | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity) | false |
| value | int or string | value is the target value of the metric (as a quantity). | false |

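
For illustration, an `Object`-type entry, reusing the hits-per-second-on-an-Ingress example from the description above, might look like this; the object name, metric name and target are placeholders:

```yaml
# Illustrative entry: Ingress name, metric name and target are placeholders.
metrics:
- type: Object
  object:
    describedObject:
      apiVersion: networking.k8s.io/v1
      kind: Ingress
      name: main-ingress
    metric:
      name: requests-per-second
    target:
      type: Value
      value: "2k"
```
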
### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].pods
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex)

pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| metric | object | metric identifies the target metric by name and selector | true |
| target | object | target specifies the target value for the given metric | true |

### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].pods.metric
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexpods)

metric identifies the target metric by name and selector.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| name | string | name is the name of the given metric | true |
| selector | object | selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. | false |

### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].pods.metric.selector
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexpodsmetric)

selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| matchExpressions | []object | matchExpressions is a list of label selector requirements. The requirements are ANDed. | false |
| matchLabels | map[string]string | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. | false |

### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].pods.metric.selector.matchExpressions[index]
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexpodsmetricselector)

A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| key | string | key is the label key that the selector applies to. | true |
| operator | string | operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. | true |
| values | []string | values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. | false |

### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].pods.target
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexpods)

target specifies the target value for the given metric.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| type | string | type represents whether the metric type is Utilization, Value, or AverageValue | true |
| averageUtilization | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type. Format: int32 | false |
| averageValue | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity) | false |
| value | int or string | value is the target value of the metric (as a quantity). | false |

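
A `Pods`-type entry could look like the following sketch; the metric name and target are placeholders:

```yaml
# Illustrative entry: metric name and target are placeholders.
metrics:
- type: Pods
  pods:
    metric:
      name: transactions_processed_per_second
    target:
      type: AverageValue
      averageValue: "1k"
```
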
### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].resource
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex)

resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| name | string | name is the name of the resource in question. | true |
| target | object | target specifies the target value for the given metric | true |

### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].resource.target
[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexresource)

target specifies the target value for the given metric.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| type | string | type represents whether the metric type is Utilization, Value, or AverageValue | true |
| averageUtilization | integer | averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type. Format: int32 | false |
| averageValue | int or string | averageValue is the target value of the average of the metric across all relevant pods (as a quantity) | false |
| value | int or string | value is the target value of the metric (as a quantity). | false |

### FlowCollector.spec.processor.metrics
[↩ Parent](#flowcollectorspecprocessor)

Metrics define the processor configuration regarding metrics.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| ignoreTags | []string | ignoreTags is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: egress, ingress, flows, bytes, packets, namespaces, nodes, workloads. Default: [egress packets] | false |
| server | object | metricsServer endpoint configuration for Prometheus scraper | false |

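
For illustration, the documented default for `ignoreTags` corresponds to the following snippet:

```yaml
# Illustrative snippet: this mirrors the documented default ignoreTags value.
spec:
  processor:
    metrics:
      ignoreTags:
      - egress
      - packets
```
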
### FlowCollector.spec.processor.metrics.server
[↩ Parent](#flowcollectorspecprocessormetrics)

metricsServer endpoint configuration for Prometheus scraper.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| port | integer | the prometheus HTTP port. Format: int32. Default: 9102. Minimum: 1. Maximum: 65535 | false |
| tls | object | TLS configuration. | false |

### FlowCollector.spec.processor.metrics.server.tls
[↩ Parent](#flowcollectorspecprocessormetricsserver)

TLS configuration.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| provided | object | TLS configuration provided by the user, when `type` is "PROVIDED". | false |
| type | enum | Select the type of TLS configuration: "DISABLED" (default) to not configure TLS for the endpoint, "PROVIDED" to manually provide a cert file and a key file, and "AUTO" to use the OpenShift auto-generated certificate using annotations. Enum: DISABLED, PROVIDED, AUTO. Default: DISABLED | false |

### FlowCollector.spec.processor.metrics.server.tls.provided
[↩ Parent](#flowcollectorspecprocessormetricsservertls)

TLS configuration provided by the user.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| certFile | string | certFile defines the path to the certificate file name within the config map or secret | false |
| certKey | string | certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. | false |
| name | string | name of the config map or secret containing certificates | false |
| namespace | string | namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. Default: "" | false |
| type | enum | type for the certificate reference: "configmap" or "secret". Enum: configmap, secret | false |

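
As a sketch, exposing the metrics server with a manually provided certificate might look like the following; the secret name and file names are placeholders:

```yaml
# Illustrative values only: secret name and file names are placeholders.
spec:
  processor:
    metrics:
      server:
        port: 9102
        tls:
          type: PROVIDED
          provided:
            type: secret
            name: flp-metrics-cert
            certFile: tls.crt
            certKey: tls.key
```
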
### FlowCollector.spec.processor.resources
[↩ Parent](#flowcollectorspecprocessor)

resources are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| claims | []object | Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. | false |
| limits | map[string]int or string | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |
| requests | map[string]int or string | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | false |

### FlowCollector.spec.processor.resources.claims[index]
[↩ Parent](#flowcollectorspecprocessorresources)

ResourceClaim references one entry in PodSpec.ResourceClaims.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| name | string | Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. | true |

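
For illustration, the documented default resources for the processor correspond to the following snippet:

```yaml
# Illustrative snippet: values mirror the documented default resources.
spec:
  processor:
    resources:
      requests:
        cpu: 100m
        memory: 100Mi
      limits:
        memory: 800Mi
```
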
### FlowCollector.status
[↩ Parent](#flowcollector)

FlowCollectorStatus defines the observed state of FlowCollector.

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| conditions | []object | conditions represent the latest available observations of an object's state | true |
| namespace | string | namespace where console plugin and flowlogs-pipeline have been deployed. | false |

### FlowCollector.status.conditions[index]
[↩ Parent](#flowcollectorstatus)

Condition contains details for one aspect of the current state of this API Resource. This struct is intended for direct use as an array at the field path .status.conditions. For example: type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` // other fields }

| Name | Type | Description | Required |
| --- | --- | --- | --- |
| lastTransitionTime | string | lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. Format: date-time | true |
| message | string | message is a human readable message indicating details about the transition. This may be an empty string. | true |
| reason | string | reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. | true |
| status | enum | status of the condition, one of True, False, Unknown. Enum: True, False, Unknown | true |
| type | string | type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) | true |
| observedGeneration | integer | observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. Format: int64. Minimum: 0 | false |

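
For illustration only, a populated status could look like the following; the condition type and values are made-up examples, not actual operator output:

```yaml
# Illustrative status snippet: condition type and values are examples only.
status:
  namespace: netobserv
  conditions:
  - type: Ready
    status: "True"
    reason: Ready
    message: ""
    lastTransitionTime: "2023-01-01T00:00:00Z"
    observedGeneration: 2
```
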
- # flows.netobserv.io/v1beta1 Resource Types: @@ -4358,14 +51,14 @@ Resource Types: Refer to the Kubernetes API documentation for the fields of the `metadata` field. true - spec + spec object Defines the desired state of the FlowCollector resource.

*: the mention of "unsupported", or "deprecated" for a feature throughout this document means that this feature is not officially supported by Red Hat. It might have been, for example, contributed by the community and accepted without a formal agreement for maintenance. The product maintainers might provide some support for these features as a best effort only.
false - status + status object `FlowCollectorStatus` defines the observed state of FlowCollector
@@ -4376,7 +69,7 @@ Resource Types: ### FlowCollector.spec -[↩ Parent](#flowcollector-1) +[↩ Parent](#flowcollector) @@ -4392,14 +85,14 @@ Defines the desired state of the FlowCollector resource.

*: the mention - agent + agent object Agent configuration for flows extraction.
false - consolePlugin + consolePlugin object `consolePlugin` defines the settings related to the OpenShift Console plugin, when available.
@@ -4416,21 +109,21 @@ Defines the desired state of the FlowCollector resource.

*: the mention false - exporters + exporters []object `exporters` define additional optional exporters for custom consumption or storage.
false - kafka + kafka object Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `KAFKA`.
false - loki + loki object `loki`, the flow store, client settings.
@@ -4446,7 +139,7 @@ Defines the desired state of the FlowCollector resource.

*: the mention false - processor + processor object `processor` defines the settings of the component that receives the flows from the agent, enriches them, generates metrics, and forwards them to the Loki persistence layer and/or any available exporter.
@@ -4457,7 +150,7 @@ Defines the desired state of the FlowCollector resource.

*: the mention ### FlowCollector.spec.agent -[↩ Parent](#flowcollectorspec-1) +[↩ Parent](#flowcollectorspec) @@ -4473,14 +166,14 @@ Agent configuration for flows extraction. - ebpf + ebpf object `ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `EBPF`.
false - ipfix + ipfix object `ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`.
@@ -4501,7 +194,7 @@ Agent configuration for flows extraction. ### FlowCollector.spec.agent.ebpf -[↩ Parent](#flowcollectorspecagent-1) +[↩ Parent](#flowcollectorspecagent) @@ -4537,7 +230,7 @@ Agent configuration for flows extraction. false - debug + debug object `debug` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk.
@@ -4603,7 +296,7 @@ Agent configuration for flows extraction. false - resources + resources object `resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
@@ -4627,7 +320,7 @@ Agent configuration for flows extraction. ### FlowCollector.spec.agent.ebpf.debug -[↩ Parent](#flowcollectorspecagentebpf-1) +[↩ Parent](#flowcollectorspecagentebpf) @@ -4654,7 +347,7 @@ Agent configuration for flows extraction. ### FlowCollector.spec.agent.ebpf.resources -[↩ Parent](#flowcollectorspecagentebpf-1) +[↩ Parent](#flowcollectorspecagentebpf) @@ -4670,7 +363,7 @@ Agent configuration for flows extraction. - claims + claims []object Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. @@ -4697,7 +390,7 @@ Agent configuration for flows extraction. ### FlowCollector.spec.agent.ebpf.resources.claims[index] -[↩ Parent](#flowcollectorspecagentebpfresources-1) +[↩ Parent](#flowcollectorspecagentebpfresources) @@ -4724,7 +417,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. ### FlowCollector.spec.agent.ipfix -[↩ Parent](#flowcollectorspecagent-1) +[↩ Parent](#flowcollectorspecagent) @@ -4760,7 +453,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. false - clusterNetworkOperator + clusterNetworkOperator object `clusterNetworkOperator` defines the settings related to the OpenShift Cluster Network Operator, when available.
@@ -4776,7 +469,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. false - ovnKubernetes + ovnKubernetes object `ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.
@@ -4798,7 +491,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. ### FlowCollector.spec.agent.ipfix.clusterNetworkOperator -[↩ Parent](#flowcollectorspecagentipfix-1) +[↩ Parent](#flowcollectorspecagentipfix) @@ -4827,7 +520,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. ### FlowCollector.spec.agent.ipfix.ovnKubernetes -[↩ Parent](#flowcollectorspecagentipfix-1) +[↩ Parent](#flowcollectorspecagentipfix) @@ -4874,7 +567,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. ### FlowCollector.spec.consolePlugin -[↩ Parent](#flowcollectorspec-1) +[↩ Parent](#flowcollectorspec) @@ -4890,7 +583,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. - autoscaler + autoscaler object `autoscaler` spec of a horizontal pod autoscaler to set up for the plugin Deployment.
@@ -4938,7 +631,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. false - portNaming + portNaming object `portNaming` defines the configuration of the port-to-service name translation
@@ -4947,7 +640,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. false - quickFilters + quickFilters []object `quickFilters` configures quick filter presets for the Console plugin
@@ -4976,7 +669,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. false - resources + resources object `resources`, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
@@ -4989,7 +682,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. ### FlowCollector.spec.consolePlugin.autoscaler -[↩ Parent](#flowcollectorspecconsoleplugin-1) +[↩ Parent](#flowcollectorspecconsoleplugin) @@ -5015,7 +708,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. false - metrics + metrics []object Metrics used by the pod autoscaler
@@ -5045,7 +738,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index] -[↩ Parent](#flowcollectorspecconsolepluginautoscaler-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscaler) @@ -5068,35 +761,35 @@ MetricSpec specifies how to scale based on a single metric (only `type` and one true - containerResource + containerResource object containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
false - external + external object external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).
false - object + object object object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
false - pods + pods object pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
false - resource + resource object resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
@@ -5107,7 +800,7 @@ MetricSpec specifies how to scale based on a single metric (only `type` and one ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].containerResource -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex) @@ -5137,7 +830,7 @@ containerResource refers to a resource metric (such as those specified in reques true - target + target object target specifies the target value for the given metric
@@ -5148,7 +841,7 @@ containerResource refers to a resource metric (such as those specified in reques ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].containerResource.target -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexcontainerresource-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexcontainerresource) @@ -5198,7 +891,7 @@ target specifies the target value for the given metric ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].external -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex) @@ -5214,14 +907,14 @@ external refers to a global metric that is not associated with any Kubernetes ob - metric + metric object metric identifies the target metric by name and selector
true - target + target object target specifies the target value for the given metric
@@ -5232,7 +925,7 @@ external refers to a global metric that is not associated with any Kubernetes ob ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].external.metric -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexexternal-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexexternal) @@ -5255,7 +948,7 @@ metric identifies the target metric by name and selector true - selector + selector object selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
@@ -5266,7 +959,7 @@ metric identifies the target metric by name and selector ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].external.metric.selector -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexexternalmetric-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexexternalmetric) @@ -5282,7 +975,7 @@ selector is the string-encoded form of a standard kubernetes label selector for - matchExpressions + matchExpressions []object matchExpressions is a list of label selector requirements. The requirements are ANDed.
@@ -5300,7 +993,7 @@ selector is the string-encoded form of a standard kubernetes label selector for ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].external.metric.selector.matchExpressions[index] -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexexternalmetricselector-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexexternalmetricselector) @@ -5341,7 +1034,7 @@ A label selector requirement is a selector that contains values, a key, and an o ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].external.target -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexexternal-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexexternal) @@ -5391,7 +1084,7 @@ target specifies the target value for the given metric ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex) @@ -5407,21 +1100,21 @@ object refers to a metric describing a single kubernetes object (for example, hi - describedObject + describedObject object describedObject specifies the descriptions of a object,such as kind,name apiVersion
true - metric + metric object metric identifies the target metric by name and selector
true - target + target object target specifies the target value for the given metric
@@ -5432,7 +1125,7 @@ object refers to a metric describing a single kubernetes object (for example, hi ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object.describedObject -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobject-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobject) @@ -5473,7 +1166,7 @@ describedObject specifies the descriptions of a object,such as kind,name apiVers ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object.metric -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobject-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobject) @@ -5496,7 +1189,7 @@ metric identifies the target metric by name and selector true - selector + selector object selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
@@ -5507,7 +1200,7 @@ metric identifies the target metric by name and selector ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object.metric.selector -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobjectmetric-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobjectmetric) @@ -5523,7 +1216,7 @@ selector is the string-encoded form of a standard kubernetes label selector for - matchExpressions + matchExpressions []object matchExpressions is a list of label selector requirements. The requirements are ANDed.
@@ -5541,7 +1234,7 @@ selector is the string-encoded form of a standard kubernetes label selector for ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object.metric.selector.matchExpressions[index] -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobjectmetricselector-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobjectmetricselector) @@ -5582,7 +1275,7 @@ A label selector requirement is a selector that contains values, a key, and an o ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].object.target -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobject-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexobject) @@ -5632,7 +1325,7 @@ target specifies the target value for the given metric ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].pods -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex) @@ -5648,14 +1341,14 @@ pods refers to a metric describing each pod in the current scale target (for exa - metric + metric object metric identifies the target metric by name and selector
true - target + target object target specifies the target value for the given metric
@@ -5666,7 +1359,7 @@ pods refers to a metric describing each pod in the current scale target (for exa ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].pods.metric -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexpods-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexpods) @@ -5689,7 +1382,7 @@ metric identifies the target metric by name and selector true - selector + selector object selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
@@ -5700,7 +1393,7 @@ metric identifies the target metric by name and selector ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].pods.metric.selector -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexpodsmetric-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexpodsmetric) @@ -5716,7 +1409,7 @@ selector is the string-encoded form of a standard kubernetes label selector for - matchExpressions + matchExpressions []object matchExpressions is a list of label selector requirements. The requirements are ANDed.
@@ -5734,7 +1427,7 @@ selector is the string-encoded form of a standard kubernetes label selector for ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].pods.metric.selector.matchExpressions[index] -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexpodsmetricselector-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexpodsmetricselector) @@ -5775,7 +1468,7 @@ A label selector requirement is a selector that contains values, a key, and an o ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].pods.target -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexpods-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexpods) @@ -5825,7 +1518,7 @@ target specifies the target value for the given metric ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].resource -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindex) @@ -5848,7 +1541,7 @@ resource refers to a resource metric (such as those specified in requests and li true - target + target object target specifies the target value for the given metric
@@ -5859,7 +1552,7 @@ resource refers to a resource metric (such as those specified in requests and li ### FlowCollector.spec.consolePlugin.autoscaler.metrics[index].resource.target -[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexresource-1) +[↩ Parent](#flowcollectorspecconsolepluginautoscalermetricsindexresource) @@ -5909,7 +1602,7 @@ target specifies the target value for the given metric ### FlowCollector.spec.consolePlugin.portNaming -[↩ Parent](#flowcollectorspecconsoleplugin-1) +[↩ Parent](#flowcollectorspecconsoleplugin) @@ -5945,7 +1638,7 @@ target specifies the target value for the given metric ### FlowCollector.spec.consolePlugin.quickFilters[index] -[↩ Parent](#flowcollectorspecconsoleplugin-1) +[↩ Parent](#flowcollectorspecconsoleplugin) @@ -5986,7 +1679,7 @@ target specifies the target value for the given metric ### FlowCollector.spec.consolePlugin.resources -[↩ Parent](#flowcollectorspecconsoleplugin-1) +[↩ Parent](#flowcollectorspecconsoleplugin) @@ -6002,7 +1695,7 @@ target specifies the target value for the given metric - claims + claims []object Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. @@ -6029,7 +1722,7 @@ target specifies the target value for the given metric ### FlowCollector.spec.consolePlugin.resources.claims[index] -[↩ Parent](#flowcollectorspecconsolepluginresources-1) +[↩ Parent](#flowcollectorspecconsolepluginresources) @@ -6056,7 +1749,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. ### FlowCollector.spec.exporters[index] -[↩ Parent](#flowcollectorspec-1) +[↩ Parent](#flowcollectorspec) @@ -6081,14 +1774,14 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. true - ipfix + ipfix object IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to.
false - kafka + kafka object Kafka configuration, such as the address and topic, to send enriched flows to.
@@ -6099,7 +1792,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. ### FlowCollector.spec.exporters[index].ipfix -[↩ Parent](#flowcollectorspecexportersindex-1) +[↩ Parent](#flowcollectorspecexportersindex) @@ -6144,7 +1837,7 @@ IPFIX configuration, such as the IP address and port to send enriched IPFIX flow ### FlowCollector.spec.exporters[index].kafka -[↩ Parent](#flowcollectorspecexportersindex-1) +[↩ Parent](#flowcollectorspecexportersindex) @@ -6178,14 +1871,14 @@ Kafka configuration, such as the address and topic, to send enriched flows to. true - sasl + sasl object SASL authentication configuration. [Unsupported (*)].
false - tls + tls object TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093.
@@ -6196,7 +1889,7 @@ Kafka configuration, such as the address and topic, to send enriched flows to. ### FlowCollector.spec.exporters[index].kafka.sasl -[↩ Parent](#flowcollectorspecexportersindexkafka-1) +[↩ Parent](#flowcollectorspecexportersindexkafka) @@ -6212,14 +1905,14 @@ SASL authentication configuration. [Unsupported (*)]. - clientIDReference + clientIDReference object Reference to the secret or config map containing the client ID
false - clientSecretReference + clientSecretReference object Reference to the secret or config map containing the client secret
@@ -6240,7 +1933,7 @@ SASL authentication configuration. [Unsupported (*)]. ### FlowCollector.spec.exporters[index].kafka.sasl.clientIDReference -[↩ Parent](#flowcollectorspecexportersindexkafkasasl-1) +[↩ Parent](#flowcollectorspecexportersindexkafkasasl) @@ -6292,7 +1985,7 @@ Reference to the secret or config map containing the client ID ### FlowCollector.spec.exporters[index].kafka.sasl.clientSecretReference -[↩ Parent](#flowcollectorspecexportersindexkafkasasl-1) +[↩ Parent](#flowcollectorspecexportersindexkafkasasl) @@ -6344,7 +2037,7 @@ Reference to the secret or config map containing the client secret ### FlowCollector.spec.exporters[index].kafka.tls -[↩ Parent](#flowcollectorspecexportersindexkafka-1) +[↩ Parent](#flowcollectorspecexportersindexkafka) @@ -6360,7 +2053,7 @@ TLS client configuration. When using TLS, verify that the address matches the Ka - caCert + caCert object `caCert` defines the reference of the certificate for the Certificate Authority
@@ -6385,7 +2078,7 @@ TLS client configuration. When using TLS, verify that the address matches the Ka false - userCert + userCert object `userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)
@@ -6396,7 +2089,7 @@ TLS client configuration. When using TLS, verify that the address matches the Ka ### FlowCollector.spec.exporters[index].kafka.tls.caCert -[↩ Parent](#flowcollectorspecexportersindexkafkatls-1) +[↩ Parent](#flowcollectorspecexportersindexkafkatls) @@ -6455,7 +2148,7 @@ TLS client configuration. When using TLS, verify that the address matches the Ka ### FlowCollector.spec.exporters[index].kafka.tls.userCert -[↩ Parent](#flowcollectorspecexportersindexkafkatls-1) +[↩ Parent](#flowcollectorspecexportersindexkafkatls) @@ -6514,7 +2207,7 @@ TLS client configuration. When using TLS, verify that the address matches the Ka ### FlowCollector.spec.kafka -[↩ Parent](#flowcollectorspec-1) +[↩ Parent](#flowcollectorspec) @@ -6548,14 +2241,14 @@ Kafka configuration, allowing to use Kafka as a broker as part of the flow colle true - sasl + sasl object SASL authentication configuration. [Unsupported (*)].
false - tls + tls object TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093.
@@ -6566,7 +2259,7 @@ Kafka configuration, allowing to use Kafka as a broker as part of the flow colle ### FlowCollector.spec.kafka.sasl -[↩ Parent](#flowcollectorspeckafka-1) +[↩ Parent](#flowcollectorspeckafka) @@ -6582,14 +2275,14 @@ SASL authentication configuration. [Unsupported (*)]. - clientIDReference + clientIDReference object Reference to the secret or config map containing the client ID
false - clientSecretReference + clientSecretReference object Reference to the secret or config map containing the client secret
@@ -6610,7 +2303,7 @@ SASL authentication configuration. [Unsupported (*)]. ### FlowCollector.spec.kafka.sasl.clientIDReference -[↩ Parent](#flowcollectorspeckafkasasl-1) +[↩ Parent](#flowcollectorspeckafkasasl) @@ -6662,7 +2355,7 @@ Reference to the secret or config map containing the client ID ### FlowCollector.spec.kafka.sasl.clientSecretReference -[↩ Parent](#flowcollectorspeckafkasasl-1) +[↩ Parent](#flowcollectorspeckafkasasl) @@ -6714,7 +2407,7 @@ Reference to the secret or config map containing the client secret ### FlowCollector.spec.kafka.tls -[↩ Parent](#flowcollectorspeckafka-1) +[↩ Parent](#flowcollectorspeckafka) @@ -6730,7 +2423,7 @@ TLS client configuration. When using TLS, verify that the address matches the Ka - caCert + caCert object `caCert` defines the reference of the certificate for the Certificate Authority
@@ -6755,7 +2448,7 @@ TLS client configuration. When using TLS, verify that the address matches the Ka false - userCert + userCert object `userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)
@@ -6766,7 +2459,7 @@ TLS client configuration. When using TLS, verify that the address matches the Ka ### FlowCollector.spec.kafka.tls.caCert -[↩ Parent](#flowcollectorspeckafkatls-1) +[↩ Parent](#flowcollectorspeckafkatls) @@ -6825,7 +2518,7 @@ TLS client configuration. When using TLS, verify that the address matches the Ka ### FlowCollector.spec.kafka.tls.userCert -[↩ Parent](#flowcollectorspeckafkatls-1) +[↩ Parent](#flowcollectorspeckafkatls) @@ -6884,7 +2577,7 @@ TLS client configuration. When using TLS, verify that the address matches the Ka ### FlowCollector.spec.loki -[↩ Parent](#flowcollectorspec-1) +[↩ Parent](#flowcollectorspec) @@ -7025,7 +2718,7 @@ TLS client configuration. When using TLS, verify that the address matches the Ka false - tls + tls object TLS client configuration for Loki URL.
@@ -7045,7 +2738,7 @@ TLS client configuration. When using TLS, verify that the address matches the Ka ### FlowCollector.spec.loki.statusTls -[↩ Parent](#flowcollectorspecloki-1) +[↩ Parent](#flowcollectorspecloki) @@ -7215,7 +2908,7 @@ TLS client configuration for Loki status URL. ### FlowCollector.spec.loki.tls -[↩ Parent](#flowcollectorspecloki-1) +[↩ Parent](#flowcollectorspecloki) @@ -7231,7 +2924,7 @@ TLS client configuration for Loki URL. - caCert + caCert object `caCert` defines the reference of the certificate for the Certificate Authority
@@ -7256,7 +2949,7 @@ TLS client configuration for Loki URL. false - userCert + userCert object `userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)
@@ -7267,7 +2960,7 @@ TLS client configuration for Loki URL. ### FlowCollector.spec.loki.tls.caCert -[↩ Parent](#flowcollectorspeclokitls-1) +[↩ Parent](#flowcollectorspeclokitls) @@ -7326,7 +3019,7 @@ TLS client configuration for Loki URL. ### FlowCollector.spec.loki.tls.userCert -[↩ Parent](#flowcollectorspeclokitls-1) +[↩ Parent](#flowcollectorspeclokitls) @@ -7385,7 +3078,7 @@ TLS client configuration for Loki URL. ### FlowCollector.spec.processor -[↩ Parent](#flowcollectorspec-1) +[↩ Parent](#flowcollectorspec) @@ -7444,7 +3137,7 @@ TLS client configuration for Loki URL. false - debug + debug object `debug` allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk.
@@ -7491,7 +3184,7 @@ TLS client configuration for Loki URL. false - kafkaConsumerAutoscaler + kafkaConsumerAutoscaler object `kafkaConsumerAutoscaler` is the spec of a horizontal pod autoscaler to set up for `flowlogs-pipeline-transformer`, which consumes Kafka messages. This setting is ignored when Kafka is disabled.
@@ -7547,7 +3240,7 @@ TLS client configuration for Loki URL. false - metrics + metrics object `Metrics` define the processor configuration regarding metrics
@@ -7586,7 +3279,7 @@ TLS client configuration for Loki URL. false - resources + resources object `resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
@@ -7599,7 +3292,7 @@ TLS client configuration for Loki URL. ### FlowCollector.spec.processor.debug -[↩ Parent](#flowcollectorspecprocessor-1) +[↩ Parent](#flowcollectorspecprocessor) @@ -7626,7 +3319,7 @@ TLS client configuration for Loki URL. ### FlowCollector.spec.processor.kafkaConsumerAutoscaler -[↩ Parent](#flowcollectorspecprocessor-1) +[↩ Parent](#flowcollectorspecprocessor) @@ -7652,7 +3345,7 @@ TLS client configuration for Loki URL. false - metrics + metrics []object Metrics used by the pod autoscaler
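As a concrete illustration of the `debug` section described above, `GOGC` and `GOMAXPROCS` can be passed through its `env` map. The values are arbitrary examples, not recommendations, and this section is meant for debugging and support scenarios only.

```yaml
spec:
  processor:
    debug:
      env:
        GOGC: "400"        # example value only; set at your own risk
        GOMAXPROCS: "4"    # example value only
```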
@@ -7682,7 +3375,7 @@ TLS client configuration for Loki URL. ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index] -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscaler-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscaler) @@ -7705,35 +3398,35 @@ MetricSpec specifies how to scale based on a single metric (only `type` and one true - containerResource + containerResource object containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
false - external + external object external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).
false - object + object object object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
false - pods + pods object pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
false - resource + resource object resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
@@ -7744,7 +3437,7 @@ MetricSpec specifies how to scale based on a single metric (only `type` and one ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].containerResource -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex) @@ -7774,7 +3467,7 @@ containerResource refers to a resource metric (such as those specified in reques true - target + target object target specifies the target value for the given metric
@@ -7785,7 +3478,7 @@ containerResource refers to a resource metric (such as those specified in reques ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].containerResource.target -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexcontainerresource-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexcontainerresource) @@ -7835,7 +3528,7 @@ target specifies the target value for the given metric ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].external -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex) @@ -7851,14 +3544,14 @@ external refers to a global metric that is not associated with any Kubernetes ob - metric + metric object metric identifies the target metric by name and selector
true - target + target object target specifies the target value for the given metric
@@ -7869,7 +3562,7 @@ external refers to a global metric that is not associated with any Kubernetes ob ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].external.metric -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexexternal-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexexternal) @@ -7892,7 +3585,7 @@ metric identifies the target metric by name and selector true - selector + selector object selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
@@ -7903,7 +3596,7 @@ metric identifies the target metric by name and selector ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].external.metric.selector -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexexternalmetric-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexexternalmetric) @@ -7919,7 +3612,7 @@ selector is the string-encoded form of a standard kubernetes label selector for - matchExpressions + matchExpressions []object matchExpressions is a list of label selector requirements. The requirements are ANDed.
@@ -7937,7 +3630,7 @@ selector is the string-encoded form of a standard kubernetes label selector for ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].external.metric.selector.matchExpressions[index] -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexexternalmetricselector-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexexternalmetricselector) @@ -7978,7 +3671,7 @@ A label selector requirement is a selector that contains values, a key, and an o ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].external.target -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexexternal-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexexternal) @@ -8028,7 +3721,7 @@ target specifies the target value for the given metric ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex) @@ -8044,21 +3737,21 @@ object refers to a metric describing a single kubernetes object (for example, hi - describedObject + describedObject object describedObject specifies the descriptions of a object,such as kind,name apiVersion
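The `external` metric path above can also drive the Kafka consumer autoscaler from a metric served by a custom metrics adapter. Everything named below (the `kafka_consumergroup_lag` metric, its labels, and the consumer group value) is hypothetical and only illustrates how `metric`, `selector`, and `target` fit together.

```yaml
spec:
  processor:
    kafkaConsumerAutoscaler:
      maxReplicas: 3
      metrics:
        - type: External
          external:
            metric:
              name: kafka_consumergroup_lag      # hypothetical metric name
              selector:
                matchLabels:
                  topic: network-flows           # hypothetical label
                matchExpressions:
                  - key: consumergroup
                    operator: In
                    values: ["flp"]              # hypothetical consumer group
            target:
              type: AverageValue
              averageValue: "100"
```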
true - metric + metric object metric identifies the target metric by name and selector
true - target + target object target specifies the target value for the given metric
@@ -8069,7 +3762,7 @@ object refers to a metric describing a single kubernetes object (for example, hi ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object.describedObject -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobject-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobject) @@ -8110,7 +3803,7 @@ describedObject specifies the descriptions of a object,such as kind,name apiVers ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object.metric -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobject-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobject) @@ -8133,7 +3826,7 @@ metric identifies the target metric by name and selector true - selector + selector object selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
@@ -8144,7 +3837,7 @@ metric identifies the target metric by name and selector ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object.metric.selector -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobjectmetric-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobjectmetric) @@ -8160,7 +3853,7 @@ selector is the string-encoded form of a standard kubernetes label selector for - matchExpressions + matchExpressions []object matchExpressions is a list of label selector requirements. The requirements are ANDed.
@@ -8178,7 +3871,7 @@ selector is the string-encoded form of a standard kubernetes label selector for ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object.metric.selector.matchExpressions[index] -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobjectmetricselector-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobjectmetricselector) @@ -8219,7 +3912,7 @@ A label selector requirement is a selector that contains values, a key, and an o ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].object.target -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobject-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexobject) @@ -8269,7 +3962,7 @@ target specifies the target value for the given metric ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].pods -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex) @@ -8285,14 +3978,14 @@ pods refers to a metric describing each pod in the current scale target (for exa - metric + metric object metric identifies the target metric by name and selector
true - target + target object target specifies the target value for the given metric
@@ -8303,7 +3996,7 @@ pods refers to a metric describing each pod in the current scale target (for exa ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].pods.metric -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexpods-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexpods) @@ -8326,7 +4019,7 @@ metric identifies the target metric by name and selector true - selector + selector object selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
@@ -8337,7 +4030,7 @@ metric identifies the target metric by name and selector ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].pods.metric.selector -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexpodsmetric-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexpodsmetric) @@ -8353,7 +4046,7 @@ selector is the string-encoded form of a standard kubernetes label selector for - matchExpressions + matchExpressions []object matchExpressions is a list of label selector requirements. The requirements are ANDed.
@@ -8371,7 +4064,7 @@ selector is the string-encoded form of a standard kubernetes label selector for ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].pods.metric.selector.matchExpressions[index] -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexpodsmetricselector-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexpodsmetricselector) @@ -8412,7 +4105,7 @@ A label selector requirement is a selector that contains values, a key, and an o ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].pods.target -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexpods-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexpods) @@ -8462,7 +4155,7 @@ target specifies the target value for the given metric ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].resource -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindex) @@ -8485,7 +4178,7 @@ resource refers to a resource metric (such as those specified in requests and li true - target + target object target specifies the target value for the given metric
@@ -8496,7 +4189,7 @@ resource refers to a resource metric (such as those specified in requests and li ### FlowCollector.spec.processor.kafkaConsumerAutoscaler.metrics[index].resource.target -[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexresource-1) +[↩ Parent](#flowcollectorspecprocessorkafkaconsumerautoscalermetricsindexresource) @@ -8546,7 +4239,7 @@ target specifies the target value for the given metric ### FlowCollector.spec.processor.metrics -[↩ Parent](#flowcollectorspecprocessor-1) +[↩ Parent](#flowcollectorspecprocessor) @@ -8585,7 +4278,7 @@ target specifies the target value for the given metric false - server + server object Metrics server endpoint configuration for Prometheus scraper
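A more common autoscaler setup, using the `resource` metric fields just documented, scales the Kafka consumer on CPU utilization. The replica counts and the 90% threshold are arbitrary illustrative values, and `minReplicas` is assumed to be accepted alongside `maxReplicas` as in a standard HPA spec.

```yaml
spec:
  processor:
    kafkaConsumerAutoscaler:
      minReplicas: 1     # assumed field, mirroring the standard HPA spec
      maxReplicas: 3
      metrics:
        - type: Resource
          resource:
            name: cpu
            target:
              type: Utilization
              averageUtilization: 90
```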
@@ -8596,7 +4289,7 @@ target specifies the target value for the given metric ### FlowCollector.spec.processor.metrics.server -[↩ Parent](#flowcollectorspecprocessormetrics-1) +[↩ Parent](#flowcollectorspecprocessormetrics) @@ -8624,7 +4317,7 @@ Metrics server endpoint configuration for Prometheus scraper false - tls + tls object TLS configuration.
@@ -8635,7 +4328,7 @@ Metrics server endpoint configuration for Prometheus scraper ### FlowCollector.spec.processor.metrics.server.tls -[↩ Parent](#flowcollectorspecprocessormetricsserver-1) +[↩ Parent](#flowcollectorspecprocessormetricsserver) @@ -8660,7 +4353,7 @@ TLS configuration. false - provided + provided object TLS configuration when `type` is set to `PROVIDED`.
@@ -8688,7 +4381,7 @@ TLS configuration. ### FlowCollector.spec.processor.metrics.server.tls.provided -[↩ Parent](#flowcollectorspecprocessormetricsservertls-1) +[↩ Parent](#flowcollectorspecprocessormetricsservertls) @@ -8747,7 +4440,7 @@ TLS configuration when `type` is set to `PROVIDED`. ### FlowCollector.spec.processor.metrics.server.tls.providedCaFile -[↩ Parent](#flowcollectorspecprocessormetricsservertls-1) +[↩ Parent](#flowcollectorspecprocessormetricsservertls) @@ -8799,7 +4492,7 @@ Reference to the CA file when `type` is set to `PROVIDED`. ### FlowCollector.spec.processor.resources -[↩ Parent](#flowcollectorspecprocessor-1) +[↩ Parent](#flowcollectorspecprocessor) @@ -8815,7 +4508,7 @@ Reference to the CA file when `type` is set to `PROVIDED`. - claims + claims []object Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. @@ -8842,7 +4535,7 @@ Reference to the CA file when `type` is set to `PROVIDED`. ### FlowCollector.spec.processor.resources.claims[index] -[↩ Parent](#flowcollectorspecprocessorresources-1) +[↩ Parent](#flowcollectorspecprocessorresources) @@ -8869,7 +4562,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. ### FlowCollector.status -[↩ Parent](#flowcollector-1) +[↩ Parent](#flowcollector) @@ -8885,7 +4578,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. - conditions + conditions []object `conditions` represent the latest available observations of an object's state
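The processor `resources` section follows the standard Kubernetes resource-requirements shape listed above (`claims`, `limits`, `requests`); a minimal sketch with illustrative values:

```yaml
spec:
  processor:
    resources:
      requests:
        cpu: 100m      # illustrative values, not defaults
        memory: 100Mi
      limits:
        memory: 800Mi
```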
@@ -8903,7 +4596,7 @@ ResourceClaim references one entry in PodSpec.ResourceClaims. ### FlowCollector.status.conditions[index] -[↩ Parent](#flowcollectorstatus-1) +[↩ Parent](#flowcollectorstatus) diff --git a/hack/cloned.flows.netobserv.io_flowcollectors.yaml b/hack/cloned.flows.netobserv.io_flowcollectors.yaml deleted file mode 100644 index d5ee42801..000000000 --- a/hack/cloned.flows.netobserv.io_flowcollectors.yaml +++ /dev/null @@ -1,5614 +0,0 @@ - ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null - name: flowcollectors.flows.netobserv.io -spec: - group: flows.netobserv.io - names: - kind: FlowCollector - listKind: FlowCollectorList - plural: flowcollectors - singular: flowcollector - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .spec.agent.type - name: Agent - type: string - - jsonPath: .spec.agent.ebpf.sampling - name: Sampling (EBPF) - type: string - - jsonPath: .spec.deploymentModel - name: Deployment Model - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].reason - name: Status - type: string - deprecated: true - name: v1alpha1 - schema: - openAPIV3Schema: - description: "FlowCollector is the Schema for the flowcollectors API, which pilots and configures netflow collection. \n Deprecated: This package will be removed in one of the next releases." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: FlowCollectorSpec defines the desired state of FlowCollector - properties: - agent: - default: - type: EBPF - description: agent for flows extraction. - properties: - ebpf: - description: ebpf describes the settings related to the eBPF-based flow reporter when the "agent.type" property is set to "EBPF". - properties: - cacheActiveTimeout: - default: 5s - description: cacheActiveTimeout is the max period during which the reporter will aggregate flows before sending. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection. - pattern: ^\d+(ns|ms|s|m)?$ - type: string - cacheMaxFlows: - default: 100000 - description: cacheMaxFlows is the max number of flows in an aggregate; when reached, the reporter sends the flows. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection. - format: int32 - minimum: 1 - type: integer - debug: - description: Debug allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations (for example GOGC, GOMAXPROCS env vars). 
Users setting its values do it at their own risk. - properties: - env: - additionalProperties: - type: string - description: env allows passing custom environment variables to the NetObserv Agent. Useful for passing some very concrete performance-tuning options (such as GOGC, GOMAXPROCS) that shouldn't be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug and support scenarios. - type: object - type: object - excludeInterfaces: - default: - - lo - description: excludeInterfaces contains the interface names that will be excluded from flow tracing. If an entry is enclosed by slashes (such as `/br-/`), it will match as regular expression, otherwise it will be matched as a case-sensitive string. - items: - type: string - type: array - imagePullPolicy: - default: IfNotPresent - description: imagePullPolicy is the Kubernetes pull policy for the image defined above - enum: - - IfNotPresent - - Always - - Never - type: string - interfaces: - description: interfaces contains the interface names from where flows will be collected. If empty, the agent will fetch all the interfaces in the system, excepting the ones listed in ExcludeInterfaces. If an entry is enclosed by slashes (such as `/br-/`), it will match as regular expression, otherwise it will be matched as a case-sensitive string. - items: - type: string - type: array - kafkaBatchSize: - default: 1048576 - description: 'kafkaBatchSize limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 1MB.' - type: integer - logLevel: - default: info - description: logLevel defines the log level for the NetObserv eBPF Agent - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - privileged: - description: 'privileged mode for the eBPF Agent container. In general this setting can be ignored or set to false: in that case, the operator will set granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, to enable its correct operation. If for some reason these capabilities cannot be set (for example old kernel version not knowing CAP_BPF) then you can turn on this mode for more global privileges.' - type: boolean - resources: - default: - limits: - memory: 800Mi - requests: - cpu: 100m - memory: 50Mi - description: 'resources are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - sampling: - default: 50 - description: sampling rate of the flow reporter. 100 means one flow on 100 is sent. 0 or 1 means all flows are sampled. - format: int32 - minimum: 0 - type: integer - type: object - ipfix: - description: ipfix describes the settings related to the IPFIX-based flow reporter when the "agent.type" property is set to "IPFIX". - properties: - cacheActiveTimeout: - default: 20s - description: cacheActiveTimeout is the max period during which the reporter will aggregate flows before sending - pattern: ^\d+(ns|ms|s|m)?$ - type: string - cacheMaxFlows: - default: 400 - description: cacheMaxFlows is the max number of flows in an aggregate; when reached, the reporter sends the flows - format: int32 - minimum: 0 - type: integer - clusterNetworkOperator: - description: clusterNetworkOperator defines the settings related to the OpenShift Cluster Network Operator, when available. - properties: - namespace: - default: openshift-network-operator - description: namespace where the config map is going to be deployed. - type: string - type: object - forceSampleAll: - default: false - description: forceSampleAll allows disabling sampling in the IPFIX-based flow reporter. It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to true. Use at your own risk. When it is set to true, the value of "sampling" is ignored. - type: boolean - ovnKubernetes: - description: ovnKubernetes defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead. - properties: - containerName: - default: ovnkube-node - description: containerName defines the name of the container to configure for IPFIX. - type: string - daemonSetName: - default: ovnkube-node - description: daemonSetName defines the name of the DaemonSet controlling the OVN-Kubernetes pods. - type: string - namespace: - default: ovn-kubernetes - description: namespace where OVN-Kubernetes pods are deployed. - type: string - type: object - sampling: - default: 400 - description: sampling is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to "forceSampleAll". Alternatively, you can use the eBPF Agent instead of IPFIX. - format: int32 - minimum: 2 - type: integer - type: object - type: - default: EBPF - description: type selects the flows tracing agent. Possible values are "EBPF" (default) to use NetObserv eBPF agent, "IPFIX" to use the legacy IPFIX collector. 
"EBPF" is recommended in most cases as it offers better performances and should work regardless of the CNI installed on the cluster. "IPFIX" works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration). - enum: - - EBPF - - IPFIX - type: string - required: - - type - type: object - consolePlugin: - description: consolePlugin defines the settings related to the OpenShift Console plugin, when available. - properties: - autoscaler: - description: autoscaler spec of a horizontal pod autoscaler to set up for the plugin Deployment. - properties: - maxReplicas: - default: 3 - description: maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. - format: int32 - type: integer - metrics: - description: metrics used by the pod autoscaler - items: - description: MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once). - properties: - containerResource: - description: containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. - properties: - container: - description: container is the name of the container in the pods of the scaling target - type: string - name: - description: name is the name of the resource in question. - type: string - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - container - - name - - target - type: object - external: - description: external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). 
- properties: - metric: - description: metric identifies the target metric by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - object: - description: object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object). 
- properties: - describedObject: - description: describedObject specifies the descriptions of a object,such as kind,name apiVersion - properties: - apiVersion: - description: apiVersion is the API version of the referent - type: string - kind: - description: 'kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - kind - - name - type: object - metric: - description: metric identifies the target metric by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). 
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - describedObject - - metric - - target - type: object - pods: - description: pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value. - properties: - metric: - description: metric identifies the target metric by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). 
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - resource: - description: resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. - properties: - name: - description: name is the name of the resource in question. - type: string - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - name - - target - type: object - type: - description: 'type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available on when the feature-gate HPAContainerMetrics is enabled' - type: string - required: - - type - type: object - type: array - minReplicas: - description: minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available. 
- format: int32 - type: integer - status: - default: DISABLED - description: Status describe the desired status regarding deploying an horizontal pod autoscaler DISABLED will not deploy an horizontal pod autoscaler ENABLED will deploy an horizontal pod autoscaler - enum: - - DISABLED - - ENABLED - type: string - type: object - imagePullPolicy: - default: IfNotPresent - description: imagePullPolicy is the Kubernetes pull policy for the image defined above - enum: - - IfNotPresent - - Always - - Never - type: string - logLevel: - default: info - description: logLevel for the console plugin backend - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - port: - default: 9001 - description: port is the plugin service port - format: int32 - maximum: 65535 - minimum: 1 - type: integer - portNaming: - default: - enable: true - description: portNaming defines the configuration of the port-to-service name translation - properties: - enable: - default: true - description: enable the console plugin port-to-service name translation - type: boolean - portNames: - additionalProperties: - type: string - description: 'portNames defines additional port names to use in the console. Example: portNames: {"3100": "loki"}' - type: object - type: object - quickFilters: - default: - - default: true - filter: - dst_namespace!: openshift-,netobserv - src_namespace!: openshift-,netobserv - name: Applications - - filter: - dst_namespace: openshift-,netobserv - src_namespace: openshift-,netobserv - name: Infrastructure - - default: true - filter: - dst_kind: Pod - src_kind: Pod - name: Pods network - - filter: - dst_kind: Service - name: Services network - description: quickFilters configures quick filter presets for the Console plugin - items: - description: QuickFilter defines preset configuration for Console's quick filters - properties: - default: - description: default defines whether this filter should be active by default or not - type: boolean - filter: - additionalProperties: - type: string - description: 'filter is a set of keys and values to be set when this filter is selected. Each key can relate to a list of values using a coma-separated string. Example: filter: {"src_namespace": "namespace1,namespace2"}' - type: object - name: - description: name of the filter, that will be displayed in Console - type: string - required: - - filter - - name - type: object - type: array - register: - default: true - description: 'register allows, when set to true, to automatically register the provided console plugin with the OpenShift Console operator. When set to false, you can still register it manually by editing console.operator.openshift.io/cluster. E.g: oc patch console.operator.openshift.io cluster --type=''json'' -p ''[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]''' - type: boolean - replicas: - default: 1 - description: replicas defines the number of replicas (pods) to start. - format: int32 - minimum: 0 - type: integer - resources: - default: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 50Mi - description: 'resources, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. 
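The `consolePlugin` defaults shown above (port, port naming, quick filters) can also be set explicitly. The `My app traffic` filter and the `my-namespace` value are invented for illustration; the `"3100": loki` port name comes from the field description's own example.

```yaml
spec:
  consolePlugin:
    register: true
    port: 9001
    portNaming:
      enable: true
      portNames:
        "3100": loki                    # example from the field description
    quickFilters:
      - name: Applications
        default: true
        filter:
          src_namespace!: "openshift-,netobserv"
          dst_namespace!: "openshift-,netobserv"
      - name: My app traffic            # hypothetical custom filter
        filter:
          src_namespace: "my-namespace"
```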
It can only be set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - required: - - register - type: object - deploymentModel: - default: DIRECT - description: deploymentModel defines the desired type of deployment for flow processing. Possible values are "DIRECT" (default) to make the flow processor listening directly from the agents, or "KAFKA" to make flows sent to a Kafka pipeline before consumption by the processor. Kafka can provide better scalability, resiliency and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka). - enum: - - DIRECT - - KAFKA - type: string - exporters: - description: exporters defines additional optional exporters for custom consumption or storage. This is an experimental feature. Currently, only KAFKA exporter is available. - items: - description: FlowCollectorExporter defines an additional exporter to send enriched flows to - properties: - ipfix: - description: IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. - properties: - targetHost: - default: "" - description: Address of the IPFIX external receiver - type: string - targetPort: - description: Port for the IPFIX external receiver - type: integer - transport: - description: Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`. - enum: - - TCP - - UDP - type: string - required: - - targetHost - - targetPort - type: object - kafka: - description: kafka configuration, such as address or topic, to send enriched flows to. - properties: - address: - default: "" - description: address of the Kafka server - type: string - sasl: - description: SASL authentication configuration. [Unsupported (*)]. - properties: - clientIDReference: - description: Reference to the secret or config map containing the client ID - properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing the file. 
If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - clientSecretReference: - description: Reference to the secret or config map containing the client secret - properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: DISABLED - description: Type of SASL authentication to use, or `DISABLED` if SASL is not used - enum: - - DISABLED - - PLAIN - - SCRAM-SHA512 - type: string - type: object - tls: - description: tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, Kafka certificate needs to be copied in the agent namespace (by default it's netobserv-privileged). - properties: - caCert: - description: caCert defines the reference of the certificate for the Certificate Authority - properties: - certFile: - description: certFile defines the path to the certificate file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - type: string - type: - description: 'type for the certificate reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: enable TLS - type: boolean - insecureSkipVerify: - default: false - description: insecureSkipVerify allows skipping client-side verification of the server certificate If set to true, CACert field will be ignored - type: boolean - userCert: - description: userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS) - properties: - certFile: - description: certFile defines the path to the certificate file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. 
If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - type: string - type: - description: 'type for the certificate reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: object - topic: - default: "" - description: kafka topic to use. It must exist, NetObserv will not create it. - type: string - required: - - address - - topic - type: object - type: - description: '`type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`.' - enum: - - KAFKA - - IPFIX - type: string - required: - - type - type: object - type: array - kafka: - description: kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the "spec.deploymentModel" is "KAFKA". - properties: - address: - default: "" - description: address of the Kafka server - type: string - sasl: - description: SASL authentication configuration. [Unsupported (*)]. - properties: - clientIDReference: - description: Reference to the secret or config map containing the client ID - properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - clientSecretReference: - description: Reference to the secret or config map containing the client secret - properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: DISABLED - description: Type of SASL authentication to use, or `DISABLED` if SASL is not used - enum: - - DISABLED - - PLAIN - - SCRAM-SHA512 - type: string - type: object - tls: - description: tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, Kafka certificate needs to be copied in the agent namespace (by default it's netobserv-privileged). - properties: - caCert: - description: caCert defines the reference of the certificate for the Certificate Authority - properties: - certFile: - description: certFile defines the path to the certificate file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. 
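Combining the exporter fields above, enriched flows can additionally be sent to an external Kafka topic or an external IPFIX receiver (the schema defines both, even though the description flags the feature as experimental). The addresses, topic, and port below are hypothetical.

```yaml
spec:
  exporters:
    - type: KAFKA
      kafka:
        address: "kafka-external-bootstrap.example.com:9092"   # hypothetical external broker
        topic: "netobserv-export"                               # must already exist
    - type: IPFIX
      ipfix:
        targetHost: "ipfix-collector.example.com"               # hypothetical receiver
        targetPort: 4739                                        # hypothetical port
        transport: TCP
```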
- type: string - name: - description: name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - type: string - type: - description: 'type for the certificate reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: enable TLS - type: boolean - insecureSkipVerify: - default: false - description: insecureSkipVerify allows skipping client-side verification of the server certificate If set to true, CACert field will be ignored - type: boolean - userCert: - description: userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS) - properties: - certFile: - description: certFile defines the path to the certificate file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - type: string - type: - description: 'type for the certificate reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: object - topic: - default: "" - description: kafka topic to use. It must exist, NetObserv will not create it. - type: string - required: - - address - - topic - type: object - loki: - description: loki, the flow store, client settings. - properties: - authToken: - default: DISABLED - description: AuthToken describe the way to get a token to authenticate to Loki. DISABLED will not send any token with the request. HOST will use the local pod service account to authenticate to Loki. FORWARD will forward user token, in this mode, pod that are not receiving user request like the processor will use the local pod service account. Similar to HOST mode. When using the Loki Operator, set it to `HOST` or `FORWARD`. - enum: - - DISABLED - - HOST - - FORWARD - type: string - batchSize: - default: 102400 - description: batchSize is max batch size (in bytes) of logs to accumulate before sending. - format: int64 - minimum: 1 - type: integer - batchWait: - default: 1s - description: batchWait is max time to wait before sending a batch. - type: string - maxBackoff: - default: 5s - description: maxBackoff is the maximum backoff time for client connection between retries. - type: string - maxRetries: - default: 2 - description: maxRetries is the maximum number of retries for client connections. - format: int32 - minimum: 0 - type: integer - minBackoff: - default: 1s - description: minBackoff is the initial backoff time for client connection between retries. - type: string - querierUrl: - description: querierURL specifies the address of the Loki querier service, in case it is different from the Loki ingester URL. 
If empty, the URL value will be used (assuming that the Loki ingester and querier are in the same server). When using the Loki Operator, do not set it, since ingestion and queries use the Loki gateway. - type: string - staticLabels: - additionalProperties: - type: string - default: - app: netobserv-flowcollector - description: staticLabels is a map of common labels to set on each flow. - type: object - statusUrl: - description: statusURL specifies the address of the Loki /ready /metrics /config endpoints, in case it is different from the Loki querier URL. If empty, the QuerierURL value will be used. This is useful to show error messages and some context in the frontend. When using the Loki Operator, set it to the Loki HTTP query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/. - type: string - tenantID: - default: netobserv - description: tenantID is the Loki X-Scope-OrgID that identifies the tenant for each request. When using the Loki Operator, set it to `network`, which corresponds to a special tenant mode. - type: string - timeout: - default: 10s - description: timeout is the maximum time connection / request limit. A Timeout of zero means no timeout. - type: string - tls: - description: tls client configuration. - properties: - caCert: - description: caCert defines the reference of the certificate for the Certificate Authority - properties: - certFile: - description: certFile defines the path to the certificate file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - type: string - type: - description: 'type for the certificate reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: enable TLS - type: boolean - insecureSkipVerify: - default: false - description: insecureSkipVerify allows skipping client-side verification of the server certificate If set to true, CACert field will be ignored - type: boolean - userCert: - description: userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS) - properties: - certFile: - description: certFile defines the path to the certificate file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. 
- type: string - type: - description: 'type for the certificate reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: object - url: - default: http://loki:3100/ - description: url is the address of an existing Loki service to push the flows to. When using the Loki Operator, set it to the Loki gateway service with the `network` tenant set in path, for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network. - type: string - type: object - namespace: - description: namespace where NetObserv pods are deployed. If empty, the namespace of the operator is going to be used. - type: string - processor: - description: processor defines the settings of the component that receives the flows from the agent, enriches them, and forwards them to the Loki persistence layer. - properties: - debug: - description: Debug allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk. - properties: - env: - additionalProperties: - type: string - description: env allows passing custom environment variables to the NetObserv Agent. Useful for passing some very concrete performance-tuning options (such as GOGC, GOMAXPROCS) that shouldn't be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug and support scenarios. - type: object - type: object - dropUnusedFields: - default: true - description: dropUnusedFields allows, when set to true, to drop fields that are known to be unused by OVS, in order to save storage space. - type: boolean - enableKubeProbes: - default: true - description: enableKubeProbes is a flag to enable or disable Kubernetes liveness and readiness probes - type: boolean - healthPort: - default: 8080 - description: healthPort is a collector HTTP port in the Pod that exposes the health check API - format: int32 - maximum: 65535 - minimum: 1 - type: integer - imagePullPolicy: - default: IfNotPresent - description: imagePullPolicy is the Kubernetes pull policy for the image defined above - enum: - - IfNotPresent - - Always - - Never - type: string - kafkaConsumerAutoscaler: - description: kafkaConsumerAutoscaler spec of a horizontal pod autoscaler to set up for flowlogs-pipeline-transformer, which consumes Kafka messages. This setting is ignored when Kafka is disabled. - properties: - maxReplicas: - default: 3 - description: maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. - format: int32 - type: integer - metrics: - description: metrics used by the pod autoscaler - items: - description: MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once). - properties: - containerResource: - description: containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. 
- properties: - container: - description: container is the name of the container in the pods of the scaling target - type: string - name: - description: name is the name of the resource in question. - type: string - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - container - - name - - target - type: object - external: - description: external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). - properties: - metric: - description: metric identifies the target metric by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - object: - description: object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object). - properties: - describedObject: - description: describedObject specifies the descriptions of a object,such as kind,name apiVersion - properties: - apiVersion: - description: apiVersion is the API version of the referent - type: string - kind: - description: 'kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - kind - - name - type: object - metric: - description: metric identifies the target metric by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - describedObject - - metric - - target - type: object - pods: - description: pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value. - properties: - metric: - description: metric identifies the target metric by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - resource: - description: resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. - properties: - name: - description: name is the name of the resource in question. - type: string - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - name - - target - type: object - type: - description: 'type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available on when the feature-gate HPAContainerMetrics is enabled' - type: string - required: - - type - type: object - type: array - minReplicas: - description: minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. 
minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available. - format: int32 - type: integer - status: - default: DISABLED - description: Status describe the desired status regarding deploying an horizontal pod autoscaler DISABLED will not deploy an horizontal pod autoscaler ENABLED will deploy an horizontal pod autoscaler - enum: - - DISABLED - - ENABLED - type: string - type: object - kafkaConsumerBatchSize: - default: 10485760 - description: 'kafkaConsumerBatchSize indicates to the broker the maximum batch size, in bytes, that the consumer will accept. Ignored when not using Kafka. Default: 10MB.' - type: integer - kafkaConsumerQueueCapacity: - default: 1000 - description: kafkaConsumerQueueCapacity defines the capacity of the internal message queue used in the Kafka consumer client. Ignored when not using Kafka. - type: integer - kafkaConsumerReplicas: - default: 3 - description: kafkaConsumerReplicas defines the number of replicas (pods) to start for flowlogs-pipeline-transformer, which consumes Kafka messages. This setting is ignored when Kafka is disabled. - format: int32 - minimum: 0 - type: integer - logLevel: - default: info - description: logLevel of the collector runtime - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - metrics: - description: Metrics define the processor configuration regarding metrics - properties: - ignoreTags: - default: - - egress - - packets - description: 'ignoreTags is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: egress, ingress, flows, bytes, packets, namespaces, nodes, workloads' - items: - type: string - type: array - server: - description: metricsServer endpoint configuration for Prometheus scraper - properties: - port: - default: 9102 - description: the prometheus HTTP port - format: int32 - maximum: 65535 - minimum: 1 - type: integer - tls: - description: TLS configuration. - properties: - provided: - description: TLS configuration. - properties: - certFile: - description: certFile defines the path to the certificate file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. 
- type: string - type: - description: 'type for the certificate reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: DISABLED - description: Select the type of TLS configuration "DISABLED" (default) to not configure TLS for the endpoint, "PROVIDED" to manually provide cert file and a key file, and "AUTO" to use OpenShift auto generated certificate using annotations - enum: - - DISABLED - - PROVIDED - - AUTO - type: string - type: object - type: object - type: object - port: - default: 2055 - description: 'port of the flow collector (host port) By conventions, some value are not authorized port must not be below 1024 and must not equal this values: 4789,6081,500, and 4500' - format: int32 - maximum: 65535 - minimum: 1025 - type: integer - profilePort: - description: profilePort allows setting up a Go pprof profiler listening to this port - format: int32 - maximum: 65535 - minimum: 0 - type: integer - resources: - default: - limits: - memory: 800Mi - requests: - cpu: 100m - memory: 100Mi - description: 'resources are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - type: object - required: - - agent - - deploymentModel - type: object - status: - description: FlowCollectorStatus defines the observed state of FlowCollector - properties: - conditions: - description: conditions represent the latest available observations of an object's state - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. 
For example, \n \ttype FooStatus struct{ \t // Represents the observations of a foo's current state. \t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" \t // +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map \t // +listMapKey=type \t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields \t}" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - namespace: - description: namespace where console plugin and flowlogs-pipeline have been deployed. - type: string - required: - - conditions - type: object - type: object - served: true - storage: false - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .spec.agent.type - name: Agent - type: string - - jsonPath: .spec.agent.ebpf.sampling - name: Sampling (EBPF) - type: string - - jsonPath: .spec.deploymentModel - name: Deployment Model - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].reason - name: Status - type: string - name: v1beta1 - schema: - openAPIV3Schema: - description: '`FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments.' - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Defines the desired state of the FlowCollector resource.

*: the mention of "unsupported", or "deprecated" for a feature throughout this document means that this feature is not officially supported by Red Hat. It might have been, for example, contributed by the community and accepted without a formal agreement for maintenance. The product maintainers might provide some support for these features as a best effort only.' - properties: - agent: - description: Agent configuration for flows extraction. - properties: - ebpf: - description: '`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `EBPF`.' - properties: - cacheActiveTimeout: - default: 5s - description: '`cacheActiveTimeout` is the max period during which the reporter aggregates flows before sending. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection.' - pattern: ^\d+(ns|ms|s|m)?$ - type: string - cacheMaxFlows: - default: 100000 - description: '`cacheMaxFlows` is the max number of flows in an aggregate; when reached, the reporter sends the flows. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection.' - format: int32 - minimum: 1 - type: integer - debug: - description: '`debug` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk.' - properties: - env: - additionalProperties: - type: string - description: '`env` allows passing custom environment variables to underlying components. Useful for passing some very concrete performance-tuning options, such as `GOGC` and `GOMAXPROCS`, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug or support scenarios.' - type: object - type: object - excludeInterfaces: - default: - - lo - description: '`excludeInterfaces` contains the interface names that are excluded from flow tracing. An entry enclosed by slashes, such as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string.' - items: - type: string - type: array - features: - description: 'List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- `PacketDrop`: enable the packet-drop flows logging feature. This feature requires mounting the kernel debug filesystem, so the eBPF pod has to run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- `DNSTracking`: enable the DNS tracking feature.
- `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature works better with `sampling` set to 1.
' - items: - description: Agent feature, can be one of:
- `PacketDrop`, to track packet drops.
- `DNSTracking`, to track specific information on DNS traffic.
- `FlowRTT`, to track TCP latency. [Unsupported (*)].
- enum: - - PacketDrop - - DNSTracking - - FlowRTT - type: string - type: array - imagePullPolicy: - default: IfNotPresent - description: '`imagePullPolicy` is the Kubernetes pull policy for the image defined above' - enum: - - IfNotPresent - - Always - - Never - type: string - interfaces: - description: '`interfaces` contains the interface names from where flows are collected. If empty, the agent fetches all the interfaces in the system, excepting the ones listed in ExcludeInterfaces. An entry enclosed by slashes, such as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string.' - items: - type: string - type: array - kafkaBatchSize: - default: 1048576 - description: '`kafkaBatchSize` limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 1MB.' - type: integer - logLevel: - default: info - description: '`logLevel` defines the log level for the NetObserv eBPF Agent' - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - privileged: - description: Privileged mode for the eBPF Agent container. When ignored or set to `false`, the operator sets granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container. If for some reason these capabilities cannot be set, such as if an old kernel version not knowing CAP_BPF is in use, then you can turn on this mode for more global privileges. Some agent features require the privileged mode, such as packet drops tracking (see `features`) and SR-IOV support. - type: boolean - resources: - default: - limits: - memory: 800Mi - requests: - cpu: 100m - memory: 50Mi - description: '`resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - sampling: - default: 50 - description: Sampling rate of the flow reporter. 100 means one flow on 100 is sent. 0 or 1 means all flows are sampled. - format: int32 - minimum: 0 - type: integer - type: object - ipfix: - description: '`ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`.' - properties: - cacheActiveTimeout: - default: 20s - description: '`cacheActiveTimeout` is the max period during which the reporter aggregates flows before sending.' - pattern: ^\d+(ns|ms|s|m)?$ - type: string - cacheMaxFlows: - default: 400 - description: '`cacheMaxFlows` is the max number of flows in an aggregate; when reached, the reporter sends the flows.' - format: int32 - minimum: 0 - type: integer - clusterNetworkOperator: - description: '`clusterNetworkOperator` defines the settings related to the OpenShift Cluster Network Operator, when available.' - properties: - namespace: - default: openshift-network-operator - description: Namespace where the config map is going to be deployed. - type: string - type: object - forceSampleAll: - default: false - description: '`forceSampleAll` allows disabling sampling in the IPFIX-based flow reporter. It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.' - type: boolean - ovnKubernetes: - description: '`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN''s IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.' - properties: - containerName: - default: ovnkube-node - description: '`containerName` defines the name of the container to configure for IPFIX.' - type: string - daemonSetName: - default: ovnkube-node - description: '`daemonSetName` defines the name of the DaemonSet controlling the OVN-Kubernetes pods.' - type: string - namespace: - default: ovn-kubernetes - description: Namespace where OVN-Kubernetes pods are deployed. - type: string - type: object - sampling: - default: 400 - description: '`sampling` is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of IPFIX.' - format: int32 - minimum: 2 - type: integer - type: object - type: - default: EBPF - description: '`type` selects the flows tracing agent. Possible values are:
- `EBPF` (default) to use the NetObserv eBPF agent.
- `IPFIX` [deprecated (*)] to use the legacy IPFIX collector.
`EBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).' - enum: - - EBPF - - IPFIX - type: string - type: object - consolePlugin: - description: '`consolePlugin` defines the settings related to the OpenShift Console plugin, when available.' - properties: - autoscaler: - description: '`autoscaler` spec of a horizontal pod autoscaler to set up for the plugin Deployment.' - properties: - maxReplicas: - default: 3 - description: '`maxReplicas` is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.' - format: int32 - type: integer - metrics: - description: Metrics used by the pod autoscaler - items: - description: MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once). - properties: - containerResource: - description: containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. - properties: - container: - description: container is the name of the container in the pods of the scaling target - type: string - name: - description: name is the name of the resource in question. - type: string - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - container - - name - - target - type: object - external: - description: external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). 
- properties: - metric: - description: metric identifies the target metric by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - object: - description: object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object). 
- properties: - describedObject: - description: describedObject specifies the descriptions of a object,such as kind,name apiVersion - properties: - apiVersion: - description: apiVersion is the API version of the referent - type: string - kind: - description: 'kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - kind - - name - type: object - metric: - description: metric identifies the target metric by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). 
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - describedObject - - metric - - target - type: object - pods: - description: pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value. - properties: - metric: - description: metric identifies the target metric by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). 
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - resource: - description: resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. - properties: - name: - description: name is the name of the resource in question. - type: string - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - name - - target - type: object - type: - description: 'type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available on when the feature-gate HPAContainerMetrics is enabled' - type: string - required: - - type - type: object - type: array - minReplicas: - description: '`minReplicas` is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.' - format: int32 - type: integer - status: - default: DISABLED - description: '`status` describes the desired status regarding deploying an horizontal pod autoscaler.
- `DISABLED` does not deploy a horizontal pod autoscaler.
- `ENABLED` deploys a horizontal pod autoscaler.
' - enum: - - DISABLED - - ENABLED - type: string - type: object - enable: - default: true - description: Enables the console plugin deployment. `spec.loki.enable` must also be `true` - type: boolean - imagePullPolicy: - default: IfNotPresent - description: '`imagePullPolicy` is the Kubernetes pull policy for the image defined above' - enum: - - IfNotPresent - - Always - - Never - type: string - logLevel: - default: info - description: '`logLevel` for the console plugin backend' - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - port: - default: 9001 - description: '`port` is the plugin service port. Do not use 9002, which is reserved for metrics.' - format: int32 - maximum: 65535 - minimum: 1 - type: integer - portNaming: - default: - enable: true - description: '`portNaming` defines the configuration of the port-to-service name translation' - properties: - enable: - default: true - description: Enable the console plugin port-to-service name translation - type: boolean - portNames: - additionalProperties: - type: string - description: '`portNames` defines additional port names to use in the console, for example, `portNames: {"3100": "loki"}`.' - type: object - type: object - quickFilters: - default: - - default: true - filter: - flow_layer: app - name: Applications - - filter: - flow_layer: infra - name: Infrastructure - - default: true - filter: - dst_kind: Pod - src_kind: Pod - name: Pods network - - filter: - dst_kind: Service - name: Services network - description: '`quickFilters` configures quick filter presets for the Console plugin' - items: - description: '`QuickFilter` defines preset configuration for Console''s quick filters' - properties: - default: - description: '`default` defines whether this filter should be active by default or not' - type: boolean - filter: - additionalProperties: - type: string - description: '`filter` is a set of keys and values to be set when this filter is selected. Each key can relate to a list of values using a coma-separated string, for example, `filter: {"src_namespace": "namespace1,namespace2"}`.' - type: object - name: - description: Name of the filter, that is displayed in the Console - type: string - required: - - filter - - name - type: object - type: array - register: - default: true - description: '`register` allows, when set to `true`, to automatically register the provided console plugin with the OpenShift Console operator. When set to `false`, you can still register it manually by editing console.operator.openshift.io/cluster with the following command: `oc patch console.operator.openshift.io cluster --type=''json'' -p ''[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]''`' - type: boolean - replicas: - default: 1 - description: '`replicas` defines the number of replicas (pods) to start.' - format: int32 - minimum: 0 - type: integer - resources: - default: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 50Mi - description: '`resources`, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. 
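# Illustrative only: a minimal `spec.consolePlugin` block using the fields described above.
# The extra quick filter and the "3100" port name are placeholder values; field names and
# defaults come from the schema itself.
#   consolePlugin:
#     register: true
#     replicas: 1
#     port: 9001
#     portNaming:
#       enable: true
#       portNames:
#         "3100": loki
#     quickFilters:
#       - name: Applications
#         default: true
#         filter:
#           flow_layer: app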
- properties: - name: - description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - type: object - deploymentModel: - default: DIRECT - description: '`deploymentModel` defines the desired type of deployment for flow processing. Possible values are:
- `DIRECT` (default) to have the flow processor receive flows directly from the agents.
- `KAFKA` to send flows to a Kafka pipeline before they are consumed by the processor.
Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).' - enum: - - DIRECT - - KAFKA - type: string - exporters: - description: '`exporters` define additional optional exporters for custom consumption or storage.' - items: - description: '`FlowCollectorExporter` defines an additional exporter to send enriched flows to.' - properties: - ipfix: - description: IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. - properties: - targetHost: - default: "" - description: Address of the IPFIX external receiver - type: string - targetPort: - description: Port for the IPFIX external receiver - type: integer - transport: - description: Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`. - enum: - - TCP - - UDP - type: string - required: - - targetHost - - targetPort - type: object - kafka: - description: Kafka configuration, such as the address and topic, to send enriched flows to. - properties: - address: - default: "" - description: Address of the Kafka server - type: string - sasl: - description: SASL authentication configuration. [Unsupported (*)]. - properties: - clientIDReference: - description: Reference to the secret or config map containing the client ID - properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - clientSecretReference: - description: Reference to the secret or config map containing the client secret - properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: DISABLED - description: Type of SASL authentication to use, or `DISABLED` if SASL is not used - enum: - - DISABLED - - PLAIN - - SCRAM-SHA512 - type: string - type: object - tls: - description: TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. - properties: - caCert: - description: '`caCert` defines the reference of the certificate for the Certificate Authority' - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' 
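# Illustrative only: an `exporters` list (under `spec`) matching the schema above.
# The receiver host, port, Kafka address and topic are placeholders.
#   exporters:
#     - type: IPFIX
#       ipfix:
#         targetHost: ipfix-collector.example.com
#         targetPort: 4739
#         transport: TCP
#     - type: KAFKA
#       kafka:
#         address: kafka-bootstrap.example.com:9092
#         topic: netobserv-flows-export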
- type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: Enable TLS - type: boolean - insecureSkipVerify: - default: false - description: '`insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to `true`, the `caCert` field is ignored.' - type: boolean - userCert: - description: '`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)' - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' - type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - type: object - topic: - default: "" - description: Kafka topic to use. It must exist. NetObserv does not create it. - type: string - required: - - address - - topic - type: object - type: - description: '`type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`.' - enum: - - KAFKA - - IPFIX - type: string - required: - - type - type: object - type: array - kafka: - description: Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `KAFKA`. - properties: - address: - default: "" - description: Address of the Kafka server - type: string - sasl: - description: SASL authentication configuration. [Unsupported (*)]. - properties: - clientIDReference: - description: Reference to the secret or config map containing the client ID - properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. 
- type: string - type: - description: 'Type for the file reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - clientSecretReference: - description: Reference to the secret or config map containing the client secret - properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: DISABLED - description: Type of SASL authentication to use, or `DISABLED` if SASL is not used - enum: - - DISABLED - - PLAIN - - SCRAM-SHA512 - type: string - type: object - tls: - description: TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. - properties: - caCert: - description: '`caCert` defines the reference of the certificate for the Certificate Authority' - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' - type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: Enable TLS - type: boolean - insecureSkipVerify: - default: false - description: '`insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to `true`, the `caCert` field is ignored.' - type: boolean - userCert: - description: '`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)' - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' - type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. 
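# Illustrative only: Kafka used as the internal broker, which requires
# `spec.deploymentModel: KAFKA`. The bootstrap address, topic and secret names are
# placeholders; the TLS port 9093 follows the recommendation in the description above.
#   deploymentModel: KAFKA
#   kafka:
#     address: kafka-cluster-kafka-bootstrap.netobserv:9093
#     topic: network-flows
#     tls:
#       enable: true
#       caCert:
#         type: secret
#         name: kafka-cluster-cluster-ca-cert
#         certFile: ca.crt
#       userCert:
#         type: secret
#         name: flp-kafka-user
#         certFile: user.crt
#         certKey: user.key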
- type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - type: object - topic: - default: "" - description: Kafka topic to use. It must exist. NetObserv does not create it. - type: string - required: - - address - - topic - type: object - loki: - description: '`loki`, the flow store, client settings.' - properties: - authToken: - default: DISABLED - description: '`authToken` describes the way to get a token to authenticate to Loki.
- `DISABLED` does not send any token with the request.
- `FORWARD` forwards the user token for authorization.
- `HOST` [deprecated (*)]: uses the local pod service account to authenticate to Loki.
When using the Loki Operator, this must be set to `FORWARD`.' - enum: - - DISABLED - - HOST - - FORWARD - type: string - batchSize: - default: 102400 - description: '`batchSize` is the maximum batch size (in bytes) of logs to accumulate before sending.' - format: int64 - minimum: 1 - type: integer - batchWait: - default: 1s - description: '`batchWait` is the maximum time to wait before sending a batch.' - type: string - enable: - default: true - description: Set `enable` to `true` to store flows in Loki. It is required for the OpenShift Console plugin installation. - type: boolean - maxBackoff: - default: 5s - description: '`maxBackoff` is the maximum backoff time for client connection between retries.' - type: string - maxRetries: - default: 2 - description: '`maxRetries` is the maximum number of retries for client connections.' - format: int32 - minimum: 0 - type: integer - minBackoff: - default: 1s - description: '`minBackoff` is the initial backoff time for client connection between retries.' - type: string - querierUrl: - description: '`querierURL` specifies the address of the Loki querier service, in case it is different from the Loki ingester URL. If empty, the URL value is used (assuming that the Loki ingester and querier are in the same server). When using the Loki Operator, do not set it, since ingestion and queries use the Loki gateway.' - type: string - readTimeout: - default: 30s - description: '`readTimeout` is the maximum loki query total time limit. A timeout of zero means no timeout.' - type: string - staticLabels: - additionalProperties: - type: string - default: - app: netobserv-flowcollector - description: '`staticLabels` is a map of common labels to set on each flow.' - type: object - statusTls: - description: TLS client configuration for Loki status URL. - properties: - caCert: - description: '`caCert` defines the reference of the certificate for the Certificate Authority' - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' - type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: Enable TLS - type: boolean - insecureSkipVerify: - default: false - description: '`insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to `true`, the `caCert` field is ignored.' - type: boolean - userCert: - description: '`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)' - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' 
- type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - type: object - statusUrl: - description: '`statusURL` specifies the address of the Loki `/ready`, `/metrics` and `/config` endpoints, in case it is different from the Loki querier URL. If empty, the `querierURL` value is used. This is useful to show error messages and some context in the frontend. When using the Loki Operator, set it to the Loki HTTP query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/. `statusTLS` configuration is used when `statusUrl` is set.' - type: string - tenantID: - default: netobserv - description: '`tenantID` is the Loki `X-Scope-OrgID` that identifies the tenant for each request. When using the Loki Operator, set it to `network`, which corresponds to a special tenant mode.' - type: string - timeout: - default: 10s - description: '`timeout` is the maximum processor time connection / request limit. A timeout of zero means no timeout.' - type: string - tls: - description: TLS client configuration for Loki URL. - properties: - caCert: - description: '`caCert` defines the reference of the certificate for the Certificate Authority' - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' - type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: Enable TLS - type: boolean - insecureSkipVerify: - default: false - description: '`insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to `true`, the `caCert` field is ignored.' - type: boolean - userCert: - description: '`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)' - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' - type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. 
If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - type: object - url: - default: http://loki:3100/ - description: '`url` is the address of an existing Loki service to push the flows to. When using the Loki Operator, set it to the Loki gateway service with the `network` tenant set in path, for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network.' - type: string - type: object - namespace: - default: netobserv - description: Namespace where NetObserv pods are deployed. - type: string - processor: - description: '`processor` defines the settings of the component that receives the flows from the agent, enriches them, generates metrics, and forwards them to the Loki persistence layer and/or any available exporter.' - properties: - addZone: - description: '`addZone` allows availability zone awareness by labelling flows with their source and destination zones. This feature requires the "topology.kubernetes.io/zone" label to be set on nodes.' - type: boolean - clusterName: - default: "" - description: '`clusterName` is the name of the cluster to appear in the flows data. This is useful in a multi-cluster context. When using OpenShift, leave empty to make it automatically determined.' - type: string - conversationEndTimeout: - default: 10s - description: '`conversationEndTimeout` is the time to wait after a network flow is received, to consider the conversation ended. This delay is ignored when a FIN packet is collected for TCP flows (see `conversationTerminatingTimeout` instead).' - type: string - conversationHeartbeatInterval: - default: 30s - description: '`conversationHeartbeatInterval` is the time to wait between "tick" events of a conversation' - type: string - conversationTerminatingTimeout: - default: 5s - description: '`conversationTerminatingTimeout` is the time to wait from detected FIN flag to end a conversation. Only relevant for TCP flows.' - type: string - debug: - description: '`debug` allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk.' - properties: - env: - additionalProperties: - type: string - description: '`env` allows passing custom environment variables to underlying components. Useful for passing some very concrete performance-tuning options, such as `GOGC` and `GOMAXPROCS`, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug or support scenarios.' - type: object - type: object - dropUnusedFields: - default: true - description: '`dropUnusedFields` allows, when set to `true`, to drop fields that are known to be unused by OVS, to save storage space.' 
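# Illustrative only: `spec.loki` settings for a Loki Operator deployment, reusing the
# example endpoints quoted in the field descriptions above; the CA config map name and
# file name are placeholders.
#   loki:
#     enable: true
#     url: https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network
#     statusUrl: https://loki-query-frontend-http.netobserv.svc:3100/
#     tenantID: network
#     authToken: FORWARD
#     tls:
#       enable: true
#       caCert:
#         type: configmap
#         name: loki-gateway-ca-bundle
#         certFile: service-ca.crt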
- type: boolean - enableKubeProbes: - default: true - description: '`enableKubeProbes` is a flag to enable or disable Kubernetes liveness and readiness probes' - type: boolean - healthPort: - default: 8080 - description: '`healthPort` is a collector HTTP port in the Pod that exposes the health check API' - format: int32 - maximum: 65535 - minimum: 1 - type: integer - imagePullPolicy: - default: IfNotPresent - description: '`imagePullPolicy` is the Kubernetes pull policy for the image defined above' - enum: - - IfNotPresent - - Always - - Never - type: string - kafkaConsumerAutoscaler: - description: '`kafkaConsumerAutoscaler` is the spec of a horizontal pod autoscaler to set up for `flowlogs-pipeline-transformer`, which consumes Kafka messages. This setting is ignored when Kafka is disabled.' - properties: - maxReplicas: - default: 3 - description: '`maxReplicas` is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.' - format: int32 - type: integer - metrics: - description: Metrics used by the pod autoscaler - items: - description: MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once). - properties: - containerResource: - description: containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. - properties: - container: - description: container is the name of the container in the pods of the scaling target - type: string - name: - description: name is the name of the resource in question. - type: string - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - container - - name - - target - type: object - external: - description: external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). 
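# Illustrative only: a `spec.processor.kafkaConsumerAutoscaler` using a standard CPU
# utilization metric (ignored when Kafka is disabled); the replica counts and the 50%
# target are placeholders.
#   processor:
#     kafkaConsumerAutoscaler:
#       status: ENABLED
#       minReplicas: 1
#       maxReplicas: 3
#       metrics:
#         - type: Resource
#           resource:
#             name: cpu
#             target:
#               type: Utilization
#               averageUtilization: 50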
- properties: - metric: - description: metric identifies the target metric by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - object: - description: object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object). 
- properties: - describedObject: - description: describedObject specifies the descriptions of a object,such as kind,name apiVersion - properties: - apiVersion: - description: apiVersion is the API version of the referent - type: string - kind: - description: 'kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - kind - - name - type: object - metric: - description: metric identifies the target metric by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). 
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - describedObject - - metric - - target - type: object - pods: - description: pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value. - properties: - metric: - description: metric identifies the target metric by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). 
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - resource: - description: resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. - properties: - name: - description: name is the name of the resource in question. - type: string - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - name - - target - type: object - type: - description: 'type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available on when the feature-gate HPAContainerMetrics is enabled' - type: string - required: - - type - type: object - type: array - minReplicas: - description: '`minReplicas` is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.' - format: int32 - type: integer - status: - default: DISABLED - description: '`status` describes the desired status regarding deploying an horizontal pod autoscaler.
- `DISABLED` does not deploy a horizontal pod autoscaler.
- `ENABLED` deploys a horizontal pod autoscaler.
' - enum: - - DISABLED - - ENABLED - type: string - type: object - kafkaConsumerBatchSize: - default: 10485760 - description: '`kafkaConsumerBatchSize` indicates to the broker the maximum batch size, in bytes, that the consumer accepts. Ignored when not using Kafka. Default: 10MB.' - type: integer - kafkaConsumerQueueCapacity: - default: 1000 - description: '`kafkaConsumerQueueCapacity` defines the capacity of the internal message queue used in the Kafka consumer client. Ignored when not using Kafka.' - type: integer - kafkaConsumerReplicas: - default: 3 - description: '`kafkaConsumerReplicas` defines the number of replicas (pods) to start for `flowlogs-pipeline-transformer`, which consumes Kafka messages. This setting is ignored when Kafka is disabled.' - format: int32 - minimum: 0 - type: integer - logLevel: - default: info - description: '`logLevel` of the processor runtime' - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - logTypes: - default: FLOWS - description: '`logTypes` defines the desired record types to generate. Possible values are:
- `FLOWS` (default) to export regular network flows
- `CONVERSATIONS` to generate events for started conversations, ended conversations, as well as periodic "tick" updates
- `ENDED_CONVERSATIONS` to generate only ended conversation events
- `ALL` to generate both network flows and all conversation events
' - enum: - - FLOWS - - CONVERSATIONS - - ENDED_CONVERSATIONS - - ALL - type: string - metrics: - description: '`Metrics` define the processor configuration regarding metrics' - properties: - disableAlerts: - description: '`disableAlerts` is a list of alerts that should be disabled. Possible values are:
`NetObservNoFlows`, which is triggered when no flows are being observed for a certain period.
`NetObservLokiError`, which is triggered when flows are being dropped due to Loki errors.
' - items: - description: Name of a processor alert. Possible values are:
- `NetObservNoFlows`, which is triggered when no flows are being observed for a certain period.
- `NetObservLokiError`, which is triggered when flows are being dropped due to Loki errors.
- enum: - - NetObservNoFlows - - NetObservLokiError - type: string - type: array - ignoreTags: - default: - - egress - - packets - - nodes-flows - - namespaces-flows - - workloads-flows - - namespaces - description: '`ignoreTags` [deprecated (*)] is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: `egress`, `ingress`, `flows`, `bytes`, `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, `namespaces-flows`, `workloads-flows`. Namespace-based metrics are covered by both `workloads` and `namespaces` tags, hence it is recommended to always ignore one of them (`workloads` offering a finer granularity).
Deprecation notice: use `includeList` instead.' - items: - type: string - type: array - includeList: - description: '`includeList` is a list of metric names to specify which ones to generate. The names correspond to the names in Prometheus without the prefix. For example, `namespace_egress_packets_total` will show up as `netobserv_namespace_egress_packets_total` in Prometheus. Note that the more metrics you add, the bigger is the impact on Prometheus workload resources. Metrics enabled by default are: `namespace_flows_total`, `node_ingress_bytes_total`, `workload_ingress_bytes_total`, `namespace_drop_packets_total` (when `PacketDrop` feature is enabled), `namespace_rtt_seconds` (when `FlowRTT` feature is enabled), `namespace_dns_latency_seconds` (when `DNSTracking` feature is enabled). More information, with full list of available metrics: https://github.com/netobserv/network-observability-operator/blob/main/docs/Metrics.md' - items: - description: Metric name. More information in https://github.com/netobserv/network-observability-operator/blob/main/docs/Metrics.md. - enum: - - namespace_egress_bytes_total - - namespace_egress_packets_total - - namespace_ingress_bytes_total - - namespace_ingress_packets_total - - namespace_flows_total - - node_egress_bytes_total - - node_egress_packets_total - - node_ingress_bytes_total - - node_ingress_packets_total - - node_flows_total - - workload_egress_bytes_total - - workload_egress_packets_total - - workload_ingress_bytes_total - - workload_ingress_packets_total - - workload_flows_total - - namespace_drop_bytes_total - - namespace_drop_packets_total - - node_drop_bytes_total - - node_drop_packets_total - - workload_drop_bytes_total - - workload_drop_packets_total - - namespace_rtt_seconds - - node_rtt_seconds - - workload_rtt_seconds - - namespace_dns_latency_seconds - - node_dns_latency_seconds - - workload_dns_latency_seconds - type: string - type: array - server: - description: Metrics server endpoint configuration for Prometheus scraper - properties: - port: - default: 9102 - description: The prometheus HTTP port - format: int32 - maximum: 65535 - minimum: 1 - type: integer - tls: - description: TLS configuration. - properties: - insecureSkipVerify: - default: false - description: '`insecureSkipVerify` allows skipping client-side verification of the provided certificate. If set to `true`, the `providedCaFile` field is ignored.' - type: boolean - provided: - description: TLS configuration when `type` is set to `PROVIDED`. - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' - type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - providedCaFile: - description: Reference to the CA file when `type` is set to `PROVIDED`. 
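# Illustrative only: a `spec.processor.metrics` block selecting a few metrics from the
# enumerated list, disabling one alert and exposing the endpoint with an auto-generated
# certificate (all values taken from the enums and defaults above).
#   processor:
#     metrics:
#       includeList:
#         - namespace_flows_total
#         - node_ingress_bytes_total
#         - workload_ingress_bytes_total
#       disableAlerts:
#         - NetObservLokiError
#       server:
#         port: 9102
#         tls:
#           type: AUTO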
- properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: DISABLED - description: Select the type of TLS configuration:
- `DISABLED` (default) to not configure TLS for the endpoint. - `PROVIDED` to manually provide cert file and a key file. - `AUTO` to use OpenShift auto generated certificate using annotations. - enum: - - DISABLED - - PROVIDED - - AUTO - type: string - type: object - type: object - type: object - multiClusterDeployment: - default: false - description: Set `multiClusterDeployment` to `true` to enable multi clusters feature. This adds clusterName label to flows data - type: boolean - port: - default: 2055 - description: Port of the flow collector (host port). By convention, some values are forbidden. It must be greater than 1024 and different from 4500, 4789 and 6081. - format: int32 - maximum: 65535 - minimum: 1025 - type: integer - profilePort: - description: '`profilePort` allows setting up a Go pprof profiler listening to this port' - format: int32 - maximum: 65535 - minimum: 0 - type: integer - resources: - default: - limits: - memory: 800Mi - requests: - cpu: 100m - memory: 100Mi - description: '`resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - type: object - type: object - status: - description: '`FlowCollectorStatus` defines the observed state of FlowCollector' - properties: - conditions: - description: '`conditions` represent the latest available observations of an object''s state' - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n \ttype FooStatus struct{ \t // Represents the observations of a foo's current state. 
\t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" \t // +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map \t // +listMapKey=type \t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields \t}" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - namespace: - description: Namespace where console plugin and flowlogs-pipeline have been deployed. - type: string - required: - - conditions - type: object - type: object - served: true - storage: false - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .spec.agent.type - name: Agent - type: string - - jsonPath: .spec.agent.ebpf.sampling - name: Sampling (EBPF) - type: string - - jsonPath: .spec.deploymentModel - name: Deployment Model - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].reason - name: Status - type: string - name: v1beta2 - schema: - openAPIV3Schema: - description: '`FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments.' - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Defines the desired state of the FlowCollector resource.

*: the mention of "unsupported", or "deprecated" for a feature throughout this document means that this feature is not officially supported by Red Hat. It might have been, for example, contributed by the community and accepted without a formal agreement for maintenance. The product maintainers might provide some support for these features as a best effort only.' - properties: - agent: - description: Agent configuration for flows extraction. - properties: - ebpf: - description: '`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `eBPF`.' - properties: - advanced: - description: '`advanced` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk.' - properties: - env: - additionalProperties: - type: string - description: '`env` allows passing custom environment variables to underlying components. Useful for passing some very concrete performance-tuning options, such as `GOGC` and `GOMAXPROCS`, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug or support scenarios.' - type: object - type: object - cacheActiveTimeout: - default: 5s - description: '`cacheActiveTimeout` is the max period during which the reporter aggregates flows before sending. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection.' - pattern: ^\d+(ns|ms|s|m)?$ - type: string - cacheMaxFlows: - default: 100000 - description: '`cacheMaxFlows` is the max number of flows in an aggregate; when reached, the reporter sends the flows. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection.' - format: int32 - minimum: 1 - type: integer - excludeInterfaces: - default: - - lo - description: '`excludeInterfaces` contains the interface names that are excluded from flow tracing. An entry enclosed by slashes, such as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string.' - items: - type: string - type: array - features: - description: 'List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- `PacketDrop`: enable the packet drops logging feature. This feature requires mounting the kernel debug filesystem, so the eBPF pod has to run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- `DNSTracking`: enable the DNS tracking feature.
- `FlowRTT`: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature works better when `sampling` is set to 1 (see the commented example further below).
' - items: - description: Agent feature, can be one of:
- `PacketDrop`, to track packet drops.
- `DNSTracking`, to track specific information on DNS traffic.
- `FlowRTT`, to track TCP latency.
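#
# For illustration only, and not part of the generated schema: a hedged sketch of a
# FlowCollector resource enabling all three eBPF agent features. The resource name
# "cluster" is an assumption; `privileged: true` is shown because `PacketDrop` requires
# it, and `sampling: 1` because `FlowRTT` works better without sampling.
#
#   apiVersion: flows.netobserv.io/v1beta2
#   kind: FlowCollector
#   metadata:
#     name: cluster
#   spec:
#     agent:
#       type: eBPF
#       ebpf:
#         privileged: true   # required for PacketDrop (kernel debug filesystem mount)
#         sampling: 1        # 0 or 1 means all flows are sampled
#         features:
#           - PacketDrop
#           - DNSTracking
#           - FlowRTT
#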
- enum: - - PacketDrop - - DNSTracking - - FlowRTT - type: string - type: array - imagePullPolicy: - default: IfNotPresent - description: '`imagePullPolicy` is the Kubernetes pull policy for the image defined above' - enum: - - IfNotPresent - - Always - - Never - type: string - interfaces: - description: '`interfaces` contains the interface names from where flows are collected. If empty, the agent fetches all the interfaces in the system, excepting the ones listed in ExcludeInterfaces. An entry enclosed by slashes, such as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string.' - items: - type: string - type: array - kafkaBatchSize: - default: 1048576 - description: '`kafkaBatchSize` limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 1MB.' - type: integer - logLevel: - default: info - description: '`logLevel` defines the log level for the NetObserv eBPF Agent' - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - privileged: - description: Privileged mode for the eBPF Agent container. When ignored or set to `false`, the operator sets granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container. If for some reason these capabilities cannot be set, such as if an old kernel version not knowing CAP_BPF is in use, then you can turn on this mode for more global privileges. Some agent features require the privileged mode, such as packet drops tracking (see `features`) and SR-IOV support. - type: boolean - resources: - default: - limits: - memory: 800Mi - requests: - cpu: 100m - memory: 50Mi - description: '`resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - sampling: - default: 50 - description: Sampling rate of the flow reporter. 100 means one flow on 100 is sent. 0 or 1 means all flows are sampled. - format: int32 - minimum: 0 - type: integer - type: object - ipfix: - description: '`ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`.' - properties: - cacheActiveTimeout: - default: 20s - description: '`cacheActiveTimeout` is the max period during which the reporter aggregates flows before sending.' - pattern: ^\d+(ns|ms|s|m)?$ - type: string - cacheMaxFlows: - default: 400 - description: '`cacheMaxFlows` is the max number of flows in an aggregate; when reached, the reporter sends the flows.' - format: int32 - minimum: 0 - type: integer - clusterNetworkOperator: - description: '`clusterNetworkOperator` defines the settings related to the OpenShift Cluster Network Operator, when available.' - properties: - namespace: - default: openshift-network-operator - description: Namespace where the config map is going to be deployed. - type: string - type: object - forceSampleAll: - default: false - description: '`forceSampleAll` allows disabling sampling in the IPFIX-based flow reporter. It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.' - type: boolean - ovnKubernetes: - description: '`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN''s IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.' - properties: - containerName: - default: ovnkube-node - description: '`containerName` defines the name of the container to configure for IPFIX.' - type: string - daemonSetName: - default: ovnkube-node - description: '`daemonSetName` defines the name of the DaemonSet controlling the OVN-Kubernetes pods.' - type: string - namespace: - default: ovn-kubernetes - description: Namespace where OVN-Kubernetes pods are deployed. - type: string - type: object - sampling: - default: 400 - description: '`sampling` is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of IPFIX.' - format: int32 - minimum: 2 - type: integer - type: object - type: - default: eBPF - description: '`type` selects the flows tracing agent. Possible values are:
- `eBPF` (default) to use the NetObserv eBPF agent.
- `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
`eBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).' - enum: - - eBPF - - IPFIX - type: string - type: object - consolePlugin: - description: '`consolePlugin` defines the settings related to the OpenShift Console plugin, when available.' - properties: - advanced: - description: '`advanced` allows setting some aspects of the internal configuration of the console plugin. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk.' - properties: - args: - description: '`args` allows passing custom arguments to underlying components. Useful for overriding some parameters, such as an url or a configuration path, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug or support scenarios.' - items: - type: string - type: array - env: - additionalProperties: - type: string - description: '`env` allows passing custom environment variables to underlying components. Useful for passing some very concrete performance-tuning options, such as `GOGC` and `GOMAXPROCS`, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug or support scenarios.' - type: object - port: - default: 9001 - description: '`port` is the plugin service port. Do not use 9002, which is reserved for metrics.' - format: int32 - maximum: 65535 - minimum: 1 - type: integer - register: - default: true - description: '`register` allows, when set to `true`, to automatically register the provided console plugin with the OpenShift Console operator. When set to `false`, you can still register it manually by editing console.operator.openshift.io/cluster with the following command: `oc patch console.operator.openshift.io cluster --type=''json'' -p ''[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]''`' - type: boolean - type: object - autoscaler: - description: '`autoscaler` spec of a horizontal pod autoscaler to set up for the plugin Deployment.' - properties: - maxReplicas: - default: 3 - description: '`maxReplicas` is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.' - format: int32 - type: integer - metrics: - description: Metrics used by the pod autoscaler - items: - description: MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once). - properties: - containerResource: - description: containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. - properties: - container: - description: container is the name of the container in the pods of the scaling target - type: string - name: - description: name is the name of the resource in question. 
- type: string - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - container - - name - - target - type: object - external: - description: external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). - properties: - metric: - description: metric identifies the target metric by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - object: - description: object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object). - properties: - describedObject: - description: describedObject specifies the descriptions of a object,such as kind,name apiVersion - properties: - apiVersion: - description: apiVersion is the API version of the referent - type: string - kind: - description: 'kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - kind - - name - type: object - metric: - description: metric identifies the target metric by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - describedObject - - metric - - target - type: object - pods: - description: pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value. - properties: - metric: - description: metric identifies the target metric by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - resource: - description: resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. - properties: - name: - description: name is the name of the resource in question. - type: string - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - name - - target - type: object - type: - description: 'type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available on when the feature-gate HPAContainerMetrics is enabled' - type: string - required: - - type - type: object - type: array - minReplicas: - description: '`minReplicas` is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. 
minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.' - format: int32 - type: integer - status: - default: Disabled - description: '`status` describes the desired status regarding deploying an horizontal pod autoscaler.
- `Disabled` does not deploy a horizontal pod autoscaler.
- `Enabled` deploys a horizontal pod autoscaler.
' - enum: - - Disabled - - Enabled - type: string - type: object - enable: - default: true - description: Enables the console plugin deployment. `spec.loki.enable` must also be `true` - type: boolean - imagePullPolicy: - default: IfNotPresent - description: '`imagePullPolicy` is the Kubernetes pull policy for the image defined above' - enum: - - IfNotPresent - - Always - - Never - type: string - logLevel: - default: info - description: '`logLevel` for the console plugin backend' - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - portNaming: - default: - enable: true - description: '`portNaming` defines the configuration of the port-to-service name translation' - properties: - enable: - default: true - description: Enable the console plugin port-to-service name translation - type: boolean - portNames: - additionalProperties: - type: string - description: '`portNames` defines additional port names to use in the console, for example, `portNames: {"3100": "loki"}`.' - type: object - type: object - quickFilters: - default: - - default: true - filter: - flow_layer: app - name: Applications - - filter: - flow_layer: infra - name: Infrastructure - - default: true - filter: - dst_kind: Pod - src_kind: Pod - name: Pods network - - filter: - dst_kind: Service - name: Services network - description: '`quickFilters` configures quick filter presets for the Console plugin' - items: - description: '`QuickFilter` defines preset configuration for Console''s quick filters' - properties: - default: - description: '`default` defines whether this filter should be active by default or not' - type: boolean - filter: - additionalProperties: - type: string - description: '`filter` is a set of keys and values to be set when this filter is selected. Each key can relate to a list of values using a coma-separated string, for example, `filter: {"src_namespace": "namespace1,namespace2"}`.' - type: object - name: - description: Name of the filter, that is displayed in the Console - type: string - required: - - filter - - name - type: object - type: array - replicas: - default: 1 - description: '`replicas` defines the number of replicas (pods) to start.' - format: int32 - minimum: 0 - type: integer - resources: - default: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 50Mi - description: '`resources`, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - type: object - deploymentModel: - default: Direct - description: '`deploymentModel` defines the desired type of deployment for flow processing. Possible values are:
- `Direct` (default) to make the flow processor receive flows directly from the agents.
- `Kafka` to send flows to a Kafka pipeline before they are consumed by the processor (see the commented example further below).
Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).' - enum: - - Direct - - Kafka - type: string - exporters: - description: '`exporters` define additional optional exporters for custom consumption or storage.' - items: - description: '`FlowCollectorExporter` defines an additional exporter to send enriched flows to.' - properties: - ipfix: - description: IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. - properties: - targetHost: - default: "" - description: Address of the IPFIX external receiver - type: string - targetPort: - description: Port for the IPFIX external receiver - type: integer - transport: - description: Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`. - enum: - - TCP - - UDP - type: string - required: - - targetHost - - targetPort - type: object - kafka: - description: Kafka configuration, such as the address and topic, to send enriched flows to. - properties: - address: - default: "" - description: Address of the Kafka server - type: string - sasl: - description: SASL authentication configuration. [Unsupported (*)]. - properties: - clientIDReference: - description: Reference to the secret or config map containing the client ID - properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - clientSecretReference: - description: Reference to the secret or config map containing the client secret - properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: Disabled - description: Type of SASL authentication to use, or `Disabled` if SASL is not used - enum: - - Disabled - - Plain - - ScramSHA512 - type: string - type: object - tls: - description: TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. - properties: - caCert: - description: '`caCert` defines the reference of the certificate for the Certificate Authority' - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' 
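#
# For illustration only, a hedged sketch of the `Kafka` deployment model described earlier in
# this section: `spec.deploymentModel` is switched to `Kafka` and combined with the top-level
# `spec.kafka` broker settings (defined further below in this schema). The address and topic
# values are placeholders, not defaults; the topic must already exist.
#
#   spec:
#     deploymentModel: Kafka
#     kafka:
#       address: "kafka-cluster-kafka-bootstrap.netobserv:9092"   # placeholder address
#       topic: "network-flows"                                    # placeholder topic name
#       tls:
#         enable: false
#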
- type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: Enable TLS - type: boolean - insecureSkipVerify: - default: false - description: '`insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to `true`, the `caCert` field is ignored.' - type: boolean - userCert: - description: '`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)' - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' - type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - type: object - topic: - default: "" - description: Kafka topic to use. It must exist. NetObserv does not create it. - type: string - required: - - address - - topic - type: object - type: - description: '`type` selects the type of exporters. The available options are `Kafka` and `IPFIX`.' - enum: - - Kafka - - IPFIX - type: string - required: - - type - type: object - type: array - kafka: - description: Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `Kafka`. - properties: - address: - default: "" - description: Address of the Kafka server - type: string - sasl: - description: SASL authentication configuration. [Unsupported (*)]. - properties: - clientIDReference: - description: Reference to the secret or config map containing the client ID - properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. 
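#
# For illustration only, a hedged sketch of an additional Kafka exporter as allowed by the
# `exporters` array above. The address and topic values are placeholders.
#
#   spec:
#     exporters:
#       - type: Kafka
#         kafka:
#           address: "kafka-cluster-kafka-bootstrap.netobserv:9092"
#           topic: "netobserv-flows-export"
#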
- type: string - type: - description: 'Type for the file reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - clientSecretReference: - description: Reference to the secret or config map containing the client secret - properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: Disabled - description: Type of SASL authentication to use, or `Disabled` if SASL is not used - enum: - - Disabled - - Plain - - ScramSHA512 - type: string - type: object - tls: - description: TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. - properties: - caCert: - description: '`caCert` defines the reference of the certificate for the Certificate Authority' - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' - type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: Enable TLS - type: boolean - insecureSkipVerify: - default: false - description: '`insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to `true`, the `caCert` field is ignored.' - type: boolean - userCert: - description: '`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)' - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' - type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. 
- type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - type: object - topic: - default: "" - description: Kafka topic to use. It must exist. NetObserv does not create it. - type: string - required: - - address - - topic - type: object - loki: - description: '`loki`, the flow store, client settings.' - properties: - advanced: - description: '`advanced` allows setting some aspects of the internal configuration of the Loki clients. This section is aimed mostly for debugging and fine-grained performance optimizations.' - properties: - staticLabels: - additionalProperties: - type: string - default: - app: netobserv-flowcollector - description: '`staticLabels` is a map of common labels to set on each flow in Loki storage.' - type: object - writeMaxBackoff: - default: 5s - description: '`writeMaxBackoff` is the maximum backoff time for Loki client connection between retries.' - type: string - writeMaxRetries: - default: 2 - description: '`writeMaxRetries` is the maximum number of retries for Loki client connections.' - format: int32 - minimum: 0 - type: integer - writeMinBackoff: - default: 1s - description: '`writeMinBackoff` is the initial backoff time for Loki client connection between retries.' - type: string - type: object - enable: - default: true - description: Set `enable` to `true` to store flows in Loki. It is required for the OpenShift Console plugin installation. - type: boolean - lokiStack: - description: Loki configuration for `LokiStack` mode. This is useful for an easy loki-operator configuration. It is ignored for other modes. - properties: - name: - default: loki - description: Name of an existing LokiStack resource to use. - type: string - namespace: - description: Namespace where this `LokiStack` resource is located. If omited, it is assumed to be the same as `spec.namespace`. - type: string - type: object - manual: - description: Loki configuration for `Manual` mode. This is the most flexible configuration. It is ignored for other modes. - properties: - authToken: - default: Disabled - description: '`authToken` describes the way to get a token to authenticate to Loki.
- `Disabled` does not send any token with the request.
- `Forward` forwards the user token for authorization (see the commented example further below).
- `Host` [deprecated (*)] - uses the local pod service account to authenticate to Loki.
When using the Loki Operator, this must be set to `Forward`.' - enum: - - Disabled - - Host - - Forward - type: string - ingesterUrl: - default: http://loki:3100/ - description: '`ingesterUrl` is the address of an existing Loki ingester service to push the flows to. When using the Loki Operator, set it to the Loki gateway service with the `network` tenant set in path, for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network.' - type: string - querierUrl: - default: http://loki:3100/ - description: '`querierUrl` specifies the address of the Loki querier service. When using the Loki Operator, set it to the Loki gateway service with the `network` tenant set in path, for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network.' - type: string - statusTls: - description: TLS client configuration for Loki status URL. - properties: - caCert: - description: '`caCert` defines the reference of the certificate for the Certificate Authority' - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' - type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: Enable TLS - type: boolean - insecureSkipVerify: - default: false - description: '`insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to `true`, the `caCert` field is ignored.' - type: boolean - userCert: - description: '`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)' - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' - type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - type: object - statusUrl: - description: '`statusUrl` specifies the address of the Loki `/ready`, `/metrics` and `/config` endpoints, in case it is different from the Loki querier URL. If empty, the `querierUrl` value is used. This is useful to show error messages and some context in the frontend. 
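#
# For illustration only, a hedged sketch of `Manual` mode pointing to a Loki Operator gateway,
# combining the settings described in this section. The URLs reuse the examples given in the
# field descriptions; `authToken: Forward` and the `network` tenant follow those same
# descriptions.
#
#   spec:
#     loki:
#       mode: Manual
#       manual:
#         authToken: Forward
#         ingesterUrl: "https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network"
#         querierUrl: "https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network"
#         tenantID: network
#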
When using the Loki Operator, set it to the Loki HTTP query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/. `statusTLS` configuration is used when `statusUrl` is set.' - type: string - tenantID: - default: netobserv - description: '`tenantID` is the Loki `X-Scope-OrgID` that identifies the tenant for each request. When using the Loki Operator, set it to `network`, which corresponds to a special tenant mode.' - type: string - tls: - description: TLS client configuration for Loki URL. - properties: - caCert: - description: '`caCert` defines the reference of the certificate for the Certificate Authority' - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' - type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: Enable TLS - type: boolean - insecureSkipVerify: - default: false - description: '`insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to `true`, the `caCert` field is ignored.' - type: boolean - userCert: - description: '`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)' - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' - type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - type: object - type: object - microservices: - description: Loki configuration for `Microservices` mode. Use this option when Loki is installed using the microservices deployment mode (https://grafana.com/docs/loki/latest/fundamentals/architecture/deployment-modes/#microservices-mode). It is ignored for other modes. - properties: - ingesterUrl: - default: http://loki-distributor:3100/ - description: '`ingesterUrl` is the address of an existing Loki ingester service to push the flows to.' - type: string - querierUrl: - default: http://loki-query-frontend:3100/ - description: '`querierURL` specifies the address of the Loki querier service.' 
- type: string - tenantID: - default: netobserv - description: '`tenantID` is the Loki `X-Scope-OrgID` header that identifies the tenant for each request.' - type: string - tls: - description: TLS client configuration for Loki URL. - properties: - caCert: - description: '`caCert` defines the reference of the certificate for the Certificate Authority' - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' - type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: Enable TLS - type: boolean - insecureSkipVerify: - default: false - description: '`insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to `true`, the `caCert` field is ignored.' - type: boolean - userCert: - description: '`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)' - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' - type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - type: object - type: object - mode: - default: Monolithic - description: '`mode` must be set according to the installation mode of Loki:
- Use `LokiStack` when Loki is managed using the Loki Operator (see the commented example further below)
- Use `Monolithic` when Loki is installed as a monolithic workload
- Use `Microservices` when Loki is installed as microservices, but without the Loki Operator
- Use `Manual` if none of the options above match your setup
' - enum: - - Manual - - LokiStack - - Monolithic - - Microservices - type: string - monolithic: - description: Loki configuration for `Monolithic` mode. Use this option when Loki is installed using the monolithic deployment mode (https://grafana.com/docs/loki/latest/fundamentals/architecture/deployment-modes/#monolithic-mode). It is ignored for other modes. - properties: - tenantID: - default: netobserv - description: '`tenantID` is the Loki `X-Scope-OrgID` header that identifies the tenant for each request.' - type: string - tls: - description: TLS client configuration for Loki URL. - properties: - caCert: - description: '`caCert` defines the reference of the certificate for the Certificate Authority' - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' - type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: Enable TLS - type: boolean - insecureSkipVerify: - default: false - description: '`insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to `true`, the `caCert` field is ignored.' - type: boolean - userCert: - description: '`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)' - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' - type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - type: object - url: - default: http://loki:3100/ - description: '`url` is the unique address of an existing Loki service that points to both the ingester and the querier.' - type: string - type: object - readTimeout: - default: 30s - description: '`readTimeout` is the maximum console plugin loki query total time limit. A timeout of zero means no timeout.' - type: string - writeBatchSize: - default: 102400 - description: '`writeBatchSize` is the maximum batch size (in bytes) of Loki logs to accumulate before sending.' 
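#
# For illustration only, a hedged sketch of the `LokiStack` mode, which delegates most of the
# Loki client configuration to an existing LokiStack resource managed by the Loki Operator.
# The name "loki" is the documented default; the namespace value is a placeholder.
#
#   spec:
#     loki:
#       enable: true
#       mode: LokiStack
#       lokiStack:
#         name: loki
#         namespace: netobserv
#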
- format: int64 - minimum: 1 - type: integer - writeBatchWait: - default: 1s - description: '`writeBatchWait` is the maximum time to wait before sending a Loki batch.' - type: string - writeTimeout: - default: 10s - description: '`writeTimeout` is the maximum Loki time connection / request limit. A timeout of zero means no timeout.' - type: string - type: object - namespace: - default: netobserv - description: Namespace where NetObserv pods are deployed. - type: string - processor: - description: '`processor` defines the settings of the component that receives the flows from the agent, enriches them, generates metrics, and forwards them to the Loki persistence layer and/or any available exporter.' - properties: - addZone: - description: '`addZone` allows availability zone awareness by labelling flows with their source and destination zones. This feature requires the "topology.kubernetes.io/zone" label to be set on nodes.' - type: boolean - advanced: - description: '`advanced` allows setting some aspects of the internal configuration of the flow processor. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk.' - properties: - conversationEndTimeout: - default: 10s - description: '`conversationEndTimeout` is the time to wait after a network flow is received, to consider the conversation ended. This delay is ignored when a FIN packet is collected for TCP flows (see `conversationTerminatingTimeout` instead).' - type: string - conversationHeartbeatInterval: - default: 30s - description: '`conversationHeartbeatInterval` is the time to wait between "tick" events of a conversation' - type: string - conversationTerminatingTimeout: - default: 5s - description: '`conversationTerminatingTimeout` is the time to wait from detected FIN flag to end a conversation. Only relevant for TCP flows.' - type: string - dropUnusedFields: - default: true - description: '`dropUnusedFields` allows, when set to `true`, to drop fields that are known to be unused by OVS, to save storage space.' - type: boolean - enableKubeProbes: - default: true - description: '`enableKubeProbes` is a flag to enable or disable Kubernetes liveness and readiness probes' - type: boolean - env: - additionalProperties: - type: string - description: '`env` allows passing custom environment variables to underlying components. Useful for passing some very concrete performance-tuning options, such as `GOGC` and `GOMAXPROCS`, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug or support scenarios.' - type: object - healthPort: - default: 8080 - description: '`healthPort` is a collector HTTP port in the Pod that exposes the health check API' - format: int32 - maximum: 65535 - minimum: 1 - type: integer - port: - default: 2055 - description: Port of the flow collector (host port). By convention, some values are forbidden. It must be greater than 1024 and different from 4500, 4789 and 6081. - format: int32 - maximum: 65535 - minimum: 1025 - type: integer - profilePort: - default: 6060 - description: '`profilePort` allows setting up a Go pprof profiler listening to this port' - format: int32 - maximum: 65535 - minimum: 0 - type: integer - type: object - clusterName: - default: "" - description: '`clusterName` is the name of the cluster to appear in the flows data. This is useful in a multi-cluster context. When using OpenShift, leave empty to make it automatically determined.' 
- type: string - imagePullPolicy: - default: IfNotPresent - description: '`imagePullPolicy` is the Kubernetes pull policy for the image defined above' - enum: - - IfNotPresent - - Always - - Never - type: string - kafkaConsumerAutoscaler: - description: '`kafkaConsumerAutoscaler` is the spec of a horizontal pod autoscaler to set up for `flowlogs-pipeline-transformer`, which consumes Kafka messages. This setting is ignored when Kafka is disabled.' - properties: - maxReplicas: - default: 3 - description: '`maxReplicas` is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.' - format: int32 - type: integer - metrics: - description: Metrics used by the pod autoscaler - items: - description: MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once). - properties: - containerResource: - description: containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. - properties: - container: - description: container is the name of the container in the pods of the scaling target - type: string - name: - description: name is the name of the resource in question. - type: string - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - container - - name - - target - type: object - external: - description: external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). 
- properties: - metric: - description: metric identifies the target metric by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - object: - description: object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object). 
- properties: - describedObject: - description: describedObject specifies the descriptions of a object,such as kind,name apiVersion - properties: - apiVersion: - description: apiVersion is the API version of the referent - type: string - kind: - description: 'kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - kind - - name - type: object - metric: - description: metric identifies the target metric by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). 
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - describedObject - - metric - - target - type: object - pods: - description: pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value. - properties: - metric: - description: metric identifies the target metric by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). 
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - resource: - description: resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. - properties: - name: - description: name is the name of the resource in question. - type: string - target: - description: target specifies the target value for the given metric - properties: - averageUtilization: - description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - name - - target - type: object - type: - description: 'type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available on when the feature-gate HPAContainerMetrics is enabled' - type: string - required: - - type - type: object - type: array - minReplicas: - description: '`minReplicas` is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.' - format: int32 - type: integer - status: - default: Disabled - description: '`status` describes the desired status regarding deploying an horizontal pod autoscaler.
- `Disabled` does not deploy an horizontal pod autoscaler.
- `Enabled` deploys an horizontal pod autoscaler.
' - enum: - - Disabled - - Enabled - type: string - type: object - kafkaConsumerBatchSize: - default: 10485760 - description: '`kafkaConsumerBatchSize` indicates to the broker the maximum batch size, in bytes, that the consumer accepts. Ignored when not using Kafka. Default: 10MB.' - type: integer - kafkaConsumerQueueCapacity: - default: 1000 - description: '`kafkaConsumerQueueCapacity` defines the capacity of the internal message queue used in the Kafka consumer client. Ignored when not using Kafka.' - type: integer - kafkaConsumerReplicas: - default: 3 - description: '`kafkaConsumerReplicas` defines the number of replicas (pods) to start for `flowlogs-pipeline-transformer`, which consumes Kafka messages. This setting is ignored when Kafka is disabled.' - format: int32 - minimum: 0 - type: integer - logLevel: - default: info - description: '`logLevel` of the processor runtime' - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - logTypes: - default: Flows - description: '`logTypes` defines the desired record types to generate. Possible values are:
- `Flows` (default) to export regular network flows
- `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates
- `EndedConversations` to generate only ended conversations events
- `All` to generate both network flows and all conversations events
' - enum: - - Flows - - Conversations - - EndedConversations - - All - type: string - metrics: - description: '`Metrics` define the processor configuration regarding metrics' - properties: - disableAlerts: - description: '`disableAlerts` is a list of alerts that should be disabled. Possible values are:
`NetObservNoFlows`, which is triggered when no flows are being observed for a certain period.
`NetObservLokiError`, which is triggered when flows are being dropped due to Loki errors.
' - items: - description: Name of a processor alert. Possible values are:
- `NetObservNoFlows`, which is triggered when no flows are being observed for a certain period.
- `NetObservLokiError`, which is triggered when flows are being dropped due to Loki errors.
- enum: - - NetObservNoFlows - - NetObservLokiError - type: string - type: array - includeList: - description: '`includeList` is a list of metric names to specify which ones to generate. The names correspond to the names in Prometheus without the prefix. For example, `namespace_egress_packets_total` shows up as `netobserv_namespace_egress_packets_total` in Prometheus. Note that the more metrics you add, the bigger is the impact on Prometheus workload resources. Metrics enabled by default are: `namespace_flows_total`, `node_ingress_bytes_total`, `workload_ingress_bytes_total`, `namespace_drop_packets_total` (when `PacketDrop` feature is enabled), `namespace_rtt_seconds` (when `FlowRTT` feature is enabled), `namespace_dns_latency_seconds` (when `DNSTracking` feature is enabled). More information, with full list of available metrics: https://github.com/netobserv/network-observability-operator/blob/main/docs/Metrics.md' - items: - description: Metric name. More information in https://github.com/netobserv/network-observability-operator/blob/main/docs/Metrics.md. - enum: - - namespace_egress_bytes_total - - namespace_egress_packets_total - - namespace_ingress_bytes_total - - namespace_ingress_packets_total - - namespace_flows_total - - node_egress_bytes_total - - node_egress_packets_total - - node_ingress_bytes_total - - node_ingress_packets_total - - node_flows_total - - workload_egress_bytes_total - - workload_egress_packets_total - - workload_ingress_bytes_total - - workload_ingress_packets_total - - workload_flows_total - - namespace_drop_bytes_total - - namespace_drop_packets_total - - node_drop_bytes_total - - node_drop_packets_total - - workload_drop_bytes_total - - workload_drop_packets_total - - namespace_rtt_seconds - - node_rtt_seconds - - workload_rtt_seconds - - namespace_dns_latency_seconds - - node_dns_latency_seconds - - workload_dns_latency_seconds - type: string - type: array - server: - description: Metrics server endpoint configuration for Prometheus scraper - properties: - port: - default: 9102 - description: The prometheus HTTP port - format: int32 - maximum: 65535 - minimum: 1 - type: integer - tls: - description: TLS configuration. - properties: - insecureSkipVerify: - default: false - description: '`insecureSkipVerify` allows skipping client-side verification of the provided certificate. If set to `true`, the `providedCaFile` field is ignored.' - type: boolean - provided: - description: TLS configuration when `type` is set to `Provided`. - properties: - certFile: - description: '`certFile` defines the path to the certificate file name within the config map or secret' - type: string - certKey: - description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.' - type: string - name: - description: Name of the config map or secret containing certificates - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the certificate reference: `configmap` or `secret`' - enum: - - configmap - - secret - type: string - type: object - providedCaFile: - description: Reference to the CA file when `type` is set to `Provided`. 
- properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: Disabled - description: Select the type of TLS configuration:
- `Disabled` (default) to not configure TLS for the endpoint. - `Provided` to manually provide cert file and a key file. - `Auto` to use OpenShift auto generated certificate using annotations. - enum: - - Disabled - - Provided - - Auto - type: string - type: object - type: object - type: object - multiClusterDeployment: - default: false - description: Set `multiClusterDeployment` to `true` to enable multi clusters feature. This adds `clusterName` label to flows data - type: boolean - resources: - default: - limits: - memory: 800Mi - requests: - cpu: 100m - memory: 100Mi - description: '`resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - type: object - type: object - status: - description: '`FlowCollectorStatus` defines the observed state of FlowCollector' - properties: - conditions: - description: '`conditions` represent the latest available observations of an object''s state' - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n \ttype FooStatus struct{ \t // Represents the observations of a foo's current state. 
\t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" \t // +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map \t // +listMapKey=type \t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields \t}" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - namespace: - description: 'Namespace where console plugin and flowlogs-pipeline have been deployed. 
-                  Deprecated: annotations are used instead'
-                type: string
-            required:
-            - conditions
-            type: object
-        type: object
-    served: true
-    storage: true
-    subresources:
-      status: {}
-status:
-  acceptedNames:
-    kind: ""
-    plural: ""
-  conditions: []
-  storedVersions: []
diff --git a/hack/cloned.flows.netobserv.io_flowmetrics.yaml b/hack/cloned.flows.netobserv.io_flowmetrics.yaml
deleted file mode 100644
index 50469918a..000000000
--- a/hack/cloned.flows.netobserv.io_flowmetrics.yaml
+++ /dev/null
@@ -1,146 +0,0 @@
-
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  annotations:
-    controller-gen.kubebuilder.io/version: v0.6.1
-  creationTimestamp: null
-  name: flowmetrics.flows.netobserv.io
-spec:
-  group: flows.netobserv.io
-  names:
-    kind: FlowMetric
-    listKind: FlowMetricList
-    plural: flowmetrics
-    singular: flowmetric
-  scope: Namespaced
-  versions:
-  - name: v1alpha1
-    schema:
-      openAPIV3Schema:
-        description: FlowMetric is the Schema for the flowmetrics API
-        properties:
-          apiVersion:
-            description: 'APIVersion defines the versioned schema of this representation
-              of an object. Servers should convert recognized schemas to the latest
-              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
-            type: string
-          kind:
-            description: 'Kind is a string value representing the REST resource this
-              object represents. Servers may infer this from the endpoint the client
-              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
-            type: string
-          metadata:
-            type: object
-          spec:
-            description: 'FlowMetricSpec defines the desired state of FlowMetric The
-              provided API allows you to customize these metrics according to your
-              needs.
-              When adding new metrics or modifying existing labels, you
-              must carefully monitor the memory usage of Prometheus workloads as this
-              could potentially have a high impact. Cf https://rhobs-handbook.netlify.app/products/openshiftmonitoring/telemetry.md/#what-is-the-cardinality-of-a-metric
-              To check the cardinality of all NetObserv metrics, run as `promql`:
-              `count({__name__=~"netobserv.*"}) by (__name__)`.'
-            properties:
-              buckets:
-                description: A list of buckets to use when `type` is "Histogram".
-                  The list must be parseable as floats. Prometheus default buckets
-                  will be used if unset.
-                items:
-                  type: string
-                type: array
-              direction:
-                default: Any
-                description: 'Filter for ingress, egress or any direction flows. When
-                  set to `Ingress`, it is equivalent to adding the regex filter on
-                  `FlowDirection`: `0|2`. When set to `Egress`, it is equivalent to
-                  adding the regex filter on `FlowDirection`: `1|2`.'
-                enum:
-                - Any
-                - Egress
-                - Ingress
-                type: string
-              filters:
-                description: '`filters` is a list of fields and values used to restrict
-                  which flows are taken into account. Oftentimes, these filters must
-                  be used to eliminate duplicates: `Duplicate:"false"` and `FlowDirection:
-                  "0"`. Refer to the documentation for the list of available fields:
-                  https://docs.openshift.com/container-platform/latest/networking/network_observability/json-flows-format-reference.html.'
-                items:
-                  properties:
-                    field:
-                      description: Name of the field to filter on
-                      type: string
-                    matchType:
-                      default: Exact
-                      description: Type of matching to apply
-                      enum:
-                      - Exact
-                      - Regex
-                      - Presence
-                      - Absence
-                      type: string
-                    value:
-                      description: Value to filter on
-                      type: string
-                  required:
-                  - field
-                  - matchType
-                  type: object
-                type: array
-              includeDuplicates:
-                description: 'When set to `true`, flows duplicated across several
-                  interfaces will add up in the generated metrics. When set to `false`
-                  (default), it is equivalent to adding the exact filter on `Duplicate`:
-                  `false`.'
-                type: boolean
-              labels:
-                description: '`labels` is a list of fields that should be used as
-                  Prometheus labels, also known as dimensions. From choosing labels
-                  results the level of granularity of this metric, as well as the
-                  available aggregations at query time. It must be done carefully
-                  as it impacts the metric cardinality (cf https://rhobs-handbook.netlify.app/products/openshiftmonitoring/telemetry.md/#what-is-the-cardinality-of-a-metric).
-                  In general, avoid setting very high cardinality labels such as IP
-                  or MAC addresses. "SrcK8S_OwnerName" or "DstK8S_OwnerName" should
-                  be preferred over "SrcK8S_Name" or "DstK8S_Name" as much as possible.
-                  Refer to the documentation for the list of available fields: https://docs.openshift.com/container-platform/latest/network_observability/json-flows-format-reference.html.'
-                items:
-                  type: string
-                type: array
-              metricName:
-                description: Name of the metric in Prometheus. It will be automatically
-                  prefixed with "netobserv_".
-                type: string
-              type:
-                description: 'Metric type: "Counter" or "Histogram". Use "Counter"
-                  for any value that increases over time and on which you can compute
-                  a rate, such as Bytes or Packets. Use "Histogram" for any value
-                  that must be sampled independently, such as latencies.'
-                enum:
-                - Counter
-                - Histogram
-                type: string
-              valueField:
-                description: '`valueField` is the flow field that must be used as
-                  a value for this metric. This field must hold numeric values. Leave
-                  empty to count flows rather than a specific value per flow. Refer
-                  to the documentation for the list of available fields: https://docs.openshift.com/container-platform/latest/networking/network_observability/json-flows-format-reference.html.'
-                type: string
-            required:
-            - metricName
-            - type
-            type: object
-          status:
-            description: FlowMetricStatus defines the observed state of FlowMetric
-            type: object
-        type: object
-    served: true
-    storage: true
-    subresources:
-      status: {}
-status:
-  acceptedNames:
-    kind: ""
-    plural: ""
-  conditions: []
-  storedVersions: []
diff --git a/main.go b/main.go
index 90863ba1a..ad8cd5ed9 100644
--- a/main.go
+++ b/main.go
@@ -50,7 +50,6 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
 
 	// nolint:staticcheck
-	flowsv1alpha1 "github.com/netobserv/network-observability-operator/apis/flowcollector/v1alpha1"
 	flowsv1beta1 "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta1"
 	flowsv1beta2 "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2"
 	metricsv1alpha1 "github.com/netobserv/network-observability-operator/apis/flowmetrics/v1alpha1"
@@ -75,7 +74,6 @@ var crdBytes []byte
 
 func init() {
 	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
-	utilruntime.Must(flowsv1alpha1.AddToScheme(scheme))
 	utilruntime.Must(flowsv1beta1.AddToScheme(scheme))
 	utilruntime.Must(flowsv1beta2.AddToScheme(scheme))
 	utilruntime.Must(metricsv1alpha1.AddToScheme(scheme))
diff --git a/pkg/test/envtest.go b/pkg/test/envtest.go
index 0610b0ab3..d1975ca54 100644
--- a/pkg/test/envtest.go
+++ b/pkg/test/envtest.go
@@ -28,7 +27,6 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/metrics/server"
 
 	// nolint:staticcheck
-	flowsv1alpha1 "github.com/netobserv/network-observability-operator/apis/flowcollector/v1alpha1"
 	flowsv1beta1 "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta1"
 	flowsv1beta2 "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2"
 	metricsv1alpha1 "github.com/netobserv/network-observability-operator/apis/flowmetrics/v1alpha1"
@@ -51,10 +50,9 @@ func PrepareEnvTest(controllers []manager.Registerer, namespaces []string, baseP
 		Scheme: scheme.Scheme,
 		CRDInstallOptions: envtest.CRDInstallOptions{
 			Paths: []string{
-				// FIXME: till v1beta2 becomes the new storage version we will point to hack folder
-				// where v1beta2 is marked as the storage version
-				// filepath.Join("..", "config", "crd", "bases"),
-				filepath.Join(basePath, "..", "hack"),
+				// Hack to reintroduce when the API stored version != latest version: comment-out config/crd/bases and use hack instead; see also Makefile "hack-crd-for-test"
+				filepath.Join(basePath, "..", "config", "crd", "bases"),
+				// filepath.Join(basePath, "..", "hack"),
 				// We need to install the ConsolePlugin CRD to test setup of our Network Console Plugin
 				filepath.Join(basePath, "..", "vendor", "github.com", "openshift", "api", "console", "v1alpha1"),
 				filepath.Join(basePath, "..", "vendor", "github.com", "openshift", "api", "config", "v1"),
@@ -75,9 +73,6 @@ func PrepareEnvTest(controllers []manager.Registerer, namespaces []string, baseP
 	Expect(err).NotTo(HaveOccurred())
 	Expect(cfg).NotTo(BeNil())
 
-	err = flowsv1alpha1.AddToScheme(scheme.Scheme)
-	Expect(err).NotTo(HaveOccurred())
-
 	err = flowsv1beta1.AddToScheme(scheme.Scheme)
 	Expect(err).NotTo(HaveOccurred())