diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 38e13bc46..a27c46332 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -110,6 +110,9 @@ export IMAGE=quay.io/$USER/network-observability-operator:test export BUNDLE_IMAGE=quay.io/$USER/network-observability-operator-bundle:v0.0.0-test make images make bundle bundle-build bundle-push + +# or, alternatively: +BUNDLE_VERSION=0.0.0-test VERSION=test make images bundle bundle-build bundle-push ``` Optionally, you might validate the bundle: diff --git a/Makefile b/Makefile index 534425576..55c62ed68 100644 --- a/Makefile +++ b/Makefile @@ -266,24 +266,24 @@ doc: crdoc ## Generate markdown documentation $(CRDOC) --resources config/crd/bases/flows.netobserv.io_flowcollectors.yaml --output docs/FlowCollector.md generate-go-conversions: $(CONVERSION_GEN) ## Run all generate-go-conversions - $(MAKE) clean-generated-conversions SRC_DIRS="./apis/flowcollector/v1alpha1,./apis/flowcollector/v1beta1" + $(MAKE) clean-generated-conversions SRC_DIRS="./apis/flowcollector/v1beta1" $(CONVERSION_GEN) \ - --input-dirs=./apis/flowcollector/v1alpha1 \ --input-dirs=./apis/flowcollector/v1beta1 \ --build-tag=ignore_autogenerated_core \ --output-file-base=zz_generated.conversion \ $(CONVERSION_GEN_OUTPUT_BASE) \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt -.PHONY: hack-crd-for-test -hack-crd-for-test: YQ - cat ./config/crd/bases/flows.netobserv.io_flowcollectors.yaml \ - | $(YQ) eval-all \ - '(.spec.versions.[]|select(.name != "v1beta2").storage) = false,(.spec.versions.[]|select(.name == "v1beta2").storage) = true' \ - > ./hack/cloned.flows.netobserv.io_flowcollectors.yaml - cp ./config/crd/bases/flows.netobserv.io_flowmetrics.yaml ./hack/cloned.flows.netobserv.io_flowmetrics.yaml +# Hack to reintroduce when the API stored version != latest version; see also envtest.go (CRD path config) +# .PHONY: hack-crd-for-test +# hack-crd-for-test: YQ +# cat ./config/crd/bases/flows.netobserv.io_flowcollectors.yaml \ +# | $(YQ) 
eval-all \ +# '(.spec.versions.[]|select(.name != "v1beta2").storage) = false,(.spec.versions.[]|select(.name == "v1beta2").storage) = true' \ +# > ./hack/cloned.flows.netobserv.io_flowcollectors.yaml +# cp ./config/crd/bases/flows.netobserv.io_flowmetrics.yaml ./hack/cloned.flows.netobserv.io_flowmetrics.yaml -generate: gencode manifests hack-crd-for-test doc generate-go-conversions ## Run all code/file generators +generate: gencode manifests doc generate-go-conversions ## Run all code/file generators .PHONY: clean-generated-conversions clean-generated-conversions: ## Remove files generated by conversion-gen from the mentioned dirs diff --git a/PROJECT b/PROJECT index ce0de7cb3..3de0f51e9 100644 --- a/PROJECT +++ b/PROJECT @@ -28,6 +28,6 @@ resources: domain: netobserv.io group: flows kind: FlowMetric - path: github.com/netobserv/network-observability-operator/apis/flowcollector/v1alpha1 + path: github.com/netobserv/network-observability-operator/apis/flowmetrics/v1alpha1 version: v1alpha1 version: "3" diff --git a/apis/flowcollector/v1alpha1/doc.go b/apis/flowcollector/v1alpha1/doc.go deleted file mode 100644 index c76a4c855..000000000 --- a/apis/flowcollector/v1alpha1/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1aplha1 contains the v1alpha1 API implementation. 
-// +k8s:conversion-gen=github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2 -// -// Deprecated: This package will be removed in one of the next releases. -package v1alpha1 diff --git a/apis/flowcollector/v1alpha1/flowcollector_types.go b/apis/flowcollector/v1alpha1/flowcollector_types.go deleted file mode 100644 index c9af1bbad..000000000 --- a/apis/flowcollector/v1alpha1/flowcollector_types.go +++ /dev/null @@ -1,762 +0,0 @@ -/* -Copyright 2021. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package v1alpha1 - -import ( - ascv2 "k8s.io/api/autoscaling/v2" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - -const ( - AgentIPFIX = "IPFIX" - AgentEBPF = "EBPF" - DeploymentModelDirect = "DIRECT" - DeploymentModelKafka = "KAFKA" -) - -// Please notice that the FlowCollectorSpec's properties MUST redefine one of the default -// values to force the definition of the section when it is not provided by the manifest. -// This will cause that the remaining default fields will be set according to their definition. -// Otherwise, omitting the sections in the manifest would lead to zero-valued properties. 
-// This is a workaround for the related issue: -// https://github.com/kubernetes-sigs/controller-tools/issues/622 - -// FlowCollectorSpec defines the desired state of FlowCollector -type FlowCollectorSpec struct { - // Important: Run "make generate" to regenerate code after modifying this file - - // namespace where NetObserv pods are deployed. - // If empty, the namespace of the operator is going to be used. - // +optional - Namespace string `json:"namespace,omitempty"` - - // agent for flows extraction. - // +kubebuilder:default:={type:"EBPF"} - Agent FlowCollectorAgent `json:"agent"` - - // processor defines the settings of the component that receives the flows from the agent, - // enriches them, and forwards them to the Loki persistence layer. - Processor FlowCollectorFLP `json:"processor,omitempty"` - - // loki, the flow store, client settings. - Loki FlowCollectorLoki `json:"loki,omitempty"` - - // consolePlugin defines the settings related to the OpenShift Console plugin, when available. - ConsolePlugin FlowCollectorConsolePlugin `json:"consolePlugin,omitempty"` - - // deploymentModel defines the desired type of deployment for flow processing. Possible values are "DIRECT" (default) to make - // the flow processor listening directly from the agents, or "KAFKA" to make flows sent to a Kafka pipeline before consumption - // by the processor. - // Kafka can provide better scalability, resiliency and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka). - // +unionDiscriminator - // +kubebuilder:validation:Enum:="DIRECT";"KAFKA" - // +kubebuilder:validation:Required - // +kubebuilder:default:=DIRECT - DeploymentModel string `json:"deploymentModel"` - - // kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the "spec.deploymentModel" is "KAFKA". 
- // +optional - Kafka FlowCollectorKafka `json:"kafka,omitempty"` - - // exporters defines additional optional exporters for custom consumption or storage. This is an experimental feature. Currently, only KAFKA exporter is available. - // +optional - // +k8s:conversion-gen=false - Exporters []*FlowCollectorExporter `json:"exporters"` -} - -// FlowCollectorAgent is a discriminated union that allows to select either ipfix or ebpf, but does not -// allow defining both fields. -// +union -type FlowCollectorAgent struct { - // type selects the flows tracing agent. Possible values are "EBPF" (default) to use NetObserv eBPF agent, - // "IPFIX" to use the legacy IPFIX collector. "EBPF" is recommended in most cases as it offers better - // performances and should work regardless of the CNI installed on the cluster. - // "IPFIX" works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, - // but they would require manual configuration). - // +unionDiscriminator - // +kubebuilder:validation:Enum:="EBPF";"IPFIX" - // +kubebuilder:validation:Required - // +kubebuilder:default:=EBPF - Type string `json:"type"` - - // ipfix describes the settings related to the IPFIX-based flow reporter when the "agent.type" - // property is set to "IPFIX". - // +optional - IPFIX FlowCollectorIPFIX `json:"ipfix,omitempty"` - - // ebpf describes the settings related to the eBPF-based flow reporter when the "agent.type" - // property is set to "EBPF". 
- // +optional - EBPF FlowCollectorEBPF `json:"ebpf,omitempty"` -} - -// FlowCollectorIPFIX defines a FlowCollector that uses IPFIX on OVN-Kubernetes to collect the -// flows information -type FlowCollectorIPFIX struct { - // Important: Run "make generate" to regenerate code after modifying this file - - //+kubebuilder:validation:Pattern:=^\d+(ns|ms|s|m)?$ - //+kubebuilder:default:="20s" - // cacheActiveTimeout is the max period during which the reporter will aggregate flows before sending - CacheActiveTimeout string `json:"cacheActiveTimeout,omitempty" mapstructure:"cacheActiveTimeout,omitempty"` - - //+kubebuilder:validation:Minimum=0 - //+kubebuilder:default:=400 - // cacheMaxFlows is the max number of flows in an aggregate; when reached, the reporter sends the flows - CacheMaxFlows int32 `json:"cacheMaxFlows,omitempty" mapstructure:"cacheMaxFlows,omitempty"` - - //+kubebuilder:validation:Minimum=2 - //+kubebuilder:default:=400 - // sampling is the sampling rate on the reporter. 100 means one flow on 100 is sent. - // To ensure cluster stability, it is not possible to set a value below 2. - // If you really want to sample every packet, which might impact the cluster stability, - // refer to "forceSampleAll". Alternatively, you can use the eBPF Agent instead of IPFIX. - Sampling int32 `json:"sampling,omitempty" mapstructure:"sampling,omitempty"` - - //+kubebuilder:default:=false - // forceSampleAll allows disabling sampling in the IPFIX-based flow reporter. - // It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. - // If you REALLY want to do that, set this flag to true. Use at your own risk. - // When it is set to true, the value of "sampling" is ignored. - ForceSampleAll bool `json:"forceSampleAll,omitempty" mapstructure:"-"` - - // clusterNetworkOperator defines the settings related to the OpenShift Cluster Network Operator, when available. 
- ClusterNetworkOperator ClusterNetworkOperatorConfig `json:"clusterNetworkOperator,omitempty" mapstructure:"-"` - - // ovnKubernetes defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead. - OVNKubernetes OVNKubernetesConfig `json:"ovnKubernetes,omitempty" mapstructure:"-"` -} - -// FlowCollectorEBPF defines a FlowCollector that uses eBPF to collect the flows information -type FlowCollectorEBPF struct { - // Important: Run "make generate" to regenerate code after modifying this file - - //+kubebuilder:validation:Enum=IfNotPresent;Always;Never - //+kubebuilder:default:=IfNotPresent - // imagePullPolicy is the Kubernetes pull policy for the image defined above - ImagePullPolicy string `json:"imagePullPolicy,omitempty"` - - //+kubebuilder:default:={requests:{memory:"50Mi",cpu:"100m"},limits:{memory:"800Mi"}} - // resources are the compute resources required by this container. - // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - // +optional - Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` - - // sampling rate of the flow reporter. 100 means one flow on 100 is sent. 0 or 1 means all flows are sampled. - //+kubebuilder:validation:Minimum=0 - //+kubebuilder:default:=50 - //+optional - Sampling *int32 `json:"sampling,omitempty"` - - // cacheActiveTimeout is the max period during which the reporter will aggregate flows before sending. - // Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, - // however you can expect higher memory consumption and an increased latency in the flow collection. 
- //+kubebuilder:validation:Pattern:=^\d+(ns|ms|s|m)?$ - //+kubebuilder:default:="5s" - CacheActiveTimeout string `json:"cacheActiveTimeout,omitempty"` - - // cacheMaxFlows is the max number of flows in an aggregate; when reached, the reporter sends the flows. - // Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, - // however you can expect higher memory consumption and an increased latency in the flow collection. - //+kubebuilder:validation:Minimum=1 - //+kubebuilder:default:=100000 - CacheMaxFlows int32 `json:"cacheMaxFlows,omitempty"` - - // interfaces contains the interface names from where flows will be collected. If empty, the agent - // will fetch all the interfaces in the system, excepting the ones listed in ExcludeInterfaces. - // If an entry is enclosed by slashes (such as `/br-/`), it will match as regular expression, - // otherwise it will be matched as a case-sensitive string. - //+optional - Interfaces []string `json:"interfaces,omitempty"` - - // excludeInterfaces contains the interface names that will be excluded from flow tracing. - // If an entry is enclosed by slashes (such as `/br-/`), it will match as regular expression, - // otherwise it will be matched as a case-sensitive string. - //+kubebuilder:default:=lo; - ExcludeInterfaces []string `json:"excludeInterfaces,omitempty"` - - //+kubebuilder:validation:Enum=trace;debug;info;warn;error;fatal;panic - //+kubebuilder:default:=info - // logLevel defines the log level for the NetObserv eBPF Agent - LogLevel string `json:"logLevel,omitempty"` - - // privileged mode for the eBPF Agent container. In general this setting can be ignored or set to false: - // in that case, the operator will set granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) - // to the container, to enable its correct operation. 
- // If for some reason these capabilities cannot be set (for example old kernel version not knowing CAP_BPF) - // then you can turn on this mode for more global privileges. - // +optional - Privileged bool `json:"privileged,omitempty"` - - //+kubebuilder:default:=1048576 - // +optional - // kafkaBatchSize limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 1MB. - KafkaBatchSize int `json:"kafkaBatchSize"` - - // Debug allows setting some aspects of the internal configuration of the eBPF agent. - // This section is aimed exclusively for debugging and fine-grained performance optimizations - // (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk. - // +optional - Debug DebugConfig `json:"debug,omitempty"` -} - -// FlowCollectorKafka defines the desired Kafka config of FlowCollector -type FlowCollectorKafka struct { - // Important: Run "make generate" to regenerate code after modifying this file - - //+kubebuilder:default:="" - // address of the Kafka server - Address string `json:"address"` - - //+kubebuilder:default:="" - // kafka topic to use. It must exist, NetObserv will not create it. - Topic string `json:"topic"` - - // tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. - // Note that, when eBPF agents are used, Kafka certificate needs to be copied in the agent namespace (by default it's netobserv-privileged). - // +optional - TLS ClientTLS `json:"tls"` - - // SASL authentication configuration. [Unsupported (*)]. 
- // +optional - SASL SASLConfig `json:"sasl"` -} - -type FlowCollectorIPFIXReceiver struct { - //+kubebuilder:default:="" - // Address of the IPFIX external receiver - TargetHost string `json:"targetHost"` - - // Port for the IPFIX external receiver - TargetPort int `json:"targetPort"` - - // Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`. - // +unionDiscriminator - // +kubebuilder:validation:Enum:="TCP";"UDP" - // +optional - Transport string `json:"transport,omitempty"` -} - -const ( - ServerTLSDisabled = "DISABLED" - ServerTLSProvided = "PROVIDED" - ServerTLSAuto = "AUTO" -) - -type ServerTLSConfigType string - -// ServerTLS define the TLS configuration, server side -type ServerTLS struct { - // Select the type of TLS configuration - // "DISABLED" (default) to not configure TLS for the endpoint, "PROVIDED" to manually provide cert file and a key file, - // and "AUTO" to use OpenShift auto generated certificate using annotations - // +unionDiscriminator - // +kubebuilder:validation:Enum:="DISABLED";"PROVIDED";"AUTO" - // +kubebuilder:validation:Required - //+kubebuilder:default:="DISABLED" - Type ServerTLSConfigType `json:"type,omitempty"` - - // TLS configuration. - // +optional - Provided *CertificateReference `json:"provided"` -} - -// MetricsServerConfig define the metrics server endpoint configuration for Prometheus scraper -type MetricsServerConfig struct { - - //+kubebuilder:validation:Minimum=1 - //+kubebuilder:validation:Maximum=65535 - //+kubebuilder:default:=9102 - // the prometheus HTTP port - Port int32 `json:"port,omitempty"` - - // TLS configuration. - // +optional - TLS ServerTLS `json:"tls"` -} - -// FLPMetrics define the desired FLP configuration regarding metrics -type FLPMetrics struct { - // metricsServer endpoint configuration for Prometheus scraper - // +optional - Server MetricsServerConfig `json:"server,omitempty"` - - // ignoreTags is a list of tags to specify which metrics to ignore. 
Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . - // Available tags are: egress, ingress, flows, bytes, packets, namespaces, nodes, workloads - //+kubebuilder:default:={"egress","packets"} - IgnoreTags []string `json:"ignoreTags,omitempty"` -} - -// FlowCollectorFLP defines the desired flowlogs-pipeline state of FlowCollector -type FlowCollectorFLP struct { - // Important: Run "make generate" to regenerate code after modifying this file - - //+kubebuilder:validation:Minimum=1025 - //+kubebuilder:validation:Maximum=65535 - //+kubebuilder:default:=2055 - // port of the flow collector (host port) - // By conventions, some value are not authorized port must not be below 1024 and must not equal this values: - // 4789,6081,500, and 4500 - Port int32 `json:"port,omitempty"` - - //+kubebuilder:validation:Minimum=1 - //+kubebuilder:validation:Maximum=65535 - //+kubebuilder:default:=8080 - // healthPort is a collector HTTP port in the Pod that exposes the health check API - HealthPort int32 `json:"healthPort,omitempty"` - - //+kubebuilder:validation:Minimum=0 - //+kubebuilder:validation:Maximum=65535 - //+optional - // profilePort allows setting up a Go pprof profiler listening to this port - ProfilePort int32 `json:"profilePort,omitempty"` - - //+kubebuilder:validation:Enum=IfNotPresent;Always;Never - //+kubebuilder:default:=IfNotPresent - // imagePullPolicy is the Kubernetes pull policy for the image defined above - ImagePullPolicy string `json:"imagePullPolicy,omitempty"` - - // Metrics define the processor configuration regarding metrics - Metrics FLPMetrics `json:"metrics,omitempty"` - - //+kubebuilder:validation:Enum=trace;debug;info;warn;error;fatal;panic - //+kubebuilder:default:=info - // logLevel of the collector runtime - LogLevel string `json:"logLevel,omitempty"` - - 
//+kubebuilder:default:={requests:{memory:"100Mi",cpu:"100m"},limits:{memory:"800Mi"}} - // resources are the compute resources required by this container. - // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - // +optional - Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` - - //+kubebuilder:default:=true - // enableKubeProbes is a flag to enable or disable Kubernetes liveness and readiness probes - EnableKubeProbes bool `json:"enableKubeProbes,omitempty"` - - //+kubebuilder:default:=true - // dropUnusedFields allows, when set to true, to drop fields that are known to be unused by OVS, in order to save storage space. - DropUnusedFields bool `json:"dropUnusedFields,omitempty"` - - //+kubebuilder:validation:Minimum=0 - //+kubebuilder:default:=3 - // kafkaConsumerReplicas defines the number of replicas (pods) to start for flowlogs-pipeline-transformer, which consumes Kafka messages. - // This setting is ignored when Kafka is disabled. - KafkaConsumerReplicas int32 `json:"kafkaConsumerReplicas,omitempty"` - - // kafkaConsumerAutoscaler spec of a horizontal pod autoscaler to set up for flowlogs-pipeline-transformer, which consumes Kafka messages. - // This setting is ignored when Kafka is disabled. - // +optional - KafkaConsumerAutoscaler FlowCollectorHPA `json:"kafkaConsumerAutoscaler,omitempty"` - - //+kubebuilder:default:=1000 - // +optional - // kafkaConsumerQueueCapacity defines the capacity of the internal message queue used in the Kafka consumer client. Ignored when not using Kafka. - KafkaConsumerQueueCapacity int `json:"kafkaConsumerQueueCapacity"` - - //+kubebuilder:default:=10485760 - // +optional - // kafkaConsumerBatchSize indicates to the broker the maximum batch size, in bytes, that the consumer will accept. Ignored when not using Kafka. Default: 10MB. 
- KafkaConsumerBatchSize int `json:"kafkaConsumerBatchSize"` - - // Debug allows setting some aspects of the internal configuration of the flow processor. - // This section is aimed exclusively for debugging and fine-grained performance optimizations - // (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk. - // +optional - Debug DebugConfig `json:"debug,omitempty"` -} - -const ( - HPAStatusDisabled = "DISABLED" - HPAStatusEnabled = "ENABLED" -) - -type FlowCollectorHPA struct { - // +kubebuilder:validation:Enum:=DISABLED;ENABLED - // +kubebuilder:default:=DISABLED - // Status describe the desired status regarding deploying an horizontal pod autoscaler - // DISABLED will not deploy an horizontal pod autoscaler - // ENABLED will deploy an horizontal pod autoscaler - Status string `json:"status,omitempty"` - - // minReplicas is the lower limit for the number of replicas to which the autoscaler - // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the - // alpha feature gate HPAScaleToZero is enabled and at least one Object or External - // metric is configured. Scaling is active as long as at least one metric value is - // available. - // +optional - MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` - // maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. - // +kubebuilder:default:=3 - // +optional - MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` - // metrics used by the pod autoscaler - // +optional - Metrics []ascv2.MetricSpec `json:"metrics"` -} - -const ( - LokiAuthDisabled = "DISABLED" - LokiAuthUseHostToken = "HOST" - LokiAuthForwardUserToken = "FORWARD" -) - -// FlowCollectorLoki defines the desired state for FlowCollector's Loki client. 
-type FlowCollectorLoki struct { - //+kubebuilder:default:="http://loki:3100/" - // url is the address of an existing Loki service to push the flows to. When using the Loki Operator, - // set it to the Loki gateway service with the `network` tenant set in path, for example - // https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network. - URL string `json:"url,omitempty"` - - //+kubebuilder:validation:optional - // querierURL specifies the address of the Loki querier service, in case it is different from the - // Loki ingester URL. If empty, the URL value will be used (assuming that the Loki ingester - // and querier are in the same server). When using the Loki Operator, do not set it, since - // ingestion and queries use the Loki gateway. - QuerierURL string `json:"querierUrl,omitempty"` - - //+kubebuilder:validation:optional - // statusURL specifies the address of the Loki /ready /metrics /config endpoints, in case it is different from the - // Loki querier URL. If empty, the QuerierURL value will be used. - // This is useful to show error messages and some context in the frontend. - // When using the Loki Operator, set it to the Loki HTTP query frontend service, for example - // https://loki-query-frontend-http.netobserv.svc:3100/. - StatusURL string `json:"statusUrl,omitempty"` - - //+kubebuilder:default:="netobserv" - // tenantID is the Loki X-Scope-OrgID that identifies the tenant for each request. - // When using the Loki Operator, set it to `network`, which corresponds to a special tenant mode. - TenantID string `json:"tenantID,omitempty"` - - // +kubebuilder:validation:Enum:="DISABLED";"HOST";"FORWARD" - //+kubebuilder:default:="DISABLED" - // AuthToken describe the way to get a token to authenticate to Loki. - // DISABLED will not send any token with the request. - // HOST will use the local pod service account to authenticate to Loki. 
- // FORWARD will forward user token, in this mode, pod that are not receiving user request like the processor will use the local pod service account. Similar to HOST mode. - // When using the Loki Operator, set it to `HOST` or `FORWARD`. - AuthToken string `json:"authToken,omitempty"` - - //+kubebuilder:default:="1s" - // batchWait is max time to wait before sending a batch. - BatchWait metav1.Duration `json:"batchWait,omitempty"` - - //+kubebuilder:validation:Minimum=1 - //+kubebuilder:default:=102400 - // batchSize is max batch size (in bytes) of logs to accumulate before sending. - BatchSize int64 `json:"batchSize,omitempty"` - - //+kubebuilder:default:="10s" - // timeout is the maximum time connection / request limit. - // A Timeout of zero means no timeout. - Timeout metav1.Duration `json:"timeout,omitempty"` - - //+kubebuilder:default:="1s" - // minBackoff is the initial backoff time for client connection between retries. - MinBackoff metav1.Duration `json:"minBackoff,omitempty"` - - //+kubebuilder:default:="5s" - // maxBackoff is the maximum backoff time for client connection between retries. - MaxBackoff metav1.Duration `json:"maxBackoff,omitempty"` - - //+kubebuilder:validation:Minimum=0 - //+kubebuilder:default:=2 - // maxRetries is the maximum number of retries for client connections. - MaxRetries int32 `json:"maxRetries,omitempty"` - - //+kubebuilder:default:={"app":"netobserv-flowcollector"} - // staticLabels is a map of common labels to set on each flow. - StaticLabels map[string]string `json:"staticLabels,omitempty"` - - // tls client configuration. 
- // +optional - TLS ClientTLS `json:"tls"` -} - -// FlowCollectorConsolePlugin defines the desired ConsolePlugin state of FlowCollector -type FlowCollectorConsolePlugin struct { - // Important: Run "make generate" to regenerate code after modifying this file - - //+kubebuilder:default:=true - // register allows, when set to true, to automatically register the provided console plugin with the OpenShift Console operator. - // When set to false, you can still register it manually by editing console.operator.openshift.io/cluster. - // E.g: oc patch console.operator.openshift.io cluster --type='json' -p '[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]' - Register bool `json:"register"` - - //+kubebuilder:validation:Minimum=0 - //+kubebuilder:default:=1 - // replicas defines the number of replicas (pods) to start. - Replicas int32 `json:"replicas,omitempty"` - - //+kubebuilder:validation:Minimum=1 - //+kubebuilder:validation:Maximum=65535 - //+kubebuilder:default:=9001 - // port is the plugin service port - Port int32 `json:"port,omitempty"` - - //+kubebuilder:validation:Enum=IfNotPresent;Always;Never - //+kubebuilder:default:=IfNotPresent - // imagePullPolicy is the Kubernetes pull policy for the image defined above - ImagePullPolicy string `json:"imagePullPolicy,omitempty"` - - //+kubebuilder:default:={requests:{memory:"50Mi",cpu:"100m"},limits:{memory:"100Mi"}} - // resources, in terms of compute resources, required by this container. - // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - // +optional - Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` - - //+kubebuilder:validation:Enum=trace;debug;info;warn;error;fatal;panic - //+kubebuilder:default:=info - // logLevel for the console plugin backend - LogLevel string `json:"logLevel,omitempty"` - - // autoscaler spec of a horizontal pod autoscaler to set up for the plugin Deployment. 
- // +optional - Autoscaler FlowCollectorHPA `json:"autoscaler,omitempty"` - - //+kubebuilder:default:={enable:true} - // portNaming defines the configuration of the port-to-service name translation - PortNaming ConsolePluginPortConfig `json:"portNaming,omitempty"` - - //+kubebuilder:default:={{name:"Applications",filter:{"src_namespace!":"openshift-,netobserv","dst_namespace!":"openshift-,netobserv"},default:true},{name:"Infrastructure",filter:{"src_namespace":"openshift-,netobserv","dst_namespace":"openshift-,netobserv"}},{name:"Pods network",filter:{"src_kind":"Pod","dst_kind":"Pod"},default:true},{name:"Services network",filter:{"dst_kind":"Service"}}} - // quickFilters configures quick filter presets for the Console plugin - QuickFilters []QuickFilter `json:"quickFilters,omitempty"` -} - -// Configuration of the port to service name translation feature of the console plugin -type ConsolePluginPortConfig struct { - //+kubebuilder:default:=true - // enable the console plugin port-to-service name translation - Enable bool `json:"enable,omitempty"` - - // portNames defines additional port names to use in the console. - // Example: portNames: {"3100": "loki"} - // +optional - PortNames map[string]string `json:"portNames,omitempty" yaml:"portNames,omitempty"` -} - -// QuickFilter defines preset configuration for Console's quick filters -type QuickFilter struct { - // name of the filter, that will be displayed in Console - // +kubebuilder:MinLength:=1 - Name string `json:"name"` - // filter is a set of keys and values to be set when this filter is selected. Each key can relate to a list of values using a coma-separated string. 
- // Example: filter: {"src_namespace": "namespace1,namespace2"} - // +kubebuilder:MinProperties:=1 - Filter map[string]string `json:"filter"` - // default defines whether this filter should be active by default or not - // +optional - Default bool `json:"default,omitempty"` -} - -// ClusterNetworkOperatorConfig defines the desired configuration related to the Cluster Network Configuration -type ClusterNetworkOperatorConfig struct { - // Important: Run "make generate" to regenerate code after modifying this file - - //+kubebuilder:default:=openshift-network-operator - // namespace where the config map is going to be deployed. - Namespace string `json:"namespace,omitempty"` -} - -// OVNKubernetesConfig defines the desired configuration related to the OVN-Kubernetes network provider, when Cluster Network Operator isn't installed. -type OVNKubernetesConfig struct { - // Important: Run "make generate" to regenerate code after modifying this file - - //+kubebuilder:default:=ovn-kubernetes - // namespace where OVN-Kubernetes pods are deployed. - Namespace string `json:"namespace,omitempty"` - - //+kubebuilder:default:=ovnkube-node - // daemonSetName defines the name of the DaemonSet controlling the OVN-Kubernetes pods. - DaemonSetName string `json:"daemonSetName,omitempty"` - - //+kubebuilder:default:=ovnkube-node - // containerName defines the name of the container to configure for IPFIX. - ContainerName string `json:"containerName,omitempty"` -} - -type MountableType string - -const ( - CertRefTypeSecret MountableType = "secret" - CertRefTypeConfigMap MountableType = "configmap" -) - -type FileReference struct { - //+kubebuilder:validation:Enum=configmap;secret - // Type for the file reference: "configmap" or "secret" - Type MountableType `json:"type,omitempty"` - - // Name of the config map or secret containing the file - Name string `json:"name,omitempty"` - - // Namespace of the config map or secret containing the file. 
If omitted, the default is to use the same namespace as where NetObserv is deployed. - // If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - // +optional - //+kubebuilder:default:="" - Namespace string `json:"namespace,omitempty"` - - // File name within the config map or secret - File string `json:"file,omitempty"` -} - -type CertificateReference struct { - //+kubebuilder:validation:Enum=configmap;secret - // type for the certificate reference: "configmap" or "secret" - Type MountableType `json:"type,omitempty"` - - // name of the config map or secret containing certificates - Name string `json:"name,omitempty"` - - // certFile defines the path to the certificate file name within the config map or secret - CertFile string `json:"certFile,omitempty"` - - // certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - // +optional - CertKey string `json:"certKey,omitempty"` - - // namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. - // If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. 
- // +optional - //+kubebuilder:default:="" - Namespace string `json:"namespace,omitempty"` -} - -// ClientTLS defines TLS client configuration -type ClientTLS struct { - //+kubebuilder:default:=false - // enable TLS - Enable bool `json:"enable,omitempty"` - - //+kubebuilder:default:=false - // insecureSkipVerify allows skipping client-side verification of the server certificate - // If set to true, CACert field will be ignored - InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"` - - // caCert defines the reference of the certificate for the Certificate Authority - CACert CertificateReference `json:"caCert,omitempty"` - - // userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS) - // +optional - UserCert CertificateReference `json:"userCert,omitempty"` -} - -type SASLType string - -const ( - SASLDisabled SASLType = "DISABLED" - SASLPlain SASLType = "PLAIN" - SASLScramSHA512 SASLType = "SCRAM-SHA512" -) - -// `SASLConfig` defines SASL configuration -type SASLConfig struct { - //+kubebuilder:validation:Enum=DISABLED;PLAIN;SCRAM-SHA512 - //+kubebuilder:default:=DISABLED - // Type of SASL authentication to use, or `DISABLED` if SASL is not used - Type SASLType `json:"type,omitempty"` - - // Reference to the secret or config map containing the client ID - ClientIDReference FileReference `json:"clientIDReference,omitempty"` - - // Reference to the secret or config map containing the client secret - ClientSecretReference FileReference `json:"clientSecretReference,omitempty"` -} - -// DebugConfig allows tweaking some aspects of the internal configuration of the agent and FLP. -// They are aimed exclusively for debugging. Users setting these values do it at their own risk. -type DebugConfig struct { - // env allows passing custom environment variables to the NetObserv Agent. 
Useful for passing - // some very concrete performance-tuning options (such as GOGC, GOMAXPROCS) that shouldn't be - // publicly exposed as part of the FlowCollector descriptor, as they are only useful - // in edge debug and support scenarios. - //+optional - Env map[string]string `json:"env,omitempty"` -} - -// Add more exporter types below -type ExporterType string - -const ( - KafkaExporter ExporterType = "KAFKA" -) - -// FlowCollectorExporter defines an additional exporter to send enriched flows to -type FlowCollectorExporter struct { - // `type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`. - // +unionDiscriminator - // +kubebuilder:validation:Enum:="KAFKA";"IPFIX" - // +kubebuilder:validation:Required - Type ExporterType `json:"type"` - - // kafka configuration, such as address or topic, to send enriched flows to. - // +optional - Kafka FlowCollectorKafka `json:"kafka,omitempty"` - - // IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. - // +optional - IPFIX FlowCollectorIPFIXReceiver `json:"ipfix,omitempty"` -} - -// FlowCollectorStatus defines the observed state of FlowCollector -type FlowCollectorStatus struct { - // Important: Run "make" to regenerate code after modifying this file - - // conditions represent the latest available observations of an object's state - Conditions []metav1.Condition `json:"conditions"` - - // namespace where console plugin and flowlogs-pipeline have been deployed. 
- Namespace string `json:"namespace,omitempty"` -} - -// +kubebuilder:deprecatedversion -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:resource:scope=Cluster -// +kubebuilder:printcolumn:name="Agent",type="string",JSONPath=`.spec.agent.type` -// +kubebuilder:printcolumn:name="Sampling (EBPF)",type="string",JSONPath=`.spec.agent.ebpf.sampling` -// +kubebuilder:printcolumn:name="Deployment Model",type="string",JSONPath=`.spec.deploymentModel` -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type=="Ready")].reason` - -// FlowCollector is the Schema for the flowcollectors API, which pilots and configures netflow collection. -// -// Deprecated: This package will be removed in one of the next releases. -type FlowCollector struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec FlowCollectorSpec `json:"spec,omitempty"` - Status FlowCollectorStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true - -// FlowCollectorList contains a list of FlowCollector -// -// Deprecated: This package will be removed in one of the next releases. -type FlowCollectorList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []FlowCollector `json:"items"` -} - -func init() { - SchemeBuilder.Register(&FlowCollector{}, &FlowCollectorList{}) -} diff --git a/apis/flowcollector/v1alpha1/flowcollector_webhook.go b/apis/flowcollector/v1alpha1/flowcollector_webhook.go deleted file mode 100644 index 924f4e9dc..000000000 --- a/apis/flowcollector/v1alpha1/flowcollector_webhook.go +++ /dev/null @@ -1,395 +0,0 @@ -/* -Copyright 2021. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "fmt" - - "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2" - utilconversion "github.com/netobserv/network-observability-operator/pkg/conversion" - "github.com/netobserv/network-observability-operator/pkg/helper" - "github.com/netobserv/network-observability-operator/pkg/metrics" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - apiconversion "k8s.io/apimachinery/pkg/conversion" - "sigs.k8s.io/controller-runtime/pkg/conversion" -) - -// ConvertTo converts this v1alpha1 FlowCollector to its v1beta2 equivalent (the conversion Hub) -// https://book.kubebuilder.io/multiversion-tutorial/conversion.html -func (r *FlowCollector) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta2.FlowCollector) - - if err := Convert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(r, dst, nil); err != nil { - return fmt.Errorf("copying v1alpha1.FlowCollector into v1beta2.FlowCollector: %w", err) - } - dst.Status.Conditions = make([]v1.Condition, len(r.Status.Conditions)) - copy(dst.Status.Conditions, r.Status.Conditions) - - // Manually restore data. 
- restored := &v1beta2.FlowCollector{} - if ok, err := utilconversion.UnmarshalData(r, restored); err != nil || !ok { - return err - } - - // Agent - if restored.Spec.Agent.EBPF.Features != nil { - dst.Spec.Agent.EBPF.Features = make([]v1beta2.AgentFeature, len(restored.Spec.Agent.EBPF.Features)) - copy(dst.Spec.Agent.EBPF.Features, restored.Spec.Agent.EBPF.Features) - } - - // Processor - dst.Spec.Processor.LogTypes = restored.Spec.Processor.LogTypes - if restored.Spec.Processor.Advanced.ConversationHeartbeatInterval != nil { - dst.Spec.Processor.Advanced.ConversationHeartbeatInterval = restored.Spec.Processor.Advanced.ConversationHeartbeatInterval - } - if restored.Spec.Processor.Advanced.ConversationEndTimeout != nil { - dst.Spec.Processor.Advanced.ConversationEndTimeout = restored.Spec.Processor.Advanced.ConversationEndTimeout - } - if restored.Spec.Processor.Advanced.ConversationTerminatingTimeout != nil { - dst.Spec.Processor.Advanced.ConversationTerminatingTimeout = restored.Spec.Processor.Advanced.ConversationTerminatingTimeout - } - if restored.Spec.Processor.Metrics.DisableAlerts != nil { - dst.Spec.Processor.Metrics.DisableAlerts = restored.Spec.Processor.Metrics.DisableAlerts - } - if restored.Spec.Processor.ClusterName != "" { - dst.Spec.Processor.ClusterName = restored.Spec.Processor.ClusterName - } - dst.Spec.Processor.AddZone = restored.Spec.Processor.AddZone - if restored.Spec.Processor.MultiClusterDeployment != nil { - dst.Spec.Processor.MultiClusterDeployment = restored.Spec.Processor.MultiClusterDeployment - } - - dst.Spec.Processor.Metrics.Server.TLS.InsecureSkipVerify = restored.Spec.Processor.Metrics.Server.TLS.InsecureSkipVerify - dst.Spec.Processor.Metrics.Server.TLS.ProvidedCaFile = restored.Spec.Processor.Metrics.Server.TLS.ProvidedCaFile - - // Kafka - dst.Spec.Kafka.SASL = restored.Spec.Kafka.SASL - - // Loki - dst.Spec.Loki.Enable = restored.Spec.Loki.Enable - - if restored.Spec.Processor.Metrics.IncludeList != nil { - list := 
make([]v1beta2.FLPMetric, len(*restored.Spec.Processor.Metrics.IncludeList)) - copy(list, *restored.Spec.Processor.Metrics.IncludeList) - dst.Spec.Processor.Metrics.IncludeList = &list - } - - return nil -} - -// ConvertFrom converts the hub version v1beta2 FlowCollector object to v1alpha1 -func (r *FlowCollector) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta2.FlowCollector) - - if err := Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(src, r, nil); err != nil { - return fmt.Errorf("copying v1beta2.FlowCollector into v1alpha1.FlowCollector: %w", err) - } - r.Status.Conditions = make([]v1.Condition, len(src.Status.Conditions)) - copy(r.Status.Conditions, src.Status.Conditions) - - // Preserve Hub data on down-conversion except for metadata - return utilconversion.MarshalData(src, r) -} - -func (r *FlowCollectorList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1beta2.FlowCollectorList) - return Convert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList(r, dst, nil) -} - -func (r *FlowCollectorList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1beta2.FlowCollectorList) - return Convert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList(src, r, nil) -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in *v1beta2.FlowCollectorFLP, out *FlowCollectorFLP, s apiconversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in, out, s) -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(in *v1beta2.FLPMetrics, out *FLPMetrics, s 
apiconversion.Scope) error { - return autoConvert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(in, out, s) -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in *v1beta2.FlowCollectorLoki, out *FlowCollectorLoki, s apiconversion.Scope) error { - // Note that, despite we loose namespace info here, this isn't an issue because it's going to be restored from annotations - manual := helper.NewLokiConfig(in, "") - out.URL = manual.IngesterURL - out.QuerierURL = manual.QuerierURL - out.StatusURL = manual.StatusURL - out.TenantID = manual.TenantID - out.AuthToken = utilconversion.PascalToUpper(string(manual.AuthToken), '_') - if err := Convert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(&manual.TLS, &out.TLS, nil); err != nil { - return fmt.Errorf("copying v1beta2.Loki.TLS into v1alpha1.Loki.TLS: %w", err) - } - return autoConvert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in, out, s) -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in *FlowCollectorLoki, out *v1beta2.FlowCollectorLoki, s apiconversion.Scope) error { - out.Mode = v1beta2.LokiModeManual - out.Manual = v1beta2.LokiManualParams{ - IngesterURL: in.URL, - QuerierURL: in.QuerierURL, - StatusURL: in.StatusURL, - TenantID: in.TenantID, - AuthToken: v1beta2.LokiAuthToken(utilconversion.UpperToPascal(in.AuthToken)), - } - // fallback on ingester url if querier is not set - if len(out.Manual.QuerierURL) == 0 { - out.Manual.QuerierURL = out.Manual.IngesterURL - } - if err := Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(&in.TLS, &out.Manual.TLS, nil); err 
!= nil { - return fmt.Errorf("copying v1alpha1.Loki.TLS into v1beta2.Loki.Manual.TLS: %w", err) - } - return autoConvert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in, out, s) -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in *v1beta2.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s apiconversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in, out, s) -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s apiconversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in, out, s) -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in *FlowCollectorSpec, out *v1beta2.FlowCollectorSpec, s apiconversion.Scope) error { - if err := autoConvert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in, out, s); err != nil { - return err - } - out.DeploymentModel = v1beta2.FlowCollectorDeploymentModel(utilconversion.UpperToPascal(in.DeploymentModel)) - out.Exporters = []*v1beta2.FlowCollectorExporter{} - for _, inExporter := range in.Exporters { - outExporter := &v1beta2.FlowCollectorExporter{} - if err := 
Convert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(inExporter, outExporter, s); err != nil { - return err - } - out.Exporters = append(out.Exporters, outExporter) - } - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in *v1beta2.FlowCollectorSpec, out *FlowCollectorSpec, s apiconversion.Scope) error { - if err := autoConvert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in, out, s); err != nil { - return err - } - out.DeploymentModel = utilconversion.PascalToUpper(string(in.DeploymentModel), '_') - out.Exporters = []*FlowCollectorExporter{} - for _, inExporter := range in.Exporters { - outExporter := &FlowCollectorExporter{} - if err := Convert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(inExporter, outExporter, s); err != nil { - return err - } - out.Exporters = append(out.Exporters, outExporter) - } - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s apiconversion.Scope) error { - if err := autoConvert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in, out, s); err != nil { - return err - } - out.Type = v1beta2.FlowCollectorAgentType(utilconversion.UpperToPascal(in.Type)) - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func 
Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s apiconversion.Scope) error { - if err := autoConvert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in, out, s); err != nil { - return err - } - out.Type = utilconversion.PascalToUpper(string(in.Type), '_') - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1beta2.ServerTLS, s apiconversion.Scope) error { - if err := autoConvert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(in, out, s); err != nil { - return err - } - out.Type = v1beta2.ServerTLSConfigType(utilconversion.UpperToPascal(string(in.Type))) - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s apiconversion.Scope) error { - if err := autoConvert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in, out, s); err != nil { - return err - } - out.Type = ServerTLSConfigType(utilconversion.PascalToUpper(string(in.Type), '_')) - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta2.FlowCollectorHPA, s apiconversion.Scope) error { - if err := autoConvert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in, out, s); err != nil { - return err - } - out.Status = 
v1beta2.HPAStatus(utilconversion.UpperToPascal(in.Status)) - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in *v1beta2.FlowCollectorHPA, out *FlowCollectorHPA, s apiconversion.Scope) error { - if err := autoConvert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in, out, s); err != nil { - return err - } - out.Status = utilconversion.PascalToUpper(string(in.Status), '_') - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_SASLConfig_To_v1beta2_SASLConfig(in *SASLConfig, out *v1beta2.SASLConfig, s apiconversion.Scope) error { - if err := autoConvert_v1alpha1_SASLConfig_To_v1beta2_SASLConfig(in, out, s); err != nil { - return err - } - out.Type = v1beta2.SASLType(utilconversion.UpperToPascal(string(in.Type))) - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_SASLConfig_To_v1alpha1_SASLConfig(in *v1beta2.SASLConfig, out *SASLConfig, s apiconversion.Scope) error { - if err := autoConvert_v1beta2_SASLConfig_To_v1alpha1_SASLConfig(in, out, s); err != nil { - return err - } - out.Type = SASLType(utilconversion.PascalToUpper(string(in.Type), '_')) - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func 
Convert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s apiconversion.Scope) error { - if err := autoConvert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in, out, s); err != nil { - return err - } - out.Type = v1beta2.ExporterType(utilconversion.UpperToPascal(string(in.Type))) - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have camel case enum in v1beta2 which were uppercase in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in *v1beta2.FlowCollectorExporter, out *FlowCollectorExporter, s apiconversion.Scope) error { - if err := autoConvert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in, out, s); err != nil { - return err - } - out.Type = ExporterType(utilconversion.PascalToUpper(string(in.Type), '_')) - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1beta1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out *v1beta2.FLPMetrics, s apiconversion.Scope) error { - out.IncludeList = metrics.GetAsIncludeList(in.IgnoreTags, nil) - return autoConvert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(in, out, s) -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta2.FlowCollectorConsolePlugin, s apiconversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in, out, 
s) -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s apiconversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in, out, s) -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_DebugConfig_To_v1beta2_AdvancedAgentConfig(in *DebugConfig, out *v1beta2.AdvancedAgentConfig, s apiconversion.Scope) error { - out.Env = in.Env - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_AdvancedAgentConfig_To_v1alpha1_DebugConfig(in *v1beta2.AdvancedAgentConfig, out *DebugConfig, s apiconversion.Scope) error { - out.Env = in.Env - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_DebugConfig_To_v1beta2_AdvancedProcessorConfig(in *DebugConfig, out *v1beta2.AdvancedProcessorConfig, s apiconversion.Scope) error { - out.Env = in.Env - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1beta2_AdvancedProcessorConfig_To_v1alpha1_DebugConfig(in *v1beta2.AdvancedProcessorConfig, out *DebugConfig, s apiconversion.Scope) 
error { - out.Env = in.Env - return nil -} - -// This function need to be manually created because conversion-gen not able to create it intentionally because -// we have new defined fields in v1beta2 not in v1alpha1 -// nolint:golint,stylecheck,revive -func Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in *FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s apiconversion.Scope) error { - out.Advanced = &v1beta2.AdvancedAgentConfig{ - Env: in.Debug.Env, - } - return autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in, out, s) -} diff --git a/apis/flowcollector/v1alpha1/groupversion_info.go b/apis/flowcollector/v1alpha1/groupversion_info.go deleted file mode 100644 index 913c57b7f..000000000 --- a/apis/flowcollector/v1alpha1/groupversion_info.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2021. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Package v1alpha1 contains API Schema definitions for the flows v1alpha1 API group -// +kubebuilder:object:generate=true -// +groupName=flows.netobserv.io -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" -) - -var ( - // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: "flows.netobserv.io", Version: "v1alpha1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} - - // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme - localSchemeBuilder = SchemeBuilder.SchemeBuilder -) diff --git a/apis/flowcollector/v1alpha1/zz_generated.conversion.go b/apis/flowcollector/v1alpha1/zz_generated.conversion.go deleted file mode 100644 index 899103749..000000000 --- a/apis/flowcollector/v1alpha1/zz_generated.conversion.go +++ /dev/null @@ -1,1017 +0,0 @@ -//go:build !ignore_autogenerated_core -// +build !ignore_autogenerated_core - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - unsafe "unsafe" - - v1beta2 "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2" - v2 "k8s.io/api/autoscaling/v2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*CertificateReference)(nil), (*v1beta2.CertificateReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(a.(*CertificateReference), b.(*v1beta2.CertificateReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.CertificateReference)(nil), (*CertificateReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(a.(*v1beta2.CertificateReference), b.(*CertificateReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ClientTLS)(nil), (*v1beta2.ClientTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(a.(*ClientTLS), b.(*v1beta2.ClientTLS), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.ClientTLS)(nil), (*ClientTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(a.(*v1beta2.ClientTLS), b.(*ClientTLS), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ClusterNetworkOperatorConfig)(nil), (*v1beta2.ClusterNetworkOperatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error 
{ - return Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(a.(*ClusterNetworkOperatorConfig), b.(*v1beta2.ClusterNetworkOperatorConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.ClusterNetworkOperatorConfig)(nil), (*ClusterNetworkOperatorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(a.(*v1beta2.ClusterNetworkOperatorConfig), b.(*ClusterNetworkOperatorConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ConsolePluginPortConfig)(nil), (*v1beta2.ConsolePluginPortConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(a.(*ConsolePluginPortConfig), b.(*v1beta2.ConsolePluginPortConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.ConsolePluginPortConfig)(nil), (*ConsolePluginPortConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(a.(*v1beta2.ConsolePluginPortConfig), b.(*ConsolePluginPortConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*FileReference)(nil), (*v1beta2.FileReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FileReference_To_v1beta2_FileReference(a.(*FileReference), b.(*v1beta2.FileReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FileReference)(nil), (*FileReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FileReference_To_v1alpha1_FileReference(a.(*v1beta2.FileReference), b.(*FileReference), scope) - }); err != nil { - return err - } - if err := 
s.AddGeneratedConversionFunc((*FlowCollector)(nil), (*v1beta2.FlowCollector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(a.(*FlowCollector), b.(*v1beta2.FlowCollector), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollector)(nil), (*FlowCollector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(a.(*v1beta2.FlowCollector), b.(*FlowCollector), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIX)(nil), (*v1beta2.FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(a.(*FlowCollectorIPFIX), b.(*v1beta2.FlowCollectorIPFIX), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIX)(nil), (*FlowCollectorIPFIX)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(a.(*v1beta2.FlowCollectorIPFIX), b.(*FlowCollectorIPFIX), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*FlowCollectorIPFIXReceiver)(nil), (*v1beta2.FlowCollectorIPFIXReceiver)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(a.(*FlowCollectorIPFIXReceiver), b.(*v1beta2.FlowCollectorIPFIXReceiver), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorIPFIXReceiver)(nil), (*FlowCollectorIPFIXReceiver)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(a.(*v1beta2.FlowCollectorIPFIXReceiver), b.(*FlowCollectorIPFIXReceiver), 
scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*FlowCollectorKafka)(nil), (*v1beta2.FlowCollectorKafka)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(a.(*FlowCollectorKafka), b.(*v1beta2.FlowCollectorKafka), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorKafka)(nil), (*FlowCollectorKafka)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(a.(*v1beta2.FlowCollectorKafka), b.(*FlowCollectorKafka), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*FlowCollectorList)(nil), (*v1beta2.FlowCollectorList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList(a.(*FlowCollectorList), b.(*v1beta2.FlowCollectorList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorList)(nil), (*FlowCollectorList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList(a.(*v1beta2.FlowCollectorList), b.(*FlowCollectorList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*FlowCollectorStatus)(nil), (*v1beta2.FlowCollectorStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(a.(*FlowCollectorStatus), b.(*v1beta2.FlowCollectorStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.FlowCollectorStatus)(nil), (*FlowCollectorStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(a.(*v1beta2.FlowCollectorStatus), 
b.(*FlowCollectorStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*MetricsServerConfig)(nil), (*v1beta2.MetricsServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(a.(*MetricsServerConfig), b.(*v1beta2.MetricsServerConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.MetricsServerConfig)(nil), (*MetricsServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(a.(*v1beta2.MetricsServerConfig), b.(*MetricsServerConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*OVNKubernetesConfig)(nil), (*v1beta2.OVNKubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(a.(*OVNKubernetesConfig), b.(*v1beta2.OVNKubernetesConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.OVNKubernetesConfig)(nil), (*OVNKubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(a.(*v1beta2.OVNKubernetesConfig), b.(*OVNKubernetesConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*QuickFilter)(nil), (*v1beta2.QuickFilter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_QuickFilter_To_v1beta2_QuickFilter(a.(*QuickFilter), b.(*v1beta2.QuickFilter), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.QuickFilter)(nil), (*QuickFilter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_QuickFilter_To_v1alpha1_QuickFilter(a.(*v1beta2.QuickFilter), b.(*QuickFilter), scope) - }); err 
!= nil { - return err - } - if err := s.AddConversionFunc((*DebugConfig)(nil), (*v1beta2.AdvancedAgentConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_DebugConfig_To_v1beta2_AdvancedAgentConfig(a.(*DebugConfig), b.(*v1beta2.AdvancedAgentConfig), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*DebugConfig)(nil), (*v1beta2.AdvancedProcessorConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_DebugConfig_To_v1beta2_AdvancedProcessorConfig(a.(*DebugConfig), b.(*v1beta2.AdvancedProcessorConfig), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*FLPMetrics)(nil), (*v1beta2.FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(a.(*FLPMetrics), b.(*v1beta2.FLPMetrics), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*FlowCollectorAgent)(nil), (*v1beta2.FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(a.(*FlowCollectorAgent), b.(*v1beta2.FlowCollectorAgent), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*FlowCollectorConsolePlugin)(nil), (*v1beta2.FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(a.(*FlowCollectorConsolePlugin), b.(*v1beta2.FlowCollectorConsolePlugin), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*FlowCollectorEBPF)(nil), (*v1beta2.FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(a.(*FlowCollectorEBPF), b.(*v1beta2.FlowCollectorEBPF), scope) - }); err != nil { - return err - } - if err := 
s.AddConversionFunc((*FlowCollectorExporter)(nil), (*v1beta2.FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(a.(*FlowCollectorExporter), b.(*v1beta2.FlowCollectorExporter), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*FlowCollectorFLP)(nil), (*v1beta2.FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(a.(*FlowCollectorFLP), b.(*v1beta2.FlowCollectorFLP), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*FlowCollectorHPA)(nil), (*v1beta2.FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(a.(*FlowCollectorHPA), b.(*v1beta2.FlowCollectorHPA), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*FlowCollectorLoki)(nil), (*v1beta2.FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(a.(*FlowCollectorLoki), b.(*v1beta2.FlowCollectorLoki), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*FlowCollectorSpec)(nil), (*v1beta2.FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(a.(*FlowCollectorSpec), b.(*v1beta2.FlowCollectorSpec), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*SASLConfig)(nil), (*v1beta2.SASLConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_SASLConfig_To_v1beta2_SASLConfig(a.(*SASLConfig), b.(*v1beta2.SASLConfig), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*ServerTLS)(nil), (*v1beta2.ServerTLS)(nil), func(a, b interface{}, scope 
conversion.Scope) error { - return Convert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(a.(*ServerTLS), b.(*v1beta2.ServerTLS), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.AdvancedAgentConfig)(nil), (*DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_AdvancedAgentConfig_To_v1alpha1_DebugConfig(a.(*v1beta2.AdvancedAgentConfig), b.(*DebugConfig), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.AdvancedProcessorConfig)(nil), (*DebugConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_AdvancedProcessorConfig_To_v1alpha1_DebugConfig(a.(*v1beta2.AdvancedProcessorConfig), b.(*DebugConfig), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.FLPMetrics)(nil), (*FLPMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(a.(*v1beta2.FLPMetrics), b.(*FLPMetrics), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.FlowCollectorAgent)(nil), (*FlowCollectorAgent)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(a.(*v1beta2.FlowCollectorAgent), b.(*FlowCollectorAgent), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.FlowCollectorConsolePlugin)(nil), (*FlowCollectorConsolePlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(a.(*v1beta2.FlowCollectorConsolePlugin), b.(*FlowCollectorConsolePlugin), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.FlowCollectorEBPF)(nil), (*FlowCollectorEBPF)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(a.(*v1beta2.FlowCollectorEBPF), b.(*FlowCollectorEBPF), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.FlowCollectorExporter)(nil), (*FlowCollectorExporter)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(a.(*v1beta2.FlowCollectorExporter), b.(*FlowCollectorExporter), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.FlowCollectorFLP)(nil), (*FlowCollectorFLP)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(a.(*v1beta2.FlowCollectorFLP), b.(*FlowCollectorFLP), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.FlowCollectorHPA)(nil), (*FlowCollectorHPA)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(a.(*v1beta2.FlowCollectorHPA), b.(*FlowCollectorHPA), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.FlowCollectorLoki)(nil), (*FlowCollectorLoki)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(a.(*v1beta2.FlowCollectorLoki), b.(*FlowCollectorLoki), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.FlowCollectorSpec)(nil), (*FlowCollectorSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(a.(*v1beta2.FlowCollectorSpec), b.(*FlowCollectorSpec), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.SASLConfig)(nil), (*SASLConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_SASLConfig_To_v1alpha1_SASLConfig(a.(*v1beta2.SASLConfig), 
b.(*SASLConfig), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.ServerTLS)(nil), (*ServerTLS)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(a.(*v1beta2.ServerTLS), b.(*ServerTLS), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(in *CertificateReference, out *v1beta2.CertificateReference, s conversion.Scope) error { - out.Type = v1beta2.MountableType(in.Type) - out.Name = in.Name - out.CertFile = in.CertFile - out.CertKey = in.CertKey - out.Namespace = in.Namespace - return nil -} - -// Convert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference is an autogenerated conversion function. -func Convert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(in *CertificateReference, out *v1beta2.CertificateReference, s conversion.Scope) error { - return autoConvert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(in, out, s) -} - -func autoConvert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(in *v1beta2.CertificateReference, out *CertificateReference, s conversion.Scope) error { - out.Type = MountableType(in.Type) - out.Name = in.Name - out.Namespace = in.Namespace - out.CertFile = in.CertFile - out.CertKey = in.CertKey - return nil -} - -// Convert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference is an autogenerated conversion function. 
-func Convert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(in *v1beta2.CertificateReference, out *CertificateReference, s conversion.Scope) error { - return autoConvert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(in, out, s) -} - -func autoConvert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(in *ClientTLS, out *v1beta2.ClientTLS, s conversion.Scope) error { - out.Enable = in.Enable - out.InsecureSkipVerify = in.InsecureSkipVerify - if err := Convert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(&in.CACert, &out.CACert, s); err != nil { - return err - } - if err := Convert_v1alpha1_CertificateReference_To_v1beta2_CertificateReference(&in.UserCert, &out.UserCert, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS is an autogenerated conversion function. -func Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(in *ClientTLS, out *v1beta2.ClientTLS, s conversion.Scope) error { - return autoConvert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(in, out, s) -} - -func autoConvert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(in *v1beta2.ClientTLS, out *ClientTLS, s conversion.Scope) error { - out.Enable = in.Enable - out.InsecureSkipVerify = in.InsecureSkipVerify - if err := Convert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(&in.CACert, &out.CACert, s); err != nil { - return err - } - if err := Convert_v1beta2_CertificateReference_To_v1alpha1_CertificateReference(&in.UserCert, &out.UserCert, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS is an autogenerated conversion function. 
-func Convert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(in *v1beta2.ClientTLS, out *ClientTLS, s conversion.Scope) error { - return autoConvert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(in, out, s) -} - -func autoConvert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(in *ClusterNetworkOperatorConfig, out *v1beta2.ClusterNetworkOperatorConfig, s conversion.Scope) error { - out.Namespace = in.Namespace - return nil -} - -// Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig is an autogenerated conversion function. -func Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(in *ClusterNetworkOperatorConfig, out *v1beta2.ClusterNetworkOperatorConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(in, out, s) -} - -func autoConvert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(in *v1beta2.ClusterNetworkOperatorConfig, out *ClusterNetworkOperatorConfig, s conversion.Scope) error { - out.Namespace = in.Namespace - return nil -} - -// Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig is an autogenerated conversion function. 
-func Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(in *v1beta2.ClusterNetworkOperatorConfig, out *ClusterNetworkOperatorConfig, s conversion.Scope) error { - return autoConvert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(in, out, s) -} - -func autoConvert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(in *ConsolePluginPortConfig, out *v1beta2.ConsolePluginPortConfig, s conversion.Scope) error { - if err := v1.Convert_bool_To_Pointer_bool(&in.Enable, &out.Enable, s); err != nil { - return err - } - out.PortNames = *(*map[string]string)(unsafe.Pointer(&in.PortNames)) - return nil -} - -// Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig is an autogenerated conversion function. -func Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(in *ConsolePluginPortConfig, out *v1beta2.ConsolePluginPortConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(in, out, s) -} - -func autoConvert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(in *v1beta2.ConsolePluginPortConfig, out *ConsolePluginPortConfig, s conversion.Scope) error { - if err := v1.Convert_Pointer_bool_To_bool(&in.Enable, &out.Enable, s); err != nil { - return err - } - out.PortNames = *(*map[string]string)(unsafe.Pointer(&in.PortNames)) - return nil -} - -// Convert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig is an autogenerated conversion function. 
-func Convert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(in *v1beta2.ConsolePluginPortConfig, out *ConsolePluginPortConfig, s conversion.Scope) error { - return autoConvert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(in, out, s) -} - -func autoConvert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(in *FLPMetrics, out *v1beta2.FLPMetrics, s conversion.Scope) error { - if err := Convert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(&in.Server, &out.Server, s); err != nil { - return err - } - // WARNING: in.IgnoreTags requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(in *v1beta2.FLPMetrics, out *FLPMetrics, s conversion.Scope) error { - if err := Convert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(&in.Server, &out.Server, s); err != nil { - return err - } - // WARNING: in.IncludeList requires manual conversion: does not exist in peer-type - // WARNING: in.DisableAlerts requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1alpha1_FileReference_To_v1beta2_FileReference(in *FileReference, out *v1beta2.FileReference, s conversion.Scope) error { - out.Type = v1beta2.MountableType(in.Type) - out.Name = in.Name - out.Namespace = in.Namespace - out.File = in.File - return nil -} - -// Convert_v1alpha1_FileReference_To_v1beta2_FileReference is an autogenerated conversion function. 
-func Convert_v1alpha1_FileReference_To_v1beta2_FileReference(in *FileReference, out *v1beta2.FileReference, s conversion.Scope) error { - return autoConvert_v1alpha1_FileReference_To_v1beta2_FileReference(in, out, s) -} - -func autoConvert_v1beta2_FileReference_To_v1alpha1_FileReference(in *v1beta2.FileReference, out *FileReference, s conversion.Scope) error { - out.Type = MountableType(in.Type) - out.Name = in.Name - out.Namespace = in.Namespace - out.File = in.File - return nil -} - -// Convert_v1beta2_FileReference_To_v1alpha1_FileReference is an autogenerated conversion function. -func Convert_v1beta2_FileReference_To_v1alpha1_FileReference(in *v1beta2.FileReference, out *FileReference, s conversion.Scope) error { - return autoConvert_v1beta2_FileReference_To_v1alpha1_FileReference(in, out, s) -} - -func autoConvert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(in *FlowCollector, out *v1beta2.FlowCollector, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector is an autogenerated conversion function. 
-func Convert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(in *FlowCollector, out *v1beta2.FlowCollector, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(in, out, s) -} - -func autoConvert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(in *v1beta2.FlowCollector, out *FlowCollector, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector is an autogenerated conversion function. -func Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(in *v1beta2.FlowCollector, out *FlowCollector, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(in, out, s) -} - -func autoConvert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(in *FlowCollectorAgent, out *v1beta2.FlowCollectorAgent, s conversion.Scope) error { - out.Type = v1beta2.FlowCollectorAgentType(in.Type) - if err := Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { - return err - } - if err := Convert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(in *v1beta2.FlowCollectorAgent, out *FlowCollectorAgent, s conversion.Scope) error { - out.Type = string(in.Type) - if err := Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(&in.IPFIX, &out.IPFIX, s); err != nil { - return err - } - if err := Convert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(&in.EBPF, &out.EBPF, s); err != nil { - return err - } - return nil -} - -func 
autoConvert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(in *FlowCollectorConsolePlugin, out *v1beta2.FlowCollectorConsolePlugin, s conversion.Scope) error { - // WARNING: in.Register requires manual conversion: does not exist in peer-type - if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { - return err - } - // WARNING: in.Port requires manual conversion: does not exist in peer-type - out.ImagePullPolicy = in.ImagePullPolicy - out.Resources = in.Resources - out.LogLevel = in.LogLevel - if err := Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(&in.Autoscaler, &out.Autoscaler, s); err != nil { - return err - } - if err := Convert_v1alpha1_ConsolePluginPortConfig_To_v1beta2_ConsolePluginPortConfig(&in.PortNaming, &out.PortNaming, s); err != nil { - return err - } - out.QuickFilters = *(*[]v1beta2.QuickFilter)(unsafe.Pointer(&in.QuickFilters)) - return nil -} - -func autoConvert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(in *v1beta2.FlowCollectorConsolePlugin, out *FlowCollectorConsolePlugin, s conversion.Scope) error { - // WARNING: in.Enable requires manual conversion: does not exist in peer-type - if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { - return err - } - out.ImagePullPolicy = in.ImagePullPolicy - out.Resources = in.Resources - out.LogLevel = in.LogLevel - if err := Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(&in.Autoscaler, &out.Autoscaler, s); err != nil { - return err - } - if err := Convert_v1beta2_ConsolePluginPortConfig_To_v1alpha1_ConsolePluginPortConfig(&in.PortNaming, &out.PortNaming, s); err != nil { - return err - } - out.QuickFilters = *(*[]QuickFilter)(unsafe.Pointer(&in.QuickFilters)) - // WARNING: in.Advanced requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1alpha1_FlowCollectorEBPF_To_v1beta2_FlowCollectorEBPF(in 
*FlowCollectorEBPF, out *v1beta2.FlowCollectorEBPF, s conversion.Scope) error { - out.ImagePullPolicy = in.ImagePullPolicy - out.Resources = in.Resources - out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Interfaces = *(*[]string)(unsafe.Pointer(&in.Interfaces)) - out.ExcludeInterfaces = *(*[]string)(unsafe.Pointer(&in.ExcludeInterfaces)) - out.LogLevel = in.LogLevel - out.Privileged = in.Privileged - out.KafkaBatchSize = in.KafkaBatchSize - // WARNING: in.Debug requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1beta2_FlowCollectorEBPF_To_v1alpha1_FlowCollectorEBPF(in *v1beta2.FlowCollectorEBPF, out *FlowCollectorEBPF, s conversion.Scope) error { - out.ImagePullPolicy = in.ImagePullPolicy - out.Resources = in.Resources - out.Sampling = (*int32)(unsafe.Pointer(in.Sampling)) - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Interfaces = *(*[]string)(unsafe.Pointer(&in.Interfaces)) - out.ExcludeInterfaces = *(*[]string)(unsafe.Pointer(&in.ExcludeInterfaces)) - out.LogLevel = in.LogLevel - out.Privileged = in.Privileged - out.KafkaBatchSize = in.KafkaBatchSize - // WARNING: in.Advanced requires manual conversion: does not exist in peer-type - // WARNING: in.Features requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1alpha1_FlowCollectorExporter_To_v1beta2_FlowCollectorExporter(in *FlowCollectorExporter, out *v1beta2.FlowCollectorExporter, s conversion.Scope) error { - out.Type = v1beta2.ExporterType(in.Type) - if err := Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { - return err - } - if err := Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { - return err - } - return nil -} - -func 
autoConvert_v1beta2_FlowCollectorExporter_To_v1alpha1_FlowCollectorExporter(in *v1beta2.FlowCollectorExporter, out *FlowCollectorExporter, s conversion.Scope) error { - out.Type = ExporterType(in.Type) - if err := Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { - return err - } - if err := Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(&in.IPFIX, &out.IPFIX, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(in *FlowCollectorFLP, out *v1beta2.FlowCollectorFLP, s conversion.Scope) error { - // WARNING: in.Port requires manual conversion: does not exist in peer-type - // WARNING: in.HealthPort requires manual conversion: does not exist in peer-type - // WARNING: in.ProfilePort requires manual conversion: does not exist in peer-type - out.ImagePullPolicy = in.ImagePullPolicy - if err := Convert_v1alpha1_FLPMetrics_To_v1beta2_FLPMetrics(&in.Metrics, &out.Metrics, s); err != nil { - return err - } - out.LogLevel = in.LogLevel - out.Resources = in.Resources - // WARNING: in.EnableKubeProbes requires manual conversion: does not exist in peer-type - // WARNING: in.DropUnusedFields requires manual conversion: does not exist in peer-type - if err := v1.Convert_int32_To_Pointer_int32(&in.KafkaConsumerReplicas, &out.KafkaConsumerReplicas, s); err != nil { - return err - } - if err := Convert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(&in.KafkaConsumerAutoscaler, &out.KafkaConsumerAutoscaler, s); err != nil { - return err - } - out.KafkaConsumerQueueCapacity = in.KafkaConsumerQueueCapacity - out.KafkaConsumerBatchSize = in.KafkaConsumerBatchSize - // WARNING: in.Debug requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(in *v1beta2.FlowCollectorFLP, out *FlowCollectorFLP, s conversion.Scope) error { - 
out.ImagePullPolicy = in.ImagePullPolicy - if err := Convert_v1beta2_FLPMetrics_To_v1alpha1_FLPMetrics(&in.Metrics, &out.Metrics, s); err != nil { - return err - } - out.LogLevel = in.LogLevel - out.Resources = in.Resources - if err := v1.Convert_Pointer_int32_To_int32(&in.KafkaConsumerReplicas, &out.KafkaConsumerReplicas, s); err != nil { - return err - } - if err := Convert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(&in.KafkaConsumerAutoscaler, &out.KafkaConsumerAutoscaler, s); err != nil { - return err - } - out.KafkaConsumerQueueCapacity = in.KafkaConsumerQueueCapacity - out.KafkaConsumerBatchSize = in.KafkaConsumerBatchSize - // WARNING: in.LogTypes requires manual conversion: does not exist in peer-type - // WARNING: in.ClusterName requires manual conversion: does not exist in peer-type - // WARNING: in.MultiClusterDeployment requires manual conversion: does not exist in peer-type - // WARNING: in.AddZone requires manual conversion: does not exist in peer-type - // WARNING: in.Advanced requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1alpha1_FlowCollectorHPA_To_v1beta2_FlowCollectorHPA(in *FlowCollectorHPA, out *v1beta2.FlowCollectorHPA, s conversion.Scope) error { - out.Status = v1beta2.HPAStatus(in.Status) - out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) - out.MaxReplicas = in.MaxReplicas - out.Metrics = *(*[]v2.MetricSpec)(unsafe.Pointer(&in.Metrics)) - return nil -} - -func autoConvert_v1beta2_FlowCollectorHPA_To_v1alpha1_FlowCollectorHPA(in *v1beta2.FlowCollectorHPA, out *FlowCollectorHPA, s conversion.Scope) error { - out.Status = string(in.Status) - out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) - out.MaxReplicas = in.MaxReplicas - out.Metrics = *(*[]v2.MetricSpec)(unsafe.Pointer(&in.Metrics)) - return nil -} - -func autoConvert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { - 
out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Sampling = in.Sampling - out.ForceSampleAll = in.ForceSampleAll - if err := Convert_v1alpha1_ClusterNetworkOperatorConfig_To_v1beta2_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { - return err - } - if err := Convert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in *FlowCollectorIPFIX, out *v1beta2.FlowCollectorIPFIX, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorIPFIX_To_v1beta2_FlowCollectorIPFIX(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { - out.CacheActiveTimeout = in.CacheActiveTimeout - out.CacheMaxFlows = in.CacheMaxFlows - out.Sampling = in.Sampling - out.ForceSampleAll = in.ForceSampleAll - if err := Convert_v1beta2_ClusterNetworkOperatorConfig_To_v1alpha1_ClusterNetworkOperatorConfig(&in.ClusterNetworkOperator, &out.ClusterNetworkOperator, s); err != nil { - return err - } - if err := Convert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(&in.OVNKubernetes, &out.OVNKubernetes, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX is an autogenerated conversion function. 
-func Convert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in *v1beta2.FlowCollectorIPFIX, out *FlowCollectorIPFIX, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorIPFIX_To_v1alpha1_FlowCollectorIPFIX(in, out, s) -} - -func autoConvert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIPFIXReceiver, s conversion.Scope) error { - out.TargetHost = in.TargetHost - out.TargetPort = in.TargetPort - out.Transport = in.Transport - return nil -} - -// Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in *FlowCollectorIPFIXReceiver, out *v1beta2.FlowCollectorIPFIXReceiver, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorIPFIXReceiver_To_v1beta2_FlowCollectorIPFIXReceiver(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(in *v1beta2.FlowCollectorIPFIXReceiver, out *FlowCollectorIPFIXReceiver, s conversion.Scope) error { - out.TargetHost = in.TargetHost - out.TargetPort = in.TargetPort - out.Transport = in.Transport - return nil -} - -// Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver is an autogenerated conversion function. 
-func Convert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(in *v1beta2.FlowCollectorIPFIXReceiver, out *FlowCollectorIPFIXReceiver, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorIPFIXReceiver_To_v1alpha1_FlowCollectorIPFIXReceiver(in, out, s) -} - -func autoConvert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta2.FlowCollectorKafka, s conversion.Scope) error { - out.Address = in.Address - out.Topic = in.Topic - if err := Convert_v1alpha1_ClientTLS_To_v1beta2_ClientTLS(&in.TLS, &out.TLS, s); err != nil { - return err - } - if err := Convert_v1alpha1_SASLConfig_To_v1beta2_SASLConfig(&in.SASL, &out.SASL, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in *FlowCollectorKafka, out *v1beta2.FlowCollectorKafka, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(in *v1beta2.FlowCollectorKafka, out *FlowCollectorKafka, s conversion.Scope) error { - out.Address = in.Address - out.Topic = in.Topic - if err := Convert_v1beta2_ClientTLS_To_v1alpha1_ClientTLS(&in.TLS, &out.TLS, s); err != nil { - return err - } - if err := Convert_v1beta2_SASLConfig_To_v1alpha1_SASLConfig(&in.SASL, &out.SASL, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka is an autogenerated conversion function. 
-func Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(in *v1beta2.FlowCollectorKafka, out *FlowCollectorKafka, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(in, out, s) -} - -func autoConvert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList(in *FlowCollectorList, out *v1beta2.FlowCollectorList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1beta2.FlowCollector, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_FlowCollector_To_v1beta2_FlowCollector(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList(in *FlowCollectorList, out *v1beta2.FlowCollectorList, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorList_To_v1beta2_FlowCollectorList(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList(in *v1beta2.FlowCollectorList, out *FlowCollectorList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]FlowCollector, len(*in)) - for i := range *in { - if err := Convert_v1beta2_FlowCollector_To_v1alpha1_FlowCollector(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList is an autogenerated conversion function. 
-func Convert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList(in *v1beta2.FlowCollectorList, out *FlowCollectorList, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorList_To_v1alpha1_FlowCollectorList(in, out, s) -} - -func autoConvert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(in *FlowCollectorLoki, out *v1beta2.FlowCollectorLoki, s conversion.Scope) error { - // WARNING: in.URL requires manual conversion: does not exist in peer-type - // WARNING: in.QuerierURL requires manual conversion: does not exist in peer-type - // WARNING: in.StatusURL requires manual conversion: does not exist in peer-type - // WARNING: in.TenantID requires manual conversion: does not exist in peer-type - // WARNING: in.AuthToken requires manual conversion: does not exist in peer-type - // WARNING: in.BatchWait requires manual conversion: does not exist in peer-type - // WARNING: in.BatchSize requires manual conversion: does not exist in peer-type - // WARNING: in.Timeout requires manual conversion: does not exist in peer-type - // WARNING: in.MinBackoff requires manual conversion: does not exist in peer-type - // WARNING: in.MaxBackoff requires manual conversion: does not exist in peer-type - // WARNING: in.MaxRetries requires manual conversion: does not exist in peer-type - // WARNING: in.StaticLabels requires manual conversion: does not exist in peer-type - // WARNING: in.TLS requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(in *v1beta2.FlowCollectorLoki, out *FlowCollectorLoki, s conversion.Scope) error { - // WARNING: in.Enable requires manual conversion: does not exist in peer-type - // WARNING: in.Mode requires manual conversion: does not exist in peer-type - // WARNING: in.Manual requires manual conversion: does not exist in peer-type - // WARNING: in.Microservices requires manual conversion: does not exist in peer-type - // WARNING: 
in.Monolithic requires manual conversion: does not exist in peer-type - // WARNING: in.LokiStack requires manual conversion: does not exist in peer-type - // WARNING: in.ReadTimeout requires manual conversion: does not exist in peer-type - // WARNING: in.WriteTimeout requires manual conversion: does not exist in peer-type - // WARNING: in.WriteBatchWait requires manual conversion: does not exist in peer-type - // WARNING: in.WriteBatchSize requires manual conversion: does not exist in peer-type - // WARNING: in.Advanced requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1alpha1_FlowCollectorSpec_To_v1beta2_FlowCollectorSpec(in *FlowCollectorSpec, out *v1beta2.FlowCollectorSpec, s conversion.Scope) error { - out.Namespace = in.Namespace - if err := Convert_v1alpha1_FlowCollectorAgent_To_v1beta2_FlowCollectorAgent(&in.Agent, &out.Agent, s); err != nil { - return err - } - if err := Convert_v1alpha1_FlowCollectorFLP_To_v1beta2_FlowCollectorFLP(&in.Processor, &out.Processor, s); err != nil { - return err - } - if err := Convert_v1alpha1_FlowCollectorLoki_To_v1beta2_FlowCollectorLoki(&in.Loki, &out.Loki, s); err != nil { - return err - } - if err := Convert_v1alpha1_FlowCollectorConsolePlugin_To_v1beta2_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil { - return err - } - out.DeploymentModel = v1beta2.FlowCollectorDeploymentModel(in.DeploymentModel) - if err := Convert_v1alpha1_FlowCollectorKafka_To_v1beta2_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { - return err - } - // INFO: in.Exporters opted out of conversion generation - return nil -} - -func autoConvert_v1beta2_FlowCollectorSpec_To_v1alpha1_FlowCollectorSpec(in *v1beta2.FlowCollectorSpec, out *FlowCollectorSpec, s conversion.Scope) error { - out.Namespace = in.Namespace - if err := Convert_v1beta2_FlowCollectorAgent_To_v1alpha1_FlowCollectorAgent(&in.Agent, &out.Agent, s); err != nil { - return err - } - if err := 
Convert_v1beta2_FlowCollectorFLP_To_v1alpha1_FlowCollectorFLP(&in.Processor, &out.Processor, s); err != nil { - return err - } - if err := Convert_v1beta2_FlowCollectorLoki_To_v1alpha1_FlowCollectorLoki(&in.Loki, &out.Loki, s); err != nil { - return err - } - if err := Convert_v1beta2_FlowCollectorConsolePlugin_To_v1alpha1_FlowCollectorConsolePlugin(&in.ConsolePlugin, &out.ConsolePlugin, s); err != nil { - return err - } - out.DeploymentModel = string(in.DeploymentModel) - if err := Convert_v1beta2_FlowCollectorKafka_To_v1alpha1_FlowCollectorKafka(&in.Kafka, &out.Kafka, s); err != nil { - return err - } - // INFO: in.Exporters opted out of conversion generation - return nil -} - -func autoConvert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(in *FlowCollectorStatus, out *v1beta2.FlowCollectorStatus, s conversion.Scope) error { - out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions)) - out.Namespace = in.Namespace - return nil -} - -// Convert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus is an autogenerated conversion function. -func Convert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(in *FlowCollectorStatus, out *v1beta2.FlowCollectorStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_FlowCollectorStatus_To_v1beta2_FlowCollectorStatus(in, out, s) -} - -func autoConvert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(in *v1beta2.FlowCollectorStatus, out *FlowCollectorStatus, s conversion.Scope) error { - out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions)) - out.Namespace = in.Namespace - return nil -} - -// Convert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus is an autogenerated conversion function. 
-func Convert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(in *v1beta2.FlowCollectorStatus, out *FlowCollectorStatus, s conversion.Scope) error { - return autoConvert_v1beta2_FlowCollectorStatus_To_v1alpha1_FlowCollectorStatus(in, out, s) -} - -func autoConvert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(in *MetricsServerConfig, out *v1beta2.MetricsServerConfig, s conversion.Scope) error { - out.Port = in.Port - if err := Convert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(&in.TLS, &out.TLS, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig is an autogenerated conversion function. -func Convert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(in *MetricsServerConfig, out *v1beta2.MetricsServerConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_MetricsServerConfig_To_v1beta2_MetricsServerConfig(in, out, s) -} - -func autoConvert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(in *v1beta2.MetricsServerConfig, out *MetricsServerConfig, s conversion.Scope) error { - out.Port = in.Port - if err := Convert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(&in.TLS, &out.TLS, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig is an autogenerated conversion function. 
-func Convert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(in *v1beta2.MetricsServerConfig, out *MetricsServerConfig, s conversion.Scope) error { - return autoConvert_v1beta2_MetricsServerConfig_To_v1alpha1_MetricsServerConfig(in, out, s) -} - -func autoConvert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(in *OVNKubernetesConfig, out *v1beta2.OVNKubernetesConfig, s conversion.Scope) error { - out.Namespace = in.Namespace - out.DaemonSetName = in.DaemonSetName - out.ContainerName = in.ContainerName - return nil -} - -// Convert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig is an autogenerated conversion function. -func Convert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(in *OVNKubernetesConfig, out *v1beta2.OVNKubernetesConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_OVNKubernetesConfig_To_v1beta2_OVNKubernetesConfig(in, out, s) -} - -func autoConvert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(in *v1beta2.OVNKubernetesConfig, out *OVNKubernetesConfig, s conversion.Scope) error { - out.Namespace = in.Namespace - out.DaemonSetName = in.DaemonSetName - out.ContainerName = in.ContainerName - return nil -} - -// Convert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig is an autogenerated conversion function. -func Convert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(in *v1beta2.OVNKubernetesConfig, out *OVNKubernetesConfig, s conversion.Scope) error { - return autoConvert_v1beta2_OVNKubernetesConfig_To_v1alpha1_OVNKubernetesConfig(in, out, s) -} - -func autoConvert_v1alpha1_QuickFilter_To_v1beta2_QuickFilter(in *QuickFilter, out *v1beta2.QuickFilter, s conversion.Scope) error { - out.Name = in.Name - out.Filter = *(*map[string]string)(unsafe.Pointer(&in.Filter)) - out.Default = in.Default - return nil -} - -// Convert_v1alpha1_QuickFilter_To_v1beta2_QuickFilter is an autogenerated conversion function. 
-func Convert_v1alpha1_QuickFilter_To_v1beta2_QuickFilter(in *QuickFilter, out *v1beta2.QuickFilter, s conversion.Scope) error { - return autoConvert_v1alpha1_QuickFilter_To_v1beta2_QuickFilter(in, out, s) -} - -func autoConvert_v1beta2_QuickFilter_To_v1alpha1_QuickFilter(in *v1beta2.QuickFilter, out *QuickFilter, s conversion.Scope) error { - out.Name = in.Name - out.Filter = *(*map[string]string)(unsafe.Pointer(&in.Filter)) - out.Default = in.Default - return nil -} - -// Convert_v1beta2_QuickFilter_To_v1alpha1_QuickFilter is an autogenerated conversion function. -func Convert_v1beta2_QuickFilter_To_v1alpha1_QuickFilter(in *v1beta2.QuickFilter, out *QuickFilter, s conversion.Scope) error { - return autoConvert_v1beta2_QuickFilter_To_v1alpha1_QuickFilter(in, out, s) -} - -func autoConvert_v1alpha1_SASLConfig_To_v1beta2_SASLConfig(in *SASLConfig, out *v1beta2.SASLConfig, s conversion.Scope) error { - out.Type = v1beta2.SASLType(in.Type) - if err := Convert_v1alpha1_FileReference_To_v1beta2_FileReference(&in.ClientIDReference, &out.ClientIDReference, s); err != nil { - return err - } - if err := Convert_v1alpha1_FileReference_To_v1beta2_FileReference(&in.ClientSecretReference, &out.ClientSecretReference, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta2_SASLConfig_To_v1alpha1_SASLConfig(in *v1beta2.SASLConfig, out *SASLConfig, s conversion.Scope) error { - out.Type = SASLType(in.Type) - if err := Convert_v1beta2_FileReference_To_v1alpha1_FileReference(&in.ClientIDReference, &out.ClientIDReference, s); err != nil { - return err - } - if err := Convert_v1beta2_FileReference_To_v1alpha1_FileReference(&in.ClientSecretReference, &out.ClientSecretReference, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1alpha1_ServerTLS_To_v1beta2_ServerTLS(in *ServerTLS, out *v1beta2.ServerTLS, s conversion.Scope) error { - out.Type = v1beta2.ServerTLSConfigType(in.Type) - out.Provided = 
(*v1beta2.CertificateReference)(unsafe.Pointer(in.Provided)) - return nil -} - -func autoConvert_v1beta2_ServerTLS_To_v1alpha1_ServerTLS(in *v1beta2.ServerTLS, out *ServerTLS, s conversion.Scope) error { - out.Type = ServerTLSConfigType(in.Type) - out.Provided = (*CertificateReference)(unsafe.Pointer(in.Provided)) - // WARNING: in.InsecureSkipVerify requires manual conversion: does not exist in peer-type - // WARNING: in.ProvidedCaFile requires manual conversion: does not exist in peer-type - return nil -} diff --git a/apis/flowcollector/v1alpha1/zz_generated.deepcopy.go b/apis/flowcollector/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 921d44bf7..000000000 --- a/apis/flowcollector/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,570 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright 2021. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CertificateReference) DeepCopyInto(out *CertificateReference) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateReference. 
-func (in *CertificateReference) DeepCopy() *CertificateReference { - if in == nil { - return nil - } - out := new(CertificateReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClientTLS) DeepCopyInto(out *ClientTLS) { - *out = *in - out.CACert = in.CACert - out.UserCert = in.UserCert -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientTLS. -func (in *ClientTLS) DeepCopy() *ClientTLS { - if in == nil { - return nil - } - out := new(ClientTLS) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterNetworkOperatorConfig) DeepCopyInto(out *ClusterNetworkOperatorConfig) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkOperatorConfig. -func (in *ClusterNetworkOperatorConfig) DeepCopy() *ClusterNetworkOperatorConfig { - if in == nil { - return nil - } - out := new(ClusterNetworkOperatorConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConsolePluginPortConfig) DeepCopyInto(out *ConsolePluginPortConfig) { - *out = *in - if in.PortNames != nil { - in, out := &in.PortNames, &out.PortNames - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePluginPortConfig. -func (in *ConsolePluginPortConfig) DeepCopy() *ConsolePluginPortConfig { - if in == nil { - return nil - } - out := new(ConsolePluginPortConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *DebugConfig) DeepCopyInto(out *DebugConfig) { - *out = *in - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DebugConfig. -func (in *DebugConfig) DeepCopy() *DebugConfig { - if in == nil { - return nil - } - out := new(DebugConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FLPMetrics) DeepCopyInto(out *FLPMetrics) { - *out = *in - in.Server.DeepCopyInto(&out.Server) - if in.IgnoreTags != nil { - in, out := &in.IgnoreTags, &out.IgnoreTags - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FLPMetrics. -func (in *FLPMetrics) DeepCopy() *FLPMetrics { - if in == nil { - return nil - } - out := new(FLPMetrics) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FileReference) DeepCopyInto(out *FileReference) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileReference. -func (in *FileReference) DeepCopy() *FileReference { - if in == nil { - return nil - } - out := new(FileReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *FlowCollector) DeepCopyInto(out *FlowCollector) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollector. -func (in *FlowCollector) DeepCopy() *FlowCollector { - if in == nil { - return nil - } - out := new(FlowCollector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *FlowCollector) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorAgent) DeepCopyInto(out *FlowCollectorAgent) { - *out = *in - out.IPFIX = in.IPFIX - in.EBPF.DeepCopyInto(&out.EBPF) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorAgent. -func (in *FlowCollectorAgent) DeepCopy() *FlowCollectorAgent { - if in == nil { - return nil - } - out := new(FlowCollectorAgent) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorConsolePlugin) DeepCopyInto(out *FlowCollectorConsolePlugin) { - *out = *in - in.Resources.DeepCopyInto(&out.Resources) - in.Autoscaler.DeepCopyInto(&out.Autoscaler) - in.PortNaming.DeepCopyInto(&out.PortNaming) - if in.QuickFilters != nil { - in, out := &in.QuickFilters, &out.QuickFilters - *out = make([]QuickFilter, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorConsolePlugin. 
-func (in *FlowCollectorConsolePlugin) DeepCopy() *FlowCollectorConsolePlugin { - if in == nil { - return nil - } - out := new(FlowCollectorConsolePlugin) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorEBPF) DeepCopyInto(out *FlowCollectorEBPF) { - *out = *in - in.Resources.DeepCopyInto(&out.Resources) - if in.Sampling != nil { - in, out := &in.Sampling, &out.Sampling - *out = new(int32) - **out = **in - } - if in.Interfaces != nil { - in, out := &in.Interfaces, &out.Interfaces - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ExcludeInterfaces != nil { - in, out := &in.ExcludeInterfaces, &out.ExcludeInterfaces - *out = make([]string, len(*in)) - copy(*out, *in) - } - in.Debug.DeepCopyInto(&out.Debug) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorEBPF. -func (in *FlowCollectorEBPF) DeepCopy() *FlowCollectorEBPF { - if in == nil { - return nil - } - out := new(FlowCollectorEBPF) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorExporter) DeepCopyInto(out *FlowCollectorExporter) { - *out = *in - out.Kafka = in.Kafka - out.IPFIX = in.IPFIX -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorExporter. -func (in *FlowCollectorExporter) DeepCopy() *FlowCollectorExporter { - if in == nil { - return nil - } - out := new(FlowCollectorExporter) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *FlowCollectorFLP) DeepCopyInto(out *FlowCollectorFLP) { - *out = *in - in.Metrics.DeepCopyInto(&out.Metrics) - in.Resources.DeepCopyInto(&out.Resources) - in.KafkaConsumerAutoscaler.DeepCopyInto(&out.KafkaConsumerAutoscaler) - in.Debug.DeepCopyInto(&out.Debug) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorFLP. -func (in *FlowCollectorFLP) DeepCopy() *FlowCollectorFLP { - if in == nil { - return nil - } - out := new(FlowCollectorFLP) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorHPA) DeepCopyInto(out *FlowCollectorHPA) { - *out = *in - if in.MinReplicas != nil { - in, out := &in.MinReplicas, &out.MinReplicas - *out = new(int32) - **out = **in - } - if in.Metrics != nil { - in, out := &in.Metrics, &out.Metrics - *out = make([]v2.MetricSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorHPA. -func (in *FlowCollectorHPA) DeepCopy() *FlowCollectorHPA { - if in == nil { - return nil - } - out := new(FlowCollectorHPA) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorIPFIX) DeepCopyInto(out *FlowCollectorIPFIX) { - *out = *in - out.ClusterNetworkOperator = in.ClusterNetworkOperator - out.OVNKubernetes = in.OVNKubernetes -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorIPFIX. -func (in *FlowCollectorIPFIX) DeepCopy() *FlowCollectorIPFIX { - if in == nil { - return nil - } - out := new(FlowCollectorIPFIX) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *FlowCollectorIPFIXReceiver) DeepCopyInto(out *FlowCollectorIPFIXReceiver) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorIPFIXReceiver. -func (in *FlowCollectorIPFIXReceiver) DeepCopy() *FlowCollectorIPFIXReceiver { - if in == nil { - return nil - } - out := new(FlowCollectorIPFIXReceiver) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorKafka) DeepCopyInto(out *FlowCollectorKafka) { - *out = *in - out.TLS = in.TLS - out.SASL = in.SASL -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorKafka. -func (in *FlowCollectorKafka) DeepCopy() *FlowCollectorKafka { - if in == nil { - return nil - } - out := new(FlowCollectorKafka) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorList) DeepCopyInto(out *FlowCollectorList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]FlowCollector, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorList. -func (in *FlowCollectorList) DeepCopy() *FlowCollectorList { - if in == nil { - return nil - } - out := new(FlowCollectorList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *FlowCollectorList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorLoki) DeepCopyInto(out *FlowCollectorLoki) { - *out = *in - out.BatchWait = in.BatchWait - out.Timeout = in.Timeout - out.MinBackoff = in.MinBackoff - out.MaxBackoff = in.MaxBackoff - if in.StaticLabels != nil { - in, out := &in.StaticLabels, &out.StaticLabels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - out.TLS = in.TLS -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorLoki. -func (in *FlowCollectorLoki) DeepCopy() *FlowCollectorLoki { - if in == nil { - return nil - } - out := new(FlowCollectorLoki) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlowCollectorSpec) DeepCopyInto(out *FlowCollectorSpec) { - *out = *in - in.Agent.DeepCopyInto(&out.Agent) - in.Processor.DeepCopyInto(&out.Processor) - in.Loki.DeepCopyInto(&out.Loki) - in.ConsolePlugin.DeepCopyInto(&out.ConsolePlugin) - out.Kafka = in.Kafka - if in.Exporters != nil { - in, out := &in.Exporters, &out.Exporters - *out = make([]*FlowCollectorExporter, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(FlowCollectorExporter) - **out = **in - } - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorSpec. -func (in *FlowCollectorSpec) DeepCopy() *FlowCollectorSpec { - if in == nil { - return nil - } - out := new(FlowCollectorSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *FlowCollectorStatus) DeepCopyInto(out *FlowCollectorStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowCollectorStatus. -func (in *FlowCollectorStatus) DeepCopy() *FlowCollectorStatus { - if in == nil { - return nil - } - out := new(FlowCollectorStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MetricsServerConfig) DeepCopyInto(out *MetricsServerConfig) { - *out = *in - in.TLS.DeepCopyInto(&out.TLS) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsServerConfig. -func (in *MetricsServerConfig) DeepCopy() *MetricsServerConfig { - if in == nil { - return nil - } - out := new(MetricsServerConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OVNKubernetesConfig) DeepCopyInto(out *OVNKubernetesConfig) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OVNKubernetesConfig. -func (in *OVNKubernetesConfig) DeepCopy() *OVNKubernetesConfig { - if in == nil { - return nil - } - out := new(OVNKubernetesConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *QuickFilter) DeepCopyInto(out *QuickFilter) { - *out = *in - if in.Filter != nil { - in, out := &in.Filter, &out.Filter - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuickFilter. -func (in *QuickFilter) DeepCopy() *QuickFilter { - if in == nil { - return nil - } - out := new(QuickFilter) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SASLConfig) DeepCopyInto(out *SASLConfig) { - *out = *in - out.ClientIDReference = in.ClientIDReference - out.ClientSecretReference = in.ClientSecretReference -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SASLConfig. -func (in *SASLConfig) DeepCopy() *SASLConfig { - if in == nil { - return nil - } - out := new(SASLConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServerTLS) DeepCopyInto(out *ServerTLS) { - *out = *in - if in.Provided != nil { - in, out := &in.Provided, &out.Provided - *out = new(CertificateReference) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerTLS. -func (in *ServerTLS) DeepCopy() *ServerTLS { - if in == nil { - return nil - } - out := new(ServerTLS) - in.DeepCopyInto(out) - return out -} diff --git a/apis/flowcollector/v1beta1/doc.go b/apis/flowcollector/v1beta1/doc.go index 2bb94b776..42ca5cfc4 100644 --- a/apis/flowcollector/v1beta1/doc.go +++ b/apis/flowcollector/v1beta1/doc.go @@ -13,4 +13,6 @@ limitations under the License. // Package v1beta1 contains the v1beta1 API implementation. 
// +k8s:conversion-gen=github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2 +// +// Deprecated: This package will be removed in one of the next releases. package v1beta1 diff --git a/apis/flowcollector/v1beta1/flowcollector_types.go b/apis/flowcollector/v1beta1/flowcollector_types.go index 2ccee38a9..a53e39399 100644 --- a/apis/flowcollector/v1beta1/flowcollector_types.go +++ b/apis/flowcollector/v1beta1/flowcollector_types.go @@ -875,7 +875,7 @@ type FlowCollectorStatus struct { // +kubebuilder:printcolumn:name="Sampling (EBPF)",type="string",JSONPath=`.spec.agent.ebpf.sampling` // +kubebuilder:printcolumn:name="Deployment Model",type="string",JSONPath=`.spec.deploymentModel` // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type=="Ready")].reason` -// +kubebuilder:storageversion +// +kubebuilder:deprecatedversion // `FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments. type FlowCollector struct { metav1.TypeMeta `json:",inline"` diff --git a/apis/flowcollector/v1beta2/flowcollector_types.go b/apis/flowcollector/v1beta2/flowcollector_types.go index 1b03c3ce4..00f4f24a1 100644 --- a/apis/flowcollector/v1beta2/flowcollector_types.go +++ b/apis/flowcollector/v1beta2/flowcollector_types.go @@ -1027,6 +1027,7 @@ type FlowCollectorStatus struct { // +kubebuilder:printcolumn:name="Sampling (EBPF)",type="string",JSONPath=`.spec.agent.ebpf.sampling` // +kubebuilder:printcolumn:name="Deployment Model",type="string",JSONPath=`.spec.deploymentModel` // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type=="Ready")].reason` +// +kubebuilder:storageversion // `FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments. 
type FlowCollector struct { metav1.TypeMeta `json:",inline"` diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml index d7b0df627..9d4e478dc 100644 --- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml +++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml @@ -15,7 +15,6 @@ spec: namespace: netobserv path: /convert conversionReviewVersions: - - v1alpha1 - v1beta1 - v1beta2 group: flows.netobserv.io @@ -40,2429 +39,6 @@ spec: name: Status type: string deprecated: true - name: v1alpha1 - schema: - openAPIV3Schema: - description: "FlowCollector is the Schema for the flowcollectors API, which - pilots and configures netflow collection. \n Deprecated: This package will - be removed in one of the next releases." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - properties: - name: - pattern: ^cluster$ - type: string - type: object - spec: - description: FlowCollectorSpec defines the desired state of FlowCollector - properties: - agent: - default: - type: EBPF - description: agent for flows extraction. - properties: - ebpf: - description: ebpf describes the settings related to the eBPF-based - flow reporter when the "agent.type" property is set to "EBPF". 
- properties: - cacheActiveTimeout: - default: 5s - description: cacheActiveTimeout is the max period during which - the reporter will aggregate flows before sending. Increasing - `cacheMaxFlows` and `cacheActiveTimeout` can decrease the - network traffic overhead and the CPU load, however you can - expect higher memory consumption and an increased latency - in the flow collection. - pattern: ^\d+(ns|ms|s|m)?$ - type: string - cacheMaxFlows: - default: 100000 - description: cacheMaxFlows is the max number of flows in an - aggregate; when reached, the reporter sends the flows. Increasing - `cacheMaxFlows` and `cacheActiveTimeout` can decrease the - network traffic overhead and the CPU load, however you can - expect higher memory consumption and an increased latency - in the flow collection. - format: int32 - minimum: 1 - type: integer - debug: - description: Debug allows setting some aspects of the internal - configuration of the eBPF agent. This section is aimed exclusively - for debugging and fine-grained performance optimizations - (for example GOGC, GOMAXPROCS env vars). Users setting its - values do it at their own risk. - properties: - env: - additionalProperties: - type: string - description: env allows passing custom environment variables - to the NetObserv Agent. Useful for passing some very - concrete performance-tuning options (such as GOGC, GOMAXPROCS) - that shouldn't be publicly exposed as part of the FlowCollector - descriptor, as they are only useful in edge debug and - support scenarios. - type: object - type: object - excludeInterfaces: - default: - - lo - description: excludeInterfaces contains the interface names - that will be excluded from flow tracing. If an entry is - enclosed by slashes (such as `/br-/`), it will match as - regular expression, otherwise it will be matched as a case-sensitive - string. 
- items: - type: string - type: array - imagePullPolicy: - default: IfNotPresent - description: imagePullPolicy is the Kubernetes pull policy - for the image defined above - enum: - - IfNotPresent - - Always - - Never - type: string - interfaces: - description: interfaces contains the interface names from - where flows will be collected. If empty, the agent will - fetch all the interfaces in the system, excepting the ones - listed in ExcludeInterfaces. If an entry is enclosed by - slashes (such as `/br-/`), it will match as regular expression, - otherwise it will be matched as a case-sensitive string. - items: - type: string - type: array - kafkaBatchSize: - default: 1048576 - description: 'kafkaBatchSize limits the maximum size of a - request in bytes before being sent to a partition. Ignored - when not using Kafka. Default: 1MB.' - type: integer - logLevel: - default: info - description: logLevel defines the log level for the NetObserv - eBPF Agent - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - privileged: - description: 'privileged mode for the eBPF Agent container. - In general this setting can be ignored or set to false: - in that case, the operator will set granular capabilities - (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, - to enable its correct operation. If for some reason these - capabilities cannot be set (for example old kernel version - not knowing CAP_BPF) then you can turn on this mode for - more global privileges.' - type: boolean - resources: - default: - limits: - memory: 800Mi - requests: - cpu: 100m - memory: 50Mi - description: 'resources are the compute resources required - by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. 
- \n This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. \n This field - is immutable. It can only be set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry - in pod.spec.resourceClaims of the Pod where this - field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - sampling: - default: 50 - description: sampling rate of the flow reporter. 100 means - one flow on 100 is sent. 0 or 1 means all flows are sampled. - format: int32 - minimum: 0 - type: integer - type: object - ipfix: - description: ipfix describes the settings related to the IPFIX-based - flow reporter when the "agent.type" property is set to "IPFIX". 
- properties: - cacheActiveTimeout: - default: 20s - description: cacheActiveTimeout is the max period during which - the reporter will aggregate flows before sending - pattern: ^\d+(ns|ms|s|m)?$ - type: string - cacheMaxFlows: - default: 400 - description: cacheMaxFlows is the max number of flows in an - aggregate; when reached, the reporter sends the flows - format: int32 - minimum: 0 - type: integer - clusterNetworkOperator: - description: clusterNetworkOperator defines the settings related - to the OpenShift Cluster Network Operator, when available. - properties: - namespace: - default: openshift-network-operator - description: namespace where the config map is going - to be deployed. - type: string - type: object - forceSampleAll: - default: false - description: forceSampleAll allows disabling sampling in the - IPFIX-based flow reporter. It is not recommended to sample - all the traffic with IPFIX, as it might generate cluster - instability. If you REALLY want to do that, set this flag - to true. Use at your own risk. When it is set to true, the - value of "sampling" is ignored. - type: boolean - ovnKubernetes: - description: ovnKubernetes defines the settings of the OVN-Kubernetes - CNI, when available. This configuration is used when using - OVN's IPFIX exports, without OpenShift. When using OpenShift, - refer to the `clusterNetworkOperator` property instead. - properties: - containerName: - default: ovnkube-node - description: containerName defines the name of the container - to configure for IPFIX. - type: string - daemonSetName: - default: ovnkube-node - description: daemonSetName defines the name of the DaemonSet - controlling the OVN-Kubernetes pods. - type: string - namespace: - default: ovn-kubernetes - description: namespace where OVN-Kubernetes pods are deployed. - type: string - type: object - sampling: - default: 400 - description: sampling is the sampling rate on the reporter. - 100 means one flow on 100 is sent. 
To ensure cluster stability, - it is not possible to set a value below 2. If you really - want to sample every packet, which might impact the cluster - stability, refer to "forceSampleAll". Alternatively, you - can use the eBPF Agent instead of IPFIX. - format: int32 - minimum: 2 - type: integer - type: object - type: - default: EBPF - description: type selects the flows tracing agent. Possible values - are "EBPF" (default) to use NetObserv eBPF agent, "IPFIX" to - use the legacy IPFIX collector. "EBPF" is recommended in most - cases as it offers better performances and should work regardless - of the CNI installed on the cluster. "IPFIX" works with OVN-Kubernetes - CNI (other CNIs could work if they support exporting IPFIX, - but they would require manual configuration). - enum: - - EBPF - - IPFIX - type: string - required: - - type - type: object - consolePlugin: - description: consolePlugin defines the settings related to the OpenShift - Console plugin, when available. - properties: - autoscaler: - description: autoscaler spec of a horizontal pod autoscaler to - set up for the plugin Deployment. - properties: - maxReplicas: - default: 3 - description: maxReplicas is the upper limit for the number - of pods that can be set by the autoscaler; cannot be smaller - than MinReplicas. - format: int32 - type: integer - metrics: - description: metrics used by the pod autoscaler - items: - description: MetricSpec specifies how to scale based on - a single metric (only `type` and one other matching field - should be set at once). - properties: - containerResource: - description: containerResource refers to a resource - metric (such as those specified in requests and limits) - known to Kubernetes describing a single container - in each pod of the current scale target (e.g. CPU - or memory). Such metrics are built in to Kubernetes, - and have special scaling options on top of those available - to normal per-pod metrics using the "pods" source. 
- This is an alpha feature and can be enabled by the - HPAContainerMetrics feature flag. - properties: - container: - description: container is the name of the container - in the pods of the scaling target - type: string - name: - description: name is the name of the resource in - question. - type: string - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - container - - name - - target - type: object - external: - description: external refers to a global metric that - is not associated with any Kubernetes object. It allows - autoscaling based on information coming from components - running outside of cluster (for example length of - queue in cloud messaging service, or QPS from loadbalancer - running outside of cluster). 
- properties: - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. 
- type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - object: - description: object refers to a metric describing a - single kubernetes object (for example, hits-per-second - on an Ingress object). 
- properties: - describedObject: - description: describedObject specifies the descriptions - of a object,such as kind,name apiVersion - properties: - apiVersion: - description: apiVersion is the API version of - the referent - type: string - kind: - description: 'kind is the kind of the referent; - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'name is the name of the referent; - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - kind - - name - type: object - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - describedObject - - metric - - target - type: object - pods: - description: pods refers to a metric describing each - pod in the current scale target (for example, transactions-processed-per-second). The - values will be averaged together before being compared - to the target value. 
- properties: - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. 
- type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - resource: - description: resource refers to a resource metric (such - as those specified in requests and limits) known to - Kubernetes describing each pod in the current scale - target (e.g. CPU or memory). Such metrics are built - in to Kubernetes, and have special scaling options - on top of those available to normal per-pod metrics - using the "pods" source. - properties: - name: - description: name is the name of the resource in - question. 
- type: string - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - name - - target - type: object - type: - description: 'type is the type of metric source. It - should be one of "ContainerResource", "External", - "Object", "Pods" or "Resource", each mapping to a - matching field in the object. Note: "ContainerResource" - type is available on when the feature-gate HPAContainerMetrics - is enabled' - type: string - required: - - type - type: object - type: array - minReplicas: - description: minReplicas is the lower limit for the number - of replicas to which the autoscaler can scale down. It - defaults to 1 pod. minReplicas is allowed to be 0 if the - alpha feature gate HPAScaleToZero is enabled and at least - one Object or External metric is configured. Scaling is - active as long as at least one metric value is available. 
- format: int32 - type: integer - status: - default: DISABLED - description: Status describe the desired status regarding - deploying an horizontal pod autoscaler DISABLED will not - deploy an horizontal pod autoscaler ENABLED will deploy - an horizontal pod autoscaler - enum: - - DISABLED - - ENABLED - type: string - type: object - imagePullPolicy: - default: IfNotPresent - description: imagePullPolicy is the Kubernetes pull policy for - the image defined above - enum: - - IfNotPresent - - Always - - Never - type: string - logLevel: - default: info - description: logLevel for the console plugin backend - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - port: - default: 9001 - description: port is the plugin service port - format: int32 - maximum: 65535 - minimum: 1 - type: integer - portNaming: - default: - enable: true - description: portNaming defines the configuration of the port-to-service - name translation - properties: - enable: - default: true - description: enable the console plugin port-to-service name - translation - type: boolean - portNames: - additionalProperties: - type: string - description: 'portNames defines additional port names to use - in the console. 
Example: portNames: {"3100": "loki"}' - type: object - type: object - quickFilters: - default: - - default: true - filter: - dst_namespace!: openshift-,netobserv - src_namespace!: openshift-,netobserv - name: Applications - - filter: - dst_namespace: openshift-,netobserv - src_namespace: openshift-,netobserv - name: Infrastructure - - default: true - filter: - dst_kind: Pod - src_kind: Pod - name: Pods network - - filter: - dst_kind: Service - name: Services network - description: quickFilters configures quick filter presets for - the Console plugin - items: - description: QuickFilter defines preset configuration for Console's - quick filters - properties: - default: - description: default defines whether this filter should - be active by default or not - type: boolean - filter: - additionalProperties: - type: string - description: 'filter is a set of keys and values to be set - when this filter is selected. Each key can relate to a - list of values using a coma-separated string. Example: - filter: {"src_namespace": "namespace1,namespace2"}' - type: object - name: - description: name of the filter, that will be displayed - in Console - type: string - required: - - filter - - name - type: object - type: array - register: - default: true - description: 'register allows, when set to true, to automatically - register the provided console plugin with the OpenShift Console - operator. When set to false, you can still register it manually - by editing console.operator.openshift.io/cluster. E.g: oc patch - console.operator.openshift.io cluster --type=''json'' -p ''[{"op": - "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]''' - type: boolean - replicas: - default: 1 - description: replicas defines the number of replicas (pods) to - start. - format: int32 - minimum: 0 - type: integer - resources: - default: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 50Mi - description: 'resources, in terms of compute resources, required - by this container. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only be - set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in - pod.spec.resourceClaims of the Pod where this field - is used. It makes that resource available inside a - container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed - Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - required: - - register - type: object - deploymentModel: - default: DIRECT - description: deploymentModel defines the desired type of deployment - for flow processing. Possible values are "DIRECT" (default) to make - the flow processor listening directly from the agents, or "KAFKA" - to make flows sent to a Kafka pipeline before consumption by the - processor. Kafka can provide better scalability, resiliency and - high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka). - enum: - - DIRECT - - KAFKA - type: string - exporters: - description: exporters defines additional optional exporters for custom - consumption or storage. This is an experimental feature. Currently, - only KAFKA exporter is available. - items: - description: FlowCollectorExporter defines an additional exporter - to send enriched flows to - properties: - ipfix: - description: IPFIX configuration, such as the IP address and - port to send enriched IPFIX flows to. - properties: - targetHost: - default: "" - description: Address of the IPFIX external receiver - type: string - targetPort: - description: Port for the IPFIX external receiver - type: integer - transport: - description: Transport protocol (`TCP` or `UDP`) to be used - for the IPFIX connection, defaults to `TCP`. - enum: - - TCP - - UDP - type: string - required: - - targetHost - - targetPort - type: object - kafka: - description: kafka configuration, such as address or topic, - to send enriched flows to. - properties: - address: - default: "" - description: address of the Kafka server - type: string - sasl: - description: SASL authentication configuration. [Unsupported - (*)]. 
- properties: - clientIDReference: - description: Reference to the secret or config map containing - the client ID - properties: - file: - description: File name within the config map or - secret - type: string - name: - description: Name of the config map or secret containing - the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret - containing the file. If omitted, the default is - to use the same namespace as where NetObserv is - deployed. If the namespace is different, the config - map or the secret is copied so that it can be - mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - clientSecretReference: - description: Reference to the secret or config map containing - the client secret - properties: - file: - description: File name within the config map or - secret - type: string - name: - description: Name of the config map or secret containing - the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret - containing the file. If omitted, the default is - to use the same namespace as where NetObserv is - deployed. If the namespace is different, the config - map or the secret is copied so that it can be - mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: DISABLED - description: Type of SASL authentication to use, or - `DISABLED` if SASL is not used - enum: - - DISABLED - - PLAIN - - SCRAM-SHA512 - type: string - type: object - tls: - description: tls client configuration. When using TLS, verify - that the address matches the Kafka port used for TLS, - generally 9093. 
Note that, when eBPF agents are used, - Kafka certificate needs to be copied in the agent namespace - (by default it's netobserv-privileged). - properties: - caCert: - description: caCert defines the reference of the certificate - for the Certificate Authority - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or - secret. Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret - containing certificates. If omitted, assumes same - namespace as where NetObserv is deployed. If the - namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. - type: string - type: - description: 'type for the certificate reference: - "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: enable TLS - type: boolean - insecureSkipVerify: - default: false - description: insecureSkipVerify allows skipping client-side - verification of the server certificate If set to true, - CACert field will be ignored - type: boolean - userCert: - description: userCert defines the user certificate reference, - used for mTLS (you can ignore it when using regular, - one-way TLS) - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or - secret. Omit when the key is not necessary. 
- type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret - containing certificates. If omitted, assumes same - namespace as where NetObserv is deployed. If the - namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. - type: string - type: - description: 'type for the certificate reference: - "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: object - topic: - default: "" - description: kafka topic to use. It must exist, NetObserv - will not create it. - type: string - required: - - address - - topic - type: object - type: - description: '`type` selects the type of exporters. The available - options are `KAFKA` and `IPFIX`.' - enum: - - KAFKA - - IPFIX - type: string - required: - - type - type: object - type: array - kafka: - description: kafka configuration, allowing to use Kafka as a broker - as part of the flow collection pipeline. Available when the "spec.deploymentModel" - is "KAFKA". - properties: - address: - default: "" - description: address of the Kafka server - type: string - sasl: - description: SASL authentication configuration. [Unsupported (*)]. - properties: - clientIDReference: - description: Reference to the secret or config map containing - the client ID - properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing - the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing - the file. If omitted, the default is to use the same - namespace as where NetObserv is deployed. If the namespace - is different, the config map or the secret is copied - so that it can be mounted as required. 
- type: string - type: - description: 'Type for the file reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - clientSecretReference: - description: Reference to the secret or config map containing - the client secret - properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing - the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing - the file. If omitted, the default is to use the same - namespace as where NetObserv is deployed. If the namespace - is different, the config map or the secret is copied - so that it can be mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: DISABLED - description: Type of SASL authentication to use, or `DISABLED` - if SASL is not used - enum: - - DISABLED - - PLAIN - - SCRAM-SHA512 - type: string - type: object - tls: - description: tls client configuration. When using TLS, verify - that the address matches the Kafka port used for TLS, generally - 9093. Note that, when eBPF agents are used, Kafka certificate - needs to be copied in the agent namespace (by default it's netobserv-privileged). - properties: - caCert: - description: caCert defines the reference of the certificate - for the Certificate Authority - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or secret. - Omit when the key is not necessary. 
- type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing - certificates. If omitted, assumes same namespace as - where NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. - type: string - type: - description: 'type for the certificate reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: enable TLS - type: boolean - insecureSkipVerify: - default: false - description: insecureSkipVerify allows skipping client-side - verification of the server certificate If set to true, CACert - field will be ignored - type: boolean - userCert: - description: userCert defines the user certificate reference, - used for mTLS (you can ignore it when using regular, one-way - TLS) - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or secret. - Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing - certificates. If omitted, assumes same namespace as - where NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. - type: string - type: - description: 'type for the certificate reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: object - topic: - default: "" - description: kafka topic to use. It must exist, NetObserv will - not create it. 
- type: string - required: - - address - - topic - type: object - loki: - description: loki, the flow store, client settings. - properties: - authToken: - default: DISABLED - description: AuthToken describe the way to get a token to authenticate - to Loki. DISABLED will not send any token with the request. - HOST will use the local pod service account to authenticate - to Loki. FORWARD will forward user token, in this mode, pod - that are not receiving user request like the processor will - use the local pod service account. Similar to HOST mode. When - using the Loki Operator, set it to `HOST` or `FORWARD`. - enum: - - DISABLED - - HOST - - FORWARD - type: string - batchSize: - default: 102400 - description: batchSize is max batch size (in bytes) of logs to - accumulate before sending. - format: int64 - minimum: 1 - type: integer - batchWait: - default: 1s - description: batchWait is max time to wait before sending a batch. - type: string - maxBackoff: - default: 5s - description: maxBackoff is the maximum backoff time for client - connection between retries. - type: string - maxRetries: - default: 2 - description: maxRetries is the maximum number of retries for client - connections. - format: int32 - minimum: 0 - type: integer - minBackoff: - default: 1s - description: minBackoff is the initial backoff time for client - connection between retries. - type: string - querierUrl: - description: querierURL specifies the address of the Loki querier - service, in case it is different from the Loki ingester URL. - If empty, the URL value will be used (assuming that the Loki - ingester and querier are in the same server). When using the - Loki Operator, do not set it, since ingestion and queries use - the Loki gateway. - type: string - staticLabels: - additionalProperties: - type: string - default: - app: netobserv-flowcollector - description: staticLabels is a map of common labels to set on - each flow. 
- type: object - statusUrl: - description: statusURL specifies the address of the Loki /ready - /metrics /config endpoints, in case it is different from the - Loki querier URL. If empty, the QuerierURL value will be used. - This is useful to show error messages and some context in the - frontend. When using the Loki Operator, set it to the Loki HTTP - query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/. - type: string - tenantID: - default: netobserv - description: tenantID is the Loki X-Scope-OrgID that identifies - the tenant for each request. When using the Loki Operator, set - it to `network`, which corresponds to a special tenant mode. - type: string - timeout: - default: 10s - description: timeout is the maximum time connection / request - limit. A Timeout of zero means no timeout. - type: string - tls: - description: tls client configuration. - properties: - caCert: - description: caCert defines the reference of the certificate - for the Certificate Authority - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or secret. - Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing - certificates. If omitted, assumes same namespace as - where NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. 
- type: string - type: - description: 'type for the certificate reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: enable TLS - type: boolean - insecureSkipVerify: - default: false - description: insecureSkipVerify allows skipping client-side - verification of the server certificate If set to true, CACert - field will be ignored - type: boolean - userCert: - description: userCert defines the user certificate reference, - used for mTLS (you can ignore it when using regular, one-way - TLS) - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or secret. - Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing - certificates. If omitted, assumes same namespace as - where NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. - type: string - type: - description: 'type for the certificate reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: object - url: - default: http://loki:3100/ - description: url is the address of an existing Loki service to - push the flows to. When using the Loki Operator, set it to the - Loki gateway service with the `network` tenant set in path, - for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network. - type: string - type: object - namespace: - description: namespace where NetObserv pods are deployed. If empty, - the namespace of the operator is going to be used. 
- type: string - processor: - description: processor defines the settings of the component that - receives the flows from the agent, enriches them, and forwards them - to the Loki persistence layer. - properties: - debug: - description: Debug allows setting some aspects of the internal - configuration of the flow processor. This section is aimed exclusively - for debugging and fine-grained performance optimizations (for - example GOGC, GOMAXPROCS env vars). Users setting its values - do it at their own risk. - properties: - env: - additionalProperties: - type: string - description: env allows passing custom environment variables - to the NetObserv Agent. Useful for passing some very concrete - performance-tuning options (such as GOGC, GOMAXPROCS) that - shouldn't be publicly exposed as part of the FlowCollector - descriptor, as they are only useful in edge debug and support - scenarios. - type: object - type: object - dropUnusedFields: - default: true - description: dropUnusedFields allows, when set to true, to drop - fields that are known to be unused by OVS, in order to save - storage space. - type: boolean - enableKubeProbes: - default: true - description: enableKubeProbes is a flag to enable or disable Kubernetes - liveness and readiness probes - type: boolean - healthPort: - default: 8080 - description: healthPort is a collector HTTP port in the Pod that - exposes the health check API - format: int32 - maximum: 65535 - minimum: 1 - type: integer - imagePullPolicy: - default: IfNotPresent - description: imagePullPolicy is the Kubernetes pull policy for - the image defined above - enum: - - IfNotPresent - - Always - - Never - type: string - kafkaConsumerAutoscaler: - description: kafkaConsumerAutoscaler spec of a horizontal pod - autoscaler to set up for flowlogs-pipeline-transformer, which - consumes Kafka messages. This setting is ignored when Kafka - is disabled. 
- properties: - maxReplicas: - default: 3 - description: maxReplicas is the upper limit for the number - of pods that can be set by the autoscaler; cannot be smaller - than MinReplicas. - format: int32 - type: integer - metrics: - description: metrics used by the pod autoscaler - items: - description: MetricSpec specifies how to scale based on - a single metric (only `type` and one other matching field - should be set at once). - properties: - containerResource: - description: containerResource refers to a resource - metric (such as those specified in requests and limits) - known to Kubernetes describing a single container - in each pod of the current scale target (e.g. CPU - or memory). Such metrics are built in to Kubernetes, - and have special scaling options on top of those available - to normal per-pod metrics using the "pods" source. - This is an alpha feature and can be enabled by the - HPAContainerMetrics feature flag. - properties: - container: - description: container is the name of the container - in the pods of the scaling target - type: string - name: - description: name is the name of the resource in - question. - type: string - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. 
Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - container - - name - - target - type: object - external: - description: external refers to a global metric that - is not associated with any Kubernetes object. It allows - autoscaling based on information coming from components - running outside of cluster (for example length of - queue in cloud messaging service, or QPS from loadbalancer - running outside of cluster). - properties: - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. 
- items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. 
Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - object: - description: object refers to a metric describing a - single kubernetes object (for example, hits-per-second - on an Ingress object). 
- properties: - describedObject: - description: describedObject specifies the descriptions - of a object,such as kind,name apiVersion - properties: - apiVersion: - description: apiVersion is the API version of - the referent - type: string - kind: - description: 'kind is the kind of the referent; - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'name is the name of the referent; - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - kind - - name - type: object - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - describedObject - - metric - - target - type: object - pods: - description: pods refers to a metric describing each - pod in the current scale target (for example, transactions-processed-per-second). The - values will be averaged together before being compared - to the target value. 
- properties: - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. 
- type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - resource: - description: resource refers to a resource metric (such - as those specified in requests and limits) known to - Kubernetes describing each pod in the current scale - target (e.g. CPU or memory). Such metrics are built - in to Kubernetes, and have special scaling options - on top of those available to normal per-pod metrics - using the "pods" source. - properties: - name: - description: name is the name of the resource in - question. 
- type: string - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - name - - target - type: object - type: - description: 'type is the type of metric source. It - should be one of "ContainerResource", "External", - "Object", "Pods" or "Resource", each mapping to a - matching field in the object. Note: "ContainerResource" - type is available on when the feature-gate HPAContainerMetrics - is enabled' - type: string - required: - - type - type: object - type: array - minReplicas: - description: minReplicas is the lower limit for the number - of replicas to which the autoscaler can scale down. It - defaults to 1 pod. minReplicas is allowed to be 0 if the - alpha feature gate HPAScaleToZero is enabled and at least - one Object or External metric is configured. Scaling is - active as long as at least one metric value is available. 
- format: int32 - type: integer - status: - default: DISABLED - description: Status describe the desired status regarding - deploying an horizontal pod autoscaler DISABLED will not - deploy an horizontal pod autoscaler ENABLED will deploy - an horizontal pod autoscaler - enum: - - DISABLED - - ENABLED - type: string - type: object - kafkaConsumerBatchSize: - default: 10485760 - description: 'kafkaConsumerBatchSize indicates to the broker the - maximum batch size, in bytes, that the consumer will accept. - Ignored when not using Kafka. Default: 10MB.' - type: integer - kafkaConsumerQueueCapacity: - default: 1000 - description: kafkaConsumerQueueCapacity defines the capacity of - the internal message queue used in the Kafka consumer client. - Ignored when not using Kafka. - type: integer - kafkaConsumerReplicas: - default: 3 - description: kafkaConsumerReplicas defines the number of replicas - (pods) to start for flowlogs-pipeline-transformer, which consumes - Kafka messages. This setting is ignored when Kafka is disabled. - format: int32 - minimum: 0 - type: integer - logLevel: - default: info - description: logLevel of the collector runtime - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - metrics: - description: Metrics define the processor configuration regarding - metrics - properties: - ignoreTags: - default: - - egress - - packets - description: 'ignoreTags is a list of tags to specify which - metrics to ignore. Each metric is associated with a list - of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions - . 
Available tags are: egress, ingress, flows, bytes, packets, - namespaces, nodes, workloads' - items: - type: string - type: array - server: - description: metricsServer endpoint configuration for Prometheus - scraper - properties: - port: - default: 9102 - description: the prometheus HTTP port - format: int32 - maximum: 65535 - minimum: 1 - type: integer - tls: - description: TLS configuration. - properties: - provided: - description: TLS configuration. - properties: - certFile: - description: certFile defines the path to the - certificate file name within the config map - or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map - or secret. Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret - containing certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret - containing certificates. If omitted, assumes - same namespace as where NetObserv is deployed. - If the namespace is different, the config map - or the secret will be copied so that it can - be mounted as required. 
- type: string - type: - description: 'type for the certificate reference: - "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: DISABLED - description: Select the type of TLS configuration - "DISABLED" (default) to not configure TLS for the - endpoint, "PROVIDED" to manually provide cert file - and a key file, and "AUTO" to use OpenShift auto - generated certificate using annotations - enum: - - DISABLED - - PROVIDED - - AUTO - type: string - type: object - type: object - type: object - port: - default: 2055 - description: 'port of the flow collector (host port) By conventions, - some value are not authorized port must not be below 1024 and - must not equal this values: 4789,6081,500, and 4500' - format: int32 - maximum: 65535 - minimum: 1025 - type: integer - profilePort: - description: profilePort allows setting up a Go pprof profiler - listening to this port - format: int32 - maximum: 65535 - minimum: 0 - type: integer - resources: - default: - limits: - memory: 800Mi - requests: - cpu: 100m - memory: 100Mi - description: 'resources are the compute resources required by - this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only be - set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in - pod.spec.resourceClaims of the Pod where this field - is used. It makes that resource available inside a - container. 
- type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed - Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - type: object - required: - - agent - - deploymentModel - type: object - status: - description: FlowCollectorStatus defines the observed state of FlowCollector - properties: - conditions: - description: conditions represent the latest available observations - of an object's state - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n \ttype FooStatus struct{ \t // Represents the observations - of a foo's current state. 
\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\" \t // - +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map - \t // +listMapKey=type \t Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields - \t}" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. 
- --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - namespace: - description: namespace where console plugin and flowlogs-pipeline - have been deployed. - type: string - required: - - conditions - type: object - type: object - served: true - storage: false - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .spec.agent.type - name: Agent - type: string - - jsonPath: .spec.agent.ebpf.sampling - name: Sampling (EBPF) - type: string - - jsonPath: .spec.deploymentModel - name: Deployment Model - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].reason - name: Status - type: string name: v1beta1 schema: openAPIV3Schema: @@ -2480,6 +56,10 @@ spec: submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: + properties: + name: + pattern: ^cluster$ + type: string type: object spec: description: 'Defines the desired state of the FlowCollector resource. 
@@ -5142,7 +2722,7 @@ spec: type: object type: object served: true - storage: true + storage: false subresources: status: {} - additionalPrinterColumns: @@ -8096,7 +5676,7 @@ spec: type: object type: object served: true - storage: false + storage: true subresources: status: {} status: diff --git a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml index 8814b9729..3f24a6d0c 100644 --- a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml +++ b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml @@ -4,16 +4,6 @@ metadata: annotations: alm-examples: |- [ - { - "apiVersion": "flows.netobserv.io/v1alpha1", - "kind": "FlowCollector", - "metadata": { - "name": "cluster" - }, - "spec": { - "deploymentModel": "DIRECT" - } - }, { "apiVersion": "flows.netobserv.io/v1alpha1", "kind": "FlowMetric", @@ -427,12 +417,6 @@ spec: apiservicedefinitions: {} customresourcedefinitions: owned: - - description: FlowCollector is the Schema for the flowcollectors API, which pilots - and configures netflow collection. - displayName: Flow Collector - kind: FlowCollector - name: flowcollectors.flows.netobserv.io - version: v1alpha1 - description: '`FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments.' 
displayName: Flow Collector @@ -1274,7 +1258,6 @@ spec: version: 1.0.5 webhookdefinitions: - admissionReviewVersions: - - v1alpha1 - v1beta1 - v1beta2 containerPort: 443 diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index d81633097..6a942d9aa 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -30,2425 +30,6 @@ spec: name: Status type: string deprecated: true - name: v1alpha1 - schema: - openAPIV3Schema: - description: "FlowCollector is the Schema for the flowcollectors API, which - pilots and configures netflow collection. \n Deprecated: This package will - be removed in one of the next releases." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: FlowCollectorSpec defines the desired state of FlowCollector - properties: - agent: - default: - type: EBPF - description: agent for flows extraction. - properties: - ebpf: - description: ebpf describes the settings related to the eBPF-based - flow reporter when the "agent.type" property is set to "EBPF". - properties: - cacheActiveTimeout: - default: 5s - description: cacheActiveTimeout is the max period during which - the reporter will aggregate flows before sending. 
Increasing - `cacheMaxFlows` and `cacheActiveTimeout` can decrease the - network traffic overhead and the CPU load, however you can - expect higher memory consumption and an increased latency - in the flow collection. - pattern: ^\d+(ns|ms|s|m)?$ - type: string - cacheMaxFlows: - default: 100000 - description: cacheMaxFlows is the max number of flows in an - aggregate; when reached, the reporter sends the flows. Increasing - `cacheMaxFlows` and `cacheActiveTimeout` can decrease the - network traffic overhead and the CPU load, however you can - expect higher memory consumption and an increased latency - in the flow collection. - format: int32 - minimum: 1 - type: integer - debug: - description: Debug allows setting some aspects of the internal - configuration of the eBPF agent. This section is aimed exclusively - for debugging and fine-grained performance optimizations - (for example GOGC, GOMAXPROCS env vars). Users setting its - values do it at their own risk. - properties: - env: - additionalProperties: - type: string - description: env allows passing custom environment variables - to the NetObserv Agent. Useful for passing some very - concrete performance-tuning options (such as GOGC, GOMAXPROCS) - that shouldn't be publicly exposed as part of the FlowCollector - descriptor, as they are only useful in edge debug and - support scenarios. - type: object - type: object - excludeInterfaces: - default: - - lo - description: excludeInterfaces contains the interface names - that will be excluded from flow tracing. If an entry is - enclosed by slashes (such as `/br-/`), it will match as - regular expression, otherwise it will be matched as a case-sensitive - string. 
- items: - type: string - type: array - imagePullPolicy: - default: IfNotPresent - description: imagePullPolicy is the Kubernetes pull policy - for the image defined above - enum: - - IfNotPresent - - Always - - Never - type: string - interfaces: - description: interfaces contains the interface names from - where flows will be collected. If empty, the agent will - fetch all the interfaces in the system, excepting the ones - listed in ExcludeInterfaces. If an entry is enclosed by - slashes (such as `/br-/`), it will match as regular expression, - otherwise it will be matched as a case-sensitive string. - items: - type: string - type: array - kafkaBatchSize: - default: 1048576 - description: 'kafkaBatchSize limits the maximum size of a - request in bytes before being sent to a partition. Ignored - when not using Kafka. Default: 1MB.' - type: integer - logLevel: - default: info - description: logLevel defines the log level for the NetObserv - eBPF Agent - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - privileged: - description: 'privileged mode for the eBPF Agent container. - In general this setting can be ignored or set to false: - in that case, the operator will set granular capabilities - (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, - to enable its correct operation. If for some reason these - capabilities cannot be set (for example old kernel version - not knowing CAP_BPF) then you can turn on this mode for - more global privileges.' - type: boolean - resources: - default: - limits: - memory: 800Mi - requests: - cpu: 100m - memory: 50Mi - description: 'resources are the compute resources required - by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. 
- \n This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. \n This field - is immutable. It can only be set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry - in pod.spec.resourceClaims of the Pod where this - field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - sampling: - default: 50 - description: sampling rate of the flow reporter. 100 means - one flow on 100 is sent. 0 or 1 means all flows are sampled. - format: int32 - minimum: 0 - type: integer - type: object - ipfix: - description: ipfix describes the settings related to the IPFIX-based - flow reporter when the "agent.type" property is set to "IPFIX". 
- properties: - cacheActiveTimeout: - default: 20s - description: cacheActiveTimeout is the max period during which - the reporter will aggregate flows before sending - pattern: ^\d+(ns|ms|s|m)?$ - type: string - cacheMaxFlows: - default: 400 - description: cacheMaxFlows is the max number of flows in an - aggregate; when reached, the reporter sends the flows - format: int32 - minimum: 0 - type: integer - clusterNetworkOperator: - description: clusterNetworkOperator defines the settings related - to the OpenShift Cluster Network Operator, when available. - properties: - namespace: - default: openshift-network-operator - description: namespace where the config map is going - to be deployed. - type: string - type: object - forceSampleAll: - default: false - description: forceSampleAll allows disabling sampling in the - IPFIX-based flow reporter. It is not recommended to sample - all the traffic with IPFIX, as it might generate cluster - instability. If you REALLY want to do that, set this flag - to true. Use at your own risk. When it is set to true, the - value of "sampling" is ignored. - type: boolean - ovnKubernetes: - description: ovnKubernetes defines the settings of the OVN-Kubernetes - CNI, when available. This configuration is used when using - OVN's IPFIX exports, without OpenShift. When using OpenShift, - refer to the `clusterNetworkOperator` property instead. - properties: - containerName: - default: ovnkube-node - description: containerName defines the name of the container - to configure for IPFIX. - type: string - daemonSetName: - default: ovnkube-node - description: daemonSetName defines the name of the DaemonSet - controlling the OVN-Kubernetes pods. - type: string - namespace: - default: ovn-kubernetes - description: namespace where OVN-Kubernetes pods are deployed. - type: string - type: object - sampling: - default: 400 - description: sampling is the sampling rate on the reporter. - 100 means one flow on 100 is sent. 
To ensure cluster stability, - it is not possible to set a value below 2. If you really - want to sample every packet, which might impact the cluster - stability, refer to "forceSampleAll". Alternatively, you - can use the eBPF Agent instead of IPFIX. - format: int32 - minimum: 2 - type: integer - type: object - type: - default: EBPF - description: type selects the flows tracing agent. Possible values - are "EBPF" (default) to use NetObserv eBPF agent, "IPFIX" to - use the legacy IPFIX collector. "EBPF" is recommended in most - cases as it offers better performances and should work regardless - of the CNI installed on the cluster. "IPFIX" works with OVN-Kubernetes - CNI (other CNIs could work if they support exporting IPFIX, - but they would require manual configuration). - enum: - - EBPF - - IPFIX - type: string - required: - - type - type: object - consolePlugin: - description: consolePlugin defines the settings related to the OpenShift - Console plugin, when available. - properties: - autoscaler: - description: autoscaler spec of a horizontal pod autoscaler to - set up for the plugin Deployment. - properties: - maxReplicas: - default: 3 - description: maxReplicas is the upper limit for the number - of pods that can be set by the autoscaler; cannot be smaller - than MinReplicas. - format: int32 - type: integer - metrics: - description: metrics used by the pod autoscaler - items: - description: MetricSpec specifies how to scale based on - a single metric (only `type` and one other matching field - should be set at once). - properties: - containerResource: - description: containerResource refers to a resource - metric (such as those specified in requests and limits) - known to Kubernetes describing a single container - in each pod of the current scale target (e.g. CPU - or memory). Such metrics are built in to Kubernetes, - and have special scaling options on top of those available - to normal per-pod metrics using the "pods" source. 
- This is an alpha feature and can be enabled by the - HPAContainerMetrics feature flag. - properties: - container: - description: container is the name of the container - in the pods of the scaling target - type: string - name: - description: name is the name of the resource in - question. - type: string - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - container - - name - - target - type: object - external: - description: external refers to a global metric that - is not associated with any Kubernetes object. It allows - autoscaling based on information coming from components - running outside of cluster (for example length of - queue in cloud messaging service, or QPS from loadbalancer - running outside of cluster). 
- properties: - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. 
- type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - object: - description: object refers to a metric describing a - single kubernetes object (for example, hits-per-second - on an Ingress object). 
- properties: - describedObject: - description: describedObject specifies the descriptions - of a object,such as kind,name apiVersion - properties: - apiVersion: - description: apiVersion is the API version of - the referent - type: string - kind: - description: 'kind is the kind of the referent; - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'name is the name of the referent; - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - kind - - name - type: object - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - describedObject - - metric - - target - type: object - pods: - description: pods refers to a metric describing each - pod in the current scale target (for example, transactions-processed-per-second). The - values will be averaged together before being compared - to the target value. 
- properties: - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. 
- type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - resource: - description: resource refers to a resource metric (such - as those specified in requests and limits) known to - Kubernetes describing each pod in the current scale - target (e.g. CPU or memory). Such metrics are built - in to Kubernetes, and have special scaling options - on top of those available to normal per-pod metrics - using the "pods" source. - properties: - name: - description: name is the name of the resource in - question. 
- type: string - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - name - - target - type: object - type: - description: 'type is the type of metric source. It - should be one of "ContainerResource", "External", - "Object", "Pods" or "Resource", each mapping to a - matching field in the object. Note: "ContainerResource" - type is available on when the feature-gate HPAContainerMetrics - is enabled' - type: string - required: - - type - type: object - type: array - minReplicas: - description: minReplicas is the lower limit for the number - of replicas to which the autoscaler can scale down. It - defaults to 1 pod. minReplicas is allowed to be 0 if the - alpha feature gate HPAScaleToZero is enabled and at least - one Object or External metric is configured. Scaling is - active as long as at least one metric value is available. 
- format: int32 - type: integer - status: - default: DISABLED - description: Status describe the desired status regarding - deploying an horizontal pod autoscaler DISABLED will not - deploy an horizontal pod autoscaler ENABLED will deploy - an horizontal pod autoscaler - enum: - - DISABLED - - ENABLED - type: string - type: object - imagePullPolicy: - default: IfNotPresent - description: imagePullPolicy is the Kubernetes pull policy for - the image defined above - enum: - - IfNotPresent - - Always - - Never - type: string - logLevel: - default: info - description: logLevel for the console plugin backend - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - port: - default: 9001 - description: port is the plugin service port - format: int32 - maximum: 65535 - minimum: 1 - type: integer - portNaming: - default: - enable: true - description: portNaming defines the configuration of the port-to-service - name translation - properties: - enable: - default: true - description: enable the console plugin port-to-service name - translation - type: boolean - portNames: - additionalProperties: - type: string - description: 'portNames defines additional port names to use - in the console. 
Example: portNames: {"3100": "loki"}' - type: object - type: object - quickFilters: - default: - - default: true - filter: - dst_namespace!: openshift-,netobserv - src_namespace!: openshift-,netobserv - name: Applications - - filter: - dst_namespace: openshift-,netobserv - src_namespace: openshift-,netobserv - name: Infrastructure - - default: true - filter: - dst_kind: Pod - src_kind: Pod - name: Pods network - - filter: - dst_kind: Service - name: Services network - description: quickFilters configures quick filter presets for - the Console plugin - items: - description: QuickFilter defines preset configuration for Console's - quick filters - properties: - default: - description: default defines whether this filter should - be active by default or not - type: boolean - filter: - additionalProperties: - type: string - description: 'filter is a set of keys and values to be set - when this filter is selected. Each key can relate to a - list of values using a coma-separated string. Example: - filter: {"src_namespace": "namespace1,namespace2"}' - type: object - name: - description: name of the filter, that will be displayed - in Console - type: string - required: - - filter - - name - type: object - type: array - register: - default: true - description: 'register allows, when set to true, to automatically - register the provided console plugin with the OpenShift Console - operator. When set to false, you can still register it manually - by editing console.operator.openshift.io/cluster. E.g: oc patch - console.operator.openshift.io cluster --type=''json'' -p ''[{"op": - "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]''' - type: boolean - replicas: - default: 1 - description: replicas defines the number of replicas (pods) to - start. - format: int32 - minimum: 0 - type: integer - resources: - default: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 50Mi - description: 'resources, in terms of compute resources, required - by this container. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only be - set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in - pod.spec.resourceClaims of the Pod where this field - is used. It makes that resource available inside a - container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed - Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - required: - - register - type: object - deploymentModel: - default: DIRECT - description: deploymentModel defines the desired type of deployment - for flow processing. Possible values are "DIRECT" (default) to make - the flow processor listening directly from the agents, or "KAFKA" - to make flows sent to a Kafka pipeline before consumption by the - processor. Kafka can provide better scalability, resiliency and - high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka). - enum: - - DIRECT - - KAFKA - type: string - exporters: - description: exporters defines additional optional exporters for custom - consumption or storage. This is an experimental feature. Currently, - only KAFKA exporter is available. - items: - description: FlowCollectorExporter defines an additional exporter - to send enriched flows to - properties: - ipfix: - description: IPFIX configuration, such as the IP address and - port to send enriched IPFIX flows to. - properties: - targetHost: - default: "" - description: Address of the IPFIX external receiver - type: string - targetPort: - description: Port for the IPFIX external receiver - type: integer - transport: - description: Transport protocol (`TCP` or `UDP`) to be used - for the IPFIX connection, defaults to `TCP`. - enum: - - TCP - - UDP - type: string - required: - - targetHost - - targetPort - type: object - kafka: - description: kafka configuration, such as address or topic, - to send enriched flows to. - properties: - address: - default: "" - description: address of the Kafka server - type: string - sasl: - description: SASL authentication configuration. [Unsupported - (*)]. 
- properties: - clientIDReference: - description: Reference to the secret or config map containing - the client ID - properties: - file: - description: File name within the config map or - secret - type: string - name: - description: Name of the config map or secret containing - the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret - containing the file. If omitted, the default is - to use the same namespace as where NetObserv is - deployed. If the namespace is different, the config - map or the secret is copied so that it can be - mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - clientSecretReference: - description: Reference to the secret or config map containing - the client secret - properties: - file: - description: File name within the config map or - secret - type: string - name: - description: Name of the config map or secret containing - the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret - containing the file. If omitted, the default is - to use the same namespace as where NetObserv is - deployed. If the namespace is different, the config - map or the secret is copied so that it can be - mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: DISABLED - description: Type of SASL authentication to use, or - `DISABLED` if SASL is not used - enum: - - DISABLED - - PLAIN - - SCRAM-SHA512 - type: string - type: object - tls: - description: tls client configuration. When using TLS, verify - that the address matches the Kafka port used for TLS, - generally 9093. 
Note that, when eBPF agents are used, - Kafka certificate needs to be copied in the agent namespace - (by default it's netobserv-privileged). - properties: - caCert: - description: caCert defines the reference of the certificate - for the Certificate Authority - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or - secret. Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret - containing certificates. If omitted, assumes same - namespace as where NetObserv is deployed. If the - namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. - type: string - type: - description: 'type for the certificate reference: - "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: enable TLS - type: boolean - insecureSkipVerify: - default: false - description: insecureSkipVerify allows skipping client-side - verification of the server certificate If set to true, - CACert field will be ignored - type: boolean - userCert: - description: userCert defines the user certificate reference, - used for mTLS (you can ignore it when using regular, - one-way TLS) - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or - secret. Omit when the key is not necessary. 
- type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret - containing certificates. If omitted, assumes same - namespace as where NetObserv is deployed. If the - namespace is different, the config map or the - secret will be copied so that it can be mounted - as required. - type: string - type: - description: 'type for the certificate reference: - "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: object - topic: - default: "" - description: kafka topic to use. It must exist, NetObserv - will not create it. - type: string - required: - - address - - topic - type: object - type: - description: '`type` selects the type of exporters. The available - options are `KAFKA` and `IPFIX`.' - enum: - - KAFKA - - IPFIX - type: string - required: - - type - type: object - type: array - kafka: - description: kafka configuration, allowing to use Kafka as a broker - as part of the flow collection pipeline. Available when the "spec.deploymentModel" - is "KAFKA". - properties: - address: - default: "" - description: address of the Kafka server - type: string - sasl: - description: SASL authentication configuration. [Unsupported (*)]. - properties: - clientIDReference: - description: Reference to the secret or config map containing - the client ID - properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing - the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing - the file. If omitted, the default is to use the same - namespace as where NetObserv is deployed. If the namespace - is different, the config map or the secret is copied - so that it can be mounted as required. 
- type: string - type: - description: 'Type for the file reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - clientSecretReference: - description: Reference to the secret or config map containing - the client secret - properties: - file: - description: File name within the config map or secret - type: string - name: - description: Name of the config map or secret containing - the file - type: string - namespace: - default: "" - description: Namespace of the config map or secret containing - the file. If omitted, the default is to use the same - namespace as where NetObserv is deployed. If the namespace - is different, the config map or the secret is copied - so that it can be mounted as required. - type: string - type: - description: 'Type for the file reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: DISABLED - description: Type of SASL authentication to use, or `DISABLED` - if SASL is not used - enum: - - DISABLED - - PLAIN - - SCRAM-SHA512 - type: string - type: object - tls: - description: tls client configuration. When using TLS, verify - that the address matches the Kafka port used for TLS, generally - 9093. Note that, when eBPF agents are used, Kafka certificate - needs to be copied in the agent namespace (by default it's netobserv-privileged). - properties: - caCert: - description: caCert defines the reference of the certificate - for the Certificate Authority - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or secret. - Omit when the key is not necessary. 
- type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing - certificates. If omitted, assumes same namespace as - where NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. - type: string - type: - description: 'type for the certificate reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: enable TLS - type: boolean - insecureSkipVerify: - default: false - description: insecureSkipVerify allows skipping client-side - verification of the server certificate If set to true, CACert - field will be ignored - type: boolean - userCert: - description: userCert defines the user certificate reference, - used for mTLS (you can ignore it when using regular, one-way - TLS) - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or secret. - Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing - certificates. If omitted, assumes same namespace as - where NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. - type: string - type: - description: 'type for the certificate reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: object - topic: - default: "" - description: kafka topic to use. It must exist, NetObserv will - not create it. 
- type: string - required: - - address - - topic - type: object - loki: - description: loki, the flow store, client settings. - properties: - authToken: - default: DISABLED - description: AuthToken describe the way to get a token to authenticate - to Loki. DISABLED will not send any token with the request. - HOST will use the local pod service account to authenticate - to Loki. FORWARD will forward user token, in this mode, pod - that are not receiving user request like the processor will - use the local pod service account. Similar to HOST mode. When - using the Loki Operator, set it to `HOST` or `FORWARD`. - enum: - - DISABLED - - HOST - - FORWARD - type: string - batchSize: - default: 102400 - description: batchSize is max batch size (in bytes) of logs to - accumulate before sending. - format: int64 - minimum: 1 - type: integer - batchWait: - default: 1s - description: batchWait is max time to wait before sending a batch. - type: string - maxBackoff: - default: 5s - description: maxBackoff is the maximum backoff time for client - connection between retries. - type: string - maxRetries: - default: 2 - description: maxRetries is the maximum number of retries for client - connections. - format: int32 - minimum: 0 - type: integer - minBackoff: - default: 1s - description: minBackoff is the initial backoff time for client - connection between retries. - type: string - querierUrl: - description: querierURL specifies the address of the Loki querier - service, in case it is different from the Loki ingester URL. - If empty, the URL value will be used (assuming that the Loki - ingester and querier are in the same server). When using the - Loki Operator, do not set it, since ingestion and queries use - the Loki gateway. - type: string - staticLabels: - additionalProperties: - type: string - default: - app: netobserv-flowcollector - description: staticLabels is a map of common labels to set on - each flow. 
- type: object - statusUrl: - description: statusURL specifies the address of the Loki /ready - /metrics /config endpoints, in case it is different from the - Loki querier URL. If empty, the QuerierURL value will be used. - This is useful to show error messages and some context in the - frontend. When using the Loki Operator, set it to the Loki HTTP - query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/. - type: string - tenantID: - default: netobserv - description: tenantID is the Loki X-Scope-OrgID that identifies - the tenant for each request. When using the Loki Operator, set - it to `network`, which corresponds to a special tenant mode. - type: string - timeout: - default: 10s - description: timeout is the maximum time connection / request - limit. A Timeout of zero means no timeout. - type: string - tls: - description: tls client configuration. - properties: - caCert: - description: caCert defines the reference of the certificate - for the Certificate Authority - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or secret. - Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing - certificates. If omitted, assumes same namespace as - where NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. 
- type: string - type: - description: 'type for the certificate reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - enable: - default: false - description: enable TLS - type: boolean - insecureSkipVerify: - default: false - description: insecureSkipVerify allows skipping client-side - verification of the server certificate If set to true, CACert - field will be ignored - type: boolean - userCert: - description: userCert defines the user certificate reference, - used for mTLS (you can ignore it when using regular, one-way - TLS) - properties: - certFile: - description: certFile defines the path to the certificate - file name within the config map or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map or secret. - Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret containing - certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret containing - certificates. If omitted, assumes same namespace as - where NetObserv is deployed. If the namespace is different, - the config map or the secret will be copied so that - it can be mounted as required. - type: string - type: - description: 'type for the certificate reference: "configmap" - or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: object - url: - default: http://loki:3100/ - description: url is the address of an existing Loki service to - push the flows to. When using the Loki Operator, set it to the - Loki gateway service with the `network` tenant set in path, - for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network. - type: string - type: object - namespace: - description: namespace where NetObserv pods are deployed. If empty, - the namespace of the operator is going to be used. 
- type: string - processor: - description: processor defines the settings of the component that - receives the flows from the agent, enriches them, and forwards them - to the Loki persistence layer. - properties: - debug: - description: Debug allows setting some aspects of the internal - configuration of the flow processor. This section is aimed exclusively - for debugging and fine-grained performance optimizations (for - example GOGC, GOMAXPROCS env vars). Users setting its values - do it at their own risk. - properties: - env: - additionalProperties: - type: string - description: env allows passing custom environment variables - to the NetObserv Agent. Useful for passing some very concrete - performance-tuning options (such as GOGC, GOMAXPROCS) that - shouldn't be publicly exposed as part of the FlowCollector - descriptor, as they are only useful in edge debug and support - scenarios. - type: object - type: object - dropUnusedFields: - default: true - description: dropUnusedFields allows, when set to true, to drop - fields that are known to be unused by OVS, in order to save - storage space. - type: boolean - enableKubeProbes: - default: true - description: enableKubeProbes is a flag to enable or disable Kubernetes - liveness and readiness probes - type: boolean - healthPort: - default: 8080 - description: healthPort is a collector HTTP port in the Pod that - exposes the health check API - format: int32 - maximum: 65535 - minimum: 1 - type: integer - imagePullPolicy: - default: IfNotPresent - description: imagePullPolicy is the Kubernetes pull policy for - the image defined above - enum: - - IfNotPresent - - Always - - Never - type: string - kafkaConsumerAutoscaler: - description: kafkaConsumerAutoscaler spec of a horizontal pod - autoscaler to set up for flowlogs-pipeline-transformer, which - consumes Kafka messages. This setting is ignored when Kafka - is disabled. 
- properties: - maxReplicas: - default: 3 - description: maxReplicas is the upper limit for the number - of pods that can be set by the autoscaler; cannot be smaller - than MinReplicas. - format: int32 - type: integer - metrics: - description: metrics used by the pod autoscaler - items: - description: MetricSpec specifies how to scale based on - a single metric (only `type` and one other matching field - should be set at once). - properties: - containerResource: - description: containerResource refers to a resource - metric (such as those specified in requests and limits) - known to Kubernetes describing a single container - in each pod of the current scale target (e.g. CPU - or memory). Such metrics are built in to Kubernetes, - and have special scaling options on top of those available - to normal per-pod metrics using the "pods" source. - This is an alpha feature and can be enabled by the - HPAContainerMetrics feature flag. - properties: - container: - description: container is the name of the container - in the pods of the scaling target - type: string - name: - description: name is the name of the resource in - question. - type: string - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. 
Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - container - - name - - target - type: object - external: - description: external refers to a global metric that - is not associated with any Kubernetes object. It allows - autoscaling based on information coming from components - running outside of cluster (for example length of - queue in cloud messaging service, or QPS from loadbalancer - running outside of cluster). - properties: - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. 
- items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. 
Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - object: - description: object refers to a metric describing a - single kubernetes object (for example, hits-per-second - on an Ingress object). 
- properties: - describedObject: - description: describedObject specifies the descriptions - of a object,such as kind,name apiVersion - properties: - apiVersion: - description: apiVersion is the API version of - the referent - type: string - kind: - description: 'kind is the kind of the referent; - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'name is the name of the referent; - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - kind - - name - type: object - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - describedObject - - metric - - target - type: object - pods: - description: pods refers to a metric describing each - pod in the current scale target (for example, transactions-processed-per-second). The - values will be averaged together before being compared - to the target value. 
- properties: - metric: - description: metric identifies the target metric - by name and selector - properties: - name: - description: name is the name of the given metric - type: string - selector: - description: selector is the string-encoded - form of a standard kubernetes label selector - for the given metric When set, it is passed - as an additional parameter to the metrics - server for more specific metrics scoping. - When unset, just the metricName will be used - to gather metrics. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. 
- type: object - type: object - required: - - name - type: object - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - metric - - target - type: object - resource: - description: resource refers to a resource metric (such - as those specified in requests and limits) known to - Kubernetes describing each pod in the current scale - target (e.g. CPU or memory). Such metrics are built - in to Kubernetes, and have special scaling options - on top of those available to normal per-pod metrics - using the "pods" source. - properties: - name: - description: name is the name of the resource in - question. 
- type: string - target: - description: target specifies the target value for - the given metric - properties: - averageUtilization: - description: averageUtilization is the target - value of the average of the resource metric - across all relevant pods, represented as a - percentage of the requested value of the resource - for the pods. Currently only valid for Resource - metric source type - format: int32 - type: integer - averageValue: - anyOf: - - type: integer - - type: string - description: averageValue is the target value - of the average of the metric across all relevant - pods (as a quantity) - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: - description: type represents whether the metric - type is Utilization, Value, or AverageValue - type: string - value: - anyOf: - - type: integer - - type: string - description: value is the target value of the - metric (as a quantity). - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - required: - - type - type: object - required: - - name - - target - type: object - type: - description: 'type is the type of metric source. It - should be one of "ContainerResource", "External", - "Object", "Pods" or "Resource", each mapping to a - matching field in the object. Note: "ContainerResource" - type is available on when the feature-gate HPAContainerMetrics - is enabled' - type: string - required: - - type - type: object - type: array - minReplicas: - description: minReplicas is the lower limit for the number - of replicas to which the autoscaler can scale down. It - defaults to 1 pod. minReplicas is allowed to be 0 if the - alpha feature gate HPAScaleToZero is enabled and at least - one Object or External metric is configured. Scaling is - active as long as at least one metric value is available. 
- format: int32 - type: integer - status: - default: DISABLED - description: Status describe the desired status regarding - deploying an horizontal pod autoscaler DISABLED will not - deploy an horizontal pod autoscaler ENABLED will deploy - an horizontal pod autoscaler - enum: - - DISABLED - - ENABLED - type: string - type: object - kafkaConsumerBatchSize: - default: 10485760 - description: 'kafkaConsumerBatchSize indicates to the broker the - maximum batch size, in bytes, that the consumer will accept. - Ignored when not using Kafka. Default: 10MB.' - type: integer - kafkaConsumerQueueCapacity: - default: 1000 - description: kafkaConsumerQueueCapacity defines the capacity of - the internal message queue used in the Kafka consumer client. - Ignored when not using Kafka. - type: integer - kafkaConsumerReplicas: - default: 3 - description: kafkaConsumerReplicas defines the number of replicas - (pods) to start for flowlogs-pipeline-transformer, which consumes - Kafka messages. This setting is ignored when Kafka is disabled. - format: int32 - minimum: 0 - type: integer - logLevel: - default: info - description: logLevel of the collector runtime - enum: - - trace - - debug - - info - - warn - - error - - fatal - - panic - type: string - metrics: - description: Metrics define the processor configuration regarding - metrics - properties: - ignoreTags: - default: - - egress - - packets - description: 'ignoreTags is a list of tags to specify which - metrics to ignore. Each metric is associated with a list - of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions - . 
Available tags are: egress, ingress, flows, bytes, packets, - namespaces, nodes, workloads' - items: - type: string - type: array - server: - description: metricsServer endpoint configuration for Prometheus - scraper - properties: - port: - default: 9102 - description: the prometheus HTTP port - format: int32 - maximum: 65535 - minimum: 1 - type: integer - tls: - description: TLS configuration. - properties: - provided: - description: TLS configuration. - properties: - certFile: - description: certFile defines the path to the - certificate file name within the config map - or secret - type: string - certKey: - description: certKey defines the path to the certificate - private key file name within the config map - or secret. Omit when the key is not necessary. - type: string - name: - description: name of the config map or secret - containing certificates - type: string - namespace: - default: "" - description: namespace of the config map or secret - containing certificates. If omitted, assumes - same namespace as where NetObserv is deployed. - If the namespace is different, the config map - or the secret will be copied so that it can - be mounted as required. 
- type: string - type: - description: 'type for the certificate reference: - "configmap" or "secret"' - enum: - - configmap - - secret - type: string - type: object - type: - default: DISABLED - description: Select the type of TLS configuration - "DISABLED" (default) to not configure TLS for the - endpoint, "PROVIDED" to manually provide cert file - and a key file, and "AUTO" to use OpenShift auto - generated certificate using annotations - enum: - - DISABLED - - PROVIDED - - AUTO - type: string - type: object - type: object - type: object - port: - default: 2055 - description: 'port of the flow collector (host port) By conventions, - some value are not authorized port must not be below 1024 and - must not equal this values: 4789,6081,500, and 4500' - format: int32 - maximum: 65535 - minimum: 1025 - type: integer - profilePort: - description: profilePort allows setting up a Go pprof profiler - listening to this port - format: int32 - maximum: 65535 - minimum: 0 - type: integer - resources: - default: - limits: - memory: 800Mi - requests: - cpu: 100m - memory: 100Mi - description: 'resources are the compute resources required by - this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only be - set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in - pod.spec.resourceClaims of the Pod where this field - is used. It makes that resource available inside a - container. 
- type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed - Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - type: object - required: - - agent - - deploymentModel - type: object - status: - description: FlowCollectorStatus defines the observed state of FlowCollector - properties: - conditions: - description: conditions represent the latest available observations - of an object's state - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n \ttype FooStatus struct{ \t // Represents the observations - of a foo's current state. 
\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\" \t // - +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map - \t // +listMapKey=type \t Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields - \t}" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. 
- --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - namespace: - description: namespace where console plugin and flowlogs-pipeline - have been deployed. - type: string - required: - - conditions - type: object - type: object - served: true - storage: false - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .spec.agent.type - name: Agent - type: string - - jsonPath: .spec.agent.ebpf.sampling - name: Sampling (EBPF) - type: string - - jsonPath: .spec.deploymentModel - name: Deployment Model - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].reason - name: Status - type: string name: v1beta1 schema: openAPIV3Schema: @@ -5128,7 +2709,7 @@ spec: type: object type: object served: true - storage: true + storage: false subresources: status: {} - additionalPrinterColumns: @@ -8082,7 +5663,7 @@ spec: type: object type: object served: true - storage: false + storage: true subresources: status: {} status: diff --git a/config/crd/patches/webhook_in_flowcollectors.yaml b/config/crd/patches/webhook_in_flowcollectors.yaml index fa0d9bee0..04b0caf57 100644 --- a/config/crd/patches/webhook_in_flowcollectors.yaml +++ b/config/crd/patches/webhook_in_flowcollectors.yaml @@ -13,6 +13,5 @@ spec: name: webhook-service path: /convert conversionReviewVersions: - - v1alpha1 - v1beta1 - v1beta2 diff --git a/config/csv/bases/netobserv-operator.clusterserviceversion.yaml b/config/csv/bases/netobserv-operator.clusterserviceversion.yaml index ff04f3687..c7fc74589 100644 --- 
a/config/csv/bases/netobserv-operator.clusterserviceversion.yaml +++ b/config/csv/bases/netobserv-operator.clusterserviceversion.yaml @@ -25,12 +25,6 @@ spec: apiservicedefinitions: {} customresourcedefinitions: owned: - - description: FlowCollector is the Schema for the flowcollectors API, which pilots - and configures netflow collection. - displayName: Flow Collector - kind: FlowCollector - name: flowcollectors.flows.netobserv.io - version: v1alpha1 - description: '`FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments.' displayName: Flow Collector diff --git a/config/samples/flows_v1alpha1_flowcollector.yaml b/config/samples/flows_v1alpha1_flowcollector.yaml deleted file mode 100644 index 0f49d5a54..000000000 --- a/config/samples/flows_v1alpha1_flowcollector.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: flows.netobserv.io/v1alpha1 -kind: FlowCollector -metadata: - name: cluster -spec: - deploymentModel: DIRECT diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 7b8b38139..af8c16ab8 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -1,6 +1,5 @@ ## Append samples you want in your CSV to this file as resources ## resources: -- flows_v1alpha1_flowcollector.yaml - flows_v1beta1_flowcollector.yaml - flows_v1beta2_flowcollector.yaml - flows_v1alpha1_flowmetric.yaml diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index 2e3e75fc9..74667b520 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -2,4316 +2,9 @@ Packages: -- [flows.netobserv.io/v1alpha1](#flowsnetobserviov1alpha1) - [flows.netobserv.io/v1beta1](#flowsnetobserviov1beta1) - [flows.netobserv.io/v1beta2](#flowsnetobserviov1beta2) -# flows.netobserv.io/v1alpha1 - -Resource Types: - -- [FlowCollector](#flowcollector) - - - - -## FlowCollector -[↩ Parent](#flowsnetobserviov1alpha1 ) - - - - - - -FlowCollector is the Schema for the flowcollectors 
API, which pilots and configures netflow collection. - Deprecated: This package will be removed in one of the next releases. - -
Name | -Type | -Description | -Required | -
---|---|---|---|
apiVersion | -string | -flows.netobserv.io/v1alpha1 | -true | -
kind | -string | -FlowCollector | -true | -
metadata | -object | -Refer to the Kubernetes API documentation for the fields of the `metadata` field. | -true | -
spec | -object | -
- FlowCollectorSpec defines the desired state of FlowCollector - |
- false | -
status | -object | -
- FlowCollectorStatus defines the observed state of FlowCollector - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
agent | -object | -
- agent for flows extraction. - - Default: map[type:EBPF] - |
- true | -
deploymentModel | -enum | -
- deploymentModel defines the desired type of deployment for flow processing. Possible values are "DIRECT" (default) to make the flow processor listening directly from the agents, or "KAFKA" to make flows sent to a Kafka pipeline before consumption by the processor. Kafka can provide better scalability, resiliency and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka). - - Enum: DIRECT, KAFKA - Default: DIRECT - |
- true | -
consolePlugin | -object | -
- consolePlugin defines the settings related to the OpenShift Console plugin, when available. - |
- false | -
exporters | -[]object | -
- exporters defines additional optional exporters for custom consumption or storage. This is an experimental feature. Currently, only KAFKA exporter is available. - |
- false | -
kafka | -object | -
- kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the "spec.deploymentModel" is "KAFKA". - |
- false | -
loki | -object | -
- loki, the flow store, client settings. - |
- false | -
namespace | -string | -
- namespace where NetObserv pods are deployed. If empty, the namespace of the operator is going to be used. - |
- false | -
processor | -object | -
- processor defines the settings of the component that receives the flows from the agent, enriches them, and forwards them to the Loki persistence layer. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
type | -enum | -
- type selects the flows tracing agent. Possible values are "EBPF" (default) to use NetObserv eBPF agent, "IPFIX" to use the legacy IPFIX collector. "EBPF" is recommended in most cases as it offers better performances and should work regardless of the CNI installed on the cluster. "IPFIX" works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration). - - Enum: EBPF, IPFIX - Default: EBPF - |
- true | -
ebpf | -object | -
- ebpf describes the settings related to the eBPF-based flow reporter when the "agent.type" property is set to "EBPF". - |
- false | -
ipfix | -object | -
- ipfix describes the settings related to the IPFIX-based flow reporter when the "agent.type" property is set to "IPFIX". - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
cacheActiveTimeout | -string | -
- cacheActiveTimeout is the max period during which the reporter will aggregate flows before sending. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection. - - Default: 5s - |
- false | -
cacheMaxFlows | -integer | -
- cacheMaxFlows is the max number of flows in an aggregate; when reached, the reporter sends the flows. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection. - - Format: int32 - Default: 100000 - Minimum: 1 - |
- false | -
debug | -object | -
- Debug allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk. - |
- false | -
excludeInterfaces | -[]string | -
- excludeInterfaces contains the interface names that will be excluded from flow tracing. If an entry is enclosed by slashes (such as `/br-/`), it will match as regular expression, otherwise it will be matched as a case-sensitive string. - - Default: [lo] - |
- false | -
imagePullPolicy | -enum | -
- imagePullPolicy is the Kubernetes pull policy for the image defined above - - Enum: IfNotPresent, Always, Never - Default: IfNotPresent - |
- false | -
interfaces | -[]string | -
- interfaces contains the interface names from where flows will be collected. If empty, the agent will fetch all the interfaces in the system, excepting the ones listed in ExcludeInterfaces. If an entry is enclosed by slashes (such as `/br-/`), it will match as regular expression, otherwise it will be matched as a case-sensitive string. - |
- false | -
kafkaBatchSize | -integer | -
- kafkaBatchSize limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 1MB. - - Default: 1048576 - |
- false | -
logLevel | -enum | -
- logLevel defines the log level for the NetObserv eBPF Agent - - Enum: trace, debug, info, warn, error, fatal, panic - Default: info - |
- false | -
privileged | -boolean | -
- privileged mode for the eBPF Agent container. In general this setting can be ignored or set to false: in that case, the operator will set granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, to enable its correct operation. If for some reason these capabilities cannot be set (for example old kernel version not knowing CAP_BPF) then you can turn on this mode for more global privileges. - |
- false | -
resources | -object | -
- resources are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - - Default: map[limits:map[memory:800Mi] requests:map[cpu:100m memory:50Mi]] - |
- false | -
sampling | -integer | -
- sampling rate of the flow reporter. 100 means one flow on 100 is sent. 0 or 1 means all flows are sampled. - - Format: int32 - Default: 50 - Minimum: 0 - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
env | -map[string]string | -
- env allows passing custom environment variables to the NetObserv Agent. Useful for passing some very concrete performance-tuning options (such as GOGC, GOMAXPROCS) that shouldn't be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug and support scenarios. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
claims | -[]object | -
- Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.
- This is an alpha field and requires enabling the DynamicResourceAllocation feature gate.
- This field is immutable. It can only be set for containers. - |
- false | -
limits | -map[string]int or string | -
- Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - |
- false | -
requests | -map[string]int or string | -
- Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
name | -string | -
- Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. - |
- true | -
Name | -Type | -Description | -Required | -
---|---|---|---|
cacheActiveTimeout | -string | -
- cacheActiveTimeout is the max period during which the reporter will aggregate flows before sending - - Default: 20s - |
- false | -
cacheMaxFlows | -integer | -
- cacheMaxFlows is the max number of flows in an aggregate; when reached, the reporter sends the flows - - Format: int32 - Default: 400 - Minimum: 0 - |
- false | -
clusterNetworkOperator | -object | -
- clusterNetworkOperator defines the settings related to the OpenShift Cluster Network Operator, when available. - |
- false | -
forceSampleAll | -boolean | -
- forceSampleAll allows disabling sampling in the IPFIX-based flow reporter. It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to true. Use at your own risk. When it is set to true, the value of "sampling" is ignored. - - Default: false - |
- false | -
ovnKubernetes | -object | -
- ovnKubernetes defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead. - |
- false | -
sampling | -integer | -
- sampling is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to "forceSampleAll". Alternatively, you can use the eBPF Agent instead of IPFIX. - - Format: int32 - Default: 400 - Minimum: 2 - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
namespace | -string | -
- namespace where the config map is going to be deployed. - - Default: openshift-network-operator - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
containerName | -string | -
- containerName defines the name of the container to configure for IPFIX. - - Default: ovnkube-node - |
- false | -
daemonSetName | -string | -
- daemonSetName defines the name of the DaemonSet controlling the OVN-Kubernetes pods. - - Default: ovnkube-node - |
- false | -
namespace | -string | -
- namespace where OVN-Kubernetes pods are deployed. - - Default: ovn-kubernetes - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
register | -boolean | -
- register allows, when set to true, to automatically register the provided console plugin with the OpenShift Console operator. When set to false, you can still register it manually by editing console.operator.openshift.io/cluster. E.g: oc patch console.operator.openshift.io cluster --type='json' -p '[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]' - - Default: true - |
- true | -
autoscaler | -object | -
- autoscaler spec of a horizontal pod autoscaler to set up for the plugin Deployment. - |
- false | -
imagePullPolicy | -enum | -
- imagePullPolicy is the Kubernetes pull policy for the image defined above - - Enum: IfNotPresent, Always, Never - Default: IfNotPresent - |
- false | -
logLevel | -enum | -
- logLevel for the console plugin backend - - Enum: trace, debug, info, warn, error, fatal, panic - Default: info - |
- false | -
port | -integer | -
- port is the plugin service port - - Format: int32 - Default: 9001 - Minimum: 1 - Maximum: 65535 - |
- false | -
portNaming | -object | -
- portNaming defines the configuration of the port-to-service name translation - - Default: map[enable:true] - |
- false | -
quickFilters | -[]object | -
- quickFilters configures quick filter presets for the Console plugin - - Default: [map[default:true filter:map[dst_namespace!:openshift-,netobserv src_namespace!:openshift-,netobserv] name:Applications] map[filter:map[dst_namespace:openshift-,netobserv src_namespace:openshift-,netobserv] name:Infrastructure] map[default:true filter:map[dst_kind:Pod src_kind:Pod] name:Pods network] map[filter:map[dst_kind:Service] name:Services network]] - |
- false | -
replicas | -integer | -
- replicas defines the number of replicas (pods) to start. - - Format: int32 - Default: 1 - Minimum: 0 - |
- false | -
resources | -object | -
- resources, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - - Default: map[limits:map[memory:100Mi] requests:map[cpu:100m memory:50Mi]] - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
maxReplicas | -integer | -
- maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. - - Format: int32 - Default: 3 - |
- false | -
metrics | -[]object | -
- metrics used by the pod autoscaler - |
- false | -
minReplicas | -integer | -
- minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available. - - Format: int32 - |
- false | -
status | -enum | -
- Status describe the desired status regarding deploying an horizontal pod autoscaler DISABLED will not deploy an horizontal pod autoscaler ENABLED will deploy an horizontal pod autoscaler - - Enum: DISABLED, ENABLED - Default: DISABLED - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
type | -string | -
- type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available on when the feature-gate HPAContainerMetrics is enabled - |
- true | -
containerResource | -object | -
- containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. - |
- false | -
external | -object | -
- external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). - |
- false | -
object | -object | -
- object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object). - |
- false | -
pods | -object | -
- pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value. - |
- false | -
resource | -object | -
- resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
container | -string | -
- container is the name of the container in the pods of the scaling target - |
- true | -
name | -string | -
- name is the name of the resource in question. - |
- true | -
target | -object | -
- target specifies the target value for the given metric - |
- true | -
Name | -Type | -Description | -Required | -
---|---|---|---|
type | -string | -
- type represents whether the metric type is Utilization, Value, or AverageValue - |
- true | -
averageUtilization | -integer | -
- averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - - Format: int32 - |
- false | -
averageValue | -int or string | -
- averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - |
- false | -
value | -int or string | -
- value is the target value of the metric (as a quantity). - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
metric | -object | -
- metric identifies the target metric by name and selector - |
- true | -
target | -object | -
- target specifies the target value for the given metric - |
- true | -
Name | -Type | -Description | -Required | -
---|---|---|---|
name | -string | -
- name is the name of the given metric - |
- true | -
selector | -object | -
- selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
matchExpressions | -[]object | -
- matchExpressions is a list of label selector requirements. The requirements are ANDed. - |
- false | -
matchLabels | -map[string]string | -
- matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
key | -string | -
- key is the label key that the selector applies to. - |
- true | -
operator | -string | -
- operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - |
- true | -
values | -[]string | -
- values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
type | -string | -
- type represents whether the metric type is Utilization, Value, or AverageValue - |
- true | -
averageUtilization | -integer | -
- averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - - Format: int32 - |
- false | -
averageValue | -int or string | -
- averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - |
- false | -
value | -int or string | -
- value is the target value of the metric (as a quantity). - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
describedObject | -object | -
- describedObject specifies the description of an object, such as kind, name, apiVersion - |
- true | -
metric | -object | -
- metric identifies the target metric by name and selector - |
- true | -
target | -object | -
- target specifies the target value for the given metric - |
- true | -
Name | -Type | -Description | -Required | -
---|---|---|---|
kind | -string | -
- kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - |
- true | -
name | -string | -
- name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - |
- true | -
apiVersion | -string | -
- apiVersion is the API version of the referent - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
name | -string | -
- name is the name of the given metric - |
- true | -
selector | -object | -
- selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
matchExpressions | -[]object | -
- matchExpressions is a list of label selector requirements. The requirements are ANDed. - |
- false | -
matchLabels | -map[string]string | -
- matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
key | -string | -
- key is the label key that the selector applies to. - |
- true | -
operator | -string | -
- operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - |
- true | -
values | -[]string | -
- values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
type | -string | -
- type represents whether the metric type is Utilization, Value, or AverageValue - |
- true | -
averageUtilization | -integer | -
- averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - - Format: int32 - |
- false | -
averageValue | -int or string | -
- averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - |
- false | -
value | -int or string | -
- value is the target value of the metric (as a quantity). - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
metric | -object | -
- metric identifies the target metric by name and selector - |
- true | -
target | -object | -
- target specifies the target value for the given metric - |
- true | -
Name | -Type | -Description | -Required | -
---|---|---|---|
name | -string | -
- name is the name of the given metric - |
- true | -
selector | -object | -
- selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
matchExpressions | -[]object | -
- matchExpressions is a list of label selector requirements. The requirements are ANDed. - |
- false | -
matchLabels | -map[string]string | -
- matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
key | -string | -
- key is the label key that the selector applies to. - |
- true | -
operator | -string | -
- operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - |
- true | -
values | -[]string | -
- values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
type | -string | -
- type represents whether the metric type is Utilization, Value, or AverageValue - |
- true | -
averageUtilization | -integer | -
- averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - - Format: int32 - |
- false | -
averageValue | -int or string | -
- averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - |
- false | -
value | -int or string | -
- value is the target value of the metric (as a quantity). - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
name | -string | -
- name is the name of the resource in question. - |
- true | -
target | -object | -
- target specifies the target value for the given metric - |
- true | -
Name | -Type | -Description | -Required | -
---|---|---|---|
type | -string | -
- type represents whether the metric type is Utilization, Value, or AverageValue - |
- true | -
averageUtilization | -integer | -
- averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - - Format: int32 - |
- false | -
averageValue | -int or string | -
- averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - |
- false | -
value | -int or string | -
- value is the target value of the metric (as a quantity). - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
enable | -boolean | -
- enable the console plugin port-to-service name translation - - Default: true - |
- false | -
portNames | -map[string]string | -
- portNames defines additional port names to use in the console. Example: portNames: {"3100": "loki"} - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
filter | -map[string]string | -
- filter is a set of keys and values to be set when this filter is selected. Each key can relate to a list of values using a comma-separated string. Example: filter: {"src_namespace": "namespace1,namespace2"} - |
- true | -
name | -string | -
- name of the filter, that will be displayed in Console - |
- true | -
default | -boolean | -
- default defines whether this filter should be active by default or not - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
claims | -[]object | -
- Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.
- This is an alpha field and requires enabling the DynamicResourceAllocation feature gate.
- This field is immutable. It can only be set for containers. - |
- false | -
limits | -map[string]int or string | -
- Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - |
- false | -
requests | -map[string]int or string | -
- Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
name | -string | -
- Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. - |
- true | -
Name | -Type | -Description | -Required | -
---|---|---|---|
type | -enum | -
- `type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`. - - Enum: KAFKA, IPFIX - |
- true | -
ipfix | -object | -
- IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to. - |
- false | -
kafka | -object | -
- kafka configuration, such as address or topic, to send enriched flows to. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
targetHost | -string | -
- Address of the IPFIX external receiver - - Default: - |
- true | -
targetPort | -integer | -
- Port for the IPFIX external receiver - |
- true | -
transport | -enum | -
- Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`. - - Enum: TCP, UDP - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
address | -string | -
- address of the Kafka server - - Default: - |
- true | -
topic | -string | -
- kafka topic to use. It must exist, NetObserv will not create it. - - Default: - |
- true | -
sasl | -object | -
- SASL authentication configuration. [Unsupported (*)]. - |
- false | -
tls | -object | -
- tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, Kafka certificate needs to be copied in the agent namespace (by default it's netobserv-privileged). - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
clientIDReference | -object | -
- Reference to the secret or config map containing the client ID - |
- false | -
clientSecretReference | -object | -
- Reference to the secret or config map containing the client secret - |
- false | -
type | -enum | -
- Type of SASL authentication to use, or `DISABLED` if SASL is not used - - Enum: DISABLED, PLAIN, SCRAM-SHA512 - Default: DISABLED - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
file | -string | -
- File name within the config map or secret - |
- false | -
name | -string | -
- Name of the config map or secret containing the file - |
- false | -
namespace | -string | -
- Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - - Default: - |
- false | -
type | -enum | -
- Type for the file reference: "configmap" or "secret" - - Enum: configmap, secret - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
file | -string | -
- File name within the config map or secret - |
- false | -
name | -string | -
- Name of the config map or secret containing the file - |
- false | -
namespace | -string | -
- Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - - Default: - |
- false | -
type | -enum | -
- Type for the file reference: "configmap" or "secret" - - Enum: configmap, secret - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
caCert | -object | -
- caCert defines the reference of the certificate for the Certificate Authority - |
- false | -
enable | -boolean | -
- enable TLS - - Default: false - |
- false | -
insecureSkipVerify | -boolean | -
- insecureSkipVerify allows skipping client-side verification of the server certificate. If set to true, the CACert field is ignored. - - Default: false - |
- false | -
userCert | -object | -
- userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS) - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
certFile | -string | -
- certFile defines the path to the certificate file name within the config map or secret - |
- false | -
certKey | -string | -
- certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - |
- false | -
name | -string | -
- name of the config map or secret containing certificates - |
- false | -
namespace | -string | -
- namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - - Default: - |
- false | -
type | -enum | -
- type for the certificate reference: "configmap" or "secret" - - Enum: configmap, secret - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
certFile | -string | -
- certFile defines the path to the certificate file name within the config map or secret - |
- false | -
certKey | -string | -
- certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - |
- false | -
name | -string | -
- name of the config map or secret containing certificates - |
- false | -
namespace | -string | -
- namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - - Default: - |
- false | -
type | -enum | -
- type for the certificate reference: "configmap" or "secret" - - Enum: configmap, secret - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
address | -string | -
- address of the Kafka server - - Default: - |
- true | -
topic | -string | -
- kafka topic to use. It must exist, NetObserv will not create it. - - Default: - |
- true | -
sasl | -object | -
- SASL authentication configuration. [Unsupported (*)]. - |
- false | -
tls | -object | -
- tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, Kafka certificate needs to be copied in the agent namespace (by default it's netobserv-privileged). - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
clientIDReference | -object | -
- Reference to the secret or config map containing the client ID - |
- false | -
clientSecretReference | -object | -
- Reference to the secret or config map containing the client secret - |
- false | -
type | -enum | -
- Type of SASL authentication to use, or `DISABLED` if SASL is not used - - Enum: DISABLED, PLAIN, SCRAM-SHA512 - Default: DISABLED - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
file | -string | -
- File name within the config map or secret - |
- false | -
name | -string | -
- Name of the config map or secret containing the file - |
- false | -
namespace | -string | -
- Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - - Default: - |
- false | -
type | -enum | -
- Type for the file reference: "configmap" or "secret" - - Enum: configmap, secret - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
file | -string | -
- File name within the config map or secret - |
- false | -
name | -string | -
- Name of the config map or secret containing the file - |
- false | -
namespace | -string | -
- Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required. - - Default: - |
- false | -
type | -enum | -
- Type for the file reference: "configmap" or "secret" - - Enum: configmap, secret - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
caCert | -object | -
- caCert defines the reference of the certificate for the Certificate Authority - |
- false | -
enable | -boolean | -
- enable TLS - - Default: false - |
- false | -
insecureSkipVerify | -boolean | -
- insecureSkipVerify allows skipping client-side verification of the server certificate If set to true, CACert field will be ignored - - Default: false - |
- false | -
userCert | -object | -
- userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS) - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
certFile | -string | -
- certFile defines the path to the certificate file name within the config map or secret - |
- false | -
certKey | -string | -
- certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - |
- false | -
name | -string | -
- name of the config map or secret containing certificates - |
- false | -
namespace | -string | -
- namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - - Default: - |
- false | -
type | -enum | -
- type for the certificate reference: "configmap" or "secret" - - Enum: configmap, secret - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
certFile | -string | -
- certFile defines the path to the certificate file name within the config map or secret - |
- false | -
certKey | -string | -
- certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - |
- false | -
name | -string | -
- name of the config map or secret containing certificates - |
- false | -
namespace | -string | -
- namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - - Default: - |
- false | -
type | -enum | -
- type for the certificate reference: "configmap" or "secret" - - Enum: configmap, secret - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
authToken | -enum | -
- AuthToken describes the way to get a token to authenticate to Loki. DISABLED will not send any token with the request. HOST will use the local pod service account to authenticate to Loki. FORWARD will forward the user token; in this mode, pods that do not receive user requests, such as the processor, will use the local pod service account, similar to HOST mode. When using the Loki Operator, set it to `HOST` or `FORWARD`. - - Enum: DISABLED, HOST, FORWARD - Default: DISABLED - |
- false | -
batchSize | -integer | -
- batchSize is max batch size (in bytes) of logs to accumulate before sending. - - Format: int64 - Default: 102400 - Minimum: 1 - |
- false | -
batchWait | -string | -
- batchWait is max time to wait before sending a batch. - - Default: 1s - |
- false | -
maxBackoff | -string | -
- maxBackoff is the maximum backoff time for client connection between retries. - - Default: 5s - |
- false | -
maxRetries | -integer | -
- maxRetries is the maximum number of retries for client connections. - - Format: int32 - Default: 2 - Minimum: 0 - |
- false | -
minBackoff | -string | -
- minBackoff is the initial backoff time for client connection between retries. - - Default: 1s - |
- false | -
querierUrl | -string | -
- querierURL specifies the address of the Loki querier service, in case it is different from the Loki ingester URL. If empty, the URL value will be used (assuming that the Loki ingester and querier are in the same server). When using the Loki Operator, do not set it, since ingestion and queries use the Loki gateway. - |
- false | -
staticLabels | -map[string]string | -
- staticLabels is a map of common labels to set on each flow. - - Default: map[app:netobserv-flowcollector] - |
- false | -
statusUrl | -string | -
- statusURL specifies the address of the Loki /ready /metrics /config endpoints, in case it is different from the Loki querier URL. If empty, the QuerierURL value will be used. This is useful to show error messages and some context in the frontend. When using the Loki Operator, set it to the Loki HTTP query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/. - |
- false | -
tenantID | -string | -
- tenantID is the Loki X-Scope-OrgID that identifies the tenant for each request. When using the Loki Operator, set it to `network`, which corresponds to a special tenant mode. - - Default: netobserv - |
- false | -
timeout | -string | -
- timeout is the maximum time connection / request limit. A Timeout of zero means no timeout. - - Default: 10s - |
- false | -
tls | -object | -
- tls client configuration. - |
- false | -
url | -string | -
- url is the address of an existing Loki service to push the flows to. When using the Loki Operator, set it to the Loki gateway service with the `network` tenant set in path, for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network. - - Default: http://loki:3100/ - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
caCert | -object | -
- caCert defines the reference of the certificate for the Certificate Authority - |
- false | -
enable | -boolean | -
- enable TLS - - Default: false - |
- false | -
insecureSkipVerify | -boolean | -
- insecureSkipVerify allows skipping client-side verification of the server certificate. If set to true, the CACert field is ignored. - - Default: false - |
- false | -
userCert | -object | -
- userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS) - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
certFile | -string | -
- certFile defines the path to the certificate file name within the config map or secret - |
- false | -
certKey | -string | -
- certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - |
- false | -
name | -string | -
- name of the config map or secret containing certificates - |
- false | -
namespace | -string | -
- namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - - Default: - |
- false | -
type | -enum | -
- type for the certificate reference: "configmap" or "secret" - - Enum: configmap, secret - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
certFile | -string | -
- certFile defines the path to the certificate file name within the config map or secret - |
- false | -
certKey | -string | -
- certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - |
- false | -
name | -string | -
- name of the config map or secret containing certificates - |
- false | -
namespace | -string | -
- namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - - Default: - |
- false | -
type | -enum | -
- type for the certificate reference: "configmap" or "secret" - - Enum: configmap, secret - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
debug | -object | -
- Debug allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk. - |
- false | -
dropUnusedFields | -boolean | -
- dropUnusedFields allows, when set to true, to drop fields that are known to be unused by OVS, in order to save storage space. - - Default: true - |
- false | -
enableKubeProbes | -boolean | -
- enableKubeProbes is a flag to enable or disable Kubernetes liveness and readiness probes - - Default: true - |
- false | -
healthPort | -integer | -
- healthPort is a collector HTTP port in the Pod that exposes the health check API - - Format: int32 - Default: 8080 - Minimum: 1 - Maximum: 65535 - |
- false | -
imagePullPolicy | -enum | -
- imagePullPolicy is the Kubernetes pull policy for the image defined above - - Enum: IfNotPresent, Always, Never - Default: IfNotPresent - |
- false | -
kafkaConsumerAutoscaler | -object | -
- kafkaConsumerAutoscaler spec of a horizontal pod autoscaler to set up for flowlogs-pipeline-transformer, which consumes Kafka messages. This setting is ignored when Kafka is disabled. - |
- false | -
kafkaConsumerBatchSize | -integer | -
- kafkaConsumerBatchSize indicates to the broker the maximum batch size, in bytes, that the consumer will accept. Ignored when not using Kafka. Default: 10MB. - - Default: 10485760 - |
- false | -
kafkaConsumerQueueCapacity | -integer | -
- kafkaConsumerQueueCapacity defines the capacity of the internal message queue used in the Kafka consumer client. Ignored when not using Kafka. - - Default: 1000 - |
- false | -
kafkaConsumerReplicas | -integer | -
- kafkaConsumerReplicas defines the number of replicas (pods) to start for flowlogs-pipeline-transformer, which consumes Kafka messages. This setting is ignored when Kafka is disabled. - - Format: int32 - Default: 3 - Minimum: 0 - |
- false | -
logLevel | -enum | -
- logLevel of the collector runtime - - Enum: trace, debug, info, warn, error, fatal, panic - Default: info - |
- false | -
metrics | -object | -
- Metrics define the processor configuration regarding metrics - |
- false | -
port | -integer | -
- port of the flow collector (host port). By convention, some values are not authorized: the port must not be below 1024 and must not equal any of these values: 4789, 6081, 500, and 4500 - - Format: int32 - Default: 2055 - Minimum: 1025 - Maximum: 65535 - |
- false | -
profilePort | -integer | -
- profilePort allows setting up a Go pprof profiler listening to this port - - Format: int32 - Minimum: 0 - Maximum: 65535 - |
- false | -
resources | -object | -
- resources are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - - Default: map[limits:map[memory:800Mi] requests:map[cpu:100m memory:100Mi]] - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
env | -map[string]string | -
- env allows passing custom environment variables to the NetObserv Agent. Useful for passing some very concrete performance-tuning options (such as GOGC, GOMAXPROCS) that shouldn't be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug and support scenarios. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
maxReplicas | -integer | -
- maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. - - Format: int32 - Default: 3 - |
- false | -
metrics | -[]object | -
- metrics used by the pod autoscaler - |
- false | -
minReplicas | -integer | -
- minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available. - - Format: int32 - |
- false | -
status | -enum | -
- Status describes the desired status regarding deploying a horizontal pod autoscaler. DISABLED will not deploy a horizontal pod autoscaler; ENABLED will deploy one. - - Enum: DISABLED, ENABLED - Default: DISABLED - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
type | -string | -
- type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available only when the feature-gate HPAContainerMetrics is enabled - |
- true | -
containerResource | -object | -
- containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. - |
- false | -
external | -object | -
- external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). - |
- false | -
object | -object | -
- object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object). - |
- false | -
pods | -object | -
- pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value. - |
- false | -
resource | -object | -
- resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
container | -string | -
- container is the name of the container in the pods of the scaling target - |
- true | -
name | -string | -
- name is the name of the resource in question. - |
- true | -
target | -object | -
- target specifies the target value for the given metric - |
- true | -
Name | -Type | -Description | -Required | -
---|---|---|---|
type | -string | -
- type represents whether the metric type is Utilization, Value, or AverageValue - |
- true | -
averageUtilization | -integer | -
- averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - - Format: int32 - |
- false | -
averageValue | -int or string | -
- averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - |
- false | -
value | -int or string | -
- value is the target value of the metric (as a quantity). - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
metric | -object | -
- metric identifies the target metric by name and selector - |
- true | -
target | -object | -
- target specifies the target value for the given metric - |
- true | -
Name | -Type | -Description | -Required | -
---|---|---|---|
name | -string | -
- name is the name of the given metric - |
- true | -
selector | -object | -
- selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
matchExpressions | -[]object | -
- matchExpressions is a list of label selector requirements. The requirements are ANDed. - |
- false | -
matchLabels | -map[string]string | -
- matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
key | -string | -
- key is the label key that the selector applies to. - |
- true | -
operator | -string | -
- operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - |
- true | -
values | -[]string | -
- values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
type | -string | -
- type represents whether the metric type is Utilization, Value, or AverageValue - |
- true | -
averageUtilization | -integer | -
- averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - - Format: int32 - |
- false | -
averageValue | -int or string | -
- averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - |
- false | -
value | -int or string | -
- value is the target value of the metric (as a quantity). - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
describedObject | -object | -
- describedObject specifies the description of an object, such as kind, name, and apiVersion - |
- true | -
metric | -object | -
- metric identifies the target metric by name and selector - |
- true | -
target | -object | -
- target specifies the target value for the given metric - |
- true | -
Name | -Type | -Description | -Required | -
---|---|---|---|
kind | -string | -
- kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - |
- true | -
name | -string | -
- name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - |
- true | -
apiVersion | -string | -
- apiVersion is the API version of the referent - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
name | -string | -
- name is the name of the given metric - |
- true | -
selector | -object | -
- selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
matchExpressions | -[]object | -
- matchExpressions is a list of label selector requirements. The requirements are ANDed. - |
- false | -
matchLabels | -map[string]string | -
- matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
key | -string | -
- key is the label key that the selector applies to. - |
- true | -
operator | -string | -
- operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - |
- true | -
values | -[]string | -
- values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
type | -string | -
- type represents whether the metric type is Utilization, Value, or AverageValue - |
- true | -
averageUtilization | -integer | -
- averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - - Format: int32 - |
- false | -
averageValue | -int or string | -
- averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - |
- false | -
value | -int or string | -
- value is the target value of the metric (as a quantity). - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
metric | -object | -
- metric identifies the target metric by name and selector - |
- true | -
target | -object | -
- target specifies the target value for the given metric - |
- true | -
Name | -Type | -Description | -Required | -
---|---|---|---|
name | -string | -
- name is the name of the given metric - |
- true | -
selector | -object | -
- selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
matchExpressions | -[]object | -
- matchExpressions is a list of label selector requirements. The requirements are ANDed. - |
- false | -
matchLabels | -map[string]string | -
- matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
key | -string | -
- key is the label key that the selector applies to. - |
- true | -
operator | -string | -
- operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - |
- true | -
values | -[]string | -
- values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
type | -string | -
- type represents whether the metric type is Utilization, Value, or AverageValue - |
- true | -
averageUtilization | -integer | -
- averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - - Format: int32 - |
- false | -
averageValue | -int or string | -
- averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - |
- false | -
value | -int or string | -
- value is the target value of the metric (as a quantity). - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
name | -string | -
- name is the name of the resource in question. - |
- true | -
target | -object | -
- target specifies the target value for the given metric - |
- true | -
Name | -Type | -Description | -Required | -
---|---|---|---|
type | -string | -
- type represents whether the metric type is Utilization, Value, or AverageValue - |
- true | -
averageUtilization | -integer | -
- averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type - - Format: int32 - |
- false | -
averageValue | -int or string | -
- averageValue is the target value of the average of the metric across all relevant pods (as a quantity) - |
- false | -
value | -int or string | -
- value is the target value of the metric (as a quantity). - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
ignoreTags | -[]string | -
- ignoreTags is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: egress, ingress, flows, bytes, packets, namespaces, nodes, workloads - - Default: [egress packets] - |
- false | -
server | -object | -
- metricsServer endpoint configuration for Prometheus scraper - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
port | -integer | -
- the Prometheus HTTP port - - Format: int32 - Default: 9102 - Minimum: 1 - Maximum: 65535 - |
- false | -
tls | -object | -
- TLS configuration. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
provided | -object | -
- TLS configuration. - |
- false | -
type | -enum | -
- Select the type of TLS configuration: "DISABLED" (default) to not configure TLS for the endpoint, "PROVIDED" to manually provide a cert file and a key file, and "AUTO" to use an OpenShift auto-generated certificate using annotations - - Enum: DISABLED, PROVIDED, AUTO - Default: DISABLED - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
certFile | -string | -
- certFile defines the path to the certificate file name within the config map or secret - |
- false | -
certKey | -string | -
- certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary. - |
- false | -
name | -string | -
- name of the config map or secret containing certificates - |
- false | -
namespace | -string | -
- namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required. - - Default: - |
- false | -
type | -enum | -
- type for the certificate reference: "configmap" or "secret" - - Enum: configmap, secret - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
claims | -[]object | -
- Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.
- This is an alpha field and requires enabling the DynamicResourceAllocation feature gate.
- This field is immutable. It can only be set for containers. - |
- false | -
limits | -map[string]int or string | -
- Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - |
- false | -
requests | -map[string]int or string | -
- Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
name | -string | -
- Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. - |
- true | -
Name | -Type | -Description | -Required | -
---|---|---|---|
conditions | -[]object | -
- conditions represent the latest available observations of an object's state - |
- true | -
namespace | -string | -
- namespace where console plugin and flowlogs-pipeline have been deployed. - |
- false | -
Name | -Type | -Description | -Required | -
---|---|---|---|
lastTransitionTime | -string | -
- lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - - Format: date-time - |
- true | -
message | -string | -
- message is a human readable message indicating details about the transition. This may be an empty string. - |
- true | -
reason | -string | -
- reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - |
- true | -
status | -enum | -
- status of the condition, one of True, False, Unknown. - - Enum: True, False, Unknown - |
- true | -
type | -string | -
- type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - |
- true | -
observedGeneration | -integer | -
- observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - - Format: int64 - Minimum: 0 - |
- false | -