From 45ba20442cde824f83cc45764cf4003c5777fc13 Mon Sep 17 00:00:00 2001 From: Alex Boten <223565+codeboten@users.noreply.github.com> Date: Fri, 11 Oct 2024 00:49:40 -0700 Subject: [PATCH] config: support v0.3 of the config schema (#6126) Adding support for [v0.3.0 release](https://github.com/open-telemetry/opentelemetry-configuration/releases/tag/v0.3.0) of otel configuration. As part of the changes, I moved the unmarshaling code into its own file for each format (config_json.go and config_yaml.go) to ensure the resulting struct is consistent. Validated this through unit tests --------- Signed-off-by: Alex Boten <223565+codeboten@users.noreply.github.com> Co-authored-by: Tyler Yahn Co-authored-by: Damien Mathieu <42@dmathieu.com> --- CHANGELOG.md | 4 + Makefile | 3 +- config/config.go | 8 + config/config_json.go | 374 +++++++++++++++++++++++++++++ config/config_test.go | 183 ++++++++++++--- config/config_yaml.go | 42 ++++ config/generated_config.go | 428 +++++++++++++--------------------- config/log.go | 12 +- config/log_test.go | 78 +++---- config/metric.go | 103 +++++--- config/metric_test.go | 285 +++++++++++++---------- config/resource.go | 4 +- config/resource_test.go | 42 ++-- config/testdata/v0.3.json | 419 +++++++++++++++++++++++++++++++++ config/testdata/v0.3.yaml | 466 +++++++++++++++++++++++++++++++++++++ config/trace.go | 20 +- config/trace_test.go | 94 ++++---- 17 files changed, 1969 insertions(+), 596 deletions(-) create mode 100644 config/config_json.go create mode 100644 config/config_yaml.go create mode 100644 config/testdata/v0.3.json create mode 100644 config/testdata/v0.3.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md index 554cdb94587..3bccf8cf5d2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,10 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - The deprecated `go.opentelemetry.io/contrib/instrumentation/gopkg.in/macaron.v1/otelmacaron` package is removed. 
(#6186) - The deprecated `go.opentelemetry.io/contrib/samplers/aws/xray` package is removed. (#6187) +### Changed + +- Updated `go.opentelemetry.io/contrib/config` to use the [v0.3.0](https://github.com/open-telemetry/opentelemetry-configuration/releases/tag/v0.3.0) release of schema which includes backwards incompatible changes. (#6126) + diff --git a/Makefile b/Makefile index 5489e46e065..7538aa25320 100644 --- a/Makefile +++ b/Makefile @@ -309,7 +309,7 @@ update-all-otel-deps: OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR=tmp/opentelememetry-configuration # The SHA matching the current version of the opentelemetry-configuration schema to use -OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_VERSION=v0.2.0 +OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_VERSION=v0.3.0 # Cleanup temporary directory genjsonschema-cleanup: @@ -326,6 +326,7 @@ genjsonschema: genjsonschema-cleanup $(GOJSONSCHEMA) --capitalization OTLP \ --struct-name-from-title \ --package config \ + --only-models \ --output ${GENERATED_CONFIG} \ ${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR}/schema/opentelemetry_configuration.json @echo Modify jsonschema generated files. diff --git a/config/config.go b/config/config.go index d8357b76ca2..e2599766fe3 100644 --- a/config/config.go +++ b/config/config.go @@ -150,3 +150,11 @@ func ParseYAML(file []byte) (*OpenTelemetryConfiguration, error) { return &cfg, nil } + +func toStringMap(pairs []NameStringValuePair) map[string]string { + output := make(map[string]string) + for _, v := range pairs { + output[v.Name] = *v.Value + } + return output +} diff --git a/config/config_json.go b/config/config_json.go new file mode 100644 index 00000000000..147c595d91b --- /dev/null +++ b/config/config_json.go @@ -0,0 +1,374 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package config // import "go.opentelemetry.io/contrib/config" + +import ( + "encoding/json" + "fmt" + "reflect" +) + +// MarshalJSON implements json.Marshaler. 
+func (j *AttributeNameValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(j.Value) +} + +var enumValuesAttributeNameValueType = []interface{}{ + nil, + "string", + "bool", + "int", + "double", + "string_array", + "bool_array", + "int_array", + "double_array", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *AttributeNameValueType) UnmarshalJSON(b []byte) error { + var v struct { + Value interface{} + } + if err := json.Unmarshal(b, &v.Value); err != nil { + return err + } + var ok bool + for _, expected := range enumValuesAttributeNameValueType { + if reflect.DeepEqual(v.Value, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValuesAttributeNameValueType, v.Value) + } + *j = AttributeNameValueType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *BatchLogRecordProcessor) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["exporter"]; raw != nil && !ok { + return fmt.Errorf("field exporter in BatchLogRecordProcessor: required") + } + type Plain BatchLogRecordProcessor + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = BatchLogRecordProcessor(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *BatchSpanProcessor) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["exporter"]; raw != nil && !ok { + return fmt.Errorf("field exporter in BatchSpanProcessor: required") + } + type Plain BatchSpanProcessor + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = BatchSpanProcessor(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *GeneralInstrumentationPeerServiceMappingElem) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["peer"]; raw != nil && !ok { + return fmt.Errorf("field peer in GeneralInstrumentationPeerServiceMappingElem: required") + } + if _, ok := raw["service"]; raw != nil && !ok { + return fmt.Errorf("field service in GeneralInstrumentationPeerServiceMappingElem: required") + } + type Plain GeneralInstrumentationPeerServiceMappingElem + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = GeneralInstrumentationPeerServiceMappingElem(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *NameStringValuePair) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["name"]; raw != nil && !ok { + return fmt.Errorf("field name in NameStringValuePair: required") + } + if _, ok := raw["value"]; raw != nil && !ok { + return fmt.Errorf("field value in NameStringValuePair: required") + } + type Plain NameStringValuePair + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = NameStringValuePair(plain) + return nil +} + +var enumValuesOTLPMetricDefaultHistogramAggregation = []interface{}{ + "explicit_bucket_histogram", + "base2_exponential_bucket_histogram", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *OTLPMetricDefaultHistogramAggregation) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValuesOTLPMetricDefaultHistogramAggregation { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValuesOTLPMetricDefaultHistogramAggregation, v) + } + *j = OTLPMetricDefaultHistogramAggregation(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *OTLPMetric) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["endpoint"]; raw != nil && !ok { + return fmt.Errorf("field endpoint in OTLPMetric: required") + } + if _, ok := raw["protocol"]; raw != nil && !ok { + return fmt.Errorf("field protocol in OTLPMetric: required") + } + type Plain OTLPMetric + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = OTLPMetric(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *OTLP) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["endpoint"]; raw != nil && !ok { + return fmt.Errorf("field endpoint in OTLP: required") + } + if _, ok := raw["protocol"]; raw != nil && !ok { + return fmt.Errorf("field protocol in OTLP: required") + } + type Plain OTLP + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = OTLP(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *OpenTelemetryConfiguration) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["file_format"]; raw != nil && !ok { + return fmt.Errorf("field file_format in OpenTelemetryConfiguration: required") + } + type Plain OpenTelemetryConfiguration + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = OpenTelemetryConfiguration(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *PeriodicMetricReader) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["exporter"]; raw != nil && !ok { + return fmt.Errorf("field exporter in PeriodicMetricReader: required") + } + type Plain PeriodicMetricReader + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = PeriodicMetricReader(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *PullMetricReader) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["exporter"]; raw != nil && !ok { + return fmt.Errorf("field exporter in PullMetricReader: required") + } + type Plain PullMetricReader + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = PullMetricReader(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SimpleLogRecordProcessor) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["exporter"]; raw != nil && !ok { + return fmt.Errorf("field exporter in SimpleLogRecordProcessor: required") + } + type Plain SimpleLogRecordProcessor + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SimpleLogRecordProcessor(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SimpleSpanProcessor) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["exporter"]; raw != nil && !ok { + return fmt.Errorf("field exporter in SimpleSpanProcessor: required") + } + type Plain SimpleSpanProcessor + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SimpleSpanProcessor(plain) + return nil +} + +var enumValuesViewSelectorInstrumentType = []interface{}{ + "counter", + "histogram", + "observable_counter", + "observable_gauge", + "observable_up_down_counter", + "up_down_counter", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *ViewSelectorInstrumentType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValuesViewSelectorInstrumentType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValuesViewSelectorInstrumentType, v) + } + *j = ViewSelectorInstrumentType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *Zipkin) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["endpoint"]; raw != nil && !ok { + return fmt.Errorf("field endpoint in Zipkin: required") + } + type Plain Zipkin + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = Zipkin(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *AttributeNameValue) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if _, ok := raw["name"]; raw != nil && !ok { + return fmt.Errorf("field name in AttributeNameValue: required") + } + if _, ok := raw["value"]; raw != nil && !ok { + return fmt.Errorf("field value in AttributeNameValue: required") + } + type Plain AttributeNameValue + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if plain.Type != nil && plain.Type.Value == "int" { + val, ok := plain.Value.(float64) + if ok { + plain.Value = int(val) + } + } + if plain.Type != nil && plain.Type.Value == "int_array" { + m, ok := plain.Value.([]interface{}) + if ok { + var vals []interface{} + for _, v := range m { + val, ok := v.(float64) + if ok { + vals = append(vals, int(val)) + } else { + vals = append(vals, val) + } + } + plain.Value = vals + } + } + + *j = AttributeNameValue(plain) + return nil +} diff --git a/config/config_test.go b/config/config_test.go index c6b33b66e12..cdc3ddcd45b 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -78,13 +78,88 @@ func TestNewSDK(t *testing.T) { } } -var v02OpenTelemetryConfig = OpenTelemetryConfiguration{ +var v03OpenTelemetryConfig = OpenTelemetryConfiguration{ Disabled: ptr(false), - FileFormat: "0.2", + FileFormat: ptr("0.3"), AttributeLimits: &AttributeLimits{ AttributeCountLimit: ptr(128), AttributeValueLengthLimit: ptr(4096), }, + Instrumentation: &Instrumentation{ + 
Cpp: LanguageSpecificInstrumentation{ + "example": map[string]interface{}{ + "property": "value", + }, + }, + Dotnet: LanguageSpecificInstrumentation{ + "example": map[string]interface{}{ + "property": "value", + }, + }, + Erlang: LanguageSpecificInstrumentation{ + "example": map[string]interface{}{ + "property": "value", + }, + }, + General: &GeneralInstrumentation{ + Http: &GeneralInstrumentationHttp{ + Client: &GeneralInstrumentationHttpClient{ + RequestCapturedHeaders: []string{"Content-Type", "Accept"}, + ResponseCapturedHeaders: []string{"Content-Type", "Content-Encoding"}, + }, + Server: &GeneralInstrumentationHttpServer{ + RequestCapturedHeaders: []string{"Content-Type", "Accept"}, + ResponseCapturedHeaders: []string{"Content-Type", "Content-Encoding"}, + }, + }, + Peer: &GeneralInstrumentationPeer{ + ServiceMapping: []GeneralInstrumentationPeerServiceMappingElem{ + {Peer: "1.2.3.4", Service: "FooService"}, + {Peer: "2.3.4.5", Service: "BarService"}, + }, + }, + }, + Go: LanguageSpecificInstrumentation{ + "example": map[string]interface{}{ + "property": "value", + }, + }, + Java: LanguageSpecificInstrumentation{ + "example": map[string]interface{}{ + "property": "value", + }, + }, + Js: LanguageSpecificInstrumentation{ + "example": map[string]interface{}{ + "property": "value", + }, + }, + Php: LanguageSpecificInstrumentation{ + "example": map[string]interface{}{ + "property": "value", + }, + }, + Python: LanguageSpecificInstrumentation{ + "example": map[string]interface{}{ + "property": "value", + }, + }, + Ruby: LanguageSpecificInstrumentation{ + "example": map[string]interface{}{ + "property": "value", + }, + }, + Rust: LanguageSpecificInstrumentation{ + "example": map[string]interface{}{ + "property": "value", + }, + }, + Swift: LanguageSpecificInstrumentation{ + "example": map[string]interface{}{ + "property": "value", + }, + }, + }, LoggerProvider: &LoggerProvider{ Limits: &LogRecordLimits{ AttributeCountLimit: ptr(128), @@ -100,13 +175,14 @@ var 
v02OpenTelemetryConfig = OpenTelemetryConfiguration{ ClientCertificate: ptr("/app/cert.pem"), ClientKey: ptr("/app/cert.pem"), Compression: ptr("gzip"), - Endpoint: "http://localhost:4318", - Headers: Headers{ - "api-key": "1234", + Endpoint: ptr("http://localhost:4318/v1/logs"), + Headers: []NameStringValuePair{ + {Name: "api-key", Value: ptr("1234")}, }, - Insecure: ptr(false), - Protocol: "http/protobuf", - Timeout: ptr(10000), + HeadersList: ptr("api-key=1234"), + Insecure: ptr(false), + Protocol: ptr("http/protobuf"), + Timeout: ptr(10000), }, }, MaxExportBatchSize: ptr(512), @@ -126,8 +202,11 @@ var v02OpenTelemetryConfig = OpenTelemetryConfiguration{ MeterProvider: &MeterProvider{ Readers: []MetricReader{ { + Producers: []MetricProducer{ + {Opencensus: MetricProducerOpencensus{}}, + }, Pull: &PullMetricReader{ - Exporter: MetricExporter{ + Exporter: PullMetricExporter{ Prometheus: &Prometheus{ Host: ptr("localhost"), Port: ptr(9464), @@ -143,20 +222,24 @@ var v02OpenTelemetryConfig = OpenTelemetryConfiguration{ }, }, { + Producers: []MetricProducer{ + {}, + }, Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ Certificate: ptr("/app/cert.pem"), ClientCertificate: ptr("/app/cert.pem"), ClientKey: ptr("/app/cert.pem"), Compression: ptr("gzip"), DefaultHistogramAggregation: ptr(OTLPMetricDefaultHistogramAggregationBase2ExponentialBucketHistogram), - Endpoint: "http://localhost:4318", - Headers: Headers{ - "api-key": "1234", + Endpoint: ptr("http://localhost:4318/v1/metrics"), + Headers: []NameStringValuePair{ + {Name: "api-key", Value: ptr("1234")}, }, + HeadersList: ptr("api-key=1234"), Insecure: ptr(false), - Protocol: "http/protobuf", + Protocol: ptr("http/protobuf"), TemporalityPreference: ptr("delta"), Timeout: ptr(10000), }, @@ -167,7 +250,7 @@ var v02OpenTelemetryConfig = OpenTelemetryConfiguration{ }, { Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: 
PushMetricExporter{ Console: Console{}, }, }, @@ -190,20 +273,32 @@ var v02OpenTelemetryConfig = OpenTelemetryConfiguration{ RecordMinMax: ptr(true), }, }, - AttributeKeys: []string{"key1", "key2"}, - Description: ptr("new_description"), - Name: ptr("new_instrument_name"), + AttributeKeys: &IncludeExclude{ + Included: []string{"key1", "key2"}, + Excluded: []string{"key3"}, + }, + Description: ptr("new_description"), + Name: ptr("new_instrument_name"), }, }, }, }, Propagator: &Propagator{ - Composite: []string{"tracecontext", "baggage", "b3", "b3multi", "jaeger", "xray", "ottrace"}, + Composite: []*string{ptr("tracecontext"), ptr("baggage"), ptr("b3"), ptr("b3multi"), ptr("jaeger"), ptr("xray"), ptr("ottrace")}, }, Resource: &Resource{ - Attributes: Attributes{ - "service.name": "unknown_service", + Attributes: []AttributeNameValue{ + {Name: "service.name", Value: "unknown_service"}, + {Name: "string_key", Type: &AttributeNameValueType{Value: "string"}, Value: "value"}, + {Name: "bool_key", Type: &AttributeNameValueType{Value: "bool"}, Value: true}, + {Name: "int_key", Type: &AttributeNameValueType{Value: "int"}, Value: 1}, + {Name: "double_key", Type: &AttributeNameValueType{Value: "double"}, Value: 1.1}, + {Name: "string_array_key", Type: &AttributeNameValueType{Value: "string_array"}, Value: []interface{}{"value1", "value2"}}, + {Name: "bool_array_key", Type: &AttributeNameValueType{Value: "bool_array"}, Value: []interface{}{true, false}}, + {Name: "int_array_key", Type: &AttributeNameValueType{Value: "int_array"}, Value: []interface{}{1, 2}}, + {Name: "double_array_key", Type: &AttributeNameValueType{Value: "double_array"}, Value: []interface{}{1.1, 2.2}}, }, + AttributesList: ptr("service.namespace=my-namespace,service.version=1.0.0"), Detectors: &Detectors{ Attributes: &DetectorsAttributes{ Excluded: []string{"process.command_args"}, @@ -231,13 +326,14 @@ var v02OpenTelemetryConfig = OpenTelemetryConfiguration{ ClientCertificate: ptr("/app/cert.pem"), 
ClientKey: ptr("/app/cert.pem"), Compression: ptr("gzip"), - Endpoint: "http://localhost:4318", - Headers: Headers{ - "api-key": "1234", + Endpoint: ptr("http://localhost:4318/v1/traces"), + Headers: []NameStringValuePair{ + {Name: "api-key", Value: ptr("1234")}, }, - Insecure: ptr(false), - Protocol: "http/protobuf", - Timeout: ptr(10000), + HeadersList: ptr("api-key=1234"), + Insecure: ptr(false), + Protocol: ptr("http/protobuf"), + Timeout: ptr(10000), }, }, MaxExportBatchSize: ptr(512), @@ -249,7 +345,7 @@ var v02OpenTelemetryConfig = OpenTelemetryConfiguration{ Batch: &BatchSpanProcessor{ Exporter: SpanExporter{ Zipkin: &Zipkin{ - Endpoint: "http://localhost:9411/api/v2/spans", + Endpoint: ptr("http://localhost:9411/api/v2/spans"), Timeout: ptr(10000), }, }, @@ -300,7 +396,7 @@ func TestParseYAML(t *testing.T) { wantErr: nil, wantType: &OpenTelemetryConfiguration{ Disabled: ptr(false), - FileFormat: "0.1", + FileFormat: ptr("0.1"), }, }, { @@ -310,9 +406,19 @@ func TestParseYAML(t *testing.T) { line 2: cannot unmarshal !!str ` + "`notabool`" + ` into bool`), }, { - name: "valid v0.2 config", - input: "v0.2.yaml", - wantType: &v02OpenTelemetryConfig, + name: "valid v0.2 config", + input: "v0.2.yaml", + wantErr: errors.New(`yaml: unmarshal errors: + line 81: cannot unmarshal !!map into []config.NameStringValuePair + line 185: cannot unmarshal !!map into []config.NameStringValuePair + line 244: cannot unmarshal !!seq into config.IncludeExclude + line 305: cannot unmarshal !!map into []config.NameStringValuePair + line 408: cannot unmarshal !!map into []config.AttributeNameValue`), + }, + { + name: "valid v0.3 config", + input: "v0.3.yaml", + wantType: &v03OpenTelemetryConfig, }, } @@ -345,7 +451,7 @@ func TestSerializeJSON(t *testing.T) { wantErr: nil, wantType: OpenTelemetryConfiguration{ Disabled: ptr(false), - FileFormat: "0.1", + FileFormat: ptr("0.1"), }, }, { @@ -354,9 +460,14 @@ func TestSerializeJSON(t *testing.T) { wantErr: errors.New(`json: cannot 
unmarshal string into Go struct field Plain.disabled of type bool`), }, { - name: "valid v0.2 config", - input: "v0.2.json", - wantType: v02OpenTelemetryConfig, + name: "valid v0.2 config", + input: "v0.2.json", + wantErr: errors.New(`json: cannot unmarshal object into Go struct field LogRecordProcessor.logger_provider.processors.batch of type []config.NameStringValuePair`), + }, + { + name: "valid v0.3 config", + input: "v0.3.json", + wantType: v03OpenTelemetryConfig, }, } diff --git a/config/config_yaml.go b/config/config_yaml.go new file mode 100644 index 00000000000..88234178880 --- /dev/null +++ b/config/config_yaml.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package config // import "go.opentelemetry.io/contrib/config" + +import ( + "fmt" + "reflect" +) + +// UnmarshalYAML implements yaml.Unmarshaler. +func (j *AttributeNameValueType) UnmarshalYAML(unmarshal func(interface{}) error) error { + var v struct { + Value interface{} + } + if err := unmarshal(&v.Value); err != nil { + return err + } + var ok bool + for _, expected := range enumValuesAttributeNameValueType { + if reflect.DeepEqual(v.Value, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValuesAttributeNameValueType, v.Value) + } + *j = AttributeNameValueType(v) + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. 
+func (j *LanguageSpecificInstrumentation) UnmarshalYAML(unmarshal func(interface{}) error) error { + var raw map[string]interface{} + if err := unmarshal(&raw); err != nil { + return err + } + + *j = raw + return nil +} diff --git a/config/generated_config.go b/config/generated_config.go index 2315641db64..fbf69c3927e 100644 --- a/config/generated_config.go +++ b/config/generated_config.go @@ -2,10 +2,6 @@ package config -import "encoding/json" -import "fmt" -import "reflect" - type AttributeLimits struct { // AttributeCountLimit corresponds to the JSON schema field // "attribute_count_limit". @@ -18,7 +14,20 @@ type AttributeLimits struct { AdditionalProperties interface{} } -type Attributes map[string]interface{} +type AttributeNameValue struct { + // Name corresponds to the JSON schema field "name". + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Type corresponds to the JSON schema field "type". + Type *AttributeNameValueType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` + + // Value corresponds to the JSON schema field "value". + Value interface{} `json:"value" yaml:"value" mapstructure:"value"` +} + +type AttributeNameValueType struct { + Value interface{} +} type BatchLogRecordProcessor struct { // ExportTimeout corresponds to the JSON schema field "export_timeout". @@ -38,24 +47,6 @@ type BatchLogRecordProcessor struct { ScheduleDelay *int `json:"schedule_delay,omitempty" yaml:"schedule_delay,omitempty" mapstructure:"schedule_delay,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *BatchLogRecordProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return fmt.Errorf("field exporter in BatchLogRecordProcessor: required") - } - type Plain BatchLogRecordProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = BatchLogRecordProcessor(plain) - return nil -} - type BatchSpanProcessor struct { // ExportTimeout corresponds to the JSON schema field "export_timeout". ExportTimeout *int `json:"export_timeout,omitempty" yaml:"export_timeout,omitempty" mapstructure:"export_timeout,omitempty"` @@ -74,24 +65,6 @@ type BatchSpanProcessor struct { ScheduleDelay *int `json:"schedule_delay,omitempty" yaml:"schedule_delay,omitempty" mapstructure:"schedule_delay,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *BatchSpanProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return fmt.Errorf("field exporter in BatchSpanProcessor: required") - } - type Plain BatchSpanProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = BatchSpanProcessor(plain) - return nil -} - type Common map[string]interface{} type Console map[string]interface{} @@ -109,7 +82,54 @@ type DetectorsAttributes struct { Included []string `json:"included,omitempty" yaml:"included,omitempty" mapstructure:"included,omitempty"` } -type Headers map[string]string +type GeneralInstrumentation struct { + // Http corresponds to the JSON schema field "http". + Http *GeneralInstrumentationHttp `json:"http,omitempty" yaml:"http,omitempty" mapstructure:"http,omitempty"` + + // Peer corresponds to the JSON schema field "peer". 
+ Peer *GeneralInstrumentationPeer `json:"peer,omitempty" yaml:"peer,omitempty" mapstructure:"peer,omitempty"` +} + +type GeneralInstrumentationHttp struct { + // Client corresponds to the JSON schema field "client". + Client *GeneralInstrumentationHttpClient `json:"client,omitempty" yaml:"client,omitempty" mapstructure:"client,omitempty"` + + // Server corresponds to the JSON schema field "server". + Server *GeneralInstrumentationHttpServer `json:"server,omitempty" yaml:"server,omitempty" mapstructure:"server,omitempty"` +} + +type GeneralInstrumentationHttpClient struct { + // RequestCapturedHeaders corresponds to the JSON schema field + // "request_captured_headers". + RequestCapturedHeaders []string `json:"request_captured_headers,omitempty" yaml:"request_captured_headers,omitempty" mapstructure:"request_captured_headers,omitempty"` + + // ResponseCapturedHeaders corresponds to the JSON schema field + // "response_captured_headers". + ResponseCapturedHeaders []string `json:"response_captured_headers,omitempty" yaml:"response_captured_headers,omitempty" mapstructure:"response_captured_headers,omitempty"` +} + +type GeneralInstrumentationHttpServer struct { + // RequestCapturedHeaders corresponds to the JSON schema field + // "request_captured_headers". + RequestCapturedHeaders []string `json:"request_captured_headers,omitempty" yaml:"request_captured_headers,omitempty" mapstructure:"request_captured_headers,omitempty"` + + // ResponseCapturedHeaders corresponds to the JSON schema field + // "response_captured_headers". + ResponseCapturedHeaders []string `json:"response_captured_headers,omitempty" yaml:"response_captured_headers,omitempty" mapstructure:"response_captured_headers,omitempty"` +} + +type GeneralInstrumentationPeer struct { + // ServiceMapping corresponds to the JSON schema field "service_mapping". 
+ ServiceMapping []GeneralInstrumentationPeerServiceMappingElem `json:"service_mapping,omitempty" yaml:"service_mapping,omitempty" mapstructure:"service_mapping,omitempty"` +} + +type GeneralInstrumentationPeerServiceMappingElem struct { + // Peer corresponds to the JSON schema field "peer". + Peer string `json:"peer" yaml:"peer" mapstructure:"peer"` + + // Service corresponds to the JSON schema field "service". + Service string `json:"service" yaml:"service" mapstructure:"service"` +} type IncludeExclude struct { // Excluded corresponds to the JSON schema field "excluded". @@ -119,6 +139,46 @@ type IncludeExclude struct { Included []string `json:"included,omitempty" yaml:"included,omitempty" mapstructure:"included,omitempty"` } +type Instrumentation struct { + // Cpp corresponds to the JSON schema field "cpp". + Cpp LanguageSpecificInstrumentation `json:"cpp,omitempty" yaml:"cpp,omitempty" mapstructure:"cpp,omitempty"` + + // Dotnet corresponds to the JSON schema field "dotnet". + Dotnet LanguageSpecificInstrumentation `json:"dotnet,omitempty" yaml:"dotnet,omitempty" mapstructure:"dotnet,omitempty"` + + // Erlang corresponds to the JSON schema field "erlang". + Erlang LanguageSpecificInstrumentation `json:"erlang,omitempty" yaml:"erlang,omitempty" mapstructure:"erlang,omitempty"` + + // General corresponds to the JSON schema field "general". + General *GeneralInstrumentation `json:"general,omitempty" yaml:"general,omitempty" mapstructure:"general,omitempty"` + + // Go corresponds to the JSON schema field "go". + Go LanguageSpecificInstrumentation `json:"go,omitempty" yaml:"go,omitempty" mapstructure:"go,omitempty"` + + // Java corresponds to the JSON schema field "java". + Java LanguageSpecificInstrumentation `json:"java,omitempty" yaml:"java,omitempty" mapstructure:"java,omitempty"` + + // Js corresponds to the JSON schema field "js". 
+ Js LanguageSpecificInstrumentation `json:"js,omitempty" yaml:"js,omitempty" mapstructure:"js,omitempty"` + + // Php corresponds to the JSON schema field "php". + Php LanguageSpecificInstrumentation `json:"php,omitempty" yaml:"php,omitempty" mapstructure:"php,omitempty"` + + // Python corresponds to the JSON schema field "python". + Python LanguageSpecificInstrumentation `json:"python,omitempty" yaml:"python,omitempty" mapstructure:"python,omitempty"` + + // Ruby corresponds to the JSON schema field "ruby". + Ruby LanguageSpecificInstrumentation `json:"ruby,omitempty" yaml:"ruby,omitempty" mapstructure:"ruby,omitempty"` + + // Rust corresponds to the JSON schema field "rust". + Rust LanguageSpecificInstrumentation `json:"rust,omitempty" yaml:"rust,omitempty" mapstructure:"rust,omitempty"` + + // Swift corresponds to the JSON schema field "swift". + Swift LanguageSpecificInstrumentation `json:"swift,omitempty" yaml:"swift,omitempty" mapstructure:"swift,omitempty"` +} + +type LanguageSpecificInstrumentation map[string]interface{} + type LogRecordExporter struct { // Console corresponds to the JSON schema field "console". Console Console `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` @@ -165,27 +225,34 @@ type MeterProvider struct { Views []View `json:"views,omitempty" yaml:"views,omitempty" mapstructure:"views,omitempty"` } -type MetricExporter struct { - // Console corresponds to the JSON schema field "console". - Console Console `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` - - // OTLP corresponds to the JSON schema field "otlp". - OTLP *OTLPMetric `json:"otlp,omitempty" yaml:"otlp,omitempty" mapstructure:"otlp,omitempty"` - - // Prometheus corresponds to the JSON schema field "prometheus". 
- Prometheus *Prometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` +type MetricProducer struct { + // Opencensus corresponds to the JSON schema field "opencensus". + Opencensus MetricProducerOpencensus `json:"opencensus,omitempty" yaml:"opencensus,omitempty" mapstructure:"opencensus,omitempty"` AdditionalProperties interface{} } +type MetricProducerOpencensus map[string]interface{} + type MetricReader struct { // Periodic corresponds to the JSON schema field "periodic". Periodic *PeriodicMetricReader `json:"periodic,omitempty" yaml:"periodic,omitempty" mapstructure:"periodic,omitempty"` + // Producers corresponds to the JSON schema field "producers". + Producers []MetricProducer `json:"producers,omitempty" yaml:"producers,omitempty" mapstructure:"producers,omitempty"` + // Pull corresponds to the JSON schema field "pull". Pull *PullMetricReader `json:"pull,omitempty" yaml:"pull,omitempty" mapstructure:"pull,omitempty"` } +type NameStringValuePair struct { + // Name corresponds to the JSON schema field "name". + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Value corresponds to the JSON schema field "value". + Value *string `json:"value" yaml:"value" mapstructure:"value"` +} + type OTLP struct { // Certificate corresponds to the JSON schema field "certificate". Certificate *string `json:"certificate,omitempty" yaml:"certificate,omitempty" mapstructure:"certificate,omitempty"` @@ -200,16 +267,19 @@ type OTLP struct { Compression *string `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"` // Endpoint corresponds to the JSON schema field "endpoint". - Endpoint string `json:"endpoint" yaml:"endpoint" mapstructure:"endpoint"` + Endpoint *string `json:"endpoint" yaml:"endpoint" mapstructure:"endpoint"` // Headers corresponds to the JSON schema field "headers". 
- Headers Headers `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"` + Headers []NameStringValuePair `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"` + + // HeadersList corresponds to the JSON schema field "headers_list". + HeadersList *string `json:"headers_list,omitempty" yaml:"headers_list,omitempty" mapstructure:"headers_list,omitempty"` // Insecure corresponds to the JSON schema field "insecure". Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` // Protocol corresponds to the JSON schema field "protocol". - Protocol string `json:"protocol" yaml:"protocol" mapstructure:"protocol"` + Protocol *string `json:"protocol" yaml:"protocol" mapstructure:"protocol"` // Timeout corresponds to the JSON schema field "timeout". Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` @@ -233,16 +303,19 @@ type OTLPMetric struct { DefaultHistogramAggregation *OTLPMetricDefaultHistogramAggregation `json:"default_histogram_aggregation,omitempty" yaml:"default_histogram_aggregation,omitempty" mapstructure:"default_histogram_aggregation,omitempty"` // Endpoint corresponds to the JSON schema field "endpoint". - Endpoint string `json:"endpoint" yaml:"endpoint" mapstructure:"endpoint"` + Endpoint *string `json:"endpoint" yaml:"endpoint" mapstructure:"endpoint"` // Headers corresponds to the JSON schema field "headers". - Headers Headers `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"` + Headers []NameStringValuePair `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"` + + // HeadersList corresponds to the JSON schema field "headers_list". + HeadersList *string `json:"headers_list,omitempty" yaml:"headers_list,omitempty" mapstructure:"headers_list,omitempty"` // Insecure corresponds to the JSON schema field "insecure". 
Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` // Protocol corresponds to the JSON schema field "protocol". - Protocol string `json:"protocol" yaml:"protocol" mapstructure:"protocol"` + Protocol *string `json:"protocol" yaml:"protocol" mapstructure:"protocol"` // TemporalityPreference corresponds to the JSON schema field // "temporality_preference". @@ -257,73 +330,6 @@ type OTLPMetricDefaultHistogramAggregation string const OTLPMetricDefaultHistogramAggregationBase2ExponentialBucketHistogram OTLPMetricDefaultHistogramAggregation = "base2_exponential_bucket_histogram" const OTLPMetricDefaultHistogramAggregationExplicitBucketHistogram OTLPMetricDefaultHistogramAggregation = "explicit_bucket_histogram" -var enumValues_OTLPMetricDefaultHistogramAggregation = []interface{}{ - "explicit_bucket_histogram", - "base2_exponential_bucket_histogram", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *OTLPMetricDefaultHistogramAggregation) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_OTLPMetricDefaultHistogramAggregation { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_OTLPMetricDefaultHistogramAggregation, v) - } - *j = OTLPMetricDefaultHistogramAggregation(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *OTLPMetric) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["endpoint"]; raw != nil && !ok { - return fmt.Errorf("field endpoint in OTLPMetric: required") - } - if _, ok := raw["protocol"]; raw != nil && !ok { - return fmt.Errorf("field protocol in OTLPMetric: required") - } - type Plain OTLPMetric - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = OTLPMetric(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *OTLP) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["endpoint"]; raw != nil && !ok { - return fmt.Errorf("field endpoint in OTLP: required") - } - if _, ok := raw["protocol"]; raw != nil && !ok { - return fmt.Errorf("field protocol in OTLP: required") - } - type Plain OTLP - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = OTLP(plain) - return nil -} - type OpenTelemetryConfiguration struct { // AttributeLimits corresponds to the JSON schema field "attribute_limits". AttributeLimits *AttributeLimits `json:"attribute_limits,omitempty" yaml:"attribute_limits,omitempty" mapstructure:"attribute_limits,omitempty"` @@ -332,7 +338,10 @@ type OpenTelemetryConfiguration struct { Disabled *bool `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"` // FileFormat corresponds to the JSON schema field "file_format". - FileFormat string `json:"file_format" yaml:"file_format" mapstructure:"file_format"` + FileFormat *string `json:"file_format" yaml:"file_format" mapstructure:"file_format"` + + // Instrumentation corresponds to the JSON schema field "instrumentation". 
+ Instrumentation *Instrumentation `json:"instrumentation,omitempty" yaml:"instrumentation,omitempty" mapstructure:"instrumentation,omitempty"` // LoggerProvider corresponds to the JSON schema field "logger_provider". LoggerProvider *LoggerProvider `json:"logger_provider,omitempty" yaml:"logger_provider,omitempty" mapstructure:"logger_provider,omitempty"` @@ -352,27 +361,9 @@ type OpenTelemetryConfiguration struct { AdditionalProperties interface{} } -// UnmarshalJSON implements json.Unmarshaler. -func (j *OpenTelemetryConfiguration) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["file_format"]; raw != nil && !ok { - return fmt.Errorf("field file_format in OpenTelemetryConfiguration: required") - } - type Plain OpenTelemetryConfiguration - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = OpenTelemetryConfiguration(plain) - return nil -} - type PeriodicMetricReader struct { // Exporter corresponds to the JSON schema field "exporter". - Exporter MetricExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` + Exporter PushMetricExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` // Interval corresponds to the JSON schema field "interval". Interval *int `json:"interval,omitempty" yaml:"interval,omitempty" mapstructure:"interval,omitempty"` @@ -381,24 +372,6 @@ type PeriodicMetricReader struct { Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *PeriodicMetricReader) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return fmt.Errorf("field exporter in PeriodicMetricReader: required") - } - type Plain PeriodicMetricReader - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = PeriodicMetricReader(plain) - return nil -} - type Prometheus struct { // Host corresponds to the JSON schema field "host". Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` @@ -422,37 +395,39 @@ type Prometheus struct { type Propagator struct { // Composite corresponds to the JSON schema field "composite". - Composite []string `json:"composite,omitempty" yaml:"composite,omitempty" mapstructure:"composite,omitempty"` + Composite []*string `json:"composite,omitempty" yaml:"composite,omitempty" mapstructure:"composite,omitempty"` + + AdditionalProperties interface{} +} + +type PullMetricExporter struct { + // Prometheus corresponds to the JSON schema field "prometheus". + Prometheus *Prometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` AdditionalProperties interface{} } type PullMetricReader struct { // Exporter corresponds to the JSON schema field "exporter". - Exporter MetricExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *PullMetricReader) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return fmt.Errorf("field exporter in PullMetricReader: required") - } - type Plain PullMetricReader - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = PullMetricReader(plain) - return nil + Exporter PullMetricExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` +} + +type PushMetricExporter struct { + // Console corresponds to the JSON schema field "console". + Console Console `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` + + // OTLP corresponds to the JSON schema field "otlp". + OTLP *OTLPMetric `json:"otlp,omitempty" yaml:"otlp,omitempty" mapstructure:"otlp,omitempty"` + + AdditionalProperties interface{} } type Resource struct { // Attributes corresponds to the JSON schema field "attributes". - Attributes Attributes `json:"attributes,omitempty" yaml:"attributes,omitempty" mapstructure:"attributes,omitempty"` + Attributes []AttributeNameValue `json:"attributes,omitempty" yaml:"attributes,omitempty" mapstructure:"attributes,omitempty"` + + // AttributesList corresponds to the JSON schema field "attributes_list". + AttributesList *string `json:"attributes_list,omitempty" yaml:"attributes_list,omitempty" mapstructure:"attributes_list,omitempty"` // Detectors corresponds to the JSON schema field "detectors". Detectors *Detectors `json:"detectors,omitempty" yaml:"detectors,omitempty" mapstructure:"detectors,omitempty"` @@ -525,47 +500,11 @@ type SimpleLogRecordProcessor struct { Exporter LogRecordExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SimpleLogRecordProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return fmt.Errorf("field exporter in SimpleLogRecordProcessor: required") - } - type Plain SimpleLogRecordProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SimpleLogRecordProcessor(plain) - return nil -} - type SimpleSpanProcessor struct { // Exporter corresponds to the JSON schema field "exporter". Exporter SpanExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SimpleSpanProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return fmt.Errorf("field exporter in SimpleSpanProcessor: required") - } - type Plain SimpleSpanProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SimpleSpanProcessor(plain) - return nil -} - type SpanExporter struct { // Console corresponds to the JSON schema field "console". Console Console `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` @@ -661,41 +600,12 @@ const ViewSelectorInstrumentTypeObservableGauge ViewSelectorInstrumentType = "ob const ViewSelectorInstrumentTypeObservableUpDownCounter ViewSelectorInstrumentType = "observable_up_down_counter" const ViewSelectorInstrumentTypeUpDownCounter ViewSelectorInstrumentType = "up_down_counter" -var enumValues_ViewSelectorInstrumentType = []interface{}{ - "counter", - "histogram", - "observable_counter", - "observable_gauge", - "observable_up_down_counter", - "up_down_counter", -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *ViewSelectorInstrumentType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_ViewSelectorInstrumentType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_ViewSelectorInstrumentType, v) - } - *j = ViewSelectorInstrumentType(v) - return nil -} - type ViewStream struct { // Aggregation corresponds to the JSON schema field "aggregation". Aggregation *ViewStreamAggregation `json:"aggregation,omitempty" yaml:"aggregation,omitempty" mapstructure:"aggregation,omitempty"` // AttributeKeys corresponds to the JSON schema field "attribute_keys". - AttributeKeys []string `json:"attribute_keys,omitempty" yaml:"attribute_keys,omitempty" mapstructure:"attribute_keys,omitempty"` + AttributeKeys *IncludeExclude `json:"attribute_keys,omitempty" yaml:"attribute_keys,omitempty" mapstructure:"attribute_keys,omitempty"` // Description corresponds to the JSON schema field "description". Description *string `json:"description,omitempty" yaml:"description,omitempty" mapstructure:"description,omitempty"` @@ -755,26 +665,8 @@ type ViewStreamAggregationSum map[string]interface{} type Zipkin struct { // Endpoint corresponds to the JSON schema field "endpoint". - Endpoint string `json:"endpoint" yaml:"endpoint" mapstructure:"endpoint"` + Endpoint *string `json:"endpoint" yaml:"endpoint" mapstructure:"endpoint"` // Timeout corresponds to the JSON schema field "timeout". Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` } - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *Zipkin) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["endpoint"]; raw != nil && !ok { - return fmt.Errorf("field endpoint in Zipkin: required") - } - type Plain Zipkin - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = Zipkin(plain) - return nil -} diff --git a/config/log.go b/config/log.go index f30b37c8a45..7b9eff8497c 100644 --- a/config/log.go +++ b/config/log.go @@ -75,12 +75,12 @@ func logExporter(ctx context.Context, exporter LogRecordExporter) (sdklog.Export ) } - if exporter.OTLP != nil { - switch exporter.OTLP.Protocol { + if exporter.OTLP != nil && exporter.OTLP.Protocol != nil { + switch *exporter.OTLP.Protocol { case protocolProtobufHTTP: return otlpHTTPLogExporter(ctx, exporter.OTLP) default: - return nil, fmt.Errorf("unsupported protocol %q", exporter.OTLP.Protocol) + return nil, fmt.Errorf("unsupported protocol %q", *exporter.OTLP.Protocol) } } return nil, errors.New("no valid log exporter") @@ -120,8 +120,8 @@ func batchLogProcessor(blp *BatchLogRecordProcessor, exp sdklog.Exporter) (*sdkl func otlpHTTPLogExporter(ctx context.Context, otlpConfig *OTLP) (sdklog.Exporter, error) { var opts []otlploghttp.Option - if len(otlpConfig.Endpoint) > 0 { - u, err := url.ParseRequestURI(otlpConfig.Endpoint) + if otlpConfig.Endpoint != nil { + u, err := url.ParseRequestURI(*otlpConfig.Endpoint) if err != nil { return nil, err } @@ -148,7 +148,7 @@ func otlpHTTPLogExporter(ctx context.Context, otlpConfig *OTLP) (sdklog.Exporter opts = append(opts, otlploghttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) } if len(otlpConfig.Headers) > 0 { - opts = append(opts, otlploghttp.WithHeaders(otlpConfig.Headers)) + opts = append(opts, otlploghttp.WithHeaders(toStringMap(otlpConfig.Headers))) } return otlploghttp.New(ctx, opts...) 
diff --git a/config/log_test.go b/config/log_test.go index 88318dc369d..1fa978f4f16 100644 --- a/config/log_test.go +++ b/config/log_test.go @@ -98,7 +98,7 @@ func TestLogProcessor(t *testing.T) { MaxExportBatchSize: ptr(-1), Exporter: LogRecordExporter{ OTLP: &OTLP{ - Protocol: "http/protobuf", + Protocol: ptr("http/protobuf"), }, }, }, @@ -112,7 +112,7 @@ func TestLogProcessor(t *testing.T) { ExportTimeout: ptr(-2), Exporter: LogRecordExporter{ OTLP: &OTLP{ - Protocol: "http/protobuf", + Protocol: ptr("http/protobuf"), }, }, }, @@ -127,7 +127,7 @@ func TestLogProcessor(t *testing.T) { MaxQueueSize: ptr(-3), Exporter: LogRecordExporter{ OTLP: &OTLP{ - Protocol: "http/protobuf", + Protocol: ptr("http/protobuf"), }, }, }, @@ -141,7 +141,7 @@ func TestLogProcessor(t *testing.T) { ScheduleDelay: ptr(-4), Exporter: LogRecordExporter{ OTLP: &OTLP{ - Protocol: "http/protobuf", + Protocol: ptr("http/protobuf"), }, }, }, @@ -182,12 +182,12 @@ func TestLogProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: LogRecordExporter{ OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "http://localhost:4318", + Protocol: ptr("http/protobuf"), + Endpoint: ptr("http://localhost:4318"), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -205,12 +205,12 @@ func TestLogProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: LogRecordExporter{ OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "http://localhost:4318/path/123", + Protocol: ptr("http/protobuf"), + Endpoint: ptr("http://localhost:4318/path/123"), Compression: ptr("none"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -228,11 +228,11 @@ func TestLogProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: LogRecordExporter{ OTLP: &OTLP{ - Protocol: "http/protobuf", + Protocol: 
ptr("http/protobuf"), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -250,12 +250,12 @@ func TestLogProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: LogRecordExporter{ OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("http/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -273,12 +273,12 @@ func TestLogProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: LogRecordExporter{ OTLP: &OTLP{ - Protocol: "invalid", - Endpoint: "https://10.0.0.0:443", + Protocol: ptr("invalid"), + Endpoint: ptr("https://10.0.0.0:443"), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -296,12 +296,12 @@ func TestLogProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: LogRecordExporter{ OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: " ", + Protocol: ptr("http/protobuf"), + Endpoint: ptr(" "), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -319,12 +319,12 @@ func TestLogProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: LogRecordExporter{ OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("http/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("none"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -342,12 +342,12 @@ func TestLogProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: 
LogRecordExporter{ OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("http/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("invalid"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -381,12 +381,12 @@ func TestLogProcessor(t *testing.T) { Simple: &SimpleLogRecordProcessor{ Exporter: LogRecordExporter{ OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("http/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, diff --git a/config/metric.go b/config/metric.go index a95a4071a1a..743d594456e 100644 --- a/config/metric.go +++ b/config/metric.go @@ -93,14 +93,14 @@ func metricReader(ctx context.Context, r MetricReader) (sdkmetric.Reader, error) return nil, errors.New("no valid metric reader") } -func pullReader(ctx context.Context, exporter MetricExporter) (sdkmetric.Reader, error) { +func pullReader(ctx context.Context, exporter PullMetricExporter) (sdkmetric.Reader, error) { if exporter.Prometheus != nil { return prometheusReader(ctx, exporter.Prometheus) } return nil, errors.New("no valid metric exporter") } -func periodicExporter(ctx context.Context, exporter MetricExporter, opts ...sdkmetric.PeriodicReaderOption) (sdkmetric.Reader, error) { +func periodicExporter(ctx context.Context, exporter PushMetricExporter, opts ...sdkmetric.PeriodicReaderOption) (sdkmetric.Reader, error) { if exporter.Console != nil && exporter.OTLP != nil { return nil, errors.New("must not specify multiple exporters") } @@ -116,16 +116,16 @@ func periodicExporter(ctx context.Context, exporter MetricExporter, opts ...sdkm } return sdkmetric.NewPeriodicReader(exp, opts...), nil } - if exporter.OTLP != nil { + if exporter.OTLP != nil 
&& exporter.OTLP.Protocol != nil { var err error var exp sdkmetric.Exporter - switch exporter.OTLP.Protocol { + switch *exporter.OTLP.Protocol { case protocolProtobufHTTP: exp, err = otlpHTTPMetricExporter(ctx, exporter.OTLP) case protocolProtobufGRPC: exp, err = otlpGRPCMetricExporter(ctx, exporter.OTLP) default: - return nil, fmt.Errorf("unsupported protocol %q", exporter.OTLP.Protocol) + return nil, fmt.Errorf("unsupported protocol %q", *exporter.OTLP.Protocol) } if err != nil { return nil, err @@ -138,8 +138,8 @@ func periodicExporter(ctx context.Context, exporter MetricExporter, opts ...sdkm func otlpHTTPMetricExporter(ctx context.Context, otlpConfig *OTLPMetric) (sdkmetric.Exporter, error) { opts := []otlpmetrichttp.Option{} - if len(otlpConfig.Endpoint) > 0 { - u, err := url.ParseRequestURI(otlpConfig.Endpoint) + if otlpConfig.Endpoint != nil { + u, err := url.ParseRequestURI(*otlpConfig.Endpoint) if err != nil { return nil, err } @@ -166,7 +166,7 @@ func otlpHTTPMetricExporter(ctx context.Context, otlpConfig *OTLPMetric) (sdkmet opts = append(opts, otlpmetrichttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) } if len(otlpConfig.Headers) > 0 { - opts = append(opts, otlpmetrichttp.WithHeaders(otlpConfig.Headers)) + opts = append(opts, otlpmetrichttp.WithHeaders(toStringMap(otlpConfig.Headers))) } if otlpConfig.TemporalityPreference != nil { switch *otlpConfig.TemporalityPreference { @@ -187,8 +187,8 @@ func otlpHTTPMetricExporter(ctx context.Context, otlpConfig *OTLPMetric) (sdkmet func otlpGRPCMetricExporter(ctx context.Context, otlpConfig *OTLPMetric) (sdkmetric.Exporter, error) { var opts []otlpmetricgrpc.Option - if len(otlpConfig.Endpoint) > 0 { - u, err := url.ParseRequestURI(otlpConfig.Endpoint) + if otlpConfig.Endpoint != nil { + u, err := url.ParseRequestURI(*otlpConfig.Endpoint) if err != nil { return nil, err } @@ -200,7 +200,7 @@ func otlpGRPCMetricExporter(ctx context.Context, otlpConfig *OTLPMetric) (sdkmet if u.Host != "" { 
opts = append(opts, otlpmetricgrpc.WithEndpoint(u.Host)) } else { - opts = append(opts, otlpmetricgrpc.WithEndpoint(otlpConfig.Endpoint)) + opts = append(opts, otlpmetricgrpc.WithEndpoint(*otlpConfig.Endpoint)) } if u.Scheme == "http" { opts = append(opts, otlpmetricgrpc.WithInsecure()) @@ -221,7 +221,7 @@ func otlpGRPCMetricExporter(ctx context.Context, otlpConfig *OTLPMetric) (sdkmet opts = append(opts, otlpmetricgrpc.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) } if len(otlpConfig.Headers) > 0 { - opts = append(opts, otlpmetricgrpc.WithHeaders(otlpConfig.Headers)) + opts = append(opts, otlpmetricgrpc.WithHeaders(toStringMap(otlpConfig.Headers))) } if otlpConfig.TemporalityPreference != nil { switch *otlpConfig.TemporalityPreference { @@ -261,6 +261,42 @@ func lowMemory(ik sdkmetric.InstrumentKind) metricdata.Temporality { } } +// newIncludeExcludeFilter returns a Filter that includes attributes +// in the include list and excludes attributes in the excludes list. +// It returns an error if an attribute is in both lists +// +// If IncludeExclude is empty a include-all filter is returned. 
+func newIncludeExcludeFilter(lists *IncludeExclude) (attribute.Filter, error) { + if lists == nil { + return func(kv attribute.KeyValue) bool { return true }, nil + } + + included := make(map[attribute.Key]struct{}) + for _, k := range lists.Included { + included[attribute.Key(k)] = struct{}{} + } + excluded := make(map[attribute.Key]struct{}) + for _, k := range lists.Excluded { + if _, ok := included[attribute.Key(k)]; ok { + return nil, fmt.Errorf("attribute cannot be in both include and exclude list: %s", k) + } + excluded[attribute.Key(k)] = struct{}{} + } + return func(kv attribute.KeyValue) bool { + // check if a value is excluded first + if _, ok := excluded[kv.Key]; ok { + return false + } + + if len(included) == 0 { + return true + } + + _, ok := included[kv.Key] + return ok + }, nil +} + func prometheusReader(ctx context.Context, prometheusConfig *Prometheus) (sdkmetric.Reader, error) { var opts []otelprom.Option if prometheusConfig.Host == nil { @@ -279,20 +315,11 @@ func prometheusReader(ctx context.Context, prometheusConfig *Prometheus) (sdkmet opts = append(opts, otelprom.WithoutUnits()) } if prometheusConfig.WithResourceConstantLabels != nil { - if prometheusConfig.WithResourceConstantLabels.Included != nil { - var keys []attribute.Key - for _, val := range prometheusConfig.WithResourceConstantLabels.Included { - keys = append(keys, attribute.Key(val)) - } - otelprom.WithResourceAsConstantLabels(attribute.NewAllowKeysFilter(keys...)) - } - if prometheusConfig.WithResourceConstantLabels.Excluded != nil { - var keys []attribute.Key - for _, val := range prometheusConfig.WithResourceConstantLabels.Included { - keys = append(keys, attribute.Key(val)) - } - otelprom.WithResourceAsConstantLabels(attribute.NewDenyKeysFilter(keys...)) + f, err := newIncludeExcludeFilter(prometheusConfig.WithResourceConstantLabels) + if err != nil { + return nil, err } + otelprom.WithResourceAsConstantLabels(f) } reg := prometheus.NewRegistry() @@ -353,7 +380,11 @@ func 
view(v View) (sdkmetric.View, error) { return nil, err } - return sdkmetric.NewView(inst, stream(v.Stream)), nil + s, err := stream(v.Stream) + if err != nil { + return nil, err + } + return sdkmetric.NewView(inst, s), nil } func instrument(vs ViewSelector) (sdkmetric.Instrument, error) { @@ -378,25 +409,21 @@ func instrument(vs ViewSelector) (sdkmetric.Instrument, error) { return inst, nil } -func stream(vs *ViewStream) sdkmetric.Stream { +func stream(vs *ViewStream) (sdkmetric.Stream, error) { if vs == nil { - return sdkmetric.Stream{} + return sdkmetric.Stream{}, nil } + f, err := newIncludeExcludeFilter(vs.AttributeKeys) + if err != nil { + return sdkmetric.Stream{}, err + } return sdkmetric.Stream{ Name: strOrEmpty(vs.Name), Description: strOrEmpty(vs.Description), Aggregation: aggregation(vs.Aggregation), - AttributeFilter: attributeFilter(vs.AttributeKeys), - } -} - -func attributeFilter(attributeKeys []string) attribute.Filter { - var attrKeys []attribute.Key - for _, attrStr := range attributeKeys { - attrKeys = append(attrKeys, attribute.Key(attrStr)) - } - return attribute.NewAllowKeysFilter(attrKeys...) 
+ AttributeFilter: f, + }, nil } func aggregation(aggr *ViewStreamAggregation) sdkmetric.Aggregation { diff --git a/config/metric_test.go b/config/metric_test.go index cf2145be0fd..8d7ced508ca 100644 --- a/config/metric_test.go +++ b/config/metric_test.go @@ -6,6 +6,7 @@ package config import ( "context" "errors" + "fmt" "net/url" "reflect" "testing" @@ -66,7 +67,7 @@ func TestMeterProvider(t *testing.T) { }, { Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ Console: Console{}, OTLP: &OTLPMetric{}, }, @@ -122,7 +123,7 @@ func TestReader(t *testing.T) { name: "pull/prometheus-no-host", reader: MetricReader{ Pull: &PullMetricReader{ - Exporter: MetricExporter{ + Exporter: PullMetricExporter{ Prometheus: &Prometheus{}, }, }, @@ -133,7 +134,7 @@ func TestReader(t *testing.T) { name: "pull/prometheus-no-port", reader: MetricReader{ Pull: &PullMetricReader{ - Exporter: MetricExporter{ + Exporter: PullMetricExporter{ Prometheus: &Prometheus{ Host: ptr("localhost"), }, @@ -146,7 +147,7 @@ func TestReader(t *testing.T) { name: "pull/prometheus", reader: MetricReader{ Pull: &PullMetricReader{ - Exporter: MetricExporter{ + Exporter: PullMetricExporter{ Prometheus: &Prometheus{ Host: ptr("localhost"), Port: ptr(8888), @@ -167,9 +168,9 @@ func TestReader(t *testing.T) { name: "periodic/otlp-exporter-invalid-protocol", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "http/invalid", + Protocol: ptr("http/invalid"), }, }, }, @@ -180,14 +181,14 @@ func TestReader(t *testing.T) { name: "periodic/otlp-grpc-exporter", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Endpoint: "http://localhost:4318", + Protocol: ptr("grpc/protobuf"), + Endpoint: ptr("http://localhost:4318"), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: 
map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -199,14 +200,14 @@ func TestReader(t *testing.T) { name: "periodic/otlp-grpc-exporter-with-path", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Endpoint: "http://localhost:4318/path/123", + Protocol: ptr("grpc/protobuf"), + Endpoint: ptr("http://localhost:4318/path/123"), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -218,13 +219,13 @@ func TestReader(t *testing.T) { name: "periodic/otlp-grpc-exporter-no-endpoint", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", + Protocol: ptr("grpc/protobuf"), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -236,14 +237,14 @@ func TestReader(t *testing.T) { name: "periodic/otlp-grpc-exporter-no-scheme", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("grpc/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -255,14 +256,14 @@ func TestReader(t *testing.T) { name: "periodic/otlp-grpc-invalid-endpoint", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Endpoint: " ", + Protocol: 
ptr("grpc/protobuf"), + Endpoint: ptr(" "), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -274,14 +275,14 @@ func TestReader(t *testing.T) { name: "periodic/otlp-grpc-none-compression", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("grpc/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("none"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -293,14 +294,14 @@ func TestReader(t *testing.T) { name: "periodic/otlp-grpc-delta-temporality", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("grpc/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("none"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, TemporalityPreference: ptr("delta"), }, @@ -313,14 +314,14 @@ func TestReader(t *testing.T) { name: "periodic/otlp-grpc-cumulative-temporality", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("grpc/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("none"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, TemporalityPreference: ptr("cumulative"), }, @@ -333,14 +334,14 @@ func TestReader(t *testing.T) { name: 
"periodic/otlp-grpc-lowmemory-temporality", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("grpc/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("none"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, TemporalityPreference: ptr("lowmemory"), }, @@ -353,14 +354,14 @@ func TestReader(t *testing.T) { name: "periodic/otlp-grpc-invalid-temporality", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("grpc/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("none"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, TemporalityPreference: ptr("invalid"), }, @@ -373,14 +374,14 @@ func TestReader(t *testing.T) { name: "periodic/otlp-grpc-invalid-compression", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("grpc/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("invalid"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -392,14 +393,14 @@ func TestReader(t *testing.T) { name: "periodic/otlp-http-exporter", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Endpoint: "http://localhost:4318", + Protocol: ptr("http/protobuf"), + Endpoint: 
ptr("http://localhost:4318"), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -411,14 +412,14 @@ func TestReader(t *testing.T) { name: "periodic/otlp-http-exporter-with-path", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Endpoint: "http://localhost:4318/path/123", + Protocol: ptr("http/protobuf"), + Endpoint: ptr("http://localhost:4318/path/123"), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -430,13 +431,13 @@ func TestReader(t *testing.T) { name: "periodic/otlp-http-exporter-no-endpoint", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "http/protobuf", + Protocol: ptr("http/protobuf"), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -448,14 +449,14 @@ func TestReader(t *testing.T) { name: "periodic/otlp-http-exporter-no-scheme", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("http/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -467,14 +468,14 @@ func TestReader(t *testing.T) { name: "periodic/otlp-http-invalid-endpoint", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: 
PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Endpoint: " ", + Protocol: ptr("http/protobuf"), + Endpoint: ptr(" "), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -486,14 +487,14 @@ func TestReader(t *testing.T) { name: "periodic/otlp-http-none-compression", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("http/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("none"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -505,14 +506,14 @@ func TestReader(t *testing.T) { name: "periodic/otlp-http-cumulative-temporality", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("http/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("none"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, TemporalityPreference: ptr("cumulative"), }, @@ -525,14 +526,14 @@ func TestReader(t *testing.T) { name: "periodic/otlp-http-lowmemory-temporality", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("http/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("none"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, TemporalityPreference: 
ptr("lowmemory"), }, @@ -545,14 +546,14 @@ func TestReader(t *testing.T) { name: "periodic/otlp-http-delta-temporality", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("http/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("none"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, TemporalityPreference: ptr("delta"), }, @@ -565,14 +566,14 @@ func TestReader(t *testing.T) { name: "periodic/otlp-http-invalid-temporality", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("http/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("none"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, TemporalityPreference: ptr("invalid"), }, @@ -585,14 +586,14 @@ func TestReader(t *testing.T) { name: "periodic/otlp-http-invalid-compression", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("http/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("invalid"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -604,7 +605,7 @@ func TestReader(t *testing.T) { name: "periodic/no-exporter", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{}, + Exporter: PushMetricExporter{}, }, }, wantErr: errors.New("no valid metric exporter"), @@ -613,7 +614,7 @@ func 
TestReader(t *testing.T) { name: "periodic/console-exporter", reader: MetricReader{ Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ Console: Console{}, }, }, @@ -626,7 +627,7 @@ func TestReader(t *testing.T) { Periodic: &PeriodicMetricReader{ Interval: ptr(30_000), Timeout: ptr(5_000), - Exporter: MetricExporter{ + Exporter: PushMetricExporter{ Console: Console{}, }, }, @@ -879,7 +880,7 @@ func TestView(t *testing.T) { Stream: &ViewStream{ Name: ptr("new_name"), Description: ptr("new_description"), - AttributeKeys: []string{"foo", "bar"}, + AttributeKeys: ptr(IncludeExclude{Included: []string{"foo", "bar"}}), Aggregation: &ViewStreamAggregation{Sum: make(ViewStreamAggregationSum)}, }, }, @@ -1077,29 +1078,49 @@ func TestAggregation(t *testing.T) { } } -func TestAttributeFilter(t *testing.T) { +func TestNewIncludeExcludeFilter(t *testing.T) { testCases := []struct { name string - attributeKeys []string + attributeKeys *IncludeExclude wantPass []string wantFail []string }{ { name: "empty", - attributeKeys: []string{}, - wantPass: nil, - wantFail: []string{"foo", "bar"}, + attributeKeys: nil, + wantPass: []string{"foo", "bar"}, + wantFail: nil, }, { - name: "filter", - attributeKeys: []string{"foo"}, - wantPass: []string{"foo"}, - wantFail: []string{"bar"}, + name: "filter-with-include", + attributeKeys: ptr(IncludeExclude{ + Included: []string{"foo"}, + }), + wantPass: []string{"foo"}, + wantFail: []string{"bar"}, + }, + { + name: "filter-with-exclude", + attributeKeys: ptr(IncludeExclude{ + Excluded: []string{"foo"}, + }), + wantPass: []string{"bar"}, + wantFail: []string{"foo"}, + }, + { + name: "filter-with-include-and-exclude", + attributeKeys: ptr(IncludeExclude{ + Included: []string{"bar"}, + Excluded: []string{"foo"}, + }), + wantPass: []string{"bar"}, + wantFail: []string{"foo"}, }, } for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { - got := attributeFilter(tt.attributeKeys) + got, err := 
newIncludeExcludeFilter(tt.attributeKeys) + require.NoError(t, err) for _, pass := range tt.wantPass { require.True(t, got(attribute.KeyValue{Key: attribute.Key(pass), Value: attribute.StringValue("")})) } @@ -1109,3 +1130,11 @@ func TestAttributeFilter(t *testing.T) { }) } } + +func TestNewIncludeExcludeFilterError(t *testing.T) { + _, err := newIncludeExcludeFilter(ptr(IncludeExclude{ + Included: []string{"foo"}, + Excluded: []string{"foo"}, + })) + require.Equal(t, fmt.Errorf("attribute cannot be in both include and exclude list: foo"), err) +} diff --git a/config/resource.go b/config/resource.go index 020d6660b23..1e62d29c295 100644 --- a/config/resource.go +++ b/config/resource.go @@ -51,8 +51,8 @@ func newResource(res *Resource) (*resource.Resource, error) { } var attrs []attribute.KeyValue - for k, v := range res.Attributes { - attrs = append(attrs, keyVal(k, v)) + for _, v := range res.Attributes { + attrs = append(attrs, keyVal(v.Name, v.Value)) } return resource.Merge(resource.Default(), diff --git a/config/resource_test.go b/config/resource_test.go index 3ae78565069..b51650429b0 100644 --- a/config/resource_test.go +++ b/config/resource_test.go @@ -63,8 +63,8 @@ func TestNewResource(t *testing.T) { name: "resource-with-attributes-invalid-schema", config: &Resource{ SchemaUrl: ptr("https://opentelemetry.io/invalid-schema"), - Attributes: Attributes{ - "service.name": "service-a", + Attributes: []AttributeNameValue{ + {Name: "service.name", Value: "service-a"}, }, }, wantResource: resource.NewSchemaless(res.Attributes()...), @@ -73,8 +73,8 @@ func TestNewResource(t *testing.T) { { name: "resource-with-attributes-and-schema", config: &Resource{ - Attributes: Attributes{ - "service.name": "service-a", + Attributes: []AttributeNameValue{ + {Name: "service.name", Value: "service-a"}, }, SchemaUrl: ptr(semconv.SchemaURL), }, @@ -83,23 +83,23 @@ func TestNewResource(t *testing.T) { { name: "resource-with-additional-attributes-and-schema", config: &Resource{ - 
Attributes: Attributes{ - "service.name": "service-a", - "attr-bool": true, - "attr-int64": int64(-164), - "attr-uint64": uint64(164), - "attr-float64": float64(64.0), - "attr-int8": int8(-18), - "attr-uint8": uint8(18), - "attr-int16": int16(-116), - "attr-uint16": uint16(116), - "attr-int32": int32(-132), - "attr-uint32": uint32(132), - "attr-float32": float32(32.0), - "attr-int": int(-1), - "attr-uint": uint(1), - "attr-string": "string-val", - "attr-default": other, + Attributes: []AttributeNameValue{ + {Name: "service.name", Value: "service-a"}, + {Name: "attr-bool", Value: true}, + {Name: "attr-int64", Value: int64(-164)}, + {Name: "attr-uint64", Value: uint64(164)}, + {Name: "attr-float64", Value: float64(64.0)}, + {Name: "attr-int8", Value: int8(-18)}, + {Name: "attr-uint8", Value: uint8(18)}, + {Name: "attr-int16", Value: int16(-116)}, + {Name: "attr-uint16", Value: uint16(116)}, + {Name: "attr-int32", Value: int32(-132)}, + {Name: "attr-uint32", Value: uint32(132)}, + {Name: "attr-float32", Value: float32(32.0)}, + {Name: "attr-int", Value: int(-1)}, + {Name: "attr-uint", Value: uint(1)}, + {Name: "attr-string", Value: "string-val"}, + {Name: "attr-default", Value: other}, }, SchemaUrl: ptr(semconv.SchemaURL), }, diff --git a/config/testdata/v0.3.json b/config/testdata/v0.3.json new file mode 100644 index 00000000000..b789a9dfdb0 --- /dev/null +++ b/config/testdata/v0.3.json @@ -0,0 +1,419 @@ +{ + "file_format": "0.3", + "disabled": false, + "attribute_limits": { + "attribute_value_length_limit": 4096, + "attribute_count_limit": 128 + }, + "logger_provider": { + "processors": [ + { + "batch": { + "schedule_delay": 5000, + "export_timeout": 30000, + "max_queue_size": 2048, + "max_export_batch_size": 512, + "exporter": { + "otlp": { + "protocol": "http/protobuf", + "endpoint": "http://localhost:4318/v1/logs", + "certificate": "/app/cert.pem", + "client_key": "/app/cert.pem", + "client_certificate": "/app/cert.pem", + "headers": [ + { + "name": "api-key", + 
"value": "1234" + } + ], + "headers_list": "api-key=1234", + "compression": "gzip", + "timeout": 10000, + "insecure": false + } + } + } + }, + { + "simple": { + "exporter": { + "console": {} + } + } + } + ], + "limits": { + "attribute_value_length_limit": 4096, + "attribute_count_limit": 128 + } + }, + "meter_provider": { + "readers": [ + { + "pull": { + "exporter": { + "prometheus": { + "host": "localhost", + "port": 9464, + "without_units": false, + "without_type_suffix": false, + "without_scope_info": false, + "with_resource_constant_labels": { + "included": [ + "service*" + ], + "excluded": [ + "service.attr1" + ] + } + } + } + }, + "producers": [ + { + "opencensus": {} + } + ] + }, + { + "periodic": { + "interval": 5000, + "timeout": 30000, + "exporter": { + "otlp": { + "protocol": "http/protobuf", + "endpoint": "http://localhost:4318/v1/metrics", + "certificate": "/app/cert.pem", + "client_key": "/app/cert.pem", + "client_certificate": "/app/cert.pem", + "headers": [ + { + "name": "api-key", + "value": "1234" + } + ], + "headers_list": "api-key=1234", + "compression": "gzip", + "timeout": 10000, + "insecure": false, + "temporality_preference": "delta", + "default_histogram_aggregation": "base2_exponential_bucket_histogram" + } + } + }, + "producers": [ + { + "prometheus": {} + } + ] + }, + { + "periodic": { + "exporter": { + "console": {} + } + } + } + ], + "views": [ + { + "selector": { + "instrument_name": "my-instrument", + "instrument_type": "histogram", + "unit": "ms", + "meter_name": "my-meter", + "meter_version": "1.0.0", + "meter_schema_url": "https://opentelemetry.io/schemas/1.16.0" + }, + "stream": { + "name": "new_instrument_name", + "description": "new_description", + "aggregation": { + "explicit_bucket_histogram": { + "boundaries": [ + 0, + 5, + 10, + 25, + 50, + 75, + 100, + 250, + 500, + 750, + 1000, + 2500, + 5000, + 7500, + 10000 + ], + "record_min_max": true + } + }, + "attribute_keys": { + "included": [ + "key1", + "key2" + ], + "excluded": 
[ + "key3" + ] + } + } + } + ] + }, + "propagator": { + "composite": [ + "tracecontext", + "baggage", + "b3", + "b3multi", + "jaeger", + "xray", + "ottrace" + ] + }, + "tracer_provider": { + "processors": [ + { + "batch": { + "schedule_delay": 5000, + "export_timeout": 30000, + "max_queue_size": 2048, + "max_export_batch_size": 512, + "exporter": { + "otlp": { + "protocol": "http/protobuf", + "endpoint": "http://localhost:4318/v1/traces", + "certificate": "/app/cert.pem", + "client_key": "/app/cert.pem", + "client_certificate": "/app/cert.pem", + "headers": [ + { + "name": "api-key", + "value": "1234" + } + ], + "headers_list": "api-key=1234", + "compression": "gzip", + "timeout": 10000, + "insecure": false + } + } + } + }, + { + "batch": { + "exporter": { + "zipkin": { + "endpoint": "http://localhost:9411/api/v2/spans", + "timeout": 10000 + } + } + } + }, + { + "simple": { + "exporter": { + "console": {} + } + } + } + ], + "limits": { + "attribute_value_length_limit": 4096, + "attribute_count_limit": 128, + "event_count_limit": 128, + "link_count_limit": 128, + "event_attribute_count_limit": 128, + "link_attribute_count_limit": 128 + }, + "sampler": { + "parent_based": { + "root": { + "trace_id_ratio_based": { + "ratio": 0.0001 + } + }, + "remote_parent_sampled": { + "always_on": {} + }, + "remote_parent_not_sampled": { + "always_off": {} + }, + "local_parent_sampled": { + "always_on": {} + }, + "local_parent_not_sampled": { + "always_off": {} + } + } + } + }, + "resource": { + "attributes": [ + { + "name": "service.name", + "value": "unknown_service" + }, + { + "name": "string_key", + "value": "value", + "type": "string" + }, + { + "name": "bool_key", + "value": true, + "type": "bool" + }, + { + "name": "int_key", + "value": 1, + "type": "int" + }, + { + "name": "double_key", + "value": 1.1, + "type": "double" + }, + { + "name": "string_array_key", + "value": [ + "value1", + "value2" + ], + "type": "string_array" + }, + { + "name": "bool_array_key", + "value": [ 
+ true, + false + ], + "type": "bool_array" + }, + { + "name": "int_array_key", + "value": [ + 1, + 2 + ], + "type": "int_array" + }, + { + "name": "double_array_key", + "value": [ + 1.1, + 2.2 + ], + "type": "double_array" + } + ], + "attributes_list": "service.namespace=my-namespace,service.version=1.0.0", + "detectors": { + "attributes": { + "included": [ + "process.*" + ], + "excluded": [ + "process.command_args" + ] + } + }, + "schema_url": "https://opentelemetry.io/schemas/1.16.0" + }, + "instrumentation": { + "general": { + "peer": { + "service_mapping": [ + { + "peer": "1.2.3.4", + "service": "FooService" + }, + { + "peer": "2.3.4.5", + "service": "BarService" + } + ] + }, + "http": { + "client": { + "request_captured_headers": [ + "Content-Type", + "Accept" + ], + "response_captured_headers": [ + "Content-Type", + "Content-Encoding" + ] + }, + "server": { + "request_captured_headers": [ + "Content-Type", + "Accept" + ], + "response_captured_headers": [ + "Content-Type", + "Content-Encoding" + ] + } + } + }, + "cpp": { + "example": { + "property": "value" + } + }, + "dotnet": { + "example": { + "property": "value" + } + }, + "erlang": { + "example": { + "property": "value" + } + }, + "go": { + "example": { + "property": "value" + } + }, + "java": { + "example": { + "property": "value" + } + }, + "js": { + "example": { + "property": "value" + } + }, + "php": { + "example": { + "property": "value" + } + }, + "python": { + "example": { + "property": "value" + } + }, + "ruby": { + "example": { + "property": "value" + } + }, + "rust": { + "example": { + "property": "value" + } + }, + "swift": { + "example": { + "property": "value" + } + } + } +} \ No newline at end of file diff --git a/config/testdata/v0.3.yaml b/config/testdata/v0.3.yaml new file mode 100644 index 00000000000..04f2a314e4e --- /dev/null +++ b/config/testdata/v0.3.yaml @@ -0,0 +1,466 @@ +# kitchen-sink.yaml demonstrates all configurable surface area, including explanatory comments. 
+# +# It DOES NOT represent expected real world configuration, as it makes strange configuration +# choices in an effort to exercise the full surface area. +# +# Configuration values are set to their defaults when default values are defined. + +# The file format version. +file_format: "0.3" + +# Configure if the SDK is disabled or not. This is not required to be provided to ensure the SDK isn't disabled, the default value when this is not provided is for the SDK to be enabled. +disabled: false + +# Configure general attribute limits. See also tracer_provider.limits, logger_provider.limits. +attribute_limits: + # Configure max attribute value size. + attribute_value_length_limit: 4096 + # Configure max attribute count. + attribute_count_limit: 128 + +# Configure logger provider. +logger_provider: + # Configure log record processors. + processors: + - # Configure a batch log record processor. + batch: + # Configure delay interval (in milliseconds) between two consecutive exports. + schedule_delay: 5000 + # Configure maximum allowed time (in milliseconds) to export data. + export_timeout: 30000 + # Configure maximum queue size. + max_queue_size: 2048 + # Configure maximum batch size. + max_export_batch_size: 512 + # Configure exporter. + exporter: + # Configure exporter to be OTLP. + otlp: + # Configure protocol. + protocol: http/protobuf + # Configure endpoint. + endpoint: http://localhost:4318/v1/logs + # Configure certificate. + certificate: /app/cert.pem + # Configure mTLS private client key. + client_key: /app/cert.pem + # Configure mTLS client certificate. + client_certificate: /app/cert.pem + # Configure headers. Entries have higher priority than entries from .headers_list. + headers: + - name: api-key + value: "1234" + # Configure headers. Entries have lower priority than entries from .headers. + # The value is a list of comma separated key-value pairs matching the format of OTEL_EXPORTER_OTLP_HEADERS. 
See https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#configuration-options for details. + headers_list: "api-key=1234" + # Configure compression. + compression: gzip + # Configure max time (in milliseconds) to wait for each export. + timeout: 10000 + # Configure client transport security for the exporter's connection. + insecure: false + - # Configure a simple log record processor. + simple: + # Configure exporter. + exporter: + # Configure exporter to be console. + console: {} + # Configure log record limits. See also attribute_limits. + limits: + # Configure max attribute value size. Overrides .attribute_limits.attribute_value_length_limit. + attribute_value_length_limit: 4096 + # Configure max attribute count. Overrides .attribute_limits.attribute_count_limit. + attribute_count_limit: 128 + +# Configure meter provider. +meter_provider: + # Configure metric readers. + readers: + - # Configure a pull based metric reader. + pull: + # Configure exporter. + exporter: + # Configure exporter to be prometheus. + prometheus: + # Configure host. + host: localhost + # Configure port. + port: 9464 + # Configure Prometheus Exporter to produce metrics without a unit suffix or UNIT metadata. + without_units: false + # Configure Prometheus Exporter to produce metrics without a type suffix. + without_type_suffix: false + # Configure Prometheus Exporter to produce metrics without a scope info metric. + without_scope_info: false + # Configure Prometheus Exporter to add resource attributes as metrics attributes. + with_resource_constant_labels: + # Configure resource attributes to be included. If not set, no resource attributes are included. + # Attribute keys from resources are evaluated to match as follows: + # * If the value of the attribute key exactly matches. + # * If the value of the attribute key matches the wildcard pattern, where '?' 
matches any single character and '*' matches any number of characters including none. + included: + - "service*" + # Configure resource attributes to be excluded. Applies after .with_resource_constant_labels.included (i.e. excluded has higher priority than included). + # Attribute keys from resources are evaluated to match as follows: + # * If the value of the attribute key exactly matches. + # * If the value of the attribute key matches the wildcard pattern, where '?' matches any single character and '*' matches any number of characters including none. + excluded: + - "service.attr1" + # Configure metric producers. + producers: + - # Configure metric producer to be opencensus. + opencensus: {} + - # Configure a periodic metric reader. + periodic: + # Configure delay interval (in milliseconds) between start of two consecutive exports. + interval: 5000 + # Configure maximum allowed time (in milliseconds) to export data. + timeout: 30000 + # Configure exporter. + exporter: + # Configure exporter to be OTLP. + otlp: + # Configure protocol. + protocol: http/protobuf + # Configure endpoint. + endpoint: http://localhost:4318/v1/metrics + # Configure certificate. + certificate: /app/cert.pem + # Configure mTLS private client key. + client_key: /app/cert.pem + # Configure mTLS client certificate. + client_certificate: /app/cert.pem + # Configure headers. Entries have higher priority than entries from .headers_list. + headers: + - name: api-key + value: "1234" + # Configure headers. Entries have lower priority than entries from .headers. + # The value is a list of comma separated key-value pairs matching the format of OTEL_EXPORTER_OTLP_HEADERS. See https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#configuration-options for details. + headers_list: "api-key=1234" + # Configure compression. + compression: gzip + # Configure max time (in milliseconds) to wait for each export. 
+ timeout: 10000 + # Configure client transport security for the exporter's connection. + insecure: false + # Configure temporality preference. + temporality_preference: delta + # Configure default histogram aggregation. + default_histogram_aggregation: base2_exponential_bucket_histogram + # Configure metric producers. + producers: + - # Configure metric producer to be prometheus. + prometheus: {} + - # Configure a periodic metric reader. + periodic: + # Configure exporter. + exporter: + # Configure exporter to be console. + console: {} + # Configure views. Each view has a selector which determines the instrument(s) it applies to, and a configuration for the resulting stream(s). + views: + - # Configure view selector. + selector: + # Configure instrument name selection criteria. + instrument_name: my-instrument + # Configure instrument type selection criteria. + instrument_type: histogram + # Configure the instrument unit selection criteria. + unit: ms + # Configure meter name selection criteria. + meter_name: my-meter + # Configure meter version selection criteria. + meter_version: 1.0.0 + # Configure meter schema url selection criteria. + meter_schema_url: https://opentelemetry.io/schemas/1.16.0 + # Configure view stream. + stream: + # Configure metric name of the resulting stream(s). + name: new_instrument_name + # Configure metric description of the resulting stream(s). + description: new_description + # Configure aggregation of the resulting stream(s). Known values include: default, drop, explicit_bucket_histogram, base2_exponential_bucket_histogram, last_value, sum. + aggregation: + # Configure aggregation to be explicit_bucket_histogram. + explicit_bucket_histogram: + # Configure bucket boundaries. + boundaries: + [ + 0.0, + 5.0, + 10.0, + 25.0, + 50.0, + 75.0, + 100.0, + 250.0, + 500.0, + 750.0, + 1000.0, + 2500.0, + 5000.0, + 7500.0, + 10000.0 + ] + # Configure record min and max. 
+ record_min_max: true + # Configure attribute keys retained in the resulting stream(s). + attribute_keys: + # Configure list of attribute keys to include in the resulting stream(s). All other attributes are dropped. If not set, stream attributes are not configured. + included: + - key1 + - key2 + # Configure list of attribute keys to exclude from the resulting stream(s). Applies after .attribute_keys.included (i.e. excluded has higher priority than included). + excluded: + - key3 + +# Configure text map context propagators. +propagator: + # Configure the set of propagators to include in the composite text map propagator. + composite: [ tracecontext, baggage, b3, b3multi, jaeger, xray, ottrace ] + +# Configure tracer provider. +tracer_provider: + # Configure span processors. + processors: + - # Configure a batch span processor. + batch: + # Configure delay interval (in milliseconds) between two consecutive exports. + schedule_delay: 5000 + # Configure maximum allowed time (in milliseconds) to export data. + export_timeout: 30000 + # Configure maximum queue size. + max_queue_size: 2048 + # Configure maximum batch size. + max_export_batch_size: 512 + # Configure exporter. + exporter: + # Configure exporter to be OTLP. + otlp: + # Configure protocol. + protocol: http/protobuf + # Configure endpoint. + endpoint: http://localhost:4318/v1/traces + # Configure certificate. + certificate: /app/cert.pem + # Configure mTLS private client key. + client_key: /app/cert.pem + # Configure mTLS client certificate. + client_certificate: /app/cert.pem + # Configure headers. Entries have higher priority than entries from .headers_list. + headers: + - name: api-key + value: "1234" + # Configure headers. Entries have lower priority than entries from .headers. + # The value is a list of comma separated key-value pairs matching the format of OTEL_EXPORTER_OTLP_HEADERS. 
See https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#configuration-options for details. + headers_list: "api-key=1234" + # Configure compression. + compression: gzip + # Configure max time (in milliseconds) to wait for each export. + timeout: 10000 + # Configure client transport security for the exporter's connection. + insecure: false + - # Configure a batch span processor. + batch: + # Configure exporter. + exporter: + # Configure exporter to be zipkin. + zipkin: + # Configure endpoint. + endpoint: http://localhost:9411/api/v2/spans + # Configure max time (in milliseconds) to wait for each export. + timeout: 10000 + - # Configure a simple span processor. + simple: + # Configure exporter. + exporter: + # Configure exporter to be console. + console: {} + # Configure span limits. See also attribute_limits. + limits: + # Configure max attribute value size. Overrides .attribute_limits.attribute_value_length_limit. + attribute_value_length_limit: 4096 + # Configure max attribute count. Overrides .attribute_limits.attribute_count_limit. + attribute_count_limit: 128 + # Configure max span event count. + event_count_limit: 128 + # Configure max span link count. + link_count_limit: 128 + # Configure max attributes per span event. + event_attribute_count_limit: 128 + # Configure max attributes per span link. + link_attribute_count_limit: 128 + # Configure the sampler. + sampler: + # Configure sampler to be parent_based. + parent_based: + # Configure root sampler. + root: + # Configure sampler to be trace_id_ratio_based. + trace_id_ratio_based: + # Configure trace_id_ratio. + ratio: 0.0001 + # Configure remote_parent_sampled sampler. + remote_parent_sampled: + # Configure sampler to be always_on. + always_on: {} + # Configure remote_parent_not_sampled sampler. + remote_parent_not_sampled: + # Configure sampler to be always_off. + always_off: {} + # Configure local_parent_sampled sampler. 
+ local_parent_sampled: + # Configure sampler to be always_on. + always_on: {} + # Configure local_parent_not_sampled sampler. + local_parent_not_sampled: + # Configure sampler to be always_off. + always_off: {} + +# Configure resource for all signals. +resource: + # Configure resource attributes. Entries have higher priority than entries from .resource.attributes_list. + # Entries must contain .name and .value, and may optionally include .type, which defaults to "string" if not set. The value must match the type. Values for .type include: string, bool, int, double, string_array, bool_array, int_array, double_array. + attributes: + - name: service.name + value: unknown_service + - name: string_key + value: value + type: string + - name: bool_key + value: true + type: bool + - name: int_key + value: 1 + type: int + - name: double_key + value: 1.1 + type: double + - name: string_array_key + value: [ "value1", "value2" ] + type: string_array + - name: bool_array_key + value: [ true, false ] + type: bool_array + - name: int_array_key + value: [ 1, 2 ] + type: int_array + - name: double_array_key + value: [ 1.1, 2.2 ] + type: double_array + # Configure resource attributes. Entries have lower priority than entries from .resource.attributes. + # The value is a list of comma separated key-value pairs matching the format of OTEL_RESOURCE_ATTRIBUTES. See https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/configuration/sdk-environment-variables.md#general-sdk-configuration for details. + attributes_list: "service.namespace=my-namespace,service.version=1.0.0" + # Configure resource detectors. + detectors: + # Configure attributes provided by resource detectors. + attributes: + # Configure list of attribute key patterns to include from resource detectors. If not set, all attributes are included. + # Attribute keys from resource detectors are evaluated to match as follows: + # * If the value of the attribute key exactly matches.
+ # * If the value of the attribute key matches the wildcard pattern, where '?' matches any single character and '*' matches any number of characters including none. + included: + - process.* + # Configure list of attribute key patterns to exclude from resource detectors. Applies after .resource.detectors.attributes.included (i.e. excluded has higher priority than included). + # Attribute keys from resource detectors are evaluated to match as follows: + # * If the value of the attribute key exactly matches. + # * If the value of the attribute key matches the wildcard pattern, where '?' matches any single character and '*' matches any number of characters including none. + excluded: + - process.command_args + # Configure resource schema URL. + schema_url: https://opentelemetry.io/schemas/1.16.0 + +# Configure instrumentation. +instrumentation: + # Configure general SemConv options that may apply to multiple languages and instrumentations. + # Instrumentation may merge general config options with the language specific configuration at .instrumentation.. + general: + # Configure instrumentations following the peer semantic conventions. + # See peer semantic conventions: https://opentelemetry.io/docs/specs/semconv/attributes-registry/peer/ + peer: + # Configure the service mapping for instrumentations following peer.service semantic conventions. + # Each entry is a key value pair where "peer" defines the IP address and "service" defines the corresponding logical name of the service. + # See peer.service semantic conventions: https://opentelemetry.io/docs/specs/semconv/general/attributes/#general-remote-service-attributes + service_mapping: + - peer: 1.2.3.4 + service: FooService + - peer: 2.3.4.5 + service: BarService + # Configure instrumentations following the http semantic conventions. + # See http semantic conventions: https://opentelemetry.io/docs/specs/semconv/http/ + http: + # Configure instrumentations following the http client semantic conventions.
+ client: + # Configure headers to capture for outbound http requests. + request_captured_headers: + - Content-Type + - Accept + # Configure headers to capture for outbound http responses. + response_captured_headers: + - Content-Type + - Content-Encoding + # Configure instrumentations following the http server semantic conventions. + server: + # Configure headers to capture for inbound http requests. + request_captured_headers: + - Content-Type + - Accept + # Configure headers to capture for outbound http responses. + response_captured_headers: + - Content-Type + - Content-Encoding + # Configure C++ language-specific instrumentation libraries. + cpp: + # Configure the instrumentation corresponding to key "example". + example: + property: "value" + # Configure .NET language-specific instrumentation libraries. + dotnet: + # Configure the instrumentation corresponding to key "example". + example: + property: "value" + # Configure Erlang language-specific instrumentation libraries. + erlang: + # Configure the instrumentation corresponding to key "example". + example: + property: "value" + # Configure Go language-specific instrumentation libraries. + go: + # Configure the instrumentation corresponding to key "example". + example: + property: "value" + # Configure Java language-specific instrumentation libraries. + java: + # Configure the instrumentation corresponding to key "example". + example: + property: "value" + # Configure JavaScript language-specific instrumentation libraries. + js: + # Configure the instrumentation corresponding to key "example". + example: + property: "value" + # Configure PHP language-specific instrumentation libraries. + php: + # Configure the instrumentation corresponding to key "example". + example: + property: "value" + # Configure Python language-specific instrumentation libraries. + python: + # Configure the instrumentation corresponding to key "example". 
+ example: + property: "value" + # Configure Ruby language-specific instrumentation libraries. + ruby: + # Configure the instrumentation corresponding to key "example". + example: + property: "value" + # Configure Rust language-specific instrumentation libraries. + rust: + # Configure the instrumentation corresponding to key "example". + example: + property: "value" + # Configure Swift language-specific instrumentation libraries. + swift: + # Configure the instrumentation corresponding to key "example". + example: + property: "value" diff --git a/config/trace.go b/config/trace.go index aff4c3584ec..b80c8958a47 100644 --- a/config/trace.go +++ b/config/trace.go @@ -52,14 +52,14 @@ func spanExporter(ctx context.Context, exporter SpanExporter) (sdktrace.SpanExpo stdouttrace.WithPrettyPrint(), ) } - if exporter.OTLP != nil { - switch exporter.OTLP.Protocol { + if exporter.OTLP != nil && exporter.OTLP.Protocol != nil { + switch *exporter.OTLP.Protocol { case protocolProtobufHTTP: return otlpHTTPSpanExporter(ctx, exporter.OTLP) case protocolProtobufGRPC: return otlpGRPCSpanExporter(ctx, exporter.OTLP) default: - return nil, fmt.Errorf("unsupported protocol %q", exporter.OTLP.Protocol) + return nil, fmt.Errorf("unsupported protocol %q", *exporter.OTLP.Protocol) } } return nil, errors.New("no valid span exporter") @@ -89,8 +89,8 @@ func spanProcessor(ctx context.Context, processor SpanProcessor) (sdktrace.SpanP func otlpGRPCSpanExporter(ctx context.Context, otlpConfig *OTLP) (sdktrace.SpanExporter, error) { var opts []otlptracegrpc.Option - if len(otlpConfig.Endpoint) > 0 { - u, err := url.ParseRequestURI(otlpConfig.Endpoint) + if otlpConfig.Endpoint != nil { + u, err := url.ParseRequestURI(*otlpConfig.Endpoint) if err != nil { return nil, err } @@ -102,7 +102,7 @@ func otlpGRPCSpanExporter(ctx context.Context, otlpConfig *OTLP) (sdktrace.SpanE if u.Host != "" { opts = append(opts, otlptracegrpc.WithEndpoint(u.Host)) } else { - opts = append(opts, 
otlptracegrpc.WithEndpoint(otlpConfig.Endpoint)) + opts = append(opts, otlptracegrpc.WithEndpoint(*otlpConfig.Endpoint)) } if u.Scheme == "http" { @@ -124,7 +124,7 @@ func otlpGRPCSpanExporter(ctx context.Context, otlpConfig *OTLP) (sdktrace.SpanE opts = append(opts, otlptracegrpc.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) } if len(otlpConfig.Headers) > 0 { - opts = append(opts, otlptracegrpc.WithHeaders(otlpConfig.Headers)) + opts = append(opts, otlptracegrpc.WithHeaders(toStringMap(otlpConfig.Headers))) } return otlptracegrpc.New(ctx, opts...) @@ -133,8 +133,8 @@ func otlpGRPCSpanExporter(ctx context.Context, otlpConfig *OTLP) (sdktrace.SpanE func otlpHTTPSpanExporter(ctx context.Context, otlpConfig *OTLP) (sdktrace.SpanExporter, error) { var opts []otlptracehttp.Option - if len(otlpConfig.Endpoint) > 0 { - u, err := url.ParseRequestURI(otlpConfig.Endpoint) + if otlpConfig.Endpoint != nil { + u, err := url.ParseRequestURI(*otlpConfig.Endpoint) if err != nil { return nil, err } @@ -161,7 +161,7 @@ func otlpHTTPSpanExporter(ctx context.Context, otlpConfig *OTLP) (sdktrace.SpanE opts = append(opts, otlptracehttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) } if len(otlpConfig.Headers) > 0 { - opts = append(opts, otlptracehttp.WithHeaders(otlpConfig.Headers)) + opts = append(opts, otlptracehttp.WithHeaders(toStringMap(otlpConfig.Headers))) } return otlptracehttp.New(ctx, opts...) 
diff --git a/config/trace_test.go b/config/trace_test.go index 4f4a197770e..b526886242a 100644 --- a/config/trace_test.go +++ b/config/trace_test.go @@ -209,7 +209,7 @@ func TestSpanProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: SpanExporter{ OTLP: &OTLP{ - Protocol: "http/invalid", + Protocol: ptr("http/invalid"), }, }, }, @@ -226,11 +226,11 @@ func TestSpanProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: SpanExporter{ OTLP: &OTLP{ - Protocol: "grpc/protobuf", + Protocol: ptr("grpc/protobuf"), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -248,12 +248,12 @@ func TestSpanProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: SpanExporter{ OTLP: &OTLP{ - Protocol: "grpc/protobuf", - Endpoint: "http://localhost:4317", + Protocol: ptr("grpc/protobuf"), + Endpoint: ptr("http://localhost:4317"), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -271,12 +271,12 @@ func TestSpanProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: SpanExporter{ OTLP: &OTLP{ - Protocol: "grpc/protobuf", - Endpoint: "localhost:4317", + Protocol: ptr("grpc/protobuf"), + Endpoint: ptr("localhost:4317"), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -294,12 +294,12 @@ func TestSpanProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: SpanExporter{ OTLP: &OTLP{ - Protocol: "grpc/protobuf", - Endpoint: " ", + Protocol: ptr("grpc/protobuf"), + Endpoint: ptr(" "), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -317,12 +317,12 @@ func 
TestSpanProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: SpanExporter{ OTLP: &OTLP{ - Protocol: "grpc/protobuf", - Endpoint: "localhost:4317", + Protocol: ptr("grpc/protobuf"), + Endpoint: ptr("localhost:4317"), Compression: ptr("invalid"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -340,12 +340,12 @@ func TestSpanProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: SpanExporter{ OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "http://localhost:4318", + Protocol: ptr("http/protobuf"), + Endpoint: ptr("http://localhost:4318"), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -363,12 +363,12 @@ func TestSpanProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: SpanExporter{ OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "http://localhost:4318/path/123", + Protocol: ptr("http/protobuf"), + Endpoint: ptr("http://localhost:4318/path/123"), Compression: ptr("none"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -386,11 +386,11 @@ func TestSpanProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: SpanExporter{ OTLP: &OTLP{ - Protocol: "http/protobuf", + Protocol: ptr("http/protobuf"), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -408,12 +408,12 @@ func TestSpanProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: SpanExporter{ OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("http/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": 
"test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -431,12 +431,12 @@ func TestSpanProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: SpanExporter{ OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: " ", + Protocol: ptr("http/protobuf"), + Endpoint: ptr(" "), Compression: ptr("gzip"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -454,12 +454,12 @@ func TestSpanProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: SpanExporter{ OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("http/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("none"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, }, @@ -477,12 +477,12 @@ func TestSpanProcessor(t *testing.T) { ScheduleDelay: ptr(0), Exporter: SpanExporter{ OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", + Protocol: ptr("http/protobuf"), + Endpoint: ptr("localhost:4318"), Compression: ptr("invalid"), Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", + Headers: []NameStringValuePair{ + {Name: "test", Value: ptr("test1")}, }, }, },