
Commit 9af2e6e
Fix incorrectly reversed latency settings for the server and client
Frapschen committed Aug 30, 2024
1 parent 240ff76 commit 9af2e6e
Showing 5 changed files with 113 additions and 87 deletions.
27 changes: 27 additions & 0 deletions .chloggen/fix-wrong-latency.yaml
@@ -0,0 +1,27 @@
# Use this changelog template to create an entry for release notes.

# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: bug_fix

# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
component: servicegraphconnector

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Fix incorrectly reversed latency settings for the server and client

# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
issues: []

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:

# If your change doesn't affect end users or the exported elements of any package,
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
# Optional: The change log or logs in which this entry should be included.
# e.g. '[user]' or '[user, api]'
# Include 'user' if the change is relevant to end users.
# Include 'api' if there is a change to a library API.
# Default: '[user]'
change_logs: []
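
For context, the bug being fixed is a simple transposition in connector.go: the client-latency collector read from the server-side aggregation maps and vice versa. A minimal, hypothetical Go sketch of the pattern (simplified names; the real connector keys several per-series maps, as the diff below shows):

```go
package servicegraph

// Hypothetical, simplified illustration of the transposition fixed in this
// commit; the real connector uses maps such as reqClientDurationSecondsSum.
type aggregates struct {
	clientSum map[string]float64
	serverSum map[string]float64
}

// Before the fix: the client metric was populated from the server map.
func (a *aggregates) clientSumBuggy(key string) float64 {
	return a.serverSum[key] // reversed lookup
}

// After the fix: each metric reads its own role's aggregate.
func (a *aggregates) clientSumFixed(key string) float64 {
	return a.clientSum[key]
}
```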
12 changes: 6 additions & 6 deletions connector/servicegraphconnector/connector.go
@@ -545,9 +545,9 @@ func (p *serviceGraphConnector) collectClientLatencyMetrics(ilm pmetric.ScopeMet
dpDuration.SetStartTimestamp(pcommon.NewTimestampFromTime(p.startTime))
dpDuration.SetTimestamp(timestamp)
dpDuration.ExplicitBounds().FromRaw(p.reqDurationBounds)
- dpDuration.BucketCounts().FromRaw(p.reqServerDurationSecondsBucketCounts[key])
- dpDuration.SetCount(p.reqServerDurationSecondsCount[key])
- dpDuration.SetSum(p.reqServerDurationSecondsSum[key])
+ dpDuration.BucketCounts().FromRaw(p.reqClientDurationSecondsBucketCounts[key])
+ dpDuration.SetCount(p.reqClientDurationSecondsCount[key])
+ dpDuration.SetSum(p.reqClientDurationSecondsSum[key])

// TODO: Support exemplars
dimensions, ok := p.dimensionsForSeries(key)
@@ -575,9 +575,9 @@ func (p *serviceGraphConnector) collectServerLatencyMetrics(ilm pmetric.ScopeMet
dpDuration.SetStartTimestamp(pcommon.NewTimestampFromTime(p.startTime))
dpDuration.SetTimestamp(timestamp)
dpDuration.ExplicitBounds().FromRaw(p.reqDurationBounds)
- dpDuration.BucketCounts().FromRaw(p.reqClientDurationSecondsBucketCounts[key])
- dpDuration.SetCount(p.reqClientDurationSecondsCount[key])
- dpDuration.SetSum(p.reqClientDurationSecondsSum[key])
+ dpDuration.BucketCounts().FromRaw(p.reqServerDurationSecondsBucketCounts[key])
+ dpDuration.SetCount(p.reqServerDurationSecondsCount[key])
+ dpDuration.SetSum(p.reqServerDurationSecondsSum[key])

// TODO: Support exemplars
dimensions, ok := p.dimensionsForSeries(key)
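Why the bug survived the existing tests: the old sample trace gave the client and server spans identical 1 s durations, so swapped lookups produced identical histograms and every assertion still passed. The test changes below break that symmetry. A hypothetical sketch of the principle:

```go
package servicegraph

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Hypothetical sketch: with symmetric inputs (1 s / 1 s) a client/server swap
// is invisible; the asymmetric inputs introduced below (client 1 s, server 2 s)
// make a swapped implementation fail.
func TestSwapIsObservable(t *testing.T) {
	clientSeconds, serverSeconds := 1.0, 2.0 // deliberately different

	// Stand-ins for the connector output; a reversed implementation would
	// return these transposed and trip the assertions.
	gotClient, gotServer := clientSeconds, serverSeconds

	assert.Equal(t, 1.0, gotClient)
	assert.Equal(t, 2.0, gotServer)
}
```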
29 changes: 14 additions & 15 deletions connector/servicegraphconnector/connector_test.go
@@ -81,7 +81,7 @@ func TestConnectorConsume(t *testing.T) {
},
},
sampleTraces: buildSampleTrace(t, "val"),
- verifyMetrics: verifyHappyCaseMetrics,
+ verifyMetrics: verifyHappyCaseMetricsWithDuration(2, 1),
},
{
name: "test fix failed label not work",
@@ -163,7 +163,7 @@ func TestConnectorConsume(t *testing.T) {
},
sampleTraces: buildSampleTrace(t, "val"),
gates: []*featuregate.Gate{legacyLatencyUnitMsFeatureGate},
- verifyMetrics: verifyHappyCaseMetricsWithDuration(1000),
+ verifyMetrics: verifyHappyCaseMetricsWithDuration(2000, 1000),
},
} {
t.Run(tc.name, func(t *testing.T) {
@@ -209,11 +209,7 @@ func getGoldenTraces(t *testing.T, file string) ptrace.Traces {
return td
}

- func verifyHappyCaseMetrics(t *testing.T, md pmetric.Metrics) {
- verifyHappyCaseMetricsWithDuration(1)(t, md)
- }
-
- func verifyHappyCaseMetricsWithDuration(durationSum float64) func(t *testing.T, md pmetric.Metrics) {
+ func verifyHappyCaseMetricsWithDuration(serverDurationSum, clientDurationSum float64) func(t *testing.T, md pmetric.Metrics) {
return func(t *testing.T, md pmetric.Metrics) {
assert.Equal(t, 3, md.MetricCount())

@@ -231,11 +227,11 @@ func verifyHappyCaseMetricsWithDuration(durationSum float64) func(t *testing.T,

mServerDuration := ms.At(1)
assert.Equal(t, "traces_service_graph_request_server_seconds", mServerDuration.Name())
- verifyDuration(t, mServerDuration, durationSum)
+ verifyDuration(t, mServerDuration, serverDurationSum, []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0})

mClientDuration := ms.At(2)
assert.Equal(t, "traces_service_graph_request_client_seconds", mClientDuration.Name())
- verifyDuration(t, mClientDuration, durationSum)
+ verifyDuration(t, mClientDuration, clientDurationSum, []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0})
}
}

@@ -259,16 +255,16 @@ func verifyCount(t *testing.T, m pmetric.Metric) {
verifyAttr(t, attributes, "client_some-attribute", "val")
}

- func verifyDuration(t *testing.T, m pmetric.Metric, durationSum float64) {
+ func verifyDuration(t *testing.T, m pmetric.Metric, durationSum float64, bs []uint64) {
assert.Equal(t, pmetric.MetricTypeHistogram, m.Type())
dps := m.Histogram().DataPoints()
assert.Equal(t, 1, dps.Len())

dp := dps.At(0)
- assert.Equal(t, durationSum, dp.Sum()) // Duration: 1sec
+ assert.Equal(t, durationSum, dp.Sum()) // Duration: client is 1sec, server is 2sec
assert.Equal(t, uint64(1), dp.Count())
buckets := pcommon.NewUInt64Slice()
- buckets.FromRaw([]uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0})
+ buckets.FromRaw(bs)
assert.Equal(t, buckets, dp.BucketCounts())

attributes := dp.Attributes()
@@ -287,7 +283,10 @@ func verifyAttr(t *testing.T, attrs pcommon.Map, k, expected string) {

func buildSampleTrace(t *testing.T, attrValue string) ptrace.Traces {
tStart := time.Date(2022, 1, 2, 3, 4, 5, 6, time.UTC)
- tEnd := time.Date(2022, 1, 2, 3, 4, 6, 6, time.UTC)
+ // client: 1s
+ cEnd := time.Date(2022, 1, 2, 3, 4, 6, 6, time.UTC)
+ // server: 2s
+ sEnd := time.Date(2022, 1, 2, 3, 4, 7, 6, time.UTC)

traces := ptrace.NewTraces()

@@ -312,7 +311,7 @@ func buildSampleTrace(t *testing.T, attrValue string) ptrace.Traces {
clientSpan.SetTraceID(traceID)
clientSpan.SetKind(ptrace.SpanKindClient)
clientSpan.SetStartTimestamp(pcommon.NewTimestampFromTime(tStart))
- clientSpan.SetEndTimestamp(pcommon.NewTimestampFromTime(tEnd))
+ clientSpan.SetEndTimestamp(pcommon.NewTimestampFromTime(cEnd))
clientSpan.Attributes().PutStr("some-attribute", attrValue) // Attribute selected as dimension for metrics
serverSpan := scopeSpans.Spans().AppendEmpty()
serverSpan.SetName("server span")
@@ -321,7 +320,7 @@ func buildSampleTrace(t *testing.T, attrValue string) ptrace.Traces {
serverSpan.SetParentSpanID(clientSpanID)
serverSpan.SetKind(ptrace.SpanKindServer)
serverSpan.SetStartTimestamp(pcommon.NewTimestampFromTime(tStart))
- serverSpan.SetEndTimestamp(pcommon.NewTimestampFromTime(tEnd))
+ serverSpan.SetEndTimestamp(pcommon.NewTimestampFromTime(sEnd))

return traces
}
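The expected bucket arrays passed to verifyDuration follow the usual explicit-bounds rule: a value lands in the first bucket whose upper bound is at least the value. A small standalone sketch; the bounds here are illustrative stand-ins chosen so that 1 s falls at index 10 and 2 s at index 12, matching the 17-element arrays above (the connector defines its own defaults):

```go
package main

import "fmt"

// bucketIndex returns the histogram bucket for v under explicit bounds:
// the first index whose bound is >= v, or len(bounds) for the overflow bucket.
func bucketIndex(v float64, bounds []float64) int {
	for i, b := range bounds {
		if v <= b {
			return i
		}
	}
	return len(bounds)
}

func main() {
	// Illustrative bounds in seconds (16 bounds -> 17 buckets).
	bounds := []float64{0.002, 0.004, 0.006, 0.008, 0.01, 0.05, 0.1,
		0.2, 0.4, 0.8, 1, 1.4, 2, 5, 10, 15}
	fmt.Println(bucketIndex(1, bounds)) // 10: the client span (1 s)
	fmt.Println(bucketIndex(2, bounds)) // 12: the server span (2 s)
}
```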
@@ -32,74 +32,74 @@ resourceMetrics:
- histogram:
aggregationTemporality: 2
dataPoints:
- - attributes:
- - key: client
- value:
- stringValue: user
- - key: connection_type
- value:
- stringValue: virtual_node
- - key: failed
- value:
- boolValue: false
- - key: server
- value:
- stringValue: bar-requester
- - key: server_peer.service
- value:
- stringValue: external-platform
- - key: virtual_node
- value:
- stringValue: client
- bucketCounts:
- - "1"
- - "0"
- - "0"
- - "0"
- count: "1"
- explicitBounds:
- - 0.1
- - 1
- - 10
- startTimeUnixNano: "1000000"
- sum: 0.000000
- timeUnixNano: "2000000"
+ - attributes:
+ - key: client
+ value:
+ stringValue: user
+ - key: connection_type
+ value:
+ stringValue: virtual_node
+ - key: failed
+ value:
+ boolValue: false
+ - key: server
+ value:
+ stringValue: bar-requester
+ - key: server_peer.service
+ value:
+ stringValue: external-platform
+ - key: virtual_node
+ value:
+ stringValue: client
+ bucketCounts:
+ - "1"
+ - "0"
+ - "0"
+ - "0"
+ count: "1"
+ explicitBounds:
+ - 0.1
+ - 1
+ - 10
+ startTimeUnixNano: "1000000"
+ sum: 1e-06
+ timeUnixNano: "2000000"
name: traces_service_graph_request_server_seconds
- histogram:
aggregationTemporality: 2
dataPoints:
- - attributes:
- - key: client
- value:
- stringValue: user
- - key: connection_type
- value:
- stringValue: virtual_node
- - key: failed
- value:
- boolValue: false
- - key: server
- value:
- stringValue: bar-requester
- - key: server_peer.service
- value:
- stringValue: external-platform
- - key: virtual_node
- value:
- stringValue: client
- bucketCounts:
- - "1"
- - "0"
- - "0"
- - "0"
- count: "1"
- explicitBounds:
- - 0.1
- - 1
- - 10
- startTimeUnixNano: "1000000"
- sum: 0.000001
- timeUnixNano: "2000000"
+ - attributes:
+ - key: client
+ value:
+ stringValue: user
+ - key: connection_type
+ value:
+ stringValue: virtual_node
+ - key: failed
+ value:
+ boolValue: false
+ - key: server
+ value:
+ stringValue: bar-requester
+ - key: server_peer.service
+ value:
+ stringValue: external-platform
+ - key: virtual_node
+ value:
+ stringValue: client
+ bucketCounts:
+ - "1"
+ - "0"
+ - "0"
+ - "0"
+ count: "1"
+ explicitBounds:
+ - 0.1
+ - 1
+ - 10
+ startTimeUnixNano: "1000000"
+ sum: 0
+ timeUnixNano: "2000000"
name: traces_service_graph_request_client_seconds
scope:
name: traces_service_graph
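
About the tiny sums that swap places above: the latency metrics are recorded in seconds, so a 1 µs span contributes sum: 1e-06 while a zero-length virtual span contributes sum: 0; the fix moves each value to the metric of the matching role. A sketch of the conversion, assuming a hypothetical helper (not taken from this commit):

```go
package servicegraph

import (
	"time"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

// spanDurationSeconds converts pdata span timestamps (nanoseconds) into the
// seconds recorded by traces_service_graph_request_{client,server}_seconds.
// Hypothetical helper for illustration only.
func spanDurationSeconds(s ptrace.Span) float64 {
	return float64(s.EndTimestamp()-s.StartTimestamp()) / float64(time.Second)
}
```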
@@ -56,7 +56,7 @@ resourceMetrics:
- 1
- 10
startTimeUnixNano: "1000000"
- sum: 0.000001
+ sum: 0
timeUnixNano: "2000000"
name: traces_service_graph_request_server_seconds
- histogram:
@@ -89,7 +89,7 @@
- 1
- 10
startTimeUnixNano: "1000000"
- sum: 0.000000
+ sum: 1e-06
timeUnixNano: "2000000"
name: traces_service_graph_request_client_seconds
scope:
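The golden files in this commit are the kind usually checked with the repo's golden and pmetrictest helpers; a hedged sketch of the typical pattern (the file name and option set here are illustrative, not quoted from this test suite):

```go
package servicegraph

import (
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"
	"go.opentelemetry.io/collector/pdata/pmetric"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden"
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest"
)

// verifyGolden compares connector output against an expected-metrics YAML
// file, ignoring timestamps that differ from run to run.
func verifyGolden(t *testing.T, actual pmetric.Metrics, file string) {
	expected, err := golden.ReadMetrics(filepath.Join("testdata", file))
	require.NoError(t, err)
	require.NoError(t, pmetrictest.CompareMetrics(expected, actual,
		pmetrictest.IgnoreStartTimestamp(),
		pmetrictest.IgnoreTimestamp(),
	))
}
```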
