remove usage of SmallHashMap (#584)
Phase out usage of the SmallHashMap type. Would like to
remove this from Atlas to simplify the code base.
brharrington authored Oct 10, 2024
1 parent 7345de8 commit d002c67
Showing 5 changed files with 24 additions and 49 deletions.
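
For reviewers unfamiliar with the replacement type: the diffs below exercise a few construction patterns for SortedTagMap. A minimal sketch using only the call shapes visible in this commit (values are illustrative):

```scala
import com.netflix.atlas.core.util.SortedTagMap

// Varargs of key/value pairs, as in the test suite changes below.
val simple = SortedTagMap("name" -> "foo")

// From an existing Map, as in the "too many user tags" tests.
val fromMap = SortedTagMap(Map("name" -> "foo", "nf.app" -> "www"))

// Entry iteration, as used by createPayload in the test suite.
fromMap.foreachEntry { (k, v) => println(s"$k=$v") }
```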
@@ -20,7 +20,7 @@ import com.fasterxml.jackson.core.JsonToken
 import com.github.benmanes.caffeine.cache.Caffeine
 import com.netflix.atlas.core.model.TagKey
 import com.netflix.atlas.core.util.IdMap
-import com.netflix.atlas.core.util.SmallHashMap
+import com.netflix.atlas.core.util.SortedTagMap
 import com.netflix.atlas.core.validation.CompositeTagRule
 import com.netflix.atlas.core.validation.Rule
 import com.netflix.atlas.core.validation.TagRule
@@ -191,7 +191,7 @@ class PayloadDecoder(

       if (name == null) {
         // This should never happen if clients are working properly
-        val builder = new SmallHashMap.Builder[String, String](n + 1)
+        val builder = new SortedTagMap.Builder(n + 1)
         var i = 0
         while (i < pos) {
           builder.add(tags(i), tags(i + 1))
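
The builder change above also drops the type parameters, since SortedTagMap is specialized to String keys and values. A rough sketch of the decode loop's pattern; only the constructor and add calls appear in this diff, so the result() finisher is an assumption about the Builder API:

```scala
import com.netflix.atlas.core.util.SortedTagMap

// Flattened key/value pairs, mirroring the tags array in PayloadDecoder.
val tags = Array("name", "foo", "nf.app", "www")

// Size hint up front (n pairs plus one extra slot), then positional adds.
val builder = new SortedTagMap.Builder(tags.length / 2 + 1)
var i = 0
while (i < tags.length) {
  builder.add(tags(i), tags(i + 1))
  i += 2
}
val map = builder.result() // assumed finisher method
```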

This file was deleted.

@@ -22,7 +22,7 @@ import org.apache.pekko.http.scaladsl.model.StatusCodes
 import com.fasterxml.jackson.core.JsonFactory
 import com.netflix.atlas.pekko.ByteStringInputStream
 import com.netflix.atlas.core.util.RefIntHashMap
-import com.netflix.atlas.core.util.SmallHashMap
+import com.netflix.atlas.core.util.SortedTagMap
 import com.netflix.atlas.core.util.Strings
 import com.netflix.atlas.json.Json
 import com.netflix.spectator.api.Clock
@@ -181,7 +181,7 @@ class UpdateApiSuite extends FunSuite {
     assertEquals(service.lookup(id).counter(id).actualCount(), 42.0)
   }

-  private def createPayload(ts: List[TagMap], op: Int, value: Double): String = {
+  private def createPayload(ts: List[SortedTagMap], op: Int, value: Double): String = {
     val stringTable = new RefIntHashMap[String]()
     ts.foreach { tags =>
       tags.foreachEntry { (k, v) =>
@@ -218,11 +218,11 @@
     Json.encode(data)
   }

-  private def validationTest(tags: TagMap, expectedStatus: StatusCode): FailureMessage = {
+  private def validationTest(tags: SortedTagMap, expectedStatus: StatusCode): FailureMessage = {
     validationTest(List(tags), expectedStatus)
   }

-  private def validationTest(ts: List[TagMap], expectedStatus: StatusCode): FailureMessage = {
+  private def validationTest(ts: List[SortedTagMap], expectedStatus: StatusCode): FailureMessage = {
     val clock = new ManualClock()
     val service = createAggrService(clock)
     val parser = factory.createParser(createPayload(ts, 0, 1.0))
@@ -234,23 +234,23 @@
   }

   test("validation: ok") {
-    val tags = SmallHashMap("name" -> "foo")
+    val tags = SortedTagMap("name" -> "foo")
     validationTest(tags, StatusCodes.OK)
   }

   test("validation: missing name") {
-    val tags = SmallHashMap("foo" -> "bar")
+    val tags = SortedTagMap("foo" -> "bar")
     val msg = validationTest(tags, StatusCodes.BadRequest)
     assertEquals(msg.errorCount, 1)
     assertEquals(
       msg.message,
-      List("missing key 'name' (tags={\"foo\":\"bar\",\"atlas.dstype\":\"sum\"})")
+      List("missing key 'name' (tags={\"atlas.dstype\":\"sum\",\"foo\":\"bar\"})")
     )
   }

   test("validation: name too long") {
     val name = "a" * 300
-    val tags = SmallHashMap("name" -> name)
+    val tags = SortedTagMap("name" -> name)
     val msg = validationTest(tags, StatusCodes.BadRequest)
     assertEquals(msg.errorCount, 1)
     assertEquals(
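
Note why the expected message above changed: SortedTagMap keeps its entries sorted by key, so `atlas.dstype` now precedes `foo` when the tags are encoded. A small sketch of the property the updated assertion relies on:

```scala
import com.netflix.atlas.core.util.SortedTagMap

val tags = SortedTagMap("foo" -> "bar", "atlas.dstype" -> "sum")

// Entries come back in key order regardless of insertion order:
// atlas.dstype=sum
// foo=bar
tags.foreachEntry { (k, v) => println(s"$k=$v") }
```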
@@ -265,7 +265,7 @@
     val tags = Map("name" -> "foo") ++ (0 until 20)
       .map(v => Strings.zeroPad(v, 5))
       .map(v => v -> v)
-    val msg = validationTest(SmallHashMap(tags), StatusCodes.BadRequest)
+    val msg = validationTest(SortedTagMap(tags), StatusCodes.BadRequest)
     assertEquals(msg.errorCount, 1)
     assert(msg.message.head.startsWith("too many user tags: 21 > 20 (tags={"))
   }
@@ -274,12 +274,12 @@
     val tags = Map("name" -> "foo", "nf.app" -> "www") ++ (0 until 19)
       .map(v => Strings.zeroPad(v, 5))
       .map(v => v -> v)
-    val msg = validationTest(SmallHashMap(tags), StatusCodes.OK)
+    val msg = validationTest(SortedTagMap(tags), StatusCodes.OK)
     assertEquals(msg.errorCount, 0)
   }

   test("validation: tag rule") {
-    val tags = SmallHashMap("name" -> "test", "nf.foo" -> "bar")
+    val tags = SortedTagMap("name" -> "test", "nf.foo" -> "bar")
     val msg = validationTest(tags, StatusCodes.BadRequest)
     assertEquals(msg.errorCount, 1)
     assertEquals(
@@ -292,8 +292,8 @@

   test("validation: partial failure") {
     val ts = List(
-      SmallHashMap("name" -> "test", "nf.foo" -> "bar"),
-      SmallHashMap("name" -> "test", "nf.app" -> "bar")
+      SortedTagMap("name" -> "test", "nf.foo" -> "bar"),
+      SortedTagMap("name" -> "test", "nf.app" -> "bar")
     )
     val msg = validationTest(ts, StatusCodes.Accepted)
     assertEquals(msg.errorCount, 1)
@@ -307,7 +307,7 @@

   test("validation: truncate if there are too many errors") {
     val ts = (0 until 20).toList.map { i =>
-      SmallHashMap("name" -> i.toString, "nf.foo" -> "bar")
+      SortedTagMap("name" -> i.toString, "nf.foo" -> "bar")
     }
     val msg = validationTest(ts, StatusCodes.BadRequest)
     assertEquals(msg.errorCount, 20)
@@ -22,7 +22,7 @@ import com.netflix.atlas.cloudwatch.CloudWatchMetricsProcessor.normalize
 import com.netflix.atlas.cloudwatch.CloudWatchMetricsProcessor.toAWSDatapoint
 import com.netflix.atlas.cloudwatch.CloudWatchMetricsProcessor.toAWSDimensions
 import com.netflix.atlas.cloudwatch.CloudWatchMetricsProcessor.toTagMap
-import com.netflix.atlas.core.util.SmallHashMap
+import com.netflix.atlas.core.util.SortedTagMap
 import com.netflix.spectator.api.Registry
 import com.typesafe.config.Config
 import com.typesafe.scalalogging.StrictLogging
@@ -1035,8 +1035,7 @@ object CloudWatchMetricsProcessor {
    *   A non-null map with at least the `name` and `aws.namespace` tags.
    */
   private[cloudwatch] def toTagMap(entry: CloudWatchCacheEntry): Map[String, String] = {
-    SmallHashMap(
-      entry.getDimensionsCount + 2,
+    SortedTagMap(
       entry.getDimensionsList.asScala
         .map(d => d.getName -> d.getValue)
         .append("name" -> entry.getMetric)
@@ -1050,14 +1049,13 @@
    * Query filter from a [MetricCategory] config. Note that the `name` and `aws.namespace` tags are populated per
    * Atlas standards.
    *
-   * @param entry
-   *   The non-null entry to encode.
+   * @param datapoint
+   *   The non-null datapoint to encode.
    * @return
-   *  A non-null map with at least the `name` and `aws.namespace` tags.
+   *   A non-null map with at least the `name` and `aws.namespace` tags.
    */
   private[cloudwatch] def toTagMap(datapoint: FirehoseMetric): Map[String, String] = {
-    SmallHashMap(
-      datapoint.dimensions.size + 2,
+    SortedTagMap(
       datapoint.dimensions
         .map(d => d.name() -> d.value())
         .appended("name" -> datapoint.metricName)
@@ -1077,8 +1075,7 @@
    *   A non-null map with at least the `name` and `aws.namespace` tags.
    */
   private[cloudwatch] def toTagMap(metric: Metric): Map[String, String] = {
-    SmallHashMap(
-      metric.dimensions().size() + 2,
+    SortedTagMap(
       metric
         .dimensions()
         .asScala
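
The toTagMap overloads above also lose their explicit size hints: SmallHashMap's apply took an entry count as its first argument, while SortedTagMap sizes itself from the input pairs. A sketch of the new call shape, with hypothetical dimension values standing in for the AWS metric data:

```scala
import com.netflix.atlas.core.util.SortedTagMap

// Hypothetical dimensions; real values come from the CloudWatch entry.
val dimensions = List("BucketName" -> "b1", "Region" -> "us-east-1")

// No `dimensions.size + 2` count needed, unlike the old SmallHashMap apply.
val tags = SortedTagMap(
  dimensions
    .appended("name" -> "requestCount")
    .appended("aws.namespace" -> "AWS/S3")
)
```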
@@ -190,7 +190,7 @@ class RollingFileWriter(
   }

   private def toAvro(dp: Datapoint): GenericRecord = {
-    // Use custom wrapper for SmallHashMap if possible as it avoids allocations when
+    // Use custom wrapper for scala Map if possible as it avoids allocations when
     // iterating across the entries.
     val tags = toCompressedJavaMap(dp.tags)
     val record = new GenericData.Record(RollingFileWriter.AvroSchema)
