Resp parseEntity(final HttpEntity entity, final CheckedFu
if (entity.getContentType() == null) {
throw new IllegalStateException("OpenSearch didn't return the [Content-Type] header, unable to parse response body");
}
- MediaType medaiType = MediaType.fromMediaType(entity.getContentType());
- if (medaiType == null) {
+ MediaType mediaType = MediaType.fromMediaType(entity.getContentType());
+ if (mediaType == null) {
throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType());
}
- try (XContentParser parser = medaiType.xContent().createParser(registry, DEPRECATION_HANDLER, entity.getContent())) {
+ try (XContentParser parser = mediaType.xContent().createParser(registry, DEPRECATION_HANDLER, entity.getContent())) {
return entityParser.apply(parser);
}
}
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java
index da9f790215669..f5b1b0768ff4a 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java
@@ -1299,4 +1299,61 @@ public void testMultiTermvectors() throws IOException {
}
}
}
+
+ public void testBulkWithRequireAlias() throws IOException {
+ {
+ String indexAliasName = "testindex-1";
+
+ BulkRequest bulkRequest = new BulkRequest(indexAliasName);
+ bulkRequest.requireAlias(true);
+ bulkRequest.add(new IndexRequest().id("1").source("{ \"name\": \"Biden\" }", XContentType.JSON));
+ bulkRequest.add(new IndexRequest().id("2").source("{ \"name\": \"Trump\" }", XContentType.JSON));
+
+ BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync, RequestOptions.DEFAULT);
+
+ assertFalse("Should not auto-create the '" + indexAliasName + "' index.", indexExists(indexAliasName));
+ assertTrue("Bulk response must have failures.", bulkResponse.hasFailures());
+ }
+ {
+ String indexAliasName = "testindex-2";
+
+ BulkRequest bulkRequest = new BulkRequest();
+ bulkRequest.requireAlias(true);
+ bulkRequest.add(new IndexRequest().index(indexAliasName).id("1").source("{ \"name\": \"Biden\" }", XContentType.JSON));
+ bulkRequest.add(new IndexRequest().index(indexAliasName).id("2").source("{ \"name\": \"Trump\" }", XContentType.JSON));
+
+ BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync, RequestOptions.DEFAULT);
+
+ assertFalse("Should not auto-create the '" + indexAliasName + "' index.", indexExists(indexAliasName));
+ assertTrue("Bulk response must have failures.", bulkResponse.hasFailures());
+ }
+ {
+ String indexAliasName = "testindex-3";
+
+ BulkRequest bulkRequest = new BulkRequest(indexAliasName);
+ bulkRequest.add(new IndexRequest().id("1").setRequireAlias(true).source("{ \"name\": \"Biden\" }", XContentType.JSON));
+ bulkRequest.add(new IndexRequest().id("2").setRequireAlias(true).source("{ \"name\": \"Trump\" }", XContentType.JSON));
+
+ BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync, RequestOptions.DEFAULT);
+
+ assertFalse("Should not auto-create the '" + indexAliasName + "' index.", indexExists(indexAliasName));
+ assertTrue("Bulk response must have failures.", bulkResponse.hasFailures());
+ }
+ {
+ String indexAliasName = "testindex-4";
+
+ BulkRequest bulkRequest = new BulkRequest();
+ bulkRequest.add(
+ new IndexRequest().index(indexAliasName).id("1").setRequireAlias(true).source("{ \"name\": \"Biden\" }", XContentType.JSON)
+ );
+ bulkRequest.add(
+ new IndexRequest().index(indexAliasName).id("2").setRequireAlias(true).source("{ \"name\": \"Trump\" }", XContentType.JSON)
+ );
+
+ BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync, RequestOptions.DEFAULT);
+
+ assertFalse("Should not auto-create the '" + indexAliasName + "' index.", indexExists(indexAliasName));
+ assertTrue("Bulk response must have failures.", bulkResponse.hasFailures());
+ }
+ }
}
diff --git a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java
index af7138569972a..726c381db09f6 100644
--- a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java
+++ b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java
@@ -105,8 +105,13 @@ private static String javaLocaleProviders() {
SPI setting is used to allow loading custom CalendarDataProvider
in jdk8 it has to be loaded from jre/lib/ext,
in jdk9+ it is already within ES project and on a classpath
+
+ Due to internationalization enhancements in JDK 9, OpenSearch needs to set the provider to COMPAT, otherwise time/date
+ parsing will break in an incompatible way for some date patterns and locales.
+ //TODO COMPAT will be deprecated at some point, please see https://bugs.openjdk.java.net/browse/JDK-8232906
+ See also: documentation in server/org.opensearch.common.time.IsoCalendarDataProvider
*/
- return "-Djava.locale.providers=SPI,CLDR";
+ return "-Djava.locale.providers=SPI,COMPAT";
}
}
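A minimal, hypothetical sketch (not part of this change) showing how the provider chain set by the flag above can be inspected at runtime; the class name, pattern, and locale are illustrative assumptions:

import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.util.Locale;

public class LocaleProviderCheck {
    public static void main(String[] args) {
        // Prints the chain passed via -Djava.locale.providers, e.g. "SPI,COMPAT".
        System.out.println(System.getProperty("java.locale.providers"));
        // Locale-sensitive formatting below is backed by whichever provider in the chain wins.
        DateTimeFormatter fmt = DateTimeFormatter.ofPattern("EEEE dd MMMM yyyy", Locale.GERMAN);
        System.out.println(fmt.format(LocalDate.of(2024, 1, 1)));
    }
}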
diff --git a/gradle/ide.gradle b/gradle/ide.gradle
index e266d9add172d..ea353f8d92bdd 100644
--- a/gradle/ide.gradle
+++ b/gradle/ide.gradle
@@ -81,7 +81,7 @@ if (System.getProperty('idea.active') == 'true') {
}
runConfigurations {
defaults(JUnit) {
- vmParameters = '-ea -Djava.locale.providers=SPI,CLDR'
+ vmParameters = '-ea -Djava.locale.providers=SPI,COMPAT'
if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_17) {
vmParameters += ' -Djava.security.manager=allow'
}
diff --git a/libs/core/licenses/lucene-core-9.11.0-snapshot-4be6531.jar.sha1 b/libs/core/licenses/lucene-core-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index 943a9b2fd214b..0000000000000
--- a/libs/core/licenses/lucene-core-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3c2361bd633374ae3814b175cc25ccf773f67026
\ No newline at end of file
diff --git a/libs/core/licenses/lucene-core-9.11.0.jar.sha1 b/libs/core/licenses/lucene-core-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..b0d38c4165581
--- /dev/null
+++ b/libs/core/licenses/lucene-core-9.11.0.jar.sha1
@@ -0,0 +1 @@
+2e487755a6814b2a1bc770c26569dcba86873dcf
\ No newline at end of file
diff --git a/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java b/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java
index af09a7aebba79..711f56c9f3e3b 100644
--- a/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java
+++ b/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java
@@ -78,6 +78,19 @@ public static Compressor compressor(final BytesReference bytes) {
return null;
}
+ /**
+ * @param bytes The bytes to check the compression for
+ * @return The detected compressor, or the NoneCompressor if no compressor is detected.
+ */
+ public static Compressor compressorForWritable(final BytesReference bytes) {
+ for (Compressor compressor : registeredCompressors.values()) {
+ if (compressor.isCompressed(bytes)) {
+ return compressor;
+ }
+ }
+ return CompressorRegistry.none();
+ }
+
/** Decompress the provided {@link BytesReference}. */
public static BytesReference uncompress(BytesReference bytes) throws IOException {
Compressor compressor = compressor(bytes);
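A hypothetical caller-side fragment (not part of the patch) contrasting the new method with compressor(bytes): it never returns null, so callers can branch on the NoneCompressor instead of a null check. Only names already visible in this file are used; the surrounding method and the bytes variable are assumed.

// bytes is some BytesReference payload received by the caller
Compressor compressor = CompressorRegistry.compressorForWritable(bytes);
if (compressor != CompressorRegistry.none()) {
    // Only decompress when a real compressor matched the header bytes.
    bytes = CompressorRegistry.uncompress(bytes);
}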
diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageInfo.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageInfo.java
index a278b61894a65..e7b51c3389b52 100644
--- a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageInfo.java
+++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageInfo.java
@@ -104,6 +104,10 @@ public long getTotalValue() {
return endValue.get() - startValue;
}
+ public long getStartValue() {
+ return startValue;
+ }
+
@Override
public String toString() {
return String.valueOf(getTotalValue());
diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceInfo.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceInfo.java
new file mode 100644
index 0000000000000..373cdbfa7e9a1
--- /dev/null
+++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceInfo.java
@@ -0,0 +1,225 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.core.tasks.resourcetracker;
+
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.core.ParseField;
+import org.opensearch.core.common.Strings;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.core.xcontent.ConstructingObjectParser;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.core.xcontent.ToXContentObject;
+import org.opensearch.core.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Objects;
+
+import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg;
+
+/**
+ * Task resource usage information with minimal metadata about the task
+ *
+ * Writeable TaskResourceInfo objects are used to represent resource usage
+ * information of running tasks, which can be propagated to the coordinator node
+ * to infer query-level resource usage.
+ *
+ * @opensearch.api
+ */
+@PublicApi(since = "2.15.0")
+public class TaskResourceInfo implements Writeable, ToXContentObject {
+ private final String action;
+ private final long taskId;
+ private final long parentTaskId;
+ private final String nodeId;
+ private final TaskResourceUsage taskResourceUsage;
+
+ private static final ParseField ACTION = new ParseField("action");
+ private static final ParseField TASK_ID = new ParseField("taskId");
+ private static final ParseField PARENT_TASK_ID = new ParseField("parentTaskId");
+ private static final ParseField NODE_ID = new ParseField("nodeId");
+ private static final ParseField TASK_RESOURCE_USAGE = new ParseField("taskResourceUsage");
+
+ public TaskResourceInfo(
+ final String action,
+ final long taskId,
+ final long parentTaskId,
+ final String nodeId,
+ final TaskResourceUsage taskResourceUsage
+ ) {
+ this.action = action;
+ this.taskId = taskId;
+ this.parentTaskId = parentTaskId;
+ this.nodeId = nodeId;
+ this.taskResourceUsage = taskResourceUsage;
+ }
+
+ public static final ConstructingObjectParser<TaskResourceInfo, Void> PARSER = new ConstructingObjectParser<>(
+ "task_resource_info",
+ a -> new Builder().setAction((String) a[0])
+ .setTaskId((Long) a[1])
+ .setParentTaskId((Long) a[2])
+ .setNodeId((String) a[3])
+ .setTaskResourceUsage((TaskResourceUsage) a[4])
+ .build()
+ );
+
+ static {
+ PARSER.declareString(constructorArg(), ACTION);
+ PARSER.declareLong(constructorArg(), TASK_ID);
+ PARSER.declareLong(constructorArg(), PARENT_TASK_ID);
+ PARSER.declareString(constructorArg(), NODE_ID);
+ PARSER.declareObject(constructorArg(), TaskResourceUsage.PARSER, TASK_RESOURCE_USAGE);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.field(ACTION.getPreferredName(), this.action);
+ builder.field(TASK_ID.getPreferredName(), this.taskId);
+ builder.field(PARENT_TASK_ID.getPreferredName(), this.parentTaskId);
+ builder.field(NODE_ID.getPreferredName(), this.nodeId);
+ builder.startObject(TASK_RESOURCE_USAGE.getPreferredName());
+ this.taskResourceUsage.toXContent(builder, params);
+ builder.endObject();
+ builder.endObject();
+ return builder;
+ }
+
+ /**
+ * Builder for {@link TaskResourceInfo}
+ */
+ public static class Builder {
+ private TaskResourceUsage taskResourceUsage;
+ private String action;
+ private long taskId;
+ private long parentTaskId;
+ private String nodeId;
+
+ public Builder setTaskResourceUsage(final TaskResourceUsage taskResourceUsage) {
+ this.taskResourceUsage = taskResourceUsage;
+ return this;
+ }
+
+ public Builder setAction(final String action) {
+ this.action = action;
+ return this;
+ }
+
+ public Builder setTaskId(final long taskId) {
+ this.taskId = taskId;
+ return this;
+ }
+
+ public Builder setParentTaskId(final long parentTaskId) {
+ this.parentTaskId = parentTaskId;
+ return this;
+ }
+
+ public Builder setNodeId(final String nodeId) {
+ this.nodeId = nodeId;
+ return this;
+ }
+
+ public TaskResourceInfo build() {
+ return new TaskResourceInfo(action, taskId, parentTaskId, nodeId, taskResourceUsage);
+ }
+ }
+
+ /**
+ * Read task info from a stream.
+ *
+ * @param in StreamInput to read
+ * @return {@link TaskResourceInfo}
+ * @throws IOException IOException
+ */
+ public static TaskResourceInfo readFromStream(StreamInput in) throws IOException {
+ return new TaskResourceInfo.Builder().setAction(in.readString())
+ .setTaskId(in.readLong())
+ .setParentTaskId(in.readLong())
+ .setNodeId(in.readString())
+ .setTaskResourceUsage(TaskResourceUsage.readFromStream(in))
+ .build();
+ }
+
+ /**
+ * Get TaskResourceUsage
+ *
+ * @return taskResourceUsage
+ */
+ public TaskResourceUsage getTaskResourceUsage() {
+ return taskResourceUsage;
+ }
+
+ /**
+ * Get parent task id
+ *
+ * @return parent task id
+ */
+ public long getParentTaskId() {
+ return parentTaskId;
+ }
+
+ /**
+ * Get task id
+ * @return task id
+ */
+ public long getTaskId() {
+ return taskId;
+ }
+
+ /**
+ * Get node id
+ * @return node id
+ */
+ public String getNodeId() {
+ return nodeId;
+ }
+
+ /**
+ * Get task action
+ * @return task action
+ */
+ public String getAction() {
+ return action;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(action);
+ out.writeLong(taskId);
+ out.writeLong(parentTaskId);
+ out.writeString(nodeId);
+ taskResourceUsage.writeTo(out);
+ }
+
+ @Override
+ public String toString() {
+ return Strings.toString(MediaTypeRegistry.JSON, this);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (obj == null || obj.getClass() != TaskResourceInfo.class) {
+ return false;
+ }
+ TaskResourceInfo other = (TaskResourceInfo) obj;
+ return action.equals(other.action)
+ && taskId == other.taskId
+ && parentTaskId == other.parentTaskId
+ && Objects.equals(nodeId, other.nodeId)
+ && taskResourceUsage.equals(other.taskResourceUsage);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(action, taskId, parentTaskId, nodeId, taskResourceUsage);
+ }
+}
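A hypothetical construction sketch for the class above using its Builder; the action, ids, node name, and the two-argument TaskResourceUsage constructor (cpu nanos, memory bytes) are assumptions for illustration only.

TaskResourceInfo info = new TaskResourceInfo.Builder()
    .setAction("indices:data/read/search")                            // illustrative action name
    .setTaskId(42L)
    .setParentTaskId(-1L)
    .setNodeId("node-1")
    .setTaskResourceUsage(new TaskResourceUsage(1_000_000L, 2_048L))  // assumed (cpuTimeInNanos, memoryInBytes) ctor
    .build();
String json = info.toString();  // rendered as JSON via MediaTypeRegistry.JSON, see toString() above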
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java
index c861c21f89fc5..bcf5c163cb91f 100644
--- a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java
@@ -48,6 +48,11 @@ public Closeable createGauge(String name, String description, String unit, Suppl
return metricsTelemetry.createGauge(name, description, unit, valueProvider, tags);
}
+ @Override
+ public Closeable createGauge(String name, String description, String unit, Supplier<TaggedMeasurement> value) {
+ return metricsTelemetry.createGauge(name, description, unit, value);
+ }
+
@Override
public void close() throws IOException {
metricsTelemetry.close();
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java
index 3ab3dcf82c7a7..3dc212b1341cc 100644
--- a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java
@@ -63,4 +63,16 @@ public interface MetricsRegistry extends Closeable {
*/
Closeable createGauge(String name, String description, String unit, Supplier<Double> valueProvider, Tags tags);
+ /**
+ * Creates the Observable Gauge type of Metric, where the value provider will be called at a certain frequency
+ * to capture the value.
+ *
+ * @param name name of the observable gauge.
+ * @param description any description about the metric.
+ * @param unit unit of the metric.
+ * @param value value provider.
+ * @return closeable to dispose/close the Gauge metric.
+ */
+ Closeable createGauge(String name, String description, String unit, Supplier<TaggedMeasurement> value);
+
}
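A hypothetical usage sketch of the new overload; the registry variable, metric name, tag key, and the Tags.create().addTag(...) call are assumptions based on the existing telemetry API rather than part of this patch.

Closeable gauge = metricsRegistry.createGauge(
    "search.active_queries",                        // illustrative metric name
    "Number of search queries currently running",   // description
    "1",                                            // unit (dimensionless count)
    () -> TaggedMeasurement.create(activeQueries.get(), Tags.create().addTag("node_role", "data"))
);
// Later, when the owning component shuts down:
gauge.close();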
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/TaggedMeasurement.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/TaggedMeasurement.java
new file mode 100644
index 0000000000000..707f2c79c62f2
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/TaggedMeasurement.java
@@ -0,0 +1,53 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.metrics;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.telemetry.metrics.tags.Tags;
+
+/**
+ * Observable Measurement for the Asynchronous instruments.
+ * @opensearch.experimental
+ */
+@ExperimentalApi
+public final class TaggedMeasurement {
+ private final Double value;
+ private final Tags tags;
+
+ /**
+ * Factory method to create the {@link TaggedMeasurement} object.
+ * @param value value.
+ * @param tags tags to be added per value.
+ * @return the created TaggedMeasurement
+ */
+ public static TaggedMeasurement create(double value, Tags tags) {
+ return new TaggedMeasurement(value, tags);
+ }
+
+ private TaggedMeasurement(double value, Tags tags) {
+ this.value = value;
+ this.tags = tags;
+ }
+
+ /**
+ * Returns the value.
+ * @return value
+ */
+ public Double getValue() {
+ return value;
+ }
+
+ /**
+ * Returns the tags.
+ * @return tags
+ */
+ public Tags getTags() {
+ return tags;
+ }
+}
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java
index 9a913d25e872d..7bec136c42ba7 100644
--- a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java
@@ -12,6 +12,7 @@
import org.opensearch.telemetry.metrics.Counter;
import org.opensearch.telemetry.metrics.Histogram;
import org.opensearch.telemetry.metrics.MetricsRegistry;
+import org.opensearch.telemetry.metrics.TaggedMeasurement;
import org.opensearch.telemetry.metrics.tags.Tags;
import java.io.Closeable;
@@ -52,6 +53,11 @@ public Closeable createGauge(String name, String description, String unit, Suppl
return () -> {};
}
+ @Override
+ public Closeable createGauge(String name, String description, String unit, Supplier<TaggedMeasurement> value) {
+ return () -> {};
+ }
+
@Override
public void close() throws IOException {
diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java
index 872f697ade09e..e1506eecff6e9 100644
--- a/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java
+++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java
@@ -79,4 +79,19 @@ public void testGauge() {
assertSame(mockCloseable, closeable);
}
+ @SuppressWarnings("unchecked")
+ public void testGaugeWithValueAndTagSupplier() {
+ Closeable mockCloseable = mock(Closeable.class);
+ when(defaultMeterRegistry.createGauge(any(String.class), any(String.class), any(String.class), any(Supplier.class))).thenReturn(
+ mockCloseable
+ );
+ Closeable closeable = defaultMeterRegistry.createGauge(
+ "org.opensearch.telemetry.metrics.DefaultMeterRegistryTests.testObservableGauge",
+ "test observable gauge",
+ "ms",
+ () -> TaggedMeasurement.create(1.0, Tags.EMPTY)
+ );
+ assertSame(mockCloseable, closeable);
+ }
+
}
diff --git a/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-4be6531.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index ee00419f52066..0000000000000
--- a/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8752daf173a642ae02e081cc0398f2ce59278200
\ No newline at end of file
diff --git a/modules/lang-expression/licenses/lucene-expressions-9.11.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..29aade3ad4298
--- /dev/null
+++ b/modules/lang-expression/licenses/lucene-expressions-9.11.0.jar.sha1
@@ -0,0 +1 @@
+5e21d20edee0712472e7c6f605c9d97aeecf16c0
\ No newline at end of file
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/10_derived_field_index_mapping_definition.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/10_derived_field_index_mapping_definition.yml
new file mode 100644
index 0000000000000..4f700c3b83e8f
--- /dev/null
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/10_derived_field_index_mapping_definition.yml
@@ -0,0 +1,421 @@
+"Test derived_field supported type using index mapping definition":
+ - skip:
+ version: " - 2.14.99"
+ reason: "derived_field feature was added in 2.15"
+
+ - do:
+ indices.create:
+ index: test
+ body:
+ mappings:
+ properties:
+ text:
+ type: text
+ keyword:
+ type: keyword
+ long:
+ type: long
+ float:
+ type: float
+ double:
+ type: double
+ date:
+ type: date
+ geo:
+ type: geo_point
+ ip:
+ type: ip
+ boolean:
+ type: boolean
+ array_of_long:
+ type: long
+ json_field:
+ type: text
+ derived:
+ derived_text:
+ type: text
+ script: "emit(params._source[\"text\"])"
+ derived_text_prefilter_field:
+ type: text
+ script: "emit(params._source[\"text\"])"
+ prefilter_field: "text"
+ derived_keyword:
+ type: keyword
+ script: "emit(params._source[\"keyword\"])"
+ derived_long:
+ type: long
+ script: "emit(params._source[\"long\"])"
+ derived_float:
+ type: float
+ script: "emit(params._source[\"float\"])"
+ derived_double:
+ type: double
+ script: "emit(params._source[\"double\"])"
+ derived_date:
+ type: date
+ script: "emit(ZonedDateTime.parse(params._source[\"date\"]).toInstant().toEpochMilli())"
+ derived_geo:
+ type: geo_point
+ script: "emit(params._source[\"geo\"][0], params._source[\"geo\"][1])"
+ derived_ip:
+ type: ip
+ script: "emit(params._source[\"ip\"])"
+ derived_boolean:
+ type: boolean
+ script: "emit(params._source[\"boolean\"])"
+ derived_array_of_long:
+ type: long
+ script: "emit(params._source[\"array_of_long\"][0]);emit(params._source[\"array_of_long\"][1]);"
+ derived_object:
+ type: object
+ properties:
+ keyword: keyword
+ script: "emit(params._source[\"json_field\"])"
+ prefilter_field: "json_field"
+
+ - do:
+ index:
+ index: test
+ id: 1
+ body: {
+ text: "peter piper",
+ keyword: "foo",
+ long: 1,
+ float: 1.0,
+ double: 1.0,
+ date: "2017-01-01T00:00:00Z",
+ geo: [0.0, 20.0],
+ ip: "192.168.0.1",
+ boolean: true,
+ array_of_long: [1, 2],
+ json_field: "{\"keyword\":\"json_keyword1\",\"long\":10,\"float\":10.0,\"double\":10.0,\"date\":\"2021-01-01T00:00:00Z\",\"ip\":\"10.0.0.1\",\"boolean\":true, \"array_of_long\": [1, 2]}}"
+ }
+
+ - do:
+ index:
+ index: test
+ id: 2
+ body: {
+ text: "piper picked a peck",
+ keyword: "bar",
+ long: 2,
+ float: 2.0,
+ double: 2.0,
+ date: "2017-01-02T00:00:00Z",
+ geo: [10.0, 30.0],
+ ip: "192.168.0.2",
+ boolean: false,
+ array_of_long: [2, 3],
+ json_field: "{\"keyword\":\"json_keyword2\",\"long\":20,\"float\":20.0,\"double\":20.0,\"date\":\"2021-02-01T00:00:00Z\",\"ip\":\"10.0.0.2\",\"boolean\":false, \"array_of_long\": [2, 3]}}"
+ }
+
+ - do:
+ index:
+ index: test
+ id: 3
+ body: {
+ text: "peck of pickled peppers",
+ keyword: "baz",
+ long: -3,
+ float: -3.0,
+ double: -3.0,
+ date: "2017-01-03T00:00:00Z",
+ geo: [20.0, 40.0],
+ ip: "192.168.0.3",
+ boolean: true,
+ array_of_long: [3, 4],
+ json_field: "{\"keyword\":\"json_keyword3\",\"long\":30,\"float\":30.0,\"double\":30.0,\"date\":\"2021-03-01T00:00:00Z\",\"ip\":\"10.0.0.3\",\"boolean\":true, \"array_of_long\": [3, 4]}"
+ }
+
+ - do:
+ index:
+ index: test
+ id: 4
+ body: {
+ text: "pickled peppers",
+ keyword: "qux",
+ long: 4,
+ float: 4.0,
+ double: 4.0,
+ date: "2017-01-04T00:00:00Z",
+ geo: [30.0, 50.0],
+ ip: "192.168.0.4",
+ boolean: false,
+ array_of_long: [4, 5],
+ json_field: "{\"keyword\":\"json_keyword4\",\"long\":40,\"float\":40.0,\"double\":40.0,\"date\":\"2021-04-01T00:00:00Z\",\"ip\":\"10.0.0.4\",\"boolean\":false, \"array_of_long\": [4, 5]}"
+ }
+
+ - do:
+ index:
+ index: test
+ id: 5
+ body: {
+ text: "peppers",
+ keyword: "quux",
+ long: 5,
+ float: 5.0,
+ double: 5.0,
+ date: "2017-01-05T00:00:00Z",
+ geo: [40.0, 60.0],
+ ip: "192.168.0.5",
+ boolean: true,
+ array_of_long: [5, 6],
+ json_field: "{\"keyword\":\"json_keyword5\",\"long\":50,\"float\":50.0,\"double\":50.0,\"date\":\"2021-05-01T00:00:00Z\",\"ip\":\"10.0.0.5\",\"boolean\":true, \"array_of_long\": [5, 6]}"
+ }
+
+ - do:
+ indices.refresh:
+ index: [test]
+
+ # Tests for derived_text
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ query:
+ match_phrase:
+ derived_text:
+ query: "peter piper"
+
+ - match: { hits.total: 1 }
+
+ # Tests for derived_keyword
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ query:
+ term:
+ derived_keyword:
+ value: "foo"
+
+ - match: { hits.total: 1 }
+
+ # Tests for derived_long
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ query:
+ range:
+ derived_long:
+ gte: 1
+
+ - match: { hits.total: 4 }
+
+ # Tests for derived_float
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ query:
+ range:
+ derived_float:
+ gte: 1.0
+
+ - match: { hits.total: 4 }
+
+ # Tests for derived_double
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ query:
+ range:
+ derived_double:
+ gte: 1.0
+
+ - match: { hits.total: 4 }
+
+ # Tests for derived_date
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ query:
+ range:
+ derived_date:
+ gte: "2017-01-02"
+
+ - match: { hits.total: 4 }
+
+ # Tests for derived_geo
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ query:
+ geo_distance:
+ distance: "20km"
+ derived_geo:
+ lat: 0.0
+ lon: 20.0
+
+ - match: { hits.total: 1 }
+
+ # Tests for derived_ip
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ query:
+ term:
+ derived_ip:
+ value: "192.168.0.1"
+
+ - match: { hits.total: 1 }
+
+ # Tests for derived_boolean
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ query:
+ term:
+ derived_boolean:
+ value: true
+
+ - match: { hits.total: 3 }
+
+ # Tests for derived_array_of_long
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ query:
+ range:
+ derived_array_of_long:
+ gte: 3
+
+ - match: { hits.total: 4 }
+
+ # Tests for derived_object.keyword
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ query:
+ term:
+ derived_object.keyword:
+ value: "json_keyword1"
+
+ - match: { hits.total: 1 }
+
+ # Tests for derived_object.long
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ query:
+ range:
+ derived_object.long:
+ gte: 11
+
+ - match: { hits.total: 4 }
+
+ # Tests for derived_object.float
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ query:
+ range:
+ derived_object.float:
+ gte: 10.1
+
+ - match: { hits.total: 4 }
+
+ # Tests for derived_object.double
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ query:
+ range:
+ derived_object.double:
+ gte: 10.1
+
+ - match: { hits.total: 4 }
+
+ # Tests for derived_object.date
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ query:
+ range:
+ derived_object.date:
+ gte: "2021-03-01"
+
+ - match: { hits.total: 3 }
+
+ # Tests for derived_object.ip
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ query:
+ term:
+ derived_object.ip:
+ value: "10.0.0.1"
+
+ - match: { hits.total: 1 }
+
+ # Tests for derived_object.boolean
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ query:
+ term:
+ derived_object.boolean:
+ value: true
+
+ - match: { hits.total: 3 }
+
+ # Tests for derived_object.array_of_long
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ query:
+ range:
+ derived_object.array_of_long:
+ gte: 3
+
+ - match: { hits.total: 4 }
+
+ # Tests for query string
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ q: "derived_keyword:foo"
+
+ - match: { hits.total: 1 }
+
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ q: derived_object.keyword:json_keyword1
+
+ - match: { hits.total: 1 }
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/20_derived_field_put_mapping.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/20_derived_field_put_mapping.yml
new file mode 100644
index 0000000000000..0370fd94e8548
--- /dev/null
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/20_derived_field_put_mapping.yml
@@ -0,0 +1,123 @@
+---
+"Test create and update mapping for derived fields":
+ - skip:
+ version: " - 2.14.99"
+ reason: "derived_field feature was added in 2.15"
+ - do:
+ indices.create:
+ index: test_index
+
+ - do:
+ indices.put_mapping:
+ index: test_index
+ body:
+ properties:
+ text:
+ type: text
+ json_field:
+ type: text
+ derived:
+ derived_text:
+ type: text
+ script: "emit(params._source[\"text\"])"
+ derived_text_prefilter_field:
+ type: keyword
+ script: "emit(params._source[\"text\"])"
+ prefilter_field: "text"
+ derived_date:
+ type: date
+ script: "emit(params._source[\"keyword\"])"
+ derived_object:
+ type: object
+ properties:
+ keyword: keyword
+ script: "emit(params._source[\"json_field\"])"
+ prefilter_field: "json_field"
+
+ - do:
+ indices.get_mapping:
+ index: test_index
+
+ - match: {test_index.mappings.derived.derived_text.type: text}
+ - match: {test_index.mappings.derived.derived_text_prefilter_field.type: keyword}
+ - match: {test_index.mappings.derived.derived_text_prefilter_field.prefilter_field: text}
+ - match: {test_index.mappings.derived.derived_date.type: date}
+ - match: {test_index.mappings.derived.derived_object.type: object}
+ - match: {test_index.mappings.derived.derived_object.properties.keyword: keyword}
+ - match: {test_index.mappings.derived.derived_object.prefilter_field: json_field}
+
+
+ - do:
+ indices.put_mapping:
+ index: test_index
+ body:
+ properties:
+ text:
+ type: text
+ json_field:
+ type: text
+ derived:
+ derived_text:
+ type: keyword
+ script: "emit(params._source[\"text\"])"
+ derived_text_prefilter_field:
+ type: text
+ script: "emit(params._source[\"text\"])"
+ prefilter_field: "text"
+ derived_date:
+ type: keyword
+ script: "emit(params._source[\"keyword\"])"
+ derived_object:
+ type: object
+ properties:
+ keyword: text
+ script: "emit(params._source[\"text\"])"
+ prefilter_field: "text"
+ format: "dd-MM-yyyy"
+ ignore_malformed: true
+
+ - do:
+ indices.get_mapping:
+ index: test_index
+
+ - match: {test_index.mappings.derived.derived_text.type: keyword}
+ - match: {test_index.mappings.derived.derived_text_prefilter_field.type: text}
+ - match: {test_index.mappings.derived.derived_text_prefilter_field.prefilter_field: text}
+ - match: {test_index.mappings.derived.derived_date.type: keyword}
+ - match: {test_index.mappings.derived.derived_object.type: object}
+ - match: {test_index.mappings.derived.derived_object.properties.keyword: text}
+ - match: {test_index.mappings.derived.derived_object.prefilter_field: text}
+ - match: {test_index.mappings.derived.derived_object.format: "dd-MM-yyyy"}
+ - match: {test_index.mappings.derived.derived_object.ignore_malformed: true}
+
+
+ - do:
+ indices.put_mapping:
+ index: test_index
+ body:
+ properties:
+ text:
+ type: text
+ json_field:
+ type: text
+ derived:
+ derived_object:
+ type: object
+ properties:
+ keyword: keyword
+ script: "emit(params._source[\"json_field\"])"
+ prefilter_field: "json_field"
+ ignore_malformed: false
+
+ - do:
+ indices.get_mapping:
+ index: test_index
+
+ - match: {test_index.mappings.derived.derived_text.type: keyword}
+ - match: {test_index.mappings.derived.derived_text_prefilter_field.type: text}
+ - match: {test_index.mappings.derived.derived_text_prefilter_field.prefilter_field: text}
+ - match: {test_index.mappings.derived.derived_date.type: keyword}
+ - match: {test_index.mappings.derived.derived_object.type: object}
+ - match: {test_index.mappings.derived.derived_object.properties.keyword: keyword}
+ - match: {test_index.mappings.derived.derived_object.prefilter_field: json_field}
+ - is_false: test_index.mappings.derived.derived_object.ignore_malformed
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/30_derived_field_search_definition.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/30_derived_field_search_definition.yml
new file mode 100644
index 0000000000000..bb619dce63010
--- /dev/null
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/30_derived_field_search_definition.yml
@@ -0,0 +1,489 @@
+"Test derived_field supported type using search definition":
+ - skip:
+ version: " - 2.14.99"
+ reason: "derived_field feature was added in 2.15"
+
+ - do:
+ indices.create:
+ index: test
+ body:
+ mappings:
+ properties:
+ text:
+ type: text
+ keyword:
+ type: keyword
+ long:
+ type: long
+ float:
+ type: float
+ double:
+ type: double
+ date:
+ type: date
+ geo:
+ type: geo_point
+ ip:
+ type: ip
+ boolean:
+ type: boolean
+ array_of_long:
+ type: long
+ json_field:
+ type: text
+
+ - do:
+ index:
+ index: test
+ id: 1
+ body: {
+ text: "peter piper",
+ keyword: "foo",
+ long: 1,
+ float: 1.0,
+ double: 1.0,
+ date: "2017-01-01T00:00:00Z",
+ geo: [0.0, 20.0],
+ ip: "192.168.0.1",
+ boolean: true,
+ array_of_long: [1, 2],
+ json_field: "{\"keyword\":\"json_keyword1\",\"long\":10,\"float\":10.0,\"double\":10.0,\"date\":\"2021-01-01T00:00:00Z\",\"ip\":\"10.0.0.1\",\"boolean\":true, \"array_of_long\": [1, 2]}}"
+ }
+
+ - do:
+ index:
+ index: test
+ id: 2
+ body: {
+ text: "piper picked a peck",
+ keyword: "bar",
+ long: 2,
+ float: 2.0,
+ double: 2.0,
+ date: "2017-01-02T00:00:00Z",
+ geo: [10.0, 30.0],
+ ip: "192.168.0.2",
+ boolean: false,
+ array_of_long: [2, 3],
+ json_field: "{\"keyword\":\"json_keyword2\",\"long\":20,\"float\":20.0,\"double\":20.0,\"date\":\"2021-02-01T00:00:00Z\",\"ip\":\"10.0.0.2\",\"boolean\":false, \"array_of_long\": [2, 3]}}"
+ }
+
+ - do:
+ index:
+ index: test
+ id: 3
+ body: {
+ text: "peck of pickled peppers",
+ keyword: "baz",
+ long: -3,
+ float: -3.0,
+ double: -3.0,
+ date: "2017-01-03T00:00:00Z",
+ geo: [20.0, 40.0],
+ ip: "192.168.0.3",
+ boolean: true,
+ array_of_long: [3, 4],
+ json_field: "{\"keyword\":\"json_keyword3\",\"long\":30,\"float\":30.0,\"double\":30.0,\"date\":\"2021-03-01T00:00:00Z\",\"ip\":\"10.0.0.3\",\"boolean\":true, \"array_of_long\": [3, 4]}"
+ }
+
+ - do:
+ index:
+ index: test
+ id: 4
+ body: {
+ text: "pickled peppers",
+ keyword: "qux",
+ long: 4,
+ float: 4.0,
+ double: 4.0,
+ date: "2017-01-04T00:00:00Z",
+ geo: [30.0, 50.0],
+ ip: "192.168.0.4",
+ boolean: false,
+ array_of_long: [4, 5],
+ json_field: "{\"keyword\":\"json_keyword4\",\"long\":40,\"float\":40.0,\"double\":40.0,\"date\":\"2021-04-01T00:00:00Z\",\"ip\":\"10.0.0.4\",\"boolean\":false, \"array_of_long\": [4, 5]}"
+ }
+
+ - do:
+ index:
+ index: test
+ id: 5
+ body: {
+ text: "peppers",
+ keyword: "quux",
+ long: 5,
+ float: 5.0,
+ double: 5.0,
+ date: "2017-01-05T00:00:00Z",
+ geo: [40.0, 60.0],
+ ip: "192.168.0.5",
+ boolean: true,
+ array_of_long: [5, 6],
+ json_field: "{\"keyword\":\"json_keyword5\",\"long\":50,\"float\":50.0,\"double\":50.0,\"date\":\"2021-05-01T00:00:00Z\",\"ip\":\"10.0.0.5\",\"boolean\":true, \"array_of_long\": [5, 6]}"
+ }
+
+ - do:
+ indices.refresh:
+ index: [test]
+
+ # Tests for derived_text
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ derived:
+ derived_text:
+ type: text
+ script: "emit(params._source[\"text\"])"
+ query:
+ match_phrase:
+ derived_text:
+ query: "peter piper"
+
+ - match: { hits.total: 1 }
+
+ # Tests for derived_keyword
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ derived:
+ derived_keyword:
+ type: keyword
+ script: "emit(params._source[\"keyword\"])"
+ query:
+ term:
+ derived_keyword:
+ value: "foo"
+
+ - match: { hits.total: 1 }
+
+ # Tests for derived_long
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ derived:
+ derived_long:
+ type: long
+ script: "emit(params._source[\"long\"])"
+ query:
+ range:
+ derived_long:
+ gte: 1
+
+ - match: { hits.total: 4 }
+
+ # Tests for derived_float
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ derived:
+ derived_float:
+ type: float
+ script: "emit(params._source[\"float\"])"
+ query:
+ range:
+ derived_float:
+ gte: 1.0
+
+ - match: { hits.total: 4 }
+
+ # Tests for derived_double
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ derived:
+ derived_double:
+ type: double
+ script: "emit(params._source[\"double\"])"
+ query:
+ range:
+ derived_double:
+ gte: 1.0
+
+ - match: { hits.total: 4 }
+
+ # Tests for derived_date
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ derived:
+ derived_date:
+ type: date
+ script: "emit(ZonedDateTime.parse(params._source[\"date\"]).toInstant().toEpochMilli())"
+ query:
+ range:
+ derived_date:
+ gte: "2017-01-02"
+
+ - match: { hits.total: 4 }
+
+ # Tests for derived_geo
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ derived:
+ derived_geo:
+ type: geo_point
+ script: "emit(params._source[\"geo\"][0], params._source[\"geo\"][1])"
+ query:
+ geo_distance:
+ distance: "20km"
+ derived_geo:
+ lat: 0.0
+ lon: 20.0
+
+ - match: { hits.total: 1 }
+
+ # Tests for derived_ip
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ derived:
+ derived_ip:
+ type: ip
+ script: "emit(params._source[\"ip\"])"
+ query:
+ term:
+ derived_ip:
+ value: "192.168.0.1"
+
+ - match: { hits.total: 1 }
+
+ # Tests for derived_boolean
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ derived:
+ derived_boolean:
+ type: boolean
+ script: "emit(params._source[\"boolean\"])"
+ query:
+ term:
+ derived_boolean:
+ value: true
+
+ - match: { hits.total: 3 }
+
+ # Tests for derived_array_of_long
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ derived:
+ derived_array_of_long:
+ type: long
+ script: "emit(params._source[\"array_of_long\"][0]);emit(params._source[\"array_of_long\"][1]);"
+ query:
+ range:
+ derived_array_of_long:
+ gte: 3
+
+ - match: { hits.total: 4 }
+
+ # Tests for derived_object.keyword
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ derived:
+ derived_object:
+ type: object
+ properties:
+ keyword: keyword
+ script: "emit(params._source[\"json_field\"])"
+ prefilter_field: "json_field"
+ query:
+ term:
+ derived_object.keyword:
+ value: "json_keyword1"
+
+ - match: { hits.total: 1 }
+
+ # Tests for derived_object.long
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ derived:
+ derived_object:
+ type: object
+ properties:
+ keyword: keyword
+ script: "emit(params._source[\"json_field\"])"
+ prefilter_field: "json_field"
+ query:
+ range:
+ derived_object.long:
+ gte: 11
+
+ - match: { hits.total: 4 }
+
+ # Tests for derived_object.float
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ derived:
+ derived_object:
+ type: object
+ properties:
+ keyword: keyword
+ script: "emit(params._source[\"json_field\"])"
+ prefilter_field: "json_field"
+ query:
+ range:
+ derived_object.float:
+ gte: 10.1
+
+ - match: { hits.total: 4 }
+
+ # Tests for derived_object.double
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ derived:
+ derived_object:
+ type: object
+ properties:
+ keyword: keyword
+ script: "emit(params._source[\"json_field\"])"
+ prefilter_field: "json_field"
+ query:
+ range:
+ derived_object.double:
+ gte: 10.1
+
+ - match: { hits.total: 4 }
+
+ # Tests for derived_object.date
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ derived:
+ derived_object:
+ type: object
+ properties:
+ keyword: keyword
+ script: "emit(params._source[\"json_field\"])"
+ prefilter_field: "json_field"
+ query:
+ range:
+ derived_object.date:
+ gte: "2021-03-01"
+
+ - match: { hits.total: 3 }
+
+ # Tests for derived_object.ip
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ derived:
+ derived_object:
+ type: object
+ properties:
+ keyword: keyword
+ script: "emit(params._source[\"json_field\"])"
+ prefilter_field: "json_field"
+ query:
+ term:
+ derived_object.ip:
+ value: "10.0.0.1"
+
+ - match: { hits.total: 1 }
+
+ # Tests for derived_object.boolean
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ derived:
+ derived_object:
+ type: object
+ properties:
+ keyword: keyword
+ script: "emit(params._source[\"json_field\"])"
+ prefilter_field: "json_field"
+ query:
+ term:
+ derived_object.boolean:
+ value: true
+
+ - match: { hits.total: 3 }
+
+ # Tests for derived_object.array_of_long
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ derived:
+ derived_object:
+ type: object
+ properties:
+ keyword: keyword
+ script: "emit(params._source[\"json_field\"])"
+ prefilter_field: "json_field"
+ query:
+ range:
+ derived_object.array_of_long:
+ gte: 3
+
+ - match: { hits.total: 4 }
+
+ # Tests for query string
+ - do:
+ search:
+ body:
+ derived:
+ derived_keyword:
+ type: keyword
+ script: "emit(params._source[\"keyword\"])"
+ rest_total_hits_as_int: true
+ index: test
+ q: "derived_keyword:foo"
+
+ - match: { hits.total: 1 }
+
+ - do:
+ search:
+ body:
+ derived:
+ derived_object:
+ type: object
+ properties:
+ keyword: keyword
+ script: "emit(params._source[\"json_field\"])"
+ prefilter_field: "json_field"
+ rest_total_hits_as_int: true
+ index: test
+ q: derived_object.keyword:json_keyword1
+
+ - match: { hits.total: 1 }
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/40_derived_field_fetch_and_highlight.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/40_derived_field_fetch_and_highlight.yml
new file mode 100644
index 0000000000000..52a897c341419
--- /dev/null
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/40_derived_field_fetch_and_highlight.yml
@@ -0,0 +1,279 @@
+setup:
+ - skip:
+ version: " - 2.14.99"
+ reason: "derived_field feature was added in 2.15"
+
+---
+"Test basic field retrieval":
+ - do:
+ indices.create:
+ index: test
+ body:
+ mappings:
+ properties:
+ text:
+ type: text
+ keyword:
+ type: keyword
+ long:
+ type: long
+ float:
+ type: float
+ double:
+ type: double
+ date:
+ type: date
+ geo:
+ type: geo_point
+ ip:
+ type: ip
+ boolean:
+ type: boolean
+ array_of_long:
+ type: long
+ json_field:
+ type: text
+ derived:
+ derived_text:
+ type: text
+ script: "emit(params._source[\"text\"])"
+ derived_text_prefilter_field:
+ type: text
+ script: "emit(params._source[\"text\"])"
+ prefilter_field: "text"
+ derived_keyword:
+ type: keyword
+ script: "emit(params._source[\"keyword\"])"
+ derived_long:
+ type: long
+ script: "emit(params._source[\"long\"])"
+ derived_float:
+ type: float
+ script: "emit(params._source[\"float\"])"
+ derived_double:
+ type: double
+ script: "emit(params._source[\"double\"])"
+ derived_date:
+ type: date
+ script: "emit(ZonedDateTime.parse(params._source[\"date\"]).toInstant().toEpochMilli())"
+ derived_geo:
+ type: geo_point
+ script: "emit(params._source[\"geo\"][0], params._source[\"geo\"][1])"
+ derived_ip:
+ type: ip
+ script: "emit(params._source[\"ip\"])"
+ derived_boolean:
+ type: boolean
+ script: "emit(params._source[\"boolean\"])"
+ derived_array_of_long:
+ type: long
+ script: "emit(params._source[\"array_of_long\"][0]);emit(params._source[\"array_of_long\"][1]);"
+ derived_object:
+ type: object
+ properties:
+ keyword: keyword
+ script: "emit(params._source[\"json_field\"])"
+ prefilter_field: "json_field"
+ format: "yyyy-MM-dd"
+
+ - do:
+ index:
+ index: test
+ id: 1
+ body: {
+ text: "peter piper",
+ keyword: "foo",
+ long: 1,
+ float: 1.0,
+ double: 1.0,
+ date: "2017-01-01T00:00:00Z",
+ geo: [0.0, 20.0],
+ ip: "192.168.0.1",
+ boolean: true,
+ array_of_long: [1, 2],
+ json_field: "{\"keyword\":\"json_keyword1\",\"long\":10,\"float\":10.0,\"double\":10.0,\"date\":\"2021-01-01T00:00:00Z\",\"ip\":\"10.0.0.1\",\"boolean\":true, \"array_of_long\": [1, 2]}}"
+ }
+
+ - do:
+ index:
+ index: test
+ id: 2
+ body: {
+ text: "piper picked a peck",
+ keyword: "bar",
+ long: 2,
+ float: 2.0,
+ double: 2.0,
+ date: "2017-01-02T00:00:00Z",
+ geo: [10.0, 30.0],
+ ip: "192.168.0.2",
+ boolean: false,
+ array_of_long: [2, 3],
+ json_field: "{\"keyword\":\"json_keyword2\",\"long\":20,\"float\":20.0,\"double\":20.0,\"date\":\"2021-02-01T00:00:00Z\",\"ip\":\"10.0.0.2\",\"boolean\":false, \"array_of_long\": [2, 3]}}"
+ }
+
+ - do:
+ indices.refresh:
+ index: [test]
+
+ - do:
+ search:
+ index: test
+ body:
+ fields: [derived_text, derived_keyword, derived_long, derived_float, derived_double, derived_date, derived_geo, derived_ip, derived_boolean, derived_array_of_long,
+ derived_object, derived_object.keyword, derived_object.long, derived_object.float, derived_object.double, derived_object.date, derived_object.ip, derived_object.boolean, derived_object.array_of_long]
+
+ - is_true: hits.hits.0._id
+ - is_true: hits.hits.0._source
+
+ - match: { hits.hits.0.fields.derived_text.0: "peter piper" }
+ - match: { hits.hits.0.fields.derived_keyword.0: foo }
+ - match: { hits.hits.0.fields.derived_long.0: 1 }
+ - match: { hits.hits.0.fields.derived_float.0: 1.0 }
+ - match: { hits.hits.0.fields.derived_double.0: 1 }
+ - match: { hits.hits.0.fields.derived_date.0: 2017-01-01T00:00:00.000Z }
+ - match: { hits.hits.0.fields.derived_geo.0.lat: 0.0 }
+ - match: { hits.hits.0.fields.derived_geo.0.lon: 20.0 }
+ - match: { hits.hits.0.fields.derived_ip.0: 192.168.0.1 }
+ - match: { hits.hits.0.fields.derived_array_of_long.0: 1 }
+ - match: { hits.hits.0.fields.derived_array_of_long.1: 2 }
+ - match: { hits.hits.0.fields.derived_object.0: "{\"keyword\":\"json_keyword1\",\"long\":10,\"float\":10.0,\"double\":10.0,\"date\":\"2021-01-01T00:00:00Z\",\"ip\":\"10.0.0.1\",\"boolean\":true, \"array_of_long\": [1, 2]}}" }
+ - match: { hits.hits.0.fields.derived_object\.keyword.0: json_keyword1 }
+ - match: { hits.hits.0.fields.derived_object\.long.0: 10 }
+ - match: { hits.hits.0.fields.derived_object\.float.0: 10.0 }
+ - match: { hits.hits.0.fields.derived_object\.double.0: 10.0 }
+ - match: { hits.hits.0.fields.derived_object\.date.0: 2021-01-01 }
+ - match: { hits.hits.0.fields.derived_object\.ip.0: 10.0.0.1 }
+ - match: { hits.hits.0.fields.derived_object\.boolean.0: true }
+ - match: { hits.hits.0.fields.derived_object\.array_of_long.0: 1 }
+ - match: { hits.hits.0.fields.derived_object\.array_of_long.1: 2 }
+
+ - match: { hits.hits.1.fields.derived_text.0: "piper picked a peck" }
+ - match: { hits.hits.1.fields.derived_keyword.0: bar }
+ - match: { hits.hits.1.fields.derived_long.0: 2 }
+ - match: { hits.hits.1.fields.derived_float.0: 2.0 }
+ - match: { hits.hits.1.fields.derived_double.0: 2 }
+ - match: { hits.hits.1.fields.derived_date.0: 2017-01-02T00:00:00.000Z }
+ - match: { hits.hits.1.fields.derived_geo.0.lat: 10.0 }
+ - match: { hits.hits.1.fields.derived_geo.0.lon: 30.0 }
+ - match: { hits.hits.1.fields.derived_ip.0: 192.168.0.2 }
+ - match: { hits.hits.1.fields.derived_array_of_long.0: 2 }
+ - match: { hits.hits.1.fields.derived_array_of_long.1: 3 }
+ - match: { hits.hits.1.fields.derived_object.0: "{\"keyword\":\"json_keyword2\",\"long\":20,\"float\":20.0,\"double\":20.0,\"date\":\"2021-02-01T00:00:00Z\",\"ip\":\"10.0.0.2\",\"boolean\":false, \"array_of_long\": [2, 3]}}" }
+ - match: { hits.hits.1.fields.derived_object\.keyword.0: json_keyword2 }
+ - match: { hits.hits.1.fields.derived_object\.long.0: 20 }
+ - match: { hits.hits.1.fields.derived_object\.float.0: 20.0 }
+ - match: { hits.hits.1.fields.derived_object\.double.0: 20.0 }
+ - match: { hits.hits.1.fields.derived_object\.date.0: 2021-02-01 }
+ - match: { hits.hits.1.fields.derived_object\.ip.0: 10.0.0.2 }
+ - match: { hits.hits.1.fields.derived_object\.boolean.0: false }
+ - match: { hits.hits.1.fields.derived_object\.array_of_long.0: 2 }
+ - match: { hits.hits.1.fields.derived_object\.array_of_long.1: 3 }
+
+
+---
+"Test highlight":
+ - do:
+ indices.create:
+ index: test
+ body:
+ mappings:
+ properties:
+ text:
+ type: text
+ array_of_text:
+ type: text
+ json_field:
+ type: text
+ derived:
+ derived_text:
+ type: text
+ script: "emit(params._source[\"text\"])"
+ derived_keyword:
+ type: keyword
+ script: "emit(params._source[\"keyword\"])"
+ derived_array_of_text:
+ type: text
+ script: "emit(params._source[\"array_of_text\"][0]);emit(params._source[\"array_of_text\"][1]);"
+ derived_object:
+ type: object
+ properties:
+ array_of_text: text
+ script: "emit(params._source[\"json_field\"])"
+ prefilter_field: "json_field"
+
+ - do:
+ index:
+ index: test
+ id: 1
+ body: {
+ text: "peter piper",
+ keyword: "foo",
+ long: 1,
+ float: 1.0,
+ double: 1.0,
+ date: "2017-01-01T00:00:00Z",
+ geo: [0.0, 20.0],
+ ip: "192.168.0.1",
+ boolean: true,
+ array_of_text: ["The quick brown fox is brown", "The quick brown fox is black"],
+ json_field: "{\"keyword\":\"json_keyword1\",\"long\":10,\"float\":10.0,\"double\":10.0,\"date\":\"2021-01-01T00:00:00Z\",\"ip\":\"10.0.0.1\",\"boolean\":true, \"array_of_text\": [\"The quick brown fox is brown\", \"The quick brown fox is black\"]}}"
+ }
+
+ - do:
+ index:
+ index: test
+ id: 2
+ body: {
+ text: "piper picked a peck",
+ keyword: "bar",
+ long: 2,
+ float: 2.0,
+ double: 2.0,
+ date: "2017-01-02T00:00:00Z",
+ geo: [10.0, 30.0],
+ ip: "192.168.0.2",
+ boolean: false,
+ array_of_text: ["The quick brown fox is brown", "The quick brown fox is black"],
+ json_field: "{\"keyword\":\"json_keyword2\",\"long\":20,\"float\":20.0,\"double\":20.0,\"date\":\"2021-02-01T00:00:00Z\",\"ip\":\"10.0.0.2\",\"boolean\":false, \"array_of_text\": [\"The quick brown fox is brown\", \"The quick brown fox is black\"]}}"
+ }
+
+ - do:
+ indices.refresh:
+ index: [test]
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ body: { "query" : {"multi_match" : { "query" : "piper", "fields" : [ "derived_text"] } },
+ "fields": [derived_text],
+ "highlight" : { "type" : "unified", "fields" : { "derived_text" : {} } }
+ }
+
+ - match: {hits.hits.0.highlight.derived_text.0: "peter piper "}
+
+
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ body: { "query" : {"multi_match" : { "query" : "quick brown", "fields" : [ "derived_array_of_text"] } },
+ "fields": [derived_array_of_text],
+ "highlight" : { "type" : "unified", "fields" : { "derived_array_of_text" : {} } }
+ }
+
+ - match: {hits.hits.0.highlight.derived_array_of_text.0: "The quick brown fox is brown "}
+
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ query:
+ match_phrase:
+ derived_object.array_of_text:
+ query: "quick brown"
+ highlight:
+ type: unified
+ fields:
+ derived_object.array_of_text: {}
+
+ - match: {hits.hits.0.highlight.derived_object\.array_of_text.0: "The quick brown fox is brown "}
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/50_derived_field_default_analyzer.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/50_derived_field_default_analyzer.yml
new file mode 100644
index 0000000000000..e10c9cb3c133f
--- /dev/null
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/50_derived_field_default_analyzer.yml
@@ -0,0 +1,105 @@
+---
+"Test default index analyzer simple is applied on derived fields":
+ - do:
+ indices.create:
+ index: test
+ body:
+ settings:
+ index.analysis.analyzer.default.type: simple
+ mappings:
+ properties:
+ text:
+ type: text
+ json_field:
+ type: text
+ derived:
+ derived_text:
+ type: text
+ script: "emit(params._source[\"text\"])"
+ derived_object:
+ type: object
+ properties:
+ array_of_text: text
+ script: "emit(params._source[\"json_field\"])"
+ prefilter_field: "json_field"
+
+ - do:
+ index:
+ index: test
+ id: 1
+ body: {
+ text: "Email: example@example.com, Visit https://example.com for more info.",
+ json_field: "{\"array_of_text\": [\"Email: example@example.com, Visit https://example.com for more info.\", \"Email: example@example.com, Visit https://example.com for more info.\"]}}"
+ }
+
+ - do:
+ indices.refresh:
+ index: [test]
+ - do:
+ search:
+ index: test
+ q: "derived_text:example.com"
+ analyzer: standard
+
+ - match: { hits.total.value: 0 }
+
+ - do:
+ search:
+ index: test
+ q: "derived_text:example.com"
+ analyzer: simple
+
+ - match: { hits.total.value: 1 }
+
+---
+"Test default index analyzer standard is applied on derived fields":
+ - do:
+ indices.create:
+ index: test
+ body:
+ settings:
+ index.analysis.analyzer.default.type: standard
+ mappings:
+ properties:
+ text:
+ type: text
+ json_field:
+ type: text
+ derived:
+ derived_text:
+ type: text
+ script: "emit(params._source[\"text\"])"
+ derived_object:
+ type: object
+ properties:
+ array_of_text: text
+ script: "emit(params._source[\"json_field\"])"
+ prefilter_field: "json_field"
+
+ - do:
+ index:
+ index: test
+ id: 1
+ body: {
+ text: "Email: example@example.com, Visit https://example.com for more info.",
+ json_field: "{\"array_of_text\": [\"Email: example@example.com, Visit https://example.com for more info.\", \"Email: example@example.com, Visit https://example.com for more info.\"]}}"
+ }
+
+ - do:
+ indices.refresh:
+ index: [test]
+ - do:
+ search:
+ index: test
+ q: "derived_object.array_of_text:example.com"
+ analyzer: standard
+
+ - match: { hits.total.value: 1 }
+
+ - do:
+ search:
+ index: test
+ q: "derived_object.array_of_text:example.com"
+ analyzer: simple
+
+ - match: { hits.total.value: 1 }
diff --git a/modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/OpenSearchDashboardsModulePlugin.java b/modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/OpenSearchDashboardsModulePlugin.java
index 09fd52ff65c66..6d5020336eb0b 100644
--- a/modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/OpenSearchDashboardsModulePlugin.java
+++ b/modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/OpenSearchDashboardsModulePlugin.java
@@ -54,6 +54,7 @@
import org.opensearch.rest.action.admin.indices.RestRefreshAction;
import org.opensearch.rest.action.admin.indices.RestUpdateSettingsAction;
import org.opensearch.rest.action.document.RestBulkAction;
+import org.opensearch.rest.action.document.RestBulkStreamingAction;
import org.opensearch.rest.action.document.RestDeleteAction;
import org.opensearch.rest.action.document.RestGetAction;
import org.opensearch.rest.action.document.RestIndexAction;
@@ -127,6 +128,7 @@ public List<RestHandler> getRestHandlers(
new OpenSearchDashboardsWrappedRestHandler(new RestMultiGetAction(settings)),
new OpenSearchDashboardsWrappedRestHandler(new RestSearchAction()),
new OpenSearchDashboardsWrappedRestHandler(new RestBulkAction(settings)),
+ new OpenSearchDashboardsWrappedRestHandler(new RestBulkStreamingAction(settings)),
new OpenSearchDashboardsWrappedRestHandler(new RestDeleteAction()),
new OpenSearchDashboardsWrappedRestHandler(new RestDeleteByQueryAction()),
diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index 04338d8933590..0000000000000
--- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-12630ff9c56e2a372ba57f519c579ff9e728208a
\ No newline at end of file
diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..6f0501d3312ae
--- /dev/null
+++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0.jar.sha1
@@ -0,0 +1 @@
+5c7f2d8eab0fca3fdc3d3e57a7f48a335dc7ac33
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index b8da0dacfe9f1..0000000000000
--- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-752bfc61c7829be6c27d9c1764250196e2c6b06b
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..25031381c9cb3
--- /dev/null
+++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0.jar.sha1
@@ -0,0 +1 @@
+efcf65dda1b4e9d7e83926fd5895a47e491cbf29
\ No newline at end of file
diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index b58adc03938f3..0000000000000
--- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5ca56d42b24498a226cf91f48b94e010b6af5fe2
\ No newline at end of file
diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..e27d45b217dad
--- /dev/null
+++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0.jar.sha1
@@ -0,0 +1 @@
+59599d7b8bed2e6bd27d0dad7935c078b98c39cc
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index dea962647d995..0000000000000
--- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8eb59a89aa8984457798ccffb8e97e5351bebc1f
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..ad5473865537d
--- /dev/null
+++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0.jar.sha1
@@ -0,0 +1 @@
+e55f83bb373ac139e313f64e80afe1eb0a75b8c0
\ No newline at end of file
diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index 1259b95a789a5..0000000000000
--- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-851c1bd99eaef368e84335853dd448e4f56cdbc8
\ No newline at end of file
diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..68abd162e7266
--- /dev/null
+++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0.jar.sha1
@@ -0,0 +1 @@
+1be59d91c45a4de069611fb7f8aa3e8fd26020ec
\ No newline at end of file
diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index 8c0d8fd278b89..0000000000000
--- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-63647085d41ae231733580c20a498ce7c9134ce5
\ No newline at end of file
diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..c5f1521ec3769
--- /dev/null
+++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0.jar.sha1
@@ -0,0 +1 @@
+d5b5922acf3743b5a0c542959dd93fca8be333a7
\ No newline at end of file
diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index 0eb1fb5f2b31f..0000000000000
--- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a3ba7dd03b1df9efed08eb544689d51d2be22aa5
\ No newline at end of file
diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..b676ca507467a
--- /dev/null
+++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0.jar.sha1
@@ -0,0 +1 @@
+50fd7b471cbdd6648c4972169f3fc67fae9db7f6
\ No newline at end of file
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java
index 4d7e0d486068a..bba676436c39a 100644
--- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java
@@ -70,7 +70,7 @@ public Collection createComponents(
final Supplier repositoriesServiceSupplier
) {
// create top n queries service
- final QueryInsightsService queryInsightsService = new QueryInsightsService(threadPool);
+ final QueryInsightsService queryInsightsService = new QueryInsightsService(clusterService.getClusterSettings(), threadPool, client);
return List.of(queryInsightsService, new QueryInsightsListener(clusterService, queryInsightsService));
}
@@ -110,7 +110,16 @@ public List<Setting<?>> getSettings() {
// Settings for top N queries
QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED,
QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE,
- QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE
+ QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE,
+ QueryInsightsSettings.TOP_N_LATENCY_EXPORTER_SETTINGS,
+ QueryInsightsSettings.TOP_N_CPU_QUERIES_ENABLED,
+ QueryInsightsSettings.TOP_N_CPU_QUERIES_SIZE,
+ QueryInsightsSettings.TOP_N_CPU_QUERIES_WINDOW_SIZE,
+ QueryInsightsSettings.TOP_N_CPU_EXPORTER_SETTINGS,
+ QueryInsightsSettings.TOP_N_MEMORY_QUERIES_ENABLED,
+ QueryInsightsSettings.TOP_N_MEMORY_QUERIES_SIZE,
+ QueryInsightsSettings.TOP_N_MEMORY_QUERIES_WINDOW_SIZE,
+ QueryInsightsSettings.TOP_N_MEMORY_EXPORTER_SETTINGS
);
}
}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/DebugExporter.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/DebugExporter.java
new file mode 100644
index 0000000000000..116bd26e1f9bc
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/DebugExporter.java
@@ -0,0 +1,61 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.insights.core.exporter;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;
+
+import java.util.List;
+
+/**
+ * Debug exporter for development purposes
+ */
+public final class DebugExporter implements QueryInsightsExporter {
+ /**
+ * Logger of the debug exporter
+ */
+ private final Logger logger = LogManager.getLogger();
+
+ /**
+ * Constructor of DebugExporter
+ */
+ private DebugExporter() {}
+
+ private static class InstanceHolder {
+ private static final DebugExporter INSTANCE = new DebugExporter();
+ }
+
+    /**
+     * Get the singleton instance of DebugExporter
+     *
+     * @return DebugExporter instance
+     */
+ public static DebugExporter getInstance() {
+ return InstanceHolder.INSTANCE;
+ }
+
+ /**
+ * Write the list of SearchQueryRecord to debug log
+ *
+ * @param records list of {@link SearchQueryRecord}
+ */
+ @Override
+    public void export(final List<SearchQueryRecord> records) {
+ logger.debug("QUERY_INSIGHTS_RECORDS: " + records.toString());
+ }
+
+ /**
+ * Close the debugger exporter sink
+ */
+ @Override
+ public void close() {
+ logger.debug("Closing the DebugExporter..");
+ }
+}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/LocalIndexExporter.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/LocalIndexExporter.java
new file mode 100644
index 0000000000000..c19fe3655098b
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/LocalIndexExporter.java
@@ -0,0 +1,113 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.insights.core.exporter;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.action.bulk.BulkRequestBuilder;
+import org.opensearch.action.bulk.BulkResponse;
+import org.opensearch.action.index.IndexRequest;
+import org.opensearch.client.Client;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.common.xcontent.XContentFactory;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.DateTimeFormatter;
+
+import java.util.List;
+
+/**
+ * Local index exporter for exporting query insights data to local OpenSearch indices.
+ */
+public final class LocalIndexExporter implements QueryInsightsExporter {
+ /**
+ * Logger of the local index exporter
+ */
+ private final Logger logger = LogManager.getLogger();
+ private final Client client;
+ private DateTimeFormatter indexPattern;
+
+ /**
+ * Constructor of LocalIndexExporter
+ *
+ * @param client OS client
+ * @param indexPattern the pattern of index to export to
+ */
+ public LocalIndexExporter(final Client client, final DateTimeFormatter indexPattern) {
+ this.indexPattern = indexPattern;
+ this.client = client;
+ }
+
+ /**
+ * Getter of indexPattern
+ *
+ * @return indexPattern
+ */
+ public DateTimeFormatter getIndexPattern() {
+ return indexPattern;
+ }
+
+ /**
+ * Setter of indexPattern
+ *
+ * @param indexPattern index pattern
+ * @return the current LocalIndexExporter
+ */
+ public LocalIndexExporter setIndexPattern(DateTimeFormatter indexPattern) {
+ this.indexPattern = indexPattern;
+ return this;
+ }
+
+ /**
+ * Export a list of SearchQueryRecord to a local index
+ *
+ * @param records list of {@link SearchQueryRecord}
+ */
+ @Override
+    public void export(final List<SearchQueryRecord> records) {
+ if (records == null || records.size() == 0) {
+ return;
+ }
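+        // Build a single bulk request with one index request per record and send it asynchronously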
+ try {
+ final String index = getDateTimeFromFormat();
+ final BulkRequestBuilder bulkRequestBuilder = client.prepareBulk().setTimeout(TimeValue.timeValueMinutes(1));
+ for (SearchQueryRecord record : records) {
+ bulkRequestBuilder.add(
+ new IndexRequest(index).source(record.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS))
+ );
+ }
+            bulkRequestBuilder.execute(new ActionListener<BulkResponse>() {
+ @Override
+ public void onResponse(BulkResponse bulkItemResponses) {}
+
+ @Override
+ public void onFailure(Exception e) {
+ logger.error("Failed to execute bulk operation for query insights data: ", e);
+ }
+ });
+ } catch (final Exception e) {
+ logger.error("Unable to index query insights data: ", e);
+ }
+ }
+
+ /**
+ * Close the exporter sink
+ */
+ @Override
+ public void close() {
+ logger.debug("Closing the LocalIndexExporter..");
+ }
+
+ private String getDateTimeFromFormat() {
+ return indexPattern.print(DateTime.now(DateTimeZone.UTC));
+ }
+}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporter.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporter.java
new file mode 100644
index 0000000000000..42e5354eb1640
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporter.java
@@ -0,0 +1,26 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.insights.core.exporter;
+
+import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;
+
+import java.io.Closeable;
+import java.util.List;
+
+/**
+ * Base interface for Query Insights exporters
+ */
+public interface QueryInsightsExporter extends Closeable {
+ /**
+ * Export a list of SearchQueryRecord to the exporter sink
+ *
+ * @param records list of {@link SearchQueryRecord}
+ */
+    void export(final List<SearchQueryRecord> records);
+}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporterFactory.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporterFactory.java
new file mode 100644
index 0000000000000..016911761a3d0
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporterFactory.java
@@ -0,0 +1,143 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.insights.core.exporter;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.client.Client;
+import org.opensearch.common.settings.Settings;
+import org.joda.time.format.DateTimeFormat;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Locale;
+import java.util.Set;
+
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.DEFAULT_TOP_N_QUERIES_INDEX_PATTERN;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.DEFAULT_TOP_QUERIES_EXPORTER_TYPE;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.EXPORTER_TYPE;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.EXPORT_INDEX;
+
+/**
+ * Factory class for validating and creating exporters based on provided settings
+ */
+public class QueryInsightsExporterFactory {
+ /**
+ * Logger of the query insights exporter factory
+ */
+ private final Logger logger = LogManager.getLogger();
+ final private Client client;
+    final private Set<QueryInsightsExporter> exporters;
+
+ /**
+ * Constructor of QueryInsightsExporterFactory
+ *
+ * @param client OS client
+ */
+ public QueryInsightsExporterFactory(final Client client) {
+ this.client = client;
+ this.exporters = new HashSet<>();
+ }
+
+ /**
+ * Validate exporter sink config
+ *
+ * @param settings exporter sink config {@link Settings}
+ * @throws IllegalArgumentException if provided exporter sink config settings are invalid
+ */
+ public void validateExporterConfig(final Settings settings) throws IllegalArgumentException {
+ // Disable exporter if the EXPORTER_TYPE setting is null
+ if (settings.get(EXPORTER_TYPE) == null) {
+ return;
+ }
+ SinkType type;
+ try {
+ type = SinkType.parse(settings.get(EXPORTER_TYPE, DEFAULT_TOP_QUERIES_EXPORTER_TYPE));
+ } catch (IllegalArgumentException e) {
+ throw new IllegalArgumentException(
+ String.format(
+ Locale.ROOT,
+ "Invalid exporter type [%s], type should be one of %s",
+ settings.get(EXPORTER_TYPE),
+ SinkType.allSinkTypes()
+ )
+ );
+ }
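+        // For local index sinks, the configured index pattern must be non-empty and a valid date format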
+ switch (type) {
+ case LOCAL_INDEX:
+ final String indexPattern = settings.get(EXPORT_INDEX, DEFAULT_TOP_N_QUERIES_INDEX_PATTERN);
+ if (indexPattern.length() == 0) {
+ throw new IllegalArgumentException("Empty index pattern configured for the exporter");
+ }
+ try {
+ DateTimeFormat.forPattern(indexPattern);
+ } catch (Exception e) {
+ throw new IllegalArgumentException(
+ String.format(Locale.ROOT, "Invalid index pattern [%s] configured for the exporter", indexPattern)
+ );
+ }
+ }
+ }
+
+ /**
+ * Create an exporter based on provided parameters
+ *
+ * @param type The type of exporter to create
+     * @param indexPattern the index pattern if creating an index exporter
+ * @return QueryInsightsExporter the created exporter sink
+ */
+ public QueryInsightsExporter createExporter(SinkType type, String indexPattern) {
+ if (SinkType.LOCAL_INDEX.equals(type)) {
+ QueryInsightsExporter exporter = new LocalIndexExporter(client, DateTimeFormat.forPattern(indexPattern));
+ this.exporters.add(exporter);
+ return exporter;
+ }
+ return DebugExporter.getInstance();
+ }
+
+ /**
+ * Update an exporter based on provided parameters
+ *
+ * @param exporter The exporter to update
+     * @param indexPattern the index pattern if creating an index exporter
+ * @return QueryInsightsExporter the updated exporter sink
+ */
+ public QueryInsightsExporter updateExporter(QueryInsightsExporter exporter, String indexPattern) {
+ if (exporter.getClass() == LocalIndexExporter.class) {
+ ((LocalIndexExporter) exporter).setIndexPattern(DateTimeFormat.forPattern(indexPattern));
+ }
+ return exporter;
+ }
+
+ /**
+ * Close an exporter
+ *
+ * @param exporter the exporter to close
+ */
+ public void closeExporter(QueryInsightsExporter exporter) throws IOException {
+ if (exporter != null) {
+ exporter.close();
+ this.exporters.remove(exporter);
+ }
+ }
+
+    /**
+     * Close all exporters
+     */
+ public void closeAllExporters() {
+ for (QueryInsightsExporter exporter : exporters) {
+ try {
+ closeExporter(exporter);
+ } catch (IOException e) {
+ logger.error("Fail to close query insights exporter, error: ", e);
+ }
+ }
+ }
+}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/SinkType.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/SinkType.java
new file mode 100644
index 0000000000000..c90c9c76b6706
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/SinkType.java
@@ -0,0 +1,66 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.insights.core.exporter;
+
+import java.util.Arrays;
+import java.util.Locale;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * Type of supported sinks
+ */
+public enum SinkType {
+ /** debug exporter */
+ DEBUG("debug"),
+ /** local index exporter */
+ LOCAL_INDEX("local_index");
+
+ private final String type;
+
+ SinkType(String type) {
+ this.type = type;
+ }
+
+ @Override
+ public String toString() {
+ return type;
+ }
+
+ /**
+ * Parse SinkType from String
+ * @param type the String representation of the SinkType
+ * @return SinkType
+ */
+ public static SinkType parse(final String type) {
+ return valueOf(type.toUpperCase(Locale.ROOT));
+ }
+
+ /**
+ * Get all valid SinkTypes
+ *
+ * @return A set contains all valid SinkTypes
+ */
+    public static Set<SinkType> allSinkTypes() {
+ return Arrays.stream(values()).collect(Collectors.toSet());
+ }
+
+ /**
+ * Get Sink type from exporter
+ *
+ * @param exporter the {@link QueryInsightsExporter}
+ * @return SinkType associated with this exporter
+ */
+ public static SinkType getSinkTypeFromExporter(QueryInsightsExporter exporter) {
+ if (exporter.getClass().equals(LocalIndexExporter.class)) {
+ return SinkType.LOCAL_INDEX;
+ }
+ return SinkType.DEBUG;
+ }
+}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/package-info.java
new file mode 100644
index 0000000000000..7164411194f85
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Query Insights exporter
+ */
+package org.opensearch.plugin.insights.core.exporter;
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java
index 9ec8673147c38..a1f810ad5987c 100644
--- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java
@@ -14,23 +14,27 @@
import org.opensearch.action.search.SearchRequest;
import org.opensearch.action.search.SearchRequestContext;
import org.opensearch.action.search.SearchRequestOperationsListener;
+import org.opensearch.action.search.SearchTask;
import org.opensearch.cluster.service.ClusterService;
import org.opensearch.common.inject.Inject;
+import org.opensearch.core.tasks.resourcetracker.TaskResourceInfo;
import org.opensearch.core.xcontent.ToXContent;
import org.opensearch.plugin.insights.core.service.QueryInsightsService;
import org.opensearch.plugin.insights.rules.model.Attribute;
import org.opensearch.plugin.insights.rules.model.MetricType;
import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;
+import org.opensearch.tasks.Task;
import java.util.Collections;
import java.util.HashMap;
+import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.TimeUnit;
-import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED;
-import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE;
-import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.getTopNEnabledSetting;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.getTopNSizeSetting;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.getTopNWindowSizeSetting;
/**
* The listener for query insights services.
@@ -45,6 +49,7 @@ public final class QueryInsightsListener extends SearchRequestOperationsListener
private static final Logger log = LogManager.getLogger(QueryInsightsListener.class);
private final QueryInsightsService queryInsightsService;
+ private final ClusterService clusterService;
/**
* Constructor for QueryInsightsListener
@@ -54,26 +59,32 @@ public final class QueryInsightsListener extends SearchRequestOperationsListener
*/
@Inject
public QueryInsightsListener(final ClusterService clusterService, final QueryInsightsService queryInsightsService) {
+ this.clusterService = clusterService;
this.queryInsightsService = queryInsightsService;
- clusterService.getClusterSettings()
- .addSettingsUpdateConsumer(TOP_N_LATENCY_QUERIES_ENABLED, v -> this.setEnableTopQueries(MetricType.LATENCY, v));
- clusterService.getClusterSettings()
- .addSettingsUpdateConsumer(
- TOP_N_LATENCY_QUERIES_SIZE,
- v -> this.queryInsightsService.getTopQueriesService(MetricType.LATENCY).setTopNSize(v),
- v -> this.queryInsightsService.getTopQueriesService(MetricType.LATENCY).validateTopNSize(v)
- );
- clusterService.getClusterSettings()
- .addSettingsUpdateConsumer(
- TOP_N_LATENCY_QUERIES_WINDOW_SIZE,
- v -> this.queryInsightsService.getTopQueriesService(MetricType.LATENCY).setWindowSize(v),
- v -> this.queryInsightsService.getTopQueriesService(MetricType.LATENCY).validateWindowSize(v)
- );
- this.setEnableTopQueries(MetricType.LATENCY, clusterService.getClusterSettings().get(TOP_N_LATENCY_QUERIES_ENABLED));
- this.queryInsightsService.getTopQueriesService(MetricType.LATENCY)
- .setTopNSize(clusterService.getClusterSettings().get(TOP_N_LATENCY_QUERIES_SIZE));
- this.queryInsightsService.getTopQueriesService(MetricType.LATENCY)
- .setWindowSize(clusterService.getClusterSettings().get(TOP_N_LATENCY_QUERIES_WINDOW_SIZE));
+        // Register settings update consumers for each top n queries feature: the enabled flag, the top n size and the window size.
+        // Expected metric types are latency, CPU and memory.
+ for (MetricType type : MetricType.allMetricTypes()) {
+ clusterService.getClusterSettings()
+ .addSettingsUpdateConsumer(getTopNEnabledSetting(type), v -> this.setEnableTopQueries(type, v));
+ clusterService.getClusterSettings()
+ .addSettingsUpdateConsumer(
+ getTopNSizeSetting(type),
+ v -> this.queryInsightsService.setTopNSize(type, v),
+ v -> this.queryInsightsService.validateTopNSize(type, v)
+ );
+ clusterService.getClusterSettings()
+ .addSettingsUpdateConsumer(
+ getTopNWindowSizeSetting(type),
+ v -> this.queryInsightsService.setWindowSize(type, v),
+ v -> this.queryInsightsService.validateWindowSize(type, v)
+ );
+
+ this.setEnableTopQueries(type, clusterService.getClusterSettings().get(getTopNEnabledSetting(type)));
+ this.queryInsightsService.validateTopNSize(type, clusterService.getClusterSettings().get(getTopNSizeSetting(type)));
+ this.queryInsightsService.setTopNSize(type, clusterService.getClusterSettings().get(getTopNSizeSetting(type)));
+ this.queryInsightsService.validateWindowSize(type, clusterService.getClusterSettings().get(getTopNWindowSizeSetting(type)));
+ this.queryInsightsService.setWindowSize(type, clusterService.getClusterSettings().get(getTopNWindowSizeSetting(type)));
+ }
}
/**
@@ -123,6 +134,27 @@ public void onRequestStart(SearchRequestContext searchRequestContext) {}
@Override
public void onRequestEnd(final SearchPhaseContext context, final SearchRequestContext searchRequestContext) {
+ constructSearchQueryRecord(context, searchRequestContext);
+ }
+
+ @Override
+ public void onRequestFailure(final SearchPhaseContext context, final SearchRequestContext searchRequestContext) {
+ constructSearchQueryRecord(context, searchRequestContext);
+ }
+
+ private void constructSearchQueryRecord(final SearchPhaseContext context, final SearchRequestContext searchRequestContext) {
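+        // Add the coordinator node's search task resource usage to the phase-level usages collected for this request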
+ SearchTask searchTask = context.getTask();
+        List<TaskResourceInfo> tasksResourceUsages = searchRequestContext.getPhaseResourceUsage();
+ tasksResourceUsages.add(
+ new TaskResourceInfo(
+ searchTask.getAction(),
+ searchTask.getId(),
+ searchTask.getParentTaskId().getId(),
+ clusterService.localNode().getId(),
+ searchTask.getTotalResourceStats()
+ )
+ );
+
final SearchRequest request = context.getRequest();
try {
            Map<MetricType, Number> measurements = new HashMap<>();
@@ -132,16 +164,39 @@ public void onRequestEnd(final SearchPhaseContext context, final SearchRequestCo
TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - searchRequestContext.getAbsoluteStartNanos())
);
}
+ if (queryInsightsService.isCollectionEnabled(MetricType.CPU)) {
+ measurements.put(
+ MetricType.CPU,
+ tasksResourceUsages.stream().map(a -> a.getTaskResourceUsage().getCpuTimeInNanos()).mapToLong(Long::longValue).sum()
+ );
+ }
+ if (queryInsightsService.isCollectionEnabled(MetricType.MEMORY)) {
+ measurements.put(
+ MetricType.MEMORY,
+ tasksResourceUsages.stream().map(a -> a.getTaskResourceUsage().getMemoryInBytes()).mapToLong(Long::longValue).sum()
+ );
+ }
            Map<Attribute, Object> attributes = new HashMap<>();
attributes.put(Attribute.SEARCH_TYPE, request.searchType().toString().toLowerCase(Locale.ROOT));
attributes.put(Attribute.SOURCE, request.source().toString(FORMAT_PARAMS));
attributes.put(Attribute.TOTAL_SHARDS, context.getNumShards());
attributes.put(Attribute.INDICES, request.indices());
attributes.put(Attribute.PHASE_LATENCY_MAP, searchRequestContext.phaseTookMap());
+ attributes.put(Attribute.TASK_RESOURCE_USAGES, tasksResourceUsages);
+
+            Map<String, Object> labels = new HashMap<>();
+            // Retrieve the user provided label if one exists
+ String userProvidedLabel = context.getTask().getHeader(Task.X_OPAQUE_ID);
+ if (userProvidedLabel != null) {
+ labels.put(Task.X_OPAQUE_ID, userProvidedLabel);
+ }
+ attributes.put(Attribute.LABELS, labels);
+ // construct SearchQueryRecord from attributes and measurements
SearchQueryRecord record = new SearchQueryRecord(request.getOrCreateAbsoluteStartMillis(), measurements, attributes);
queryInsightsService.addRecord(record);
} catch (Exception e) {
log.error(String.format(Locale.ROOT, "fail to ingest query insight data, error: %s", e));
}
}
+
}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java
index 525ca0d4a3d33..c63430a1a726c 100644
--- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java
@@ -8,14 +8,20 @@
package org.opensearch.plugin.insights.core.service;
+import org.opensearch.client.Client;
import org.opensearch.common.inject.Inject;
import org.opensearch.common.lifecycle.AbstractLifecycleComponent;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.plugin.insights.core.exporter.QueryInsightsExporterFactory;
import org.opensearch.plugin.insights.rules.model.MetricType;
import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;
import org.opensearch.plugin.insights.settings.QueryInsightsSettings;
import org.opensearch.threadpool.Scheduler;
import org.opensearch.threadpool.ThreadPool;
+import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
@@ -23,6 +29,8 @@
import java.util.Map;
import java.util.concurrent.LinkedBlockingQueue;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.getExporterSettings;
+
/**
* Service responsible for gathering, analyzing, storing and exporting
* information related to search queries
@@ -56,21 +64,37 @@ public class QueryInsightsService extends AbstractLifecycleComponent {
*/
protected volatile Scheduler.Cancellable scheduledFuture;
+ /**
+ * Query Insights exporter factory
+ */
+ final QueryInsightsExporterFactory queryInsightsExporterFactory;
+
/**
* Constructor of the QueryInsightsService
*
- * @param threadPool The OpenSearch thread pool to run async tasks
+ * @param clusterSettings OpenSearch cluster level settings
+ * @param threadPool The OpenSearch thread pool to run async tasks
+ * @param client OS client
*/
@Inject
- public QueryInsightsService(final ThreadPool threadPool) {
+ public QueryInsightsService(final ClusterSettings clusterSettings, final ThreadPool threadPool, final Client client) {
enableCollect = new HashMap<>();
queryRecordsQueue = new LinkedBlockingQueue<>(QueryInsightsSettings.QUERY_RECORD_QUEUE_CAPACITY);
+ this.threadPool = threadPool;
+ this.queryInsightsExporterFactory = new QueryInsightsExporterFactory(client);
+        // initialize top n queries services and configuration consumers
topQueriesServices = new HashMap<>();
for (MetricType metricType : MetricType.allMetricTypes()) {
enableCollect.put(metricType, false);
- topQueriesServices.put(metricType, new TopQueriesService(metricType));
+ topQueriesServices.put(metricType, new TopQueriesService(metricType, threadPool, queryInsightsExporterFactory));
+ }
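+        // Register consumers so exporter sink settings can be validated and applied dynamically for each metric type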
+ for (MetricType type : MetricType.allMetricTypes()) {
+ clusterSettings.addSettingsUpdateConsumer(
+ getExporterSettings(type),
+ (settings -> setExporter(type, settings)),
+ (settings -> validateExporterConfig(type, settings))
+ );
}
- this.threadPool = threadPool;
}
/**
@@ -157,6 +181,78 @@ public boolean isEnabled() {
return false;
}
+ /**
+ * Validate the window size config for a metricType
+ *
+ * @param type {@link MetricType}
+ * @param windowSize {@link TimeValue}
+ */
+ public void validateWindowSize(final MetricType type, final TimeValue windowSize) {
+ if (topQueriesServices.containsKey(type)) {
+ topQueriesServices.get(type).validateWindowSize(windowSize);
+ }
+ }
+
+ /**
+ * Set window size for a metricType
+ *
+ * @param type {@link MetricType}
+ * @param windowSize {@link TimeValue}
+ */
+ public void setWindowSize(final MetricType type, final TimeValue windowSize) {
+ if (topQueriesServices.containsKey(type)) {
+ topQueriesServices.get(type).setWindowSize(windowSize);
+ }
+ }
+
+ /**
+ * Validate the top n size config for a metricType
+ *
+ * @param type {@link MetricType}
+ * @param topNSize top n size
+ */
+ public void validateTopNSize(final MetricType type, final int topNSize) {
+ if (topQueriesServices.containsKey(type)) {
+ topQueriesServices.get(type).validateTopNSize(topNSize);
+ }
+ }
+
+ /**
+ * Set the top n size config for a metricType
+ *
+ * @param type {@link MetricType}
+ * @param topNSize top n size
+ */
+ public void setTopNSize(final MetricType type, final int topNSize) {
+ if (topQueriesServices.containsKey(type)) {
+ topQueriesServices.get(type).setTopNSize(topNSize);
+ }
+ }
+
+ /**
+ * Set the exporter config for a metricType
+ *
+ * @param type {@link MetricType}
+ * @param settings exporter settings
+ */
+ public void setExporter(final MetricType type, final Settings settings) {
+ if (topQueriesServices.containsKey(type)) {
+ topQueriesServices.get(type).setExporter(settings);
+ }
+ }
+
+ /**
+ * Validate the exporter config for a metricType
+ *
+ * @param type {@link MetricType}
+ * @param settings exporter settings
+ */
+ public void validateExporterConfig(final MetricType type, final Settings settings) {
+ if (topQueriesServices.containsKey(type)) {
+ topQueriesServices.get(type).validateExporterConfig(settings);
+ }
+ }
+
@Override
protected void doStart() {
if (isEnabled()) {
@@ -176,5 +272,12 @@ protected void doStop() {
}
@Override
- protected void doClose() {}
+ protected void doClose() throws IOException {
+        // close all top n queries services
+ for (TopQueriesService topQueriesService : topQueriesServices.values()) {
+ topQueriesService.close();
+ }
+ // close any unclosed resources
+ queryInsightsExporterFactory.closeAllExporters();
+ }
}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java
index d2c30cbdf98e7..c21b89be4dcca 100644
--- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java
@@ -8,11 +8,19 @@
package org.opensearch.plugin.insights.core.service;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
+import org.opensearch.plugin.insights.core.exporter.QueryInsightsExporter;
+import org.opensearch.plugin.insights.core.exporter.QueryInsightsExporterFactory;
+import org.opensearch.plugin.insights.core.exporter.SinkType;
import org.opensearch.plugin.insights.rules.model.MetricType;
import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;
import org.opensearch.plugin.insights.settings.QueryInsightsSettings;
+import org.opensearch.threadpool.ThreadPool;
+import java.io.IOException;
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
@@ -27,6 +35,12 @@
import java.util.stream.Collectors;
import java.util.stream.Stream;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.DEFAULT_TOP_N_QUERIES_INDEX_PATTERN;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.DEFAULT_TOP_QUERIES_EXPORTER_TYPE;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.EXPORTER_TYPE;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.EXPORT_INDEX;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.QUERY_INSIGHTS_EXECUTOR;
+
/**
* Service responsible for gathering and storing top N queries
* with high latency or resource usage
@@ -34,6 +48,10 @@
* @opensearch.internal
*/
public class TopQueriesService {
+ /**
+ * Logger of the local index exporter
+ */
+ private final Logger logger = LogManager.getLogger();
private boolean enabled;
/**
* The metric type to measure top n queries
@@ -63,12 +81,34 @@ public class TopQueriesService {
*/
    private final AtomicReference<List<SearchQueryRecord>> topQueriesHistorySnapshot;
- TopQueriesService(final MetricType metricType) {
+ /**
+ * Factory for validating and creating exporters
+ */
+ private final QueryInsightsExporterFactory queryInsightsExporterFactory;
+
+ /**
+     * The internal OpenSearch thread pool that executes async processing and exporting tasks
+ */
+ private final ThreadPool threadPool;
+
+ /**
+ * Exporter for exporting top queries data
+ */
+ private QueryInsightsExporter exporter;
+
+ TopQueriesService(
+ final MetricType metricType,
+ final ThreadPool threadPool,
+ final QueryInsightsExporterFactory queryInsightsExporterFactory
+ ) {
this.enabled = false;
this.metricType = metricType;
+ this.threadPool = threadPool;
+ this.queryInsightsExporterFactory = queryInsightsExporterFactory;
this.topNSize = QueryInsightsSettings.DEFAULT_TOP_N_SIZE;
this.windowSize = QueryInsightsSettings.DEFAULT_WINDOW_SIZE;
this.windowStart = -1L;
+ this.exporter = null;
topQueriesStore = new PriorityQueue<>(topNSize, (a, b) -> SearchQueryRecord.compare(a, b, metricType));
topQueriesCurrentSnapshot = new AtomicReference<>(new ArrayList<>());
topQueriesHistorySnapshot = new AtomicReference<>(new ArrayList<>());
@@ -169,6 +209,47 @@ public void validateWindowSize(final TimeValue windowSize) {
}
}
+ /**
+ * Set up the top queries exporter based on provided settings
+ *
+ * @param settings exporter config {@link Settings}
+ */
+ public void setExporter(final Settings settings) {
+ if (settings.get(EXPORTER_TYPE) != null) {
+ SinkType expectedType = SinkType.parse(settings.get(EXPORTER_TYPE, DEFAULT_TOP_QUERIES_EXPORTER_TYPE));
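+            // Update the existing exporter in place when the sink type is unchanged; otherwise close it and create a new one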
+ if (exporter != null && expectedType == SinkType.getSinkTypeFromExporter(exporter)) {
+ queryInsightsExporterFactory.updateExporter(exporter, settings.get(EXPORT_INDEX, DEFAULT_TOP_N_QUERIES_INDEX_PATTERN));
+ } else {
+ try {
+ queryInsightsExporterFactory.closeExporter(this.exporter);
+ } catch (IOException e) {
+ logger.error("Fail to close the current exporter when updating exporter, error: ", e);
+ }
+ this.exporter = queryInsightsExporterFactory.createExporter(
+ SinkType.parse(settings.get(EXPORTER_TYPE, DEFAULT_TOP_QUERIES_EXPORTER_TYPE)),
+ settings.get(EXPORT_INDEX, DEFAULT_TOP_N_QUERIES_INDEX_PATTERN)
+ );
+ }
+ } else {
+ // Disable exporter if exporter type is set to null
+ try {
+ queryInsightsExporterFactory.closeExporter(this.exporter);
+ this.exporter = null;
+ } catch (IOException e) {
+ logger.error("Fail to close the current exporter when disabling exporter, error: ", e);
+ }
+ }
+ }
+
+ /**
+ * Validate provided settings for top queries exporter
+ *
+     * @param settings exporter config {@link Settings}
+ */
+ public void validateExporterConfig(Settings settings) {
+ queryInsightsExporterFactory.validateExporterConfig(settings);
+ }
+
/**
* Get all top queries records that are in the current top n queries store
* Optionally include top N records from the last window.
@@ -254,6 +335,10 @@ private void rotateWindowIfNecessary(final long newWindowStart) {
topQueriesStore.clear();
topQueriesCurrentSnapshot.set(new ArrayList<>());
windowStart = newWindowStart;
+ // export to the configured sink
+ if (exporter != null) {
+ threadPool.executor(QUERY_INSIGHTS_EXECUTOR).execute(() -> exporter.export(history));
+ }
}
}
@@ -279,4 +364,11 @@ private long calculateWindowStart(final long timestamp) {
    public List<SearchQueryRecord> getTopQueriesCurrentSnapshot() {
return topQueriesCurrentSnapshot.get();
}
+
+ /**
+ * Close the top n queries service
+ */
+ public void close() throws IOException {
+ queryInsightsExporterFactory.closeExporter(this.exporter);
+ }
}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java
index c1d17edf9ff14..dcdb085fdc6fa 100644
--- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java
@@ -43,7 +43,15 @@ public enum Attribute {
/**
* The node id for this request
*/
- NODE_ID;
+ NODE_ID,
+ /**
+ * Tasks level resource usages in this request
+ */
+ TASK_RESOURCE_USAGES,
+ /**
+ * Custom search request labels
+ */
+ LABELS;
/**
* Read an Attribute from a StreamInput
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java
index cdd090fbf4804..4694c757f4ef2 100644
--- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java
@@ -35,7 +35,7 @@ public enum MetricType implements Comparator {
/**
* JVM heap usage metric type
*/
- JVM;
+ MEMORY;
/**
* Read a MetricType from a StreamInput
@@ -93,10 +93,9 @@ public static Set allMetricTypes() {
public int compare(final Number a, final Number b) {
switch (this) {
case LATENCY:
- return Long.compare(a.longValue(), b.longValue());
- case JVM:
case CPU:
- return Double.compare(a.doubleValue(), b.doubleValue());
+ case MEMORY:
+ return Long.compare(a.longValue(), b.longValue());
}
return -1;
}
@@ -110,10 +109,9 @@ public int compare(final Number a, final Number b) {
Number parseValue(final Object o) {
switch (this) {
case LATENCY:
- return (Long) o;
- case JVM:
case CPU:
- return (Double) o;
+ case MEMORY:
+ return (Long) o;
default:
return (Number) o;
}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java
index 060711edb5580..fec00a680ae58 100644
--- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java
@@ -8,9 +8,11 @@
package org.opensearch.plugin.insights.rules.model;
+import org.opensearch.core.common.Strings;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.common.io.stream.StreamOutput;
import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.core.xcontent.ToXContent;
import org.opensearch.core.xcontent.ToXContentObject;
import org.opensearch.core.xcontent.XContentBuilder;
@@ -173,4 +175,9 @@ public boolean equals(final Object o) {
public int hashCode() {
return Objects.hash(timestamp, measurements, attributes);
}
+
+ @Override
+ public String toString() {
+ return Strings.toString(MediaTypeRegistry.JSON, this);
+ }
}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java
index ddf614211bc41..7949b70a16db6 100644
--- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java
@@ -8,7 +8,6 @@
package org.opensearch.plugin.insights.rules.transport.top_queries;
-import org.opensearch.OpenSearchException;
import org.opensearch.action.FailedNodeException;
import org.opensearch.action.support.ActionFilters;
import org.opensearch.action.support.nodes.TransportNodesAction;
@@ -21,7 +20,6 @@
import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction;
import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest;
import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesResponse;
-import org.opensearch.plugin.insights.rules.model.MetricType;
import org.opensearch.plugin.insights.settings.QueryInsightsSettings;
import org.opensearch.threadpool.ThreadPool;
import org.opensearch.transport.TransportRequest;
@@ -29,7 +27,6 @@
import java.io.IOException;
import java.util.List;
-import java.util.Locale;
/**
* Transport action for cluster/node level top queries information.
@@ -81,17 +78,18 @@ protected TopQueriesResponse newResponse(
        final List<TopQueries> responses,
        final List<FailedNodeException> failures
) {
- if (topQueriesRequest.getMetricType() == MetricType.LATENCY) {
- return new TopQueriesResponse(
- clusterService.getClusterName(),
- responses,
- failures,
- clusterService.getClusterSettings().get(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE),
- MetricType.LATENCY
- );
- } else {
- throw new OpenSearchException(String.format(Locale.ROOT, "invalid metric type %s", topQueriesRequest.getMetricType()));
+ int size;
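+        // Look up the configured top n size for the requested metric type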
+ switch (topQueriesRequest.getMetricType()) {
+ case CPU:
+ size = clusterService.getClusterSettings().get(QueryInsightsSettings.TOP_N_CPU_QUERIES_SIZE);
+ break;
+ case MEMORY:
+ size = clusterService.getClusterSettings().get(QueryInsightsSettings.TOP_N_MEMORY_QUERIES_SIZE);
+ break;
+ default:
+ size = clusterService.getClusterSettings().get(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE);
}
+ return new TopQueriesResponse(clusterService.getClusterName(), responses, failures, size, topQueriesRequest.getMetricType());
}
@Override
@@ -107,15 +105,10 @@ protected TopQueries newNodeResponse(final StreamInput in) throws IOException {
@Override
protected TopQueries nodeOperation(final NodeRequest nodeRequest) {
final TopQueriesRequest request = nodeRequest.request;
- if (request.getMetricType() == MetricType.LATENCY) {
- return new TopQueries(
- clusterService.localNode(),
- queryInsightsService.getTopQueriesService(MetricType.LATENCY).getTopQueriesRecords(true)
- );
- } else {
- throw new OpenSearchException(String.format(Locale.ROOT, "invalid metric type %s", request.getMetricType()));
- }
-
+ return new TopQueries(
+ clusterService.localNode(),
+ queryInsightsService.getTopQueriesService(request.getMetricType()).getTopQueriesRecords(true)
+ );
}
/**
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java
index 52cc1fbde790f..25309b5721792 100644
--- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java
@@ -9,7 +9,10 @@
package org.opensearch.plugin.insights.settings;
import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
+import org.opensearch.plugin.insights.core.exporter.SinkType;
+import org.opensearch.plugin.insights.rules.model.MetricType;
import java.util.Arrays;
import java.util.HashSet;
@@ -79,6 +82,10 @@ public class QueryInsightsSettings {
public static final String TOP_N_QUERIES_SETTING_PREFIX = "search.insights.top_queries";
/** Default prefix for top N queries by latency feature */
public static final String TOP_N_LATENCY_QUERIES_PREFIX = TOP_N_QUERIES_SETTING_PREFIX + ".latency";
+ /** Default prefix for top N queries by cpu feature */
+ public static final String TOP_N_CPU_QUERIES_PREFIX = TOP_N_QUERIES_SETTING_PREFIX + ".cpu";
+ /** Default prefix for top N queries by memory feature */
+ public static final String TOP_N_MEMORY_QUERIES_PREFIX = TOP_N_QUERIES_SETTING_PREFIX + ".memory";
/**
* Boolean setting for enabling top queries by latency.
*/
@@ -109,6 +116,187 @@ public class QueryInsightsSettings {
Setting.Property.Dynamic
);
+ /**
+ * Boolean setting for enabling top queries by cpu.
+ */
+    public static final Setting<Boolean> TOP_N_CPU_QUERIES_ENABLED = Setting.boolSetting(
+ TOP_N_CPU_QUERIES_PREFIX + ".enabled",
+ false,
+ Setting.Property.Dynamic,
+ Setting.Property.NodeScope
+ );
+
+ /**
+ * Int setting to define the top n size for top queries by cpu.
+ */
+    public static final Setting<Integer> TOP_N_CPU_QUERIES_SIZE = Setting.intSetting(
+ TOP_N_CPU_QUERIES_PREFIX + ".top_n_size",
+ DEFAULT_TOP_N_SIZE,
+ Setting.Property.Dynamic,
+ Setting.Property.NodeScope
+ );
+
+ /**
+ * Time setting to define the window size in seconds for top queries by cpu.
+ */
+    public static final Setting<TimeValue> TOP_N_CPU_QUERIES_WINDOW_SIZE = Setting.positiveTimeSetting(
+ TOP_N_CPU_QUERIES_PREFIX + ".window_size",
+ DEFAULT_WINDOW_SIZE,
+ Setting.Property.NodeScope,
+ Setting.Property.Dynamic
+ );
+
+ /**
+ * Boolean setting for enabling top queries by memory.
+ */
+    public static final Setting<Boolean> TOP_N_MEMORY_QUERIES_ENABLED = Setting.boolSetting(
+ TOP_N_MEMORY_QUERIES_PREFIX + ".enabled",
+ false,
+ Setting.Property.Dynamic,
+ Setting.Property.NodeScope
+ );
+
+ /**
+ * Int setting to define the top n size for top queries by memory.
+ */
+    public static final Setting<Integer> TOP_N_MEMORY_QUERIES_SIZE = Setting.intSetting(
+ TOP_N_MEMORY_QUERIES_PREFIX + ".top_n_size",
+ DEFAULT_TOP_N_SIZE,
+ Setting.Property.Dynamic,
+ Setting.Property.NodeScope
+ );
+
+ /**
+ * Time setting to define the window size in seconds for top queries by memory.
+ */
+    public static final Setting<TimeValue> TOP_N_MEMORY_QUERIES_WINDOW_SIZE = Setting.positiveTimeSetting(
+ TOP_N_MEMORY_QUERIES_PREFIX + ".window_size",
+ DEFAULT_WINDOW_SIZE,
+ Setting.Property.NodeScope,
+ Setting.Property.Dynamic
+ );
+
+ /**
+ * Config key for exporter type
+ */
+ public static final String EXPORTER_TYPE = "type";
+ /**
+ * Config key for export index
+ */
+ public static final String EXPORT_INDEX = "config.index";
+
+ /**
+ * Settings and defaults for top queries exporters
+ */
+ private static final String TOP_N_LATENCY_QUERIES_EXPORTER_PREFIX = TOP_N_LATENCY_QUERIES_PREFIX + ".exporter.";
+ /**
+ * Prefix for top n queries by cpu exporters
+ */
+ private static final String TOP_N_CPU_QUERIES_EXPORTER_PREFIX = TOP_N_CPU_QUERIES_PREFIX + ".exporter.";
+ /**
+ * Prefix for top n queries by memory exporters
+ */
+ private static final String TOP_N_MEMORY_QUERIES_EXPORTER_PREFIX = TOP_N_MEMORY_QUERIES_PREFIX + ".exporter.";
+ /**
+ * Default index pattern of top n queries
+ */
+ public static final String DEFAULT_TOP_N_QUERIES_INDEX_PATTERN = "'top_queries-'YYYY.MM.dd";
+ /**
+ * Default exporter type of top queries
+ */
+ public static final String DEFAULT_TOP_QUERIES_EXPORTER_TYPE = SinkType.LOCAL_INDEX.toString();
+
+ /**
+ * Settings for the exporter of top latency queries
+ */
+    public static final Setting<Settings> TOP_N_LATENCY_EXPORTER_SETTINGS = Setting.groupSetting(
+ TOP_N_LATENCY_QUERIES_EXPORTER_PREFIX,
+ Setting.Property.Dynamic,
+ Setting.Property.NodeScope
+ );
+
+ /**
+ * Settings for the exporter of top cpu queries
+ */
+    public static final Setting<Settings> TOP_N_CPU_EXPORTER_SETTINGS = Setting.groupSetting(
+ TOP_N_CPU_QUERIES_EXPORTER_PREFIX,
+ Setting.Property.Dynamic,
+ Setting.Property.NodeScope
+ );
+
+    /**
+     * Settings for the exporter of top memory queries
+     */
+    public static final Setting<Settings> TOP_N_MEMORY_EXPORTER_SETTINGS = Setting.groupSetting(
+ TOP_N_MEMORY_QUERIES_EXPORTER_PREFIX,
+ Setting.Property.Dynamic,
+ Setting.Property.NodeScope
+ );
+
+ /**
+ * Get the enabled setting based on type
+ * @param type MetricType
+ * @return enabled setting
+ */
+    public static Setting<Boolean> getTopNEnabledSetting(MetricType type) {
+ switch (type) {
+ case CPU:
+ return TOP_N_CPU_QUERIES_ENABLED;
+ case MEMORY:
+ return TOP_N_MEMORY_QUERIES_ENABLED;
+ default:
+ return TOP_N_LATENCY_QUERIES_ENABLED;
+ }
+ }
+
+ /**
+ * Get the top n size setting based on type
+ * @param type MetricType
+ * @return top n size setting
+ */
+    public static Setting<Integer> getTopNSizeSetting(MetricType type) {
+ switch (type) {
+ case CPU:
+ return TOP_N_CPU_QUERIES_SIZE;
+ case MEMORY:
+ return TOP_N_MEMORY_QUERIES_SIZE;
+ default:
+ return TOP_N_LATENCY_QUERIES_SIZE;
+ }
+ }
+
+ /**
+ * Get the window size setting based on type
+ * @param type MetricType
+ * @return top n queries window size setting
+ */
+    public static Setting<TimeValue> getTopNWindowSizeSetting(MetricType type) {
+ switch (type) {
+ case CPU:
+ return TOP_N_CPU_QUERIES_WINDOW_SIZE;
+ case MEMORY:
+ return TOP_N_MEMORY_QUERIES_WINDOW_SIZE;
+ default:
+ return TOP_N_LATENCY_QUERIES_WINDOW_SIZE;
+ }
+ }
+
+ /**
+ * Get the exporter settings based on type
+ * @param type MetricType
+ * @return exporter setting
+ */
+    public static Setting<Settings> getExporterSettings(MetricType type) {
+ switch (type) {
+ case CPU:
+ return TOP_N_CPU_EXPORTER_SETTINGS;
+ case MEMORY:
+ return TOP_N_MEMORY_EXPORTER_SETTINGS;
+ default:
+ return TOP_N_LATENCY_EXPORTER_SETTINGS;
+ }
+ }
+
/**
* Default constructor
*/
diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java
index 2f353f2a53329..2efe9085a39ee 100644
--- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java
+++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java
@@ -47,10 +47,7 @@ public void setup() {
Settings.Builder settingsBuilder = Settings.builder();
Settings settings = settingsBuilder.build();
ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
- clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED);
- clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE);
- clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE);
-
+ QueryInsightsTestUtils.registerAllQueryInsightsSettings(clusterSettings);
clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, threadPool);
}
@@ -59,7 +56,16 @@ public void testGetSettings() {
Arrays.asList(
QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED,
QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE,
- QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE
+ QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE,
+ QueryInsightsSettings.TOP_N_LATENCY_EXPORTER_SETTINGS,
+ QueryInsightsSettings.TOP_N_CPU_QUERIES_ENABLED,
+ QueryInsightsSettings.TOP_N_CPU_QUERIES_SIZE,
+ QueryInsightsSettings.TOP_N_CPU_QUERIES_WINDOW_SIZE,
+ QueryInsightsSettings.TOP_N_CPU_EXPORTER_SETTINGS,
+ QueryInsightsSettings.TOP_N_MEMORY_QUERIES_ENABLED,
+ QueryInsightsSettings.TOP_N_MEMORY_QUERIES_SIZE,
+ QueryInsightsSettings.TOP_N_MEMORY_QUERIES_WINDOW_SIZE,
+ QueryInsightsSettings.TOP_N_MEMORY_EXPORTER_SETTINGS
),
queryInsightsPlugin.getSettings()
);
diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java
index 870ef5b9c8be9..7fa4e9841c20e 100644
--- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java
+++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java
@@ -10,6 +10,7 @@
import org.opensearch.action.search.SearchType;
import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.util.Maps;
import org.opensearch.core.xcontent.ToXContent;
import org.opensearch.core.xcontent.XContentBuilder;
@@ -17,6 +18,7 @@
import org.opensearch.plugin.insights.rules.model.Attribute;
import org.opensearch.plugin.insights.rules.model.MetricType;
import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;
+import org.opensearch.plugin.insights.settings.QueryInsightsSettings;
import org.opensearch.test.VersionUtils;
import java.io.IOException;
@@ -36,7 +38,6 @@
import static org.opensearch.test.OpenSearchTestCase.random;
import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLengthBetween;
import static org.opensearch.test.OpenSearchTestCase.randomArray;
-import static org.opensearch.test.OpenSearchTestCase.randomDouble;
import static org.opensearch.test.OpenSearchTestCase.randomIntBetween;
import static org.opensearch.test.OpenSearchTestCase.randomLong;
import static org.opensearch.test.OpenSearchTestCase.randomLongBetween;
@@ -63,9 +64,9 @@ public static List generateQueryInsightRecords(int lower, int
MetricType.LATENCY,
randomLongBetween(1000, 10000),
MetricType.CPU,
- randomDouble(),
- MetricType.JVM,
- randomDouble()
+ randomLongBetween(1000, 10000),
+ MetricType.MEMORY,
+ randomLongBetween(1000, 10000)
);
Map phaseLatencyMap = new HashMap<>();
@@ -186,4 +187,19 @@ public static boolean checkRecordsEqualsWithoutOrder(
}
return true;
}
+
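+ /**
+ * Registers every query insights setting (latency, CPU, memory and their exporter settings)
+ * on the given ClusterSettings so test cluster services accept dynamic updates for them.
+ */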
+ public static void registerAllQueryInsightsSettings(ClusterSettings clusterSettings) {
+ clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED);
+ clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE);
+ clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE);
+ clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_EXPORTER_SETTINGS);
+ clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_CPU_QUERIES_ENABLED);
+ clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_CPU_QUERIES_SIZE);
+ clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_CPU_QUERIES_WINDOW_SIZE);
+ clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_CPU_EXPORTER_SETTINGS);
+ clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_MEMORY_QUERIES_ENABLED);
+ clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_MEMORY_QUERIES_SIZE);
+ clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_MEMORY_QUERIES_WINDOW_SIZE);
+ clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_MEMORY_EXPORTER_SETTINGS);
+ }
}
diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/DebugExporterTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/DebugExporterTests.java
new file mode 100644
index 0000000000000..736e406289b2c
--- /dev/null
+++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/DebugExporterTests.java
@@ -0,0 +1,37 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.insights.core.exporter;
+
+import org.opensearch.plugin.insights.QueryInsightsTestUtils;
+import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;
+import org.opensearch.test.OpenSearchTestCase;
+import org.junit.Before;
+
+import java.util.List;
+
+/**
+ * Granular tests for the {@link DebugExporter} class.
+ */
+public class DebugExporterTests extends OpenSearchTestCase {
+ private DebugExporter debugExporter;
+
+ @Before
+ public void setup() {
+ debugExporter = DebugExporter.getInstance();
+ }
+
+ public void testExport() {
+ List<SearchQueryRecord> records = QueryInsightsTestUtils.generateQueryInsightRecords(2);
+ try {
+ debugExporter.export(records);
+ } catch (Exception e) {
+ fail("No exception should be thrown when exporting query insights data");
+ }
+ }
+}
diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/LocalIndexExporterTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/LocalIndexExporterTests.java
new file mode 100644
index 0000000000000..9ea864a7083f4
--- /dev/null
+++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/LocalIndexExporterTests.java
@@ -0,0 +1,99 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.insights.core.exporter;
+
+import org.opensearch.action.bulk.BulkAction;
+import org.opensearch.action.bulk.BulkRequestBuilder;
+import org.opensearch.action.bulk.BulkResponse;
+import org.opensearch.action.support.PlainActionFuture;
+import org.opensearch.client.Client;
+import org.opensearch.plugin.insights.QueryInsightsTestUtils;
+import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;
+import org.opensearch.test.OpenSearchTestCase;
+import org.joda.time.format.DateTimeFormat;
+import org.joda.time.format.DateTimeFormatter;
+import org.junit.Before;
+
+import java.util.List;
+
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.when;
+
+/**
+ * Granular tests for the {@link LocalIndexExporter} class.
+ */
+public class LocalIndexExporterTests extends OpenSearchTestCase {
+ private final DateTimeFormatter format = DateTimeFormat.forPattern("YYYY.MM.dd");
+ private final Client client = mock(Client.class);
+ private LocalIndexExporter localIndexExporter;
+
+ @Before
+ public void setup() {
+ localIndexExporter = new LocalIndexExporter(client, format);
+ }
+
+ public void testExportEmptyRecords() {
+ List<SearchQueryRecord> records = List.of();
+ try {
+ localIndexExporter.export(records);
+ } catch (Exception e) {
+ fail("No exception should be thrown when exporting empty query insights data");
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ public void testExportRecords() {
+ BulkRequestBuilder bulkRequestBuilder = spy(new BulkRequestBuilder(client, BulkAction.INSTANCE));
+ final PlainActionFuture<BulkResponse> future = mock(PlainActionFuture.class);
+ when(future.actionGet()).thenReturn(null);
+ doAnswer(invocation -> future).when(bulkRequestBuilder).execute();
+ when(client.prepareBulk()).thenReturn(bulkRequestBuilder);
+
+ List<SearchQueryRecord> records = QueryInsightsTestUtils.generateQueryInsightRecords(2);
+ try {
+ localIndexExporter.export(records);
+ } catch (Exception e) {
+ fail("No exception should be thrown when exporting query insights data");
+ }
+ assertEquals(2, bulkRequestBuilder.numberOfActions());
+ }
+
+ @SuppressWarnings("unchecked")
+ public void testExportRecordsWithError() {
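+ // The exporter is expected to handle bulk failures internally rather than propagate them to callers.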
+ BulkRequestBuilder bulkRequestBuilder = spy(new BulkRequestBuilder(client, BulkAction.INSTANCE));
+ final PlainActionFuture<BulkResponse> future = mock(PlainActionFuture.class);
+ when(future.actionGet()).thenReturn(null);
+ doThrow(new RuntimeException()).when(bulkRequestBuilder).execute();
+ when(client.prepareBulk()).thenReturn(bulkRequestBuilder);
+
+ List<SearchQueryRecord> records = QueryInsightsTestUtils.generateQueryInsightRecords(2);
+ try {
+ localIndexExporter.export(records);
+ } catch (Exception e) {
+ fail("No exception should be thrown when exporting query insights data");
+ }
+ }
+
+ public void testClose() {
+ try {
+ localIndexExporter.close();
+ } catch (Exception e) {
+ fail("No exception should be thrown when closing local index exporter");
+ }
+ }
+
+ public void testGetAndSetIndexPattern() {
+ DateTimeFormatter newFormatter = mock(DateTimeFormatter.class);
+ localIndexExporter.setIndexPattern(newFormatter);
+ assertSame(newFormatter, localIndexExporter.getIndexPattern());
+ }
+}
diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporterFactoryTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporterFactoryTests.java
new file mode 100644
index 0000000000000..f01dd2c17509c
--- /dev/null
+++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporterFactoryTests.java
@@ -0,0 +1,89 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.insights.core.exporter;
+
+import org.opensearch.client.Client;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.test.OpenSearchTestCase;
+import org.joda.time.format.DateTimeFormat;
+import org.junit.Before;
+
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.DEFAULT_TOP_QUERIES_EXPORTER_TYPE;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.EXPORTER_TYPE;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.EXPORT_INDEX;
+import static org.mockito.Mockito.mock;
+
+/**
+ * Granular tests for the {@link QueryInsightsExporterFactory} class.
+ */
+public class QueryInsightsExporterFactoryTests extends OpenSearchTestCase {
+ private final String format = "YYYY.MM.dd";
+
+ private final Client client = mock(Client.class);
+ private QueryInsightsExporterFactory queryInsightsExporterFactory;
+
+ @Before
+ public void setup() {
+ queryInsightsExporterFactory = new QueryInsightsExporterFactory(client);
+ }
+
+ public void testValidateConfigWhenResetExporter() {
+ Settings.Builder settingsBuilder = Settings.builder();
+ // empty settings
+ Settings settings = settingsBuilder.build();
+ try {
+ queryInsightsExporterFactory.validateExporterConfig(settings);
+ } catch (Exception e) {
+ fail("No exception should be thrown when setting is null");
+ }
+ }
+
+ public void testInvalidExporterTypeConfig() {
+ Settings.Builder settingsBuilder = Settings.builder();
+ Settings settings = settingsBuilder.put(EXPORTER_TYPE, "some_invalid_type").build();
+ assertThrows(IllegalArgumentException.class, () -> { queryInsightsExporterFactory.validateExporterConfig(settings); });
+ }
+
+ public void testInvalidLocalIndexConfig() {
+ Settings.Builder settingsBuilder = Settings.builder();
+ assertThrows(IllegalArgumentException.class, () -> {
+ queryInsightsExporterFactory.validateExporterConfig(
+ settingsBuilder.put(EXPORTER_TYPE, DEFAULT_TOP_QUERIES_EXPORTER_TYPE).put(EXPORT_INDEX, "").build()
+ );
+ });
+ assertThrows(IllegalArgumentException.class, () -> {
+ queryInsightsExporterFactory.validateExporterConfig(
+ settingsBuilder.put(EXPORTER_TYPE, DEFAULT_TOP_QUERIES_EXPORTER_TYPE).put(EXPORT_INDEX, "some_invalid_pattern").build()
+ );
+ });
+ }
+
+ public void testCreateAndCloseExporter() {
+ QueryInsightsExporter exporter1 = queryInsightsExporterFactory.createExporter(SinkType.LOCAL_INDEX, format);
+ assertTrue(exporter1 instanceof LocalIndexExporter);
+ QueryInsightsExporter exporter2 = queryInsightsExporterFactory.createExporter(SinkType.DEBUG, format);
+ assertTrue(exporter2 instanceof DebugExporter);
+ QueryInsightsExporter exporter3 = queryInsightsExporterFactory.createExporter(SinkType.DEBUG, format);
+ assertTrue(exporter3 instanceof DebugExporter);
+ try {
+ queryInsightsExporterFactory.closeExporter(exporter1);
+ queryInsightsExporterFactory.closeExporter(exporter2);
+ queryInsightsExporterFactory.closeAllExporters();
+ } catch (Exception e) {
+ fail("No exception should be thrown when closing exporter");
+ }
+ }
+
+ public void testUpdateExporter() {
+ LocalIndexExporter exporter = new LocalIndexExporter(client, DateTimeFormat.forPattern("yyyy-MM-dd"));
+ queryInsightsExporterFactory.updateExporter(exporter, "yyyy-MM-dd-HH");
+ assertEquals(DateTimeFormat.forPattern("yyyy-MM-dd-HH"), exporter.getIndexPattern());
+ }
+
+}
diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java
index 328ed0cd2ed15..86de44c680188 100644
--- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java
+++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java
@@ -11,27 +11,44 @@
import org.opensearch.action.search.SearchPhaseContext;
import org.opensearch.action.search.SearchRequest;
import org.opensearch.action.search.SearchRequestContext;
+import org.opensearch.action.search.SearchTask;
import org.opensearch.action.search.SearchType;
+import org.opensearch.action.support.replication.ClusterStateCreationUtils;
+import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.collect.Tuple;
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.concurrent.ThreadContext;
+import org.opensearch.common.util.io.IOUtils;
+import org.opensearch.core.tasks.TaskId;
+import org.opensearch.plugin.insights.QueryInsightsTestUtils;
import org.opensearch.plugin.insights.core.service.QueryInsightsService;
import org.opensearch.plugin.insights.core.service.TopQueriesService;
+import org.opensearch.plugin.insights.rules.model.Attribute;
import org.opensearch.plugin.insights.rules.model.MetricType;
-import org.opensearch.plugin.insights.settings.QueryInsightsSettings;
+import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;
import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.opensearch.search.aggregations.support.ValueType;
import org.opensearch.search.builder.SearchSourceBuilder;
+import org.opensearch.tasks.Task;
import org.opensearch.test.ClusterServiceUtils;
import org.opensearch.test.OpenSearchTestCase;
+import org.opensearch.threadpool.TestThreadPool;
+import org.opensearch.threadpool.ThreadPool;
import org.junit.Before;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
+import java.util.Locale;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Phaser;
+import java.util.concurrent.TimeUnit;
+
+import org.mockito.ArgumentCaptor;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
@@ -48,6 +65,7 @@ public class QueryInsightsListenerTests extends OpenSearchTestCase {
private final SearchRequest searchRequest = mock(SearchRequest.class);
private final QueryInsightsService queryInsightsService = mock(QueryInsightsService.class);
private final TopQueriesService topQueriesService = mock(TopQueriesService.class);
+ private final ThreadPool threadPool = new TestThreadPool("QueryInsightsThreadPool");
private ClusterService clusterService;
@Before
@@ -55,14 +73,25 @@ public void setup() {
Settings.Builder settingsBuilder = Settings.builder();
Settings settings = settingsBuilder.build();
ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
- clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED);
- clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE);
- clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE);
- clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null);
+ QueryInsightsTestUtils.registerAllQueryInsightsSettings(clusterSettings);
+ ClusterState state = ClusterStateCreationUtils.stateWithActivePrimary("test", true, 1 + randomInt(3), randomInt(2));
+ clusterService = ClusterServiceUtils.createClusterService(threadPool, state.getNodes().getLocalNode(), clusterSettings);
+ ClusterServiceUtils.setState(clusterService, state);
when(queryInsightsService.isCollectionEnabled(MetricType.LATENCY)).thenReturn(true);
when(queryInsightsService.getTopQueriesService(MetricType.LATENCY)).thenReturn(topQueriesService);
+
+ ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
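+ // Attach an X-Opaque-Id header to the thread context so the listener records it as a user label on generated records.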
+ threadPool.getThreadContext().setHeaders(new Tuple<>(Collections.singletonMap(Task.X_OPAQUE_ID, "userLabel"), new HashMap<>()));
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ super.tearDown();
+ IOUtils.close(clusterService);
+ ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
}
+ @SuppressWarnings("unchecked")
public void testOnRequestEnd() throws InterruptedException {
Long timestamp = System.currentTimeMillis() - 100L;
SearchType searchType = SearchType.QUERY_THEN_FETCH;
@@ -70,6 +99,14 @@ public void testOnRequestEnd() throws InterruptedException {
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
searchSourceBuilder.aggregation(new TermsAggregationBuilder("agg1").userValueTypeHint(ValueType.STRING).field("type.keyword"));
searchSourceBuilder.size(0);
+ SearchTask task = new SearchTask(
+ 0,
+ "n/a",
+ "n/a",
+ () -> "test",
+ TaskId.EMPTY_TASK_ID,
+ Collections.singletonMap(Task.X_OPAQUE_ID, "userLabel")
+ );
String[] indices = new String[] { "index-1", "index-2" };
@@ -89,10 +126,19 @@ public void testOnRequestEnd() throws InterruptedException {
when(searchRequestContext.phaseTookMap()).thenReturn(phaseLatencyMap);
when(searchPhaseContext.getRequest()).thenReturn(searchRequest);
when(searchPhaseContext.getNumShards()).thenReturn(numberOfShards);
+ when(searchPhaseContext.getTask()).thenReturn(task);
+ ArgumentCaptor<SearchQueryRecord> captor = ArgumentCaptor.forClass(SearchQueryRecord.class);
queryInsightsListener.onRequestEnd(searchPhaseContext, searchRequestContext);
- verify(queryInsightsService, times(1)).addRecord(any());
+ verify(queryInsightsService, times(1)).addRecord(captor.capture());
+ SearchQueryRecord generatedRecord = captor.getValue();
+ assertEquals(timestamp.longValue(), generatedRecord.getTimestamp());
+ assertEquals(numberOfShards, generatedRecord.getAttributes().get(Attribute.TOTAL_SHARDS));
+ assertEquals(searchType.toString().toLowerCase(Locale.ROOT), generatedRecord.getAttributes().get(Attribute.SEARCH_TYPE));
+ assertEquals(searchSourceBuilder.toString(), generatedRecord.getAttributes().get(Attribute.SOURCE));
+ Map<String, Object> labels = (Map<String, Object>) generatedRecord.getAttributes().get(Attribute.LABELS);
+ assertEquals("userLabel", labels.get(Task.X_OPAQUE_ID));
}
public void testConcurrentOnRequestEnd() throws InterruptedException {
@@ -102,6 +148,14 @@ public void testConcurrentOnRequestEnd() throws InterruptedException {
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
searchSourceBuilder.aggregation(new TermsAggregationBuilder("agg1").userValueTypeHint(ValueType.STRING).field("type.keyword"));
searchSourceBuilder.size(0);
+ SearchTask task = new SearchTask(
+ 0,
+ "n/a",
+ "n/a",
+ () -> "test",
+ TaskId.EMPTY_TASK_ID,
+ Collections.singletonMap(Task.X_OPAQUE_ID, "userLabel")
+ );
String[] indices = new String[] { "index-1", "index-2" };
@@ -121,6 +175,7 @@ public void testConcurrentOnRequestEnd() throws InterruptedException {
when(searchRequestContext.phaseTookMap()).thenReturn(phaseLatencyMap);
when(searchPhaseContext.getRequest()).thenReturn(searchRequest);
when(searchPhaseContext.getNumShards()).thenReturn(numberOfShards);
+ when(searchPhaseContext.getTask()).thenReturn(task);
int numRequests = 50;
Thread[] threads = new Thread[numRequests];
@@ -155,7 +210,7 @@ public void testSetEnabled() {
when(queryInsightsService.isCollectionEnabled(MetricType.LATENCY)).thenReturn(false);
when(queryInsightsService.isCollectionEnabled(MetricType.CPU)).thenReturn(false);
- when(queryInsightsService.isCollectionEnabled(MetricType.JVM)).thenReturn(false);
+ when(queryInsightsService.isCollectionEnabled(MetricType.MEMORY)).thenReturn(false);
queryInsightsListener.setEnableTopQueries(MetricType.LATENCY, false);
assertFalse(queryInsightsListener.isEnabled());
}
diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java
index c29b48b9690d1..75a5768f50681 100644
--- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java
+++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java
@@ -8,6 +8,9 @@
package org.opensearch.plugin.insights.core.service;
+import org.opensearch.client.Client;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Settings;
import org.opensearch.plugin.insights.QueryInsightsTestUtils;
import org.opensearch.plugin.insights.rules.model.MetricType;
import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;
@@ -23,14 +26,19 @@
*/
public class QueryInsightsServiceTests extends OpenSearchTestCase {
private final ThreadPool threadPool = mock(ThreadPool.class);
+ private final Client client = mock(Client.class);
private QueryInsightsService queryInsightsService;
@Before
public void setup() {
- queryInsightsService = new QueryInsightsService(threadPool);
+ Settings.Builder settingsBuilder = Settings.builder();
+ Settings settings = settingsBuilder.build();
+ ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+ QueryInsightsTestUtils.registerAllQueryInsightsSettings(clusterSettings);
+ queryInsightsService = new QueryInsightsService(clusterSettings, threadPool, client);
queryInsightsService.enableCollection(MetricType.LATENCY, true);
queryInsightsService.enableCollection(MetricType.CPU, true);
- queryInsightsService.enableCollection(MetricType.JVM, true);
+ queryInsightsService.enableCollection(MetricType.MEMORY, true);
}
public void testAddRecordToLimitAndDrain() {
@@ -46,4 +54,12 @@ public void testAddRecordToLimitAndDrain() {
queryInsightsService.getTopQueriesService(MetricType.LATENCY).getTopQueriesRecords(false).size()
);
}
+
+ public void testClose() {
+ try {
+ queryInsightsService.doClose();
+ } catch (Exception e) {
+ fail("No exception expected when closing query insights service");
+ }
+ }
}
diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java
index 060df84a89485..3efd4c86833cc 100644
--- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java
+++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java
@@ -11,24 +11,30 @@
import org.opensearch.cluster.coordination.DeterministicTaskQueue;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.plugin.insights.QueryInsightsTestUtils;
+import org.opensearch.plugin.insights.core.exporter.QueryInsightsExporterFactory;
import org.opensearch.plugin.insights.rules.model.MetricType;
import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;
import org.opensearch.plugin.insights.settings.QueryInsightsSettings;
import org.opensearch.test.OpenSearchTestCase;
+import org.opensearch.threadpool.ThreadPool;
import org.junit.Before;
import java.util.List;
import java.util.concurrent.TimeUnit;
+import static org.mockito.Mockito.mock;
+
/**
* Unit Tests for {@link QueryInsightsService}.
*/
public class TopQueriesServiceTests extends OpenSearchTestCase {
private TopQueriesService topQueriesService;
+ private final ThreadPool threadPool = mock(ThreadPool.class);
+ private final QueryInsightsExporterFactory queryInsightsExporterFactory = mock(QueryInsightsExporterFactory.class);
@Before
public void setup() {
- topQueriesService = new TopQueriesService(MetricType.LATENCY);
+ topQueriesService = new TopQueriesService(MetricType.LATENCY, threadPool, queryInsightsExporterFactory);
topQueriesService.setTopNSize(Integer.MAX_VALUE);
topQueriesService.setWindowSize(new TimeValue(Long.MAX_VALUE));
topQueriesService.setEnabled(true);
diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java
index 793d5878e2300..ad45b53ec5363 100644
--- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java
+++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java
@@ -39,7 +39,7 @@ public void testSerializationAndEquals() throws Exception {
public void testAllMetricTypes() {
Set allMetrics = MetricType.allMetricTypes();
- Set expected = new HashSet<>(Arrays.asList(MetricType.LATENCY, MetricType.CPU, MetricType.JVM));
+ Set<MetricType> expected = new HashSet<>(Arrays.asList(MetricType.LATENCY, MetricType.CPU, MetricType.MEMORY));
assertEquals(expected, allMetrics);
}
diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle
index ff62c328c7e74..61e9f71712eaf 100644
--- a/plugins/repository-azure/build.gradle
+++ b/plugins/repository-azure/build.gradle
@@ -44,10 +44,11 @@ opensearchplugin {
}
dependencies {
- api 'com.azure:azure-core:1.47.0'
+ api 'com.azure:azure-core:1.49.1'
api 'com.azure:azure-json:1.1.0'
+ api 'com.azure:azure-xml:1.0.0'
api 'com.azure:azure-storage-common:12.21.2'
- api 'com.azure:azure-core-http-netty:1.12.8'
+ api 'com.azure:azure-core-http-netty:1.15.1'
api "io.netty:netty-codec-dns:${versions.netty}"
api "io.netty:netty-codec-socks:${versions.netty}"
api "io.netty:netty-codec-http2:${versions.netty}"
diff --git a/plugins/repository-azure/licenses/azure-core-1.47.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.47.0.jar.sha1
deleted file mode 100644
index 42e35aacc63b1..0000000000000
--- a/plugins/repository-azure/licenses/azure-core-1.47.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6b300175826f0bb0916fca2fa5f70885b716e93f
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1
new file mode 100644
index 0000000000000..d487c08c26e94
--- /dev/null
+++ b/plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1
@@ -0,0 +1 @@
+a7c44282eaa0f5a3be4b920d6a057509adfe8674
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.12.8.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.12.8.jar.sha1
deleted file mode 100644
index e6ee1dec64641..0000000000000
--- a/plugins/repository-azure/licenses/azure-core-http-netty-1.12.8.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-511ed2d02afb0f43f029df3d10ff80d2d3539f05
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.1.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.1.jar.sha1
new file mode 100644
index 0000000000000..3a0747a0daacb
--- /dev/null
+++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.1.jar.sha1
@@ -0,0 +1 @@
+036f7466a521aa99c79a491a9cf20444667df78b
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/azure-xml-1.0.0.jar.sha1 b/plugins/repository-azure/licenses/azure-xml-1.0.0.jar.sha1
new file mode 100644
index 0000000000000..798ec5d95c6ac
--- /dev/null
+++ b/plugins/repository-azure/licenses/azure-xml-1.0.0.jar.sha1
@@ -0,0 +1 @@
+ba584703bd47e9e789343ee3332f0f5a64f7f187
\ No newline at end of file
diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java
index 90143d907cd99..b0582624e21d5 100644
--- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java
+++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java
@@ -23,10 +23,13 @@
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
+import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;
import java.util.stream.Collectors;
+import io.opentelemetry.api.common.AttributeKey;
+import io.opentelemetry.api.common.Attributes;
import io.opentelemetry.sdk.metrics.data.DoublePointData;
import io.opentelemetry.sdk.metrics.data.MetricData;
import io.opentelemetry.sdk.metrics.internal.data.ImmutableExponentialHistogramPointData;
@@ -147,6 +150,36 @@ public void testGauge() throws Exception {
}
+ public void testGaugeWithValueAndTagSupplier() throws Exception {
+ String metricName = "test-gauge";
+ MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class);
+ InMemorySingletonMetricsExporter.INSTANCE.reset();
+ Tags tags = Tags.create().addTag("test", "integ-test");
+ final AtomicInteger testValue = new AtomicInteger(0);
+ Supplier<TaggedMeasurement> valueProvider = () -> {
+ return TaggedMeasurement.create(Double.valueOf(testValue.incrementAndGet()), tags);
+ };
+ Closeable gaugeCloseable = metricsRegistry.createGauge(metricName, "test", "ms", valueProvider);
+ // Sleep for about 2.2s to wait for metrics to be published.
+ Thread.sleep(2200);
+
+ InMemorySingletonMetricsExporter exporter = InMemorySingletonMetricsExporter.INSTANCE;
+
+ assertTrue(getMaxObservableGaugeValue(exporter, metricName) >= 2.0);
+
+ gaugeCloseable.close();
+ double observableGaugeValueAfterStop = getMaxObservableGaugeValue(exporter, metricName);
+
+ Map<AttributeKey<?>, Object> attributes = getMetricAttributes(exporter, metricName);
+
+ assertEquals("integ-test", attributes.get(AttributeKey.stringKey("test")));
+
+ // Sleep for about 1.2s to verify that the closed observableGauge no longer executes the callback.
+ Thread.sleep(1200);
+ assertEquals(observableGaugeValueAfterStop, getMaxObservableGaugeValue(exporter, metricName), 0.0);
+
+ }
+
private static double getMaxObservableGaugeValue(InMemorySingletonMetricsExporter exporter, String metricName) {
List dataPoints = exporter.getFinishedMetricItems()
.stream()
@@ -159,6 +192,15 @@ private static double getMaxObservableGaugeValue(InMemorySingletonMetricsExporte
return totalValue;
}
+ private static Map<AttributeKey<?>, Object> getMetricAttributes(InMemorySingletonMetricsExporter exporter, String metricName) {
+ List<MetricData> dataPoints = exporter.getFinishedMetricItems()
+ .stream()
+ .filter(a -> a.getName().contains(metricName))
+ .collect(Collectors.toList());
+ Attributes attributes = dataPoints.get(0).getDoubleGaugeData().getPoints().stream().findAny().get().getAttributes();
+ return attributes.asMap();
+ }
+
@After
public void reset() {
InMemorySingletonMetricsExporter.INSTANCE.reset();
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java
index 6fe08040d7af5..3258e91738ba6 100644
--- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java
@@ -101,6 +101,17 @@ public Closeable createGauge(String name, String description, String unit, Suppl
return () -> doubleObservableGauge.close();
}
+ @Override
+ public Closeable createGauge(String name, String description, String unit, Supplier<TaggedMeasurement> value) {
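+ // Each callback invocation pulls a fresh TaggedMeasurement from the supplier, recording both its value and its converted tags.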
+ ObservableDoubleGauge doubleObservableGauge = AccessController.doPrivileged(
+ (PrivilegedAction<ObservableDoubleGauge>) () -> otelMeter.gaugeBuilder(name)
+ .setUnit(unit)
+ .setDescription(description)
+ .buildWithCallback(record -> record.record(value.get().getValue(), OTelAttributesConverter.convert(value.get().getTags())))
+ );
+ return () -> doubleObservableGauge.close();
+ }
+
@Override
public void close() throws IOException {
meterProvider.close();
diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java
index 2e89a3c488d5c..794cafc1fb608 100644
--- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java
+++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java
@@ -180,4 +180,34 @@ public void testGauge() throws Exception {
closeable.close();
verify(observableDoubleGauge).close();
}
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ public void testGaugeWithValueAndTagsSupplier() throws Exception {
+ String observableGaugeName = "test-gauge";
+ String description = "test";
+ String unit = "1";
+ Meter mockMeter = mock(Meter.class);
+ OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class);
+ ObservableDoubleGauge observableDoubleGauge = mock(ObservableDoubleGauge.class);
+ DoubleGaugeBuilder mockOTelDoubleGaugeBuilder = mock(DoubleGaugeBuilder.class);
+ MeterProvider meterProvider = mock(MeterProvider.class);
+ when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter);
+ MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry(
+ new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}),
+ meterProvider
+ );
+ when(mockMeter.gaugeBuilder(Mockito.contains(observableGaugeName))).thenReturn(mockOTelDoubleGaugeBuilder);
+ when(mockOTelDoubleGaugeBuilder.setDescription(description)).thenReturn(mockOTelDoubleGaugeBuilder);
+ when(mockOTelDoubleGaugeBuilder.setUnit(unit)).thenReturn(mockOTelDoubleGaugeBuilder);
+ when(mockOTelDoubleGaugeBuilder.buildWithCallback(any(Consumer.class))).thenReturn(observableDoubleGauge);
+
+ Closeable closeable = metricsTelemetry.createGauge(
+ observableGaugeName,
+ description,
+ unit,
+ () -> TaggedMeasurement.create(1.0, Tags.EMPTY)
+ );
+ closeable.close();
+ verify(observableDoubleGauge).close();
+ }
}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpChunk.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpChunk.java
new file mode 100644
index 0000000000000..3b4a308691e7b
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpChunk.java
@@ -0,0 +1,49 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.http.reactor.netty4;
+
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.http.HttpChunk;
+import org.opensearch.transport.reactor.netty4.Netty4Utils;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import io.netty.buffer.ByteBuf;
+
+class ReactorNetty4HttpChunk implements HttpChunk {
+ private final AtomicBoolean released;
+ private final boolean pooled;
+ private final ByteBuf content;
+ private final boolean last;
+
+ ReactorNetty4HttpChunk(ByteBuf content, boolean last) {
+ this.content = content;
+ this.pooled = true;
+ this.released = new AtomicBoolean(false);
+ this.last = last;
+ }
+
+ @Override
+ public BytesReference content() {
+ assert released.get() == false;
+ return Netty4Utils.toBytesReference(content);
+ }
+
+ @Override
+ public void close() {
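+ // Release the pooled buffer at most once, even if close() is invoked multiple times.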
+ if (pooled && released.compareAndSet(false, true)) {
+ content.release();
+ }
+ }
+
+ @Override
+ public boolean isLast() {
+ return last;
+ }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java
index 4406c555a5b04..491c7aa885103 100644
--- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java
@@ -44,6 +44,10 @@ class ReactorNetty4HttpRequest implements HttpRequest {
private final Exception inboundException;
private final boolean pooled;
+ ReactorNetty4HttpRequest(HttpServerRequest request) {
+ this(request, new HttpHeadersMap(request.requestHeaders()), new AtomicBoolean(false), false, Unpooled.EMPTY_BUFFER);
+ }
+
ReactorNetty4HttpRequest(HttpServerRequest request, ByteBuf content) {
this(request, new HttpHeadersMap(request.requestHeaders()), new AtomicBoolean(false), true, content);
}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java
index bd1646d753016..906bbfd072da8 100644
--- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java
@@ -26,6 +26,8 @@
import org.opensearch.http.HttpServerChannel;
import org.opensearch.http.reactor.netty4.ssl.SslUtils;
import org.opensearch.plugins.SecureHttpTransportSettingsProvider;
+import org.opensearch.rest.RestHandler;
+import org.opensearch.rest.RestRequest.Method;
import org.opensearch.telemetry.tracing.Tracer;
import org.opensearch.threadpool.ThreadPool;
import org.opensearch.transport.reactor.SharedGroupFactory;
@@ -40,6 +42,7 @@
import java.time.Duration;
import java.util.Arrays;
import java.util.List;
+import java.util.Optional;
import io.netty.buffer.ByteBufAllocator;
import io.netty.channel.ChannelOption;
@@ -351,24 +354,45 @@ public List protocols() {
* @return response publisher
*/
protected Publisher incomingRequest(HttpServerRequest request, HttpServerResponse response) {
- final NonStreamingRequestConsumer consumer = new NonStreamingRequestConsumer<>(
- this,
- request,
- response,
- maxCompositeBufferComponents
+ final Method method = HttpConversionUtil.convertMethod(request.method());
+ final Optional<RestHandler> dispatchHandlerOpt = dispatcher.dispatchHandler(
+ request.uri(),
+ request.fullPath(),
+ method,
+ request.params()
);
+ if (dispatchHandlerOpt.map(RestHandler::supportsStreaming).orElse(false)) {
+ final ReactorNetty4StreamingRequestConsumer<HttpContent> consumer = new ReactorNetty4StreamingRequestConsumer<>(
+ request,
+ response
+ );
+
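+ // Ensure a terminal LastHttpContent is delivered even for empty request bodies so the streaming channel completes.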
+ request.receiveContent()
+ .switchIfEmpty(Mono.just(DefaultLastHttpContent.EMPTY_LAST_CONTENT))
+ .subscribe(consumer, error -> {}, () -> consumer.accept(DefaultLastHttpContent.EMPTY_LAST_CONTENT));
+
+ incomingStream(new ReactorNetty4HttpRequest(request), consumer.httpChannel());
+ return response.sendObject(consumer);
+ } else {
+ final ReactorNetty4NonStreamingRequestConsumer<HttpContent> consumer = new ReactorNetty4NonStreamingRequestConsumer<>(
+ this,
+ request,
+ response,
+ maxCompositeBufferComponents
+ );
- request.receiveContent().switchIfEmpty(Mono.just(DefaultLastHttpContent.EMPTY_LAST_CONTENT)).subscribe(consumer);
-
- return Mono.from(consumer).flatMap(hc -> {
- final FullHttpResponse r = (FullHttpResponse) hc;
- response.status(r.status());
- response.trailerHeaders(c -> r.trailingHeaders().forEach(h -> c.add(h.getKey(), h.getValue())));
- response.chunkedTransfer(false);
- response.compression(true);
- r.headers().forEach(h -> response.addHeader(h.getKey(), h.getValue()));
- return Mono.from(response.sendObject(r.content()));
- });
+ request.receiveContent().switchIfEmpty(Mono.just(DefaultLastHttpContent.EMPTY_LAST_CONTENT)).subscribe(consumer);
+
+ return Mono.from(consumer).flatMap(hc -> {
+ final FullHttpResponse r = (FullHttpResponse) hc;
+ response.status(r.status());
+ response.trailerHeaders(c -> r.trailingHeaders().forEach(h -> c.add(h.getKey(), h.getValue())));
+ response.chunkedTransfer(false);
+ response.compression(true);
+ r.headers().forEach(h -> response.addHeader(h.getKey(), h.getValue()));
+ return Mono.from(response.sendObject(r.content()));
+ });
+ }
}
/**
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingHttpChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java
similarity index 92%
rename from plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingHttpChannel.java
rename to plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java
index 98b359319ff1b..7df0b3c0c35fe 100644
--- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingHttpChannel.java
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java
@@ -23,13 +23,13 @@
import reactor.netty.http.server.HttpServerRequest;
import reactor.netty.http.server.HttpServerResponse;
-class NonStreamingHttpChannel implements HttpChannel {
+class ReactorNetty4NonStreamingHttpChannel implements HttpChannel {
private final HttpServerRequest request;
private final HttpServerResponse response;
private final CompletableContext closeContext = new CompletableContext<>();
private final FluxSink emitter;
- NonStreamingHttpChannel(HttpServerRequest request, HttpServerResponse response, FluxSink emitter) {
+ ReactorNetty4NonStreamingHttpChannel(HttpServerRequest request, HttpServerResponse response, FluxSink emitter) {
this.request = request;
this.response = response;
this.emitter = emitter;
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingRequestConsumer.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingRequestConsumer.java
similarity index 89%
rename from plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingRequestConsumer.java
rename to plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingRequestConsumer.java
index d43e23e800e65..c09e7755b1670 100644
--- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingRequestConsumer.java
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingRequestConsumer.java
@@ -25,7 +25,7 @@
import reactor.netty.http.server.HttpServerRequest;
import reactor.netty.http.server.HttpServerResponse;
-class NonStreamingRequestConsumer implements Consumer, Publisher, Disposable {
+class ReactorNetty4NonStreamingRequestConsumer implements Consumer, Publisher, Disposable {
private final HttpServerRequest request;
private final HttpServerResponse response;
private final CompositeByteBuf content;
@@ -34,7 +34,7 @@ class NonStreamingRequestConsumer implements Consumer,
private final AtomicBoolean disposed = new AtomicBoolean(false);
private volatile FluxSink emitter;
- NonStreamingRequestConsumer(
+ ReactorNetty4NonStreamingRequestConsumer(
AbstractHttpServerTransport transport,
HttpServerRequest request,
HttpServerResponse response,
@@ -64,12 +64,12 @@ public void accept(T message) {
}
}
- public void process(HttpContent in, FluxSink emitter) {
+ void process(HttpContent in, FluxSink emitter) {
// Consume request body in full before dispatching it
content.addComponent(true, in.content().retain());
if (in instanceof LastHttpContent) {
- final NonStreamingHttpChannel channel = new NonStreamingHttpChannel(request, response, emitter);
+ final ReactorNetty4NonStreamingHttpChannel channel = new ReactorNetty4NonStreamingHttpChannel(request, response, emitter);
final HttpRequest r = createRequest(request, content);
try {
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java
new file mode 100644
index 0000000000000..56dadea0477c5
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java
@@ -0,0 +1,132 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.http.reactor.netty4;
+
+import org.opensearch.common.concurrent.CompletableContext;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.http.HttpChunk;
+import org.opensearch.http.HttpResponse;
+import org.opensearch.http.StreamingHttpChannel;
+import org.opensearch.transport.reactor.netty4.Netty4Utils;
+
+import java.net.InetSocketAddress;
+import java.util.List;
+import java.util.Map;
+
+import io.netty.buffer.Unpooled;
+import io.netty.handler.codec.http.DefaultHttpContent;
+import io.netty.handler.codec.http.FullHttpResponse;
+import io.netty.handler.codec.http.HttpContent;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.Subscriber;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.FluxSink;
+import reactor.netty.http.server.HttpServerRequest;
+import reactor.netty.http.server.HttpServerResponse;
+
+class ReactorNetty4StreamingHttpChannel implements StreamingHttpChannel {
+ private final HttpServerRequest request;
+ private final HttpServerResponse response;
+ private final CompletableContext<Void> closeContext = new CompletableContext<>();
+ private final Publisher<HttpChunk> receiver;
+ private final StreamingHttpContentSender sender;
+ private volatile FluxSink<HttpChunk> producer;
+ private volatile boolean lastChunkReceived = false;
+
+ ReactorNetty4StreamingHttpChannel(HttpServerRequest request, HttpServerResponse response, StreamingHttpContentSender sender) {
+ this.request = request;
+ this.response = response;
+ this.sender = sender;
+ this.receiver = Flux.create(producer -> this.producer = producer);
+ this.request.withConnection(connection -> Netty4Utils.addListener(connection.channel().closeFuture(), closeContext));
+ }
+
+ @Override
+ public boolean isOpen() {
+ return true;
+ }
+
+ @Override
+ public void close() {
+ request.withConnection(connection -> connection.channel().close());
+ }
+
+ @Override
+ public void addCloseListener(ActionListener<Void> listener) {
+ closeContext.addListener(ActionListener.toBiConsumer(listener));
+ }
+
+ @Override
+ public void sendChunk(HttpChunk chunk, ActionListener<Void> listener) {
+ sender.send(createContent(chunk), listener, chunk.isLast());
+ }
+
+ @Override
+ public void sendResponse(HttpResponse response, ActionListener<Void> listener) {
+ sender.send(createContent(response), listener, true);
+ }
+
+ @Override
+ public void prepareResponse(int status, Map<String, List<String>> headers) {
+ this.response.status(status);
+ headers.forEach((k, vs) -> vs.forEach(v -> this.response.addHeader(k, v)));
+ }
+
+ @Override
+ public InetSocketAddress getRemoteAddress() {
+ return (InetSocketAddress) response.remoteAddress();
+ }
+
+ @Override
+ public InetSocketAddress getLocalAddress() {
+ return (InetSocketAddress) response.hostAddress();
+ }
+
+ @Override
+ public void receiveChunk(HttpChunk message) {
+ try {
+ if (lastChunkReceived) {
+ return;
+ }
+
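+ // Forward the chunk to downstream subscribers and complete the stream once the last chunk arrives.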
+ producer.next(message);
+ if (message.isLast()) {
+ lastChunkReceived = true;
+ producer.complete();
+ }
+ } finally {
+ message.close();
+ }
+ }
+
+ @Override
+ public boolean isReadable() {
+ return producer != null;
+ }
+
+ @Override
+ public boolean isWritable() {
+ return sender.isReady();
+ }
+
+ @Override
+ public void subscribe(Subscriber<? super HttpChunk> subscriber) {
+ receiver.subscribe(subscriber);
+ }
+
+ private static HttpContent createContent(HttpResponse response) {
+ final FullHttpResponse fullHttpResponse = (FullHttpResponse) response;
+ return new DefaultHttpContent(fullHttpResponse.content());
+ }
+
+ private static HttpContent createContent(HttpChunk chunk) {
+ return new DefaultHttpContent(Unpooled.copiedBuffer(BytesReference.toByteBuffers(chunk.content())));
+ }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java
new file mode 100644
index 0000000000000..f34f54e561021
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java
@@ -0,0 +1,53 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.http.reactor.netty4;
+
+import org.opensearch.http.HttpChunk;
+import org.opensearch.http.StreamingHttpChannel;
+
+import java.util.function.Consumer;
+
+import io.netty.handler.codec.http.HttpContent;
+import io.netty.handler.codec.http.LastHttpContent;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.Subscriber;
+import reactor.netty.http.server.HttpServerRequest;
+import reactor.netty.http.server.HttpServerResponse;
+
+class ReactorNetty4StreamingRequestConsumer<T extends HttpContent> implements Consumer<T>, Publisher<HttpContent> {
+ private final ReactorNetty4StreamingResponseProducer sender;
+ private final StreamingHttpChannel httpChannel;
+
+ ReactorNetty4StreamingRequestConsumer(HttpServerRequest request, HttpServerResponse response) {
+ this.sender = new ReactorNetty4StreamingResponseProducer();
+ this.httpChannel = new ReactorNetty4StreamingHttpChannel(request, response, sender);
+ }
+
+ @Override
+ public void accept(T message) {
+ if (message instanceof LastHttpContent) {
+ httpChannel.receiveChunk(createChunk(message, true));
+ } else if (message instanceof HttpContent) {
+ httpChannel.receiveChunk(createChunk(message, false));
+ }
+ }
+
+ @Override
+ public void subscribe(Subscriber<? super HttpContent> s) {
+ sender.subscribe(s);
+ }
+
+ HttpChunk createChunk(HttpContent chunk, boolean last) {
+ return new ReactorNetty4HttpChunk(chunk.content().retain(), last);
+ }
+
+ StreamingHttpChannel httpChannel() {
+ return httpChannel;
+ }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java
new file mode 100644
index 0000000000000..616edccdfc396
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java
@@ -0,0 +1,54 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.http.reactor.netty4;
+
+import org.opensearch.core.action.ActionListener;
+
+import io.netty.handler.codec.http.HttpContent;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.Subscriber;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.FluxSink;
+
+class ReactorNetty4StreamingResponseProducer implements StreamingHttpContentSender, Publisher<HttpContent> {
+ private final Publisher<HttpContent> sender;
+ private volatile FluxSink<HttpContent> emitter;
+
+ ReactorNetty4StreamingResponseProducer() {
+ this.sender = Flux.create(emitter -> this.emitter = emitter);
+ }
+
+ @Override
+ public void send(HttpContent content, ActionListener<Void> listener, boolean isLast) {
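+ // Emit the chunk to the response flux and acknowledge the listener; the flux completes after the last chunk.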
+ try {
+ emitter.next(content);
+ listener.onResponse(null);
+ if (isLast) {
+ emitter.complete();
+ }
+ } catch (final Exception ex) {
+ emitter.error(ex);
+ listener.onFailure(ex);
+ }
+ }
+
+ @Override
+ public void subscribe(Subscriber<? super HttpContent> s) {
+ sender.subscribe(s);
+ }
+
+ @Override
+ public boolean isReady() {
+ return emitter != null;
+ }
+
+ FluxSink<HttpContent> emitter() {
+ return emitter;
+ }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/StreamingHttpContentSender.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/StreamingHttpContentSender.java
new file mode 100644
index 0000000000000..f07d6fbb88349
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/StreamingHttpContentSender.java
@@ -0,0 +1,32 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.http.reactor.netty4;
+
+import org.opensearch.core.action.ActionListener;
+
+import io.netty.handler.codec.http.HttpContent;
+
+/**
+ * The generic interface for chunked {@link HttpContent} producers (response streaming).
+ */
+interface StreamingHttpContentSender {
+ /**
+ * Sends the next {@link HttpContent} over the wire
+ * @param content next {@link HttpContent}
+ * @param listener action listener
+ * @param isLast {@code true} if this is the last chunk, {@code false} otherwise
+ */
+ void send(HttpContent content, ActionListener<Void> listener, boolean isLast);
+
+ /**
+ * Returns {@code true} if this channel is ready for streaming response data, {@code false} otherwise
+ * @return {@code true} if this channel is ready for streaming response data, {@code false} otherwise
+ */
+ boolean isReady();
+}
diff --git a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java
index 920c895205023..0953e51484bd3 100644
--- a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java
+++ b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java
@@ -14,16 +14,22 @@
package org.opensearch.http.reactor.netty4;
import org.opensearch.common.collect.Tuple;
+import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.XContentBuilder;
import org.opensearch.tasks.Task;
import org.opensearch.test.OpenSearchTestCase;
import java.io.Closeable;
+import java.io.IOException;
+import java.io.UncheckedIOException;
import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
+import java.util.stream.Stream;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
@@ -36,6 +42,7 @@
import io.netty.handler.codec.http.HttpContent;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpMethod;
+import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpResponse;
import io.netty.handler.codec.http.HttpVersion;
import io.netty.handler.codec.http2.HttpConversionUtil;
@@ -121,6 +128,11 @@ public final FullHttpResponse send(InetSocketAddress remoteAddress, FullHttpRequ
return responses.get(0);
}
+ public final FullHttpResponse stream(InetSocketAddress remoteAddress, HttpRequest httpRequest, Stream<ToXContent> stream)
+ throws InterruptedException {
+ return sendRequestStream(remoteAddress, httpRequest, stream);
+ }
+
public final FullHttpResponse send(InetSocketAddress remoteAddress, FullHttpRequest httpRequest, HttpContent content)
throws InterruptedException {
final List responses = sendRequests(
@@ -207,6 +219,46 @@ private List sendRequests(
}
}
+ private FullHttpResponse sendRequestStream(
+ final InetSocketAddress remoteAddress,
+ final HttpRequest request,
+ final Stream<ToXContent> stream
+ ) {
+ final NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup(1);
+ try {
+ final HttpClient client = createClient(remoteAddress, eventLoopGroup);
+
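+ // Serialize each ToXContent element to JSON and send it as an individual request body chunk.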
+ return client.headers(h -> h.add(request.headers()))
+ .baseUrl(request.getUri())
+ .request(request.method())
+ .send(Flux.fromStream(stream).map(s -> {
+ try (XContentBuilder builder = XContentType.JSON.contentBuilder()) {
+ return Unpooled.wrappedBuffer(
+ s.toXContent(builder, ToXContent.EMPTY_PARAMS).toString().getBytes(StandardCharsets.UTF_8)
+ );
+ } catch (final IOException ex) {
+ throw new UncheckedIOException(ex);
+ }
+ }))
+ .response(
+ (r, c) -> c.aggregate()
+ .map(
+ b -> new DefaultFullHttpResponse(
+ r.version(),
+ r.status(),
+ b.retain(),
+ r.responseHeaders(),
+ EmptyHttpHeaders.INSTANCE
+ )
+ )
+ )
+ .blockLast();
+
+ } finally {
+ eventLoopGroup.shutdownGracefully().awaitUninterruptibly();
+ }
+ }
+
private HttpClient createClient(final InetSocketAddress remoteAddress, final NioEventLoopGroup eventLoopGroup) {
final HttpClient client = HttpClient.newConnection()
.resolver(DefaultAddressResolverGroup.INSTANCE)
diff --git a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportStreamingTests.java b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportStreamingTests.java
new file mode 100644
index 0000000000000..a7bf71e58e9b6
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportStreamingTests.java
@@ -0,0 +1,211 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.http.reactor.netty4;
+
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.opensearch.client.node.NodeClient;
+import org.opensearch.common.lease.Releasable;
+import org.opensearch.common.network.NetworkService;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.MockBigArrays;
+import org.opensearch.common.util.MockPageCacheRecycler;
+import org.opensearch.common.util.concurrent.ThreadContext;
+import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.common.xcontent.support.XContentHttpChunk;
+import org.opensearch.core.common.transport.TransportAddress;
+import org.opensearch.core.indices.breaker.NoneCircuitBreakerService;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.http.HttpServerTransport;
+import org.opensearch.rest.RestChannel;
+import org.opensearch.rest.RestHandler;
+import org.opensearch.rest.RestRequest;
+import org.opensearch.rest.RestRequest.Method;
+import org.opensearch.rest.StreamingRestChannel;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
+import org.opensearch.test.OpenSearchTestCase;
+import org.opensearch.test.rest.FakeRestRequest;
+import org.opensearch.threadpool.TestThreadPool;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.reactor.SharedGroupFactory;
+import org.junit.After;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.nio.charset.StandardCharsets;
+import java.time.Duration;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import io.netty.handler.codec.http.DefaultHttpRequest;
+import io.netty.handler.codec.http.FullHttpResponse;
+import io.netty.handler.codec.http.HttpMethod;
+import io.netty.handler.codec.http.HttpRequest;
+import io.netty.handler.codec.http.HttpResponseStatus;
+import io.netty.handler.codec.http.HttpVersion;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for the {@link ReactorNetty4HttpServerTransport} class with streaming support.
+ */
+public class ReactorNetty4HttpServerTransportStreamingTests extends OpenSearchTestCase {
+ private static final Function<String, ToXContent> XCONTENT_CONVERTER = (str) -> new ToXContent() {
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
+ return builder.startObject().field("doc", str).endObject();
+ }
+ };
+
+ private NetworkService networkService;
+ private ThreadPool threadPool;
+ private MockBigArrays bigArrays;
+ private ClusterSettings clusterSettings;
+
+ @Before
+ public void setup() throws Exception {
+ networkService = new NetworkService(Collections.emptyList());
+ threadPool = new TestThreadPool("test");
+ bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
+ clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+ }
+
+ @After
+ public void shutdown() throws Exception {
+ if (threadPool != null) {
+ threadPool.shutdownNow();
+ }
+ threadPool = null;
+ networkService = null;
+ bigArrays = null;
+ clusterSettings = null;
+ }
+
+ public void testRequestResponseStreaming() throws InterruptedException {
+ final String responseString = randomAlphaOfLength(4 * 1024);
+ final String url = "/stream/";
+
+ final ToXContent[] chunks = newChunks(responseString);
+ final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
+ @Override
+ public Optional<RestHandler> dispatchHandler(String uri, String rawPath, Method method, Map<String, String> params) {
+ return Optional.of(new RestHandler() {
+ @Override
+ public boolean supportsStreaming() {
+ return true;
+ }
+
+ @Override
+ public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception {
+ logger.error("--> Unexpected request [{}]", request.uri());
+ throw new AssertionError();
+ }
+ });
+ }
+
+ @Override
+ public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
+ if (url.equals(request.uri())) {
+ assertThat(channel, instanceOf(StreamingRestChannel.class));
+ final StreamingRestChannel streamingChannel = (StreamingRestChannel) channel;
+
+ // Await at most 5 seconds till channel is ready for writing the response stream, fail otherwise
+ final Mono<?> ready = Mono.fromRunnable(() -> {
+ while (!streamingChannel.isWritable()) {
+ Thread.onSpinWait();
+ }
+ }).timeout(Duration.ofSeconds(5));
+
+ threadPool.executor(ThreadPool.Names.WRITE)
+ .execute(() -> Flux.concat(Flux.fromArray(newChunks(responseString)).map(e -> {
+ try (XContentBuilder builder = channel.newBuilder(XContentType.JSON, true)) {
+ return XContentHttpChunk.from(e.toXContent(builder, ToXContent.EMPTY_PARAMS));
+ } catch (final IOException ex) {
+ throw new UncheckedIOException(ex);
+ }
+ }), Mono.just(XContentHttpChunk.last()))
+ .delaySubscription(ready)
+ .subscribe(streamingChannel::sendChunk, null, () -> {
+ if (channel.bytesOutput() instanceof Releasable) {
+ ((Releasable) channel.bytesOutput()).close();
+ }
+ }));
+ } else {
+ logger.error("--> Unexpected successful uri [{}]", request.uri());
+ throw new AssertionError();
+ }
+ }
+
+ @Override
+ public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
+ logger.error(
+ new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())),
+ cause
+ );
+ throw new AssertionError();
+ }
+
+ };
+
+ try (
+ ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport(
+ Settings.EMPTY,
+ networkService,
+ bigArrays,
+ threadPool,
+ xContentRegistry(),
+ dispatcher,
+ clusterSettings,
+ new SharedGroupFactory(Settings.EMPTY),
+ NoopTracer.INSTANCE
+ )
+ ) {
+ transport.start();
+ final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
+
+ try (ReactorHttpClient client = ReactorHttpClient.create(false)) {
+ HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url);
+ final FullHttpResponse response = client.stream(remoteAddress.address(), request, Arrays.stream(chunks));
+ try {
+ assertThat(response.status(), equalTo(HttpResponseStatus.OK));
+ byte[] bytes = new byte[response.content().readableBytes()];
+ response.content().readBytes(bytes);
+ assertThat(new String(bytes, StandardCharsets.UTF_8), equalTo(Arrays.stream(newChunks(responseString)).map(s -> {
+ try (XContentBuilder builder = XContentType.JSON.contentBuilder()) {
+ return s.toXContent(builder, ToXContent.EMPTY_PARAMS).toString();
+ } catch (final IOException ex) {
+ throw new UncheckedIOException(ex);
+ }
+ }).collect(Collectors.joining(""))));
+ } finally {
+ response.release();
+ }
+ }
+ }
+ }
+
+ private static ToXContent[] newChunks(final String responseString) {
+ final ToXContent[] chunks = new ToXContent[responseString.length() / 16];
+
+ for (int chunk = 0; chunk < responseString.length(); chunk += 16) {
+ chunks[chunk / 16] = XCONTENT_CONVERTER.apply(responseString.substring(chunk, chunk + 16));
+ }
+
+ return chunks;
+ }
+}
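In this test, each 16-character slice of the random response string is rendered as a standalone JSON object by XCONTENT_CONVERTER, so the aggregated body the final assertion expects is simply those objects concatenated back to back with no separators, roughly:

{"doc":"<first 16 chars>"}{"doc":"<next 16 chars>"}...

(the placeholders stand for the random slices produced by newChunks).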
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml
index fa71137912a91..996c2aae8cfe4 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml
@@ -658,6 +658,7 @@ setup:
settings:
number_of_replicas: 0
number_of_shards: 1
+ refresh_interval: -1
mappings:
properties:
date:
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml
index 3a0099dae3b33..78e2e6858c6ff 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml
@@ -1083,6 +1083,7 @@ setup:
settings:
number_of_replicas: 0
number_of_shards: 1
+ refresh_interval: -1
mappings:
properties:
date:
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/330_auto_date_histogram.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/330_auto_date_histogram.yml
index 1356eac41ae79..fc82517788c91 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/330_auto_date_histogram.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/330_auto_date_histogram.yml
@@ -106,8 +106,36 @@ setup:
version: " - 2.99.99"
reason: debug info for filter rewrite added in 3.0.0 (to be backported to 2.14.0)
+ - do:
+ indices.create:
+ index: test_profile
+ body:
+ settings:
+ number_of_shards: 1
+ number_of_replicas: 0
+ refresh_interval: -1
+ mappings:
+ properties:
+ date:
+ type: date
+
+ - do:
+ bulk:
+ index: test_profile
+ refresh: true
+ body:
+ - '{"index": {}}'
+ - '{"date": "2020-03-01", "v": 1}'
+ - '{"index": {}}'
+ - '{"date": "2020-03-02", "v": 2}'
+ - '{"index": {}}'
+ - '{"date": "2020-03-08", "v": 3}'
+ - '{"index": {}}'
+ - '{"date": "2020-03-09", "v": 4}'
+
- do:
search:
+ index: test_profile
body:
profile: true
size: 0
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml
new file mode 100644
index 0000000000000..05b6b2e5ed712
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml
@@ -0,0 +1,229 @@
+setup:
+ - skip:
+ version: " - 2.99.99"
+ reason: "Added in 2.15, but need to skip pre-3.0 before backport"
+
+ - do:
+ indices.create:
+ index: test
+ body:
+ mappings:
+ properties:
+ my_field:
+ type: wildcard
+ fields:
+ lower:
+ type: wildcard
+ normalizer: lowercase
+ doc_values:
+ type: wildcard
+ doc_values: true
+
+ - do:
+ index:
+ index: test
+ id: 1
+ body:
+ my_field: "org.opensearch.transport.NodeDisconnectedException: [node_s0][127.0.0.1:39953][disconnected] disconnected"
+ - do:
+ index:
+ index: test
+ id: 2
+ body:
+ my_field: "[2024-06-08T06:31:37,443][INFO ][o.o.c.c.Coordinator ] [node_s2] cluster-manager node [{node_s0}{Nj7FjR7hRP2lh_zur8KN_g}{OTGOoWmmSsWP_RQ3tIKJ9g}{127.0.0.1}{127.0.0.1:39953}{imr}{shard_indexing_pressure_enabled=true}] failed, restarting discovery"
+
+ - do:
+ index:
+ index: test
+ id: 3
+ body:
+ my_field: "[2024-06-08T06:31:37,451][INFO ][o.o.c.s.ClusterApplierService] [node_s2] cluster-manager node changed {previous [{node_s0}{Nj7FjR7hRP2lh_zur8KN_g}{OTGOoWmmSsWP_RQ3tIKJ9g}{127.0.0.1}{127.0.0.1:39953}{imr}{shard_indexing_pressure_enabled=true}], current []}, term: 1, version: 24, reason: becoming candidate: onLeaderFailure"
+ - do:
+ index:
+ index: test
+ id: 4
+ body:
+ my_field: "[2024-06-08T06:31:37,452][WARN ][o.o.c.NodeConnectionsService] [node_s1] failed to connect to {node_s0}{Nj7FjR7hRP2lh_zur8KN_g}{OTGOoWmmSsWP_RQ3tIKJ9g}{127.0.0.1}{127.0.0.1:39953}{imr}{shard_indexing_pressure_enabled=true} (tried [1] times)"
+ - do:
+ index:
+ index: test
+ id: 5
+ body:
+ my_field: "AbCd"
+ - do:
+ index:
+ index: test
+ id: 6
+ body:
+ other_field: "test"
+ - do:
+ indices.refresh: {}
+
+---
+"term query matches exact value":
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ term:
+ my_field: "AbCd"
+ - match: { hits.total.value: 1 }
+ - match: { hits.hits.0._id: "5" }
+
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ term:
+ my_field.doc_values: "AbCd"
+ - match: { hits.total.value: 1 }
+ - match: { hits.hits.0._id: "5" }
+
+---
+"term query matches lowercase-normalized value":
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ term:
+ my_field.lower: "abcd"
+ - match: { hits.total.value: 1 }
+ - match: { hits.hits.0._id: "5" }
+
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ term:
+ my_field.lower: "ABCD"
+ - match: { hits.total.value: 1 }
+ - match: { hits.hits.0._id: "5" }
+
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ term:
+ my_field: "abcd"
+ - match: { hits.total.value: 0 }
+
+---
+"wildcard query matches":
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ wildcard:
+ my_field:
+ value: "*Node*Exception*"
+ - match: { hits.total.value: 1 }
+ - match: { hits.hits.0._id: "1" }
+
+---
+"wildcard query matches lowercase-normalized field":
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ wildcard:
+ my_field.lower:
+ value: "*node*exception*"
+ - match: { hits.total.value: 1 }
+ - match: { hits.hits.0._id: "1" }
+
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ wildcard:
+ my_field.lower:
+ value: "*NODE*EXCEPTION*"
+ - match: { hits.total.value: 1 }
+ - match: { hits.hits.0._id: "1" }
+
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ wildcard:
+ my_field:
+ value: "*node*exception*"
+ - match: { hits.total.value: 0 }
+
+---
+"prefix query matches":
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ prefix:
+ my_field:
+ value: "[2024-06-08T"
+ - match: { hits.total.value: 3 }
+
+---
+"regexp query matches":
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ regexp:
+ my_field:
+ value: ".*06-08.*cluster-manager node.*"
+ - match: { hits.total.value: 2 }
+
+---
+"regexp query matches lowercase-normalized field":
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ regexp:
+ my_field.lower:
+ value: ".*06-08.*Cluster-Manager Node.*"
+ - match: { hits.total.value: 2 }
+
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ regexp:
+ my_field:
+ value: ".*06-08.*Cluster-Manager Node.*"
+ - match: { hits.total.value: 0 }
+
+---
+"wildcard match-all works":
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ wildcard:
+ my_field:
+ value: "*"
+ - match: { hits.total.value: 5 }
+---
+"regexp match-all works":
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ regexp:
+ my_field:
+ value: ".*"
+ - match: { hits.total.value: 5 }
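The same queries can also be expressed programmatically. As an illustration only (not part of this change), a sketch using the existing QueryBuilders API against the mappings defined above:

import org.opensearch.index.query.QueryBuilder;
import org.opensearch.index.query.QueryBuilders;

public class WildcardFieldQueryExamples {
    public static void main(String[] args) {
        // exact, case-sensitive match against the wildcard field
        QueryBuilder term = QueryBuilders.termQuery("my_field", "AbCd");
        // case-insensitive match via the lowercase-normalized subfield
        QueryBuilder lowerTerm = QueryBuilders.termQuery("my_field.lower", "ABCD");
        // pattern, prefix and regexp matching, mirroring the YAML tests above
        QueryBuilder wildcard = QueryBuilders.wildcardQuery("my_field", "*Node*Exception*");
        QueryBuilder prefix = QueryBuilders.prefixQuery("my_field", "[2024-06-08T");
        QueryBuilder regexp = QueryBuilders.regexpQuery("my_field", ".*06-08.*cluster-manager node.*");
        // each builder renders the corresponding query DSL as JSON
        System.out.println(term);
        System.out.println(lowerTerm);
        System.out.println(wildcard);
        System.out.println(prefix);
        System.out.println(regexp);
    }
}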
diff --git a/server/licenses/lucene-analysis-common-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-analysis-common-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index 08339fa8a4ce1..0000000000000
--- a/server/licenses/lucene-analysis-common-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9cc4e600289bf1171b47de74536bd34c476f85a8
\ No newline at end of file
diff --git a/server/licenses/lucene-analysis-common-9.11.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..7139f6a43a15a
--- /dev/null
+++ b/server/licenses/lucene-analysis-common-9.11.0.jar.sha1
@@ -0,0 +1 @@
+75a0a333cf1e043102743066c929e65fe51cbcda
\ No newline at end of file
diff --git a/server/licenses/lucene-backward-codecs-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-backward-codecs-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index 3dce8a2162edd..0000000000000
--- a/server/licenses/lucene-backward-codecs-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8babfe85be7e36c893741e08072c11e71db09715
\ No newline at end of file
diff --git a/server/licenses/lucene-backward-codecs-9.11.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..735e80b60b001
--- /dev/null
+++ b/server/licenses/lucene-backward-codecs-9.11.0.jar.sha1
@@ -0,0 +1 @@
+db385446bc3fd70e7c6a744276c0a157bd60ee0a
\ No newline at end of file
diff --git a/server/licenses/lucene-core-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-core-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index 943a9b2fd214b..0000000000000
--- a/server/licenses/lucene-core-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3c2361bd633374ae3814b175cc25ccf773f67026
\ No newline at end of file
diff --git a/server/licenses/lucene-core-9.11.0.jar.sha1 b/server/licenses/lucene-core-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..b0d38c4165581
--- /dev/null
+++ b/server/licenses/lucene-core-9.11.0.jar.sha1
@@ -0,0 +1 @@
+2e487755a6814b2a1bc770c26569dcba86873dcf
\ No newline at end of file
diff --git a/server/licenses/lucene-grouping-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-grouping-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index 8587c3ed5e82a..0000000000000
--- a/server/licenses/lucene-grouping-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d9f29b49cd1e0a061ff7fa4a53e8605bd49bd3d0
\ No newline at end of file
diff --git a/server/licenses/lucene-grouping-9.11.0.jar.sha1 b/server/licenses/lucene-grouping-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..562de95605b60
--- /dev/null
+++ b/server/licenses/lucene-grouping-9.11.0.jar.sha1
@@ -0,0 +1 @@
+882bdaf209b0acb332aa34836616424bcbecf462
\ No newline at end of file
diff --git a/server/licenses/lucene-highlighter-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-highlighter-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index 25579432a9cbd..0000000000000
--- a/server/licenses/lucene-highlighter-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-33bc26d46d62bb1cf3bf725db637226a43db7625
\ No newline at end of file
diff --git a/server/licenses/lucene-highlighter-9.11.0.jar.sha1 b/server/licenses/lucene-highlighter-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..e0ef36d321c9d
--- /dev/null
+++ b/server/licenses/lucene-highlighter-9.11.0.jar.sha1
@@ -0,0 +1 @@
+44accdc03c5482e602718f7bf91e5940ba4e4870
\ No newline at end of file
diff --git a/server/licenses/lucene-join-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-join-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index 1bfef89965e67..0000000000000
--- a/server/licenses/lucene-join-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-82966698abdb8f0367a162f642560566a6085dc8
\ No newline at end of file
diff --git a/server/licenses/lucene-join-9.11.0.jar.sha1 b/server/licenses/lucene-join-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..34c618ccfbcc7
--- /dev/null
+++ b/server/licenses/lucene-join-9.11.0.jar.sha1
@@ -0,0 +1 @@
+32a30ee03ed4f3e43bf63250270b2d4d53050045
\ No newline at end of file
diff --git a/server/licenses/lucene-memory-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-memory-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index 73adf3fcb2829..0000000000000
--- a/server/licenses/lucene-memory-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-746f392e7ec27a7cd6ca2add7dd8441d2a6085da
\ No newline at end of file
diff --git a/server/licenses/lucene-memory-9.11.0.jar.sha1 b/server/licenses/lucene-memory-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..d730cfb4b7660
--- /dev/null
+++ b/server/licenses/lucene-memory-9.11.0.jar.sha1
@@ -0,0 +1 @@
+b3e80aa6aa3299118e76a23edc23b58f3ba5a515
\ No newline at end of file
diff --git a/server/licenses/lucene-misc-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-misc-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index 7f7dfead4c329..0000000000000
--- a/server/licenses/lucene-misc-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0eb06ecc39c0ec0db380a6e5aad1b16907e0bfd9
\ No newline at end of file
diff --git a/server/licenses/lucene-misc-9.11.0.jar.sha1 b/server/licenses/lucene-misc-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..9be27f004435b
--- /dev/null
+++ b/server/licenses/lucene-misc-9.11.0.jar.sha1
@@ -0,0 +1 @@
+54fe308908194e1b0697a1157a45c5998c9e1083
\ No newline at end of file
diff --git a/server/licenses/lucene-queries-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-queries-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index e3d400003efd8..0000000000000
--- a/server/licenses/lucene-queries-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0e56eb18cceffcd5ce2e47b679e873420254df74
\ No newline at end of file
diff --git a/server/licenses/lucene-queries-9.11.0.jar.sha1 b/server/licenses/lucene-queries-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..b445610c25858
--- /dev/null
+++ b/server/licenses/lucene-queries-9.11.0.jar.sha1
@@ -0,0 +1 @@
+987d1286949ddf514b8405fd453ed47bebdfb12d
\ No newline at end of file
diff --git a/server/licenses/lucene-queryparser-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-queryparser-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index 8e8c7f5171107..0000000000000
--- a/server/licenses/lucene-queryparser-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-dee3997a72eeae905e92930f53e724b6bef279da
\ No newline at end of file
diff --git a/server/licenses/lucene-queryparser-9.11.0.jar.sha1 b/server/licenses/lucene-queryparser-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..a1620ba9c7708
--- /dev/null
+++ b/server/licenses/lucene-queryparser-9.11.0.jar.sha1
@@ -0,0 +1 @@
+e97fe1c0d102edb8d6e1c01454992fd2b8d80ae0
\ No newline at end of file
diff --git a/server/licenses/lucene-sandbox-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-sandbox-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index 2d1df051e30b4..0000000000000
--- a/server/licenses/lucene-sandbox-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-946bc45b87b3d770ab6828b0d0a5f8684f2c3624
\ No newline at end of file
diff --git a/server/licenses/lucene-sandbox-9.11.0.jar.sha1 b/server/licenses/lucene-sandbox-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..0dc193f054973
--- /dev/null
+++ b/server/licenses/lucene-sandbox-9.11.0.jar.sha1
@@ -0,0 +1 @@
+5e46b790744bd9118ccc053f70235364213312a5
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-extras-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-spatial-extras-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index 0f9b7c0e90218..0000000000000
--- a/server/licenses/lucene-spatial-extras-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d73667f61fb5e7fde4cec52fcfbbfd9847068aec
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-extras-9.11.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..9d3a8d2857db6
--- /dev/null
+++ b/server/licenses/lucene-spatial-extras-9.11.0.jar.sha1
@@ -0,0 +1 @@
+079ca5aaf544a3acde84b8b88423ace6dedc23eb
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-spatial3d-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index 87894603e0d84..0000000000000
--- a/server/licenses/lucene-spatial3d-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a8e8ab80bfb6abd70932e50fe31e13ecf2e00987
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-9.11.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..fd5ff875a0113
--- /dev/null
+++ b/server/licenses/lucene-spatial3d-9.11.0.jar.sha1
@@ -0,0 +1 @@
+564558818d70fc384db5b36fbc8a0ab27b107609
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-suggest-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index 6100f6fe0d585..0000000000000
--- a/server/licenses/lucene-suggest-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-45d6f0facd45d4e49585f0dabfa62ed5a1883033
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-9.11.0.jar.sha1 b/server/licenses/lucene-suggest-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..2fa96e97f307a
--- /dev/null
+++ b/server/licenses/lucene-suggest-9.11.0.jar.sha1
@@ -0,0 +1 @@
+aa345db9b6caaf881e7890ea5b8911357d592167
\ No newline at end of file
diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java
index ab2f0f0080566..f6c7355ea06f6 100644
--- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java
@@ -26,13 +26,13 @@
import java.util.function.Function;
import java.util.stream.Collectors;
-import static org.opensearch.gateway.remote.RemoteClusterStateService.COORDINATION_METADATA;
-import static org.opensearch.gateway.remote.RemoteClusterStateService.CUSTOM_METADATA;
-import static org.opensearch.gateway.remote.RemoteClusterStateService.DELIMITER;
-import static org.opensearch.gateway.remote.RemoteClusterStateService.METADATA_FILE_PREFIX;
import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING;
-import static org.opensearch.gateway.remote.RemoteClusterStateService.SETTING_METADATA;
-import static org.opensearch.gateway.remote.RemoteClusterStateService.TEMPLATES_METADATA;
+import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER;
+import static org.opensearch.gateway.remote.RemoteClusterStateUtils.METADATA_FILE_PREFIX;
+import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA;
+import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_METADATA;
+import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTING_METADATA;
+import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA;
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
public class RemoteClusterStateServiceIT extends RemoteStoreBaseIntegTestCase {
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java
index 766ca2c1189e5..9888d2d8abd98 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java
@@ -631,6 +631,7 @@ public void testCacheWithFilteredAlias() throws InterruptedException {
assertCacheState(client, index, 2, 2);
}
+ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/11374")
public void testProfileDisableCache() throws Exception {
Client client = client();
String index = "index";
@@ -673,6 +674,7 @@ public void testProfileDisableCache() throws Exception {
}
}
+ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/12308")
public void testCacheWithInvalidation() throws Exception {
Client client = client();
String index = "index";
@@ -758,6 +760,7 @@ public void testCacheClearAPIRemovesStaleKeysWhenStalenessThresholdIsLow() throw
}
// when staleness threshold is lower than staleness, it should clean the stale keys from cache
+ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13540")
public void testStaleKeysCleanupWithLowThreshold() throws Exception {
int cacheCleanIntervalInMillis = 1;
String node = internalCluster().startNode(
@@ -804,6 +807,7 @@ public void testStaleKeysCleanupWithLowThreshold() throws Exception {
}
// when staleness threshold is equal to staleness, it should clean the stale keys from cache
+ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13503")
public void testCacheCleanupOnEqualStalenessAndThreshold() throws Exception {
int cacheCleanIntervalInMillis = 1;
String node = internalCluster().startNode(
@@ -982,6 +986,7 @@ public void testStaleKeysRemovalWithoutExplicitThreshold() throws Exception {
}
// when cache cleaner interval setting is not set, cache cleaner is configured appropriately with the fall-back setting
+ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13711")
public void testCacheCleanupWithDefaultSettings() throws Exception {
int cacheCleanIntervalInMillis = 1;
String node = internalCluster().startNode(
@@ -1022,6 +1027,7 @@ public void testCacheCleanupWithDefaultSettings() throws Exception {
}
// staleness threshold updates flows through to the cache cleaner
+ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13949")
public void testDynamicStalenessThresholdUpdate() throws Exception {
int cacheCleanIntervalInMillis = 1;
String node = internalCluster().startNode(
@@ -1169,6 +1175,7 @@ public void testCacheCleanupAfterIndexDeletion() throws Exception {
}
// when staleness threshold is lower than staleness, it should clean the cache from all indices having stale keys
+ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13437")
public void testStaleKeysCleanupWithMultipleIndices() throws Exception {
int cacheCleanIntervalInMillis = 10;
String node = internalCluster().startNode(
@@ -1223,6 +1230,7 @@ public void testStaleKeysCleanupWithMultipleIndices() throws Exception {
}, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS);
}
+ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13600")
public void testDeleteAndCreateSameIndexShardOnSameNode() throws Exception {
String node_1 = internalCluster().startNode(Settings.builder().build());
Client client = client(node_1);
@@ -1280,8 +1288,8 @@ public void testDeleteAndCreateSameIndexShardOnSameNode() throws Exception {
final Index index = state.metadata().index(indexName).getIndex();
assertBusy(() -> {
- assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(false));
- assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(true));
+ assertFalse(Arrays.stream(shardDirectory(node_1, index, 0)).anyMatch(Files::exists));
+ assertEquals(1, Arrays.stream(shardDirectory(node_2, index, 0)).filter(Files::exists).count());
});
logger.info("Moving the shard: {} again from node:{} to node:{}", indexName + "#0", node_2, node_1);
@@ -1294,11 +1302,10 @@ public void testDeleteAndCreateSameIndexShardOnSameNode() throws Exception {
.setWaitForNoInitializingShards(true)
.get();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
- assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true));
assertBusy(() -> {
- assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true));
- assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(false));
+ assertEquals(1, Arrays.stream(shardDirectory(node_1, index, 0)).filter(Files::exists).count());
+ assertFalse(Arrays.stream(shardDirectory(node_2, index, 0)).anyMatch(Files::exists));
});
logger.info("Clearing the cache for index:{}. And verify the request stats doesn't go negative", indexName);
@@ -1311,11 +1318,12 @@ public void testDeleteAndCreateSameIndexShardOnSameNode() throws Exception {
assertTrue(stats.getMemorySizeInBytes() == 0);
}
- private Path shardDirectory(String server, Index index, int shard) {
+ private Path[] shardDirectory(String server, Index index, int shard) {
NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, server);
final Path[] paths = env.availableShardPaths(new ShardId(index, shard));
- assert paths.length == 1;
- return paths[0];
+ // the number of available shard paths may be greater than 1;
+ // it depends on `InternalTestCluster.numDataPaths`.
+ return paths;
}
private void setupIndex(Client client, String index) throws Exception {
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java
index 8ce87f37d77cd..cf93a432d0371 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java
@@ -104,7 +104,6 @@
import org.opensearch.indices.recovery.RecoveryState.Stage;
import org.opensearch.indices.replication.common.ReplicationLuceneIndex;
import org.opensearch.node.NodeClosedException;
-import org.opensearch.node.RecoverySettingsChunkSizePlugin;
import org.opensearch.plugins.AnalysisPlugin;
import org.opensearch.plugins.Plugin;
import org.opensearch.plugins.PluginsService;
@@ -156,7 +155,7 @@
import static java.util.stream.Collectors.toList;
import static org.opensearch.action.DocWriteResponse.Result.CREATED;
import static org.opensearch.action.DocWriteResponse.Result.UPDATED;
-import static org.opensearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING;
+import static org.opensearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE_SETTING;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.empty;
@@ -187,7 +186,6 @@ protected Collection> nodePlugins() {
return Arrays.asList(
MockTransportService.TestPlugin.class,
MockFSIndexStore.TestPlugin.class,
- RecoverySettingsChunkSizePlugin.class,
TestAnalysisPlugin.class,
InternalSettingsPlugin.class,
MockEngineFactoryPlugin.class
@@ -263,7 +261,7 @@ private void slowDownRecovery(ByteSizeValue shardSize) {
// one chunk per sec..
.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), chunkSize, ByteSizeUnit.BYTES)
// small chunks
- .put(CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkSize, ByteSizeUnit.BYTES))
+ .put(INDICES_RECOVERY_CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkSize, ByteSizeUnit.BYTES))
)
.get()
.isAcknowledged()
@@ -278,7 +276,10 @@ private void restoreRecoverySpeed() {
.setTransientSettings(
Settings.builder()
.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "20mb")
- .put(CHUNK_SIZE_SETTING.getKey(), RecoverySettings.DEFAULT_CHUNK_SIZE)
+ .put(
+ INDICES_RECOVERY_CHUNK_SIZE_SETTING.getKey(),
+ RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE_SETTING.getDefault(Settings.EMPTY)
+ )
)
.get()
.isAcknowledged()
diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java
index 0752ab7c9d0f1..d9e3cec426edf 100644
--- a/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java
@@ -36,6 +36,7 @@
import org.opensearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.opensearch.action.admin.indices.create.CreateIndexResponse;
import org.opensearch.action.admin.indices.recovery.RecoveryResponse;
import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.metadata.IndexMetadata;
@@ -45,6 +46,8 @@
import org.opensearch.common.collect.MapBuilder;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
+import org.opensearch.common.xcontent.XContentFactory;
+import org.opensearch.core.xcontent.XContentBuilder;
import org.opensearch.indices.recovery.RecoveryState;
import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope;
import org.opensearch.test.OpenSearchIntegTestCase.Scope;
@@ -253,4 +256,144 @@ public void testNoRebalanceOnRollingRestart() throws Exception {
);
}
}
+
+ public void testFullRollingRestart_withNoRecoveryPayloadAndSource() throws Exception {
+ internalCluster().startNode();
+ XContentBuilder builder = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("_source")
+ .field("enabled")
+ .value(false)
+ .field("recovery_source_enabled")
+ .value(false)
+ .endObject()
+ .endObject();
+ CreateIndexResponse response = prepareCreate("test").setMapping(builder).get();
+ logger.info("Create index response is : {}", response);
+
+ final String healthTimeout = "1m";
+
+ for (int i = 0; i < 1000; i++) {
+ client().prepareIndex("test")
+ .setId(Long.toString(i))
+ .setSource(MapBuilder.newMapBuilder().put("test", "value" + i).map())
+ .execute()
+ .actionGet();
+ }
+
+ for (int i = 1000; i < 2000; i++) {
+ client().prepareIndex("test")
+ .setId(Long.toString(i))
+ .setSource(MapBuilder.newMapBuilder().put("test", "value" + i).map())
+ .execute()
+ .actionGet();
+ }
+ // ensure all docs are committed to the file system
+ flush();
+
+ logger.info("--> now start adding nodes");
+ internalCluster().startNode();
+ internalCluster().startNode();
+
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(
+ client().admin()
+ .cluster()
+ .prepareHealth()
+ .setWaitForEvents(Priority.LANGUID)
+ .setTimeout(healthTimeout)
+ .setWaitForGreenStatus()
+ .setWaitForNoRelocatingShards(true)
+ .setWaitForNodes("3")
+ );
+
+ logger.info("--> add two more nodes");
+ internalCluster().startNode();
+ internalCluster().startNode();
+
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(
+ client().admin()
+ .cluster()
+ .prepareHealth()
+ .setWaitForEvents(Priority.LANGUID)
+ .setTimeout(healthTimeout)
+ .setWaitForGreenStatus()
+ .setWaitForNoRelocatingShards(true)
+ .setWaitForNodes("5")
+ );
+
+ logger.info("--> refreshing and checking data");
+ refreshAndWaitForReplication();
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L);
+ }
+
+ // now start shutting nodes down
+ internalCluster().stopRandomDataNode();
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(
+ client().admin()
+ .cluster()
+ .prepareHealth()
+ .setWaitForEvents(Priority.LANGUID)
+ .setTimeout(healthTimeout)
+ .setWaitForGreenStatus()
+ .setWaitForNoRelocatingShards(true)
+ .setWaitForNodes("4")
+ );
+
+ internalCluster().stopRandomDataNode();
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(
+ client().admin()
+ .cluster()
+ .prepareHealth()
+ .setWaitForEvents(Priority.LANGUID)
+ .setTimeout(healthTimeout)
+ .setWaitForGreenStatus()
+ .setWaitForNoRelocatingShards(true)
+ .setWaitForNodes("3")
+ );
+
+ logger.info("--> stopped two nodes, verifying data");
+ refreshAndWaitForReplication();
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L);
+ }
+
+ // closing the 3rd node
+ internalCluster().stopRandomDataNode();
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(
+ client().admin()
+ .cluster()
+ .prepareHealth()
+ .setWaitForEvents(Priority.LANGUID)
+ .setTimeout(healthTimeout)
+ .setWaitForGreenStatus()
+ .setWaitForNoRelocatingShards(true)
+ .setWaitForNodes("2")
+ );
+
+ internalCluster().stopRandomDataNode();
+
+ // make sure the cluster state is yellow, and all has been recovered
+ assertTimeout(
+ client().admin()
+ .cluster()
+ .prepareHealth()
+ .setWaitForEvents(Priority.LANGUID)
+ .setTimeout(healthTimeout)
+ .setWaitForYellowStatus()
+ .setWaitForNoRelocatingShards(true)
+ .setWaitForNodes("1")
+ );
+
+ logger.info("--> one node left, verifying data");
+ refreshAndWaitForReplication();
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L);
+ }
+ }
}
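For reference, the mapping assembled above via XContentFactory.jsonBuilder() should render roughly as the following JSON, disabling both the stored _source and the recovery source (derived from the builder calls in the test):

{ "_source": { "enabled": false, "recovery_source_enabled": false } }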
diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java
index bf0533143cf91..692beb86279b9 100644
--- a/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java
@@ -46,7 +46,6 @@
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.indices.recovery.FileChunkRequest;
import org.opensearch.indices.recovery.PeerRecoveryTargetService;
-import org.opensearch.node.RecoverySettingsChunkSizePlugin;
import org.opensearch.plugins.Plugin;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
@@ -61,7 +60,7 @@
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
-import static org.opensearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING;
+import static org.opensearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE_SETTING;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@@ -81,7 +80,7 @@ public static Collection parameters() {
@Override
protected Collection> nodePlugins() {
- return Arrays.asList(MockTransportService.TestPlugin.class, RecoverySettingsChunkSizePlugin.class);
+ return Arrays.asList(MockTransportService.TestPlugin.class);
}
/**
@@ -96,7 +95,8 @@ public void testCancelRecoveryAndResume() throws Exception {
.cluster()
.prepareUpdateSettings()
.setTransientSettings(
- Settings.builder().put(CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES))
+ Settings.builder()
+ .put(INDICES_RECOVERY_CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES))
)
.get()
.isAcknowledged()
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java
index 0493bcf800c97..901b36f872622 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java
@@ -21,7 +21,6 @@
import org.opensearch.cluster.routing.RoutingNode;
import org.opensearch.common.UUIDs;
import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
import org.opensearch.repositories.fs.ReloadableFsRepository;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.junit.Before;
@@ -87,11 +86,6 @@ protected Settings nodeSettings(int nodeOrdinal) {
}
}
- @Override
- protected Settings featureFlagSettings() {
- return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build();
- }
-
protected void setFailRate(String repoName, int value) throws ExecutionException, InterruptedException {
GetRepositoriesRequest gr = new GetRepositoriesRequest(new String[] { repoName });
GetRepositoriesResponse res = client().admin().cluster().getRepositories(gr).get();
@@ -186,10 +180,11 @@ private Thread getIndexingThread() {
indexSingleDoc(indexName);
long currentDocCount = indexedDocs.incrementAndGet();
if (currentDocCount > 0 && currentDocCount % refreshFrequency == 0) {
- logger.info("--> [iteration {}] flushing index", currentDocCount);
if (rarely()) {
+ logger.info("--> [iteration {}] flushing index", currentDocCount);
client().admin().indices().prepareFlush(indexName).get();
} else {
+ logger.info("--> [iteration {}] refreshing index", currentDocCount);
client().admin().indices().prepareRefresh(indexName).get();
}
}
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java
index 5094a7cf29c6a..d046f41ce0590 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java
@@ -18,6 +18,7 @@
import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.opensearch.common.settings.Settings;
import org.opensearch.index.IndexService;
+import org.opensearch.index.ReplicationStats;
import org.opensearch.index.remote.RemoteSegmentStats;
import org.opensearch.index.seqno.RetentionLease;
import org.opensearch.index.seqno.RetentionLeases;
@@ -665,6 +666,43 @@ public void testFailoverRemotePrimaryToDocrepReplicaReseedToRemotePrimary() thro
});
}
+ /*
+ Performs the same experiment as testRemotePrimaryDocRepReplica.
+
+ This ensures that the primary shard for the index has moved over to a remote-enabled
+ node, whereas the replica copy is still left behind on the docrep nodes.
+
+ At this stage, segrep lag computation shouldn't consider the docrep shard copy while calculating the bytes lag.
+ */
+ public void testZeroSegrepLagForShardsWithMixedReplicationGroup() throws Exception {
+ testRemotePrimaryDocRepReplica();
+ String remoteNodeName = internalCluster().client()
+ .admin()
+ .cluster()
+ .prepareNodesStats()
+ .get()
+ .getNodes()
+ .stream()
+ .filter(nodeStats -> nodeStats.getNode().isRemoteStoreNode())
+ .findFirst()
+ .get()
+ .getNode()
+ .getName();
+ ReplicationStats replicationStats = internalCluster().client()
+ .admin()
+ .cluster()
+ .prepareNodesStats(remoteNodeName)
+ .get()
+ .getNodes()
+ .get(0)
+ .getIndices()
+ .getSegments()
+ .getReplicationStats();
+ assertEquals(0, replicationStats.getMaxBytesBehind());
+ assertEquals(0, replicationStats.getTotalBytesBehind());
+ assertEquals(0, replicationStats.getMaxReplicationLag());
+ }
+
private void assertReplicaAndPrimaryConsistency(String indexName, int firstBatch, int secondBatch) throws Exception {
assertBusy(() -> {
Map shardStatsMap = internalCluster().client()
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java
index c72b6851c1125..793adef0594fc 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java
@@ -273,6 +273,7 @@ initalMetadataVersion < internalCluster().client()
* After shard relocation completes, shuts down the docrep nodes and asserts remote
* index settings are applied even when the index is in YELLOW state
*/
+ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13737")
public void testIndexSettingsUpdatedEvenForMisconfiguredReplicas() throws Exception {
internalCluster().startClusterManagerOnlyNode();
@@ -329,6 +330,7 @@ public void testIndexSettingsUpdatedEvenForMisconfiguredReplicas() throws Except
* After shard relocation completes, restarts the docrep node holding extra replica shard copy
* and asserts remote index settings are applied as soon as the docrep replica copy is unassigned
*/
+ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13871")
public void testIndexSettingsUpdatedWhenDocrepNodeIsRestarted() throws Exception {
internalCluster().startClusterManagerOnlyNode();
@@ -469,6 +471,7 @@ public void testRemotePathMetadataAddedWithFirstPrimaryMovingToRemote() throws E
* exclude docrep nodes, assert that remote index path file exists
* when shards start relocating to the remote nodes.
*/
+ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13939")
public void testRemoteIndexPathFileExistsAfterMigration() throws Exception {
String docrepClusterManager = internalCluster().startClusterManagerOnlyNode();
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java
index 293691ace2edd..cea653c0ead4b 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java
@@ -8,14 +8,11 @@
package org.opensearch.remotemigration;
-import org.opensearch.action.DocWriteResponse;
import org.opensearch.action.admin.cluster.health.ClusterHealthRequest;
import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
-import org.opensearch.action.delete.DeleteResponse;
-import org.opensearch.action.index.IndexResponse;
import org.opensearch.client.Client;
import org.opensearch.client.Requests;
import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand;
@@ -66,8 +63,8 @@ public void testRemotePrimaryRelocation() throws Exception {
AtomicInteger numAutoGenDocs = new AtomicInteger();
final AtomicBoolean finished = new AtomicBoolean(false);
- Thread indexingThread = getIndexingThread(finished, numAutoGenDocs);
-
+ AsyncIndexingService asyncIndexingService = new AsyncIndexingService("test");
+ asyncIndexingService.startIndexing();
refresh("test");
// add remote node in mixed mode cluster
@@ -141,17 +138,19 @@ public void testRemotePrimaryRelocation() throws Exception {
logger.info("--> relocation from remote to remote complete");
finished.set(true);
- indexingThread.join();
+ asyncIndexingService.stopIndexing();
refresh("test");
- OpenSearchAssertions.assertHitCount(client().prepareSearch("test").setTrackTotalHits(true).get(), numAutoGenDocs.get());
+ OpenSearchAssertions.assertHitCount(
+ client().prepareSearch("test").setTrackTotalHits(true).get(),
+ asyncIndexingService.getIndexedDocs()
+ );
OpenSearchAssertions.assertHitCount(
client().prepareSearch("test")
.setTrackTotalHits(true)// extra paranoia ;)
.setQuery(QueryBuilders.termQuery("auto", true))
.get(),
- numAutoGenDocs.get()
+ asyncIndexingService.getIndexedDocs()
);
-
}
public void testMixedModeRelocation_RemoteSeedingFail() throws Exception {
@@ -165,9 +164,8 @@ public void testMixedModeRelocation_RemoteSeedingFail() throws Exception {
client().admin().indices().prepareCreate("test").setSettings(indexSettings()).setMapping("field", "type=text").get();
ensureGreen("test");
- AtomicInteger numAutoGenDocs = new AtomicInteger();
- final AtomicBoolean finished = new AtomicBoolean(false);
- Thread indexingThread = getIndexingThread(finished, numAutoGenDocs);
+ AsyncIndexingService asyncIndexingService = new AsyncIndexingService("test");
+ asyncIndexingService.startIndexing();
refresh("test");
@@ -209,27 +207,11 @@ public void testMixedModeRelocation_RemoteSeedingFail() throws Exception {
assertEquals(actionGet.getRelocatingShards(), 0);
assertEquals(docRepNode, primaryNodeName("test"));
- finished.set(true);
- indexingThread.join();
+ asyncIndexingService.stopIndexing();
client().admin()
.cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(RecoverySettings.INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT.getKey(), (String) null))
.get();
}
-
- private static Thread getIndexingThread(AtomicBoolean finished, AtomicInteger numAutoGenDocs) {
- Thread indexingThread = new Thread(() -> {
- while (finished.get() == false && numAutoGenDocs.get() < 10_000) {
- IndexResponse indexResponse = client().prepareIndex("test").setId("id").setSource("field", "value").get();
- assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());
- DeleteResponse deleteResponse = client().prepareDelete("test", "id").get();
- assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult());
- client().prepareIndex("test").setSource("auto", true).get();
- numAutoGenDocs.incrementAndGet();
- }
- });
- indexingThread.start();
- return indexingThread;
- }
}
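The AsyncIndexingService helper used above is introduced elsewhere in this change set and is not shown in this diff. Purely as a hypothetical sketch of the calls it must support (startIndexing, stopIndexing, getIndexedDocs), based only on how it is used here:

package org.opensearch.remotemigration;

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical sketch only: the real AsyncIndexingService is added elsewhere in this PR.
class AsyncIndexingServiceSketch {
    private final String indexName;
    private final AtomicBoolean stopped = new AtomicBoolean(false);
    private final AtomicLong indexedDocs = new AtomicLong();
    private Thread worker;

    AsyncIndexingServiceSketch(String indexName) {
        this.indexName = indexName;
    }

    void startIndexing() {
        worker = new Thread(() -> {
            while (stopped.get() == false) {
                // a real implementation would index a single document into indexName here
                indexedDocs.incrementAndGet();
            }
        });
        worker.start();
    }

    void stopIndexing() throws InterruptedException {
        stopped.set(true);
        worker.join();
    }

    long getIndexedDocs() {
        return indexedDocs.get();
    }
}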
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteReplicaRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteReplicaRecoveryIT.java
index 196ecb991bbc0..aae726fe2a6bc 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteReplicaRecoveryIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteReplicaRecoveryIT.java
@@ -8,32 +8,27 @@
package org.opensearch.remotemigration;
-import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
-
-import org.opensearch.action.DocWriteResponse;
import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
+import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse;
import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest;
-import org.opensearch.action.delete.DeleteResponse;
-import org.opensearch.action.index.IndexResponse;
import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.opensearch.common.Priority;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
+import org.opensearch.index.SegmentReplicationPerGroupStats;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.opensearch.test.hamcrest.OpenSearchAssertions;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.TimeUnit;
import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING;
import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false)
-
public class RemoteReplicaRecoveryIT extends MigrationBaseTestCase {
protected int maximumNumberOfShards() {
@@ -52,6 +47,7 @@ protected int minimumNumberOfReplicas() {
Brings up new replica copies on remote and docrep nodes, when primary is on a remote node
Live indexing is happening meanwhile
*/
+ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13473")
public void testReplicaRecovery() throws Exception {
internalCluster().setBootstrapClusterManagerNodeIndex(0);
String primaryNode = internalCluster().startNode();
@@ -63,10 +59,8 @@ public void testReplicaRecovery() throws Exception {
client().admin().indices().prepareCreate("test").setSettings(indexSettings()).setMapping("field", "type=text").get();
String replicaNode = internalCluster().startNode();
ensureGreen("test");
-
- AtomicInteger numAutoGenDocs = new AtomicInteger();
- final AtomicBoolean finished = new AtomicBoolean(false);
- Thread indexingThread = getThread(finished, numAutoGenDocs);
+ AsyncIndexingService asyncIndexingService = new AsyncIndexingService("test");
+ asyncIndexingService.startIndexing();
refresh("test");
@@ -78,12 +72,10 @@ public void testReplicaRecovery() throws Exception {
updateSettingsRequest.persistentSettings(Settings.builder().put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store"));
assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
- String remoteNode2 = internalCluster().startNode();
+ internalCluster().startNode();
internalCluster().validateClusterFormed();
// identify the primary
-
- Thread.sleep(RandomNumbers.randomIntBetween(random(), 0, 2000));
logger.info("--> relocating primary from {} to {} ", primaryNode, remoteNode);
client().admin()
.cluster()
@@ -102,7 +94,6 @@ public void testReplicaRecovery() throws Exception {
assertEquals(0, clusterHealthResponse.getRelocatingShards());
logger.info("--> relocation of primary from docrep to remote complete");
- Thread.sleep(RandomNumbers.randomIntBetween(random(), 0, 2000));
logger.info("--> getting up the new replicas now to doc rep node as well as remote node ");
// Increase replica count to 3
@@ -129,52 +120,33 @@ public void testReplicaRecovery() throws Exception {
logger.info("--> replica is up now on another docrep now as well as remote node");
assertEquals(0, clusterHealthResponse.getRelocatingShards());
+ asyncIndexingService.stopIndexing();
+ refresh("test");
- Thread.sleep(RandomNumbers.randomIntBetween(random(), 0, 2000));
+ // segrep lag should be zero
+ assertBusy(() -> {
+ SegmentReplicationStatsResponse segmentReplicationStatsResponse = dataNodeClient().admin()
+ .indices()
+ .prepareSegmentReplicationStats("test")
+ .setDetailed(true)
+ .execute()
+ .actionGet();
+ SegmentReplicationPerGroupStats perGroupStats = segmentReplicationStatsResponse.getReplicationStats().get("test").get(0);
+ assertEquals(segmentReplicationStatsResponse.getReplicationStats().size(), 1);
+ perGroupStats.getReplicaStats().stream().forEach(e -> assertEquals(e.getCurrentReplicationLagMillis(), 0));
+ }, 20, TimeUnit.SECONDS);
- // Stop replicas on docrep now.
- // ToDo : Remove once we have dual replication enabled
- client().admin()
- .indices()
- .updateSettings(
- new UpdateSettingsRequest("test").settings(
- Settings.builder()
- .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
- .put("index.routing.allocation.exclude._name", primaryNode + "," + replicaNode)
- .build()
- )
- )
- .get();
-
- finished.set(true);
- indexingThread.join();
- refresh("test");
- OpenSearchAssertions.assertHitCount(client().prepareSearch("test").setTrackTotalHits(true).get(), numAutoGenDocs.get());
+ OpenSearchAssertions.assertHitCount(
+ client().prepareSearch("test").setTrackTotalHits(true).get(),
+ asyncIndexingService.getIndexedDocs()
+ );
OpenSearchAssertions.assertHitCount(
client().prepareSearch("test")
.setTrackTotalHits(true)// extra paranoia ;)
.setQuery(QueryBuilders.termQuery("auto", true))
- // .setPreference("_prefer_nodes:" + (remoteNode+ "," + remoteNode2))
.get(),
- numAutoGenDocs.get()
+ asyncIndexingService.getIndexedDocs()
);
}
-
- private Thread getThread(AtomicBoolean finished, AtomicInteger numAutoGenDocs) {
- Thread indexingThread = new Thread(() -> {
- while (finished.get() == false && numAutoGenDocs.get() < 100) {
- IndexResponse indexResponse = client().prepareIndex("test").setId("id").setSource("field", "value").get();
- assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());
- DeleteResponse deleteResponse = client().prepareDelete("test", "id").get();
- assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult());
- client().prepareIndex("test").setSource("auto", true).get();
- numAutoGenDocs.incrementAndGet();
- logger.info("Indexed {} docs here", numAutoGenDocs.get());
- }
- });
- indexingThread.start();
- return indexingThread;
- }
-
}
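
Note: the test above now drives background writes through AsyncIndexingService (startIndexing/stopIndexing/getIndexedDocs) instead of the removed hand-rolled thread. The helper itself is not part of this hunk; the following is only a minimal sketch of what such a service could look like, assuming the usual integration-test client() helper, not the production class.

    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.atomic.AtomicLong;

    // Sketch only: indexes into a fixed index on a background thread until stopped,
    // and exposes how many documents were written for later hit-count assertions.
    class AsyncIndexingService {
        private final String index;
        private final AtomicBoolean stopped = new AtomicBoolean(false);
        private final AtomicLong indexedDocs = new AtomicLong();
        private Thread worker;

        AsyncIndexingService(String index) {
            this.index = index;
        }

        void startIndexing() {
            worker = new Thread(() -> {
                while (stopped.get() == false) {
                    // same doc shape as the old inline thread: a single boolean field
                    client().prepareIndex(index).setSource("auto", true).get();
                    indexedDocs.incrementAndGet();
                }
            });
            worker.start();
        }

        void stopIndexing() throws InterruptedException {
            stopped.set(true);
            worker.join();
        }

        long getIndexedDocs() {
            return indexedDocs.get();
        }
    }
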
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java
index 4b1c91f1d57ca..4e4f6da56d622 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java
@@ -17,6 +17,7 @@
import org.opensearch.common.Priority;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
+import org.opensearch.common.util.FeatureFlags;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.repositories.blobstore.BlobStoreRepository;
import org.opensearch.snapshots.SnapshotInfo;
@@ -42,6 +43,11 @@ protected int minimumNumberOfReplicas() {
return 1;
}
+ @Override
+ protected Settings featureFlagSettings() {
+ return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build();
+ }
+
public void testMixedModeAddRemoteNodes() throws Exception {
internalCluster().setBootstrapClusterManagerNodeIndex(0);
List<String> cmNodes = internalCluster().startNodes(1);
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/ResizeIndexMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/ResizeIndexMigrationTestCase.java
index b817906a8f828..b804e6dbc1231 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/ResizeIndexMigrationTestCase.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/ResizeIndexMigrationTestCase.java
@@ -12,6 +12,7 @@
import org.opensearch.action.admin.indices.shrink.ResizeType;
import org.opensearch.action.support.ActiveShardCount;
import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
import org.opensearch.indices.replication.common.ReplicationType;
import org.opensearch.test.OpenSearchIntegTestCase;
@@ -27,6 +28,11 @@ public class ResizeIndexMigrationTestCase extends MigrationBaseTestCase {
private final static String DOC_REP_DIRECTION = "docrep";
private final static String MIXED_MODE = "mixed";
+ @Override
+ protected Settings featureFlagSettings() {
+ return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build();
+ }
+
/*
* This test will verify the resize request failure, when cluster mode is mixed
* and index is on DocRep node, and migration to remote store is in progress.
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java
index b22817ef19d1b..11260e0914dc5 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java
@@ -57,6 +57,7 @@
import static org.opensearch.cluster.metadata.Metadata.CLUSTER_READ_ONLY_BLOCK;
import static org.opensearch.cluster.metadata.Metadata.SETTING_READ_ONLY_SETTING;
import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING;
+import static org.opensearch.gateway.remote.RemoteClusterStateUtils.encodeString;
import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE;
import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
@@ -326,9 +327,7 @@ public void testFullClusterRestoreManifestFilePointsToInvalidIndexMetadataPathTh
// Step - 3 Delete index metadata file in remote
try {
Files.move(
- segmentRepoPath.resolve(
- RemoteClusterStateService.encodeString(clusterName) + "/cluster-state/" + prevClusterUUID + "/index"
- ),
+ segmentRepoPath.resolve(encodeString(clusterName) + "/cluster-state/" + prevClusterUUID + "/index"),
segmentRepoPath.resolve("cluster-state/")
);
} catch (IOException e) {
@@ -354,10 +353,7 @@ public void testRemoteStateFullRestart() throws Exception {
try {
Files.move(
segmentRepoPath.resolve(
- RemoteClusterStateService.encodeString(clusterService().state().getClusterName().value())
- + "/cluster-state/"
- + prevClusterUUID
- + "/manifest"
+ encodeString(clusterService().state().getClusterName().value()) + "/cluster-state/" + prevClusterUUID + "/manifest"
),
segmentRepoPath.resolve("cluster-state/")
);
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java
index 4a8b00ea45738..4051bee3e4e5c 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java
@@ -187,4 +187,27 @@ public void testAggsOnEmptyShards() {
// Validate non-global agg does not throw an exception
assertSearchResponse(client().prepareSearch("idx").addAggregation(stats("value_stats").field("score")).get());
}
+
+ public void testAggsWithTerminateAfter() throws InterruptedException {
+ assertAcked(
+ prepareCreate(
+ "terminate_index",
+ Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+ ).setMapping("f", "type=keyword").get()
+ );
+ List<IndexRequestBuilder> docs = new ArrayList<>();
+ for (int i = 0; i < randomIntBetween(5, 20); ++i) {
+ docs.add(client().prepareIndex("terminate_index").setSource("f", Integer.toString(i / 3)));
+ }
+ indexRandom(true, docs);
+
+ SearchResponse response = client().prepareSearch("terminate_index")
+ .setSize(2)
+ .setTerminateAfter(1)
+ .addAggregation(terms("f").field("f"))
+ .get();
+ assertSearchResponse(response);
+ assertTrue(response.isTerminatedEarly());
+ assertEquals(response.getHits().getHits().length, 1);
+ }
}
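
With terminate_after=1 on a single-shard index, collection stops after the first matching document, so both the hits and any aggregations in the same request only see that one document. A possible follow-up assertion (sketch only, assuming the usual Terms/Bucket accessors are available in this test class) would tie the aggregation result to the early termination as well:

    // The terms aggregation should also have seen exactly one document.
    Terms agg = response.getAggregations().get("f");
    long aggregatedDocs = agg.getBuckets().stream().mapToLong(Terms.Bucket::getDocCount).sum();
    assertEquals(1, aggregatedDocs);
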
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java
index edf9cd432dda2..f5d018b2ef491 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java
@@ -42,10 +42,12 @@
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.script.Script;
import org.opensearch.script.ScriptType;
+import org.opensearch.search.aggregations.AggregationBuilders;
import org.opensearch.search.aggregations.AggregationExecutionException;
import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode;
import org.opensearch.search.aggregations.BucketOrder;
import org.opensearch.search.aggregations.bucket.filter.Filter;
+import org.opensearch.search.aggregations.bucket.filter.InternalFilters;
import org.opensearch.search.aggregations.bucket.terms.Terms.Bucket;
import org.opensearch.search.aggregations.metrics.Avg;
import org.opensearch.search.aggregations.metrics.ExtendedStats;
@@ -999,6 +1001,72 @@ public void testOtherDocCount() {
testOtherDocCount(SINGLE_VALUED_FIELD_NAME, MULTI_VALUED_FIELD_NAME);
}
+ public void testDeferredSubAggs() {
+ // Tests subAgg doc count is the same with different collection modes and additional top level aggs
+ SearchResponse r1 = client().prepareSearch("idx")
+ .setSize(0)
+ .addAggregation(
+ terms("terms1").collectMode(SubAggCollectionMode.BREADTH_FIRST)
+ .field("s_value")
+ .size(2)
+ .subAggregation(AggregationBuilders.filters("filter", QueryBuilders.boolQuery()))
+ )
+ .addAggregation(AggregationBuilders.min("min").field("constant"))
+ .get();
+
+ SearchResponse r2 = client().prepareSearch("idx")
+ .setSize(0)
+ .addAggregation(
+ terms("terms1").collectMode(SubAggCollectionMode.DEPTH_FIRST)
+ .field("s_value")
+ .size(2)
+ .subAggregation(AggregationBuilders.filters("filter", QueryBuilders.boolQuery()))
+ )
+ .addAggregation(AggregationBuilders.min("min").field("constant"))
+ .get();
+
+ SearchResponse r3 = client().prepareSearch("idx")
+ .setSize(0)
+ .addAggregation(
+ terms("terms1").collectMode(SubAggCollectionMode.BREADTH_FIRST)
+ .field("s_value")
+ .size(2)
+ .subAggregation(AggregationBuilders.filters("filter", QueryBuilders.boolQuery()))
+ )
+ .get();
+
+ SearchResponse r4 = client().prepareSearch("idx")
+ .setSize(0)
+ .addAggregation(
+ terms("terms1").collectMode(SubAggCollectionMode.DEPTH_FIRST)
+ .field("s_value")
+ .size(2)
+ .subAggregation(AggregationBuilders.filters("filter", QueryBuilders.boolQuery()))
+ )
+ .get();
+
+ assertNotNull(r1.getAggregations().get("terms1"));
+ assertNotNull(r2.getAggregations().get("terms1"));
+ assertNotNull(r3.getAggregations().get("terms1"));
+ assertNotNull(r4.getAggregations().get("terms1"));
+
+ Terms terms = r1.getAggregations().get("terms1");
+ Bucket b1 = terms.getBucketByKey("val0");
+ InternalFilters f1 = b1.getAggregations().get("filter");
+ long docCount1 = f1.getBuckets().get(0).getDocCount();
+ Bucket b2 = terms.getBucketByKey("val1");
+ InternalFilters f2 = b2.getAggregations().get("filter");
+ long docCount2 = f2.getBuckets().get(0).getDocCount();
+
+ for (SearchResponse response : new SearchResponse[] { r2, r3, r4 }) {
+ terms = response.getAggregations().get("terms1");
+ f1 = terms.getBucketByKey(b1.getKeyAsString()).getAggregations().get("filter");
+ f2 = terms.getBucketByKey(b2.getKeyAsString()).getAggregations().get("filter");
+ assertEquals(docCount1, f1.getBuckets().get(0).getDocCount());
+ assertEquals(docCount2, f2.getBuckets().get(0).getDocCount());
+ }
+ }
+
/**
* Make sure that a request using a deterministic script or not using a script get cached.
* Ensure requests using nondeterministic scripts do not get cached.
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java
index db4ee3571d141..b2ed689622e7d 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java
@@ -34,6 +34,7 @@
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
import org.opensearch.action.index.IndexRequestBuilder;
import org.opensearch.action.search.SearchResponse;
import org.opensearch.common.settings.Settings;
@@ -59,6 +60,7 @@
import static java.util.Collections.emptyMap;
import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
+import static org.opensearch.search.SearchService.CARDINALITY_AGGREGATION_PRUNING_THRESHOLD;
import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
import static org.opensearch.search.aggregations.AggregationBuilders.cardinality;
import static org.opensearch.search.aggregations.AggregationBuilders.global;
@@ -255,6 +257,36 @@ public void testSingleValuedString() throws Exception {
assertCount(count, numDocs);
}
+ public void testDisableDynamicPruning() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value"))
+ .get();
+ assertSearchResponse(response);
+
+ Cardinality count1 = response.getAggregations().get("cardinality");
+
+ final ClusterUpdateSettingsResponse updateSettingResponse = client().admin()
+ .cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(Settings.builder().put(CARDINALITY_AGGREGATION_PRUNING_THRESHOLD.getKey(), 0))
+ .get();
+ assertEquals(updateSettingResponse.getTransientSettings().get(CARDINALITY_AGGREGATION_PRUNING_THRESHOLD.getKey()), "0");
+
+ response = client().prepareSearch("idx")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value"))
+ .get();
+ assertSearchResponse(response);
+ Cardinality count2 = response.getAggregations().get("cardinality");
+
+ assertEquals(count1, count2);
+
+ client().admin()
+ .cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(Settings.builder().putNull(CARDINALITY_AGGREGATION_PRUNING_THRESHOLD.getKey()))
+ .get();
+ }
+
public void testSingleValuedNumeric() throws Exception {
SearchResponse response = client().prepareSearch("idx")
.addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField()))
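
The new testDisableDynamicPruning case flips the cardinality pruning threshold to 0 through a transient cluster setting, verifies the aggregation result is unchanged, and then clears the setting. A slightly more defensive variant (sketch only, same test helpers as above) would restore the setting in a finally block so a failed assertion cannot leak the transient setting into later tests:

    try {
        client().admin().cluster().prepareUpdateSettings()
            .setTransientSettings(Settings.builder().put(CARDINALITY_AGGREGATION_PRUNING_THRESHOLD.getKey(), 0))
            .get();
        // ... run the search with pruning disabled and compare against the baseline count ...
    } finally {
        // always clear the transient setting, even if an assertion above failed
        client().admin().cluster().prepareUpdateSettings()
            .setTransientSettings(Settings.builder().putNull(CARDINALITY_AGGREGATION_PRUNING_THRESHOLD.getKey()))
            .get();
    }
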
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java
index 01ad06757640c..a58db51780826 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java
@@ -1914,8 +1914,14 @@ public void testRangeQueryWithTimeZone() throws Exception {
* Test range with a custom locale, e.g. "de" in this case. Documents here mention the day of week
* as "Mi" for "Mittwoch (Wednesday" and "Do" for "Donnerstag (Thursday)" and the month in the query
* as "Dez" for "Dezember (December)".
+ * Note: this test currently needs the JVM arg `-Djava.locale.providers=SPI,COMPAT` to be set.
+ * When running with gradle this is done implicitly through the BuildPlugin, but when running from
+ * an IDE this might need to be set manually in the run configuration. See also CONTRIBUTING.md section
+ * on "Configuring IDEs And Running Tests".
*/
public void testRangeQueryWithLocaleMapping() throws Exception {
+ assert ("SPI,COMPAT".equals(System.getProperty("java.locale.providers"))) : "`-Djava.locale.providers=SPI,COMPAT` needs to be set";
+
assertAcked(
prepareCreate("test").setMapping(
jsonBuilder().startObject()
@@ -1932,21 +1938,17 @@ public void testRangeQueryWithLocaleMapping() throws Exception {
indexRandom(
true,
- client().prepareIndex("test").setId("1").setSource("date_field", "Mi., 06 Dez. 2000 02:55:00 -0800"),
- client().prepareIndex("test").setId("2").setSource("date_field", "Do., 07 Dez. 2000 02:55:00 -0800")
+ client().prepareIndex("test").setId("1").setSource("date_field", "Mi, 06 Dez 2000 02:55:00 -0800"),
+ client().prepareIndex("test").setId("2").setSource("date_field", "Do, 07 Dez 2000 02:55:00 -0800")
);
SearchResponse searchResponse = client().prepareSearch("test")
- .setQuery(
- QueryBuilders.rangeQuery("date_field").gte("Di., 05 Dez. 2000 02:55:00 -0800").lte("Do., 07 Dez. 2000 00:00:00 -0800")
- )
+ .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Do, 07 Dez 2000 00:00:00 -0800"))
.get();
assertHitCount(searchResponse, 1L);
searchResponse = client().prepareSearch("test")
- .setQuery(
- QueryBuilders.rangeQuery("date_field").gte("Di., 05 Dez. 2000 02:55:00 -0800").lte("Fr., 08 Dez. 2000 00:00:00 -0800")
- )
+ .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Fr, 08 Dez 2000 00:00:00 -0800"))
.get();
assertHitCount(searchResponse, 2L);
}
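
The dependence on `-Djava.locale.providers=SPI,COMPAT` comes from the locale data itself: the German short day and month names differ between the CLDR and COMPAT providers (with or without trailing dots), which is why the indexed documents and range bounds above dropped the dots together with this change. A small standalone check (plain JDK, illustrative only) makes the active provider and its formatting visible:

    import java.time.ZonedDateTime;
    import java.time.ZoneOffset;
    import java.time.format.DateTimeFormatter;
    import java.util.Locale;

    public class LocaleProviderCheck {
        public static void main(String[] args) {
            // The printed short names differ depending on which locale data provider is active,
            // which is exactly what the JVM flag controls.
            System.out.println("java.locale.providers = " + System.getProperty("java.locale.providers"));
            DateTimeFormatter fmt = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss Z", Locale.GERMAN);
            System.out.println(fmt.format(ZonedDateTime.of(2000, 12, 6, 2, 55, 0, 0, ZoneOffset.ofHours(-8))));
        }
    }
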
diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java
index 5e2b62614fc47..16c15f553951c 100644
--- a/server/src/main/java/org/opensearch/action/ActionModule.java
+++ b/server/src/main/java/org/opensearch/action/ActionModule.java
@@ -441,6 +441,7 @@
import org.opensearch.rest.action.cat.RestTemplatesAction;
import org.opensearch.rest.action.cat.RestThreadPoolAction;
import org.opensearch.rest.action.document.RestBulkAction;
+import org.opensearch.rest.action.document.RestBulkStreamingAction;
import org.opensearch.rest.action.document.RestDeleteAction;
import org.opensearch.rest.action.document.RestGetAction;
import org.opensearch.rest.action.document.RestGetSourceAction;
@@ -887,6 +888,7 @@ public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster) {
registerHandler.accept(new RestTermVectorsAction());
registerHandler.accept(new RestMultiTermVectorsAction());
registerHandler.accept(new RestBulkAction(settings));
+ registerHandler.accept(new RestBulkStreamingAction(settings));
registerHandler.accept(new RestUpdateAction());
registerHandler.accept(new RestSearchAction());
diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java
index 9bf4a4b1e18f1..f0fc05c595d6f 100644
--- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java
+++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java
@@ -51,6 +51,7 @@
import org.opensearch.core.action.ActionListener;
import org.opensearch.core.action.ShardOperationFailedException;
import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.core.tasks.resourcetracker.TaskResourceInfo;
import org.opensearch.search.SearchPhaseResult;
import org.opensearch.search.SearchShardTarget;
import org.opensearch.search.internal.AliasFilter;
@@ -469,6 +470,10 @@ private void onRequestEnd(SearchRequestContext searchRequestContext) {
this.searchRequestContext.getSearchRequestOperationsListener().onRequestEnd(this, searchRequestContext);
}
+ private void onRequestFailure(SearchRequestContext searchRequestContext) {
+ this.searchRequestContext.getSearchRequestOperationsListener().onRequestFailure(this, searchRequestContext);
+ }
+
private void executePhase(SearchPhase phase) {
Span phaseSpan = tracer.startSpan(SpanCreationContext.server().name("[phase/" + phase.getName() + "]"));
try (final SpanScope scope = tracer.withSpanInScope(phaseSpan)) {
@@ -507,6 +512,7 @@ ShardSearchFailure[] buildShardFailures() {
private void onShardFailure(final int shardIndex, @Nullable SearchShardTarget shard, final SearchShardIterator shardIt, Exception e) {
// we always add the shard failure for a specific shard instance
// we do make sure to clean it on a successful response from a shard
+ setPhaseResourceUsages();
onShardFailure(shardIndex, shard, e);
SearchShardTarget nextShard = FailAwareWeightedRouting.getInstance()
.findNext(shardIt, clusterState, e, () -> totalOps.incrementAndGet());
@@ -618,9 +624,15 @@ protected void onShardResult(Result result, SearchShardIterator shardIt) {
if (logger.isTraceEnabled()) {
logger.trace("got first-phase result from {}", result != null ? result.getSearchShardTarget() : null);
}
+ this.setPhaseResourceUsages();
results.consumeResult(result, () -> onShardResultConsumed(result, shardIt));
}
+ public void setPhaseResourceUsages() {
+ TaskResourceInfo taskResourceUsage = searchRequestContext.getTaskResourceUsageSupplier().get();
+ searchRequestContext.recordPhaseResourceUsage(taskResourceUsage);
+ }
+
private void onShardResultConsumed(Result result, SearchShardIterator shardIt) {
successfulOps.incrementAndGet();
// clean a previous error on this shard group (note, this code will be serialized on the same shardIndex value level
@@ -751,6 +763,7 @@ public void sendSearchResponse(InternalSearchResponse internalSearchResponse, At
@Override
public final void onPhaseFailure(SearchPhase phase, String msg, Throwable cause) {
+ setPhaseResourceUsages();
if (currentPhaseHasLifecycle) {
this.searchRequestContext.getSearchRequestOperationsListener().onPhaseFailure(this, cause);
}
@@ -780,6 +793,7 @@ private void raisePhaseFailure(SearchPhaseExecutionException exception) {
});
}
Releasables.close(releasables);
+ onRequestFailure(searchRequestContext);
listener.onFailure(exception);
}
diff --git a/server/src/main/java/org/opensearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/opensearch/action/search/FetchSearchPhase.java
index ebb2f33f8f37d..2ad7f8a29896c 100644
--- a/server/src/main/java/org/opensearch/action/search/FetchSearchPhase.java
+++ b/server/src/main/java/org/opensearch/action/search/FetchSearchPhase.java
@@ -240,6 +240,7 @@ private void executeFetch(
public void innerOnResponse(FetchSearchResult result) {
try {
progressListener.notifyFetchResult(shardIndex);
+ context.setPhaseResourceUsages();
counter.onResult(result);
} catch (Exception e) {
context.onPhaseFailure(FetchSearchPhase.this, "", e);
@@ -254,6 +255,7 @@ public void onFailure(Exception e) {
e
);
progressListener.notifyFetchFailure(shardIndex, shardTarget, e);
+ context.setPhaseResourceUsages();
counter.onFailure(shardIndex, shardTarget, e);
} finally {
// the search context might not be cleared on the node where the fetch was executed for example
diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java
index df451e0745e3c..55f2a22749e70 100644
--- a/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java
+++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java
@@ -150,4 +150,9 @@ default void sendReleaseSearchContext(
* Registers a {@link Releasable} that will be closed when the search request finishes or fails.
*/
void addReleasable(Releasable releasable);
+
+ /**
+ * Set the resource usage info for this phase
+ */
+ void setPhaseResourceUsages();
}
diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java b/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java
index b8bbde65ca6bc..111d9c64550b3 100644
--- a/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java
+++ b/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java
@@ -8,13 +8,20 @@
package org.opensearch.action.search;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.search.TotalHits;
import org.opensearch.common.annotation.InternalApi;
+import org.opensearch.core.tasks.resourcetracker.TaskResourceInfo;
+import java.util.ArrayList;
import java.util.EnumMap;
import java.util.HashMap;
+import java.util.List;
import java.util.Locale;
import java.util.Map;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.function.Supplier;
/**
* This class holds request-level context for search queries at the coordinator node
@@ -23,6 +30,7 @@
*/
@InternalApi
public class SearchRequestContext {
+ private static final Logger logger = LogManager.getLogger();
private final SearchRequestOperationsListener searchRequestOperationsListener;
private long absoluteStartNanos;
private final Map phaseTookMap;
@@ -30,13 +38,21 @@ public class SearchRequestContext {
private final EnumMap shardStats;
private final SearchRequest searchRequest;
-
- SearchRequestContext(final SearchRequestOperationsListener searchRequestOperationsListener, final SearchRequest searchRequest) {
+ private final LinkedBlockingQueue<TaskResourceInfo> phaseResourceUsage;
+ private final Supplier<TaskResourceInfo> taskResourceUsageSupplier;
+
+ SearchRequestContext(
+ final SearchRequestOperationsListener searchRequestOperationsListener,
+ final SearchRequest searchRequest,
+ final Supplier<TaskResourceInfo> taskResourceUsageSupplier
+ ) {
this.searchRequestOperationsListener = searchRequestOperationsListener;
this.absoluteStartNanos = System.nanoTime();
this.phaseTookMap = new HashMap<>();
this.shardStats = new EnumMap<>(ShardStatsFieldNames.class);
this.searchRequest = searchRequest;
+ this.phaseResourceUsage = new LinkedBlockingQueue<>();
+ this.taskResourceUsageSupplier = taskResourceUsageSupplier;
}
SearchRequestOperationsListener getSearchRequestOperationsListener() {
@@ -107,6 +123,24 @@ String formattedShardStats() {
);
}
}
+
+ public Supplier<TaskResourceInfo> getTaskResourceUsageSupplier() {
+ return taskResourceUsageSupplier;
+ }
+
+ public void recordPhaseResourceUsage(TaskResourceInfo usage) {
+ if (usage != null) {
+ this.phaseResourceUsage.add(usage);
+ }
+ }
+
+ public List<TaskResourceInfo> getPhaseResourceUsage() {
+ return new ArrayList<>(phaseResourceUsage);
+ }
+
+ public SearchRequest getRequest() {
+ return searchRequest;
+ }
}
enum ShardStatsFieldNames {
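
SearchRequestContext now carries a Supplier<TaskResourceInfo> plus a thread-safe queue: each phase, and each shard success or failure callback (see AbstractSearchAsyncAction above), polls the supplier and appends the snapshot, and the coordinator drains the accumulated list when the request ends or fails. A stripped-down, self-contained sketch of that pattern (illustrative names, not the production classes):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.function.Supplier;

    final class PhaseUsageRecorder<T> {
        private final LinkedBlockingQueue<T> usages = new LinkedBlockingQueue<>();
        private final Supplier<T> supplier;

        PhaseUsageRecorder(Supplier<T> supplier) {
            this.supplier = supplier;
        }

        void record() {                       // called from each phase / shard callback
            T usage = supplier.get();
            if (usage != null) {              // supplier may return null if tracking is disabled
                usages.add(usage);
            }
        }

        List<T> drain() {                     // called once when the request ends or fails
            return new ArrayList<>(usages);
        }
    }
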
diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java
index 53efade174502..61f19977ae5ce 100644
--- a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java
+++ b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java
@@ -41,16 +41,18 @@ protected SearchRequestOperationsListener(final boolean enabled) {
this.enabled = enabled;
}
- protected abstract void onPhaseStart(SearchPhaseContext context);
+ protected void onPhaseStart(SearchPhaseContext context) {};
- protected abstract void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext);
+ protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {};
- protected abstract void onPhaseFailure(SearchPhaseContext context, Throwable cause);
+ protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) {};
protected void onRequestStart(SearchRequestContext searchRequestContext) {}
protected void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {}
+ protected void onRequestFailure(SearchPhaseContext context, SearchRequestContext searchRequestContext) {}
+
protected boolean isEnabled(SearchRequest searchRequest) {
return isEnabled();
}
@@ -133,6 +135,17 @@ public void onRequestEnd(SearchPhaseContext context, SearchRequestContext search
}
}
+ @Override
+ public void onRequestFailure(SearchPhaseContext context, SearchRequestContext searchRequestContext) {
+ for (SearchRequestOperationsListener listener : listeners) {
+ try {
+ listener.onRequestFailure(context, searchRequestContext);
+ } catch (Exception e) {
+ logger.warn(() -> new ParameterizedMessage("onRequestFailure listener [{}] failed", listener), e);
+ }
+ }
+ }
+
public List getListeners() {
return listeners;
}
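
With onPhaseStart/onPhaseEnd/onPhaseFailure now having no-op default bodies, a listener only has to override the hooks it cares about, and the new onRequestFailure callback is fanned out through the composite listener shown above. A minimal sketch of a listener that only counts failed requests (using the boolean constructor visible in this hunk; everything else is illustrative):

    import java.util.concurrent.atomic.AtomicLong;

    class FailureCountingListener extends SearchRequestOperationsListener {
        private final AtomicLong failures = new AtomicLong();

        FailureCountingListener() {
            super(true);   // always enabled
        }

        @Override
        protected void onRequestFailure(SearchPhaseContext context, SearchRequestContext searchRequestContext) {
            failures.incrementAndGet();
        }

        long failureCount() {
            return failures.get();
        }
    }
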
diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java
index 143b01af3f62f..6e380775355a2 100644
--- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java
+++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java
@@ -87,6 +87,7 @@
import org.opensearch.search.profile.SearchProfileShardResults;
import org.opensearch.tasks.CancellableTask;
import org.opensearch.tasks.Task;
+import org.opensearch.tasks.TaskResourceTrackingService;
import org.opensearch.telemetry.metrics.MetricsRegistry;
import org.opensearch.telemetry.tracing.Span;
import org.opensearch.telemetry.tracing.SpanBuilder;
@@ -186,6 +187,7 @@ public class TransportSearchAction extends HandledTransportAction) SearchRequest::new);
this.client = client;
@@ -224,6 +227,7 @@ public TransportSearchAction(
clusterService.getClusterSettings()
.addSettingsUpdateConsumer(SEARCH_QUERY_METRICS_ENABLED_SETTING, this::setSearchQueryMetricsEnabled);
this.tracer = tracer;
+ this.taskResourceTrackingService = taskResourceTrackingService;
}
private void setSearchQueryMetricsEnabled(boolean searchQueryMetricsEnabled) {
@@ -451,7 +455,11 @@ private void executeRequest(
logger,
TraceableSearchRequestOperationsListener.create(tracer, requestSpan)
);
- SearchRequestContext searchRequestContext = new SearchRequestContext(requestOperationsListeners, originalSearchRequest);
+ SearchRequestContext searchRequestContext = new SearchRequestContext(
+ requestOperationsListeners,
+ originalSearchRequest,
+ taskResourceTrackingService::getTaskResourceUsageFromThreadContext
+ );
searchRequestContext.getSearchRequestOperationsListener().onRequestStart(searchRequestContext);
PipelinedRequest searchRequest;
diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java
index f56c906db1002..c7fd263bda56a 100644
--- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java
+++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java
@@ -84,7 +84,6 @@
import org.opensearch.common.settings.Setting;
import org.opensearch.common.settings.Setting.Property;
import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.common.util.set.Sets;
import org.opensearch.core.ParseField;
@@ -384,9 +383,7 @@ public static Collection createAllocationDeciders(
addAllocationDecider(deciders, new AwarenessAllocationDecider(settings, clusterSettings));
addAllocationDecider(deciders, new NodeLoadAwareAllocationDecider(settings, clusterSettings));
addAllocationDecider(deciders, new TargetPoolAllocationDecider());
- if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING)) {
- addAllocationDecider(deciders, new RemoteStoreMigrationAllocationDecider(settings, clusterSettings));
- }
+ addAllocationDecider(deciders, new RemoteStoreMigrationAllocationDecider(settings, clusterSettings));
clusterPlugins.stream()
.flatMap(p -> p.createAllocationDeciders(settings, clusterSettings).stream())
diff --git a/server/src/main/java/org/opensearch/cluster/DiffableUtils.java b/server/src/main/java/org/opensearch/cluster/DiffableUtils.java
index a38fc81bebc08..d21cd354bf659 100644
--- a/server/src/main/java/org/opensearch/cluster/DiffableUtils.java
+++ b/server/src/main/java/org/opensearch/cluster/DiffableUtils.java
@@ -494,6 +494,18 @@ public void writeDiff(Diff value, StreamOutput out) throws IOException {
* @opensearch.internal
*/
public abstract static class NonDiffableValueSerializer<K, V> implements ValueSerializer<K, V> {
+ private static final NonDiffableValueSerializer<Object, Object> ABSTRACT_INSTANCE = new NonDiffableValueSerializer<>() {
+ @Override
+ public void write(Object value, StreamOutput out) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Object read(StreamInput in, Object key) {
+ throw new UnsupportedOperationException();
+ }
+ };
+
@Override
public boolean supportsDiffableValues() {
return false;
@@ -513,6 +525,10 @@ public void writeDiff(Diff value, StreamOutput out) throws IOException {
public Diff readDiff(StreamInput in, K key) throws IOException {
throw new UnsupportedOperationException();
}
+
+ public static NonDiffableValueSerializer getAbstractInstance() {
+ return ABSTRACT_INSTANCE;
+ }
}
/**
diff --git a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java
index 72a3519aca6f8..4c76858107ed8 100644
--- a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java
+++ b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java
@@ -45,6 +45,7 @@
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
+import java.util.Objects;
/**
* Information passed during repository cleanup
@@ -118,6 +119,24 @@ public Version getMinimalSupportedVersion() {
return LegacyESVersion.fromId(7040099);
}
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ RepositoryCleanupInProgress that = (RepositoryCleanupInProgress) o;
+ return entries.equals(that.entries);
+ }
+
+ @Override
+ public int hashCode() {
+ return 31 + entries.hashCode();
+ }
+
/**
* Entry in the collection.
*
@@ -155,6 +174,23 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeLong(repositoryStateId);
}
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ RepositoryCleanupInProgress.Entry that = (RepositoryCleanupInProgress.Entry) o;
+ return repository.equals(that.repository) && repositoryStateId == that.repositoryStateId;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(repository, repositoryStateId);
+ }
+
@Override
public String toString() {
return "{" + repository + '}' + '{' + repositoryStateId + '}';
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java
index 987a3e3ffa7d3..7fa63ae8abc62 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java
@@ -39,6 +39,7 @@
import org.opensearch.cluster.metadata.Metadata;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
import org.opensearch.common.util.io.IOUtils;
import java.io.Closeable;
@@ -52,6 +53,7 @@
import java.util.Set;
import static org.opensearch.cluster.coordination.Coordinator.ZEN1_BWC_TERM;
+import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL;
import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled;
/**
@@ -79,6 +81,7 @@ public class CoordinationState {
private VotingConfiguration lastPublishedConfiguration;
private VoteCollection publishVotes;
private final boolean isRemoteStateEnabled;
+ private final boolean isRemotePublicationEnabled;
public CoordinationState(
DiscoveryNode localNode,
@@ -102,6 +105,12 @@ public CoordinationState(
.getLastAcceptedConfiguration();
this.publishVotes = new VoteCollection();
this.isRemoteStateEnabled = isRemoteStoreClusterStateEnabled(settings);
+ this.isRemotePublicationEnabled = FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL)
+ && localNode.isRemoteStatePublicationEnabled();
+ }
+
+ public boolean isRemotePublicationEnabled() {
+ return isRemotePublicationEnabled;
}
public long getCurrentTerm() {
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java
index f53e6837a67f5..87f02c6891be6 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java
@@ -85,6 +85,7 @@
import org.opensearch.discovery.PeerFinder;
import org.opensearch.discovery.SeedHostsProvider;
import org.opensearch.discovery.SeedHostsResolver;
+import org.opensearch.gateway.remote.RemoteClusterStateService;
import org.opensearch.monitor.NodeHealthService;
import org.opensearch.monitor.StatusInfo;
import org.opensearch.node.remotestore.RemoteStoreNodeService;
@@ -209,7 +210,8 @@ public Coordinator(
NodeHealthService nodeHealthService,
PersistedStateRegistry persistedStateRegistry,
RemoteStoreNodeService remoteStoreNodeService,
- ClusterManagerMetrics clusterManagerMetrics
+ ClusterManagerMetrics clusterManagerMetrics,
+ RemoteClusterStateService remoteClusterStateService
) {
this.settings = settings;
this.transportService = transportService;
@@ -261,7 +263,8 @@ public Coordinator(
transportService,
namedWriteableRegistry,
this::handlePublishRequest,
- this::handleApplyCommit
+ this::handleApplyCommit,
+ remoteClusterStateService
);
this.leaderChecker = new LeaderChecker(
settings,
@@ -1330,7 +1333,9 @@ assert getLocalNode().equals(clusterState.getNodes().get(getLocalNode().getId())
+ clusterState;
final PublicationTransportHandler.PublicationContext publicationContext = publicationHandler.newPublicationContext(
- clusterChangedEvent
+ clusterChangedEvent,
+ coordinationState.get().isRemotePublicationEnabled(),
+ persistedStateRegistry
);
final PublishRequest publishRequest = coordinationState.get().handleClientValue(clusterState);
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java
index 1fdaeead0d28d..36eabd51ffda1 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java
@@ -40,6 +40,7 @@
import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.Diff;
import org.opensearch.cluster.IncompatibleClusterStateVersionException;
+import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.cluster.node.DiscoveryNodes;
import org.opensearch.core.action.ActionListener;
@@ -47,6 +48,9 @@
import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.transport.TransportResponse;
+import org.opensearch.gateway.GatewayMetaState.RemotePersistedState;
+import org.opensearch.gateway.remote.ClusterMetadataManifest;
+import org.opensearch.gateway.remote.RemoteClusterStateService;
import org.opensearch.threadpool.ThreadPool;
import org.opensearch.transport.BytesTransportRequest;
import org.opensearch.transport.TransportChannel;
@@ -74,6 +78,7 @@ public class PublicationTransportHandler {
private static final Logger logger = LogManager.getLogger(PublicationTransportHandler.class);
public static final String PUBLISH_STATE_ACTION_NAME = "internal:cluster/coordination/publish_state";
+ public static final String PUBLISH_REMOTE_STATE_ACTION_NAME = "internal:cluster/coordination/publish_remote_state";
public static final String COMMIT_STATE_ACTION_NAME = "internal:cluster/coordination/commit_state";
private final TransportService transportService;
@@ -97,16 +102,19 @@ public class PublicationTransportHandler {
private final TransportRequestOptions stateRequestOptions = TransportRequestOptions.builder()
.withType(TransportRequestOptions.Type.STATE)
.build();
+ private final RemoteClusterStateService remoteClusterStateService;
public PublicationTransportHandler(
TransportService transportService,
NamedWriteableRegistry namedWriteableRegistry,
Function<PublishRequest, PublishWithJoinResponse> handlePublishRequest,
- BiConsumer<ApplyCommitRequest, ActionListener<Void>> handleApplyCommit
+ BiConsumer<ApplyCommitRequest, ActionListener<Void>> handleApplyCommit,
+ RemoteClusterStateService remoteClusterStateService
) {
this.transportService = transportService;
this.namedWriteableRegistry = namedWriteableRegistry;
this.handlePublishRequest = handlePublishRequest;
+ this.remoteClusterStateService = remoteClusterStateService;
transportService.registerRequestHandler(
PUBLISH_STATE_ACTION_NAME,
@@ -117,6 +125,15 @@ public PublicationTransportHandler(
(request, channel, task) -> channel.sendResponse(handleIncomingPublishRequest(request))
);
+ transportService.registerRequestHandler(
+ PUBLISH_REMOTE_STATE_ACTION_NAME,
+ ThreadPool.Names.GENERIC,
+ false,
+ false,
+ RemotePublishRequest::new,
+ (request, channel, task) -> channel.sendResponse(handleIncomingRemotePublishRequest(request))
+ );
+
transportService.registerRequestHandler(
COMMIT_STATE_ACTION_NAME,
ThreadPool.Names.GENERIC,
@@ -211,6 +228,74 @@ private PublishWithJoinResponse handleIncomingPublishRequest(BytesTransportReque
}
}
+ // package private for testing
+ PublishWithJoinResponse handleIncomingRemotePublishRequest(RemotePublishRequest request) throws IOException {
+ if (transportService.getLocalNode().equals(request.getSourceNode())) {
+ return acceptRemoteStateOnLocalNode(request);
+ }
+ // TODO Make cluster state download non-blocking: https://github.com/opensearch-project/OpenSearch/issues/14102
+ ClusterMetadataManifest manifest = remoteClusterStateService.getClusterMetadataManifestByFileName(
+ request.getClusterUUID(),
+ request.getManifestFile()
+ );
+ if (manifest == null) {
+ throw new IllegalStateException("Publication failed as manifest was not found for " + request);
+ }
+ boolean applyFullState = false;
+ final ClusterState lastSeen = lastSeenClusterState.get();
+ if (lastSeen == null) {
+ logger.debug(() -> "Diff cannot be applied as there is no last cluster state");
+ applyFullState = true;
+ } else if (manifest.getDiffManifest() == null) {
+ logger.trace(() -> "There is no diff in the manifest");
+ applyFullState = true;
+ } else if (manifest.getDiffManifest().getFromStateUUID().equals(lastSeen.stateUUID()) == false) {
+ logger.debug(() -> "Last cluster state not compatible with the diff");
+ applyFullState = true;
+ }
+
+ if (applyFullState == true) {
+ logger.debug(
+ () -> new ParameterizedMessage(
+ "Downloading full cluster state for term {}, version {}, stateUUID {}",
+ manifest.getClusterTerm(),
+ manifest.getStateVersion(),
+ manifest.getStateUUID()
+ )
+ );
+ ClusterState clusterState = remoteClusterStateService.getClusterStateForManifest(
+ request.getClusterName(),
+ manifest,
+ transportService.getLocalNode().getId(),
+ true
+ );
+ fullClusterStateReceivedCount.incrementAndGet();
+ final PublishWithJoinResponse response = acceptState(clusterState);
+ lastSeenClusterState.set(clusterState);
+ return response;
+ } else {
+ logger.debug(
+ () -> new ParameterizedMessage(
+ "Downloading diff cluster state for term {}, version {}, previousUUID {}, current UUID {}",
+ manifest.getClusterTerm(),
+ manifest.getStateVersion(),
+ manifest.getDiffManifest().getFromStateUUID(),
+ manifest.getStateUUID()
+ )
+ );
+ ClusterState clusterState = remoteClusterStateService.getClusterStateUsingDiff(
+ request.getClusterName(),
+ manifest,
+ lastSeen,
+ transportService.getLocalNode().getId()
+ );
+ compatibleClusterStateDiffReceivedCount.incrementAndGet();
+ final PublishWithJoinResponse response = acceptState(clusterState);
+ lastSeenClusterState.compareAndSet(lastSeen, clusterState);
+ return response;
+ }
+ }
+
private PublishWithJoinResponse acceptState(ClusterState incomingState) {
// if the state is coming from the current node, use original request instead (see currentPublishRequestToSelf for explanation)
if (transportService.getLocalNode().equals(incomingState.nodes().getClusterManagerNode())) {
@@ -224,8 +309,35 @@ private PublishWithJoinResponse acceptState(ClusterState incomingState) {
return handlePublishRequest.apply(new PublishRequest(incomingState));
}
- public PublicationContext newPublicationContext(ClusterChangedEvent clusterChangedEvent) {
- final PublicationContext publicationContext = new PublicationContext(clusterChangedEvent);
+ private PublishWithJoinResponse acceptRemoteStateOnLocalNode(RemotePublishRequest remotePublishRequest) {
+ final PublishRequest publishRequest = currentPublishRequestToSelf.get();
+ if (publishRequest == null
+ || publishRequest.getAcceptedState().coordinationMetadata().term() != remotePublishRequest.term
+ || publishRequest.getAcceptedState().version() != remotePublishRequest.version) {
+ logger.debug(
+ () -> new ParameterizedMessage(
+ "Publication failure for current publish request : {} and remote publish request: {}",
+ publishRequest,
+ remotePublishRequest
+ )
+ );
+ throw new IllegalStateException("publication to self failed for " + remotePublishRequest);
+ }
+ PublishWithJoinResponse publishWithJoinResponse = handlePublishRequest.apply(publishRequest);
+ lastSeenClusterState.set(publishRequest.getAcceptedState());
+ return publishWithJoinResponse;
+ }
+
+ public PublicationContext newPublicationContext(
+ ClusterChangedEvent clusterChangedEvent,
+ boolean isRemotePublicationEnabled,
+ PersistedStateRegistry persistedStateRegistry
+ ) {
+ final PublicationContext publicationContext = new PublicationContext(
+ clusterChangedEvent,
+ isRemotePublicationEnabled,
+ persistedStateRegistry
+ );
// Build the serializations we expect to need now, early in the process, so that an error during serialization fails the publication
// straight away. This isn't watertight since we send diffs on a best-effort basis and may fall back to sending a full state (and
@@ -234,6 +346,16 @@ public PublicationContext newPublicationContext(ClusterChangedEvent clusterChang
return publicationContext;
}
+ // package private for testing
+ void setCurrentPublishRequestToSelf(PublishRequest publishRequest) {
+ this.currentPublishRequestToSelf.set(publishRequest);
+ }
+
+ // package private for testing
+ void setLastSeenClusterState(ClusterState clusterState) {
+ this.lastSeenClusterState.set(clusterState);
+ }
+
private static BytesReference serializeFullClusterState(ClusterState clusterState, Version nodeVersion) throws IOException {
final BytesReference serializedState = CompressedStreamUtils.createCompressedStream(nodeVersion, stream -> {
stream.writeBoolean(true);
@@ -270,12 +392,20 @@ public class PublicationContext {
private final boolean sendFullVersion;
private final Map<Version, BytesReference> serializedStates = new HashMap<>();
private final Map<Version, BytesReference> serializedDiffs = new HashMap<>();
+ private final boolean sendRemoteState;
+ private final PersistedStateRegistry persistedStateRegistry;
- PublicationContext(ClusterChangedEvent clusterChangedEvent) {
+ PublicationContext(
+ ClusterChangedEvent clusterChangedEvent,
+ boolean isRemotePublicationEnabled,
+ PersistedStateRegistry persistedStateRegistry
+ ) {
discoveryNodes = clusterChangedEvent.state().nodes();
newState = clusterChangedEvent.state();
previousState = clusterChangedEvent.previousState();
sendFullVersion = previousState.getBlocks().disableStatePersistence();
+ sendRemoteState = isRemotePublicationEnabled;
+ this.persistedStateRegistry = persistedStateRegistry;
}
void buildDiffAndSerializeStates() {
@@ -339,7 +469,11 @@ public void onFailure(Exception e) {
} else {
responseActionListener = listener;
}
- if (sendFullVersion || previousState.nodes().nodeExists(destination) == false) {
+ // TODO Decide to send remote state before starting publication by checking remote publication on all nodes
+ if (sendRemoteState && destination.isRemoteStatePublicationEnabled()) {
+ logger.trace("sending remote cluster state version [{}] to [{}]", newState.version(), destination);
+ sendRemoteClusterState(destination, publishRequest.getAcceptedState(), responseActionListener);
+ } else if (sendFullVersion || previousState.nodes().nodeExists(destination) == false) {
logger.trace("sending full cluster state version [{}] to [{}]", newState.version(), destination);
sendFullClusterState(destination, responseActionListener);
} else {
@@ -384,6 +518,61 @@ public String executor() {
);
}
+ private void sendRemoteClusterState(
+ final DiscoveryNode destination,
+ final ClusterState clusterState,
+ final ActionListener<PublishWithJoinResponse> listener
+ ) {
+ try {
+ final String manifestFileName = ((RemotePersistedState) persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE))
+ .getLastUploadedManifestFile();
+ final RemotePublishRequest remotePublishRequest = new RemotePublishRequest(
+ discoveryNodes.getLocalNode(),
+ clusterState.term(),
+ clusterState.getVersion(),
+ clusterState.getClusterName().value(),
+ clusterState.metadata().clusterUUID(),
+ manifestFileName
+ );
+ final Consumer transportExceptionHandler = exp -> {
+ logger.debug(() -> new ParameterizedMessage("failed to send remote cluster state to {}", destination), exp);
+ listener.onFailure(exp);
+ };
+ final TransportResponseHandler<PublishWithJoinResponse> responseHandler = new TransportResponseHandler<>() {
+
+ @Override
+ public PublishWithJoinResponse read(StreamInput in) throws IOException {
+ return new PublishWithJoinResponse(in);
+ }
+
+ @Override
+ public void handleResponse(PublishWithJoinResponse response) {
+ listener.onResponse(response);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ transportExceptionHandler.accept(exp);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+ };
+ transportService.sendRequest(
+ destination,
+ PUBLISH_REMOTE_STATE_ACTION_NAME,
+ remotePublishRequest,
+ stateRequestOptions,
+ responseHandler
+ );
+ } catch (Exception e) {
+ logger.warn(() -> new ParameterizedMessage("error sending remote cluster state to {}", destination), e);
+ listener.onFailure(e);
+ }
+ }
+
private void sendFullClusterState(DiscoveryNode destination, ActionListener listener) {
BytesReference bytes = serializedStates.get(destination.getVersion());
if (bytes == null) {
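
For reference, the branching in handleIncomingRemotePublishRequest above boils down to one question: can the diff in the manifest be applied to the state this node last accepted? A condensed sketch of that decision (restating the logic of the hunk, not new behavior):

    // Fall back to downloading the full cluster state whenever a diff cannot be applied safely.
    static boolean shouldApplyFullState(ClusterState lastSeen, ClusterMetadataManifest manifest) {
        if (lastSeen == null) {
            return true;                                     // nothing to apply a diff on top of
        }
        if (manifest.getDiffManifest() == null) {
            return true;                                     // manifest carries no diff at all
        }
        // the diff must have been computed from exactly the state this node last accepted
        return manifest.getDiffManifest().getFromStateUUID().equals(lastSeen.stateUUID()) == false;
    }
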
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/RemotePublishRequest.java b/server/src/main/java/org/opensearch/cluster/coordination/RemotePublishRequest.java
new file mode 100644
index 0000000000000..9461c5ee63627
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/coordination/RemotePublishRequest.java
@@ -0,0 +1,85 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cluster.coordination;
+
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Send the publish request with the remote cluster state details
+ * @opensearch.internal
+ */
+public class RemotePublishRequest extends TermVersionRequest {
+
+ private final String clusterName;
+ private final String clusterUUID;
+ private final String manifestFile;
+
+ public RemotePublishRequest(
+ DiscoveryNode sourceNode,
+ long term,
+ long version,
+ String clusterName,
+ String clusterUUID,
+ String manifestFile
+ ) {
+ super(sourceNode, term, version);
+ this.clusterName = clusterName;
+ this.clusterUUID = clusterUUID;
+ this.manifestFile = manifestFile;
+ }
+
+ public RemotePublishRequest(StreamInput in) throws IOException {
+ super(in);
+ this.clusterName = in.readString();
+ this.clusterUUID = in.readString();
+ this.manifestFile = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(clusterName);
+ out.writeString(clusterUUID);
+ out.writeString(manifestFile);
+ }
+
+ @Override
+ public String toString() {
+ return "RemotePublishRequest{"
+ + "term="
+ + term
+ + ", version="
+ + version
+ + ", clusterName="
+ + clusterName
+ + ", clusterUUID="
+ + clusterUUID
+ + ", sourceNode="
+ + sourceNode
+ + ", manifestFile="
+ + manifestFile
+ + '}';
+ }
+
+ public String getClusterName() {
+ return clusterName;
+ }
+
+ public String getClusterUUID() {
+ return clusterUUID;
+ }
+
+ public String getManifestFile() {
+ return manifestFile;
+ }
+}
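
Since RemotePublishRequest is a new wire-level request, a serialization round-trip check in the usual transport-test style would look roughly like the following. This is a sketch only: `localNode`, `assertEquals` and the BytesStreamOutput/StreamInput helpers are assumed to come from the standard test fixtures.

    // Round-trip the request through the wire format and verify the remote-state fields survive.
    RemotePublishRequest original = new RemotePublishRequest(
        localNode, 1L, 5L, "test-cluster", "test-cluster-uuid", "manifest-file-path"
    );
    try (BytesStreamOutput out = new BytesStreamOutput()) {
        original.writeTo(out);
        try (StreamInput in = out.bytes().streamInput()) {
            RemotePublishRequest copy = new RemotePublishRequest(in);
            assertEquals(original.getClusterName(), copy.getClusterName());
            assertEquals(original.getClusterUUID(), copy.getClusterUUID());
            assertEquals(original.getManifestFile(), copy.getManifestFile());
        }
    }
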
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java b/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java
index a8102182576ff..5865891c8a7f9 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java
@@ -66,7 +66,7 @@ public static DiffableStringMap readFrom(StreamInput in) throws IOException {
return map.isEmpty() ? EMPTY : new DiffableStringMap(map);
}
- DiffableStringMap(final Map map) {
+ public DiffableStringMap(final Map map) {
this.innerMap = Collections.unmodifiableMap(map);
}
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java
index e1aa5626f36c1..a0ef8de07fbf2 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java
@@ -973,10 +973,18 @@ public static boolean isSettingsMetadataEqual(Metadata metadata1, Metadata metad
return metadata1.persistentSettings.equals(metadata2.persistentSettings);
}
+ public static boolean isTransientSettingsMetadataEqual(Metadata metadata1, Metadata metadata2) {
+ return metadata1.transientSettings.equals(metadata2.transientSettings);
+ }
+
public static boolean isTemplatesMetadataEqual(Metadata metadata1, Metadata metadata2) {
return metadata1.templates.equals(metadata2.templates);
}
+ public static boolean isHashesOfConsistentSettingsEqual(Metadata metadata1, Metadata metadata2) {
+ return metadata1.hashesOfConsistentSettings.equals(metadata2.hashesOfConsistentSettings);
+ }
+
public static boolean isCustomMetadataEqual(Metadata metadata1, Metadata metadata2) {
int customCount1 = 0;
for (Map.Entry cursor : metadata1.customs.entrySet()) {
diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java
index 5226e9570ac14..690621c2e7bca 100644
--- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java
+++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java
@@ -61,7 +61,9 @@
import java.util.stream.Stream;
import static org.opensearch.node.NodeRoleSettings.NODE_ROLES_SETTING;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY;
import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY;
/**
* A discovery node represents a node that is part of the cluster.
@@ -470,6 +472,18 @@ public boolean isRemoteStoreNode() {
return this.getAttributes().keySet().stream().anyMatch(key -> key.startsWith(REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX));
}
+ /**
+ * Returns whether remote cluster state publication is enabled on this node.
+ * @return true if the node has both the remote cluster state repository attribute and the remote routing table repository attribute
+ */
+ public boolean isRemoteStatePublicationEnabled() {
+ return this.getAttributes()
+ .keySet()
+ .stream()
+ .anyMatch(key -> (key.equals(REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY)))
+ && this.getAttributes().keySet().stream().anyMatch(key -> key.equals(REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY));
+ }
+
/**
* Returns a set of all the roles that the node has. The roles are returned in sorted order by the role name.
*
diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java
index fd8cbea42c12f..479143fa9a2f0 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java
@@ -738,9 +738,7 @@ public boolean equals(Object o) {
IndexShardRoutingTable that = (IndexShardRoutingTable) o;
if (!shardId.equals(that.shardId)) return false;
- if (!shards.equals(that.shards)) return false;
-
- return true;
+ return shards.size() == that.shards.size() && shards.containsAll(that.shards) && that.shards.containsAll(shards);
}
@Override
diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java
index e4095a84be081..6c7b94f316da2 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java
@@ -79,7 +79,7 @@ public class RoutingTable implements Iterable<IndexShardRoutingTable>, Diffable<RoutingTable> {
private final Map<String, IndexRoutingTable> indicesRouting;
- private RoutingTable(long version, final Map<String, IndexRoutingTable> indicesRouting) {
+ public RoutingTable(long version, final Map<String, IndexRoutingTable> indicesRouting) {
this.version = version;
this.indicesRouting = Collections.unmodifiableMap(indicesRouting);
}
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java
index 3864e282a310b..5ad3a2fd47ce3 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java
@@ -584,10 +584,7 @@ private void allocateExistingUnassignedShards(RoutingAllocation allocation) {
/*
Use batch mode if enabled and there is no custom allocator set for Allocation service
*/
- Boolean batchModeEnabled = EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(settings);
- if (batchModeEnabled
- && allocation.nodes().getMinNodeVersion().onOrAfter(Version.V_2_14_0)
- && existingShardsAllocators.size() == 2) {
+ if (isBatchModeEnabled(allocation)) {
/*
If we do not have any custom allocator set then we will be using ShardsBatchGatewayAllocator
Currently AllocationService will not run any custom Allocator that implements allocateAllUnassignedShards
@@ -724,13 +721,24 @@ private AllocateUnassignedDecision explainUnassignedShardAllocation(ShardRouting
private ExistingShardsAllocator getAllocatorForShard(ShardRouting shardRouting, RoutingAllocation routingAllocation) {
assert assertInitialized();
- final String allocatorName = ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.get(
- routingAllocation.metadata().getIndexSafe(shardRouting.index()).getSettings()
- );
+ String allocatorName;
+ if (isBatchModeEnabled(routingAllocation)) {
+ allocatorName = ShardsBatchGatewayAllocator.ALLOCATOR_NAME;
+ } else {
+ allocatorName = ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.get(
+ routingAllocation.metadata().getIndexSafe(shardRouting.index()).getSettings()
+ );
+ }
final ExistingShardsAllocator existingShardsAllocator = existingShardsAllocators.get(allocatorName);
return existingShardsAllocator != null ? existingShardsAllocator : new NotFoundAllocator(allocatorName);
}
+ private boolean isBatchModeEnabled(RoutingAllocation routingAllocation) {
+ return EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(settings)
+ && routingAllocation.nodes().getMinNodeVersion().onOrAfter(Version.V_2_14_0)
+ && existingShardsAllocators.size() == 2;
+ }
+
private boolean assertInitialized() {
assert existingShardsAllocators != null : "must have set allocators first";
return true;
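Side note (not part of the diff): batch mode applies only when the EXISTING_SHARDS_ALLOCATOR_BATCH_MODE node setting is enabled, every node is on 2.14 or later, and no custom existing-shards allocator is registered (only the two built-in allocators are present). A hedged sketch of opting a node in:

// Illustrative only; the setting constant comes from ExistingShardsAllocator, as statically imported above.
Settings nodeSettings = Settings.builder()
    .put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.getKey(), true)
    .build();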
diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java
new file mode 100644
index 0000000000000..cc1b0713393f3
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java
@@ -0,0 +1,381 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cluster.routing.remote;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.apache.lucene.store.IndexInput;
+import org.opensearch.action.LatchedActionListener;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.DiffableUtils;
+import org.opensearch.cluster.routing.IndexRoutingTable;
+import org.opensearch.cluster.routing.RoutingTable;
+import org.opensearch.common.CheckedRunnable;
+import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer;
+import org.opensearch.common.blobstore.BlobContainer;
+import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.common.blobstore.stream.write.WritePriority;
+import org.opensearch.common.blobstore.transfer.RemoteTransferContainer;
+import org.opensearch.common.blobstore.transfer.stream.OffsetRangeIndexInputStream;
+import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.common.lifecycle.AbstractLifecycleComponent;
+import org.opensearch.common.lucene.store.ByteArrayIndexInput;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.io.IOUtils;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.index.Index;
+import org.opensearch.gateway.remote.ClusterMetadataManifest;
+import org.opensearch.gateway.remote.RemoteStateTransferException;
+import org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable;
+import org.opensearch.index.remote.RemoteStoreEnums;
+import org.opensearch.index.remote.RemoteStorePathStrategy;
+import org.opensearch.index.remote.RemoteStoreUtils;
+import org.opensearch.node.Node;
+import org.opensearch.node.remotestore.RemoteStoreNodeAttribute;
+import org.opensearch.repositories.RepositoriesService;
+import org.opensearch.repositories.Repository;
+import org.opensearch.repositories.blobstore.BlobStoreRepository;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.ExecutorService;
+import java.util.function.Function;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+
+import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteRoutingTableEnabled;
+
+/**
+ * A Service which provides APIs to upload and download routing table from remote store.
+ *
+ * @opensearch.internal
+ */
+public class InternalRemoteRoutingTableService extends AbstractLifecycleComponent implements RemoteRoutingTableService {
+
+ /**
+ * This setting is used to set the remote routing table store blob store path type strategy.
+ */
+ public static final Setting<RemoteStoreEnums.PathType> REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING = new Setting<>(
+ "cluster.remote_store.routing_table.path_type",
+ RemoteStoreEnums.PathType.HASHED_PREFIX.toString(),
+ RemoteStoreEnums.PathType::parseString,
+ Setting.Property.NodeScope,
+ Setting.Property.Dynamic
+ );
+
+ /**
+ * This setting is used to set the remote routing table store blob store path hash algorithm strategy.
+ * This setting will come to effect if the {@link #REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING}
+ * is either {@code HASHED_PREFIX} or {@code HASHED_INFIX}.
+ */
+ public static final Setting<RemoteStoreEnums.PathHashAlgorithm> REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING = new Setting<>(
+ "cluster.remote_store.routing_table.path_hash_algo",
+ RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64.toString(),
+ RemoteStoreEnums.PathHashAlgorithm::parseString,
+ Setting.Property.NodeScope,
+ Setting.Property.Dynamic
+ );
+
+ public static final String INDEX_ROUTING_PATH_TOKEN = "index-routing";
+ public static final String INDEX_ROUTING_FILE_PREFIX = "index_routing";
+ public static final String INDEX_ROUTING_METADATA_PREFIX = "indexRouting--";
+
+ private static final Logger logger = LogManager.getLogger(InternalRemoteRoutingTableService.class);
+ private final Settings settings;
+ private final Supplier<RepositoriesService> repositoriesService;
+ private BlobStoreRepository blobStoreRepository;
+ private RemoteStoreEnums.PathType pathType;
+ private RemoteStoreEnums.PathHashAlgorithm pathHashAlgo;
+ private ThreadPool threadPool;
+
+ public InternalRemoteRoutingTableService(
+ Supplier<RepositoriesService> repositoriesService,
+ Settings settings,
+ ClusterSettings clusterSettings,
+ ThreadPool threadpool
+ ) {
+ assert isRemoteRoutingTableEnabled(settings) : "Remote routing table is not enabled";
+ this.repositoriesService = repositoriesService;
+ this.settings = settings;
+ this.pathType = clusterSettings.get(REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING);
+ this.pathHashAlgo = clusterSettings.get(REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING);
+ clusterSettings.addSettingsUpdateConsumer(REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING, this::setPathTypeSetting);
+ clusterSettings.addSettingsUpdateConsumer(REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING, this::setPathHashAlgoSetting);
+ this.threadPool = threadpool;
+ }
+
+ private void setPathTypeSetting(RemoteStoreEnums.PathType pathType) {
+ this.pathType = pathType;
+ }
+
+ private void setPathHashAlgoSetting(RemoteStoreEnums.PathHashAlgorithm pathHashAlgo) {
+ this.pathHashAlgo = pathHashAlgo;
+ }
+
+ public List<IndexRoutingTable> getIndicesRouting(RoutingTable routingTable) {
+ return new ArrayList<>(routingTable.indicesRouting().values());
+ }
+
+ /**
+ * Returns diff between the two routing tables, which includes upserts and deletes.
+ * @param before previous routing table
+ * @param after current routing table
+ * @return diff of the previous and current routing table
+ */
+ public DiffableUtils.MapDiff<String, IndexRoutingTable, Map<String, IndexRoutingTable>> getIndicesRoutingMapDiff(
+ RoutingTable before,
+ RoutingTable after
+ ) {
+ return DiffableUtils.diff(
+ before.getIndicesRouting(),
+ after.getIndicesRouting(),
+ DiffableUtils.getStringKeySerializer(),
+ CUSTOM_ROUTING_TABLE_VALUE_SERIALIZER
+ );
+ }
+
+ /**
+ * Create async action for writing one {@code IndexRoutingTable} to remote store
+ * @param clusterState current cluster state
+ * @param indexRouting indexRoutingTable to write to remote store
+ * @param latchedActionListener listener for handling async action response
+ * @param clusterBasePath base path for remote file
+ * @return returns runnable async action
+ */
+ public CheckedRunnable<IOException> getIndexRoutingAsyncAction(
+ ClusterState clusterState,
+ IndexRoutingTable indexRouting,
+ LatchedActionListener<ClusterMetadataManifest.UploadedMetadata> latchedActionListener,
+ BlobPath clusterBasePath
+ ) {
+
+ BlobPath indexRoutingPath = clusterBasePath.add(INDEX_ROUTING_PATH_TOKEN);
+ BlobPath path = pathType.path(
+ RemoteStorePathStrategy.PathInput.builder().basePath(indexRoutingPath).indexUUID(indexRouting.getIndex().getUUID()).build(),
+ pathHashAlgo
+ );
+ final BlobContainer blobContainer = blobStoreRepository.blobStore().blobContainer(path);
+
+ final String fileName = getIndexRoutingFileName(clusterState.term(), clusterState.version());
+
+ ActionListener<Void> completionListener = ActionListener.wrap(
+ resp -> latchedActionListener.onResponse(
+ new ClusterMetadataManifest.UploadedIndexMetadata(
+ indexRouting.getIndex().getName(),
+ indexRouting.getIndex().getUUID(),
+ path.buildAsString() + fileName,
+ INDEX_ROUTING_METADATA_PREFIX
+ )
+ ),
+ ex -> latchedActionListener.onFailure(
+ new RemoteStateTransferException("Exception in writing index to remote store: " + indexRouting.getIndex().toString(), ex)
+ )
+ );
+
+ return () -> uploadIndex(indexRouting, fileName, blobContainer, completionListener);
+ }
+
+ /**
+ * Combines IndicesRoutingMetadata from previous manifest and current uploaded indices, removes deleted indices.
+ * @param previousManifest previous manifest, used to get all existing indices routing paths
+ * @param indicesRoutingUploaded current uploaded indices routings
+ * @param indicesRoutingToDelete indices to delete
+ * @return combined list of metadata
+ */
+ public List<ClusterMetadataManifest.UploadedIndexMetadata> getAllUploadedIndicesRouting(
+ ClusterMetadataManifest previousManifest,
+ List<ClusterMetadataManifest.UploadedIndexMetadata> indicesRoutingUploaded,
+ List<String> indicesRoutingToDelete
+ ) {
+ final Map<String, ClusterMetadataManifest.UploadedIndexMetadata> allUploadedIndicesRouting = previousManifest.getIndicesRouting()
+ .stream()
+ .collect(Collectors.toMap(ClusterMetadataManifest.UploadedIndexMetadata::getIndexName, Function.identity()));
+
+ indicesRoutingUploaded.forEach(
+ uploadedIndexRouting -> allUploadedIndicesRouting.put(uploadedIndexRouting.getIndexName(), uploadedIndexRouting)
+ );
+ indicesRoutingToDelete.forEach(allUploadedIndicesRouting::remove);
+
+ return new ArrayList<>(allUploadedIndicesRouting.values());
+ }
+
+ private void uploadIndex(
+ IndexRoutingTable indexRouting,
+ String fileName,
+ BlobContainer blobContainer,
+ ActionListener<Void> completionListener
+ ) {
+ RemoteIndexRoutingTable indexRoutingInput = new RemoteIndexRoutingTable(indexRouting);
+ BytesReference bytesInput = null;
+ try (BytesStreamOutput streamOutput = new BytesStreamOutput()) {
+ indexRoutingInput.writeTo(streamOutput);
+ bytesInput = streamOutput.bytes();
+ } catch (IOException e) {
+ logger.error("Failed to serialize IndexRoutingTable for [{}]: [{}]", indexRouting, e);
+ completionListener.onFailure(e);
+ return;
+ }
+
+ if (blobContainer instanceof AsyncMultiStreamBlobContainer == false) {
+ try {
+ blobContainer.writeBlob(fileName, bytesInput.streamInput(), bytesInput.length(), true);
+ completionListener.onResponse(null);
+ } catch (IOException e) {
+ logger.error("Failed to write IndexRoutingTable to remote store for indexRouting [{}]: [{}]", indexRouting, e);
+ completionListener.onFailure(e);
+ }
+ return;
+ }
+
+ try (IndexInput input = new ByteArrayIndexInput("indexrouting", BytesReference.toBytes(bytesInput))) {
+ try (
+ RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer(
+ fileName,
+ fileName,
+ input.length(),
+ true,
+ WritePriority.URGENT,
+ (size, position) -> new OffsetRangeIndexInputStream(input, size, position),
+ null,
+ false
+ )
+ ) {
+ ((AsyncMultiStreamBlobContainer) blobContainer).asyncBlobUpload(
+ remoteTransferContainer.createWriteContext(),
+ completionListener
+ );
+ } catch (IOException e) {
+ logger.error("Failed to write IndexRoutingTable to remote store for indexRouting [{}]: [{}]", indexRouting, e);
+ completionListener.onFailure(e);
+ }
+ } catch (IOException e) {
+ logger.error(
+ "Failed to create transfer object for IndexRoutingTable for remote store upload for indexRouting [{}]: [{}]",
+ indexRouting,
+ e
+ );
+ completionListener.onFailure(e);
+ }
+ }
+
+ @Override
+ public CheckedRunnable<IOException> getAsyncIndexRoutingReadAction(
+ String uploadedFilename,
+ Index index,
+ LatchedActionListener<IndexRoutingTable> latchedActionListener
+ ) {
+ int idx = uploadedFilename.lastIndexOf("/");
+ String blobFileName = uploadedFilename.substring(idx + 1);
+ BlobContainer blobContainer = blobStoreRepository.blobStore()
+ .blobContainer(BlobPath.cleanPath().add(uploadedFilename.substring(0, idx)));
+
+ return () -> readAsync(
+ blobContainer,
+ blobFileName,
+ index,
+ threadPool.executor(ThreadPool.Names.REMOTE_STATE_READ),
+ ActionListener.wrap(
+ response -> latchedActionListener.onResponse(response.getIndexRoutingTable()),
+ latchedActionListener::onFailure
+ )
+ );
+ }
+
+ private void readAsync(
+ BlobContainer blobContainer,
+ String name,
+ Index index,
+ ExecutorService executorService,
+ ActionListener<RemoteIndexRoutingTable> listener
+ ) {
+ executorService.execute(() -> {
+ try {
+ listener.onResponse(read(blobContainer, name, index));
+ } catch (Exception e) {
+ listener.onFailure(e);
+ }
+ });
+ }
+
+ private RemoteIndexRoutingTable read(BlobContainer blobContainer, String path, Index index) {
+ try {
+ return new RemoteIndexRoutingTable(blobContainer.readBlob(path), index);
+ } catch (IOException | AssertionError e) {
+ logger.error(() -> new ParameterizedMessage("RoutingTable read failed for path {}", path), e);
+ throw new RemoteStateTransferException("Failed to read RemoteRoutingTable from Manifest with error ", e);
+ }
+ }
+
+ @Override
+ public List<ClusterMetadataManifest.UploadedIndexMetadata> getUpdatedIndexRoutingTableMetadata(
+ List<String> updatedIndicesRouting,
+ List<ClusterMetadataManifest.UploadedIndexMetadata> allIndicesRouting
+ ) {
+ return updatedIndicesRouting.stream().map(idx -> {
+ Optional<ClusterMetadataManifest.UploadedIndexMetadata> uploadedIndexMetadataOptional = allIndicesRouting.stream()
+ .filter(idx2 -> idx2.getIndexName().equals(idx))
+ .findFirst();
+ assert uploadedIndexMetadataOptional.isPresent() == true;
+ return uploadedIndexMetadataOptional.get();
+ }).collect(Collectors.toList());
+ }
+
+ private String getIndexRoutingFileName(long term, long version) {
+ return String.join(
+ DELIMITER,
+ INDEX_ROUTING_FILE_PREFIX,
+ RemoteStoreUtils.invertLong(term),
+ RemoteStoreUtils.invertLong(version),
+ RemoteStoreUtils.invertLong(System.currentTimeMillis())
+ );
+ }
+
+ @Override
+ protected void doClose() throws IOException {
+ if (blobStoreRepository != null) {
+ IOUtils.close(blobStoreRepository);
+ }
+ }
+
+ @Override
+ protected void doStart() {
+ assert isRemoteRoutingTableEnabled(settings) == true : "Remote routing table is not enabled";
+ final String remoteStoreRepo = settings.get(
+ Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY
+ );
+ assert remoteStoreRepo != null : "Remote routing table repository is not configured";
+ final Repository repository = repositoriesService.get().repository(remoteStoreRepo);
+ assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository";
+ blobStoreRepository = (BlobStoreRepository) repository;
+ }
+
+ @Override
+ protected void doStop() {}
+
+ @Override
+ public void deleteStaleIndexRoutingPaths(List<String> stalePaths) throws IOException {
+ try {
+ logger.debug(() -> "Deleting stale index routing files from remote - " + stalePaths);
+ blobStoreRepository.blobStore().blobContainer(BlobPath.cleanPath()).deleteBlobsIgnoringIfNotExists(stalePaths);
+ } catch (IOException e) {
+ logger.error(() -> new ParameterizedMessage("Failed to delete some stale index routing paths from {}", stalePaths), e);
+ throw e;
+ }
+ }
+
+}
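A minimal caller-side sketch of the async upload API above (assumed wiring, not from the diff; exceptions elided): the returned CheckedRunnable performs the upload, and the LatchedActionListener releases the latch when it completes or fails.

CountDownLatch latch = new CountDownLatch(1);
LatchedActionListener<ClusterMetadataManifest.UploadedMetadata> listener = new LatchedActionListener<>(
    ActionListener.wrap(
        uploaded -> logger.info("index routing uploaded"),
        e -> logger.error("index routing upload failed", e)
    ),
    latch
);
routingTableService.getIndexRoutingAsyncAction(clusterState, indexRoutingTable, listener, clusterBasePath).run();
latch.await();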
diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java
new file mode 100644
index 0000000000000..6236d107d0220
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java
@@ -0,0 +1,103 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cluster.routing.remote;
+
+import org.opensearch.action.LatchedActionListener;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.DiffableUtils;
+import org.opensearch.cluster.routing.IndexRoutingTable;
+import org.opensearch.cluster.routing.RoutingTable;
+import org.opensearch.common.CheckedRunnable;
+import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.common.lifecycle.AbstractLifecycleComponent;
+import org.opensearch.core.index.Index;
+import org.opensearch.gateway.remote.ClusterMetadataManifest;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Noop impl for RemoteRoutingTableService.
+ */
+public class NoopRemoteRoutingTableService extends AbstractLifecycleComponent implements RemoteRoutingTableService {
+
+ @Override
+ public List<IndexRoutingTable> getIndicesRouting(RoutingTable routingTable) {
+ return List.of();
+ }
+
+ @Override
+ public DiffableUtils.MapDiff<String, IndexRoutingTable, Map<String, IndexRoutingTable>> getIndicesRoutingMapDiff(
+ RoutingTable before,
+ RoutingTable after
+ ) {
+ return DiffableUtils.diff(Map.of(), Map.of(), DiffableUtils.getStringKeySerializer(), CUSTOM_ROUTING_TABLE_VALUE_SERIALIZER);
+ }
+
+ @Override
+ public CheckedRunnable<IOException> getIndexRoutingAsyncAction(
+ ClusterState clusterState,
+ IndexRoutingTable indexRouting,
+ LatchedActionListener<ClusterMetadataManifest.UploadedMetadata> latchedActionListener,
+ BlobPath clusterBasePath
+ ) {
+ // noop
+ return () -> {};
+ }
+
+ @Override
+ public List<ClusterMetadataManifest.UploadedIndexMetadata> getAllUploadedIndicesRouting(
+ ClusterMetadataManifest previousManifest,
+ List<ClusterMetadataManifest.UploadedIndexMetadata> indicesRoutingUploaded,
+ List<String> indicesRoutingToDelete
+ ) {
+ // noop
+ return List.of();
+ }
+
+ @Override
+ public CheckedRunnable<IOException> getAsyncIndexRoutingReadAction(
+ String uploadedFilename,
+ Index index,
+ LatchedActionListener<IndexRoutingTable> latchedActionListener
+ ) {
+ // noop
+ return () -> {};
+ }
+
+ @Override
+ public List<ClusterMetadataManifest.UploadedIndexMetadata> getUpdatedIndexRoutingTableMetadata(
+ List<String> updatedIndicesRouting,
+ List<ClusterMetadataManifest.UploadedIndexMetadata> allIndicesRouting
+ ) {
+ // noop
+ return List.of();
+ }
+
+ @Override
+ protected void doStart() {
+ // noop
+ }
+
+ @Override
+ protected void doStop() {
+ // noop
+ }
+
+ @Override
+ protected void doClose() throws IOException {
+ // noop
+ }
+
+ @Override
+ public void deleteStaleIndexRoutingPaths(List<String> stalePaths) throws IOException {
+ // noop
+ }
+}
diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java
index ba2208e17df1f..d455dfb58eabc 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java
@@ -8,60 +8,73 @@
package org.opensearch.cluster.routing.remote;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.opensearch.common.lifecycle.AbstractLifecycleComponent;
-import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.io.IOUtils;
-import org.opensearch.node.Node;
-import org.opensearch.node.remotestore.RemoteStoreNodeAttribute;
-import org.opensearch.repositories.RepositoriesService;
-import org.opensearch.repositories.Repository;
-import org.opensearch.repositories.blobstore.BlobStoreRepository;
+import org.opensearch.action.LatchedActionListener;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.DiffableUtils;
+import org.opensearch.cluster.routing.IndexRoutingTable;
+import org.opensearch.cluster.routing.RoutingTable;
+import org.opensearch.common.CheckedRunnable;
+import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.common.lifecycle.LifecycleComponent;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.index.Index;
+import org.opensearch.gateway.remote.ClusterMetadataManifest;
import java.io.IOException;
-import java.util.function.Supplier;
-
-import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteRoutingTableEnabled;
+import java.util.List;
+import java.util.Map;
/**
* A Service which provides APIs to upload and download routing table from remote store.
*
* @opensearch.internal
*/
-public class RemoteRoutingTableService extends AbstractLifecycleComponent {
+public interface RemoteRoutingTableService extends LifecycleComponent {
+ public static final DiffableUtils.NonDiffableValueSerializer<String, IndexRoutingTable> CUSTOM_ROUTING_TABLE_VALUE_SERIALIZER =
+ new DiffableUtils.NonDiffableValueSerializer<String, IndexRoutingTable>() {
+ @Override
+ public void write(IndexRoutingTable value, StreamOutput out) throws IOException {
+ value.writeTo(out);
+ }
+
+ @Override
+ public IndexRoutingTable read(StreamInput in, String key) throws IOException {
+ return IndexRoutingTable.readFrom(in);
+ }
+ };
+
+ List<IndexRoutingTable> getIndicesRouting(RoutingTable routingTable);
+
+ CheckedRunnable<IOException> getAsyncIndexRoutingReadAction(
+ String uploadedFilename,
+ Index index,
+ LatchedActionListener<IndexRoutingTable> latchedActionListener
+ );
- private static final Logger logger = LogManager.getLogger(RemoteRoutingTableService.class);
- private final Settings settings;
- private final Supplier<RepositoriesService> repositoriesService;
- private BlobStoreRepository blobStoreRepository;
+ List<ClusterMetadataManifest.UploadedIndexMetadata> getUpdatedIndexRoutingTableMetadata(
+ List<String> updatedIndicesRouting,
+ List<ClusterMetadataManifest.UploadedIndexMetadata> allIndicesRouting
+ );
- public RemoteRoutingTableService(Supplier<RepositoriesService> repositoriesService, Settings settings) {
- assert isRemoteRoutingTableEnabled(settings) : "Remote routing table is not enabled";
- this.repositoriesService = repositoriesService;
- this.settings = settings;
- }
+ DiffableUtils.MapDiff<String, IndexRoutingTable, Map<String, IndexRoutingTable>> getIndicesRoutingMapDiff(
+ RoutingTable before,
+ RoutingTable after
+ );
- @Override
- protected void doClose() throws IOException {
- if (blobStoreRepository != null) {
- IOUtils.close(blobStoreRepository);
- }
- }
+ CheckedRunnable<IOException> getIndexRoutingAsyncAction(
+ ClusterState clusterState,
+ IndexRoutingTable indexRouting,
+ LatchedActionListener<ClusterMetadataManifest.UploadedMetadata> latchedActionListener,
+ BlobPath clusterBasePath
+ );
- @Override
- protected void doStart() {
- assert isRemoteRoutingTableEnabled(settings) == true : "Remote routing table is not enabled";
- final String remoteStoreRepo = settings.get(
- Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY
- );
- assert remoteStoreRepo != null : "Remote routing table repository is not configured";
- final Repository repository = repositoriesService.get().repository(remoteStoreRepo);
- assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository";
- blobStoreRepository = (BlobStoreRepository) repository;
- }
+ List<ClusterMetadataManifest.UploadedIndexMetadata> getAllUploadedIndicesRouting(
+ ClusterMetadataManifest previousManifest,
+ List<ClusterMetadataManifest.UploadedIndexMetadata> indicesRoutingUploaded,
+ List<String> indicesRoutingToDelete
+ );
- @Override
- protected void doStop() {}
+ public void deleteStaleIndexRoutingPaths(List<String> stalePaths) throws IOException;
}
diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactory.java b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactory.java
new file mode 100644
index 0000000000000..82837191a30b7
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactory.java
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cluster.routing.remote;
+
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.repositories.RepositoriesService;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.util.function.Supplier;
+
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteRoutingTableEnabled;
+
+/**
+ * Factory to provide impl for RemoteRoutingTableService based on settings.
+ */
+public class RemoteRoutingTableServiceFactory {
+
+ /**
+ * Returns {@code InternalRemoteRoutingTableService} if the feature is enabled, otherwise {@code NoopRemoteRoutingTableService}
+ * @param repositoriesService repositoriesService
+ * @param settings settings
+ * @param clusterSettings clusterSettings
+ * @param threadPool threadPool
+ * @return RemoteRoutingTableService
+ */
+ public static RemoteRoutingTableService getService(
+ Supplier<RepositoriesService> repositoriesService,
+ Settings settings,
+ ClusterSettings clusterSettings,
+ ThreadPool threadPool
+ ) {
+ if (isRemoteRoutingTableEnabled(settings)) {
+ return new InternalRemoteRoutingTableService(repositoriesService, settings, clusterSettings, threadPool);
+ }
+ return new NoopRemoteRoutingTableService();
+ }
+}
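A hedged usage sketch (not from the diff); repositoriesService, settings, clusterSettings and threadPool are assumed to come from node construction:

// Callers get the remote-backed implementation only when the routing table repository attribute
// is configured; otherwise they transparently receive the no-op implementation.
RemoteRoutingTableService routingTableService = RemoteRoutingTableServiceFactory.getService(
    () -> repositoriesService,
    settings,
    clusterSettings,
    threadPool
);
routingTableService.start();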
diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java
index 2ac95178d2ff9..6234427445754 100644
--- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java
+++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java
@@ -62,6 +62,7 @@
import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor;
import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException;
+import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry;
import org.opensearch.telemetry.metrics.tags.Tags;
import org.opensearch.threadpool.Scheduler;
import org.opensearch.threadpool.ThreadPool;
@@ -125,6 +126,10 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
private NodeConnectionsService nodeConnectionsService;
private final ClusterManagerMetrics clusterManagerMetrics;
+ public ClusterApplierService(String nodeName, Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) {
+ this(nodeName, settings, clusterSettings, threadPool, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE));
+ }
+
public ClusterApplierService(
String nodeName,
Settings settings,
diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java
index eaedb36a59f1e..fa8c965b4d538 100644
--- a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java
+++ b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java
@@ -21,6 +21,11 @@
*/
@PublicApi(since = "2.2.0")
public class ClusterManagerService extends MasterService {
+
+ public ClusterManagerService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) {
+ super(settings, clusterSettings, threadPool);
+ }
+
public ClusterManagerService(
Settings settings,
ClusterSettings clusterSettings,
diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java
index fa61375e85c25..c3c48dd8b87ef 100644
--- a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java
+++ b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java
@@ -54,6 +54,7 @@
import org.opensearch.common.settings.Settings;
import org.opensearch.index.IndexingPressureService;
import org.opensearch.node.Node;
+import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry;
import org.opensearch.threadpool.ThreadPool;
import java.util.Collections;
@@ -92,6 +93,10 @@ public class ClusterService extends AbstractLifecycleComponent {
private IndexingPressureService indexingPressureService;
+ public ClusterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) {
+ this(settings, clusterSettings, threadPool, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE));
+ }
+
public ClusterService(
Settings settings,
ClusterSettings clusterSettings,
diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java
index 6436dcfe33003..686e9793a8fd3 100644
--- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java
+++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java
@@ -71,6 +71,7 @@
import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException;
import org.opensearch.discovery.Discovery;
import org.opensearch.node.Node;
+import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry;
import org.opensearch.telemetry.metrics.tags.Tags;
import org.opensearch.threadpool.Scheduler;
import org.opensearch.threadpool.ThreadPool;
@@ -140,6 +141,10 @@ public class MasterService extends AbstractLifecycleComponent {
private final ClusterStateStats stateStats;
private final ClusterManagerMetrics clusterManagerMetrics;
+ public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) {
+ this(settings, clusterSettings, threadPool, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE));
+ }
+
public MasterService(
Settings settings,
ClusterSettings clusterSettings,
diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java b/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java
index 6f3e8be7c28b8..68af77714a319 100644
--- a/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java
+++ b/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java
@@ -39,6 +39,7 @@
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
+import java.util.Objects;
/**
* The list of paths where a blob can reside. The contents of the paths are dependent upon the implementation of {@link BlobContainer}.
@@ -110,6 +111,19 @@ public BlobPath parent() {
}
}
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ BlobPath that = (BlobPath) o;
+ return Objects.equals(paths, that.paths);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hashCode(paths);
+ }
+
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
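With equals/hashCode in place, two BlobPath instances built from the same components now compare equal and can be used as map or set keys; a small sketch (not part of the diff):

BlobPath a = BlobPath.cleanPath().add("cluster-state").add("index-routing");
BlobPath b = BlobPath.cleanPath().add("cluster-state").add("index-routing");
assert a.equals(b) && a.hashCode() == b.hashCode(); // value semantics, not identity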
diff --git a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobContainer.java
index d0933741339d9..f58b99daec3c5 100644
--- a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobContainer.java
+++ b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobContainer.java
@@ -9,6 +9,7 @@
package org.opensearch.common.blobstore;
import org.opensearch.common.CheckedBiConsumer;
+import org.opensearch.common.annotation.ExperimentalApi;
import org.opensearch.common.crypto.CryptoHandler;
import org.opensearch.common.crypto.DecryptedRangedStreamProvider;
import org.opensearch.common.crypto.EncryptedHeaderContentSupplier;
@@ -50,6 +51,14 @@ public InputStream readBlob(String blobName) throws IOException {
return cryptoHandler.createDecryptingStream(inputStream);
}
+ @ExperimentalApi
+ @Override
+ public InputStreamWithMetadata readBlobWithMetadata(String blobName) throws IOException {
+ InputStreamWithMetadata inputStreamWithMetadata = blobContainer.readBlobWithMetadata(blobName);
+ InputStream decryptInputStream = cryptoHandler.createDecryptingStream(inputStreamWithMetadata.getInputStream());
+ return new InputStreamWithMetadata(decryptInputStream, inputStreamWithMetadata.getMetadata());
+ }
+
EncryptedHeaderContentSupplier getEncryptedHeaderContentSupplier(String blobName) {
return (start, end) -> {
byte[] buffer;
diff --git a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java
index c41641921c822..1214c6cdc7373 100644
--- a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java
+++ b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java
@@ -95,6 +95,11 @@ public Map<Metric, Map<String, Long>> extendedStats() {
return blobStore.extendedStats();
}
+ @Override
+ public boolean isBlobMetadataEnabled() {
+ return blobStore.isBlobMetadataEnabled();
+ }
+
/**
* Closes the EncryptedBlobStore by decrementing the reference count of the CryptoManager and closing the
* underlying BlobStore. This ensures proper cleanup of resources.
diff --git a/server/src/main/java/org/opensearch/common/cache/Cache.java b/server/src/main/java/org/opensearch/common/cache/Cache.java
index 6d346de25cadf..caae81e4387b4 100644
--- a/server/src/main/java/org/opensearch/common/cache/Cache.java
+++ b/server/src/main/java/org/opensearch/common/cache/Cache.java
@@ -36,9 +36,11 @@
import org.opensearch.common.collect.Tuple;
import org.opensearch.common.util.concurrent.ReleasableLock;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
+import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
@@ -396,7 +398,12 @@ private V get(K key, long now, Consumer<Entry<K, V>> onExpiration) {
if (entry == null) {
return null;
} else {
- promote(entry, now);
+ List<RemovalNotification<K, V>> removalNotifications = promote(entry, now).v2();
+ if (!removalNotifications.isEmpty()) {
+ for (RemovalNotification<K, V> removalNotification : removalNotifications) {
+ removalListener.onRemoval(removalNotification);
+ }
+ }
return entry.value;
}
}
@@ -446,8 +453,14 @@ private V compute(K key, CacheLoader<K, V> loader) throws ExecutionException {
BiFunction<? super Entry<K, V>, Throwable, ? extends V> handler = (ok, ex) -> {
if (ok != null) {
+ List<RemovalNotification<K, V>> removalNotifications = new ArrayList<>();
try (ReleasableLock ignored = lruLock.acquire()) {
- promote(ok, now);
+ removalNotifications = promote(ok, now).v2();
+ }
+ if (!removalNotifications.isEmpty()) {
+ for (RemovalNotification<K, V> removalNotification : removalNotifications) {
+ removalListener.onRemoval(removalNotification);
+ }
}
return ok.value;
} else {
@@ -512,16 +525,22 @@ private void put(K key, V value, long now) {
CacheSegment<K, V> segment = getCacheSegment(key);
Tuple<Entry<K, V>, Entry<K, V>> tuple = segment.put(key, value, now);
boolean replaced = false;
+ List<RemovalNotification<K, V>> removalNotifications = new ArrayList<>();
try (ReleasableLock ignored = lruLock.acquire()) {
if (tuple.v2() != null && tuple.v2().state == State.EXISTING) {
if (unlink(tuple.v2())) {
replaced = true;
}
}
- promote(tuple.v1(), now);
+ removalNotifications = promote(tuple.v1(), now).v2();
}
if (replaced) {
- removalListener.onRemoval(new RemovalNotification<>(tuple.v2().key, tuple.v2().value, RemovalReason.REPLACED));
+ removalNotifications.add(new RemovalNotification<>(tuple.v2().key, tuple.v2().value, RemovalReason.REPLACED));
+ }
+ if (!removalNotifications.isEmpty()) {
+ for (RemovalNotification<K, V> removalNotification : removalNotifications) {
+ removalListener.onRemoval(removalNotification);
+ }
}
}
@@ -767,8 +786,17 @@ public long getEvictions() {
}
}
- private boolean promote(Entry<K, V> entry, long now) {
+ /**
+ * Promotes the given entry to the head of the LRU list and evicts entries from the tail if the cache
+ * has grown past its size limit or the tail entries have expired.
+ * @param entry entry to be promoted
+ * @param now the current time
+ * @return a tuple: v1 signifies whether the entry got promoted, v2 is the list of removal
+ * notifications that the caller needs to handle.
+ */
+ private Tuple<Boolean, List<RemovalNotification<K, V>>> promote(Entry<K, V> entry, long now) {
boolean promoted = true;
+ List<RemovalNotification<K, V>> removalNotifications = new ArrayList<>();
try (ReleasableLock ignored = lruLock.acquire()) {
switch (entry.state) {
case DELETED:
@@ -782,10 +810,21 @@ private boolean promote(Entry<K, V> entry, long now) {
break;
}
if (promoted) {
- evict(now);
+ while (tail != null && shouldPrune(tail, now)) {
+ Entry<K, V> entryToBeRemoved = tail;
+ CacheSegment<K, V> segment = getCacheSegment(entryToBeRemoved.key);
+ if (segment != null) {
+ segment.remove(entryToBeRemoved.key, entryToBeRemoved.value, f -> {});
+ }
+ if (unlink(entryToBeRemoved)) {
+ removalNotifications.add(
+ new RemovalNotification<>(entryToBeRemoved.key, entryToBeRemoved.value, RemovalReason.EVICTED)
+ );
+ }
+ }
}
}
- return promoted;
+ return new Tuple<>(promoted, removalNotifications);
}
private void evict(long now) {
diff --git a/server/src/main/java/org/opensearch/common/cache/RemovalListener.java b/server/src/main/java/org/opensearch/common/cache/RemovalListener.java
index 68e1cdf6139e2..eaaaec2bb07e0 100644
--- a/server/src/main/java/org/opensearch/common/cache/RemovalListener.java
+++ b/server/src/main/java/org/opensearch/common/cache/RemovalListener.java
@@ -42,5 +42,10 @@
@ExperimentalApi
@FunctionalInterface
public interface RemovalListener<K, V> {
+
+ /**
+ * This may be called from multiple threads concurrently, so implementations must be thread-safe.
+ * @param notification removal notification for the removed entry.
+ */
void onRemoval(RemovalNotification<K, V> notification);
}
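Because notifications may now be delivered from whichever thread triggered the eviction (outside the LRU lock), listeners must tolerate concurrent calls. A minimal sketch, assuming a plain eviction counter is all that is needed:

// Hypothetical listener: LongAdder keeps the count correct without extra locking.
class EvictionCountingListener<K, V> implements RemovalListener<K, V> {
    private final java.util.concurrent.atomic.LongAdder evictions = new java.util.concurrent.atomic.LongAdder();

    @Override
    public void onRemoval(RemovalNotification<K, V> notification) {
        if (notification.getRemovalReason() == RemovalReason.EVICTED) {
            evictions.increment();
        }
    }

    long evictionCount() {
        return evictions.sum();
    }
}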
diff --git a/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java b/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java
index 632b2b70d61df..23fc9d3ad77cb 100644
--- a/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java
+++ b/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java
@@ -42,6 +42,8 @@ public AbstractRemoteWritableBlobEntity(
public abstract BlobPathParameters getBlobPathParameters();
+ public abstract String getType();
+
public String getFullBlobName() {
return blobName;
}
diff --git a/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityStore.java b/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityStore.java
index ccf7cafff1730..385c6f20ba58d 100644
--- a/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityStore.java
+++ b/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityStore.java
@@ -8,6 +8,7 @@
package org.opensearch.common.remote;
+import org.opensearch.common.annotation.ExperimentalApi;
import org.opensearch.core.action.ActionListener;
import java.io.IOException;
@@ -18,6 +19,7 @@
* @param <T> The object type which can be uploaded to or downloaded from remote storage.
* @param <U> The wrapper entity which provides methods for serializing/deserializing entity T.
*/
+@ExperimentalApi
public interface RemoteWritableEntityStore<T, U extends RemoteWriteableEntity<T>> {
public void writeAsync(U entity, ActionListener<Void> listener);
diff --git a/server/src/main/java/org/opensearch/common/remote/RemoteWriteableEntity.java b/server/src/main/java/org/opensearch/common/remote/RemoteWriteableEntity.java
index 778c24dce2e27..773ddce5b9cc8 100644
--- a/server/src/main/java/org/opensearch/common/remote/RemoteWriteableEntity.java
+++ b/server/src/main/java/org/opensearch/common/remote/RemoteWriteableEntity.java
@@ -8,6 +8,8 @@
package org.opensearch.common.remote;
+import org.opensearch.common.annotation.ExperimentalApi;
+
import java.io.IOException;
import java.io.InputStream;
@@ -17,6 +19,7 @@
*
* @param <T> The object type which can be uploaded to or downloaded from remote storage.
*/
+@ExperimentalApi
public interface RemoteWriteableEntity<T> {
/**
* @return An InputStream created by serializing the entity T
diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
index 297fc98764d07..7ea04acf00415 100644
--- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
@@ -77,6 +77,7 @@
import org.opensearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
+import org.opensearch.cluster.routing.remote.InternalRemoteRoutingTableService;
import org.opensearch.cluster.service.ClusterApplierService;
import org.opensearch.cluster.service.ClusterManagerService;
import org.opensearch.cluster.service.ClusterManagerTaskThrottler;
@@ -180,6 +181,10 @@
import java.util.Set;
import java.util.function.Predicate;
+import static org.opensearch.gateway.remote.RemoteGlobalMetadataManager.GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING;
+import static org.opensearch.gateway.remote.RemoteIndexMetadataManager.INDEX_METADATA_UPLOAD_TIMEOUT_SETTING;
+import static org.opensearch.gateway.remote.RemoteManifestManager.METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING;
+
/**
* Encapsulates all valid cluster level settings.
*
@@ -302,10 +307,12 @@ public void apply(Settings value, Settings current, Settings previous) {
RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING,
+ RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_RETRY_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING,
RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING,
RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING,
RecoverySettings.INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT,
+ RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING,
@@ -533,6 +540,7 @@ public void apply(Settings value, Settings current, Settings previous) {
SearchService.MAX_OPEN_PIT_CONTEXT,
SearchService.MAX_PIT_KEEPALIVE_SETTING,
SearchService.MAX_AGGREGATION_REWRITE_FILTERS,
+ SearchService.CARDINALITY_AGGREGATION_PRUNING_THRESHOLD,
CreatePitController.PIT_INIT_KEEP_ALIVE,
Node.WRITE_PORTS_FILE_SETTING,
Node.NODE_NAME_SETTING,
@@ -714,13 +722,16 @@ public void apply(Settings value, Settings current, Settings previous) {
// Remote cluster state settings
RemoteClusterStateCleanupManager.REMOTE_CLUSTER_STATE_CLEANUP_INTERVAL_SETTING,
RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING,
- RemoteClusterStateService.INDEX_METADATA_UPLOAD_TIMEOUT_SETTING,
- RemoteClusterStateService.GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING,
- RemoteClusterStateService.METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING,
+ INDEX_METADATA_UPLOAD_TIMEOUT_SETTING,
+ GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING,
+ METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING,
+ RemoteClusterStateService.REMOTE_STATE_READ_TIMEOUT_SETTING,
RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING,
RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING,
IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING,
IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING,
+ InternalRemoteRoutingTableService.REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING,
+ InternalRemoteRoutingTableService.REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING,
// Admission Control Settings
AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE,
@@ -743,7 +754,8 @@ public void apply(Settings value, Settings current, Settings previous) {
RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING,
RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING,
RemoteStoreSettings.CLUSTER_REMOTE_MAX_TRANSLOG_READERS,
- RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA
+ RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA,
+ SearchService.CLUSTER_ALLOW_DERIVED_FIELD_SETTING
)
)
);
diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
index 980c432774f6e..6fe8dec9c21b1 100644
--- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
@@ -237,7 +237,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
// Settings for concurrent segment search
IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING,
-
+ IndexSettings.ALLOW_DERIVED_FIELDS,
// validate that built-in similarities don't get redefined
Setting.groupSetting("index.similarity.", (s) -> {
Map<String, Settings> groups = s.getAsGroups();
diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java
index 82f43921d2d28..6c6e2f2d600f0 100644
--- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java
+++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java
@@ -23,7 +23,7 @@
*/
public class FeatureFlags {
/**
- * Gates the visibility of the remote store migration support from docrep .
+ * Gates the visibility of the remote store to docrep migration.
*/
public static final String REMOTE_STORE_MIGRATION_EXPERIMENTAL = "opensearch.experimental.feature.remote_store.migration.enabled";
diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java
index 6580b0e0085ef..906a27e9f398c 100644
--- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java
+++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java
@@ -483,6 +483,16 @@ public void addResponseHeader(final String key, final String value) {
addResponseHeader(key, value, v -> v);
}
+ /**
+ * Update the {@code value} for the specified {@code key}
+ *
+ * @param key the header name
+ * @param value the header value
+ */
+ public void updateResponseHeader(final String key, final String value) {
+ updateResponseHeader(key, value, v -> v);
+ }
+
/**
* Add the {@code value} for the specified {@code key} with the specified {@code uniqueValue} used for de-duplication. Any duplicate
* {@code value} after applying {@code uniqueValue} is ignored.
@@ -492,7 +502,19 @@ public void addResponseHeader(final String key, final String value) {
* @param uniqueValue the function that produces de-duplication values
*/
public void addResponseHeader(final String key, final String value, final Function<String, String> uniqueValue) {
- threadLocal.set(threadLocal.get().putResponse(key, value, uniqueValue, maxWarningHeaderCount, maxWarningHeaderSize));
+ threadLocal.set(threadLocal.get().putResponse(key, value, uniqueValue, maxWarningHeaderCount, maxWarningHeaderSize, false));
+ }
+
+ /**
+ * Update the {@code value} for the specified {@code key} with the specified {@code uniqueValue} used for de-duplication. Any duplicate
+ * {@code value} after applying {@code uniqueValue} is ignored.
+ *
+ * @param key the header name
+ * @param value the header value
+ * @param uniqueValue the function that produces de-duplication values
+ */
+ public void updateResponseHeader(final String key, final String value, final Function<String, String> uniqueValue) {
+ threadLocal.set(threadLocal.get().putResponse(key, value, uniqueValue, maxWarningHeaderCount, maxWarningHeaderSize, true));
}
/**
@@ -717,7 +739,8 @@ private ThreadContextStruct putResponse(
final String value,
final Function<String, String> uniqueValue,
final int maxWarningHeaderCount,
- final long maxWarningHeaderSize
+ final long maxWarningHeaderSize,
+ final boolean replaceExistingKey
) {
assert value != null;
long newWarningHeaderSize = warningHeadersSize;
@@ -759,8 +782,13 @@ private ThreadContextStruct putResponse(
if (existingValues.contains(uniqueValue.apply(value))) {
return this;
}
- // preserve insertion order
- final Set<String> newValues = Stream.concat(existingValues.stream(), Stream.of(value)).collect(LINKED_HASH_SET_COLLECTOR);
+ Set<String> newValues;
+ if (replaceExistingKey) {
+ newValues = Stream.of(value).collect(LINKED_HASH_SET_COLLECTOR);
+ } else {
+ // preserve insertion order
+ newValues = Stream.concat(existingValues.stream(), Stream.of(value)).collect(LINKED_HASH_SET_COLLECTOR);
+ }
newResponseHeaders = new HashMap<>(responseHeaders);
newResponseHeaders.put(key, Collections.unmodifiableSet(newValues));
} else {
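A hedged usage sketch (not from the diff) contrasting the new replace semantics with the existing append semantics; threadContext is assumed to be any live ThreadContext:

threadContext.addResponseHeader("X-Trace", "first");
threadContext.addResponseHeader("X-Trace", "second");      // appended; both values retained
threadContext.updateResponseHeader("X-Trace", "replaced"); // the whole value set is replaced
assert threadContext.getResponseHeaders().get("X-Trace").equals(java.util.List.of("replaced"));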
diff --git a/server/src/main/java/org/opensearch/common/xcontent/support/XContentHttpChunk.java b/server/src/main/java/org/opensearch/common/xcontent/support/XContentHttpChunk.java
new file mode 100644
index 0000000000000..15b63a0ac2030
--- /dev/null
+++ b/server/src/main/java/org/opensearch/common/xcontent/support/XContentHttpChunk.java
@@ -0,0 +1,65 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.xcontent.support;
+
+import org.opensearch.common.Nullable;
+import org.opensearch.common.lease.Releasable;
+import org.opensearch.core.common.bytes.BytesArray;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.http.HttpChunk;
+
+/**
+ * Wraps the instance of the {@link XContentBuilder} into {@link HttpChunk}
+ */
+public final class XContentHttpChunk implements HttpChunk {
+ private final BytesReference content;
+
+ /**
+ * Creates a new {@link HttpChunk} from {@link XContentBuilder}
+ * @param builder {@link XContentBuilder} instance
+ * @return new {@link HttpChunk} instance; if the passed {@link XContentBuilder} is {@code null}, a last empty {@link HttpChunk} will be returned
+ */
+ public static HttpChunk from(@Nullable final XContentBuilder builder) {
+ return new XContentHttpChunk(builder);
+ }
+
+ /**
+ * Creates a new last empty {@link HttpChunk}
+ * @return last empty {@link HttpChunk} instance
+ */
+ public static HttpChunk last() {
+ return new XContentHttpChunk(null);
+ }
+
+ private XContentHttpChunk(@Nullable final XContentBuilder builder) {
+ if (builder == null /* no content */) {
+ content = BytesArray.EMPTY;
+ } else {
+ content = BytesReference.bytes(builder);
+ }
+ }
+
+ @Override
+ public boolean isLast() {
+ return content == BytesArray.EMPTY;
+ }
+
+ @Override
+ public BytesReference content() {
+ return content;
+ }
+
+ @Override
+ public void close() {
+ if (content instanceof Releasable) {
+ ((Releasable) content).close();
+ }
+ }
+}
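A minimal producer-side sketch (assumed usage, not from the diff; exceptions elided): intermediate chunks wrap serialized XContent, and the stream is closed out with the empty last chunk.

XContentBuilder builder = XContentFactory.jsonBuilder().startObject().field("acknowledged", true).endObject();
HttpChunk body = XContentHttpChunk.from(builder);  // carries the serialized bytes, isLast() == false
HttpChunk terminator = XContentHttpChunk.last();   // empty content, isLast() == true
assert terminator.isLast() && body.isLast() == false;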
diff --git a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java
index 538dea5b2e60b..922e23b849d49 100644
--- a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java
+++ b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java
@@ -53,6 +53,7 @@
import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
import org.opensearch.core.common.transport.TransportAddress;
import org.opensearch.gateway.GatewayMetaState;
+import org.opensearch.gateway.remote.RemoteClusterStateService;
import org.opensearch.monitor.NodeHealthService;
import org.opensearch.node.remotestore.RemoteStoreNodeService;
import org.opensearch.plugins.DiscoveryPlugin;
@@ -135,7 +136,8 @@ public DiscoveryModule(
NodeHealthService nodeHealthService,
PersistedStateRegistry persistedStateRegistry,
RemoteStoreNodeService remoteStoreNodeService,
- ClusterManagerMetrics clusterManagerMetrics
+ ClusterManagerMetrics clusterManagerMetrics,
+ RemoteClusterStateService remoteClusterStateService
) {
final Collection<BiConsumer<DiscoveryNode, ClusterState>> joinValidators = new ArrayList<>();
final Map<String, Supplier<SeedHostsProvider>> hostProviders = new HashMap<>();
@@ -214,7 +216,8 @@ public DiscoveryModule(
nodeHealthService,
persistedStateRegistry,
remoteStoreNodeService,
- clusterManagerMetrics
+ clusterManagerMetrics,
+ remoteClusterStateService
);
} else {
throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]");
diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java
index c3056276706a0..80ba57b7db4a9 100644
--- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java
+++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java
@@ -64,6 +64,7 @@
import org.opensearch.env.NodeMetadata;
import org.opensearch.gateway.remote.ClusterMetadataManifest;
import org.opensearch.gateway.remote.RemoteClusterStateService;
+import org.opensearch.gateway.remote.model.RemoteClusterStateManifestInfo;
import org.opensearch.index.recovery.RemoteStoreRestoreService;
import org.opensearch.index.recovery.RemoteStoreRestoreService.RemoteRestoreResult;
import org.opensearch.node.Node;
@@ -665,6 +666,8 @@ public static class RemotePersistedState implements PersistedState {
private ClusterState lastAcceptedState;
private ClusterMetadataManifest lastAcceptedManifest;
+
+ private String lastUploadedManifestFile;
private final RemoteClusterStateService remoteClusterStateService;
private String previousClusterUUID;
@@ -690,10 +693,14 @@ public void setCurrentTerm(long currentTerm) {
// But for RemotePersistedState, the state is only pushed by the active cluster. So this method is not required.
}
+ public String getLastUploadedManifestFile() {
+ return lastUploadedManifestFile;
+ }
+
@Override
public void setLastAcceptedState(ClusterState clusterState) {
try {
- final ClusterMetadataManifest manifest;
+ final RemoteClusterStateManifestInfo manifestDetails;
if (shouldWriteFullClusterState(clusterState)) {
final Optional<ClusterMetadataManifest> latestManifest = remoteClusterStateService.getLatestClusterMetadataManifest(
clusterState.getClusterName().value(),
@@ -711,15 +718,21 @@ public void setLastAcceptedState(ClusterState clusterState) {
clusterState.metadata().clusterUUID()
);
}
- manifest = remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID);
+ manifestDetails = remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID);
} else {
assert verifyManifestAndClusterState(lastAcceptedManifest, lastAcceptedState) == true
: "Previous manifest and previous ClusterState are not in sync";
- manifest = remoteClusterStateService.writeIncrementalMetadata(lastAcceptedState, clusterState, lastAcceptedManifest);
+ manifestDetails = remoteClusterStateService.writeIncrementalMetadata(
+ lastAcceptedState,
+ clusterState,
+ lastAcceptedManifest
+ );
}
- assert verifyManifestAndClusterState(manifest, clusterState) == true : "Manifest and ClusterState are not in sync";
- lastAcceptedManifest = manifest;
+ assert verifyManifestAndClusterState(manifestDetails.getClusterMetadataManifest(), clusterState) == true
+ : "Manifest and ClusterState are not in sync";
+ lastAcceptedManifest = manifestDetails.getClusterMetadataManifest();
lastAcceptedState = clusterState;
+ lastUploadedManifestFile = manifestDetails.getManifestFileName();
} catch (Exception e) {
remoteClusterStateService.writeMetadataFailed();
handleExceptionOnWrite(e);
@@ -767,12 +780,13 @@ public void markLastAcceptedStateAsCommitted() {
metadataBuilder.clusterUUIDCommitted(true);
clusterState = ClusterState.builder(lastAcceptedState).metadata(metadataBuilder).build();
}
- final ClusterMetadataManifest committedManifest = remoteClusterStateService.markLastStateAsCommitted(
+ final RemoteClusterStateManifestInfo committedManifestDetails = remoteClusterStateService.markLastStateAsCommitted(
clusterState,
lastAcceptedManifest
);
- lastAcceptedManifest = committedManifest;
+ lastAcceptedManifest = committedManifestDetails.getClusterMetadataManifest();
lastAcceptedState = clusterState;
+ lastUploadedManifestFile = committedManifestDetails.getManifestFileName();
} catch (Exception e) {
handleExceptionOnWrite(e);
}
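The write path above now returns a RemoteClusterStateManifestInfo rather than a bare ClusterMetadataManifest, so RemotePersistedState can keep both the parsed manifest and the name of the uploaded manifest file. An illustrative fragment of that consuming pattern, reusing only names that appear in the hunk above:

    RemoteClusterStateManifestInfo manifestDetails = remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID);
    lastAcceptedManifest = manifestDetails.getClusterMetadataManifest(); // manifest contents, used for the sync assertions
    lastUploadedManifestFile = manifestDetails.getManifestFileName();    // remote file name, exposed via getLastUploadedManifestFile()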
diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java
index b3b1bf37f8696..a89c202dd36be 100644
--- a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java
+++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java
@@ -40,8 +40,9 @@ public class ClusterMetadataManifest implements Writeable, ToXContentFragment {
public static final int CODEC_V0 = 0; // Older codec version, where we haven't introduced codec versions for manifest.
public static final int CODEC_V1 = 1; // In Codec V1 we have introduced global-metadata and codec version in Manifest file.
- public static final int CODEC_V2 = 2; // In Codec V2, there are seperate metadata files rather than a single global metadata file.
- public static final int CODEC_V3 = 3; // In Codec V3, we introduce index routing-metadata in manifest file.
+ public static final int CODEC_V2 = 2; // In Codec V2, there are separate metadata files rather than a single global metadata file,
+ // also we introduce index routing-metadata, diff and other attributes as part of manifest
+ // required for state publication
private static final ParseField CLUSTER_TERM_FIELD = new ParseField("cluster_term");
private static final ParseField STATE_VERSION_FIELD = new ParseField("state_version");
@@ -61,6 +62,15 @@ public class ClusterMetadataManifest implements Writeable, ToXContentFragment {
private static final ParseField UPLOADED_CUSTOM_METADATA = new ParseField("uploaded_custom_metadata");
private static final ParseField ROUTING_TABLE_VERSION_FIELD = new ParseField("routing_table_version");
private static final ParseField INDICES_ROUTING_FIELD = new ParseField("indices_routing");
+ private static final ParseField METADATA_VERSION = new ParseField("metadata_version");
+ private static final ParseField UPLOADED_TRANSIENT_SETTINGS_METADATA = new ParseField("uploaded_transient_settings_metadata");
+ private static final ParseField UPLOADED_DISCOVERY_NODES_METADATA = new ParseField("uploaded_discovery_nodes_metadata");
+ private static final ParseField UPLOADED_CLUSTER_BLOCKS_METADATA = new ParseField("uploaded_cluster_blocks_metadata");
+ private static final ParseField UPLOADED_HASHES_OF_CONSISTENT_SETTINGS_METADATA = new ParseField(
+ "uploaded_hashes_of_consistent_settings_metadata"
+ );
+ private static final ParseField UPLOADED_CLUSTER_STATE_CUSTOM_METADATA = new ParseField("uploaded_cluster_state_custom_metadata");
+ private static final ParseField DIFF_MANIFEST = new ParseField("diff_manifest");
private static ClusterMetadataManifest.Builder manifestV0Builder(Object[] fields) {
return ClusterMetadataManifest.builder()
@@ -86,13 +96,16 @@ private static ClusterMetadataManifest.Builder manifestV2Builder(Object[] fields
.coordinationMetadata(coordinationMetadata(fields))
.settingMetadata(settingsMetadata(fields))
.templatesMetadata(templatesMetadata(fields))
- .customMetadataMap(customMetadata(fields));
- }
-
- private static ClusterMetadataManifest.Builder manifestV3Builder(Object[] fields) {
- return manifestV2Builder(fields).codecVersion(codecVersion(fields))
+ .customMetadataMap(customMetadata(fields))
.routingTableVersion(routingTableVersion(fields))
- .indicesRouting(indicesRouting(fields));
+ .indicesRouting(indicesRouting(fields))
+ .discoveryNodesMetadata(discoveryNodesMetadata(fields))
+ .clusterBlocksMetadata(clusterBlocksMetadata(fields))
+ .diffManifest(diffManifest(fields))
+ .metadataVersion(metadataVersion(fields))
+ .transientSettingsMetadata(transientSettingsMetadata(fields))
+ .hashesOfConsistentSettings(hashesOfConsistentSettings(fields))
+ .clusterStateCustomMetadataMap(clusterStateCustomMetadata(fields));
}
private static long term(Object[] fields) {
@@ -168,6 +181,35 @@ private static List<UploadedIndexMetadata> indicesRouting(Object[] fields) {
        return (List<UploadedIndexMetadata>) fields[16];
}
+ private static UploadedMetadataAttribute discoveryNodesMetadata(Object[] fields) {
+ return (UploadedMetadataAttribute) fields[17];
+ }
+
+ private static UploadedMetadataAttribute clusterBlocksMetadata(Object[] fields) {
+ return (UploadedMetadataAttribute) fields[18];
+ }
+
+ private static long metadataVersion(Object[] fields) {
+ return (long) fields[19];
+ }
+
+ private static UploadedMetadataAttribute transientSettingsMetadata(Object[] fields) {
+ return (UploadedMetadataAttribute) fields[20];
+ }
+
+ private static UploadedMetadataAttribute hashesOfConsistentSettings(Object[] fields) {
+ return (UploadedMetadataAttribute) fields[21];
+ }
+
+ private static Map<String, UploadedMetadataAttribute> clusterStateCustomMetadata(Object[] fields) {
+ List<UploadedMetadataAttribute> customs = (List<UploadedMetadataAttribute>) fields[22];
+ return customs.stream().collect(Collectors.toMap(UploadedMetadataAttribute::getAttributeName, Function.identity()));
+ }
+
+ private static ClusterStateDiffManifest diffManifest(Object[] fields) {
+ return (ClusterStateDiffManifest) fields[23];
+ }
+
private static final ConstructingObjectParser<ClusterMetadataManifest, Void> PARSER_V0 = new ConstructingObjectParser<>(
"cluster_metadata_manifest",
fields -> manifestV0Builder(fields).build()
@@ -183,18 +225,12 @@ private static List<UploadedIndexMetadata> indicesRouting(Object[] fields) {
fields -> manifestV2Builder(fields).build()
);
- private static final ConstructingObjectParser<ClusterMetadataManifest, Void> PARSER_V3 = new ConstructingObjectParser<>(
- "cluster_metadata_manifest",
- fields -> manifestV3Builder(fields).build()
- );
-
- private static final ConstructingObjectParser<ClusterMetadataManifest, Void> CURRENT_PARSER = PARSER_V3;
+ private static final ConstructingObjectParser<ClusterMetadataManifest, Void> CURRENT_PARSER = PARSER_V2;
static {
declareParser(PARSER_V0, CODEC_V0);
declareParser(PARSER_V1, CODEC_V1);
declareParser(PARSER_V2, CODEC_V2);
- declareParser(PARSER_V3, CODEC_V3);
}
private static void declareParser(ConstructingObjectParser<ClusterMetadataManifest, Void> parser, long codec_version) {
@@ -238,14 +274,43 @@ private static void declareParser(ConstructingObjectParser<ClusterMetadataManifest, Void> parser, long codec_version) {
- if (codec_version >= CODEC_V3) {
parser.declareLong(ConstructingObjectParser.constructorArg(), ROUTING_TABLE_VERSION_FIELD);
parser.declareObjectArray(
ConstructingObjectParser.constructorArg(),
(p, c) -> UploadedIndexMetadata.fromXContent(p),
INDICES_ROUTING_FIELD
);
+ parser.declareNamedObject(
+ ConstructingObjectParser.optionalConstructorArg(),
+ UploadedMetadataAttribute.PARSER,
+ UPLOADED_DISCOVERY_NODES_METADATA
+ );
+ parser.declareNamedObject(
+ ConstructingObjectParser.optionalConstructorArg(),
+ UploadedMetadataAttribute.PARSER,
+ UPLOADED_CLUSTER_BLOCKS_METADATA
+ );
+ parser.declareLong(ConstructingObjectParser.constructorArg(), METADATA_VERSION);
+ parser.declareNamedObject(
+ ConstructingObjectParser.optionalConstructorArg(),
+ UploadedMetadataAttribute.PARSER,
+ UPLOADED_TRANSIENT_SETTINGS_METADATA
+ );
+ parser.declareNamedObject(
+ ConstructingObjectParser.optionalConstructorArg(),
+ UploadedMetadataAttribute.PARSER,
+ UPLOADED_HASHES_OF_CONSISTENT_SETTINGS_METADATA
+ );
+ parser.declareNamedObjects(
+ ConstructingObjectParser.optionalConstructorArg(),
+ UploadedMetadataAttribute.PARSER,
+ UPLOADED_CLUSTER_STATE_CUSTOM_METADATA
+ );
+ parser.declareObject(
+ ConstructingObjectParser.optionalConstructorArg(),
+ (p, c) -> ClusterStateDiffManifest.fromXContent(p),
+ DIFF_MANIFEST
+ );
}
}
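For context on the fields[17] through fields[23] indices used by the new accessors: with ConstructingObjectParser, each declared constructor argument (constructorArg() or optionalConstructorArg()) occupies the next slot of the Object[] handed to the builder, in declaration order, which is why every field declared above maps to one fixed index. A tiny standalone sketch of that contract (field names here are made up, imports as elsewhere in this file):

    ConstructingObjectParser<String, Void> example = new ConstructingObjectParser<>(
        "example",
        fields -> fields[0] + ":" + fields[1] // fields[0] = first declared constructor arg, fields[1] = second
    );
    example.declareString(ConstructingObjectParser.constructorArg(), new ParseField("name"));   // -> fields[0]
    example.declareLong(ConstructingObjectParser.constructorArg(), new ParseField("version"));  // -> fields[1]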
@@ -267,6 +332,13 @@ private static void declareParser(ConstructingObjectParser<ClusterMetadataManifest, Void> parser, long codec_version) {
    private final List<UploadedIndexMetadata> indicesRouting;
+ private final long metadataVersion;
+ private final UploadedMetadataAttribute uploadedTransientSettingsMetadata;
+ private final UploadedMetadataAttribute uploadedDiscoveryNodesMetadata;
+ private final UploadedMetadataAttribute uploadedClusterBlocksMetadata;
+ private final UploadedMetadataAttribute uploadedHashesOfConsistentSettings;
+ private final Map<String, UploadedMetadataAttribute> uploadedClusterStateCustomMap;
+ private final ClusterStateDiffManifest diffManifest;
public List<UploadedIndexMetadata> getIndices() {
return indices;
@@ -332,6 +404,34 @@ public Map<String, UploadedMetadataAttribute> getCustomMetadataMap() {
return uploadedCustomMetadataMap;
}
+ public long getMetadataVersion() {
+ return metadataVersion;
+ }
+
+ public UploadedMetadataAttribute getTransientSettingsMetadata() {
+ return uploadedTransientSettingsMetadata;
+ }
+
+ public UploadedMetadataAttribute getDiscoveryNodesMetadata() {
+ return uploadedDiscoveryNodesMetadata;
+ }
+
+ public UploadedMetadataAttribute getClusterBlocksMetadata() {
+ return uploadedClusterBlocksMetadata;
+ }
+
+ public ClusterStateDiffManifest getDiffManifest() {
+ return diffManifest;
+ }
+
+ public Map