diff --git a/.github/workflows/dco.yml b/.github/workflows/dco.yml index 9580d510fd108..ef842bb405d60 100644 --- a/.github/workflows/dco.yml +++ b/.github/workflows/dco.yml @@ -9,7 +9,7 @@ jobs: steps: - name: Get PR Commits id: 'get-pr-commits' - uses: tim-actions/get-pr-commits@v1.1.0 + uses: tim-actions/get-pr-commits@v1.3.1 with: token: ${{ secrets.GITHUB_TOKEN }} - name: DCO Check diff --git a/.whitesource b/.whitesource new file mode 100644 index 0000000000000..bb071b4a2b1ce --- /dev/null +++ b/.whitesource @@ -0,0 +1,45 @@ +{ + "scanSettings": { + "configMode": "AUTO", + "configExternalURL": "", + "projectToken": "", + "baseBranches": [] + }, + "scanSettingsSAST": { + "enableScan": false, + "scanPullRequests": false, + "incrementalScan": true, + "baseBranches": [], + "snippetSize": 10 + }, + "checkRunSettings": { + "vulnerableCheckRunConclusionLevel": "failure", + "displayMode": "diff", + "useMendCheckNames": true + }, + "checkRunSettingsSAST": { + "checkRunConclusionLevel": "failure", + "severityThreshold": "high" + }, + "issueSettings": { + "minSeverityLevel": "LOW", + "issueType": "DEPENDENCY" + }, + "issueSettingsSAST": { + "minSeverityLevel": "high", + "issueType": "repo" + }, + "remediateSettings": { + "workflowRules": { + "enabled": true + } + }, + "imageSettings":{ + "imageTracing":{ + "enableImageTracingPR": false, + "addRepositoryCoordinate": false, + "addDockerfilePath": false, + "addMendIdentifier": false + } + } +} \ No newline at end of file diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 1cc12f66d52e1..964383078c38d 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -17,7 +17,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies ### Changed -- Changed locale provider from COMPAT to CLDR ([13988](https://github.com/opensearch-project/OpenSearch/pull/13988)) - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) - Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773)) - Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792)) diff --git a/CHANGELOG.md b/CHANGELOG.md index 098405f8f1d44..3e46ab0077314 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,9 +14,22 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Remote Store] Upload translog checkpoint as object metadata to translog.tlog([#13637](https://github.com/opensearch-project/OpenSearch/pull/13637)) - Add getMetadataFields to MapperService ([#13819](https://github.com/opensearch-project/OpenSearch/pull/13819)) - [Remote State] Add async remote state deletion task running on an interval, configurable by a setting ([#13131](https://github.com/opensearch-project/OpenSearch/pull/13131)) +- Add "wildcard" field type that supports efficient wildcard, prefix, and regexp queries ([#13461](https://github.com/opensearch-project/OpenSearch/pull/13461)) - Allow setting query parameters on requests ([#13776](https://github.com/opensearch-project/OpenSearch/issues/13776)) +- Add capability to disable source recovery_source for an index ([#13590](https://github.com/opensearch-project/OpenSearch/pull/13590)) - Add remote routing table for remote state publication with experimental feature flag ([#13304](https://github.com/opensearch-project/OpenSearch/pull/13304)) +- Add upload 
flow for writing routing table to remote store ([#13870](https://github.com/opensearch-project/OpenSearch/pull/13870)) +- Add dynamic action retry timeout setting ([#14022](https://github.com/opensearch-project/OpenSearch/issues/14022)) - [Remote Store] Add support to disable flush based on translog reader count ([#14027](https://github.com/opensearch-project/OpenSearch/pull/14027)) +- Add recovery chunk size setting ([#13997](https://github.com/opensearch-project/OpenSearch/pull/13997)) +- [Query Insights] Add exporter support for top n queries ([#12982](https://github.com/opensearch-project/OpenSearch/pull/12982)) +- [Query Insights] Add X-Opaque-Id to search request metadata for top n queries ([#13374](https://github.com/opensearch-project/OpenSearch/pull/13374)) +- [Streaming Indexing] Enhance RestAction with request / response streaming support ([#13772](https://github.com/opensearch-project/OpenSearch/pull/13772)) +- Add support for query level resource usage tracking ([#13172](https://github.com/opensearch-project/OpenSearch/pull/13172)) +- Move Remote Store Migration from DocRep to GA and modify remote migration settings name ([#14100](https://github.com/opensearch-project/OpenSearch/pull/14100)) +- Derived field object type support ([#13720](https://github.com/opensearch-project/OpenSearch/pull/13720)) +- [Query Insights] Add cpu and memory metrics to top n queries ([#13739](https://github.com/opensearch-project/OpenSearch/pull/13739)) +- Support Dynamic Pruning in Cardinality Aggregation ([#13821](https://github.com/opensearch-project/OpenSearch/pull/13821)) ### Dependencies - Bump `com.github.spullara.mustache.java:compiler` from 0.9.10 to 0.9.13 ([#13329](https://github.com/opensearch-project/OpenSearch/pull/13329), [#13559](https://github.com/opensearch-project/OpenSearch/pull/13559)) @@ -36,11 +49,16 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.apache.xmlbeans:xmlbeans` from 5.2.0 to 5.2.1 ([#13839](https://github.com/opensearch-project/OpenSearch/pull/13839)) - Bump `actions/checkout` from 3 to 4 ([#13935](https://github.com/opensearch-project/OpenSearch/pull/13935)) - Bump `com.netflix.nebula.ospackage-base` from 11.9.0 to 11.9.1 ([#13933](https://github.com/opensearch-project/OpenSearch/pull/13933)) +- Update to Apache Lucene 9.11.0 ([#14042](https://github.com/opensearch-project/OpenSearch/pull/14042)) +- Bump `com.azure:azure-core-http-netty` from 1.12.8 to 1.15.1 ([#14128](https://github.com/opensearch-project/OpenSearch/pull/14128)) +- Bump `tim-actions/get-pr-commits` from 1.1.0 to 1.3.1 ([#14126](https://github.com/opensearch-project/OpenSearch/pull/14126)) ### Changed - Add ability for Boolean and date field queries to run when only doc_values are enabled ([#11650](https://github.com/opensearch-project/OpenSearch/pull/11650)) - Refactor implementations of query phase searcher, allow QueryCollectorContext to have zero collectors ([#13481](https://github.com/opensearch-project/OpenSearch/pull/13481)) - Adds support to inject telemetry instances to plugins ([#13636](https://github.com/opensearch-project/OpenSearch/pull/13636)) +- Adds support to provide tags with value in Gauge metric. 
([#13994](https://github.com/opensearch-project/OpenSearch/pull/13994)) +- Move cache removal notifications outside lru lock ([#14017](https://github.com/opensearch-project/OpenSearch/pull/14017)) ### Deprecated @@ -55,6 +73,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Painless: ensure type "UnmodifiableMap" for params ([#13885](https://github.com/opensearch-project/OpenSearch/pull/13885)) - Pass parent filter to inner hit query ([#13903](https://github.com/opensearch-project/OpenSearch/pull/13903)) - Fix NPE on restore searchable snapshot ([#13911](https://github.com/opensearch-project/OpenSearch/pull/13911)) +- Fix double invocation of postCollection when MultiBucketCollector is present ([#14015](https://github.com/opensearch-project/OpenSearch/pull/14015)) +- Java high-level REST client bulk() is not respecting the bulkRequest.requireAlias(true) method call ([#14146](https://github.com/opensearch-project/OpenSearch/pull/14146)) ### Security diff --git a/TRIAGING.md b/TRIAGING.md index bb04d49a66c54..90842cd8e9393 100644 --- a/TRIAGING.md +++ b/TRIAGING.md @@ -1,6 +1,6 @@ -The maintainers of the OpenSearch Repo seek to promote an inclusive and engaged community of contributors. In order to facilitate this, weekly triage meetings are open-to-all and attendance is encouraged for anyone who hopes to contribute, discuss an issue, or learn more about the project. To learn more about contributing to the OpenSearch Repo visit the [Contributing](./CONTRIBUTING.md) documentation. +The maintainers of the OpenSearch Repo seek to promote an inclusive and engaged community of contributors. In order to facilitate this, weekly triage meetings are open-to-all and attendance is encouraged for anyone who hopes to contribute, discuss an issue, or learn more about the project. There are several weekly triage meetings scoped to the following component areas: Search, Storage, Cluster Manager, and finally "Core" as a catch-all for all other issues. To learn more about contributing to the OpenSearch Repo visit the [Contributing](./CONTRIBUTING.md) documentation. ### Do I need to attend for my issue to be addressed/triaged? @@ -12,25 +12,29 @@ You can track if your issue was triaged by watching your GitHub notifications fo Each meeting we seek to address all new issues. However, should we run out of time before your issue is discussed, you are always welcome to attend the next meeting or to follow up on the issue post itself. -### How do I join the Triage meeting? +### How do I join a Triage meeting? -Meetings are hosted regularly at 10:00a - 10:55a Central Time every Wednesday and can be joined via [Zoom](https://zoom.us/download), with this [meeting link](https://us02web.zoom.us/j/86287450465) and passcode `805212`. + Check the [OpenSearch Meetup Group](https://www.meetup.com/opensearch/) for the latest schedule and details for joining each meeting. Each component area has its own meetup series: [Search](https://www.meetup.com/opensearch/events/300929493/), [Storage](https://www.meetup.com/opensearch/events/299907409/), [Cluster Manager](https://www.meetup.com/opensearch/events/301082218/), and [Core](https://www.meetup.com/opensearch/events/301061009/). -After joining the Zoom meeting, you can enable your video / voice to join the discussion. If you do not have a webcam or microphone available, you can still join in via the text chat. +After joining the virtual meeting, you can enable your video / voice to join the discussion. 
If you do not have a webcam or microphone available, you can still join in via the text chat. If you have an issue you'd like to bring forth please prepare a link to the issue so it can be presented and viewed by everyone in the meeting. ### Is there an agenda for each week? -Meetings are 55 minutes and follows this structure: +Meeting structure may vary slightly, but the general structure is as follows: -Yes, each 55-minute meeting follows this structure: 1. **Initial Gathering:** Feel free to turn on your video and engage in informal conversation. Shortly, a volunteer triage [facilitator](#what-is-the-role-of-the-facilitator) will begin the meeting and share their screen. 2. **Record Attendees:** The facilitator will request attendees to share their GitHub profile links. These links will be collected and assembled into a [tag](#how-do-triage-facilitator-tag-comments-during-the-triage-meeting) to annotate comments during the meeting. 3. **Announcements:** Any announcements will be made at the beginning of the meeting. -4. **Review of New Issues:** We start by reviewing all untriaged [issues](https://github.com/search?q=label%3Auntriaged+is%3Aopen++repo%3Aopensearch-project%2FOpenSearch+&type=issues&ref=advsearch&s=created&o=desc) for the OpenSearch repo. +4. **Review of New Issues:** We start by reviewing all untriaged issues. Each meeting has a label-based search to find relevant issues: + - [Search](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Search%22%2C%22Search%3ARemote+Search%22%2C%22Search%3AResiliency%22%2C%22Search%3APerformance%22%2C%22Search%3ARelevance%22%2C%22Search%3AAggregations%22%2C%22Search%3AQuery+Capabilities%22%2C%22Search%3AQuery+Insights%22%2C%22Search%3ASearchable+Snapshots%22%2C%22Search%3AUser+Behavior+Insights%22) + - [Storage](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3AStorage%2C%22Storage%3AResiliency%22%2C%22Storage%3APerformance%22%2C%22Storage%3ASnapshots%22%2C%22Storage%3ARemote%22%2C%22Storage%3ADurability%22) + - [Cluster Manager](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22) + - [Core](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+-label%3A%22Search%22%2C%22Search%3ARemote+Search%22%2C%22Search%3AResiliency%22%2C%22Search%3APerformance%22%2C%22Search%3ARelevance%22%2C%22Search%3AAggregations%22%2C%22Search%3AQuery+Capabilities%22%2C%22Search%3AQuery+Insights%22%2C%22Search%3ASearchable+Snapshots%22%2C%22Search%3AUser+Behavior+Insights%22%2C%22Storage%22%2C%22Storage%3AResiliency%22%2C%22Storage%3APerformance%22%2C%22Storage%3ASnapshots%22%2C%22Storage%3ARemote%22%2C%22Storage%3ADurability%22%2C%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22) 5. **Attendee Requests:** An opportunity for any meeting member to request consideration of an issue or pull request. 6. **Open Discussion:** Attendees can bring up any topics not already covered by filed issues or pull requests. +7. 
**Review of Old Untriaged Issues:** Time permitting, each meeting will look at all [untriaged issues older than 14 days](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+created%3A%3C2024-05-20) to prevent issues from falling through the cracks (note the GitHub API does not allow for relative times, so the date in this search must be updated every meeting). ### What is the role of the facilitator? diff --git a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java index d0cb2da9c1dd3..2ea8c2d015ecc 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java @@ -110,7 +110,7 @@ public void execute(Task t) { if (BuildParams.getRuntimeJavaVersion() == JavaVersion.VERSION_1_8) { test.systemProperty("java.locale.providers", "SPI,JRE"); } else { - test.systemProperty("java.locale.providers", "SPI,CLDR"); + test.systemProperty("java.locale.providers", "SPI,COMPAT"); if (test.getJavaVersion().compareTo(JavaVersion.VERSION_17) < 0) { test.jvmArgs("--illegal-access=warn"); } diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 3a76cf6e9b7ad..c34409053b915 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.11.0-snapshot-4be6531 +lucene = 9.11.0 bundled_jdk_vendor = adoptium bundled_jdk = 21.0.3+9 diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java index 35d9929a649ff..cf01febbd33cd 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java @@ -154,6 +154,9 @@ static Request bulk(BulkRequest bulkRequest) throws IOException { parameters.withRefreshPolicy(bulkRequest.getRefreshPolicy()); parameters.withPipeline(bulkRequest.pipeline()); parameters.withRouting(bulkRequest.routing()); + if (bulkRequest.requireAlias() != null) { + parameters.withRequireAlias(bulkRequest.requireAlias()); + } // Bulk API only supports newline delimited JSON or Smile. Before executing // the bulk, we need to check that all requests have the same content-type // and this content-type is supported by the Bulk API. 
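[Editor's note: the hunk above adds the request-level require_alias flag (sent as the ?require_alias query parameter) and the hunk below adds the per-action flag (written into each action's metadata). A minimal usage sketch follows - it is not part of the patch, assumes a RestHighLevelClient named client and a hypothetical alias "logs-alias", and uses only APIs exercised by the CrudIT tests later in this diff:]
// Request-level flag: applies to every action in the bulk request.
BulkRequest request = new BulkRequest("logs-alias");
request.requireAlias(true);
request.add(new IndexRequest().id("1").source("{ \"level\": \"info\" }", XContentType.JSON));
// Action-level flag: serialized as "require_alias": true on the individual action's metadata line.
request.add(new IndexRequest().id("2").setRequireAlias(true).source("{ \"level\": \"warn\" }", XContentType.JSON));
// If "logs-alias" is not an existing alias, the items fail and no index is auto-created.
BulkResponse response = client.bulk(request, RequestOptions.DEFAULT);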
@@ -232,6 +235,10 @@ static Request bulk(BulkRequest bulkRequest) throws IOException { metadata.field("_source", updateRequest.fetchSource()); } } + + if (action.isRequireAlias()) { + metadata.field("require_alias", action.isRequireAlias()); + } metadata.endObject(); } metadata.endObject(); diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index 9d8d771f1eaed..83c3ba8164c4b 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -2227,11 +2227,11 @@ protected final Resp parseEntity(final HttpEntity entity, final CheckedFu if (entity.getContentType() == null) { throw new IllegalStateException("OpenSearch didn't return the [Content-Type] header, unable to parse response body"); } - MediaType medaiType = MediaType.fromMediaType(entity.getContentType()); - if (medaiType == null) { + MediaType mediaType = MediaType.fromMediaType(entity.getContentType()); + if (mediaType == null) { throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType()); } - try (XContentParser parser = medaiType.xContent().createParser(registry, DEPRECATION_HANDLER, entity.getContent())) { + try (XContentParser parser = mediaType.xContent().createParser(registry, DEPRECATION_HANDLER, entity.getContent())) { return entityParser.apply(parser); } } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java index da9f790215669..f5b1b0768ff4a 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java @@ -1299,4 +1299,61 @@ public void testMultiTermvectors() throws IOException { } } } + + public void testBulkWithRequireAlias() throws IOException { + { + String indexAliasName = "testindex-1"; + + BulkRequest bulkRequest = new BulkRequest(indexAliasName); + bulkRequest.requireAlias(true); + bulkRequest.add(new IndexRequest().id("1").source("{ \"name\": \"Biden\" }", XContentType.JSON)); + bulkRequest.add(new IndexRequest().id("2").source("{ \"name\": \"Trump\" }", XContentType.JSON)); + + BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync, RequestOptions.DEFAULT); + + assertFalse("Should not auto-create the '" + indexAliasName + "' index.", indexExists(indexAliasName)); + assertTrue("Bulk response must have failures.", bulkResponse.hasFailures()); + } + { + String indexAliasName = "testindex-2"; + + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.requireAlias(true); + bulkRequest.add(new IndexRequest().index(indexAliasName).id("1").source("{ \"name\": \"Biden\" }", XContentType.JSON)); + bulkRequest.add(new IndexRequest().index(indexAliasName).id("2").source("{ \"name\": \"Trump\" }", XContentType.JSON)); + + BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync, RequestOptions.DEFAULT); + + assertFalse("Should not auto-create the '" + indexAliasName + "' index.", indexExists(indexAliasName)); + assertTrue("Bulk response must have failures.", bulkResponse.hasFailures()); + } + { + String indexAliasName = "testindex-3"; + + BulkRequest bulkRequest = new BulkRequest(indexAliasName); + bulkRequest.add(new 
IndexRequest().id("1").setRequireAlias(true).source("{ \"name\": \"Biden\" }", XContentType.JSON)); + bulkRequest.add(new IndexRequest().id("2").setRequireAlias(true).source("{ \"name\": \"Trump\" }", XContentType.JSON)); + + BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync, RequestOptions.DEFAULT); + + assertFalse("Should not auto-create the '" + indexAliasName + "' index.", indexExists(indexAliasName)); + assertTrue("Bulk response must have failures.", bulkResponse.hasFailures()); + } + { + String indexAliasName = "testindex-4"; + + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add( + new IndexRequest().index(indexAliasName).id("1").setRequireAlias(true).source("{ \"name\": \"Biden\" }", XContentType.JSON) + ); + bulkRequest.add( + new IndexRequest().index(indexAliasName).id("2").setRequireAlias(true).source("{ \"name\": \"Trump\" }", XContentType.JSON) + ); + + BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync, RequestOptions.DEFAULT); + + assertFalse("Should not auto-create the '" + indexAliasName + "' index.", indexExists(indexAliasName)); + assertTrue("Bulk response must have failures.", bulkResponse.hasFailures()); + } + } } diff --git a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java index af7138569972a..726c381db09f6 100644 --- a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java +++ b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java @@ -105,8 +105,13 @@ private static String javaLocaleProviders() { SPI setting is used to allow loading custom CalendarDataProvider in jdk8 it has to be loaded from jre/lib/ext, in jdk9+ it is already within ES project and on a classpath + + Due to internationalization enhancements in JDK 9 OpenSearch need to set the provider to COMPAT otherwise time/date + parsing will break in an incompatible way for some date patterns and locales. 
+ //TODO COMPAT will be deprecated at some point, see https://bugs.openjdk.java.net/browse/JDK-8232906 + See also: documentation in server/org.opensearch.common.time.IsoCalendarDataProvider */ - return "-Djava.locale.providers=SPI,CLDR"; + return "-Djava.locale.providers=SPI,COMPAT"; } } diff --git a/gradle/ide.gradle b/gradle/ide.gradle index e266d9add172d..ea353f8d92bdd 100644 --- a/gradle/ide.gradle +++ b/gradle/ide.gradle @@ -81,7 +81,7 @@ if (System.getProperty('idea.active') == 'true') { } runConfigurations { defaults(JUnit) { - vmParameters = '-ea -Djava.locale.providers=SPI,CLDR' + vmParameters = '-ea -Djava.locale.providers=SPI,COMPAT' if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_17) { vmParameters += ' -Djava.security.manager=allow' } diff --git a/libs/core/licenses/lucene-core-9.11.0-snapshot-4be6531.jar.sha1 b/libs/core/licenses/lucene-core-9.11.0-snapshot-4be6531.jar.sha1 deleted file mode 100644 index 943a9b2fd214b..0000000000000 --- a/libs/core/licenses/lucene-core-9.11.0-snapshot-4be6531.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3c2361bd633374ae3814b175cc25ccf773f67026 \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.11.0.jar.sha1 b/libs/core/licenses/lucene-core-9.11.0.jar.sha1 new file mode 100644 index 0000000000000..b0d38c4165581 --- /dev/null +++ b/libs/core/licenses/lucene-core-9.11.0.jar.sha1 @@ -0,0 +1 @@ +2e487755a6814b2a1bc770c26569dcba86873dcf \ No newline at end of file diff --git a/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java b/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java index af09a7aebba79..711f56c9f3e3b 100644 --- a/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java +++ b/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java @@ -78,6 +78,19 @@ public static Compressor compressor(final BytesReference bytes) { return null; } + /** + * @param bytes The bytes to check the compression for + * @return The detected compressor, or the NoneCompressor if no compression is detected. + */ + public static Compressor compressorForWritable(final BytesReference bytes) { + for (Compressor compressor : registeredCompressors.values()) { + if (compressor.isCompressed(bytes) == true) { + return compressor; + } + } + return CompressorRegistry.none(); + } + /** Decompress the provided {@link BytesReference}.
*/ public static BytesReference uncompress(BytesReference bytes) throws IOException { Compressor compressor = compressor(bytes); diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageInfo.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageInfo.java index a278b61894a65..e7b51c3389b52 100644 --- a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageInfo.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageInfo.java @@ -104,6 +104,10 @@ public long getTotalValue() { return endValue.get() - startValue; } + public long getStartValue() { + return startValue; + } + @Override public String toString() { return String.valueOf(getTotalValue()); diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceInfo.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceInfo.java new file mode 100644 index 0000000000000..373cdbfa7e9a1 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceInfo.java @@ -0,0 +1,225 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.core.tasks.resourcetracker; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Task resource usage information with minimal information about the task + *

+ * Writeable TaskResourceInfo objects are used to represent resource usage + * information of running tasks, which can be propagated to coordinator node + * to infer query-level resource usage + * + * @opensearch.api + */ +@PublicApi(since = "2.15.0") +public class TaskResourceInfo implements Writeable, ToXContentObject { + private final String action; + private final long taskId; + private final long parentTaskId; + private final String nodeId; + private final TaskResourceUsage taskResourceUsage; + + private static final ParseField ACTION = new ParseField("action"); + private static final ParseField TASK_ID = new ParseField("taskId"); + private static final ParseField PARENT_TASK_ID = new ParseField("parentTaskId"); + private static final ParseField NODE_ID = new ParseField("nodeId"); + private static final ParseField TASK_RESOURCE_USAGE = new ParseField("taskResourceUsage"); + + public TaskResourceInfo( + final String action, + final long taskId, + final long parentTaskId, + final String nodeId, + final TaskResourceUsage taskResourceUsage + ) { + this.action = action; + this.taskId = taskId; + this.parentTaskId = parentTaskId; + this.nodeId = nodeId; + this.taskResourceUsage = taskResourceUsage; + } + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "task_resource_info", + a -> new Builder().setAction((String) a[0]) + .setTaskId((Long) a[1]) + .setParentTaskId((Long) a[2]) + .setNodeId((String) a[3]) + .setTaskResourceUsage((TaskResourceUsage) a[4]) + .build() + ); + + static { + PARSER.declareString(constructorArg(), ACTION); + PARSER.declareLong(constructorArg(), TASK_ID); + PARSER.declareLong(constructorArg(), PARENT_TASK_ID); + PARSER.declareString(constructorArg(), NODE_ID); + PARSER.declareObject(constructorArg(), TaskResourceUsage.PARSER, TASK_RESOURCE_USAGE); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ACTION.getPreferredName(), this.action); + builder.field(TASK_ID.getPreferredName(), this.taskId); + builder.field(PARENT_TASK_ID.getPreferredName(), this.parentTaskId); + builder.field(NODE_ID.getPreferredName(), this.nodeId); + builder.startObject(TASK_RESOURCE_USAGE.getPreferredName()); + this.taskResourceUsage.toXContent(builder, params); + builder.endObject(); + builder.endObject(); + return builder; + } + + /** + * Builder for {@link TaskResourceInfo} + */ + public static class Builder { + private TaskResourceUsage taskResourceUsage; + private String action; + private long taskId; + private long parentTaskId; + private String nodeId; + + public Builder setTaskResourceUsage(final TaskResourceUsage taskResourceUsage) { + this.taskResourceUsage = taskResourceUsage; + return this; + } + + public Builder setAction(final String action) { + this.action = action; + return this; + } + + public Builder setTaskId(final long taskId) { + this.taskId = taskId; + return this; + } + + public Builder setParentTaskId(final long parentTaskId) { + this.parentTaskId = parentTaskId; + return this; + } + + public Builder setNodeId(final String nodeId) { + this.nodeId = nodeId; + return this; + } + + public TaskResourceInfo build() { + return new TaskResourceInfo(action, taskId, parentTaskId, nodeId, taskResourceUsage); + } + } + + /** + * Read task info from a stream. 
+ * + * @param in StreamInput to read + * @return {@link TaskResourceInfo} + * @throws IOException IOException + */ + public static TaskResourceInfo readFromStream(StreamInput in) throws IOException { + return new TaskResourceInfo.Builder().setAction(in.readString()) + .setTaskId(in.readLong()) + .setParentTaskId(in.readLong()) + .setNodeId(in.readString()) + .setTaskResourceUsage(TaskResourceUsage.readFromStream(in)) + .build(); + } + + /** + * Get TaskResourceUsage + * + * @return taskResourceUsage + */ + public TaskResourceUsage getTaskResourceUsage() { + return taskResourceUsage; + } + + /** + * Get parent task id + * + * @return parent task id + */ + public long getParentTaskId() { + return parentTaskId; + } + + /** + * Get task id + * @return task id + */ + public long getTaskId() { + return taskId; + } + + /** + * Get node id + * @return node id + */ + public String getNodeId() { + return nodeId; + } + + /** + * Get task action + * @return task action + */ + public String getAction() { + return action; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(action); + out.writeLong(taskId); + out.writeLong(parentTaskId); + out.writeString(nodeId); + taskResourceUsage.writeTo(out); + } + + @Override + public String toString() { + return Strings.toString(MediaTypeRegistry.JSON, this); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != TaskResourceInfo.class) { + return false; + } + TaskResourceInfo other = (TaskResourceInfo) obj; + return action.equals(other.action) + && taskId == other.taskId + && parentTaskId == other.parentTaskId + && Objects.equals(nodeId, other.nodeId) + && taskResourceUsage.equals(other.taskResourceUsage); + } + + @Override + public int hashCode() { + return Objects.hash(action, taskId, parentTaskId, nodeId, taskResourceUsage); + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java index c861c21f89fc5..bcf5c163cb91f 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java @@ -48,6 +48,11 @@ public Closeable createGauge(String name, String description, String unit, Suppl return metricsTelemetry.createGauge(name, description, unit, valueProvider, tags); } + @Override + public Closeable createGauge(String name, String description, String unit, Supplier value) { + return metricsTelemetry.createGauge(name, description, unit, value); + } + @Override public void close() throws IOException { metricsTelemetry.close(); diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java index 3ab3dcf82c7a7..3dc212b1341cc 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java @@ -63,4 +63,16 @@ public interface MetricsRegistry extends Closeable { */ Closeable createGauge(String name, String description, String unit, Supplier valueProvider, Tags tags); + /** + * Creates the Observable Gauge type of Metric. Where the value provider will be called at a certain frequency + * to capture the value. + * + * @param name name of the observable gauge. 
+ * @param description any description about the metric. + * @param unit unit of the metric. + * @param value value provider. + * @return closeable to dispose/close the Gauge metric. + */ + Closeable createGauge(String name, String description, String unit, Supplier value); + } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/TaggedMeasurement.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/TaggedMeasurement.java new file mode 100644 index 0000000000000..707f2c79c62f2 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/TaggedMeasurement.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.telemetry.metrics.tags.Tags; + +/** + * Observable Measurement for the Asynchronous instruments. + * @opensearch.experimental + */ +@ExperimentalApi +public final class TaggedMeasurement { + private final Double value; + private final Tags tags; + + /** + * Factory method to create the {@link TaggedMeasurement} object. + * @param value value. + * @param tags tags to be added per value. + * @return tagged measurement TaggedMeasurement + */ + public static TaggedMeasurement create(double value, Tags tags) { + return new TaggedMeasurement(value, tags); + } + + private TaggedMeasurement(double value, Tags tags) { + this.value = value; + this.tags = tags; + } + + /** + * Returns the value. + * @return value + */ + public Double getValue() { + return value; + } + + /** + * Returns the tags. + * @return tags + */ + public Tags getTags() { + return tags; + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java index 9a913d25e872d..7bec136c42ba7 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java @@ -12,6 +12,7 @@ import org.opensearch.telemetry.metrics.Counter; import org.opensearch.telemetry.metrics.Histogram; import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.TaggedMeasurement; import org.opensearch.telemetry.metrics.tags.Tags; import java.io.Closeable; @@ -52,6 +53,11 @@ public Closeable createGauge(String name, String description, String unit, Suppl return () -> {}; } + @Override + public Closeable createGauge(String name, String description, String unit, Supplier value) { + return () -> {}; + } + @Override public void close() throws IOException { diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java index 872f697ade09e..e1506eecff6e9 100644 --- a/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java @@ -79,4 +79,19 @@ public void testGauge() { assertSame(mockCloseable, closeable); } + @SuppressWarnings("unchecked") + public void testGaugeWithValueAndTagSupplier() { + Closeable mockCloseable = 
mock(Closeable.class); + when(defaultMeterRegistry.createGauge(any(String.class), any(String.class), any(String.class), any(Supplier.class))).thenReturn( + mockCloseable + ); + Closeable closeable = defaultMeterRegistry.createGauge( + "org.opensearch.telemetry.metrics.DefaultMeterRegistryTests.testObservableGauge", + "test observable gauge", + "ms", + () -> TaggedMeasurement.create(1.0, Tags.EMPTY) + ); + assertSame(mockCloseable, closeable); + } + } diff --git a/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-4be6531.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-4be6531.jar.sha1 deleted file mode 100644 index ee00419f52066..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-4be6531.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8752daf173a642ae02e081cc0398f2ce59278200 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.11.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.11.0.jar.sha1 new file mode 100644 index 0000000000000..29aade3ad4298 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.11.0.jar.sha1 @@ -0,0 +1 @@ +5e21d20edee0712472e7c6f605c9d97aeecf16c0 \ No newline at end of file diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/10_derived_field_index_mapping_definition.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/10_derived_field_index_mapping_definition.yml new file mode 100644 index 0000000000000..4f700c3b83e8f --- /dev/null +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/10_derived_field_index_mapping_definition.yml @@ -0,0 +1,421 @@ +"Test derived_field supported type using index mapping definition": + - skip: + version: " - 2.14.99" + reason: "derived_field feature was added in 2.15" + + - do: + indices.create: + index: test + body: + mappings: + properties: + text: + type: text + keyword: + type: keyword + long: + type: long + float: + type: float + double: + type: double + date: + type: date + geo: + type: geo_point + ip: + type: ip + boolean: + type: boolean + array_of_long: + type: long + json_field: + type: text + derived: + derived_text: + type: text + script: "emit(params._source[\"text\"])" + derived_text_prefilter_field: + type: text + script: "emit(params._source[\"text\"])" + prefilter_field: "text" + derived_keyword: + type: keyword + script: "emit(params._source[\"keyword\"])" + derived_long: + type: long + script: "emit(params._source[\"long\"])" + derived_float: + type: float + script: "emit(params._source[\"float\"])" + derived_double: + type: double + script: "emit(params._source[\"double\"])" + derived_date: + type: date + script: "emit(ZonedDateTime.parse(params._source[\"date\"]).toInstant().toEpochMilli())" + derived_geo: + type: geo_point + script: "emit(params._source[\"geo\"][0], params._source[\"geo\"][1])" + derived_ip: + type: ip + script: "emit(params._source[\"ip\"])" + derived_boolean: + type: boolean + script: "emit(params._source[\"boolean\"])" + derived_array_of_long: + type: long + script: "emit(params._source[\"array_of_long\"][0]);emit(params._source[\"array_of_long\"][1]);" + derived_object: + type: object + properties: + keyword: keyword + script: "emit(params._source[\"json_field\"])" + prefilter_field: "json_field" + + - do: + index: + index: test + id: 1 + body: { + text: "peter piper", + keyword: "foo", + long: 1, + 
float: 1.0, + double: 1.0, + date: "2017-01-01T00:00:00Z", + geo: [0.0, 20.0], + ip: "192.168.0.1", + boolean: true, + array_of_long: [1, 2], + json_field: "{\"keyword\":\"json_keyword1\",\"long\":10,\"float\":10.0,\"double\":10.0,\"date\":\"2021-01-01T00:00:00Z\",\"ip\":\"10.0.0.1\",\"boolean\":true, \"array_of_long\": [1, 2]}}" + } + + - do: + index: + index: test + id: 2 + body: { + text: "piper picked a peck", + keyword: "bar", + long: 2, + float: 2.0, + double: 2.0, + date: "2017-01-02T00:00:00Z", + geo: [10.0, 30.0], + ip: "192.168.0.2", + boolean: false, + array_of_long: [2, 3], + json_field: "{\"keyword\":\"json_keyword2\",\"long\":20,\"float\":20.0,\"double\":20.0,\"date\":\"2021-02-01T00:00:00Z\",\"ip\":\"10.0.0.2\",\"boolean\":false, \"array_of_long\": [2, 3]}}" + } + + - do: + index: + index: test + id: 3 + body: { + text: "peck of pickled peppers", + keyword: "baz", + long: -3, + float: -3.0, + double: -3.0, + date: "2017-01-03T00:00:00Z", + geo: [20.0, 40.0], + ip: "192.168.0.3", + boolean: true, + array_of_long: [3, 4], + json_field: "{\"keyword\":\"json_keyword3\",\"long\":30,\"float\":30.0,\"double\":30.0,\"date\":\"2021-03-01T00:00:00Z\",\"ip\":\"10.0.0.3\",\"boolean\":true, \"array_of_long\": [3, 4]}" + } + + - do: + index: + index: test + id: 4 + body: { + text: "pickled peppers", + keyword: "qux", + long: 4, + float: 4.0, + double: 4.0, + date: "2017-01-04T00:00:00Z", + geo: [30.0, 50.0], + ip: "192.168.0.4", + boolean: false, + array_of_long: [4, 5], + json_field: "{\"keyword\":\"json_keyword4\",\"long\":40,\"float\":40.0,\"double\":40.0,\"date\":\"2021-04-01T00:00:00Z\",\"ip\":\"10.0.0.4\",\"boolean\":false, \"array_of_long\": [4, 5]}" + } + + - do: + index: + index: test + id: 5 + body: { + text: "peppers", + keyword: "quux", + long: 5, + float: 5.0, + double: 5.0, + date: "2017-01-05T00:00:00Z", + geo: [40.0, 60.0], + ip: "192.168.0.5", + boolean: true, + array_of_long: [5, 6], + json_field: "{\"keyword\":\"json_keyword5\",\"long\":50,\"float\":50.0,\"double\":50.0,\"date\":\"2021-05-01T00:00:00Z\",\"ip\":\"10.0.0.5\",\"boolean\":true, \"array_of_long\": [5, 6]}" + } + + - do: + indices.refresh: + index: [test] + + # Tests for derived_text + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + derived_text: + query: "peter piper" + + - match: { hits.total: 1 } + + # Tests for derived_keyword + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + derived_keyword: + value: "foo" + + - match: { hits.total: 1 } + + # Tests for derived_long + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + range: + derived_long: + gte: 1 + + - match: { hits.total: 4 } + + # Tests for derived_float + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + range: + derived_float: + gte: 1.0 + + - match: { hits.total: 4 } + + # Tests for derived_double + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + range: + derived_double: + gte: 1.0 + + - match: { hits.total: 4 } + + # Tests for derived_date + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + range: + derived_date: + gte: "2017-01-02" + + - match: { hits.total: 4 } + + # Tests for derived_geo + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + geo_distance: + distance: "20km" + derived_geo: + lat: 0.0 + lon: 20.0 + + - match: { hits.total: 1 } + + # Tests for derived_ip + - do: + search: + rest_total_hits_as_int: 
true + index: test + body: + query: + term: + derived_ip: + value: "192.168.0.1" + + - match: { hits.total: 1 } + + # Tests for derived_boolean + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + derived_boolean: + value: true + + - match: { hits.total: 3 } + + # Tests for derived_array_of_long + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + range: + derived_array_of_long: + gte: 3 + + - match: { hits.total: 4 } + + # Tests for derived_object.keyword + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + derived_object.keyword: + value: "json_keyword1" + + - match: { hits.total: 1 } + + # Tests for derived_object.long + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + range: + derived_object.long: + gte: 11 + + - match: { hits.total: 4 } + + # Tests for derived_object.float + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + range: + derived_object.float: + gte: 10.1 + + - match: { hits.total: 4 } + + # Tests for derived_object.double + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + range: + derived_object.double: + gte: 10.1 + + - match: { hits.total: 4 } + + # Tests for derived_object.date + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + range: + derived_object.date: + gte: "2021-03-01" + + - match: { hits.total: 3 } + + # Tests for derived_object.ip + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + derived_object.ip: + value: "10.0.0.1" + + - match: { hits.total: 1 } + + # Tests for derived_object.boolean + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + derived_object.boolean: + value: true + + - match: { hits.total: 3 } + + # Tests for derived_object.array_of_long + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + range: + derived_object.array_of_long: + gte: 3 + + - match: { hits.total: 4 } + + # Tests for query string + - do: + search: + rest_total_hits_as_int: true + index: test + q: "derived_keyword:foo" + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test + q: derived_object.keyword:json_keyword1 + + - match: { hits.total: 1 } diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/20_derived_field_put_mapping.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/20_derived_field_put_mapping.yml new file mode 100644 index 0000000000000..0370fd94e8548 --- /dev/null +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/20_derived_field_put_mapping.yml @@ -0,0 +1,123 @@ +--- +"Test create and update mapping for derived fields": + - skip: + version: " - 2.14.99" + reason: "derived_field feature was added in 2.15" + - do: + indices.create: + index: test_index + + - do: + indices.put_mapping: + index: test_index + body: + properties: + text: + type: text + json_field: + type: text + derived: + derived_text: + type: text + script: "emit(params._source[\"text\"])" + derived_text_prefilter_field: + type: keyword + script: "emit(params._source[\"text\"])" + prefilter_field: "text" + derived_date: + type: date + script: "emit(params._source[\"keyword\"])" + derived_object: + type: object + properties: + keyword: keyword + script: "emit(params._source[\"json_field\"])" + 
prefilter_field: "json_field" + + - do: + indices.get_mapping: + index: test_index + + - match: {test_index.mappings.derived.derived_text.type: text} + - match: {test_index.mappings.derived.derived_text_prefilter_field.type: keyword} + - match: {test_index.mappings.derived.derived_text_prefilter_field.prefilter_field: text} + - match: {test_index.mappings.derived.derived_date.type: date} + - match: {test_index.mappings.derived.derived_object.type: object} + - match: {test_index.mappings.derived.derived_object.properties.keyword: keyword} + - match: {test_index.mappings.derived.derived_object.prefilter_field: json_field} + + + - do: + indices.put_mapping: + index: test_index + body: + properties: + text: + type: text + json_field: + type: text + derived: + derived_text: + type: keyword + script: "emit(params._source[\"text\"])" + derived_text_prefilter_field: + type: text + script: "emit(params._source[\"text\"])" + prefilter_field: "text" + derived_date: + type: keyword + script: "emit(params._source[\"keyword\"])" + derived_object: + type: object + properties: + keyword: text + script: "emit(params._source[\"text\"])" + prefilter_field: "text" + format: "dd-MM-yyyy" + ignore_malformed: true + + - do: + indices.get_mapping: + index: test_index + + - match: {test_index.mappings.derived.derived_text.type: keyword} + - match: {test_index.mappings.derived.derived_text_prefilter_field.type: text} + - match: {test_index.mappings.derived.derived_text_prefilter_field.prefilter_field: text} + - match: {test_index.mappings.derived.derived_date.type: keyword} + - match: {test_index.mappings.derived.derived_object.type: object} + - match: {test_index.mappings.derived.derived_object.properties.keyword: text} + - match: {test_index.mappings.derived.derived_object.prefilter_field: text} + - match: {test_index.mappings.derived.derived_object.format: "dd-MM-yyyy"} + - match: {test_index.mappings.derived.derived_object.ignore_malformed: true} + + + - do: + indices.put_mapping: + index: test_index + body: + properties: + text: + type: text + json_field: + type: text + derived: + derived_object: + type: object + properties: + keyword: keyword + script: "emit(params._source[\"json_field\"])" + prefilter_field: "json_field" + ignore_malformed: false + + - do: + indices.get_mapping: + index: test_index + + - match: {test_index.mappings.derived.derived_text.type: keyword} + - match: {test_index.mappings.derived.derived_text_prefilter_field.type: text} + - match: {test_index.mappings.derived.derived_text_prefilter_field.prefilter_field: text} + - match: {test_index.mappings.derived.derived_date.type: keyword} + - match: {test_index.mappings.derived.derived_object.type: object} + - match: {test_index.mappings.derived.derived_object.properties.keyword: keyword} + - match: {test_index.mappings.derived.derived_object.prefilter_field: json_field} + - is_false: test_index.mappings.derived.derived_object.ignore_malformed diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/30_derived_field_search_definition.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/30_derived_field_search_definition.yml new file mode 100644 index 0000000000000..bb619dce63010 --- /dev/null +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/30_derived_field_search_definition.yml @@ -0,0 +1,489 @@ +"Test derived_field supported type using search definition": + - skip: + version: " - 2.14.99" + reason: 
"derived_field feature was added in 2.15" + + - do: + indices.create: + index: test + body: + mappings: + properties: + text: + type: text + keyword: + type: keyword + long: + type: long + float: + type: float + double: + type: double + date: + type: date + geo: + type: geo_point + ip: + type: ip + boolean: + type: boolean + array_of_long: + type: long + json_field: + type: text + + - do: + index: + index: test + id: 1 + body: { + text: "peter piper", + keyword: "foo", + long: 1, + float: 1.0, + double: 1.0, + date: "2017-01-01T00:00:00Z", + geo: [0.0, 20.0], + ip: "192.168.0.1", + boolean: true, + array_of_long: [1, 2], + json_field: "{\"keyword\":\"json_keyword1\",\"long\":10,\"float\":10.0,\"double\":10.0,\"date\":\"2021-01-01T00:00:00Z\",\"ip\":\"10.0.0.1\",\"boolean\":true, \"array_of_long\": [1, 2]}}" + } + + - do: + index: + index: test + id: 2 + body: { + text: "piper picked a peck", + keyword: "bar", + long: 2, + float: 2.0, + double: 2.0, + date: "2017-01-02T00:00:00Z", + geo: [10.0, 30.0], + ip: "192.168.0.2", + boolean: false, + array_of_long: [2, 3], + json_field: "{\"keyword\":\"json_keyword2\",\"long\":20,\"float\":20.0,\"double\":20.0,\"date\":\"2021-02-01T00:00:00Z\",\"ip\":\"10.0.0.2\",\"boolean\":false, \"array_of_long\": [2, 3]}}" + } + + - do: + index: + index: test + id: 3 + body: { + text: "peck of pickled peppers", + keyword: "baz", + long: -3, + float: -3.0, + double: -3.0, + date: "2017-01-03T00:00:00Z", + geo: [20.0, 40.0], + ip: "192.168.0.3", + boolean: true, + array_of_long: [3, 4], + json_field: "{\"keyword\":\"json_keyword3\",\"long\":30,\"float\":30.0,\"double\":30.0,\"date\":\"2021-03-01T00:00:00Z\",\"ip\":\"10.0.0.3\",\"boolean\":true, \"array_of_long\": [3, 4]}" + } + + - do: + index: + index: test + id: 4 + body: { + text: "pickled peppers", + keyword: "qux", + long: 4, + float: 4.0, + double: 4.0, + date: "2017-01-04T00:00:00Z", + geo: [30.0, 50.0], + ip: "192.168.0.4", + boolean: false, + array_of_long: [4, 5], + json_field: "{\"keyword\":\"json_keyword4\",\"long\":40,\"float\":40.0,\"double\":40.0,\"date\":\"2021-04-01T00:00:00Z\",\"ip\":\"10.0.0.4\",\"boolean\":false, \"array_of_long\": [4, 5]}" + } + + - do: + index: + index: test + id: 5 + body: { + text: "peppers", + keyword: "quux", + long: 5, + float: 5.0, + double: 5.0, + date: "2017-01-05T00:00:00Z", + geo: [40.0, 60.0], + ip: "192.168.0.5", + boolean: true, + array_of_long: [5, 6], + json_field: "{\"keyword\":\"json_keyword5\",\"long\":50,\"float\":50.0,\"double\":50.0,\"date\":\"2021-05-01T00:00:00Z\",\"ip\":\"10.0.0.5\",\"boolean\":true, \"array_of_long\": [5, 6]}" + } + + - do: + indices.refresh: + index: [test] + + # Tests for derived_text + - do: + search: + rest_total_hits_as_int: true + index: test + body: + derived: + derived_text: + type: text + script: "emit(params._source[\"text\"])" + query: + match_phrase: + derived_text: + query: "peter piper" + + - match: { hits.total: 1 } + + # Tests for derived_keyword + - do: + search: + rest_total_hits_as_int: true + index: test + body: + derived: + derived_keyword: + type: keyword + script: "emit(params._source[\"keyword\"])" + query: + term: + derived_keyword: + value: "foo" + + - match: { hits.total: 1 } + + # Tests for derived_long + - do: + search: + rest_total_hits_as_int: true + index: test + body: + derived: + derived_long: + type: long + script: "emit(params._source[\"long\"])" + query: + range: + derived_long: + gte: 1 + + - match: { hits.total: 4 } + + # Tests for derived_float + - do: + search: + rest_total_hits_as_int: true + 
index: test + body: + derived: + derived_float: + type: float + script: "emit(params._source[\"float\"])" + query: + range: + derived_float: + gte: 1.0 + + - match: { hits.total: 4 } + + # Tests for derived_double + - do: + search: + rest_total_hits_as_int: true + index: test + body: + derived: + derived_double: + type: double + script: "emit(params._source[\"double\"])" + query: + range: + derived_double: + gte: 1.0 + + - match: { hits.total: 4 } + + # Tests for derived_date + - do: + search: + rest_total_hits_as_int: true + index: test + body: + derived: + derived_date: + type: date + script: "emit(ZonedDateTime.parse(params._source[\"date\"]).toInstant().toEpochMilli())" + query: + range: + derived_date: + gte: "2017-01-02" + + - match: { hits.total: 4 } + + # Tests for derived_geo + - do: + search: + rest_total_hits_as_int: true + index: test + body: + derived: + derived_geo: + type: geo_point + script: "emit(params._source[\"geo\"][0], params._source[\"geo\"][1])" + query: + geo_distance: + distance: "20km" + derived_geo: + lat: 0.0 + lon: 20.0 + + - match: { hits.total: 1 } + + # Tests for derived_ip + - do: + search: + rest_total_hits_as_int: true + index: test + body: + derived: + derived_ip: + type: ip + script: "emit(params._source[\"ip\"])" + query: + term: + derived_ip: + value: "192.168.0.1" + + - match: { hits.total: 1 } + + # Tests for derived_boolean + - do: + search: + rest_total_hits_as_int: true + index: test + body: + derived: + derived_boolean: + type: boolean + script: "emit(params._source[\"boolean\"])" + query: + term: + derived_boolean: + value: true + + - match: { hits.total: 3 } + + # Tests for derived_array_of_long + - do: + search: + rest_total_hits_as_int: true + index: test + body: + derived: + derived_array_of_long: + type: long + script: "emit(params._source[\"array_of_long\"][0]);emit(params._source[\"array_of_long\"][1]);" + query: + range: + derived_array_of_long: + gte: 3 + + - match: { hits.total: 4 } + + # Tests for derived_object.keyword + - do: + search: + rest_total_hits_as_int: true + index: test + body: + derived: + derived_object: + type: object + properties: + keyword: keyword + script: "emit(params._source[\"json_field\"])" + prefilter_field: "json_field" + query: + term: + derived_object.keyword: + value: "json_keyword1" + + - match: { hits.total: 1 } + + # Tests for derived_object.long + - do: + search: + rest_total_hits_as_int: true + index: test + body: + derived: + derived_object: + type: object + properties: + keyword: keyword + script: "emit(params._source[\"json_field\"])" + prefilter_field: "json_field" + query: + range: + derived_object.long: + gte: 11 + + - match: { hits.total: 4 } + + # Tests for derived_object.float + - do: + search: + rest_total_hits_as_int: true + index: test + body: + derived: + derived_object: + type: object + properties: + keyword: keyword + script: "emit(params._source[\"json_field\"])" + prefilter_field: "json_field" + query: + range: + derived_object.float: + gte: 10.1 + + - match: { hits.total: 4 } + + # Tests for derived_object.double + - do: + search: + rest_total_hits_as_int: true + index: test + body: + derived: + derived_object: + type: object + properties: + keyword: keyword + script: "emit(params._source[\"json_field\"])" + prefilter_field: "json_field" + query: + range: + derived_object.double: + gte: 10.1 + + - match: { hits.total: 4 } + + # Tests for derived_object.date + - do: + search: + rest_total_hits_as_int: true + index: test + body: + derived: + derived_object: + type: object + 
properties: + keyword: keyword + script: "emit(params._source[\"json_field\"])" + prefilter_field: "json_field" + query: + range: + derived_object.date: + gte: "2021-03-01" + + - match: { hits.total: 3 } + + # Tests for derived_object.ip + - do: + search: + rest_total_hits_as_int: true + index: test + body: + derived: + derived_object: + type: object + properties: + keyword: keyword + script: "emit(params._source[\"json_field\"])" + prefilter_field: "json_field" + query: + term: + derived_object.ip: + value: "10.0.0.1" + + - match: { hits.total: 1 } + + # Tests for derived_object.boolean + - do: + search: + rest_total_hits_as_int: true + index: test + body: + derived: + derived_object: + type: object + properties: + keyword: keyword + script: "emit(params._source[\"json_field\"])" + prefilter_field: "json_field" + query: + term: + derived_object.boolean: + value: true + + - match: { hits.total: 3 } + + # Tests for derived_object.array_of_long + - do: + search: + rest_total_hits_as_int: true + index: test + body: + derived: + derived_object: + type: object + properties: + keyword: keyword + script: "emit(params._source[\"json_field\"])" + prefilter_field: "json_field" + query: + range: + derived_object.array_of_long: + gte: 3 + + - match: { hits.total: 4 } + + # Tests for query string + - do: + search: + body: + derived: + derived_keyword: + type: keyword + script: "emit(params._source[\"keyword\"])" + rest_total_hits_as_int: true + index: test + q: "derived_keyword:foo" + + - match: { hits.total: 1 } + + - do: + search: + body: + derived: + derived_object: + type: object + properties: + keyword: keyword + script: "emit(params._source[\"json_field\"])" + prefilter_field: "json_field" + rest_total_hits_as_int: true + index: test + q: derived_object.keyword:json_keyword1 + + - match: { hits.total: 1 } diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/40_derived_field_fetch_and_highlight.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/40_derived_field_fetch_and_highlight.yml new file mode 100644 index 0000000000000..52a897c341419 --- /dev/null +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/40_derived_field_fetch_and_highlight.yml @@ -0,0 +1,279 @@ +setup: + - skip: + version: " - 2.14.99" + reason: "derived_field feature was added in 2.15" + +--- +"Test basic field retrieval": + - do: + indices.create: + index: test + body: + mappings: + properties: + text: + type: text + keyword: + type: keyword + long: + type: long + float: + type: float + double: + type: double + date: + type: date + geo: + type: geo_point + ip: + type: ip + boolean: + type: boolean + array_of_long: + type: long + json_field: + type: text + derived: + derived_text: + type: text + script: "emit(params._source[\"text\"])" + derived_text_prefilter_field: + type: text + script: "emit(params._source[\"text\"])" + prefilter_field: "text" + derived_keyword: + type: keyword + script: "emit(params._source[\"keyword\"])" + derived_long: + type: long + script: "emit(params._source[\"long\"])" + derived_float: + type: float + script: "emit(params._source[\"float\"])" + derived_double: + type: double + script: "emit(params._source[\"double\"])" + derived_date: + type: date + script: "emit(ZonedDateTime.parse(params._source[\"date\"]).toInstant().toEpochMilli())" + derived_geo: + type: geo_point + script: "emit(params._source[\"geo\"][0], params._source[\"geo\"][1])" + derived_ip: + 
type: ip + script: "emit(params._source[\"ip\"])" + derived_boolean: + type: boolean + script: "emit(params._source[\"boolean\"])" + derived_array_of_long: + type: long + script: "emit(params._source[\"array_of_long\"][0]);emit(params._source[\"array_of_long\"][1]);" + derived_object: + type: object + properties: + keyword: keyword + script: "emit(params._source[\"json_field\"])" + prefilter_field: "json_field" + format: "yyyy-MM-dd" + + - do: + index: + index: test + id: 1 + body: { + text: "peter piper", + keyword: "foo", + long: 1, + float: 1.0, + double: 1.0, + date: "2017-01-01T00:00:00Z", + geo: [0.0, 20.0], + ip: "192.168.0.1", + boolean: true, + array_of_long: [1, 2], + json_field: "{\"keyword\":\"json_keyword1\",\"long\":10,\"float\":10.0,\"double\":10.0,\"date\":\"2021-01-01T00:00:00Z\",\"ip\":\"10.0.0.1\",\"boolean\":true, \"array_of_long\": [1, 2]}}" + } + + - do: + index: + index: test + id: 2 + body: { + text: "piper picked a peck", + keyword: "bar", + long: 2, + float: 2.0, + double: 2.0, + date: "2017-01-02T00:00:00Z", + geo: [10.0, 30.0], + ip: "192.168.0.2", + boolean: false, + array_of_long: [2, 3], + json_field: "{\"keyword\":\"json_keyword2\",\"long\":20,\"float\":20.0,\"double\":20.0,\"date\":\"2021-02-01T00:00:00Z\",\"ip\":\"10.0.0.2\",\"boolean\":false, \"array_of_long\": [2, 3]}}" + } + + - do: + indices.refresh: + index: [test] + + - do: + search: + index: test + body: + fields: [derived_text, derived_keyword, derived_long, derived_float, derived_double, derived_date, derived_geo, derived_ip, derived_boolean, derived_array_of_long, + derived_object, derived_object.keyword, derived_object.long, derived_object.float, derived_object.double, derived_object.date, derived_object.ip, derived_object.boolean, derived_object.array_of_long] + + - is_true: hits.hits.0._id + - is_true: hits.hits.0._source + + - match: { hits.hits.0.fields.derived_text.0: "peter piper" } + - match: { hits.hits.0.fields.derived_keyword.0: foo } + - match: { hits.hits.0.fields.derived_long.0: 1 } + - match: { hits.hits.0.fields.derived_float.0: 1.0 } + - match: { hits.hits.0.fields.derived_double.0: 1 } + - match: { hits.hits.0.fields.derived_date.0: 2017-01-01T00:00:00.000Z } + - match: { hits.hits.0.fields.derived_geo.0.lat: 0.0 } + - match: { hits.hits.0.fields.derived_geo.0.lon: 20.0 } + - match: { hits.hits.0.fields.derived_ip.0: 192.168.0.1 } + - match: { hits.hits.0.fields.derived_array_of_long.0: 1 } + - match: { hits.hits.0.fields.derived_array_of_long.1: 2 } + - match: { hits.hits.0.fields.derived_object.0: "{\"keyword\":\"json_keyword1\",\"long\":10,\"float\":10.0,\"double\":10.0,\"date\":\"2021-01-01T00:00:00Z\",\"ip\":\"10.0.0.1\",\"boolean\":true, \"array_of_long\": [1, 2]}}" } + - match: { hits.hits.0.fields.derived_object\.keyword.0: json_keyword1 } + - match: { hits.hits.0.fields.derived_object\.long.0: 10 } + - match: { hits.hits.0.fields.derived_object\.float.0: 10.0 } + - match: { hits.hits.0.fields.derived_object\.double.0: 10.0 } + - match: { hits.hits.0.fields.derived_object\.date.0: 2021-01-01 } + - match: { hits.hits.0.fields.derived_object\.ip.0: 10.0.0.1 } + - match: { hits.hits.0.fields.derived_object\.boolean.0: true } + - match: { hits.hits.0.fields.derived_object\.array_of_long.0: 1 } + - match: { hits.hits.0.fields.derived_object\.array_of_long.1: 2 } + + - match: { hits.hits.1.fields.derived_text.0: "piper picked a peck" } + - match: { hits.hits.1.fields.derived_keyword.0: bar } + - match: { hits.hits.1.fields.derived_long.0: 2 } + - match: { 
hits.hits.1.fields.derived_float.0: 2.0 } + - match: { hits.hits.1.fields.derived_double.0: 2 } + - match: { hits.hits.1.fields.derived_date.0: 2017-01-02T00:00:00.000Z } + - match: { hits.hits.1.fields.derived_geo.0.lat: 10.0 } + - match: { hits.hits.1.fields.derived_geo.0.lon: 30.0 } + - match: { hits.hits.1.fields.derived_ip.0: 192.168.0.2 } + - match: { hits.hits.1.fields.derived_array_of_long.0: 2 } + - match: { hits.hits.1.fields.derived_array_of_long.1: 3 } + - match: { hits.hits.1.fields.derived_object.0: "{\"keyword\":\"json_keyword2\",\"long\":20,\"float\":20.0,\"double\":20.0,\"date\":\"2021-02-01T00:00:00Z\",\"ip\":\"10.0.0.2\",\"boolean\":false, \"array_of_long\": [2, 3]}}" } + - match: { hits.hits.1.fields.derived_object\.keyword.0: json_keyword2 } + - match: { hits.hits.1.fields.derived_object\.long.0: 20 } + - match: { hits.hits.1.fields.derived_object\.float.0: 20.0 } + - match: { hits.hits.1.fields.derived_object\.double.0: 20.0 } + - match: { hits.hits.1.fields.derived_object\.date.0: 2021-02-01 } + - match: { hits.hits.1.fields.derived_object\.ip.0: 10.0.0.2 } + - match: { hits.hits.1.fields.derived_object\.boolean.0: false } + - match: { hits.hits.1.fields.derived_object\.array_of_long.0: 2 } + - match: { hits.hits.1.fields.derived_object\.array_of_long.1: 3 } + + +--- +"Test highlight": + - do: + indices.create: + index: test + body: + mappings: + properties: + text: + type: text + array_of_text: + type: text + json_field: + type: text + derived: + derived_text: + type: text + script: "emit(params._source[\"text\"])" + derived_keyword: + type: keyword + script: "emit(params._source[\"keyword\"])" + derived_array_of_text: + type: text + script: "emit(params._source[\"array_of_text\"][0]);emit(params._source[\"array_of_text\"][1]);" + derived_object: + type: object + properties: + array_of_text: text + script: "emit(params._source[\"json_field\"])" + prefilter_field: "json_field" + + - do: + index: + index: test + id: 1 + body: { + text: "peter piper", + keyword: "foo", + long: 1, + float: 1.0, + double: 1.0, + date: "2017-01-01T00:00:00Z", + geo: [0.0, 20.0], + ip: "192.168.0.1", + boolean: true, + array_of_text: ["The quick brown fox is brown", "The quick brown fox is black"], + json_field: "{\"keyword\":\"json_keyword1\",\"long\":10,\"float\":10.0,\"double\":10.0,\"date\":\"2021-01-01T00:00:00Z\",\"ip\":\"10.0.0.1\",\"boolean\":true, \"array_of_text\": [\"The quick brown fox is brown\", \"The quick brown fox is black\"]}}" + } + + - do: + index: + index: test + id: 2 + body: { + text: "piper picked a peck", + keyword: "bar", + long: 2, + float: 2.0, + double: 2.0, + date: "2017-01-02T00:00:00Z", + geo: [10.0, 30.0], + ip: "192.168.0.2", + boolean: false, + array_of_text: ["The quick brown fox is brown", "The quick brown fox is black"], + json_field: "{\"keyword\":\"json_keyword2\",\"long\":20,\"float\":20.0,\"double\":20.0,\"date\":\"2021-02-01T00:00:00Z\",\"ip\":\"10.0.0.2\",\"boolean\":false, \"array_of_text\": [\"The quick brown fox is brown\", \"The quick brown fox is black\"]}}" + } + + - do: + indices.refresh: + index: [test] + - do: + search: + rest_total_hits_as_int: true + body: { "query" : {"multi_match" : { "query" : "piper", "fields" : [ "derived_text"] } }, + "fields": [derived_text], + "highlight" : { "type" : "unified", "fields" : { "derived_text" : {} } } + } + + - match: {hits.hits.0.highlight.derived_text.0: "peter piper"} + + + - do: + search: + rest_total_hits_as_int: true + body: { "query" : {"multi_match" : { "query" : "quick brown", "fields" : 
[ "derived_array_of_text"] } }, + "fields": [derived_array_of_text], + "highlight" : { "type" : "unified", "fields" : { "derived_array_of_text" : {} } } + } + + - match: {hits.hits.0.highlight.derived_array_of_text.0: "The quick brown fox is brown"} + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + derived_object.array_of_text: + query: "quick brown" + highlight: + type: unified + fields: + derived_object.array_of_text: {} + + - match: {hits.hits.0.highlight.derived_object\.array_of_text.0: "The quick brown fox is brown"} diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/50_derived_field_default_analyzer.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/50_derived_field_default_analyzer.yml new file mode 100644 index 0000000000000..e10c9cb3c133f --- /dev/null +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/derived_fields/50_derived_field_default_analyzer.yml @@ -0,0 +1,105 @@ +--- +"Test default index analyzer simple is applied on derived fields": + - do: + indices.create: + index: test + body: + settings: + index.analysis.analyzer.default.type: simple + mappings: + properties: + text: + type: text + json_field: + type: text + derived: + derived_text: + type: text + script: "emit(params._source[\"text\"])" + derived_object: + type: object + properties: + array_of_text: text + script: "emit(params._source[\"json_field\"])" + prefilter_field: "json_field" + + - do: + index: + index: test + id: 1 + body: { + text: "Email: example@example.com, Visit https://example.com for more info.", + json_field: "{\"array_of_text\": [\"Email: example@example.com, Visit https://example.com for more info.\", \"Email: example@example.com, Visit https://example.com for more info.\"]}}" + } + + - do: + indices.refresh: + index: [test] + - do: + search: + index: test + q: "derived_text:example.com" + analyzer: standard + + - match: { hits.total.value: 0 } + + - do: + search: + index: test + q: "derived_text:example.com" + analyzer: simple + + - match: { hits.total.value: 1 } + +--- +"Test default index analyzer standard is applied on derived fields": + - do: + indices.create: + index: test + body: + settings: + index.analysis.analyzer.default.type: standard + mappings: + properties: + text: + type: text + json_field: + type: text + derived: + derived_text: + type: text + script: "emit(params._source[\"text\"])" + derived_object: + type: object + properties: + array_of_text: text + script: "emit(params._source[\"json_field\"])" + prefilter_field: "json_field" + + - do: + index: + index: test + id: 1 + body: { + text: "Email: example@example.com, Visit https://example.com for more info.", + json_field: "{\"array_of_text\": [\"Email: example@example.com, Visit https://example.com for more info.\", \"Email: example@example.com, Visit https://example.com for more info.\"]}}" + } + + - do: + indices.refresh: + index: [test] + - do: + search: + index: test + q: "derived_object.array_of_text:example.com" + analyzer: standard + + - match: { hits.total.value: 1 } + + - do: + search: + index: test + q: "derived_object.array_of_text:example.com" + analyzer: simple + + - match: { hits.total.value: 1 } diff --git a/modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/OpenSearchDashboardsModulePlugin.java b/modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/OpenSearchDashboardsModulePlugin.java index 
09fd52ff65c66..6d5020336eb0b 100644
--- a/modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/OpenSearchDashboardsModulePlugin.java
+++ b/modules/opensearch-dashboards/src/main/java/org/opensearch/dashboards/OpenSearchDashboardsModulePlugin.java
@@ -54,6 +54,7 @@
 import org.opensearch.rest.action.admin.indices.RestRefreshAction;
 import org.opensearch.rest.action.admin.indices.RestUpdateSettingsAction;
 import org.opensearch.rest.action.document.RestBulkAction;
+import org.opensearch.rest.action.document.RestBulkStreamingAction;
 import org.opensearch.rest.action.document.RestDeleteAction;
 import org.opensearch.rest.action.document.RestGetAction;
 import org.opensearch.rest.action.document.RestIndexAction;
@@ -127,6 +128,7 @@ public List<RestHandler> getRestHandlers(
             new OpenSearchDashboardsWrappedRestHandler(new RestMultiGetAction(settings)),
             new OpenSearchDashboardsWrappedRestHandler(new RestSearchAction()),
             new OpenSearchDashboardsWrappedRestHandler(new RestBulkAction(settings)),
+            new OpenSearchDashboardsWrappedRestHandler(new RestBulkStreamingAction(settings)),
+
             new OpenSearchDashboardsWrappedRestHandler(new RestDeleteAction()),
             new OpenSearchDashboardsWrappedRestHandler(new RestDeleteByQueryAction()),
diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index 04338d8933590..0000000000000
--- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-12630ff9c56e2a372ba57f519c579ff9e728208a
\ No newline at end of file
diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..6f0501d3312ae
--- /dev/null
+++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0.jar.sha1
@@ -0,0 +1 @@
+5c7f2d8eab0fca3fdc3d3e57a7f48a335dc7ac33
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index b8da0dacfe9f1..0000000000000
--- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-752bfc61c7829be6c27d9c1764250196e2c6b06b
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..25031381c9cb3
--- /dev/null
+++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0.jar.sha1
@@ -0,0 +1 @@
+efcf65dda1b4e9d7e83926fd5895a47e491cbf29
\ No newline at end of file
diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-4be6531.jar.sha1
deleted file mode 100644
index b58adc03938f3..0000000000000
--- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-4be6531.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5ca56d42b24498a226cf91f48b94e010b6af5fe2
\ No newline at end of file
diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0.jar.sha1
new file mode 100644
index 0000000000000..e27d45b217dad
--- /dev/null
+++
b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0.jar.sha1 @@ -0,0 +1 @@ +59599d7b8bed2e6bd27d0dad7935c078b98c39cc \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-4be6531.jar.sha1 deleted file mode 100644 index dea962647d995..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-4be6531.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8eb59a89aa8984457798ccffb8e97e5351bebc1f \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0.jar.sha1 new file mode 100644 index 0000000000000..ad5473865537d --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0.jar.sha1 @@ -0,0 +1 @@ +e55f83bb373ac139e313f64e80afe1eb0a75b8c0 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-4be6531.jar.sha1 deleted file mode 100644 index 1259b95a789a5..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-4be6531.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -851c1bd99eaef368e84335853dd448e4f56cdbc8 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0.jar.sha1 new file mode 100644 index 0000000000000..68abd162e7266 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0.jar.sha1 @@ -0,0 +1 @@ +1be59d91c45a4de069611fb7f8aa3e8fd26020ec \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-4be6531.jar.sha1 deleted file mode 100644 index 8c0d8fd278b89..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-4be6531.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -63647085d41ae231733580c20a498ce7c9134ce5 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0.jar.sha1 new file mode 100644 index 0000000000000..c5f1521ec3769 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0.jar.sha1 @@ -0,0 +1 @@ +d5b5922acf3743b5a0c542959dd93fca8be333a7 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-4be6531.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-4be6531.jar.sha1 deleted file mode 100644 index 0eb1fb5f2b31f..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-4be6531.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3ba7dd03b1df9efed08eb544689d51d2be22aa5 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0.jar.sha1 new file mode 100644 index 0000000000000..b676ca507467a --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0.jar.sha1 @@ -0,0 +1 @@ 
+50fd7b471cbdd6648c4972169f3fc67fae9db7f6
\ No newline at end of file
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java
index 4d7e0d486068a..bba676436c39a 100644
--- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java
@@ -70,7 +70,7 @@ public Collection<Object> createComponents(
         final Supplier<RepositoriesService> repositoriesServiceSupplier
     ) {
         // create top n queries service
-        final QueryInsightsService queryInsightsService = new QueryInsightsService(threadPool);
+        final QueryInsightsService queryInsightsService = new QueryInsightsService(clusterService.getClusterSettings(), threadPool, client);
         return List.of(queryInsightsService, new QueryInsightsListener(clusterService, queryInsightsService));
     }
@@ -110,7 +110,16 @@ public List<Setting<?>> getSettings() {
             // Settings for top N queries
             QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED,
             QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE,
-            QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE
+            QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE,
+            QueryInsightsSettings.TOP_N_LATENCY_EXPORTER_SETTINGS,
+            QueryInsightsSettings.TOP_N_CPU_QUERIES_ENABLED,
+            QueryInsightsSettings.TOP_N_CPU_QUERIES_SIZE,
+            QueryInsightsSettings.TOP_N_CPU_QUERIES_WINDOW_SIZE,
+            QueryInsightsSettings.TOP_N_CPU_EXPORTER_SETTINGS,
+            QueryInsightsSettings.TOP_N_MEMORY_QUERIES_ENABLED,
+            QueryInsightsSettings.TOP_N_MEMORY_QUERIES_SIZE,
+            QueryInsightsSettings.TOP_N_MEMORY_QUERIES_WINDOW_SIZE,
+            QueryInsightsSettings.TOP_N_MEMORY_EXPORTER_SETTINGS
         );
     }
 }
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/DebugExporter.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/DebugExporter.java
new file mode 100644
index 0000000000000..116bd26e1f9bc
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/DebugExporter.java
@@ -0,0 +1,61 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.insights.core.exporter;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;
+
+import java.util.List;
+
+/**
+ * Debug exporter for development purposes
+ */
+public final class DebugExporter implements QueryInsightsExporter {
+    /**
+     * Logger of the debug exporter
+     */
+    private final Logger logger = LogManager.getLogger();
+
+    /**
+     * Constructor of DebugExporter
+     */
+    private DebugExporter() {}
+
+    private static class InstanceHolder {
+        private static final DebugExporter INSTANCE = new DebugExporter();
+    }
+
+    /**
+     * Get the singleton instance of DebugExporter
+     *
+     * @return DebugExporter instance
+     */
+    public static DebugExporter getInstance() {
+        return InstanceHolder.INSTANCE;
+    }
+
+    /**
+     * Write the list of SearchQueryRecord to debug log
+     *
+     * @param records list of {@link SearchQueryRecord}
+     */
+    @Override
+    public void export(final List<SearchQueryRecord> records) {
+        logger.debug("QUERY_INSIGHTS_RECORDS: " + records.toString());
+    }
+
+    /**
+     * Close the debug exporter sink
+     */
+    @Override
+    public void close() {
+        logger.debug("Closing the DebugExporter..");
+    }
+}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/LocalIndexExporter.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/LocalIndexExporter.java
new file mode 100644
index 0000000000000..c19fe3655098b
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/LocalIndexExporter.java
@@ -0,0 +1,113 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.insights.core.exporter;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.action.bulk.BulkRequestBuilder;
+import org.opensearch.action.bulk.BulkResponse;
+import org.opensearch.action.index.IndexRequest;
+import org.opensearch.client.Client;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.common.xcontent.XContentFactory;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.DateTimeFormatter;
+
+import java.util.List;
+
+/**
+ * Local index exporter for exporting query insights data to local OpenSearch indices.
+ */
+public final class LocalIndexExporter implements QueryInsightsExporter {
+    /**
+     * Logger of the local index exporter
+     */
+    private final Logger logger = LogManager.getLogger();
+    private final Client client;
+    private DateTimeFormatter indexPattern;
+
+    /**
+     * Constructor of LocalIndexExporter
+     *
+     * @param client OS client
+     * @param indexPattern the pattern of index to export to
+     */
+    public LocalIndexExporter(final Client client, final DateTimeFormatter indexPattern) {
+        this.indexPattern = indexPattern;
+        this.client = client;
+    }
+
+    /**
+     * Getter of indexPattern
+     *
+     * @return indexPattern
+     */
+    public DateTimeFormatter getIndexPattern() {
+        return indexPattern;
+    }
+
+    /**
+     * Setter of indexPattern
+     *
+     * @param indexPattern index pattern
+     * @return the current LocalIndexExporter
+     */
+    public LocalIndexExporter setIndexPattern(DateTimeFormatter indexPattern) {
+        this.indexPattern = indexPattern;
+        return this;
+    }
+
+    /**
+     * Export a list of SearchQueryRecord to a local index
+     *
+     * @param records list of {@link SearchQueryRecord}
+     */
+    @Override
+    public void export(final List<SearchQueryRecord> records) {
+        if (records == null || records.size() == 0) {
+            return;
+        }
+        try {
+            final String index = getDateTimeFromFormat();
+            final BulkRequestBuilder bulkRequestBuilder = client.prepareBulk().setTimeout(TimeValue.timeValueMinutes(1));
+            for (SearchQueryRecord record : records) {
+                bulkRequestBuilder.add(
+                    new IndexRequest(index).source(record.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS))
+                );
+            }
+            bulkRequestBuilder.execute(new ActionListener<BulkResponse>() {
+                @Override
+                public void onResponse(BulkResponse bulkItemResponses) {}
+
+                @Override
+                public void onFailure(Exception e) {
+                    logger.error("Failed to execute bulk operation for query insights data: ", e);
+                }
+            });
+        } catch (final Exception e) {
+            logger.error("Unable to index query insights data: ", e);
+        }
+    }
+
+    /**
+     * Close the exporter sink
+     */
+    @Override
+    public void close() {
+        logger.debug("Closing the LocalIndexExporter..");
+    }
+
+    private String getDateTimeFromFormat() {
+        return indexPattern.print(DateTime.now(DateTimeZone.UTC));
+    }
+}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporter.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporter.java
new file mode 100644
index 0000000000000..42e5354eb1640
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporter.java
@@ -0,0 +1,26 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.insights.core.exporter;
+
+import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;
+
+import java.io.Closeable;
+import java.util.List;
+
+/**
+ * Base interface for Query Insights exporters
+ */
+public interface QueryInsightsExporter extends Closeable {
+    /**
+     * Export a list of SearchQueryRecord to the exporter sink
+     *
+     * @param records list of {@link SearchQueryRecord}
+     */
+    void export(final List<SearchQueryRecord> records);
+}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporterFactory.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporterFactory.java
new file mode 100644
index 0000000000000..016911761a3d0
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporterFactory.java
@@ -0,0 +1,143 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.insights.core.exporter;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.client.Client;
+import org.opensearch.common.settings.Settings;
+import org.joda.time.format.DateTimeFormat;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Locale;
+import java.util.Set;
+
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.DEFAULT_TOP_N_QUERIES_INDEX_PATTERN;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.DEFAULT_TOP_QUERIES_EXPORTER_TYPE;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.EXPORTER_TYPE;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.EXPORT_INDEX;
+
+/**
+ * Factory class for validating and creating exporters based on provided settings
+ */
+public class QueryInsightsExporterFactory {
+    /**
+     * Logger of the query insights exporter factory
+     */
+    private final Logger logger = LogManager.getLogger();
+    final private Client client;
+    final private Set<QueryInsightsExporter> exporters;
+
+    /**
+     * Constructor of QueryInsightsExporterFactory
+     *
+     * @param client OS client
+     */
+    public QueryInsightsExporterFactory(final Client client) {
+        this.client = client;
+        this.exporters = new HashSet<>();
+    }
+
+    /**
+     * Validate exporter sink config
+     *
+     * @param settings exporter sink config {@link Settings}
+     * @throws IllegalArgumentException if provided exporter sink config settings are invalid
+     */
+    public void validateExporterConfig(final Settings settings) throws IllegalArgumentException {
+        // Disable exporter if the EXPORTER_TYPE setting is null
+        if (settings.get(EXPORTER_TYPE) == null) {
+            return;
+        }
+        SinkType type;
+        try {
+            type = SinkType.parse(settings.get(EXPORTER_TYPE, DEFAULT_TOP_QUERIES_EXPORTER_TYPE));
+        } catch (IllegalArgumentException e) {
+            throw new IllegalArgumentException(
+                String.format(
+                    Locale.ROOT,
+                    "Invalid exporter type [%s], type should be one of %s",
+                    settings.get(EXPORTER_TYPE),
+                    SinkType.allSinkTypes()
+                )
+            );
+        }
+        switch (type) {
+            case LOCAL_INDEX:
+                final String indexPattern = settings.get(EXPORT_INDEX, DEFAULT_TOP_N_QUERIES_INDEX_PATTERN);
+                if (indexPattern.length() == 0) {
+                    throw new IllegalArgumentException("Empty index pattern configured for the exporter");
+                }
+                try {
+                    DateTimeFormat.forPattern(indexPattern);
+                } catch (Exception e) {
+                    throw new IllegalArgumentException(
+                        String.format(Locale.ROOT, "Invalid index pattern [%s] configured for the exporter", indexPattern)
+                    );
+                }
+        }
+    }
+
+    /**
+     * Create an exporter based on provided parameters
+     *
+     * @param type The type of exporter to create
+     * @param indexPattern the index pattern if creating an index exporter
+     * @return QueryInsightsExporter the created exporter sink
+     */
+    public QueryInsightsExporter createExporter(SinkType type, String indexPattern) {
+        if (SinkType.LOCAL_INDEX.equals(type)) {
+            QueryInsightsExporter exporter = new LocalIndexExporter(client, DateTimeFormat.forPattern(indexPattern));
+            this.exporters.add(exporter);
+            return exporter;
+        }
+        return DebugExporter.getInstance();
+    }
+
+    /**
+     * Update an exporter based on provided parameters
+     *
+     * @param exporter The exporter to update
+     * @param indexPattern the index pattern if creating an index exporter
+     * @return QueryInsightsExporter the updated exporter sink
+     */
+    public QueryInsightsExporter updateExporter(QueryInsightsExporter exporter, String indexPattern) {
+        if (exporter.getClass() == LocalIndexExporter.class) {
+            ((LocalIndexExporter) exporter).setIndexPattern(DateTimeFormat.forPattern(indexPattern));
+        }
+        return exporter;
+    }
+
+    /**
+     * Close an exporter
+     *
+     * @param exporter the exporter to close
+     */
+    public void closeExporter(QueryInsightsExporter exporter) throws IOException {
+        if (exporter != null) {
+            exporter.close();
+            this.exporters.remove(exporter);
+        }
+    }
+
+    /**
+     * Close all exporters
+     *
+     */
+    public void closeAllExporters() {
+        for (QueryInsightsExporter exporter : exporters) {
+            try {
+                closeExporter(exporter);
+            } catch (IOException e) {
+                logger.error("Fail to close query insights exporter, error: ", e);
+            }
+        }
+    }
+}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/SinkType.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/SinkType.java
new file mode 100644
index 0000000000000..c90c9c76b6706
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/SinkType.java
@@ -0,0 +1,66 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.insights.core.exporter;
+
+import java.util.Arrays;
+import java.util.Locale;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * Type of supported sinks
+ */
+public enum SinkType {
+    /** debug exporter */
+    DEBUG("debug"),
+    /** local index exporter */
+    LOCAL_INDEX("local_index");
+
+    private final String type;
+
+    SinkType(String type) {
+        this.type = type;
+    }
+
+    @Override
+    public String toString() {
+        return type;
+    }
+
+    /**
+     * Parse SinkType from String
+     * @param type the String representation of the SinkType
+     * @return SinkType
+     */
+    public static SinkType parse(final String type) {
+        return valueOf(type.toUpperCase(Locale.ROOT));
+    }
+
+    /**
+     * Get all valid SinkTypes
+     *
+     * @return A set containing all valid SinkTypes
+     */
+    public static Set<SinkType> allSinkTypes() {
+        return Arrays.stream(values()).collect(Collectors.toSet());
+    }
+
+    /**
+     * Get Sink type from exporter
+     *
+     * @param exporter the {@link QueryInsightsExporter}
+     * @return SinkType associated with this exporter
+     */
+    public static SinkType getSinkTypeFromExporter(QueryInsightsExporter exporter) {
+        if (exporter.getClass().equals(LocalIndexExporter.class)) {
+            return SinkType.LOCAL_INDEX;
+        }
+        return SinkType.DEBUG;
+    }
+}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/package-info.java
new file mode 100644
index 0000000000000..7164411194f85
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/exporter/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +/** + * Query Insights exporter + */ +package org.opensearch.plugin.insights.core.exporter; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java index 9ec8673147c38..a1f810ad5987c 100644 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java @@ -14,23 +14,27 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchRequestContext; import org.opensearch.action.search.SearchRequestOperationsListener; +import org.opensearch.action.search.SearchTask; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.tasks.resourcetracker.TaskResourceInfo; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.plugin.insights.core.service.QueryInsightsService; import org.opensearch.plugin.insights.rules.model.Attribute; import org.opensearch.plugin.insights.rules.model.MetricType; import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; +import org.opensearch.tasks.Task; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.concurrent.TimeUnit; -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED; -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE; -import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE; +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.getTopNEnabledSetting; +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.getTopNSizeSetting; +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.getTopNWindowSizeSetting; /** * The listener for query insights services. 
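(Editor's illustration — the sketch below is not part of this patch.) The listener hunks that follow replace the three hard-coded latency setting consumers with a loop that registers an enabled/size/window-size trio per metric type. The underlying mechanism is OpenSearch's ClusterSettings consumer API, whose optional third argument is a validator that runs before the consumer on every dynamic update, so bad values are rejected before any state changes. A minimal sketch of that pattern, using a hypothetical setting key and bounds:

// Illustrative sketch of the ClusterSettings registration pattern used below.
// The setting key and validation bounds are hypothetical, not from this patch.
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Setting;

final class SettingsConsumerSketch {
    // A dynamic node-scoped setting shaped like the top_n size settings.
    static final Setting<Integer> EXAMPLE_TOP_N_SIZE = Setting.intSetting(
        "search.insights.top_queries.example.top_n_size", // hypothetical key
        10,
        Setting.Property.NodeScope,
        Setting.Property.Dynamic
    );

    static void register(final ClusterSettings clusterSettings) {
        // The setting must also be exposed via the plugin's getSettings(),
        // otherwise addSettingsUpdateConsumer rejects it as unregistered.
        clusterSettings.addSettingsUpdateConsumer(
            EXAMPLE_TOP_N_SIZE,
            newSize -> { /* apply the accepted value */ },
            newSize -> {
                // Validator runs first; throwing here rejects the whole update.
                if (newSize <= 0) {
                    throw new IllegalArgumentException("top_n_size must be positive: " + newSize);
                }
            }
        );
    }
}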
@@ -45,6 +49,7 @@ public final class QueryInsightsListener extends SearchRequestOperationsListener private static final Logger log = LogManager.getLogger(QueryInsightsListener.class); private final QueryInsightsService queryInsightsService; + private final ClusterService clusterService; /** * Constructor for QueryInsightsListener @@ -54,26 +59,32 @@ public final class QueryInsightsListener extends SearchRequestOperationsListener */ @Inject public QueryInsightsListener(final ClusterService clusterService, final QueryInsightsService queryInsightsService) { + this.clusterService = clusterService; this.queryInsightsService = queryInsightsService; - clusterService.getClusterSettings() - .addSettingsUpdateConsumer(TOP_N_LATENCY_QUERIES_ENABLED, v -> this.setEnableTopQueries(MetricType.LATENCY, v)); - clusterService.getClusterSettings() - .addSettingsUpdateConsumer( - TOP_N_LATENCY_QUERIES_SIZE, - v -> this.queryInsightsService.getTopQueriesService(MetricType.LATENCY).setTopNSize(v), - v -> this.queryInsightsService.getTopQueriesService(MetricType.LATENCY).validateTopNSize(v) - ); - clusterService.getClusterSettings() - .addSettingsUpdateConsumer( - TOP_N_LATENCY_QUERIES_WINDOW_SIZE, - v -> this.queryInsightsService.getTopQueriesService(MetricType.LATENCY).setWindowSize(v), - v -> this.queryInsightsService.getTopQueriesService(MetricType.LATENCY).validateWindowSize(v) - ); - this.setEnableTopQueries(MetricType.LATENCY, clusterService.getClusterSettings().get(TOP_N_LATENCY_QUERIES_ENABLED)); - this.queryInsightsService.getTopQueriesService(MetricType.LATENCY) - .setTopNSize(clusterService.getClusterSettings().get(TOP_N_LATENCY_QUERIES_SIZE)); - this.queryInsightsService.getTopQueriesService(MetricType.LATENCY) - .setWindowSize(clusterService.getClusterSettings().get(TOP_N_LATENCY_QUERIES_WINDOW_SIZE)); + // Setting endpoints set up for top n queries, including enabling top n queries, window size and top n size + // Expected metricTypes are Latency, CPU and Memory. 
+        for (MetricType type : MetricType.allMetricTypes()) {
+            clusterService.getClusterSettings()
+                .addSettingsUpdateConsumer(getTopNEnabledSetting(type), v -> this.setEnableTopQueries(type, v));
+            clusterService.getClusterSettings()
+                .addSettingsUpdateConsumer(
+                    getTopNSizeSetting(type),
+                    v -> this.queryInsightsService.setTopNSize(type, v),
+                    v -> this.queryInsightsService.validateTopNSize(type, v)
+                );
+            clusterService.getClusterSettings()
+                .addSettingsUpdateConsumer(
+                    getTopNWindowSizeSetting(type),
+                    v -> this.queryInsightsService.setWindowSize(type, v),
+                    v -> this.queryInsightsService.validateWindowSize(type, v)
+                );
+
+            this.setEnableTopQueries(type, clusterService.getClusterSettings().get(getTopNEnabledSetting(type)));
+            this.queryInsightsService.validateTopNSize(type, clusterService.getClusterSettings().get(getTopNSizeSetting(type)));
+            this.queryInsightsService.setTopNSize(type, clusterService.getClusterSettings().get(getTopNSizeSetting(type)));
+            this.queryInsightsService.validateWindowSize(type, clusterService.getClusterSettings().get(getTopNWindowSizeSetting(type)));
+            this.queryInsightsService.setWindowSize(type, clusterService.getClusterSettings().get(getTopNWindowSizeSetting(type)));
+        }
     }
 
     /**
@@ -123,6 +134,27 @@ public void onRequestStart(SearchRequestContext searchRequestContext) {}
 
     @Override
     public void onRequestEnd(final SearchPhaseContext context, final SearchRequestContext searchRequestContext) {
+        constructSearchQueryRecord(context, searchRequestContext);
+    }
+
+    @Override
+    public void onRequestFailure(final SearchPhaseContext context, final SearchRequestContext searchRequestContext) {
+        constructSearchQueryRecord(context, searchRequestContext);
+    }
+
+    private void constructSearchQueryRecord(final SearchPhaseContext context, final SearchRequestContext searchRequestContext) {
+        SearchTask searchTask = context.getTask();
+        List<TaskResourceInfo> tasksResourceUsages = searchRequestContext.getPhaseResourceUsage();
+        tasksResourceUsages.add(
+            new TaskResourceInfo(
+                searchTask.getAction(),
+                searchTask.getId(),
+                searchTask.getParentTaskId().getId(),
+                clusterService.localNode().getId(),
+                searchTask.getTotalResourceStats()
+            )
+        );
+
         final SearchRequest request = context.getRequest();
         try {
             Map<MetricType, Number> measurements = new HashMap<>();
@@ -132,16 +164,39 @@ public void onRequestEnd(final SearchPhaseContext context, final SearchRequestCo
                     TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - searchRequestContext.getAbsoluteStartNanos())
                 );
             }
+            if (queryInsightsService.isCollectionEnabled(MetricType.CPU)) {
+                measurements.put(
+                    MetricType.CPU,
+                    tasksResourceUsages.stream().map(a -> a.getTaskResourceUsage().getCpuTimeInNanos()).mapToLong(Long::longValue).sum()
+                );
+            }
+            if (queryInsightsService.isCollectionEnabled(MetricType.MEMORY)) {
+                measurements.put(
+                    MetricType.MEMORY,
+                    tasksResourceUsages.stream().map(a -> a.getTaskResourceUsage().getMemoryInBytes()).mapToLong(Long::longValue).sum()
+                );
+            }
             Map<Attribute, Object> attributes = new HashMap<>();
             attributes.put(Attribute.SEARCH_TYPE, request.searchType().toString().toLowerCase(Locale.ROOT));
             attributes.put(Attribute.SOURCE, request.source().toString(FORMAT_PARAMS));
             attributes.put(Attribute.TOTAL_SHARDS, context.getNumShards());
             attributes.put(Attribute.INDICES, request.indices());
             attributes.put(Attribute.PHASE_LATENCY_MAP, searchRequestContext.phaseTookMap());
+            attributes.put(Attribute.TASK_RESOURCE_USAGES, tasksResourceUsages);
+
+            Map<String, Object> labels = new HashMap<>();
+            // Retrieve user provided label if exists
+            String userProvidedLabel =
context.getTask().getHeader(Task.X_OPAQUE_ID); + if (userProvidedLabel != null) { + labels.put(Task.X_OPAQUE_ID, userProvidedLabel); + } + attributes.put(Attribute.LABELS, labels); + // construct SearchQueryRecord from attributes and measurements SearchQueryRecord record = new SearchQueryRecord(request.getOrCreateAbsoluteStartMillis(), measurements, attributes); queryInsightsService.addRecord(record); } catch (Exception e) { log.error(String.format(Locale.ROOT, "fail to ingest query insight data, error: %s", e)); } } + } diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java index 525ca0d4a3d33..c63430a1a726c 100644 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java @@ -8,14 +8,20 @@ package org.opensearch.plugin.insights.core.service; +import org.opensearch.client.Client; import org.opensearch.common.inject.Inject; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugin.insights.core.exporter.QueryInsightsExporterFactory; import org.opensearch.plugin.insights.rules.model.MetricType; import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; import org.opensearch.plugin.insights.settings.QueryInsightsSettings; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; +import java.io.IOException; import java.util.ArrayList; import java.util.Comparator; import java.util.HashMap; @@ -23,6 +29,8 @@ import java.util.Map; import java.util.concurrent.LinkedBlockingQueue; +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.getExporterSettings; + /** * Service responsible for gathering, analyzing, storing and exporting * information related to search queries @@ -56,21 +64,37 @@ public class QueryInsightsService extends AbstractLifecycleComponent { */ protected volatile Scheduler.Cancellable scheduledFuture; + /** + * Query Insights exporter factory + */ + final QueryInsightsExporterFactory queryInsightsExporterFactory; + /** * Constructor of the QueryInsightsService * - * @param threadPool The OpenSearch thread pool to run async tasks + * @param clusterSettings OpenSearch cluster level settings + * @param threadPool The OpenSearch thread pool to run async tasks + * @param client OS client */ @Inject - public QueryInsightsService(final ThreadPool threadPool) { + public QueryInsightsService(final ClusterSettings clusterSettings, final ThreadPool threadPool, final Client client) { enableCollect = new HashMap<>(); queryRecordsQueue = new LinkedBlockingQueue<>(QueryInsightsSettings.QUERY_RECORD_QUEUE_CAPACITY); + this.threadPool = threadPool; + this.queryInsightsExporterFactory = new QueryInsightsExporterFactory(client); + // initialize top n queries services and configurations consumers topQueriesServices = new HashMap<>(); for (MetricType metricType : MetricType.allMetricTypes()) { enableCollect.put(metricType, false); - topQueriesServices.put(metricType, new TopQueriesService(metricType)); + topQueriesServices.put(metricType, new TopQueriesService(metricType, threadPool, queryInsightsExporterFactory)); + 
} + for (MetricType type : MetricType.allMetricTypes()) { + clusterSettings.addSettingsUpdateConsumer( + getExporterSettings(type), + (settings -> setExporter(type, settings)), + (settings -> validateExporterConfig(type, settings)) + ); } - this.threadPool = threadPool; } /** @@ -157,6 +181,78 @@ public boolean isEnabled() { return false; } + /** + * Validate the window size config for a metricType + * + * @param type {@link MetricType} + * @param windowSize {@link TimeValue} + */ + public void validateWindowSize(final MetricType type, final TimeValue windowSize) { + if (topQueriesServices.containsKey(type)) { + topQueriesServices.get(type).validateWindowSize(windowSize); + } + } + + /** + * Set window size for a metricType + * + * @param type {@link MetricType} + * @param windowSize {@link TimeValue} + */ + public void setWindowSize(final MetricType type, final TimeValue windowSize) { + if (topQueriesServices.containsKey(type)) { + topQueriesServices.get(type).setWindowSize(windowSize); + } + } + + /** + * Validate the top n size config for a metricType + * + * @param type {@link MetricType} + * @param topNSize top n size + */ + public void validateTopNSize(final MetricType type, final int topNSize) { + if (topQueriesServices.containsKey(type)) { + topQueriesServices.get(type).validateTopNSize(topNSize); + } + } + + /** + * Set the top n size config for a metricType + * + * @param type {@link MetricType} + * @param topNSize top n size + */ + public void setTopNSize(final MetricType type, final int topNSize) { + if (topQueriesServices.containsKey(type)) { + topQueriesServices.get(type).setTopNSize(topNSize); + } + } + + /** + * Set the exporter config for a metricType + * + * @param type {@link MetricType} + * @param settings exporter settings + */ + public void setExporter(final MetricType type, final Settings settings) { + if (topQueriesServices.containsKey(type)) { + topQueriesServices.get(type).setExporter(settings); + } + } + + /** + * Validate the exporter config for a metricType + * + * @param type {@link MetricType} + * @param settings exporter settings + */ + public void validateExporterConfig(final MetricType type, final Settings settings) { + if (topQueriesServices.containsKey(type)) { + topQueriesServices.get(type).validateExporterConfig(settings); + } + } + @Override protected void doStart() { if (isEnabled()) { @@ -176,5 +272,12 @@ protected void doStop() { } @Override - protected void doClose() {} + protected void doClose() throws IOException { + // close all top n queries service + for (TopQueriesService topQueriesService : topQueriesServices.values()) { + topQueriesService.close(); + } + // close any unclosed resources + queryInsightsExporterFactory.closeAllExporters(); + } } diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java index d2c30cbdf98e7..c21b89be4dcca 100644 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java @@ -8,11 +8,19 @@ package org.opensearch.plugin.insights.core.service; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugin.insights.core.exporter.QueryInsightsExporter; 
+import org.opensearch.plugin.insights.core.exporter.QueryInsightsExporterFactory;
+import org.opensearch.plugin.insights.core.exporter.SinkType;
 import org.opensearch.plugin.insights.rules.model.MetricType;
 import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;
 import org.opensearch.plugin.insights.settings.QueryInsightsSettings;
+import org.opensearch.threadpool.ThreadPool;
 
+import java.io.IOException;
 import java.time.Instant;
 import java.time.LocalDateTime;
 import java.time.ZoneId;
@@ -27,6 +35,12 @@
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.DEFAULT_TOP_N_QUERIES_INDEX_PATTERN;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.DEFAULT_TOP_QUERIES_EXPORTER_TYPE;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.EXPORTER_TYPE;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.EXPORT_INDEX;
+import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.QUERY_INSIGHTS_EXECUTOR;
+
 /**
  * Service responsible for gathering and storing top N queries
  * with high latency or resource usage
@@ -34,6 +48,10 @@
  * @opensearch.internal
  */
 public class TopQueriesService {
+    /**
+     * Logger of the top queries service
+     */
+    private final Logger logger = LogManager.getLogger();
     private boolean enabled;
     /**
      * The metric type to measure top n queries
      */
@@ -63,12 +81,34 @@
      */
     private final AtomicReference<List<SearchQueryRecord>> topQueriesHistorySnapshot;
 
-    TopQueriesService(final MetricType metricType) {
+    /**
+     * Factory for validating and creating exporters
+     */
+    private final QueryInsightsExporterFactory queryInsightsExporterFactory;
+
+    /**
+     * The internal OpenSearch thread pool that executes async processing and exporting tasks
+     */
+    private final ThreadPool threadPool;
+
+    /**
+     * Exporter for exporting top queries data
+     */
+    private QueryInsightsExporter exporter;
+
+    TopQueriesService(
+        final MetricType metricType,
+        final ThreadPool threadPool,
+        final QueryInsightsExporterFactory queryInsightsExporterFactory
+    ) {
         this.enabled = false;
         this.metricType = metricType;
+        this.threadPool = threadPool;
+        this.queryInsightsExporterFactory = queryInsightsExporterFactory;
         this.topNSize = QueryInsightsSettings.DEFAULT_TOP_N_SIZE;
         this.windowSize = QueryInsightsSettings.DEFAULT_WINDOW_SIZE;
         this.windowStart = -1L;
+        this.exporter = null;
         topQueriesStore = new PriorityQueue<>(topNSize, (a, b) -> SearchQueryRecord.compare(a, b, metricType));
         topQueriesCurrentSnapshot = new AtomicReference<>(new ArrayList<>());
         topQueriesHistorySnapshot = new AtomicReference<>(new ArrayList<>());
@@ -169,6 +209,47 @@ public void validateWindowSize(final TimeValue windowSize) {
         }
     }
 
+    /**
+     * Set up the top queries exporter based on provided settings
+     *
+     * @param settings exporter config {@link Settings}
+     */
+    public void setExporter(final Settings settings) {
+        if (settings.get(EXPORTER_TYPE) != null) {
+            SinkType expectedType = SinkType.parse(settings.get(EXPORTER_TYPE, DEFAULT_TOP_QUERIES_EXPORTER_TYPE));
+            if (exporter != null && expectedType == SinkType.getSinkTypeFromExporter(exporter)) {
+                queryInsightsExporterFactory.updateExporter(exporter, settings.get(EXPORT_INDEX, DEFAULT_TOP_N_QUERIES_INDEX_PATTERN));
+            } else {
+                try {
+                    queryInsightsExporterFactory.closeExporter(this.exporter);
+                } catch (IOException e) {
+                    logger.error("Fail to close the current exporter when updating exporter, error: ", e);
+                }
+                this.exporter = queryInsightsExporterFactory.createExporter(
+                    SinkType.parse(settings.get(EXPORTER_TYPE, DEFAULT_TOP_QUERIES_EXPORTER_TYPE)),
+                    settings.get(EXPORT_INDEX, DEFAULT_TOP_N_QUERIES_INDEX_PATTERN)
+                );
+            }
+        } else {
+            // Disable exporter if exporter type is set to null
+            try {
+                queryInsightsExporterFactory.closeExporter(this.exporter);
+                this.exporter = null;
+            } catch (IOException e) {
+                logger.error("Fail to close the current exporter when disabling exporter, error: ", e);
+            }
+        }
+    }
+
+    /**
+     * Validate provided settings for top queries exporter
+     *
+     * @param settings exporter config {@link Settings}
+     */
+    public void validateExporterConfig(Settings settings) {
+        queryInsightsExporterFactory.validateExporterConfig(settings);
+    }
+
     /**
      * Get all top queries records that are in the current top n queries store
      * Optionally include top N records from the last window.
@@ -254,6 +335,10 @@ private void rotateWindowIfNecessary(final long newWindowStart) {
             topQueriesStore.clear();
             topQueriesCurrentSnapshot.set(new ArrayList<>());
             windowStart = newWindowStart;
+            // export to the configured sink
+            if (exporter != null) {
+                threadPool.executor(QUERY_INSIGHTS_EXECUTOR).execute(() -> exporter.export(history));
+            }
         }
     }
 
@@ -279,4 +364,11 @@ private long calculateWindowStart(final long timestamp) {
     public List<SearchQueryRecord> getTopQueriesCurrentSnapshot() {
         return topQueriesCurrentSnapshot.get();
     }
+
+    /**
+     * Close the top n queries service
+     */
+    public void close() throws IOException {
+        queryInsightsExporterFactory.closeExporter(this.exporter);
+    }
 }
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java
index c1d17edf9ff14..dcdb085fdc6fa 100644
--- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java
@@ -43,7 +43,15 @@ public enum Attribute {
     /**
      * The node id for this request
      */
-    NODE_ID;
+    NODE_ID,
+    /**
+     * Tasks level resource usages in this request
+     */
+    TASK_RESOURCE_USAGES,
+    /**
+     * Custom search request labels
+     */
+    LABELS;
 
     /**
      * Read an Attribute from a StreamInput
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java
index cdd090fbf4804..4694c757f4ef2 100644
--- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java
@@ -35,7 +35,7 @@ public enum MetricType implements Comparator<Number> {
     /**
      * JVM heap usage metric type
      */
-    JVM;
+    MEMORY;
 
     /**
      * Read a MetricType from a StreamInput
@@ -93,10 +93,9 @@ public static Set<MetricType> allMetricTypes() {
     public int compare(final Number a, final Number b) {
         switch (this) {
             case LATENCY:
-                return Long.compare(a.longValue(), b.longValue());
-            case JVM:
             case CPU:
-                return Double.compare(a.doubleValue(), b.doubleValue());
+            case MEMORY:
+                return Long.compare(a.longValue(), b.longValue());
         }
         return -1;
     }
@@ -110,10 +109,9 @@ public int compare(final Number a, final Number b) {
     Number parseValue(final Object o) {
         switch (this) {
             case LATENCY:
-                return (Long) o;
-            case JVM:
             case CPU:
-                return (Double) o;
+            case MEMORY:
+                return (Long) o;
             default:
                 return (Number) o;
         }
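(Editor's illustration — the sketch below is not part of this patch.) Taken together, the exporter pieces above compose as: validate the sink Settings, create a sink through the factory, hand the rotated window's records to export(), and close through the factory so its tracked set stays consistent. A minimal sketch using only APIs introduced in this patch; the index pattern literal is an assumed joda-time pattern, not a value defined here:

// Illustrative wiring of QueryInsightsExporterFactory (sketch, not shipped code).
import org.opensearch.client.Client;
import org.opensearch.common.settings.Settings;
import org.opensearch.plugin.insights.core.exporter.QueryInsightsExporter;
import org.opensearch.plugin.insights.core.exporter.QueryInsightsExporterFactory;
import org.opensearch.plugin.insights.core.exporter.SinkType;
import org.opensearch.plugin.insights.settings.QueryInsightsSettings;

import java.io.IOException;
import java.util.List;

final class ExporterWiringSketch {
    static void demo(final Client client) throws IOException {
        final QueryInsightsExporterFactory factory = new QueryInsightsExporterFactory(client);

        // Validation fails fast on unknown sink types or unparsable index patterns,
        // which is what guards the dynamic exporter settings updates.
        final String indexPattern = "'top_queries-'YYYY.MM.dd"; // assumed joda pattern
        final Settings sinkConfig = Settings.builder()
            .put(QueryInsightsSettings.EXPORTER_TYPE, SinkType.LOCAL_INDEX.toString())
            .put(QueryInsightsSettings.EXPORT_INDEX, indexPattern)
            .build();
        factory.validateExporterConfig(sinkConfig); // throws IllegalArgumentException if invalid

        // TopQueriesService invokes export() with the rotated window's records;
        // LocalIndexExporter bulk-indexes them into the date-stamped index.
        final QueryInsightsExporter exporter = factory.createExporter(SinkType.LOCAL_INDEX, indexPattern);
        exporter.export(List.of()); // an empty batch is a no-op by design
        factory.closeExporter(exporter); // also drops it from the factory's tracked set
    }
}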
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java index 060711edb5580..fec00a680ae58 100644 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java @@ -8,9 +8,11 @@ package org.opensearch.plugin.insights.rules.model; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -173,4 +175,9 @@ public boolean equals(final Object o) { public int hashCode() { return Objects.hash(timestamp, measurements, attributes); } + + @Override + public String toString() { + return Strings.toString(MediaTypeRegistry.JSON, this); + } } diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java index ddf614211bc41..7949b70a16db6 100644 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java @@ -8,7 +8,6 @@ package org.opensearch.plugin.insights.rules.transport.top_queries; -import org.opensearch.OpenSearchException; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.nodes.TransportNodesAction; @@ -21,7 +20,6 @@ import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction; import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest; import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesResponse; -import org.opensearch.plugin.insights.rules.model.MetricType; import org.opensearch.plugin.insights.settings.QueryInsightsSettings; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportRequest; @@ -29,7 +27,6 @@ import java.io.IOException; import java.util.List; -import java.util.Locale; /** * Transport action for cluster/node level top queries information. 
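Note on the hunks below: the action no longer rejects non-latency metric types, which is why the OpenSearchException, MetricType, and Locale imports above could be dropped. For illustration only, assuming the plugin's top queries REST endpoint (not shown in this excerpt), a request such as

    GET /_insights/top_queries?type=cpu

would now be served from each node's TopQueriesService for CPU, sized by TOP_N_CPU_QUERIES_SIZE, instead of failing with an invalid metric type error.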
@@ -81,17 +78,18 @@ protected TopQueriesResponse newResponse( final List responses, final List failures ) { - if (topQueriesRequest.getMetricType() == MetricType.LATENCY) { - return new TopQueriesResponse( - clusterService.getClusterName(), - responses, - failures, - clusterService.getClusterSettings().get(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE), - MetricType.LATENCY - ); - } else { - throw new OpenSearchException(String.format(Locale.ROOT, "invalid metric type %s", topQueriesRequest.getMetricType())); + int size; + switch (topQueriesRequest.getMetricType()) { + case CPU: + size = clusterService.getClusterSettings().get(QueryInsightsSettings.TOP_N_CPU_QUERIES_SIZE); + break; + case MEMORY: + size = clusterService.getClusterSettings().get(QueryInsightsSettings.TOP_N_MEMORY_QUERIES_SIZE); + break; + default: + size = clusterService.getClusterSettings().get(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE); } + return new TopQueriesResponse(clusterService.getClusterName(), responses, failures, size, topQueriesRequest.getMetricType()); } @Override @@ -107,15 +105,10 @@ protected TopQueries newNodeResponse(final StreamInput in) throws IOException { @Override protected TopQueries nodeOperation(final NodeRequest nodeRequest) { final TopQueriesRequest request = nodeRequest.request; - if (request.getMetricType() == MetricType.LATENCY) { - return new TopQueries( - clusterService.localNode(), - queryInsightsService.getTopQueriesService(MetricType.LATENCY).getTopQueriesRecords(true) - ); - } else { - throw new OpenSearchException(String.format(Locale.ROOT, "invalid metric type %s", request.getMetricType())); - } - + return new TopQueries( + clusterService.localNode(), + queryInsightsService.getTopQueriesService(request.getMetricType()).getTopQueriesRecords(true) + ); } /** diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java index 52cc1fbde790f..25309b5721792 100644 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java @@ -9,7 +9,10 @@ package org.opensearch.plugin.insights.settings; import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugin.insights.core.exporter.SinkType; +import org.opensearch.plugin.insights.rules.model.MetricType; import java.util.Arrays; import java.util.HashSet; @@ -79,6 +82,10 @@ public class QueryInsightsSettings { public static final String TOP_N_QUERIES_SETTING_PREFIX = "search.insights.top_queries"; /** Default prefix for top N queries by latency feature */ public static final String TOP_N_LATENCY_QUERIES_PREFIX = TOP_N_QUERIES_SETTING_PREFIX + ".latency"; + /** Default prefix for top N queries by cpu feature */ + public static final String TOP_N_CPU_QUERIES_PREFIX = TOP_N_QUERIES_SETTING_PREFIX + ".cpu"; + /** Default prefix for top N queries by memory feature */ + public static final String TOP_N_MEMORY_QUERIES_PREFIX = TOP_N_QUERIES_SETTING_PREFIX + ".memory"; /** * Boolean setting for enabling top queries by latency. */ @@ -109,6 +116,187 @@ public class QueryInsightsSettings { Setting.Property.Dynamic ); + /** + * Boolean setting for enabling top queries by cpu. 
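+ * Disabled by default, so CPU-based collection is opt-in.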
+ */ + public static final Setting TOP_N_CPU_QUERIES_ENABLED = Setting.boolSetting( + TOP_N_CPU_QUERIES_PREFIX + ".enabled", + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Int setting to define the top n size for top queries by cpu. + */ + public static final Setting TOP_N_CPU_QUERIES_SIZE = Setting.intSetting( + TOP_N_CPU_QUERIES_PREFIX + ".top_n_size", + DEFAULT_TOP_N_SIZE, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Time setting to define the window size in seconds for top queries by cpu. + */ + public static final Setting TOP_N_CPU_QUERIES_WINDOW_SIZE = Setting.positiveTimeSetting( + TOP_N_CPU_QUERIES_PREFIX + ".window_size", + DEFAULT_WINDOW_SIZE, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + /** + * Boolean setting for enabling top queries by memory. + */ + public static final Setting TOP_N_MEMORY_QUERIES_ENABLED = Setting.boolSetting( + TOP_N_MEMORY_QUERIES_PREFIX + ".enabled", + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Int setting to define the top n size for top queries by memory. + */ + public static final Setting TOP_N_MEMORY_QUERIES_SIZE = Setting.intSetting( + TOP_N_MEMORY_QUERIES_PREFIX + ".top_n_size", + DEFAULT_TOP_N_SIZE, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Time setting to define the window size in seconds for top queries by memory. + */ + public static final Setting TOP_N_MEMORY_QUERIES_WINDOW_SIZE = Setting.positiveTimeSetting( + TOP_N_MEMORY_QUERIES_PREFIX + ".window_size", + DEFAULT_WINDOW_SIZE, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + /** + * Config key for exporter type + */ + public static final String EXPORTER_TYPE = "type"; + /** + * Config key for export index + */ + public static final String EXPORT_INDEX = "config.index"; + + /** + * Prefix for top n queries by latency exporters + */ + private static final String TOP_N_LATENCY_QUERIES_EXPORTER_PREFIX = TOP_N_LATENCY_QUERIES_PREFIX + ".exporter."; + /** + * Prefix for top n queries by cpu exporters + */ + private static final String TOP_N_CPU_QUERIES_EXPORTER_PREFIX = TOP_N_CPU_QUERIES_PREFIX + ".exporter."; + /** + * Prefix for top n queries by memory exporters + */ + private static final String TOP_N_MEMORY_QUERIES_EXPORTER_PREFIX = TOP_N_MEMORY_QUERIES_PREFIX + ".exporter."; + /** + * Default index pattern of top n queries + */ + public static final String DEFAULT_TOP_N_QUERIES_INDEX_PATTERN = "'top_queries-'YYYY.MM.dd"; + /** + * Default exporter type of top queries + */ + public static final String DEFAULT_TOP_QUERIES_EXPORTER_TYPE = SinkType.LOCAL_INDEX.toString(); + + /** + * Settings for the exporter of top latency queries + */ + public static final Setting TOP_N_LATENCY_EXPORTER_SETTINGS = Setting.groupSetting( + TOP_N_LATENCY_QUERIES_EXPORTER_PREFIX, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Settings for the exporter of top cpu queries + */ + public static final Setting TOP_N_CPU_EXPORTER_SETTINGS = Setting.groupSetting( + TOP_N_CPU_QUERIES_EXPORTER_PREFIX, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Settings for the exporter of top memory queries + */ + public static final Setting TOP_N_MEMORY_EXPORTER_SETTINGS = Setting.groupSetting( + TOP_N_MEMORY_QUERIES_EXPORTER_PREFIX, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Get the enabled setting based on type + * @param type MetricType + * @return enabled setting + */ + public 
static Setting getTopNEnabledSetting(MetricType type) { + switch (type) { + case CPU: + return TOP_N_CPU_QUERIES_ENABLED; + case MEMORY: + return TOP_N_MEMORY_QUERIES_ENABLED; + default: + return TOP_N_LATENCY_QUERIES_ENABLED; + } + } + + /** + * Get the top n size setting based on type + * @param type MetricType + * @return top n size setting + */ + public static Setting getTopNSizeSetting(MetricType type) { + switch (type) { + case CPU: + return TOP_N_CPU_QUERIES_SIZE; + case MEMORY: + return TOP_N_MEMORY_QUERIES_SIZE; + default: + return TOP_N_LATENCY_QUERIES_SIZE; + } + } + + /** + * Get the window size setting based on type + * @param type MetricType + * @return top n queries window size setting + */ + public static Setting getTopNWindowSizeSetting(MetricType type) { + switch (type) { + case CPU: + return TOP_N_CPU_QUERIES_WINDOW_SIZE; + case MEMORY: + return TOP_N_MEMORY_QUERIES_WINDOW_SIZE; + default: + return TOP_N_LATENCY_QUERIES_WINDOW_SIZE; + } + } + + /** + * Get the exporter settings based on type + * @param type MetricType + * @return exporter setting + */ + public static Setting getExporterSettings(MetricType type) { + switch (type) { + case CPU: + return TOP_N_CPU_EXPORTER_SETTINGS; + case MEMORY: + return TOP_N_MEMORY_EXPORTER_SETTINGS; + default: + return TOP_N_LATENCY_EXPORTER_SETTINGS; + } + } + /** * Default constructor */ diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java index 2f353f2a53329..2efe9085a39ee 100644 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java @@ -47,10 +47,7 @@ public void setup() { Settings.Builder settingsBuilder = Settings.builder(); Settings settings = settingsBuilder.build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED); - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE); - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE); - + QueryInsightsTestUtils.registerAllQueryInsightsSettings(clusterSettings); clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, threadPool); } @@ -59,7 +56,16 @@ public void testGetSettings() { Arrays.asList( QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED, QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE, - QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE + QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE, + QueryInsightsSettings.TOP_N_LATENCY_EXPORTER_SETTINGS, + QueryInsightsSettings.TOP_N_CPU_QUERIES_ENABLED, + QueryInsightsSettings.TOP_N_CPU_QUERIES_SIZE, + QueryInsightsSettings.TOP_N_CPU_QUERIES_WINDOW_SIZE, + QueryInsightsSettings.TOP_N_CPU_EXPORTER_SETTINGS, + QueryInsightsSettings.TOP_N_MEMORY_QUERIES_ENABLED, + QueryInsightsSettings.TOP_N_MEMORY_QUERIES_SIZE, + QueryInsightsSettings.TOP_N_MEMORY_QUERIES_WINDOW_SIZE, + QueryInsightsSettings.TOP_N_MEMORY_EXPORTER_SETTINGS ), queryInsightsPlugin.getSettings() ); diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java index 
870ef5b9c8be9..7fa4e9841c20e 100644 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java @@ -10,6 +10,7 @@ import org.opensearch.action.search.SearchType; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.util.Maps; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; @@ -17,6 +18,7 @@ import org.opensearch.plugin.insights.rules.model.Attribute; import org.opensearch.plugin.insights.rules.model.MetricType; import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; import org.opensearch.test.VersionUtils; import java.io.IOException; @@ -36,7 +38,6 @@ import static org.opensearch.test.OpenSearchTestCase.random; import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLengthBetween; import static org.opensearch.test.OpenSearchTestCase.randomArray; -import static org.opensearch.test.OpenSearchTestCase.randomDouble; import static org.opensearch.test.OpenSearchTestCase.randomIntBetween; import static org.opensearch.test.OpenSearchTestCase.randomLong; import static org.opensearch.test.OpenSearchTestCase.randomLongBetween; @@ -63,9 +64,9 @@ public static List generateQueryInsightRecords(int lower, int MetricType.LATENCY, randomLongBetween(1000, 10000), MetricType.CPU, - randomDouble(), - MetricType.JVM, - randomDouble() + randomLongBetween(1000, 10000), + MetricType.MEMORY, + randomLongBetween(1000, 10000) ); Map phaseLatencyMap = new HashMap<>(); @@ -186,4 +187,19 @@ public static boolean checkRecordsEqualsWithoutOrder( } return true; } + + public static void registerAllQueryInsightsSettings(ClusterSettings clusterSettings) { + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_EXPORTER_SETTINGS); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_CPU_QUERIES_ENABLED); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_CPU_QUERIES_SIZE); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_CPU_QUERIES_WINDOW_SIZE); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_CPU_EXPORTER_SETTINGS); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_MEMORY_QUERIES_ENABLED); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_MEMORY_QUERIES_SIZE); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_MEMORY_QUERIES_WINDOW_SIZE); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_MEMORY_EXPORTER_SETTINGS); + } } diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/DebugExporterTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/DebugExporterTests.java new file mode 100644 index 0000000000000..736e406289b2c --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/DebugExporterTests.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 
license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.core.exporter; + +import org.opensearch.plugin.insights.QueryInsightsTestUtils; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.List; + +/** + * Granular tests for the {@link DebugExporter} class. + */ +public class DebugExporterTests extends OpenSearchTestCase { + private DebugExporter debugExporter; + + @Before + public void setup() { + debugExporter = DebugExporter.getInstance(); + } + + public void testExport() { + List records = QueryInsightsTestUtils.generateQueryInsightRecords(2); + try { + debugExporter.export(records); + } catch (Exception e) { + fail("No exception should be thrown when exporting query insights data"); + } + } +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/LocalIndexExporterTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/LocalIndexExporterTests.java new file mode 100644 index 0000000000000..9ea864a7083f4 --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/LocalIndexExporterTests.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.core.exporter; + +import org.opensearch.action.bulk.BulkAction; +import org.opensearch.action.bulk.BulkRequestBuilder; +import org.opensearch.action.bulk.BulkResponse; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.client.Client; +import org.opensearch.plugin.insights.QueryInsightsTestUtils; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; +import org.opensearch.test.OpenSearchTestCase; +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.DateTimeFormatter; +import org.junit.Before; + +import java.util.List; + +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + +/** + * Granular tests for the {@link LocalIndexExporter} class. 
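+ * Covers exporting empty and non-empty record lists, bulk failure handling, close, and index pattern updates against a mocked Client.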
+ */ +public class LocalIndexExporterTests extends OpenSearchTestCase { + private final DateTimeFormatter format = DateTimeFormat.forPattern("YYYY.MM.dd"); + private final Client client = mock(Client.class); + private LocalIndexExporter localIndexExporter; + + @Before + public void setup() { + localIndexExporter = new LocalIndexExporter(client, format); + } + + public void testExportEmptyRecords() { + List records = List.of(); + try { + localIndexExporter.export(records); + } catch (Exception e) { + fail("No exception should be thrown when exporting empty query insights data"); + } + } + + @SuppressWarnings("unchecked") + public void testExportRecords() { + BulkRequestBuilder bulkRequestBuilder = spy(new BulkRequestBuilder(client, BulkAction.INSTANCE)); + final PlainActionFuture future = mock(PlainActionFuture.class); + when(future.actionGet()).thenReturn(null); + doAnswer(invocation -> future).when(bulkRequestBuilder).execute(); + when(client.prepareBulk()).thenReturn(bulkRequestBuilder); + + List records = QueryInsightsTestUtils.generateQueryInsightRecords(2); + try { + localIndexExporter.export(records); + } catch (Exception e) { + fail("No exception should be thrown when exporting query insights data"); + } + assertEquals(2, bulkRequestBuilder.numberOfActions()); + } + + @SuppressWarnings("unchecked") + public void testExportRecordsWithError() { + BulkRequestBuilder bulkRequestBuilder = spy(new BulkRequestBuilder(client, BulkAction.INSTANCE)); + final PlainActionFuture future = mock(PlainActionFuture.class); + when(future.actionGet()).thenReturn(null); + doThrow(new RuntimeException()).when(bulkRequestBuilder).execute(); + when(client.prepareBulk()).thenReturn(bulkRequestBuilder); + + List records = QueryInsightsTestUtils.generateQueryInsightRecords(2); + try { + localIndexExporter.export(records); + } catch (Exception e) { + fail("No exception should be thrown when exporting query insights data"); + } + } + + public void testClose() { + try { + localIndexExporter.close(); + } catch (Exception e) { + fail("No exception should be thrown when closing local index exporter"); + } + } + + public void testGetAndSetIndexPattern() { + DateTimeFormatter newFormatter = mock(DateTimeFormatter.class); + localIndexExporter.setIndexPattern(newFormatter); + assert (localIndexExporter.getIndexPattern() == newFormatter); + } +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporterFactoryTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporterFactoryTests.java new file mode 100644 index 0000000000000..f01dd2c17509c --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/exporter/QueryInsightsExporterFactoryTests.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.insights.core.exporter; + +import org.opensearch.client.Client; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchTestCase; +import org.joda.time.format.DateTimeFormat; +import org.junit.Before; + +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.DEFAULT_TOP_QUERIES_EXPORTER_TYPE; +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.EXPORTER_TYPE; +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.EXPORT_INDEX; +import static org.mockito.Mockito.mock; + +/** + * Granular tests for the {@link QueryInsightsExporterFactory} class. + */ +public class QueryInsightsExporterFactoryTests extends OpenSearchTestCase { + private final String format = "YYYY.MM.dd"; + + private final Client client = mock(Client.class); + private QueryInsightsExporterFactory queryInsightsExporterFactory; + + @Before + public void setup() { + queryInsightsExporterFactory = new QueryInsightsExporterFactory(client); + } + + public void testValidateConfigWhenResetExporter() { + Settings.Builder settingsBuilder = Settings.builder(); + // empty settings + Settings settings = settingsBuilder.build(); + try { + queryInsightsExporterFactory.validateExporterConfig(settings); + } catch (Exception e) { + fail("No exception should be thrown when setting is null"); + } + } + + public void testInvalidExporterTypeConfig() { + Settings.Builder settingsBuilder = Settings.builder(); + Settings settings = settingsBuilder.put(EXPORTER_TYPE, "some_invalid_type").build(); + assertThrows(IllegalArgumentException.class, () -> { queryInsightsExporterFactory.validateExporterConfig(settings); }); + } + + public void testInvalidLocalIndexConfig() { + Settings.Builder settingsBuilder = Settings.builder(); + assertThrows(IllegalArgumentException.class, () -> { + queryInsightsExporterFactory.validateExporterConfig( + settingsBuilder.put(EXPORTER_TYPE, DEFAULT_TOP_QUERIES_EXPORTER_TYPE).put(EXPORT_INDEX, "").build() + ); + }); + assertThrows(IllegalArgumentException.class, () -> { + queryInsightsExporterFactory.validateExporterConfig( + settingsBuilder.put(EXPORTER_TYPE, DEFAULT_TOP_QUERIES_EXPORTER_TYPE).put(EXPORT_INDEX, "some_invalid_pattern").build() + ); + }); + } + + public void testCreateAndCloseExporter() { + QueryInsightsExporter exporter1 = queryInsightsExporterFactory.createExporter(SinkType.LOCAL_INDEX, format); + assertTrue(exporter1 instanceof LocalIndexExporter); + QueryInsightsExporter exporter2 = queryInsightsExporterFactory.createExporter(SinkType.DEBUG, format); + assertTrue(exporter2 instanceof DebugExporter); + QueryInsightsExporter exporter3 = queryInsightsExporterFactory.createExporter(SinkType.DEBUG, format); + assertTrue(exporter3 instanceof DebugExporter); + try { + queryInsightsExporterFactory.closeExporter(exporter1); + queryInsightsExporterFactory.closeExporter(exporter2); + queryInsightsExporterFactory.closeAllExporters(); + } catch (Exception e) { + fail("No exception should be thrown when closing exporter"); + } + } + + public void testUpdateExporter() { + LocalIndexExporter exporter = new LocalIndexExporter(client, DateTimeFormat.forPattern("yyyy-MM-dd")); + queryInsightsExporterFactory.updateExporter(exporter, "yyyy-MM-dd-HH"); + assertEquals(DateTimeFormat.forPattern("yyyy-MM-dd-HH"), exporter.getIndexPattern()); + } + +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java 
b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java index 328ed0cd2ed15..86de44c680188 100644 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java @@ -11,27 +11,44 @@ import org.opensearch.action.search.SearchPhaseContext; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchRequestContext; +import org.opensearch.action.search.SearchTask; import org.opensearch.action.search.SearchType; +import org.opensearch.action.support.replication.ClusterStateCreationUtils; +import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.plugin.insights.QueryInsightsTestUtils; import org.opensearch.plugin.insights.core.service.QueryInsightsService; import org.opensearch.plugin.insights.core.service.TopQueriesService; +import org.opensearch.plugin.insights.rules.model.Attribute; import org.opensearch.plugin.insights.rules.model.MetricType; -import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.opensearch.search.aggregations.support.ValueType; import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.tasks.Task; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; import org.junit.Before; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Phaser; +import java.util.concurrent.TimeUnit; + +import org.mockito.ArgumentCaptor; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -48,6 +65,7 @@ public class QueryInsightsListenerTests extends OpenSearchTestCase { private final SearchRequest searchRequest = mock(SearchRequest.class); private final QueryInsightsService queryInsightsService = mock(QueryInsightsService.class); private final TopQueriesService topQueriesService = mock(TopQueriesService.class); + private final ThreadPool threadPool = new TestThreadPool("QueryInsightsThreadPool"); private ClusterService clusterService; @Before @@ -55,14 +73,25 @@ public void setup() { Settings.Builder settingsBuilder = Settings.builder(); Settings settings = settingsBuilder.build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED); - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE); - clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE); - clusterService = ClusterServiceUtils.createClusterService(settings, clusterSettings, null); + 
QueryInsightsTestUtils.registerAllQueryInsightsSettings(clusterSettings); + ClusterState state = ClusterStateCreationUtils.stateWithActivePrimary("test", true, 1 + randomInt(3), randomInt(2)); + clusterService = ClusterServiceUtils.createClusterService(threadPool, state.getNodes().getLocalNode(), clusterSettings); + ClusterServiceUtils.setState(clusterService, state); when(queryInsightsService.isCollectionEnabled(MetricType.LATENCY)).thenReturn(true); when(queryInsightsService.getTopQueriesService(MetricType.LATENCY)).thenReturn(topQueriesService); + + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + threadPool.getThreadContext().setHeaders(new Tuple<>(Collections.singletonMap(Task.X_OPAQUE_ID, "userLabel"), new HashMap<>())); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + IOUtils.close(clusterService); + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); } + @SuppressWarnings("unchecked") public void testOnRequestEnd() throws InterruptedException { Long timestamp = System.currentTimeMillis() - 100L; SearchType searchType = SearchType.QUERY_THEN_FETCH; @@ -70,6 +99,14 @@ public void testOnRequestEnd() throws InterruptedException { SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); searchSourceBuilder.aggregation(new TermsAggregationBuilder("agg1").userValueTypeHint(ValueType.STRING).field("type.keyword")); searchSourceBuilder.size(0); + SearchTask task = new SearchTask( + 0, + "n/a", + "n/a", + () -> "test", + TaskId.EMPTY_TASK_ID, + Collections.singletonMap(Task.X_OPAQUE_ID, "userLabel") + ); String[] indices = new String[] { "index-1", "index-2" }; @@ -89,10 +126,19 @@ public void testOnRequestEnd() throws InterruptedException { when(searchRequestContext.phaseTookMap()).thenReturn(phaseLatencyMap); when(searchPhaseContext.getRequest()).thenReturn(searchRequest); when(searchPhaseContext.getNumShards()).thenReturn(numberOfShards); + when(searchPhaseContext.getTask()).thenReturn(task); + ArgumentCaptor captor = ArgumentCaptor.forClass(SearchQueryRecord.class); queryInsightsListener.onRequestEnd(searchPhaseContext, searchRequestContext); - verify(queryInsightsService, times(1)).addRecord(any()); + verify(queryInsightsService, times(1)).addRecord(captor.capture()); + SearchQueryRecord generatedRecord = captor.getValue(); + assertEquals(timestamp.longValue(), generatedRecord.getTimestamp()); + assertEquals(numberOfShards, generatedRecord.getAttributes().get(Attribute.TOTAL_SHARDS)); + assertEquals(searchType.toString().toLowerCase(Locale.ROOT), generatedRecord.getAttributes().get(Attribute.SEARCH_TYPE)); + assertEquals(searchSourceBuilder.toString(), generatedRecord.getAttributes().get(Attribute.SOURCE)); + Map labels = (Map) generatedRecord.getAttributes().get(Attribute.LABELS); + assertEquals("userLabel", labels.get(Task.X_OPAQUE_ID)); } public void testConcurrentOnRequestEnd() throws InterruptedException { @@ -102,6 +148,14 @@ public void testConcurrentOnRequestEnd() throws InterruptedException { SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); searchSourceBuilder.aggregation(new TermsAggregationBuilder("agg1").userValueTypeHint(ValueType.STRING).field("type.keyword")); searchSourceBuilder.size(0); + SearchTask task = new SearchTask( + 0, + "n/a", + "n/a", + () -> "test", + TaskId.EMPTY_TASK_ID, + Collections.singletonMap(Task.X_OPAQUE_ID, "userLabel") + ); String[] indices = new String[] { "index-1", "index-2" }; @@ -121,6 +175,7 @@ public void testConcurrentOnRequestEnd() throws 
InterruptedException { when(searchRequestContext.phaseTookMap()).thenReturn(phaseLatencyMap); when(searchPhaseContext.getRequest()).thenReturn(searchRequest); when(searchPhaseContext.getNumShards()).thenReturn(numberOfShards); + when(searchPhaseContext.getTask()).thenReturn(task); int numRequests = 50; Thread[] threads = new Thread[numRequests]; @@ -155,7 +210,7 @@ public void testSetEnabled() { when(queryInsightsService.isCollectionEnabled(MetricType.LATENCY)).thenReturn(false); when(queryInsightsService.isCollectionEnabled(MetricType.CPU)).thenReturn(false); - when(queryInsightsService.isCollectionEnabled(MetricType.JVM)).thenReturn(false); + when(queryInsightsService.isCollectionEnabled(MetricType.MEMORY)).thenReturn(false); queryInsightsListener.setEnableTopQueries(MetricType.LATENCY, false); assertFalse(queryInsightsListener.isEnabled()); } diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java index c29b48b9690d1..75a5768f50681 100644 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java @@ -8,6 +8,9 @@ package org.opensearch.plugin.insights.core.service; +import org.opensearch.client.Client; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; import org.opensearch.plugin.insights.QueryInsightsTestUtils; import org.opensearch.plugin.insights.rules.model.MetricType; import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; @@ -23,14 +26,19 @@ */ public class QueryInsightsServiceTests extends OpenSearchTestCase { private final ThreadPool threadPool = mock(ThreadPool.class); + private final Client client = mock(Client.class); private QueryInsightsService queryInsightsService; @Before public void setup() { - queryInsightsService = new QueryInsightsService(threadPool); + Settings.Builder settingsBuilder = Settings.builder(); + Settings settings = settingsBuilder.build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + QueryInsightsTestUtils.registerAllQueryInsightsSettings(clusterSettings); + queryInsightsService = new QueryInsightsService(clusterSettings, threadPool, client); queryInsightsService.enableCollection(MetricType.LATENCY, true); queryInsightsService.enableCollection(MetricType.CPU, true); - queryInsightsService.enableCollection(MetricType.JVM, true); + queryInsightsService.enableCollection(MetricType.MEMORY, true); } public void testAddRecordToLimitAndDrain() { @@ -46,4 +54,12 @@ public void testAddRecordToLimitAndDrain() { queryInsightsService.getTopQueriesService(MetricType.LATENCY).getTopQueriesRecords(false).size() ); } + + public void testClose() { + try { + queryInsightsService.doClose(); + } catch (Exception e) { + fail("No exception expected when closing query insights service"); + } + } } diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java index 060df84a89485..3efd4c86833cc 100644 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java +++ 
b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java @@ -11,24 +11,30 @@ import org.opensearch.cluster.coordination.DeterministicTaskQueue; import org.opensearch.common.unit.TimeValue; import org.opensearch.plugin.insights.QueryInsightsTestUtils; +import org.opensearch.plugin.insights.core.exporter.QueryInsightsExporterFactory; import org.opensearch.plugin.insights.rules.model.MetricType; import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; import org.opensearch.plugin.insights.settings.QueryInsightsSettings; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; import org.junit.Before; import java.util.List; import java.util.concurrent.TimeUnit; +import static org.mockito.Mockito.mock; + /** * Unit Tests for {@link QueryInsightsService}. */ public class TopQueriesServiceTests extends OpenSearchTestCase { private TopQueriesService topQueriesService; + private final ThreadPool threadPool = mock(ThreadPool.class); + private final QueryInsightsExporterFactory queryInsightsExporterFactory = mock(QueryInsightsExporterFactory.class); @Before public void setup() { - topQueriesService = new TopQueriesService(MetricType.LATENCY); + topQueriesService = new TopQueriesService(MetricType.LATENCY, threadPool, queryInsightsExporterFactory); topQueriesService.setTopNSize(Integer.MAX_VALUE); topQueriesService.setWindowSize(new TimeValue(Long.MAX_VALUE)); topQueriesService.setEnabled(true); diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java index 793d5878e2300..ad45b53ec5363 100644 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java @@ -39,7 +39,7 @@ public void testSerializationAndEquals() throws Exception { public void testAllMetricTypes() { Set allMetrics = MetricType.allMetricTypes(); - Set expected = new HashSet<>(Arrays.asList(MetricType.LATENCY, MetricType.CPU, MetricType.JVM)); + Set expected = new HashSet<>(Arrays.asList(MetricType.LATENCY, MetricType.CPU, MetricType.MEMORY)); assertEquals(expected, allMetrics); } diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index ff62c328c7e74..61e9f71712eaf 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -44,10 +44,11 @@ opensearchplugin { } dependencies { - api 'com.azure:azure-core:1.47.0' + api 'com.azure:azure-core:1.49.1' api 'com.azure:azure-json:1.1.0' + api 'com.azure:azure-xml:1.0.0' api 'com.azure:azure-storage-common:12.21.2' - api 'com.azure:azure-core-http-netty:1.12.8' + api 'com.azure:azure-core-http-netty:1.15.1' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" api "io.netty:netty-codec-http2:${versions.netty}" diff --git a/plugins/repository-azure/licenses/azure-core-1.47.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.47.0.jar.sha1 deleted file mode 100644 index 42e35aacc63b1..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-1.47.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6b300175826f0bb0916fca2fa5f70885b716e93f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1 
b/plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1 new file mode 100644 index 0000000000000..d487c08c26e94 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.49.1.jar.sha1 @@ -0,0 +1 @@ +a7c44282eaa0f5a3be4b920d6a057509adfe8674 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.12.8.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.12.8.jar.sha1 deleted file mode 100644 index e6ee1dec64641..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-http-netty-1.12.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -511ed2d02afb0f43f029df3d10ff80d2d3539f05 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.1.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.1.jar.sha1 new file mode 100644 index 0000000000000..3a0747a0daacb --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.1.jar.sha1 @@ -0,0 +1 @@ +036f7466a521aa99c79a491a9cf20444667df78b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-xml-1.0.0.jar.sha1 b/plugins/repository-azure/licenses/azure-xml-1.0.0.jar.sha1 new file mode 100644 index 0000000000000..798ec5d95c6ac --- /dev/null +++ b/plugins/repository-azure/licenses/azure-xml-1.0.0.jar.sha1 @@ -0,0 +1 @@ +ba584703bd47e9e789343ee3332f0f5a64f7f187 \ No newline at end of file diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java index 90143d907cd99..b0582624e21d5 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java @@ -23,10 +23,13 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; +import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; import java.util.stream.Collectors; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; import io.opentelemetry.sdk.metrics.data.DoublePointData; import io.opentelemetry.sdk.metrics.data.MetricData; import io.opentelemetry.sdk.metrics.internal.data.ImmutableExponentialHistogramPointData; @@ -147,6 +150,36 @@ public void testGauge() throws Exception { } + public void testGaugeWithValueAndTagSupplier() throws Exception { + String metricName = "test-gauge"; + MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class); + InMemorySingletonMetricsExporter.INSTANCE.reset(); + Tags tags = Tags.create().addTag("test", "integ-test"); + final AtomicInteger testValue = new AtomicInteger(0); + Supplier valueProvider = () -> { + return TaggedMeasurement.create(Double.valueOf(testValue.incrementAndGet()), tags); + }; + Closeable gaugeCloseable = metricsRegistry.createGauge(metricName, "test", "ms", valueProvider); + // Sleep for about 2.2s to wait for metrics to be published. 
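+ // (assumes the test's periodic metric reader exports roughly once per second, so at least two collections complete)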
+ Thread.sleep(2200); + + InMemorySingletonMetricsExporter exporter = InMemorySingletonMetricsExporter.INSTANCE; + + assertTrue(getMaxObservableGaugeValue(exporter, metricName) >= 2.0); + + gaugeCloseable.close(); + double observableGaugeValueAfterStop = getMaxObservableGaugeValue(exporter, metricName); + + Map, Object> attributes = getMetricAttributes(exporter, metricName); + + assertEquals("integ-test", attributes.get(AttributeKey.stringKey("test"))); + + // Sleep for about 1.2s to wait for metrics to see that closed observableGauge shouldn't execute the callable. + Thread.sleep(1200); + assertEquals(observableGaugeValueAfterStop, getMaxObservableGaugeValue(exporter, metricName), 0.0); + + } + private static double getMaxObservableGaugeValue(InMemorySingletonMetricsExporter exporter, String metricName) { List dataPoints = exporter.getFinishedMetricItems() .stream() @@ -159,6 +192,15 @@ private static double getMaxObservableGaugeValue(InMemorySingletonMetricsExporte return totalValue; } + private static Map, Object> getMetricAttributes(InMemorySingletonMetricsExporter exporter, String metricName) { + List dataPoints = exporter.getFinishedMetricItems() + .stream() + .filter(a -> a.getName().contains(metricName)) + .collect(Collectors.toList()); + Attributes attributes = dataPoints.get(0).getDoubleGaugeData().getPoints().stream().findAny().get().getAttributes(); + return attributes.asMap(); + } + @After public void reset() { InMemorySingletonMetricsExporter.INSTANCE.reset(); diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java index 6fe08040d7af5..3258e91738ba6 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java @@ -101,6 +101,17 @@ public Closeable createGauge(String name, String description, String unit, Suppl return () -> doubleObservableGauge.close(); } + @Override + public Closeable createGauge(String name, String description, String unit, Supplier value) { + ObservableDoubleGauge doubleObservableGauge = AccessController.doPrivileged( + (PrivilegedAction) () -> otelMeter.gaugeBuilder(name) + .setUnit(unit) + .setDescription(description) + .buildWithCallback(record -> record.record(value.get().getValue(), OTelAttributesConverter.convert(value.get().getTags()))) + ); + return () -> doubleObservableGauge.close(); + } + @Override public void close() throws IOException { meterProvider.close(); diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java index 2e89a3c488d5c..794cafc1fb608 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java @@ -180,4 +180,34 @@ public void testGauge() throws Exception { closeable.close(); verify(observableDoubleGauge).close(); } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + public void testGaugeWithValueAndTagsSupplier() throws Exception { + String observableGaugeName = "test-gauge"; + String description = "test"; + String unit = "1"; + Meter mockMeter = mock(Meter.class); + OpenTelemetry mockOpenTelemetry = 
mock(OpenTelemetry.class); + ObservableDoubleGauge observableDoubleGauge = mock(ObservableDoubleGauge.class); + DoubleGaugeBuilder mockOTelDoubleGaugeBuilder = mock(DoubleGaugeBuilder.class); + MeterProvider meterProvider = mock(MeterProvider.class); + when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter); + MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + meterProvider + ); + when(mockMeter.gaugeBuilder(Mockito.contains(observableGaugeName))).thenReturn(mockOTelDoubleGaugeBuilder); + when(mockOTelDoubleGaugeBuilder.setDescription(description)).thenReturn(mockOTelDoubleGaugeBuilder); + when(mockOTelDoubleGaugeBuilder.setUnit(unit)).thenReturn(mockOTelDoubleGaugeBuilder); + when(mockOTelDoubleGaugeBuilder.buildWithCallback(any(Consumer.class))).thenReturn(observableDoubleGauge); + + Closeable closeable = metricsTelemetry.createGauge( + observableGaugeName, + description, + unit, + () -> TaggedMeasurement.create(1.0, Tags.EMPTY) + ); + closeable.close(); + verify(observableDoubleGauge).close(); + } } diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpChunk.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpChunk.java new file mode 100644 index 0000000000000..3b4a308691e7b --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpChunk.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.http.HttpChunk; +import org.opensearch.transport.reactor.netty4.Netty4Utils; + +import java.util.concurrent.atomic.AtomicBoolean; + +import io.netty.buffer.ByteBuf; + +class ReactorNetty4HttpChunk implements HttpChunk { + private final AtomicBoolean released; + private final boolean pooled; + private final ByteBuf content; + private final boolean last; + + ReactorNetty4HttpChunk(ByteBuf content, boolean last) { + this.content = content; + this.pooled = true; + this.released = new AtomicBoolean(false); + this.last = last; + } + + @Override + public BytesReference content() { + assert released.get() == false; + return Netty4Utils.toBytesReference(content); + } + + @Override + public void close() { + if (pooled && released.compareAndSet(false, true)) { + content.release(); + } + } + + @Override + public boolean isLast() { + return last; + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java index 4406c555a5b04..491c7aa885103 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java @@ -44,6 +44,10 @@ class ReactorNetty4HttpRequest implements HttpRequest { private final Exception inboundException; private final boolean pooled; + ReactorNetty4HttpRequest(HttpServerRequest request) { + this(request, new HttpHeadersMap(request.requestHeaders()), new AtomicBoolean(false), false, Unpooled.EMPTY_BUFFER); + } + ReactorNetty4HttpRequest(HttpServerRequest request, ByteBuf content) { this(request, new HttpHeadersMap(request.requestHeaders()), new AtomicBoolean(false), true, content); } diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java index bd1646d753016..906bbfd072da8 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java @@ -26,6 +26,8 @@ import org.opensearch.http.HttpServerChannel; import org.opensearch.http.reactor.netty4.ssl.SslUtils; import org.opensearch.plugins.SecureHttpTransportSettingsProvider; +import org.opensearch.rest.RestHandler; +import org.opensearch.rest.RestRequest.Method; import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.reactor.SharedGroupFactory; @@ -40,6 +42,7 @@ import java.time.Duration; import java.util.Arrays; import java.util.List; +import java.util.Optional; import io.netty.buffer.ByteBufAllocator; import io.netty.channel.ChannelOption; @@ -351,24 +354,45 @@ public List protocols() { * @return response publisher */ protected Publisher incomingRequest(HttpServerRequest request, HttpServerResponse response) { - final NonStreamingRequestConsumer consumer = new NonStreamingRequestConsumer<>( - this, - request, - response, - maxCompositeBufferComponents + final Method method = 
HttpConversionUtil.convertMethod(request.method()); + final Optional dispatchHandlerOpt = dispatcher.dispatchHandler( + request.uri(), + request.fullPath(), + method, + request.params() ); + if (dispatchHandlerOpt.map(RestHandler::supportsStreaming).orElse(false)) { + final ReactorNetty4StreamingRequestConsumer consumer = new ReactorNetty4StreamingRequestConsumer<>( + request, + response + ); + + request.receiveContent() + .switchIfEmpty(Mono.just(DefaultLastHttpContent.EMPTY_LAST_CONTENT)) + .subscribe(consumer, error -> {}, () -> consumer.accept(DefaultLastHttpContent.EMPTY_LAST_CONTENT)); + + incomingStream(new ReactorNetty4HttpRequest(request), consumer.httpChannel()); + return response.sendObject(consumer); + } else { + final ReactorNetty4NonStreamingRequestConsumer consumer = new ReactorNetty4NonStreamingRequestConsumer<>( + this, + request, + response, + maxCompositeBufferComponents + ); - request.receiveContent().switchIfEmpty(Mono.just(DefaultLastHttpContent.EMPTY_LAST_CONTENT)).subscribe(consumer); - - return Mono.from(consumer).flatMap(hc -> { - final FullHttpResponse r = (FullHttpResponse) hc; - response.status(r.status()); - response.trailerHeaders(c -> r.trailingHeaders().forEach(h -> c.add(h.getKey(), h.getValue()))); - response.chunkedTransfer(false); - response.compression(true); - r.headers().forEach(h -> response.addHeader(h.getKey(), h.getValue())); - return Mono.from(response.sendObject(r.content())); - }); + request.receiveContent().switchIfEmpty(Mono.just(DefaultLastHttpContent.EMPTY_LAST_CONTENT)).subscribe(consumer); + + return Mono.from(consumer).flatMap(hc -> { + final FullHttpResponse r = (FullHttpResponse) hc; + response.status(r.status()); + response.trailerHeaders(c -> r.trailingHeaders().forEach(h -> c.add(h.getKey(), h.getValue()))); + response.chunkedTransfer(false); + response.compression(true); + r.headers().forEach(h -> response.addHeader(h.getKey(), h.getValue())); + return Mono.from(response.sendObject(r.content())); + }); + } } /** diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingHttpChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java similarity index 92% rename from plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingHttpChannel.java rename to plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java index 98b359319ff1b..7df0b3c0c35fe 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingHttpChannel.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingHttpChannel.java @@ -23,13 +23,13 @@ import reactor.netty.http.server.HttpServerRequest; import reactor.netty.http.server.HttpServerResponse; -class NonStreamingHttpChannel implements HttpChannel { +class ReactorNetty4NonStreamingHttpChannel implements HttpChannel { private final HttpServerRequest request; private final HttpServerResponse response; private final CompletableContext closeContext = new CompletableContext<>(); private final FluxSink emitter; - NonStreamingHttpChannel(HttpServerRequest request, HttpServerResponse response, FluxSink emitter) { + ReactorNetty4NonStreamingHttpChannel(HttpServerRequest request, HttpServerResponse response, FluxSink emitter) { this.request = request; this.response = response; this.emitter = 
emitter; diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingRequestConsumer.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingRequestConsumer.java similarity index 89% rename from plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingRequestConsumer.java rename to plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingRequestConsumer.java index d43e23e800e65..c09e7755b1670 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingRequestConsumer.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4NonStreamingRequestConsumer.java @@ -25,7 +25,7 @@ import reactor.netty.http.server.HttpServerRequest; import reactor.netty.http.server.HttpServerResponse; -class NonStreamingRequestConsumer implements Consumer, Publisher, Disposable { +class ReactorNetty4NonStreamingRequestConsumer implements Consumer, Publisher, Disposable { private final HttpServerRequest request; private final HttpServerResponse response; private final CompositeByteBuf content; @@ -34,7 +34,7 @@ class NonStreamingRequestConsumer implements Consumer, private final AtomicBoolean disposed = new AtomicBoolean(false); private volatile FluxSink emitter; - NonStreamingRequestConsumer( + ReactorNetty4NonStreamingRequestConsumer( AbstractHttpServerTransport transport, HttpServerRequest request, HttpServerResponse response, @@ -64,12 +64,12 @@ public void accept(T message) { } } - public void process(HttpContent in, FluxSink emitter) { + void process(HttpContent in, FluxSink emitter) { // Consume request body in full before dispatching it content.addComponent(true, in.content().retain()); if (in instanceof LastHttpContent) { - final NonStreamingHttpChannel channel = new NonStreamingHttpChannel(request, response, emitter); + final ReactorNetty4NonStreamingHttpChannel channel = new ReactorNetty4NonStreamingHttpChannel(request, response, emitter); final HttpRequest r = createRequest(request, content); try { diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java new file mode 100644 index 0000000000000..56dadea0477c5 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingHttpChannel.java @@ -0,0 +1,132 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.common.concurrent.CompletableContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.http.HttpChunk; +import org.opensearch.http.HttpResponse; +import org.opensearch.http.StreamingHttpChannel; +import org.opensearch.transport.reactor.netty4.Netty4Utils; + +import java.net.InetSocketAddress; +import java.util.List; +import java.util.Map; + +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.DefaultHttpContent; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContent; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import reactor.core.publisher.Flux; +import reactor.core.publisher.FluxSink; +import reactor.netty.http.server.HttpServerRequest; +import reactor.netty.http.server.HttpServerResponse; + +class ReactorNetty4StreamingHttpChannel implements StreamingHttpChannel { + private final HttpServerRequest request; + private final HttpServerResponse response; + private final CompletableContext<Void> closeContext = new CompletableContext<>(); + private final Publisher<HttpChunk> receiver; + private final StreamingHttpContentSender sender; + private volatile FluxSink<HttpChunk> producer; + private volatile boolean lastChunkReceived = false; + + ReactorNetty4StreamingHttpChannel(HttpServerRequest request, HttpServerResponse response, StreamingHttpContentSender sender) { + this.request = request; + this.response = response; + this.sender = sender; + this.receiver = Flux.create(producer -> this.producer = producer); + this.request.withConnection(connection -> Netty4Utils.addListener(connection.channel().closeFuture(), closeContext)); + } + + @Override + public boolean isOpen() { + return true; + } + + @Override + public void close() { + request.withConnection(connection -> connection.channel().close()); + } + + @Override + public void addCloseListener(ActionListener<Void> listener) { + closeContext.addListener(ActionListener.toBiConsumer(listener)); + } + + @Override + public void sendChunk(HttpChunk chunk, ActionListener<Void> listener) { + sender.send(createContent(chunk), listener, chunk.isLast()); + } + + @Override + public void sendResponse(HttpResponse response, ActionListener<Void> listener) { + sender.send(createContent(response), listener, true); + } + + @Override + public void prepareResponse(int status, Map<String, List<String>> headers) { + this.response.status(status); + headers.forEach((k, vs) -> vs.forEach(v -> this.response.addHeader(k, v))); + } + + @Override + public InetSocketAddress getRemoteAddress() { + return (InetSocketAddress) response.remoteAddress(); + } + + @Override + public InetSocketAddress getLocalAddress() { + return (InetSocketAddress) response.hostAddress(); + } + + @Override + public void receiveChunk(HttpChunk message) { + try { + if (lastChunkReceived) { + return; + } + + producer.next(message); + if (message.isLast()) { + lastChunkReceived = true; + producer.complete(); + } + } finally { + message.close(); + } + } + + @Override + public boolean isReadable() { + return producer != null; + } + + @Override + public boolean isWritable() { + return sender.isReady(); + } + + @Override + public void subscribe(Subscriber<? super HttpChunk> subscriber) { + receiver.subscribe(subscriber); + } + + private static HttpContent createContent(HttpResponse response) { + final FullHttpResponse fullHttpResponse = (FullHttpResponse) response; + return new DefaultHttpContent(fullHttpResponse.content()); + } + +
private static HttpContent createContent(HttpChunk chunk) { + return new DefaultHttpContent(Unpooled.copiedBuffer(BytesReference.toByteBuffers(chunk.content()))); + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java new file mode 100644 index 0000000000000..f34f54e561021 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingRequestConsumer.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.http.HttpChunk; +import org.opensearch.http.StreamingHttpChannel; + +import java.util.function.Consumer; + +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.LastHttpContent; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import reactor.netty.http.server.HttpServerRequest; +import reactor.netty.http.server.HttpServerResponse; + +class ReactorNetty4StreamingRequestConsumer<T extends HttpContent> implements Consumer<T>, Publisher<HttpContent> { + private final ReactorNetty4StreamingResponseProducer sender; + private final StreamingHttpChannel httpChannel; + + ReactorNetty4StreamingRequestConsumer(HttpServerRequest request, HttpServerResponse response) { + this.sender = new ReactorNetty4StreamingResponseProducer(); + this.httpChannel = new ReactorNetty4StreamingHttpChannel(request, response, sender); + } + + @Override + public void accept(T message) { + if (message instanceof LastHttpContent) { + httpChannel.receiveChunk(createChunk(message, true)); + } else if (message instanceof HttpContent) { + httpChannel.receiveChunk(createChunk(message, false)); + } + } + + @Override + public void subscribe(Subscriber<? super HttpContent> s) { + sender.subscribe(s); + } + + HttpChunk createChunk(HttpContent chunk, boolean last) { + return new ReactorNetty4HttpChunk(chunk.content().retain(), last); + } + + StreamingHttpChannel httpChannel() { + return httpChannel; + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java new file mode 100644 index 0000000000000..616edccdfc396 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4StreamingResponseProducer.java @@ -0,0 +1,54 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.core.action.ActionListener; + +import io.netty.handler.codec.http.HttpContent; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import reactor.core.publisher.Flux; +import reactor.core.publisher.FluxSink; + +class ReactorNetty4StreamingResponseProducer implements StreamingHttpContentSender, Publisher<HttpContent> { + private final Publisher<HttpContent> sender; + private volatile FluxSink<HttpContent> emitter; + + ReactorNetty4StreamingResponseProducer() { + this.sender = Flux.create(emitter -> this.emitter = emitter); + } + + @Override + public void send(HttpContent content, ActionListener<Void> listener, boolean isLast) { + try { + emitter.next(content); + listener.onResponse(null); + if (isLast) { + emitter.complete(); + } + } catch (final Exception ex) { + emitter.error(ex); + listener.onFailure(ex); + } + } + + @Override + public void subscribe(Subscriber<? super HttpContent> s) { + sender.subscribe(s); + } + + @Override + public boolean isReady() { + return emitter != null; + } + + FluxSink<HttpContent> emitter() { + return emitter; + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/StreamingHttpContentSender.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/StreamingHttpContentSender.java new file mode 100644 index 0000000000000..f07d6fbb88349 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/StreamingHttpContentSender.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.core.action.ActionListener; + +import io.netty.handler.codec.http.HttpContent; + +/** + * The generic interface for chunked {@link HttpContent} producers (response streaming).
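+ * + * <p>A minimal usage sketch (illustrative only; {@code chunk} and {@code lastChunk} stand in for already-built {@link HttpContent} instances): + * <pre>{@code + * sender.send(chunk, ActionListener.wrap(r -> {}, e -> {}), false); // intermediate chunk + * sender.send(lastChunk, ActionListener.wrap(r -> {}, e -> {}), true); // last chunk completes the stream + * }</pre>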
+ */ +interface StreamingHttpContentSender { + /** + * Sends the next {@link HttpContent} over the wire + * @param content next {@link HttpContent} + * @param listener action listener + * @param isLast {@code true} if this is the last chunk, {@code false} otherwise + */ + void send(HttpContent content, ActionListener<Void> listener, boolean isLast); + + /** + * Returns {@code true} if this channel is ready for streaming response data, {@code false} otherwise + * @return {@code true} if this channel is ready for streaming response data, {@code false} otherwise + */ + boolean isReady(); +} diff --git a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java index 920c895205023..0953e51484bd3 100644 --- a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java +++ b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java @@ -14,16 +14,22 @@ package org.opensearch.http.reactor.netty4; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.tasks.Task; import org.opensearch.test.OpenSearchTestCase; import java.io.Closeable; +import java.io.IOException; +import java.io.UncheckedIOException; import java.net.InetSocketAddress; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.stream.Stream; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; @@ -36,6 +42,7 @@ import io.netty.handler.codec.http.HttpContent; import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpRequest; import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.HttpVersion; import io.netty.handler.codec.http2.HttpConversionUtil; @@ -121,6 +128,11 @@ public final FullHttpResponse send(InetSocketAddress remoteAddress, FullHttpRequ return responses.get(0); } + public final FullHttpResponse stream(InetSocketAddress remoteAddress, HttpRequest httpRequest, Stream<ToXContent> stream) + throws InterruptedException { + return sendRequestStream(remoteAddress, httpRequest, stream); + } + public final FullHttpResponse send(InetSocketAddress remoteAddress, FullHttpRequest httpRequest, HttpContent content) throws InterruptedException { final List<FullHttpResponse> responses = sendRequests( @@ -207,6 +219,46 @@ private List<FullHttpResponse> sendRequests( } } + private FullHttpResponse sendRequestStream( + final InetSocketAddress remoteAddress, + final HttpRequest request, + final Stream<ToXContent> stream + ) { + final NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup(1); + try { + final HttpClient client = createClient(remoteAddress, eventLoopGroup); + + return client.headers(h -> h.add(request.headers())) + .baseUrl(request.getUri()) + .request(request.method()) + .send(Flux.fromStream(stream).map(s -> { + try (XContentBuilder builder = XContentType.JSON.contentBuilder()) { + return Unpooled.wrappedBuffer( + s.toXContent(builder, ToXContent.EMPTY_PARAMS).toString().getBytes(StandardCharsets.UTF_8) + ); + } catch (final IOException ex) { + throw new UncheckedIOException(ex); + } + })) + .response( + (r, c) -> c.aggregate() + .map( + b -> new
DefaultFullHttpResponse( + r.version(), + r.status(), + b.retain(), + r.responseHeaders(), + EmptyHttpHeaders.INSTANCE + ) + ) + ) + .blockLast(); + + } finally { + eventLoopGroup.shutdownGracefully().awaitUninterruptibly(); + } + } + private HttpClient createClient(final InetSocketAddress remoteAddress, final NioEventLoopGroup eventLoopGroup) { final HttpClient client = HttpClient.newConnection() .resolver(DefaultAddressResolverGroup.INSTANCE) diff --git a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportStreamingTests.java b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportStreamingTests.java new file mode 100644 index 0000000000000..a7bf71e58e9b6 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportStreamingTests.java @@ -0,0 +1,211 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.http.reactor.netty4; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.MockBigArrays; +import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.support.XContentHttpChunk; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.RestRequest.Method; +import org.opensearch.rest.StreamingRestChannel; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestRequest; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.reactor.SharedGroupFactory; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.Arrays; +import java.util.Collections; +import java.util.Map; +import java.util.Optional; +import java.util.function.Function; +import java.util.stream.Collectors; + +import io.netty.handler.codec.http.DefaultHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpVersion; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.Matchers.equalTo; + +/** + * Tests for the {@link 
ReactorNetty4HttpServerTransport} class with streaming support. + */ +public class ReactorNetty4HttpServerTransportStreamingTests extends OpenSearchTestCase { + private static final Function XCONTENT_CONVERTER = (str) -> new ToXContent() { + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + return builder.startObject().field("doc", str).endObject(); + } + }; + + private NetworkService networkService; + private ThreadPool threadPool; + private MockBigArrays bigArrays; + private ClusterSettings clusterSettings; + + @Before + public void setup() throws Exception { + networkService = new NetworkService(Collections.emptyList()); + threadPool = new TestThreadPool("test"); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + } + + @After + public void shutdown() throws Exception { + if (threadPool != null) { + threadPool.shutdownNow(); + } + threadPool = null; + networkService = null; + bigArrays = null; + clusterSettings = null; + } + + public void testRequestResponseStreaming() throws InterruptedException { + final String responseString = randomAlphaOfLength(4 * 1024); + final String url = "/stream/"; + + final ToXContent[] chunks = newChunks(responseString); + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + @Override + public Optional dispatchHandler(String uri, String rawPath, Method method, Map params) { + return Optional.of(new RestHandler() { + @Override + public boolean supportsStreaming() { + return true; + } + + @Override + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { + logger.error("--> Unexpected request [{}]", request.uri()); + throw new AssertionError(); + } + }); + } + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + if (url.equals(request.uri())) { + assertThat(channel, instanceOf(StreamingRestChannel.class)); + final StreamingRestChannel streamingChannel = (StreamingRestChannel) channel; + + // Await at most 5 seconds till channel is ready for writing the response stream, fail otherwise + final Mono ready = Mono.fromRunnable(() -> { + while (!streamingChannel.isWritable()) { + Thread.onSpinWait(); + } + }).timeout(Duration.ofSeconds(5)); + + threadPool.executor(ThreadPool.Names.WRITE) + .execute(() -> Flux.concat(Flux.fromArray(newChunks(responseString)).map(e -> { + try (XContentBuilder builder = channel.newBuilder(XContentType.JSON, true)) { + return XContentHttpChunk.from(e.toXContent(builder, ToXContent.EMPTY_PARAMS)); + } catch (final IOException ex) { + throw new UncheckedIOException(ex); + } + }), Mono.just(XContentHttpChunk.last())) + .delaySubscription(ready) + .subscribe(streamingChannel::sendChunk, null, () -> { + if (channel.bytesOutput() instanceof Releasable) { + ((Releasable) channel.bytesOutput()).close(); + } + })); + } else { + logger.error("--> Unexpected successful uri [{}]", request.uri()); + throw new AssertionError(); + } + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError(); + } + + }; + + try 
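+ // Flow under test: dispatchHandler() above advertises a streaming-capable handler via
+ // supportsStreaming(), the transport hands dispatchRequest() a StreamingRestChannel, and
+ // the response chunks are pushed through sendChunk() once the channel reports isWritable().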
( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + Settings.EMPTY, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + try (ReactorHttpClient client = ReactorHttpClient.create(false)) { + HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url); + final FullHttpResponse response = client.stream(remoteAddress.address(), request, Arrays.stream(chunks)); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.OK)); + byte[] bytes = new byte[response.content().readableBytes()]; + response.content().readBytes(bytes); + assertThat(new String(bytes, StandardCharsets.UTF_8), equalTo(Arrays.stream(newChunks(responseString)).map(s -> { + try (XContentBuilder builder = XContentType.JSON.contentBuilder()) { + return s.toXContent(builder, ToXContent.EMPTY_PARAMS).toString(); + } catch (final IOException ex) { + throw new UncheckedIOException(ex); + } + }).collect(Collectors.joining("")))); + } finally { + response.release(); + } + } + } + } + + private static ToXContent[] newChunks(final String responseString) { + final ToXContent[] chunks = new ToXContent[responseString.length() / 16]; + + for (int chunk = 0; chunk < responseString.length(); chunk += 16) { + chunks[chunk / 16] = XCONTENT_CONVERTER.apply(responseString.substring(chunk, chunk + 16)); + } + + return chunks; + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml index fa71137912a91..996c2aae8cfe4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml @@ -658,6 +658,7 @@ setup: settings: number_of_replicas: 0 number_of_shards: 1 + refresh_interval: -1 mappings: properties: date: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml index 3a0099dae3b33..78e2e6858c6ff 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml @@ -1083,6 +1083,7 @@ setup: settings: number_of_replicas: 0 number_of_shards: 1 + refresh_interval: -1 mappings: properties: date: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/330_auto_date_histogram.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/330_auto_date_histogram.yml index 1356eac41ae79..fc82517788c91 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/330_auto_date_histogram.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/330_auto_date_histogram.yml @@ -106,8 +106,36 @@ setup: version: " - 2.99.99" reason: debug info for filter rewrite added in 3.0.0 (to be backported to 2.14.0) + - do: + indices.create: + index: test_profile + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + refresh_interval: -1 + mappings: + properties: + date: + type: date + + - do: + bulk: + index: test_profile + refresh: true 
+ body: + - '{"index": {}}' + - '{"date": "2020-03-01", "v": 1}' + - '{"index": {}}' + - '{"date": "2020-03-02", "v": 2}' + - '{"index": {}}' + - '{"date": "2020-03-08", "v": 3}' + - '{"index": {}}' + - '{"date": "2020-03-09", "v": 4}' + - do: search: + index: test_profile body: profile: true size: 0 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml new file mode 100644 index 0000000000000..05b6b2e5ed712 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml @@ -0,0 +1,229 @@ +setup: + - skip: + version: " - 2.99.99" + reason: "Added in 2.15, but need to skip pre-3.0 before backport" + + - do: + indices.create: + index: test + body: + mappings: + properties: + my_field: + type: wildcard + fields: + lower: + type: wildcard + normalizer: lowercase + doc_values: + type: wildcard + doc_values: true + + - do: + index: + index: test + id: 1 + body: + my_field: "org.opensearch.transport.NodeDisconnectedException: [node_s0][127.0.0.1:39953][disconnected] disconnected" + - do: + index: + index: test + id: 2 + body: + my_field: "[2024-06-08T06:31:37,443][INFO ][o.o.c.c.Coordinator ] [node_s2] cluster-manager node [{node_s0}{Nj7FjR7hRP2lh_zur8KN_g}{OTGOoWmmSsWP_RQ3tIKJ9g}{127.0.0.1}{127.0.0.1:39953}{imr}{shard_indexing_pressure_enabled=true}] failed, restarting discovery" + + - do: + index: + index: test + id: 3 + body: + my_field: "[2024-06-08T06:31:37,451][INFO ][o.o.c.s.ClusterApplierService] [node_s2] cluster-manager node changed {previous [{node_s0}{Nj7FjR7hRP2lh_zur8KN_g}{OTGOoWmmSsWP_RQ3tIKJ9g}{127.0.0.1}{127.0.0.1:39953}{imr}{shard_indexing_pressure_enabled=true}], current []}, term: 1, version: 24, reason: becoming candidate: onLeaderFailure" + - do: + index: + index: test + id: 4 + body: + my_field: "[2024-06-08T06:31:37,452][WARN ][o.o.c.NodeConnectionsService] [node_s1] failed to connect to {node_s0}{Nj7FjR7hRP2lh_zur8KN_g}{OTGOoWmmSsWP_RQ3tIKJ9g}{127.0.0.1}{127.0.0.1:39953}{imr}{shard_indexing_pressure_enabled=true} (tried [1] times)" + - do: + index: + index: test + id: 5 + body: + my_field: "AbCd" + - do: + index: + index: test + id: 6 + body: + other_field: "test" + - do: + indices.refresh: {} + +--- +"term query matches exact value": + - do: + search: + index: test + body: + query: + term: + my_field: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + - do: + search: + index: test + body: + query: + term: + my_field.doc_values: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + +--- +"term query matches lowercase-normalized value": + - do: + search: + index: test + body: + query: + term: + my_field.lower: "abcd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + - do: + search: + index: test + body: + query: + term: + my_field.lower: "ABCD" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + + - do: + search: + index: test + body: + query: + term: + my_field: "abcd" + - match: { hits.total.value: 0 } + +--- +"wildcard query matches": + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*Node*Exception*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + +--- +"wildcard query matches lowercase-normalized field": + - do: + search: + index: test + body: + query: + wildcard: + my_field.lower: + value: "*node*exception*" + - 
match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + + - do: + search: + index: test + body: + query: + wildcard: + my_field.lower: + value: "*NODE*EXCEPTION*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1" } + + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*node*exception*" + - match: { hits.total.value: 0 } + +--- +"prefix query matches": + - do: + search: + index: test + body: + query: + prefix: + my_field: + value: "[2024-06-08T" + - match: { hits.total.value: 3 } + +--- +"regexp query matches": + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*06-08.*cluster-manager node.*" + - match: { hits.total.value: 2 } + +--- +"regexp query matches lowercase-normalized field": + - do: + search: + index: test + body: + query: + regexp: + my_field.lower: + value: ".*06-08.*Cluster-Manager Node.*" + - match: { hits.total.value: 2 } + + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*06-08.*Cluster-Manager Node.*" + - match: { hits.total.value: 0 } + +--- +"wildcard match-all works": + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "*" + - match: { hits.total.value: 5 } +--- +"regexp match-all works": + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: ".*" + - match: { hits.total.value: 5 } diff --git a/server/licenses/lucene-analysis-common-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-analysis-common-9.11.0-snapshot-4be6531.jar.sha1 deleted file mode 100644 index 08339fa8a4ce1..0000000000000 --- a/server/licenses/lucene-analysis-common-9.11.0-snapshot-4be6531.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9cc4e600289bf1171b47de74536bd34c476f85a8 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.11.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.11.0.jar.sha1 new file mode 100644 index 0000000000000..7139f6a43a15a --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.11.0.jar.sha1 @@ -0,0 +1 @@ +75a0a333cf1e043102743066c929e65fe51cbcda \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-backward-codecs-9.11.0-snapshot-4be6531.jar.sha1 deleted file mode 100644 index 3dce8a2162edd..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.11.0-snapshot-4be6531.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8babfe85be7e36c893741e08072c11e71db09715 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.11.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.11.0.jar.sha1 new file mode 100644 index 0000000000000..735e80b60b001 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.11.0.jar.sha1 @@ -0,0 +1 @@ +db385446bc3fd70e7c6a744276c0a157bd60ee0a \ No newline at end of file diff --git a/server/licenses/lucene-core-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-core-9.11.0-snapshot-4be6531.jar.sha1 deleted file mode 100644 index 943a9b2fd214b..0000000000000 --- a/server/licenses/lucene-core-9.11.0-snapshot-4be6531.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3c2361bd633374ae3814b175cc25ccf773f67026 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.11.0.jar.sha1 b/server/licenses/lucene-core-9.11.0.jar.sha1 new file mode 100644 index 0000000000000..b0d38c4165581 --- /dev/null +++ b/server/licenses/lucene-core-9.11.0.jar.sha1 @@ -0,0 +1 @@ +2e487755a6814b2a1bc770c26569dcba86873dcf \ No newline at end of file diff --git 
a/server/licenses/lucene-grouping-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-grouping-9.11.0-snapshot-4be6531.jar.sha1 deleted file mode 100644 index 8587c3ed5e82a..0000000000000 --- a/server/licenses/lucene-grouping-9.11.0-snapshot-4be6531.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d9f29b49cd1e0a061ff7fa4a53e8605bd49bd3d0 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.11.0.jar.sha1 b/server/licenses/lucene-grouping-9.11.0.jar.sha1 new file mode 100644 index 0000000000000..562de95605b60 --- /dev/null +++ b/server/licenses/lucene-grouping-9.11.0.jar.sha1 @@ -0,0 +1 @@ +882bdaf209b0acb332aa34836616424bcbecf462 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-highlighter-9.11.0-snapshot-4be6531.jar.sha1 deleted file mode 100644 index 25579432a9cbd..0000000000000 --- a/server/licenses/lucene-highlighter-9.11.0-snapshot-4be6531.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -33bc26d46d62bb1cf3bf725db637226a43db7625 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.11.0.jar.sha1 b/server/licenses/lucene-highlighter-9.11.0.jar.sha1 new file mode 100644 index 0000000000000..e0ef36d321c9d --- /dev/null +++ b/server/licenses/lucene-highlighter-9.11.0.jar.sha1 @@ -0,0 +1 @@ +44accdc03c5482e602718f7bf91e5940ba4e4870 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-join-9.11.0-snapshot-4be6531.jar.sha1 deleted file mode 100644 index 1bfef89965e67..0000000000000 --- a/server/licenses/lucene-join-9.11.0-snapshot-4be6531.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -82966698abdb8f0367a162f642560566a6085dc8 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.11.0.jar.sha1 b/server/licenses/lucene-join-9.11.0.jar.sha1 new file mode 100644 index 0000000000000..34c618ccfbcc7 --- /dev/null +++ b/server/licenses/lucene-join-9.11.0.jar.sha1 @@ -0,0 +1 @@ +32a30ee03ed4f3e43bf63250270b2d4d53050045 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-memory-9.11.0-snapshot-4be6531.jar.sha1 deleted file mode 100644 index 73adf3fcb2829..0000000000000 --- a/server/licenses/lucene-memory-9.11.0-snapshot-4be6531.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -746f392e7ec27a7cd6ca2add7dd8441d2a6085da \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.11.0.jar.sha1 b/server/licenses/lucene-memory-9.11.0.jar.sha1 new file mode 100644 index 0000000000000..d730cfb4b7660 --- /dev/null +++ b/server/licenses/lucene-memory-9.11.0.jar.sha1 @@ -0,0 +1 @@ +b3e80aa6aa3299118e76a23edc23b58f3ba5a515 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-misc-9.11.0-snapshot-4be6531.jar.sha1 deleted file mode 100644 index 7f7dfead4c329..0000000000000 --- a/server/licenses/lucene-misc-9.11.0-snapshot-4be6531.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0eb06ecc39c0ec0db380a6e5aad1b16907e0bfd9 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.11.0.jar.sha1 b/server/licenses/lucene-misc-9.11.0.jar.sha1 new file mode 100644 index 0000000000000..9be27f004435b --- /dev/null +++ b/server/licenses/lucene-misc-9.11.0.jar.sha1 @@ -0,0 +1 @@ +54fe308908194e1b0697a1157a45c5998c9e1083 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-queries-9.11.0-snapshot-4be6531.jar.sha1 
deleted file mode 100644 index e3d400003efd8..0000000000000 --- a/server/licenses/lucene-queries-9.11.0-snapshot-4be6531.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0e56eb18cceffcd5ce2e47b679e873420254df74 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.11.0.jar.sha1 b/server/licenses/lucene-queries-9.11.0.jar.sha1 new file mode 100644 index 0000000000000..b445610c25858 --- /dev/null +++ b/server/licenses/lucene-queries-9.11.0.jar.sha1 @@ -0,0 +1 @@ +987d1286949ddf514b8405fd453ed47bebdfb12d \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-queryparser-9.11.0-snapshot-4be6531.jar.sha1 deleted file mode 100644 index 8e8c7f5171107..0000000000000 --- a/server/licenses/lucene-queryparser-9.11.0-snapshot-4be6531.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dee3997a72eeae905e92930f53e724b6bef279da \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.11.0.jar.sha1 b/server/licenses/lucene-queryparser-9.11.0.jar.sha1 new file mode 100644 index 0000000000000..a1620ba9c7708 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.11.0.jar.sha1 @@ -0,0 +1 @@ +e97fe1c0d102edb8d6e1c01454992fd2b8d80ae0 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-sandbox-9.11.0-snapshot-4be6531.jar.sha1 deleted file mode 100644 index 2d1df051e30b4..0000000000000 --- a/server/licenses/lucene-sandbox-9.11.0-snapshot-4be6531.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -946bc45b87b3d770ab6828b0d0a5f8684f2c3624 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.11.0.jar.sha1 b/server/licenses/lucene-sandbox-9.11.0.jar.sha1 new file mode 100644 index 0000000000000..0dc193f054973 --- /dev/null +++ b/server/licenses/lucene-sandbox-9.11.0.jar.sha1 @@ -0,0 +1 @@ +5e46b790744bd9118ccc053f70235364213312a5 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-spatial-extras-9.11.0-snapshot-4be6531.jar.sha1 deleted file mode 100644 index 0f9b7c0e90218..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.11.0-snapshot-4be6531.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d73667f61fb5e7fde4cec52fcfbbfd9847068aec \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.11.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.11.0.jar.sha1 new file mode 100644 index 0000000000000..9d3a8d2857db6 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.11.0.jar.sha1 @@ -0,0 +1 @@ +079ca5aaf544a3acde84b8b88423ace6dedc23eb \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-spatial3d-9.11.0-snapshot-4be6531.jar.sha1 deleted file mode 100644 index 87894603e0d84..0000000000000 --- a/server/licenses/lucene-spatial3d-9.11.0-snapshot-4be6531.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a8e8ab80bfb6abd70932e50fe31e13ecf2e00987 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.11.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.11.0.jar.sha1 new file mode 100644 index 0000000000000..fd5ff875a0113 --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.11.0.jar.sha1 @@ -0,0 +1 @@ +564558818d70fc384db5b36fbc8a0ab27b107609 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.11.0-snapshot-4be6531.jar.sha1 b/server/licenses/lucene-suggest-9.11.0-snapshot-4be6531.jar.sha1 deleted file mode 100644 index 
6100f6fe0d585..0000000000000 --- a/server/licenses/lucene-suggest-9.11.0-snapshot-4be6531.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -45d6f0facd45d4e49585f0dabfa62ed5a1883033 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.11.0.jar.sha1 b/server/licenses/lucene-suggest-9.11.0.jar.sha1 new file mode 100644 index 0000000000000..2fa96e97f307a --- /dev/null +++ b/server/licenses/lucene-suggest-9.11.0.jar.sha1 @@ -0,0 +1 @@ +aa345db9b6caaf881e7890ea5b8911357d592167 \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java index ab2f0f0080566..f6c7355ea06f6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java @@ -26,13 +26,13 @@ import java.util.function.Function; import java.util.stream.Collectors; -import static org.opensearch.gateway.remote.RemoteClusterStateService.COORDINATION_METADATA; -import static org.opensearch.gateway.remote.RemoteClusterStateService.CUSTOM_METADATA; -import static org.opensearch.gateway.remote.RemoteClusterStateService.DELIMITER; -import static org.opensearch.gateway.remote.RemoteClusterStateService.METADATA_FILE_PREFIX; import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; -import static org.opensearch.gateway.remote.RemoteClusterStateService.SETTING_METADATA; -import static org.opensearch.gateway.remote.RemoteClusterStateService.TEMPLATES_METADATA; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.METADATA_FILE_PREFIX; +import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA; +import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_METADATA; +import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTING_METADATA; +import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteClusterStateServiceIT extends RemoteStoreBaseIntegTestCase { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 766ca2c1189e5..9888d2d8abd98 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -631,6 +631,7 @@ public void testCacheWithFilteredAlias() throws InterruptedException { assertCacheState(client, index, 2, 2); } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/11374") public void testProfileDisableCache() throws Exception { Client client = client(); String index = "index"; @@ -673,6 +674,7 @@ public void testProfileDisableCache() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/12308") public void testCacheWithInvalidation() throws Exception { Client client = client(); String index = "index"; @@ -758,6 +760,7 @@ public void 
testCacheClearAPIRemovesStaleKeysWhenStalenessThresholdIsLow() throw } // when staleness threshold is lower than staleness, it should clean the stale keys from cache + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13540") public void testStaleKeysCleanupWithLowThreshold() throws Exception { int cacheCleanIntervalInMillis = 1; String node = internalCluster().startNode( @@ -804,6 +807,7 @@ public void testStaleKeysCleanupWithLowThreshold() throws Exception { } // when staleness threshold is equal to staleness, it should clean the stale keys from cache + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13503") public void testCacheCleanupOnEqualStalenessAndThreshold() throws Exception { int cacheCleanIntervalInMillis = 1; String node = internalCluster().startNode( @@ -982,6 +986,7 @@ public void testStaleKeysRemovalWithoutExplicitThreshold() throws Exception { } // when cache cleaner interval setting is not set, cache cleaner is configured appropriately with the fall-back setting + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13711") public void testCacheCleanupWithDefaultSettings() throws Exception { int cacheCleanIntervalInMillis = 1; String node = internalCluster().startNode( @@ -1022,6 +1027,7 @@ public void testCacheCleanupWithDefaultSettings() throws Exception { } // staleness threshold updates flows through to the cache cleaner + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13949") public void testDynamicStalenessThresholdUpdate() throws Exception { int cacheCleanIntervalInMillis = 1; String node = internalCluster().startNode( @@ -1169,6 +1175,7 @@ public void testCacheCleanupAfterIndexDeletion() throws Exception { } // when staleness threshold is lower than staleness, it should clean the cache from all indices having stale keys + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13437") public void testStaleKeysCleanupWithMultipleIndices() throws Exception { int cacheCleanIntervalInMillis = 10; String node = internalCluster().startNode( @@ -1223,6 +1230,7 @@ public void testStaleKeysCleanupWithMultipleIndices() throws Exception { }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS); } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13600") public void testDeleteAndCreateSameIndexShardOnSameNode() throws Exception { String node_1 = internalCluster().startNode(Settings.builder().build()); Client client = client(node_1); @@ -1280,8 +1288,8 @@ public void testDeleteAndCreateSameIndexShardOnSameNode() throws Exception { final Index index = state.metadata().index(indexName).getIndex(); assertBusy(() -> { - assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(false)); - assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(true)); + assertFalse(Arrays.stream(shardDirectory(node_1, index, 0)).anyMatch(Files::exists)); + assertEquals(1, Arrays.stream(shardDirectory(node_2, index, 0)).filter(Files::exists).count()); }); logger.info("Moving the shard: {} again from node:{} to node:{}", indexName + "#0", node_2, node_1); @@ -1294,11 +1302,10 @@ public void testDeleteAndCreateSameIndexShardOnSameNode() throws Exception { .setWaitForNoInitializingShards(true) .get(); assertThat(clusterHealth.isTimedOut(), equalTo(false)); - assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true)); assertBusy(() -> { - assertThat(Files.exists(shardDirectory(node_1, index, 0)), 
equalTo(true)); - assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(false)); + assertEquals(1, Arrays.stream(shardDirectory(node_1, index, 0)).filter(Files::exists).count()); + assertFalse(Arrays.stream(shardDirectory(node_2, index, 0)).anyMatch(Files::exists)); }); logger.info("Clearing the cache for index:{}. And verify the request stats doesn't go negative", indexName); @@ -1311,11 +1318,12 @@ public void testDeleteAndCreateSameIndexShardOnSameNode() throws Exception { assertTrue(stats.getMemorySizeInBytes() == 0); } - private Path shardDirectory(String server, Index index, int shard) { + private Path[] shardDirectory(String server, Index index, int shard) { NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, server); final Path[] paths = env.availableShardPaths(new ShardId(index, shard)); - assert paths.length == 1; - return paths[0]; + // the shard may have more than one available path, + // depending on `InternalTestCluster.numDataPaths`. + return paths; } private void setupIndex(Client client, String index) throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java index 8ce87f37d77cd..cf93a432d0371 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java @@ -104,7 +104,6 @@ import org.opensearch.indices.recovery.RecoveryState.Stage; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.node.NodeClosedException; -import org.opensearch.node.RecoverySettingsChunkSizePlugin; import org.opensearch.plugins.AnalysisPlugin; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.PluginsService; @@ -156,7 +155,7 @@ import static java.util.stream.Collectors.toList; import static org.opensearch.action.DocWriteResponse.Result.CREATED; import static org.opensearch.action.DocWriteResponse.Result.UPDATED; -import static org.opensearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING; +import static org.opensearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.empty; @@ -187,7 +186,6 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList( MockTransportService.TestPlugin.class, MockFSIndexStore.TestPlugin.class, - RecoverySettingsChunkSizePlugin.class, TestAnalysisPlugin.class, InternalSettingsPlugin.class, MockEngineFactoryPlugin.class @@ -263,7 +261,7 @@ private void slowDownRecovery(ByteSizeValue shardSize) { // one chunk per sec..
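// A minimal sketch of tuning the chunk size via the new dynamic cluster setting (the
// "indices.recovery.chunk_size" key string is assumed from INDICES_RECOVERY_CHUNK_SIZE_SETTING):
//   client().admin().cluster().prepareUpdateSettings()
//       .setTransientSettings(Settings.builder().put("indices.recovery.chunk_size", "8kb"))
//       .get();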
.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), chunkSize, ByteSizeUnit.BYTES) // small chunks - .put(CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkSize, ByteSizeUnit.BYTES)) + .put(INDICES_RECOVERY_CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkSize, ByteSizeUnit.BYTES)) ) .get() .isAcknowledged() @@ -278,7 +276,10 @@ private void restoreRecoverySpeed() { .setTransientSettings( Settings.builder() .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "20mb") - .put(CHUNK_SIZE_SETTING.getKey(), RecoverySettings.DEFAULT_CHUNK_SIZE) + .put( + INDICES_RECOVERY_CHUNK_SIZE_SETTING.getKey(), + RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE_SETTING.getDefault(Settings.EMPTY) + ) ) .get() .isAcknowledged() diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java index 0752ab7c9d0f1..d9e3cec426edf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java @@ -36,6 +36,7 @@ import org.opensearch.action.admin.cluster.health.ClusterHealthRequestBuilder; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.indices.create.CreateIndexResponse; import org.opensearch.action.admin.indices.recovery.RecoveryResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; @@ -45,6 +46,8 @@ import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; @@ -253,4 +256,144 @@ public void testNoRebalanceOnRollingRestart() throws Exception { ); } } + + public void testFullRollingRestart_withNoRecoveryPayloadAndSource() throws Exception { + internalCluster().startNode(); + XContentBuilder builder = XContentFactory.jsonBuilder() + .startObject() + .startObject("_source") + .field("enabled") + .value(false) + .field("recovery_source_enabled") + .value(false) + .endObject() + .endObject(); + CreateIndexResponse response = prepareCreate("test").setMapping(builder).get(); + logger.info("Create index response is : {}", response); + + final String healthTimeout = "1m"; + + for (int i = 0; i < 1000; i++) { + client().prepareIndex("test") + .setId(Long.toString(i)) + .setSource(MapBuilder.newMapBuilder().put("test", "value" + i).map()) + .execute() + .actionGet(); + } + + for (int i = 1000; i < 2000; i++) { + client().prepareIndex("test") + .setId(Long.toString(i)) + .setSource(MapBuilder.newMapBuilder().put("test", "value" + i).map()) + .execute() + .actionGet(); + } + // ensuring all docs are committed to file system + flush(); + + logger.info("--> now start adding nodes"); + internalCluster().startNode(); + internalCluster().startNode(); + + // make sure the cluster state is green, and all has been recovered + assertTimeout( + client().admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setTimeout(healthTimeout) + .setWaitForGreenStatus() + .setWaitForNoRelocatingShards(true) + .setWaitForNodes("3") + ); + + logger.info("--> 
add two more nodes"); + internalCluster().startNode(); + internalCluster().startNode(); + + // make sure the cluster state is green, and all has been recovered + assertTimeout( + client().admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setTimeout(healthTimeout) + .setWaitForGreenStatus() + .setWaitForNoRelocatingShards(true) + .setWaitForNodes("5") + ); + + logger.info("--> refreshing and checking data"); + refreshAndWaitForReplication(); + for (int i = 0; i < 10; i++) { + assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L); + } + + // now start shutting nodes down + internalCluster().stopRandomDataNode(); + // make sure the cluster state is green, and all has been recovered + assertTimeout( + client().admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setTimeout(healthTimeout) + .setWaitForGreenStatus() + .setWaitForNoRelocatingShards(true) + .setWaitForNodes("4") + ); + + internalCluster().stopRandomDataNode(); + // make sure the cluster state is green, and all has been recovered + assertTimeout( + client().admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setTimeout(healthTimeout) + .setWaitForGreenStatus() + .setWaitForNoRelocatingShards(true) + .setWaitForNodes("3") + ); + + logger.info("--> stopped two nodes, verifying data"); + refreshAndWaitForReplication(); + for (int i = 0; i < 10; i++) { + assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L); + } + + // closing the 3rd node + internalCluster().stopRandomDataNode(); + // make sure the cluster state is green, and all has been recovered + assertTimeout( + client().admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setTimeout(healthTimeout) + .setWaitForGreenStatus() + .setWaitForNoRelocatingShards(true) + .setWaitForNodes("2") + ); + + internalCluster().stopRandomDataNode(); + + // make sure the cluster state is yellow, and all has been recovered + assertTimeout( + client().admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setTimeout(healthTimeout) + .setWaitForYellowStatus() + .setWaitForNoRelocatingShards(true) + .setWaitForNodes("1") + ); + + logger.info("--> one node left, verifying data"); + refreshAndWaitForReplication(); + for (int i = 0; i < 10; i++) { + assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L); + } + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java index bf0533143cf91..692beb86279b9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java @@ -46,7 +46,6 @@ import org.opensearch.index.query.QueryBuilders; import org.opensearch.indices.recovery.FileChunkRequest; import org.opensearch.indices.recovery.PeerRecoveryTargetService; -import org.opensearch.node.RecoverySettingsChunkSizePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; @@ -61,7 +60,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; -import static org.opensearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING; +import static 
org.opensearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -81,7 +80,7 @@ public static Collection parameters() { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class, RecoverySettingsChunkSizePlugin.class); + return Arrays.asList(MockTransportService.TestPlugin.class); } /** @@ -96,7 +95,8 @@ public void testCancelRecoveryAndResume() throws Exception { .cluster() .prepareUpdateSettings() .setTransientSettings( - Settings.builder().put(CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES)) + Settings.builder() + .put(INDICES_RECOVERY_CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES)) ) .get() .isAcknowledged() diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java index 0493bcf800c97..901b36f872622 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java @@ -21,7 +21,6 @@ import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.Before; @@ -87,11 +86,6 @@ protected Settings nodeSettings(int nodeOrdinal) { } } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); - } - protected void setFailRate(String repoName, int value) throws ExecutionException, InterruptedException { GetRepositoriesRequest gr = new GetRepositoriesRequest(new String[] { repoName }); GetRepositoriesResponse res = client().admin().cluster().getRepositories(gr).get(); @@ -186,10 +180,11 @@ private Thread getIndexingThread() { indexSingleDoc(indexName); long currentDocCount = indexedDocs.incrementAndGet(); if (currentDocCount > 0 && currentDocCount % refreshFrequency == 0) { - logger.info("--> [iteration {}] flushing index", currentDocCount); if (rarely()) { + logger.info("--> [iteration {}] flushing index", currentDocCount); client().admin().indices().prepareFlush(indexName).get(); } else { + logger.info("--> [iteration {}] refreshing index", currentDocCount); client().admin().indices().prepareRefresh(indexName).get(); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java index 5094a7cf29c6a..d046f41ce0590 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java @@ -18,6 +18,7 @@ import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexService; +import 
org.opensearch.index.ReplicationStats;
 import org.opensearch.index.remote.RemoteSegmentStats;
 import org.opensearch.index.seqno.RetentionLease;
 import org.opensearch.index.seqno.RetentionLeases;
@@ -665,6 +666,43 @@ public void testFailoverRemotePrimaryToDocrepReplicaReseedToRemotePrimary() thro
         });
     }
 
+    /*
+    Performs the same experiment as testRemotePrimaryDocRepReplica.
+
+    This ensures that the primary shard for the index has moved over to a remote-enabled
+    node whereas the replica copy is still left behind on the docrep nodes.
+
+    At this stage, segrep lag computation shouldn't consider the docrep shard copy while calculating bytes lag.
+    */
+    public void testZeroSegrepLagForShardsWithMixedReplicationGroup() throws Exception {
+        testRemotePrimaryDocRepReplica();
+        String remoteNodeName = internalCluster().client()
+            .admin()
+            .cluster()
+            .prepareNodesStats()
+            .get()
+            .getNodes()
+            .stream()
+            .filter(nodeStats -> nodeStats.getNode().isRemoteStoreNode())
+            .findFirst()
+            .get()
+            .getNode()
+            .getName();
+        ReplicationStats replicationStats = internalCluster().client()
+            .admin()
+            .cluster()
+            .prepareNodesStats(remoteNodeName)
+            .get()
+            .getNodes()
+            .get(0)
+            .getIndices()
+            .getSegments()
+            .getReplicationStats();
+        assertEquals(0, replicationStats.getMaxBytesBehind());
+        assertEquals(0, replicationStats.getTotalBytesBehind());
+        assertEquals(0, replicationStats.getMaxReplicationLag());
+    }
+
     private void assertReplicaAndPrimaryConsistency(String indexName, int firstBatch, int secondBatch) throws Exception {
         assertBusy(() -> {
             Map shardStatsMap = internalCluster()
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java
index c72b6851c1125..793adef0594fc 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java
@@ -273,6 +273,7 @@ initalMetadataVersion < internalCluster().client()
      * After shard relocation completes, shuts down the docrep nodes and asserts remote
      * index settings are applied even when the index is in YELLOW state
      */
+    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13737")
     public void testIndexSettingsUpdatedEvenForMisconfiguredReplicas() throws Exception {
         internalCluster().startClusterManagerOnlyNode();
@@ -329,6 +330,7 @@ public void testIndexSettingsUpdatedEvenForMisconfiguredReplicas() throws Except
      * After shard relocation completes, restarts the docrep node holding extra replica shard copy
      * and asserts remote index settings are applied as soon as the docrep replica copy is unassigned
      */
+    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13871")
     public void testIndexSettingsUpdatedWhenDocrepNodeIsRestarted() throws Exception {
         internalCluster().startClusterManagerOnlyNode();
@@ -469,6 +471,7 @@ public void testRemotePathMetadataAddedWithFirstPrimaryMovingToRemote() throws E
      * exclude docrep nodes, assert that remote index path file exists
      * when shards start relocating to the remote nodes.
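+     * Note: currently muted via the {@code @AwaitsFix} annotation below (issue 13939).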
*/ + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13939") public void testRemoteIndexPathFileExistsAfterMigration() throws Exception { String docrepClusterManager = internalCluster().startClusterManagerOnlyNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java index 293691ace2edd..cea653c0ead4b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java @@ -8,14 +8,11 @@ package org.opensearch.remotemigration; -import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; -import org.opensearch.action.delete.DeleteResponse; -import org.opensearch.action.index.IndexResponse; import org.opensearch.client.Client; import org.opensearch.client.Requests; import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; @@ -66,8 +63,8 @@ public void testRemotePrimaryRelocation() throws Exception { AtomicInteger numAutoGenDocs = new AtomicInteger(); final AtomicBoolean finished = new AtomicBoolean(false); - Thread indexingThread = getIndexingThread(finished, numAutoGenDocs); - + AsyncIndexingService asyncIndexingService = new AsyncIndexingService("test"); + asyncIndexingService.startIndexing(); refresh("test"); // add remote node in mixed mode cluster @@ -141,17 +138,19 @@ public void testRemotePrimaryRelocation() throws Exception { logger.info("--> relocation from remote to remote complete"); finished.set(true); - indexingThread.join(); + asyncIndexingService.stopIndexing(); refresh("test"); - OpenSearchAssertions.assertHitCount(client().prepareSearch("test").setTrackTotalHits(true).get(), numAutoGenDocs.get()); + OpenSearchAssertions.assertHitCount( + client().prepareSearch("test").setTrackTotalHits(true).get(), + asyncIndexingService.getIndexedDocs() + ); OpenSearchAssertions.assertHitCount( client().prepareSearch("test") .setTrackTotalHits(true)// extra paranoia ;) .setQuery(QueryBuilders.termQuery("auto", true)) .get(), - numAutoGenDocs.get() + asyncIndexingService.getIndexedDocs() ); - } public void testMixedModeRelocation_RemoteSeedingFail() throws Exception { @@ -165,9 +164,8 @@ public void testMixedModeRelocation_RemoteSeedingFail() throws Exception { client().admin().indices().prepareCreate("test").setSettings(indexSettings()).setMapping("field", "type=text").get(); ensureGreen("test"); - AtomicInteger numAutoGenDocs = new AtomicInteger(); - final AtomicBoolean finished = new AtomicBoolean(false); - Thread indexingThread = getIndexingThread(finished, numAutoGenDocs); + AsyncIndexingService asyncIndexingService = new AsyncIndexingService("test"); + asyncIndexingService.startIndexing(); refresh("test"); @@ -209,27 +207,11 @@ public void testMixedModeRelocation_RemoteSeedingFail() throws Exception { assertEquals(actionGet.getRelocatingShards(), 0); assertEquals(docRepNode, primaryNodeName("test")); - finished.set(true); - indexingThread.join(); + 
asyncIndexingService.stopIndexing(); client().admin() .cluster() .prepareUpdateSettings() .setTransientSettings(Settings.builder().put(RecoverySettings.INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT.getKey(), (String) null)) .get(); } - - private static Thread getIndexingThread(AtomicBoolean finished, AtomicInteger numAutoGenDocs) { - Thread indexingThread = new Thread(() -> { - while (finished.get() == false && numAutoGenDocs.get() < 10_000) { - IndexResponse indexResponse = client().prepareIndex("test").setId("id").setSource("field", "value").get(); - assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); - DeleteResponse deleteResponse = client().prepareDelete("test", "id").get(); - assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); - client().prepareIndex("test").setSource("auto", true).get(); - numAutoGenDocs.incrementAndGet(); - } - }); - indexingThread.start(); - return indexingThread; - } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteReplicaRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteReplicaRecoveryIT.java index 196ecb991bbc0..aae726fe2a6bc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteReplicaRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteReplicaRecoveryIT.java @@ -8,32 +8,27 @@ package org.opensearch.remotemigration; -import com.carrotsearch.randomizedtesting.generators.RandomNumbers; - -import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse; import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.opensearch.action.delete.DeleteResponse; -import org.opensearch.action.index.IndexResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.SegmentReplicationPerGroupStats; import org.opensearch.index.query.QueryBuilders; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.TimeUnit; import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) - public class RemoteReplicaRecoveryIT extends MigrationBaseTestCase { protected int maximumNumberOfShards() { @@ -52,6 +47,7 @@ protected int minimumNumberOfReplicas() { Brings up new replica copies on remote and docrep nodes, when primary is on a remote node Live indexing is happening meanwhile */ + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13473") public void testReplicaRecovery() throws Exception { internalCluster().setBootstrapClusterManagerNodeIndex(0); String 
primaryNode = internalCluster().startNode(); @@ -63,10 +59,8 @@ public void testReplicaRecovery() throws Exception { client().admin().indices().prepareCreate("test").setSettings(indexSettings()).setMapping("field", "type=text").get(); String replicaNode = internalCluster().startNode(); ensureGreen("test"); - - AtomicInteger numAutoGenDocs = new AtomicInteger(); - final AtomicBoolean finished = new AtomicBoolean(false); - Thread indexingThread = getThread(finished, numAutoGenDocs); + AsyncIndexingService asyncIndexingService = new AsyncIndexingService("test"); + asyncIndexingService.startIndexing(); refresh("test"); @@ -78,12 +72,10 @@ public void testReplicaRecovery() throws Exception { updateSettingsRequest.persistentSettings(Settings.builder().put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store")); assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); - String remoteNode2 = internalCluster().startNode(); + internalCluster().startNode(); internalCluster().validateClusterFormed(); // identify the primary - - Thread.sleep(RandomNumbers.randomIntBetween(random(), 0, 2000)); logger.info("--> relocating primary from {} to {} ", primaryNode, remoteNode); client().admin() .cluster() @@ -102,7 +94,6 @@ public void testReplicaRecovery() throws Exception { assertEquals(0, clusterHealthResponse.getRelocatingShards()); logger.info("--> relocation of primary from docrep to remote complete"); - Thread.sleep(RandomNumbers.randomIntBetween(random(), 0, 2000)); logger.info("--> getting up the new replicas now to doc rep node as well as remote node "); // Increase replica count to 3 @@ -129,52 +120,33 @@ public void testReplicaRecovery() throws Exception { logger.info("--> replica is up now on another docrep now as well as remote node"); assertEquals(0, clusterHealthResponse.getRelocatingShards()); + asyncIndexingService.stopIndexing(); + refresh("test"); - Thread.sleep(RandomNumbers.randomIntBetween(random(), 0, 2000)); + // segrep lag should be zero + assertBusy(() -> { + SegmentReplicationStatsResponse segmentReplicationStatsResponse = dataNodeClient().admin() + .indices() + .prepareSegmentReplicationStats("test") + .setDetailed(true) + .execute() + .actionGet(); + SegmentReplicationPerGroupStats perGroupStats = segmentReplicationStatsResponse.getReplicationStats().get("test").get(0); + assertEquals(segmentReplicationStatsResponse.getReplicationStats().size(), 1); + perGroupStats.getReplicaStats().stream().forEach(e -> assertEquals(e.getCurrentReplicationLagMillis(), 0)); + }, 20, TimeUnit.SECONDS); - // Stop replicas on docrep now. 
- // ToDo : Remove once we have dual replication enabled - client().admin() - .indices() - .updateSettings( - new UpdateSettingsRequest("test").settings( - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - .put("index.routing.allocation.exclude._name", primaryNode + "," + replicaNode) - .build() - ) - ) - .get(); - - finished.set(true); - indexingThread.join(); - refresh("test"); - OpenSearchAssertions.assertHitCount(client().prepareSearch("test").setTrackTotalHits(true).get(), numAutoGenDocs.get()); + OpenSearchAssertions.assertHitCount( + client().prepareSearch("test").setTrackTotalHits(true).get(), + asyncIndexingService.getIndexedDocs() + ); OpenSearchAssertions.assertHitCount( client().prepareSearch("test") .setTrackTotalHits(true)// extra paranoia ;) .setQuery(QueryBuilders.termQuery("auto", true)) - // .setPreference("_prefer_nodes:" + (remoteNode+ "," + remoteNode2)) .get(), - numAutoGenDocs.get() + asyncIndexingService.getIndexedDocs() ); } - - private Thread getThread(AtomicBoolean finished, AtomicInteger numAutoGenDocs) { - Thread indexingThread = new Thread(() -> { - while (finished.get() == false && numAutoGenDocs.get() < 100) { - IndexResponse indexResponse = client().prepareIndex("test").setId("id").setSource("field", "value").get(); - assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); - DeleteResponse deleteResponse = client().prepareDelete("test", "id").get(); - assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); - client().prepareIndex("test").setSource("auto", true).get(); - numAutoGenDocs.incrementAndGet(); - logger.info("Indexed {} docs here", numAutoGenDocs.get()); - } - }); - indexingThread.start(); - return indexingThread; - } - } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java index 4b1c91f1d57ca..4e4f6da56d622 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java @@ -17,6 +17,7 @@ import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.snapshots.SnapshotInfo; @@ -42,6 +43,11 @@ protected int minimumNumberOfReplicas() { return 1; } + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); + } + public void testMixedModeAddRemoteNodes() throws Exception { internalCluster().setBootstrapClusterManagerNodeIndex(0); List cmNodes = internalCluster().startNodes(1); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/ResizeIndexMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/ResizeIndexMigrationTestCase.java index b817906a8f828..b804e6dbc1231 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/ResizeIndexMigrationTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/ResizeIndexMigrationTestCase.java @@ -12,6 +12,7 @@ import 
org.opensearch.action.admin.indices.shrink.ResizeType; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchIntegTestCase; @@ -27,6 +28,11 @@ public class ResizeIndexMigrationTestCase extends MigrationBaseTestCase { private final static String DOC_REP_DIRECTION = "docrep"; private final static String MIXED_MODE = "mixed"; + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); + } + /* * This test will verify the resize request failure, when cluster mode is mixed * and index is on DocRep node, and migration to remote store is in progress. diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java index b22817ef19d1b..11260e0914dc5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -57,6 +57,7 @@ import static org.opensearch.cluster.metadata.Metadata.CLUSTER_READ_ONLY_BLOCK; import static org.opensearch.cluster.metadata.Metadata.SETTING_READ_ONLY_SETTING; import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.encodeString; import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE; import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -326,9 +327,7 @@ public void testFullClusterRestoreManifestFilePointsToInvalidIndexMetadataPathTh // Step - 3 Delete index metadata file in remote try { Files.move( - segmentRepoPath.resolve( - RemoteClusterStateService.encodeString(clusterName) + "/cluster-state/" + prevClusterUUID + "/index" - ), + segmentRepoPath.resolve(encodeString(clusterName) + "/cluster-state/" + prevClusterUUID + "/index"), segmentRepoPath.resolve("cluster-state/") ); } catch (IOException e) { @@ -354,10 +353,7 @@ public void testRemoteStateFullRestart() throws Exception { try { Files.move( segmentRepoPath.resolve( - RemoteClusterStateService.encodeString(clusterService().state().getClusterName().value()) - + "/cluster-state/" - + prevClusterUUID - + "/manifest" + encodeString(clusterService().state().getClusterName().value()) + "/cluster-state/" + prevClusterUUID + "/manifest" ), segmentRepoPath.resolve("cluster-state/") ); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java index 4a8b00ea45738..4051bee3e4e5c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java @@ -187,4 +187,27 @@ public void testAggsOnEmptyShards() { // Validate non-global agg does not throw an exception 
assertSearchResponse(client().prepareSearch("idx").addAggregation(stats("value_stats").field("score")).get()); } + + public void testAggsWithTerminateAfter() throws InterruptedException { + assertAcked( + prepareCreate( + "terminate_index", + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ).setMapping("f", "type=keyword").get() + ); + List docs = new ArrayList<>(); + for (int i = 0; i < randomIntBetween(5, 20); ++i) { + docs.add(client().prepareIndex("terminate_index").setSource("f", Integer.toString(i / 3))); + } + indexRandom(true, docs); + + SearchResponse response = client().prepareSearch("terminate_index") + .setSize(2) + .setTerminateAfter(1) + .addAggregation(terms("f").field("f")) + .get(); + assertSearchResponse(response); + assertTrue(response.isTerminatedEarly()); + assertEquals(response.getHits().getHits().length, 1); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java index edf9cd432dda2..f5d018b2ef491 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java @@ -42,10 +42,12 @@ import org.opensearch.index.query.QueryBuilders; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; +import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.search.aggregations.AggregationExecutionException; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.filter.Filter; +import org.opensearch.search.aggregations.bucket.filter.InternalFilters; import org.opensearch.search.aggregations.bucket.terms.Terms.Bucket; import org.opensearch.search.aggregations.metrics.Avg; import org.opensearch.search.aggregations.metrics.ExtendedStats; @@ -999,6 +1001,72 @@ public void testOtherDocCount() { testOtherDocCount(SINGLE_VALUED_FIELD_NAME, MULTI_VALUED_FIELD_NAME); } + public void testDeferredSubAggs() { + // Tests subAgg doc count is the same with different collection modes and additional top level aggs + SearchResponse r1 = client().prepareSearch("idx") + .setSize(0) + .addAggregation( + terms("terms1").collectMode(SubAggCollectionMode.BREADTH_FIRST) + .field("s_value") + .size(2) + .subAggregation(AggregationBuilders.filters("filter", QueryBuilders.boolQuery())) + ) + .addAggregation(AggregationBuilders.min("min").field("constant")) + .get(); + + SearchResponse r2 = client().prepareSearch("idx") + .setSize(0) + .addAggregation( + terms("terms1").collectMode(SubAggCollectionMode.DEPTH_FIRST) + .field("s_value") + .size(2) + .subAggregation(AggregationBuilders.filters("filter", QueryBuilders.boolQuery())) + ) + .addAggregation(AggregationBuilders.min("min").field("constant")) + .get(); + + SearchResponse r3 = client().prepareSearch("idx") + .setSize(0) + .addAggregation( + terms("terms1").collectMode(SubAggCollectionMode.BREADTH_FIRST) + .field("s_value") + .size(2) + .subAggregation(AggregationBuilders.filters("filter", QueryBuilders.boolQuery())) + ) + .get(); + + SearchResponse r4 = client().prepareSearch("idx") + .setSize(0) + .addAggregation( + terms("terms1").collectMode(SubAggCollectionMode.DEPTH_FIRST) + 
.field("s_value") + .size(2) + .subAggregation(AggregationBuilders.filters("filter", QueryBuilders.boolQuery())) + ) + .get(); + + assertNotNull(r1.getAggregations().get("terms1")); + assertNotNull(r2.getAggregations().get("terms1")); + assertNotNull(r3.getAggregations().get("terms1")); + assertNotNull(r4.getAggregations().get("terms1")); + + Terms terms = r1.getAggregations().get("terms1"); + Bucket b1 = terms.getBucketByKey("val0"); + InternalFilters f1 = b1.getAggregations().get("filter"); + long docCount1 = f1.getBuckets().get(0).getDocCount(); + Bucket b2 = terms.getBucketByKey("val1"); + InternalFilters f2 = b2.getAggregations().get("filter"); + long docCount2 = f1.getBuckets().get(0).getDocCount(); + + for (SearchResponse response : new SearchResponse[] { r2, r3, r4 }) { + terms = response.getAggregations().get("terms1"); + f1 = terms.getBucketByKey(b1.getKeyAsString()).getAggregations().get("filter"); + f2 = terms.getBucketByKey(b2.getKeyAsString()).getAggregations().get("filter"); + assertEquals(docCount1, f1.getBuckets().get(0).getDocCount()); + assertEquals(docCount2, f2.getBuckets().get(0).getDocCount()); + } + } + /** * Make sure that a request using a deterministic script or not using a script get cached. * Ensure requests using nondeterministic scripts do not get cached. diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java index db4ee3571d141..b2ed689622e7d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java @@ -34,6 +34,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; @@ -59,6 +60,7 @@ import static java.util.Collections.emptyMap; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CARDINALITY_AGGREGATION_PRUNING_THRESHOLD; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.cardinality; import static org.opensearch.search.aggregations.AggregationBuilders.global; @@ -255,6 +257,36 @@ public void testSingleValuedString() throws Exception { assertCount(count, numDocs); } + public void testDisableDynamicPruning() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value")) + .get(); + assertSearchResponse(response); + + Cardinality count1 = response.getAggregations().get("cardinality"); + + final ClusterUpdateSettingsResponse updateSettingResponse = client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CARDINALITY_AGGREGATION_PRUNING_THRESHOLD.getKey(), 0)) + .get(); + assertEquals(updateSettingResponse.getTransientSettings().get(CARDINALITY_AGGREGATION_PRUNING_THRESHOLD.getKey()), "0"); + + response = client().prepareSearch("idx") + 
.addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value")) + .get(); + assertSearchResponse(response); + Cardinality count2 = response.getAggregations().get("cardinality"); + + assertEquals(count1, count2); + + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putNull(CARDINALITY_AGGREGATION_PRUNING_THRESHOLD.getKey())) + .get(); + } + public void testSingleValuedNumeric() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField())) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java index 01ad06757640c..a58db51780826 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java @@ -1914,8 +1914,14 @@ public void testRangeQueryWithTimeZone() throws Exception { * Test range with a custom locale, e.g. "de" in this case. Documents here mention the day of week * as "Mi" for "Mittwoch (Wednesday" and "Do" for "Donnerstag (Thursday)" and the month in the query * as "Dez" for "Dezember (December)". + * Note: this test currently needs the JVM arg `-Djava.locale.providers=SPI,COMPAT` to be set. + * When running with gradle this is done implicitly through the BuildPlugin, but when running from + * an IDE this might need to be set manually in the run configuration. See also CONTRIBUTING.md section + * on "Configuring IDEs And Running Tests". */ public void testRangeQueryWithLocaleMapping() throws Exception { + assert ("SPI,COMPAT".equals(System.getProperty("java.locale.providers"))) : "`-Djava.locale.providers=SPI,COMPAT` needs to be set"; + assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -1932,21 +1938,17 @@ public void testRangeQueryWithLocaleMapping() throws Exception { indexRandom( true, - client().prepareIndex("test").setId("1").setSource("date_field", "Mi., 06 Dez. 2000 02:55:00 -0800"), - client().prepareIndex("test").setId("2").setSource("date_field", "Do., 07 Dez. 2000 02:55:00 -0800") + client().prepareIndex("test").setId("1").setSource("date_field", "Mi, 06 Dez 2000 02:55:00 -0800"), + client().prepareIndex("test").setId("2").setSource("date_field", "Do, 07 Dez 2000 02:55:00 -0800") ); SearchResponse searchResponse = client().prepareSearch("test") - .setQuery( - QueryBuilders.rangeQuery("date_field").gte("Di., 05 Dez. 2000 02:55:00 -0800").lte("Do., 07 Dez. 2000 00:00:00 -0800") - ) + .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Do, 07 Dez 2000 00:00:00 -0800")) .get(); assertHitCount(searchResponse, 1L); searchResponse = client().prepareSearch("test") - .setQuery( - QueryBuilders.rangeQuery("date_field").gte("Di., 05 Dez. 2000 02:55:00 -0800").lte("Fr., 08 Dez. 
2000 00:00:00 -0800") - ) + .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Fr, 08 Dez 2000 00:00:00 -0800")) .get(); assertHitCount(searchResponse, 2L); } diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 5e2b62614fc47..16c15f553951c 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -441,6 +441,7 @@ import org.opensearch.rest.action.cat.RestTemplatesAction; import org.opensearch.rest.action.cat.RestThreadPoolAction; import org.opensearch.rest.action.document.RestBulkAction; +import org.opensearch.rest.action.document.RestBulkStreamingAction; import org.opensearch.rest.action.document.RestDeleteAction; import org.opensearch.rest.action.document.RestGetAction; import org.opensearch.rest.action.document.RestGetSourceAction; @@ -887,6 +888,7 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestTermVectorsAction()); registerHandler.accept(new RestMultiTermVectorsAction()); registerHandler.accept(new RestBulkAction(settings)); + registerHandler.accept(new RestBulkStreamingAction(settings)); registerHandler.accept(new RestUpdateAction()); registerHandler.accept(new RestSearchAction()); diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index 9bf4a4b1e18f1..f0fc05c595d6f 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -51,6 +51,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.tasks.resourcetracker.TaskResourceInfo; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.internal.AliasFilter; @@ -469,6 +470,10 @@ private void onRequestEnd(SearchRequestContext searchRequestContext) { this.searchRequestContext.getSearchRequestOperationsListener().onRequestEnd(this, searchRequestContext); } + private void onRequestFailure(SearchRequestContext searchRequestContext) { + this.searchRequestContext.getSearchRequestOperationsListener().onRequestFailure(this, searchRequestContext); + } + private void executePhase(SearchPhase phase) { Span phaseSpan = tracer.startSpan(SpanCreationContext.server().name("[phase/" + phase.getName() + "]")); try (final SpanScope scope = tracer.withSpanInScope(phaseSpan)) { @@ -507,6 +512,7 @@ ShardSearchFailure[] buildShardFailures() { private void onShardFailure(final int shardIndex, @Nullable SearchShardTarget shard, final SearchShardIterator shardIt, Exception e) { // we always add the shard failure for a specific shard instance // we do make sure to clean it on a successful response from a shard + setPhaseResourceUsages(); onShardFailure(shardIndex, shard, e); SearchShardTarget nextShard = FailAwareWeightedRouting.getInstance() .findNext(shardIt, clusterState, e, () -> totalOps.incrementAndGet()); @@ -618,9 +624,15 @@ protected void onShardResult(Result result, SearchShardIterator shardIt) { if (logger.isTraceEnabled()) { logger.trace("got first-phase result from {}", result != null ? 
result.getSearchShardTarget() : null); } + this.setPhaseResourceUsages(); results.consumeResult(result, () -> onShardResultConsumed(result, shardIt)); } + public void setPhaseResourceUsages() { + TaskResourceInfo taskResourceUsage = searchRequestContext.getTaskResourceUsageSupplier().get(); + searchRequestContext.recordPhaseResourceUsage(taskResourceUsage); + } + private void onShardResultConsumed(Result result, SearchShardIterator shardIt) { successfulOps.incrementAndGet(); // clean a previous error on this shard group (note, this code will be serialized on the same shardIndex value level @@ -751,6 +763,7 @@ public void sendSearchResponse(InternalSearchResponse internalSearchResponse, At @Override public final void onPhaseFailure(SearchPhase phase, String msg, Throwable cause) { + setPhaseResourceUsages(); if (currentPhaseHasLifecycle) { this.searchRequestContext.getSearchRequestOperationsListener().onPhaseFailure(this, cause); } @@ -780,6 +793,7 @@ private void raisePhaseFailure(SearchPhaseExecutionException exception) { }); } Releasables.close(releasables); + onRequestFailure(searchRequestContext); listener.onFailure(exception); } diff --git a/server/src/main/java/org/opensearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/opensearch/action/search/FetchSearchPhase.java index ebb2f33f8f37d..2ad7f8a29896c 100644 --- a/server/src/main/java/org/opensearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/FetchSearchPhase.java @@ -240,6 +240,7 @@ private void executeFetch( public void innerOnResponse(FetchSearchResult result) { try { progressListener.notifyFetchResult(shardIndex); + context.setPhaseResourceUsages(); counter.onResult(result); } catch (Exception e) { context.onPhaseFailure(FetchSearchPhase.this, "", e); @@ -254,6 +255,7 @@ public void onFailure(Exception e) { e ); progressListener.notifyFetchFailure(shardIndex, shardTarget, e); + context.setPhaseResourceUsages(); counter.onFailure(shardIndex, shardTarget, e); } finally { // the search context might not be cleared on the node where the fetch was executed for example diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java index df451e0745e3c..55f2a22749e70 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java @@ -150,4 +150,9 @@ default void sendReleaseSearchContext( * Registers a {@link Releasable} that will be closed when the search request finishes or fails. 
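+     * (In {@code AbstractSearchAsyncAction} these are closed in bulk via {@code Releasables.close}; see {@code raisePhaseFailure}.)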
*/ void addReleasable(Releasable releasable); + + /** + * Set the resource usage info for this phase + */ + void setPhaseResourceUsages(); } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java b/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java index b8bbde65ca6bc..111d9c64550b3 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java @@ -8,13 +8,20 @@ package org.opensearch.action.search; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.search.TotalHits; import org.opensearch.common.annotation.InternalApi; +import org.opensearch.core.tasks.resourcetracker.TaskResourceInfo; +import java.util.ArrayList; import java.util.EnumMap; import java.util.HashMap; +import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.function.Supplier; /** * This class holds request-level context for search queries at the coordinator node @@ -23,6 +30,7 @@ */ @InternalApi public class SearchRequestContext { + private static final Logger logger = LogManager.getLogger(); private final SearchRequestOperationsListener searchRequestOperationsListener; private long absoluteStartNanos; private final Map phaseTookMap; @@ -30,13 +38,21 @@ public class SearchRequestContext { private final EnumMap shardStats; private final SearchRequest searchRequest; - - SearchRequestContext(final SearchRequestOperationsListener searchRequestOperationsListener, final SearchRequest searchRequest) { + private final LinkedBlockingQueue phaseResourceUsage; + private final Supplier taskResourceUsageSupplier; + + SearchRequestContext( + final SearchRequestOperationsListener searchRequestOperationsListener, + final SearchRequest searchRequest, + final Supplier taskResourceUsageSupplier + ) { this.searchRequestOperationsListener = searchRequestOperationsListener; this.absoluteStartNanos = System.nanoTime(); this.phaseTookMap = new HashMap<>(); this.shardStats = new EnumMap<>(ShardStatsFieldNames.class); this.searchRequest = searchRequest; + this.phaseResourceUsage = new LinkedBlockingQueue<>(); + this.taskResourceUsageSupplier = taskResourceUsageSupplier; } SearchRequestOperationsListener getSearchRequestOperationsListener() { @@ -107,6 +123,24 @@ String formattedShardStats() { ); } } + + public Supplier getTaskResourceUsageSupplier() { + return taskResourceUsageSupplier; + } + + public void recordPhaseResourceUsage(TaskResourceInfo usage) { + if (usage != null) { + this.phaseResourceUsage.add(usage); + } + } + + public List getPhaseResourceUsage() { + return new ArrayList<>(phaseResourceUsage); + } + + public SearchRequest getRequest() { + return searchRequest; + } } enum ShardStatsFieldNames { diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java index 53efade174502..61f19977ae5ce 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java @@ -41,16 +41,18 @@ protected SearchRequestOperationsListener(final boolean enabled) { this.enabled = enabled; } - protected abstract void onPhaseStart(SearchPhaseContext context); + protected void onPhaseStart(SearchPhaseContext 
context) {}; - protected abstract void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext); + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {}; - protected abstract void onPhaseFailure(SearchPhaseContext context, Throwable cause); + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) {}; protected void onRequestStart(SearchRequestContext searchRequestContext) {} protected void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} + protected void onRequestFailure(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} + protected boolean isEnabled(SearchRequest searchRequest) { return isEnabled(); } @@ -133,6 +135,17 @@ public void onRequestEnd(SearchPhaseContext context, SearchRequestContext search } } + @Override + public void onRequestFailure(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + for (SearchRequestOperationsListener listener : listeners) { + try { + listener.onRequestFailure(context, searchRequestContext); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onRequestFailure listener [{}] failed", listener), e); + } + } + } + public List getListeners() { return listeners; } diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 143b01af3f62f..6e380775355a2 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -87,6 +87,7 @@ import org.opensearch.search.profile.SearchProfileShardResults; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.telemetry.metrics.MetricsRegistry; import org.opensearch.telemetry.tracing.Span; import org.opensearch.telemetry.tracing.SpanBuilder; @@ -186,6 +187,7 @@ public class TransportSearchAction extends HandledTransportAction) SearchRequest::new); this.client = client; @@ -224,6 +227,7 @@ public TransportSearchAction( clusterService.getClusterSettings() .addSettingsUpdateConsumer(SEARCH_QUERY_METRICS_ENABLED_SETTING, this::setSearchQueryMetricsEnabled); this.tracer = tracer; + this.taskResourceTrackingService = taskResourceTrackingService; } private void setSearchQueryMetricsEnabled(boolean searchQueryMetricsEnabled) { @@ -451,7 +455,11 @@ private void executeRequest( logger, TraceableSearchRequestOperationsListener.create(tracer, requestSpan) ); - SearchRequestContext searchRequestContext = new SearchRequestContext(requestOperationsListeners, originalSearchRequest); + SearchRequestContext searchRequestContext = new SearchRequestContext( + requestOperationsListeners, + originalSearchRequest, + taskResourceTrackingService::getTaskResourceUsageFromThreadContext + ); searchRequestContext.getSearchRequestOperationsListener().onRequestStart(searchRequestContext); PipelinedRequest searchRequest; diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index f56c906db1002..c7fd263bda56a 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -84,7 +84,6 @@ import org.opensearch.common.settings.Setting; import 
org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.set.Sets; import org.opensearch.core.ParseField; @@ -384,9 +383,7 @@ public static Collection createAllocationDeciders( addAllocationDecider(deciders, new AwarenessAllocationDecider(settings, clusterSettings)); addAllocationDecider(deciders, new NodeLoadAwareAllocationDecider(settings, clusterSettings)); addAllocationDecider(deciders, new TargetPoolAllocationDecider()); - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING)) { - addAllocationDecider(deciders, new RemoteStoreMigrationAllocationDecider(settings, clusterSettings)); - } + addAllocationDecider(deciders, new RemoteStoreMigrationAllocationDecider(settings, clusterSettings)); clusterPlugins.stream() .flatMap(p -> p.createAllocationDeciders(settings, clusterSettings).stream()) diff --git a/server/src/main/java/org/opensearch/cluster/DiffableUtils.java b/server/src/main/java/org/opensearch/cluster/DiffableUtils.java index a38fc81bebc08..d21cd354bf659 100644 --- a/server/src/main/java/org/opensearch/cluster/DiffableUtils.java +++ b/server/src/main/java/org/opensearch/cluster/DiffableUtils.java @@ -494,6 +494,18 @@ public void writeDiff(Diff value, StreamOutput out) throws IOException { * @opensearch.internal */ public abstract static class NonDiffableValueSerializer implements ValueSerializer { + private static final NonDiffableValueSerializer ABSTRACT_INSTANCE = new NonDiffableValueSerializer<>() { + @Override + public void write(Object value, StreamOutput out) { + throw new UnsupportedOperationException(); + } + + @Override + public Object read(StreamInput in, Object key) { + throw new UnsupportedOperationException(); + } + }; + @Override public boolean supportsDiffableValues() { return false; @@ -513,6 +525,10 @@ public void writeDiff(Diff value, StreamOutput out) throws IOException { public Diff readDiff(StreamInput in, K key) throws IOException { throw new UnsupportedOperationException(); } + + public static NonDiffableValueSerializer getAbstractInstance() { + return ABSTRACT_INSTANCE; + } } /** diff --git a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java index 72a3519aca6f8..4c76858107ed8 100644 --- a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java @@ -45,6 +45,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Objects; /** * Information passed during repository cleanup @@ -118,6 +119,24 @@ public Version getMinimalSupportedVersion() { return LegacyESVersion.fromId(7040099); } + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + RepositoryCleanupInProgress that = (RepositoryCleanupInProgress) o; + return entries.equals(that.entries); + } + + @Override + public int hashCode() { + return 31 + entries.hashCode(); + } + /** * Entry in the collection. 
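+     * Each entry records the repository name and the repository state id of the cleanup operation.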
* @@ -155,6 +174,23 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(repositoryStateId); } + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + RepositoryCleanupInProgress.Entry that = (RepositoryCleanupInProgress.Entry) o; + return repository.equals(that.repository) && repositoryStateId == that.repositoryStateId; + } + + @Override + public int hashCode() { + return Objects.hash(repository, repositoryStateId); + } + @Override public String toString() { return "{" + repository + '}' + '{' + repositoryStateId + '}'; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java index 987a3e3ffa7d3..7fa63ae8abc62 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java @@ -39,6 +39,7 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; import java.io.Closeable; @@ -52,6 +53,7 @@ import java.util.Set; import static org.opensearch.cluster.coordination.Coordinator.ZEN1_BWC_TERM; +import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; /** @@ -79,6 +81,7 @@ public class CoordinationState { private VotingConfiguration lastPublishedConfiguration; private VoteCollection publishVotes; private final boolean isRemoteStateEnabled; + private final boolean isRemotePublicationEnabled; public CoordinationState( DiscoveryNode localNode, @@ -102,6 +105,12 @@ public CoordinationState( .getLastAcceptedConfiguration(); this.publishVotes = new VoteCollection(); this.isRemoteStateEnabled = isRemoteStoreClusterStateEnabled(settings); + this.isRemotePublicationEnabled = FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) + && localNode.isRemoteStatePublicationEnabled(); + } + + public boolean isRemotePublicationEnabled() { + return isRemotePublicationEnabled; } public long getCurrentTerm() { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index f53e6837a67f5..87f02c6891be6 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -85,6 +85,7 @@ import org.opensearch.discovery.PeerFinder; import org.opensearch.discovery.SeedHostsProvider; import org.opensearch.discovery.SeedHostsResolver; +import org.opensearch.gateway.remote.RemoteClusterStateService; import org.opensearch.monitor.NodeHealthService; import org.opensearch.monitor.StatusInfo; import org.opensearch.node.remotestore.RemoteStoreNodeService; @@ -209,7 +210,8 @@ public Coordinator( NodeHealthService nodeHealthService, PersistedStateRegistry persistedStateRegistry, RemoteStoreNodeService remoteStoreNodeService, - ClusterManagerMetrics clusterManagerMetrics + ClusterManagerMetrics clusterManagerMetrics, + RemoteClusterStateService remoteClusterStateService ) { this.settings = settings; this.transportService = transportService; @@ -261,7 +263,8 @@ 
public Coordinator( transportService, namedWriteableRegistry, this::handlePublishRequest, - this::handleApplyCommit + this::handleApplyCommit, + remoteClusterStateService ); this.leaderChecker = new LeaderChecker( settings, @@ -1330,7 +1333,9 @@ assert getLocalNode().equals(clusterState.getNodes().get(getLocalNode().getId()) + clusterState; final PublicationTransportHandler.PublicationContext publicationContext = publicationHandler.newPublicationContext( - clusterChangedEvent + clusterChangedEvent, + coordinationState.get().isRemotePublicationEnabled(), + persistedStateRegistry ); final PublishRequest publishRequest = coordinationState.get().handleClientValue(clusterState); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java index 1fdaeead0d28d..36eabd51ffda1 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java @@ -40,6 +40,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.Diff; import org.opensearch.cluster.IncompatibleClusterStateVersionException; +import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.core.action.ActionListener; @@ -47,6 +48,9 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.transport.TransportResponse; +import org.opensearch.gateway.GatewayMetaState.RemotePersistedState; +import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.gateway.remote.RemoteClusterStateService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.BytesTransportRequest; import org.opensearch.transport.TransportChannel; @@ -74,6 +78,7 @@ public class PublicationTransportHandler { private static final Logger logger = LogManager.getLogger(PublicationTransportHandler.class); public static final String PUBLISH_STATE_ACTION_NAME = "internal:cluster/coordination/publish_state"; + public static final String PUBLISH_REMOTE_STATE_ACTION_NAME = "internal:cluster/coordination/publish_remote_state"; public static final String COMMIT_STATE_ACTION_NAME = "internal:cluster/coordination/commit_state"; private final TransportService transportService; @@ -97,16 +102,19 @@ public class PublicationTransportHandler { private final TransportRequestOptions stateRequestOptions = TransportRequestOptions.builder() .withType(TransportRequestOptions.Type.STATE) .build(); + private final RemoteClusterStateService remoteClusterStateService; public PublicationTransportHandler( TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, Function handlePublishRequest, - BiConsumer> handleApplyCommit + BiConsumer> handleApplyCommit, + RemoteClusterStateService remoteClusterStateService ) { this.transportService = transportService; this.namedWriteableRegistry = namedWriteableRegistry; this.handlePublishRequest = handlePublishRequest; + this.remoteClusterStateService = remoteClusterStateService; transportService.registerRequestHandler( PUBLISH_STATE_ACTION_NAME, @@ -117,6 +125,15 @@ public PublicationTransportHandler( (request, channel, task) -> 
channel.sendResponse(handleIncomingPublishRequest(request)) ); + transportService.registerRequestHandler( + PUBLISH_REMOTE_STATE_ACTION_NAME, + ThreadPool.Names.GENERIC, + false, + false, + RemotePublishRequest::new, + (request, channel, task) -> channel.sendResponse(handleIncomingRemotePublishRequest(request)) + ); + transportService.registerRequestHandler( COMMIT_STATE_ACTION_NAME, ThreadPool.Names.GENERIC, @@ -211,6 +228,74 @@ private PublishWithJoinResponse handleIncomingPublishRequest(BytesTransportReque } } + // package private for testing + PublishWithJoinResponse handleIncomingRemotePublishRequest(RemotePublishRequest request) throws IOException { + if (transportService.getLocalNode().equals(request.getSourceNode())) { + return acceptRemoteStateOnLocalNode(request); + } + // TODO Make cluster state download non-blocking: https://github.com/opensearch-project/OpenSearch/issues/14102 + ClusterMetadataManifest manifest = remoteClusterStateService.getClusterMetadataManifestByFileName( + request.getClusterUUID(), + request.getManifestFile() + ); + if (manifest == null) { + throw new IllegalStateException("Publication failed as manifest was not found for " + request); + } + boolean applyFullState = false; + final ClusterState lastSeen = lastSeenClusterState.get(); + if (lastSeen == null) { + logger.debug(() -> "Diff cannot be applied as there is no last cluster state"); + applyFullState = true; + } else if (manifest.getDiffManifest() == null) { + logger.trace(() -> "There is no diff in the manifest"); + applyFullState = true; + } else if (manifest.getDiffManifest().getFromStateUUID().equals(lastSeen.stateUUID()) == false) { + logger.debug(() -> "Last cluster state not compatible with the diff"); + applyFullState = true; + } + + if (applyFullState == true) { + logger.debug( + () -> new ParameterizedMessage( + "Downloading full cluster state for term {}, version {}, stateUUID {}", + manifest.getClusterTerm(), + manifest.getStateVersion(), + manifest.getStateUUID() + ) + ); + ClusterState clusterState = remoteClusterStateService.getClusterStateForManifest( + request.getClusterName(), + manifest, + transportService.getLocalNode().getId(), + true + ); + fullClusterStateReceivedCount.incrementAndGet(); + final PublishWithJoinResponse response = acceptState(clusterState); + lastSeenClusterState.set(clusterState); + return response; + } else { + logger.debug( + () -> new ParameterizedMessage( + "Downloading diff cluster state for term {}, version {}, previousUUID {}, current UUID {}", + manifest.getClusterTerm(), + manifest.getStateVersion(), + manifest.getDiffManifest().getFromStateUUID(), + manifest.getStateUUID() + ) + ); + ClusterState clusterState = remoteClusterStateService.getClusterStateUsingDiff( + request.getClusterName(), + manifest, + lastSeen, + transportService.getLocalNode().getId() + ); + compatibleClusterStateDiffReceivedCount.incrementAndGet(); + final PublishWithJoinResponse response = acceptState(clusterState); + lastSeenClusterState.compareAndSet(lastSeen, clusterState); + return response; + } + } + private PublishWithJoinResponse acceptState(ClusterState incomingState) { // if the state is coming from the current node, use original request instead (see currentPublishRequestToSelf for explanation) if (transportService.getLocalNode().equals(incomingState.nodes().getClusterManagerNode())) { @@ -224,8 +309,35 @@ private PublishWithJoinResponse acceptState(ClusterState incomingState) { return handlePublishRequest.apply(new PublishRequest(incomingState)); } - public 
PublicationContext newPublicationContext(ClusterChangedEvent clusterChangedEvent) { - final PublicationContext publicationContext = new PublicationContext(clusterChangedEvent); + private PublishWithJoinResponse acceptRemoteStateOnLocalNode(RemotePublishRequest remotePublishRequest) { + final PublishRequest publishRequest = currentPublishRequestToSelf.get(); + if (publishRequest == null + || publishRequest.getAcceptedState().coordinationMetadata().term() != remotePublishRequest.term + || publishRequest.getAcceptedState().version() != remotePublishRequest.version) { + logger.debug( + () -> new ParameterizedMessage( + "Publication failure for current publish request : {} and remote publish request: {}", + publishRequest, + remotePublishRequest + ) + ); + throw new IllegalStateException("publication to self failed for " + remotePublishRequest); + } + PublishWithJoinResponse publishWithJoinResponse = handlePublishRequest.apply(publishRequest); + lastSeenClusterState.set(publishRequest.getAcceptedState()); + return publishWithJoinResponse; + } + + public PublicationContext newPublicationContext( + ClusterChangedEvent clusterChangedEvent, + boolean isRemotePublicationEnabled, + PersistedStateRegistry persistedStateRegistry + ) { + final PublicationContext publicationContext = new PublicationContext( + clusterChangedEvent, + isRemotePublicationEnabled, + persistedStateRegistry + ); // Build the serializations we expect to need now, early in the process, so that an error during serialization fails the publication // straight away. This isn't watertight since we send diffs on a best-effort basis and may fall back to sending a full state (and @@ -234,6 +346,16 @@ public PublicationContext newPublicationContext(ClusterChangedEvent clusterChang return publicationContext; } + // package private for testing + void setCurrentPublishRequestToSelf(PublishRequest publishRequest) { + this.currentPublishRequestToSelf.set(publishRequest); + } + + // package private for testing + void setLastSeenClusterState(ClusterState clusterState) { + this.lastSeenClusterState.set(clusterState); + } + private static BytesReference serializeFullClusterState(ClusterState clusterState, Version nodeVersion) throws IOException { final BytesReference serializedState = CompressedStreamUtils.createCompressedStream(nodeVersion, stream -> { stream.writeBoolean(true); @@ -270,12 +392,20 @@ public class PublicationContext { private final boolean sendFullVersion; private final Map serializedStates = new HashMap<>(); private final Map serializedDiffs = new HashMap<>(); + private final boolean sendRemoteState; + private final PersistedStateRegistry persistedStateRegistry; - PublicationContext(ClusterChangedEvent clusterChangedEvent) { + PublicationContext( + ClusterChangedEvent clusterChangedEvent, + boolean isRemotePublicationEnabled, + PersistedStateRegistry persistedStateRegistry + ) { discoveryNodes = clusterChangedEvent.state().nodes(); newState = clusterChangedEvent.state(); previousState = clusterChangedEvent.previousState(); sendFullVersion = previousState.getBlocks().disableStatePersistence(); + sendRemoteState = isRemotePublicationEnabled; + this.persistedStateRegistry = persistedStateRegistry; } void buildDiffAndSerializeStates() { @@ -339,7 +469,11 @@ public void onFailure(Exception e) { } else { responseActionListener = listener; } - if (sendFullVersion || previousState.nodes().nodeExists(destination) == false) { + // TODO Decide to send remote state before starting publication by checking remote publication on all nodes + if 
(sendRemoteState && destination.isRemoteStatePublicationEnabled()) { + logger.trace("sending remote cluster state version [{}] to [{}]", newState.version(), destination); + sendRemoteClusterState(destination, publishRequest.getAcceptedState(), responseActionListener); + } else if (sendFullVersion || previousState.nodes().nodeExists(destination) == false) { logger.trace("sending full cluster state version [{}] to [{}]", newState.version(), destination); sendFullClusterState(destination, responseActionListener); } else { @@ -384,6 +518,61 @@ public String executor() { ); } + private void sendRemoteClusterState( + final DiscoveryNode destination, + final ClusterState clusterState, + final ActionListener listener + ) { + try { + final String manifestFileName = ((RemotePersistedState) persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE)) + .getLastUploadedManifestFile(); + final RemotePublishRequest remotePublishRequest = new RemotePublishRequest( + discoveryNodes.getLocalNode(), + clusterState.term(), + clusterState.getVersion(), + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID(), + manifestFileName + ); + final Consumer transportExceptionHandler = exp -> { + logger.debug(() -> new ParameterizedMessage("failed to send remote cluster state to {}", destination), exp); + listener.onFailure(exp); + }; + final TransportResponseHandler responseHandler = new TransportResponseHandler<>() { + + @Override + public PublishWithJoinResponse read(StreamInput in) throws IOException { + return new PublishWithJoinResponse(in); + } + + @Override + public void handleResponse(PublishWithJoinResponse response) { + listener.onResponse(response); + } + + @Override + public void handleException(TransportException exp) { + transportExceptionHandler.accept(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.GENERIC; + } + }; + transportService.sendRequest( + destination, + PUBLISH_REMOTE_STATE_ACTION_NAME, + remotePublishRequest, + stateRequestOptions, + responseHandler + ); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("error sending remote cluster state to {}", destination), e); + listener.onFailure(e); + } + } + private void sendFullClusterState(DiscoveryNode destination, ActionListener listener) { BytesReference bytes = serializedStates.get(destination.getVersion()); if (bytes == null) { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/RemotePublishRequest.java b/server/src/main/java/org/opensearch/cluster/coordination/RemotePublishRequest.java new file mode 100644 index 0000000000000..9461c5ee63627 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/coordination/RemotePublishRequest.java @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.coordination; + +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Send the publish request with the remote cluster state details + * @opensearch.internal + */ +public class RemotePublishRequest extends TermVersionRequest { + + private final String clusterName; + private final String clusterUUID; + private final String manifestFile; + + public RemotePublishRequest( + DiscoveryNode sourceNode, + long term, + long version, + String clusterName, + String clusterUUID, + String manifestFile + ) { + super(sourceNode, term, version); + this.clusterName = clusterName; + this.clusterUUID = clusterUUID; + this.manifestFile = manifestFile; + } + + public RemotePublishRequest(StreamInput in) throws IOException { + super(in); + this.clusterName = in.readString(); + this.clusterUUID = in.readString(); + this.manifestFile = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(clusterName); + out.writeString(clusterUUID); + out.writeString(manifestFile); + } + + @Override + public String toString() { + return "RemotePublishRequest{" + + "term=" + + term + + ", version=" + + version + + ", clusterName=" + + clusterName + + ", clusterUUID=" + + clusterUUID + + ", sourceNode=" + + sourceNode + + ", manifestFile=" + + manifestFile + + '}'; + } + + public String getClusterName() { + return clusterName; + } + + public String getClusterUUID() { + return clusterUUID; + } + + public String getManifestFile() { + return manifestFile; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java b/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java index a8102182576ff..5865891c8a7f9 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java @@ -66,7 +66,7 @@ public static DiffableStringMap readFrom(StreamInput in) throws IOException { return map.isEmpty() ? 
EMPTY : new DiffableStringMap(map); } - DiffableStringMap(final Map<String, String> map) { + public DiffableStringMap(final Map<String, String> map) { this.innerMap = Collections.unmodifiableMap(map); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index e1aa5626f36c1..a0ef8de07fbf2 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -973,10 +973,18 @@ public static boolean isSettingsMetadataEqual(Metadata metadata1, Metadata metadata2) { return metadata1.persistentSettings.equals(metadata2.persistentSettings); } + public static boolean isTransientSettingsMetadataEqual(Metadata metadata1, Metadata metadata2) { + return metadata1.transientSettings.equals(metadata2.transientSettings); + } + public static boolean isTemplatesMetadataEqual(Metadata metadata1, Metadata metadata2) { return metadata1.templates.equals(metadata2.templates); } + public static boolean isHashesOfConsistentSettingsEqual(Metadata metadata1, Metadata metadata2) { + return metadata1.hashesOfConsistentSettings.equals(metadata2.hashesOfConsistentSettings); + } + public static boolean isCustomMetadataEqual(Metadata metadata1, Metadata metadata2) { int customCount1 = 0; for (Map.Entry<String, Custom> cursor : metadata1.customs.entrySet()) { diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index 5226e9570ac14..690621c2e7bca 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -61,7 +61,9 @@ import java.util.stream.Stream; import static org.opensearch.node.NodeRoleSettings.NODE_ROLES_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; /** * A discovery node represents a node that is part of the cluster. @@ -470,6 +472,18 @@ public boolean isRemoteStoreNode() { return this.getAttributes().keySet().stream().anyMatch(key -> key.startsWith(REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX)); } + /** + * Returns whether remote cluster state publication is enabled on this node. + * @return true if the node has both the remote cluster state repository attribute and the remote routing table repository attribute + */ + public boolean isRemoteStatePublicationEnabled() { + return this.getAttributes() + .keySet() + .stream() + .anyMatch(key -> (key.equals(REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY))) + && this.getAttributes().keySet().stream().anyMatch(key -> key.equals(REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY)); + } + /** * Returns a set of all the roles that the node has. The roles are returned in sorted order by the role name. *

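The remote publication path above decides between downloading the full cluster state and applying a diff: it falls back to the full state when the node has no last-seen state, when the manifest carries no diff, or when the diff was computed from a state UUID other than the one last seen. A minimal sketch of that decision under those assumptions; ManifestLike and ClusterStateLike are hypothetical stand-ins, not the real ClusterMetadataManifest and ClusterState:

```java
import java.util.Objects;

// Condensed model of the full-vs-diff decision in handleIncomingRemotePublishRequest.
public class RemotePublishDecision {

    // diffFromStateUUID == null models a manifest that carries no diff manifest
    record ManifestLike(String stateUUID, String diffFromStateUUID) {}

    record ClusterStateLike(String stateUUID) {}

    // Returns true when the full remote state must be downloaded instead of a diff.
    static boolean mustApplyFullState(ClusterStateLike lastSeen, ManifestLike manifest) {
        if (lastSeen == null) {
            return true; // nothing to apply a diff on top of
        }
        if (manifest.diffFromStateUUID() == null) {
            return true; // manifest carries no diff
        }
        // a diff only applies if it was computed from the state this node already has
        return Objects.equals(manifest.diffFromStateUUID(), lastSeen.stateUUID()) == false;
    }

    public static void main(String[] args) {
        ManifestLike diff = new ManifestLike("uuid-B", "uuid-A");
        System.out.println(mustApplyFullState(null, diff)); // true: no last-seen state
        System.out.println(mustApplyFullState(new ClusterStateLike("uuid-A"), diff)); // false: diff applies
        System.out.println(mustApplyFullState(new ClusterStateLike("uuid-X"), diff)); // true: stale last-seen state
    }
}
```

In the real handler, fullClusterStateReceivedCount and compatibleClusterStateDiffReceivedCount track which branch was taken, and lastSeenClusterState is only updated after the downloaded state is accepted.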
diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java index fd8cbea42c12f..479143fa9a2f0 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java @@ -738,9 +738,7 @@ public boolean equals(Object o) { IndexShardRoutingTable that = (IndexShardRoutingTable) o; if (!shardId.equals(that.shardId)) return false; - if (!shards.equals(that.shards)) return false; - - return true; + return shards.size() == that.shards.size() && shards.containsAll(that.shards) && that.shards.containsAll(shards); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java index e4095a84be081..6c7b94f316da2 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java @@ -79,7 +79,7 @@ public class RoutingTable implements Iterable, Diffable indicesRouting; - private RoutingTable(long version, final Map indicesRouting) { + public RoutingTable(long version, final Map indicesRouting) { this.version = version; this.indicesRouting = Collections.unmodifiableMap(indicesRouting); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java index 3864e282a310b..5ad3a2fd47ce3 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java @@ -584,10 +584,7 @@ private void allocateExistingUnassignedShards(RoutingAllocation allocation) { /* Use batch mode if enabled and there is no custom allocator set for Allocation service */ - Boolean batchModeEnabled = EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(settings); - if (batchModeEnabled - && allocation.nodes().getMinNodeVersion().onOrAfter(Version.V_2_14_0) - && existingShardsAllocators.size() == 2) { + if (isBatchModeEnabled(allocation)) { /* If we do not have any custom allocator set then we will be using ShardsBatchGatewayAllocator Currently AllocationService will not run any custom Allocator that implements allocateAllUnassignedShards @@ -724,13 +721,24 @@ private AllocateUnassignedDecision explainUnassignedShardAllocation(ShardRouting private ExistingShardsAllocator getAllocatorForShard(ShardRouting shardRouting, RoutingAllocation routingAllocation) { assert assertInitialized(); - final String allocatorName = ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.get( - routingAllocation.metadata().getIndexSafe(shardRouting.index()).getSettings() - ); + String allocatorName; + if (isBatchModeEnabled(routingAllocation)) { + allocatorName = ShardsBatchGatewayAllocator.ALLOCATOR_NAME; + } else { + allocatorName = ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.get( + routingAllocation.metadata().getIndexSafe(shardRouting.index()).getSettings() + ); + } final ExistingShardsAllocator existingShardsAllocator = existingShardsAllocators.get(allocatorName); return existingShardsAllocator != null ? 
existingShardsAllocator : new NotFoundAllocator(allocatorName); } + private boolean isBatchModeEnabled(RoutingAllocation routingAllocation) { + return EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.get(settings) + && routingAllocation.nodes().getMinNodeVersion().onOrAfter(Version.V_2_14_0) + && existingShardsAllocators.size() == 2; + } + private boolean assertInitialized() { assert existingShardsAllocators != null : "must have set allocators first"; return true; diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java new file mode 100644 index 0000000000000..cc1b0713393f3 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/InternalRemoteRoutingTableService.java @@ -0,0 +1,381 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing.remote; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.store.IndexInput; +import org.opensearch.action.LatchedActionListener; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.DiffableUtils; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.common.CheckedRunnable; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.stream.write.WritePriority; +import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; +import org.opensearch.common.blobstore.transfer.stream.OffsetRangeIndexInputStream; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.lucene.store.ByteArrayIndexInput; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.index.Index; +import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.gateway.remote.RemoteStateTransferException; +import org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable; +import org.opensearch.index.remote.RemoteStoreEnums; +import org.opensearch.index.remote.RemoteStorePathStrategy; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ExecutorService; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; + 
+import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteRoutingTableEnabled; + +/** + * A Service which provides APIs to upload and download routing table from remote store. + * + * @opensearch.internal + */ +public class InternalRemoteRoutingTableService extends AbstractLifecycleComponent implements RemoteRoutingTableService { + + /** + * This setting is used to set the remote routing table store blob store path type strategy. + */ + public static final Setting REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING = new Setting<>( + "cluster.remote_store.routing_table.path_type", + RemoteStoreEnums.PathType.HASHED_PREFIX.toString(), + RemoteStoreEnums.PathType::parseString, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + /** + * This setting is used to set the remote routing table store blob store path hash algorithm strategy. + * This setting will come to effect if the {@link #REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING} + * is either {@code HASHED_PREFIX} or {@code HASHED_INFIX}. + */ + public static final Setting REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING = new Setting<>( + "cluster.remote_store.routing_table.path_hash_algo", + RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64.toString(), + RemoteStoreEnums.PathHashAlgorithm::parseString, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + public static final String INDEX_ROUTING_PATH_TOKEN = "index-routing"; + public static final String INDEX_ROUTING_FILE_PREFIX = "index_routing"; + public static final String INDEX_ROUTING_METADATA_PREFIX = "indexRouting--"; + + private static final Logger logger = LogManager.getLogger(InternalRemoteRoutingTableService.class); + private final Settings settings; + private final Supplier repositoriesService; + private BlobStoreRepository blobStoreRepository; + private RemoteStoreEnums.PathType pathType; + private RemoteStoreEnums.PathHashAlgorithm pathHashAlgo; + private ThreadPool threadPool; + + public InternalRemoteRoutingTableService( + Supplier repositoriesService, + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadpool + ) { + assert isRemoteRoutingTableEnabled(settings) : "Remote routing table is not enabled"; + this.repositoriesService = repositoriesService; + this.settings = settings; + this.pathType = clusterSettings.get(REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING); + this.pathHashAlgo = clusterSettings.get(REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING); + clusterSettings.addSettingsUpdateConsumer(REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING, this::setPathTypeSetting); + clusterSettings.addSettingsUpdateConsumer(REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING, this::setPathHashAlgoSetting); + this.threadPool = threadpool; + } + + private void setPathTypeSetting(RemoteStoreEnums.PathType pathType) { + this.pathType = pathType; + } + + private void setPathHashAlgoSetting(RemoteStoreEnums.PathHashAlgorithm pathHashAlgo) { + this.pathHashAlgo = pathHashAlgo; + } + + public List getIndicesRouting(RoutingTable routingTable) { + return new ArrayList<>(routingTable.indicesRouting().values()); + } + + /** + * Returns diff between the two routing tables, which includes upserts and deletes. 
+ * @param before previous routing table + * @param after current routing table + * @return diff of the previous and current routing table + */ + public DiffableUtils.MapDiff> getIndicesRoutingMapDiff( + RoutingTable before, + RoutingTable after + ) { + return DiffableUtils.diff( + before.getIndicesRouting(), + after.getIndicesRouting(), + DiffableUtils.getStringKeySerializer(), + CUSTOM_ROUTING_TABLE_VALUE_SERIALIZER + ); + } + + /** + * Create async action for writing one {@code IndexRoutingTable} to remote store + * @param clusterState current cluster state + * @param indexRouting indexRoutingTable to write to remote store + * @param latchedActionListener listener for handling async action response + * @param clusterBasePath base path for remote file + * @return returns runnable async action + */ + public CheckedRunnable getIndexRoutingAsyncAction( + ClusterState clusterState, + IndexRoutingTable indexRouting, + LatchedActionListener latchedActionListener, + BlobPath clusterBasePath + ) { + + BlobPath indexRoutingPath = clusterBasePath.add(INDEX_ROUTING_PATH_TOKEN); + BlobPath path = pathType.path( + RemoteStorePathStrategy.PathInput.builder().basePath(indexRoutingPath).indexUUID(indexRouting.getIndex().getUUID()).build(), + pathHashAlgo + ); + final BlobContainer blobContainer = blobStoreRepository.blobStore().blobContainer(path); + + final String fileName = getIndexRoutingFileName(clusterState.term(), clusterState.version()); + + ActionListener completionListener = ActionListener.wrap( + resp -> latchedActionListener.onResponse( + new ClusterMetadataManifest.UploadedIndexMetadata( + indexRouting.getIndex().getName(), + indexRouting.getIndex().getUUID(), + path.buildAsString() + fileName, + INDEX_ROUTING_METADATA_PREFIX + ) + ), + ex -> latchedActionListener.onFailure( + new RemoteStateTransferException("Exception in writing index to remote store: " + indexRouting.getIndex().toString(), ex) + ) + ); + + return () -> uploadIndex(indexRouting, fileName, blobContainer, completionListener); + } + + /** + * Combines IndicesRoutingMetadata from previous manifest and current uploaded indices, removes deleted indices. 
+ * @param previousManifest previous manifest, used to get all existing indices routing paths + * @param indicesRoutingUploaded current uploaded indices routings + * @param indicesRoutingToDelete indices to delete + * @return combined list of metadata + */ + public List getAllUploadedIndicesRouting( + ClusterMetadataManifest previousManifest, + List indicesRoutingUploaded, + List indicesRoutingToDelete + ) { + final Map allUploadedIndicesRouting = previousManifest.getIndicesRouting() + .stream() + .collect(Collectors.toMap(ClusterMetadataManifest.UploadedIndexMetadata::getIndexName, Function.identity())); + + indicesRoutingUploaded.forEach( + uploadedIndexRouting -> allUploadedIndicesRouting.put(uploadedIndexRouting.getIndexName(), uploadedIndexRouting) + ); + indicesRoutingToDelete.forEach(allUploadedIndicesRouting::remove); + + return new ArrayList<>(allUploadedIndicesRouting.values()); + } + + private void uploadIndex( + IndexRoutingTable indexRouting, + String fileName, + BlobContainer blobContainer, + ActionListener completionListener + ) { + RemoteIndexRoutingTable indexRoutingInput = new RemoteIndexRoutingTable(indexRouting); + BytesReference bytesInput = null; + try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { + indexRoutingInput.writeTo(streamOutput); + bytesInput = streamOutput.bytes(); + } catch (IOException e) { + logger.error("Failed to serialize IndexRoutingTable for [{}]: [{}]", indexRouting, e); + completionListener.onFailure(e); + return; + } + + if (blobContainer instanceof AsyncMultiStreamBlobContainer == false) { + try { + blobContainer.writeBlob(fileName, bytesInput.streamInput(), bytesInput.length(), true); + completionListener.onResponse(null); + } catch (IOException e) { + logger.error("Failed to write IndexRoutingTable to remote store for indexRouting [{}]: [{}]", indexRouting, e); + completionListener.onFailure(e); + } + return; + } + + try (IndexInput input = new ByteArrayIndexInput("indexrouting", BytesReference.toBytes(bytesInput))) { + try ( + RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer( + fileName, + fileName, + input.length(), + true, + WritePriority.URGENT, + (size, position) -> new OffsetRangeIndexInputStream(input, size, position), + null, + false + ) + ) { + ((AsyncMultiStreamBlobContainer) blobContainer).asyncBlobUpload( + remoteTransferContainer.createWriteContext(), + completionListener + ); + } catch (IOException e) { + logger.error("Failed to write IndexRoutingTable to remote store for indexRouting [{}]: [{}]", indexRouting, e); + completionListener.onFailure(e); + } + } catch (IOException e) { + logger.error( + "Failed to create transfer object for IndexRoutingTable for remote store upload for indexRouting [{}]: [{}]", + indexRouting, + e + ); + completionListener.onFailure(e); + } + } + + @Override + public CheckedRunnable getAsyncIndexRoutingReadAction( + String uploadedFilename, + Index index, + LatchedActionListener latchedActionListener + ) { + int idx = uploadedFilename.lastIndexOf("/"); + String blobFileName = uploadedFilename.substring(idx + 1); + BlobContainer blobContainer = blobStoreRepository.blobStore() + .blobContainer(BlobPath.cleanPath().add(uploadedFilename.substring(0, idx))); + + return () -> readAsync( + blobContainer, + blobFileName, + index, + threadPool.executor(ThreadPool.Names.REMOTE_STATE_READ), + ActionListener.wrap( + response -> latchedActionListener.onResponse(response.getIndexRoutingTable()), + latchedActionListener::onFailure + ) + ); + } + + private void readAsync( + 
BlobContainer blobContainer, + String name, + Index index, + ExecutorService executorService, + ActionListener listener + ) { + executorService.execute(() -> { + try { + listener.onResponse(read(blobContainer, name, index)); + } catch (Exception e) { + listener.onFailure(e); + } + }); + } + + private RemoteIndexRoutingTable read(BlobContainer blobContainer, String path, Index index) { + try { + return new RemoteIndexRoutingTable(blobContainer.readBlob(path), index); + } catch (IOException | AssertionError e) { + logger.error(() -> new ParameterizedMessage("RoutingTable read failed for path {}", path), e); + throw new RemoteStateTransferException("Failed to read RemoteRoutingTable from Manifest with error ", e); + } + } + + @Override + public List getUpdatedIndexRoutingTableMetadata( + List updatedIndicesRouting, + List allIndicesRouting + ) { + return updatedIndicesRouting.stream().map(idx -> { + Optional uploadedIndexMetadataOptional = allIndicesRouting.stream() + .filter(idx2 -> idx2.getIndexName().equals(idx)) + .findFirst(); + assert uploadedIndexMetadataOptional.isPresent() == true; + return uploadedIndexMetadataOptional.get(); + }).collect(Collectors.toList()); + } + + private String getIndexRoutingFileName(long term, long version) { + return String.join( + DELIMITER, + INDEX_ROUTING_FILE_PREFIX, + RemoteStoreUtils.invertLong(term), + RemoteStoreUtils.invertLong(version), + RemoteStoreUtils.invertLong(System.currentTimeMillis()) + ); + } + + @Override + protected void doClose() throws IOException { + if (blobStoreRepository != null) { + IOUtils.close(blobStoreRepository); + } + } + + @Override + protected void doStart() { + assert isRemoteRoutingTableEnabled(settings) == true : "Remote routing table is not enabled"; + final String remoteStoreRepo = settings.get( + Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY + ); + assert remoteStoreRepo != null : "Remote routing table repository is not configured"; + final Repository repository = repositoriesService.get().repository(remoteStoreRepo); + assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository"; + blobStoreRepository = (BlobStoreRepository) repository; + } + + @Override + protected void doStop() {} + + @Override + public void deleteStaleIndexRoutingPaths(List stalePaths) throws IOException { + try { + logger.debug(() -> "Deleting stale index routing files from remote - " + stalePaths); + blobStoreRepository.blobStore().blobContainer(BlobPath.cleanPath()).deleteBlobsIgnoringIfNotExists(stalePaths); + } catch (IOException e) { + logger.error(() -> new ParameterizedMessage("Failed to delete some stale index routing paths from {}", stalePaths), e); + throw e; + } + } + +} diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java new file mode 100644 index 0000000000000..6236d107d0220 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/NoopRemoteRoutingTableService.java @@ -0,0 +1,103 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.routing.remote; + +import org.opensearch.action.LatchedActionListener; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.DiffableUtils; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.common.CheckedRunnable; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.core.index.Index; +import org.opensearch.gateway.remote.ClusterMetadataManifest; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +/** + * Noop impl for RemoteRoutingTableService. + */ +public class NoopRemoteRoutingTableService extends AbstractLifecycleComponent implements RemoteRoutingTableService { + + @Override + public List getIndicesRouting(RoutingTable routingTable) { + return List.of(); + } + + @Override + public DiffableUtils.MapDiff> getIndicesRoutingMapDiff( + RoutingTable before, + RoutingTable after + ) { + return DiffableUtils.diff(Map.of(), Map.of(), DiffableUtils.getStringKeySerializer(), CUSTOM_ROUTING_TABLE_VALUE_SERIALIZER); + } + + @Override + public CheckedRunnable getIndexRoutingAsyncAction( + ClusterState clusterState, + IndexRoutingTable indexRouting, + LatchedActionListener latchedActionListener, + BlobPath clusterBasePath + ) { + // noop + return () -> {}; + } + + @Override + public List getAllUploadedIndicesRouting( + ClusterMetadataManifest previousManifest, + List indicesRoutingUploaded, + List indicesRoutingToDelete + ) { + // noop + return List.of(); + } + + @Override + public CheckedRunnable getAsyncIndexRoutingReadAction( + String uploadedFilename, + Index index, + LatchedActionListener latchedActionListener + ) { + // noop + return () -> {}; + } + + @Override + public List getUpdatedIndexRoutingTableMetadata( + List updatedIndicesRouting, + List allIndicesRouting + ) { + // noop + return List.of(); + } + + @Override + protected void doStart() { + // noop + } + + @Override + protected void doStop() { + // noop + } + + @Override + protected void doClose() throws IOException { + // noop + } + + @Override + public void deleteStaleIndexRoutingPaths(List stalePaths) throws IOException { + // noop + } +} diff --git a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java index ba2208e17df1f..d455dfb58eabc 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableService.java @@ -8,60 +8,73 @@ package org.opensearch.cluster.routing.remote; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.common.lifecycle.AbstractLifecycleComponent; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.io.IOUtils; -import org.opensearch.node.Node; -import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; -import org.opensearch.repositories.RepositoriesService; -import org.opensearch.repositories.Repository; -import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.action.LatchedActionListener; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.DiffableUtils; +import org.opensearch.cluster.routing.IndexRoutingTable; +import 
org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.common.CheckedRunnable; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.lifecycle.LifecycleComponent; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.Index; +import org.opensearch.gateway.remote.ClusterMetadataManifest; import java.io.IOException; -import java.util.function.Supplier; - -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteRoutingTableEnabled; +import java.util.List; +import java.util.Map; /** * A Service which provides APIs to upload and download routing table from remote store. * * @opensearch.internal */ -public class RemoteRoutingTableService extends AbstractLifecycleComponent { +public interface RemoteRoutingTableService extends LifecycleComponent { + public static final DiffableUtils.NonDiffableValueSerializer CUSTOM_ROUTING_TABLE_VALUE_SERIALIZER = + new DiffableUtils.NonDiffableValueSerializer() { + @Override + public void write(IndexRoutingTable value, StreamOutput out) throws IOException { + value.writeTo(out); + } + + @Override + public IndexRoutingTable read(StreamInput in, String key) throws IOException { + return IndexRoutingTable.readFrom(in); + } + }; + + List getIndicesRouting(RoutingTable routingTable); + + CheckedRunnable getAsyncIndexRoutingReadAction( + String uploadedFilename, + Index index, + LatchedActionListener latchedActionListener + ); - private static final Logger logger = LogManager.getLogger(RemoteRoutingTableService.class); - private final Settings settings; - private final Supplier repositoriesService; - private BlobStoreRepository blobStoreRepository; + List getUpdatedIndexRoutingTableMetadata( + List updatedIndicesRouting, + List allIndicesRouting + ); - public RemoteRoutingTableService(Supplier repositoriesService, Settings settings) { - assert isRemoteRoutingTableEnabled(settings) : "Remote routing table is not enabled"; - this.repositoriesService = repositoriesService; - this.settings = settings; - } + DiffableUtils.MapDiff> getIndicesRoutingMapDiff( + RoutingTable before, + RoutingTable after + ); - @Override - protected void doClose() throws IOException { - if (blobStoreRepository != null) { - IOUtils.close(blobStoreRepository); - } - } + CheckedRunnable getIndexRoutingAsyncAction( + ClusterState clusterState, + IndexRoutingTable indexRouting, + LatchedActionListener latchedActionListener, + BlobPath clusterBasePath + ); - @Override - protected void doStart() { - assert isRemoteRoutingTableEnabled(settings) == true : "Remote routing table is not enabled"; - final String remoteStoreRepo = settings.get( - Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY - ); - assert remoteStoreRepo != null : "Remote routing table repository is not configured"; - final Repository repository = repositoriesService.get().repository(remoteStoreRepo); - assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository"; - blobStoreRepository = (BlobStoreRepository) repository; - } + List getAllUploadedIndicesRouting( + ClusterMetadataManifest previousManifest, + List indicesRoutingUploaded, + List indicesRoutingToDelete + ); - @Override - protected void doStop() {} + public void deleteStaleIndexRoutingPaths(List stalePaths) throws IOException; } diff --git 
a/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactory.java b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactory.java new file mode 100644 index 0000000000000..82837191a30b7 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactory.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing.remote; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.threadpool.ThreadPool; + +import java.util.function.Supplier; + +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteRoutingTableEnabled; + +/** + * Factory to provide impl for RemoteRoutingTableService based on settings. + */ +public class RemoteRoutingTableServiceFactory { + + /** + * Returns {@code DefaultRemoteRoutingTableService} if the feature is enabled, otherwise {@code NoopRemoteRoutingTableService} + * @param repositoriesService repositoriesService + * @param settings settings + * @param clusterSettings clusterSettings + * @param threadPool threadPool + * @return RemoteRoutingTableService + */ + public static RemoteRoutingTableService getService( + Supplier repositoriesService, + Settings settings, + ClusterSettings clusterSettings, + ThreadPool threadPool + ) { + if (isRemoteRoutingTableEnabled(settings)) { + return new InternalRemoteRoutingTableService(repositoriesService, settings, clusterSettings, threadPool); + } + return new NoopRemoteRoutingTableService(); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java index 2ac95178d2ff9..6234427445754 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java @@ -62,6 +62,7 @@ import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.telemetry.metrics.tags.Tags; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; @@ -125,6 +126,10 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements private NodeConnectionsService nodeConnectionsService; private final ClusterManagerMetrics clusterManagerMetrics; + public ClusterApplierService(String nodeName, Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { + this(nodeName, settings, clusterSettings, threadPool, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE)); + } + public ClusterApplierService( String nodeName, Settings settings, diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java index eaedb36a59f1e..fa8c965b4d538 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java +++ 
b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java @@ -21,6 +21,11 @@ */ @PublicApi(since = "2.2.0") public class ClusterManagerService extends MasterService { + + public ClusterManagerService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { + super(settings, clusterSettings, threadPool); + } + public ClusterManagerService( Settings settings, ClusterSettings clusterSettings, diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java index fa61375e85c25..c3c48dd8b87ef 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java @@ -54,6 +54,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexingPressureService; import org.opensearch.node.Node; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.threadpool.ThreadPool; import java.util.Collections; @@ -92,6 +93,10 @@ public class ClusterService extends AbstractLifecycleComponent { private IndexingPressureService indexingPressureService; + public ClusterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { + this(settings, clusterSettings, threadPool, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE)); + } + public ClusterService( Settings settings, ClusterSettings clusterSettings, diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java index 6436dcfe33003..686e9793a8fd3 100644 --- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java @@ -71,6 +71,7 @@ import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.discovery.Discovery; import org.opensearch.node.Node; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.telemetry.metrics.tags.Tags; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; @@ -140,6 +141,10 @@ public class MasterService extends AbstractLifecycleComponent { private final ClusterStateStats stateStats; private final ClusterManagerMetrics clusterManagerMetrics; + public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { + this(settings, clusterSettings, threadPool, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE)); + } + public MasterService( Settings settings, ClusterSettings clusterSettings, diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java b/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java index 6f3e8be7c28b8..68af77714a319 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java @@ -39,6 +39,7 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Objects; /** * The list of paths where a blob can reside. The contents of the paths are dependent upon the implementation of {@link BlobContainer}. 
@@ -110,6 +111,19 @@ public BlobPath parent() { } } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + BlobPath that = (BlobPath) o; + return Objects.equals(paths, that.paths); + } + + @Override + public int hashCode() { + return Objects.hashCode(paths); + } + @Override public String toString() { StringBuilder sb = new StringBuilder(); diff --git a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobContainer.java index d0933741339d9..f58b99daec3c5 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobContainer.java @@ -9,6 +9,7 @@ package org.opensearch.common.blobstore; import org.opensearch.common.CheckedBiConsumer; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.crypto.CryptoHandler; import org.opensearch.common.crypto.DecryptedRangedStreamProvider; import org.opensearch.common.crypto.EncryptedHeaderContentSupplier; @@ -50,6 +51,14 @@ public InputStream readBlob(String blobName) throws IOException { return cryptoHandler.createDecryptingStream(inputStream); } + @ExperimentalApi + @Override + public InputStreamWithMetadata readBlobWithMetadata(String blobName) throws IOException { + InputStreamWithMetadata inputStreamWithMetadata = blobContainer.readBlobWithMetadata(blobName); + InputStream decryptInputStream = cryptoHandler.createDecryptingStream(inputStreamWithMetadata.getInputStream()); + return new InputStreamWithMetadata(decryptInputStream, inputStreamWithMetadata.getMetadata()); + } + EncryptedHeaderContentSupplier getEncryptedHeaderContentSupplier(String blobName) { return (start, end) -> { byte[] buffer; diff --git a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java index c41641921c822..1214c6cdc7373 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java +++ b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java @@ -95,6 +95,11 @@ public Map> extendedStats() { return blobStore.extendedStats(); } + @Override + public boolean isBlobMetadataEnabled() { + return blobStore.isBlobMetadataEnabled(); + } + /** * Closes the EncryptedBlobStore by decrementing the reference count of the CryptoManager and closing the * underlying BlobStore. This ensures proper cleanup of resources. 
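The equals/hashCode pair added to BlobPath above gives paths value semantics, so two paths built from the same segments compare equal and deduplicate in hash-based collections (useful, for example, when collecting stale index routing paths for deletion). A minimal sketch of why that matters; SimpleBlobPath is a hypothetical stand-in for BlobPath:

```java
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;

public class BlobPathEquality {

    // Stand-in for BlobPath: an immutable list of path segments with value equality.
    static final class SimpleBlobPath {
        private final List<String> paths;

        SimpleBlobPath(String... segments) {
            this.paths = List.of(segments);
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            return Objects.equals(paths, ((SimpleBlobPath) o).paths);
        }

        @Override
        public int hashCode() {
            return Objects.hashCode(paths);
        }
    }

    public static void main(String[] args) {
        Set<SimpleBlobPath> seen = new HashSet<>();
        seen.add(new SimpleBlobPath("cluster-state", "index-routing"));
        // With value equality the logically identical path is recognized as a duplicate.
        System.out.println(seen.add(new SimpleBlobPath("cluster-state", "index-routing"))); // false
    }
}
```

Without the override, BlobPath would fall back to identity equality and the second add above would succeed, defeating set-based deduplication.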
diff --git a/server/src/main/java/org/opensearch/common/cache/Cache.java b/server/src/main/java/org/opensearch/common/cache/Cache.java index 6d346de25cadf..caae81e4387b4 100644 --- a/server/src/main/java/org/opensearch/common/cache/Cache.java +++ b/server/src/main/java/org/opensearch/common/cache/Cache.java @@ -36,9 +36,11 @@ import org.opensearch.common.collect.Tuple; import org.opensearch.common.util.concurrent.ReleasableLock; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.Iterator; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.concurrent.CompletableFuture; @@ -396,7 +398,12 @@ private V get(K key, long now, Consumer<Entry<K, V>> onExpiration) { if (entry == null) { return null; } else { - promote(entry, now); + List<RemovalNotification<K, V>> removalNotifications = promote(entry, now).v2(); + if (!removalNotifications.isEmpty()) { + for (RemovalNotification<K, V> removalNotification : removalNotifications) { + removalListener.onRemoval(removalNotification); + } + } return entry.value; } } @@ -446,8 +453,14 @@ private V compute(K key, CacheLoader<K, V> loader) throws ExecutionException { BiFunction<? super Entry<K, V>, Throwable, ? extends V> handler = (ok, ex) -> { if (ok != null) { + List<RemovalNotification<K, V>> removalNotifications = new ArrayList<>(); try (ReleasableLock ignored = lruLock.acquire()) { - promote(ok, now); + removalNotifications = promote(ok, now).v2(); + } + if (!removalNotifications.isEmpty()) { + for (RemovalNotification<K, V> removalNotification : removalNotifications) { + removalListener.onRemoval(removalNotification); + } } return ok.value; } else { @@ -512,16 +525,22 @@ private void put(K key, V value, long now) { CacheSegment<K, V> segment = getCacheSegment(key); Tuple<Entry<K, V>, Entry<K, V>> tuple = segment.put(key, value, now); boolean replaced = false; + List<RemovalNotification<K, V>> removalNotifications = new ArrayList<>(); try (ReleasableLock ignored = lruLock.acquire()) { if (tuple.v2() != null && tuple.v2().state == State.EXISTING) { if (unlink(tuple.v2())) { replaced = true; } } - promote(tuple.v1(), now); + removalNotifications = promote(tuple.v1(), now).v2(); } if (replaced) { - removalListener.onRemoval(new RemovalNotification<>(tuple.v2().key, tuple.v2().value, RemovalReason.REPLACED)); + removalNotifications.add(new RemovalNotification<>(tuple.v2().key, tuple.v2().value, RemovalReason.REPLACED)); + } + if (!removalNotifications.isEmpty()) { + for (RemovalNotification<K, V> removalNotification : removalNotifications) { + removalListener.onRemoval(removalNotification); + } } } @@ -767,8 +786,17 @@ public long getEvictions() { } } - private boolean promote(Entry<K, V> entry, long now) { + /** + * Promotes the desired entry to the head of the LRU list and checks whether any entries need to be evicted + * because the cache size limit has been exceeded or the entry has expired. + * @param entry the entry to be promoted + * @param now the current time + * @return a tuple: v1 signifies whether the entry was promoted, v2 is the list of removal + * notifications that the caller needs to handle. + */ + private Tuple<Boolean, List<RemovalNotification<K, V>>> promote(Entry<K, V> entry, long now) { boolean promoted = true; + List<RemovalNotification<K, V>> removalNotifications = new ArrayList<>(); try (ReleasableLock ignored = lruLock.acquire()) { switch (entry.state) { case DELETED: @@ -782,10 +810,21 @@ break; } if (promoted) { - evict(now); + while (tail != null && shouldPrune(tail, now)) { + Entry<K, V> entryToBeRemoved = tail; + CacheSegment<K, V> segment = getCacheSegment(entryToBeRemoved.key); + if (segment != null) { + segment.remove(entryToBeRemoved.key, entryToBeRemoved.value, f -> {}); + } + if (unlink(entryToBeRemoved)) { + removalNotifications.add( + new RemovalNotification<>(entryToBeRemoved.key, entryToBeRemoved.value, RemovalReason.EVICTED) + ); + } + } } } - return promoted; + return new Tuple<>(promoted, removalNotifications); } private void evict(long now) { diff --git a/server/src/main/java/org/opensearch/common/cache/RemovalListener.java b/server/src/main/java/org/opensearch/common/cache/RemovalListener.java index 68e1cdf6139e2..eaaaec2bb07e0 100644 --- a/server/src/main/java/org/opensearch/common/cache/RemovalListener.java +++ b/server/src/main/java/org/opensearch/common/cache/RemovalListener.java @@ -42,5 +42,10 @@ @ExperimentalApi @FunctionalInterface public interface RemovalListener<K, V> { + + /** + * This may be called from multiple threads at once, so implementations need to be thread-safe. + * @param notification removal notification for the desired entry. + */ void onRemoval(RemovalNotification<K, V> notification); } diff --git a/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java b/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java index 632b2b70d61df..23fc9d3ad77cb 100644 --- a/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java +++ b/server/src/main/java/org/opensearch/common/remote/AbstractRemoteWritableBlobEntity.java @@ -42,6 +42,8 @@ public AbstractRemoteWritableBlobEntity( public abstract BlobPathParameters getBlobPathParameters(); + public abstract String getType(); + public String getFullBlobName() { return blobName; } diff --git a/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityStore.java b/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityStore.java index ccf7cafff1730..385c6f20ba58d 100644 --- a/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityStore.java +++ b/server/src/main/java/org/opensearch/common/remote/RemoteWritableEntityStore.java @@ -8,6 +8,7 @@ package org.opensearch.common.remote; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.core.action.ActionListener; import java.io.IOException; @@ -18,6 +19,7 @@ * @param <T> The object type which can be uploaded to or downloaded from remote storage. * @param <U> The wrapper entity which provides methods for serializing/deserializing entity T.
*/ +@ExperimentalApi public interface RemoteWritableEntityStore> { public void writeAsync(U entity, ActionListener listener); diff --git a/server/src/main/java/org/opensearch/common/remote/RemoteWriteableEntity.java b/server/src/main/java/org/opensearch/common/remote/RemoteWriteableEntity.java index 778c24dce2e27..773ddce5b9cc8 100644 --- a/server/src/main/java/org/opensearch/common/remote/RemoteWriteableEntity.java +++ b/server/src/main/java/org/opensearch/common/remote/RemoteWriteableEntity.java @@ -8,6 +8,8 @@ package org.opensearch.common.remote; +import org.opensearch.common.annotation.ExperimentalApi; + import java.io.IOException; import java.io.InputStream; @@ -17,6 +19,7 @@ * * @param The object type which can be uploaded to or downloaded from remote storage. */ +@ExperimentalApi public interface RemoteWriteableEntity { /** * @return An InputStream created by serializing the entity T diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 297fc98764d07..7ea04acf00415 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -77,6 +77,7 @@ import org.opensearch.cluster.routing.allocation.decider.SameShardAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; +import org.opensearch.cluster.routing.remote.InternalRemoteRoutingTableService; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.cluster.service.ClusterManagerTaskThrottler; @@ -180,6 +181,10 @@ import java.util.Set; import java.util.function.Predicate; +import static org.opensearch.gateway.remote.RemoteGlobalMetadataManager.GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING; +import static org.opensearch.gateway.remote.RemoteIndexMetadataManager.INDEX_METADATA_UPLOAD_TIMEOUT_SETTING; +import static org.opensearch.gateway.remote.RemoteManifestManager.METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING; + /** * Encapsulates all valid cluster level settings. 
* @@ -302,10 +307,12 @@ public void apply(Settings value, Settings current, Settings previous) { RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, + RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_RETRY_TIMEOUT_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING, RecoverySettings.INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT, + RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, @@ -533,6 +540,7 @@ public void apply(Settings value, Settings current, Settings previous) { SearchService.MAX_OPEN_PIT_CONTEXT, SearchService.MAX_PIT_KEEPALIVE_SETTING, SearchService.MAX_AGGREGATION_REWRITE_FILTERS, + SearchService.CARDINALITY_AGGREGATION_PRUNING_THRESHOLD, CreatePitController.PIT_INIT_KEEP_ALIVE, Node.WRITE_PORTS_FILE_SETTING, Node.NODE_NAME_SETTING, @@ -714,13 +722,16 @@ public void apply(Settings value, Settings current, Settings previous) { // Remote cluster state settings RemoteClusterStateCleanupManager.REMOTE_CLUSTER_STATE_CLEANUP_INTERVAL_SETTING, RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING, - RemoteClusterStateService.INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, - RemoteClusterStateService.GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, - RemoteClusterStateService.METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, + INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, + GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, + METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, + RemoteClusterStateService.REMOTE_STATE_READ_TIMEOUT_SETTING, RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING, RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING, + InternalRemoteRoutingTableService.REMOTE_ROUTING_TABLE_PATH_TYPE_SETTING, + InternalRemoteRoutingTableService.REMOTE_ROUTING_TABLE_PATH_HASH_ALGO_SETTING, // Admission Control Settings AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, @@ -743,7 +754,8 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_MAX_TRANSLOG_READERS, - RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA + RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA, + SearchService.CLUSTER_ALLOW_DERIVED_FIELD_SETTING ) ) ); diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 980c432774f6e..6fe8dec9c21b1 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -237,7 +237,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { // Settings for concurrent segment search 
IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING, - + IndexSettings.ALLOW_DERIVED_FIELDS, // validate that built-in similarities don't get redefined Setting.groupSetting("index.similarity.", (s) -> { Map<String, Settings> groups = s.getAsGroups(); diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 82f43921d2d28..6c6e2f2d600f0 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -23,7 +23,7 @@ */ public class FeatureFlags { /** - * Gates the visibility of the remote store migration support from docrep . + * Gates the visibility of the remote store to docrep migration. */ public static final String REMOTE_STORE_MIGRATION_EXPERIMENTAL = "opensearch.experimental.feature.remote_store.migration.enabled"; diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 6580b0e0085ef..906a27e9f398c 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -483,6 +483,16 @@ public void addResponseHeader(final String key, final String value) { addResponseHeader(key, value, v -> v); } + /** + * Update the {@code value} for the specified {@code key} + * + * @param key the header name + * @param value the header value + */ + public void updateResponseHeader(final String key, final String value) { + updateResponseHeader(key, value, v -> v); + } + /** * Add the {@code value} for the specified {@code key} with the specified {@code uniqueValue} used for de-duplication. Any duplicate * {@code value} after applying {@code uniqueValue} is ignored. @@ -492,7 +502,19 @@ public void addResponseHeader(final String key, final String value) { * @param uniqueValue the function that produces de-duplication values */ public void addResponseHeader(final String key, final String value, final Function<String, String> uniqueValue) { - threadLocal.set(threadLocal.get().putResponse(key, value, uniqueValue, maxWarningHeaderCount, maxWarningHeaderSize)); + threadLocal.set(threadLocal.get().putResponse(key, value, uniqueValue, maxWarningHeaderCount, maxWarningHeaderSize, false)); + } + + /** + * Update the {@code value} for the specified {@code key} with the specified {@code uniqueValue} used for de-duplication. Any duplicate + * {@code value} after applying {@code uniqueValue} is ignored.
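To make the append-versus-replace semantics concrete, a hedged usage sketch (the threadContext instance and header values are illustrative):

    // addResponseHeader accumulates de-duplicated values in insertion order:
    threadContext.addResponseHeader("Warning", "199 - \"first warning\"");
    threadContext.addResponseHeader("Warning", "199 - \"second warning\"");
    // Warning -> [199 - "first warning", 199 - "second warning"]

    // updateResponseHeader replaces whatever was stored under the key:
    threadContext.updateResponseHeader("X-Opaque-Id", "attempt-1");
    threadContext.updateResponseHeader("X-Opaque-Id", "attempt-2");
    // X-Opaque-Id -> [attempt-2]

In both paths the de-duplication check runs first (see putResponse below), so re-adding or re-updating with an identical value is a no-op.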
+ * + * @param key the header name + * @param value the header value + * @param uniqueValue the function that produces de-duplication values + */ + public void updateResponseHeader(final String key, final String value, final Function<String, String> uniqueValue) { + threadLocal.set(threadLocal.get().putResponse(key, value, uniqueValue, maxWarningHeaderCount, maxWarningHeaderSize, true)); } /** @@ -717,7 +739,8 @@ private ThreadContextStruct putResponse( final String value, final Function<String, String> uniqueValue, final int maxWarningHeaderCount, - final long maxWarningHeaderSize + final long maxWarningHeaderSize, + final boolean replaceExistingKey ) { assert value != null; long newWarningHeaderSize = warningHeadersSize; @@ -759,8 +782,13 @@ private ThreadContextStruct putResponse( if (existingValues.contains(uniqueValue.apply(value))) { return this; } - // preserve insertion order - final Set<String> newValues = Stream.concat(existingValues.stream(), Stream.of(value)).collect(LINKED_HASH_SET_COLLECTOR); + Set<String> newValues; + if (replaceExistingKey) { + newValues = Stream.of(value).collect(LINKED_HASH_SET_COLLECTOR); + } else { + // preserve insertion order + newValues = Stream.concat(existingValues.stream(), Stream.of(value)).collect(LINKED_HASH_SET_COLLECTOR); + } newResponseHeaders = new HashMap<>(responseHeaders); newResponseHeaders.put(key, Collections.unmodifiableSet(newValues)); } else { diff --git a/server/src/main/java/org/opensearch/common/xcontent/support/XContentHttpChunk.java b/server/src/main/java/org/opensearch/common/xcontent/support/XContentHttpChunk.java new file mode 100644 index 0000000000000..15b63a0ac2030 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/xcontent/support/XContentHttpChunk.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.common.xcontent.support; + +import org.opensearch.common.Nullable; +import org.opensearch.common.lease.Releasable; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.http.HttpChunk; + +/** + * Wraps the instance of the {@link XContentBuilder} into {@link HttpChunk} + */ +public final class XContentHttpChunk implements HttpChunk { + private final BytesReference content; + + /** + * Creates a new {@link HttpChunk} from {@link XContentBuilder} + * @param builder {@link XContentBuilder} instance + * @return new {@link HttpChunk} instance, if passed {@link XContentBuilder} is {@code null}, a last empty {@link HttpChunk} will be returned + */ + public static HttpChunk from(@Nullable final XContentBuilder builder) { + return new XContentHttpChunk(builder); + } + + /** + * Creates a new last empty {@link HttpChunk} + * @return last empty {@link HttpChunk} instance + */ + public static HttpChunk last() { + return new XContentHttpChunk(null); + } + + private XContentHttpChunk(@Nullable final XContentBuilder builder) { + if (builder == null /* no content */) { + content = BytesArray.EMPTY; + } else { + content = BytesReference.bytes(builder); + } + } + + @Override + public boolean isLast() { + return content == BytesArray.EMPTY; + } + + @Override + public BytesReference content() { + return content; + } + + @Override + public void close() { + if (content instanceof Releasable) { + ((Releasable) content).close(); + } + } +} diff --git a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java index 538dea5b2e60b..922e23b849d49 100644 --- a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java @@ -53,6 +53,7 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.gateway.GatewayMetaState; +import org.opensearch.gateway.remote.RemoteClusterStateService; import org.opensearch.monitor.NodeHealthService; import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.plugins.DiscoveryPlugin; @@ -135,7 +136,8 @@ public DiscoveryModule( NodeHealthService nodeHealthService, PersistedStateRegistry persistedStateRegistry, RemoteStoreNodeService remoteStoreNodeService, - ClusterManagerMetrics clusterManagerMetrics + ClusterManagerMetrics clusterManagerMetrics, + RemoteClusterStateService remoteClusterStateService ) { final Collection<BiConsumer<DiscoveryNode, ClusterState>> joinValidators = new ArrayList<>(); final Map<String, Supplier<SeedHostsProvider>> hostProviders = new HashMap<>(); @@ -214,7 +216,8 @@ public DiscoveryModule( nodeHealthService, persistedStateRegistry, remoteStoreNodeService, - clusterManagerMetrics + clusterManagerMetrics, + remoteClusterStateService ); } else { throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]"); diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index c3056276706a0..80ba57b7db4a9 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -64,6 +64,7 @@ import org.opensearch.env.NodeMetadata; import org.opensearch.gateway.remote.ClusterMetadataManifest; import
org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.gateway.remote.model.RemoteClusterStateManifestInfo; import org.opensearch.index.recovery.RemoteStoreRestoreService; import org.opensearch.index.recovery.RemoteStoreRestoreService.RemoteRestoreResult; import org.opensearch.node.Node; @@ -665,6 +666,8 @@ public static class RemotePersistedState implements PersistedState { private ClusterState lastAcceptedState; private ClusterMetadataManifest lastAcceptedManifest; + + private String lastUploadedManifestFile; private final RemoteClusterStateService remoteClusterStateService; private String previousClusterUUID; @@ -690,10 +693,14 @@ public void setCurrentTerm(long currentTerm) { // But for RemotePersistedState, the state is only pushed by the active cluster. So this method is not required. } + public String getLastUploadedManifestFile() { + return lastUploadedManifestFile; + } + @Override public void setLastAcceptedState(ClusterState clusterState) { try { - final ClusterMetadataManifest manifest; + final RemoteClusterStateManifestInfo manifestDetails; if (shouldWriteFullClusterState(clusterState)) { final Optional latestManifest = remoteClusterStateService.getLatestClusterMetadataManifest( clusterState.getClusterName().value(), @@ -711,15 +718,21 @@ public void setLastAcceptedState(ClusterState clusterState) { clusterState.metadata().clusterUUID() ); } - manifest = remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID); + manifestDetails = remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID); } else { assert verifyManifestAndClusterState(lastAcceptedManifest, lastAcceptedState) == true : "Previous manifest and previous ClusterState are not in sync"; - manifest = remoteClusterStateService.writeIncrementalMetadata(lastAcceptedState, clusterState, lastAcceptedManifest); + manifestDetails = remoteClusterStateService.writeIncrementalMetadata( + lastAcceptedState, + clusterState, + lastAcceptedManifest + ); } - assert verifyManifestAndClusterState(manifest, clusterState) == true : "Manifest and ClusterState are not in sync"; - lastAcceptedManifest = manifest; + assert verifyManifestAndClusterState(manifestDetails.getClusterMetadataManifest(), clusterState) == true + : "Manifest and ClusterState are not in sync"; + lastAcceptedManifest = manifestDetails.getClusterMetadataManifest(); lastAcceptedState = clusterState; + lastUploadedManifestFile = manifestDetails.getManifestFileName(); } catch (Exception e) { remoteClusterStateService.writeMetadataFailed(); handleExceptionOnWrite(e); @@ -767,12 +780,13 @@ public void markLastAcceptedStateAsCommitted() { metadataBuilder.clusterUUIDCommitted(true); clusterState = ClusterState.builder(lastAcceptedState).metadata(metadataBuilder).build(); } - final ClusterMetadataManifest committedManifest = remoteClusterStateService.markLastStateAsCommitted( + final RemoteClusterStateManifestInfo committedManifestDetails = remoteClusterStateService.markLastStateAsCommitted( clusterState, lastAcceptedManifest ); - lastAcceptedManifest = committedManifest; + lastAcceptedManifest = committedManifestDetails.getClusterMetadataManifest(); lastAcceptedState = clusterState; + lastUploadedManifestFile = committedManifestDetails.getManifestFileName(); } catch (Exception e) { handleExceptionOnWrite(e); } diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java index 
b3b1bf37f8696..a89c202dd36be 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java @@ -40,8 +40,9 @@ public class ClusterMetadataManifest implements Writeable, ToXContentFragment { public static final int CODEC_V0 = 0; // Older codec version, where we haven't introduced codec versions for manifest. public static final int CODEC_V1 = 1; // In Codec V1 we have introduced global-metadata and codec version in Manifest file. - public static final int CODEC_V2 = 2; // In Codec V2, there are seperate metadata files rather than a single global metadata file. - public static final int CODEC_V3 = 3; // In Codec V3, we introduce index routing-metadata in manifest file. + public static final int CODEC_V2 = 2; // In Codec V2, there are separate metadata files rather than a single global metadata file, + // also we introduce index routing-metadata, diff and other attributes as part of manifest + // required for state publication private static final ParseField CLUSTER_TERM_FIELD = new ParseField("cluster_term"); private static final ParseField STATE_VERSION_FIELD = new ParseField("state_version"); @@ -61,6 +62,15 @@ public class ClusterMetadataManifest implements Writeable, ToXContentFragment { private static final ParseField UPLOADED_CUSTOM_METADATA = new ParseField("uploaded_custom_metadata"); private static final ParseField ROUTING_TABLE_VERSION_FIELD = new ParseField("routing_table_version"); private static final ParseField INDICES_ROUTING_FIELD = new ParseField("indices_routing"); + private static final ParseField METADATA_VERSION = new ParseField("metadata_version"); + private static final ParseField UPLOADED_TRANSIENT_SETTINGS_METADATA = new ParseField("uploaded_transient_settings_metadata"); + private static final ParseField UPLOADED_DISCOVERY_NODES_METADATA = new ParseField("uploaded_discovery_nodes_metadata"); + private static final ParseField UPLOADED_CLUSTER_BLOCKS_METADATA = new ParseField("uploaded_cluster_blocks_metadata"); + private static final ParseField UPLOADED_HASHES_OF_CONSISTENT_SETTINGS_METADATA = new ParseField( + "uploaded_hashes_of_consistent_settings_metadata" + ); + private static final ParseField UPLOADED_CLUSTER_STATE_CUSTOM_METADATA = new ParseField("uploaded_cluster_state_custom_metadata"); + private static final ParseField DIFF_MANIFEST = new ParseField("diff_manifest"); private static ClusterMetadataManifest.Builder manifestV0Builder(Object[] fields) { return ClusterMetadataManifest.builder() @@ -86,13 +96,16 @@ private static ClusterMetadataManifest.Builder manifestV2Builder(Object[] fields .coordinationMetadata(coordinationMetadata(fields)) .settingMetadata(settingsMetadata(fields)) .templatesMetadata(templatesMetadata(fields)) - .customMetadataMap(customMetadata(fields)); - } - - private static ClusterMetadataManifest.Builder manifestV3Builder(Object[] fields) { - return manifestV2Builder(fields).codecVersion(codecVersion(fields)) + .customMetadataMap(customMetadata(fields)) .routingTableVersion(routingTableVersion(fields)) - .indicesRouting(indicesRouting(fields)); + .indicesRouting(indicesRouting(fields)) + .discoveryNodesMetadata(discoveryNodesMetadata(fields)) + .clusterBlocksMetadata(clusterBlocksMetadata(fields)) + .diffManifest(diffManifest(fields)) + .metadataVersion(metadataVersion(fields)) + .transientSettingsMetadata(transientSettingsMetadata(fields)) + .hashesOfConsistentSettings(hashesOfConsistentSettings(fields)) + 
.clusterStateCustomMetadataMap(clusterStateCustomMetadata(fields)); } private static long term(Object[] fields) { @@ -168,6 +181,35 @@ private static List indicesRouting(Object[] fields) { return (List) fields[16]; } + private static UploadedMetadataAttribute discoveryNodesMetadata(Object[] fields) { + return (UploadedMetadataAttribute) fields[17]; + } + + private static UploadedMetadataAttribute clusterBlocksMetadata(Object[] fields) { + return (UploadedMetadataAttribute) fields[18]; + } + + private static long metadataVersion(Object[] fields) { + return (long) fields[19]; + } + + private static UploadedMetadataAttribute transientSettingsMetadata(Object[] fields) { + return (UploadedMetadataAttribute) fields[20]; + } + + private static UploadedMetadataAttribute hashesOfConsistentSettings(Object[] fields) { + return (UploadedMetadataAttribute) fields[21]; + } + + private static Map clusterStateCustomMetadata(Object[] fields) { + List customs = (List) fields[22]; + return customs.stream().collect(Collectors.toMap(UploadedMetadataAttribute::getAttributeName, Function.identity())); + } + + private static ClusterStateDiffManifest diffManifest(Object[] fields) { + return (ClusterStateDiffManifest) fields[23]; + } + private static final ConstructingObjectParser PARSER_V0 = new ConstructingObjectParser<>( "cluster_metadata_manifest", fields -> manifestV0Builder(fields).build() @@ -183,18 +225,12 @@ private static List indicesRouting(Object[] fields) { fields -> manifestV2Builder(fields).build() ); - private static final ConstructingObjectParser PARSER_V3 = new ConstructingObjectParser<>( - "cluster_metadata_manifest", - fields -> manifestV3Builder(fields).build() - ); - - private static final ConstructingObjectParser CURRENT_PARSER = PARSER_V3; + private static final ConstructingObjectParser CURRENT_PARSER = PARSER_V2; static { declareParser(PARSER_V0, CODEC_V0); declareParser(PARSER_V1, CODEC_V1); declareParser(PARSER_V2, CODEC_V2); - declareParser(PARSER_V3, CODEC_V3); } private static void declareParser(ConstructingObjectParser parser, long codec_version) { @@ -238,14 +274,43 @@ private static void declareParser(ConstructingObjectParser= CODEC_V3) { parser.declareLong(ConstructingObjectParser.constructorArg(), ROUTING_TABLE_VERSION_FIELD); parser.declareObjectArray( ConstructingObjectParser.constructorArg(), (p, c) -> UploadedIndexMetadata.fromXContent(p), INDICES_ROUTING_FIELD ); + parser.declareNamedObject( + ConstructingObjectParser.optionalConstructorArg(), + UploadedMetadataAttribute.PARSER, + UPLOADED_DISCOVERY_NODES_METADATA + ); + parser.declareNamedObject( + ConstructingObjectParser.optionalConstructorArg(), + UploadedMetadataAttribute.PARSER, + UPLOADED_CLUSTER_BLOCKS_METADATA + ); + parser.declareLong(ConstructingObjectParser.constructorArg(), METADATA_VERSION); + parser.declareNamedObject( + ConstructingObjectParser.optionalConstructorArg(), + UploadedMetadataAttribute.PARSER, + UPLOADED_TRANSIENT_SETTINGS_METADATA + ); + parser.declareNamedObject( + ConstructingObjectParser.optionalConstructorArg(), + UploadedMetadataAttribute.PARSER, + UPLOADED_HASHES_OF_CONSISTENT_SETTINGS_METADATA + ); + parser.declareNamedObjects( + ConstructingObjectParser.optionalConstructorArg(), + UploadedMetadataAttribute.PARSER, + UPLOADED_CLUSTER_STATE_CUSTOM_METADATA + ); + parser.declareObject( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> ClusterStateDiffManifest.fromXContent(p), + DIFF_MANIFEST + ); } } @@ -267,6 +332,13 @@ private static void 
declareParser(ConstructingObjectParser indicesRouting; + private final long metadataVersion; + private final UploadedMetadataAttribute uploadedTransientSettingsMetadata; + private final UploadedMetadataAttribute uploadedDiscoveryNodesMetadata; + private final UploadedMetadataAttribute uploadedClusterBlocksMetadata; + private final UploadedMetadataAttribute uploadedHashesOfConsistentSettings; + private final Map uploadedClusterStateCustomMap; + private final ClusterStateDiffManifest diffManifest; public List getIndices() { return indices; @@ -332,6 +404,34 @@ public Map getCustomMetadataMap() { return uploadedCustomMetadataMap; } + public long getMetadataVersion() { + return metadataVersion; + } + + public UploadedMetadataAttribute getTransientSettingsMetadata() { + return uploadedTransientSettingsMetadata; + } + + public UploadedMetadataAttribute getDiscoveryNodesMetadata() { + return uploadedDiscoveryNodesMetadata; + } + + public UploadedMetadataAttribute getClusterBlocksMetadata() { + return uploadedClusterBlocksMetadata; + } + + public ClusterStateDiffManifest getDiffManifest() { + return diffManifest; + } + + public Map getClusterStateCustomMap() { + return uploadedClusterStateCustomMap; + } + + public UploadedMetadataAttribute getHashesOfConsistentSettings() { + return uploadedHashesOfConsistentSettings; + } + public boolean hasMetadataAttributesFiles() { return uploadedCoordinationMetadata != null || uploadedSettingsMetadata != null @@ -365,7 +465,14 @@ public ClusterMetadataManifest( UploadedMetadataAttribute uploadedTemplatesMetadata, Map uploadedCustomMetadataMap, long routingTableVersion, - List indicesRouting + List indicesRouting, + long metadataVersion, + UploadedMetadataAttribute discoveryNodesMetadata, + UploadedMetadataAttribute clusterBlocksMetadata, + UploadedMetadataAttribute uploadedTransientSettingsMetadata, + UploadedMetadataAttribute uploadedHashesOfConsistentSettings, + Map uploadedClusterStateCustomMap, + ClusterStateDiffManifest diffManifest ) { this.clusterTerm = clusterTerm; this.stateVersion = version; @@ -387,6 +494,15 @@ public ClusterMetadataManifest( this.uploadedCustomMetadataMap = Collections.unmodifiableMap( uploadedCustomMetadataMap != null ? uploadedCustomMetadataMap : new HashMap<>() ); + this.uploadedDiscoveryNodesMetadata = discoveryNodesMetadata; + this.uploadedClusterBlocksMetadata = clusterBlocksMetadata; + this.diffManifest = diffManifest; + this.metadataVersion = metadataVersion; + this.uploadedTransientSettingsMetadata = uploadedTransientSettingsMetadata; + this.uploadedHashesOfConsistentSettings = uploadedHashesOfConsistentSettings; + this.uploadedClusterStateCustomMap = Collections.unmodifiableMap( + uploadedClusterStateCustomMap != null ? 
uploadedClusterStateCustomMap : new HashMap<>() + ); } public ClusterMetadataManifest(StreamInput in) throws IOException { @@ -411,24 +527,56 @@ public ClusterMetadataManifest(StreamInput in) throws IOException { this.globalMetadataFileName = null; this.routingTableVersion = in.readLong(); this.indicesRouting = Collections.unmodifiableList(in.readList(UploadedIndexMetadata::new)); - } else if (in.getVersion().onOrAfter(Version.V_2_12_0)) { - this.codecVersion = in.readInt(); - this.globalMetadataFileName = in.readString(); - this.uploadedCoordinationMetadata = null; - this.uploadedSettingsMetadata = null; - this.uploadedTemplatesMetadata = null; - this.uploadedCustomMetadataMap = null; - this.routingTableVersion = -1; - this.indicesRouting = null; + this.metadataVersion = in.readLong(); + if (in.readBoolean()) { + this.uploadedDiscoveryNodesMetadata = new UploadedMetadataAttribute(in); + } else { + this.uploadedDiscoveryNodesMetadata = null; + } + if (in.readBoolean()) { + this.uploadedClusterBlocksMetadata = new UploadedMetadataAttribute(in); + } else { + this.uploadedClusterBlocksMetadata = null; + } + if (in.readBoolean()) { + this.uploadedTransientSettingsMetadata = new UploadedMetadataAttribute(in); + } else { + this.uploadedTransientSettingsMetadata = null; + } + if (in.readBoolean()) { + this.uploadedHashesOfConsistentSettings = new UploadedMetadataAttribute(in); + } else { + this.uploadedHashesOfConsistentSettings = null; + } + this.uploadedClusterStateCustomMap = Collections.unmodifiableMap( + in.readMap(StreamInput::readString, UploadedMetadataAttribute::new) + ); + if (in.readBoolean()) { + this.diffManifest = new ClusterStateDiffManifest(in); + } else { + this.diffManifest = null; + } } else { - this.codecVersion = CODEC_V0; // Default codec - this.globalMetadataFileName = null; + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + this.codecVersion = in.readInt(); + this.globalMetadataFileName = in.readString(); + } else { + this.codecVersion = CODEC_V0; // Default codec + this.globalMetadataFileName = null; + } this.uploadedCoordinationMetadata = null; this.uploadedSettingsMetadata = null; this.uploadedTemplatesMetadata = null; this.uploadedCustomMetadataMap = null; this.routingTableVersion = -1; this.indicesRouting = null; + this.uploadedDiscoveryNodesMetadata = null; + this.uploadedClusterBlocksMetadata = null; + this.diffManifest = null; + this.metadataVersion = -1; + this.uploadedTransientSettingsMetadata = null; + this.uploadedHashesOfConsistentSettings = null; + this.uploadedClusterStateCustomMap = null; } } @@ -482,11 +630,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws attribute.toXContent(builder, params); } builder.endObject(); - } else if (onOrAfterCodecVersion(CODEC_V1)) { - builder.field(CODEC_VERSION_FIELD.getPreferredName(), getCodecVersion()); - builder.field(GLOBAL_METADATA_FIELD.getPreferredName(), getGlobalMetadataFileName()); - } - if (onOrAfterCodecVersion(CODEC_V3)) { builder.field(ROUTING_TABLE_VERSION_FIELD.getPreferredName(), getRoutingTableVersion()); builder.startArray(INDICES_ROUTING_FIELD.getPreferredName()); { @@ -497,6 +640,40 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } builder.endArray(); + if (getDiscoveryNodesMetadata() != null) { + builder.startObject(UPLOADED_DISCOVERY_NODES_METADATA.getPreferredName()); + getDiscoveryNodesMetadata().toXContent(builder, params); + builder.endObject(); + } + if (getClusterBlocksMetadata() != null) { + 
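+ // Aside (annotation, not part of this change): every optional uploaded
+ // attribute in this manifest follows the same presence pattern. toXContent
+ // emits it as a named object only when non-null, and writeTo/readFrom frame
+ // it with a boolean presence flag, so manifests that lack a field still
+ // round-trip cleanly across versions.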
builder.startObject(UPLOADED_CLUSTER_BLOCKS_METADATA.getPreferredName()); + getClusterBlocksMetadata().toXContent(builder, params); + builder.endObject(); + } + if (getTransientSettingsMetadata() != null) { + builder.startObject(UPLOADED_TRANSIENT_SETTINGS_METADATA.getPreferredName()); + getTransientSettingsMetadata().toXContent(builder, params); + builder.endObject(); + } + if (getDiffManifest() != null) { + builder.startObject(DIFF_MANIFEST.getPreferredName()); + getDiffManifest().toXContent(builder, params); + builder.endObject(); + } + builder.field(METADATA_VERSION.getPreferredName(), getMetadataVersion()); + if (getHashesOfConsistentSettings() != null) { + builder.startObject(UPLOADED_HASHES_OF_CONSISTENT_SETTINGS_METADATA.getPreferredName()); + getHashesOfConsistentSettings().toXContent(builder, params); + builder.endObject(); + } + builder.startObject(UPLOADED_CLUSTER_STATE_CUSTOM_METADATA.getPreferredName()); + for (UploadedMetadataAttribute attribute : getClusterStateCustomMap().values()) { + attribute.toXContent(builder, params); + } + builder.endObject(); + } else if (onOrAfterCodecVersion(CODEC_V1)) { + builder.field(CODEC_VERSION_FIELD.getPreferredName(), getCodecVersion()); + builder.field(GLOBAL_METADATA_FIELD.getPreferredName(), getGlobalMetadataFileName()); } return builder; } @@ -521,6 +698,38 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(uploadedCustomMetadataMap, StreamOutput::writeString, (o, v) -> v.writeTo(o)); out.writeLong(routingTableVersion); out.writeCollection(indicesRouting); + out.writeLong(metadataVersion); + if (uploadedDiscoveryNodesMetadata != null) { + out.writeBoolean(true); + uploadedDiscoveryNodesMetadata.writeTo(out); + } else { + out.writeBoolean(false); + } + if (uploadedClusterBlocksMetadata != null) { + out.writeBoolean(true); + uploadedClusterBlocksMetadata.writeTo(out); + } else { + out.writeBoolean(false); + } + if (uploadedTransientSettingsMetadata != null) { + out.writeBoolean(true); + uploadedTransientSettingsMetadata.writeTo(out); + } else { + out.writeBoolean(false); + } + if (uploadedHashesOfConsistentSettings != null) { + out.writeBoolean(true); + uploadedHashesOfConsistentSettings.writeTo(out); + } else { + out.writeBoolean(false); + } + out.writeMap(uploadedClusterStateCustomMap, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + if (diffManifest != null) { + out.writeBoolean(true); + diffManifest.writeTo(out); + } else { + out.writeBoolean(false); + } } else if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeInt(codecVersion); out.writeString(globalMetadataFileName); @@ -549,7 +758,18 @@ public boolean equals(Object o) { && Objects.equals(globalMetadataFileName, that.globalMetadataFileName) && Objects.equals(codecVersion, that.codecVersion) && Objects.equals(routingTableVersion, that.routingTableVersion) - && Objects.equals(indicesRouting, that.indicesRouting); + && Objects.equals(indicesRouting, that.indicesRouting) + && Objects.equals(uploadedCoordinationMetadata, that.uploadedCoordinationMetadata) + && Objects.equals(uploadedSettingsMetadata, that.uploadedSettingsMetadata) + && Objects.equals(uploadedTemplatesMetadata, that.uploadedTemplatesMetadata) + && Objects.equals(uploadedCustomMetadataMap, that.uploadedCustomMetadataMap) + && Objects.equals(metadataVersion, that.metadataVersion) + && Objects.equals(uploadedDiscoveryNodesMetadata, that.uploadedDiscoveryNodesMetadata) + && Objects.equals(uploadedClusterBlocksMetadata, that.uploadedClusterBlocksMetadata) + && 
Objects.equals(uploadedTransientSettingsMetadata, that.uploadedTransientSettingsMetadata) + && Objects.equals(uploadedHashesOfConsistentSettings, that.uploadedHashesOfConsistentSettings) + && Objects.equals(uploadedClusterStateCustomMap, that.uploadedClusterStateCustomMap) + && Objects.equals(diffManifest, that.diffManifest); } @Override @@ -568,7 +788,18 @@ public int hashCode() { previousClusterUUID, clusterUUIDCommitted, routingTableVersion, - indicesRouting + indicesRouting, + uploadedCoordinationMetadata, + uploadedSettingsMetadata, + uploadedTemplatesMetadata, + uploadedCustomMetadataMap, + metadataVersion, + uploadedDiscoveryNodesMetadata, + uploadedClusterBlocksMetadata, + uploadedTransientSettingsMetadata, + uploadedHashesOfConsistentSettings, + uploadedClusterStateCustomMap, + diffManifest ); } @@ -622,6 +853,13 @@ public static class Builder { private boolean clusterUUIDCommitted; private long routingTableVersion; private List indicesRouting; + private long metadataVersion; + private UploadedMetadataAttribute discoveryNodesMetadata; + private UploadedMetadataAttribute clusterBlocksMetadata; + private UploadedMetadataAttribute transientSettingsMetadata; + private UploadedMetadataAttribute hashesOfConsistentSettings; + private Map clusterStateCustomMetadataMap; + private ClusterStateDiffManifest diffManifest; public Builder indices(List indices) { this.indices = indices; @@ -726,10 +964,46 @@ public Builder clusterUUIDCommitted(boolean clusterUUIDCommitted) { return this; } + public Builder metadataVersion(long metadataVersion) { + this.metadataVersion = metadataVersion; + return this; + } + + public Builder discoveryNodesMetadata(UploadedMetadataAttribute discoveryNodesMetadata) { + this.discoveryNodesMetadata = discoveryNodesMetadata; + return this; + } + + public Builder clusterBlocksMetadata(UploadedMetadataAttribute clusterBlocksMetadata) { + this.clusterBlocksMetadata = clusterBlocksMetadata; + return this; + } + + public Builder transientSettingsMetadata(UploadedMetadataAttribute settingsMetadata) { + this.transientSettingsMetadata = settingsMetadata; + return this; + } + + public Builder hashesOfConsistentSettings(UploadedMetadataAttribute hashesOfConsistentSettings) { + this.hashesOfConsistentSettings = hashesOfConsistentSettings; + return this; + } + + public Builder clusterStateCustomMetadataMap(Map clusterStateCustomMetadataMap) { + this.clusterStateCustomMetadataMap = clusterStateCustomMetadataMap; + return this; + } + + public Builder diffManifest(ClusterStateDiffManifest diffManifest) { + this.diffManifest = diffManifest; + return this; + } + public Builder() { indices = new ArrayList<>(); customMetadataMap = new HashMap<>(); indicesRouting = new ArrayList<>(); + clusterStateCustomMetadataMap = new HashMap<>(); } public Builder(ClusterMetadataManifest manifest) { @@ -751,6 +1025,12 @@ public Builder(ClusterMetadataManifest manifest) { this.clusterUUIDCommitted = manifest.clusterUUIDCommitted; this.routingTableVersion = manifest.routingTableVersion; this.indicesRouting = new ArrayList<>(manifest.indicesRouting); + this.discoveryNodesMetadata = manifest.uploadedDiscoveryNodesMetadata; + this.clusterBlocksMetadata = manifest.uploadedClusterBlocksMetadata; + this.transientSettingsMetadata = manifest.uploadedTransientSettingsMetadata; + this.diffManifest = manifest.diffManifest; + this.hashesOfConsistentSettings = manifest.uploadedHashesOfConsistentSettings; + this.clusterStateCustomMetadataMap = manifest.uploadedClusterStateCustomMap; } public ClusterMetadataManifest 
build() { @@ -772,7 +1052,14 @@ public ClusterMetadataManifest build() { templatesMetadata, customMetadataMap, routingTableVersion, - indicesRouting + indicesRouting, + metadataVersion, + discoveryNodesMetadata, + clusterBlocksMetadata, + transientSettingsMetadata, + hashesOfConsistentSettings, + clusterStateCustomMetadataMap, + diffManifest ); } @@ -807,6 +1094,7 @@ public static class UploadedIndexMetadata implements UploadedMetadata, Writeable private static final ParseField INDEX_NAME_FIELD = new ParseField("index_name"); private static final ParseField INDEX_UUID_FIELD = new ParseField("index_uuid"); private static final ParseField UPLOADED_FILENAME_FIELD = new ParseField("uploaded_filename"); + private static final ParseField COMPONENT_PREFIX_FIELD = new ParseField("component_prefix"); private static String indexName(Object[] fields) { return (String) fields[0]; @@ -820,23 +1108,34 @@ private static String uploadedFilename(Object[] fields) { return (String) fields[2]; } + private static String componentPrefix(Object[] fields) { + return (String) fields[3]; + } + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "uploaded_index_metadata", - fields -> new UploadedIndexMetadata(indexName(fields), indexUUID(fields), uploadedFilename(fields)) + fields -> new UploadedIndexMetadata(indexName(fields), indexUUID(fields), uploadedFilename(fields), componentPrefix(fields)) ); static { PARSER.declareString(ConstructingObjectParser.constructorArg(), INDEX_NAME_FIELD); PARSER.declareString(ConstructingObjectParser.constructorArg(), INDEX_UUID_FIELD); PARSER.declareString(ConstructingObjectParser.constructorArg(), UPLOADED_FILENAME_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), COMPONENT_PREFIX_FIELD); } static final String COMPONENT_PREFIX = "index--"; + private final String componentPrefix; private final String indexName; private final String indexUUID; private final String uploadedFilename; public UploadedIndexMetadata(String indexName, String indexUUID, String uploadedFileName) { + this(indexName, indexUUID, uploadedFileName, COMPONENT_PREFIX); + } + + public UploadedIndexMetadata(String indexName, String indexUUID, String uploadedFileName, String componentPrefix) { + this.componentPrefix = componentPrefix; this.indexName = indexName; this.indexUUID = indexUUID; this.uploadedFilename = uploadedFileName; @@ -846,6 +1145,7 @@ public UploadedIndexMetadata(StreamInput in) throws IOException { this.indexName = in.readString(); this.indexUUID = in.readString(); this.uploadedFilename = in.readString(); + this.componentPrefix = in.readString(); } public String getUploadedFilePath() { @@ -854,12 +1154,11 @@ public String getUploadedFilePath() { @Override public String getComponent() { - return COMPONENT_PREFIX + getIndexName(); + return componentPrefix + getIndexName(); } public String getUploadedFilename() { - String[] splitPath = uploadedFilename.split("/"); - return splitPath[splitPath.length - 1]; + return uploadedFilename; } public String getIndexName() { @@ -870,11 +1169,16 @@ public String getIndexUUID() { return indexUUID; } + public String getComponentPrefix() { + return componentPrefix; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { return builder.field(INDEX_NAME_FIELD.getPreferredName(), getIndexName()) .field(INDEX_UUID_FIELD.getPreferredName(), getIndexUUID()) - .field(UPLOADED_FILENAME_FIELD.getPreferredName(), getUploadedFilePath()); + 
.field(UPLOADED_FILENAME_FIELD.getPreferredName(), getUploadedFilePath()) + .field(COMPONENT_PREFIX_FIELD.getPreferredName(), getComponentPrefix()); } @Override @@ -882,6 +1186,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(indexName); out.writeString(indexUUID); out.writeString(uploadedFilename); + out.writeString(componentPrefix); } @Override @@ -895,12 +1200,13 @@ public boolean equals(Object o) { final UploadedIndexMetadata that = (UploadedIndexMetadata) o; return Objects.equals(indexName, that.indexName) && Objects.equals(indexUUID, that.indexUUID) - && Objects.equals(uploadedFilename, that.uploadedFilename); + && Objects.equals(uploadedFilename, that.uploadedFilename) + && Objects.equals(componentPrefix, that.componentPrefix); } @Override public int hashCode() { - return Objects.hash(indexName, indexUUID, uploadedFilename); + return Objects.hash(indexName, indexUUID, uploadedFilename, componentPrefix); } @Override @@ -979,6 +1285,19 @@ public static UploadedMetadataAttribute fromXContent(XContentParser parser) thro return PARSER.parse(parser, null, parser.currentName()); } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + UploadedMetadataAttribute that = (UploadedMetadataAttribute) o; + return Objects.equals(attributeName, that.attributeName) && Objects.equals(uploadedFilename, that.uploadedFilename); + } + + @Override + public int hashCode() { + return Objects.hash(attributeName, uploadedFilename); + } + @Override public String toString() { return "UploadedMetadataAttribute{" diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java new file mode 100644 index 0000000000000..65ae2675a95da --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java @@ -0,0 +1,675 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote; + +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.DiffableUtils; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParseException; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.opensearch.cluster.DiffableUtils.NonDiffableValueSerializer.getAbstractInstance; +import static org.opensearch.cluster.DiffableUtils.getStringKeySerializer; +import static org.opensearch.cluster.routing.remote.RemoteRoutingTableService.CUSTOM_ROUTING_TABLE_VALUE_SERIALIZER; +import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; + +/** + * Manifest of diff between two cluster states + * + * @opensearch.internal + */ +public class ClusterStateDiffManifest implements ToXContentFragment, Writeable { + private static final String FROM_STATE_UUID_FIELD = "from_state_uuid"; + private static final String TO_STATE_UUID_FIELD = "to_state_uuid"; + private static final String METADATA_DIFF_FIELD = "metadata_diff"; + private static final String COORDINATION_METADATA_UPDATED_FIELD = "coordination_metadata_diff"; + private static final String SETTINGS_METADATA_UPDATED_FIELD = "settings_metadata_diff"; + private static final String TRANSIENT_SETTINGS_METADATA_UPDATED_FIELD = "transient_settings_metadata_diff"; + private static final String TEMPLATES_METADATA_UPDATED_FIELD = "templates_metadata_diff"; + private static final String HASHES_OF_CONSISTENT_SETTINGS_UPDATED_FIELD = "hashes_of_consistent_settings_diff"; + private static final String INDICES_DIFF_FIELD = "indices_diff"; + private static final String METADATA_CUSTOM_DIFF_FIELD = "metadata_custom_diff"; + private static final String UPSERTS_FIELD = "upserts"; + private static final String DELETES_FIELD = "deletes"; + private static final String CLUSTER_BLOCKS_UPDATED_FIELD = "cluster_blocks_diff"; + private static final String DISCOVERY_NODES_UPDATED_FIELD = "discovery_nodes_diff"; + private static final String ROUTING_TABLE_DIFF = "routing_table_diff"; + private static final String CLUSTER_STATE_CUSTOM_DIFF_FIELD = "cluster_state_custom_diff"; + + private final String fromStateUUID; + private final String toStateUUID; + private final boolean coordinationMetadataUpdated; + private final boolean settingsMetadataUpdated; + private final boolean transientSettingsMetadataUpdated; + private final boolean templatesMetadataUpdated; + private final List indicesUpdated; + private final List indicesDeleted; + private final List customMetadataUpdated; + private final List customMetadataDeleted; + private final boolean clusterBlocksUpdated; + private final boolean discoveryNodesUpdated; + private final List indicesRoutingUpdated; + private final List indicesRoutingDeleted; + private final boolean hashesOfConsistentSettingsUpdated; + private final List clusterStateCustomUpdated; + private final 
List<String> clusterStateCustomDeleted; + + public ClusterStateDiffManifest(ClusterState state, ClusterState previousState) { + fromStateUUID = previousState.stateUUID(); + toStateUUID = state.stateUUID(); + coordinationMetadataUpdated = !Metadata.isCoordinationMetadataEqual(state.metadata(), previousState.metadata()); + settingsMetadataUpdated = !Metadata.isSettingsMetadataEqual(state.metadata(), previousState.metadata()); + transientSettingsMetadataUpdated = !Metadata.isTransientSettingsMetadataEqual(state.metadata(), previousState.metadata()); + templatesMetadataUpdated = !Metadata.isTemplatesMetadataEqual(state.metadata(), previousState.metadata()); + DiffableUtils.MapDiff<String, IndexMetadata, Map<String, IndexMetadata>> indicesDiff = DiffableUtils.diff( + previousState.metadata().indices(), + state.metadata().indices(), + getStringKeySerializer() + ); + indicesDeleted = indicesDiff.getDeletes(); + indicesUpdated = new ArrayList<>(indicesDiff.getDiffs().keySet()); + indicesUpdated.addAll(indicesDiff.getUpserts().keySet()); + clusterBlocksUpdated = !state.blocks().equals(previousState.blocks()); + discoveryNodesUpdated = state.nodes().delta(previousState.nodes()).hasChanges(); + DiffableUtils.MapDiff<String, Metadata.Custom, Map<String, Metadata.Custom>> customDiff = DiffableUtils.diff( + previousState.metadata().customs(), + state.metadata().customs(), + getStringKeySerializer(), + getAbstractInstance() + ); + customMetadataUpdated = new ArrayList<>(customDiff.getDiffs().keySet()); + customMetadataUpdated.addAll(customDiff.getUpserts().keySet()); + customMetadataDeleted = customDiff.getDeletes(); + + DiffableUtils.MapDiff<String, IndexRoutingTable, Map<String, IndexRoutingTable>> routingTableDiff = DiffableUtils.diff( + previousState.getRoutingTable().getIndicesRouting(), + state.getRoutingTable().getIndicesRouting(), + DiffableUtils.getStringKeySerializer(), + CUSTOM_ROUTING_TABLE_VALUE_SERIALIZER + ); + + indicesRoutingUpdated = new ArrayList<>(); + routingTableDiff.getUpserts().forEach((k, v) -> indicesRoutingUpdated.add(k)); + + indicesRoutingDeleted = routingTableDiff.getDeletes(); + hashesOfConsistentSettingsUpdated = !state.metadata() + .hashesOfConsistentSettings() + .equals(previousState.metadata().hashesOfConsistentSettings()); + DiffableUtils.MapDiff<String, ClusterState.Custom, Map<String, ClusterState.Custom>> clusterStateCustomDiff = DiffableUtils.diff( + previousState.customs(), + state.customs(), + getStringKeySerializer(), + getAbstractInstance() + ); + clusterStateCustomUpdated = new ArrayList<>(clusterStateCustomDiff.getDiffs().keySet()); + clusterStateCustomUpdated.addAll(clusterStateCustomDiff.getUpserts().keySet()); + clusterStateCustomDeleted = clusterStateCustomDiff.getDeletes(); + } + + public ClusterStateDiffManifest( + String fromStateUUID, + String toStateUUID, + boolean coordinationMetadataUpdated, + boolean settingsMetadataUpdated, + boolean transientSettingsMetadataUpdate, + boolean templatesMetadataUpdated, + List<String> customMetadataUpdated, + List<String> customMetadataDeleted, + List<String> indicesUpdated, + List<String> indicesDeleted, + boolean clusterBlocksUpdated, + boolean discoveryNodesUpdated, + List<String> indicesRoutingUpdated, + List<String> indicesRoutingDeleted, + boolean hashesOfConsistentSettingsUpdated, + List<String> clusterStateCustomUpdated, + List<String> clusterStateCustomDeleted + ) { + this.fromStateUUID = fromStateUUID; + this.toStateUUID = toStateUUID; + this.coordinationMetadataUpdated = coordinationMetadataUpdated; + this.settingsMetadataUpdated = settingsMetadataUpdated; + this.transientSettingsMetadataUpdated = transientSettingsMetadataUpdate; + this.templatesMetadataUpdated = templatesMetadataUpdated; + this.customMetadataUpdated = customMetadataUpdated; + this.customMetadataDeleted = customMetadataDeleted;
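+ // Aside (annotation, not part of this change): each "updated" list merges
+ // MapDiff.getDiffs() and MapDiff.getUpserts() keys (changed plus newly added
+ // entries), and each "deleted" list is MapDiff.getDeletes(), as computed in
+ // the ClusterState-pair constructor above.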
+ this.indicesUpdated = indicesUpdated; + this.indicesDeleted = indicesDeleted; + this.clusterBlocksUpdated = clusterBlocksUpdated; + this.discoveryNodesUpdated = discoveryNodesUpdated; + this.indicesRoutingUpdated = indicesRoutingUpdated; + this.indicesRoutingDeleted = indicesRoutingDeleted; + this.hashesOfConsistentSettingsUpdated = hashesOfConsistentSettingsUpdated; + this.clusterStateCustomUpdated = clusterStateCustomUpdated; + this.clusterStateCustomDeleted = clusterStateCustomDeleted; + } + + public ClusterStateDiffManifest(StreamInput in) throws IOException { + this.fromStateUUID = in.readString(); + this.toStateUUID = in.readString(); + this.coordinationMetadataUpdated = in.readBoolean(); + this.settingsMetadataUpdated = in.readBoolean(); + this.transientSettingsMetadataUpdated = in.readBoolean(); + this.templatesMetadataUpdated = in.readBoolean(); + this.indicesUpdated = in.readStringList(); + this.indicesDeleted = in.readStringList(); + this.customMetadataUpdated = in.readStringList(); + this.customMetadataDeleted = in.readStringList(); + this.clusterBlocksUpdated = in.readBoolean(); + this.discoveryNodesUpdated = in.readBoolean(); + this.indicesRoutingUpdated = in.readStringList(); + this.indicesRoutingDeleted = in.readStringList(); + this.hashesOfConsistentSettingsUpdated = in.readBoolean(); + this.clusterStateCustomUpdated = in.readStringList(); + this.clusterStateCustomDeleted = in.readStringList(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(FROM_STATE_UUID_FIELD, fromStateUUID); + builder.field(TO_STATE_UUID_FIELD, toStateUUID); + builder.startObject(METADATA_DIFF_FIELD); + { + builder.field(COORDINATION_METADATA_UPDATED_FIELD, coordinationMetadataUpdated); + builder.field(SETTINGS_METADATA_UPDATED_FIELD, settingsMetadataUpdated); + builder.field(TRANSIENT_SETTINGS_METADATA_UPDATED_FIELD, transientSettingsMetadataUpdated); + builder.field(TEMPLATES_METADATA_UPDATED_FIELD, templatesMetadataUpdated); + builder.startObject(INDICES_DIFF_FIELD); + builder.startArray(UPSERTS_FIELD); + for (String index : indicesUpdated) { + builder.value(index); + } + builder.endArray(); + builder.startArray(DELETES_FIELD); + for (String index : indicesDeleted) { + builder.value(index); + } + builder.endArray(); + builder.endObject(); + builder.startObject(METADATA_CUSTOM_DIFF_FIELD); + builder.startArray(UPSERTS_FIELD); + for (String custom : customMetadataUpdated) { + builder.value(custom); + } + builder.endArray(); + builder.startArray(DELETES_FIELD); + for (String custom : customMetadataDeleted) { + builder.value(custom); + } + builder.endArray(); + builder.endObject(); + builder.field(HASHES_OF_CONSISTENT_SETTINGS_UPDATED_FIELD, hashesOfConsistentSettingsUpdated); + } + builder.endObject(); + builder.field(CLUSTER_BLOCKS_UPDATED_FIELD, clusterBlocksUpdated); + builder.field(DISCOVERY_NODES_UPDATED_FIELD, discoveryNodesUpdated); + + builder.startObject(ROUTING_TABLE_DIFF); + builder.startArray(UPSERTS_FIELD); + for (String index : indicesRoutingUpdated) { + builder.value(index); + } + builder.endArray(); + builder.startArray(DELETES_FIELD); + for (String index : indicesRoutingDeleted) { + builder.value(index); + } + builder.endArray(); + builder.endObject(); + builder.startObject(CLUSTER_STATE_CUSTOM_DIFF_FIELD); + builder.startArray(UPSERTS_FIELD); + for (String custom : clusterStateCustomUpdated) { + builder.value(custom); + } + builder.endArray(); + builder.startArray(DELETES_FIELD); + for (String custom 
: clusterStateCustomDeleted) { + builder.value(custom); + } + builder.endArray(); + builder.endObject(); + return builder; + } + + public static ClusterStateDiffManifest fromXContent(XContentParser parser) throws IOException { + Builder builder = new Builder(); + if (parser.currentToken() == null) { // fresh parser? move to next token + parser.nextToken(); + } + if (parser.currentToken() == XContentParser.Token.START_OBJECT) { + parser.nextToken(); + } + ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser); + String currentFieldName = parser.currentName(); + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if (currentFieldName.equals(METADATA_DIFF_FIELD)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + currentFieldName = parser.currentName(); + token = parser.nextToken(); + if (token.isValue()) { + switch (currentFieldName) { + case COORDINATION_METADATA_UPDATED_FIELD: + builder.coordinationMetadataUpdated(parser.booleanValue()); + break; + case SETTINGS_METADATA_UPDATED_FIELD: + builder.settingsMetadataUpdated(parser.booleanValue()); + break; + case TRANSIENT_SETTINGS_METADATA_UPDATED_FIELD: + builder.transientSettingsMetadataUpdate(parser.booleanValue()); + break; + case TEMPLATES_METADATA_UPDATED_FIELD: + builder.templatesMetadataUpdated(parser.booleanValue()); + break; + case HASHES_OF_CONSISTENT_SETTINGS_UPDATED_FIELD: + builder.hashesOfConsistentSettingsUpdated(parser.booleanValue()); + break; + default: + throw new XContentParseException("Unexpected field [" + currentFieldName + "]"); + } + } else if (token == XContentParser.Token.START_OBJECT) { + if (currentFieldName.equals(INDICES_DIFF_FIELD)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + currentFieldName = parser.currentName(); + token = parser.nextToken(); + switch (currentFieldName) { + case UPSERTS_FIELD: + builder.indicesUpdated(convertListToString(parser.listOrderedMap())); + break; + case DELETES_FIELD: + builder.indicesDeleted(convertListToString(parser.listOrderedMap())); + break; + default: + throw new XContentParseException("Unexpected field [" + currentFieldName + "]"); + } + } + } else if (currentFieldName.equals(METADATA_CUSTOM_DIFF_FIELD)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + currentFieldName = parser.currentName(); + token = parser.nextToken(); + switch (currentFieldName) { + case UPSERTS_FIELD: + builder.customMetadataUpdated(convertListToString(parser.listOrderedMap())); + break; + case DELETES_FIELD: + builder.customMetadataDeleted(convertListToString(parser.listOrderedMap())); + break; + default: + throw new XContentParseException("Unexpected field [" + currentFieldName + "]"); + } + } + } else { + throw new XContentParseException("Unexpected field [" + currentFieldName + "]"); + } + } else { + throw new XContentParseException("Unexpected token [" + token + "]"); + } + } + } else if (currentFieldName.equals(ROUTING_TABLE_DIFF)) { + while ((parser.nextToken()) != XContentParser.Token.END_OBJECT) { + currentFieldName = parser.currentName(); + parser.nextToken(); + switch (currentFieldName) { + case UPSERTS_FIELD: + builder.indicesRoutingUpdated(convertListToString(parser.listOrderedMap())); + break; + case DELETES_FIELD: + 
builder.indicesRoutingDeleted(convertListToString(parser.listOrderedMap())); + break; + default: + throw new XContentParseException("Unexpected field [" + currentFieldName + "]"); + } + } + } else if (currentFieldName.equals(CLUSTER_STATE_CUSTOM_DIFF_FIELD)) { + while ((parser.nextToken()) != XContentParser.Token.END_OBJECT) { + currentFieldName = parser.currentName(); + parser.nextToken(); + switch (currentFieldName) { + case UPSERTS_FIELD: + builder.clusterStateCustomUpdated(convertListToString(parser.listOrderedMap())); + break; + case DELETES_FIELD: + builder.clusterStateCustomDeleted(convertListToString(parser.listOrderedMap())); + break; + default: + throw new XContentParseException("Unexpected field [" + currentFieldName + "]"); + } + } + } else { + throw new XContentParseException("Unexpected field [" + currentFieldName + "]"); + } + } else if (token.isValue()) { + switch (currentFieldName) { + case FROM_STATE_UUID_FIELD: + builder.fromStateUUID(parser.text()); + break; + case TO_STATE_UUID_FIELD: + builder.toStateUUID(parser.text()); + break; + case CLUSTER_BLOCKS_UPDATED_FIELD: + builder.clusterBlocksUpdated(parser.booleanValue()); + break; + case DISCOVERY_NODES_UPDATED_FIELD: + builder.discoveryNodesUpdated(parser.booleanValue()); + break; + default: + throw new XContentParseException("Unexpected field [" + currentFieldName + "]"); + } + } else { + throw new XContentParseException("Unexpected token [" + token + "]"); + } + } + return builder.build(); + } + + @Override + public String toString() { + return Strings.toString(MediaTypeRegistry.JSON, this); + } + + private static List convertListToString(List list) { + List convertedList = new ArrayList<>(); + for (Object o : list) { + convertedList.add(o.toString()); + } + return convertedList; + } + + public String getFromStateUUID() { + return fromStateUUID; + } + + public String getToStateUUID() { + return toStateUUID; + } + + public boolean isCoordinationMetadataUpdated() { + return coordinationMetadataUpdated; + } + + public boolean isSettingsMetadataUpdated() { + return settingsMetadataUpdated; + } + + public boolean isTransientSettingsMetadataUpdated() { + return transientSettingsMetadataUpdated; + } + + public boolean isTemplatesMetadataUpdated() { + return templatesMetadataUpdated; + } + + public List getCustomMetadataUpdated() { + return customMetadataUpdated; + } + + public List getCustomMetadataDeleted() { + return customMetadataDeleted; + } + + public List getIndicesUpdated() { + return indicesUpdated; + } + + public List getIndicesDeleted() { + return indicesDeleted; + } + + public boolean isClusterBlocksUpdated() { + return clusterBlocksUpdated; + } + + public boolean isDiscoveryNodesUpdated() { + return discoveryNodesUpdated; + } + + public boolean isHashesOfConsistentSettingsUpdated() { + return hashesOfConsistentSettingsUpdated; + } + + public List getIndicesRoutingUpdated() { + return indicesRoutingUpdated; + } + + public List getIndicesRoutingDeleted() { + return indicesRoutingDeleted; + } + + public List getClusterStateCustomUpdated() { + return clusterStateCustomUpdated; + } + + public List getClusterStateCustomDeleted() { + return clusterStateCustomDeleted; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ClusterStateDiffManifest that = (ClusterStateDiffManifest) o; + return coordinationMetadataUpdated == that.coordinationMetadataUpdated + && settingsMetadataUpdated == that.settingsMetadataUpdated + && 
transientSettingsMetadataUpdated == that.transientSettingsMetadataUpdated + && templatesMetadataUpdated == that.templatesMetadataUpdated + && clusterBlocksUpdated == that.clusterBlocksUpdated + && discoveryNodesUpdated == that.discoveryNodesUpdated + && hashesOfConsistentSettingsUpdated == that.hashesOfConsistentSettingsUpdated + && Objects.equals(fromStateUUID, that.fromStateUUID) + && Objects.equals(toStateUUID, that.toStateUUID) + && Objects.equals(customMetadataUpdated, that.customMetadataUpdated) + && Objects.equals(customMetadataDeleted, that.customMetadataDeleted) + && Objects.equals(indicesUpdated, that.indicesUpdated) + && Objects.equals(indicesDeleted, that.indicesDeleted) + && Objects.equals(indicesRoutingUpdated, that.indicesRoutingUpdated) + && Objects.equals(indicesRoutingDeleted, that.indicesRoutingDeleted) + && Objects.equals(clusterStateCustomUpdated, that.clusterStateCustomUpdated) + && Objects.equals(clusterStateCustomDeleted, that.clusterStateCustomDeleted); + } + + @Override + public int hashCode() { + return Objects.hash( + fromStateUUID, + toStateUUID, + coordinationMetadataUpdated, + settingsMetadataUpdated, + transientSettingsMetadataUpdated, + templatesMetadataUpdated, + customMetadataUpdated, + customMetadataDeleted, + indicesUpdated, + indicesDeleted, + clusterBlocksUpdated, + discoveryNodesUpdated, + indicesRoutingUpdated, + indicesRoutingDeleted, + hashesOfConsistentSettingsUpdated, + clusterStateCustomUpdated, + clusterStateCustomDeleted + ); + } + + public static Builder builder() { + return new Builder(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(fromStateUUID); + out.writeString(toStateUUID); + out.writeBoolean(coordinationMetadataUpdated); + out.writeBoolean(settingsMetadataUpdated); + out.writeBoolean(transientSettingsMetadataUpdated); + out.writeBoolean(templatesMetadataUpdated); + out.writeStringCollection(indicesUpdated); + out.writeStringCollection(indicesDeleted); + out.writeStringCollection(customMetadataUpdated); + out.writeStringCollection(customMetadataDeleted); + out.writeBoolean(clusterBlocksUpdated); + out.writeBoolean(discoveryNodesUpdated); + out.writeStringCollection(indicesRoutingUpdated); + out.writeStringCollection(indicesRoutingDeleted); + out.writeBoolean(hashesOfConsistentSettingsUpdated); + out.writeStringCollection(clusterStateCustomUpdated); + out.writeStringCollection(clusterStateCustomDeleted); + } + + /** + * Builder for ClusterStateDiffManifest + * + * @opensearch.internal + */ + public static class Builder { + private String fromStateUUID; + private String toStateUUID; + private boolean coordinationMetadataUpdated; + private boolean settingsMetadataUpdated; + private boolean transientSettingsMetadataUpdated; + private boolean templatesMetadataUpdated; + private List customMetadataUpdated; + private List customMetadataDeleted; + private List indicesUpdated; + private List indicesDeleted; + private boolean clusterBlocksUpdated; + private boolean discoveryNodesUpdated; + private List indicesRoutingUpdated; + private List indicesRoutingDeleted; + private boolean hashesOfConsistentSettingsUpdated; + private List clusterStateCustomUpdated; + private List clusterStateCustomDeleted; + + public Builder() {} + + public Builder fromStateUUID(String fromStateUUID) { + this.fromStateUUID = fromStateUUID; + return this; + } + + public Builder toStateUUID(String toStateUUID) { + this.toStateUUID = toStateUUID; + return this; + } + + public Builder coordinationMetadataUpdated(boolean 
coordinationMetadataUpdated) { + this.coordinationMetadataUpdated = coordinationMetadataUpdated; + return this; + } + + public Builder settingsMetadataUpdated(boolean settingsMetadataUpdated) { + this.settingsMetadataUpdated = settingsMetadataUpdated; + return this; + } + + public Builder transientSettingsMetadataUpdate(boolean transientSettingsMetadataUpdated) { + this.transientSettingsMetadataUpdated = transientSettingsMetadataUpdated; + return this; + } + + public Builder templatesMetadataUpdated(boolean templatesMetadataUpdated) { + this.templatesMetadataUpdated = templatesMetadataUpdated; + return this; + } + + public Builder hashesOfConsistentSettingsUpdated(boolean hashesOfConsistentSettingsUpdated) { + this.hashesOfConsistentSettingsUpdated = hashesOfConsistentSettingsUpdated; + return this; + } + + public Builder customMetadataUpdated(List<String> customMetadataUpdated) { + this.customMetadataUpdated = customMetadataUpdated; + return this; + } + + public Builder customMetadataDeleted(List<String> customMetadataDeleted) { + this.customMetadataDeleted = customMetadataDeleted; + return this; + } + + public Builder indicesUpdated(List<String> indicesUpdated) { + this.indicesUpdated = indicesUpdated; + return this; + } + + public Builder indicesDeleted(List<String> indicesDeleted) { + this.indicesDeleted = indicesDeleted; + return this; + } + + public Builder clusterBlocksUpdated(boolean clusterBlocksUpdated) { + this.clusterBlocksUpdated = clusterBlocksUpdated; + return this; + } + + public Builder discoveryNodesUpdated(boolean discoveryNodesUpdated) { + this.discoveryNodesUpdated = discoveryNodesUpdated; + return this; + } + + public Builder indicesRoutingUpdated(List<String> indicesRoutingUpdated) { + this.indicesRoutingUpdated = indicesRoutingUpdated; + return this; + } + + public Builder indicesRoutingDeleted(List<String> indicesRoutingDeleted) { + this.indicesRoutingDeleted = indicesRoutingDeleted; + return this; + } + + public Builder clusterStateCustomUpdated(List<String> clusterStateCustomUpdated) { + this.clusterStateCustomUpdated = clusterStateCustomUpdated; + return this; + } + + public Builder clusterStateCustomDeleted(List<String> clusterStateCustomDeleted) { + this.clusterStateCustomDeleted = clusterStateCustomDeleted; + return this; + } + + public ClusterStateDiffManifest build() { + return new ClusterStateDiffManifest( + fromStateUUID, + toStateUUID, + coordinationMetadataUpdated, + settingsMetadataUpdated, + transientSettingsMetadataUpdated, + templatesMetadataUpdated, + customMetadataUpdated, + customMetadataDeleted, + indicesUpdated, + indicesDeleted, + clusterBlocksUpdated, + discoveryNodesUpdated, + indicesRoutingUpdated, + indicesRoutingDeleted, + hashesOfConsistentSettingsUpdated, + clusterStateCustomUpdated, + clusterStateCustomDeleted + ); + } + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManager.java new file mode 100644 index 0000000000000..4098993246073 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManager.java @@ -0,0 +1,144 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.gateway.remote; + +import org.opensearch.action.LatchedActionListener; +import org.opensearch.cluster.ClusterState; +import org.opensearch.common.CheckedRunnable; +import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.RemoteWritableEntityStore; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.gateway.remote.model.RemoteClusterBlocks; +import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore; +import org.opensearch.gateway.remote.model.RemoteClusterStateCustoms; +import org.opensearch.gateway.remote.model.RemoteDiscoveryNodes; +import org.opensearch.gateway.remote.model.RemoteReadResult; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +/** + * A Manager which provides APIs to upload and download attributes of ClusterState to the {@link RemoteClusterStateBlobStore} + * + * @opensearch.internal + */ +public class RemoteClusterStateAttributesManager { + public static final String CLUSTER_STATE_ATTRIBUTE = "cluster_state_attribute"; + public static final String DISCOVERY_NODES = "nodes"; + public static final String CLUSTER_BLOCKS = "blocks"; + public static final int CLUSTER_STATE_ATTRIBUTES_CURRENT_CODEC_VERSION = 1; + private final Map<String, RemoteWritableEntityStore> remoteWritableEntityStores; + private final NamedWriteableRegistry namedWriteableRegistry; + + RemoteClusterStateAttributesManager( + String clusterName, + BlobStoreRepository blobStoreRepository, + BlobStoreTransferService blobStoreTransferService, + NamedWriteableRegistry namedWriteableRegistry, + ThreadPool threadpool + ) { + this.namedWriteableRegistry = namedWriteableRegistry; + this.remoteWritableEntityStores = new HashMap<>(); + this.remoteWritableEntityStores.put( + RemoteDiscoveryNodes.DISCOVERY_NODES, + new RemoteClusterStateBlobStore<>( + blobStoreTransferService, + blobStoreRepository, + clusterName, + threadpool, + ThreadPool.Names.REMOTE_STATE_READ + ) + ); + this.remoteWritableEntityStores.put( + RemoteClusterBlocks.CLUSTER_BLOCKS, + new RemoteClusterStateBlobStore<>( + blobStoreTransferService, + blobStoreRepository, + clusterName, + threadpool, + ThreadPool.Names.REMOTE_STATE_READ + ) + ); + this.remoteWritableEntityStores.put( + RemoteClusterStateCustoms.CLUSTER_STATE_CUSTOM, + new RemoteClusterStateBlobStore<>( + blobStoreTransferService, + blobStoreRepository, + clusterName, + threadpool, + ThreadPool.Names.REMOTE_STATE_READ + ) + ); + } + + /** + * Allows async upload of Cluster State Attribute components to remote + */ + CheckedRunnable<Void> getAsyncMetadataWriteAction( + String component, + AbstractRemoteWritableBlobEntity blobEntity, + LatchedActionListener<ClusterMetadataManifest.UploadedMetadata> latchedActionListener + ) { + return () -> getStore(blobEntity).writeAsync(blobEntity, getActionListener(component, blobEntity, latchedActionListener)); + } + + private ActionListener<Void> getActionListener( + String component, + AbstractRemoteWritableBlobEntity remoteObject, + LatchedActionListener<ClusterMetadataManifest.UploadedMetadata> latchedActionListener + ) { + return ActionListener.wrap( + resp -> latchedActionListener.onResponse(remoteObject.getUploadedMetadata()), + ex -> latchedActionListener.onFailure(new
RemoteStateTransferException(component, remoteObject, ex)) + ); + } + + private RemoteWritableEntityStore getStore(AbstractRemoteWritableBlobEntity entity) { + RemoteWritableEntityStore remoteStore = remoteWritableEntityStores.get(entity.getType()); + if (remoteStore == null) { + throw new IllegalArgumentException("Unknown entity type [" + entity.getType() + "]"); + } + return remoteStore; + } + + public CheckedRunnable<Void> getAsyncMetadataReadAction( + String component, + AbstractRemoteWritableBlobEntity blobEntity, + LatchedActionListener<RemoteReadResult> listener + ) { + final ActionListener actionListener = ActionListener.wrap( + response -> listener.onResponse(new RemoteReadResult((ToXContent) response, CLUSTER_STATE_ATTRIBUTE, component)), + listener::onFailure + ); + return () -> getStore(blobEntity).readAsync(blobEntity, actionListener); + } + + public Map<String, ClusterState.Custom> getUpdatedCustoms(ClusterState clusterState, ClusterState previousClusterState) { + Map<String, ClusterState.Custom> updatedCustoms = new HashMap<>(); + Set<String> currentCustoms = new HashSet<>(clusterState.customs().keySet()); + for (Map.Entry<String, ClusterState.Custom> entry : previousClusterState.customs().entrySet()) { + if (currentCustoms.contains(entry.getKey()) && !entry.getValue().equals(clusterState.customs().get(entry.getKey()))) { + updatedCustoms.put(entry.getKey(), clusterState.customs().get(entry.getKey())); + } + currentCustoms.remove(entry.getKey()); + } + for (String custom : currentCustoms) { + updatedCustoms.put(custom, clusterState.customs().get(custom)); + } + return updatedCustoms; + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java index 2fca239b10efd..99235bc96bfe3 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManager.java @@ -13,6 +13,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Strings; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.routing.remote.RemoteRoutingTableService; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.blobstore.BlobMetadata; @@ -34,12 +35,9 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; -import static org.opensearch.gateway.remote.RemoteClusterStateService.GLOBAL_METADATA_FORMAT; -import static org.opensearch.gateway.remote.RemoteClusterStateService.GLOBAL_METADATA_PATH_TOKEN; -import static org.opensearch.gateway.remote.RemoteClusterStateService.INDEX_METADATA_FORMAT; -import static org.opensearch.gateway.remote.RemoteClusterStateService.INDEX_PATH_TOKEN; -import static org.opensearch.gateway.remote.RemoteClusterStateService.MANIFEST_FILE_PREFIX; -import static org.opensearch.gateway.remote.RemoteClusterStateService.MANIFEST_PATH_TOKEN; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.GLOBAL_METADATA_PATH_TOKEN; +import static org.opensearch.gateway.remote.model.RemoteClusterMetadataManifest.MANIFEST; +import static org.opensearch.gateway.remote.model.RemoteGlobalMetadata.GLOBAL_METADATA_FORMAT; /** * A Manager which provides APIs to clean up stale cluster state files and runs an async stale cleanup task @@ -74,8 +72,14 @@ public class RemoteClusterStateCleanupManager implements Closeable { private long lastCleanupAttemptStateVersion; private final
ThreadPool threadpool; private final ClusterApplierService clusterApplierService; + private RemoteManifestManager remoteManifestManager; + private final RemoteRoutingTableService remoteRoutingTableService; - public RemoteClusterStateCleanupManager(RemoteClusterStateService remoteClusterStateService, ClusterService clusterService) { + public RemoteClusterStateCleanupManager( + RemoteClusterStateService remoteClusterStateService, + ClusterService clusterService, + RemoteRoutingTableService remoteRoutingTableService + ) { this.remoteClusterStateService = remoteClusterStateService; this.remoteStateStats = remoteClusterStateService.getStats(); ClusterSettings clusterSettings = clusterService.getClusterSettings(); @@ -85,10 +89,12 @@ public RemoteClusterStateCleanupManager(RemoteClusterStateService remoteClusterS // initialize with 0, a cleanup will be done when this node is elected master node and version is incremented more than threshold this.lastCleanupAttemptStateVersion = 0; clusterSettings.addSettingsUpdateConsumer(REMOTE_CLUSTER_STATE_CLEANUP_INTERVAL_SETTING, this::updateCleanupInterval); + this.remoteRoutingTableService = remoteRoutingTableService; } void start() { staleFileDeletionTask = new AsyncStaleFileDeletion(this); + remoteManifestManager = remoteClusterStateService.getRemoteManifestManager(); } @Override @@ -171,14 +177,20 @@ void deleteClusterMetadata( Set<String> staleManifestPaths = new HashSet<>(); Set<String> staleIndexMetadataPaths = new HashSet<>(); Set<String> staleGlobalMetadataPaths = new HashSet<>(); + Set<String> staleEphemeralAttributePaths = new HashSet<>(); + Set<String> staleIndexRoutingPaths = new HashSet<>(); activeManifestBlobMetadata.forEach(blobMetadata -> { - ClusterMetadataManifest clusterMetadataManifest = remoteClusterStateService.fetchRemoteClusterMetadataManifest( + ClusterMetadataManifest clusterMetadataManifest = remoteManifestManager.fetchRemoteClusterMetadataManifest( clusterName, clusterUUID, blobMetadata.name() ); clusterMetadataManifest.getIndices() - .forEach(uploadedIndexMetadata -> filesToKeep.add(uploadedIndexMetadata.getUploadedFilename())); + .forEach( + uploadedIndexMetadata -> filesToKeep.add( + RemoteClusterStateUtils.getFormattedIndexFileName(uploadedIndexMetadata.getUploadedFilename()) + ) + ); if (clusterMetadataManifest.getCodecVersion() == ClusterMetadataManifest.CODEC_V1) { filesToKeep.add(clusterMetadataManifest.getGlobalMetadataFileName()); } else if (clusterMetadataManifest.getCodecVersion() >= ClusterMetadataManifest.CODEC_V2) { @@ -189,47 +201,101 @@ void deleteClusterMetadata( .values() .forEach(attribute -> filesToKeep.add(attribute.getUploadedFilename())); } + if (clusterMetadataManifest.getTransientSettingsMetadata() != null) { + filesToKeep.add(clusterMetadataManifest.getTransientSettingsMetadata().getUploadedFilename()); + } + if (clusterMetadataManifest.getHashesOfConsistentSettings() != null) { + filesToKeep.add(clusterMetadataManifest.getHashesOfConsistentSettings().getUploadedFilename()); + } + if (clusterMetadataManifest.getDiscoveryNodesMetadata() != null) { + filesToKeep.add(clusterMetadataManifest.getDiscoveryNodesMetadata().getUploadedFilename()); + } + if (clusterMetadataManifest.getClusterBlocksMetadata() != null) { + filesToKeep.add(clusterMetadataManifest.getClusterBlocksMetadata().getUploadedFilename()); + } + if (clusterMetadataManifest.getClusterStateCustomMap() != null) { + clusterMetadataManifest.getClusterStateCustomMap() + .values() + .forEach(attribute -> filesToKeep.add(attribute.getUploadedFilename())); + } + if
(clusterMetadataManifest.getIndicesRouting() != null) { + clusterMetadataManifest.getIndicesRouting() + .forEach(uploadedIndicesRouting -> filesToKeep.add(uploadedIndicesRouting.getUploadedFilename())); + } }); staleManifestBlobMetadata.forEach(blobMetadata -> { - ClusterMetadataManifest clusterMetadataManifest = remoteClusterStateService.fetchRemoteClusterMetadataManifest( + ClusterMetadataManifest clusterMetadataManifest = remoteManifestManager.fetchRemoteClusterMetadataManifest( clusterName, clusterUUID, blobMetadata.name() ); - staleManifestPaths.add(new BlobPath().add(MANIFEST_PATH_TOKEN).buildAsString() + blobMetadata.name()); + staleManifestPaths.add( + remoteManifestManager.getManifestFolderPath(clusterName, clusterUUID).buildAsString() + blobMetadata.name() + ); if (clusterMetadataManifest.getCodecVersion() == ClusterMetadataManifest.CODEC_V1) { addStaleGlobalMetadataPath(clusterMetadataManifest.getGlobalMetadataFileName(), filesToKeep, staleGlobalMetadataPaths); } else if (clusterMetadataManifest.getCodecVersion() >= ClusterMetadataManifest.CODEC_V2) { - addStaleGlobalMetadataPath( - clusterMetadataManifest.getCoordinationMetadata().getUploadedFilename(), - filesToKeep, - staleGlobalMetadataPaths - ); - addStaleGlobalMetadataPath( - clusterMetadataManifest.getSettingsMetadata().getUploadedFilename(), - filesToKeep, - staleGlobalMetadataPaths - ); - addStaleGlobalMetadataPath( - clusterMetadataManifest.getTemplatesMetadata().getUploadedFilename(), - filesToKeep, - staleGlobalMetadataPaths - ); + if (filesToKeep.contains(clusterMetadataManifest.getCoordinationMetadata().getUploadedFilename()) == false) { + staleGlobalMetadataPaths.add(clusterMetadataManifest.getCoordinationMetadata().getUploadedFilename()); + } + if (filesToKeep.contains(clusterMetadataManifest.getSettingsMetadata().getUploadedFilename()) == false) { + staleGlobalMetadataPaths.add(clusterMetadataManifest.getSettingsMetadata().getUploadedFilename()); + } + if (filesToKeep.contains(clusterMetadataManifest.getTemplatesMetadata().getUploadedFilename()) == false) { + staleGlobalMetadataPaths.add(clusterMetadataManifest.getTemplatesMetadata().getUploadedFilename()); + } clusterMetadataManifest.getCustomMetadataMap() .values() - .forEach( - attribute -> addStaleGlobalMetadataPath(attribute.getUploadedFilename(), filesToKeep, staleGlobalMetadataPaths) - ); + .stream() + .map(ClusterMetadataManifest.UploadedMetadataAttribute::getUploadedFilename) + .filter(file -> filesToKeep.contains(file) == false) + .forEach(staleGlobalMetadataPaths::add); + } + if (clusterMetadataManifest.getIndicesRouting() != null) { + clusterMetadataManifest.getIndicesRouting().forEach(uploadedIndicesRouting -> { + if (!filesToKeep.contains(uploadedIndicesRouting.getUploadedFilename())) { + staleIndexRoutingPaths.add(uploadedIndicesRouting.getUploadedFilename()); + logger.debug( + () -> new ParameterizedMessage( + "Indices routing paths in stale manifest: {}", + uploadedIndicesRouting.getUploadedFilename() + ) + ); + } + }); } clusterMetadataManifest.getIndices().forEach(uploadedIndexMetadata -> { - if (filesToKeep.contains(uploadedIndexMetadata.getUploadedFilename()) == false) { - staleIndexMetadataPaths.add( - new BlobPath().add(INDEX_PATH_TOKEN).add(uploadedIndexMetadata.getIndexUUID()).buildAsString() - + INDEX_METADATA_FORMAT.blobName(uploadedIndexMetadata.getUploadedFilename()) - ); + String fileName = RemoteClusterStateUtils.getFormattedIndexFileName(uploadedIndexMetadata.getUploadedFilename()); + if (filesToKeep.contains(fileName) == false) { 
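+ // the formatted index metadata file is not referenced by any active manifest, so mark it stale for deletion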
+ staleIndexMetadataPaths.add(fileName); } }); + + if (clusterMetadataManifest.getClusterBlocksMetadata() != null + && !filesToKeep.contains(clusterMetadataManifest.getClusterBlocksMetadata().getUploadedFilename())) { + staleEphemeralAttributePaths.add(clusterMetadataManifest.getClusterBlocksMetadata().getUploadedFilename()); + } + if (clusterMetadataManifest.getDiscoveryNodesMetadata() != null + && !filesToKeep.contains(clusterMetadataManifest.getDiscoveryNodesMetadata().getUploadedFilename())) { + staleEphemeralAttributePaths.add(clusterMetadataManifest.getDiscoveryNodesMetadata().getUploadedFilename()); + } + if (clusterMetadataManifest.getTransientSettingsMetadata() != null + && !filesToKeep.contains(clusterMetadataManifest.getTransientSettingsMetadata().getUploadedFilename())) { + staleEphemeralAttributePaths.add(clusterMetadataManifest.getTransientSettingsMetadata().getUploadedFilename()); + } + if (clusterMetadataManifest.getHashesOfConsistentSettings() != null + && !filesToKeep.contains(clusterMetadataManifest.getHashesOfConsistentSettings().getUploadedFilename())) { + staleEphemeralAttributePaths.add(clusterMetadataManifest.getHashesOfConsistentSettings().getUploadedFilename()); + } + if (clusterMetadataManifest.getClusterStateCustomMap() != null) { + clusterMetadataManifest.getClusterStateCustomMap() + .values() + .stream() + .filter(u -> !filesToKeep.contains(u.getUploadedFilename())) + .forEach(attribute -> staleEphemeralAttributePaths.add(attribute.getUploadedFilename())); + } + }); if (staleManifestPaths.isEmpty()) { @@ -237,9 +303,19 @@ void deleteClusterMetadata( return; } - deleteStalePaths(clusterName, clusterUUID, new ArrayList<>(staleGlobalMetadataPaths)); - deleteStalePaths(clusterName, clusterUUID, new ArrayList<>(staleIndexMetadataPaths)); - deleteStalePaths(clusterName, clusterUUID, new ArrayList<>(staleManifestPaths)); + deleteStalePaths(new ArrayList<>(staleGlobalMetadataPaths)); + deleteStalePaths(new ArrayList<>(staleIndexMetadataPaths)); + deleteStalePaths(new ArrayList<>(staleEphemeralAttributePaths)); + deleteStalePaths(new ArrayList<>(staleManifestPaths)); + try { + remoteRoutingTableService.deleteStaleIndexRoutingPaths(new ArrayList<>(staleIndexRoutingPaths)); + } catch (IOException e) { + logger.error( + () -> new ParameterizedMessage("Error while deleting stale index routing files {}", staleIndexRoutingPaths), + e + ); + remoteStateStats.indexRoutingFilesCleanupAttemptFailed(); + } } catch (IllegalStateException e) { logger.error("Error while fetching Remote Cluster Metadata manifests", e); } catch (IOException e) { @@ -267,8 +343,8 @@ void deleteStaleClusterMetadata(String clusterName, String clusterUUID, int mani try { getBlobStoreTransferService().listAllInSortedOrderAsync( ThreadPool.Names.REMOTE_PURGE, - remoteClusterStateService.getManifestFolderPath(clusterName, clusterUUID), - MANIFEST_FILE_PREFIX, + remoteManifestManager.getManifestFolderPath(clusterName, clusterUUID), + MANIFEST, Integer.MAX_VALUE, new ActionListener<>() { @Override @@ -312,7 +388,11 @@ void deleteStaleUUIDsClusterMetadata(String clusterName, List clusterUUI clusterUUIDs.forEach( clusterUUID -> getBlobStoreTransferService().deleteAsync( ThreadPool.Names.REMOTE_PURGE, - remoteClusterStateService.getCusterMetadataBasePath(clusterName, clusterUUID), + RemoteClusterStateUtils.getClusterMetadataBasePath( + remoteClusterStateService.getBlobStoreRepository(), + clusterName, + clusterUUID + ), new ActionListener<>() { @Override public void onResponse(Void unused) { @@ -336,12 +416,9 @@
public void onFailure(Exception e) { } // package private for testing - void deleteStalePaths(String clusterName, String clusterUUID, List<String> stalePaths) throws IOException { + void deleteStalePaths(List<String> stalePaths) throws IOException { logger.debug(String.format(Locale.ROOT, "Deleting stale files from remote - %s", stalePaths)); - getBlobStoreTransferService().deleteBlobs( - remoteClusterStateService.getCusterMetadataBasePath(clusterName, clusterUUID), - stalePaths - ); + getBlobStoreTransferService().deleteBlobs(BlobPath.cleanPath(), stalePaths); } /** diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index d0593dcd51475..bd371ae671cf4 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -11,20 +11,28 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.Version; import org.opensearch.action.LatchedActionListener; +import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.DiffableUtils; +import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.coordination.CoordinationMetadata; +import org.opensearch.cluster.metadata.DiffableStringMap; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.Metadata.XContentContext; import org.opensearch.cluster.metadata.TemplatesMetadata; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.node.DiscoveryNodes.Builder; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.remote.InternalRemoteRoutingTableService; import org.opensearch.cluster.routing.remote.RemoteRoutingTableService; +import org.opensearch.cluster.routing.remote.RemoteRoutingTableServiceFactory; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.CheckedRunnable; import org.opensearch.common.Nullable; import org.opensearch.common.blobstore.BlobContainer; -import org.opensearch.common.blobstore.BlobMetadata; -import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; @@ -33,33 +41,43 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadataAttribute; -import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.gateway.remote.model.RemoteClusterBlocks; +import org.opensearch.gateway.remote.model.RemoteClusterStateCustoms; +import org.opensearch.gateway.remote.model.RemoteClusterStateManifestInfo; +import org.opensearch.gateway.remote.model.RemoteCoordinationMetadata; +import org.opensearch.gateway.remote.model.RemoteCustomMetadata; +import
org.opensearch.gateway.remote.model.RemoteDiscoveryNodes; +import org.opensearch.gateway.remote.model.RemoteHashesOfConsistentSettings; +import org.opensearch.gateway.remote.model.RemoteIndexMetadata; +import org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata; +import org.opensearch.gateway.remote.model.RemoteReadResult; +import org.opensearch.gateway.remote.model.RemoteTemplatesMetadata; +import org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata; import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.node.Node; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.blobstore.BlobStoreRepository; -import org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat; import org.opensearch.threadpool.ThreadPool; import java.io.Closeable; import java.io.IOException; -import java.nio.charset.StandardCharsets; import java.util.ArrayList; -import java.util.Base64; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; @@ -68,9 +86,25 @@ import java.util.function.Supplier; import java.util.stream.Collectors; -import static java.util.Objects.requireNonNull; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; import static org.opensearch.gateway.PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD; -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteRoutingTableEnabled; +import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V2; +import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_BLOCKS; +import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_STATE_ATTRIBUTE; +import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.DISCOVERY_NODES; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.UploadedMetadataResults; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.clusterUUIDContainer; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.getClusterMetadataBasePath; +import static org.opensearch.gateway.remote.model.RemoteClusterStateCustoms.CLUSTER_STATE_CUSTOM; +import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA; +import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_DELIMITER; +import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_METADATA; +import static org.opensearch.gateway.remote.model.RemoteHashesOfConsistentSettings.HASHES_OF_CONSISTENT_SETTINGS; +import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTING_METADATA; +import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA; +import static org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata.TRANSIENT_SETTING_METADATA; import static 
org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; /** @@ -80,99 +114,8 @@ */ public class RemoteClusterStateService implements Closeable { - public static final String METADATA_NAME_FORMAT = "%s.dat"; - - public static final String METADATA_MANIFEST_NAME_FORMAT = "%s"; - - public static final String DELIMITER = "__"; - public static final String CUSTOM_DELIMITER = "--"; private static final Logger logger = LogManager.getLogger(RemoteClusterStateService.class); - public static final TimeValue INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); - - public static final TimeValue GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); - - public static final TimeValue METADATA_MANIFEST_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); - - public static final Setting<TimeValue> INDEX_METADATA_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( - "cluster.remote_store.state.index_metadata.upload_timeout", - INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - public static final Setting<TimeValue> GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( - "cluster.remote_store.state.global_metadata.upload_timeout", - GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - public static final Setting<TimeValue> METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( - "cluster.remote_store.state.metadata_manifest.upload_timeout", - METADATA_MANIFEST_UPLOAD_TIMEOUT_DEFAULT, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - public static final ChecksumBlobStoreFormat<IndexMetadata> INDEX_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( - "index-metadata", - METADATA_NAME_FORMAT, - IndexMetadata::fromXContent - ); - - public static final ChecksumBlobStoreFormat<Metadata> GLOBAL_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( - "metadata", - METADATA_NAME_FORMAT, - Metadata::fromXContent - ); - - public static final ChecksumBlobStoreFormat<CoordinationMetadata> COORDINATION_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( - "coordination", - METADATA_NAME_FORMAT, - CoordinationMetadata::fromXContent - ); - - public static final ChecksumBlobStoreFormat<Settings> SETTINGS_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( - "settings", - METADATA_NAME_FORMAT, - Settings::fromXContent - ); - - public static final ChecksumBlobStoreFormat<TemplatesMetadata> TEMPLATES_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( - "templates", - METADATA_NAME_FORMAT, - TemplatesMetadata::fromXContent - ); - - public static final ChecksumBlobStoreFormat<Metadata.Custom> CUSTOM_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( - "custom", - METADATA_NAME_FORMAT, - null // no need of reader here, as this object is only used to write/serialize the object - ); - - /** - * Manifest format compatible with older codec v0, where codec version was missing. - */ - public static final ChecksumBlobStoreFormat<ClusterMetadataManifest> CLUSTER_METADATA_MANIFEST_FORMAT_V0 = - new ChecksumBlobStoreFormat<>("cluster-metadata-manifest", METADATA_MANIFEST_NAME_FORMAT, ClusterMetadataManifest::fromXContentV0); - - /** - * Manifest format compatible with older codec v1, where codec versions/global metadata was introduced.
- */ - public static final ChecksumBlobStoreFormat<ClusterMetadataManifest> CLUSTER_METADATA_MANIFEST_FORMAT_V1 = - new ChecksumBlobStoreFormat<>("cluster-metadata-manifest", METADATA_MANIFEST_NAME_FORMAT, ClusterMetadataManifest::fromXContentV1); - - /** - * Manifest format compatible with codec v2, where global metadata file is replaced with multiple metadata attribute files - */ - public static final ChecksumBlobStoreFormat<ClusterMetadataManifest> CLUSTER_METADATA_MANIFEST_FORMAT = new ChecksumBlobStoreFormat<>( - "cluster-metadata-manifest", - METADATA_MANIFEST_NAME_FORMAT, - ClusterMetadataManifest::fromXContent - ); - /** * Used to specify if cluster state metadata should be published to remote store */ @@ -183,18 +126,16 @@ public class RemoteClusterStateService implements Closeable { Property.Final ); - public static final String CLUSTER_STATE_PATH_TOKEN = "cluster-state"; - public static final String INDEX_PATH_TOKEN = "index"; - public static final String GLOBAL_METADATA_PATH_TOKEN = "global-metadata"; - public static final String MANIFEST_PATH_TOKEN = "manifest"; - public static final String MANIFEST_FILE_PREFIX = "manifest"; - public static final String METADATA_FILE_PREFIX = "metadata"; - public static final String COORDINATION_METADATA = "coordination"; - public static final String SETTING_METADATA = "settings"; - public static final String TEMPLATES_METADATA = "templates"; - public static final String CUSTOM_METADATA = "custom"; - public static final int SPLITED_MANIFEST_FILE_LENGTH = 6; // file name manifest__term__version__C/P__timestamp__codecversion + public static final TimeValue REMOTE_STATE_READ_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); + public static final Setting<TimeValue> REMOTE_STATE_READ_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.state.read_timeout", + REMOTE_STATE_READ_TIMEOUT_DEFAULT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private TimeValue remoteStateReadTimeout; private final String nodeId; private final Supplier<RepositoriesService> repositoriesService; private final Settings settings; @@ -203,21 +144,21 @@ public class RemoteClusterStateService implements Closeable { private final List<IndexMetadataUploadListener> indexMetadataUploadListeners; private BlobStoreRepository blobStoreRepository; private BlobStoreTransferService blobStoreTransferService; - private Optional<RemoteRoutingTableService> remoteRoutingTableService; + private final RemoteRoutingTableService remoteRoutingTableService; private volatile TimeValue slowWriteLoggingThreshold; - private volatile TimeValue indexMetadataUploadTimeout; - private volatile TimeValue globalMetadataUploadTimeout; - private volatile TimeValue metadataManifestUploadTimeout; - private RemoteClusterStateCleanupManager remoteClusterStateCleanupManager; private final RemotePersistenceStats remoteStateStats; + private RemoteClusterStateCleanupManager remoteClusterStateCleanupManager; + private RemoteIndexMetadataManager remoteIndexMetadataManager; + private RemoteGlobalMetadataManager remoteGlobalMetadataManager; + private RemoteClusterStateAttributesManager remoteClusterStateAttributesManager; + private RemoteManifestManager remoteManifestManager; + private ClusterSettings clusterSettings; + private final NamedWriteableRegistry namedWriteableRegistry; private final String CLUSTER_STATE_UPLOAD_TIME_LOG_STRING = "writing cluster state for version [{}] took [{}ms]"; private final String METADATA_UPDATE_LOG_STRING = "wrote metadata for [{}] indices and skipped [{}] unchanged " + "indices, coordination metadata updated : [{}], settings metadata updated : [{}], templates metadata " - + "updated : [{}],
custom metadata updated : [{}]"; - public static final int INDEX_METADATA_CURRENT_CODEC_VERSION = 1; - public static final int MANIFEST_CURRENT_CODEC_VERSION = ClusterMetadataManifest.CODEC_V3; - public static final int GLOBAL_METADATA_CURRENT_CODEC_VERSION = 2; + + "updated : [{}], custom metadata updated : [{}], indices routing updated : [{}]"; // ToXContent Params with gateway mode. // We are using gateway context mode to persist all custom metadata. @@ -236,7 +177,8 @@ public RemoteClusterStateService( ClusterService clusterService, LongSupplier relativeTimeNanosSupplier, ThreadPool threadPool, - List<IndexMetadataUploadListener> indexMetadataUploadListeners + List<IndexMetadataUploadListener> indexMetadataUploadListeners, + NamedWriteableRegistry namedWriteableRegistry ) { assert isRemoteStoreClusterStateEnabled(settings) : "Remote cluster state is not enabled"; this.nodeId = nodeId; @@ -244,21 +186,21 @@ public RemoteClusterStateService( this.settings = settings; this.relativeTimeNanosSupplier = relativeTimeNanosSupplier; this.threadpool = threadPool; - ClusterSettings clusterSettings = clusterService.getClusterSettings(); + clusterSettings = clusterService.getClusterSettings(); this.slowWriteLoggingThreshold = clusterSettings.get(SLOW_WRITE_LOGGING_THRESHOLD); - this.indexMetadataUploadTimeout = clusterSettings.get(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING); - this.globalMetadataUploadTimeout = clusterSettings.get(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING); - this.metadataManifestUploadTimeout = clusterSettings.get(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING); clusterSettings.addSettingsUpdateConsumer(SLOW_WRITE_LOGGING_THRESHOLD, this::setSlowWriteLoggingThreshold); - clusterSettings.addSettingsUpdateConsumer(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, this::setIndexMetadataUploadTimeout); - clusterSettings.addSettingsUpdateConsumer(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, this::setGlobalMetadataUploadTimeout); - clusterSettings.addSettingsUpdateConsumer(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, this::setMetadataManifestUploadTimeout); + this.remoteStateReadTimeout = clusterSettings.get(REMOTE_STATE_READ_TIMEOUT_SETTING); + clusterSettings.addSettingsUpdateConsumer(REMOTE_STATE_READ_TIMEOUT_SETTING, this::setRemoteStateReadTimeout); this.remoteStateStats = new RemotePersistenceStats(); - this.remoteClusterStateCleanupManager = new RemoteClusterStateCleanupManager(this, clusterService); + this.namedWriteableRegistry = namedWriteableRegistry; this.indexMetadataUploadListeners = indexMetadataUploadListeners; - this.remoteRoutingTableService = isRemoteRoutingTableEnabled(settings) - ? Optional.of(new RemoteRoutingTableService(repositoriesService, settings)) - : Optional.empty(); + this.remoteRoutingTableService = RemoteRoutingTableServiceFactory.getService( + repositoriesService, + settings, + clusterSettings, + threadPool + ); + this.remoteClusterStateCleanupManager = new RemoteClusterStateCleanupManager(this, clusterService, remoteRoutingTableService); } /** @@ -268,7 +210,7 @@ public RemoteClusterStateService( * @return A manifest object which contains the details of uploaded entity metadata. */ @Nullable - public ClusterMetadataManifest writeFullMetadata(ClusterState clusterState, String previousClusterUUID) throws IOException { + public RemoteClusterStateManifestInfo writeFullMetadata(ClusterState clusterState, String previousClusterUUID) throws IOException { final long startTimeNanos = relativeTimeNanosSupplier.getAsLong(); if (clusterState.nodes().isLocalNodeElectedClusterManager() == false) { logger.error("Local node is not elected cluster manager.
Exiting"); @@ -278,40 +220,47 @@ public ClusterMetadataManifest writeFullMetadata(ClusterState clusterState, Stri UploadedMetadataResults uploadedMetadataResults = writeMetadataInParallel( clusterState, new ArrayList<>(clusterState.metadata().indices().values()), - Collections.emptyMap(), + emptyMap(), clusterState.metadata().customs(), true, true, - true + true, + true, + true, + true, + clusterState.customs(), + true, + remoteRoutingTableService.getIndicesRouting(clusterState.getRoutingTable()) ); - final ClusterMetadataManifest manifest = uploadManifest( + final RemoteClusterStateManifestInfo manifestDetails = remoteManifestManager.uploadManifest( clusterState, - uploadedMetadataResults.uploadedIndexMetadata, + uploadedMetadataResults, previousClusterUUID, - uploadedMetadataResults.uploadedCoordinationMetadata, - uploadedMetadataResults.uploadedSettingsMetadata, - uploadedMetadataResults.uploadedTemplatesMetadata, - uploadedMetadataResults.uploadedCustomMetadataMap, + new ClusterStateDiffManifest(clusterState, ClusterState.EMPTY_STATE), false ); + final long durationMillis = TimeValue.nsecToMSec(relativeTimeNanosSupplier.getAsLong() - startTimeNanos); remoteStateStats.stateSucceeded(); remoteStateStats.stateTook(durationMillis); if (durationMillis >= slowWriteLoggingThreshold.getMillis()) { logger.warn( - "writing cluster state took [{}ms] which is above the warn threshold of [{}]; " + "wrote full state with [{}] indices", + "writing cluster state took [{}ms] which is above the warn threshold of [{}]; " + + "wrote full state with [{}] indices and [{}] indicesRouting", durationMillis, slowWriteLoggingThreshold, - uploadedMetadataResults.uploadedIndexMetadata.size() + uploadedMetadataResults.uploadedIndexMetadata.size(), + uploadedMetadataResults.uploadedIndicesRoutingMetadata.size() ); } else { logger.info( - "writing cluster state took [{}ms]; " + "wrote full state with [{}] indices and global metadata", + "writing cluster state took [{}ms]; " + "wrote full state with [{}] indices, [{}] indicesRouting and global metadata", durationMillis, - uploadedMetadataResults.uploadedIndexMetadata.size() + uploadedMetadataResults.uploadedIndexMetadata.size(), + uploadedMetadataResults.uploadedIndicesRoutingMetadata.size() ); } - return manifest; + return manifestDetails; } /** @@ -319,14 +268,16 @@ public ClusterMetadataManifest writeFullMetadata(ClusterState clusterState, Stri * manifest. The new manifest file is created by using the unchanged metadata from the previous manifest and the new metadata changes from the current * cluster state. * - * @return The uploaded ClusterMetadataManifest file + * @return {@link RemoteClusterStateManifestInfo} object containing uploaded manifest detail */ @Nullable - public ClusterMetadataManifest writeIncrementalMetadata( + public RemoteClusterStateManifestInfo writeIncrementalMetadata( ClusterState previousClusterState, ClusterState clusterState, ClusterMetadataManifest previousManifest ) throws IOException { + logger.info("WRITING INCREMENTAL STATE"); + final long startTimeNanos = relativeTimeNanosSupplier.getAsLong(); if (clusterState.nodes().isLocalNodeElectedClusterManager() == false) { logger.error("Local node is not elected cluster manager. 
Exiting"); @@ -335,15 +286,27 @@ public ClusterMetadataManifest writeIncrementalMetadata( assert previousClusterState.metadata().coordinationMetadata().term() == clusterState.metadata().coordinationMetadata().term(); final Map customsToBeDeletedFromRemote = new HashMap<>(previousManifest.getCustomMetadataMap()); - final Map customsToUpload = getUpdatedCustoms(clusterState, previousClusterState); + final Map customsToUpload = remoteGlobalMetadataManager.getUpdatedCustoms( + clusterState, + previousClusterState + ); + final Map clusterStateCustomsToBeDeleted = new HashMap<>( + previousManifest.getClusterStateCustomMap() + ); + final Map clusterStateCustomsToUpload = remoteClusterStateAttributesManager.getUpdatedCustoms( + clusterState, + previousClusterState + ); final Map allUploadedCustomMap = new HashMap<>(previousManifest.getCustomMetadataMap()); for (final String custom : clusterState.metadata().customs().keySet()) { // remove all the customs which are present currently customsToBeDeletedFromRemote.remove(custom); } - final Map indicesToBeDeletedFromRemote = new HashMap<>(previousClusterState.metadata().indices()); - + for (final String custom : clusterState.customs().keySet()) { + // remove all the custom which are present currently + clusterStateCustomsToBeDeleted.remove(custom); + } int numIndicesUpdated = 0; int numIndicesUnchanged = 0; final Map allUploadedIndexMetadata = previousManifest.getIndices() @@ -373,6 +336,12 @@ public ClusterMetadataManifest writeIncrementalMetadata( // index present in current cluster state indicesToBeDeletedFromRemote.remove(indexMetadata.getIndex().getName()); } + + DiffableUtils.MapDiff> routingTableDiff = remoteRoutingTableService + .getIndicesRoutingMapDiff(previousClusterState.getRoutingTable(), clusterState.getRoutingTable()); + List indicesRoutingToUpload = new ArrayList<>(); + routingTableDiff.getUpserts().forEach((k, v) -> indicesRoutingToUpload.add(v)); + UploadedMetadataResults uploadedMetadataResults; // For migration case from codec V0 or V1 to V2, we have added null check on metadata attribute files, // If file is empty and codec is 1 then write global metadata. @@ -382,8 +351,15 @@ public ClusterMetadataManifest writeIncrementalMetadata( ; boolean updateSettingsMetadata = firstUploadForSplitGlobalMetadata || Metadata.isSettingsMetadataEqual(previousClusterState.metadata(), clusterState.metadata()) == false; + boolean updateTransientSettingsMetadata = firstUploadForSplitGlobalMetadata + || Metadata.isTransientSettingsMetadataEqual(previousClusterState.metadata(), clusterState.metadata()) == false; boolean updateTemplatesMetadata = firstUploadForSplitGlobalMetadata || Metadata.isTemplatesMetadataEqual(previousClusterState.metadata(), clusterState.metadata()) == false; + // ToDo: check if these needs to be updated or not + final boolean updateDiscoveryNodes = clusterState.getNodes().delta(previousClusterState.getNodes()).hasChanges(); + final boolean updateClusterBlocks = !clusterState.blocks().equals(previousClusterState.blocks()); + final boolean updateHashesOfConsistentSettings = firstUploadForSplitGlobalMetadata + || Metadata.isHashesOfConsistentSettingsEqual(previousClusterState.metadata(), clusterState.metadata()) == false; uploadedMetadataResults = writeMetadataInParallel( clusterState, @@ -392,7 +368,13 @@ public ClusterMetadataManifest writeIncrementalMetadata( firstUploadForSplitGlobalMetadata ? 
clusterState.metadata().customs() : customsToUpload, updateCoordinationMetadata, updateSettingsMetadata, - updateTemplatesMetadata + updateTemplatesMetadata, + updateDiscoveryNodes, + updateClusterBlocks, + updateTransientSettingsMetadata, + clusterStateCustomsToUpload, + updateHashesOfConsistentSettings, + indicesRoutingToUpload ); // update the map if the metadata was uploaded @@ -403,17 +385,51 @@ public ClusterMetadataManifest writeIncrementalMetadata( // remove the data for removed custom/indices customsToBeDeletedFromRemote.keySet().forEach(allUploadedCustomMap::remove); indicesToBeDeletedFromRemote.keySet().forEach(allUploadedIndexMetadata::remove); + clusterStateCustomsToBeDeleted.keySet().forEach(allUploadedCustomMap::remove); + + if (!updateCoordinationMetadata) { + uploadedMetadataResults.uploadedCoordinationMetadata = previousManifest.getCoordinationMetadata(); + } + if (!updateSettingsMetadata) { + uploadedMetadataResults.uploadedSettingsMetadata = previousManifest.getSettingsMetadata(); + } + if (!updateTransientSettingsMetadata) { + uploadedMetadataResults.uploadedTransientSettingsMetadata = previousManifest.getTransientSettingsMetadata(); + } + if (!updateTemplatesMetadata) { + uploadedMetadataResults.uploadedTemplatesMetadata = previousManifest.getTemplatesMetadata(); + } + if (!updateDiscoveryNodes && !firstUploadForSplitGlobalMetadata) { + uploadedMetadataResults.uploadedDiscoveryNodes = previousManifest.getDiscoveryNodesMetadata(); + } + if (!updateClusterBlocks && !firstUploadForSplitGlobalMetadata) { + uploadedMetadataResults.uploadedClusterBlocks = previousManifest.getClusterBlocksMetadata(); + } + if (!updateHashesOfConsistentSettings && !firstUploadForSplitGlobalMetadata) { + uploadedMetadataResults.uploadedHashesOfConsistentSettings = previousManifest.getHashesOfConsistentSettings(); + } + if (!firstUploadForSplitGlobalMetadata && customsToUpload.isEmpty()) { + uploadedMetadataResults.uploadedCustomMetadataMap = previousManifest.getCustomMetadataMap(); + } + if (!firstUploadForSplitGlobalMetadata && clusterStateCustomsToUpload.isEmpty()) { + uploadedMetadataResults.uploadedClusterStateCustomMetadataMap = previousManifest.getClusterStateCustomMap(); + } + uploadedMetadataResults.uploadedCustomMetadataMap = allUploadedCustomMap; + uploadedMetadataResults.uploadedIndexMetadata = new ArrayList<>(allUploadedIndexMetadata.values()); + + List<UploadedIndexMetadata> allUploadedIndicesRouting = new ArrayList<>(); + allUploadedIndicesRouting = remoteRoutingTableService.getAllUploadedIndicesRouting( + previousManifest, + uploadedMetadataResults.uploadedIndicesRoutingMetadata, + routingTableDiff.getDeletes() + ); + uploadedMetadataResults.uploadedIndicesRoutingMetadata = allUploadedIndicesRouting; - final ClusterMetadataManifest manifest = uploadManifest( + final RemoteClusterStateManifestInfo manifestDetails = remoteManifestManager.uploadManifest( clusterState, - new ArrayList<>(allUploadedIndexMetadata.values()), + uploadedMetadataResults, previousManifest.getPreviousClusterUUID(), - updateCoordinationMetadata ? uploadedMetadataResults.uploadedCoordinationMetadata : previousManifest.getCoordinationMetadata(), - updateSettingsMetadata ? uploadedMetadataResults.uploadedSettingsMetadata : previousManifest.getSettingsMetadata(), - updateTemplatesMetadata ? uploadedMetadataResults.uploadedTemplatesMetadata : previousManifest.getTemplatesMetadata(), - firstUploadForSplitGlobalMetadata || !customsToUpload.isEmpty() - ?
allUploadedCustomMap - : previousManifest.getCustomMetadataMap(), + new ClusterStateDiffManifest(clusterState, previousClusterState), false ); @@ -422,7 +438,7 @@ public ClusterMetadataManifest writeIncrementalMetadata( remoteStateStats.stateTook(durationMillis); ParameterizedMessage clusterStateUploadTimeMessage = new ParameterizedMessage( CLUSTER_STATE_UPLOAD_TIME_LOG_STRING, - manifest.getStateVersion(), + manifestDetails.getClusterMetadataManifest().getStateVersion(), durationMillis ); ParameterizedMessage metadataUpdateMessage = new ParameterizedMessage( @@ -432,19 +448,41 @@ updateCoordinationMetadata, updateSettingsMetadata, updateTemplatesMetadata, - customsToUpload.size() + customsToUpload.size(), + indicesRoutingToUpload.size() ); if (durationMillis >= slowWriteLoggingThreshold.getMillis()) { + // TODO update logs to add more details about objects uploaded logger.warn( - "{} which is above the warn threshold of [{}]; {}", - clusterStateUploadTimeMessage, + "writing cluster state took [{}ms] which is above the warn threshold of [{}]; " + + "wrote metadata for [{}] indices and skipped [{}] unchanged indices, coordination metadata updated : [{}], " + + "settings metadata updated : [{}], templates metadata updated : [{}], custom metadata updated : [{}]", + durationMillis, slowWriteLoggingThreshold, - metadataUpdateMessage + numIndicesUpdated, + numIndicesUnchanged, + updateCoordinationMetadata, + updateSettingsMetadata, + updateTemplatesMetadata, + customsToUpload.size() ); } else { logger.info("{}; {}", clusterStateUploadTimeMessage, metadataUpdateMessage); + logger.info( + "writing cluster state for version [{}] took [{}ms]; " + + "wrote metadata for [{}] indices and skipped [{}] unchanged indices, coordination metadata updated : [{}], " + + "settings metadata updated : [{}], templates metadata updated : [{}], custom metadata updated : [{}]", + manifestDetails.getClusterMetadataManifest().getStateVersion(), + durationMillis, + numIndicesUpdated, + numIndicesUnchanged, + updateCoordinationMetadata, + updateSettingsMetadata, + updateTemplatesMetadata, + customsToUpload.size() + ); } - return manifest; + return manifestDetails; } private UploadedMetadataResults writeMetadataInParallel( @@ -454,14 +492,22 @@ private UploadedMetadataResults writeMetadataInParallel( Map<String, Metadata.Custom> customToUpload, boolean uploadCoordinationMetadata, boolean uploadSettingsMetadata, - boolean uploadTemplateMetadata + boolean uploadTemplateMetadata, + boolean uploadDiscoveryNodes, + boolean uploadClusterBlock, + boolean uploadTransientSettingMetadata, + Map<String, ClusterState.Custom> clusterStateCustomToUpload, + boolean uploadHashesOfConsistentSettings, + List<IndexRoutingTable> indicesRoutingToUpload ) throws IOException { assert Objects.nonNull(indexMetadataUploadListeners) : "indexMetadataUploadListeners can not be null"; int totalUploadTasks = indexToUpload.size() + indexMetadataUploadListeners.size() + customToUpload.size() - + (uploadCoordinationMetadata ? 1 : 0) + (uploadSettingsMetadata ? 1 : 0) + (uploadTemplateMetadata ? 1 : 0); + + (uploadCoordinationMetadata ? 1 : 0) + (uploadSettingsMetadata ? 1 : 0) + (uploadTemplateMetadata ? 1 : 0) + + (uploadDiscoveryNodes ? 1 : 0) + (uploadClusterBlock ? 1 : 0) + (uploadTransientSettingMetadata ? 1 : 0) + + clusterStateCustomToUpload.size() + (uploadHashesOfConsistentSettings ?
1 : 0) + indicesRoutingToUpload.size(); CountDownLatch latch = new CountDownLatch(totalUploadTasks); - Map<String, CheckedRunnable<Void>> uploadTasks = new HashMap<>(totalUploadTasks); - Map<String, ClusterMetadataManifest.UploadedMetadata> results = new HashMap<>(totalUploadTasks); + Map<String, CheckedRunnable<Void>> uploadTasks = new ConcurrentHashMap<>(totalUploadTasks); + Map<String, ClusterMetadataManifest.UploadedMetadata> results = new ConcurrentHashMap<>(totalUploadTasks); List<Exception> exceptionList = Collections.synchronizedList(new ArrayList<>(totalUploadTasks)); LatchedActionListener<ClusterMetadataManifest.UploadedMetadata> listener = new LatchedActionListener<>( @@ -481,11 +527,29 @@ private UploadedMetadataResults writeMetadataInParallel( if (uploadSettingsMetadata) { uploadTasks.put( SETTING_METADATA, - getAsyncMetadataWriteAction( - clusterState, - SETTING_METADATA, - SETTINGS_METADATA_FORMAT, - clusterState.metadata().persistentSettings(), + remoteGlobalMetadataManager.getAsyncMetadataWriteAction( + new RemotePersistentSettingsMetadata( + clusterState.metadata().persistentSettings(), + clusterState.metadata().version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + listener + ) + ); + } + if (uploadTransientSettingMetadata) { + uploadTasks.put( + TRANSIENT_SETTING_METADATA, + remoteGlobalMetadataManager.getAsyncMetadataWriteAction( + new RemoteTransientSettingsMetadata( + clusterState.metadata().transientSettings(), + clusterState.metadata().version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), listener ) ); @@ -493,11 +557,14 @@ private UploadedMetadataResults writeMetadataInParallel( if (uploadCoordinationMetadata) { uploadTasks.put( COORDINATION_METADATA, - getAsyncMetadataWriteAction( - clusterState, - COORDINATION_METADATA, - COORDINATION_METADATA_FORMAT, - clusterState.metadata().coordinationMetadata(), + remoteGlobalMetadataManager.getAsyncMetadataWriteAction( + new RemoteCoordinationMetadata( + clusterState.metadata().coordinationMetadata(), + clusterState.metadata().version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), listener ) ); @@ -505,11 +572,58 @@ private UploadedMetadataResults writeMetadataInParallel( if (uploadTemplateMetadata) { uploadTasks.put( TEMPLATES_METADATA, - getAsyncMetadataWriteAction( - clusterState, - TEMPLATES_METADATA, - TEMPLATES_METADATA_FORMAT, - clusterState.metadata().templatesMetadata(), + remoteGlobalMetadataManager.getAsyncMetadataWriteAction( + new RemoteTemplatesMetadata( + clusterState.metadata().templatesMetadata(), + clusterState.metadata().version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + listener + ) + ); + } + if (uploadDiscoveryNodes) { + uploadTasks.put( + DISCOVERY_NODES, + remoteClusterStateAttributesManager.getAsyncMetadataWriteAction( + RemoteDiscoveryNodes.DISCOVERY_NODES, + new RemoteDiscoveryNodes( + clusterState.nodes(), + clusterState.version(), + clusterState.stateUUID(), + blobStoreRepository.getCompressor() + ), + listener + ) + ); + } + if (uploadClusterBlock) { + uploadTasks.put( + CLUSTER_BLOCKS, + remoteClusterStateAttributesManager.getAsyncMetadataWriteAction( + RemoteClusterBlocks.CLUSTER_BLOCKS, + new RemoteClusterBlocks( + clusterState.blocks(), + clusterState.version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor() + ), + listener + ) + ); + } + if (uploadHashesOfConsistentSettings) { + uploadTasks.put( +
HASHES_OF_CONSISTENT_SETTINGS, + remoteGlobalMetadataManager.getAsyncMetadataWriteAction( + new RemoteHashesOfConsistentSettings( + (DiffableStringMap) clusterState.metadata().hashesOfConsistentSettings(), + clusterState.metadata().version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor() + ), listener ) ); @@ -518,22 +632,67 @@ private UploadedMetadataResults writeMetadataInParallel( String customComponent = String.join(CUSTOM_DELIMITER, CUSTOM_METADATA, key); uploadTasks.put( customComponent, - getAsyncMetadataWriteAction(clusterState, customComponent, CUSTOM_METADATA_FORMAT, value, listener) + remoteGlobalMetadataManager.getAsyncMetadataWriteAction( + new RemoteCustomMetadata( + value, + key, + clusterState.metadata().version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor(), + namedWriteableRegistry + ), + listener + ) ); }); indexToUpload.forEach(indexMetadata -> { - uploadTasks.put(indexMetadata.getIndex().getName(), getIndexMetadataAsyncAction(clusterState, indexMetadata, listener)); + uploadTasks.put( + indexMetadata.getIndex().getName(), + remoteIndexMetadataManager.getAsyncIndexMetadataWriteAction(indexMetadata, clusterState.metadata().clusterUUID(), listener) + ); + }); + + clusterStateCustomToUpload.forEach((key, value) -> { + uploadTasks.put( + key, + remoteClusterStateAttributesManager.getAsyncMetadataWriteAction( + CLUSTER_STATE_CUSTOM, + new RemoteClusterStateCustoms( + value, + key, + clusterState.version(), + clusterState.metadata().clusterUUID(), + blobStoreRepository.getCompressor(), + namedWriteableRegistry + ), + listener + ) + ); + }); + indicesRoutingToUpload.forEach(indexRoutingTable -> { + uploadTasks.put( + InternalRemoteRoutingTableService.INDEX_ROUTING_METADATA_PREFIX + indexRoutingTable.getIndex().getName(), + remoteRoutingTableService.getIndexRoutingAsyncAction( + clusterState, + indexRoutingTable, + listener, + getClusterMetadataBasePath( + blobStoreRepository, + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ) + ) + ); }); // start async upload of all required metadata files for (CheckedRunnable<Void> uploadTask : uploadTasks.values()) { uploadTask.run(); } - invokeIndexMetadataUploadListeners(indexToUpload, prevIndexMetadataByName, latch, exceptionList); try { - if (latch.await(getGlobalMetadataUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) { + if (latch.await(remoteGlobalMetadataManager.getGlobalMetadataUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) { // TODO: We should add metrics where transfer is timing out.
[Issue: #10687] RemoteStateTransferException ex = new RemoteStateTransferException( String.format( @@ -571,25 +730,43 @@ private UploadedMetadataResults writeMetadataInParallel( } UploadedMetadataResults response = new UploadedMetadataResults(); results.forEach((name, uploadedMetadata) -> { - if (name.contains(CUSTOM_METADATA)) { + if (uploadedMetadata.getClass().equals(UploadedIndexMetadata.class) + && uploadedMetadata.getComponent().contains(InternalRemoteRoutingTableService.INDEX_ROUTING_METADATA_PREFIX)) { + response.uploadedIndicesRoutingMetadata.add((UploadedIndexMetadata) uploadedMetadata); + } else if (name.startsWith(CUSTOM_METADATA)) { // component name for custom metadata will look like custom-- String custom = name.split(DELIMITER)[0].split(CUSTOM_DELIMITER)[1]; response.uploadedCustomMetadataMap.put( custom, new UploadedMetadataAttribute(custom, uploadedMetadata.getUploadedFilename()) ); + } else if (name.startsWith(CLUSTER_STATE_CUSTOM)) { + String custom = name.split(DELIMITER)[0].split(CUSTOM_DELIMITER)[1]; + response.uploadedClusterStateCustomMetadataMap.put( + custom, + new UploadedMetadataAttribute(custom, uploadedMetadata.getUploadedFilename()) + ); } else if (COORDINATION_METADATA.equals(name)) { response.uploadedCoordinationMetadata = (UploadedMetadataAttribute) uploadedMetadata; - } else if (SETTING_METADATA.equals(name)) { + } else if (RemotePersistentSettingsMetadata.SETTING_METADATA.equals(name)) { response.uploadedSettingsMetadata = (UploadedMetadataAttribute) uploadedMetadata; } else if (TEMPLATES_METADATA.equals(name)) { response.uploadedTemplatesMetadata = (UploadedMetadataAttribute) uploadedMetadata; } else if (name.contains(UploadedIndexMetadata.COMPONENT_PREFIX)) { response.uploadedIndexMetadata.add((UploadedIndexMetadata) uploadedMetadata); + } else if (RemoteTransientSettingsMetadata.TRANSIENT_SETTING_METADATA.equals(name)) { + response.uploadedTransientSettingsMetadata = (UploadedMetadataAttribute) uploadedMetadata; + } else if (RemoteDiscoveryNodes.DISCOVERY_NODES.equals(uploadedMetadata.getComponent())) { + response.uploadedDiscoveryNodes = (UploadedMetadataAttribute) uploadedMetadata; + } else if (RemoteClusterBlocks.CLUSTER_BLOCKS.equals(uploadedMetadata.getComponent())) { + response.uploadedClusterBlocks = (UploadedMetadataAttribute) uploadedMetadata; + } else if (RemoteHashesOfConsistentSettings.HASHES_OF_CONSISTENT_SETTINGS.equals(uploadedMetadata.getComponent())) { + response.uploadedHashesOfConsistentSettings = (UploadedMetadataAttribute) uploadedMetadata; } else { throw new IllegalStateException("Unknown metadata component name " + name); } }); + logger.info("response {}", response.uploadedIndicesRoutingMetadata.toString()); return response; } @@ -650,73 +827,8 @@ private ActionListener getIndexMetadataUploadActionListener( ); } - /** - * Allows async Upload of IndexMetadata to remote - * - * @param clusterState current ClusterState - * @param indexMetadata {@link IndexMetadata} to upload - * @param latchedActionListener listener to respond back on after upload finishes - */ - private CheckedRunnable getIndexMetadataAsyncAction( - ClusterState clusterState, - IndexMetadata indexMetadata, - LatchedActionListener latchedActionListener - ) { - final BlobContainer indexMetadataContainer = indexMetadataContainer( - clusterState.getClusterName().value(), - clusterState.metadata().clusterUUID(), - indexMetadata.getIndexUUID() - ); - final String indexMetadataFilename = indexMetadataFileName(indexMetadata); - ActionListener completionListener = 
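
// Sketch of the await-and-surface-errors idiom used in the timeout handling above
// (simplified; the real code throws RemoteStateTransferException and, per the TODO,
// still lacks metrics for the timeout case):

import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

final class AwaitSketch {
    static void awaitOrFail(CountDownLatch latch, long timeoutMillis, List<Exception> failures) {
        try {
            if (latch.await(timeoutMillis, TimeUnit.MILLISECONDS) == false) {
                RuntimeException timeout = new RuntimeException("timed out waiting for transfer to complete");
                failures.forEach(timeout::addSuppressed); // keep partial failures visible on the timeout
                throw timeout;
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag before surfacing
            throw new RuntimeException("interrupted while waiting for transfer to complete", e);
        }
        if (failures.isEmpty() == false) {
            RuntimeException failed = new RuntimeException("one or more transfers failed");
            failures.forEach(failed::addSuppressed);
            throw failed;
        }
    }
}
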
ActionListener.wrap( - resp -> latchedActionListener.onResponse( - new UploadedIndexMetadata( - indexMetadata.getIndex().getName(), - indexMetadata.getIndexUUID(), - indexMetadataContainer.path().buildAsString() + indexMetadataFilename - ) - ), - ex -> latchedActionListener.onFailure(new RemoteStateTransferException(indexMetadata.getIndex().toString(), ex)) - ); - - return () -> INDEX_METADATA_FORMAT.writeAsyncWithUrgentPriority( - indexMetadata, - indexMetadataContainer, - indexMetadataFilename, - blobStoreRepository.getCompressor(), - completionListener, - FORMAT_PARAMS - ); - } - - /** - * Allows async upload of Metadata components to remote - */ - - private CheckedRunnable getAsyncMetadataWriteAction( - ClusterState clusterState, - String component, - ChecksumBlobStoreFormat componentMetadataBlobStore, - ToXContent componentMetadata, - LatchedActionListener latchedActionListener - ) { - final BlobContainer globalMetadataContainer = globalMetadataContainer( - clusterState.getClusterName().value(), - clusterState.metadata().clusterUUID() - ); - final String componentMetadataFilename = metadataAttributeFileName(component, clusterState.metadata().version()); - ActionListener completionListener = ActionListener.wrap( - resp -> latchedActionListener.onResponse(new UploadedMetadataAttribute(component, componentMetadataFilename)), - ex -> latchedActionListener.onFailure(new RemoteStateTransferException(component, ex)) - ); - return () -> componentMetadataBlobStore.writeAsyncWithUrgentPriority( - componentMetadata, - globalMetadataContainer, - componentMetadataFilename, - blobStoreRepository.getCompressor(), - completionListener, - FORMAT_PARAMS - ); + public RemoteManifestManager getRemoteManifestManager() { + return remoteManifestManager; } public RemoteClusterStateCleanupManager getCleanupManager() { @@ -724,7 +836,7 @@ public RemoteClusterStateCleanupManager getCleanupManager() { } @Nullable - public ClusterMetadataManifest markLastStateAsCommitted(ClusterState clusterState, ClusterMetadataManifest previousManifest) + public RemoteClusterStateManifestInfo markLastStateAsCommitted(ClusterState clusterState, ClusterMetadataManifest previousManifest) throws IOException { assert clusterState != null : "Last accepted cluster state is not set"; if (clusterState.nodes().isLocalNodeElectedClusterManager() == false) { @@ -732,21 +844,47 @@ public ClusterMetadataManifest markLastStateAsCommitted(ClusterState clusterStat return null; } assert previousManifest != null : "Last cluster metadata manifest is not set"; - ClusterMetadataManifest committedManifest = uploadManifest( - clusterState, + UploadedMetadataResults uploadedMetadataResults = new UploadedMetadataResults( previousManifest.getIndices(), - previousManifest.getPreviousClusterUUID(), + previousManifest.getCustomMetadataMap(), previousManifest.getCoordinationMetadata(), previousManifest.getSettingsMetadata(), previousManifest.getTemplatesMetadata(), - previousManifest.getCustomMetadataMap(), + previousManifest.getTransientSettingsMetadata(), + previousManifest.getDiscoveryNodesMetadata(), + previousManifest.getClusterBlocksMetadata(), + previousManifest.getIndicesRouting(), + previousManifest.getHashesOfConsistentSettings(), + previousManifest.getClusterStateCustomMap() + ); + + RemoteClusterStateManifestInfo committedManifestDetails = remoteManifestManager.uploadManifest( + clusterState, + uploadedMetadataResults, + previousManifest.getPreviousClusterUUID(), + previousManifest.getDiffManifest(), true ); - if 
(!previousManifest.isClusterUUIDCommitted() && committedManifest.isClusterUUIDCommitted()) { - remoteClusterStateCleanupManager.deleteStaleClusterUUIDs(clusterState, committedManifest); + if (!previousManifest.isClusterUUIDCommitted() && committedManifestDetails.getClusterMetadataManifest().isClusterUUIDCommitted()) { + remoteClusterStateCleanupManager.deleteStaleClusterUUIDs(clusterState, committedManifestDetails.getClusterMetadataManifest()); } - return committedManifest; + return committedManifestDetails; + } + + /** + * Fetch latest ClusterMetadataManifest from remote state store + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @return ClusterMetadataManifest + */ + public Optional getLatestClusterMetadataManifest(String clusterName, String clusterUUID) { + return remoteManifestManager.getLatestClusterMetadataManifest(clusterName, clusterUUID); + } + + public ClusterMetadataManifest getClusterMetadataManifestByFileName(String clusterUUID, String fileName) { + return remoteManifestManager.getRemoteClusterMetadataManifestByFileName(clusterUUID, fileName); } @Override @@ -755,9 +893,7 @@ public void close() throws IOException { if (blobStoreRepository != null) { IOUtils.close(blobStoreRepository); } - if (this.remoteRoutingTableService.isPresent()) { - this.remoteRoutingTableService.get().close(); - } + this.remoteRoutingTableService.close(); } public void start() { @@ -769,474 +905,513 @@ public void start() { final Repository repository = repositoriesService.get().repository(remoteStoreRepo); assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository"; blobStoreRepository = (BlobStoreRepository) repository; + this.remoteRoutingTableService.start(); + blobStoreTransferService = new BlobStoreTransferService(getBlobStore(), threadpool); + String clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings).value(); + + remoteGlobalMetadataManager = new RemoteGlobalMetadataManager( + clusterSettings, + clusterName, + blobStoreRepository, + blobStoreTransferService, + namedWriteableRegistry, + threadpool + ); + remoteIndexMetadataManager = new RemoteIndexMetadataManager( + clusterSettings, + clusterName, + blobStoreRepository, + blobStoreTransferService, + threadpool + ); + remoteManifestManager = new RemoteManifestManager( + clusterSettings, + clusterName, + nodeId, + blobStoreRepository, + blobStoreTransferService, + threadpool + ); + remoteClusterStateAttributesManager = new RemoteClusterStateAttributesManager( + clusterName, + blobStoreRepository, + blobStoreTransferService, + namedWriteableRegistry, + threadpool + ); remoteClusterStateCleanupManager.start(); - this.remoteRoutingTableService.ifPresent(RemoteRoutingTableService::start); } - private ClusterMetadataManifest uploadManifest( - ClusterState clusterState, - List uploadedIndexMetadata, - String previousClusterUUID, - UploadedMetadataAttribute uploadedCoordinationMetadata, - UploadedMetadataAttribute uploadedSettingsMetadata, - UploadedMetadataAttribute uploadedTemplatesMetadata, - Map uploadedCustomMetadataMap, - boolean committed - ) throws IOException { - synchronized (this) { - final String manifestFileName = getManifestFileName( - clusterState.term(), - clusterState.version(), - committed, - MANIFEST_CURRENT_CODEC_VERSION - ); - final ClusterMetadataManifest manifest = new ClusterMetadataManifest( - clusterState.term(), - clusterState.getVersion(), - clusterState.metadata().clusterUUID(), - 
clusterState.stateUUID(), - Version.CURRENT, - nodeId, - committed, - MANIFEST_CURRENT_CODEC_VERSION, - null, - uploadedIndexMetadata, - previousClusterUUID, - clusterState.metadata().clusterUUIDCommitted(), - uploadedCoordinationMetadata, - uploadedSettingsMetadata, - uploadedTemplatesMetadata, - uploadedCustomMetadataMap, - clusterState.routingTable().version(), - // TODO: Add actual list of changed indices routing with index routing upload flow. - new ArrayList<>() - ); - writeMetadataManifest(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID(), manifest, manifestFileName); - return manifest; - } + private void setSlowWriteLoggingThreshold(TimeValue slowWriteLoggingThreshold) { + this.slowWriteLoggingThreshold = slowWriteLoggingThreshold; } - private void writeMetadataManifest(String clusterName, String clusterUUID, ClusterMetadataManifest uploadManifest, String fileName) - throws IOException { - AtomicReference result = new AtomicReference(); - AtomicReference exceptionReference = new AtomicReference(); - - final BlobContainer metadataManifestContainer = manifestContainer(clusterName, clusterUUID); - - // latch to wait until upload is not finished - CountDownLatch latch = new CountDownLatch(1); - - LatchedActionListener completionListener = new LatchedActionListener<>(ActionListener.wrap(resp -> { - logger.trace(String.format(Locale.ROOT, "Manifest file uploaded successfully.")); - }, ex -> { exceptionReference.set(ex); }), latch); - - getClusterMetadataManifestBlobStoreFormat(fileName).writeAsyncWithUrgentPriority( - uploadManifest, - metadataManifestContainer, - fileName, - blobStoreRepository.getCompressor(), - completionListener, - FORMAT_PARAMS - ); - - try { - if (latch.await(getMetadataManifestUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) { - RemoteStateTransferException ex = new RemoteStateTransferException( - String.format(Locale.ROOT, "Timed out waiting for transfer of manifest file to complete") - ); - throw ex; - } - } catch (InterruptedException ex) { - RemoteStateTransferException exception = new RemoteStateTransferException( - String.format(Locale.ROOT, "Timed out waiting for transfer of manifest file to complete - %s"), - ex - ); - Thread.currentThread().interrupt(); - throw exception; - } - if (exceptionReference.get() != null) { - throw new RemoteStateTransferException(exceptionReference.get().getMessage(), exceptionReference.get()); - } - logger.debug( - "Metadata manifest file [{}] written during [{}] phase. ", - fileName, - uploadManifest.isCommitted() ? 
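
// LatchedActionListener, used throughout this class, delegates to a wrapped listener
// and counts the latch down whether the action succeeded or failed; that guarantee is
// what makes the latch.await(...) calls above terminate. A minimal re-implementation
// for illustration only (hypothetical Listener interface standing in for ActionListener):

import java.util.concurrent.CountDownLatch;

final class LatchedListenerSketch {
    interface Listener<T> {
        void onResponse(T response);
        void onFailure(Exception e);
    }

    static <T> Listener<T> latched(Listener<T> delegate, CountDownLatch latch) {
        return new Listener<T>() {
            @Override
            public void onResponse(T response) {
                try { delegate.onResponse(response); } finally { latch.countDown(); }
            }

            @Override
            public void onFailure(Exception e) {
                try { delegate.onFailure(e); } finally { latch.countDown(); }
            }
        };
    }
}
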
"commit" : "publish" - ); + // Package private for unit test + RemoteRoutingTableService getRemoteRoutingTableService() { + return this.remoteRoutingTableService; } ThreadPool getThreadpool() { return threadpool; } + BlobStoreRepository getBlobStoreRepository() { + return blobStoreRepository; + } + BlobStore getBlobStore() { return blobStoreRepository.blobStore(); } - private BlobContainer indexMetadataContainer(String clusterName, String clusterUUID, String indexUUID) { - // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/index/ftqsCnn9TgOX - return blobStoreRepository.blobStore() - .blobContainer(getCusterMetadataBasePath(clusterName, clusterUUID).add(INDEX_PATH_TOKEN).add(indexUUID)); - } + /** + * Fetch latest ClusterState from remote, including global metadata, index metadata and cluster state version + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @return {@link IndexMetadata} + */ + public ClusterState getLatestClusterState(String clusterName, String clusterUUID, boolean includeEphemeral) throws IOException { + Optional clusterMetadataManifest = remoteManifestManager.getLatestClusterMetadataManifest( + clusterName, + clusterUUID + ); + if (clusterMetadataManifest.isEmpty()) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Latest cluster metadata manifest is not present for the provided clusterUUID: %s", clusterUUID) + ); + } - private BlobContainer globalMetadataContainer(String clusterName, String clusterUUID) { - // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/global-metadata/ - return blobStoreRepository.blobStore() - .blobContainer(getCusterMetadataBasePath(clusterName, clusterUUID).add(GLOBAL_METADATA_PATH_TOKEN)); + return getClusterStateForManifest(clusterName, clusterMetadataManifest.get(), nodeId, includeEphemeral); } - private BlobContainer manifestContainer(String clusterName, String clusterUUID) { - // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest - return blobStoreRepository.blobStore().blobContainer(getManifestFolderPath(clusterName, clusterUUID)); - } + private ClusterState readClusterStateInParallel( + ClusterState previousState, + ClusterMetadataManifest manifest, + String clusterUUID, + String localNodeId, + List indicesToRead, + Map customToRead, + boolean readCoordinationMetadata, + boolean readSettingsMetadata, + boolean readTransientSettingsMetadata, + boolean readTemplatesMetadata, + boolean readDiscoveryNodes, + boolean readClusterBlocks, + List indicesRoutingToRead, + boolean readHashesOfConsistentSettings, + Map clusterStateCustomToRead, + boolean includeEphemeral + ) throws IOException { + int totalReadTasks = indicesToRead.size() + customToRead.size() + (readCoordinationMetadata ? 1 : 0) + (readSettingsMetadata + ? 1 + : 0) + (readTemplatesMetadata ? 1 : 0) + (readDiscoveryNodes ? 1 : 0) + (readClusterBlocks ? 1 : 0) + + (readTransientSettingsMetadata ? 1 : 0) + (readHashesOfConsistentSettings ? 
1 : 0) + clusterStateCustomToRead.size() + + indicesRoutingToRead.size(); + CountDownLatch latch = new CountDownLatch(totalReadTasks); + List> asyncMetadataReadActions = new ArrayList<>(); + List readResults = Collections.synchronizedList(new ArrayList<>()); + List readIndexRoutingTableResults = Collections.synchronizedList(new ArrayList<>()); + List exceptionList = Collections.synchronizedList(new ArrayList<>(totalReadTasks)); + + LatchedActionListener listener = new LatchedActionListener<>(ActionListener.wrap(response -> { + logger.debug("Successfully read cluster state component from remote"); + readResults.add(response); + }, ex -> { + logger.error("Failed to read cluster state from remote", ex); + exceptionList.add(ex); + }), latch); + + for (UploadedIndexMetadata indexMetadata : indicesToRead) { + asyncMetadataReadActions.add( + remoteIndexMetadataManager.getAsyncIndexMetadataReadAction(clusterUUID, indexMetadata.getUploadedFilename(), listener) + ); + } - BlobPath getCusterMetadataBasePath(String clusterName, String clusterUUID) { - return blobStoreRepository.basePath().add(encodeString(clusterName)).add(CLUSTER_STATE_PATH_TOKEN).add(clusterUUID); - } + LatchedActionListener routingTableLatchedActionListener = new LatchedActionListener<>( + ActionListener.wrap(response -> { + logger.debug("Successfully read cluster state component from remote"); + readIndexRoutingTableResults.add(response); + }, ex -> { + logger.error("Failed to read cluster state from remote", ex); + exceptionList.add(ex); + }), + latch + ); - private BlobContainer clusterUUIDContainer(String clusterName) { - return blobStoreRepository.blobStore() - .blobContainer( - blobStoreRepository.basePath() - .add(Base64.getUrlEncoder().withoutPadding().encodeToString(clusterName.getBytes(StandardCharsets.UTF_8))) - .add(CLUSTER_STATE_PATH_TOKEN) + for (UploadedIndexMetadata indexRouting : indicesRoutingToRead) { + asyncMetadataReadActions.add( + remoteRoutingTableService.getAsyncIndexRoutingReadAction( + indexRouting.getUploadedFilename(), + new Index(indexRouting.getIndexName(), indexRouting.getIndexUUID()), + routingTableLatchedActionListener + ) ); - } + } - private void setSlowWriteLoggingThreshold(TimeValue slowWriteLoggingThreshold) { - this.slowWriteLoggingThreshold = slowWriteLoggingThreshold; - } + for (Map.Entry entry : customToRead.entrySet()) { + asyncMetadataReadActions.add( + remoteGlobalMetadataManager.getAsyncMetadataReadAction( + new RemoteCustomMetadata( + entry.getValue().getUploadedFilename(), + entry.getKey(), + clusterUUID, + blobStoreRepository.getCompressor(), + namedWriteableRegistry + ), + entry.getValue().getAttributeName(), + listener + ) + ); + } - private void setIndexMetadataUploadTimeout(TimeValue newIndexMetadataUploadTimeout) { - this.indexMetadataUploadTimeout = newIndexMetadataUploadTimeout; - } + if (readCoordinationMetadata) { + asyncMetadataReadActions.add( + remoteGlobalMetadataManager.getAsyncMetadataReadAction( + new RemoteCoordinationMetadata( + manifest.getCoordinationMetadata().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + COORDINATION_METADATA, + listener + ) + ); + } - private void setGlobalMetadataUploadTimeout(TimeValue newGlobalMetadataUploadTimeout) { - this.globalMetadataUploadTimeout = newGlobalMetadataUploadTimeout; - } + if (readSettingsMetadata) { + asyncMetadataReadActions.add( + remoteGlobalMetadataManager.getAsyncMetadataReadAction( + new RemotePersistentSettingsMetadata( + 
manifest.getSettingsMetadata().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + SETTING_METADATA, + listener + ) + ); + } - private void setMetadataManifestUploadTimeout(TimeValue newMetadataManifestUploadTimeout) { - this.metadataManifestUploadTimeout = newMetadataManifestUploadTimeout; - } + if (readTransientSettingsMetadata) { + asyncMetadataReadActions.add( + remoteGlobalMetadataManager.getAsyncMetadataReadAction( + new RemoteTransientSettingsMetadata( + manifest.getTransientSettingsMetadata().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + TRANSIENT_SETTING_METADATA, + listener + ) + ); + } - private Map getUpdatedCustoms(ClusterState currentState, ClusterState previousState) { - if (Metadata.isCustomMetadataEqual(previousState.metadata(), currentState.metadata())) { - return new HashMap<>(); + if (readTemplatesMetadata) { + asyncMetadataReadActions.add( + remoteGlobalMetadataManager.getAsyncMetadataReadAction( + new RemoteTemplatesMetadata( + manifest.getTemplatesMetadata().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor(), + blobStoreRepository.getNamedXContentRegistry() + ), + TEMPLATES_METADATA, + listener + ) + ); } - Map updatedCustom = new HashMap<>(); - Set currentCustoms = new HashSet<>(currentState.metadata().customs().keySet()); - for (Map.Entry cursor : previousState.metadata().customs().entrySet()) { - if (cursor.getValue().context().contains(Metadata.XContentContext.GATEWAY)) { - if (currentCustoms.contains(cursor.getKey()) - && !cursor.getValue().equals(currentState.metadata().custom(cursor.getKey()))) { - // If the custom metadata is updated, we need to upload the new version. 
- updatedCustom.put(cursor.getKey(), currentState.metadata().custom(cursor.getKey())); - } - currentCustoms.remove(cursor.getKey()); - } + + if (readDiscoveryNodes) { + asyncMetadataReadActions.add( + remoteClusterStateAttributesManager.getAsyncMetadataReadAction( + DISCOVERY_NODES, + new RemoteDiscoveryNodes( + manifest.getDiscoveryNodesMetadata().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor() + ), + listener + ) + ); } - for (String custom : currentCustoms) { - Metadata.Custom cursor = currentState.metadata().custom(custom); - if (cursor.context().contains(Metadata.XContentContext.GATEWAY)) { - updatedCustom.put(custom, cursor); - } + + if (readClusterBlocks) { + asyncMetadataReadActions.add( + remoteClusterStateAttributesManager.getAsyncMetadataReadAction( + CLUSTER_BLOCKS, + new RemoteClusterBlocks( + manifest.getClusterBlocksMetadata().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor() + ), + listener + ) + ); } - return updatedCustom; - } - public TimeValue getIndexMetadataUploadTimeout() { - return this.indexMetadataUploadTimeout; - } + if (readHashesOfConsistentSettings) { + asyncMetadataReadActions.add( + remoteGlobalMetadataManager.getAsyncMetadataReadAction( + new RemoteHashesOfConsistentSettings( + manifest.getHashesOfConsistentSettings().getUploadedFilename(), + clusterUUID, + blobStoreRepository.getCompressor() + ), + HASHES_OF_CONSISTENT_SETTINGS, + listener + ) + ); + } - public TimeValue getGlobalMetadataUploadTimeout() { - return this.globalMetadataUploadTimeout; - } + for (Map.Entry entry : clusterStateCustomToRead.entrySet()) { + asyncMetadataReadActions.add( + remoteClusterStateAttributesManager.getAsyncMetadataReadAction( + CLUSTER_STATE_CUSTOM, + new RemoteClusterStateCustoms( + entry.getValue().getUploadedFilename(), + entry.getValue().getAttributeName(), + clusterUUID, + blobStoreRepository.getCompressor(), + namedWriteableRegistry + ), + listener + ) + ); + } - public TimeValue getMetadataManifestUploadTimeout() { - return this.metadataManifestUploadTimeout; - } + for (CheckedRunnable asyncMetadataReadAction : asyncMetadataReadActions) { + asyncMetadataReadAction.run(); + } - // Package private for unit test - Optional getRemoteRoutingTableService() { - return this.remoteRoutingTableService; - } + try { + if (latch.await(this.remoteStateReadTimeout.getMillis(), TimeUnit.MILLISECONDS) == false) { + RemoteStateTransferException exception = new RemoteStateTransferException( + "Timed out waiting to read cluster state from remote within timeout " + this.remoteStateReadTimeout + ); + exceptionList.forEach(exception::addSuppressed); + throw exception; + } + } catch (InterruptedException e) { + exceptionList.forEach(e::addSuppressed); + RemoteStateTransferException ex = new RemoteStateTransferException( + "Interrupted while waiting to read cluster state from metadata" + ); + Thread.currentThread().interrupt(); + throw ex; + } - static String getManifestFileName(long term, long version, boolean committed, int codecVersion) { - // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest/manifest______C/P____ - return String.join( - DELIMITER, - MANIFEST_PATH_TOKEN, - RemoteStoreUtils.invertLong(term), - RemoteStoreUtils.invertLong(version), - (committed ? "C" : "P"), // C for committed and P for published - RemoteStoreUtils.invertLong(System.currentTimeMillis()), - String.valueOf(codecVersion) // Keep the codec version at last place only, during read we reads last place to - // determine codec version. 
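
// Why the file names above embed RemoteStoreUtils.invertLong(term/version/timestamp):
// inverting against Long.MAX_VALUE makes larger values produce lexicographically
// smaller strings, so a plain sorted blob listing returns the newest file first.
// A minimal sketch, assuming a zero-padded inversion like the real helper:

import java.util.Locale;

final class InvertedNameSketch {
    static String invert(long value) {
        return String.format(Locale.ROOT, "%019d", Long.MAX_VALUE - value); // bigger input => smaller string
    }

    public static void main(String[] args) {
        String older = "manifest__" + invert(5) + "__" + invert(100) + "__C";
        String newer = "manifest__" + invert(5) + "__" + invert(101) + "__C";
        assert newer.compareTo(older) < 0; // the later version sorts first
        System.out.println(newer + "\nsorts before\n" + older);
    }
}
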
- ); - } + if (!exceptionList.isEmpty()) { + RemoteStateTransferException exception = new RemoteStateTransferException("Exception during reading cluster state from remote"); + exceptionList.forEach(exception::addSuppressed); + throw exception; + } - static String indexMetadataFileName(IndexMetadata indexMetadata) { - // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/index//metadata______ - return String.join( - DELIMITER, - METADATA_FILE_PREFIX, - RemoteStoreUtils.invertLong(indexMetadata.getVersion()), - RemoteStoreUtils.invertLong(System.currentTimeMillis()), - String.valueOf(INDEX_METADATA_CURRENT_CODEC_VERSION) // Keep the codec version at last place only, during read we reads last - // place to determine codec version. - ); - } + final ClusterState.Builder clusterStateBuilder = ClusterState.builder(previousState); + AtomicReference discoveryNodesBuilder = new AtomicReference<>(DiscoveryNodes.builder()); + Metadata.Builder metadataBuilder = Metadata.builder(previousState.metadata()); + metadataBuilder.version(manifest.getMetadataVersion()); + metadataBuilder.clusterUUID(manifest.getClusterUUID()); + metadataBuilder.clusterUUIDCommitted(manifest.isClusterUUIDCommitted()); + Map indexMetadataMap = new HashMap<>(); + Map indicesRouting = new HashMap<>(previousState.routingTable().getIndicesRouting()); + + readResults.forEach(remoteReadResult -> { + switch (remoteReadResult.getComponent()) { + case RemoteIndexMetadata.INDEX: + IndexMetadata indexMetadata = (IndexMetadata) remoteReadResult.getObj(); + indexMetadataMap.put(indexMetadata.getIndex().getName(), indexMetadata); + break; + case CUSTOM_METADATA: + Metadata.Custom metadataCustom = (Metadata.Custom) remoteReadResult.getObj(); + if (includeEphemeral || (!includeEphemeral && metadataCustom.context().contains(XContentContext.GATEWAY))) { + metadataBuilder.putCustom(remoteReadResult.getComponentName(), (Metadata.Custom) remoteReadResult.getObj()); + } + break; + case COORDINATION_METADATA: + metadataBuilder.coordinationMetadata((CoordinationMetadata) remoteReadResult.getObj()); + break; + case SETTING_METADATA: + metadataBuilder.persistentSettings((Settings) remoteReadResult.getObj()); + break; + case TRANSIENT_SETTING_METADATA: + metadataBuilder.transientSettings((Settings) remoteReadResult.getObj()); + break; + case TEMPLATES_METADATA: + metadataBuilder.templates((TemplatesMetadata) remoteReadResult.getObj()); + break; + case HASHES_OF_CONSISTENT_SETTINGS: + metadataBuilder.hashesOfConsistentSettings((DiffableStringMap) remoteReadResult.getObj()); + break; + case CLUSTER_STATE_ATTRIBUTE: + if (remoteReadResult.getComponentName().equals(DISCOVERY_NODES)) { + discoveryNodesBuilder.set(DiscoveryNodes.builder((DiscoveryNodes) remoteReadResult.getObj())); + } else if (remoteReadResult.getComponentName().equals(CLUSTER_BLOCKS)) { + clusterStateBuilder.blocks((ClusterBlocks) remoteReadResult.getObj()); + } else if (remoteReadResult.getComponentName().startsWith(CLUSTER_STATE_CUSTOM)) { + // component name format is "cluster-state-custom--custom_name" + String custom = remoteReadResult.getComponentName().split(CUSTOM_DELIMITER)[1]; + clusterStateBuilder.putCustom(custom, (ClusterState.Custom) remoteReadResult.getObj()); + } + break; + default: + throw new IllegalStateException("Unknown component: " + remoteReadResult.getComponent()); + } + }); - private static String globalMetadataFileName(Metadata metadata) { - // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/global-metadata/metadata______ - return String.join( - DELIMITER, - 
METADATA_FILE_PREFIX, - RemoteStoreUtils.invertLong(metadata.version()), - RemoteStoreUtils.invertLong(System.currentTimeMillis()), - String.valueOf(GLOBAL_METADATA_CURRENT_CODEC_VERSION) - ); - } + metadataBuilder.indices(indexMetadataMap); + if (readDiscoveryNodes) { + clusterStateBuilder.nodes(discoveryNodesBuilder.get().localNodeId(localNodeId)); + } + + clusterStateBuilder.metadata(metadataBuilder).version(manifest.getStateVersion()).stateUUID(manifest.getStateUUID()); - private static String metadataAttributeFileName(String componentPrefix, Long metadataVersion) { - // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/global-metadata/______ - return String.join( - DELIMITER, - componentPrefix, - RemoteStoreUtils.invertLong(metadataVersion), - RemoteStoreUtils.invertLong(System.currentTimeMillis()), - String.valueOf(GLOBAL_METADATA_CURRENT_CODEC_VERSION) + readIndexRoutingTableResults.forEach( + indexRoutingTable -> indicesRouting.put(indexRoutingTable.getIndex().getName(), indexRoutingTable) ); - } + clusterStateBuilder.routingTable(new RoutingTable(manifest.getRoutingTableVersion(), indicesRouting)); - BlobPath getManifestFolderPath(String clusterName, String clusterUUID) { - return getCusterMetadataBasePath(clusterName, clusterUUID).add(MANIFEST_PATH_TOKEN); + return clusterStateBuilder.build(); } - /** - * Fetch latest index metadata from remote cluster state - * - * @param clusterUUID uuid of cluster state to refer to in remote - * @param clusterName name of the cluster - * @param clusterMetadataManifest manifest file of cluster - * @return {@code Map} latest IndexUUID to IndexMetadata map - */ - private Map getIndexMetadataMap( + public ClusterState getClusterStateForManifest( String clusterName, - String clusterUUID, - ClusterMetadataManifest clusterMetadataManifest - ) { - assert Objects.equals(clusterUUID, clusterMetadataManifest.getClusterUUID()) - : "Corrupt ClusterMetadataManifest found. 
Cluster UUID mismatch."; - Map remoteIndexMetadata = new HashMap<>(); - for (UploadedIndexMetadata uploadedIndexMetadata : clusterMetadataManifest.getIndices()) { - IndexMetadata indexMetadata = getIndexMetadata(clusterName, clusterUUID, uploadedIndexMetadata); - remoteIndexMetadata.put(uploadedIndexMetadata.getIndexUUID(), indexMetadata); - } - return remoteIndexMetadata; - } - - /** - * Fetch index metadata from remote cluster state - * - * @param clusterUUID uuid of cluster state to refer to in remote - * @param clusterName name of the cluster - * @param uploadedIndexMetadata {@link UploadedIndexMetadata} contains details about remote location of index metadata - * @return {@link IndexMetadata} - */ - private IndexMetadata getIndexMetadata(String clusterName, String clusterUUID, UploadedIndexMetadata uploadedIndexMetadata) { - BlobContainer blobContainer = indexMetadataContainer(clusterName, clusterUUID, uploadedIndexMetadata.getIndexUUID()); - try { - String[] splitPath = uploadedIndexMetadata.getUploadedFilename().split("/"); - return INDEX_METADATA_FORMAT.read( - blobContainer, - splitPath[splitPath.length - 1], - blobStoreRepository.getNamedXContentRegistry() - ); - } catch (IOException e) { - throw new IllegalStateException( - String.format(Locale.ROOT, "Error while downloading IndexMetadata - %s", uploadedIndexMetadata.getUploadedFilename()), - e + ClusterMetadataManifest manifest, + String localNodeId, + boolean includeEphemeral + ) throws IOException { + if (manifest.onOrAfterCodecVersion(CODEC_V2)) { + return readClusterStateInParallel( + ClusterState.builder(new ClusterName(clusterName)).build(), + manifest, + manifest.getClusterUUID(), + localNodeId, + manifest.getIndices(), + manifest.getCustomMetadataMap(), + manifest.getCoordinationMetadata() != null, + manifest.getSettingsMetadata() != null, + manifest.getTransientSettingsMetadata() != null, + manifest.getTemplatesMetadata() != null, + includeEphemeral && manifest.getDiscoveryNodesMetadata() != null, + includeEphemeral && manifest.getClusterBlocksMetadata() != null, + includeEphemeral ? manifest.getIndicesRouting() : emptyList(), + includeEphemeral && manifest.getHashesOfConsistentSettings() != null, + includeEphemeral ? 
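
// Sketch of the codec gate being applied in this branch (hypothetical standalone
// method; the real check is manifest.onOrAfterCodecVersion(CODEC_V2)): V2+ manifests
// reference every state component individually, so each can be read in parallel, while
// ephemeral pieces (discovery nodes, blocks, routing, cluster-state customs) are
// fetched only when the caller asked for the full in-memory state.

final class ManifestReadGateSketch {
    static boolean shouldRead(boolean manifestOnOrAfterV2, boolean referencedInManifest,
                              boolean ephemeralComponent, boolean includeEphemeral) {
        if (manifestOnOrAfterV2 == false) {
            return false; // pre-V2 manifests fall back to the single global-metadata blob
        }
        return referencedInManifest && (ephemeralComponent == false || includeEphemeral);
    }
}
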
manifest.getClusterStateCustomMap() : emptyMap(), + includeEphemeral ); - } - } - - /** - * Fetch latest ClusterState from remote, including global metadata, index metadata and cluster state version - * - * @param clusterUUID uuid of cluster state to refer to in remote - * @param clusterName name of the cluster - * @return {@link IndexMetadata} - */ - public ClusterState getLatestClusterState(String clusterName, String clusterUUID) { - Optional clusterMetadataManifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); - if (clusterMetadataManifest.isEmpty()) { - throw new IllegalStateException( - String.format(Locale.ROOT, "Latest cluster metadata manifest is not present for the provided clusterUUID: %s", clusterUUID) + } else { + ClusterState clusterState = readClusterStateInParallel( + ClusterState.builder(new ClusterName(clusterName)).build(), + manifest, + manifest.getClusterUUID(), + localNodeId, + manifest.getIndices(), + // for manifest codec V1, we don't have the following objects to read, so not passing anything + emptyMap(), + false, + false, + false, + false, + false, + false, + emptyList(), + false, + emptyMap(), + false ); + Metadata.Builder mb = Metadata.builder(remoteGlobalMetadataManager.getGlobalMetadata(manifest.getClusterUUID(), manifest)); + mb.indices(clusterState.metadata().indices()); + return ClusterState.builder(clusterState).metadata(mb).build(); } - // Fetch Global Metadata - Metadata globalMetadata = getGlobalMetadata(clusterName, clusterUUID, clusterMetadataManifest.get()); - - // Fetch Index Metadata - Map indices = getIndexMetadataMap(clusterName, clusterUUID, clusterMetadataManifest.get()); - - Map indexMetadataMap = new HashMap<>(); - indices.values().forEach(indexMetadata -> { indexMetadataMap.put(indexMetadata.getIndex().getName(), indexMetadata); }); - - return ClusterState.builder(ClusterState.EMPTY_STATE) - .version(clusterMetadataManifest.get().getStateVersion()) - .metadata(Metadata.builder(globalMetadata).indices(indexMetadataMap).build()) - .build(); } - private Metadata getGlobalMetadata(String clusterName, String clusterUUID, ClusterMetadataManifest clusterMetadataManifest) { - String globalMetadataFileName = clusterMetadataManifest.getGlobalMetadataFileName(); - try { - // Fetch Global metadata - if (globalMetadataFileName != null) { - String[] splitPath = globalMetadataFileName.split("/"); - return GLOBAL_METADATA_FORMAT.read( - globalMetadataContainer(clusterName, clusterUUID), - splitPath[splitPath.length - 1], - blobStoreRepository.getNamedXContentRegistry() - ); - } else if (clusterMetadataManifest.hasMetadataAttributesFiles()) { - CoordinationMetadata coordinationMetadata = getCoordinationMetadata( - clusterName, - clusterUUID, - clusterMetadataManifest.getCoordinationMetadata().getUploadedFilename() - ); - Settings settingsMetadata = getSettingsMetadata( - clusterName, - clusterUUID, - clusterMetadataManifest.getSettingsMetadata().getUploadedFilename() - ); - TemplatesMetadata templatesMetadata = getTemplatesMetadata( - clusterName, - clusterUUID, - clusterMetadataManifest.getTemplatesMetadata().getUploadedFilename() - ); - Metadata.Builder builder = new Metadata.Builder(); - builder.coordinationMetadata(coordinationMetadata); - builder.persistentSettings(settingsMetadata); - builder.templates(templatesMetadata); - clusterMetadataManifest.getCustomMetadataMap() - .forEach( - (key, value) -> builder.putCustom( - key, - getCustomsMetadata(clusterName, clusterUUID, value.getUploadedFilename(), key) - ) - ); - return 
builder.build(); - } else { - return Metadata.EMPTY_METADATA; + public ClusterState getClusterStateUsingDiff( + String clusterName, + ClusterMetadataManifest manifest, + ClusterState previousState, + String localNodeId + ) throws IOException { + assert manifest.getDiffManifest() != null; + ClusterStateDiffManifest diff = manifest.getDiffManifest(); + List updatedIndices = diff.getIndicesUpdated().stream().map(idx -> { + Optional uploadedIndexMetadataOptional = manifest.getIndices() + .stream() + .filter(idx2 -> idx2.getIndexName().equals(idx)) + .findFirst(); + assert uploadedIndexMetadataOptional.isPresent() == true; + return uploadedIndexMetadataOptional.get(); + }).collect(Collectors.toList()); + + Map updatedCustomMetadata = new HashMap<>(); + if (diff.getCustomMetadataUpdated() != null) { + for (String customType : diff.getCustomMetadataUpdated()) { + updatedCustomMetadata.put(customType, manifest.getCustomMetadataMap().get(customType)); } - } catch (IOException e) { - throw new IllegalStateException( - String.format(Locale.ROOT, "Error while downloading Global Metadata - %s", globalMetadataFileName), - e - ); } - } - - private CoordinationMetadata getCoordinationMetadata(String clusterName, String clusterUUID, String coordinationMetadataFileName) { - try { - // Fetch Coordination metadata - if (coordinationMetadataFileName != null) { - String[] splitPath = coordinationMetadataFileName.split("/"); - return COORDINATION_METADATA_FORMAT.read( - globalMetadataContainer(clusterName, clusterUUID), - splitPath[splitPath.length - 1], - blobStoreRepository.getNamedXContentRegistry() - ); - } else { - return CoordinationMetadata.EMPTY_METADATA; + Map updatedClusterStateCustom = new HashMap<>(); + if (diff.getClusterStateCustomUpdated() != null) { + for (String customType : diff.getClusterStateCustomUpdated()) { + updatedClusterStateCustom.put(customType, manifest.getClusterStateCustomMap().get(customType)); } - } catch (IOException e) { - throw new IllegalStateException( - String.format(Locale.ROOT, "Error while downloading Coordination Metadata - %s", coordinationMetadataFileName), - e - ); } - } - private Settings getSettingsMetadata(String clusterName, String clusterUUID, String settingsMetadataFileName) { - try { - // Fetch Settings metadata - if (settingsMetadataFileName != null) { - String[] splitPath = settingsMetadataFileName.split("/"); - return SETTINGS_METADATA_FORMAT.read( - globalMetadataContainer(clusterName, clusterUUID), - splitPath[splitPath.length - 1], - blobStoreRepository.getNamedXContentRegistry() - ); - } else { - return Settings.EMPTY; + List updatedIndexRouting = new ArrayList<>(); + updatedIndexRouting.addAll( + remoteRoutingTableService.getUpdatedIndexRoutingTableMetadata(diff.getIndicesRoutingUpdated(), manifest.getIndicesRouting()) + ); + + ClusterState updatedClusterState = readClusterStateInParallel( + previousState, + manifest, + manifest.getClusterUUID(), + localNodeId, + updatedIndices, + updatedCustomMetadata, + diff.isCoordinationMetadataUpdated(), + diff.isSettingsMetadataUpdated(), + diff.isTransientSettingsMetadataUpdated(), + diff.isTemplatesMetadataUpdated(), + diff.isDiscoveryNodesUpdated(), + diff.isClusterBlocksUpdated(), + updatedIndexRouting, + diff.isHashesOfConsistentSettingsUpdated(), + updatedClusterStateCustom, + true + ); + ClusterState.Builder clusterStateBuilder = ClusterState.builder(updatedClusterState); + Metadata.Builder metadataBuilder = Metadata.builder(updatedClusterState.metadata()); + // remove the deleted indices from the metadata 
+ for (String index : diff.getIndicesDeleted()) { + metadataBuilder.remove(index); + } + // remove the deleted metadata customs from the metadata + if (diff.getCustomMetadataDeleted() != null) { + for (String customType : diff.getCustomMetadataDeleted()) { + metadataBuilder.removeCustom(customType); } - } catch (IOException e) { - throw new IllegalStateException( - String.format(Locale.ROOT, "Error while downloading Settings Metadata - %s", settingsMetadataFileName), - e - ); } - } - private TemplatesMetadata getTemplatesMetadata(String clusterName, String clusterUUID, String templatesMetadataFileName) { - try { - // Fetch Templates metadata - if (templatesMetadataFileName != null) { - String[] splitPath = templatesMetadataFileName.split("/"); - return TEMPLATES_METADATA_FORMAT.read( - globalMetadataContainer(clusterName, clusterUUID), - splitPath[splitPath.length - 1], - blobStoreRepository.getNamedXContentRegistry() - ); - } else { - return TemplatesMetadata.EMPTY_METADATA; + // remove the deleted cluster state customs from the metadata + if (diff.getClusterStateCustomDeleted() != null) { + for (String customType : diff.getClusterStateCustomDeleted()) { + clusterStateBuilder.removeCustom(customType); } - } catch (IOException e) { - throw new IllegalStateException( - String.format(Locale.ROOT, "Error while downloading Templates Metadata - %s", templatesMetadataFileName), - e - ); } - } - private Metadata.Custom getCustomsMetadata(String clusterName, String clusterUUID, String customMetadataFileName, String custom) { - requireNonNull(customMetadataFileName); - try { - // Fetch Custom metadata - String[] splitPath = customMetadataFileName.split("/"); - ChecksumBlobStoreFormat customChecksumBlobStoreFormat = new ChecksumBlobStoreFormat<>( - "custom", - METADATA_NAME_FORMAT, - (parser -> Metadata.Custom.fromXContent(parser, custom)) - ); - return customChecksumBlobStoreFormat.read( - globalMetadataContainer(clusterName, clusterUUID), - splitPath[splitPath.length - 1], - blobStoreRepository.getNamedXContentRegistry() - ); - } catch (IOException e) { - throw new IllegalStateException( - String.format(Locale.ROOT, "Error while downloading Custom Metadata - %s", customMetadataFileName), - e - ); + HashMap indexRoutingTables = new HashMap<>(updatedClusterState.getRoutingTable().getIndicesRouting()); + + for (String indexName : diff.getIndicesRoutingDeleted()) { + indexRoutingTables.remove(indexName); } - } - /** - * Fetch latest ClusterMetadataManifest from remote state store - * - * @param clusterUUID uuid of cluster state to refer to in remote - * @param clusterName name of the cluster - * @return ClusterMetadataManifest - */ - public Optional getLatestClusterMetadataManifest(String clusterName, String clusterUUID) { - Optional latestManifestFileName = getLatestManifestFileName(clusterName, clusterUUID); - return latestManifestFileName.map(s -> fetchRemoteClusterMetadataManifest(clusterName, clusterUUID, s)); + return clusterStateBuilder.stateUUID(manifest.getStateUUID()) + .version(manifest.getStateVersion()) + .metadata(metadataBuilder) + .routingTable(new RoutingTable(manifest.getRoutingTableVersion(), indexRoutingTables)) + .build(); } /** @@ -1248,7 +1423,10 @@ public Optional getLatestClusterMetadataManifest(String public String getLastKnownUUIDFromRemote(String clusterName) { try { Set clusterUUIDs = getAllClusterUUIDs(clusterName); - Map latestManifests = getLatestManifestForAllClusterUUIDs(clusterName, clusterUUIDs); + Map latestManifests = 
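
// The diff-application shape used by getClusterStateUsingDiff above, reduced to a map
// (a sketch only; the real code drives Metadata.Builder and RoutingTable from the
// ClusterStateDiffManifest): read just the updated entries, then drop the deleted keys.

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

final class DiffApplySketch {
    static <V> Map<String, V> applyDiff(Map<String, V> previous, Map<String, V> updated, Collection<String> deleted) {
        Map<String, V> next = new HashMap<>(previous); // start from the last applied state
        next.putAll(updated);                          // overwrite only what the diff marks updated
        deleted.forEach(next::remove);                 // then remove what the diff marks deleted
        return next;
    }
}
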
remoteManifestManager.getLatestManifestForAllClusterUUIDs( + clusterName, + clusterUUIDs + ); List validChain = createClusterChain(latestManifests, clusterName); if (validChain.isEmpty()) { return ClusterState.UNKNOWN_UUID; @@ -1262,8 +1440,19 @@ public String getLastKnownUUIDFromRemote(String clusterName) { } } + public void setRemoteStateReadTimeout(TimeValue remoteStateReadTimeout) { + this.remoteStateReadTimeout = remoteStateReadTimeout; + } + + private BlobStoreTransferService getBlobStoreTransferService() { + if (blobStoreTransferService == null) { + blobStoreTransferService = new BlobStoreTransferService(getBlobStore(), threadpool); + } + return blobStoreTransferService; + } + Set getAllClusterUUIDs(String clusterName) throws IOException { - Map clusterUUIDMetadata = clusterUUIDContainer(clusterName).children(); + Map clusterUUIDMetadata = clusterUUIDContainer(blobStoreRepository, clusterName).children(); if (clusterUUIDMetadata == null) { return Collections.emptySet(); } @@ -1308,7 +1497,7 @@ private List createClusterChain(final Map 1) { logger.info("Top level cluster UUIDs: {}", topLevelClusterUUIDs); @@ -1364,7 +1553,7 @@ private Map trimClusterUUIDs( if (!ClusterState.UNKNOWN_UUID.equals(currentManifest.getPreviousClusterUUID())) { ClusterMetadataManifest previousManifest = trimmedUUIDs.get(currentManifest.getPreviousClusterUUID()); if (isMetadataEqual(currentManifest, previousManifest, clusterName) - && isGlobalMetadataEqual(currentManifest, previousManifest, clusterName)) { + && remoteGlobalMetadataManager.isGlobalMetadataEqual(currentManifest, previousManifest, clusterName)) { trimmedUUIDs.remove(clusterUUID); } } @@ -1379,14 +1568,20 @@ private boolean isMetadataEqual(ClusterMetadataManifest first, ClusterMetadataMa } final Map secondIndices = second.getIndices() .stream() - .collect(Collectors.toMap(md -> md.getIndexName(), Function.identity())); + .collect(Collectors.toMap(UploadedIndexMetadata::getIndexName, Function.identity())); for (UploadedIndexMetadata uploadedIndexMetadata : first.getIndices()) { - final IndexMetadata firstIndexMetadata = getIndexMetadata(clusterName, first.getClusterUUID(), uploadedIndexMetadata); + final IndexMetadata firstIndexMetadata = remoteIndexMetadataManager.getIndexMetadata( + uploadedIndexMetadata, + first.getClusterUUID() + ); final UploadedIndexMetadata secondUploadedIndexMetadata = secondIndices.get(uploadedIndexMetadata.getIndexName()); if (secondUploadedIndexMetadata == null) { return false; } - final IndexMetadata secondIndexMetadata = getIndexMetadata(clusterName, second.getClusterUUID(), secondUploadedIndexMetadata); + final IndexMetadata secondIndexMetadata = remoteIndexMetadataManager.getIndexMetadata( + secondUploadedIndexMetadata, + second.getClusterUUID() + ); if (firstIndexMetadata.equals(secondIndexMetadata) == false) { return false; } @@ -1394,156 +1589,15 @@ private boolean isMetadataEqual(ClusterMetadataManifest first, ClusterMetadataMa return true; } - private boolean isGlobalMetadataEqual(ClusterMetadataManifest first, ClusterMetadataManifest second, String clusterName) { - Metadata secondGlobalMetadata = getGlobalMetadata(clusterName, second.getClusterUUID(), second); - Metadata firstGlobalMetadata = getGlobalMetadata(clusterName, first.getClusterUUID(), first); - return Metadata.isGlobalResourcesMetadataEquals(firstGlobalMetadata, secondGlobalMetadata); - } - private boolean isValidClusterUUID(ClusterMetadataManifest manifest) { return manifest.isClusterUUIDCommitted(); } - /** - * Fetch ClusterMetadataManifest 
files from remote state store in order - * - * @param clusterUUID uuid of cluster state to refer to in remote - * @param clusterName name of the cluster - * @param limit max no of files to fetch - * @return all manifest file names - */ - private List getManifestFileNames(String clusterName, String clusterUUID, int limit) throws IllegalStateException { - try { - - /* - {@link BlobContainer#listBlobsByPrefixInSortedOrder} will list the latest manifest file first - as the manifest file name generated via {@link RemoteClusterStateService#getManifestFileName} ensures - when sorted in LEXICOGRAPHIC order the latest uploaded manifest file comes on top. - */ - return manifestContainer(clusterName, clusterUUID).listBlobsByPrefixInSortedOrder( - MANIFEST_FILE_PREFIX + DELIMITER, - limit, - BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC - ); - } catch (IOException e) { - throw new IllegalStateException("Error while fetching latest manifest file for remote cluster state", e); - } - } - - /** - * Fetch latest ClusterMetadataManifest file from remote state store - * - * @param clusterUUID uuid of cluster state to refer to in remote - * @param clusterName name of the cluster - * @return latest ClusterMetadataManifest filename - */ - private Optional getLatestManifestFileName(String clusterName, String clusterUUID) throws IllegalStateException { - List manifestFilesMetadata = getManifestFileNames(clusterName, clusterUUID, 1); - if (manifestFilesMetadata != null && !manifestFilesMetadata.isEmpty()) { - return Optional.of(manifestFilesMetadata.get(0).name()); - } - logger.info("No manifest file present in remote store for cluster name: {}, cluster UUID: {}", clusterName, clusterUUID); - return Optional.empty(); - } - - /** - * Fetch ClusterMetadataManifest from remote state store - * - * @param clusterUUID uuid of cluster state to refer to in remote - * @param clusterName name of the cluster - * @return ClusterMetadataManifest - */ - ClusterMetadataManifest fetchRemoteClusterMetadataManifest(String clusterName, String clusterUUID, String filename) - throws IllegalStateException { - try { - return getClusterMetadataManifestBlobStoreFormat(filename).read( - manifestContainer(clusterName, clusterUUID), - filename, - blobStoreRepository.getNamedXContentRegistry() - ); - } catch (IOException e) { - throw new IllegalStateException(String.format(Locale.ROOT, "Error while downloading cluster metadata - %s", filename), e); - } - } - - private ChecksumBlobStoreFormat getClusterMetadataManifestBlobStoreFormat(String fileName) { - long codecVersion = getManifestCodecVersion(fileName); - if (codecVersion == MANIFEST_CURRENT_CODEC_VERSION) { - return CLUSTER_METADATA_MANIFEST_FORMAT; - } else if (codecVersion == ClusterMetadataManifest.CODEC_V1) { - return CLUSTER_METADATA_MANIFEST_FORMAT_V1; - } else if (codecVersion == ClusterMetadataManifest.CODEC_V0) { - return CLUSTER_METADATA_MANIFEST_FORMAT_V0; - } - - throw new IllegalArgumentException("Cluster metadata manifest file is corrupted, don't have valid codec version"); - } - - private int getManifestCodecVersion(String fileName) { - String[] splitName = fileName.split(DELIMITER); - if (splitName.length == SPLITED_MANIFEST_FILE_LENGTH) { - return Integer.parseInt(splitName[splitName.length - 1]); // Last value would be codec version. - } else if (splitName.length < SPLITED_MANIFEST_FILE_LENGTH) { // Where codec is not part of file name, i.e. default codec version 0 - // is used. 
- return ClusterMetadataManifest.CODEC_V0; - } else { - throw new IllegalArgumentException("Manifest file name is corrupted"); - } - } - - public static String encodeString(String content) { - return Base64.getUrlEncoder().withoutPadding().encodeToString(content.getBytes(StandardCharsets.UTF_8)); - } - public void writeMetadataFailed() { getStats().stateFailed(); } - /** - * Exception for Remote state transfer. - */ - public static class RemoteStateTransferException extends RuntimeException { - - public RemoteStateTransferException(String errorDesc) { - super(errorDesc); - } - - public RemoteStateTransferException(String errorDesc, Throwable cause) { - super(errorDesc, cause); - } - } - public RemotePersistenceStats getStats() { return remoteStateStats; } - - private static class UploadedMetadataResults { - List uploadedIndexMetadata; - Map uploadedCustomMetadataMap; - UploadedMetadataAttribute uploadedCoordinationMetadata; - UploadedMetadataAttribute uploadedSettingsMetadata; - UploadedMetadataAttribute uploadedTemplatesMetadata; - - public UploadedMetadataResults( - List uploadedIndexMetadata, - Map uploadedCustomMetadataMap, - UploadedMetadataAttribute uploadedCoordinationMetadata, - UploadedMetadataAttribute uploadedSettingsMetadata, - UploadedMetadataAttribute uploadedTemplatesMetadata - ) { - this.uploadedIndexMetadata = uploadedIndexMetadata; - this.uploadedCustomMetadataMap = uploadedCustomMetadataMap; - this.uploadedCoordinationMetadata = uploadedCoordinationMetadata; - this.uploadedSettingsMetadata = uploadedSettingsMetadata; - this.uploadedTemplatesMetadata = uploadedTemplatesMetadata; - } - - public UploadedMetadataResults() { - this.uploadedIndexMetadata = new ArrayList<>(); - this.uploadedCustomMetadataMap = new HashMap<>(); - this.uploadedCoordinationMetadata = null; - this.uploadedSettingsMetadata = null; - this.uploadedTemplatesMetadata = null; - } - } } diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateUtils.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateUtils.java index 500d1af0211e8..f2b93c3784407 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateUtils.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateUtils.java @@ -8,16 +8,125 @@ package org.opensearch.gateway.remote; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.repositories.blobstore.BlobStoreRepository; + import java.nio.charset.StandardCharsets; +import java.util.ArrayList; import java.util.Base64; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V1; /** * Utility class for Remote Cluster State */ public class RemoteClusterStateUtils { + + public static final String DELIMITER = "__"; + public static final String METADATA_NAME_FORMAT = "%s.dat"; + public static final String CLUSTER_STATE_PATH_TOKEN = "cluster-state"; + public static final String GLOBAL_METADATA_PATH_TOKEN = "global-metadata"; + public static final String CLUSTER_STATE_EPHEMERAL_PATH_TOKEN = "ephemeral"; + public static final int GLOBAL_METADATA_CURRENT_CODEC_VERSION = 1; + public static final String METADATA_FILE_PREFIX = "metadata"; + public static final String CUSTOM_DELIMITER = "--"; public static final String 
PATH_DELIMITER = "/"; + public static final String METADATA_NAME_PLAIN_FORMAT = "%s"; + + // ToXContent Params with gateway mode. + // We are using gateway context mode to persist all custom metadata. + public static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams( + Map.of(Metadata.CONTEXT_MODE_PARAM, Metadata.CONTEXT_MODE_GATEWAY) + ); + + public static BlobPath getClusterMetadataBasePath(BlobStoreRepository blobStoreRepository, String clusterName, String clusterUUID) { + return blobStoreRepository.basePath().add(encodeString(clusterName)).add(CLUSTER_STATE_PATH_TOKEN).add(clusterUUID); + } public static String encodeString(String content) { return Base64.getUrlEncoder().withoutPadding().encodeToString(content.getBytes(StandardCharsets.UTF_8)); } + + public static String getFormattedIndexFileName(String fileName) { + String[] pathTokens = fileName.split(DELIMITER); + // last value added is the codec version in IndexMetadata file + int codecVersion = Integer.parseInt(pathTokens[pathTokens.length - 1]); + if (codecVersion == CODEC_V1) { + return String.format(Locale.ROOT, METADATA_NAME_FORMAT, fileName); + } + return fileName; + } + + static BlobContainer clusterUUIDContainer(BlobStoreRepository blobStoreRepository, String clusterName) { + return blobStoreRepository.blobStore() + .blobContainer( + blobStoreRepository.basePath() + .add(Base64.getUrlEncoder().withoutPadding().encodeToString(clusterName.getBytes(StandardCharsets.UTF_8))) + .add(CLUSTER_STATE_PATH_TOKEN) + ); + } + + /** + * Container class to keep metadata of all uploaded attributes + */ + public static class UploadedMetadataResults { + List uploadedIndexMetadata; + Map uploadedCustomMetadataMap; + Map uploadedClusterStateCustomMetadataMap; + ClusterMetadataManifest.UploadedMetadataAttribute uploadedCoordinationMetadata; + ClusterMetadataManifest.UploadedMetadataAttribute uploadedSettingsMetadata; + ClusterMetadataManifest.UploadedMetadataAttribute uploadedTransientSettingsMetadata; + ClusterMetadataManifest.UploadedMetadataAttribute uploadedTemplatesMetadata; + ClusterMetadataManifest.UploadedMetadataAttribute uploadedDiscoveryNodes; + ClusterMetadataManifest.UploadedMetadataAttribute uploadedClusterBlocks; + List uploadedIndicesRoutingMetadata; + ClusterMetadataManifest.UploadedMetadataAttribute uploadedHashesOfConsistentSettings; + + public UploadedMetadataResults( + List uploadedIndexMetadata, + Map uploadedCustomMetadataMap, + ClusterMetadataManifest.UploadedMetadataAttribute uploadedCoordinationMetadata, + ClusterMetadataManifest.UploadedMetadataAttribute uploadedSettingsMetadata, + ClusterMetadataManifest.UploadedMetadataAttribute uploadedTemplatesMetadata, + ClusterMetadataManifest.UploadedMetadataAttribute uploadedTransientSettingsMetadata, + ClusterMetadataManifest.UploadedMetadataAttribute uploadedDiscoveryNodes, + ClusterMetadataManifest.UploadedMetadataAttribute uploadedClusterBlocks, + List uploadedIndicesRoutingMetadata, + ClusterMetadataManifest.UploadedMetadataAttribute uploadedHashesOfConsistentSettings, + Map uploadedClusterStateCustomMap + ) { + this.uploadedIndexMetadata = uploadedIndexMetadata; + this.uploadedCustomMetadataMap = uploadedCustomMetadataMap; + this.uploadedCoordinationMetadata = uploadedCoordinationMetadata; + this.uploadedSettingsMetadata = uploadedSettingsMetadata; + this.uploadedTransientSettingsMetadata = uploadedTransientSettingsMetadata; + this.uploadedTemplatesMetadata = uploadedTemplatesMetadata; + this.uploadedDiscoveryNodes = uploadedDiscoveryNodes; + 
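
// Why cluster names are run through encodeString above before becoming blob path
// segments: Base64 URL encoding without padding yields a single path-safe token for
// names such as "123456789012_test-cluster". A runnable round-trip sketch:

import java.nio.charset.StandardCharsets;
import java.util.Base64;

final class EncodeStringSketch {
    public static void main(String[] args) {
        String clusterName = "123456789012_test-cluster";
        String encoded = Base64.getUrlEncoder()
            .withoutPadding()
            .encodeToString(clusterName.getBytes(StandardCharsets.UTF_8));
        String decoded = new String(Base64.getUrlDecoder().decode(encoded), StandardCharsets.UTF_8);
        System.out.println(encoded);        // URL-safe alphabet, no '=' padding, no '/' characters
        assert decoded.equals(clusterName); // encoding is reversible for debugging and tooling
    }
}
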
this.uploadedClusterBlocks = uploadedClusterBlocks; + this.uploadedIndicesRoutingMetadata = uploadedIndicesRoutingMetadata; + this.uploadedHashesOfConsistentSettings = uploadedHashesOfConsistentSettings; + this.uploadedClusterStateCustomMetadataMap = uploadedClusterStateCustomMap; + } + + public UploadedMetadataResults() { + this.uploadedIndexMetadata = new ArrayList<>(); + this.uploadedCustomMetadataMap = new HashMap<>(); + this.uploadedCoordinationMetadata = null; + this.uploadedSettingsMetadata = null; + this.uploadedTransientSettingsMetadata = null; + this.uploadedTemplatesMetadata = null; + this.uploadedDiscoveryNodes = null; + this.uploadedClusterBlocks = null; + this.uploadedIndicesRoutingMetadata = new ArrayList<>(); + this.uploadedHashesOfConsistentSettings = null; + this.uploadedClusterStateCustomMetadataMap = new HashMap<>(); + } + } } diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java new file mode 100644 index 0000000000000..cd29114e05684 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java @@ -0,0 +1,318 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote; + +import org.opensearch.action.LatchedActionListener; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.CoordinationMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.Metadata.Custom; +import org.opensearch.cluster.metadata.TemplatesMetadata; +import org.opensearch.common.CheckedRunnable; +import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.RemoteWritableEntityStore; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore; +import org.opensearch.gateway.remote.model.RemoteCoordinationMetadata; +import org.opensearch.gateway.remote.model.RemoteCustomMetadata; +import org.opensearch.gateway.remote.model.RemoteGlobalMetadata; +import org.opensearch.gateway.remote.model.RemoteHashesOfConsistentSettings; +import org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata; +import org.opensearch.gateway.remote.model.RemoteReadResult; +import org.opensearch.gateway.remote.model.RemoteTemplatesMetadata; +import org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Locale; +import java.util.Map; +import java.util.Set; + +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.METADATA_NAME_FORMAT; + +/** + * A 
Manager which provides APIs to write and read Global Metadata attributes to remote store + * + * @opensearch.internal + */ +public class RemoteGlobalMetadataManager { + + public static final TimeValue GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); + + public static final Setting GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.state.global_metadata.upload_timeout", + GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final int GLOBAL_METADATA_CURRENT_CODEC_VERSION = 1; + + private volatile TimeValue globalMetadataUploadTimeout; + private Map remoteWritableEntityStores; + private final Compressor compressor; + private final NamedXContentRegistry namedXContentRegistry; + private final NamedWriteableRegistry namedWriteableRegistry; + + RemoteGlobalMetadataManager( + ClusterSettings clusterSettings, + String clusterName, + BlobStoreRepository blobStoreRepository, + BlobStoreTransferService blobStoreTransferService, + NamedWriteableRegistry namedWriteableRegistry, + ThreadPool threadpool + ) { + this.globalMetadataUploadTimeout = clusterSettings.get(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING); + this.compressor = blobStoreRepository.getCompressor(); + this.namedXContentRegistry = blobStoreRepository.getNamedXContentRegistry(); + this.namedWriteableRegistry = namedWriteableRegistry; + this.remoteWritableEntityStores = new HashMap<>(); + this.remoteWritableEntityStores.put( + RemoteGlobalMetadata.GLOBAL_METADATA, + new RemoteClusterStateBlobStore<>( + blobStoreTransferService, + blobStoreRepository, + clusterName, + threadpool, + ThreadPool.Names.REMOTE_STATE_READ + ) + ); + this.remoteWritableEntityStores.put( + RemoteCoordinationMetadata.COORDINATION_METADATA, + new RemoteClusterStateBlobStore<>( + blobStoreTransferService, + blobStoreRepository, + clusterName, + threadpool, + ThreadPool.Names.REMOTE_STATE_READ + ) + ); + this.remoteWritableEntityStores.put( + RemotePersistentSettingsMetadata.SETTING_METADATA, + new RemoteClusterStateBlobStore<>( + blobStoreTransferService, + blobStoreRepository, + clusterName, + threadpool, + ThreadPool.Names.REMOTE_STATE_READ + ) + ); + this.remoteWritableEntityStores.put( + RemoteTransientSettingsMetadata.TRANSIENT_SETTING_METADATA, + new RemoteClusterStateBlobStore<>( + blobStoreTransferService, + blobStoreRepository, + clusterName, + threadpool, + ThreadPool.Names.REMOTE_STATE_READ + ) + ); + this.remoteWritableEntityStores.put( + RemoteHashesOfConsistentSettings.HASHES_OF_CONSISTENT_SETTINGS, + new RemoteClusterStateBlobStore<>( + blobStoreTransferService, + blobStoreRepository, + clusterName, + threadpool, + ThreadPool.Names.REMOTE_STATE_READ + ) + ); + this.remoteWritableEntityStores.put( + RemoteTemplatesMetadata.TEMPLATES_METADATA, + new RemoteClusterStateBlobStore<>( + blobStoreTransferService, + blobStoreRepository, + clusterName, + threadpool, + ThreadPool.Names.REMOTE_STATE_READ + ) + ); + this.remoteWritableEntityStores.put( + RemoteCustomMetadata.CUSTOM_METADATA, + new RemoteClusterStateBlobStore<>( + blobStoreTransferService, + blobStoreRepository, + clusterName, + threadpool, + ThreadPool.Names.REMOTE_STATE_READ + ) + ); + clusterSettings.addSettingsUpdateConsumer(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, this::setGlobalMetadataUploadTimeout); + } + + /** + * Allows async upload of Metadata components to remote + */ + CheckedRunnable getAsyncMetadataWriteAction( + AbstractRemoteWritableBlobEntity writeEntity, + 
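+        // latched listener lets the caller block on a CountDownLatch until all parallel uploads complete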
LatchedActionListener latchedActionListener + ) { + return (() -> getStore(writeEntity).writeAsync(writeEntity, getActionListener(writeEntity, latchedActionListener))); + } + + private RemoteWritableEntityStore getStore(AbstractRemoteWritableBlobEntity entity) { + RemoteWritableEntityStore remoteStore = remoteWritableEntityStores.get(entity.getType()); + if (remoteStore == null) { + throw new IllegalArgumentException("Unknown entity type [" + entity.getType() + "]"); + } + return remoteStore; + } + + private ActionListener getActionListener( + AbstractRemoteWritableBlobEntity remoteBlobStoreObject, + LatchedActionListener latchedActionListener + ) { + return ActionListener.wrap( + resp -> latchedActionListener.onResponse(remoteBlobStoreObject.getUploadedMetadata()), + ex -> latchedActionListener.onFailure( + new RemoteStateTransferException("Upload failed for " + remoteBlobStoreObject.getType(), ex) + ) + ); + } + + CheckedRunnable getAsyncMetadataReadAction( + AbstractRemoteWritableBlobEntity readEntity, + String componentName, + LatchedActionListener listener + ) { + ActionListener actionListener = ActionListener.wrap( + response -> listener.onResponse(new RemoteReadResult((ToXContent) response, readEntity.getType(), componentName)), + listener::onFailure + ); + return () -> getStore(readEntity).readAsync(readEntity, actionListener); + } + + Metadata getGlobalMetadata(String clusterUUID, ClusterMetadataManifest clusterMetadataManifest) { + String globalMetadataFileName = clusterMetadataManifest.getGlobalMetadataFileName(); + try { + // Fetch Global metadata + if (globalMetadataFileName != null) { + RemoteGlobalMetadata remoteGlobalMetadata = new RemoteGlobalMetadata( + String.format(Locale.ROOT, METADATA_NAME_FORMAT, globalMetadataFileName), + clusterUUID, + compressor, + namedXContentRegistry + ); + return (Metadata) getStore(remoteGlobalMetadata).read(remoteGlobalMetadata); + } else if (clusterMetadataManifest.hasMetadataAttributesFiles()) { + // from CODEC_V2, we have started uploading all the metadata in granular files instead of a single entity + Metadata.Builder builder = new Metadata.Builder(); + if (clusterMetadataManifest.getCoordinationMetadata().getUploadedFilename() != null) { + RemoteCoordinationMetadata remoteCoordinationMetadata = new RemoteCoordinationMetadata( + clusterMetadataManifest.getCoordinationMetadata().getUploadedFilename(), + clusterUUID, + compressor, + namedXContentRegistry + ); + builder.coordinationMetadata( + (CoordinationMetadata) getStore(remoteCoordinationMetadata).read(remoteCoordinationMetadata) + ); + } + if (clusterMetadataManifest.getTemplatesMetadata().getUploadedFilename() != null) { + RemoteTemplatesMetadata remoteTemplatesMetadata = new RemoteTemplatesMetadata( + clusterMetadataManifest.getTemplatesMetadata().getUploadedFilename(), + clusterUUID, + compressor, + namedXContentRegistry + ); + builder.templates((TemplatesMetadata) getStore(remoteTemplatesMetadata).read(remoteTemplatesMetadata)); + } + if (clusterMetadataManifest.getSettingsMetadata().getUploadedFilename() != null) { + RemotePersistentSettingsMetadata remotePersistentSettingsMetadata = new RemotePersistentSettingsMetadata( + clusterMetadataManifest.getSettingsMetadata().getUploadedFilename(), + clusterUUID, + compressor, + namedXContentRegistry + ); + builder.persistentSettings( + (Settings) getStore(remotePersistentSettingsMetadata).read(remotePersistentSettingsMetadata) + ); + } + builder.clusterUUID(clusterMetadataManifest.getClusterUUID()); + 
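+                // UUID fields come straight from the manifest rather than from a downloaded blob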
builder.clusterUUIDCommitted(clusterMetadataManifest.isClusterUUIDCommitted()); + clusterMetadataManifest.getCustomMetadataMap().forEach((key, value) -> { + try { + RemoteCustomMetadata remoteCustomMetadata = new RemoteCustomMetadata( + value.getUploadedFilename(), + key, + clusterUUID, + compressor, + namedWriteableRegistry + ); + builder.putCustom(key, (Custom) getStore(remoteCustomMetadata).read(remoteCustomMetadata)); + } catch (IOException e) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Error while downloading Custom Metadata - %s", value.getUploadedFilename()), + e + ); + } + }); + return builder.build(); + } else { + return Metadata.EMPTY_METADATA; + } + } catch (IOException e) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Error while downloading Global Metadata - %s", globalMetadataFileName), + e + ); + } + } + + Map getUpdatedCustoms(ClusterState currentState, ClusterState previousState) { + if (Metadata.isCustomMetadataEqual(previousState.metadata(), currentState.metadata())) { + return new HashMap<>(); + } + Map updatedCustom = new HashMap<>(); + Set currentCustoms = new HashSet<>(currentState.metadata().customs().keySet()); + for (Map.Entry cursor : previousState.metadata().customs().entrySet()) { + if (cursor.getValue().context().contains(Metadata.XContentContext.GATEWAY)) { + if (currentCustoms.contains(cursor.getKey()) + && !cursor.getValue().equals(currentState.metadata().custom(cursor.getKey()))) { + // If the custom metadata is updated, we need to upload the new version. + updatedCustom.put(cursor.getKey(), currentState.metadata().custom(cursor.getKey())); + } + currentCustoms.remove(cursor.getKey()); + } + } + for (String custom : currentCustoms) { + Metadata.Custom cursor = currentState.metadata().custom(custom); + if (cursor.context().contains(Metadata.XContentContext.GATEWAY)) { + updatedCustom.put(custom, cursor); + } + } + return updatedCustom; + } + + boolean isGlobalMetadataEqual(ClusterMetadataManifest first, ClusterMetadataManifest second, String clusterName) { + Metadata secondGlobalMetadata = getGlobalMetadata(second.getClusterUUID(), second); + Metadata firstGlobalMetadata = getGlobalMetadata(first.getClusterUUID(), first); + return Metadata.isGlobalResourcesMetadataEquals(firstGlobalMetadata, secondGlobalMetadata); + } + + private void setGlobalMetadataUploadTimeout(TimeValue newGlobalMetadataUploadTimeout) { + this.globalMetadataUploadTimeout = newGlobalMetadataUploadTimeout; + } + + public TimeValue getGlobalMetadataUploadTimeout() { + return this.globalMetadataUploadTimeout; + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java new file mode 100644 index 0000000000000..a84161b202a22 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java @@ -0,0 +1,165 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote; + +import org.opensearch.action.LatchedActionListener; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.CheckedRunnable; +import org.opensearch.common.remote.RemoteWritableEntityStore; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore; +import org.opensearch.gateway.remote.model.RemoteIndexMetadata; +import org.opensearch.gateway.remote.model.RemoteReadResult; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +/** + * A Manager which provides APIs to write and read Index Metadata to remote store + * + * @opensearch.internal + */ +public class RemoteIndexMetadataManager { + + public static final TimeValue INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); + + public static final Setting INDEX_METADATA_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.state.index_metadata.upload_timeout", + INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT, + Setting.Property.Dynamic, + Setting.Property.NodeScope, + Setting.Property.Deprecated + ); + + private final RemoteWritableEntityStore indexMetadataBlobStore; + private final Compressor compressor; + private final NamedXContentRegistry namedXContentRegistry; + + private volatile TimeValue indexMetadataUploadTimeout; + + public RemoteIndexMetadataManager( + ClusterSettings clusterSettings, + String clusterName, + BlobStoreRepository blobStoreRepository, + BlobStoreTransferService blobStoreTransferService, + ThreadPool threadpool + ) { + this.indexMetadataBlobStore = new RemoteClusterStateBlobStore<>( + blobStoreTransferService, + blobStoreRepository, + clusterName, + threadpool, + ThreadPool.Names.REMOTE_STATE_READ + ); + this.namedXContentRegistry = blobStoreRepository.getNamedXContentRegistry(); + this.compressor = blobStoreRepository.getCompressor(); + this.indexMetadataUploadTimeout = clusterSettings.get(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING); + clusterSettings.addSettingsUpdateConsumer(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, this::setIndexMetadataUploadTimeout); + } + + /** + * Allows async Upload of IndexMetadata to remote + * + * @param indexMetadata {@link IndexMetadata} to upload + * @param latchedActionListener listener to respond back on after upload finishes + */ + CheckedRunnable getAsyncIndexMetadataWriteAction( + IndexMetadata indexMetadata, + String clusterUUID, + LatchedActionListener latchedActionListener + ) { + RemoteIndexMetadata remoteIndexMetadata = new RemoteIndexMetadata(indexMetadata, clusterUUID, compressor, namedXContentRegistry); + ActionListener completionListener = ActionListener.wrap( + resp -> latchedActionListener.onResponse(remoteIndexMetadata.getUploadedMetadata()), + ex -> latchedActionListener.onFailure(new RemoteStateTransferException(indexMetadata.getIndex().getName(), ex)) + ); + return () -> indexMetadataBlobStore.writeAsync(remoteIndexMetadata, completionListener); + } + + CheckedRunnable 
getAsyncIndexMetadataReadAction( + String clusterUUID, + String uploadedFilename, + LatchedActionListener latchedActionListener + ) { + RemoteIndexMetadata remoteIndexMetadata = new RemoteIndexMetadata( + RemoteClusterStateUtils.getFormattedIndexFileName(uploadedFilename), + clusterUUID, + compressor, + namedXContentRegistry + ); + ActionListener actionListener = ActionListener.wrap( + response -> latchedActionListener.onResponse( + new RemoteReadResult(response, RemoteIndexMetadata.INDEX, response.getIndex().getName()) + ), + latchedActionListener::onFailure + ); + return () -> indexMetadataBlobStore.readAsync(remoteIndexMetadata, actionListener); + } + + /** + * Fetch index metadata from remote cluster state + * + * @param uploadedIndexMetadata {@link ClusterMetadataManifest.UploadedIndexMetadata} contains details about remote location of index metadata + * @return {@link IndexMetadata} + */ + IndexMetadata getIndexMetadata(ClusterMetadataManifest.UploadedIndexMetadata uploadedIndexMetadata, String clusterUUID) { + RemoteIndexMetadata remoteIndexMetadata = new RemoteIndexMetadata( + RemoteClusterStateUtils.getFormattedIndexFileName(uploadedIndexMetadata.getUploadedFilename()), + clusterUUID, + compressor, + namedXContentRegistry + ); + try { + return indexMetadataBlobStore.read(remoteIndexMetadata); + } catch (IOException e) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Error while downloading IndexMetadata - %s", uploadedIndexMetadata.getUploadedFilename()), + e + ); + } + } + + /** + * Fetch latest index metadata from remote cluster state + * + * @param clusterMetadataManifest manifest file of cluster + * @param clusterUUID uuid of cluster state to refer to in remote + * @return {@code Map} latest IndexUUID to IndexMetadata map + */ + Map getIndexMetadataMap(String clusterUUID, ClusterMetadataManifest clusterMetadataManifest) { + assert Objects.equals(clusterUUID, clusterMetadataManifest.getClusterUUID()) + : "Corrupt ClusterMetadataManifest found. Cluster UUID mismatch."; + Map remoteIndexMetadata = new HashMap<>(); + for (ClusterMetadataManifest.UploadedIndexMetadata uploadedIndexMetadata : clusterMetadataManifest.getIndices()) { + IndexMetadata indexMetadata = getIndexMetadata(uploadedIndexMetadata, clusterUUID); + remoteIndexMetadata.put(uploadedIndexMetadata.getIndexUUID(), indexMetadata); + } + return remoteIndexMetadata; + } + + public TimeValue getIndexMetadataUploadTimeout() { + return this.indexMetadataUploadTimeout; + } + + private void setIndexMetadataUploadTimeout(TimeValue newIndexMetadataUploadTimeout) { + this.indexMetadataUploadTimeout = newIndexMetadataUploadTimeout; + } + +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java new file mode 100644 index 0000000000000..cb09de1a6ec44 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java @@ -0,0 +1,319 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.Version; +import org.opensearch.action.LatchedActionListener; +import org.opensearch.cluster.ClusterState; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.gateway.remote.model.RemoteClusterMetadataManifest; +import org.opensearch.gateway.remote.model.RemoteClusterStateBlobStore; +import org.opensearch.gateway.remote.model.RemoteClusterStateManifestInfo; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; + +/** + * A Manager which provides APIs to write and read {@link ClusterMetadataManifest} to remote store + * + * @opensearch.internal + */ +public class RemoteManifestManager { + + public static final TimeValue METADATA_MANIFEST_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); + + public static final Setting METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.state.metadata_manifest.upload_timeout", + METADATA_MANIFEST_UPLOAD_TIMEOUT_DEFAULT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + private static final Logger logger = LogManager.getLogger(RemoteManifestManager.class); + + private volatile TimeValue metadataManifestUploadTimeout; + private final String nodeId; + private final RemoteClusterStateBlobStore manifestBlobStore; + private final Compressor compressor; + private final NamedXContentRegistry namedXContentRegistry; + // todo remove blobStorerepo from here + private final BlobStoreRepository blobStoreRepository; + + RemoteManifestManager( + ClusterSettings clusterSettings, + String clusterName, + String nodeId, + BlobStoreRepository blobStoreRepository, + BlobStoreTransferService blobStoreTransferService, + ThreadPool threadpool + ) { + this.metadataManifestUploadTimeout = clusterSettings.get(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING); + this.nodeId = nodeId; + this.manifestBlobStore = new RemoteClusterStateBlobStore<>( + blobStoreTransferService, + blobStoreRepository, + clusterName, + threadpool, + ThreadPool.Names.REMOTE_STATE_READ + ); + ; + clusterSettings.addSettingsUpdateConsumer(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, this::setMetadataManifestUploadTimeout); + this.compressor = blobStoreRepository.getCompressor(); + this.namedXContentRegistry = blobStoreRepository.getNamedXContentRegistry(); + this.blobStoreRepository = blobStoreRepository; + } + + RemoteClusterStateManifestInfo uploadManifest( + ClusterState clusterState, + 
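+        // aggregated results of the per-entity uploads that ran before this manifest write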
RemoteClusterStateUtils.UploadedMetadataResults uploadedMetadataResult, + String previousClusterUUID, + ClusterStateDiffManifest clusterDiffManifest, + boolean committed + ) { + synchronized (this) { + ClusterMetadataManifest.Builder manifestBuilder = ClusterMetadataManifest.builder(); + manifestBuilder.clusterTerm(clusterState.term()) + .stateVersion(clusterState.getVersion()) + .clusterUUID(clusterState.metadata().clusterUUID()) + .stateUUID(clusterState.stateUUID()) + .opensearchVersion(Version.CURRENT) + .nodeId(nodeId) + .committed(committed) + .codecVersion(RemoteClusterMetadataManifest.MANIFEST_CURRENT_CODEC_VERSION) + .indices(uploadedMetadataResult.uploadedIndexMetadata) + .previousClusterUUID(previousClusterUUID) + .clusterUUIDCommitted(clusterState.metadata().clusterUUIDCommitted()) + .coordinationMetadata(uploadedMetadataResult.uploadedCoordinationMetadata) + .settingMetadata(uploadedMetadataResult.uploadedSettingsMetadata) + .templatesMetadata(uploadedMetadataResult.uploadedTemplatesMetadata) + .customMetadataMap(uploadedMetadataResult.uploadedCustomMetadataMap) + .routingTableVersion(clusterState.getRoutingTable().version()) + .indicesRouting(uploadedMetadataResult.uploadedIndicesRoutingMetadata) + .discoveryNodesMetadata(uploadedMetadataResult.uploadedDiscoveryNodes) + .clusterBlocksMetadata(uploadedMetadataResult.uploadedClusterBlocks) + .diffManifest(clusterDiffManifest) + .metadataVersion(clusterState.metadata().version()) + .transientSettingsMetadata(uploadedMetadataResult.uploadedTransientSettingsMetadata) + .clusterStateCustomMetadataMap(uploadedMetadataResult.uploadedClusterStateCustomMetadataMap) + .hashesOfConsistentSettings(uploadedMetadataResult.uploadedHashesOfConsistentSettings); + final ClusterMetadataManifest manifest = manifestBuilder.build(); + String manifestFileName = writeMetadataManifest(clusterState.metadata().clusterUUID(), manifest); + return new RemoteClusterStateManifestInfo(manifest, manifestFileName); + } + } + + private String writeMetadataManifest(String clusterUUID, ClusterMetadataManifest uploadManifest) { + AtomicReference result = new AtomicReference(); + AtomicReference exceptionReference = new AtomicReference(); + + // latch to wait until upload is not finished + CountDownLatch latch = new CountDownLatch(1); + + LatchedActionListener completionListener = new LatchedActionListener<>(ActionListener.wrap(resp -> { + logger.trace(String.format(Locale.ROOT, "Manifest file uploaded successfully.")); + }, ex -> { exceptionReference.set(ex); }), latch); + + RemoteClusterMetadataManifest remoteClusterMetadataManifest = new RemoteClusterMetadataManifest( + uploadManifest, + clusterUUID, + compressor, + namedXContentRegistry + ); + manifestBlobStore.writeAsync(remoteClusterMetadataManifest, completionListener); + + try { + if (latch.await(getMetadataManifestUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) { + RemoteStateTransferException ex = new RemoteStateTransferException( + String.format(Locale.ROOT, "Timed out waiting for transfer of manifest file to complete") + ); + throw ex; + } + } catch (InterruptedException ex) { + RemoteStateTransferException exception = new RemoteStateTransferException( + String.format(Locale.ROOT, "Timed out waiting for transfer of manifest file to complete - %s"), + ex + ); + Thread.currentThread().interrupt(); + throw exception; + } + if (exceptionReference.get() != null) { + throw new RemoteStateTransferException(exceptionReference.get().getMessage(), exceptionReference.get()); + } + logger.debug( + 
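+            // two-phase publication: the manifest is first written as "publish", then as "commit"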
"Metadata manifest file [{}] written during [{}] phase. ", + remoteClusterMetadataManifest.getBlobFileName(), + uploadManifest.isCommitted() ? "commit" : "publish" + ); + return remoteClusterMetadataManifest.getUploadedMetadata().getUploadedFilename(); + } + + /** + * Fetch latest ClusterMetadataManifest from remote state store + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @return ClusterMetadataManifest + */ + public Optional getLatestClusterMetadataManifest(String clusterName, String clusterUUID) { + Optional latestManifestFileName = getLatestManifestFileName(clusterName, clusterUUID); + return latestManifestFileName.map(s -> fetchRemoteClusterMetadataManifest(clusterName, clusterUUID, s)); + } + + public ClusterMetadataManifest getRemoteClusterMetadataManifestByFileName(String clusterUUID, String filename) + throws IllegalStateException { + try { + RemoteClusterMetadataManifest remoteClusterMetadataManifest = new RemoteClusterMetadataManifest( + filename, + clusterUUID, + compressor, + namedXContentRegistry + ); + return manifestBlobStore.read(remoteClusterMetadataManifest); + } catch (IOException e) { + throw new IllegalStateException(String.format(Locale.ROOT, "Error while downloading cluster metadata - %s", filename), e); + } + } + + /** + * Fetch ClusterMetadataManifest from remote state store + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @return ClusterMetadataManifest + */ + ClusterMetadataManifest fetchRemoteClusterMetadataManifest(String clusterName, String clusterUUID, String filename) + throws IllegalStateException { + try { + String fullBlobName = getManifestFolderPath(clusterName, clusterUUID).buildAsString() + filename; + RemoteClusterMetadataManifest remoteClusterMetadataManifest = new RemoteClusterMetadataManifest( + fullBlobName, + clusterUUID, + compressor, + namedXContentRegistry + ); + return manifestBlobStore.read(remoteClusterMetadataManifest); + } catch (IOException e) { + throw new IllegalStateException(String.format(Locale.ROOT, "Error while downloading cluster metadata - %s", filename), e); + } + } + + Map getLatestManifestForAllClusterUUIDs(String clusterName, Set clusterUUIDs) { + Map manifestsByClusterUUID = new HashMap<>(); + for (String clusterUUID : clusterUUIDs) { + try { + Optional manifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); + manifest.ifPresent(clusterMetadataManifest -> manifestsByClusterUUID.put(clusterUUID, clusterMetadataManifest)); + } catch (Exception e) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Exception in fetching manifest for clusterUUID: %s", clusterUUID), + e + ); + } + } + return manifestsByClusterUUID; + } + + private BlobContainer manifestContainer(String clusterName, String clusterUUID) { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest + return blobStoreRepository.blobStore().blobContainer(getManifestFolderPath(clusterName, clusterUUID)); + } + + BlobPath getManifestFolderPath(String clusterName, String clusterUUID) { + return RemoteClusterStateUtils.getClusterMetadataBasePath(blobStoreRepository, clusterName, clusterUUID) + .add(RemoteClusterMetadataManifest.MANIFEST); + } + + public TimeValue getMetadataManifestUploadTimeout() { + return this.metadataManifestUploadTimeout; + } + + private void setMetadataManifestUploadTimeout(TimeValue newMetadataManifestUploadTimeout) { + this.metadataManifestUploadTimeout = 
newMetadataManifestUploadTimeout; + } + + /** + * Fetch ClusterMetadataManifest files from remote state store in order + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @param limit max no of files to fetch + * @return all manifest file names + */ + private List getManifestFileNames(String clusterName, String clusterUUID, String filePrefix, int limit) + throws IllegalStateException { + try { + + /* + {@link BlobContainer#listBlobsByPrefixInSortedOrder} will list the latest manifest file first + as the manifest file name generated via {@link RemoteClusterStateService#getManifestFileName} ensures + when sorted in LEXICOGRAPHIC order the latest uploaded manifest file comes on top. + */ + return manifestContainer(clusterName, clusterUUID).listBlobsByPrefixInSortedOrder( + filePrefix, + limit, + BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC + ); + } catch (IOException e) { + throw new IllegalStateException("Error while fetching latest manifest file for remote cluster state", e); + } + } + + static String getManifestFilePrefixForTermVersion(long term, long version) { + return String.join( + DELIMITER, + RemoteClusterMetadataManifest.MANIFEST, + RemoteStoreUtils.invertLong(term), + RemoteStoreUtils.invertLong(version) + ) + DELIMITER; + } + + /** + * Fetch latest ClusterMetadataManifest file from remote state store + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @return latest ClusterMetadataManifest filename + */ + private Optional getLatestManifestFileName(String clusterName, String clusterUUID) throws IllegalStateException { + List manifestFilesMetadata = getManifestFileNames( + clusterName, + clusterUUID, + RemoteClusterMetadataManifest.MANIFEST + DELIMITER, + 1 + ); + if (manifestFilesMetadata != null && !manifestFilesMetadata.isEmpty()) { + return Optional.of(manifestFilesMetadata.get(0).name()); + } + logger.info("No manifest file present in remote store for cluster name: {}, cluster UUID: {}", clusterName, clusterUUID); + return Optional.empty(); + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java b/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java index f2330846fa23e..36d107a99d258 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java @@ -19,12 +19,16 @@ */ public class RemotePersistenceStats extends PersistedStateStats { static final String CLEANUP_ATTEMPT_FAILED_COUNT = "cleanup_attempt_failed_count"; + static final String INDEX_ROUTING_FILES_CLEANUP_ATTEMPT_FAILED_COUNT = "index_routing_files_cleanup_attempt_failed_count"; static final String REMOTE_UPLOAD = "remote_upload"; private AtomicLong cleanupAttemptFailedCount = new AtomicLong(0); + private AtomicLong indexRoutingFilesCleanupAttemptFailedCount = new AtomicLong(0); + public RemotePersistenceStats() { super(REMOTE_UPLOAD); addToExtendedFields(CLEANUP_ATTEMPT_FAILED_COUNT, cleanupAttemptFailedCount); + addToExtendedFields(INDEX_ROUTING_FILES_CLEANUP_ATTEMPT_FAILED_COUNT, indexRoutingFilesCleanupAttemptFailedCount); } public void cleanUpAttemptFailed() { @@ -34,4 +38,12 @@ public void cleanUpAttemptFailed() { public long getCleanupAttemptFailedCount() { return cleanupAttemptFailedCount.get(); } + + public void indexRoutingFilesCleanupAttemptFailed() { + 
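+        // surfaced through the extended stats field registered in the constructor above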
indexRoutingFilesCleanupAttemptFailedCount.incrementAndGet(); + } + + public long getIndexRoutingFilesCleanupAttemptFailedCount() { + return indexRoutingFilesCleanupAttemptFailedCount.get(); + } } diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteStateTransferException.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteStateTransferException.java new file mode 100644 index 0000000000000..5b75b6c18ee5b --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteStateTransferException.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote; + +import org.opensearch.common.remote.RemoteWriteableEntity; + +/** + * Exception for Remote state transfer. + */ +public class RemoteStateTransferException extends RuntimeException { + private RemoteWriteableEntity entity; + + public RemoteStateTransferException(String errorDesc) { + super(errorDesc); + } + + public RemoteStateTransferException(String errorDesc, Throwable cause) { + super(errorDesc, cause); + } + + public RemoteStateTransferException(String errorDesc, RemoteWriteableEntity entity) { + super(errorDesc); + this.entity = entity; + } + + public RemoteStateTransferException(String errorDesc, RemoteWriteableEntity entity, Throwable cause) { + super(errorDesc, cause); + this.entity = entity; + } + + @Override + public String toString() { + String message = super.toString(); + if (entity != null) { + message += ", failed entity:" + entity; + } + return message; + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterBlocks.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterBlocks.java new file mode 100644 index 0000000000000..9c5fbd5941640 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterBlocks.java @@ -0,0 +1,93 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote.model; + +import org.opensearch.cluster.block.ClusterBlocks; +import org.opensearch.common.io.Streams; +import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.BlobPathParameters; +import org.opensearch.core.compress.Compressor; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadataAttribute; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.repositories.blobstore.ChecksumWritableBlobStoreFormat; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; + +import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_STATE_ATTRIBUTES_CURRENT_CODEC_VERSION; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CLUSTER_STATE_EPHEMERAL_PATH_TOKEN; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; + +/** + * Wrapper class for uploading/downloading {@link ClusterBlocks} to/from remote blob store + */ +public class RemoteClusterBlocks extends AbstractRemoteWritableBlobEntity { + + public static final String CLUSTER_BLOCKS = "blocks"; + public static final ChecksumWritableBlobStoreFormat CLUSTER_BLOCKS_FORMAT = new ChecksumWritableBlobStoreFormat<>( + "blocks", + ClusterBlocks::readFrom + ); + + private ClusterBlocks clusterBlocks; + private long stateVersion; + + public RemoteClusterBlocks(final ClusterBlocks clusterBlocks, long stateVersion, String clusterUUID, final Compressor compressor) { + super(clusterUUID, compressor, null); + this.clusterBlocks = clusterBlocks; + this.stateVersion = stateVersion; + } + + public RemoteClusterBlocks(final String blobName, final String clusterUUID, final Compressor compressor) { + super(clusterUUID, compressor, null); + this.blobName = blobName; + } + + @Override + public BlobPathParameters getBlobPathParameters() { + return new BlobPathParameters(List.of(CLUSTER_STATE_EPHEMERAL_PATH_TOKEN), CLUSTER_BLOCKS); + } + + @Override + public String getType() { + return CLUSTER_BLOCKS; + } + + @Override + public String generateBlobFileName() { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/transient/______ + String blobFileName = String.join( + DELIMITER, + getBlobPathParameters().getFilePrefix(), + RemoteStoreUtils.invertLong(stateVersion), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(CLUSTER_STATE_ATTRIBUTES_CURRENT_CODEC_VERSION) + ); + this.blobFileName = blobFileName; + return blobFileName; + } + + @Override + public UploadedMetadata getUploadedMetadata() { + assert blobName != null; + return new UploadedMetadataAttribute(CLUSTER_BLOCKS, blobName); + } + + @Override + public InputStream serialize() throws IOException { + return CLUSTER_BLOCKS_FORMAT.serialize(clusterBlocks, generateBlobFileName(), getCompressor()).streamInput(); + } + + @Override + public ClusterBlocks deserialize(final InputStream inputStream) throws IOException { + return CLUSTER_BLOCKS_FORMAT.deserialize(blobName, Streams.readFully(inputStream)); + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java new file mode 100644 index 0000000000000..328601139c150 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterMetadataManifest.java @@ -0,0 +1,159 @@ 
+/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote.model; + +import org.opensearch.common.io.Streams; +import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.BlobPathParameters; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadataAttribute; +import org.opensearch.gateway.remote.RemoteClusterStateUtils; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; + +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; + +/** + * Wrapper class for uploading/downloading {@link ClusterMetadataManifest} to/from remote blob store + */ +public class RemoteClusterMetadataManifest extends AbstractRemoteWritableBlobEntity { + + public static final String MANIFEST = "manifest"; + public static final int SPLITTED_MANIFEST_FILE_LENGTH = 6; + + public static final String METADATA_MANIFEST_NAME_FORMAT = "%s"; + public static final int MANIFEST_CURRENT_CODEC_VERSION = ClusterMetadataManifest.CODEC_V2; + public static final String COMMITTED = "C"; + public static final String PUBLISHED = "P"; + + /** + * Manifest format compatible with older codec v0, where codec version was missing. + */ + public static final ChecksumBlobStoreFormat CLUSTER_METADATA_MANIFEST_FORMAT_V0 = + new ChecksumBlobStoreFormat<>("cluster-metadata-manifest", METADATA_MANIFEST_NAME_FORMAT, ClusterMetadataManifest::fromXContentV0); + /** + * Manifest format compatible with older codec v1, where global metadata was missing. + */ + public static final ChecksumBlobStoreFormat CLUSTER_METADATA_MANIFEST_FORMAT_V1 = + new ChecksumBlobStoreFormat<>("cluster-metadata-manifest", METADATA_MANIFEST_NAME_FORMAT, ClusterMetadataManifest::fromXContentV1); + + /** + * Manifest format compatible with codec v2, where we introduced codec versions/global metadata. 
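+     * A rough read-side sketch, using only calls that appear in this class
+     * (variable names are illustrative):
+     * <pre>{@code
+     * ClusterMetadataManifest manifest = CLUSTER_METADATA_MANIFEST_FORMAT
+     *     .deserialize(blobName, namedXContentRegistry, Streams.readFully(inputStream));
+     * }</pre>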
+ */ + public static final ChecksumBlobStoreFormat CLUSTER_METADATA_MANIFEST_FORMAT = new ChecksumBlobStoreFormat<>( + "cluster-metadata-manifest", + METADATA_MANIFEST_NAME_FORMAT, + ClusterMetadataManifest::fromXContent + ); + + private ClusterMetadataManifest clusterMetadataManifest; + + public RemoteClusterMetadataManifest( + final ClusterMetadataManifest clusterMetadataManifest, + final String clusterUUID, + final Compressor compressor, + final NamedXContentRegistry namedXContentRegistry + ) { + super(clusterUUID, compressor, namedXContentRegistry); + this.clusterMetadataManifest = clusterMetadataManifest; + } + + public RemoteClusterMetadataManifest( + final String blobName, + final String clusterUUID, + final Compressor compressor, + final NamedXContentRegistry namedXContentRegistry + ) { + super(clusterUUID, compressor, namedXContentRegistry); + this.blobName = blobName; + } + + @Override + public BlobPathParameters getBlobPathParameters() { + return new BlobPathParameters(List.of(MANIFEST), MANIFEST); + } + + @Override + public String getType() { + return MANIFEST; + } + + @Override + public String generateBlobFileName() { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest/manifest______C/P____ + // + String blobFileName = String.join( + DELIMITER, + MANIFEST, + RemoteStoreUtils.invertLong(clusterMetadataManifest.getClusterTerm()), + RemoteStoreUtils.invertLong(clusterMetadataManifest.getStateVersion()), + (clusterMetadataManifest.isCommitted() ? COMMITTED : PUBLISHED), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(clusterMetadataManifest.getCodecVersion()) + // Keep the codec version at last place only, during we read last place to determine codec version. + ); + this.blobFileName = blobFileName; + return blobFileName; + } + + @Override + public UploadedMetadata getUploadedMetadata() { + assert blobName != null; + return new UploadedMetadataAttribute(MANIFEST, blobName); + } + + @Override + public InputStream serialize() throws IOException { + return CLUSTER_METADATA_MANIFEST_FORMAT.serialize( + clusterMetadataManifest, + generateBlobFileName(), + getCompressor(), + RemoteClusterStateUtils.FORMAT_PARAMS + ).streamInput(); + } + + @Override + public ClusterMetadataManifest deserialize(final InputStream inputStream) throws IOException { + ChecksumBlobStoreFormat blobStoreFormat = getClusterMetadataManifestBlobStoreFormat(); + return blobStoreFormat.deserialize(blobName, getNamedXContentRegistry(), Streams.readFully(inputStream)); + } + + private int getManifestCodecVersion() { + assert blobName != null; + String[] splitName = blobName.split(DELIMITER); + if (splitName.length == SPLITTED_MANIFEST_FILE_LENGTH) { + return Integer.parseInt(splitName[splitName.length - 1]); // Last value would be codec version. + } else if (splitName.length < SPLITTED_MANIFEST_FILE_LENGTH) { // Where codec is not part of file name, i.e. default codec version 0 + // is used. 
+ return ClusterMetadataManifest.CODEC_V0; + } else { + throw new IllegalArgumentException("Manifest file name is corrupted"); + } + } + + private ChecksumBlobStoreFormat getClusterMetadataManifestBlobStoreFormat() { + long codecVersion = getManifestCodecVersion(); + if (codecVersion == MANIFEST_CURRENT_CODEC_VERSION) { + return CLUSTER_METADATA_MANIFEST_FORMAT; + } else if (codecVersion == ClusterMetadataManifest.CODEC_V1) { + return CLUSTER_METADATA_MANIFEST_FORMAT_V1; + } else if (codecVersion == ClusterMetadataManifest.CODEC_V0) { + return CLUSTER_METADATA_MANIFEST_FORMAT_V0; + } + throw new IllegalArgumentException("Cluster metadata manifest file is corrupted, don't have valid codec version"); + } + +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java index 1aeecc4e70382..83326f65f0d43 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateBlobStore.java @@ -9,6 +9,7 @@ package org.opensearch.gateway.remote.model; import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; import org.opensearch.common.remote.RemoteWritableEntityStore; import org.opensearch.common.remote.RemoteWriteableEntity; @@ -54,15 +55,20 @@ public void writeAsync(final U entity, final ActionListener listener) { try (InputStream inputStream = entity.serialize()) { BlobPath blobPath = getBlobPathForUpload(entity); entity.setFullBlobName(blobPath); - // TODO uncomment below logic after merging PR https://github.com/opensearch-project/OpenSearch/pull/13836 - // transferService.uploadBlob(inputStream, getBlobPathForUpload(entity), entity.getBlobFileName(), WritePriority.URGENT, - // listener); + transferService.uploadBlob( + inputStream, + getBlobPathForUpload(entity), + entity.getBlobFileName(), + WritePriority.URGENT, + listener + ); } } catch (Exception e) { listener.onFailure(e); } } + @Override public T read(final U entity) throws IOException { // TODO Add timing logs and tracing assert entity.getFullBlobName() != null; diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustoms.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustoms.java new file mode 100644 index 0000000000000..f384908bc6b65 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateCustoms.java @@ -0,0 +1,128 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote.model; + +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterState.Custom; +import org.opensearch.common.io.Streams; +import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.BlobPathParameters; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.compress.Compressor; +import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.repositories.blobstore.ChecksumWritableBlobStoreFormat; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; + +import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_STATE_ATTRIBUTES_CURRENT_CODEC_VERSION; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CLUSTER_STATE_EPHEMERAL_PATH_TOKEN; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CUSTOM_DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; + +/** + * Wrapper class for uploading/downloading {@link Custom} to/from remote blob store + */ +public class RemoteClusterStateCustoms extends AbstractRemoteWritableBlobEntity { + public static final String CLUSTER_STATE_CUSTOM = "cluster-state-custom"; + + private long stateVersion; + private final String customType; + private ClusterState.Custom custom; + private final NamedWriteableRegistry namedWriteableRegistry; + private final ChecksumWritableBlobStoreFormat clusterStateCustomsFormat; + + public RemoteClusterStateCustoms( + final ClusterState.Custom custom, + final String customType, + final long stateVersion, + final String clusterUUID, + final Compressor compressor, + final NamedWriteableRegistry namedWriteableRegistry + ) { + super(clusterUUID, compressor, null); + this.stateVersion = stateVersion; + this.customType = customType; + this.custom = custom; + this.namedWriteableRegistry = namedWriteableRegistry; + this.clusterStateCustomsFormat = new ChecksumWritableBlobStoreFormat<>( + "cluster-state-custom", + is -> readFrom(is, namedWriteableRegistry, customType) + ); + } + + public RemoteClusterStateCustoms( + final String blobName, + final String customType, + final String clusterUUID, + final Compressor compressor, + final NamedWriteableRegistry namedWriteableRegistry + ) { + super(clusterUUID, compressor, null); + this.blobName = blobName; + this.customType = customType; + this.namedWriteableRegistry = namedWriteableRegistry; + this.clusterStateCustomsFormat = new ChecksumWritableBlobStoreFormat<>( + "cluster-state-custom", + is -> readFrom(is, namedWriteableRegistry, customType) + ); + } + + @Override + public BlobPathParameters getBlobPathParameters() { + String prefix = String.join(CUSTOM_DELIMITER, CLUSTER_STATE_CUSTOM, customType); + return new BlobPathParameters(List.of(CLUSTER_STATE_EPHEMERAL_PATH_TOKEN), prefix); + } + + @Override + public String getType() { + return CLUSTER_STATE_CUSTOM; + } + + @Override + public String generateBlobFileName() { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/ephemeral/______ + String blobFileName = String.join( + DELIMITER, + getBlobPathParameters().getFilePrefix(), + RemoteStoreUtils.invertLong(stateVersion), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(CLUSTER_STATE_ATTRIBUTES_CURRENT_CODEC_VERSION) + ); + this.blobFileName = 
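+        // inverted state version and timestamp make the newest file sort first lexicographically, mirroring the manifest naming scheme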
blobFileName; + return blobFileName; + } + + @Override + public ClusterMetadataManifest.UploadedMetadata getUploadedMetadata() { + assert blobName != null; + return new ClusterMetadataManifest.UploadedMetadataAttribute( + String.join(CUSTOM_DELIMITER, CLUSTER_STATE_CUSTOM, customType), + blobName + ); + } + + @Override + public InputStream serialize() throws IOException { + return clusterStateCustomsFormat.serialize(custom, generateBlobFileName(), getCompressor()).streamInput(); + } + + @Override + public ClusterState.Custom deserialize(final InputStream inputStream) throws IOException { + return clusterStateCustomsFormat.deserialize(blobName, Streams.readFully(inputStream)); + } + + public static ClusterState.Custom readFrom(StreamInput streamInput, NamedWriteableRegistry namedWriteableRegistry, String customType) + throws IOException { + return namedWriteableRegistry.getReader(ClusterState.Custom.class, customType).read(streamInput); + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateManifestInfo.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateManifestInfo.java new file mode 100644 index 0000000000000..5d987e5e21e1a --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteClusterStateManifestInfo.java @@ -0,0 +1,33 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote.model; + +import org.opensearch.gateway.remote.ClusterMetadataManifest; + +/** + * A class encapsulating the cluster state manifest and its remote uploaded path + */ +public class RemoteClusterStateManifestInfo { + + private final ClusterMetadataManifest clusterMetadataManifest; + private final String manifestFileName; + + public RemoteClusterStateManifestInfo(final ClusterMetadataManifest manifest, final String manifestFileName) { + this.clusterMetadataManifest = manifest; + this.manifestFileName = manifestFileName; + } + + public ClusterMetadataManifest getClusterMetadataManifest() { + return clusterMetadataManifest; + } + + public String getManifestFileName() { + return manifestFileName; + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCoordinationMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCoordinationMetadata.java new file mode 100644 index 0000000000000..a90721ab59f66 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCoordinationMetadata.java @@ -0,0 +1,112 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote.model; + +import org.opensearch.cluster.coordination.CoordinationMetadata; +import org.opensearch.common.io.Streams; +import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.BlobPathParameters; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadataAttribute; +import org.opensearch.gateway.remote.RemoteClusterStateUtils; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; + +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.GLOBAL_METADATA_CURRENT_CODEC_VERSION; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.METADATA_NAME_PLAIN_FORMAT; + +/** + * Wrapper class for uploading/downloading {@link CoordinationMetadata} to/from remote blob store + */ +public class RemoteCoordinationMetadata extends AbstractRemoteWritableBlobEntity { + + public static final String COORDINATION_METADATA = "coordination"; + public static final ChecksumBlobStoreFormat COORDINATION_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( + "coordination", + METADATA_NAME_PLAIN_FORMAT, + CoordinationMetadata::fromXContent + ); + + private CoordinationMetadata coordinationMetadata; + private long metadataVersion; + + public RemoteCoordinationMetadata( + final CoordinationMetadata coordinationMetadata, + final long metadataVersion, + final String clusterUUID, + final Compressor compressor, + final NamedXContentRegistry namedXContentRegistry + ) { + super(clusterUUID, compressor, namedXContentRegistry); + this.coordinationMetadata = coordinationMetadata; + this.metadataVersion = metadataVersion; + } + + public RemoteCoordinationMetadata( + final String blobName, + final String clusterUUID, + final Compressor compressor, + final NamedXContentRegistry namedXContentRegistry + ) { + super(clusterUUID, compressor, namedXContentRegistry); + this.blobName = blobName; + } + + @Override + public BlobPathParameters getBlobPathParameters() { + return new BlobPathParameters(List.of("global-metadata"), COORDINATION_METADATA); + } + + @Override + public String getType() { + return COORDINATION_METADATA; + } + + @Override + public String generateBlobFileName() { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/global-metadata/______ + String blobFileName = String.join( + DELIMITER, + getBlobPathParameters().getFilePrefix(), + RemoteStoreUtils.invertLong(metadataVersion), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(GLOBAL_METADATA_CURRENT_CODEC_VERSION) + ); + this.blobFileName = blobFileName; + return blobFileName; + } + + @Override + public InputStream serialize() throws IOException { + return COORDINATION_METADATA_FORMAT.serialize( + coordinationMetadata, + generateBlobFileName(), + getCompressor(), + RemoteClusterStateUtils.FORMAT_PARAMS + ).streamInput(); + } + + @Override + public CoordinationMetadata deserialize(final InputStream inputStream) throws IOException { + return COORDINATION_METADATA_FORMAT.deserialize(blobName, getNamedXContentRegistry(), Streams.readFully(inputStream)); + } + + @Override + public 
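+    // valid only after an upload has assigned the blob name, as the assert below enforces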
UploadedMetadata getUploadedMetadata() { + assert blobName != null; + return new UploadedMetadataAttribute(COORDINATION_METADATA, blobName); + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java new file mode 100644 index 0000000000000..4c7069ee8be9e --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java @@ -0,0 +1,127 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote.model; + +import org.opensearch.cluster.metadata.Metadata.Custom; +import org.opensearch.common.io.Streams; +import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.BlobPathParameters; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.compress.Compressor; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadataAttribute; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.repositories.blobstore.ChecksumWritableBlobStoreFormat; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; + +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.GLOBAL_METADATA_CURRENT_CODEC_VERSION; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.GLOBAL_METADATA_PATH_TOKEN; + +/** + * Wrapper class for uploading/downloading {@link Custom} to/from remote blob store + */ +public class RemoteCustomMetadata extends AbstractRemoteWritableBlobEntity { + + public static final String CUSTOM_METADATA = "custom"; + public static final String CUSTOM_DELIMITER = "--"; + public final ChecksumWritableBlobStoreFormat customBlobStoreFormat; + + private Custom custom; + private final String customType; + private long metadataVersion; + private final NamedWriteableRegistry namedWriteableRegistry; + + public RemoteCustomMetadata( + final Custom custom, + final String customType, + final long metadataVersion, + final String clusterUUID, + Compressor compressor, + NamedWriteableRegistry namedWriteableRegistry + ) { + super(clusterUUID, compressor, null); + this.custom = custom; + this.customType = customType; + this.metadataVersion = metadataVersion; + this.namedWriteableRegistry = namedWriteableRegistry; + this.customBlobStoreFormat = new ChecksumWritableBlobStoreFormat<>( + "custom", + is -> readFrom(is, namedWriteableRegistry, customType) + ); + } + + public RemoteCustomMetadata( + final String blobName, + final String customType, + final String clusterUUID, + final Compressor compressor, + final NamedWriteableRegistry namedWriteableRegistry + ) { + super(clusterUUID, compressor, null); + this.blobName = blobName; + this.customType = customType; + this.namedWriteableRegistry = namedWriteableRegistry; + this.customBlobStoreFormat = new ChecksumWritableBlobStoreFormat<>( + "custom", + is -> readFrom(is, namedWriteableRegistry, customType) + ); + } + + @Override + public BlobPathParameters getBlobPathParameters() { + String prefix = String.join(CUSTOM_DELIMITER, 
CUSTOM_METADATA, customType); + return new BlobPathParameters(List.of(GLOBAL_METADATA_PATH_TOKEN), prefix); + } + + @Override + public String getType() { + return CUSTOM_METADATA; + } + + @Override + public String generateBlobFileName() { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/global-metadata/______ + // + String blobFileName = String.join( + DELIMITER, + getBlobPathParameters().getFilePrefix(), + RemoteStoreUtils.invertLong(metadataVersion), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(GLOBAL_METADATA_CURRENT_CODEC_VERSION) + ); + this.blobFileName = blobFileName; + return blobFileName; + } + + @Override + public InputStream serialize() throws IOException { + return customBlobStoreFormat.serialize(custom, generateBlobFileName(), getCompressor()).streamInput(); + } + + @Override + public Custom deserialize(final InputStream inputStream) throws IOException { + return customBlobStoreFormat.deserialize(blobName, Streams.readFully(inputStream)); + } + + @Override + public UploadedMetadata getUploadedMetadata() { + assert blobName != null; + return new UploadedMetadataAttribute(String.join(CUSTOM_DELIMITER, CUSTOM_METADATA, customType), blobName); + } + + public static Custom readFrom(StreamInput streamInput, NamedWriteableRegistry namedWriteableRegistry, String customType) + throws IOException { + return namedWriteableRegistry.getReader(Custom.class, customType).read(streamInput); + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodes.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodes.java new file mode 100644 index 0000000000000..fb399e2899cdd --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteDiscoveryNodes.java @@ -0,0 +1,98 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
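// A runnable sketch of the blob-file naming scheme used by generateBlobFileName() above.
// Assumptions, not verified code: RemoteStoreUtils.invertLong zero-pads Long.MAX_VALUE - value,
// and DELIMITER is a double underscore (the path comments above show the separators only as
// underscores). Inverting the version and timestamp makes a plain lexicographic listing of
// blob names return the newest state first.
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class BlobNameSketch {
    static String invertLong(long value) {
        return String.format("%019d", Long.MAX_VALUE - value); // fixed width keeps string order == numeric order
    }

    static String blobFileName(String prefix, long version, long millis, int codecVersion) {
        return String.join("__", prefix, invertLong(version), invertLong(millis), String.valueOf(codecVersion));
    }

    public static void main(String[] args) {
        List<String> names = new ArrayList<>();
        names.add(blobFileName("custom--snapshots", 7, 1_000L, 1));
        names.add(blobFileName("custom--snapshots", 9, 3_000L, 1));
        Collections.sort(names);
        names.forEach(System.out::println); // the name written for version 9 (the newer state) sorts first
    }
}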
+ */ + +package org.opensearch.gateway.remote.model; + +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.io.Streams; +import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.BlobPathParameters; +import org.opensearch.core.compress.Compressor; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadataAttribute; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.repositories.blobstore.ChecksumWritableBlobStoreFormat; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; + +import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_STATE_ATTRIBUTES_CURRENT_CODEC_VERSION; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CLUSTER_STATE_EPHEMERAL_PATH_TOKEN; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; + +/** + * Wrapper class for uploading/downloading {@link DiscoveryNodes} to/from remote blob store + */ +public class RemoteDiscoveryNodes extends AbstractRemoteWritableBlobEntity { + + public static final String DISCOVERY_NODES = "nodes"; + public static final ChecksumWritableBlobStoreFormat DISCOVERY_NODES_FORMAT = new ChecksumWritableBlobStoreFormat<>( + "nodes", + is -> DiscoveryNodes.readFrom(is, null) + ); + + private DiscoveryNodes discoveryNodes; + private long stateVersion; + + public RemoteDiscoveryNodes( + final DiscoveryNodes discoveryNodes, + final long stateVersion, + final String clusterUUID, + final Compressor compressor + ) { + super(clusterUUID, compressor, null); + this.discoveryNodes = discoveryNodes; + this.stateVersion = stateVersion; + } + + public RemoteDiscoveryNodes(final String blobName, final String clusterUUID, final Compressor compressor) { + super(clusterUUID, compressor, null); + this.blobName = blobName; + } + + @Override + public BlobPathParameters getBlobPathParameters() { + return new BlobPathParameters(List.of(CLUSTER_STATE_EPHEMERAL_PATH_TOKEN), DISCOVERY_NODES); + } + + @Override + public String getType() { + return DISCOVERY_NODES; + } + + @Override + public String generateBlobFileName() { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/ephemeral/______ + String blobFileName = String.join( + DELIMITER, + getBlobPathParameters().getFilePrefix(), + RemoteStoreUtils.invertLong(stateVersion), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(CLUSTER_STATE_ATTRIBUTES_CURRENT_CODEC_VERSION) + ); + this.blobFileName = blobFileName; + return blobFileName; + } + + @Override + public UploadedMetadata getUploadedMetadata() { + assert blobName != null; + return new UploadedMetadataAttribute(DISCOVERY_NODES, blobName); + } + + @Override + public InputStream serialize() throws IOException { + return DISCOVERY_NODES_FORMAT.serialize(discoveryNodes, generateBlobFileName(), getCompressor()).streamInput(); + } + + @Override + public DiscoveryNodes deserialize(final InputStream inputStream) throws IOException { + return DISCOVERY_NODES_FORMAT.deserialize(blobName, Streams.readFully(inputStream)); + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadata.java new file mode 100644 index 0000000000000..09f07de0d5c24 --- /dev/null +++ 
b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteGlobalMetadata.java @@ -0,0 +1,76 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote.model; + +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.common.io.Streams; +import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.BlobPathParameters; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; +import org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat; + +import java.io.IOException; +import java.io.InputStream; + +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.METADATA_NAME_FORMAT; + +/** + * Wrapper class for uploading/downloading global metadata ({@link Metadata}) to/from remote blob store + */ +public class RemoteGlobalMetadata extends AbstractRemoteWritableBlobEntity { + public static final String GLOBAL_METADATA = "global_metadata"; + + public static final ChecksumBlobStoreFormat GLOBAL_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( + "metadata", + METADATA_NAME_FORMAT, + Metadata::fromXContent + ); + + public RemoteGlobalMetadata( + final String blobName, + final String clusterUUID, + final Compressor compressor, + final NamedXContentRegistry namedXContentRegistry + ) { + super(clusterUUID, compressor, namedXContentRegistry); + this.blobName = blobName; + } + + @Override + public BlobPathParameters getBlobPathParameters() { + throw new UnsupportedOperationException(); + } + + @Override + public String getType() { + return GLOBAL_METADATA; + } + + @Override + public String generateBlobFileName() { + throw new UnsupportedOperationException(); + } + + @Override + public UploadedMetadata getUploadedMetadata() { + throw new UnsupportedOperationException(); + } + + @Override + public InputStream serialize() throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public Metadata deserialize(final InputStream inputStream) throws IOException { + return GLOBAL_METADATA_FORMAT.deserialize(blobName, getNamedXContentRegistry(), Streams.readFully(inputStream)); + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteHashesOfConsistentSettings.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteHashesOfConsistentSettings.java new file mode 100644 index 0000000000000..1debf75cdfec9 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteHashesOfConsistentSettings.java @@ -0,0 +1,94 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
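// RemoteGlobalMetadata above is download-only: it is constructed from an existing blobName and
// implements deserialize(), while every upload-side hook throws UnsupportedOperationException.
// A compact stand-in (simplified types, not the real AbstractRemoteWritableBlobEntity contract)
// showing the same fail-fast shape:
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

class ReadOnlyRemoteEntitySketch {
    private final String blobName;

    ReadOnlyRemoteEntitySketch(String blobName) {
        this.blobName = blobName;
    }

    InputStream serialize() {
        // never uploaded through this wrapper, so reaching here is a programming error
        throw new UnsupportedOperationException("read-only remote entity: " + blobName);
    }

    String deserialize(InputStream in) throws IOException {
        return new String(in.readAllBytes(), StandardCharsets.UTF_8);
    }

    public static void main(String[] args) throws IOException {
        ReadOnlyRemoteEntitySketch entity = new ReadOnlyRemoteEntitySketch("global-metadata/metadata__42");
        System.out.println(entity.deserialize(new ByteArrayInputStream("{}".getBytes(StandardCharsets.UTF_8))));
    }
}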
+ */ + +package org.opensearch.gateway.remote.model; + +import org.opensearch.cluster.metadata.DiffableStringMap; +import org.opensearch.common.io.Streams; +import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.BlobPathParameters; +import org.opensearch.core.compress.Compressor; +import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.repositories.blobstore.ChecksumWritableBlobStoreFormat; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; + +import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_STATE_ATTRIBUTES_CURRENT_CODEC_VERSION; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.GLOBAL_METADATA_PATH_TOKEN; + +/** + * Wrapper class for uploading/downloading {@link DiffableStringMap} to/from remote blob store + */ +public class RemoteHashesOfConsistentSettings extends AbstractRemoteWritableBlobEntity { + public static final String HASHES_OF_CONSISTENT_SETTINGS = "hashes-of-consistent-settings"; + public static final ChecksumWritableBlobStoreFormat HASHES_OF_CONSISTENT_SETTINGS_FORMAT = + new ChecksumWritableBlobStoreFormat<>("hashes-of-consistent-settings", DiffableStringMap::readFrom); + + private DiffableStringMap hashesOfConsistentSettings; + private long metadataVersion; + + public RemoteHashesOfConsistentSettings( + final DiffableStringMap hashesOfConsistentSettings, + final long metadataVersion, + final String clusterUUID, + final Compressor compressor + ) { + super(clusterUUID, compressor, null); + this.metadataVersion = metadataVersion; + this.hashesOfConsistentSettings = hashesOfConsistentSettings; + } + + public RemoteHashesOfConsistentSettings(final String blobName, final String clusterUUID, final Compressor compressor) { + super(clusterUUID, compressor, null); + this.blobName = blobName; + } + + @Override + public BlobPathParameters getBlobPathParameters() { + return new BlobPathParameters(List.of(GLOBAL_METADATA_PATH_TOKEN), HASHES_OF_CONSISTENT_SETTINGS); + } + + @Override + public String getType() { + return HASHES_OF_CONSISTENT_SETTINGS; + } + + @Override + public String generateBlobFileName() { + String blobFileName = String.join( + DELIMITER, + getBlobPathParameters().getFilePrefix(), + RemoteStoreUtils.invertLong(metadataVersion), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(CLUSTER_STATE_ATTRIBUTES_CURRENT_CODEC_VERSION) + ); + this.blobFileName = blobFileName; + return blobFileName; + } + + @Override + public ClusterMetadataManifest.UploadedMetadata getUploadedMetadata() { + assert blobName != null; + return new ClusterMetadataManifest.UploadedMetadataAttribute(HASHES_OF_CONSISTENT_SETTINGS, blobName); + } + + @Override + public InputStream serialize() throws IOException { + return HASHES_OF_CONSISTENT_SETTINGS_FORMAT.serialize(hashesOfConsistentSettings, generateBlobFileName(), getCompressor()) + .streamInput(); + } + + @Override + public DiffableStringMap deserialize(final InputStream inputStream) throws IOException { + return HASHES_OF_CONSISTENT_SETTINGS_FORMAT.deserialize(blobName, Streams.readFully(inputStream)); + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteIndexMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteIndexMetadata.java new file mode 100644 index 
0000000000000..830b09b92e2cb --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteIndexMetadata.java @@ -0,0 +1,111 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote.model; + +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.io.Streams; +import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.BlobPathParameters; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; +import org.opensearch.gateway.remote.RemoteClusterStateUtils; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; + +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.METADATA_NAME_PLAIN_FORMAT; + +/** + * Wrapper class for uploading/downloading {@link IndexMetadata} to/from remote blob store + */ +public class RemoteIndexMetadata extends AbstractRemoteWritableBlobEntity { + + public static final int INDEX_METADATA_CURRENT_CODEC_VERSION = 2; + + public static final ChecksumBlobStoreFormat INDEX_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( + "index-metadata", + METADATA_NAME_PLAIN_FORMAT, + IndexMetadata::fromXContent + ); + public static final String INDEX = "index"; + + private IndexMetadata indexMetadata; + + public RemoteIndexMetadata( + final IndexMetadata indexMetadata, + final String clusterUUID, + final Compressor compressor, + final NamedXContentRegistry namedXContentRegistry + ) { + super(clusterUUID, compressor, namedXContentRegistry); + this.indexMetadata = indexMetadata; + } + + public RemoteIndexMetadata( + final String blobName, + final String clusterUUID, + final Compressor compressor, + final NamedXContentRegistry namedXContentRegistry + ) { + super(clusterUUID, compressor, namedXContentRegistry); + this.blobName = blobName; + } + + @Override + public BlobPathParameters getBlobPathParameters() { + return new BlobPathParameters(List.of(INDEX, indexMetadata.getIndexUUID()), "metadata"); + } + + @Override + public String getType() { + return INDEX; + } + + @Override + public String generateBlobFileName() { + String blobFileName = String.join( + RemoteClusterStateUtils.DELIMITER, + getBlobPathParameters().getFilePrefix(), + RemoteStoreUtils.invertLong(indexMetadata.getVersion()), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(INDEX_METADATA_CURRENT_CODEC_VERSION) // Keep the codec version at last place only, during reads we read last + // place to determine codec version. 
+ ); + this.blobFileName = blobFileName; + return blobFileName; + } + + @Override + public UploadedMetadata getUploadedMetadata() { + assert blobName != null; + return new UploadedIndexMetadata(indexMetadata.getIndex().getName(), indexMetadata.getIndexUUID(), blobName); + } + + @Override + public InputStream serialize() throws IOException { + return INDEX_METADATA_FORMAT.serialize( + indexMetadata, + generateBlobFileName(), + getCompressor(), + RemoteClusterStateUtils.FORMAT_PARAMS + ).streamInput(); + } + + @Override + public IndexMetadata deserialize(final InputStream inputStream) throws IOException { + // Blob name parameter is redundant + return INDEX_METADATA_FORMAT.deserialize(blobName, getNamedXContentRegistry(), Streams.readFully(inputStream)); + } + +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemotePersistentSettingsMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemotePersistentSettingsMetadata.java new file mode 100644 index 0000000000000..9ee3db8f289e3 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemotePersistentSettingsMetadata.java @@ -0,0 +1,112 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote.model; + +import org.opensearch.common.io.Streams; +import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.BlobPathParameters; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadataAttribute; +import org.opensearch.gateway.remote.RemoteClusterStateUtils; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; + +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.GLOBAL_METADATA_CURRENT_CODEC_VERSION; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.METADATA_NAME_PLAIN_FORMAT; + +/** + * Wrapper class for uploading/downloading persistent {@link Settings} to/from remote blob store + */ +public class RemotePersistentSettingsMetadata extends AbstractRemoteWritableBlobEntity { + + public static final String SETTING_METADATA = "settings"; + + public static final ChecksumBlobStoreFormat SETTINGS_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( + "settings", + METADATA_NAME_PLAIN_FORMAT, + Settings::fromXContent + ); + + private Settings persistentSettings; + private long metadataVersion; + + public RemotePersistentSettingsMetadata( + final Settings settings, + final long metadataVersion, + final String clusterUUID, + final Compressor compressor, + final NamedXContentRegistry namedXContentRegistry + ) { + super(clusterUUID, compressor, namedXContentRegistry); + this.persistentSettings = settings; + this.metadataVersion = metadataVersion; + } + + public RemotePersistentSettingsMetadata( + final String blobName, + final String clusterUUID, + final Compressor compressor, + final NamedXContentRegistry namedXContentRegistry + ) { + 
super(clusterUUID, compressor, namedXContentRegistry); + this.blobName = blobName; + } + + @Override + public BlobPathParameters getBlobPathParameters() { + return new BlobPathParameters(List.of("global-metadata"), SETTING_METADATA); + } + + @Override + public String getType() { + return SETTING_METADATA; + } + + @Override + public String generateBlobFileName() { + String blobFileName = String.join( + DELIMITER, + getBlobPathParameters().getFilePrefix(), + RemoteStoreUtils.invertLong(metadataVersion), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(GLOBAL_METADATA_CURRENT_CODEC_VERSION) + ); + this.blobFileName = blobFileName; + return blobFileName; + } + + @Override + public InputStream serialize() throws IOException { + return SETTINGS_METADATA_FORMAT.serialize( + persistentSettings, + generateBlobFileName(), + getCompressor(), + RemoteClusterStateUtils.FORMAT_PARAMS + ).streamInput(); + } + + @Override + public Settings deserialize(final InputStream inputStream) throws IOException { + return SETTINGS_METADATA_FORMAT.deserialize(blobName, getNamedXContentRegistry(), Streams.readFully(inputStream)); + } + + @Override + public UploadedMetadata getUploadedMetadata() { + assert blobName != null; + return new UploadedMetadataAttribute(SETTING_METADATA, blobName); + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteReadResult.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteReadResult.java new file mode 100644 index 0000000000000..adee09eaeffef --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteReadResult.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote.model; + +import org.opensearch.core.xcontent.ToXContent; + +/** + * Container class for entity read from remote store + */ +public class RemoteReadResult { + + ToXContent obj; + String component; + String componentName; + + public RemoteReadResult(ToXContent obj, String component, String componentName) { + this.obj = obj; + this.component = component; + this.componentName = componentName; + } + + public ToXContent getObj() { + return obj; + } + + public String getComponent() { + return component; + } + + public String getComponentName() { + return componentName; + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java new file mode 100644 index 0000000000000..4513d35aef5e8 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTemplatesMetadata.java @@ -0,0 +1,113 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
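// RemoteReadResult above tags every downloaded object with a component and component name so a
// single async download pipeline can route heterogeneous cluster-state parts. The dispatch below
// is an assumption for illustration, not code from this change; only the
// (obj, component, componentName) triple mirrors the class above.
class ReadResultRoutingSketch {
    static void apply(Object obj, String component, String componentName) {
        if ("coordination".equals(component)) {
            System.out.println("apply coordination metadata: " + obj);
        } else if ("templates".equals(component)) {
            System.out.println("apply templates metadata: " + obj);
        } else {
            System.out.println("apply custom [" + componentName + "]: " + obj);
        }
    }

    public static void main(String[] args) {
        apply("{...}", "coordination", "coordination");
        apply("{...}", "custom", "custom--repositories");
    }
}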
+ */ + +package org.opensearch.gateway.remote.model; + +import org.opensearch.cluster.metadata.TemplatesMetadata; +import org.opensearch.common.io.Streams; +import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.BlobPathParameters; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadataAttribute; +import org.opensearch.gateway.remote.RemoteClusterStateUtils; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; + +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.GLOBAL_METADATA_CURRENT_CODEC_VERSION; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.METADATA_NAME_PLAIN_FORMAT; + +/** + * Wrapper class for uploading/downloading {@link TemplatesMetadata} to/from remote blob store + */ +public class RemoteTemplatesMetadata extends AbstractRemoteWritableBlobEntity { + + public static final String TEMPLATES_METADATA = "templates"; + + public static final ChecksumBlobStoreFormat TEMPLATES_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( + "templates", + METADATA_NAME_PLAIN_FORMAT, + TemplatesMetadata::fromXContent + ); + private TemplatesMetadata templatesMetadata; + private long metadataVersion; + + public RemoteTemplatesMetadata( + final TemplatesMetadata templatesMetadata, + final long metadataVersion, + final String clusterUUID, + final Compressor compressor, + final NamedXContentRegistry namedXContentRegistry + ) { + super(clusterUUID, compressor, namedXContentRegistry); + this.templatesMetadata = templatesMetadata; + this.metadataVersion = metadataVersion; + } + + public RemoteTemplatesMetadata( + final String blobName, + final String clusterUUID, + final Compressor compressor, + final NamedXContentRegistry namedXContentRegistry + ) { + super(clusterUUID, compressor, namedXContentRegistry); + this.blobName = blobName; + } + + @Override + public BlobPathParameters getBlobPathParameters() { + return new BlobPathParameters(List.of("global-metadata"), TEMPLATES_METADATA); + } + + @Override + public String getType() { + return TEMPLATES_METADATA; + } + + @Override + public String generateBlobFileName() { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/global-metadata/______ + // + String blobFileName = String.join( + DELIMITER, + getBlobPathParameters().getFilePrefix(), + RemoteStoreUtils.invertLong(metadataVersion), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(GLOBAL_METADATA_CURRENT_CODEC_VERSION) + ); + this.blobFileName = blobFileName; + return blobFileName; + } + + @Override + public InputStream serialize() throws IOException { + return TEMPLATES_METADATA_FORMAT.serialize( + templatesMetadata, + generateBlobFileName(), + getCompressor(), + RemoteClusterStateUtils.FORMAT_PARAMS + ).streamInput(); + } + + @Override + public TemplatesMetadata deserialize(final InputStream inputStream) throws IOException { + return TEMPLATES_METADATA_FORMAT.deserialize(blobName, getNamedXContentRegistry(), Streams.readFully(inputStream)); + } + + @Override + public UploadedMetadata getUploadedMetadata() { + assert blobName != null; + return 
new UploadedMetadataAttribute(TEMPLATES_METADATA, blobName); + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java new file mode 100644 index 0000000000000..fd0526f05d015 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteTransientSettingsMetadata.java @@ -0,0 +1,113 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote.model; + +import org.opensearch.common.io.Streams; +import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; +import org.opensearch.common.remote.BlobPathParameters; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadata; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadataAttribute; +import org.opensearch.gateway.remote.RemoteClusterStateUtils; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; + +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.GLOBAL_METADATA_CURRENT_CODEC_VERSION; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.GLOBAL_METADATA_PATH_TOKEN; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.METADATA_NAME_FORMAT; + +/** + * Wrapper class for uploading/downloading transient {@link Settings} to/from remote blob store + */ +public class RemoteTransientSettingsMetadata extends AbstractRemoteWritableBlobEntity { + + public static final String TRANSIENT_SETTING_METADATA = "transient-settings"; + + public static final ChecksumBlobStoreFormat SETTINGS_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( + "transient-settings", + METADATA_NAME_FORMAT, + Settings::fromXContent + ); + + private Settings transientSettings; + private long metadataVersion; + + public RemoteTransientSettingsMetadata( + final Settings transientSettings, + final long metadataVersion, + final String clusterUUID, + final Compressor compressor, + final NamedXContentRegistry namedXContentRegistry + ) { + super(clusterUUID, compressor, namedXContentRegistry); + this.transientSettings = transientSettings; + this.metadataVersion = metadataVersion; + } + + public RemoteTransientSettingsMetadata( + final String blobName, + final String clusterUUID, + final Compressor compressor, + final NamedXContentRegistry namedXContentRegistry + ) { + super(clusterUUID, compressor, namedXContentRegistry); + this.blobName = blobName; + } + + @Override + public BlobPathParameters getBlobPathParameters() { + return new BlobPathParameters(List.of(GLOBAL_METADATA_PATH_TOKEN), TRANSIENT_SETTING_METADATA); + } + + @Override + public String getType() { + return TRANSIENT_SETTING_METADATA; + } + + @Override + public String generateBlobFileName() { + String blobFileName = String.join( + DELIMITER, + getBlobPathParameters().getFilePrefix(), + RemoteStoreUtils.invertLong(metadataVersion), + 
RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(GLOBAL_METADATA_CURRENT_CODEC_VERSION) + ); + this.blobFileName = blobFileName; + return blobFileName; + } + + @Override + public InputStream serialize() throws IOException { + return SETTINGS_METADATA_FORMAT.serialize( + transientSettings, + generateBlobFileName(), + getCompressor(), + RemoteClusterStateUtils.FORMAT_PARAMS + ).streamInput(); + } + + @Override + public Settings deserialize(final InputStream inputStream) throws IOException { + return SETTINGS_METADATA_FORMAT.deserialize(blobName, getNamedXContentRegistry(), Streams.readFully(inputStream)); + } + + @Override + public UploadedMetadata getUploadedMetadata() { + assert blobName != null; + return new UploadedMetadataAttribute(TRANSIENT_SETTING_METADATA, blobName); + } +} diff --git a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java index 257aca2b67990..991fbf12072be 100644 --- a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java @@ -357,6 +357,16 @@ protected void serverAcceptedChannel(HttpChannel httpChannel) { logger.trace(() -> new ParameterizedMessage("Http channel accepted: {}", httpChannel)); } + /** + * This method handles an incoming http request as a stream. + * + * @param httpRequest that is incoming + * @param httpChannel that received the http request + */ + public void incomingStream(HttpRequest httpRequest, final StreamingHttpChannel httpChannel) { + handleIncomingRequest(httpRequest, httpChannel, httpRequest.getInboundException()); + } + /** * This method handles an incoming http request. * @@ -438,29 +448,56 @@ private void handleIncomingRequest(final HttpRequest httpRequest, final HttpChan RestChannel innerChannel; ThreadContext threadContext = threadPool.getThreadContext(); try { - innerChannel = new DefaultRestChannel( - httpChannel, - httpRequest, - restRequest, - bigArrays, - handlingSettings, - threadContext, - corsHandler, - trace - ); + if (httpChannel instanceof StreamingHttpChannel) { + innerChannel = new DefaultStreamingRestChannel( + (StreamingHttpChannel) httpChannel, + httpRequest, + restRequest, + bigArrays, + handlingSettings, + threadContext, + corsHandler, + trace + ); + } else { + innerChannel = new DefaultRestChannel( + httpChannel, + httpRequest, + restRequest, + bigArrays, + handlingSettings, + threadContext, + corsHandler, + trace + ); + } } catch (final IllegalArgumentException e) { badRequestCause = ExceptionsHelper.useOrSuppress(badRequestCause, e); final RestRequest innerRequest = RestRequest.requestWithoutParameters(xContentRegistry, httpRequest, httpChannel); - innerChannel = new DefaultRestChannel( - httpChannel, - httpRequest, - innerRequest, - bigArrays, - handlingSettings, - threadContext, - corsHandler, - trace - ); + + if (httpChannel instanceof StreamingHttpChannel) { + innerChannel = new DefaultStreamingRestChannel( + (StreamingHttpChannel) httpChannel, + httpRequest, + innerRequest, + bigArrays, + handlingSettings, + threadContext, + corsHandler, + trace + ); + } else { + innerChannel = new DefaultRestChannel( + httpChannel, + httpRequest, + innerRequest, + bigArrays, + handlingSettings, + threadContext, + corsHandler, + trace + ); + } } channel = innerChannel; } diff --git a/server/src/main/java/org/opensearch/http/DefaultRestChannel.java 
b/server/src/main/java/org/opensearch/http/DefaultRestChannel.java
index 7084600133a75..497ec799ff937 100644
--- a/server/src/main/java/org/opensearch/http/DefaultRestChannel.java
+++ b/server/src/main/java/org/opensearch/http/DefaultRestChannel.java
@@ -58,11 +58,11 @@

 /**
  * The default rest channel for incoming requests. This class implements the basic logic for sending a rest
- * response. It will set necessary headers nad ensure that bytes are released after the response is sent.
+ * response. It will set necessary headers and ensure that bytes are released after the response is sent.
  *
  * @opensearch.internal
  */
-public class DefaultRestChannel extends AbstractRestChannel implements RestChannel {
+class DefaultRestChannel extends AbstractRestChannel implements RestChannel {

     static final String CLOSE = "close";
     static final String CONNECTION = "connection";
diff --git a/server/src/main/java/org/opensearch/http/DefaultStreamingRestChannel.java b/server/src/main/java/org/opensearch/http/DefaultStreamingRestChannel.java
new file mode 100644
index 0000000000000..7d8445294a4f3
--- /dev/null
+++ b/server/src/main/java/org/opensearch/http/DefaultStreamingRestChannel.java
@@ -0,0 +1,107 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.http;
+
+import org.opensearch.common.Nullable;
+import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.common.io.stream.ReleasableBytesStreamOutput;
+import org.opensearch.common.lease.Releasable;
+import org.opensearch.common.lease.Releasables;
+import org.opensearch.common.util.BigArrays;
+import org.opensearch.common.util.concurrent.ThreadContext;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.rest.RestRequest;
+import org.opensearch.rest.StreamingRestChannel;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.reactivestreams.Subscriber;
+
+import static org.opensearch.tasks.Task.X_OPAQUE_ID;
+
+/**
+ * The streaming rest channel for incoming requests. This class implements the logic for sending a streaming
+ * REST response in chunks. It will set necessary headers and ensure that bytes are released after the full
+ * response is sent.
+ *
+ * @opensearch.internal
+ */
+class DefaultStreamingRestChannel extends DefaultRestChannel implements StreamingRestChannel {
+    private final StreamingHttpChannel streamingHttpChannel;
+    @Nullable
+    private final HttpTracer tracerLog;
+
+    DefaultStreamingRestChannel(
+        StreamingHttpChannel streamingHttpChannel,
+        HttpRequest httpRequest,
+        RestRequest request,
+        BigArrays bigArrays,
+        HttpHandlingSettings settings,
+        ThreadContext threadContext,
+        CorsHandler corsHandler,
+        @Nullable HttpTracer tracerLog
+    ) {
+        super(streamingHttpChannel, httpRequest, request, bigArrays, settings, threadContext, corsHandler, tracerLog);
+        this.streamingHttpChannel = streamingHttpChannel;
+        this.tracerLog = tracerLog;
+    }
+
+    @Override
+    public void subscribe(Subscriber<? super HttpChunk> subscriber) {
+        streamingHttpChannel.subscribe(subscriber);
+    }
+
+    @Override
+    public void sendChunk(HttpChunk chunk) {
+        String opaque = null;
+        boolean success = false;
+        final List<Releasable> toClose = new ArrayList<>(3);
+        String contentLength = null;
+
+        try {
+            opaque = request.header(X_OPAQUE_ID);
+            contentLength = String.valueOf(chunk.content().length());
+            toClose.add(chunk);
+
+            BytesStreamOutput bytesStreamOutput = newBytesOutput();
+            if (bytesStreamOutput instanceof ReleasableBytesStreamOutput) {
+                toClose.add((Releasable) bytesStreamOutput);
+            }
+
+            ActionListener<Void> listener = ActionListener.wrap(() -> Releasables.close(toClose));
+            streamingHttpChannel.sendChunk(chunk, listener);
+            success = true;
+        } finally {
+            if (success == false) {
+                Releasables.close(toClose);
+            }
+            if (tracerLog != null) {
+                tracerLog.traceChunk(chunk, streamingHttpChannel, contentLength, opaque, request.getRequestId(), success);
+            }
+        }
+    }
+
+    @Override
+    public void prepareResponse(RestStatus status, Map<String, List<String>> headers) {
+        streamingHttpChannel.prepareResponse(status.getStatus(), headers);
+    }
+
+    @Override
+    public boolean isReadable() {
+        return streamingHttpChannel.isReadable();
+    }
+
+    @Override
+    public boolean isWritable() {
+        return streamingHttpChannel.isWritable();
+    }
+}
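// sendChunk(...) in DefaultStreamingRestChannel above follows a release-safety pattern: collect
// Releasables, let the completion listener close them on the success path, and close them inline
// if anything throws before the send is handed off. A stand-alone sketch of that shape, using
// AutoCloseable instead of OpenSearch's Releasable:
import java.util.ArrayList;
import java.util.List;

class ReleaseOnFailureSketch {
    static void closeAll(List<AutoCloseable> toClose) {
        for (AutoCloseable c : toClose) {
            try {
                c.close();
            } catch (Exception e) {
                // best effort, mirroring Releasables.close
            }
        }
    }

    static void sendChunk(boolean failBeforeHandoff) {
        boolean success = false;
        final List<AutoCloseable> toClose = new ArrayList<>();
        try {
            toClose.add(() -> System.out.println("chunk buffer released"));
            if (failBeforeHandoff) {
                throw new IllegalStateException("simulated failure before handoff");
            }
            Runnable onSendCompleted = () -> closeAll(toClose); // stand-in for the ActionListener callback
            onSendCompleted.run();
            success = true;
        } finally {
            if (success == false) {
                closeAll(toClose); // failure path: release immediately
            }
        }
    }

    public static void main(String[] args) {
        sendChunk(false);
        try {
            sendChunk(true);
        } catch (IllegalStateException expected) {
            // the buffer was still released exactly once, in the finally block
        }
    }
}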
diff --git a/server/src/main/java/org/opensearch/http/HttpChunk.java b/server/src/main/java/org/opensearch/http/HttpChunk.java
new file mode 100644
index 0000000000000..7bcb526fe17bb
--- /dev/null
+++ b/server/src/main/java/org/opensearch/http/HttpChunk.java
@@ -0,0 +1,33 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.http;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.common.lease.Releasable;
+import org.opensearch.core.common.bytes.BytesReference;
+
+/**
+ * Represents a chunk of the HTTP request / response stream
+ *
+ * @opensearch.experimental
+ */
+@ExperimentalApi
+public interface HttpChunk extends Releasable {
+    /**
+     * Signals this is the last chunk of the stream.
+ * @return "true" if this is the last chunk of the stream, "false" otherwise + */ + boolean isLast(); + + /** + * Returns the content of this chunk + * @return the content of this chunk + */ + BytesReference content(); +} diff --git a/server/src/main/java/org/opensearch/http/HttpServerTransport.java b/server/src/main/java/org/opensearch/http/HttpServerTransport.java index 012b69c29c1d4..f58d604151fd0 100644 --- a/server/src/main/java/org/opensearch/http/HttpServerTransport.java +++ b/server/src/main/java/org/opensearch/http/HttpServerTransport.java @@ -38,8 +38,12 @@ import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.core.service.ReportingService; import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestHandler; import org.opensearch.rest.RestRequest; +import java.util.Map; +import java.util.Optional; + /** * HTTP Transport server * @@ -61,6 +65,17 @@ public interface HttpServerTransport extends LifecycleComponent, ReportingServic * Dispatches HTTP requests. */ interface Dispatcher { + /** + * Finds the matching {@link RestHandler} that the request is going to be dispatched to, if any. + * @param uri request URI + * @param rawPath request raw path + * @param method request HTTP method + * @param params request parameters + * @return matching {@link RestHandler} that the request is going to be dispatched to, {@code Optional.empty()} if none match + */ + default Optional dispatchHandler(String uri, String rawPath, RestRequest.Method method, Map params) { + return Optional.empty(); + } /** * Dispatches the {@link RestRequest} to the relevant request handler or responds to the given rest channel directly if diff --git a/server/src/main/java/org/opensearch/http/HttpTracer.java b/server/src/main/java/org/opensearch/http/HttpTracer.java index 7a763b9ffb790..de1da4a20e294 100644 --- a/server/src/main/java/org/opensearch/http/HttpTracer.java +++ b/server/src/main/java/org/opensearch/http/HttpTracer.java @@ -128,6 +128,36 @@ void traceResponse( ); } + /** + * Logs the response chunk to a request that was logged by {@link #maybeTraceRequest(RestRequest, Exception)}. + * + * @param chunk response chunk + * @param httpChannel HttpChannel the response was sent on + * @param contentLength Value of the response content length header + * @param opaqueHeader Value of HTTP header {@link Task#X_OPAQUE_ID} + * @param requestId Request id as returned by {@link RestRequest#getRequestId()} + * @param success Whether the response was successfully sent + */ + void traceChunk( + HttpChunk chunk, + StreamingHttpChannel httpChannel, + String contentLength, + String opaqueHeader, + long requestId, + boolean success + ) { + logger.trace( + new ParameterizedMessage( + "[{}][{}][{}] sent next chunk to [{}] success [{}]", + requestId, + opaqueHeader, + contentLength, + httpChannel, + success + ) + ); + } + private void setTracerLogInclude(List tracerLogInclude) { this.tracerLogInclude = tracerLogInclude.toArray(Strings.EMPTY_ARRAY); } diff --git a/server/src/main/java/org/opensearch/http/StreamingHttpChannel.java b/server/src/main/java/org/opensearch/http/StreamingHttpChannel.java new file mode 100644 index 0000000000000..9bab25cb537ed --- /dev/null +++ b/server/src/main/java/org/opensearch/http/StreamingHttpChannel.java @@ -0,0 +1,56 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.http;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.core.action.ActionListener;
+
+import java.util.List;
+import java.util.Map;
+
+import org.reactivestreams.Publisher;
+
+/**
+ * Represents an HTTP communication channel with streaming capabilities.
+ *
+ * @opensearch.experimental
+ */
+@ExperimentalApi
+public interface StreamingHttpChannel extends HttpChannel, Publisher<HttpChunk> {
+    /**
+     * Sends the next {@link HttpChunk} to the response stream
+     * @param chunk response chunk to send to channel
+     * @param listener listener notified once the chunk has been sent
+     */
+    void sendChunk(HttpChunk chunk, ActionListener<Void> listener);
+
+    /**
+     * Receives the next {@link HttpChunk} from the request stream
+     * @param chunk next {@link HttpChunk}
+     */
+    void receiveChunk(HttpChunk chunk);
+
+    /**
+     * Prepares the response before kicking off content streaming
+     * @param status response status
+     * @param headers response headers
+     */
+    void prepareResponse(int status, Map<String, List<String>> headers);
+
+    /**
+     * Returns {@code true} if this channel is ready for streaming request data, {@code false} otherwise
+     * @return {@code true} if this channel is ready for streaming request data, {@code false} otherwise
+     */
+    boolean isReadable();
+
+    /**
+     * Returns {@code true} if this channel is ready for streaming response data, {@code false} otherwise
+     * @return {@code true} if this channel is ready for streaming response data, {@code false} otherwise
+     */
+    boolean isWritable();
+}
diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java
index 6c0ab2f6b0153..613e93698d683 100644
--- a/server/src/main/java/org/opensearch/index/IndexSettings.java
+++ b/server/src/main/java/org/opensearch/index/IndexSettings.java
@@ -151,6 +151,14 @@ public static IndexMergePolicy fromString(String text) {
         true,
         Property.IndexScope
     );
+
+    public static final Setting<Boolean> ALLOW_DERIVED_FIELDS = Setting.boolSetting(
+        "index.query.derived_field.enabled",
+        true,
+        Property.Dynamic,
+        Property.IndexScope
+    );
+
     public static final Setting<TimeValue> INDEX_TRANSLOG_SYNC_INTERVAL_SETTING = Setting.timeSetting(
         "index.translog.sync_interval",
         TimeValue.timeValueSeconds(5),
@@ -763,6 +771,7 @@ public static IndexMergePolicy fromString(String text) {
     private final boolean assignedOnRemoteNode;
     private final RemoteStorePathStrategy remoteStorePathStrategy;
     private final boolean isTranslogMetadataEnabled;
+    private volatile boolean allowDerivedField;

     /**
      * The maximum age of a retention lease before it is considered expired.
@@ -856,6 +865,10 @@ private void setDefaultFields(List<String> defaultFields) {
         this.defaultFields = defaultFields;
     }

+    private void setAllowDerivedField(boolean allowDerivedField) {
+        this.allowDerivedField = allowDerivedField;
+    }
+
     /**
      * Returns true if query string parsing should be lenient. The default is false
      */
@@ -884,6 +897,13 @@ public boolean isDefaultAllowUnmappedFields() {
         return defaultAllowUnmappedFields;
     }

+    /**
+     * Returns true if queries are allowed to define and use derived fields. The default is true
+     */
+    public boolean isDerivedFieldAllowed() {
+        return allowDerivedField;
+    }
+
     /**
      * Creates a new {@link IndexSettings} instance. The given node settings will be merged with the settings in the metadata
      * while index level settings will overwrite node settings.
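// The index.query.derived_field.enabled wiring above is the standard dynamic-setting pattern:
// a volatile field, a private setter, and an update consumer registered with the scoped settings
// so updates take effect without reopening the index. A simplified stand-in (the listener
// registry below replaces IndexScopedSettings):
import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

class DynamicSettingSketch {
    static class ScopedSettingsStandIn {
        private final List<Consumer<Boolean>> consumers = new ArrayList<>();

        void addSettingsUpdateConsumer(Consumer<Boolean> consumer) {
            consumers.add(consumer);
        }

        void apply(boolean newValue) {
            consumers.forEach(c -> c.accept(newValue));
        }
    }

    static class IndexSettingsStandIn {
        private volatile boolean allowDerivedField = true; // default mirrors the PR

        IndexSettingsStandIn(ScopedSettingsStandIn scopedSettings) {
            scopedSettings.addSettingsUpdateConsumer(this::setAllowDerivedField);
        }

        private void setAllowDerivedField(boolean allowDerivedField) {
            this.allowDerivedField = allowDerivedField;
        }

        boolean isDerivedFieldAllowed() {
            return allowDerivedField;
        }
    }

    public static void main(String[] args) {
        ScopedSettingsStandIn scopedSettings = new ScopedSettingsStandIn();
        IndexSettingsStandIn settings = new IndexSettingsStandIn(scopedSettings);
        scopedSettings.apply(false); // e.g. a settings update disabling derived fields
        System.out.println(settings.isDerivedFieldAllowed()); // false
    }
}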
@@ -931,6 +951,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti
         this.queryStringAnalyzeWildcard = QUERY_STRING_ANALYZE_WILDCARD.get(nodeSettings);
         this.queryStringAllowLeadingWildcard = QUERY_STRING_ALLOW_LEADING_WILDCARD.get(nodeSettings);
         this.defaultAllowUnmappedFields = scopedSettings.get(ALLOW_UNMAPPED);
+        this.allowDerivedField = scopedSettings.get(ALLOW_DERIVED_FIELDS);
         this.durability = scopedSettings.get(INDEX_TRANSLOG_DURABILITY_SETTING);
         defaultFields = scopedSettings.get(DEFAULT_FIELD_SETTING);
         syncInterval = INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.get(settings);
@@ -1105,6 +1126,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti
             INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING,
             this::setDocIdFuzzySetFalsePositiveProbability
         );
+        scopedSettings.addSettingsUpdateConsumer(ALLOW_DERIVED_FIELDS, this::setAllowDerivedField);
     }

     private void setSearchIdleAfter(TimeValue searchIdleAfter) {
diff --git a/server/src/main/java/org/opensearch/index/mapper/DefaultDerivedFieldResolver.java b/server/src/main/java/org/opensearch/index/mapper/DefaultDerivedFieldResolver.java
new file mode 100644
index 0000000000000..c577a4117247b
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/mapper/DefaultDerivedFieldResolver.java
@@ -0,0 +1,229 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.mapper;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.common.regex.Regex;
+import org.opensearch.index.query.QueryShardContext;
+import org.opensearch.script.Script;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+import static org.opensearch.index.mapper.FieldMapper.IGNORE_MALFORMED_SETTING;
+
+/**
+ * Accepts definitions of DerivedField from a search request in both forms: a map parsed from the SearchRequest and {@link DerivedField} objects defined using a client.
+ * The object is initialized per search request and is responsible for resolving a {@link DerivedFieldType} given a field name.
+ * It uses {@link FieldTypeInference} to infer the field type for a nested field within a DerivedField of {@link DerivedFieldSupportedTypes#OBJECT} type.
+ */
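// The resolver javadoc above states two rules: per-request derived-field definitions take
// precedence over mapping-level ones, and resolutions are cached per request. Reduced to its
// essence with String stand-ins (the real class resolves DerivedFieldType through MapperService
// and caches via ConcurrentHashMap.computeIfAbsent):
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;

class PrecedenceSketch {
    private final Map<String, String> searchRequestDefs = new ConcurrentHashMap<>();
    private final Map<String, String> indexMappingDefs;

    PrecedenceSketch(Map<String, String> fromRequest, Map<String, String> fromMapping) {
        searchRequestDefs.putAll(fromRequest); // doubles as the per-request cache
        indexMappingDefs = fromMapping;
    }

    String resolve(String fieldName) {
        // request-scoped definitions shadow mapping-level ones
        return Optional.ofNullable(searchRequestDefs.get(fieldName)).orElseGet(() -> indexMappingDefs.get(fieldName));
    }

    public static void main(String[] args) {
        PrecedenceSketch resolver = new PrecedenceSketch(
            Map.of("duration", "keyword"),                // defined in the search request
            Map.of("duration", "long", "client_ip", "ip") // defined in the index mapping
        );
        System.out.println(resolver.resolve("duration"));  // keyword: request definition wins
        System.out.println(resolver.resolve("client_ip")); // ip: falls back to the mapping
    }
}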
+public class DefaultDerivedFieldResolver implements DerivedFieldResolver {
+    private final QueryShardContext queryShardContext;
+    private final Map<String, DerivedFieldType> derivedFieldTypeMap = new ConcurrentHashMap<>();
+    private final FieldTypeInference typeInference;
+    private static final Logger logger = LogManager.getLogger(DefaultDerivedFieldResolver.class);
+
+    DefaultDerivedFieldResolver(
+        QueryShardContext queryShardContext,
+        Map<String, Object> derivedFieldsObject,
+        List<DerivedField> derivedFields
+    ) {
+        this(
+            queryShardContext,
+            derivedFieldsObject,
+            derivedFields,
+            new FieldTypeInference(
+                queryShardContext.index().getName(),
+                queryShardContext.getMapperService(),
+                queryShardContext.getIndexReader()
+            )
+        );
+    }
+
+    DefaultDerivedFieldResolver(
+        QueryShardContext queryShardContext,
+        Map<String, Object> derivedFieldsObject,
+        List<DerivedField> derivedFields,
+        FieldTypeInference typeInference
+    ) {
+        this.queryShardContext = queryShardContext;
+        initDerivedFieldTypes(derivedFieldsObject, derivedFields);
+        this.typeInference = typeInference;
+    }
+
+    @Override
+    public Set<String> resolvePattern(String pattern) {
+        Set<String> derivedFields = new HashSet<>();
+        if (queryShardContext != null && queryShardContext.getMapperService() != null) {
+            for (MappedFieldType fieldType : queryShardContext.getMapperService().fieldTypes()) {
+                if (Regex.simpleMatch(pattern, fieldType.name()) && fieldType instanceof DerivedFieldType) {
+                    derivedFields.add(fieldType.name());
+                }
+            }
+        }
+        for (String fieldName : derivedFieldTypeMap.keySet()) {
+            if (Regex.simpleMatch(pattern, fieldName)) {
+                derivedFields.add(fieldName);
+            }
+        }
+        return derivedFields;
+    }
+
+    /**
+     * Resolves the fieldName. The search request definitions are given precedence over derived field definitions in the index mapping.
+     * It caches the response for previously resolved field names.
+     * @param fieldName name of the field; it also accepts a nested derived field
+     * @return DerivedFieldType if resolved successfully, null otherwise.
+ */ + @Override + public DerivedFieldType resolve(String fieldName) { + return Optional.ofNullable(resolveUsingSearchDefinitions(fieldName)).orElseGet(() -> resolveUsingMappings(fieldName)); + } + + private DerivedFieldType resolveUsingSearchDefinitions(String fieldName) { + return Optional.ofNullable(derivedFieldTypeMap.get(fieldName)) + .orElseGet( + () -> Optional.ofNullable((DerivedFieldType) getParentDerivedField(fieldName)) + .map( + // compute and cache nested derived field + parentDerivedField -> derivedFieldTypeMap.computeIfAbsent( + fieldName, + f -> this.resolveNestedField(f, parentDerivedField) + ) + ) + .orElse(null) + ); + } + + private DerivedFieldType resolveNestedField(String fieldName, DerivedFieldType parentDerivedField) { + Objects.requireNonNull(parentDerivedField); + try { + Script script = parentDerivedField.derivedField.getScript(); + String nestedType = explicitTypeFromParent(parentDerivedField.derivedField, fieldName.substring(fieldName.indexOf(".") + 1)); + if (nestedType == null) { + Mapper inferredFieldMapper = typeInference.infer( + getValueFetcher(fieldName, script, parentDerivedField.derivedField.getIgnoreMalformed()) + ); + if (inferredFieldMapper != null) { + nestedType = inferredFieldMapper.typeName(); + } + } + if (nestedType != null) { + DerivedField derivedField = new DerivedField(fieldName, nestedType, script); + if (parentDerivedField.derivedField.getProperties() != null) { + derivedField.setProperties(parentDerivedField.derivedField.getProperties()); + } + if (parentDerivedField.derivedField.getPrefilterField() != null) { + derivedField.setPrefilterField(parentDerivedField.derivedField.getPrefilterField()); + } + if (parentDerivedField.derivedField.getFormat() != null) { + derivedField.setFormat(parentDerivedField.derivedField.getFormat()); + } + if (parentDerivedField.derivedField.getIgnoreMalformed()) { + derivedField.setIgnoreMalformed(parentDerivedField.derivedField.getIgnoreMalformed()); + } + return getDerivedFieldType(derivedField); + } else { + logger.warn( + "Field type cannot be inferred. 
Ensure the field {} is not rare across the entire index or provide an explicit mapping using [properties] under the parent object [{}]",
+                    fieldName,
+                    parentDerivedField.derivedField.getName()
+                );
+            }
+        } catch (IOException e) {
+            logger.warn(e.getMessage());
+        }
+        return null;
+    }
+
+    private MappedFieldType getParentDerivedField(String fieldName) {
+        if (fieldName.contains(".")) {
+            return resolve(fieldName.split("\\.")[0]);
+        }
+        return null;
+    }
+
+    private static String explicitTypeFromParent(DerivedField parentDerivedField, String subField) {
+        if (parentDerivedField == null) {
+            return null;
+        }
+        return parentDerivedField.getNestedFieldType(subField);
+    }
+
+    ValueFetcher getValueFetcher(String fieldName, Script script, boolean ignoreMalformed) {
+        String subFieldName = fieldName.substring(fieldName.indexOf(".") + 1);
+        return new ObjectDerivedFieldType.ObjectDerivedFieldValueFetcher(
+            subFieldName,
+            DerivedFieldType.getDerivedFieldLeafFactory(script, queryShardContext, queryShardContext.lookup()),
+            o -> o, // raw object returned will be used to infer the type without modifying it
+            ignoreMalformed
+        );
+    }
+
+    private void initDerivedFieldTypes(Map<String, Object> derivedFieldsObject, List<DerivedField> derivedFields) {
+        if (derivedFieldsObject != null && !derivedFieldsObject.isEmpty()) {
+            Map<String, Object> derivedFieldObject = new HashMap<>();
+            derivedFieldObject.put(DerivedFieldMapper.CONTENT_TYPE, derivedFieldsObject);
+            derivedFieldTypeMap.putAll(getAllDerivedFieldTypeFromObject(derivedFieldObject));
+        }
+        if (derivedFields != null) {
+            for (DerivedField derivedField : derivedFields) {
+                derivedFieldTypeMap.put(derivedField.getName(), getDerivedFieldType(derivedField));
+            }
+        }
+    }
+
+    private Map<String, DerivedFieldType> getAllDerivedFieldTypeFromObject(Map<String, Object> derivedFieldObject) {
+        Map<String, DerivedFieldType> derivedFieldTypes = new HashMap<>();
+        DocumentMapper documentMapper = queryShardContext.getMapperService()
+            .documentMapperParser()
+            .parse(DerivedFieldMapper.CONTENT_TYPE, derivedFieldObject);
+        if (documentMapper != null && documentMapper.mappers() != null) {
+            for (Mapper mapper : documentMapper.mappers()) {
+                if (mapper instanceof DerivedFieldMapper) {
+                    DerivedFieldType derivedFieldType = ((DerivedFieldMapper) mapper).fieldType();
+                    derivedFieldTypes.put(derivedFieldType.name(), derivedFieldType);
+                }
+            }
+        }
+        return derivedFieldTypes;
+    }
+
+    private DerivedFieldType getDerivedFieldType(DerivedField derivedField) {
+        Mapper.BuilderContext builderContext = new Mapper.BuilderContext(
+            queryShardContext.getMapperService().getIndexSettings().getSettings(),
+            new ContentPath(1)
+        );
+        DerivedFieldMapper.Builder builder = new DerivedFieldMapper.Builder(
+            derivedField,
+            queryShardContext.getMapperService().getIndexAnalyzers(),
+            null,
+            IGNORE_MALFORMED_SETTING.getDefault(queryShardContext.getIndexSettings().getSettings())
+        );
+        return builder.build(builderContext).fieldType();
+    }
+
+    private DerivedFieldType resolveUsingMappings(String name) {
+        if (queryShardContext != null && queryShardContext.getMapperService() != null) {
+            MappedFieldType mappedFieldType = queryShardContext.getMapperService().fieldType(name);
+            if (mappedFieldType instanceof DerivedFieldType) {
+                return (DerivedFieldType) mappedFieldType;
+            }
+        }
+        return null;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedField.java b/server/src/main/java/org/opensearch/index/mapper/DerivedField.java
index b502e41cbb97b..249b60a1c4ec5 100644
--- a/server/src/main/java/org/opensearch/index/mapper/DerivedField.java
+++
diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedField.java b/server/src/main/java/org/opensearch/index/mapper/DerivedField.java
index b502e41cbb97b..249b60a1c4ec5 100644
--- a/server/src/main/java/org/opensearch/index/mapper/DerivedField.java
+++ b/server/src/main/java/org/opensearch/index/mapper/DerivedField.java
@@ -30,7 +30,7 @@ public class DerivedField implements Writeable, ToXContentFragment {
     private final String name;
     private final String type;
     private final Script script;
-    private String sourceIndexedField;
+    private String prefilterField;
     private Map<String, Object> properties;
     private Boolean ignoreMalformed;
     private String format;
@@ -49,7 +49,7 @@ public DerivedField(StreamInput in) throws IOException {
         if (in.readBoolean()) {
             properties = in.readMap();
         }
-        sourceIndexedField = in.readOptionalString();
+        prefilterField = in.readOptionalString();
         format = in.readOptionalString();
         ignoreMalformed = in.readOptionalBoolean();
     }
@@ -67,7 +67,7 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeBoolean(true);
             out.writeMap(properties);
         }
-        out.writeOptionalString(sourceIndexedField);
+        out.writeOptionalString(prefilterField);
         out.writeOptionalString(format);
         out.writeOptionalBoolean(ignoreMalformed);
     }
@@ -81,8 +81,8 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par
         if (properties != null) {
             builder.field("properties", properties);
         }
-        if (sourceIndexedField != null) {
-            builder.field("source_indexed_field", sourceIndexedField);
+        if (prefilterField != null) {
+            builder.field("prefilter_field", prefilterField);
         }
         if (format != null) {
             builder.field("format", format);
         }
@@ -110,8 +110,15 @@ public Map<String, Object> getProperties() {
         return properties;
     }

-    public String getSourceIndexedField() {
-        return sourceIndexedField;
+    public String getNestedFieldType(String fieldName) {
+        if (properties == null || properties.isEmpty() || fieldName == null || fieldName.isEmpty()) {
+            return null;
+        }
+        return (String) properties.get(fieldName);
+    }
+
+    public String getPrefilterField() {
+        return prefilterField;
     }

     public String getFormat() {
@@ -126,8 +133,8 @@ public void setProperties(Map<String, Object> properties) {
         this.properties = properties;
     }

-    public void setSourceIndexedField(String sourceIndexedField) {
-        this.sourceIndexedField = sourceIndexedField;
+    public void setPrefilterField(String prefilterField) {
+        this.prefilterField = prefilterField;
     }

     public void setFormat(String format) {
@@ -140,7 +147,7 @@ public void setIgnoreMalformed(boolean ignoreMalformed) {

     @Override
     public int hashCode() {
-        return Objects.hash(name, type, script, sourceIndexedField, properties, ignoreMalformed, format);
+        return Objects.hash(name, type, script, prefilterField, properties, ignoreMalformed, format);
     }

     @Override
@@ -155,7 +162,7 @@ public boolean equals(Object obj) {
         return Objects.equals(name, other.name)
             && Objects.equals(type, other.type)
             && Objects.equals(script, other.script)
-            && Objects.equals(sourceIndexedField, other.sourceIndexedField)
+            && Objects.equals(prefilterField, other.prefilterField)
             && Objects.equals(properties, other.properties)
             && Objects.equals(ignoreMalformed, other.ignoreMalformed)
             && Objects.equals(format, other.format);
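
Two API changes in DerivedField matter downstream: source_indexed_field is renamed to prefilter_field (both the wire format and the XContent key change), and the new getNestedFieldType accessor lets a parent object field declare explicit sub-field types under [properties]. A small self-contained sketch of the accessor's contract (hypothetical class, mirroring the method body above):

import java.util.Map;

public class NestedTypeLookup {
    // Mirrors the getNestedFieldType contract: explicit sub-field types live in [properties].
    static String nestedFieldType(Map<String, Object> properties, String fieldName) {
        if (properties == null || properties.isEmpty() || fieldName == null || fieldName.isEmpty()) {
            return null;
        }
        return (String) properties.get(fieldName);
    }

    public static void main(String[] args) {
        Map<String, Object> properties = Map.of("status", "keyword", "latency", "long");
        System.out.println(nestedFieldType(properties, "status"));  // keyword
        System.out.println(nestedFieldType(properties, "missing")); // null
    }
}

A null result is what sends the resolver shown earlier down the type-inference path instead of the explicit-mapping path.
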
diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldMapper.java
index c6ae71320c35c..e08e46e1ea969 100644
--- a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldMapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldMapper.java
@@ -9,16 +9,20 @@
 package org.opensearch.index.mapper;

 import org.apache.lucene.index.IndexableField;
+import org.opensearch.common.time.DateFormatter;
 import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.index.analysis.IndexAnalyzers;
 import org.opensearch.script.Script;

 import java.io.IOException;
 import java.util.Arrays;
-import java.util.HashMap;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.function.Function;

+import static org.opensearch.index.mapper.DateFieldMapper.getDefaultDateTimeFormatter;
+
 /**
  * A field mapper for derived fields
  *
@@ -28,6 +32,8 @@ public class DerivedFieldMapper extends ParametrizedFieldMapper {

     public static final String CONTENT_TYPE = "derived";

+    protected final IndexAnalyzers indexAnalyzers;
+
     private static DerivedFieldMapper toType(FieldMapper in) {
         return (DerivedFieldMapper) in;
     }
@@ -38,62 +44,180 @@ private static DerivedFieldMapper toType(FieldMapper in) {
      * @opensearch.internal
      */
     public static class Builder extends ParametrizedFieldMapper.Builder {
-        // TODO: The type of parameter may change here if the actual underlying FieldType object is needed
-        private final Parameter<String> type = Parameter.stringParam("type", false, m -> toType(m).type, "");
+        private final Parameter<String> type = Parameter.stringParam("type", true, m -> toType(m).type, "");
+        private final IndexAnalyzers indexAnalyzers;
+        private final boolean defaultIgnoreMalformed;
+        private final DateFormatter defaultDateFormatter;

         private final Parameter