diff --git a/.github/workflows/assemble.yml b/.github/workflows/assemble.yml
index b3838b8e5ae97..d90b05c323cf1 100644
--- a/.github/workflows/assemble.yml
+++ b/.github/workflows/assemble.yml
@@ -32,7 +32,10 @@ jobs:
         if: runner.os == 'macos'
         continue-on-error: true
         run: |
-          brew install docker colima coreutils
+          # Force QEMU 9.0.2 usage
+          curl https://raw.githubusercontent.com/Homebrew/homebrew-core/f1a9cf104a9a51779c7a532b658c490f69974839/Formula/q/qemu.rb > qemu.rb
+          brew install qemu.rb
+          HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK=1 HOMEBREW_NO_AUTO_UPDATE=1 brew install docker colima coreutils
           gtimeout 15m colima start
         shell: bash
       - name: Run Gradle (assemble)
diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml
index eb80b5a1c6ff1..8c72586e2da09 100644
--- a/.github/workflows/version.yml
+++ b/.github/workflows/version.yml
@@ -59,7 +59,7 @@ jobs:
           sed -i "s/CURRENT = $CURRENT_VERSION_UNDERSCORE;/CURRENT = $NEXT_VERSION_UNDERSCORE;/g" libs/core/src/main/java/org/opensearch/Version.java

       - name: Create Pull Request
-        uses: peter-evans/create-pull-request@v6
+        uses: peter-evans/create-pull-request@v7
         with:
           token: ${{ steps.github_app_token.outputs.token }}
           base: ${{ env.BASE }}
@@ -86,7 +86,7 @@ jobs:
           sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n    public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java

       - name: Create Pull Request
-        uses: peter-evans/create-pull-request@v6
+        uses: peter-evans/create-pull-request@v7
         with:
           token: ${{ steps.github_app_token.outputs.token }}
           base: ${{ env.BASE_X }}
@@ -114,7 +114,7 @@ jobs:
           sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n    public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java

       - name: Create Pull Request
-        uses: peter-evans/create-pull-request@v6
+        uses: peter-evans/create-pull-request@v7
         with:
           token: ${{ steps.github_app_token.outputs.token }}
           base: main
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6397b81a5f1c3..21ce8ba4daf96 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,9 +5,14 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 ## [Unreleased 2.x]
 ### Added
+- Add path prefix support to hashed prefix snapshots ([#15664](https://github.com/opensearch-project/OpenSearch/pull/15664))
+- [Workload Management] QueryGroup resource cancellation framework changes ([#15651](https://github.com/opensearch-project/OpenSearch/pull/15651))

 ### Dependencies
 - Bump `com.azure:azure-identity` from 1.13.0 to 1.13.2 ([#15578](https://github.com/opensearch-project/OpenSearch/pull/15578))
+- Bump `protobuf` from 3.22.3 to 3.25.4 ([#15684](https://github.com/opensearch-project/OpenSearch/pull/15684))
+- Bump `peter-evans/create-pull-request` from 6 to 7 ([#15863](https://github.com/opensearch-project/OpenSearch/pull/15863))
+- Bump `com.nimbusds:oauth2-oidc-sdk` from 11.9.1 to 11.19.1 ([#15862](https://github.com/opensearch-project/OpenSearch/pull/15862))

 ### Changed
@@ -17,6 +22,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 ### Removed

 ### Fixed
+- Fix wildcard query containing escaped character ([#15737](https://github.com/opensearch-project/OpenSearch/pull/15737))

 ### Security
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index fce5e4a194837..ea04bd1e33d4a 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -20,7 +20,7 @@ woodstox = 6.4.0
 kotlin = 1.7.10
 antlr4 = 4.13.1
 guava = 32.1.1-jre
-protobuf = 3.22.3
+protobuf = 3.25.4
 jakarta_annotation = 1.3.5
 google_http_client = 1.44.1
 tdigest = 3.2
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index 39a291b258efb..5c6205ebf24d4 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -11,7 +11,7 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-all.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.10.1-all.zip
 zipStoreBase=GRADLE_USER_HOME
 zipStorePath=wrapper/dists
-distributionSha256Sum=682b4df7fe5accdca84a4d1ef6a3a6ab096b3efd5edf7de2bd8c758d95a93703
+distributionSha256Sum=fdfca5dbc2834f0ece5020465737538e5ba679deeff5ab6c09621d67f8bb1a15
diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle
index e3dd8c1bd0dfa..97c0acc79c6ea 100644
--- a/plugins/repository-azure/build.gradle
+++ b/plugins/repository-azure/build.gradle
@@ -62,7 +62,7 @@ dependencies {
   api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0'
   api "net.java.dev.jna:jna-platform:${versions.jna}"
   api 'com.microsoft.azure:msal4j:1.17.0'
-  api 'com.nimbusds:oauth2-oidc-sdk:11.9.1'
+  api 'com.nimbusds:oauth2-oidc-sdk:11.19.1'
   api 'com.nimbusds:nimbus-jose-jwt:9.40'
   api 'com.nimbusds:content-type:2.3'
   api 'com.nimbusds:lang-tag:1.7'
diff --git a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.19.1.jar.sha1 b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.19.1.jar.sha1
new file mode 100644
index 0000000000000..7d83b0e8ca639
--- /dev/null
+++ b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.19.1.jar.sha1
@@ -0,0 +1 @@
+58db85a807a56ae76baffa519772271ad5808195
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.9.1.jar.sha1 b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.9.1.jar.sha1
deleted file mode 100644
index 96d9a196a172a..0000000000000
--- a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.9.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-fa9a2e447e2cef4dfda40a854dd7ec35624a7799
\ No newline at end of file
diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle
index 8beec1d571f3b..ab651ff9939fd 100644
--- a/plugins/repository-gcs/build.gradle
+++ b/plugins/repository-gcs/build.gradle
@@ -148,9 +148,6 @@ thirdPartyAudit {
     'com.google.appengine.api.urlfetch.HTTPResponse',
     'com.google.appengine.api.urlfetch.URLFetchService',
     'com.google.appengine.api.urlfetch.URLFetchServiceFactory',
-    'com.google.protobuf.MapFieldBuilder',
-    'com.google.protobuf.MapFieldBuilder$Converter',
-    'com.google.protobuf.MapFieldReflectionAccessor',
     'com.google.protobuf.util.JsonFormat',
     'com.google.protobuf.util.JsonFormat$Parser',
     'com.google.protobuf.util.JsonFormat$Printer',
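Note: the wrapper bump changes `distributionUrl` and `distributionSha256Sum` together, and Gradle refuses the download if the zip does not hash to that value. A minimal sketch of the same check for anyone validating a mirrored copy by hand — the local file path is illustrative, not part of this diff:

```java
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;
import java.util.HexFormat;

public class WrapperChecksum {
    public static void main(String[] args) throws Exception {
        // Path to a locally downloaded distribution (illustrative).
        Path zip = Path.of("gradle-8.10.1-all.zip");
        byte[] digest = MessageDigest.getInstance("SHA-256").digest(Files.readAllBytes(zip));
        // The hex string must match distributionSha256Sum in gradle-wrapper.properties.
        System.out.println(HexFormat.of().formatHex(digest));
    }
}
```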
diff --git a/release-notes/opensearch.release-notes-2.17.0.md b/release-notes/opensearch.release-notes-2.17.0.md
index 41c7a89ea634c..65407d4f426c7 100644
--- a/release-notes/opensearch.release-notes-2.17.0.md
+++ b/release-notes/opensearch.release-notes-2.17.0.md
@@ -51,6 +51,8 @@
 - Relax the join validation for Remote State publication ([#15471](https://github.com/opensearch-project/OpenSearch/pull/15471))
 - Reset DiscoveryNodes in all transport node actions request ([#15131](https://github.com/opensearch-project/OpenSearch/pull/15131))
 - MultiTermQueries in keyword fields now default to `indexed` approach and gated behind cluster setting ([#15637](https://github.com/opensearch-project/OpenSearch/pull/15637))
+- Static RemotePublication setting added, removed experimental feature flag ([#15478](https://github.com/opensearch-project/OpenSearch/pull/15478))
+- [Remote Publication] Upload incremental cluster state on master re-election ([#15145](https://github.com/opensearch-project/OpenSearch/pull/15145))

 ### Dependencies
 - Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081))
diff --git a/server/licenses/protobuf-java-3.22.3.jar.sha1 b/server/licenses/protobuf-java-3.22.3.jar.sha1
deleted file mode 100644
index 80feeec023e7b..0000000000000
--- a/server/licenses/protobuf-java-3.22.3.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-fdee98b8f6abab73f146a4edb4c09e56f8278d03
\ No newline at end of file
diff --git a/server/licenses/protobuf-java-3.25.4.jar.sha1 b/server/licenses/protobuf-java-3.25.4.jar.sha1
new file mode 100644
index 0000000000000..21b6fdfd24dc8
--- /dev/null
+++ b/server/licenses/protobuf-java-3.25.4.jar.sha1
@@ -0,0 +1 @@
+43fcb86e4a411516c7fc681450f1516de0b862a2
\ No newline at end of file
diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerIT.java
index a50466573c575..dd1c65bd4c178 100644
--- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerIT.java
@@ -35,12 +35,12 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;

-import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL;
 import static org.opensearch.gateway.remote.RemoteClusterStateCleanupManager.CLUSTER_STATE_CLEANUP_INTERVAL_DEFAULT;
 import static org.opensearch.gateway.remote.RemoteClusterStateCleanupManager.REMOTE_CLUSTER_STATE_CLEANUP_INTERVAL_SETTING;
 import static org.opensearch.gateway.remote.RemoteClusterStateCleanupManager.RETAINED_MANIFESTS;
 import static org.opensearch.gateway.remote.RemoteClusterStateCleanupManager.SKIP_CLEANUP_STATE_CHANGES;
 import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING;
+import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_PUBLICATION_SETTING_KEY;
 import static org.opensearch.gateway.remote.RemoteUploadStats.REMOTE_UPLOAD;
 import static org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_TABLE;
 import static org.opensearch.indices.IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING;
@@ -189,7 +189,7 @@ public void testRemoteCleanupDeleteStaleIndexRoutingFiles() throws Exception {
                 RemoteStoreEnums.PathType.HASHED_PREFIX.toString()
             )
             .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, REMOTE_ROUTING_TABLE_REPO)
-            .put(REMOTE_PUBLICATION_EXPERIMENTAL, true);
+            .put(REMOTE_PUBLICATION_SETTING_KEY, true);

         int shardCount = randomIntBetween(1, 2);
         int replicaCount = 1;
diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteRoutingTableServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteRoutingTableServiceIT.java
index 4d4ad89cc0abb..78f03371b9883 100644
--- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteRoutingTableServiceIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteRoutingTableServiceIT.java
@@ -37,8 +37,8 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;

-import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL;
 import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING;
+import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_PUBLICATION_SETTING_KEY;
 import static org.opensearch.gateway.remote.routingtable.RemoteIndexRoutingTable.INDEX_ROUTING_TABLE;
 import static org.opensearch.indices.IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY;
@@ -66,7 +66,7 @@ protected Settings nodeSettings(int nodeOrdinal) {
                 RemoteStoreEnums.PathType.HASHED_PREFIX.toString()
             )
             .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, REMOTE_ROUTING_TABLE_REPO)
-            .put(REMOTE_PUBLICATION_EXPERIMENTAL, true)
+            .put(REMOTE_PUBLICATION_SETTING_KEY, true)
             .put(
                 RemoteClusterStateService.REMOTE_CLUSTER_STATE_CHECKSUM_VALIDATION_MODE_SETTING.getKey(),
                 RemoteClusterStateService.RemoteClusterStateValidationMode.FAILURE
REMOTE_ROUTING_PREFIX = "_"; private boolean isRemoteStateEnabled = true; - private String isRemotePublicationEnabled = "true"; + private boolean isRemotePublicationEnabled = true; private boolean hasRemoteStateCharPrefix; private boolean hasRemoteRoutingCharPrefix; @@ -70,19 +70,11 @@ public class RemoteStatePublicationIT extends RemoteStoreBaseIntegTestCase { public void setup() { asyncUploadMockFsRepo = false; isRemoteStateEnabled = true; - isRemotePublicationEnabled = "true"; + isRemotePublicationEnabled = true; hasRemoteStateCharPrefix = randomBoolean(); hasRemoteRoutingCharPrefix = randomBoolean(); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL, isRemotePublicationEnabled) - .build(); - } - @Override protected Settings nodeSettings(int nodeOrdinal) { String routingTableRepoName = "remote-routing-repo"; @@ -100,6 +92,7 @@ protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), isRemoteStateEnabled) + .put(REMOTE_PUBLICATION_SETTING_KEY, isRemotePublicationEnabled) .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, routingTableRepoName) .put(routingTableRepoTypeAttributeKey, ReloadableFsRepository.TYPE) .put(routingTableRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) @@ -107,6 +100,7 @@ protected Settings nodeSettings(int nodeOrdinal) { RemoteClusterStateService.REMOTE_CLUSTER_STATE_CHECKSUM_VALIDATION_MODE_SETTING.getKey(), RemoteClusterStateService.RemoteClusterStateValidationMode.FAILURE ) + .put(REMOTE_PUBLICATION_SETTING_KEY, isRemotePublicationEnabled) .put( RemoteClusterStateService.CLUSTER_REMOTE_STORE_STATE_PATH_PREFIX.getKey(), hasRemoteStateCharPrefix ? 
REMOTE_STATE_PREFIX : "" @@ -224,7 +218,6 @@ public void testRemotePublicationDownloadStats() { .get(); assertDataNodeDownloadStats(nodesStatsResponseDataNode); - } private void assertDataNodeDownloadStats(NodesStatsResponse nodesStatsResponse) { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java index 280fd13f0fdcf..e8df2c8686610 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java @@ -76,29 +76,4 @@ protected void verifyRestoredData(Map indexStats, String indexName protected void verifyRestoredData(Map indexStats, String indexName) throws Exception { verifyRestoredData(indexStats, indexName, true); } - - public void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, String indices, int replicaCount, int shardCount) { - prepareCluster(numClusterManagerNodes, numDataOnlyNodes, indices, replicaCount, shardCount, Settings.EMPTY); - } - - public void prepareCluster( - int numClusterManagerNodes, - int numDataOnlyNodes, - String indices, - int replicaCount, - int shardCount, - Settings settings - ) { - prepareCluster(numClusterManagerNodes, numDataOnlyNodes, settings); - for (String index : indices.split(",")) { - createIndex(index, remoteStoreIndexSettings(replicaCount, shardCount)); - ensureYellowAndNoInitializingShards(index); - ensureGreen(index); - } - } - - public void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, Settings settings) { - internalCluster().startClusterManagerOnlyNodes(numClusterManagerNodes, settings); - internalCluster().startDataOnlyNodes(numDataOnlyNodes, settings); - } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index ba06bb463e5a8..bcb0d54c0a25c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -351,13 +351,7 @@ protected void restore(boolean restoreAllShards, String... 
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java
index ba06bb463e5a8..bcb0d54c0a25c 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java
@@ -351,13 +351,7 @@ protected void restore(boolean restoreAllShards, String... indices) {
     }

     protected void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, String indices, int replicaCount, int shardCount) {
-        internalCluster().startClusterManagerOnlyNodes(numClusterManagerNodes);
-        internalCluster().startDataOnlyNodes(numDataOnlyNodes);
-        for (String index : indices.split(",")) {
-            createIndex(index, remoteStoreIndexSettings(replicaCount, shardCount));
-            ensureYellowAndNoInitializingShards(index);
-            ensureGreen(index);
-        }
+        prepareCluster(numClusterManagerNodes, numDataOnlyNodes, indices, replicaCount, shardCount, Settings.EMPTY);
     }

     protected void prepareCluster(
@@ -368,11 +362,16 @@ protected void prepareCluster(
         int shardCount,
         Settings settings
     ) {
-        internalCluster().startClusterManagerOnlyNodes(numClusterManagerNodes, settings);
-        internalCluster().startDataOnlyNodes(numDataOnlyNodes, settings);
+        prepareCluster(numClusterManagerNodes, numDataOnlyNodes, settings);
         for (String index : indices.split(",")) {
             createIndex(index, remoteStoreIndexSettings(replicaCount, shardCount));
+            ensureYellowAndNoInitializingShards(index);
             ensureGreen(index);
         }
     }
+
+    protected void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, Settings settings) {
+        internalCluster().startClusterManagerOnlyNodes(numClusterManagerNodes, settings);
+        internalCluster().startDataOnlyNodes(numDataOnlyNodes, settings);
+    }
 }
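Note: the refactor above leaves one overload owning the node-startup logic and routes the older signatures through it. A schematic of the telescoping-overload pattern, with hypothetical names standing in for the test-framework calls:

```java
public class OverloadChain {
    void prepare(int managers, int dataNodes) {
        prepare(managers, dataNodes, "");   // defaults funnel into the general form
    }

    void prepare(int managers, int dataNodes, String indices) {
        startNodes(managers, dataNodes);    // single owner of the startup logic
        // per-index setup happens here, once, for every caller
    }

    void startNodes(int managers, int dataNodes) {
        // one place to change how nodes come up (settings, ordering, ...)
    }
}
```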
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java
index 00e2d9bd92158..e694bf2b6ba80 100644
--- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java
@@ -64,7 +64,6 @@
 import java.util.List;
 import java.util.concurrent.ExecutionException;

-import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.hasSize;
@@ -145,7 +144,7 @@ public void testShardClone() throws Exception {
     }

     public void testCloneSnapshotIndex() throws Exception {
-        internalCluster().startClusterManagerOnlyNode();
+        internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS);
         internalCluster().startDataOnlyNode();
         final String repoName = "repo-name";
         createRepository(repoName, "fs");
@@ -336,7 +335,7 @@ public void testClonePreventsSnapshotDelete() throws Exception {
         indexRandomDocs(indexName, randomIntBetween(20, 100));

         final String targetSnapshot = "target-snapshot";
-        blockNodeOnAnyFiles(repoName, clusterManagerName);
+        blockClusterManagerOnWriteIndexFile(repoName);
         final ActionFuture<AcknowledgedResponse> cloneFuture = startClone(repoName, sourceSnapshot, targetSnapshot, indexName);
         waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L));
         assertFalse(cloneFuture.isDone());
@@ -444,7 +443,7 @@ public void testLongRunningSnapshotAllowsConcurrentClone() throws Exception {
     }

     public void testDeletePreventsClone() throws Exception {
-        final String clusterManagerName = internalCluster().startClusterManagerOnlyNode();
+        final String clusterManagerName = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS);
         internalCluster().startDataOnlyNode();
         final String repoName = "repo-name";
         createRepository(repoName, "mock");
@@ -457,7 +456,7 @@ public void testDeletePreventsClone() throws Exception {
         indexRandomDocs(indexName, randomIntBetween(20, 100));

         final String targetSnapshot = "target-snapshot";
-        blockNodeOnAnyFiles(repoName, clusterManagerName);
+        blockClusterManagerOnWriteIndexFile(repoName);
         final ActionFuture<AcknowledgedResponse> deleteFuture = startDeleteSnapshot(repoName, sourceSnapshot);
         waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L));
         assertFalse(deleteFuture.isDone());
@@ -609,7 +608,7 @@ public void testClusterManagerFailoverDuringCloneStep2() throws Exception {

     public void testExceptionDuringShardClone() throws Exception {
         // large snapshot pool so blocked snapshot threads from cloning don't prevent concurrent snapshot finalizations
-        internalCluster().startClusterManagerOnlyNodes(3, LARGE_SNAPSHOT_POOL_SETTINGS);
+        internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS);
         internalCluster().startDataOnlyNode();
         final String repoName = "test-repo";
         createRepository(repoName, "mock");
@@ -620,7 +619,7 @@ public void testExceptionDuringShardClone() throws Exception {
         createFullSnapshot(repoName, sourceSnapshot);

         final String targetSnapshot = "target-snapshot";
-        blockClusterManagerFromFinalizingSnapshotOnSnapFile(repoName);
+        blockClusterManagerFromFinalizingSnapshotOnIndexFile(repoName);
         final ActionFuture<AcknowledgedResponse> cloneFuture = startCloneFromDataNode(repoName, sourceSnapshot, targetSnapshot, testIndex);
         awaitNumberOfSnapshotsInProgress(1);
         final String clusterManagerNode = internalCluster().getClusterManagerName();
@@ -698,7 +697,7 @@ public void testStartSnapshotWithSuccessfulShardClonePendingFinalization() throws Exception {
     }

     public void testStartCloneWithSuccessfulShardClonePendingFinalization() throws Exception {
-        final String clusterManagerName = internalCluster().startClusterManagerOnlyNode();
+        final String clusterManagerName = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS);
         internalCluster().startDataOnlyNode();
         final String repoName = "test-repo";
         createRepository(repoName, "mock");
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotV2IT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotV2IT.java
new file mode 100644
index 0000000000000..c6744ae62db60
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotV2IT.java
@@ -0,0 +1,438 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+package org.opensearch.snapshots;
+
+import org.opensearch.action.ActionRunnable;
+import org.opensearch.action.DocWriteResponse;
+import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
+import org.opensearch.action.delete.DeleteResponse;
+import org.opensearch.action.support.PlainActionFuture;
+import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.client.Client;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.core.common.unit.ByteSizeUnit;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.indices.RemoteStoreSettings;
+import org.opensearch.repositories.RepositoriesService;
+import org.opensearch.repositories.RepositoryData;
+import org.opensearch.repositories.blobstore.BlobStoreRepository;
+import org.opensearch.repositories.fs.FsRepository;
+import org.opensearch.test.OpenSearchIntegTestCase;
+
+import java.nio.file.Path;
+
+import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
+public class CloneSnapshotV2IT extends AbstractSnapshotIntegTestCase {
+
+    public void testCloneShallowCopyV2() throws Exception {
+        disableRepoConsistencyCheck("Remote store repository is being used in the test");
+        final Path remoteStoreRepoPath = randomRepoPath();
+        internalCluster().startClusterManagerOnlyNode(snapshotV2Settings(remoteStoreRepoPath));
+        internalCluster().startDataOnlyNode(snapshotV2Settings(remoteStoreRepoPath));
+        internalCluster().startDataOnlyNode(snapshotV2Settings(remoteStoreRepoPath));
+
+        String indexName1 = "testindex1";
+        String indexName2 = "testindex2";
+        String indexName3 = "testindex3";
+        String snapshotRepoName = "test-clone-snapshot-repo";
+        String snapshotName1 = "test-create-snapshot1";
+        Path absolutePath1 = randomRepoPath().toAbsolutePath();
+        logger.info("Snapshot Path [{}]", absolutePath1);
+
+        Client client = client();
+
+        assertAcked(
+            client.admin()
+                .cluster()
+                .preparePutRepository(snapshotRepoName)
+                .setType(FsRepository.TYPE)
+                .setSettings(
+                    Settings.builder()
+                        .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1)
+                        .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean())
+                        .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
+                        .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true)
+                        .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true)
+                )
+        );
+
+        createIndex(indexName1, getRemoteStoreBackedIndexSettings());
+        createIndex(indexName2, getRemoteStoreBackedIndexSettings());
+
+        final int numDocsInIndex1 = 10;
+        final int numDocsInIndex2 = 20;
+        indexRandomDocs(indexName1, numDocsInIndex1);
+        indexRandomDocs(indexName2, numDocsInIndex2);
+        ensureGreen(indexName1, indexName2);
+
+        CreateSnapshotResponse createSnapshotResponse = client().admin()
+            .cluster()
+            .prepareCreateSnapshot(snapshotRepoName, snapshotName1)
+            .setWaitForCompletion(true)
+            .get();
+        SnapshotInfo sourceSnapshotInfo = createSnapshotResponse.getSnapshotInfo();
+        assertThat(sourceSnapshotInfo.state(), equalTo(SnapshotState.SUCCESS));
+        assertThat(sourceSnapshotInfo.successfulShards(), greaterThan(0));
+        assertThat(sourceSnapshotInfo.successfulShards(), equalTo(sourceSnapshotInfo.totalShards()));
+        assertThat(sourceSnapshotInfo.snapshotId().getName(), equalTo(snapshotName1));
+
+        // Validate that the snapshot was created
+        final BlobStoreRepository repository = (BlobStoreRepository) internalCluster().getCurrentClusterManagerNodeInstance(
+            RepositoriesService.class
+        ).repository(snapshotRepoName);
+        PlainActionFuture<RepositoryData> repositoryDataPlainActionFuture = new PlainActionFuture<>();
+        repository.getRepositoryData(repositoryDataPlainActionFuture);
+
+        RepositoryData repositoryData = repositoryDataPlainActionFuture.get();
+
+        assertTrue(repositoryData.getSnapshotIds().contains(sourceSnapshotInfo.snapshotId()));
+
+        createIndex(indexName3, getRemoteStoreBackedIndexSettings());
+        indexRandomDocs(indexName3, 10);
+        ensureGreen(indexName3);
+
+        AcknowledgedResponse response = client().admin()
+            .cluster()
+            .prepareCloneSnapshot(snapshotRepoName, snapshotName1, "test_clone_snapshot1")
+            .setIndices("*")
+            .get();
+        assertTrue(response.isAcknowledged());
+        awaitClusterManagerFinishRepoOperations();
+
+        // Validate that snapshot is present in repository data
+        PlainActionFuture<RepositoryData> repositoryDataPlainActionFutureClone = new PlainActionFuture<>();
+        repository.getRepositoryData(repositoryDataPlainActionFutureClone);
+
+        repositoryData = repositoryDataPlainActionFutureClone.get();
+        assertEquals(repositoryData.getSnapshotIds().size(), 2);
+        boolean foundCloneInRepoData = false;
+        SnapshotId cloneSnapshotId = null;
+        for (SnapshotId snapshotId : repositoryData.getSnapshotIds()) {
+            if (snapshotId.getName().equals("test_clone_snapshot1")) {
+                foundCloneInRepoData = true;
+                cloneSnapshotId = snapshotId;
+            }
+        }
+        final SnapshotId cloneSnapshotIdFinal = cloneSnapshotId;
+        SnapshotInfo cloneSnapshotInfo = PlainActionFuture.get(
+            f -> repository.threadPool().generic().execute(ActionRunnable.supply(f, () -> repository.getSnapshotInfo(cloneSnapshotIdFinal)))
+        );
+
+        assertTrue(foundCloneInRepoData);
+
+        assertThat(cloneSnapshotInfo.getPinnedTimestamp(), equalTo(sourceSnapshotInfo.getPinnedTimestamp()));
+        for (String index : sourceSnapshotInfo.indices()) {
+            assertTrue(cloneSnapshotInfo.indices().contains(index));
+        }
+        assertThat(cloneSnapshotInfo.totalShards(), equalTo(sourceSnapshotInfo.totalShards()));
+    }
+
+    public void testCloneShallowCopyAfterDisablingV2() throws Exception {
+        disableRepoConsistencyCheck("Remote store repository is being used in the test");
+        final Path remoteStoreRepoPath = randomRepoPath();
+        internalCluster().startClusterManagerOnlyNode(snapshotV2Settings(remoteStoreRepoPath));
+        internalCluster().startDataOnlyNode(snapshotV2Settings(remoteStoreRepoPath));
+        internalCluster().startDataOnlyNode(snapshotV2Settings(remoteStoreRepoPath));
+
+        String indexName1 = "testindex1";
+        String indexName2 = "testindex2";
+        String indexName3 = "testindex3";
+        String snapshotRepoName = "test-clone-snapshot-repo";
+        String sourceSnapshotV2 = "test-source-snapshot-v2";
+        String sourceSnapshotV1 = "test-source-snapshot-v1";
+        String cloneSnapshotV2 = "test-clone-snapshot-v2";
+        Path absolutePath1 = randomRepoPath().toAbsolutePath();
+        logger.info("Snapshot Path [{}]", absolutePath1);
+
+        Client client = client();
+
+        assertAcked(
+            client.admin()
+                .cluster()
+                .preparePutRepository(snapshotRepoName)
+                .setType(FsRepository.TYPE)
+                .setSettings(
+                    Settings.builder()
+                        .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1)
+                        .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean())
+                        .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
+                        .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true)
+                        .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true)
+                )
+        );
+
+        createIndex(indexName1, getRemoteStoreBackedIndexSettings());
+        createIndex(indexName2, getRemoteStoreBackedIndexSettings());
+
+        final int numDocsInIndex1 = 10;
+        final int numDocsInIndex2 = 20;
+        indexRandomDocs(indexName1, numDocsInIndex1);
+        indexRandomDocs(indexName2, numDocsInIndex2);
+        ensureGreen(indexName1, indexName2);
+
+        // create source snapshot which is v2
+        CreateSnapshotResponse createSnapshotResponse = client().admin()
+            .cluster()
+            .prepareCreateSnapshot(snapshotRepoName, sourceSnapshotV2)
+            .setWaitForCompletion(true)
+            .get();
+        SnapshotInfo sourceSnapshotInfo = createSnapshotResponse.getSnapshotInfo();
+        assertThat(sourceSnapshotInfo.state(), equalTo(SnapshotState.SUCCESS));
+        assertThat(sourceSnapshotInfo.successfulShards(), greaterThan(0));
+        assertThat(sourceSnapshotInfo.successfulShards(), equalTo(sourceSnapshotInfo.totalShards()));
+        assertThat(sourceSnapshotInfo.snapshotId().getName(), equalTo(sourceSnapshotV2));
+
+        // Validate that the snapshot was created
+        final BlobStoreRepository repository = (BlobStoreRepository) internalCluster().getCurrentClusterManagerNodeInstance(
+            RepositoriesService.class
+        ).repository(snapshotRepoName);
+        PlainActionFuture<RepositoryData> repositoryDataPlainActionFuture = new PlainActionFuture<>();
+        repository.getRepositoryData(repositoryDataPlainActionFuture);
+
+        RepositoryData repositoryData = repositoryDataPlainActionFuture.get();
+
+        assertTrue(repositoryData.getSnapshotIds().contains(sourceSnapshotInfo.snapshotId()));
+
+        createIndex(indexName3, getRemoteStoreBackedIndexSettings());
+        indexRandomDocs(indexName3, 10);
+        ensureGreen(indexName3);
+
+        // disable snapshot v2 in repo
+        assertAcked(
+            client.admin()
+                .cluster()
+                .preparePutRepository(snapshotRepoName)
+                .setType(FsRepository.TYPE)
+                .setSettings(
+                    Settings.builder()
+                        .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1)
+                        .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean())
+                        .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
+                        .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true)
+                        .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), false)
+                )
+        );
+
+        // validate that the created snapshot is v1
+        CreateSnapshotResponse createSnapshotResponseV1 = client().admin()
+            .cluster()
+            .prepareCreateSnapshot(snapshotRepoName, sourceSnapshotV1)
+            .setWaitForCompletion(true)
+            .get();
+        SnapshotInfo sourceSnapshotInfoV1 = createSnapshotResponseV1.getSnapshotInfo();
+        assertThat(sourceSnapshotInfoV1.state(), equalTo(SnapshotState.SUCCESS));
+        assertThat(sourceSnapshotInfoV1.successfulShards(), greaterThan(0));
+        assertThat(sourceSnapshotInfoV1.successfulShards(), equalTo(sourceSnapshotInfoV1.totalShards()));
+        assertThat(sourceSnapshotInfoV1.getPinnedTimestamp(), equalTo(0L));
+
+        // Validate that snapshot is present in repository data
+        PlainActionFuture<RepositoryData> repositoryDataV1PlainActionFuture = new PlainActionFuture<>();
+        BlobStoreRepository repositoryV1 = (BlobStoreRepository) internalCluster().getCurrentClusterManagerNodeInstance(
+            RepositoriesService.class
+        ).repository(snapshotRepoName);
+        repositoryV1.getRepositoryData(repositoryDataV1PlainActionFuture);
+
+        repositoryData = repositoryDataV1PlainActionFuture.get();
+
+        assertTrue(repositoryData.getSnapshotIds().contains(sourceSnapshotInfoV1.snapshotId()));
+        assertEquals(repositoryData.getSnapshotIds().size(), 2);
+
+        // clone should get created for v2 snapshot
+        AcknowledgedResponse response = client().admin()
+            .cluster()
+            .prepareCloneSnapshot(snapshotRepoName, sourceSnapshotV2, cloneSnapshotV2)
+            .setIndices("*")
+            .get();
+        assertTrue(response.isAcknowledged());
+        awaitClusterManagerFinishRepoOperations();
+
+        // Validate that snapshot is present in repository data
+        PlainActionFuture<RepositoryData> repositoryDataCloneV2PlainActionFuture = new PlainActionFuture<>();
+        BlobStoreRepository repositoryCloneV2 = (BlobStoreRepository) internalCluster().getCurrentClusterManagerNodeInstance(
+            RepositoriesService.class
+        ).repository(snapshotRepoName);
+        repositoryCloneV2.getRepositoryData(repositoryDataCloneV2PlainActionFuture);
+
+        repositoryData = repositoryDataCloneV2PlainActionFuture.get();
+
+        assertEquals(repositoryData.getSnapshotIds().size(), 3);
+        boolean foundCloneInRepoData = false;
+        SnapshotId cloneSnapshotId = null;
+        for (SnapshotId snapshotId : repositoryData.getSnapshotIds()) {
+            if (snapshotId.getName().equals(cloneSnapshotV2)) {
+                foundCloneInRepoData = true;
+                cloneSnapshotId = snapshotId;
+            }
+        }
+        final SnapshotId cloneSnapshotIdFinal = cloneSnapshotId;
+        SnapshotInfo cloneSnapshotInfo = PlainActionFuture.get(
+            f -> repository.threadPool().generic().execute(ActionRunnable.supply(f, () -> repository.getSnapshotInfo(cloneSnapshotIdFinal)))
+        );
+
+        assertTrue(foundCloneInRepoData);
+        // pinned timestamp value in clone snapshot v2 matches source snapshot v2
+        assertThat(cloneSnapshotInfo.getPinnedTimestamp(), equalTo(sourceSnapshotInfo.getPinnedTimestamp()));
+        for (String index : sourceSnapshotInfo.indices()) {
+            assertTrue(cloneSnapshotInfo.indices().contains(index));
+        }
+        assertThat(cloneSnapshotInfo.totalShards(), equalTo(sourceSnapshotInfo.totalShards()));
+    }
+
+    public void testRestoreFromClone() throws Exception {
+        disableRepoConsistencyCheck("Remote store repository is being used in the test");
+        final Path remoteStoreRepoPath = randomRepoPath();
+
+        internalCluster().startClusterManagerOnlyNode(snapshotV2Settings(remoteStoreRepoPath));
+        internalCluster().startDataOnlyNode(snapshotV2Settings(remoteStoreRepoPath));
+        internalCluster().startDataOnlyNode(snapshotV2Settings(remoteStoreRepoPath));
+
+        String indexName1 = "testindex1";
+        String indexName2 = "testindex2";
+
+        String snapshotRepoName = "test-clone-snapshot-repo";
+        String sourceSnapshot = "test-source-snapshot";
+        String cloneSnapshot = "test-clone-snapshot";
+
+        Path absolutePath1 = randomRepoPath().toAbsolutePath();
+        logger.info("Snapshot Path [{}]", absolutePath1);
+
+        String restoredIndexName1 = indexName1 + "-restored";
+
+        Client client = client();
+
+        assertAcked(
+            client.admin()
+                .cluster()
+                .preparePutRepository(snapshotRepoName)
+                .setType(FsRepository.TYPE)
+                .setSettings(
+                    Settings.builder()
+                        .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1)
+                        .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean())
+                        .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
+                        .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true)
+                        .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true)
+                )
+        );
+
+        createIndex(indexName1, getRemoteStoreBackedIndexSettings());
+        createIndex(indexName2, getRemoteStoreBackedIndexSettings());
+
+        final int numDocsInIndex1 = 10;
+        final int numDocsInIndex2 = 20;
+        indexRandomDocs(indexName1, numDocsInIndex1);
+        indexRandomDocs(indexName2, numDocsInIndex2);
+        ensureGreen(indexName1, indexName2);
+
+        logger.info("--> create source snapshot");
+
+        CreateSnapshotResponse createSnapshotResponse = client().admin()
+            .cluster()
+            .prepareCreateSnapshot(snapshotRepoName, sourceSnapshot)
+            .setWaitForCompletion(true)
+            .get();
+        SnapshotInfo sourceSnapshotInfo = createSnapshotResponse.getSnapshotInfo();
+        assertThat(sourceSnapshotInfo.state(), equalTo(SnapshotState.SUCCESS));
+        assertThat(sourceSnapshotInfo.successfulShards(), greaterThan(0));
+        assertThat(sourceSnapshotInfo.successfulShards(), equalTo(sourceSnapshotInfo.totalShards()));
+        assertThat(sourceSnapshotInfo.snapshotId().getName(), equalTo(sourceSnapshot));
+
+        AcknowledgedResponse response = client().admin()
+            .cluster()
+            .prepareCloneSnapshot(snapshotRepoName, sourceSnapshot, cloneSnapshot)
+            .setIndices("*")
+            .get();
+        assertTrue(response.isAcknowledged());
+
+        DeleteResponse deleteResponse = client().prepareDelete(indexName1, "0").execute().actionGet();
+        assertEquals(deleteResponse.getResult(), DocWriteResponse.Result.DELETED);
+        ensureGreen(indexName1);
+
+        deleteResponse = client().prepareDelete(indexName1, "1").execute().actionGet();
+        assertEquals(deleteResponse.getResult(), DocWriteResponse.Result.DELETED);
+        ensureGreen(indexName1);
+
+        // delete the source snapshot
+        AcknowledgedResponse deleteSnapshotResponse = internalCluster().clusterManagerClient()
+            .admin()
+            .cluster()
+            .prepareDeleteSnapshot(snapshotRepoName, sourceSnapshot)
+            .get();
+        assertAcked(deleteSnapshotResponse);
+
+        deleteResponse = client().prepareDelete(indexName1, "2").execute().actionGet();
+        assertEquals(deleteResponse.getResult(), DocWriteResponse.Result.DELETED);
+        ensureGreen(indexName1);
+        ensureGreen(indexName1);
+
+        // restore from clone
+        RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin()
+            .cluster()
+            .prepareRestoreSnapshot(snapshotRepoName, cloneSnapshot)
+            .setWaitForCompletion(true)
+            .setIndices(indexName1)
+            .setRenamePattern(indexName1)
+            .setRenameReplacement(restoredIndexName1)
+            .get();
+
+        assertEquals(restoreSnapshotResponse1.status(), RestStatus.OK);
+        ensureGreen(restoredIndexName1, indexName2);
+        assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1);
+        assertDocsPresentInIndex(client, indexName2, numDocsInIndex2);
+    }
+
+    private void assertDocsPresentInIndex(Client client, String indexName, int numOfDocs) {
+        for (int i = 0; i < numOfDocs; i++) {
+            String id = Integer.toString(i);
+            logger.info("checking for index " + indexName + " with docId" + id);
+            assertTrue("doc with id" + id + " is not present for index " + indexName, client.prepareGet(indexName, id).get().isExists());
+        }
+    }
+
+    private Settings snapshotV2Settings(Path remoteStoreRepoPath) {
+        String REMOTE_REPO_NAME = "remote-store-repo-name";
+        Settings settings = Settings.builder()
+            .put(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath))
+            .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), true)
+            .build();
+        return settings;
+    }
+}
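Note: the new test repeatedly fetches `RepositoryData` by handing a `PlainActionFuture` to a callback-style API and blocking on it. The same bridge pattern in isolation, on plain JDK types (the callback below stands in for `Repository#getRepositoryData(ActionListener)`; names are hypothetical):

```java
import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;

public class FutureBridge {
    // Callback-style API, standing in for the repository's async getter.
    static void getDataAsync(Consumer<String> listener) {
        new Thread(() -> listener.accept("repository-data")).start();
    }

    public static void main(String[] args) throws Exception {
        CompletableFuture<String> future = new CompletableFuture<>();
        getDataAsync(future::complete);    // the future doubles as the listener
        System.out.println(future.get()); // block, as the test does with PlainActionFuture
    }
}
```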
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotITV2.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotV2IT.java
similarity index 96%
rename from server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotITV2.java
rename to server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotV2IT.java
index 381093b6e6238..1d7a58384c0be 100644
--- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotITV2.java
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotV2IT.java
@@ -32,7 +32,7 @@
 import static org.hamcrest.Matchers.lessThan;

 @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
-public class DeleteSnapshotITV2 extends AbstractSnapshotIntegTestCase {
+public class DeleteSnapshotV2IT extends AbstractSnapshotIntegTestCase {

     private static final String REMOTE_REPO_NAME = "remote-store-repo-name";

@@ -277,9 +277,11 @@ public void testRemoteStoreCleanupForDeletedIndexForSnapshotV2() throws Exception {
         Path indexPath = Path.of(String.valueOf(remoteStoreRepoPath), indexUUID);
         Path shardPath = Path.of(String.valueOf(indexPath), "0");
         Path segmentsPath = Path.of(String.valueOf(shardPath), "segments");
+        Path translogPath = Path.of(String.valueOf(shardPath), "translog");

         // Get total segments remote store directory file count for deleted index and shard 0
         int segmentFilesCountBeforeDeletingSnapshot1 = RemoteStoreBaseIntegTestCase.getFileCount(segmentsPath);
+        int translogFilesCountBeforeDeletingSnapshot1 = RemoteStoreBaseIntegTestCase.getFileCount(translogPath);

         RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO);
@@ -313,6 +315,13 @@ public void testRemoteStoreCleanupForDeletedIndexForSnapshotV2() throws Exception {
                 assertThat(RemoteStoreBaseIntegTestCase.getFileCount(segmentsPath), lessThan(segmentFilesCountAfterDeletingSnapshot1));
             } catch (Exception e) {}
         }, 60, TimeUnit.SECONDS);
+
+        assertBusy(() -> {
+            try {
+                assertThat(RemoteStoreBaseIntegTestCase.getFileCount(translogPath), lessThan(translogFilesCountBeforeDeletingSnapshot1));
+            } catch (Exception e) {}
+        }, 60, TimeUnit.SECONDS);
+
     }

     private Settings snapshotV2Settings(Path remoteStoreRepoPath) {
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java
index 54ef372609390..6da26d9a2da45 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java
@@ -95,7 +95,7 @@ protected void clusterManagerOperation(
         ClusterState state,
         ActionListener<AcknowledgedResponse> listener
     ) {
-        snapshotsService.cloneSnapshot(request, ActionListener.map(listener, v -> new AcknowledgedResponse(true)));
+        snapshotsService.executeClone(request, ActionListener.map(listener, v -> new AcknowledgedResponse(true)));
     }

     @Override
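Note: the renamed `executeClone` still completes a listener of an internal type, and the transport action adapts it with `ActionListener.map`. That adapter is essentially the following transformation, re-sketched here on plain JDK types with hypothetical names (OpenSearch's real `ActionListener.map` lives in `org.opensearch.core.action`):

```java
import java.util.function.Function;

public class ListenerMap {
    interface Listener<T> {
        void onResponse(T value);
        void onFailure(Exception e);
    }

    // Adapt a Listener<R> into a Listener<T> by mapping each response,
    // passing failures through untouched.
    static <T, R> Listener<T> map(Listener<R> delegate, Function<T, R> fn) {
        return new Listener<T>() {
            public void onResponse(T value) { delegate.onResponse(fn.apply(value)); }
            public void onFailure(Exception e) { delegate.onFailure(e); }
        };
    }
}
```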
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java
index c7820c2c9a365..9cffc7051d756 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java
@@ -39,8 +39,8 @@
 import org.opensearch.cluster.metadata.Metadata;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.common.util.io.IOUtils;
+import org.opensearch.gateway.remote.ClusterMetadataManifest;

 import java.io.Closeable;
 import java.io.IOException;
@@ -53,7 +53,7 @@
 import java.util.Set;

 import static org.opensearch.cluster.coordination.Coordinator.ZEN1_BWC_TERM;
-import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL;
+import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_PUBLICATION_SETTING;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled;

 /**
@@ -81,7 +81,7 @@ public class CoordinationState {
     private VotingConfiguration lastPublishedConfiguration;
     private VoteCollection publishVotes;
     private final boolean isRemoteStateEnabled;
-    private final boolean isRemotePublicationEnabled;
+    private boolean isRemotePublicationEnabled;

     public CoordinationState(
         DiscoveryNode localNode,
@@ -105,8 +105,9 @@ public CoordinationState(
             .getLastAcceptedConfiguration();
         this.publishVotes = new VoteCollection();
         this.isRemoteStateEnabled = isRemoteStoreClusterStateEnabled(settings);
+        // ToDo: revisit this check while making the setting dynamic
         this.isRemotePublicationEnabled = isRemoteStateEnabled
-            && FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL)
+            && REMOTE_PUBLICATION_SETTING.get(settings)
             && localNode.isRemoteStatePublicationEnabled();
     }

@@ -460,6 +461,9 @@ public PublishResponse handlePublishRequest(PublishRequest publishRequest) {
             clusterState.term()
         );
         persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL).setLastAcceptedState(clusterState);
+        if (shouldUpdateRemotePersistedState(publishRequest)) {
+            updateRemotePersistedStateOnPublishRequest(publishRequest);
+        }
         assert getLastAcceptedState() == clusterState;

         return new PublishResponse(clusterState.term(), clusterState.version());
@@ -572,6 +576,9 @@ public void handleCommit(ApplyCommitRequest applyCommit) {
         );

         persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL).markLastAcceptedStateAsCommitted();
+        if (shouldCommitRemotePersistedState()) {
+            persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).markLastAcceptedStateAsCommitted();
+        }
         assert getLastCommittedConfiguration().equals(getLastAcceptedConfiguration());
     }

@@ -617,6 +624,33 @@ public void close() throws IOException {
         IOUtils.close(persistedStateRegistry);
     }

+    private boolean shouldUpdateRemotePersistedState(PublishRequest publishRequest) {
+        return persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE) != null
+            && publishRequest.getAcceptedState().getNodes().isLocalNodeElectedClusterManager() == false;
+    }
+
+    private void updateRemotePersistedStateOnPublishRequest(PublishRequest publishRequest) {
+        if (publishRequest instanceof RemoteStatePublishRequest) {
+            persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).setLastAcceptedState(publishRequest.getAcceptedState());
+            persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE)
+                .setLastAcceptedManifest(((RemoteStatePublishRequest) publishRequest).getAcceptedManifest());
+        } else {
+            // We will end up here if PublishRequest was sent not using Remote Store even with remote persisted state on this node
+            persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).setLastAcceptedState(null);
+            persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).setLastAcceptedManifest(null);
+        }
+    }
+
+    private boolean shouldCommitRemotePersistedState() {
+        return persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE) != null
+            && persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL)
+                .getLastAcceptedState()
+                .getNodes()
+                .isLocalNodeElectedClusterManager() == false
+            && persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).getLastAcceptedState() != null
+            && persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).getLastAcceptedManifest() != null;
+    }
+
     /**
      * Pluggable persistence layer for {@link CoordinationState}.
      *
@@ -654,6 +688,22 @@ public interface PersistedState extends Closeable {
          */
         PersistedStateStats getStats();

+        /**
+         * Returns the last accepted {@link ClusterMetadataManifest}.
+         *
+         * @return The last accepted {@link ClusterMetadataManifest}, or null if no manifest
+         * has been accepted yet.
+         */
+        default ClusterMetadataManifest getLastAcceptedManifest() {
+            // return null by default, this method needs to be overridden wherever required
+            return null;
+        }
+
+        /**
+         * Sets the last accepted {@link ClusterMetadataManifest}.
+         */
+        default void setLastAcceptedManifest(ClusterMetadataManifest manifest) {}
+
         /**
          * Marks the last accepted cluster state as committed.
          * After a successful call to this method, {@link #getLastAcceptedState()} should return the last cluster state that was set,
@@ -662,14 +712,7 @@ public interface PersistedState extends Closeable {
          */
         default void markLastAcceptedStateAsCommitted() {
             final ClusterState lastAcceptedState = getLastAcceptedState();
-            Metadata.Builder metadataBuilder = null;
-            if (lastAcceptedState.getLastAcceptedConfiguration().equals(lastAcceptedState.getLastCommittedConfiguration()) == false) {
-                final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder(lastAcceptedState.coordinationMetadata())
-                    .lastCommittedConfiguration(lastAcceptedState.getLastAcceptedConfiguration())
-                    .build();
-                metadataBuilder = Metadata.builder(lastAcceptedState.metadata());
-                metadataBuilder.coordinationMetadata(coordinationMetadata);
-            }
+            Metadata.Builder metadataBuilder = commitVotingConfiguration(lastAcceptedState);
             // if we receive a commit from a Zen1 cluster-manager that has not recovered its state yet,
             // the cluster uuid might not been known yet.
             assert lastAcceptedState.metadata().clusterUUID().equals(Metadata.UNKNOWN_CLUSTER_UUID) == false
@@ -694,6 +737,18 @@ default void markLastAcceptedStateAsCommitted() {
             }
         }

+        default Metadata.Builder commitVotingConfiguration(ClusterState lastAcceptedState) {
+            Metadata.Builder metadataBuilder = null;
+            if (lastAcceptedState.getLastAcceptedConfiguration().equals(lastAcceptedState.getLastCommittedConfiguration()) == false) {
+                final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder(lastAcceptedState.coordinationMetadata())
+                    .lastCommittedConfiguration(lastAcceptedState.getLastAcceptedConfiguration())
+                    .build();
+                metadataBuilder = Metadata.builder(lastAcceptedState.metadata());
+                metadataBuilder.coordinationMetadata(coordinationMetadata);
+            }
+            return metadataBuilder;
+        }
+
         default void close() throws IOException {}
     }
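Note: the two new guards decide, per publication, whether the remote copy of persisted state is written on accept and later committed. Reduced to plain booleans, the logic is as follows — a sketch of the conditions above, with the registry lookups flattened for readability, not the production code:

```java
public class RemotePersistedStateGuards {
    // Followers persist what the cluster-manager published; the elected
    // cluster-manager already wrote remote state while publishing.
    static boolean shouldUpdateRemote(boolean hasRemotePersistedState, boolean localNodeIsElectedClusterManager) {
        return hasRemotePersistedState && !localNodeIsElectedClusterManager;
    }

    // Commit only if the accept phase actually recorded both a remote
    // cluster state and its manifest (i.e. the publish came via remote store).
    static boolean shouldCommitRemote(
        boolean hasRemotePersistedState,
        boolean localNodeIsElectedClusterManager,
        boolean remoteStateAccepted,
        boolean remoteManifestAccepted
    ) {
        return hasRemotePersistedState
            && !localNodeIsElectedClusterManager
            && remoteStateAccepted
            && remoteManifestAccepted;
    }
}
```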
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java
index ca36011b3a0e9..cdf331b7bb577 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java
@@ -199,7 +199,7 @@ private PublishWithJoinResponse handleIncomingPublishRequest(BytesTransportRequest request) {
             }
             fullClusterStateReceivedCount.incrementAndGet();
             logger.debug("received full cluster state version [{}] with size [{}]", incomingState.version(), request.bytes().length());
-            final PublishWithJoinResponse response = acceptState(incomingState);
+            final PublishWithJoinResponse response = acceptState(incomingState, null);
             lastSeenClusterState.set(incomingState);
             return response;
         } else {
@@ -230,7 +230,7 @@ private PublishWithJoinResponse handleIncomingPublishRequest(BytesTransportRequest request) {
                 incomingState.stateUUID(),
                 request.bytes().length()
             );
-            final PublishWithJoinResponse response = acceptState(incomingState);
+            final PublishWithJoinResponse response = acceptState(incomingState, null);
             lastSeenClusterState.compareAndSet(lastSeen, incomingState);
             return response;
         }
@@ -281,7 +281,7 @@ PublishWithJoinResponse handleIncomingRemotePublishRequest(RemotePublishRequest request) {
                 true
             );
             fullClusterStateReceivedCount.incrementAndGet();
-            final PublishWithJoinResponse response = acceptState(clusterState);
+            final PublishWithJoinResponse response = acceptState(clusterState, manifest);
             lastSeenClusterState.set(clusterState);
             return response;
         } else {
@@ -300,7 +300,7 @@ PublishWithJoinResponse handleIncomingRemotePublishRequest(RemotePublishRequest request) {
                 transportService.getLocalNode().getId()
             );
             compatibleClusterStateDiffReceivedCount.incrementAndGet();
-            final PublishWithJoinResponse response = acceptState(clusterState);
+            final PublishWithJoinResponse response = acceptState(clusterState, manifest);
             lastSeenClusterState.compareAndSet(lastSeen, clusterState);
             return response;
         }
@@ -314,7 +314,7 @@ PublishWithJoinResponse handleIncomingRemotePublishRequest(RemotePublishRequest request) {
         }
     }

-    private PublishWithJoinResponse acceptState(ClusterState incomingState) {
+    private PublishWithJoinResponse acceptState(ClusterState incomingState, ClusterMetadataManifest manifest) {
         // if the state is coming from the current node, use original request instead (see currentPublishRequestToSelf for explanation)
         if (transportService.getLocalNode().equals(incomingState.nodes().getClusterManagerNode())) {
             final PublishRequest publishRequest = currentPublishRequestToSelf.get();
@@ -324,6 +324,9 @@ private PublishWithJoinResponse acceptState(ClusterState incomingState) {
                 return handlePublishRequest.apply(publishRequest);
             }
         }
+        if (manifest != null) {
+            return handlePublishRequest.apply(new RemoteStatePublishRequest(incomingState, manifest));
+        }
         return handlePublishRequest.apply(new PublishRequest(incomingState));
     }

@@ -539,7 +542,7 @@ public String executor() {
         }

         public void sendClusterState(DiscoveryNode destination, ActionListener<PublishWithJoinResponse> listener) {
-            logger.info("sending cluster state over transport to node: {}", destination.getName());
+            logger.debug("sending cluster state over transport to node: {}", destination.getName());
             if (sendFullVersion || previousState.nodes().nodeExists(destination) == false) {
                 logger.trace("sending full cluster state version [{}] to [{}]", newState.version(), destination);
                 sendFullClusterState(destination, listener);
@@ -639,7 +642,7 @@ public class RemotePublicationContext extends PublicationContext {
         @Override
         public void sendClusterState(final DiscoveryNode destination, final ActionListener<PublishWithJoinResponse> listener) {
             try {
-                logger.info("sending remote cluster state to node: {}", destination.getName());
+                logger.debug("sending remote cluster state to node: {}", destination.getName());
                 final String manifestFileName = ((RemotePersistedState) persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE))
                     .getLastUploadedManifestFile();
                 final RemotePublishRequest remotePublishRequest = new RemotePublishRequest(
+ */ + +package org.opensearch.cluster.coordination; + +import org.opensearch.cluster.ClusterState; +import org.opensearch.gateway.remote.ClusterMetadataManifest; + +import java.util.Objects; + +/** + * PublishRequest created by downloading the accepted {@link ClusterState} from Remote Store, using the published {@link ClusterMetadataManifest} + * + * @opensearch.internal + */ +public class RemoteStatePublishRequest extends PublishRequest { + private final ClusterMetadataManifest manifest; + + public RemoteStatePublishRequest(ClusterState acceptedState, ClusterMetadataManifest acceptedManifest) { + super(acceptedState); + this.manifest = acceptedManifest; + } + + public ClusterMetadataManifest getAcceptedManifest() { + return manifest; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + RemoteStatePublishRequest that = (RemoteStatePublishRequest) o; + return Objects.equals(manifest, that.manifest); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), manifest); + } + + @Override + public String toString() { + return "RemoteStatePublishRequest{" + super.toString() + "manifest=" + manifest + "} "; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index e0444ee670011..ebb41eb2acc25 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -1299,7 +1299,7 @@ public void writeVerifiableTo(BufferedChecksumStreamOutput out) throws IOExcepti out.writeByte(state.id()); writeSettingsToStream(settings, out); out.writeVLongArray(primaryTerms); - out.writeMapValues(mappings, (stream, val) -> val.writeTo(stream)); + out.writeMapValues(mappings, (stream, val) -> val.writeVerifiableTo((BufferedChecksumStreamOutput) stream)); out.writeMapValues(aliases, (stream, val) -> val.writeTo(stream)); out.writeMap(customData, StreamOutput::writeString, (stream, val) -> val.writeTo(stream)); out.writeMap( @@ -1314,6 +1314,44 @@ public void writeVerifiableTo(BufferedChecksumStreamOutput out) throws IOExcepti } } + @Override + public String toString() { + return new StringBuilder().append("IndexMetadata{routingNumShards=") + .append(routingNumShards) + .append(", index=") + .append(index) + .append(", version=") + .append(version) + .append(", state=") + .append(state) + .append(", settingsVersion=") + .append(settingsVersion) + .append(", mappingVersion=") + .append(mappingVersion) + .append(", aliasesVersion=") + .append(aliasesVersion) + .append(", primaryTerms=") + .append(Arrays.toString(primaryTerms)) + .append(", aliases=") + .append(aliases) + .append(", settings=") + .append(settings) + .append(", mappings=") + .append(mappings) + .append(", customData=") + .append(customData) + .append(", inSyncAllocationIds=") + .append(inSyncAllocationIds) + .append(", rolloverInfos=") + .append(rolloverInfos) + .append(", isSystem=") + .append(isSystem) + .append(", context=") + .append(context) + .append("}") + .toString(); + } + public boolean isSystem() { return isSystem; } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java index f272baffcf10d..65a1ad873d659 100644 --- 
a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java @@ -46,6 +46,7 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.translog.BufferedChecksumStreamOutput; import java.io.IOException; import java.io.UncheckedIOException; @@ -168,6 +169,12 @@ public void writeTo(StreamOutput out) throws IOException { } } + public void writeVerifiableTo(BufferedChecksumStreamOutput out) throws IOException { + out.writeString(type()); + source().writeVerifiableTo(out); + out.writeBoolean(routingRequired); + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java b/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java index dcd96dceb4bf1..0eeafdc8f5eed 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java @@ -12,6 +12,7 @@ import org.opensearch.cluster.Diff; import org.opensearch.common.UUIDs; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentObject; @@ -41,7 +42,7 @@ * "updated_at": 4513232415 * } */ -@ExperimentalApi +@PublicApi(since = "2.18.0") public class QueryGroup extends AbstractDiffable implements ToXContentObject { public static final String _ID_STRING = "_id"; diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java index 827f3a12fbce4..39ce218dd801a 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java @@ -33,7 +33,7 @@ *
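* Pending tasks are counted per task type; a submission that would push the pending count past the configured threshold is rejected before it is queued (editorial note added for clarity).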

* Set a task-specific setting to configure the throttling threshold for that task type. e.g.: Set "cluster_manager.throttling.thresholds.put_mapping" to set the throttling limit for "put mapping" tasks, - * Set it to default value(-1) to disable the throttling for this task type. + * Set it to default value(-1) to disable the throttling for this task type. */ public class ClusterManagerTaskThrottler implements TaskBatcherListener { private static final Logger logger = LogManager.getLogger(ClusterManagerTaskThrottler.class); @@ -69,7 +69,7 @@ public class ClusterManagerTaskThrottler implements TaskBatcherListener { private final int MIN_THRESHOLD_VALUE = -1; // Disabled throttling private final ClusterManagerTaskThrottlerListener clusterManagerTaskThrottlerListener; - private final ConcurrentMap tasksCount; + final ConcurrentMap tasksCount; private final ConcurrentMap tasksThreshold; private final Supplier minNodeVersionSupplier; @@ -209,30 +209,59 @@ Long getThrottlingLimit(final String taskKey) { return tasksThreshold.get(taskKey); } + private void failFastWhenThrottlingThresholdsAreAlreadyBreached( + final boolean throttlingEnabledWithThreshold, + final Long threshold, + final long existingTaskCount, + final int incomingTaskCount, + final String taskThrottlingKey + ) { + if (throttlingEnabledWithThreshold && shouldThrottle(threshold, existingTaskCount, incomingTaskCount)) { + throw new ClusterManagerThrottlingException("Throttling Exception : Limit exceeded for " + taskThrottlingKey); + } + } + @Override public void onBeginSubmit(List tasks) { - ThrottlingKey clusterManagerThrottlingKey = ((ClusterStateTaskExecutor) tasks.get(0).batchingKey) + final ThrottlingKey clusterManagerThrottlingKey = ((ClusterStateTaskExecutor) tasks.get(0).batchingKey) .getClusterManagerThrottlingKey(); - tasksCount.putIfAbsent(clusterManagerThrottlingKey.getTaskThrottlingKey(), 0L); - tasksCount.computeIfPresent(clusterManagerThrottlingKey.getTaskThrottlingKey(), (key, count) -> { - int size = tasks.size(); - if (clusterManagerThrottlingKey.isThrottlingEnabled()) { - Long threshold = tasksThreshold.get(clusterManagerThrottlingKey.getTaskThrottlingKey()); - if (threshold != null && shouldThrottle(threshold, count, size)) { - clusterManagerTaskThrottlerListener.onThrottle(clusterManagerThrottlingKey.getTaskThrottlingKey(), size); - logger.warn( - "Throwing Throttling Exception for [{}]. 
Trying to add [{}] tasks to queue, limit is set to [{}]", - clusterManagerThrottlingKey.getTaskThrottlingKey(), - tasks.size(), - threshold - ); - throw new ClusterManagerThrottlingException( - "Throttling Exception : Limit exceeded for " + clusterManagerThrottlingKey.getTaskThrottlingKey() - ); - } - } - return count + size; - }); + final String taskThrottlingKey = clusterManagerThrottlingKey.getTaskThrottlingKey(); + final Long threshold = getThrottlingLimit(taskThrottlingKey); + final boolean isThrottlingEnabledWithThreshold = clusterManagerThrottlingKey.isThrottlingEnabled() && threshold != null; + int incomingTaskCount = tasks.size(); + + try { + tasksCount.putIfAbsent(taskThrottlingKey, 0L); + // Perform shallow check before acquiring lock to avoid blocking of network threads + // if throttling is ongoing for a specific task + failFastWhenThrottlingThresholdsAreAlreadyBreached( + isThrottlingEnabledWithThreshold, + threshold, + tasksCount.get(taskThrottlingKey), + incomingTaskCount, + taskThrottlingKey + ); + + tasksCount.computeIfPresent(taskThrottlingKey, (key, existingTaskCount) -> { + failFastWhenThrottlingThresholdsAreAlreadyBreached( + isThrottlingEnabledWithThreshold, + threshold, + existingTaskCount, + incomingTaskCount, + taskThrottlingKey + ); + return existingTaskCount + incomingTaskCount; + }); + } catch (final ClusterManagerThrottlingException e) { + clusterManagerTaskThrottlerListener.onThrottle(taskThrottlingKey, incomingTaskCount); + logger.trace( + "Throwing Throttling Exception for [{}]. Trying to add [{}] tasks to queue, limit is set to [{}]", + taskThrottlingKey, + incomingTaskCount, + threshold + ); + throw e; + } } /** diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerThrottlingException.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerThrottlingException.java index 04fa9fa45d5ea..7a835910c400f 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerThrottlingException.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerThrottlingException.java @@ -25,4 +25,10 @@ public ClusterManagerThrottlingException(String msg, Object... args) { public ClusterManagerThrottlingException(StreamInput in) throws IOException { super(in); } + + @Override + public Throwable fillInStackTrace() { + // This is on the hot path; stack traces are expensive to compute and not very useful for this exception, so don't fill it. 
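+ // Because the stack trace is suppressed, throwing this exception from the fail-fast checks in + // ClusterManagerTaskThrottler#onBeginSubmit stays cheap even when batches are rejected repeatedly.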
+ return this; + } } diff --git a/server/src/main/java/org/opensearch/common/compress/CompressedXContent.java b/server/src/main/java/org/opensearch/common/compress/CompressedXContent.java index 23fc6353dbad3..4c119c14298e4 100644 --- a/server/src/main/java/org/opensearch/common/compress/CompressedXContent.java +++ b/server/src/main/java/org/opensearch/common/compress/CompressedXContent.java @@ -44,6 +44,7 @@ import org.opensearch.core.compress.CompressorRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.translog.BufferedChecksumStreamOutput; import java.io.IOException; import java.io.OutputStream; @@ -169,6 +170,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeByteArray(bytes); } + public void writeVerifiableTo(BufferedChecksumStreamOutput out) throws IOException { + out.writeInt(crc32); + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 67cf1305d3c92..f946cb6bc77f6 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -153,6 +153,7 @@ import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings; import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; import org.opensearch.ratelimitting.admissioncontrol.settings.IoBasedAdmissionControllerSettings; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.fs.FsRepository; import org.opensearch.rest.BaseRestHandler; import org.opensearch.script.ScriptService; @@ -735,6 +736,7 @@ public void apply(Settings value, Settings current, Settings previous) { // Remote cluster state settings RemoteClusterStateCleanupManager.REMOTE_CLUSTER_STATE_CLEANUP_INTERVAL_SETTING, RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING, + RemoteClusterStateService.REMOTE_PUBLICATION_SETTING, INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, @@ -779,6 +781,7 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED, RemoteStoreSettings.CLUSTER_REMOTE_STORE_SEGMENTS_PATH_PREFIX, RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_PATH_PREFIX, + BlobStoreRepository.SNAPSHOT_SHARD_PATH_PREFIX_SETTING, // Composite index settings CompositeIndexSettings.STAR_TREE_INDEX_ENABLED_SETTING, diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index fec19e9d286b8..0d63139ffb6d2 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -37,7 +37,6 @@ protected FeatureFlagSettings( FeatureFlags.TIERED_REMOTE_INDEX_SETTING, FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING, FeatureFlags.PLUGGABLE_CACHE_SETTING, - FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL_SETTING, FeatureFlags.APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING, FeatureFlags.STAR_TREE_INDEX_SETTING, FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL_SETTING diff --git 
a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 0ef2e773a690b..7fc619f42023d 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -67,11 +67,6 @@ public class FeatureFlags { */ public static final String PLUGGABLE_CACHE = "opensearch.experimental.feature.pluggable.caching.enabled"; - /** - * Gates the functionality of remote routing table. - */ - public static final String REMOTE_PUBLICATION_EXPERIMENTAL = "opensearch.experimental.feature.remote_store.publication.enabled"; - public static final String READER_WRITER_SPLIT_EXPERIMENTAL = "opensearch.experimental.feature.read.write.split.enabled"; public static final Setting REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING = Setting.boolSetting( @@ -96,12 +91,6 @@ public class FeatureFlags { public static final Setting PLUGGABLE_CACHE_SETTING = Setting.boolSetting(PLUGGABLE_CACHE, false, Property.NodeScope); - public static final Setting REMOTE_PUBLICATION_EXPERIMENTAL_SETTING = Setting.boolSetting( - REMOTE_PUBLICATION_EXPERIMENTAL, - false, - Property.NodeScope - ); - public static final Setting READER_WRITER_SPLIT_EXPERIMENTAL_SETTING = Setting.boolSetting( READER_WRITER_SPLIT_EXPERIMENTAL, false, @@ -143,7 +132,6 @@ public class FeatureFlags { DATETIME_FORMATTER_CACHING_SETTING, TIERED_REMOTE_INDEX_SETTING, PLUGGABLE_CACHE_SETTING, - REMOTE_PUBLICATION_EXPERIMENTAL_SETTING, APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING, STAR_TREE_INDEX_SETTING, READER_WRITER_SPLIT_EXPERIMENTAL_SETTING diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index 06026964590e1..b908a2cdfbac1 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -698,8 +698,18 @@ public String getLastUploadedManifestFile() { return lastUploadedManifestFile; } + @Override + public ClusterMetadataManifest getLastAcceptedManifest() { + return lastAcceptedManifest; + } + @Override public void setLastAcceptedState(ClusterState clusterState) { + // for non leader node, update the lastAcceptedClusterState + if (clusterState == null || clusterState.getNodes().isLocalNodeElectedClusterManager() == false) { + lastAcceptedState = clusterState; + return; + } try { final RemoteClusterStateManifestInfo manifestDetails; // Decide the codec version @@ -736,7 +746,7 @@ assert verifyManifestAndClusterState(lastAcceptedManifest, lastAcceptedState) == } assert verifyManifestAndClusterState(manifestDetails.getClusterMetadataManifest(), clusterState) == true : "Manifest and ClusterState are not in sync"; - lastAcceptedManifest = manifestDetails.getClusterMetadataManifest(); + setLastAcceptedManifest(manifestDetails.getClusterMetadataManifest()); lastAcceptedState = clusterState; lastUploadedManifestFile = manifestDetails.getManifestFileName(); } catch (Exception e) { @@ -745,6 +755,11 @@ assert verifyManifestAndClusterState(manifestDetails.getClusterMetadataManifest( } } + @Override + public void setLastAcceptedManifest(ClusterMetadataManifest manifest) { + this.lastAcceptedManifest = manifest; + } + @Override public PersistedStateStats getStats() { return remoteClusterStateService.getUploadStats(); @@ -768,7 +783,7 @@ private boolean shouldWriteFullClusterState(ClusterState clusterState, int codec assert 
lastAcceptedManifest == null || lastAcceptedManifest.getCodecVersion() <= codecVersion; if (lastAcceptedState == null || lastAcceptedManifest == null - || lastAcceptedState.term() != clusterState.term() + || (remoteClusterStateService.isRemotePublicationEnabled() == false && lastAcceptedState.term() != clusterState.term()) || lastAcceptedManifest.getOpensearchVersion() != Version.CURRENT || lastAcceptedManifest.getCodecVersion() != codecVersion) { return true; @@ -782,19 +797,32 @@ public void markLastAcceptedStateAsCommitted() { assert lastAcceptedState != null : "Last accepted state is not present"; assert lastAcceptedManifest != null : "Last accepted manifest is not present"; ClusterState clusterState = lastAcceptedState; - if (lastAcceptedState.metadata().clusterUUID().equals(Metadata.UNKNOWN_CLUSTER_UUID) == false - && lastAcceptedState.metadata().clusterUUIDCommitted() == false) { + boolean shouldCommitVotingConfig = shouldCommitVotingConfig(); + boolean isClusterUUIDUnknown = lastAcceptedState.metadata().clusterUUID().equals(Metadata.UNKNOWN_CLUSTER_UUID); + boolean isClusterUUIDCommitted = lastAcceptedState.metadata().clusterUUIDCommitted(); + if (shouldCommitVotingConfig || (isClusterUUIDUnknown == false && isClusterUUIDCommitted == false)) { Metadata.Builder metadataBuilder = Metadata.builder(lastAcceptedState.metadata()); - metadataBuilder.clusterUUIDCommitted(true); + if (shouldCommitVotingConfig) { + metadataBuilder = commitVotingConfiguration(lastAcceptedState); + } + if (isClusterUUIDUnknown == false && isClusterUUIDCommitted == false) { + metadataBuilder.clusterUUIDCommitted(true); + } clusterState = ClusterState.builder(lastAcceptedState).metadata(metadataBuilder).build(); } - final RemoteClusterStateManifestInfo committedManifestDetails = remoteClusterStateService.markLastStateAsCommitted( - clusterState, - lastAcceptedManifest - ); - lastAcceptedManifest = committedManifestDetails.getClusterMetadataManifest(); + if (clusterState.getNodes().isLocalNodeElectedClusterManager()) { + final RemoteClusterStateManifestInfo committedManifestDetails = remoteClusterStateService.markLastStateAsCommitted( + clusterState, + lastAcceptedManifest, + shouldCommitVotingConfig + ); + assert committedManifestDetails != null; + setLastAcceptedManifest(committedManifestDetails.getClusterMetadataManifest()); + lastUploadedManifestFile = committedManifestDetails.getManifestFileName(); + } else { + setLastAcceptedManifest(ClusterMetadataManifest.builder(lastAcceptedManifest).committed(true).build()); + } lastAcceptedState = clusterState; - lastUploadedManifestFile = committedManifestDetails.getManifestFileName(); } catch (Exception e) { handleExceptionOnWrite(e); } @@ -805,6 +833,10 @@ public void close() throws IOException { remoteClusterStateService.close(); } + private boolean shouldCommitVotingConfig() { + return !lastAcceptedState.getLastAcceptedConfiguration().equals(lastAcceptedState.getLastCommittedConfiguration()); + } + private void handleExceptionOnWrite(Exception e) { throw ExceptionsHelper.convertToRuntime(e); } diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index fe34b68702c41..e504c5abb46d3 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -40,7 +40,6 @@ import org.opensearch.common.settings.Setting.Property; import 
org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; @@ -92,7 +91,6 @@ import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static org.opensearch.cluster.ClusterState.CUSTOM_VALUE_SERIALIZER; -import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; import static org.opensearch.gateway.PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD; import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V2; import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V3; @@ -123,6 +121,18 @@ public class RemoteClusterStateService implements Closeable { private static final Logger logger = LogManager.getLogger(RemoteClusterStateService.class); + /** + * Gates the functionality of remote publication. + */ + public static final String REMOTE_PUBLICATION_SETTING_KEY = "cluster.remote_store.publication.enabled"; + + public static final Setting REMOTE_PUBLICATION_SETTING = Setting.boolSetting( + REMOTE_PUBLICATION_SETTING_KEY, + false, + Property.NodeScope, + Property.Final + ); + /** * Used to specify if cluster state metadata should be published to remote store */ @@ -260,7 +270,7 @@ public RemoteClusterStateService( this.remoteStateStats = new RemotePersistenceStats(); this.namedWriteableRegistry = namedWriteableRegistry; this.indexMetadataUploadListeners = indexMetadataUploadListeners; - this.isPublicationEnabled = FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) + this.isPublicationEnabled = REMOTE_PUBLICATION_SETTING.get(settings) && RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled(settings) && RemoteStoreNodeAttribute.isRemoteRoutingTableEnabled(settings); this.remotePathPrefix = CLUSTER_REMOTE_STORE_STATE_PATH_PREFIX.get(settings); @@ -353,12 +363,20 @@ public RemoteClusterStateManifestInfo writeFullMetadata(ClusterState clusterStat * * @return {@link RemoteClusterStateManifestInfo} object containing uploaded manifest detail */ - @Nullable public RemoteClusterStateManifestInfo writeIncrementalMetadata( ClusterState previousClusterState, ClusterState clusterState, ClusterMetadataManifest previousManifest ) throws IOException { + if (previousClusterState == null) { + throw new IllegalArgumentException("previousClusterState cannot be null"); + } + if (clusterState == null) { + throw new IllegalArgumentException("clusterState cannot be null"); + } + if (previousManifest == null) { + throw new IllegalArgumentException("previousManifest cannot be null"); + } logger.trace("WRITING INCREMENTAL STATE"); final long startTimeNanos = relativeTimeNanosSupplier.getAsLong(); @@ -366,7 +384,6 @@ public RemoteClusterStateManifestInfo writeIncrementalMetadata( logger.error("Local node is not elected cluster manager. 
Exiting"); return null; } - assert previousClusterState.metadata().coordinationMetadata().term() == clusterState.metadata().coordinationMetadata().term(); boolean firstUploadForSplitGlobalMetadata = !previousManifest.hasMetadataAttributesFiles(); @@ -939,18 +956,41 @@ public RemoteClusterStateCleanupManager getCleanupManager() { } @Nullable - public RemoteClusterStateManifestInfo markLastStateAsCommitted(ClusterState clusterState, ClusterMetadataManifest previousManifest) - throws IOException { + public RemoteClusterStateManifestInfo markLastStateAsCommitted( + ClusterState clusterState, + ClusterMetadataManifest previousManifest, + boolean commitVotingConfig + ) throws IOException { assert clusterState != null : "Last accepted cluster state is not set"; if (clusterState.nodes().isLocalNodeElectedClusterManager() == false) { logger.error("Local node is not elected cluster manager. Exiting"); return null; } assert previousManifest != null : "Last cluster metadata manifest is not set"; + UploadedMetadataAttribute uploadedCoordinationMetadata = previousManifest.getCoordinationMetadata(); + if (commitVotingConfig) { + // update the coordination metadata if voting config is committed + uploadedCoordinationMetadata = writeMetadataInParallel( + clusterState, + emptyList(), + emptyMap(), + emptyMap(), + true, + false, + false, + false, + false, + false, + emptyMap(), + false, + emptyList(), + null + ).uploadedCoordinationMetadata; + } UploadedMetadataResults uploadedMetadataResults = new UploadedMetadataResults( previousManifest.getIndices(), previousManifest.getCustomMetadataMap(), - previousManifest.getCoordinationMetadata(), + uploadedCoordinationMetadata, previousManifest.getSettingsMetadata(), previousManifest.getTemplatesMetadata(), previousManifest.getTransientSettingsMetadata(), @@ -1752,6 +1792,10 @@ public String getLastKnownUUIDFromRemote(String clusterName) { } } + public boolean isRemotePublicationEnabled() { + return this.isPublicationEnabled; + } + public void setRemoteStateReadTimeout(TimeValue remoteStateReadTimeout) { this.remoteStateReadTimeout = remoteStateReadTimeout; } diff --git a/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java index cd95e320209ee..0cb416a9b8370 100644 --- a/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java @@ -40,7 +40,6 @@ import org.apache.lucene.util.automaton.RegExp; import org.opensearch.common.lucene.BytesRefs; import org.opensearch.common.lucene.Lucene; -import org.opensearch.common.regex.Regex; import org.opensearch.common.unit.Fuzziness; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.analysis.IndexAnalyzers; @@ -430,22 +429,27 @@ public Query wildcardQuery(String value, MultiTermQuery.RewriteMethod method, bo finalValue = value; } Predicate matchPredicate; - if (value.contains("?")) { - Automaton automaton = WildcardQuery.toAutomaton(new Term(name(), finalValue)); - CompiledAutomaton compiledAutomaton = new CompiledAutomaton(automaton); + Automaton automaton = WildcardQuery.toAutomaton(new Term(name(), finalValue)); + CompiledAutomaton compiledAutomaton = new CompiledAutomaton(automaton); + if (compiledAutomaton.type == CompiledAutomaton.AUTOMATON_TYPE.SINGLE) { + // when type equals SINGLE, #compiledAutomaton.runAutomaton is null matchPredicate = s -> { if (caseInsensitive) { s = s.toLowerCase(Locale.ROOT); } - 
BytesRef valueBytes = BytesRefs.toBytesRef(s); - return compiledAutomaton.runAutomaton.run(valueBytes.bytes, valueBytes.offset, valueBytes.length); + return s.equals(finalValue); }; + } else if (compiledAutomaton.type == CompiledAutomaton.AUTOMATON_TYPE.ALL) { + return existsQuery(context); + } else if (compiledAutomaton.type == CompiledAutomaton.AUTOMATON_TYPE.NONE) { + return new MatchNoDocsQuery("Wildcard expression matches nothing"); } else { matchPredicate = s -> { if (caseInsensitive) { s = s.toLowerCase(Locale.ROOT); } - return Regex.simpleMatch(finalValue, s); + BytesRef valueBytes = BytesRefs.toBytesRef(s); + return compiledAutomaton.runAutomaton.run(valueBytes.bytes, valueBytes.offset, valueBytes.length); }; } @@ -468,11 +472,18 @@ public Query wildcardQuery(String value, MultiTermQuery.RewriteMethod method, bo // Package-private for testing static Set getRequiredNGrams(String value) { Set terms = new HashSet<>(); + + if (value.isEmpty()) { + return terms; + } + int pos = 0; + String rawSequence = null; String currentSequence = null; if (!value.startsWith("?") && !value.startsWith("*")) { // Can add prefix term - currentSequence = getNonWildcardSequence(value, 0); + rawSequence = getNonWildcardSequence(value, 0); + currentSequence = performEscape(rawSequence); if (currentSequence.length() == 1) { terms.add(new String(new char[] { 0, currentSequence.charAt(0) })); } else { @@ -480,10 +491,11 @@ static Set getRequiredNGrams(String value) { } } else { pos = findNonWildcardSequence(value, pos); - currentSequence = getNonWildcardSequence(value, pos); + rawSequence = getNonWildcardSequence(value, pos); } while (pos < value.length()) { - boolean isEndOfValue = pos + currentSequence.length() == value.length(); + boolean isEndOfValue = pos + rawSequence.length() == value.length(); + currentSequence = performEscape(rawSequence); if (!currentSequence.isEmpty() && currentSequence.length() < 3 && !isEndOfValue && pos > 0) { // If this is a prefix or suffix of length < 3, then we already have a longer token including the anchor. terms.add(currentSequence); @@ -502,8 +514,8 @@ static Set getRequiredNGrams(String value) { terms.add(new String(new char[] { a, b, 0 })); } } - pos = findNonWildcardSequence(value, pos + currentSequence.length()); - currentSequence = getNonWildcardSequence(value, pos); + pos = findNonWildcardSequence(value, pos + rawSequence.length()); + rawSequence = getNonWildcardSequence(value, pos); } return terms; } @@ -511,7 +523,7 @@ static Set getRequiredNGrams(String value) { private static String getNonWildcardSequence(String value, int startFrom) { for (int i = startFrom; i < value.length(); i++) { char c = value.charAt(i); - if (c == '?' || c == '*') { + if ((c == '?' 
|| c == '*') && (i == 0 || value.charAt(i - 1) != '\\')) { return value.substring(startFrom, i); } } @@ -529,6 +541,22 @@ private static int findNonWildcardSequence(String value, int startFrom) { return value.length(); } + private static String performEscape(String str) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < str.length(); i++) { + if (str.charAt(i) == '\\' && (i + 1) < str.length()) { + char c = str.charAt(i + 1); + if (c == '*' || c == '?') { + i++; + } + } + sb.append(str.charAt(i)); + } + assert !sb.toString().contains("\\*"); + assert !sb.toString().contains("\\?"); + return sb.toString(); + } + @Override public Query regexpQuery( String value, diff --git a/server/src/main/java/org/opensearch/index/query/AbstractGeometryQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/AbstractGeometryQueryBuilder.java index 9fb857e33bfee..3823b524df6fe 100644 --- a/server/src/main/java/org/opensearch/index/query/AbstractGeometryQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/AbstractGeometryQueryBuilder.java @@ -67,7 +67,9 @@ * * @opensearch.internal */ -public abstract class AbstractGeometryQueryBuilder> extends AbstractQueryBuilder { +public abstract class AbstractGeometryQueryBuilder> extends AbstractQueryBuilder + implements + WithFieldName { public static final String DEFAULT_SHAPE_INDEX_NAME = "shapes"; public static final String DEFAULT_SHAPE_FIELD_NAME = "shape"; @@ -218,6 +220,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { /** * @return the name of the field that will be queried */ + @Override public String fieldName() { return fieldName; } diff --git a/server/src/main/java/org/opensearch/index/query/BaseTermQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/BaseTermQueryBuilder.java index c4d9437a60c75..deb36a26e4c86 100644 --- a/server/src/main/java/org/opensearch/index/query/BaseTermQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/BaseTermQueryBuilder.java @@ -47,7 +47,7 @@ * * @opensearch.internal */ -public abstract class BaseTermQueryBuilder> extends AbstractQueryBuilder { +public abstract class BaseTermQueryBuilder> extends AbstractQueryBuilder implements WithFieldName { public static final ParseField VALUE_FIELD = new ParseField("value"); @@ -153,6 +153,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { } /** Returns the field name used in this query. 
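* Term-level builders now surface this through the shared {@link WithFieldName} contract, so callers can obtain the target field without casting to a concrete subclass.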
*/ + @Override public String fieldName() { return this.fieldName; } diff --git a/server/src/main/java/org/opensearch/index/query/CommonTermsQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/CommonTermsQueryBuilder.java index 652cae86da0dc..24b10851cbe10 100644 --- a/server/src/main/java/org/opensearch/index/query/CommonTermsQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/CommonTermsQueryBuilder.java @@ -67,7 +67,7 @@ * @opensearch.internal */ @Deprecated -public class CommonTermsQueryBuilder extends AbstractQueryBuilder { +public class CommonTermsQueryBuilder extends AbstractQueryBuilder implements WithFieldName { public static final String COMMON_TERMS_QUERY_DEPRECATION_MSG = "[match] query which can efficiently " + "skip blocks of documents if the total number of hits is not tracked"; @@ -152,6 +152,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeFloat(cutoffFrequency); } + @Override public String fieldName() { return this.fieldName; } diff --git a/server/src/main/java/org/opensearch/index/query/DistanceFeatureQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/DistanceFeatureQueryBuilder.java index 1d9f0479c6b17..e4f3d8556158a 100644 --- a/server/src/main/java/org/opensearch/index/query/DistanceFeatureQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/DistanceFeatureQueryBuilder.java @@ -57,7 +57,7 @@ * * @opensearch.internal */ -public class DistanceFeatureQueryBuilder extends AbstractQueryBuilder { +public class DistanceFeatureQueryBuilder extends AbstractQueryBuilder implements WithFieldName { public static final String NAME = "distance_feature"; private static final ParseField FIELD_FIELD = new ParseField("field"); @@ -136,7 +136,8 @@ protected Query doToQuery(QueryShardContext context) throws IOException { return fieldType.distanceFeatureQuery(origin.origin(), pivot, 1.0f, context); } - String fieldName() { + @Override + public String fieldName() { return field; } diff --git a/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java index 6ae40fe1b1e64..f6b24e98f0f71 100644 --- a/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java @@ -59,7 +59,7 @@ * * @opensearch.internal */ -public class ExistsQueryBuilder extends AbstractQueryBuilder { +public class ExistsQueryBuilder extends AbstractQueryBuilder implements WithFieldName { public static final String NAME = "exists"; public static final ParseField FIELD_FIELD = new ParseField("field"); @@ -89,6 +89,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { /** * @return the field name that has to exist for this query to match */ + @Override public String fieldName() { return this.fieldName; } diff --git a/server/src/main/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilder.java index 4e73d87b07b7a..7846098b55071 100644 --- a/server/src/main/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilder.java @@ -53,7 +53,10 @@ * * @opensearch.internal */ -public class FieldMaskingSpanQueryBuilder extends AbstractQueryBuilder implements SpanQueryBuilder { +public class FieldMaskingSpanQueryBuilder extends AbstractQueryBuilder + implements + 
SpanQueryBuilder, + WithFieldName { public static final String NAME = "span_field_masking"; public static final ParseField SPAN_FIELD_MASKING_FIELD = new ParseField(NAME, "field_masking_span"); @@ -100,6 +103,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { /** * @return the field name for this query */ + @Override public String fieldName() { return this.fieldName; } diff --git a/server/src/main/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilder.java index 1fade8601e2a6..52e96159cf06e 100644 --- a/server/src/main/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -66,7 +66,7 @@ * * @opensearch.internal * */ -public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder { +public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder implements WithFieldName { public static final String NAME = "geo_bounding_box"; /** Default type for executing this query (memory as of this writing). */ @@ -263,6 +263,7 @@ public GeoExecType type() { } /** Returns the name of the field to base the bounding box computation on. */ + @Override public String fieldName() { return this.fieldName; } diff --git a/server/src/main/java/org/opensearch/index/query/GeoDistanceQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/GeoDistanceQueryBuilder.java index 8d126f19a204c..79377ba01701f 100644 --- a/server/src/main/java/org/opensearch/index/query/GeoDistanceQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/GeoDistanceQueryBuilder.java @@ -63,7 +63,7 @@ * * @opensearch.internal */ -public class GeoDistanceQueryBuilder extends AbstractQueryBuilder { +public class GeoDistanceQueryBuilder extends AbstractQueryBuilder implements WithFieldName { public static final String NAME = "geo_distance"; /** Default for distance unit computation. */ @@ -129,6 +129,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { } /** Name of the field this query is operating on. 
*/ + @Override public String fieldName() { return this.fieldName; } diff --git a/server/src/main/java/org/opensearch/index/query/GeoPolygonQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/GeoPolygonQueryBuilder.java index 47eafa3893384..e120f04ed9351 100644 --- a/server/src/main/java/org/opensearch/index/query/GeoPolygonQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/GeoPolygonQueryBuilder.java @@ -61,7 +61,7 @@ * * @opensearch.internal */ -public class GeoPolygonQueryBuilder extends AbstractQueryBuilder { +public class GeoPolygonQueryBuilder extends AbstractQueryBuilder implements WithFieldName { public static final String NAME = "geo_polygon"; /** @@ -131,6 +131,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeBoolean(ignoreUnmapped); } + @Override public String fieldName() { return fieldName; } diff --git a/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java index 7ceb17203e837..2c2c2de943e2f 100644 --- a/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java @@ -61,7 +61,7 @@ * * @opensearch.internal */ -public class MatchBoolPrefixQueryBuilder extends AbstractQueryBuilder { +public class MatchBoolPrefixQueryBuilder extends AbstractQueryBuilder implements WithFieldName { public static final String NAME = "match_bool_prefix"; @@ -127,6 +127,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { } /** Returns the field name used in this query. */ + @Override public String fieldName() { return this.fieldName; } diff --git a/server/src/main/java/org/opensearch/index/query/MatchPhrasePrefixQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MatchPhrasePrefixQueryBuilder.java index 8aa318af69222..cdd17d2c4a0d0 100644 --- a/server/src/main/java/org/opensearch/index/query/MatchPhrasePrefixQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MatchPhrasePrefixQueryBuilder.java @@ -53,7 +53,7 @@ * * @opensearch.internal */ -public class MatchPhrasePrefixQueryBuilder extends AbstractQueryBuilder { +public class MatchPhrasePrefixQueryBuilder extends AbstractQueryBuilder implements WithFieldName { public static final String NAME = "match_phrase_prefix"; public static final ParseField MAX_EXPANSIONS_FIELD = new ParseField("max_expansions"); public static final ParseField ZERO_TERMS_QUERY_FIELD = new ParseField("zero_terms_query"); @@ -109,6 +109,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { } /** Returns the field name used in this query. 
*/ + @Override public String fieldName() { return this.fieldName; } diff --git a/server/src/main/java/org/opensearch/index/query/MatchPhraseQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MatchPhraseQueryBuilder.java index 6cdf6c6600304..97e03d53d38f1 100644 --- a/server/src/main/java/org/opensearch/index/query/MatchPhraseQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MatchPhraseQueryBuilder.java @@ -52,7 +52,7 @@ * * @opensearch.internal */ -public class MatchPhraseQueryBuilder extends AbstractQueryBuilder { +public class MatchPhraseQueryBuilder extends AbstractQueryBuilder implements WithFieldName { public static final String NAME = "match_phrase"; public static final ParseField SLOP_FIELD = new ParseField("slop"); public static final ParseField ZERO_TERMS_QUERY_FIELD = new ParseField("zero_terms_query"); @@ -100,6 +100,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { } /** Returns the field name used in this query. */ + @Override public String fieldName() { return this.fieldName; } diff --git a/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java index 5e9e6a3660e76..593077d18951e 100644 --- a/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java @@ -56,7 +56,7 @@ * * @opensearch.internal */ -public class MatchQueryBuilder extends AbstractQueryBuilder { +public class MatchQueryBuilder extends AbstractQueryBuilder implements WithFieldName { private static final String CUTOFF_FREQUENCY_DEPRECATION_MSG = "you can omit this option, " + "the [match] query can skip block of documents efficiently if the total number of hits is not tracked"; @@ -171,6 +171,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { } /** Returns the field name used in this query. */ + @Override public String fieldName() { return this.fieldName; } diff --git a/server/src/main/java/org/opensearch/index/query/MultiTermQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MultiTermQueryBuilder.java index b8e88da4741bb..f854df79f1ee8 100644 --- a/server/src/main/java/org/opensearch/index/query/MultiTermQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MultiTermQueryBuilder.java @@ -36,9 +36,4 @@ * * @opensearch.internal */ -public interface MultiTermQueryBuilder extends QueryBuilder { - /** - * Get the field name for this query. - */ - String fieldName(); -} +public interface MultiTermQueryBuilder extends QueryBuilder, WithFieldName {} diff --git a/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java index 30a1c29c29126..179673f500a92 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java @@ -322,7 +322,7 @@ public void visit(QueryBuilderVisitor visitor) { * * @opensearch.internal */ - public static class SpanGapQueryBuilder implements SpanQueryBuilder { + public static class SpanGapQueryBuilder implements SpanQueryBuilder, WithFieldName { public static final String NAME = "span_gap"; /** Name of field to match against. 
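* Even this non-query span element implements {@link WithFieldName}, keeping field-name extraction uniform across span builders.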
*/ @@ -358,6 +358,7 @@ public SpanGapQueryBuilder(StreamInput in) throws IOException { /** * @return fieldName The name of the field */ + @Override public String fieldName() { return fieldName; } diff --git a/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java index 4b92d6a1f5460..dbd141e00f81c 100644 --- a/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java @@ -79,7 +79,7 @@ * * @opensearch.internal */ -public class TermsQueryBuilder extends AbstractQueryBuilder { +public class TermsQueryBuilder extends AbstractQueryBuilder implements WithFieldName { public static final String NAME = "terms"; private final String fieldName; @@ -269,6 +269,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { } } + @Override public String fieldName() { return this.fieldName; } diff --git a/server/src/main/java/org/opensearch/index/query/WithFieldName.java b/server/src/main/java/org/opensearch/index/query/WithFieldName.java new file mode 100644 index 0000000000000..adfd789ac3707 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/WithFieldName.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query; + +/** + * Interface for classes with a fieldName method + * + * @opensearch.internal + */ +public interface WithFieldName { + /** + * Get the field name for this query. + */ + String fieldName(); +} diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 5e5686ab3a419..576ac9a26acc7 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -412,7 +412,7 @@ static long getGeneration(String[] filenameTokens) { public static long getTimestamp(String filename) { String[] filenameTokens = filename.split(SEPARATOR); - return RemoteStoreUtils.invertLong(filenameTokens[6]); + return RemoteStoreUtils.invertLong(filenameTokens[filenameTokens.length - 2]); } public static Tuple getNodeIdByPrimaryTermAndGen(String filename) { diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index 85b3f25d950ef..32d886562f22d 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -121,4 +121,8 @@ public Directory newDirectory(String repositoryName, String indexUUID, ShardId s } } + public Supplier getRepositoriesService() { + return this.repositoriesService; + } + } diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java index 0b134b3bddbec..27d34ec0d05af 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java @@ -8,6 +8,7 
@@ package org.opensearch.index.translog; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.blobstore.BlobMetadata; @@ -33,6 +34,7 @@ import java.util.Optional; import java.util.Set; import java.util.TreeSet; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.BooleanSupplier; import java.util.function.LongConsumer; import java.util.function.LongSupplier; @@ -52,10 +54,13 @@ */ public class RemoteFsTimestampAwareTranslog extends RemoteFsTranslog { + private static Logger staticLogger = LogManager.getLogger(RemoteFsTimestampAwareTranslog.class); private final Logger logger; private final Map metadataFilePinnedTimestampMap; // For metadata files, with no min generation in the name, we cache generation data to avoid multiple reads. private final Map> oldFormatMetadataFileGenerationMap; + private final Map> oldFormatMetadataFilePrimaryTermMap; + private final AtomicLong minPrimaryTermInRemote = new AtomicLong(Long.MAX_VALUE); public RemoteFsTimestampAwareTranslog( TranslogConfig config, @@ -86,6 +91,7 @@ public RemoteFsTimestampAwareTranslog( logger = Loggers.getLogger(getClass(), shardId); this.metadataFilePinnedTimestampMap = new HashMap<>(); this.oldFormatMetadataFileGenerationMap = new HashMap<>(); + this.oldFormatMetadataFilePrimaryTermMap = new HashMap<>(); } @Override @@ -165,7 +171,11 @@ public void onResponse(List blobMetadata) { return; } - List metadataFilesToBeDeleted = getMetadataFilesToBeDeleted(metadataFiles); + List metadataFilesToBeDeleted = getMetadataFilesToBeDeleted( + metadataFiles, + metadataFilePinnedTimestampMap, + logger + ); // If index is not deleted, make sure to keep latest metadata file if (indexDeleted == false) { @@ -209,7 +219,7 @@ public void onResponse(List blobMetadata) { oldFormatMetadataFileGenerationMap.keySet().retainAll(metadataFilesNotToBeDeleted); // Delete stale primary terms - deleteStaleRemotePrimaryTerms(metadataFiles); + deleteStaleRemotePrimaryTerms(metadataFilesNotToBeDeleted); } else { remoteGenerationDeletionPermits.release(REMOTE_DELETION_PERMITS); } @@ -259,8 +269,16 @@ protected Set getGenerationsToBeDeleted( return generationsToBeDeleted; } - // Visible for testing protected List getMetadataFilesToBeDeleted(List metadataFiles) { + return getMetadataFilesToBeDeleted(metadataFiles, metadataFilePinnedTimestampMap, logger); + } + + // Visible for testing + protected static List getMetadataFilesToBeDeleted( + List metadataFiles, + Map metadataFilePinnedTimestampMap, + Logger logger + ) { Tuple> pinnedTimestampsState = RemoteStorePinnedTimestampService.getPinnedTimestamps(); // Keep files since last successful run of scheduler @@ -351,27 +369,153 @@ protected Tuple getMinMaxTranslogGenerationFromMetadataFile( } } + private void deleteStaleRemotePrimaryTerms(List metadataFiles) { + deleteStaleRemotePrimaryTerms( + metadataFiles, + translogTransferManager, + oldFormatMetadataFilePrimaryTermMap, + minPrimaryTermInRemote, + logger + ); + } + /** * This method must be called only after there are valid generations to delete in trimUnreferencedReaders as it ensures * implicitly that minimum primary term in latest translog metadata in remote store is the current primary term. *
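* The minimum referenced primary term is derived from metadata file names where the newer naming format allows it, and by reading the metadata contents otherwise. Illustrative example: if the remote store holds folders for primary terms {3, 4, 5} and every live metadata file only references terms >= 5, the folders for terms 3 and 4 are removed.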
* This will also delete all stale translog metadata files from remote except the latest one, based on the metadata file comparator. */ - private void deleteStaleRemotePrimaryTerms(List metadataFiles) { + protected static void deleteStaleRemotePrimaryTerms( + List metadataFiles, + TranslogTransferManager translogTransferManager, + Map> oldFormatMetadataFilePrimaryTermMap, + AtomicLong minPrimaryTermInRemoteAtomicLong, + Logger logger + ) { // The deletion of older translog files in remote store is on a best-effort basis, there is a possibility that there // are older files that are no longer needed and should be cleaned up. In here, we delete all files that are part // of an older primary term. - if (olderPrimaryCleaned.trySet(Boolean.TRUE)) { - if (metadataFiles.isEmpty()) { - logger.trace("No metadata is uploaded yet, returning from deleteStaleRemotePrimaryTerms"); - return; + if (metadataFiles.isEmpty()) { + logger.trace("No metadata is uploaded yet, returning from deleteStaleRemotePrimaryTerms"); + return; + } + Optional minPrimaryTermFromMetadataFiles = metadataFiles.stream().map(file -> { + try { + return getMinMaxPrimaryTermFromMetadataFile(file, translogTransferManager, oldFormatMetadataFilePrimaryTermMap).v1(); + } catch (IOException e) { + return Long.MAX_VALUE; + } + }).min(Long::compareTo); + // First we delete all stale primary terms folders from remote store + Long minPrimaryTermInRemote = getMinPrimaryTermInRemote(minPrimaryTermInRemoteAtomicLong, translogTransferManager, logger); + if (minPrimaryTermFromMetadataFiles.get() > minPrimaryTermInRemote) { + translogTransferManager.deletePrimaryTermsAsync(minPrimaryTermFromMetadataFiles.get()); + minPrimaryTermInRemoteAtomicLong.set(minPrimaryTermFromMetadataFiles.get()); + } else { + logger.debug( + "Skipping primary term cleanup. 
minimumReferencedPrimaryTerm = {}, minPrimaryTermInRemote = {}", + minPrimaryTermFromMetadataFiles.get(), + minPrimaryTermInRemote + ); + } + } + + private static Long getMinPrimaryTermInRemote( + AtomicLong minPrimaryTermInRemote, + TranslogTransferManager translogTransferManager, + Logger logger + ) { + if (minPrimaryTermInRemote.get() == Long.MAX_VALUE) { + try { + Set primaryTermsInRemote = translogTransferManager.listPrimaryTermsInRemote(); + if (primaryTermsInRemote.isEmpty() == false) { + Optional minPrimaryTerm = primaryTermsInRemote.stream().min(Long::compareTo); + minPrimaryTerm.ifPresent(minPrimaryTermInRemote::set); + } + } catch (IOException e) { + logger.error("Exception while listing primary terms in remote translog", e); + } + } + return minPrimaryTermInRemote.get(); + } + + protected static Tuple getMinMaxPrimaryTermFromMetadataFile( + String metadataFile, + TranslogTransferManager translogTransferManager, + Map> oldFormatMetadataFilePrimaryTermMap + ) throws IOException { + Tuple minMaxPrimaryTermFromFileName = TranslogTransferMetadata.getMinMaxPrimaryTermFromFilename(metadataFile); + if (minMaxPrimaryTermFromFileName != null) { + return minMaxPrimaryTermFromFileName; + } else { + if (oldFormatMetadataFilePrimaryTermMap.containsKey(metadataFile)) { + return oldFormatMetadataFilePrimaryTermMap.get(metadataFile); + } else { + TranslogTransferMetadata metadata = translogTransferManager.readMetadata(metadataFile); + long maxPrimaryTerm = TranslogTransferMetadata.getPrimaryTermFromFileName(metadataFile); + long minPrimaryTerm = -1; + if (metadata.getGenerationToPrimaryTermMapper() != null + && metadata.getGenerationToPrimaryTermMapper().values().isEmpty() == false) { + Optional primaryTerm = metadata.getGenerationToPrimaryTermMapper() + .values() + .stream() + .map(s -> Long.parseLong(s)) + .min(Long::compareTo); + if (primaryTerm.isPresent()) { + minPrimaryTerm = primaryTerm.get(); + } + } + Tuple minMaxPrimaryTermTuple = new Tuple<>(minPrimaryTerm, maxPrimaryTerm); + oldFormatMetadataFilePrimaryTermMap.put(metadataFile, minMaxPrimaryTermTuple); + return minMaxPrimaryTermTuple; } - Optional minPrimaryTerm = metadataFiles.stream() - .map(file -> RemoteStoreUtils.invertLong(file.split(METADATA_SEPARATOR)[1])) - .min(Long::compareTo); - // First we delete all stale primary terms folders from remote store - long minimumReferencedPrimaryTerm = minPrimaryTerm.get() - 1; - translogTransferManager.deletePrimaryTermsAsync(minimumReferencedPrimaryTerm); } } + + public static void cleanup(TranslogTransferManager translogTransferManager) throws IOException { + ActionListener> listMetadataFilesListener = new ActionListener<>() { + @Override + public void onResponse(List blobMetadata) { + List metadataFiles = blobMetadata.stream().map(BlobMetadata::name).collect(Collectors.toList()); + + try { + if (metadataFiles.isEmpty()) { + staticLogger.debug("No stale translog metadata files found"); + return; + } + List metadataFilesToBeDeleted = getMetadataFilesToBeDeleted(metadataFiles, new HashMap<>(), staticLogger); + if (metadataFilesToBeDeleted.isEmpty()) { + staticLogger.debug("No metadata files to delete"); + return; + } + staticLogger.debug(() -> "metadataFilesToBeDeleted = " + metadataFilesToBeDeleted); + + // For all the files that we are keeping, fetch min and max generations + List metadataFilesNotToBeDeleted = new ArrayList<>(metadataFiles); + metadataFilesNotToBeDeleted.removeAll(metadataFilesToBeDeleted); + staticLogger.debug(() -> "metadataFilesNotToBeDeleted = " + 
metadataFilesNotToBeDeleted); + + // Delete stale metadata files + translogTransferManager.deleteMetadataFilesAsync(metadataFilesToBeDeleted, () -> {}); + + // Delete stale primary terms + deleteStaleRemotePrimaryTerms( + metadataFilesNotToBeDeleted, + translogTransferManager, + new HashMap<>(), + new AtomicLong(Long.MAX_VALUE), + staticLogger + ); + } catch (Exception e) { + staticLogger.error("Exception while cleaning up metadata and primary terms", e); + } + } + + @Override + public void onFailure(Exception e) { + staticLogger.error("Exception while cleaning up metadata and primary terms", e); + } + }; + translogTransferManager.listTranslogMetadataFilesAsync(listMetadataFilesListener); + } } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index 56a9aa6447dec..291218ea47499 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -545,6 +545,14 @@ public void onFailure(Exception e) { }); } + public Set<Long> listPrimaryTermsInRemote() throws IOException { + Set<String> primaryTermsStr = transferService.listFolders(remoteDataTransferPath); + if (primaryTermsStr != null) { + return primaryTermsStr.stream().map(Long::parseLong).collect(Collectors.toSet()); + } + return new HashSet<>(); + } + /** * Handles deletion of all translog files associated with a primary term. * diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java index 745fa9a8a219a..3b8885055e8f7 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java @@ -19,6 +19,7 @@ import java.util.Arrays; import java.util.Map; import java.util.Objects; +import java.util.Optional; /** * The metadata associated with every transfer {@link TransferSnapshot}. 
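Reviewer note on the cleanup flow above: stripped of the listener plumbing, `cleanup` plus `deleteStaleRemotePrimaryTerms` reduce to one invariant: once the smallest primary term referenced by any retained metadata file is larger than the smallest primary-term folder still present in the remote store, every folder below that referenced minimum is unreachable and can be deleted. A minimal standalone sketch of that decision (toy types and a println in place of `deletePrimaryTermsAsync`, not the OpenSearch APIs):

```java
import java.util.List;
import java.util.Set;

public class StalePrimaryTermCleanupSketch {

    static void cleanupStalePrimaryTerms(List<Long> minTermPerRetainedMetadataFile, Set<Long> primaryTermsInRemote) {
        if (minTermPerRetainedMetadataFile.isEmpty()) {
            return; // no metadata uploaded yet, mirrors the early return above
        }
        // Smallest primary term still referenced by any retained metadata file.
        long minReferenced = minTermPerRetainedMetadataFile.stream().min(Long::compareTo).get();
        // Smallest primary-term folder actually present in the remote store.
        long minInRemote = primaryTermsInRemote.stream().min(Long::compareTo).orElse(Long.MAX_VALUE);
        if (minReferenced > minInRemote) {
            // The real code calls deletePrimaryTermsAsync(minReferenced), removing terms below it.
            System.out.println("delete primary-term folders below " + minReferenced);
        } else {
            System.out.println("skip: no folder older than the referenced minimum " + minReferenced);
        }
    }

    public static void main(String[] args) {
        // Retained metadata references terms {5, 7}; remote still holds folders {3, 4, 5, 7}.
        cleanupStalePrimaryTerms(List.of(5L, 7L), Set.of(3L, 4L, 5L, 7L)); // deletes folders 3 and 4
        cleanupStalePrimaryTerms(List.of(3L, 7L), Set.of(3L, 4L, 5L, 7L)); // nothing to delete
    }
}
```

The AtomicLong in the real method only memoises the remote minimum, so repeated invocations can skip the remote folder listing.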
The metadata is uploaded at the end of the @@ -108,11 +109,28 @@ public String getFileName() { RemoteStoreUtils.invertLong(createdAt), String.valueOf(Objects.hash(nodeId)), RemoteStoreUtils.invertLong(minTranslogGeneration), + String.valueOf(getMinPrimaryTermReferred()), String.valueOf(CURRENT_VERSION) ) ); } + private long getMinPrimaryTermReferred() { + if (generationToPrimaryTermMapper.get() == null || generationToPrimaryTermMapper.get().values().isEmpty()) { + return -1; + } + Optional<Long> minPrimaryTerm = generationToPrimaryTermMapper.get() + .values() + .stream() + .map(s -> Long.parseLong(s)) + .min(Long::compareTo); + if (minPrimaryTerm.isPresent()) { + return minPrimaryTerm.get(); + } else { + return -1; + } + } + public static Tuple<Tuple<Long, Long>, String> getNodeIdByPrimaryTermAndGeneration(String filename) { String[] tokens = filename.split(METADATA_SEPARATOR); if (tokens.length < 6) { @@ -143,15 +161,43 @@ public static Tuple<Long, Long> getMinMaxTranslogGenerationFromFilename(String f assert Version.CURRENT.onOrAfter(Version.V_2_17_0); try { // instead of direct index, we go backwards to avoid running into same separator in nodeId - String minGeneration = tokens[tokens.length - 2]; + String minGeneration = tokens[tokens.length - 3]; String maxGeneration = tokens[2]; return new Tuple<>(RemoteStoreUtils.invertLong(minGeneration), RemoteStoreUtils.invertLong(maxGeneration)); - } catch (NumberFormatException e) { + } catch (Exception e) { logger.error(() -> new ParameterizedMessage("Exception while getting min and max translog generation from: {}", filename), e); return null; } } + public static Tuple<Long, Long> getMinMaxPrimaryTermFromFilename(String filename) { + String[] tokens = filename.split(METADATA_SEPARATOR); + if (tokens.length < 7) { + // For versions < 2.17, we don't have min primary term. 
+ return null; + } + assert Version.CURRENT.onOrAfter(Version.V_2_17_0); + try { + // instead of direct index, we go backwards to avoid running into same separator in nodeId + String minPrimaryTerm = tokens[tokens.length - 2]; + String maxPrimaryTerm = tokens[1]; + return new Tuple<>(Long.parseLong(minPrimaryTerm), RemoteStoreUtils.invertLong(maxPrimaryTerm)); + } catch (Exception e) { + logger.error(() -> new ParameterizedMessage("Exception while getting min and max primary term from: {}", filename), e); + return null; + } + } + + public static long getPrimaryTermFromFileName(String filename) { + String[] tokens = filename.split(METADATA_SEPARATOR); + try { + return RemoteStoreUtils.invertLong(tokens[1]); + } catch (Exception e) { + logger.error(() -> new ParameterizedMessage("Exception while getting max primary term from: {}", filename), e); + return -1; + } + } + @Override public int hashCode() { return Objects.hash(primaryTerm, generation); diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index d07c84be37d41..2dde651a7a042 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -116,6 +116,7 @@ import org.opensearch.index.remote.RemoteStorePathStrategy.BasePathInput; import org.opensearch.index.remote.RemoteStorePathStrategy.SnapshotShardPathInput; import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.snapshots.IndexShardRestoreFailedException; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; @@ -132,6 +133,10 @@ import org.opensearch.index.store.lockmanager.FileLockInfo; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; +import org.opensearch.index.translog.RemoteFsTimestampAwareTranslog; +import org.opensearch.index.translog.RemoteFsTranslog; +import org.opensearch.index.translog.transfer.FileTransferTracker; +import org.opensearch.index.translog.transfer.TranslogTransferManager; import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; @@ -339,6 +344,16 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp Setting.Property.NodeScope ); + /** + * Controls the fixed prefix for the snapshot shard blob path. 
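A side note on the filename handling above: both primary-term accessors read tokens off a separator-joined name, and the min/max method walks backwards from the end so that a nodeId hash containing the separator cannot shift the indices. A hedged standalone sketch of that layout; the `__` separator value and the leading `metadata` token are illustrative assumptions, not shown in this hunk:

```java
import java.util.Locale;

public class TranslogMetadataFilenameSketch {

    static final String SEPARATOR = "__"; // assumed value of METADATA_SEPARATOR

    // Inverted longs sort lexicographically in descending numeric order of the originals.
    static String invertLong(long value) {
        return String.format(Locale.ROOT, "%019d", Long.MAX_VALUE - value);
    }

    static long uninvertLong(String token) {
        return Long.MAX_VALUE - Long.parseLong(token);
    }

    public static void main(String[] args) {
        long primaryTerm = 7, generation = 42, createdAt = 1_700_000_000_000L;
        long minGeneration = 40, minPrimaryTermReferred = 5, version = 2;
        // Post-2.17 layout sketched from getFileName() above: the min primary term is a
        // plain long appended just before the trailing version token.
        String name = String.join(
            SEPARATOR,
            "metadata",                             // assumed prefix token
            invertLong(primaryTerm),                // tokens[1]: max primary term, inverted
            invertLong(generation),
            invertLong(createdAt),
            String.valueOf("node-1".hashCode()),    // may be negative; never indexed directly
            invertLong(minGeneration),
            String.valueOf(minPrimaryTermReferred), // tokens[length - 2]
            String.valueOf(version)
        );
        String[] tokens = name.split(SEPARATOR);
        long maxPrimaryTerm = uninvertLong(tokens[1]);                   // mirrors getPrimaryTermFromFileName
        long minPrimaryTerm = Long.parseLong(tokens[tokens.length - 2]); // mirrors getMinMaxPrimaryTermFromFilename
        System.out.println(maxPrimaryTerm + " " + minPrimaryTerm);       // 7 5
    }
}
```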
+ */ + public static final Setting SNAPSHOT_SHARD_PATH_PREFIX_SETTING = Setting.simpleString( + "cluster.snapshot.shard.path.prefix", + "", + Setting.Property.NodeScope, + Setting.Property.Final + ); + protected volatile boolean supportURLRepo; private volatile int maxShardBlobDeleteBatch; @@ -430,6 +445,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private final NamedXContentRegistry namedXContentRegistry; + private final String snapshotShardPathPrefix; + /** * Flag that is set to {@code true} if this instance is started with {@link #metadata} that has a higher value for * {@link RepositoryMetadata#pendingGeneration()} than for {@link RepositoryMetadata#generation()} indicating a full cluster restart @@ -484,6 +501,7 @@ protected BlobStoreRepository( this.clusterService = clusterService; this.recoverySettings = recoverySettings; this.remoteStoreSettings = new RemoteStoreSettings(clusterService.getSettings(), clusterService.getClusterSettings()); + this.snapshotShardPathPrefix = SNAPSHOT_SHARD_PATH_PREFIX_SETTING.get(clusterService.getSettings()); } @Override @@ -2217,25 +2235,35 @@ private void cleanRemoteStoreDirectoryIfNeeded( } IndexMetadata prevIndexMetadata = this.getSnapshotIndexMetaData(oldRepoData, snapshotId, indexId); if (prevIndexMetadata != null && !isIndexPresent(clusterService, prevIndexMetadata.getIndexUUID())) { - String remoteStoreRepository = IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING.get( + String remoteStoreRepository = IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.get( prevIndexMetadata.getSettings() ); assert (remoteStoreRepository != null); + String remoteTranslogRepositoryName = IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING.get( + prevIndexMetadata.getSettings() + ); + assert (remoteTranslogRepositoryName != null); + Repository remoteTranslogRepository = remoteSegmentStoreDirectoryFactory.getRepositoriesService() + .get() + .repository(remoteTranslogRepositoryName); + RemoteStorePathStrategy remoteStorePathStrategy = RemoteStoreUtils.determineRemoteStorePathStrategy( prevIndexMetadata ); for (int shardId = 0; shardId < prevIndexMetadata.getNumberOfShards(); shardId++) { + ShardId shard = new ShardId(Index.UNKNOWN_INDEX_NAME, prevIndexMetadata.getIndexUUID(), shardId); remoteDirectoryCleanupAsync( remoteSegmentStoreDirectoryFactory, threadPool, remoteStoreRepository, prevIndexMetadata.getIndexUUID(), - new ShardId(Index.UNKNOWN_INDEX_NAME, prevIndexMetadata.getIndexUUID(), shardId), + shard, ThreadPool.Names.REMOTE_PURGE, remoteStorePathStrategy ); + remoteTranslogCleanupAsync(remoteTranslogRepository, shard, remoteStorePathStrategy, prevIndexMetadata); } } } catch (Exception e) { @@ -2255,6 +2283,33 @@ private void cleanRemoteStoreDirectoryIfNeeded( } + private void remoteTranslogCleanupAsync( + Repository remoteTranslogRepository, + ShardId shardId, + RemoteStorePathStrategy remoteStorePathStrategy, + IndexMetadata prevIndexMetadata + ) { + assert remoteTranslogRepository instanceof BlobStoreRepository; + boolean indexMetadataEnabled = RemoteStoreUtils.determineTranslogMetadataEnabled(prevIndexMetadata); + RemoteTranslogTransferTracker remoteTranslogTransferTracker = new RemoteTranslogTransferTracker(shardId, 1000); + FileTransferTracker fileTransferTracker = new FileTransferTracker(shardId, remoteTranslogTransferTracker); + TranslogTransferManager translogTransferManager = RemoteFsTranslog.buildTranslogTransferManager( + (BlobStoreRepository) remoteTranslogRepository, + threadPool, + 
shardId, + fileTransferTracker, + remoteTranslogTransferTracker, + remoteStorePathStrategy, + remoteStoreSettings, + indexMetadataEnabled + ); + try { + RemoteFsTimestampAwareTranslog.cleanup(translogTransferManager); + } catch (IOException e) { + logger.error("Exception while cleaning up remote translog for shard: " + shardId, e); + } + } + /** * Finds and returns a list of shard paths that match the given index ID. * @@ -2786,6 +2841,7 @@ private BlobPath shardPath(IndexId indexId, int shardId) { SnapshotShardPathInput shardPathInput = new SnapshotShardPathInput.Builder().basePath(basePath()) .indexUUID(indexId.getId()) .shardId(String.valueOf(shardId)) + .fixedPrefix(snapshotShardPathPrefix) .build(); PathHashAlgorithm pathHashAlgorithm = pathType != PathType.FIXED ? FNV_1A_COMPOSITE_1 : null; return pathType.path(shardPathInput, pathHashAlgorithm); diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index dc5fe59911e90..5307902692e50 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -814,6 +814,34 @@ public void onFailure(Exception e) { ); } + private void cloneSnapshotPinnedTimestamp( + RepositoryData repositoryData, + SnapshotId sourceSnapshot, + Snapshot snapshot, + long timestampToPin, + ActionListener listener + ) { + remoteStorePinnedTimestampService.cloneTimestamp( + timestampToPin, + snapshot.getRepository() + SNAPSHOT_PINNED_TIMESTAMP_DELIMITER + sourceSnapshot.getUUID(), + snapshot.getRepository() + SNAPSHOT_PINNED_TIMESTAMP_DELIMITER + snapshot.getSnapshotId().getUUID(), + new ActionListener() { + @Override + public void onResponse(Void unused) { + logger.debug("Timestamp pinned successfully for clone snapshot {}", snapshot.getSnapshotId().getName()); + listener.onResponse(repositoryData); + } + + @Override + public void onFailure(Exception e) { + logger.error("Failed to pin timestamp for clone snapshot {} with exception {}", snapshot.getSnapshotId().getName(), e); + listener.onFailure(e); + + } + } + ); + } + private static void ensureSnapshotNameNotRunning( List runningSnapshots, String repositoryName, @@ -835,9 +863,14 @@ private static Map getInFlightIndexIds(List listener) { + /** + * This method does some pre-validation, checks for the presence of source snapshot in repository data. + * For shallow snapshot v2 clone, it checks the pinned timestamp to be greater than zero in the source snapshot. 
+ * + * @param request snapshot request + * @param listener snapshot completion listener + */ + public void executeClone(CloneSnapshotRequest request, ActionListener listener) { final String repositoryName = request.repository(); Repository repository = repositoriesService.repository(repositoryName); if (repository.isReadOnly()) { @@ -846,10 +879,230 @@ public void cloneSnapshot(CloneSnapshotRequest request, ActionListener lis } final String snapshotName = indexNameExpressionResolver.resolveDateMathExpression(request.target()); validate(repositoryName, snapshotName); - // TODO: create snapshot UUID in CloneSnapshotRequest and make this operation idempotent to cleanly deal with transport layer - // retries final SnapshotId snapshotId = new SnapshotId(snapshotName, UUIDs.randomBase64UUID()); final Snapshot snapshot = new Snapshot(repositoryName, snapshotId); + try { + final StepListener repositoryDataListener = new StepListener<>(); + repositoriesService.getRepositoryData(repositoryName, repositoryDataListener); + repositoryDataListener.whenComplete(repositoryData -> { + final SnapshotId sourceSnapshotId = repositoryData.getSnapshotIds() + .stream() + .filter(src -> src.getName().equals(request.source())) + .findAny() + .orElseThrow(() -> new SnapshotMissingException(repositoryName, request.source())); + final StepListener snapshotInfoListener = new StepListener<>(); + final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); + + executor.execute(ActionRunnable.supply(snapshotInfoListener, () -> repository.getSnapshotInfo(sourceSnapshotId))); + + snapshotInfoListener.whenComplete(sourceSnapshotInfo -> { + if (sourceSnapshotInfo.getPinnedTimestamp() > 0) { + if (hasWildCardPatterForCloneSnapshotV2(request.indices()) == false) { + throw new SnapshotException( + repositoryName, + snapshotName, + "Aborting clone for Snapshot-v2, only wildcard pattern '*' is supported for indices" + ); + } + cloneSnapshotV2(request, snapshot, repositoryName, repository, listener); + } else { + cloneSnapshot(request, snapshot, repositoryName, repository, listener); + } + }, e -> listener.onFailure(e)); + }, e -> listener.onFailure(e)); + + } catch (Exception e) { + assert false : new AssertionError(e); + logger.error("Snapshot {} clone failed with exception {}", snapshot.getSnapshotId().getName(), e); + listener.onFailure(e); + } + } + + /** + * This method is responsible for creating a clone of the shallow snapshot v2. + * It pins the same timestamp that is pinned by the source snapshot. + * + * Unlike traditional snapshot operations, this method performs a synchronous clone execution and doesn't + * upload any shard metadata to the snapshot repository. + * The pinned timestamp is later reconciled with remote store segment and translog metadata files during the restore + * operation. 
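Pausing the Javadoc for a gloss on the dispatch just implemented: a source snapshot whose pinned timestamp is greater than zero is a shallow v2 snapshot, and cloning it is only supported with the literal `*` index pattern. A minimal sketch of that gate, with plain Java exceptions standing in for `SnapshotException`:

```java
import java.util.Arrays;

public class CloneDispatchSketch {

    // Mirrors hasWildCardPatterForCloneSnapshotV2: only a literal "*" entry qualifies.
    static boolean hasWildcardPattern(String[] indices) {
        return Arrays.asList(indices).contains("*");
    }

    // Returns which clone path executeClone would take for the given inputs.
    static String dispatch(long sourcePinnedTimestamp, String[] requestedIndices) {
        if (sourcePinnedTimestamp > 0) {
            if (!hasWildcardPattern(requestedIndices)) {
                throw new IllegalArgumentException("only wildcard pattern '*' is supported for snapshot-v2 clone");
            }
            return "cloneSnapshotV2"; // re-pins the source timestamp, uploads no shard metadata
        }
        return "cloneSnapshot"; // legacy per-shard clone in the repository
    }

    public static void main(String[] args) {
        System.out.println(dispatch(0L, new String[] { "logs-*" })); // cloneSnapshot
        System.out.println(dispatch(1234L, new String[] { "*" }));   // cloneSnapshotV2
        try {
            dispatch(1234L, new String[] { "logs" });
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
```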
+ * + * @param request snapshot request + * @param snapshot clone snapshot + * @param repositoryName snapshot repository name + * @param repository snapshot repository + * @param listener completion listener + */ + public void cloneSnapshotV2( + CloneSnapshotRequest request, + Snapshot snapshot, + String repositoryName, + Repository repository, + ActionListener listener + ) { + + long startTime = System.currentTimeMillis(); + ClusterState currentState = clusterService.state(); + String snapshotName = snapshot.getSnapshotId().getName(); + repository.executeConsistentStateUpdate(repositoryData -> new ClusterStateUpdateTask(Priority.URGENT) { + private SnapshotsInProgress.Entry newEntry; + private SnapshotId sourceSnapshotId; + private List indicesForSnapshot; + + @Override + public ClusterState execute(ClusterState currentState) { + createSnapshotPreValidations(currentState, repositoryData, repositoryName, snapshotName); + final SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); + final List runningSnapshots = snapshots.entries(); + + sourceSnapshotId = repositoryData.getSnapshotIds() + .stream() + .filter(src -> src.getName().equals(request.source())) + .findAny() + .orElseThrow(() -> new SnapshotMissingException(repositoryName, request.source())); + + final SnapshotDeletionsInProgress deletionsInProgress = currentState.custom( + SnapshotDeletionsInProgress.TYPE, + SnapshotDeletionsInProgress.EMPTY + ); + if (deletionsInProgress.getEntries().stream().anyMatch(entry -> entry.getSnapshots().contains(sourceSnapshotId))) { + throw new ConcurrentSnapshotExecutionException( + repositoryName, + sourceSnapshotId.getName(), + "cannot clone from snapshot that is being deleted" + ); + } + indicesForSnapshot = new ArrayList<>(); + for (IndexId indexId : repositoryData.getIndices().values()) { + if (repositoryData.getSnapshots(indexId).contains(sourceSnapshotId)) { + indicesForSnapshot.add(indexId.getName()); + } + } + + newEntry = SnapshotsInProgress.startClone( + snapshot, + sourceSnapshotId, + repositoryData.resolveIndices(indicesForSnapshot), + threadPool.absoluteTimeInMillis(), + repositoryData.getGenId(), + minCompatibleVersion(currentState.nodes().getMinNodeVersion(), repositoryData, null) + ); + final List newEntries = new ArrayList<>(runningSnapshots); + newEntries.add(newEntry); + return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, SnapshotsInProgress.of(newEntries)).build(); + } + + @Override + public void onFailure(String source, Exception e) { + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to clone snapshot-v2", repositoryName, snapshotName), e); + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, final ClusterState newState) { + logger.info("snapshot-v2 clone [{}] started", snapshot); + final StepListener snapshotInfoListener = new StepListener<>(); + final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); + + executor.execute(ActionRunnable.supply(snapshotInfoListener, () -> repository.getSnapshotInfo(sourceSnapshotId))); + final ShardGenerations shardGenerations = repositoryData.shardGenerations(); + + snapshotInfoListener.whenComplete(snapshotInfo -> { + final SnapshotInfo cloneSnapshotInfo = new SnapshotInfo( + snapshot.getSnapshotId(), + indicesForSnapshot, + newEntry.dataStreams(), + startTime, + null, + System.currentTimeMillis(), + snapshotInfo.totalShards(), + Collections.emptyList(), + 
newEntry.includeGlobalState(), + newEntry.userMetadata(), + true, + snapshotInfo.getPinnedTimestamp() + ); + if (!clusterService.state().nodes().isLocalNodeElectedClusterManager()) { + throw new SnapshotException(repositoryName, snapshotName, "Aborting snapshot-v2 clone, no longer cluster manager"); + } + final StepListener pinnedTimestampListener = new StepListener<>(); + pinnedTimestampListener.whenComplete(repoData -> { + logger.info("snapshot-v2 clone [{}] completed successfully", snapshot); + listener.onResponse(null); + }, listener::onFailure); + repository.finalizeSnapshot( + shardGenerations, + repositoryData.getGenId(), + metadataForSnapshot( + currentState.metadata(), + newEntry.includeGlobalState(), + false, + newEntry.dataStreams(), + newEntry.indices() + ), + cloneSnapshotInfo, + repositoryData.getVersion(sourceSnapshotId), + state -> stateWithoutSnapshot(state, snapshot), + Priority.IMMEDIATE, + new ActionListener() { + @Override + public void onResponse(RepositoryData repositoryData) { + if (!clusterService.state().nodes().isLocalNodeElectedClusterManager()) { + failSnapshotCompletionListeners( + snapshot, + new SnapshotException(snapshot, "Aborting Snapshot-v2 clone, no longer cluster manager") + ); + listener.onFailure( + new SnapshotException( + repositoryName, + snapshotName, + "Aborting Snapshot-v2 clone, no longer cluster manager" + ) + ); + return; + } + cloneSnapshotPinnedTimestamp( + repositoryData, + sourceSnapshotId, + snapshot, + snapshotInfo.getPinnedTimestamp(), + pinnedTimestampListener + ); + } + + @Override + public void onFailure(Exception e) { + logger.error( + "Failed to upload files to snapshot repo {} for clone snapshot-v2 {} ", + repositoryName, + snapshotName + ); + listener.onFailure(e); + } + } + ); + + }, listener::onFailure); + } + + @Override + public TimeValue timeout() { + return request.clusterManagerNodeTimeout(); + } + }, "clone_snapshot_v2 [" + request.source() + "][" + snapshotName + ']', listener::onFailure); + } + + // TODO: It is worth revisiting the design choice of creating a placeholder entry in snapshots-in-progress here once we have a cache + // for repository metadata and loading it has predictable performance + public void cloneSnapshot( + CloneSnapshotRequest request, + Snapshot snapshot, + String repositoryName, + Repository repository, + ActionListener listener + ) { + String snapshotName = snapshot.getSnapshotId().getName(); + initializingClones.add(snapshot); repository.executeConsistentStateUpdate(repositoryData -> new ClusterStateUpdateTask() { @@ -4091,6 +4344,15 @@ private void startExecutableClones(SnapshotsInProgress snapshotsInProgress, @Nul } } + private boolean hasWildCardPatterForCloneSnapshotV2(String[] indices) { + for (String index : indices) { + if ("*".equals(index)) { + return true; + } + } + return false; + } + private class UpdateSnapshotStatusAction extends TransportClusterManagerNodeAction< UpdateIndexShardSnapshotStatusRequest, UpdateIndexShardSnapshotStatusResponse> { diff --git a/server/src/main/java/org/opensearch/wlm/QueryGroupLevelResourceUsageView.java b/server/src/main/java/org/opensearch/wlm/QueryGroupLevelResourceUsageView.java index 7577c8573ec10..de213eaab64a8 100644 --- a/server/src/main/java/org/opensearch/wlm/QueryGroupLevelResourceUsageView.java +++ b/server/src/main/java/org/opensearch/wlm/QueryGroupLevelResourceUsageView.java @@ -8,8 +8,6 @@ package org.opensearch.wlm; -import org.opensearch.tasks.Task; - import java.util.List; import java.util.Map; @@ -20,11 +18,11 @@ */ public class 
QueryGroupLevelResourceUsageView { // resourceUsage holds the resource usage data for a QueryGroup at a point in time - private final Map<ResourceType, Long> resourceUsage; + private final Map<ResourceType, Double> resourceUsage; // activeTasks holds the list of active tasks for a QueryGroup at a point in time - private final List<Task> activeTasks; + private final List<QueryGroupTask> activeTasks; - public QueryGroupLevelResourceUsageView(Map<ResourceType, Long> resourceUsage, List<Task> activeTasks) { + public QueryGroupLevelResourceUsageView(Map<ResourceType, Double> resourceUsage, List<QueryGroupTask> activeTasks) { this.resourceUsage = resourceUsage; this.activeTasks = activeTasks; } @@ -34,7 +32,7 @@ public QueryGroupLevelResourceUsageView(Map<ResourceType, Long> resourceUsage, L * * @return The map of resource usage data */ - public Map<ResourceType, Long> getResourceUsageData() { + public Map<ResourceType, Double> getResourceUsageData() { return resourceUsage; } @@ -43,7 +41,7 @@ public Map<ResourceType, Long> getResourceUsageData() { * * @return The list of active tasks */ - public List<Task> getActiveTasks() { + public List<QueryGroupTask> getActiveTasks() { return activeTasks; } } diff --git a/server/src/main/java/org/opensearch/wlm/QueryGroupTask.java b/server/src/main/java/org/opensearch/wlm/QueryGroupTask.java index 4eb413be61b72..a1cb766579d43 100644 --- a/server/src/main/java/org/opensearch/wlm/QueryGroupTask.java +++ b/server/src/main/java/org/opensearch/wlm/QueryGroupTask.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.tasks.TaskId; @@ -17,6 +18,7 @@ import java.util.Map; import java.util.Optional; +import java.util.function.LongSupplier; import java.util.function.Supplier; import static org.opensearch.search.SearchService.NO_TIMEOUT; @@ -24,15 +26,17 @@ /** * Base class to define QueryGroup tasks */ +@PublicApi(since = "2.18.0") public class QueryGroupTask extends CancellableTask { private static final Logger logger = LogManager.getLogger(QueryGroupTask.class); public static final String QUERY_GROUP_ID_HEADER = "queryGroupId"; public static final Supplier<String> DEFAULT_QUERY_GROUP_ID_SUPPLIER = () -> "DEFAULT_QUERY_GROUP"; + private final LongSupplier nanoTimeSupplier; private String queryGroupId; public QueryGroupTask(long id, String type, String action, String description, TaskId parentTaskId, Map<String, String> headers) { - this(id, type, action, description, parentTaskId, headers, NO_TIMEOUT); + this(id, type, action, description, parentTaskId, headers, NO_TIMEOUT, System::nanoTime); } public QueryGroupTask( @@ -43,8 +47,22 @@ public QueryGroupTask( TaskId parentTaskId, Map<String, String> headers, TimeValue cancelAfterTimeInterval + ) { + this(id, type, action, description, parentTaskId, headers, cancelAfterTimeInterval, System::nanoTime); + } + + public QueryGroupTask( + long id, + String type, + String action, + String description, + TaskId parentTaskId, + Map<String, String> headers, + TimeValue cancelAfterTimeInterval, + LongSupplier nanoTimeSupplier ) { super(id, type, action, description, parentTaskId, headers, cancelAfterTimeInterval); + this.nanoTimeSupplier = nanoTimeSupplier; } /** @@ -69,6 +87,10 @@ public final void setQueryGroupId(final ThreadContext threadContext) { .orElse(DEFAULT_QUERY_GROUP_ID_SUPPLIER.get()); } + public long getElapsedTime() { + return nanoTimeSupplier.getAsLong() - getStartTimeNanos(); + } + @Override public boolean shouldCancelChildrenOnCancellation() { return false; diff --git a/server/src/main/java/org/opensearch/wlm/ResourceType.java 
b/server/src/main/java/org/opensearch/wlm/ResourceType.java index b2485988c6ef6..01d1d1fb63880 100644 --- a/server/src/main/java/org/opensearch/wlm/ResourceType.java +++ b/server/src/main/java/org/opensearch/wlm/ResourceType.java @@ -10,8 +10,9 @@ import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.tasks.resourcetracker.ResourceStats; -import org.opensearch.tasks.Task; +import org.opensearch.wlm.tracker.CpuUsageCalculator; +import org.opensearch.wlm.tracker.MemoryUsageCalculator; +import org.opensearch.wlm.tracker.ResourceUsageCalculator; import java.io.IOException; import java.util.List; @@ -25,19 +26,25 @@ */ @PublicApi(since = "2.17.0") public enum ResourceType { - CPU("cpu", task -> task.getTotalResourceUtilization(ResourceStats.CPU), true), - MEMORY("memory", task -> task.getTotalResourceUtilization(ResourceStats.MEMORY), true); + CPU("cpu", true, CpuUsageCalculator.INSTANCE, WorkloadManagementSettings::getNodeLevelCpuCancellationThreshold), + MEMORY("memory", true, MemoryUsageCalculator.INSTANCE, WorkloadManagementSettings::getNodeLevelMemoryCancellationThreshold); private final String name; - private final Function<Task, Long> getResourceUsage; private final boolean statsEnabled; - + private final ResourceUsageCalculator resourceUsageCalculator; + private final Function<WorkloadManagementSettings, Double> nodeLevelThresholdSupplier; private static List<ResourceType> sortedValues = List.of(CPU, MEMORY); - ResourceType(String name, Function<Task, Long> getResourceUsage, boolean statsEnabled) { + ResourceType( + String name, + boolean statsEnabled, + ResourceUsageCalculator resourceUsageCalculator, + Function<WorkloadManagementSettings, Double> nodeLevelThresholdSupplier + ) { this.name = name; - this.getResourceUsage = getResourceUsage; this.statsEnabled = statsEnabled; + this.resourceUsageCalculator = resourceUsageCalculator; + this.nodeLevelThresholdSupplier = nodeLevelThresholdSupplier; } /** @@ -62,20 +69,18 @@ public String getName() { return name; } - /** - * Gets the resource usage for a given resource type and task. 
- * - * @param task the task for which to calculate resource usage - * @return the resource usage - */ - public long getResourceUsage(Task task) { - return getResourceUsage.apply(task); - } - public boolean hasStatsEnabled() { return statsEnabled; } + public ResourceUsageCalculator getResourceUsageCalculator() { + return resourceUsageCalculator; + } + + public double getNodeLevelThreshold(WorkloadManagementSettings settings) { + return nodeLevelThresholdSupplier.apply(settings); + } + public static List<ResourceType> getSortedValues() { return sortedValues; } diff --git a/server/src/main/java/org/opensearch/wlm/WorkloadManagementSettings.java b/server/src/main/java/org/opensearch/wlm/WorkloadManagementSettings.java index b104925df77b3..b3577c1b3219d 100644 --- a/server/src/main/java/org/opensearch/wlm/WorkloadManagementSettings.java +++ b/server/src/main/java/org/opensearch/wlm/WorkloadManagementSettings.java @@ -8,6 +8,7 @@ package org.opensearch.wlm; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; @@ -15,6 +16,7 @@ /** * Main class to declare Workload Management related settings */ +@PublicApi(since = "2.18.0") public class WorkloadManagementSettings { private static final Double DEFAULT_NODE_LEVEL_MEMORY_REJECTION_THRESHOLD = 0.8; private static final Double DEFAULT_NODE_LEVEL_MEMORY_CANCELLATION_THRESHOLD = 0.9; diff --git a/server/src/main/java/org/opensearch/wlm/cancellation/MaximumResourceTaskSelectionStrategy.java b/server/src/main/java/org/opensearch/wlm/cancellation/MaximumResourceTaskSelectionStrategy.java new file mode 100644 index 0000000000000..ffb326c07e7ac --- /dev/null +++ b/server/src/main/java/org/opensearch/wlm/cancellation/MaximumResourceTaskSelectionStrategy.java @@ -0,0 +1,71 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.wlm.cancellation; + +import org.opensearch.wlm.QueryGroupTask; +import org.opensearch.wlm.ResourceType; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.stream.Collectors; + +import static org.opensearch.wlm.cancellation.QueryGroupTaskCancellationService.MIN_VALUE; + +/** + * Represents the highest-resource-consuming-task-first selection strategy. + */ +public class MaximumResourceTaskSelectionStrategy implements TaskSelectionStrategy { + + public MaximumResourceTaskSelectionStrategy() {} + + /** + * Returns a comparator that defines the sorting condition for tasks. + * This is the default implementation, since the most resource-consuming tasks are the most likely to regress performance; + * from a resiliency point of view it makes sense to cancel them first. + * + * @return The comparator + */ + private Comparator<QueryGroupTask> sortingCondition(ResourceType resourceType) { + return Comparator.comparingDouble(task -> resourceType.getResourceUsageCalculator().calculateTaskResourceUsage(task)); + } + + /** + * Selects tasks for cancellation based on the provided limit and resource type. + * The tasks are sorted based on the sorting condition and then selected until the accumulated resource usage reaches the limit. 
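Before the parameter list, a standalone sketch of the selection loop described above, with a plain record in place of `QueryGroupTask` and a precomputed usage number per task: sort descending by usage, then keep taking tasks until the accumulated usage exceeds the limit.

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class MaxResourceFirstSelectionSketch {

    record TaskUsage(long id, double usage) {}

    static final double MIN_VALUE = 1e-9; // same epsilon the cancellation service uses

    // Highest consumers first; stop once the clawed-back usage exceeds the limit.
    static List<TaskUsage> select(List<TaskUsage> tasks, double limit) {
        List<TaskUsage> sorted = tasks.stream()
            .sorted(Comparator.comparingDouble(TaskUsage::usage).reversed())
            .toList();
        List<TaskUsage> selected = new ArrayList<>();
        double accumulated = 0;
        for (TaskUsage task : sorted) {
            selected.add(task);
            accumulated += task.usage();
            if ((accumulated - limit) > MIN_VALUE) {
                break;
            }
        }
        return selected;
    }

    public static void main(String[] args) {
        List<TaskUsage> tasks = List.of(new TaskUsage(1, 0.05), new TaskUsage(2, 0.30), new TaskUsage(3, 0.10));
        // Need to recover 0.35 of usage: picks task 2 (0.30), then task 3 (0.40 total > 0.35), then stops.
        System.out.println(select(tasks, 0.35)); // [TaskUsage[id=2, usage=0.3], TaskUsage[id=3, usage=0.1]]
    }
}
```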
+ * + * @param tasks The list of tasks from which to select + * @param limit The limit on the accumulated resource usage + * @param resourceType The type of resource to consider + * @return The list of selected tasks + * @throws IllegalArgumentException If the limit is less than zero + */ + public List<QueryGroupTask> selectTasksForCancellation(List<QueryGroupTask> tasks, double limit, ResourceType resourceType) { + if (limit < 0) { + throw new IllegalArgumentException("limit has to be non-negative"); + } + if (limit < MIN_VALUE) { + return Collections.emptyList(); + } + + List<QueryGroupTask> sortedTasks = tasks.stream().sorted(sortingCondition(resourceType).reversed()).collect(Collectors.toList()); + + List<QueryGroupTask> selectedTasks = new ArrayList<>(); + double accumulated = 0; + for (QueryGroupTask task : sortedTasks) { + selectedTasks.add(task); + accumulated += resourceType.getResourceUsageCalculator().calculateTaskResourceUsage(task); + if ((accumulated - limit) > MIN_VALUE) { + break; + } + } + return selectedTasks; + } +} diff --git a/server/src/main/java/org/opensearch/wlm/cancellation/QueryGroupTaskCancellationService.java b/server/src/main/java/org/opensearch/wlm/cancellation/QueryGroupTaskCancellationService.java new file mode 100644 index 0000000000000..a2c97c8d8635b --- /dev/null +++ b/server/src/main/java/org/opensearch/wlm/cancellation/QueryGroupTaskCancellationService.java @@ -0,0 +1,205 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.wlm.cancellation; + +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.tasks.CancellableTask; +import org.opensearch.tasks.TaskCancellation; +import org.opensearch.wlm.MutableQueryGroupFragment.ResiliencyMode; +import org.opensearch.wlm.QueryGroupLevelResourceUsageView; +import org.opensearch.wlm.QueryGroupTask; +import org.opensearch.wlm.ResourceType; +import org.opensearch.wlm.WorkloadManagementSettings; +import org.opensearch.wlm.tracker.QueryGroupResourceUsageTrackerService; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.function.BooleanSupplier; +import java.util.function.Consumer; +import java.util.stream.Collectors; + +import static org.opensearch.wlm.tracker.QueryGroupResourceUsageTrackerService.TRACKED_RESOURCES; + +/** + * Manages the cancellation of tasks enforced by QueryGroup thresholds on resource usage criteria. + * This class utilizes a strategy pattern through {@link MaximumResourceTaskSelectionStrategy} to identify tasks that exceed + * predefined resource usage limits and are therefore eligible for cancellation. + * + * <p>The cancellation process is initiated by evaluating the resource usage of each QueryGroup against its + * resource limits. Tasks that contribute to exceeding these limits are selected for cancellation based on the + * implemented task selection strategy.</p> + * + * <p>Instances of this class are configured with a map linking QueryGroup IDs to their corresponding resource usage + * views, a set of active QueryGroups, and a task selection strategy. These components collectively facilitate the + * identification and cancellation of tasks that threaten to breach QueryGroup resource limits.</p>
+ * + * @see MaximumResourceTaskSelectionStrategy + * @see QueryGroup + * @see ResourceType + */ +public class QueryGroupTaskCancellationService { + public static final double MIN_VALUE = 1e-9; + + private final WorkloadManagementSettings workloadManagementSettings; + private final TaskSelectionStrategy taskSelectionStrategy; + private final QueryGroupResourceUsageTrackerService resourceUsageTrackerService; + // a map of QueryGroupId to its corresponding QueryGroupLevelResourceUsageView object + Map<String, QueryGroupLevelResourceUsageView> queryGroupLevelResourceUsageViews; + private final Collection<QueryGroup> activeQueryGroups; + private final Collection<QueryGroup> deletedQueryGroups; + + public QueryGroupTaskCancellationService( + WorkloadManagementSettings workloadManagementSettings, + TaskSelectionStrategy taskSelectionStrategy, + QueryGroupResourceUsageTrackerService resourceUsageTrackerService, + Collection<QueryGroup> activeQueryGroups, + Collection<QueryGroup> deletedQueryGroups + ) { + this.workloadManagementSettings = workloadManagementSettings; + this.taskSelectionStrategy = taskSelectionStrategy; + this.resourceUsageTrackerService = resourceUsageTrackerService; + this.activeQueryGroups = activeQueryGroups; + this.deletedQueryGroups = deletedQueryGroups; + } + + /** + * Cancel tasks based on the implemented strategy. + */ + public final void cancelTasks(BooleanSupplier isNodeInDuress) { + queryGroupLevelResourceUsageViews = resourceUsageTrackerService.constructQueryGroupLevelUsageViews(); + // cancel tasks from QueryGroups that are in Enforced mode and are breaching their resource limits + cancelTasks(ResiliencyMode.ENFORCED); + // if the node is in duress, cancel tasks accordingly. + handleNodeDuress(isNodeInDuress); + } + + private void handleNodeDuress(BooleanSupplier isNodeInDuress) { + if (!isNodeInDuress.getAsBoolean()) { + return; + } + // List of tasks to be executed in order if the node is in duress + List<Consumer<Void>> duressActions = List.of(v -> cancelTasksFromDeletedQueryGroups(), v -> cancelTasks(ResiliencyMode.SOFT)); + + for (Consumer<Void> duressAction : duressActions) { + if (!isNodeInDuress.getAsBoolean()) { + break; + } + duressAction.accept(null); + } + } + + private void cancelTasksFromDeletedQueryGroups() { + cancelTasks(getAllCancellableTasks(this.deletedQueryGroups)); + } + + /** + * Get all cancellable tasks from the QueryGroups. + * + * @return List of tasks that can be cancelled + */ + List<TaskCancellation> getAllCancellableTasks(ResiliencyMode resiliencyMode) { + return getAllCancellableTasks( + activeQueryGroups.stream().filter(queryGroup -> queryGroup.getResiliencyMode() == resiliencyMode).collect(Collectors.toList()) + ); + } + + /** + * Get all cancellable tasks from the given QueryGroups. 
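One more aside before the method bodies: the eligibility arithmetic used throughout this service scales a QueryGroup's configured limit by the node-level cancellation threshold, and only usage beyond that product counts as excess to be recovered. A small sketch with made-up numbers:

```java
public class QueryGroupThresholdSketch {

    // Mirrors getExcessUsage/getNormalisedThreshold: the limit is scaled before comparison.
    static double excessUsage(double currentUsage, double queryGroupLimit, double nodeLevelCancellationThreshold) {
        double normalisedThreshold = queryGroupLimit * nodeLevelCancellationThreshold;
        return currentUsage - normalisedThreshold;
    }

    public static void main(String[] args) {
        // A group limited to 40% of a resource on a node with a 0.9 cancellation
        // threshold starts cancelling once usage passes 0.4 * 0.9 = 0.36.
        System.out.println(excessUsage(0.50, 0.40, 0.90)); // ~0.14 of usage must be cancelled
        System.out.println(excessUsage(0.30, 0.40, 0.90)); // negative, nothing to cancel
    }
}
```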
+ * + * @return List of tasks that can be cancelled + */ + List<TaskCancellation> getAllCancellableTasks(Collection<QueryGroup> queryGroups) { + List<TaskCancellation> taskCancellations = new ArrayList<>(); + for (QueryGroup queryGroup : queryGroups) { + final List<TaskCancellation.Reason> reasons = new ArrayList<>(); + List<QueryGroupTask> selectedTasks = new ArrayList<>(); + for (ResourceType resourceType : TRACKED_RESOURCES) { + // We need to consider the already selected tasks since those tasks also consumed the resources + double excessUsage = getExcessUsage(queryGroup, resourceType) - resourceType.getResourceUsageCalculator() + .calculateResourceUsage(selectedTasks); + if (excessUsage > MIN_VALUE) { + reasons.add(new TaskCancellation.Reason(generateReasonString(queryGroup, resourceType), 1)); + // TODO: We will need to add the cancellation callback for these resources for the queryGroup to reflect stats + + // Only add tasks not already added to avoid double cancellations + selectedTasks.addAll( + taskSelectionStrategy.selectTasksForCancellation(getTasksFor(queryGroup), excessUsage, resourceType) + .stream() + .filter(x -> selectedTasks.stream().noneMatch(y -> x.getId() == y.getId())) + .collect(Collectors.toList()) + ); + } + } + + if (!reasons.isEmpty()) { + taskCancellations.addAll( + selectedTasks.stream().map(task -> createTaskCancellation(task, reasons)).collect(Collectors.toList()) + ); + } + } + return taskCancellations; + } + + private String generateReasonString(QueryGroup queryGroup, ResourceType resourceType) { + final double currentUsage = getCurrentUsage(queryGroup, resourceType); + return "QueryGroup ID : " + + queryGroup.get_id() + + " breached the resource limit: (" + + currentUsage + + " > " + + queryGroup.getResourceLimits().get(resourceType) + + ") for resource type : " + + resourceType.getName(); + } + + private List<QueryGroupTask> getTasksFor(QueryGroup queryGroup) { + return queryGroupLevelResourceUsageViews.get(queryGroup.get_id()).getActiveTasks(); + } + + private void cancelTasks(ResiliencyMode resiliencyMode) { + cancelTasks(getAllCancellableTasks(resiliencyMode)); + } + + private void cancelTasks(List<TaskCancellation> cancellableTasks) { + cancellableTasks.forEach(TaskCancellation::cancel); + } + + private TaskCancellation createTaskCancellation(CancellableTask task, List<TaskCancellation.Reason> reasons) { + return new TaskCancellation(task, reasons, List.of(this::callbackOnCancel)); + } + + private double getExcessUsage(QueryGroup queryGroup, ResourceType resourceType) { + if (queryGroup.getResourceLimits().get(resourceType) == null + || !queryGroupLevelResourceUsageViews.containsKey(queryGroup.get_id())) { + return 0; + } + return getCurrentUsage(queryGroup, resourceType) - getNormalisedThreshold(queryGroup, resourceType); + } + + private double getCurrentUsage(QueryGroup queryGroup, ResourceType resourceType) { + final QueryGroupLevelResourceUsageView queryGroupResourceUsageView = queryGroupLevelResourceUsageViews.get(queryGroup.get_id()); + return queryGroupResourceUsageView.getResourceUsageData().get(resourceType); + } + + /** + * Normalises the configured value with respect to the node-level cancellation threshold. + * @param queryGroup instance + * @param resourceType the resource type being evaluated + * @return normalised value with respect to the node-level cancellation threshold + */ + private double getNormalisedThreshold(QueryGroup queryGroup, ResourceType resourceType) { + double nodeLevelCancellationThreshold = resourceType.getNodeLevelThreshold(workloadManagementSettings); + return queryGroup.getResourceLimits().get(resourceType) * nodeLevelCancellationThreshold; + } + + private void callbackOnCancel() { + // TODO Implement callback logic here mostly 
used for Stats + } +} diff --git a/server/src/main/java/org/opensearch/wlm/cancellation/TaskSelectionStrategy.java b/server/src/main/java/org/opensearch/wlm/cancellation/TaskSelectionStrategy.java new file mode 100644 index 0000000000000..63fbf9b791a33 --- /dev/null +++ b/server/src/main/java/org/opensearch/wlm/cancellation/TaskSelectionStrategy.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.wlm.cancellation; + +import org.opensearch.wlm.QueryGroupTask; +import org.opensearch.wlm.ResourceType; + +import java.util.List; + +/** + * This interface exposes the method implementations use to select tasks for cancellation. + */ +public interface TaskSelectionStrategy { + /** + * Determines how tasks are selected from the given list, based on the resource type. + * @param tasks tasks to select from + * @param limit minimum cumulative resource usage of the selected tasks + * @param resourceType the resource type whose usage is measured + * @return list of selected tasks + */ + List<QueryGroupTask> selectTasksForCancellation(List<QueryGroupTask> tasks, double limit, ResourceType resourceType); +} diff --git a/server/src/main/java/org/opensearch/wlm/cancellation/package-info.java b/server/src/main/java/org/opensearch/wlm/cancellation/package-info.java new file mode 100644 index 0000000000000..1ce7b571e9a9c --- /dev/null +++ b/server/src/main/java/org/opensearch/wlm/cancellation/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Workload management resource-based cancellation artifacts + */ +package org.opensearch.wlm.cancellation; diff --git a/server/src/main/java/org/opensearch/wlm/tracker/CpuUsageCalculator.java b/server/src/main/java/org/opensearch/wlm/tracker/CpuUsageCalculator.java new file mode 100644 index 0000000000000..05c84cd767b1f --- /dev/null +++ b/server/src/main/java/org/opensearch/wlm/tracker/CpuUsageCalculator.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.wlm.tracker; + +import org.opensearch.core.tasks.resourcetracker.ResourceStats; +import org.opensearch.wlm.QueryGroupTask; + +import java.util.List; + +/** + * Class to help make CPU usage calculations for the query group. + */ +public class CpuUsageCalculator extends ResourceUsageCalculator { + // This value should be initialised at the start time of the process and be used throughout the codebase + public static final int PROCESSOR_COUNT = Runtime.getRuntime().availableProcessors(); + public static final CpuUsageCalculator INSTANCE = new CpuUsageCalculator(); + + private CpuUsageCalculator() {} + + @Override + public double calculateResourceUsage(List<QueryGroupTask> tasks) { + double usage = tasks.stream().mapToDouble(this::calculateTaskResourceUsage).sum(); + + usage /= PROCESSOR_COUNT; + return usage; + } + + @Override + public double calculateTaskResourceUsage(QueryGroupTask task) { + return (1.0f * task.getTotalResourceUtilization(ResourceStats.CPU)) / task.getElapsedTime(); + } +} diff --git a/server/src/main/java/org/opensearch/wlm/tracker/MemoryUsageCalculator.java b/server/src/main/java/org/opensearch/wlm/tracker/MemoryUsageCalculator.java new file mode 100644 index 0000000000000..fb66ff47f58d0 --- /dev/null +++ b/server/src/main/java/org/opensearch/wlm/tracker/MemoryUsageCalculator.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.wlm.tracker; + +import org.opensearch.core.tasks.resourcetracker.ResourceStats; +import org.opensearch.monitor.jvm.JvmStats; +import org.opensearch.wlm.QueryGroupTask; + +import java.util.List; + +/** + * Class to help make memory usage calculations for the query group. + */ +public class MemoryUsageCalculator extends ResourceUsageCalculator { + public static final long HEAP_SIZE_BYTES = JvmStats.jvmStats().getMem().getHeapMax().getBytes(); + public static final MemoryUsageCalculator INSTANCE = new MemoryUsageCalculator(); + + private MemoryUsageCalculator() {} + + @Override + public double calculateResourceUsage(List<QueryGroupTask> tasks) { + return tasks.stream().mapToDouble(this::calculateTaskResourceUsage).sum(); + } + + @Override + public double calculateTaskResourceUsage(QueryGroupTask task) { + return (1.0f * task.getTotalResourceUtilization(ResourceStats.MEMORY)) / HEAP_SIZE_BYTES; + } +} diff --git a/server/src/main/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerService.java b/server/src/main/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerService.java index 15852b5bbe6a8..b23d9ff342139 100644 --- a/server/src/main/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerService.java +++ b/server/src/main/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerService.java @@ -8,7 +8,6 @@ package org.opensearch.wlm.tracker; -import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.wlm.QueryGroupLevelResourceUsageView; import org.opensearch.wlm.QueryGroupTask; @@ -25,7 +24,6 @@ * This class tracks resource usage per QueryGroup */ public class QueryGroupResourceUsageTrackerService { - public static final EnumSet<ResourceType> TRACKED_RESOURCES = EnumSet.allOf(ResourceType.class); private final TaskResourceTrackingService taskResourceTrackingService; @@ -44,19 +42,16 @@ public QueryGroupResourceUsageTrackerService(TaskResourceTrackingService taskRes * @return 
Map of QueryGroup views */ public Map<String, QueryGroupLevelResourceUsageView> constructQueryGroupLevelUsageViews() { - final Map<String, List<Task>> tasksByQueryGroup = getTasksGroupedByQueryGroup(); + final Map<String, List<QueryGroupTask>> tasksByQueryGroup = getTasksGroupedByQueryGroup(); final Map<String, QueryGroupLevelResourceUsageView> queryGroupViews = new HashMap<>(); // Iterate over each QueryGroup entry - for (Map.Entry<String, List<Task>> queryGroupEntry : tasksByQueryGroup.entrySet()) { - // Compute the QueryGroup usage - final EnumMap<ResourceType, Long> queryGroupUsage = new EnumMap<>(ResourceType.class); + for (Map.Entry<String, List<QueryGroupTask>> queryGroupEntry : tasksByQueryGroup.entrySet()) { + // Compute the QueryGroup resource usage + final Map<ResourceType, Double> queryGroupUsage = new EnumMap<>(ResourceType.class); for (ResourceType resourceType : TRACKED_RESOURCES) { - long queryGroupResourceUsage = 0; - for (Task task : queryGroupEntry.getValue()) { - queryGroupResourceUsage += resourceType.getResourceUsage(task); - } - queryGroupUsage.put(resourceType, queryGroupResourceUsage); + double usage = resourceType.getResourceUsageCalculator().calculateResourceUsage(queryGroupEntry.getValue()); + queryGroupUsage.put(resourceType, usage); } // Add to the QueryGroup View @@ -73,12 +68,12 @@ public Map<String, QueryGroupLevelResourceUsageView> constructQueryGroupLevelUsa * * @return Map of tasks grouped by QueryGroup */ - private Map<String, List<Task>> getTasksGroupedByQueryGroup() { + private Map<String, List<QueryGroupTask>> getTasksGroupedByQueryGroup() { return taskResourceTrackingService.getResourceAwareTasks() .values() .stream() .filter(QueryGroupTask.class::isInstance) .map(QueryGroupTask.class::cast) - .collect(Collectors.groupingBy(QueryGroupTask::getQueryGroupId, Collectors.mapping(task -> (Task) task, Collectors.toList()))); + .collect(Collectors.groupingBy(QueryGroupTask::getQueryGroupId, Collectors.mapping(task -> task, Collectors.toList()))); } } diff --git a/server/src/main/java/org/opensearch/wlm/tracker/ResourceUsageCalculator.java b/server/src/main/java/org/opensearch/wlm/tracker/ResourceUsageCalculator.java new file mode 100644 index 0000000000000..bc8317cbfbf92 --- /dev/null +++ b/server/src/main/java/org/opensearch/wlm/tracker/ResourceUsageCalculator.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
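To make the two calculators above concrete: CPU usage sums each task's CPU-time-to-elapsed-time ratio and normalises by core count, while memory usage sums task bytes over the maximum heap. A standalone sketch, with plain numbers instead of `QueryGroupTask` and `Runtime.maxMemory()` standing in as an assumption for the JvmStats heap max:

```java
import java.util.List;

public class UsageCalculatorSketch {

    static final int PROCESSOR_COUNT = Runtime.getRuntime().availableProcessors();
    static final long HEAP_SIZE_BYTES = Runtime.getRuntime().maxMemory(); // stand-in assumption

    // CPU: cpuNanos / elapsedNanos is a task's average core occupancy; the group
    // total divided by the core count lands in [0, 1] for the whole node.
    static double groupCpuUsage(List<long[]> cpuAndElapsedNanos) {
        double usage = cpuAndElapsedNanos.stream().mapToDouble(t -> (double) t[0] / t[1]).sum();
        return usage / PROCESSOR_COUNT;
    }

    // Memory: bytes held by the group's tasks relative to the maximum heap.
    static double groupMemoryUsage(List<Long> taskBytes) {
        return taskBytes.stream().mapToDouble(b -> (double) b / HEAP_SIZE_BYTES).sum();
    }

    public static void main(String[] args) {
        // Two tasks, each busy on a core for half of its elapsed time: one core's worth of work.
        double cpu = groupCpuUsage(List.of(new long[] { 500_000_000L, 1_000_000_000L }, new long[] { 250_000_000L, 500_000_000L }));
        System.out.println(cpu); // 1.0 / PROCESSOR_COUNT
        System.out.println(groupMemoryUsage(List.of(1_000_000L, 4_000_000L)));
    }
}
```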
+ */ + +package org.opensearch.wlm.tracker; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.wlm.QueryGroupTask; + +import java.util.List; + +/** + * This class is used to track query group level resource usage + */ +@PublicApi(since = "2.18.0") +public abstract class ResourceUsageCalculator { + /** + * calculates the current resource usage for the query group + * + * @param tasks list of tasks in the query group + */ + public abstract double calculateResourceUsage(List tasks); + + /** + * calculates the task level resource usage + * @param task QueryGroupTask + * @return task level resource usage + */ + public abstract double calculateTaskResourceUsage(QueryGroupTask task); +} diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java index ee9a2951ec541..32cb95e0c04f6 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java @@ -57,6 +57,7 @@ import java.io.IOException; import java.util.Collections; import java.util.Locale; +import java.util.Map; import java.util.Optional; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -66,26 +67,34 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; -import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; import static org.opensearch.gateway.remote.ClusterMetadataManifest.MANIFEST_CURRENT_CODEC_VERSION; import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_PUBLICATION_SETTING_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; public class CoordinationStateTests extends OpenSearchTestCase { private DiscoveryNode node1; private DiscoveryNode node2; private DiscoveryNode node3; + private DiscoveryNode nodeWithPub; private ClusterState initialStateNode1; + private ClusterState initialStateNode2; private PersistedState ps1; private PersistedStateRegistry psr1; @@ -99,16 +108,18 @@ public void setupNodes() { node1 = createNode("node1"); node2 = createNode("node2"); node3 = createNode("node3"); + nodeWithPub = createNode( + "nodeWithPub", + Map.of( + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, + "", + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, + "" + ) + ); initialStateNode1 = 
clusterState(0L, 0L, node1, VotingConfiguration.EMPTY_CONFIG, VotingConfiguration.EMPTY_CONFIG, 42L); - ClusterState initialStateNode2 = clusterState( - 0L, - 0L, - node2, - VotingConfiguration.EMPTY_CONFIG, - VotingConfiguration.EMPTY_CONFIG, - 42L - ); + initialStateNode2 = clusterState(0L, 0L, node2, VotingConfiguration.EMPTY_CONFIG, VotingConfiguration.EMPTY_CONFIG, 42L); ClusterState initialStateNode3 = clusterState( 0L, 0L, @@ -128,6 +139,10 @@ public void setupNodes() { } public static DiscoveryNode createNode(String id) { + return createNode(id, Collections.emptyMap()); + } + + public static DiscoveryNode createNode(String id, Map attributes) { final TransportAddress address = buildNewFakeTransportAddress(); return new DiscoveryNode( "", @@ -136,7 +151,7 @@ public static DiscoveryNode createNode(String id) { address.address().getHostString(), address.getAddress(), address, - Collections.emptyMap(), + attributes, DiscoveryNodeRole.BUILT_IN_ROLES, Version.CURRENT ); @@ -913,7 +928,7 @@ public void testSafety() { public void testHandlePrePublishAndCommitWhenRemoteStateDisabled() { final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, ps1); - final PersistedStateRegistry persistedStateRegistrySpy = Mockito.spy(persistedStateRegistry); + final PersistedStateRegistry persistedStateRegistrySpy = spy(persistedStateRegistry); final CoordinationState coordinationState = createCoordinationState(persistedStateRegistrySpy, node1, Settings.EMPTY); final VotingConfiguration initialConfig = VotingConfiguration.of(node1); final ClusterState clusterState = clusterState(0L, 0L, node1, initialConfig, initialConfig, 42L); @@ -932,7 +947,7 @@ public void testHandlePrePublishAndCommitWhenRemoteStateDisabled() { public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOException { final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); final VotingConfiguration initialConfig = VotingConfiguration.of(node1); - final ClusterState clusterState = clusterState(0L, 0L, node1, initialConfig, initialConfig, 42L); + final ClusterState clusterState = clusterStateWithClusterManager(0L, 0L, node1, node1, initialConfig, initialConfig, 42L); final String previousClusterUUID = "prev-cluster-uuid"; final ClusterMetadataManifest manifest = ClusterMetadataManifest.builder() .clusterTerm(0L) @@ -948,8 +963,9 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep .previousClusterUUID(randomAlphaOfLength(10)) .clusterUUIDCommitted(true) .build(); - Mockito.when(remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID, MANIFEST_CURRENT_CODEC_VERSION)) - .thenReturn(new RemoteClusterStateManifestInfo(manifest, "path/to/manifest")); + when(remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID, MANIFEST_CURRENT_CODEC_VERSION)).thenReturn( + new RemoteClusterStateManifestInfo(manifest, "path/to/manifest") + ); final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, ps1); @@ -958,40 +974,21 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep new RemotePersistedState(remoteClusterStateService, previousClusterUUID) ); - String randomRepoName = "randomRepoName"; - String stateRepoTypeAttributeKey = String.format( - Locale.getDefault(), - "node.attr." 
+ REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, - randomRepoName - ); - String stateRepoSettingsAttributeKeyPrefix = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, - randomRepoName - ); - - Settings settings = Settings.builder() - .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, randomRepoName) - .put(stateRepoTypeAttributeKey, FsRepository.TYPE) - .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath") - .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) - .build(); - - final CoordinationState coordinationState = createCoordinationState(persistedStateRegistry, node1, settings); + final CoordinationState coordinationState = createCoordinationState(persistedStateRegistry, node1, remoteStateSettings()); coordinationState.handlePrePublish(clusterState); Mockito.verify(remoteClusterStateService, Mockito.times(1)) .writeFullMetadata(clusterState, previousClusterUUID, MANIFEST_CURRENT_CODEC_VERSION); assertThat(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).getLastAcceptedState(), equalTo(clusterState)); - Mockito.when(remoteClusterStateService.markLastStateAsCommitted(any(), any())) - .thenReturn(new RemoteClusterStateManifestInfo(manifest, "path/to/manifest")); + when(remoteClusterStateService.markLastStateAsCommitted(any(), any(), eq(false))).thenReturn( + new RemoteClusterStateManifestInfo(manifest, "path/to/manifest") + ); coordinationState.handlePreCommit(); ClusterState committedClusterState = ClusterState.builder(clusterState) .metadata(Metadata.builder(clusterState.metadata()).clusterUUIDCommitted(true).build()) .build(); - // Mockito.verify(remoteClusterStateService, Mockito.times(1)).markLastStateAsCommitted(committedClusterState, manifest); ArgumentCaptor clusterStateCaptor = ArgumentCaptor.forClass(ClusterState.class); - verify(remoteClusterStateService, times(1)).markLastStateAsCommitted(clusterStateCaptor.capture(), any()); + verify(remoteClusterStateService, times(1)).markLastStateAsCommitted(clusterStateCaptor.capture(), any(), eq(false)); assertThat(clusterStateCaptor.getValue().metadata().indices(), equalTo(committedClusterState.metadata().indices())); assertThat(clusterStateCaptor.getValue().metadata().clusterUUID(), equalTo(committedClusterState.metadata().clusterUUID())); assertThat(clusterStateCaptor.getValue().stateUUID(), equalTo(committedClusterState.stateUUID())); @@ -1006,11 +1003,276 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep ); } + public void testHandlePublishRequestOnFollowerWhenRemotePublicationEnabled() { + final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); + // cluster manager is node1 and node2 is a follower node + VotingConfiguration initialConfig = VotingConfiguration.of(node1); + ClusterState state1 = clusterState( + 0L, + 0L, + DiscoveryNodes.builder() + .add(node1) + .add(nodeWithPub) + .clusterManagerNodeId(node1.getId()) + .localNodeId(nodeWithPub.getId()) + .build(), + initialConfig, + initialConfig, + 42L + ); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, new InMemoryPersistedState(0L, initialStateNode2)); + persistedStateRegistry.addPersistedState( + PersistedStateType.REMOTE, + new RemotePersistedState(remoteClusterStateService, state1.metadata().clusterUUID()) + ); + + final CoordinationState coordinationState 
= createCoordinationState( + persistedStateRegistry, + nodeWithPub, + remotePublicationSettings() + ); + coordinationState.setInitialState(state1); + long newTerm = randomLongBetween(1, 10); + StartJoinRequest startJoinRequest = new StartJoinRequest(nodeWithPub, newTerm); + + coordinationState.handleStartJoin(startJoinRequest); + + ClusterState state2 = setValue( + ClusterState.builder(state1) + .metadata( + Metadata.builder(state1.metadata()) + .coordinationMetadata(CoordinationMetadata.builder(state1.coordinationMetadata()).term(newTerm).build()) + .build() + ) + .version(randomLongBetween(1, 10)) + .build(), + 43L + ); + final ClusterMetadataManifest manifest = ClusterMetadataManifest.builder() + .clusterTerm(1L) + .stateVersion(state2.version()) + .clusterUUID(state2.metadata().clusterUUID()) + .nodeId(node1.getId()) + .stateUUID(randomAlphaOfLength(10)) + .opensearchVersion(Version.CURRENT) + .committed(false) + .codecVersion(1) + .globalMetadataFileName(randomAlphaOfLength(10)) + .indices(Collections.emptyList()) + .previousClusterUUID(randomAlphaOfLength(10)) + .clusterUUIDCommitted(true) + .build(); + + PublishResponse publishResponse = coordinationState.handlePublishRequest(new RemoteStatePublishRequest(state2, manifest)); + assertEquals(state2.term(), publishResponse.getTerm()); + assertEquals(state2.version(), publishResponse.getVersion()); + verifyNoInteractions(remoteClusterStateService); + assertEquals(state2, persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).getLastAcceptedState()); + assertEquals(manifest, persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).getLastAcceptedManifest()); + } + + public void testHandleCommitOnFollowerNodeWhenRemotePublicationEnabled() { + RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + VotingConfiguration initialConfig = VotingConfiguration.of(node1, nodeWithPub); + ClusterState state1 = clusterState( + 0L, + 0L, + DiscoveryNodes.builder() + .add(node1) + .add(nodeWithPub) + .clusterManagerNodeId(node1.getId()) + .localNodeId(nodeWithPub.getId()) + .build(), + initialConfig, + initialConfig, + 42L + ); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, new InMemoryPersistedState(0L, initialStateNode2)); + persistedStateRegistry.addPersistedState( + PersistedStateType.REMOTE, + new RemotePersistedState(remoteClusterStateService, state1.metadata().clusterUUID()) + ); + + final CoordinationState coordinationState = createCoordinationState( + persistedStateRegistry, + nodeWithPub, + remotePublicationSettings() + ); + coordinationState.setInitialState(state1); + long newTerm = randomLongBetween(1, 10); + StartJoinRequest startJoinRequest = new StartJoinRequest(nodeWithPub, newTerm); + + Join v1 = cs1.handleStartJoin(startJoinRequest); + Join v2 = coordinationState.handleStartJoin(startJoinRequest); + assertTrue(coordinationState.handleJoin(v1)); + assertTrue(coordinationState.handleJoin(v2)); + assertTrue(coordinationState.electionWon()); + VotingConfiguration newConfig = VotingConfiguration.of(node1, nodeWithPub, node3); + ClusterState state2 = clusterState( + startJoinRequest.getTerm(), + 2L, + DiscoveryNodes.builder() + .add(node1) + .add(nodeWithPub) + .clusterManagerNodeId(node1.getId()) + .localNodeId(nodeWithPub.getId()) + .build(), + initialConfig, + newConfig, + 7L + ); + final ClusterMetadataManifest manifest = ClusterMetadataManifest.builder() + 
.clusterTerm(1L) + .stateVersion(state2.version()) + .clusterUUID(state2.metadata().clusterUUID()) + .nodeId(node1.getId()) + .stateUUID(randomAlphaOfLength(10)) + .opensearchVersion(Version.CURRENT) + .committed(false) + .codecVersion(1) + .globalMetadataFileName(randomAlphaOfLength(10)) + .indices(Collections.emptyList()) + .previousClusterUUID(randomAlphaOfLength(10)) + .clusterUUIDCommitted(true) + .build(); + + PublishRequest publishRequest = coordinationState.handleClientValue(state2); + coordinationState.handlePublishRequest(new RemoteStatePublishRequest(publishRequest.getAcceptedState(), manifest)); + ApplyCommitRequest applyCommitRequest = new ApplyCommitRequest(node2, state2.term(), state2.version()); + coordinationState.handleCommit(applyCommitRequest); + verifyNoInteractions(remoteClusterStateService); + assertTrue( + persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).getLastAcceptedState().metadata().clusterUUIDCommitted() + ); + assertTrue(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).getLastAcceptedManifest().isCommitted()); + assertEquals(coordinationState.getLastCommittedConfiguration(), newConfig); + } + + public void testRemotePersistedStateResetsForPublicationEnabledAfterLocalPublication() { + final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); + // cluster manager is node1 and nodeWithPub is a follower node + VotingConfiguration initialConfig = VotingConfiguration.of(node1); + ClusterState state1 = clusterState( + 0L, + 0L, + DiscoveryNodes.builder() + .add(node1) + .add(nodeWithPub) + .clusterManagerNodeId(node1.getId()) + .localNodeId(nodeWithPub.getId()) + .build(), + initialConfig, + initialConfig, + 42L + ); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, new InMemoryPersistedState(0L, initialStateNode2)); + persistedStateRegistry.addPersistedState( + PersistedStateType.REMOTE, + new RemotePersistedState(remoteClusterStateService, state1.metadata().clusterUUID()) + ); + + final CoordinationState coordinationState = createCoordinationState( + persistedStateRegistry, + nodeWithPub, + remotePublicationSettings() + ); + coordinationState.setInitialState(state1); + long newTerm = randomLongBetween(1, 10); + StartJoinRequest startJoinRequest = new StartJoinRequest(nodeWithPub, newTerm); + + coordinationState.handleStartJoin(startJoinRequest); + + ClusterState state2 = setValue( + ClusterState.builder(state1) + .metadata( + Metadata.builder(state1.metadata()) + .coordinationMetadata(CoordinationMetadata.builder(state1.coordinationMetadata()).term(newTerm).build()) + .build() + ) + .version(randomLongBetween(1, 10)) + .build(), + 43L + ); + + PublishResponse publishResponse = coordinationState.handlePublishRequest(new PublishRequest(state2)); + assertEquals(state2.term(), publishResponse.getTerm()); + assertEquals(state2.version(), publishResponse.getVersion()); + verifyNoInteractions(remoteClusterStateService); + assertNull(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).getLastAcceptedState()); + assertNull(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).getLastAcceptedManifest()); + } + + public void testHandleCommitOnFollowerNodeWhenRemotePublicationEnabledWithNullRemotePersistedState() { + RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + VotingConfiguration initialConfig = 
VotingConfiguration.of(node1, nodeWithPub); + ClusterState state1 = clusterState( + 0L, + 0L, + DiscoveryNodes.builder() + .add(node1) + .add(nodeWithPub) + .clusterManagerNodeId(node1.getId()) + .localNodeId(nodeWithPub.getId()) + .build(), + initialConfig, + initialConfig, + 42L + ); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, new InMemoryPersistedState(0L, initialStateNode2)); + persistedStateRegistry.addPersistedState( + PersistedStateType.REMOTE, + new RemotePersistedState(remoteClusterStateService, state1.metadata().clusterUUID()) + ); + + final CoordinationState coordinationState = createCoordinationState( + persistedStateRegistry, + nodeWithPub, + remotePublicationSettings() + ); + coordinationState.setInitialState(state1); + long newTerm = randomLongBetween(1, 10); + StartJoinRequest startJoinRequest = new StartJoinRequest(nodeWithPub, newTerm); + + Join v1 = cs1.handleStartJoin(startJoinRequest); + Join v2 = coordinationState.handleStartJoin(startJoinRequest); + assertTrue(coordinationState.handleJoin(v1)); + assertTrue(coordinationState.handleJoin(v2)); + assertTrue(coordinationState.electionWon()); + VotingConfiguration newConfig = VotingConfiguration.of(node1, nodeWithPub, node3); + ClusterState state2 = clusterState( + startJoinRequest.getTerm(), + 2L, + DiscoveryNodes.builder() + .add(node1) + .add(nodeWithPub) + .clusterManagerNodeId(node1.getId()) + .localNodeId(nodeWithPub.getId()) + .build(), + initialConfig, + newConfig, + 7L + ); + + PublishRequest publishRequest = coordinationState.handleClientValue(state2); + coordinationState.handlePublishRequest(new PublishRequest(publishRequest.getAcceptedState())); + assertNull(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).getLastAcceptedState()); + assertNull(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).getLastAcceptedManifest()); + ApplyCommitRequest applyCommitRequest = new ApplyCommitRequest(node2, state2.term(), state2.version()); + PersistedState spyRPS = spy(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE)); + coordinationState.handleCommit(applyCommitRequest); + verifyNoInteractions(spyRPS); + verifyNoInteractions(remoteClusterStateService); + } + public void testIsRemotePublicationEnabled_WithInconsistentSettings() { // create settings with remote state disabled but publication enabled Settings settings = Settings.builder() .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), false) - .put(REMOTE_PUBLICATION_EXPERIMENTAL, true) + .put(REMOTE_PUBLICATION_SETTING_KEY, true) .build(); CoordinationState coordinationState = createCoordinationState(psr1, node1, settings); assertFalse(coordinationState.isRemotePublicationEnabled()); @@ -1042,6 +1304,30 @@ public static ClusterState clusterState( ); } + public static ClusterState clusterStateWithClusterManager( + long term, + long version, + DiscoveryNode localNode, + DiscoveryNode clusterManagerNode, + VotingConfiguration lastCommittedConfig, + VotingConfiguration lastAcceptedConfig, + long value + ) { + return clusterState( + term, + version, + DiscoveryNodes.builder() + .add(localNode) + .add(clusterManagerNode) + .localNodeId(localNode.getId()) + .clusterManagerNodeId(clusterManagerNode.getId()) + .build(), + lastCommittedConfig, + lastAcceptedConfig, + value + ); + } + public static ClusterState clusterState( long term, long version, @@ -1090,4 +1376,30 @@ private static PersistedStateRegistry 
createPersistedStateRegistry(ClusterState persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, new InMemoryPersistedState(0L, clusterState)); return persistedStateRegistry; } + + private static Settings remoteStateSettings() { + String randomRepoName = "randomRepoName"; + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + randomRepoName + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + randomRepoName + ); + + Settings settings = Settings.builder() + .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, randomRepoName) + .put(stateRepoTypeAttributeKey, FsRepository.TYPE) + .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath") + .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .build(); + return settings; + } + + private static Settings remotePublicationSettings() { + return Settings.builder().put(remoteStateSettings()).put(REMOTE_PUBLICATION_SETTING_KEY, true).build(); + } } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java index 698ace4105d75..f9968ca08ebba 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java @@ -199,7 +199,30 @@ public void testWriteVerifiableTo() throws IOException { ), randomNonNegativeLong() ); - + String mappings = " {\n" + + " \"_doc\": {\n" + + " \"properties\": {\n" + + " \"actiongroups\": {\n" + + " \"type\": \"text\",\n" + + " \"fields\": {\n" + + " \"keyword\": {\n" + + " \"type\": \"keyword\",\n" + + " \"ignore_above\": 256\n" + + " }\n" + + " }\n" + + " },\n" + + " \"allowlist\": {\n" + + " \"type\": \"text\",\n" + + " \"fields\": {\n" + + " \"keyword\": {\n" + + " \"type\": \"keyword\",\n" + + " \"ignore_above\": 256\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + " }"; IndexMetadata metadata1 = IndexMetadata.builder("foo") .settings( Settings.builder() @@ -220,11 +243,13 @@ public void testWriteVerifiableTo() throws IOException { .putRolloverInfo(info1) .putRolloverInfo(info2) .putInSyncAllocationIds(0, Set.of("1", "2", "3")) + .putMapping(mappings) .build(); BytesStreamOutput out = new BytesStreamOutput(); BufferedChecksumStreamOutput checksumOut = new BufferedChecksumStreamOutput(out); metadata1.writeVerifiableTo(checksumOut); + assertNotNull(metadata1.toString()); IndexMetadata metadata2 = IndexMetadata.builder(metadata1.getIndex().getName()) .settings( @@ -246,6 +271,7 @@ public void testWriteVerifiableTo() throws IOException { .putRolloverInfo(info2) .putRolloverInfo(info1) .putInSyncAllocationIds(0, Set.of("3", "1", "2")) + .putMapping(mappings) .build(); BytesStreamOutput out2 = new BytesStreamOutput(); diff --git a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactoryTests.java b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactoryTests.java index 86f4b9502d6ab..683942fd34a37 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactoryTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceFactoryTests.java @@ -10,7 +10,6 @@ import 
org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.fs.FsRepository; import org.opensearch.test.OpenSearchTestCase; @@ -20,7 +19,7 @@ import java.util.function.Supplier; -import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_PUBLICATION_SETTING_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; public class RemoteRoutingTableServiceFactoryTests extends OpenSearchTestCase { @@ -50,9 +49,8 @@ public void testGetServiceWhenRemoteRoutingEnabled() { Settings settings = Settings.builder() .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, "routing_repository") .put(FsRepository.REPOSITORIES_COMPRESS_SETTING.getKey(), false) + .put(REMOTE_PUBLICATION_SETTING_KEY, "true") .build(); - Settings nodeSettings = Settings.builder().put(REMOTE_PUBLICATION_EXPERIMENTAL, "true").build(); - FeatureFlags.initializeFeatureFlags(nodeSettings); RemoteRoutingTableService service = RemoteRoutingTableServiceFactory.getService( repositoriesService, settings, diff --git a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java index d7009641c2b76..59d5dd663abe2 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/remote/RemoteRoutingTableServiceTests.java @@ -28,7 +28,6 @@ import org.opensearch.common.compress.DeflateCompressor; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.TestCapturingListener; import org.opensearch.core.action.ActionListener; import org.opensearch.core.compress.Compressor; @@ -63,8 +62,8 @@ import org.mockito.Mockito; -import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; import static org.opensearch.gateway.remote.ClusterMetadataManifestTests.randomUploadedIndexMetadataList; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_PUBLICATION_SETTING_KEY; import static org.opensearch.gateway.remote.RemoteClusterStateServiceTests.generateClusterStateWithOneIndex; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; @@ -114,6 +113,7 @@ public void setup() { Settings settings = Settings.builder() .put("node.attr." 
+ REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, "routing_repository") + .put(REMOTE_PUBLICATION_SETTING_KEY, "true") .build(); clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); clusterService = mock(ClusterService.class); @@ -126,8 +126,6 @@ public void setup() { when(repositoriesService.repository("routing_repository")).thenReturn(blobStoreRepository); when(blobStoreRepository.blobStore()).thenReturn(blobStore); when(blobStore.blobContainer(any())).thenReturn(blobContainer); - Settings nodeSettings = Settings.builder().put(REMOTE_PUBLICATION_EXPERIMENTAL, "true").build(); - FeatureFlags.initializeFeatureFlags(nodeSettings); compressor = new NoneCompressor(); basePath = BlobPath.cleanPath().add("base-path"); when(blobStoreRepository.basePath()).thenReturn(basePath); diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java index e25a0e0b2c3bf..3bd9333dc4168 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java @@ -30,6 +30,8 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Optional; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import static org.opensearch.test.ClusterServiceUtils.setState; @@ -69,7 +71,7 @@ public static void afterClass() { public void testDefaults() { ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { - return clusterService.getMasterService().getMinNodeVersion(); + return clusterService.getClusterManagerService().getMinNodeVersion(); }, new ClusterManagerThrottlingStats()); throttler.registerClusterManagerTask("put-mapping", true); throttler.registerClusterManagerTask("create-index", true); @@ -108,7 +110,7 @@ public void testValidateSettingsForDifferentVersion() { } } - public void testValidateSettingsForTaskWihtoutRetryOnDataNode() { + public void testValidateSettingsForTaskWithoutRetryOnDataNode() { DiscoveryNode clusterManagerNode = getClusterManagerNode(Version.V_2_5_0); DiscoveryNode dataNode = getDataNode(Version.V_2_5_0); setState( @@ -139,7 +141,7 @@ public void testUpdateSettingsForNullValue() { ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { - return clusterService.getMasterService().getMinNodeVersion(); + return clusterService.getClusterManagerService().getMinNodeVersion(); }, new ClusterManagerThrottlingStats()); throttler.registerClusterManagerTask("put-mapping", true); @@ -173,7 +175,7 @@ public void testSettingsOnBootstrap() { .put("cluster_manager.throttling.retry.max.delay", maxDelay + "s") .build(); ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(initialSettings, clusterSettings, () -> { - return clusterService.getMasterService().getMinNodeVersion(); + return clusterService.getClusterManagerService().getMinNodeVersion(); }, new ClusterManagerThrottlingStats()); throttler.registerClusterManagerTask("put-mapping", true); @@ -187,7 +189,7 @@ 
public void testSettingsOnBootstrap() { public void testUpdateRetryDelaySetting() { ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { - return clusterService.getMasterService().getMinNodeVersion(); + return clusterService.getClusterManagerService().getMinNodeVersion(); }, new ClusterManagerThrottlingStats()); // verify defaults @@ -217,7 +219,7 @@ public void testValidateSettingsForUnknownTask() { ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { - return clusterService.getMasterService().getMinNodeVersion(); + return clusterService.getClusterManagerService().getMinNodeVersion(); }, new ClusterManagerThrottlingStats()); // set some limit for update snapshot tasks @@ -236,7 +238,7 @@ public void testUpdateThrottlingLimitForBasicSanity() { ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { - return clusterService.getMasterService().getMinNodeVersion(); + return clusterService.getClusterManagerService().getMinNodeVersion(); }, new ClusterManagerThrottlingStats()); throttler.registerClusterManagerTask("put-mapping", true); @@ -263,7 +265,7 @@ public void testValidateSettingForLimit() { ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { - return clusterService.getMasterService().getMinNodeVersion(); + return clusterService.getClusterManagerService().getMinNodeVersion(); }, new ClusterManagerThrottlingStats()); throttler.registerClusterManagerTask("put-mapping", true); @@ -274,7 +276,7 @@ public void testValidateSettingForLimit() { public void testUpdateLimit() { ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { - return clusterService.getMasterService().getMinNodeVersion(); + return clusterService.getClusterManagerService().getMinNodeVersion(); }, new ClusterManagerThrottlingStats()); throttler.registerClusterManagerTask("put-mapping", true); @@ -309,7 +311,7 @@ public void testThrottlingForDisabledThrottlingTask() { String taskKey = "test"; ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { - return clusterService.getMasterService().getMinNodeVersion(); + return clusterService.getClusterManagerService().getMinNodeVersion(); }, throttlingStats); ClusterManagerTaskThrottler.ThrottlingKey throttlingKey = throttler.registerClusterManagerTask(taskKey, false); @@ -321,6 +323,9 @@ public void testThrottlingForDisabledThrottlingTask() { // Asserting that there was not any throttling for it assertEquals(0L, throttlingStats.getThrottlingCount(taskKey)); + + // Asserting value in tasksCount map 
to make sure it gets updated even when throttling is disabled + assertEquals(Optional.of(10L).get(), throttler.tasksCount.get(taskKey)); } public void testThrottlingForInitialStaticSettingAndVersionCheck() { @@ -339,7 +344,7 @@ public void testThrottlingForInitialStaticSettingAndVersionCheck() { .put("cluster_manager.throttling.thresholds.put-mapping.value", put_mapping_threshold_value) .build(); ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(initialSettings, clusterSettings, () -> { - return clusterService.getMasterService().getMinNodeVersion(); + return clusterService.getClusterManagerService().getMinNodeVersion(); }, throttlingStats); ClusterManagerTaskThrottler.ThrottlingKey throttlingKey = throttler.registerClusterManagerTask("put-mapping", true); @@ -367,7 +372,7 @@ public void testThrottling() { String taskKey = "test"; ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { - return clusterService.getMasterService().getMinNodeVersion(); + return clusterService.getClusterManagerService().getMinNodeVersion(); }, throttlingStats); ClusterManagerTaskThrottler.ThrottlingKey throttlingKey = throttler.registerClusterManagerTask(taskKey, true); @@ -406,6 +411,164 @@ public void testThrottling() { throttler.onBeginSubmit(getMockUpdateTaskList(taskKey, throttlingKey, 1)); } + public void testThrottlingWithLock() { + ClusterManagerThrottlingStats throttlingStats = new ClusterManagerThrottlingStats(); + String taskKey = "test"; + ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { + return clusterService.getClusterManagerService().getMinNodeVersion(); + }, throttlingStats); + ClusterManagerTaskThrottler.ThrottlingKey throttlingKey = throttler.registerClusterManagerTask(taskKey, true); + + throttler.updateLimit(taskKey, 5); + + // adding 3 tasks + throttler.onBeginSubmit(getMockUpdateTaskList(taskKey, throttlingKey, 3)); + + // adding 3 more tasks, these tasks should be throttled + // taskCount in Queue: 3 Threshold: 5 + assertThrows( + ClusterManagerThrottlingException.class, + () -> throttler.onBeginSubmit(getMockUpdateTaskList(taskKey, throttlingKey, 3)) + ); + assertEquals(3L, throttlingStats.getThrottlingCount(taskKey)); + + // remove one task + throttler.onBeginProcessing(getMockUpdateTaskList(taskKey, throttlingKey, 1)); + + // add 3 tasks should pass now. + // taskCount in Queue: 2 Threshold: 5 + throttler.onBeginSubmit(getMockUpdateTaskList(taskKey, throttlingKey, 3)); + + final CountDownLatch latch = new CountDownLatch(1); + Thread threadToLock = null; + try { + // Taking lock on tasksCount will not impact throttling behaviour now. 
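+ // The thread below parks inside computeIfPresent on the tasksCount entry for taskKey until the latch opens, + // simulating a slow count update; the over-limit submit that follows must still be rejected with a throttling exception.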
+ threadToLock = new Thread(() -> { + throttler.tasksCount.computeIfPresent(taskKey, (key, count) -> { + try { + latch.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + return 10L; + }); + }); + threadToLock.start(); + + // adding one task will throttle + // taskCount in Queue: 5 Threshold: 5 + final ClusterManagerThrottlingException exception = assertThrows( + ClusterManagerThrottlingException.class, + () -> throttler.onBeginSubmit(getMockUpdateTaskList(taskKey, throttlingKey, 1)) + ); + assertEquals("Throttling Exception : Limit exceeded for test", exception.getMessage()); + assertEquals(Optional.of(5L).get(), throttler.tasksCount.get(taskKey)); + assertEquals(4L, throttlingStats.getThrottlingCount(taskKey)); + } finally { + if (threadToLock != null) { + latch.countDown(); + // Wait to complete and then assert on new tasksCount that got modified by threadToLock Thread + try { + threadToLock.join(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + assertEquals(Optional.of(10L).get(), throttler.tasksCount.get(taskKey)); + } + + public void testThrottlingWithMultipleOnBeginSubmitsThreadsWithLock() { + ClusterManagerThrottlingStats throttlingStats = new ClusterManagerThrottlingStats(); + String taskKey = "test"; + ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { + return clusterService.getClusterManagerService().getMinNodeVersion(); + }, throttlingStats); + ClusterManagerTaskThrottler.ThrottlingKey throttlingKey = throttler.registerClusterManagerTask(taskKey, true); + + throttler.updateLimit(taskKey, 5); + + // adding 3 tasks + throttler.onBeginSubmit(getMockUpdateTaskList(taskKey, throttlingKey, 3)); + + // adding 3 more tasks, these tasks should be throttled + // taskCount in Queue: 3 Threshold: 5 + assertThrows( + ClusterManagerThrottlingException.class, + () -> throttler.onBeginSubmit(getMockUpdateTaskList(taskKey, throttlingKey, 3)) + ); + assertEquals(3L, throttlingStats.getThrottlingCount(taskKey)); + + // remove one task + throttler.onBeginProcessing(getMockUpdateTaskList(taskKey, throttlingKey, 1)); + + // add 3 tasks should pass now. + // taskCount in Queue: 2 Threshold: 5 + throttler.onBeginSubmit(getMockUpdateTaskList(taskKey, throttlingKey, 3)); + + final CountDownLatch latch = new CountDownLatch(1); + Thread threadToLock = null; + List<Thread> submittingThreads = new ArrayList<>(); + + try { + // Taking lock on tasksCount will not impact throttling behaviour now.
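+ // Same setup as the previous test: the thread below holds the tasksCount entry for taskKey inside computeIfPresent, + // and the ten submitting threads spawned afterwards must each still observe a count of 5 and get throttled.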
+ threadToLock = new Thread(() -> { + throttler.tasksCount.computeIfPresent(taskKey, (key, count) -> { + try { + latch.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + return 10L; + }); + }); + threadToLock.start(); + + final CountDownLatch latch2 = new CountDownLatch(10); + for (int i = 0; i < 10; i++) { + Thread submittingThread = new Thread(() -> { + // adding one task will throttle + // taskCount in Queue: 5 Threshold: 5 + final ClusterManagerThrottlingException exception = assertThrows( + ClusterManagerThrottlingException.class, + () -> throttler.onBeginSubmit(getMockUpdateTaskList(taskKey, throttlingKey, 1)) + ); + assertEquals("Throttling Exception : Limit exceeded for test", exception.getMessage()); + assertEquals(Optional.of(5L).get(), throttler.tasksCount.get(taskKey)); + latch2.countDown(); + }); + submittingThread.start(); + submittingThreads.add(submittingThread); + } + try { + latch2.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + assertEquals(13L, throttlingStats.getThrottlingCount(taskKey)); + } finally { + if (threadToLock != null) { + latch.countDown(); + try { + // Wait to complete and then assert on new tasksCount that got modified by threadToLock Thread + threadToLock.join(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + for (Thread submittingThread : submittingThreads) { + try { + submittingThread.join(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + assertEquals(Optional.of(10L).get(), throttler.tasksCount.get(taskKey)); + } + private List getMockUpdateTaskList( String taskKey, ClusterManagerTaskThrottler.ThrottlingKey throttlingKey, diff --git a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java index 9972bbfff5d66..5ea5241762753 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java @@ -115,6 +115,7 @@ import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoInteractions; @@ -210,11 +211,15 @@ public void testSetCurrentTerm() throws IOException { } private ClusterState createClusterState(long version, Metadata metadata) { - return ClusterState.builder(clusterName) - .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).build()) - .version(version) - .metadata(metadata) - .build(); + return createClusterState(version, metadata, false); + } + + private ClusterState createClusterState(long version, Metadata metadata, boolean isClusterManagerNode) { + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()); + if (isClusterManagerNode) { + nodesBuilder.clusterManagerNodeId(localNode.getId()); + } + return ClusterState.builder(clusterName).nodes(nodesBuilder.build()).version(version).metadata(metadata).build(); } private ClusterState createClusterStateWithNodes(long version, Metadata metadata) { @@ -225,7 +230,12 @@ private ClusterState createClusterStateWithNodes(long version, Metadata metadata Sets.newHashSet(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE), 
Version.V_2_13_0 ); - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).add(oldNode).build(); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder() + .add(localNode) + .localNodeId(localNode.getId()) + .clusterManagerNodeId(localNode.getId()) + .add(oldNode) + .build(); return ClusterState.builder(clusterName).nodes(discoveryNodes).version(version).metadata(metadata).build(); } @@ -762,7 +772,8 @@ public void testRemotePersistedState() throws IOException { final long clusterTerm = randomNonNegativeLong(); final ClusterState clusterState = createClusterState( randomNonNegativeLong(), - Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build() + Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build(), + true ); remotePersistedState.setLastAcceptedState(clusterState); @@ -773,7 +784,8 @@ public void testRemotePersistedState() throws IOException { final ClusterState secondClusterState = createClusterState( randomNonNegativeLong(), - Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build() + Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build(), + true ); remotePersistedState.setLastAcceptedState(secondClusterState); @@ -783,11 +795,11 @@ public void testRemotePersistedState() throws IOException { assertThat(remotePersistedState.getLastAcceptedState(), equalTo(secondClusterState)); assertThat(remotePersistedState.getCurrentTerm(), equalTo(clusterTerm)); - when(remoteClusterStateService.markLastStateAsCommitted(Mockito.any(), Mockito.any())).thenReturn( + when(remoteClusterStateService.markLastStateAsCommitted(Mockito.any(), Mockito.any(), eq(false))).thenReturn( new RemoteClusterStateManifestInfo(manifest, "path/to/manifest") ); remotePersistedState.markLastAcceptedStateAsCommitted(); - Mockito.verify(remoteClusterStateService, times(1)).markLastStateAsCommitted(Mockito.any(), Mockito.any()); + Mockito.verify(remoteClusterStateService, times(1)).markLastStateAsCommitted(Mockito.any(), Mockito.any(), eq(false)); assertThat(remotePersistedState.getLastAcceptedState(), equalTo(secondClusterState)); assertThat(remotePersistedState.getCurrentTerm(), equalTo(clusterTerm)); @@ -825,7 +837,8 @@ public void testRemotePersistedStateWithDifferentNodeConfiguration() throws IOEx ClusterState clusterState2 = createClusterState( randomNonNegativeLong(), - Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(1L).build()).build() + Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(1L).build()).build(), + true ); final ClusterMetadataManifest manifest2 = ClusterMetadataManifest.builder() .clusterTerm(1L) @@ -840,7 +853,8 @@ public void testRemotePersistedStateWithDifferentNodeConfiguration() throws IOEx ClusterState clusterState3 = createClusterState( randomNonNegativeLong(), - Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(1L).build()).build() + Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(1L).build()).build(), + true ); Mockito.when(remoteClusterStateService.writeIncrementalMetadata(Mockito.any(), Mockito.any(), Mockito.any())) .thenReturn(new RemoteClusterStateManifestInfo(manifest2, "path/to/manifest3")); @@ -849,6 +863,73 @@ public void testRemotePersistedStateWithDifferentNodeConfiguration() throws IOEx } + public void 
testRemotePersistentState_FollowerNode() throws IOException { + final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); + final ClusterMetadataManifest manifest = ClusterMetadataManifest.builder() + .clusterTerm(1L) + .stateVersion(5L) + .committed(false) + .build(); + final String previousClusterUUID = "prev-cluster-uuid"; + RemotePersistedState remotePersistedState = new RemotePersistedState(remoteClusterStateService, previousClusterUUID); + + assertNull(remotePersistedState.getLastAcceptedState()); + assertNull(remotePersistedState.getLastAcceptedManifest()); + assertEquals(0, remotePersistedState.getCurrentTerm()); + + final long clusterTerm = randomNonNegativeLong(); + final ClusterState clusterState = createClusterState( + randomNonNegativeLong(), + Metadata.builder() + .coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()) + .clusterUUIDCommitted(true) + .build(), + false + ); + + remotePersistedState.setLastAcceptedState(clusterState); + remotePersistedState.setLastAcceptedManifest(manifest); + Mockito.verify(remoteClusterStateService, never()) + .writeFullMetadata(clusterState, previousClusterUUID, MANIFEST_CURRENT_CODEC_VERSION); + + assertEquals(clusterState, remotePersistedState.getLastAcceptedState()); + assertEquals(clusterTerm, remotePersistedState.getCurrentTerm()); + assertEquals(manifest, remotePersistedState.getLastAcceptedManifest()); + + final ClusterState secondClusterState = createClusterState( + randomNonNegativeLong(), + Metadata.builder() + .coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()) + .clusterUUIDCommitted(false) + .build(), + false + ); + + remotePersistedState.setLastAcceptedState(secondClusterState); + Mockito.verify(remoteClusterStateService, never()) + .writeFullMetadata(secondClusterState, previousClusterUUID, MANIFEST_CURRENT_CODEC_VERSION); + + assertEquals(secondClusterState, remotePersistedState.getLastAcceptedState()); + assertEquals(clusterTerm, remotePersistedState.getCurrentTerm()); + assertFalse(remotePersistedState.getLastAcceptedManifest().isCommitted()); + + remotePersistedState.markLastAcceptedStateAsCommitted(); + Mockito.verify(remoteClusterStateService, never()).markLastStateAsCommitted(Mockito.any(), Mockito.any(), eq(false)); + + assertEquals(secondClusterState, remotePersistedState.getLastAcceptedState()); + assertEquals(clusterTerm, remotePersistedState.getCurrentTerm()); + assertFalse(remotePersistedState.getLastAcceptedState().metadata().clusterUUIDCommitted()); + assertTrue(remotePersistedState.getLastAcceptedManifest().isCommitted()); + + final ClusterState thirdClusterState = ClusterState.builder(secondClusterState) + .metadata(Metadata.builder(secondClusterState.getMetadata()).clusterUUID(randomAlphaOfLength(10)).build()) + .build(); + remotePersistedState.setLastAcceptedState(thirdClusterState); + remotePersistedState.markLastAcceptedStateAsCommitted(); + assertTrue(remotePersistedState.getLastAcceptedState().metadata().clusterUUIDCommitted()); + assertTrue(remotePersistedState.getLastAcceptedManifest().isCommitted()); + } + public void testRemotePersistedStateNotCommitted() throws IOException { final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); final String previousClusterUUID = "prev-cluster-uuid"; @@ -875,7 +956,8 @@ public void testRemotePersistedStateNotCommitted() throws IOException { final long clusterTerm = randomNonNegativeLong(); ClusterState clusterState 
= createClusterState( randomNonNegativeLong(), - Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build() + Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build(), + true ); clusterState = ClusterState.builder(clusterState) .metadata(Metadata.builder(clusterState.getMetadata()).clusterUUID(randomAlphaOfLength(10)).clusterUUIDCommitted(false).build()) @@ -901,7 +983,8 @@ public void testRemotePersistedStateExceptionOnFullStateUpload() throws IOExcept final long clusterTerm = randomNonNegativeLong(); final ClusterState clusterState = createClusterState( randomNonNegativeLong(), - Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build() + Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build(), + true ); assertThrows(OpenSearchException.class, () -> remotePersistedState.setLastAcceptedState(clusterState)); @@ -924,7 +1007,8 @@ public void testRemotePersistedStateFailureStats() throws IOException { final long clusterTerm = randomNonNegativeLong(); final ClusterState clusterState = createClusterState( randomNonNegativeLong(), - Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build() + Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build(), + true ); assertThrows(OpenSearchException.class, () -> remotePersistedState.setLastAcceptedState(clusterState)); @@ -1025,7 +1109,8 @@ public void testGatewayForRemoteStateForNodeReplacement() throws IOException { false ) .clusterUUID(randomAlphaOfLength(10)) - .build() + .build(), + false ); when(remoteClusterStateService.getLastKnownUUIDFromRemote(clusterName.value())).thenReturn( previousState.metadata().clusterUUID() @@ -1071,7 +1156,8 @@ public void testGatewayForRemoteStateForNodeReboot() throws IOException { .coordinationMetadata(CoordinationMetadata.builder().term(randomLong()).build()) .put(indexMetadata, false) .clusterUUID(randomAlphaOfLength(10)) - .build() + .build(), + false ); gateway = newGatewayForRemoteState( remoteClusterStateService, @@ -1117,7 +1203,8 @@ public void testGatewayForRemoteStateForInitialBootstrapBlocksApplied() throws I .put(indexMetadata, false) .clusterUUID(ClusterState.UNKNOWN_UUID) .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) - .build() + .build(), + false ) ).nodes(DiscoveryNodes.EMPTY_NODES).build(); diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 9bfa0d9c0b80f..79f6c13bd694f 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -107,12 +107,12 @@ import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.stream.Collectors.toList; -import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V1; import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V2; import static org.opensearch.gateway.remote.ClusterMetadataManifest.MANIFEST_CURRENT_CODEC_VERSION; import static 
org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_BLOCKS; import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_STATE_ATTRIBUTE; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_PUBLICATION_SETTING_KEY; import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.CustomMetadata1; import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.CustomMetadata2; import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.CustomMetadata3; @@ -273,8 +273,6 @@ public void teardown() throws Exception { super.tearDown(); remoteClusterStateService.close(); publicationEnabled = false; - Settings nodeSettings = Settings.builder().build(); - FeatureFlags.initializeFeatureFlags(nodeSettings); threadPool.shutdown(); } @@ -371,8 +369,7 @@ public void testWriteFullMetadataSuccess() throws IOException { public void testWriteFullMetadataSuccessPublicationEnabled() throws IOException { // TODO Make the publication flag parameterized publicationEnabled = true; - Settings nodeSettings = Settings.builder().put(REMOTE_PUBLICATION_EXPERIMENTAL, publicationEnabled).build(); - FeatureFlags.initializeFeatureFlags(nodeSettings); + settings = Settings.builder().put(settings).put(REMOTE_PUBLICATION_SETTING_KEY, publicationEnabled).build(); remoteClusterStateService = new RemoteClusterStateService( "test-node-id", repositoriesServiceSupplier, @@ -611,20 +608,20 @@ public void testFailWriteIncrementalMetadataNonClusterManagerNode() throws IOExc final RemoteClusterStateManifestInfo manifestDetails = remoteClusterStateService.writeIncrementalMetadata( clusterState, clusterState, - null + ClusterMetadataManifest.builder().build() ); Assert.assertThat(manifestDetails, nullValue()); assertEquals(0, remoteClusterStateService.getUploadStats().getSuccessCount()); } - public void testFailWriteIncrementalMetadataWhenTermChanged() { + public void testFailWriteIncrementalMetadataWhenManifestNull() { final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(2L).build(); final ClusterState previousClusterState = ClusterState.builder(ClusterName.DEFAULT) .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata)) .build(); assertThrows( - AssertionError.class, + IllegalArgumentException.class, () -> remoteClusterStateService.writeIncrementalMetadata(previousClusterState, clusterState, null) ); } @@ -749,8 +746,7 @@ public void testWriteIncrementalMetadataSuccess() throws IOException { public void testWriteIncrementalMetadataSuccessWhenPublicationEnabled() throws IOException { publicationEnabled = true; - Settings nodeSettings = Settings.builder().put(REMOTE_PUBLICATION_EXPERIMENTAL, publicationEnabled).build(); - FeatureFlags.initializeFeatureFlags(nodeSettings); + settings = Settings.builder().put(settings).put(REMOTE_PUBLICATION_SETTING_KEY, true).build(); remoteClusterStateService = new RemoteClusterStateService( "test-node-id", repositoriesServiceSupplier, @@ -2524,7 +2520,7 @@ public void testMarkLastStateAsCommittedSuccess() throws IOException { List<UploadedIndexMetadata> indices = List.of(uploadedIndexMetadata); final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder().indices(indices).build(); - final ClusterMetadataManifest manifest = remoteClusterStateService.markLastStateAsCommitted(clusterState, previousManifest) + final ClusterMetadataManifest
manifest = remoteClusterStateService.markLastStateAsCommitted(clusterState, previousManifest, false) .getClusterMetadataManifest(); final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() @@ -2658,7 +2654,7 @@ public void testRemoteRoutingTableInitializedWhenEnabled() { .build(); clusterSettings.applySettings(newSettings); - Settings nodeSettings = Settings.builder().put(REMOTE_PUBLICATION_EXPERIMENTAL, "true").build(); + Settings nodeSettings = Settings.builder().put(REMOTE_PUBLICATION_SETTING_KEY, "true").build(); FeatureFlags.initializeFeatureFlags(nodeSettings); remoteClusterStateService = new RemoteClusterStateService( @@ -2935,11 +2931,10 @@ private void initializeRoutingTable() { .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, "routing_repository") .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, "remote_store_repository") .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .put(REMOTE_PUBLICATION_SETTING_KEY, "true") .build(); clusterSettings.applySettings(newSettings); - Settings nodeSettings = Settings.builder().put(REMOTE_PUBLICATION_EXPERIMENTAL, "true").build(); - FeatureFlags.initializeFeatureFlags(nodeSettings); remoteClusterStateService = new RemoteClusterStateService( "test-node-id", repositoriesServiceSupplier, @@ -2966,11 +2961,10 @@ private void initializeWithChecksumEnabled(RemoteClusterStateService.RemoteClust .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, "remote_store_repository") .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_CHECKSUM_VALIDATION_MODE_SETTING.getKey(), mode.name()) + .put(REMOTE_PUBLICATION_SETTING_KEY, true) .build(); clusterSettings.applySettings(newSettings); - Settings nodeSettings = Settings.builder().put(REMOTE_PUBLICATION_EXPERIMENTAL, "true").build(); - FeatureFlags.initializeFeatureFlags(nodeSettings); remoteClusterStateService = new RemoteClusterStateService( "test-node-id", repositoriesServiceSupplier, diff --git a/server/src/test/java/org/opensearch/index/mapper/WildcardFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/WildcardFieldTypeTests.java index cd2a23cf94c37..1a813495e9033 100644 --- a/server/src/test/java/org/opensearch/index/mapper/WildcardFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/WildcardFieldTypeTests.java @@ -88,6 +88,32 @@ public void testWildcardQuery() { ); } + public void testEscapedWildcardQuery() { + MappedFieldType ft = new WildcardFieldMapper.WildcardFieldType("field"); + Set<String> expectedTerms = new HashSet<>(); + expectedTerms.add(prefixAnchored("*")); + expectedTerms.add(suffixAnchored("*")); + + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + for (String term : expectedTerms) { + builder.add(new TermQuery(new Term("field", term)), BooleanClause.Occur.FILTER); + } + + assertEquals( + new WildcardFieldMapper.WildcardMatchingQuery("field", builder.build(), "\\**\\*"), + ft.wildcardQuery("\\**\\*", null, null) + ); + + assertEquals(new WildcardFieldMapper.WildcardMatchingQuery("field", builder.build(), "\\*"), ft.wildcardQuery("\\*", null, null)); + + expectedTerms.remove(suffixAnchored("*")); + builder = new BooleanQuery.Builder(); + for (String term : expectedTerms) { + builder.add(new TermQuery(new Term("field", term)), BooleanClause.Occur.FILTER); + } + assertEquals(new
WildcardFieldMapper.WildcardMatchingQuery("field", builder.build(), "\\**"), ft.wildcardQuery("\\**", null, null)); + } + public void testMultipleWildcardsInQuery() { final String pattern = "a?cd*efg?h"; MappedFieldType ft = new WildcardFieldMapper.WildcardFieldType("field"); diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index 2a34dd3580948..be30de97ee830 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -637,7 +637,7 @@ private Tuple<Map<Long, String>, Set<String>> testGetPinnedTimestampLockedFilesW String metadataPrefix = "metadata__1__2__3__4__5__"; Map<Long, String> metadataFiles = new HashMap<>(); for (Long metadataFileTimestamp : metadataFileTimestamps) { - metadataFiles.put(metadataFileTimestamp, metadataPrefix + RemoteStoreUtils.invertLong(metadataFileTimestamp)); + metadataFiles.put(metadataFileTimestamp, metadataPrefix + RemoteStoreUtils.invertLong(metadataFileTimestamp) + "__1"); } return new Tuple<>( metadataFiles, @@ -662,7 +662,7 @@ private Tuple<Map<Long, String>, Set<String>> testGetPinnedTimestampLockedFilesW String primaryTerm = RemoteStoreUtils.invertLong(metadataFileTimestampPrimaryTerm.getValue()); String metadataPrefix = "metadata__" + primaryTerm + "__2__3__4__5__"; long metadataFileTimestamp = metadataFileTimestampPrimaryTerm.getKey(); - metadataFiles.put(metadataFileTimestamp, metadataPrefix + RemoteStoreUtils.invertLong(metadataFileTimestamp)); + metadataFiles.put(metadataFileTimestamp, metadataPrefix + RemoteStoreUtils.invertLong(metadataFileTimestamp) + "__1"); } return new Tuple<>( metadataFiles, diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 336d4bafd4b66..ecd6620dbea15 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -1170,9 +1170,9 @@ public void testInitializeToSpecificTimestampNoMdMatchingTimestamp() throws IOEx public void testInitializeToSpecificTimestampMatchingMdFile() throws IOException { String metadataPrefix = "metadata__1__2__3__4__5__"; List<String> metadataFiles = new ArrayList<>(); - metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(1000)); - metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(2000)); - metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(3000)); + metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(1000) + "__1"); + metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(2000) + "__1"); + metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(3000) + "__1"); Map<String, String> metadata = new HashMap<>(); metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234::512::" + Version.LATEST.major); @@ -1184,7 +1184,7 @@ public void testInitializeToSpecificTimestampMatchingMdFile() throws IOException Integer.MAX_VALUE ) ).thenReturn(metadataFiles); - when(remoteMetadataDirectory.getBlobStream(metadataPrefix + RemoteStoreUtils.invertLong(1000))).thenReturn( + when(remoteMetadataDirectory.getBlobStream(metadataPrefix + RemoteStoreUtils.invertLong(1000) + "__1")).thenReturn( createMetadataFileBytes(metadata, indexShard.getLatestReplicationCheckpoint(), segmentInfos) ); diff --git
a/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java index f66d8f4a894b0..4c9da7e95dfa7 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java @@ -8,6 +8,8 @@ package org.opensearch.index.translog; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.metadata.RepositoryMetadata; @@ -53,6 +55,7 @@ import java.util.Set; import java.util.TreeSet; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.LongStream; @@ -63,7 +66,9 @@ import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -211,16 +216,44 @@ public void onFailure(Exception e) { // Node id containing separator String nodeIdWithSeparator = - "metadata__9223372036438563903__9223372036854774799__9223370311919910393__node__1__9223372036438563958__1"; + "metadata__9223372036438563903__9223372036854774799__9223370311919910393__node__1__9223372036438563958__2__1"; Tuple<Long, Long> minMaxGen = TranslogTransferMetadata.getMinMaxTranslogGenerationFromFilename(nodeIdWithSeparator); Long minGen = Long.MAX_VALUE - 9223372036438563958L; assertEquals(minGen, minMaxGen.v1()); // Malformed md filename - String malformedMdFileName = "metadata__9223372036438563903__9223372036854774799__9223370311919910393__node1__xyz__1"; + String malformedMdFileName = "metadata__9223372036438563903__9223372036854774799__9223370311919910393__node1__xyz__3__1"; assertNull(TranslogTransferMetadata.getMinMaxTranslogGenerationFromFilename(malformedMdFileName)); } + public void testGetMinMaxPrimaryTermFromFilename() throws Exception { + // New format metadata file + String newFormatMetadataFile = + "metadata__9223372036854775800__9223372036854774799__9223370311919910393__node1__9223372036438563958__2__1"; + Tuple<Long, Long> minMaxPrimaryTerm = TranslogTransferMetadata.getMinMaxPrimaryTermFromFilename(newFormatMetadataFile); + Long minPrimaryTerm = 2L; + Long maxPrimaryTerm = 7L; + assertEquals(minPrimaryTerm, minMaxPrimaryTerm.v1()); + assertEquals(maxPrimaryTerm, minMaxPrimaryTerm.v2()); + + // Old format metadata file + String oldFormatMdFilename = "metadata__9223372036438563903__9223372036854774799__9223370311919910393__31__1"; + assertNull(TranslogTransferMetadata.getMinMaxPrimaryTermFromFilename(oldFormatMdFilename)); + + // Node id containing separator + String nodeIdWithSeparator = + "metadata__9223372036854775800__9223372036854774799__9223370311919910393__node__1__9223372036438563958__2__1"; + minMaxPrimaryTerm = TranslogTransferMetadata.getMinMaxPrimaryTermFromFilename(nodeIdWithSeparator); + minPrimaryTerm = 2L; + maxPrimaryTerm = 7L; + assertEquals(minPrimaryTerm, minMaxPrimaryTerm.v1()); +
assertEquals(maxPrimaryTerm, minMaxPrimaryterm.v2()); + + // Malformed md filename + String malformedMdFileName = "metadata__9223372036854775800__9223372036854774799__9223370311919910393__node1__xyz__3qwe__1"; + assertNull(TranslogTransferMetadata.getMinMaxPrimaryTermFromFilename(malformedMdFileName)); + } + public void testIndexDeletionWithNoPinnedTimestampNoRecentMdFiles() throws Exception { RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO); ArrayList ops = new ArrayList<>(); @@ -605,11 +638,11 @@ public void testGetGenerationsToBeDeletedEmptyMetadataFilesNotToBeDeleted() thro List metadataFilesNotToBeDeleted = new ArrayList<>(); List metadataFilesToBeDeleted = List.of( // 4 to 7 - "metadata__9223372036438563903__9223372036854775800__9223370311919910398__31__9223372036854775803__1", + "metadata__9223372036854775806__9223372036854775800__9223370311919910398__31__9223372036854775803__1__1", // 17 to 37 - "metadata__9223372036438563903__9223372036854775770__9223370311919910398__31__9223372036854775790__1", + "metadata__9223372036854775806__9223372036854775770__9223370311919910398__31__9223372036854775790__1__1", // 27 to 42 - "metadata__9223372036438563903__9223372036854775765__9223370311919910403__31__9223372036854775780__1" + "metadata__9223372036854775806__9223372036854775765__9223370311919910403__31__9223372036854775780__1__1" ); Set generations = ((RemoteFsTimestampAwareTranslog) translog).getGenerationsToBeDeleted( metadataFilesNotToBeDeleted, @@ -619,6 +652,7 @@ public void testGetGenerationsToBeDeletedEmptyMetadataFilesNotToBeDeleted() thro Set md1Generations = LongStream.rangeClosed(4, 7).boxed().collect(Collectors.toSet()); Set md2Generations = LongStream.rangeClosed(17, 37).boxed().collect(Collectors.toSet()); Set md3Generations = LongStream.rangeClosed(27, 42).boxed().collect(Collectors.toSet()); + assertTrue(generations.containsAll(md1Generations)); assertTrue(generations.containsAll(md2Generations)); assertTrue(generations.containsAll(md3Generations)); @@ -632,19 +666,19 @@ public void testGetGenerationsToBeDeletedEmptyMetadataFilesNotToBeDeleted() thro public void testGetGenerationsToBeDeleted() throws IOException { List metadataFilesNotToBeDeleted = List.of( // 1 to 4 - "metadata__9223372036438563903__9223372036854775803__9223370311919910398__31__9223372036854775806__1", + "metadata__9223372036854775806__9223372036854775803__9223370311919910398__31__9223372036854775806__1__1", // 26 to 30 - "metadata__9223372036438563903__9223372036854775777__9223370311919910398__31__9223372036854775781__1", + "metadata__9223372036854775806__9223372036854775777__9223370311919910398__31__9223372036854775781__1__1", // 42 to 100 - "metadata__9223372036438563903__9223372036854775707__9223370311919910403__31__9223372036854775765__1" + "metadata__9223372036854775806__9223372036854775707__9223370311919910403__31__9223372036854775765__1__1" ); List metadataFilesToBeDeleted = List.of( // 4 to 7 - "metadata__9223372036438563903__9223372036854775800__9223370311919910398__31__9223372036854775803__1", + "metadata__9223372036854775806__9223372036854775800__9223370311919910398__31__9223372036854775803__1__1", // 17 to 37 - "metadata__9223372036438563903__9223372036854775770__9223370311919910398__31__9223372036854775790__1", + "metadata__9223372036854775806__9223372036854775770__9223370311919910398__31__9223372036854775790__1__1", // 27 to 42 - "metadata__9223372036438563903__9223372036854775765__9223370311919910403__31__9223372036854775780__1" + 
"metadata__9223372036854775806__9223372036854775765__9223370311919910403__31__9223372036854775780__1__1" ); Set generations = ((RemoteFsTimestampAwareTranslog) translog).getGenerationsToBeDeleted( metadataFilesNotToBeDeleted, @@ -654,6 +688,7 @@ public void testGetGenerationsToBeDeleted() throws IOException { Set md1Generations = LongStream.rangeClosed(5, 7).boxed().collect(Collectors.toSet()); Set md2Generations = LongStream.rangeClosed(17, 25).boxed().collect(Collectors.toSet()); Set md3Generations = LongStream.rangeClosed(31, 41).boxed().collect(Collectors.toSet()); + assertTrue(generations.containsAll(md1Generations)); assertTrue(generations.containsAll(md2Generations)); assertTrue(generations.containsAll(md3Generations)); @@ -784,49 +819,49 @@ public void testGetMinMaxTranslogGenerationFromMetadataFile() throws IOException assertEquals( new Tuple<>(701L, 1008L), translog.getMinMaxTranslogGenerationFromMetadataFile( - "metadata__9223372036438563903__9223372036854774799__9223370311919910393__31__9223372036854775106__1", + "metadata__9223372036438563903__9223372036854774799__9223370311919910393__31__9223372036854775106__1__1", translogTransferManager ) ); assertEquals( new Tuple<>(4L, 7L), translog.getMinMaxTranslogGenerationFromMetadataFile( - "metadata__9223372036438563903__9223372036854775800__9223370311919910398__31__9223372036854775803__1", + "metadata__9223372036438563903__9223372036854775800__9223370311919910398__31__9223372036854775803__2__1", translogTransferManager ) ); assertEquals( new Tuple<>(106L, 106L), translog.getMinMaxTranslogGenerationFromMetadataFile( - "metadata__9223372036438563903__9223372036854775701__9223370311919910403__31__9223372036854775701__1", + "metadata__9223372036438563903__9223372036854775701__9223370311919910403__31__9223372036854775701__3__1", translogTransferManager ) ); assertEquals( new Tuple<>(4573L, 99964L), translog.getMinMaxTranslogGenerationFromMetadataFile( - "metadata__9223372036438563903__9223372036854675843__9223370311919910408__31__9223372036854771234__1", + "metadata__9223372036438563903__9223372036854675843__9223370311919910408__31__9223372036854771234__4__1", translogTransferManager ) ); assertEquals( new Tuple<>(1L, 4L), translog.getMinMaxTranslogGenerationFromMetadataFile( - "metadata__9223372036438563903__9223372036854775803__9223370311919910413__31__9223372036854775806__1", + "metadata__9223372036438563903__9223372036854775803__9223370311919910413__31__9223372036854775806__5__1", translogTransferManager ) ); assertEquals( new Tuple<>(2474L, 3462L), translog.getMinMaxTranslogGenerationFromMetadataFile( - "metadata__9223372036438563903__9223372036854772345__9223370311919910429__31__9223372036854773333__1", + "metadata__9223372036438563903__9223372036854772345__9223370311919910429__31__9223372036854773333__6__1", translogTransferManager ) ); assertEquals( new Tuple<>(5807L, 7917L), translog.getMinMaxTranslogGenerationFromMetadataFile( - "metadata__9223372036438563903__9223372036854767890__9223370311919910434__31__9223372036854770000__1", + "metadata__9223372036438563903__9223372036854767890__9223370311919910434__31__9223372036854770000__7__1", translogTransferManager ) ); @@ -860,4 +895,93 @@ public void testGetMinMaxTranslogGenerationFromMetadataFile() throws IOException verify(translogTransferManager).readMetadata("metadata__9223372036438563903__9223372036854774799__9223370311919910393__31__1"); verify(translogTransferManager).readMetadata("metadata__9223372036438563903__9223372036854775800__9223370311919910398__31__1"); } + + 
public void testDeleteStaleRemotePrimaryTerms() throws IOException { + TranslogTransferManager translogTransferManager = mock(TranslogTransferManager.class); + + List metadataFiles = List.of( + // PT 4 to 9 + "metadata__9223372036854775798__9223372036854774799__9223370311919910393__node1__9223372036438563958__4__1", + // PT 2 to 7 + "metadata__9223372036854775800__9223372036854774799__9223370311919910393__node1__9223372036438563958__2__1", + // PT 2 to 6 + "metadata__9223372036854775801__9223372036854774799__9223370311919910393__node1__9223372036438563958__2__1" + ); + + Logger staticLogger = LogManager.getLogger(RemoteFsTimestampAwareTranslogTests.class); + when(translogTransferManager.listPrimaryTermsInRemote()).thenReturn(Set.of(1L, 2L, 3L, 4L)); + AtomicLong minPrimaryTermInRemote = new AtomicLong(Long.MAX_VALUE); + RemoteFsTimestampAwareTranslog.deleteStaleRemotePrimaryTerms( + metadataFiles, + translogTransferManager, + new HashMap<>(), + minPrimaryTermInRemote, + staticLogger + ); + verify(translogTransferManager).deletePrimaryTermsAsync(2L); + assertEquals(2, minPrimaryTermInRemote.get()); + + RemoteFsTimestampAwareTranslog.deleteStaleRemotePrimaryTerms( + metadataFiles, + translogTransferManager, + new HashMap<>(), + minPrimaryTermInRemote, + staticLogger + ); + // This means there are no new invocations of deletePrimaryTermAsync + verify(translogTransferManager, times(1)).deletePrimaryTermsAsync(anyLong()); + } + + public void testDeleteStaleRemotePrimaryTermsNoPrimaryTermInRemote() throws IOException { + TranslogTransferManager translogTransferManager = mock(TranslogTransferManager.class); + + List metadataFiles = List.of( + // PT 4 to 9 + "metadata__9223372036854775798__9223372036854774799__9223370311919910393__node1__9223372036438563958__4__1", + // PT 2 to 7 + "metadata__9223372036854775800__9223372036854774799__9223370311919910393__node1__9223372036438563958__2__1", + // PT 2 to 6 + "metadata__9223372036854775801__9223372036854774799__9223370311919910393__node1__9223372036438563958__2__1" + ); + + Logger staticLogger = LogManager.getLogger(RemoteFsTimestampAwareTranslogTests.class); + when(translogTransferManager.listPrimaryTermsInRemote()).thenReturn(Set.of()); + AtomicLong minPrimaryTermInRemote = new AtomicLong(Long.MAX_VALUE); + RemoteFsTimestampAwareTranslog.deleteStaleRemotePrimaryTerms( + metadataFiles, + translogTransferManager, + new HashMap<>(), + minPrimaryTermInRemote, + staticLogger + ); + verify(translogTransferManager, times(0)).deletePrimaryTermsAsync(anyLong()); + assertEquals(Long.MAX_VALUE, minPrimaryTermInRemote.get()); + } + + public void testDeleteStaleRemotePrimaryTermsPrimaryTermInRemoteIsBigger() throws IOException { + TranslogTransferManager translogTransferManager = mock(TranslogTransferManager.class); + + List metadataFiles = List.of( + // PT 4 to 9 + "metadata__9223372036854775798__9223372036854774799__9223370311919910393__node1__9223372036438563958__4__1", + // PT 2 to 7 + "metadata__9223372036854775800__9223372036854774799__9223370311919910393__node1__9223372036438563958__2__1", + // PT 2 to 6 + "metadata__9223372036854775801__9223372036854774799__9223370311919910393__node1__9223372036438563958__2__1" + ); + + Logger staticLogger = LogManager.getLogger(RemoteFsTimestampAwareTranslogTests.class); + when(translogTransferManager.listPrimaryTermsInRemote()).thenReturn(Set.of(2L, 3L, 4L)); + AtomicLong minPrimaryTermInRemote = new AtomicLong(Long.MAX_VALUE); + RemoteFsTimestampAwareTranslog.deleteStaleRemotePrimaryTerms( + metadataFiles, + 
translogTransferManager, + new HashMap<>(), + minPrimaryTermInRemote, + staticLogger + ); + verify(translogTransferManager, times(0)).deletePrimaryTermsAsync(anyLong()); + assertEquals(2, minPrimaryTermInRemote.get()); + } + } diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java index 8605043ddd5b5..ed0d6b7d50706 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java @@ -628,7 +628,7 @@ public void testMetadataConflict() throws InterruptedException { String mdFilename = tm.getFileName(); long count = mdFilename.chars().filter(ch -> ch == METADATA_SEPARATOR.charAt(0)).count(); // There should not be any `_` in mdFile name as it is used as a separator. - assertEquals(12, count); + assertEquals(14, count); Thread.sleep(1); TranslogTransferMetadata tm2 = new TranslogTransferMetadata(1, 1, 1, 2, "node--2"); String mdFilename2 = tm2.getFileName(); diff --git a/server/src/test/java/org/opensearch/search/approximate/ApproximatePointRangeQueryTests.java b/server/src/test/java/org/opensearch/search/approximate/ApproximatePointRangeQueryTests.java index 4b41079de18ef..9c022aade5dc6 100644 --- a/server/src/test/java/org/opensearch/search/approximate/ApproximatePointRangeQueryTests.java +++ b/server/src/test/java/org/opensearch/search/approximate/ApproximatePointRangeQueryTests.java @@ -13,9 +13,12 @@ import org.apache.lucene.analysis.core.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.LongPoint; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TotalHits; import org.apache.lucene.store.Directory; @@ -26,6 +29,7 @@ import java.io.IOException; +import static java.util.Arrays.asList; import static org.apache.lucene.document.LongPoint.pack; import static org.mockito.Mockito.mock; @@ -253,8 +257,7 @@ public void testApproximateRangeShortCircuitAscSort() throws IOException { for (int v = 0; v < dims; v++) { scratch[v] = i; } - doc.add(new LongPoint("point", scratch)); - iw.addDocument(doc); + iw.addDocument(asList(new LongPoint("point", scratch[0]), new NumericDocValuesField("point", scratch[0]))); } iw.flush(); iw.forceMerge(1); @@ -277,8 +280,9 @@ protected String toString(int dimension, byte[] value) { Query query = LongPoint.newRangeQuery("point", lower, upper); ; IndexSearcher searcher = new IndexSearcher(reader); - TopDocs topDocs = searcher.search(approximateQuery, 10); - TopDocs topDocs1 = searcher.search(query, 10); + Sort sort = new Sort(new SortField("point", SortField.Type.LONG)); + TopDocs topDocs = searcher.search(approximateQuery, 10, sort); + TopDocs topDocs1 = searcher.search(query, 10, sort); // since the approximate query short-circuits once it has collected `size` hits, the total hit counts will not be equal assertNotEquals(topDocs.totalHits, topDocs1.totalHits); diff --git a/server/src/test/java/org/opensearch/wlm/QueryGroupLevelResourceUsageViewTests.java b/server/src/test/java/org/opensearch/wlm/QueryGroupLevelResourceUsageViewTests.java index 532bf3de95bd6..0c7eb721806d5 100644
--- a/server/src/test/java/org/opensearch/wlm/QueryGroupLevelResourceUsageViewTests.java +++ b/server/src/test/java/org/opensearch/wlm/QueryGroupLevelResourceUsageViewTests.java @@ -8,23 +8,34 @@ package org.opensearch.wlm; -import org.opensearch.action.search.SearchAction; -import org.opensearch.core.tasks.TaskId; -import org.opensearch.tasks.Task; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.wlm.tracker.ResourceUsageCalculatorTrackerServiceTests; -import java.util.Collections; import java.util.List; import java.util.Map; +import static org.opensearch.wlm.cancellation.QueryGroupTaskCancellationService.MIN_VALUE; +import static org.opensearch.wlm.tracker.CpuUsageCalculator.PROCESSOR_COUNT; +import static org.opensearch.wlm.tracker.MemoryUsageCalculator.HEAP_SIZE_BYTES; +import static org.opensearch.wlm.tracker.ResourceUsageCalculatorTests.createMockTaskWithResourceStats; +import static org.mockito.Mockito.mock; + public class QueryGroupLevelResourceUsageViewTests extends OpenSearchTestCase { - Map<ResourceType, Long> resourceUsage; - List<Task> activeTasks; + Map<ResourceType, Double> resourceUsage; + List<QueryGroupTask> activeTasks; + ResourceUsageCalculatorTrackerServiceTests.TestClock clock; + WorkloadManagementSettings settings; public void setUp() throws Exception { super.setUp(); - resourceUsage = Map.of(ResourceType.fromName("memory"), 34L, ResourceType.fromName("cpu"), 12L); - activeTasks = List.of(getRandomTask(4321)); + settings = mock(WorkloadManagementSettings.class); + clock = new ResourceUsageCalculatorTrackerServiceTests.TestClock(); + activeTasks = List.of(createMockTaskWithResourceStats(QueryGroupTask.class, 100, 200, 0, 1)); + clock.fastForwardBy(300); + double memoryUsage = 200.0 / HEAP_SIZE_BYTES; + double cpuUsage = 100.0 / (PROCESSOR_COUNT * 300.0); + + resourceUsage = Map.of(ResourceType.MEMORY, memoryUsage, ResourceType.CPU, cpuUsage); } public void testGetResourceUsageData() { @@ -32,7 +43,7 @@ public void testGetResourceUsageData() { resourceUsage, activeTasks ); - Map<ResourceType, Long> resourceUsageData = queryGroupLevelResourceUsageView.getResourceUsageData(); + Map<ResourceType, Double> resourceUsageData = queryGroupLevelResourceUsageView.getResourceUsageData(); assertTrue(assertResourceUsageData(resourceUsageData)); } @@ -41,23 +52,13 @@ public void testGetActiveTasks() { resourceUsage, activeTasks ); - List<Task> activeTasks = queryGroupLevelResourceUsageView.getActiveTasks(); + List<QueryGroupTask> activeTasks = queryGroupLevelResourceUsageView.getActiveTasks(); assertEquals(1, activeTasks.size()); - assertEquals(4321, activeTasks.get(0).getId()); + assertEquals(1, activeTasks.get(0).getId()); } - private boolean assertResourceUsageData(Map<ResourceType, Long> resourceUsageData) { - return resourceUsageData.get(ResourceType.fromName("memory")) == 34L && resourceUsageData.get(ResourceType.fromName("cpu")) == 12L; - } - - private Task getRandomTask(long id) { - return new Task( - id, - "transport", - SearchAction.NAME, - "test description", - new TaskId(randomLong() + ":" + randomLong()), - Collections.emptyMap() - ); + private boolean assertResourceUsageData(Map<ResourceType, Double> resourceUsageData) { + return (resourceUsageData.get(ResourceType.MEMORY) - 200.0 / HEAP_SIZE_BYTES) <= MIN_VALUE + && (resourceUsageData.get(ResourceType.CPU) - 100.0 / (PROCESSOR_COUNT * 300.0)) <= MIN_VALUE; } } diff --git a/server/src/test/java/org/opensearch/wlm/ResourceTypeTests.java b/server/src/test/java/org/opensearch/wlm/ResourceTypeTests.java index 737cbb37b554c..16bd8b7e66266 100644 --- a/server/src/test/java/org/opensearch/wlm/ResourceTypeTests.java +++ b/server/src/test/java/org/opensearch/wlm/ResourceTypeTests.java @@ -8,14 
+8,8 @@ package org.opensearch.wlm; -import org.opensearch.action.search.SearchShardTask; -import org.opensearch.core.tasks.resourcetracker.ResourceStats; -import org.opensearch.tasks.CancellableTask; import org.opensearch.test.OpenSearchTestCase; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - public class ResourceTypeTests extends OpenSearchTestCase { public void testFromName() { @@ -35,17 +29,4 @@ public void testGetName() { assertEquals("cpu", ResourceType.CPU.getName()); assertEquals("memory", ResourceType.MEMORY.getName()); } - - public void testGetResourceUsage() { - SearchShardTask mockTask = createMockTask(SearchShardTask.class, 100, 200); - assertEquals(100, ResourceType.CPU.getResourceUsage(mockTask)); - assertEquals(200, ResourceType.MEMORY.getResourceUsage(mockTask)); - } - - private <T extends CancellableTask> T createMockTask(Class<T> type, long cpuUsage, long heapUsage) { - T task = mock(type); - when(task.getTotalResourceUtilization(ResourceStats.CPU)).thenReturn(cpuUsage); - when(task.getTotalResourceUtilization(ResourceStats.MEMORY)).thenReturn(heapUsage); - return task; - } } diff --git a/server/src/test/java/org/opensearch/wlm/cancellation/MaximumResourceTaskSelectionStrategyTests.java b/server/src/test/java/org/opensearch/wlm/cancellation/MaximumResourceTaskSelectionStrategyTests.java new file mode 100644 index 0000000000000..dc79822c59c49 --- /dev/null +++ b/server/src/test/java/org/opensearch/wlm/cancellation/MaximumResourceTaskSelectionStrategyTests.java @@ -0,0 +1,126 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.wlm.cancellation; + +import org.opensearch.action.search.SearchAction; +import org.opensearch.action.search.SearchTask; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.tasks.resourcetracker.ResourceStats; +import org.opensearch.core.tasks.resourcetracker.ResourceStatsType; +import org.opensearch.core.tasks.resourcetracker.ResourceUsageMetric; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.wlm.QueryGroupTask; +import org.opensearch.wlm.ResourceType; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.stream.IntStream; + +import static org.opensearch.wlm.cancellation.QueryGroupTaskCancellationService.MIN_VALUE; +import static org.opensearch.wlm.tracker.MemoryUsageCalculator.HEAP_SIZE_BYTES; + +public class MaximumResourceTaskSelectionStrategyTests extends OpenSearchTestCase { + + public void testSelectTasksToCancelSelectsTasksMeetingThreshold_ifReduceByIsGreaterThanZero() { + MaximumResourceTaskSelectionStrategy testHighestResourceConsumingTaskFirstSelectionStrategy = + new MaximumResourceTaskSelectionStrategy(); + double reduceBy = 50000.0 / HEAP_SIZE_BYTES; + ResourceType resourceType = ResourceType.MEMORY; + List<QueryGroupTask> tasks = getListOfTasks(100); + List<QueryGroupTask> selectedTasks = testHighestResourceConsumingTaskFirstSelectionStrategy.selectTasksForCancellation( + tasks, + reduceBy, + resourceType + ); + assertFalse(selectedTasks.isEmpty()); + boolean sortedInDescendingResourceUsage = IntStream.range(0, selectedTasks.size() - 1) + .noneMatch( + index -> ResourceType.MEMORY.getResourceUsageCalculator() + .calculateTaskResourceUsage(selectedTasks.get(index)) < ResourceType.MEMORY.getResourceUsageCalculator() + .calculateTaskResourceUsage(selectedTasks.get(index + 1)) + ); + assertTrue(sortedInDescendingResourceUsage); + assertTrue(tasksUsageMeetsThreshold(selectedTasks, reduceBy)); + } + + public void testSelectTasksToCancelSelectsTasksMeetingThreshold_ifReduceByIsLesserThanZero() { + MaximumResourceTaskSelectionStrategy testHighestResourceConsumingTaskFirstSelectionStrategy = + new MaximumResourceTaskSelectionStrategy(); + double reduceBy = -50.0 / HEAP_SIZE_BYTES; + ResourceType resourceType = ResourceType.MEMORY; + List<QueryGroupTask> tasks = getListOfTasks(3); + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> testHighestResourceConsumingTaskFirstSelectionStrategy.selectTasksForCancellation(tasks, reduceBy, resourceType) + ); + assertEquals("limit has to be greater than zero", exception.getMessage()); + } + + public void testSelectTasksToCancelSelectsTasksMeetingThreshold_ifReduceByIsEqualToZero() { + MaximumResourceTaskSelectionStrategy testHighestResourceConsumingTaskFirstSelectionStrategy = + new MaximumResourceTaskSelectionStrategy(); + double reduceBy = 0.0; + ResourceType resourceType = ResourceType.MEMORY; + List<QueryGroupTask> tasks = getListOfTasks(50); + List<QueryGroupTask> selectedTasks = testHighestResourceConsumingTaskFirstSelectionStrategy.selectTasksForCancellation( + tasks, + reduceBy, + resourceType + ); + assertTrue(selectedTasks.isEmpty()); + } + + private boolean tasksUsageMeetsThreshold(List<QueryGroupTask> selectedTasks, double threshold) { + double memory = 0; + for (QueryGroupTask task : selectedTasks) { + memory += ResourceType.MEMORY.getResourceUsageCalculator().calculateTaskResourceUsage(task); + if ((memory - threshold) > MIN_VALUE) { + return true; + } + } + return false; + } + + private List<QueryGroupTask> getListOfTasks(int numberOfTasks) { + List<QueryGroupTask> tasks = new ArrayList<>(); + + while (tasks.size() < numberOfTasks) { + long id = randomLong(); + final QueryGroupTask task = getRandomSearchTask(id); + long initial_memory = randomLongBetween(1, 100); + + ResourceUsageMetric[] initialTaskResourceMetrics = new ResourceUsageMetric[] { + new ResourceUsageMetric(ResourceStats.MEMORY, initial_memory) }; + task.startThreadResourceTracking(id, ResourceStatsType.WORKER_STATS, initialTaskResourceMetrics); + + long memory = initial_memory + randomLongBetween(1, 10000); + + ResourceUsageMetric[] taskResourceMetrics = new ResourceUsageMetric[] { + new ResourceUsageMetric(ResourceStats.MEMORY, memory), }; + task.updateThreadResourceStats(id, ResourceStatsType.WORKER_STATS, taskResourceMetrics); + task.stopThreadResourceTracking(id, ResourceStatsType.WORKER_STATS); + tasks.add(task); + } + + return tasks; + } + + private QueryGroupTask getRandomSearchTask(long id) { + return new SearchTask( + id, + "transport", + SearchAction.NAME, + () -> "test description", + new TaskId(randomLong() + ":" + randomLong()), + Collections.emptyMap() + ); + } +} diff --git a/server/src/test/java/org/opensearch/wlm/cancellation/QueryGroupTaskCancellationServiceTests.java b/server/src/test/java/org/opensearch/wlm/cancellation/QueryGroupTaskCancellationServiceTests.java new file mode 100644 index 0000000000000..f7a49235efc69 --- /dev/null +++ b/server/src/test/java/org/opensearch/wlm/cancellation/QueryGroupTaskCancellationServiceTests.java @@ -0,0 +1,541 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.wlm.cancellation; + +import org.opensearch.action.search.SearchAction; +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.tasks.TaskCancellation; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.wlm.MutableQueryGroupFragment; +import org.opensearch.wlm.MutableQueryGroupFragment.ResiliencyMode; +import org.opensearch.wlm.QueryGroupLevelResourceUsageView; +import org.opensearch.wlm.QueryGroupTask; +import org.opensearch.wlm.ResourceType; +import org.opensearch.wlm.WorkloadManagementSettings; +import org.opensearch.wlm.tracker.QueryGroupResourceUsageTrackerService; +import org.opensearch.wlm.tracker.ResourceUsageCalculatorTrackerServiceTests.TestClock; +import org.junit.Before; + +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class QueryGroupTaskCancellationServiceTests extends OpenSearchTestCase { + private static final String queryGroupId1 = "queryGroup1"; + private static final String queryGroupId2 = "queryGroup2"; + + private TestClock clock; + + private Map queryGroupLevelViews; + private Set activeQueryGroups; + private Set deletedQueryGroups; + private QueryGroupTaskCancellationService taskCancellation; + private WorkloadManagementSettings workloadManagementSettings; + private QueryGroupResourceUsageTrackerService resourceUsageTrackerService; + + @Before + public void setup() { + workloadManagementSettings = mock(WorkloadManagementSettings.class); + queryGroupLevelViews = new HashMap<>(); + activeQueryGroups = new HashSet<>(); + deletedQueryGroups = new HashSet<>(); + + clock = new TestClock(); + when(workloadManagementSettings.getNodeLevelCpuCancellationThreshold()).thenReturn(0.9); + when(workloadManagementSettings.getNodeLevelMemoryCancellationThreshold()).thenReturn(0.9); + resourceUsageTrackerService = mock(QueryGroupResourceUsageTrackerService.class); + taskCancellation = new QueryGroupTaskCancellationService( + workloadManagementSettings, + new MaximumResourceTaskSelectionStrategy(), + resourceUsageTrackerService, + activeQueryGroups, + deletedQueryGroups + ); + } + + public void testGetCancellableTasksFrom_setupAppropriateCancellationReasonAndScore() { + ResourceType resourceType = ResourceType.CPU; + double cpuUsage = 0.11; + double memoryUsage = 0.0; + Double threshold = 0.1; + + QueryGroup queryGroup1 = new QueryGroup( + "testQueryGroup", + queryGroupId1, + new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + clock.fastForwardBy(1000); + + QueryGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); + when(mockView.getResourceUsageData()).thenReturn(Map.of(resourceType, cpuUsage, ResourceType.MEMORY, memoryUsage)); + queryGroupLevelViews.put(queryGroupId1, mockView); + taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; + + List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(List.of(queryGroup1)); + assertEquals(2, cancellableTasksFrom.size()); + assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); + assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); + assertEquals(1, cancellableTasksFrom.get(0).getReasons().get(0).getCancellationScore()); + } + + public void 
testGetCancellableTasksFrom_returnsTasksWhenBreachingThreshold() { + ResourceType resourceType = ResourceType.CPU; + double cpuUsage = 0.11; + double memoryUsage = 0.0; + Double threshold = 0.1; + + QueryGroup queryGroup1 = new QueryGroup( + "testQueryGroup", + queryGroupId1, + new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + QueryGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); + when(mockView.getResourceUsageData()).thenReturn(Map.of(resourceType, cpuUsage, ResourceType.MEMORY, memoryUsage)); + queryGroupLevelViews.put(queryGroupId1, mockView); + taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; + + List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(List.of(queryGroup1)); + assertEquals(2, cancellableTasksFrom.size()); + assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); + assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); + } + + public void testGetCancellableTasksFrom_returnsTasksWhenBreachingThresholdForMemory() { + ResourceType resourceType = ResourceType.MEMORY; + double cpuUsage = 0.0; + double memoryUsage = 0.11; + Double threshold = 0.1; + + QueryGroup queryGroup1 = new QueryGroup( + "testQueryGroup", + queryGroupId1, + new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + QueryGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); + when(mockView.getResourceUsageData()).thenReturn(Map.of(ResourceType.CPU, cpuUsage, resourceType, memoryUsage)); + + queryGroupLevelViews.put(queryGroupId1, mockView); + activeQueryGroups.add(queryGroup1); + taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; + + List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(ResiliencyMode.ENFORCED); + assertEquals(2, cancellableTasksFrom.size()); + assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); + assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); + } + + public void testGetCancellableTasksFrom_returnsNoTasksWhenNotBreachingThreshold() { + ResourceType resourceType = ResourceType.CPU; + double cpuUsage = 0.81; + double memoryUsage = 0.0; + Double threshold = 0.9; + QueryGroup queryGroup1 = new QueryGroup( + "testQueryGroup", + queryGroupId1, + new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + QueryGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); + when(mockView.getResourceUsageData()).thenReturn(Map.of(ResourceType.CPU, cpuUsage, ResourceType.MEMORY, memoryUsage)); + queryGroupLevelViews.put(queryGroupId1, mockView); + activeQueryGroups.add(queryGroup1); + taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; + + List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(List.of(queryGroup1)); + assertTrue(cancellableTasksFrom.isEmpty()); + } + + public void testGetCancellableTasksFrom_filtersQueryGroupCorrectly() { + ResourceType resourceType = ResourceType.CPU; + double usage = 0.02; + Double threshold = 0.01; + + QueryGroup queryGroup1 = new QueryGroup( + "testQueryGroup", + queryGroupId1, + new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + QueryGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); + queryGroupLevelViews.put(queryGroupId1, mockView); + activeQueryGroups.add(queryGroup1); + taskCancellation.queryGroupLevelResourceUsageViews = 
queryGroupLevelViews; + + QueryGroupTaskCancellationService taskCancellation = new QueryGroupTaskCancellationService( + workloadManagementSettings, + new MaximumResourceTaskSelectionStrategy(), + resourceUsageTrackerService, + activeQueryGroups, + deletedQueryGroups + ); + + List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(ResiliencyMode.SOFT); + assertEquals(0, cancellableTasksFrom.size()); + } + + public void testCancelTasks_cancelsGivenTasks() { + ResourceType resourceType = ResourceType.CPU; + double cpuUsage = 0.011; + double memoryUsage = 0.011; + + Double threshold = 0.01; + + QueryGroup queryGroup1 = new QueryGroup( + "testQueryGroup", + queryGroupId1, + new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold, ResourceType.MEMORY, threshold)), + 1L + ); + + QueryGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); + when(mockView.getResourceUsageData()).thenReturn(Map.of(ResourceType.CPU, cpuUsage, ResourceType.MEMORY, memoryUsage)); + + queryGroupLevelViews.put(queryGroupId1, mockView); + activeQueryGroups.add(queryGroup1); + + QueryGroupTaskCancellationService taskCancellation = new QueryGroupTaskCancellationService( + workloadManagementSettings, + new MaximumResourceTaskSelectionStrategy(), + resourceUsageTrackerService, + activeQueryGroups, + deletedQueryGroups + ); + + taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; + + List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(ResiliencyMode.ENFORCED); + assertEquals(2, cancellableTasksFrom.size()); + assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); + assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); + + when(resourceUsageTrackerService.constructQueryGroupLevelUsageViews()).thenReturn(queryGroupLevelViews); + taskCancellation.cancelTasks(() -> false); + assertTrue(cancellableTasksFrom.get(0).getTask().isCancelled()); + assertTrue(cancellableTasksFrom.get(1).getTask().isCancelled()); + } + + public void testCancelTasks_cancelsTasksFromDeletedQueryGroups() { + ResourceType resourceType = ResourceType.CPU; + double activeQueryGroupCpuUsage = 0.01; + double activeQueryGroupMemoryUsage = 0.0; + double deletedQueryGroupCpuUsage = 0.01; + double deletedQueryGroupMemoryUsage = 0.0; + Double threshold = 0.01; + + QueryGroup activeQueryGroup = new QueryGroup( + "testQueryGroup", + queryGroupId1, + new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + QueryGroup deletedQueryGroup = new QueryGroup( + "testQueryGroup", + queryGroupId2, + new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + QueryGroupLevelResourceUsageView mockView1 = createResourceUsageViewMock(); + QueryGroupLevelResourceUsageView mockView2 = createResourceUsageViewMock( + resourceType, + deletedQueryGroupCpuUsage, + List.of(1000, 1001) + ); + + when(mockView1.getResourceUsageData()).thenReturn( + Map.of(ResourceType.CPU, activeQueryGroupCpuUsage, ResourceType.MEMORY, activeQueryGroupMemoryUsage) + ); + when(mockView2.getResourceUsageData()).thenReturn( + Map.of(ResourceType.CPU, deletedQueryGroupCpuUsage, ResourceType.MEMORY, deletedQueryGroupMemoryUsage) + ); + queryGroupLevelViews.put(queryGroupId1, mockView1); + queryGroupLevelViews.put(queryGroupId2, mockView2); + + activeQueryGroups.add(activeQueryGroup); + deletedQueryGroups.add(deletedQueryGroup); + + QueryGroupTaskCancellationService taskCancellation = new 
QueryGroupTaskCancellationService( + workloadManagementSettings, + new MaximumResourceTaskSelectionStrategy(), + resourceUsageTrackerService, + activeQueryGroups, + deletedQueryGroups + ); + + taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; + + List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(ResiliencyMode.ENFORCED); + assertEquals(2, cancellableTasksFrom.size()); + assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); + assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); + + List cancellableTasksFromDeletedQueryGroups = taskCancellation.getAllCancellableTasks(List.of(deletedQueryGroup)); + assertEquals(2, cancellableTasksFromDeletedQueryGroups.size()); + assertEquals(1000, cancellableTasksFromDeletedQueryGroups.get(0).getTask().getId()); + assertEquals(1001, cancellableTasksFromDeletedQueryGroups.get(1).getTask().getId()); + + when(resourceUsageTrackerService.constructQueryGroupLevelUsageViews()).thenReturn(queryGroupLevelViews); + taskCancellation.cancelTasks(() -> true); + + assertTrue(cancellableTasksFrom.get(0).getTask().isCancelled()); + assertTrue(cancellableTasksFrom.get(1).getTask().isCancelled()); + assertTrue(cancellableTasksFromDeletedQueryGroups.get(0).getTask().isCancelled()); + assertTrue(cancellableTasksFromDeletedQueryGroups.get(1).getTask().isCancelled()); + } + + public void testCancelTasks_does_not_cancelTasksFromDeletedQueryGroups_whenNodeNotInDuress() { + ResourceType resourceType = ResourceType.CPU; + double activeQueryGroupCpuUsage = 0.11; + double activeQueryGroupMemoryUsage = 0.0; + double deletedQueryGroupCpuUsage = 0.11; + double deletedQueryGroupMemoryUsage = 0.0; + + Double threshold = 0.01; + + QueryGroup activeQueryGroup = new QueryGroup( + "testQueryGroup", + queryGroupId1, + new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + QueryGroup deletedQueryGroup = new QueryGroup( + "testQueryGroup", + queryGroupId2, + new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + QueryGroupLevelResourceUsageView mockView1 = createResourceUsageViewMock(); + QueryGroupLevelResourceUsageView mockView2 = createResourceUsageViewMock( + resourceType, + deletedQueryGroupCpuUsage, + List.of(1000, 1001) + ); + + when(mockView1.getResourceUsageData()).thenReturn( + Map.of(ResourceType.CPU, activeQueryGroupCpuUsage, ResourceType.MEMORY, activeQueryGroupMemoryUsage) + ); + when(mockView2.getResourceUsageData()).thenReturn( + Map.of(ResourceType.CPU, deletedQueryGroupCpuUsage, ResourceType.MEMORY, deletedQueryGroupMemoryUsage) + ); + + queryGroupLevelViews.put(queryGroupId1, mockView1); + queryGroupLevelViews.put(queryGroupId2, mockView2); + activeQueryGroups.add(activeQueryGroup); + deletedQueryGroups.add(deletedQueryGroup); + + QueryGroupTaskCancellationService taskCancellation = new QueryGroupTaskCancellationService( + workloadManagementSettings, + new MaximumResourceTaskSelectionStrategy(), + resourceUsageTrackerService, + activeQueryGroups, + deletedQueryGroups + ); + taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; + + List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(ResiliencyMode.ENFORCED); + assertEquals(2, cancellableTasksFrom.size()); + assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); + assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); + + List cancellableTasksFromDeletedQueryGroups = 
taskCancellation.getAllCancellableTasks(List.of(deletedQueryGroup)); + assertEquals(2, cancellableTasksFromDeletedQueryGroups.size()); + assertEquals(1000, cancellableTasksFromDeletedQueryGroups.get(0).getTask().getId()); + assertEquals(1001, cancellableTasksFromDeletedQueryGroups.get(1).getTask().getId()); + + when(resourceUsageTrackerService.constructQueryGroupLevelUsageViews()).thenReturn(queryGroupLevelViews); + taskCancellation.cancelTasks(() -> false); + + assertTrue(cancellableTasksFrom.get(0).getTask().isCancelled()); + assertTrue(cancellableTasksFrom.get(1).getTask().isCancelled()); + assertFalse(cancellableTasksFromDeletedQueryGroups.get(0).getTask().isCancelled()); + assertFalse(cancellableTasksFromDeletedQueryGroups.get(1).getTask().isCancelled()); + } + + public void testCancelTasks_cancelsGivenTasks_WhenNodeInDuress() { + ResourceType resourceType = ResourceType.CPU; + double cpuUsage1 = 0.11; + double memoryUsage1 = 0.0; + double cpuUsage2 = 0.11; + double memoryUsage2 = 0.0; + Double threshold = 0.01; + + QueryGroup queryGroup1 = new QueryGroup( + "testQueryGroup", + queryGroupId1, + new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + QueryGroup queryGroup2 = new QueryGroup( + "testQueryGroup", + queryGroupId2, + new MutableQueryGroupFragment(ResiliencyMode.SOFT, Map.of(resourceType, threshold)), + 1L + ); + + QueryGroupLevelResourceUsageView mockView1 = createResourceUsageViewMock(); + when(mockView1.getResourceUsageData()).thenReturn(Map.of(ResourceType.CPU, cpuUsage1, ResourceType.MEMORY, memoryUsage1)); + queryGroupLevelViews.put(queryGroupId1, mockView1); + QueryGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); + when(mockView.getActiveTasks()).thenReturn(List.of(getRandomSearchTask(5678), getRandomSearchTask(8765))); + when(mockView.getResourceUsageData()).thenReturn(Map.of(ResourceType.CPU, cpuUsage2, ResourceType.MEMORY, memoryUsage2)); + queryGroupLevelViews.put(queryGroupId2, mockView); + Collections.addAll(activeQueryGroups, queryGroup1, queryGroup2); + + QueryGroupTaskCancellationService taskCancellation = new QueryGroupTaskCancellationService( + workloadManagementSettings, + new MaximumResourceTaskSelectionStrategy(), + resourceUsageTrackerService, + activeQueryGroups, + deletedQueryGroups + ); + + taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; + + List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(ResiliencyMode.ENFORCED); + assertEquals(2, cancellableTasksFrom.size()); + assertEquals(1234, cancellableTasksFrom.get(0).getTask().getId()); + assertEquals(4321, cancellableTasksFrom.get(1).getTask().getId()); + + List cancellableTasksFrom1 = taskCancellation.getAllCancellableTasks(ResiliencyMode.SOFT); + assertEquals(2, cancellableTasksFrom1.size()); + assertEquals(5678, cancellableTasksFrom1.get(0).getTask().getId()); + assertEquals(8765, cancellableTasksFrom1.get(1).getTask().getId()); + + when(resourceUsageTrackerService.constructQueryGroupLevelUsageViews()).thenReturn(queryGroupLevelViews); + taskCancellation.cancelTasks(() -> true); + assertTrue(cancellableTasksFrom.get(0).getTask().isCancelled()); + assertTrue(cancellableTasksFrom.get(1).getTask().isCancelled()); + assertTrue(cancellableTasksFrom1.get(0).getTask().isCancelled()); + assertTrue(cancellableTasksFrom1.get(1).getTask().isCancelled()); + } + + public void testGetAllCancellableTasks_ReturnsNoTasksWhenNotBreachingThresholds() { + ResourceType resourceType = ResourceType.CPU; + double 
queryGroupCpuUsage = 0.09; + double queryGroupMemoryUsage = 0.0; + Double threshold = 0.1; + + QueryGroup queryGroup1 = new QueryGroup( + "testQueryGroup", + queryGroupId1, + new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + QueryGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); + when(mockView.getResourceUsageData()).thenReturn( + Map.of(ResourceType.CPU, queryGroupCpuUsage, ResourceType.MEMORY, queryGroupMemoryUsage) + ); + queryGroupLevelViews.put(queryGroupId1, mockView); + activeQueryGroups.add(queryGroup1); + taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; + + List allCancellableTasks = taskCancellation.getAllCancellableTasks(ResiliencyMode.ENFORCED); + assertTrue(allCancellableTasks.isEmpty()); + } + + public void testGetAllCancellableTasks_ReturnsTasksWhenBreachingThresholds() { + ResourceType resourceType = ResourceType.CPU; + double cpuUsage = 0.11; + double memoryUsage = 0.0; + Double threshold = 0.01; + + QueryGroup queryGroup1 = new QueryGroup( + "testQueryGroup", + queryGroupId1, + new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + QueryGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); + when(mockView.getResourceUsageData()).thenReturn(Map.of(ResourceType.CPU, cpuUsage, ResourceType.MEMORY, memoryUsage)); + queryGroupLevelViews.put(queryGroupId1, mockView); + activeQueryGroups.add(queryGroup1); + taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; + + List allCancellableTasks = taskCancellation.getAllCancellableTasks(ResiliencyMode.ENFORCED); + assertEquals(2, allCancellableTasks.size()); + assertEquals(1234, allCancellableTasks.get(0).getTask().getId()); + assertEquals(4321, allCancellableTasks.get(1).getTask().getId()); + } + + public void testGetCancellableTasksFrom_doesNotReturnTasksWhenQueryGroupIdNotFound() { + ResourceType resourceType = ResourceType.CPU; + double usage = 0.11; + Double threshold = 0.01; + + QueryGroup queryGroup1 = new QueryGroup( + "testQueryGroup1", + queryGroupId1, + new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + QueryGroup queryGroup2 = new QueryGroup( + "testQueryGroup2", + queryGroupId2, + new MutableQueryGroupFragment(ResiliencyMode.ENFORCED, Map.of(resourceType, threshold)), + 1L + ); + + QueryGroupLevelResourceUsageView mockView = createResourceUsageViewMock(); + queryGroupLevelViews.put(queryGroupId1, mockView); + activeQueryGroups.add(queryGroup1); + activeQueryGroups.add(queryGroup2); + taskCancellation.queryGroupLevelResourceUsageViews = queryGroupLevelViews; + + List cancellableTasksFrom = taskCancellation.getAllCancellableTasks(List.of(queryGroup2)); + assertEquals(0, cancellableTasksFrom.size()); + } + + private QueryGroupLevelResourceUsageView createResourceUsageViewMock() { + QueryGroupLevelResourceUsageView mockView = mock(QueryGroupLevelResourceUsageView.class); + when(mockView.getActiveTasks()).thenReturn(List.of(getRandomSearchTask(1234), getRandomSearchTask(4321))); + return mockView; + } + + private QueryGroupLevelResourceUsageView createResourceUsageViewMock(ResourceType resourceType, double usage, Collection ids) { + QueryGroupLevelResourceUsageView mockView = mock(QueryGroupLevelResourceUsageView.class); + when(mockView.getResourceUsageData()).thenReturn(Collections.singletonMap(resourceType, usage)); + 
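// Builds one mock QueryGroupTask per supplied id (1000 and 1001 in the deleted-query-group tests above), so cancellation can be asserted per task.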
when(mockView.getActiveTasks()).thenReturn(ids.stream().map(this::getRandomSearchTask).collect(Collectors.toList())); + return mockView; + } + + private QueryGroupTask getRandomSearchTask(long id) { + return new QueryGroupTask( + id, + "transport", + SearchAction.NAME, + "test description", + new TaskId(randomLong() + ":" + randomLong()), + Collections.emptyMap(), + null, + clock::getTime + ); + } +} diff --git a/server/src/test/java/org/opensearch/wlm/tracker/ResourceUsageCalculatorTests.java b/server/src/test/java/org/opensearch/wlm/tracker/ResourceUsageCalculatorTests.java new file mode 100644 index 0000000000000..21d9717a1aaca --- /dev/null +++ b/server/src/test/java/org/opensearch/wlm/tracker/ResourceUsageCalculatorTests.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.wlm.tracker; + +import org.opensearch.core.tasks.resourcetracker.ResourceStats; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.wlm.QueryGroupTask; +import org.opensearch.wlm.ResourceType; +import org.opensearch.wlm.tracker.ResourceUsageCalculatorTrackerServiceTests.TestClock; + +import java.util.List; + +import static org.opensearch.wlm.cancellation.QueryGroupTaskCancellationService.MIN_VALUE; +import static org.opensearch.wlm.tracker.CpuUsageCalculator.PROCESSOR_COUNT; +import static org.opensearch.wlm.tracker.MemoryUsageCalculator.HEAP_SIZE_BYTES; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ResourceUsageCalculatorTests extends OpenSearchTestCase { + + public void testQueryGroupCpuUsage() { + TestClock clock = new TestClock(); + long fastForwardTime = PROCESSOR_COUNT * 200L; + clock.fastForwardBy(fastForwardTime); + + double expectedQueryGroupCpuUsage = 1.0 / PROCESSOR_COUNT; + + QueryGroupTask mockTask = createMockTaskWithResourceStats(QueryGroupTask.class, fastForwardTime, 200, 0, 123); + when(mockTask.getElapsedTime()).thenReturn(fastForwardTime); + double actualUsage = ResourceType.CPU.getResourceUsageCalculator().calculateResourceUsage(List.of(mockTask)); + assertEquals(expectedQueryGroupCpuUsage, actualUsage, MIN_VALUE); + + double taskResourceUsage = ResourceType.CPU.getResourceUsageCalculator().calculateTaskResourceUsage(mockTask); + assertEquals(1.0, taskResourceUsage, MIN_VALUE); + } + + public void testQueryGroupMemoryUsage() { + QueryGroupTask mockTask = createMockTaskWithResourceStats(QueryGroupTask.class, 100, 200, 0, 123); + double actualMemoryUsage = ResourceType.MEMORY.getResourceUsageCalculator().calculateResourceUsage(List.of(mockTask)); + double expectedMemoryUsage = 200.0 / HEAP_SIZE_BYTES; + + assertEquals(expectedMemoryUsage, actualMemoryUsage, MIN_VALUE); + assertEquals( + 200.0 / HEAP_SIZE_BYTES, + ResourceType.MEMORY.getResourceUsageCalculator().calculateTaskResourceUsage(mockTask), + MIN_VALUE + ); + } + + public static T createMockTaskWithResourceStats( + Class type, + long cpuUsage, + long heapUsage, + long startTimeNanos, + long taskId + ) { + T task = mock(type); + when(task.getTotalResourceUtilization(ResourceStats.CPU)).thenReturn(cpuUsage); + when(task.getTotalResourceUtilization(ResourceStats.MEMORY)).thenReturn(heapUsage); + when(task.getStartTimeNanos()).thenReturn(startTimeNanos); + when(task.getId()).thenReturn(taskId); + return task; + } +} diff --git 
a/server/src/test/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerServiceTests.java b/server/src/test/java/org/opensearch/wlm/tracker/ResourceUsageCalculatorTrackerServiceTests.java similarity index 69% rename from server/src/test/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerServiceTests.java rename to server/src/test/java/org/opensearch/wlm/tracker/ResourceUsageCalculatorTrackerServiceTests.java index ca2891cb532f2..fe72bd6e710c8 100644 --- a/server/src/test/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerServiceTests.java +++ b/server/src/test/java/org/opensearch/wlm/tracker/ResourceUsageCalculatorTrackerServiceTests.java @@ -9,10 +9,8 @@ package org.opensearch.wlm.tracker; import org.opensearch.action.search.SearchShardTask; -import org.opensearch.action.search.SearchTask; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.tasks.resourcetracker.ResourceStats; -import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.test.OpenSearchTestCase; @@ -21,6 +19,7 @@ import org.opensearch.wlm.QueryGroupLevelResourceUsageView; import org.opensearch.wlm.QueryGroupTask; import org.opensearch.wlm.ResourceType; +import org.opensearch.wlm.WorkloadManagementSettings; import org.junit.After; import org.junit.Before; @@ -31,18 +30,38 @@ import java.util.concurrent.atomic.AtomicBoolean; import static org.opensearch.wlm.QueryGroupTask.QUERY_GROUP_ID_HEADER; +import static org.opensearch.wlm.cancellation.QueryGroupTaskCancellationService.MIN_VALUE; +import static org.opensearch.wlm.tracker.CpuUsageCalculator.PROCESSOR_COUNT; +import static org.opensearch.wlm.tracker.MemoryUsageCalculator.HEAP_SIZE_BYTES; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class QueryGroupResourceUsageTrackerServiceTests extends OpenSearchTestCase { +public class ResourceUsageCalculatorTrackerServiceTests extends OpenSearchTestCase { TestThreadPool threadPool; TaskResourceTrackingService mockTaskResourceTrackingService; QueryGroupResourceUsageTrackerService queryGroupResourceUsageTrackerService; + WorkloadManagementSettings settings; + + public static class TestClock { + long time; + + public void fastForwardBy(long nanos) { + time += nanos; + } + + public long getTime() { + return time; + } + } + + TestClock clock; @Before public void setup() { + clock = new TestClock(); + settings = mock(WorkloadManagementSettings.class); threadPool = new TestThreadPool(getTestName()); mockTaskResourceTrackingService = mock(TaskResourceTrackingService.class); queryGroupResourceUsageTrackerService = new QueryGroupResourceUsageTrackerService(mockTaskResourceTrackingService); @@ -55,16 +74,24 @@ public void cleanup() { public void testConstructQueryGroupLevelViews_CreatesQueryGroupLevelUsageView_WhenTasksArePresent() { List queryGroupIds = List.of("queryGroup1", "queryGroup2", "queryGroup3"); + clock.fastForwardBy(2000); Map activeSearchShardTasks = createActiveSearchShardTasks(queryGroupIds); when(mockTaskResourceTrackingService.getResourceAwareTasks()).thenReturn(activeSearchShardTasks); + Map stringQueryGroupLevelResourceUsageViewMap = queryGroupResourceUsageTrackerService .constructQueryGroupLevelUsageViews(); for (String queryGroupId : queryGroupIds) { assertEquals( - 400, - (long) 
stringQueryGroupLevelResourceUsageViewMap.get(queryGroupId).getResourceUsageData().get(ResourceType.MEMORY) + (400 * 1.0f) / HEAP_SIZE_BYTES, + stringQueryGroupLevelResourceUsageViewMap.get(queryGroupId).getResourceUsageData().get(ResourceType.MEMORY), + MIN_VALUE + ); + assertEquals( + (200 * 1.0f) / (PROCESSOR_COUNT * 2000), + stringQueryGroupLevelResourceUsageViewMap.get(queryGroupId).getResourceUsageData().get(ResourceType.CPU), + MIN_VALUE ); assertEquals(2, stringQueryGroupLevelResourceUsageViewMap.get(queryGroupId).getActiveTasks().size()); } @@ -78,14 +105,23 @@ public void testConstructQueryGroupLevelViews_CreatesQueryGroupLevelUsageView_Wh public void testConstructQueryGroupLevelUsageViews_WithTasksHavingDifferentResourceUsage() { Map activeSearchShardTasks = new HashMap<>(); + clock.fastForwardBy(2000); activeSearchShardTasks.put(1L, createMockTask(SearchShardTask.class, 100, 200, "queryGroup1")); activeSearchShardTasks.put(2L, createMockTask(SearchShardTask.class, 200, 400, "queryGroup1")); when(mockTaskResourceTrackingService.getResourceAwareTasks()).thenReturn(activeSearchShardTasks); - Map queryGroupViews = queryGroupResourceUsageTrackerService .constructQueryGroupLevelUsageViews(); - assertEquals(600, (long) queryGroupViews.get("queryGroup1").getResourceUsageData().get(ResourceType.MEMORY)); + assertEquals( + (double) 600 / HEAP_SIZE_BYTES, + queryGroupViews.get("queryGroup1").getResourceUsageData().get(ResourceType.MEMORY), + MIN_VALUE + ); + assertEquals( + ((double) 300) / (PROCESSOR_COUNT * 2000), + queryGroupViews.get("queryGroup1").getResourceUsageData().get(ResourceType.CPU), + MIN_VALUE + ); assertEquals(2, queryGroupViews.get("queryGroup1").getActiveTasks().size()); } @@ -100,19 +136,16 @@ private Map createActiveSearchShardTasks(List queryGroupIds) return activeSearchShardTasks; } - private T createMockTask(Class type, long cpuUsage, long heapUsage, String queryGroupId) { + private T createMockTask(Class type, long cpuUsage, long heapUsage, String queryGroupId) { T task = mock(type); - if (task instanceof SearchTask || task instanceof SearchShardTask) { - // Stash the current thread context to ensure that any existing context is preserved and restored after setting the query group - // ID. 
- try (ThreadContext.StoredContext ignore = threadPool.getThreadContext().stashContext()) { - threadPool.getThreadContext().putHeader(QUERY_GROUP_ID_HEADER, queryGroupId); - ((QueryGroupTask) task).setQueryGroupId(threadPool.getThreadContext()); - } + try (ThreadContext.StoredContext ignore = threadPool.getThreadContext().stashContext()) { + threadPool.getThreadContext().putHeader(QUERY_GROUP_ID_HEADER, queryGroupId); + task.setQueryGroupId(threadPool.getThreadContext()); } when(task.getTotalResourceUtilization(ResourceStats.CPU)).thenReturn(cpuUsage); when(task.getTotalResourceUtilization(ResourceStats.MEMORY)).thenReturn(heapUsage); when(task.getStartTimeNanos()).thenReturn((long) 0); + when(task.getElapsedTime()).thenReturn(clock.getTime()); AtomicBoolean isCancelled = new AtomicBoolean(false); doAnswer(invocation -> { diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index a63abdeb626b7..45ba68afe8f7b 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -416,11 +416,14 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase { private static Boolean segmentsPathFixedPrefix; + private static Boolean snapshotShardPathFixedPrefix; + @BeforeClass public static void beforeClass() throws Exception { prefixModeVerificationEnable = randomBoolean(); translogPathFixedPrefix = randomBoolean(); segmentsPathFixedPrefix = randomBoolean(); + snapshotShardPathFixedPrefix = randomBoolean(); testClusterRule.beforeClass(); } @@ -2944,6 +2947,7 @@ private static Settings buildRemoteStoreNodeAttributes( settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), false); settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_SEGMENTS_PATH_PREFIX.getKey(), translogPathFixedPrefix ? "a" : ""); settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_PATH_PREFIX.getKey(), segmentsPathFixedPrefix ? "b" : ""); + settings.put(BlobStoreRepository.SNAPSHOT_SHARD_PATH_PREFIX_SETTING.getKey(), snapshotShardPathFixedPrefix ? "c" : ""); return settings.build(); }
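Taken together, the workload-management tracker changes above move the tests from raw counters (34L memory, 12L CPU) to usage expressed as a fraction of node capacity. Below is a minimal sketch of the arithmetic the assertions encode, with stand-in constants; the real values come from CpuUsageCalculator.PROCESSOR_COUNT and MemoryUsageCalculator.HEAP_SIZE_BYTES, and the formulas are inferred from the expected values in the tests rather than copied from the production calculators.

// Sketch only: the usage-fraction normalization implied by the tracker tests above.
public final class UsageFractionSketch {
    // Fraction of the node's CPU capacity a task consumed over an elapsed window.
    static double cpuFraction(long cpuNanos, long elapsedNanos, int processorCount) {
        return (double) cpuNanos / (processorCount * (double) elapsedNanos);
    }

    // Fraction of the JVM heap a task's tracked allocations represent.
    static double memoryFraction(long heapBytesUsed, long heapSizeBytes) {
        return (double) heapBytesUsed / heapSizeBytes;
    }

    public static void main(String[] args) {
        int processorCount = 8;        // stand-in for PROCESSOR_COUNT
        long heapSizeBytes = 1L << 31; // stand-in for HEAP_SIZE_BYTES
        // Two tasks per query group at 100 CPU nanos / 200 heap bytes each,
        // with the test clock fast-forwarded by 2000 nanos:
        System.out.println(cpuFraction(200, 2000, processorCount)); // (200 * 1.0) / (8 * 2000)
        System.out.println(memoryFraction(400, heapSizeBytes));     // 400.0 / heap size
    }
}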