From a9ee2fd71aa522f8d2e960fdde95ece52f1752b4 Mon Sep 17 00:00:00 2001 From: Umberto D'Ovidio <92302395+udovidio@users.noreply.github.com> Date: Fri, 7 Oct 2022 14:00:05 +0200 Subject: [PATCH] Vert.x 4 update (#1438) * Vert.x 4 Update --- .jenkins/run-splits.sh | 2 +- CHANGELOG.adoc | 3 + Jenkinsfile.split | 11 +- api/pom.xml | 4 + bom/pom.xml | 9 +- .../handler/MeshAnonymousAuthHandler.java | 8 +- .../mesh/auth/handler/MeshAuthHandler.java | 6 +- .../mesh/auth/handler/MeshJWTAuthHandler.java | 45 +- .../auth/provider/MeshJWTAuthProvider.java | 11 +- .../cache/impl/EventAwareCacheFactory.java | 12 +- .../mesh/cache/impl/EventAwareCacheImpl.java | 70 +-- .../mesh/distributed/DistributionUtils.java | 16 +- .../com/gentics/mesh/event/EventBusStore.java | 45 ++ .../gentics/mesh/event/MeshEventSender.java | 4 +- .../rest/impl/InternalEndpointRouteImpl.java | 55 ++- .../gentics/mesh/router/APIRouterImpl.java | 10 +- .../router/route/DefaultNotFoundHandler.java | 6 +- .../mesh/util/rx/WrapperWriteStream.java | 255 ----------- core/pom.xml | 8 +- .../mesh/auth/MeshBasicAuthLoginHandler.java | 18 +- .../mesh/cache/PermissionCacheImpl.java | 12 +- .../cli/AbstractBootstrapInitializer.java | 10 +- .../java/com/gentics/mesh/cli/MeshImpl.java | 91 ++-- .../mesh/core/data/impl/MeshAuthUserImpl.java | 13 +- .../endpoint/eventbus/EventbusEndpoint.java | 31 +- .../node/BinaryUploadHandlerImpl.java | 2 +- ...S3BinaryMetadataExtractionHandlerImpl.java | 5 + .../endpoint/user/UserTokenAuthHandler.java | 8 +- .../mesh/rest/MeshLocalClientImpl.java | 5 + .../mesh/rest/MeshLocalRequestImpl.java | 3 +- databases/orientdb/pom.xml | 4 +- .../ferma/ext/orientdb3/OrientDBTx.java | 15 +- distributed-coordinator/pom.xml | 5 + .../coordinator/MasterElector.java | 50 +-- .../ClusterEnabledRequestDelegatorImpl.java | 51 +-- hazelcast3-cluster-manager/README.md | 6 + hazelcast3-cluster-manager/pom.xml | 243 ++++++++++ .../src/main/java/examples/Examples.java | 130 ++++++ 
.../src/main/java/examples/package-info.java | 23 + .../cluster/hazelcast/ClusterHealthCheck.java | 44 ++ .../spi/cluster/hazelcast/ConfigUtil.java | 106 +++++ .../hazelcast/HazelcastClusterManager.java | 425 ++++++++++++++++++ .../hazelcast/impl/ConversionUtils.java | 90 ++++ .../impl/HandlerCallBackAdapter.java | 42 ++ .../hazelcast/impl/HazelcastAsyncMap.java | 165 +++++++ .../hazelcast/impl/HazelcastCounter.java | 129 ++++++ .../cluster/hazelcast/impl/HazelcastLock.java | 42 ++ .../hazelcast/impl/HazelcastNodeInfo.java | 61 +++ .../impl/HazelcastRegistrationInfo.java | 76 ++++ .../hazelcast/impl/HazelcastServerID.java | 62 +++ .../cluster/hazelcast/impl/SubsMapHelper.java | 168 +++++++ .../hazelcast/impl/SubsOpSerializer.java | 62 +++ .../spi/cluster/hazelcast/package-info.java | 14 + .../src/main/resources/META-INF/MANIFEST.MF | 2 + .../io.vertx.core.spi.VertxServiceProvider | 1 + .../src/main/resources/default-cluster.xml | 169 +++++++ .../src/test/java/io/vertx/Lifecycle.java | 95 ++++ .../java/io/vertx/LoggingTestWatcher.java | 31 ++ .../io/vertx/core/HazelcastComplexHATest.java | 48 ++ .../java/io/vertx/core/HazelcastHATest.java | 60 +++ ...ogrammaticHazelcastClusterManagerTest.java | 254 +++++++++++ .../HazelcastClusteredEventbusTest.java | 54 +++ .../eventbus/HazelcastFaultToleranceTest.java | 63 +++ .../core/eventbus/HazelcastNodeInfoTest.java | 54 +++ .../HazelcastClusteredAsyncMapTest.java | 91 ++++ ...azelcastClusteredAsynchronousLockTest.java | 67 +++ .../HazelcastClusteredSharedCounterTest.java | 54 +++ .../HazelcastClusteredSessionHandlerTest.java | 59 +++ .../HazelcastClusteredAsyncMapTest.java | 109 +++++ ...azelcastClusteredAsynchronousLockTest.java | 85 ++++ .../HazelcastClusteredEventbusTest.java | 72 +++ .../HazelcastClusteredSharedCounterTest.java | 72 +++ .../litemembers/HazelcastComplexHATest.java | 67 +++ .../vertx/it/litemembers/HazelcastHATest.java | 79 ++++ .../HazelcastDiscoveryImplClusteredTest.java | 57 +++ 
.../src/test/resources/cluster.xml | 120 +++++ .../src/test/resources/logback-test.xml | 31 ++ .../mesh/context/InternalActionContext.java | 7 +- .../InternalRoutingActionContextImpl.java | 9 +- .../context/impl/LocalActionContextImpl.java | 12 +- .../impl/NodeMigrationActionContextImpl.java | 11 +- .../mesh/rest/InternalEndpointRoute.java | 8 + .../cli/OrientDBBootstrapInitializerImpl.java | 9 +- .../gentics/mesh/plugin/PluginContext.java | 56 ++- pom.xml | 5 +- .../auth/oauth2/MeshOAuth2ServiceImpl.java | 5 +- .../gentics/mesh/mock/MockingLoggerRule.java | 5 + .../verticle/admin/AdminGUIEndpointTest.java | 33 +- .../mesh/cache/EventAwareCacheTest.java | 5 +- .../core/graphql/GraphQLEndpointTest.java | 2 +- .../core/project/ProjectInfoEndpointTest.java | 2 +- .../mesh/core/rest/MeshRestAPITest.java | 13 +- .../utilities/AbstractValidateSchemaTest.java | 6 +- .../webrootfield/WebRootFieldTypeTest.java | 30 +- 94 files changed, 4180 insertions(+), 623 deletions(-) create mode 100644 common/src/main/java/com/gentics/mesh/event/EventBusStore.java delete mode 100644 common/src/main/java/com/gentics/mesh/util/rx/WrapperWriteStream.java create mode 100644 hazelcast3-cluster-manager/README.md create mode 100644 hazelcast3-cluster-manager/pom.xml create mode 100644 hazelcast3-cluster-manager/src/main/java/examples/Examples.java create mode 100644 hazelcast3-cluster-manager/src/main/java/examples/package-info.java create mode 100644 hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/ClusterHealthCheck.java create mode 100644 hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/ConfigUtil.java create mode 100644 hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/HazelcastClusterManager.java create mode 100644 hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/ConversionUtils.java create mode 100644 
hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HandlerCallBackAdapter.java create mode 100644 hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastAsyncMap.java create mode 100644 hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastCounter.java create mode 100644 hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastLock.java create mode 100644 hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastNodeInfo.java create mode 100644 hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastRegistrationInfo.java create mode 100644 hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastServerID.java create mode 100644 hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/SubsMapHelper.java create mode 100644 hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/SubsOpSerializer.java create mode 100644 hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/package-info.java create mode 100644 hazelcast3-cluster-manager/src/main/resources/META-INF/MANIFEST.MF create mode 100644 hazelcast3-cluster-manager/src/main/resources/META-INF/services/io.vertx.core.spi.VertxServiceProvider create mode 100644 hazelcast3-cluster-manager/src/main/resources/default-cluster.xml create mode 100644 hazelcast3-cluster-manager/src/test/java/io/vertx/Lifecycle.java create mode 100644 hazelcast3-cluster-manager/src/test/java/io/vertx/LoggingTestWatcher.java create mode 100644 hazelcast3-cluster-manager/src/test/java/io/vertx/core/HazelcastComplexHATest.java create mode 100644 hazelcast3-cluster-manager/src/test/java/io/vertx/core/HazelcastHATest.java create mode 100644 hazelcast3-cluster-manager/src/test/java/io/vertx/core/ProgrammaticHazelcastClusterManagerTest.java create mode 100644 
hazelcast3-cluster-manager/src/test/java/io/vertx/core/eventbus/HazelcastClusteredEventbusTest.java create mode 100644 hazelcast3-cluster-manager/src/test/java/io/vertx/core/eventbus/HazelcastFaultToleranceTest.java create mode 100644 hazelcast3-cluster-manager/src/test/java/io/vertx/core/eventbus/HazelcastNodeInfoTest.java create mode 100644 hazelcast3-cluster-manager/src/test/java/io/vertx/core/shareddata/HazelcastClusteredAsyncMapTest.java create mode 100644 hazelcast3-cluster-manager/src/test/java/io/vertx/core/shareddata/HazelcastClusteredAsynchronousLockTest.java create mode 100644 hazelcast3-cluster-manager/src/test/java/io/vertx/core/shareddata/HazelcastClusteredSharedCounterTest.java create mode 100644 hazelcast3-cluster-manager/src/test/java/io/vertx/ext/web/sstore/HazelcastClusteredSessionHandlerTest.java create mode 100644 hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastClusteredAsyncMapTest.java create mode 100644 hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastClusteredAsynchronousLockTest.java create mode 100644 hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastClusteredEventbusTest.java create mode 100644 hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastClusteredSharedCounterTest.java create mode 100644 hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastComplexHATest.java create mode 100644 hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastHATest.java create mode 100644 hazelcast3-cluster-manager/src/test/java/io/vertx/servicediscovery/impl/HazelcastDiscoveryImplClusteredTest.java create mode 100644 hazelcast3-cluster-manager/src/test/resources/cluster.xml create mode 100644 hazelcast3-cluster-manager/src/test/resources/logback-test.xml diff --git a/.jenkins/run-splits.sh b/.jenkins/run-splits.sh index de48d6e056..048e86a815 100755 --- a/.jenkins/run-splits.sh +++ b/.jenkins/run-splits.sh @@ -12,4 +12,4 
@@ fi echo "Running tests: $tests" jacoco=$2 echo "Using jacoco: $jacoco" -time mvn -fae -Dsurefire.excludedGroups=com.gentics.mesh.test.category.FailingTests,com.gentics.mesh.test.category.ClusterTests -Dmaven.javadoc.skip=true -Dskip.cluster.tests=true -Dmaven.test.failure.ignore=true -Dmesh.container.image.prefix=docker.apa-it.at/ -B -e -pl '!demo/default,!doc,!performance-tests' test -Dtest=$tests -DfailIfNoTests=false -Djacoco.skip=$jacoco | ts "$3 [%Y-%m-%d %H:%M:%S]" +time mvn -fae -Dsurefire.excludedGroups=com.gentics.mesh.test.category.FailingTests,com.gentics.mesh.test.category.ClusterTests -Dmaven.javadoc.skip=true -Dskip.cluster.tests=true -Dskip.vertx-hazelcast=true -Dmaven.test.failure.ignore=true -Dmesh.container.image.prefix=docker.apa-it.at/ -B -e -pl '!demo/default,!doc,!performance-tests' test -Dtest=$tests -DfailIfNoTests=false -Djacoco.skip=$jacoco | ts "$3 [%Y-%m-%d %H:%M:%S]" diff --git a/CHANGELOG.adoc b/CHANGELOG.adoc index 62aee4b201..b075c81f2d 100644 --- a/CHANGELOG.adoc +++ b/CHANGELOG.adoc @@ -29,6 +29,9 @@ icon:plus[] Core: The OrientDB database as been updated to version 3.2.10. icon:plus[] Rest: The new endpoints `/api/v2/.../rolePermissions` allow getting, granting and revoking permissions on entities for multiple roles in a single request. +icon:plus[] Core: The core Vert.x library was updated to version `4.3.2`. 
+ + [[v1.9.3]] == 1.9.3 (22.09.2022) diff --git a/Jenkinsfile.split b/Jenkinsfile.split index 4f82a6d79e..a6b2114c88 100644 --- a/Jenkinsfile.split +++ b/Jenkinsfile.split @@ -62,8 +62,9 @@ stage("Setup Build Environment") { echo "Setup of GPG" sh "gpg --no-tty --batch --import /mnt/credentials/gpg/gpg-public-key.asc" sh "gpg --no-tty --batch --import /mnt/credentials/gpg/gpg-secret-key.asc" - withCredentials([usernamePassword(credentialsId: 'gentics.gpg', usernameVariable: 'gpgKeyName', passwordVariable: 'gpgKeyPass')]) { - sh "mvn -ff -B -U -Dmaven.javadoc.skip=true -Dskip.test-plugins=false -Dskip.cluster.tests=true -Dgpg.skip=false -DskipTests ${extraFlags} clean install" + withCredentials([usernamePassword(credentialsId: 'repo.gentics.com', usernameVariable: 'repoUsername', passwordVariable: 'repoPassword'), + usernamePassword(credentialsId: 'gentics.gpg', usernameVariable: 'gpgKeyName', passwordVariable: 'gpgKeyPass')]) { + sh "mvn -ff -B -U -Dmaven.javadoc.skip=true -Dskip.test-plugins=false -Dskip.cluster.tests=true -Dskip.vertx-hazelcast=true -Dgpg.skip=false -DskipTests ${extraFlags} clean install" } } else { echo "Omitted since we don't execute tests" @@ -116,7 +117,7 @@ stage("Setup Build Environment") { } else { sshagent(["git"]) { try { - sh "mvn -fae -Dsurefire.excludedGroups=com.gentics.mesh.test.category.FailingTests,com.gentics.mesh.test.category.ClusterTests -Dmaven.javadoc.skip=true -Dskip.cluster.tests=true -Dmaven.test.failure.ignore=true -Dmesh.container.image.prefix=docker.apa-it.at/ -B -U -e -pl '!ferma,!demo/default,!doc,!performance-tests' clean install" + sh "mvn -fae -Dsurefire.excludedGroups=com.gentics.mesh.test.category.FailingTests,com.gentics.mesh.test.category.ClusterTests -Dmaven.javadoc.skip=true -Dskip.cluster.tests=true -Dskip.vertx-hazelcast=true -Dmaven.test.failure.ignore=true -Dmesh.container.image.prefix=docker.apa-it.at/ -B -U -e -pl '!ferma,!demo/default,!doc,!performance-tests' clean install" } finally { step([$class: 
'JUnitResultArchiver', testResults: '**/target/surefire-reports/*.xml']) } @@ -131,7 +132,7 @@ stage("Setup Build Environment") { if (Boolean.valueOf(params.runUnstableTests)) { sshagent(["git"]) { try { - sh "mvn -fae -Dsurefire.groups=com.gentics.mesh.test.category.FailingTests -Dmaven.javadoc.skip=true -Dskip.cluster.tests=true -Dmaven.test.failure.ignore=true -Dmesh.container.image.prefix=docker.apa-it.at/ -B -e -pl '!ferma,!demo/default,!doc,!performance-tests' test -DfailIfNoTests=false" + sh "mvn -fae -Dsurefire.groups=com.gentics.mesh.test.category.FailingTests -Dmaven.javadoc.skip=true -Dskip.cluster.tests=true -Dskip.vertx-hazelcast=true -Dmaven.test.failure.ignore=true -Dmesh.container.image.prefix=docker.apa-it.at/ -B -e -pl '!ferma,!demo/default,!doc,!performance-tests' test -DfailIfNoTests=false" } finally { step([$class: 'JUnitResultArchiver', testResults: '**/target/surefire-reports/*.xml']) } @@ -218,7 +219,7 @@ stage("Setup Build Environment") { node("mesh-performance-worker-11") { try { unstash 'project' - sh "mvn -B -U clean package -pl '!doc,!demo/default,!server' -Dskip.unit.tests=true -Dskip.cluster.tests=true -Dskip.performance.tests=false -Dmaven.test.failure.ignore=true -Dmesh.container.image.prefix=docker.apa-it.at/" + sh "mvn -B -U clean package -pl '!doc,!demo/default,!server' -Dskip.unit.tests=true -Dskip.cluster.tests=true -Dskip.vertx-hazelcast=true -Dskip.performance.tests=false -Dmaven.test.failure.ignore=true -Dmesh.container.image.prefix=docker.apa-it.at/" } finally { step([$class: 'JUnitResultArchiver', testResults: '**/target/*.performance.xml']) deleteDir() diff --git a/api/pom.xml b/api/pom.xml index 6997f2d03d..5711ba35c6 100644 --- a/api/pom.xml +++ b/api/pom.xml @@ -47,6 +47,10 @@ commons-cli commons-cli + + com.fasterxml.jackson.core + jackson-databind + com.github.stefanbirkner diff --git a/bom/pom.xml b/bom/pom.xml index 24ce87ad86..1dbeb9281d 100644 --- a/bom/pom.xml +++ b/bom/pom.xml @@ -29,7 +29,7 @@ 3.12.8 2.13.2 
2.13.2 - 4.1.72.Final + 4.1.78.Final 1.26 2.16.83 1.30 @@ -497,6 +497,13 @@ 2.6.0 + + + com.gentics.mesh + hazelcast3-cluster-manager + ${project.version} + + com.gentics.mesh diff --git a/common/src/main/java/com/gentics/mesh/auth/handler/MeshAnonymousAuthHandler.java b/common/src/main/java/com/gentics/mesh/auth/handler/MeshAnonymousAuthHandler.java index 452e9bcbfc..f4332af5ac 100644 --- a/common/src/main/java/com/gentics/mesh/auth/handler/MeshAnonymousAuthHandler.java +++ b/common/src/main/java/com/gentics/mesh/auth/handler/MeshAnonymousAuthHandler.java @@ -15,18 +15,18 @@ import io.vertx.core.AsyncResult; import io.vertx.core.Handler; import io.vertx.core.http.HttpServerRequest; -import io.vertx.core.json.JsonObject; import io.vertx.core.logging.Logger; import io.vertx.core.logging.LoggerFactory; +import io.vertx.ext.auth.User; import io.vertx.ext.web.RoutingContext; -import io.vertx.ext.web.handler.impl.AuthHandlerImpl; +import io.vertx.ext.web.handler.impl.AuthenticationHandlerImpl; /** * Auth handler which will deal with anonymous auth handling. This handler will only auth the user if anonymous auth is enabled and the request does not contain * any auth header. 
*/ @Singleton -public class MeshAnonymousAuthHandler extends AuthHandlerImpl implements MeshAuthHandler { +public class MeshAnonymousAuthHandler extends AuthenticationHandlerImpl implements MeshAuthHandler { public static final String ANONYMOUS_USERNAME = "anonymous"; @@ -45,7 +45,7 @@ public MeshAnonymousAuthHandler(MeshJWTAuthProvider authProvider, MeshOptions op } @Override - public void parseCredentials(RoutingContext arg0, Handler> arg1) { + public void authenticate(RoutingContext routingContext, Handler> handler) { // Not needed for this handler } diff --git a/common/src/main/java/com/gentics/mesh/auth/handler/MeshAuthHandler.java b/common/src/main/java/com/gentics/mesh/auth/handler/MeshAuthHandler.java index fba96a4b81..c3ce95feb2 100644 --- a/common/src/main/java/com/gentics/mesh/auth/handler/MeshAuthHandler.java +++ b/common/src/main/java/com/gentics/mesh/auth/handler/MeshAuthHandler.java @@ -2,12 +2,12 @@ import io.vertx.ext.auth.User; import io.vertx.ext.web.RoutingContext; -import io.vertx.ext.web.handler.AuthHandler; +import io.vertx.ext.web.handler.AuthenticationHandler; /** - * Common interface all all custom Gentics Mesh Auth handlers. + * Common interface all custom Gentics Mesh Auth handlers. */ -public interface MeshAuthHandler extends AuthHandler { +public interface MeshAuthHandler extends AuthenticationHandler { /** * Finish the request with code 401. 
diff --git a/common/src/main/java/com/gentics/mesh/auth/handler/MeshJWTAuthHandler.java b/common/src/main/java/com/gentics/mesh/auth/handler/MeshJWTAuthHandler.java index 0a418171ca..263d9dfe38 100644 --- a/common/src/main/java/com/gentics/mesh/auth/handler/MeshJWTAuthHandler.java +++ b/common/src/main/java/com/gentics/mesh/auth/handler/MeshJWTAuthHandler.java @@ -16,24 +16,23 @@ import io.vertx.core.AsyncResult; import io.vertx.core.Handler; +import io.vertx.core.http.Cookie; import io.vertx.core.http.HttpServerRequest; -import io.vertx.core.json.JsonArray; import io.vertx.core.json.JsonObject; import io.vertx.core.logging.Logger; import io.vertx.core.logging.LoggerFactory; import io.vertx.ext.auth.User; -import io.vertx.ext.web.Cookie; import io.vertx.ext.web.RoutingContext; import io.vertx.ext.web.handler.JWTAuthHandler; -import io.vertx.ext.web.handler.impl.AuthHandlerImpl; +import io.vertx.ext.web.handler.impl.AuthenticationHandlerImpl; /** - * This class extends the Vert.x AuthHandler, so that it also works when the token is set as a cookie. + * This class extends the Vert.x AuthenticationHandlerImpl, so that it also works when the token is set as a cookie. * * Central authentication handler for mesh. All requests to secured resources must pass this handler. 
*/ @Singleton -public class MeshJWTAuthHandler extends AuthHandlerImpl implements JWTAuthHandler, MeshAuthHandler { +public class MeshJWTAuthHandler extends AuthenticationHandlerImpl implements JWTAuthHandler, MeshAuthHandler { private static final Logger log = LoggerFactory.getLogger(MeshJWTAuthHandler.class); @@ -56,24 +55,6 @@ public MeshJWTAuthHandler(MeshJWTAuthProvider authProvider, MeshOptions meshOpti options = new JsonObject(); } - @Override - public JWTAuthHandler setAudience(List audience) { - options.put("audience", new JsonArray(audience)); - return this; - } - - @Override - public JWTAuthHandler setIssuer(String issuer) { - options.put("issuer", issuer); - return this; - } - - @Override - public JWTAuthHandler setIgnoreExpiration(boolean ignoreExpiration) { - options.put("ignoreExpiration", ignoreExpiration); - return this; - } - @Override public void handle(RoutingContext context) { handle(context, false); @@ -97,7 +78,7 @@ public void handle(RoutingContext context, boolean ignoreDecodeErrors) { } @Override - public void parseCredentials(RoutingContext arg0, Handler> arg1) { + public void authenticate(RoutingContext routingContext, Handler> handler) { // Not needed for this handler } @@ -155,7 +136,7 @@ private void handleJWTAuth(RoutingContext context, boolean ignoreDecodeErrors) { } // 4. Authenticate the found token using JWT - JsonObject authInfo = new JsonObject().put("jwt", token).put("options", options); + JsonObject authInfo = new JsonObject().put("token", token).put("options", options); authProvider.authenticateJWT(authInfo, res -> { // Authentication was successful. 
@@ -189,4 +170,18 @@ private void handleJWTAuth(RoutingContext context, boolean ignoreDecodeErrors) { }); } + @Override + public JWTAuthHandler scopeDelimiter(String s) { + return this; + } + + @Override + public JWTAuthHandler withScope(String s) { + return this; + } + + @Override + public JWTAuthHandler withScopes(List list) { + return this; + } } diff --git a/common/src/main/java/com/gentics/mesh/auth/provider/MeshJWTAuthProvider.java b/common/src/main/java/com/gentics/mesh/auth/provider/MeshJWTAuthProvider.java index 32341cb96f..c882a544f5 100644 --- a/common/src/main/java/com/gentics/mesh/auth/provider/MeshJWTAuthProvider.java +++ b/common/src/main/java/com/gentics/mesh/auth/provider/MeshJWTAuthProvider.java @@ -8,6 +8,9 @@ import javax.inject.Inject; import javax.inject.Singleton; +import com.gentics.mesh.auth.handler.MeshJWTAuthHandler; +import io.vertx.core.http.Cookie; +import io.vertx.ext.auth.authentication.AuthenticationProvider; import org.apache.commons.lang.NotImplementedException; import org.apache.commons.lang3.StringUtils; import org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder; @@ -36,7 +39,6 @@ import io.vertx.ext.auth.User; import io.vertx.ext.auth.jwt.JWTAuth; import io.vertx.ext.auth.jwt.JWTAuthOptions; -import io.vertx.ext.web.Cookie; /** * Central mesh authentication provider which will handle JWT. @@ -46,7 +48,7 @@ * */ @Singleton -public class MeshJWTAuthProvider implements AuthProvider, JWTAuth { +public class MeshJWTAuthProvider implements AuthenticationProvider, JWTAuth { private static final Logger log = LoggerFactory.getLogger(MeshJWTAuthProvider.class); @@ -131,6 +133,11 @@ public String generateToken(JsonObject claims, JWTOptions options) { throw new NotImplementedException(); } + @Override + public String generateToken(JsonObject jsonObject) { + throw new NotImplementedException(); + } + /** * Authenticates the user and returns a JWToken if successful. 
* diff --git a/common/src/main/java/com/gentics/mesh/cache/impl/EventAwareCacheFactory.java b/common/src/main/java/com/gentics/mesh/cache/impl/EventAwareCacheFactory.java index ea78e4a1f9..5dab415727 100644 --- a/common/src/main/java/com/gentics/mesh/cache/impl/EventAwareCacheFactory.java +++ b/common/src/main/java/com/gentics/mesh/cache/impl/EventAwareCacheFactory.java @@ -5,23 +5,21 @@ import com.gentics.mesh.cache.EventAwareCache; import com.gentics.mesh.etc.config.MeshOptions; +import com.gentics.mesh.event.EventBusStore; import com.gentics.mesh.metric.MetricsService; -import io.vertx.core.Vertx; - /** * Factory for {@link EventAwareCache} instances. */ @Singleton public class EventAwareCacheFactory { - - private final Vertx vertx; + private final EventBusStore eventBusStore; private final MeshOptions meshOptions; private final MetricsService metricsService; @Inject - public EventAwareCacheFactory(Vertx vertx, MeshOptions meshOptions, MetricsService metricsService) { - this.vertx = vertx; + public EventAwareCacheFactory(EventBusStore eventBusStore, MeshOptions meshOptions, MetricsService metricsService) { + this.eventBusStore = eventBusStore; this.meshOptions = meshOptions; this.metricsService = metricsService; } @@ -35,7 +33,7 @@ public EventAwareCacheFactory(Vertx vertx, MeshOptions meshOptions, MetricsServi */ public EventAwareCacheImpl.Builder builder() { return new EventAwareCacheImpl.Builder() - .vertx(vertx) + .eventBusStore(eventBusStore) .meshOptions(meshOptions) .setMetricsService(metricsService); } diff --git a/common/src/main/java/com/gentics/mesh/cache/impl/EventAwareCacheImpl.java b/common/src/main/java/com/gentics/mesh/cache/impl/EventAwareCacheImpl.java index 214e89f5f7..650d2d8dcc 100644 --- a/common/src/main/java/com/gentics/mesh/cache/impl/EventAwareCacheImpl.java +++ b/common/src/main/java/com/gentics/mesh/cache/impl/EventAwareCacheImpl.java @@ -11,6 +11,7 @@ import com.gentics.mesh.cache.EventAwareCache; import 
com.gentics.mesh.core.rest.MeshEvent; import com.gentics.mesh.etc.config.MeshOptions; +import com.gentics.mesh.event.EventBusStore; import com.gentics.mesh.metric.CachingMetric; import com.gentics.mesh.metric.MetricsService; import com.github.benmanes.caffeine.cache.Cache; @@ -18,8 +19,8 @@ import io.micrometer.core.instrument.Counter; import io.reactivex.Observable; +import io.reactivex.disposables.Disposable; import io.reactivex.functions.Predicate; -import io.vertx.core.Vertx; import io.vertx.core.eventbus.EventBus; import io.vertx.core.eventbus.Message; import io.vertx.core.eventbus.MessageConsumer; @@ -36,8 +37,6 @@ public class EventAwareCacheImpl implements EventAwareCache { private final Cache cache; - private final Vertx vertx; - private final MeshOptions options; private final Predicate> filter; @@ -51,10 +50,11 @@ public class EventAwareCacheImpl implements EventAwareCache { private final Counter missCounter; private final Counter hitCounter; - public EventAwareCacheImpl(String name, long maxSize, Duration expireAfter, Duration expireAfterAccess, Vertx vertx, MeshOptions options, MetricsService metricsService, + private Disposable eventSubscription; + + public EventAwareCacheImpl(String name, long maxSize, Duration expireAfter, Duration expireAfterAccess, EventBusStore eventBusStore, MeshOptions options, MetricsService metricsService, Predicate> filter, BiConsumer, EventAwareCache> onNext, MeshEvent... 
events) { - this.vertx = vertx; this.options = options; Caffeine cacheBuilder = Caffeine.newBuilder().maximumSize(maxSize); if (expireAfter != null) { @@ -66,33 +66,39 @@ public EventAwareCacheImpl(String name, long maxSize, Duration expireAfter, Dura this.cache = cacheBuilder.build(); this.filter = filter; this.onNext = onNext; - registerEventHandlers(events); + registerEventHandlers(eventBusStore, events); invalidateKeyCounter = metricsService.counter(new CachingMetric(CachingMetric.Event.CLEAR_SINGLE, name)); invalidateAllCounter = metricsService.counter(new CachingMetric(CachingMetric.Event.CLEAR_ALL, name)); missCounter = metricsService.counter(new CachingMetric(CachingMetric.Event.MISS, name)); hitCounter = metricsService.counter(new CachingMetric(CachingMetric.Event.HIT, name)); } - private void registerEventHandlers(MeshEvent... events) { - if (log.isTraceEnabled()) { - log.trace("Registering to events"); - } - EventBus eb = vertx.eventBus(); - Observable> o = rxEventBus(eb, events); - if (filter != null) { - o = o.filter(filter); - } + private void registerEventHandlers(EventBusStore eventBusStore, MeshEvent... events) { + eventBusStore.eventBus().subscribe((eb) -> { + if (log.isTraceEnabled()) { + log.trace("Registering to events"); + } + Observable> o = rxEventBus(eb, events); + if (filter != null) { + o = o.filter(filter); + } - o.subscribe(event -> { - // Use a default implementation which will invalidate the whole cache on every event - if (onNext == null) { - invalidate(); - } else { - onNext.accept(event, this); + // Dispose previous event bus subscription + if (eventSubscription != null && !eventSubscription.isDisposed()) { + eventSubscription.dispose(); } - }, error -> { - log.error("Error while handling event in cache. 
Disabling cache.", error); - disable(); + + eventSubscription = o.subscribe(event -> { + // Use a default implementation which will invalidate the whole cache on every event + if (onNext == null) { + invalidate(); + } else { + onNext.accept(event, this); + } + }, error -> { + log.error("Error while handling event in cache. Disabling cache.", error); + disable(); + }); }); } @@ -195,13 +201,14 @@ public static class Builder { private Predicate> filter = null; private BiConsumer, EventAwareCache> onNext = null; private MeshEvent[] events = null; - private Vertx vertx; + private EventBusStore eventBusStore; private Duration expireAfter; private Duration expireAfterAccess; private String name; private MeshOptions options; private MetricsService metricsService; + /** * Build the cache instance. * @@ -209,9 +216,8 @@ public static class Builder { */ public EventAwareCache build() { Objects.requireNonNull(events, "No events for the cache have been set"); - Objects.requireNonNull(vertx, "No Vert.x instance has been set"); Objects.requireNonNull(name, "No name has been set"); - EventAwareCacheImpl c = new EventAwareCacheImpl<>(name, maxSize, expireAfter, expireAfterAccess, vertx, options, metricsService, filter, onNext, events); + EventAwareCacheImpl c = new EventAwareCacheImpl<>(name, maxSize, expireAfter, expireAfterAccess, eventBusStore, options, metricsService, filter, onNext, events); if (disabled) { c.disable(); } @@ -261,13 +267,13 @@ public Builder disabled() { } /** - * Set the vertx instance to be used for eventbus communication. 
- * - * @param vertx + * Sets the event bus store + * + * @param eventBusStore * @return Fluent API */ - public Builder vertx(Vertx vertx) { - this.vertx = vertx; + public Builder eventBusStore(EventBusStore eventBusStore) { + this.eventBusStore = eventBusStore; return this; } diff --git a/common/src/main/java/com/gentics/mesh/distributed/DistributionUtils.java b/common/src/main/java/com/gentics/mesh/distributed/DistributionUtils.java index 1527291e91..82e8ffdbf5 100644 --- a/common/src/main/java/com/gentics/mesh/distributed/DistributionUtils.java +++ b/common/src/main/java/com/gentics/mesh/distributed/DistributionUtils.java @@ -34,16 +34,16 @@ public static boolean isReadRequest(HttpMethod method, String path) { return false; } - switch (method) { - case CONNECT: - case OPTIONS: - case GET: + switch (method.name()) { + case "CONNECT": + case "OPTIONS": + case "GET": return true; - case DELETE: - case PATCH: - case PUT: + case "DELETE": + case "PATCH": + case "PUT": return false; - case POST: + case "POST": // Lets check whether the request is actually a read request. // In this case we don't need to delegate it. return isReadOnly(path); diff --git a/common/src/main/java/com/gentics/mesh/event/EventBusStore.java b/common/src/main/java/com/gentics/mesh/event/EventBusStore.java new file mode 100644 index 0000000000..2e01073d96 --- /dev/null +++ b/common/src/main/java/com/gentics/mesh/event/EventBusStore.java @@ -0,0 +1,45 @@ +package com.gentics.mesh.event; + +import io.reactivex.Observable; +import io.reactivex.subjects.BehaviorSubject; +import io.vertx.core.eventbus.EventBus; + +import javax.annotation.Nullable; +import javax.inject.Inject; +import javax.inject.Singleton; + +/** + * Wraps the event bus in an observable. 
+ */ +@Singleton +public class EventBusStore { + private BehaviorSubject eventBus = BehaviorSubject.create(); + @Inject + public EventBusStore() { + } + + /** + * The EventBus observable + * @return + */ + public Observable eventBus() { + return eventBus; + } + + /** + * Set the event bus + * @param eventBus + */ + public void setEventBus(EventBus eventBus) { + this.eventBus.onNext(eventBus); + } + + /** + * Get the current event bus (can be null) + * @return + */ + @Nullable + public EventBus current() { + return eventBus.getValue(); + } +} diff --git a/common/src/main/java/com/gentics/mesh/event/MeshEventSender.java b/common/src/main/java/com/gentics/mesh/event/MeshEventSender.java index 328b66eee5..2e8a845211 100644 --- a/common/src/main/java/com/gentics/mesh/event/MeshEventSender.java +++ b/common/src/main/java/com/gentics/mesh/event/MeshEventSender.java @@ -31,7 +31,7 @@ public MeshEventSender(Vertx vertx) { * @return */ public Single isSearchIdle() { - return vertx.eventBus().rxSend(IS_SEARCH_IDLE.address, null) + return vertx.eventBus().rxRequest(IS_SEARCH_IDLE.address, null) .map(msg -> (Boolean) msg.body()); } @@ -41,7 +41,7 @@ public Single isSearchIdle() { * @return */ public Completable refreshSearch() { - return vertx.eventBus().rxSend(SEARCH_REFRESH_REQUEST.address, null).toCompletable(); + return vertx.eventBus().rxRequest(SEARCH_REFRESH_REQUEST.address, null).toCompletable(); } /** diff --git a/common/src/main/java/com/gentics/mesh/rest/impl/InternalEndpointRouteImpl.java b/common/src/main/java/com/gentics/mesh/rest/impl/InternalEndpointRouteImpl.java index 240ca4b02e..829baf5abf 100644 --- a/common/src/main/java/com/gentics/mesh/rest/impl/InternalEndpointRouteImpl.java +++ b/common/src/main/java/com/gentics/mesh/rest/impl/InternalEndpointRouteImpl.java @@ -21,6 +21,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import io.vertx.ext.web.handler.PlatformHandler; import org.codehaus.jettison.json.JSONObject; import 
org.raml.model.MimeType; import org.raml.model.Response; @@ -106,22 +107,8 @@ public class InternalEndpointRouteImpl implements InternalEndpointRoute { */ public InternalEndpointRouteImpl(Router router, LocalConfigApi localConfigApi, Database db) { this.route = router.route(); - route.handler(rc -> { - if (!isMutating()) { - rc.next(); - } else { - if (db.isReadOnly(true)) { - rc.fail(error(HttpResponseStatus.METHOD_NOT_ALLOWED, "error_readonly_mode")); - } - localConfigApi.getActiveConfig().subscribe(config -> { - if (config.isReadOnly()) { - rc.fail(error(HttpResponseStatus.METHOD_NOT_ALLOWED, "error_readonly_mode")); - } else { - rc.next(); - } - }); - } - }); + ReadOnlyHandler readOnlyHandler = new ReadOnlyHandler(localConfigApi, db); + route.handler(readOnlyHandler); } @Override @@ -181,6 +168,13 @@ public InternalEndpointRoute handler(Handler requestHandler) { return this; } + @Override + public InternalEndpointRoute subRouter(Router subRouter) { + validate(); + route.subRouter(subRouter); + return this; + } + @Override public InternalEndpointRoute validate() { if (!produces.isEmpty() && produces.contains(APPLICATION_JSON) && exampleResponses.isEmpty()) { @@ -537,4 +531,33 @@ public InternalEndpointRouteImpl setMutating(Boolean mutating) { public Route getRoute() { return route; } + + private class ReadOnlyHandler implements PlatformHandler { + + private final LocalConfigApi localConfigApi; + private final Database db; + + public ReadOnlyHandler(LocalConfigApi localConfigApi, Database db) { + this.localConfigApi = localConfigApi; + this.db = db; + } + + @Override + public void handle(RoutingContext rc) { + if (!isMutating()) { + rc.next(); + } else { + if (db.isReadOnly(true)) { + rc.fail(error(HttpResponseStatus.METHOD_NOT_ALLOWED, "error_readonly_mode")); + } + localConfigApi.getActiveConfig().subscribe(config -> { + if (config.isReadOnly()) { + rc.fail(error(HttpResponseStatus.METHOD_NOT_ALLOWED, "error_readonly_mode")); + } else { + rc.next(); + } + }); + 
} + } + } } diff --git a/common/src/main/java/com/gentics/mesh/router/APIRouterImpl.java b/common/src/main/java/com/gentics/mesh/router/APIRouterImpl.java index 86782ceb30..c187ae67e9 100644 --- a/common/src/main/java/com/gentics/mesh/router/APIRouterImpl.java +++ b/common/src/main/java/com/gentics/mesh/router/APIRouterImpl.java @@ -1,19 +1,17 @@ package com.gentics.mesh.router; -import java.util.HashMap; -import java.util.Map; - import com.gentics.mesh.core.db.Database; import com.gentics.mesh.etc.config.ClusterOptions; import com.gentics.mesh.etc.config.MeshOptions; import com.gentics.mesh.etc.config.cluster.CoordinatorMode; import com.gentics.mesh.handler.VersionUtils; - import io.vertx.core.Vertx; import io.vertx.core.logging.Logger; import io.vertx.core.logging.LoggerFactory; import io.vertx.ext.web.Router; -import io.vertx.ext.web.handler.CookieHandler; + +import java.util.HashMap; +import java.util.Map; /** * @see APIRouter @@ -83,8 +81,6 @@ private void initHandlers(RouterStorage storage) { } }); - router.route().handler(CookieHandler.create()); - } @Override diff --git a/common/src/main/java/com/gentics/mesh/router/route/DefaultNotFoundHandler.java b/common/src/main/java/com/gentics/mesh/router/route/DefaultNotFoundHandler.java index c43241b746..005a30be06 100644 --- a/common/src/main/java/com/gentics/mesh/router/route/DefaultNotFoundHandler.java +++ b/common/src/main/java/com/gentics/mesh/router/route/DefaultNotFoundHandler.java @@ -28,9 +28,9 @@ public void handle(RoutingContext rc) { String internalMessage = "The rest endpoint or resource for given path {" + rc.normalisedPath() + "} could not be found."; String contentType = rc.request().getHeader("Content-Type"); if (contentType == null) { - switch (rc.request().method()) { - case PUT: - case POST: + switch (rc.request().method().name()) { + case "PUT": + case "POST": internalMessage += " You tried to POST or PUT data but you did not specifiy any Content-Type within your request."; break; default: diff 
--git a/common/src/main/java/com/gentics/mesh/util/rx/WrapperWriteStream.java b/common/src/main/java/com/gentics/mesh/util/rx/WrapperWriteStream.java deleted file mode 100644 index 116b2f854c..0000000000 --- a/common/src/main/java/com/gentics/mesh/util/rx/WrapperWriteStream.java +++ /dev/null @@ -1,255 +0,0 @@ -package com.gentics.mesh.util.rx; - -import java.io.IOException; -import java.io.InputStream; -import java.security.InvalidParameterException; - -import io.reactivex.Completable; -import io.reactivex.Observable; -import io.reactivex.subjects.PublishSubject; -import io.reactivex.subjects.ReplaySubject; -import io.vertx.core.AsyncResult; -import io.vertx.core.Handler; -import io.vertx.core.buffer.Buffer; -import io.vertx.core.logging.Logger; -import io.vertx.core.logging.LoggerFactory; -import io.vertx.core.streams.WriteStream; - -/** - * Wrapper for write streams which can be used to create an {@link InputStream} via {@link #createInputStream()} from an async source by calling - * {@link #write(Buffer)} and {@link #end()}. 
- */ -public class WrapperWriteStream implements WriteStream { - - private static final Logger log = LoggerFactory.getLogger(WrapperWriteStream.class); - - private Buffer buffer; - private Object bufferLock = new Object(); - private boolean ended = false; - private int writeQueueMaxSize = 32 * 1024 * 1024; // 32MB - private PublishSubject bufferChanged$; - private ReplaySubject requested$; - - public WrapperWriteStream() { - buffer = Buffer.buffer(); - bufferChanged$ = PublishSubject.create(); - requested$ = ReplaySubject.createWithSize(1); - } - - @Override - public WriteStream exceptionHandler(Handler handler) { - return this; - } - - @Override - public synchronized WriteStream write(Buffer data) { - int length; - synchronized (bufferLock) { - buffer.appendBuffer(data); - length = buffer.length(); - } - // Writes out alot, thats why logging is disabled - // log.debug(String.format("Wrote %d bytes", data.length())); - bufferChanged$.onNext(length); - return this; - } - - @Override - public void end() { - this.ended = true; - this.bufferChanged$.onComplete(); - if (endedAndEmpty()) { - log.debug("End completing"); - requested$.onComplete(); - } - } - - @Override - public void end(Buffer buffer) { - this.write(buffer); - this.end(); - } - - @Override - public WriteStream setWriteQueueMaxSize(int maxSize) { - this.writeQueueMaxSize = maxSize; - return this; - } - - @Override - public boolean writeQueueFull() { - return this.buffer.length() >= this.writeQueueMaxSize; - } - - @Override - public WriteStream drainHandler(Handler handler) { - if (this.buffer.length() < writeQueueMaxSize) { - handler.handle(null); - } else { - this.bufferChanged$.filter(len -> len < writeQueueMaxSize).firstOrError().toCompletable().subscribe(() -> handler.handle(null)); - } - return this; - } - - /** - * Access to the requested buffers. 
- * - * @return - */ - public Observable requestedBuffers() { - return this.requested$; - } - - /** - * Request that the given amount of bytes will be read. This will provide data which can be read via the {@link #createInputStream()} object. - * - * @param byteCount - */ - public void request(int byteCount) { - if (byteCount > writeQueueMaxSize) { - throw new InvalidParameterException("Can't request more than buffer size!"); - } - Buffer ret; - boolean ended; - synchronized (bufferLock) { - ret = sliceBuffer(byteCount); - ended = endedAndEmpty(); - } - if (log.isDebugEnabled()) { - log.debug(String.format("Requested %d bytes", byteCount)); - } - - if (ret != null) { - if (log.isDebugEnabled()) { - log.debug("Sent immediately"); - } - requested$.onNext(ret); - if (log.isDebugEnabled()) { - log.debug("Sending complete"); - } - if (ended) { - requested$.onComplete(); - } - } else { - if (log.isDebugEnabled()) { - log.debug("Not ready yet"); - } - Completable enoughBytes$ = this.bufferChanged$.filter(length -> length >= byteCount).firstOrError().toCompletable(); - Completable ended$ = this.bufferChanged$.ignoreElements(); - - Completable.ambArray(enoughBytes$, ended$).subscribe(() -> { - Buffer buf; - boolean innerended; - synchronized (bufferLock) { - buf = sliceBuffer(byteCount); - innerended = endedAndEmpty(); - } - requested$.onNext(buf); - if (innerended) { - requested$.onComplete(); - } - }); - } - } - - private Buffer sliceBuffer(int byteCount) { - Buffer ret; - if (buffer == null) { - return null; - } - - if (ended && byteCount >= buffer.length()) { - ret = buffer; - this.buffer = null; - } else if (byteCount == this.buffer.length()) { - ret = buffer; - buffer = Buffer.buffer(); - } else if (byteCount <= this.buffer.length()) { - ret = this.buffer.getBuffer(0, byteCount); - this.buffer = this.buffer.getBuffer(byteCount, buffer.length()); - bufferChanged$.onNext(this.buffer.length()); - } else { - ret = null; - } - - return ret; - } - - /** - * Creates a 
blocking input stream from this write stream. Don't use this with {@link #request(int) request} - * - * @return - */ - public InputStream createInputStream() { - return new InputStream() { - @Override - public int read() throws IOException { - log.debug("Read byte"); - if (endedAndEmpty()) { - return -1; - } - int bufLen; - int ret; - synchronized (bufferLock) { - bufLen = buffer.length(); - } - if (bufLen == 0) { - bufferChanged$.firstOrError().blockingGet(); - } - synchronized (bufferLock) { - Buffer buf = sliceBuffer(1); - ret = buf.getByte(0); - } - return ret; - } - - @Override - public int read(byte[] b, int off, int len) throws IOException { - if (endedAndEmpty()) { - log.debug("read ended"); - return -1; - } - - if (log.isDebugEnabled()) { - log.debug(String.format("Trying to read %d bytes", len)); - } - int bufLen, byteCount; - synchronized (bufferLock) { - bufLen = buffer.length(); - } - - if (bufLen == 0) { - if (log.isDebugEnabled()) { - log.debug("Have to wait for buffer to fill"); - } - bufferChanged$.firstOrError().blockingGet(); - } - synchronized (bufferLock) { - bufLen = buffer.length(); - byteCount = Math.min(len, bufLen); - sliceBuffer(byteCount).getByteBuf().getBytes(0, b, off, byteCount); - } - if (log.isDebugEnabled()) { - log.debug(String.format("Actually read %d bytes", byteCount)); - } - return byteCount; - } - }; - } - - private boolean endedAndEmpty() { - return ended && (buffer == null || buffer.length() == 0); - } - - @Override - public WriteStream write(Buffer data, Handler> handler) { - // TODO Auto-generated method stub - return null; - } - - @Override - public void end(Handler> handler) { - - } - -} \ No newline at end of file diff --git a/core/pom.xml b/core/pom.xml index 28ce045b30..aaf460fde6 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -134,9 +134,10 @@ io.vertx vertx-codegen + - io.vertx - vertx-hazelcast + com.gentics.mesh + hazelcast3-cluster-manager @@ -145,8 +146,7 @@ - - + com.hazelcast hazelcast-kubernetes diff --git 
a/core/src/main/java/com/gentics/mesh/auth/MeshBasicAuthLoginHandler.java b/core/src/main/java/com/gentics/mesh/auth/MeshBasicAuthLoginHandler.java index c657d00172..b1401ca022 100644 --- a/core/src/main/java/com/gentics/mesh/auth/MeshBasicAuthLoginHandler.java +++ b/core/src/main/java/com/gentics/mesh/auth/MeshBasicAuthLoginHandler.java @@ -7,11 +7,9 @@ import io.vertx.core.Handler; import io.vertx.core.http.HttpHeaders; import io.vertx.core.http.HttpServerRequest; -import io.vertx.core.json.JsonObject; import io.vertx.ext.auth.User; import io.vertx.ext.web.RoutingContext; -import io.vertx.ext.web.handler.AuthHandler; -import io.vertx.ext.web.handler.impl.AuthHandlerImpl; +import io.vertx.ext.web.handler.impl.AuthenticationHandlerImpl; import javax.inject.Inject; import javax.inject.Singleton; @@ -23,7 +21,7 @@ * The {@link #handle(RoutingContext)} method is overriden in order to support the {@link MeshJWTAuthProvider}. */ @Singleton -public class MeshBasicAuthLoginHandler extends AuthHandlerImpl { +public class MeshBasicAuthLoginHandler extends AuthenticationHandlerImpl { final String realm; @@ -37,18 +35,12 @@ public MeshBasicAuthLoginHandler(MeshJWTAuthProvider authProvider) { } private void authorizeUser(RoutingContext ctx, User user) { - authorize(user, authZ -> { - if (authZ.failed()) { - ctx.fail(authZ.cause()); - return; - } - // success, allowed to continue - ctx.next(); - }); + // authorization is done with roles + ctx.next(); } @Override - public void parseCredentials(RoutingContext context, Handler> handler) { + public void authenticate(RoutingContext routingContext, Handler> handler) { // Not needed } diff --git a/core/src/main/java/com/gentics/mesh/cache/PermissionCacheImpl.java b/core/src/main/java/com/gentics/mesh/cache/PermissionCacheImpl.java index aaae711588..1ce9b560e4 100644 --- a/core/src/main/java/com/gentics/mesh/cache/PermissionCacheImpl.java +++ b/core/src/main/java/com/gentics/mesh/cache/PermissionCacheImpl.java @@ -18,7 +18,7 @@ import 
com.gentics.mesh.core.rest.MeshEvent; import com.gentics.mesh.etc.config.MeshOptions; -import io.vertx.core.Vertx; +import com.gentics.mesh.event.EventBusStore; import io.vertx.core.logging.Logger; import io.vertx.core.logging.LoggerFactory; @@ -30,7 +30,7 @@ public class PermissionCacheImpl extends AbstractMeshCache, EnumSet> uniqueMap = Collections.synchronizedMap(new HashMap<>()); @Inject - public PermissionCacheImpl(EventAwareCacheFactory factory, Vertx vertx, CacheRegistry registry, MeshOptions options) { + public PermissionCacheImpl(EventAwareCacheFactory factory, EventBusStore eventBusStore, CacheRegistry registry, MeshOptions options) { super(createCache(factory), registry, CACHE_SIZE); - this.vertx = vertx; + this.eventBusStore = eventBusStore; this.options = options; } @@ -104,9 +104,9 @@ private String createCacheKey(Object userId, Object elementId) { public void clear(boolean notify) { // Invalidate locally cache.invalidate(); - if (notify && options.getClusterOptions().isEnabled()) { + if (notify && options.getClusterOptions().isEnabled() && eventBusStore.current() != null) { // Send the event to inform other to purge the stored permissions - vertx.eventBus().publish(CLEAR_PERMISSION_STORE.address, null); + eventBusStore.current().publish(CLEAR_PERMISSION_STORE.address, null); // log.error("Can't distribute cache clear event. 
Maybe Vert.x is stopping / starting right now"); } } diff --git a/core/src/main/java/com/gentics/mesh/cli/AbstractBootstrapInitializer.java b/core/src/main/java/com/gentics/mesh/cli/AbstractBootstrapInitializer.java index 4251600e9a..36a7362a84 100644 --- a/core/src/main/java/com/gentics/mesh/cli/AbstractBootstrapInitializer.java +++ b/core/src/main/java/com/gentics/mesh/cli/AbstractBootstrapInitializer.java @@ -19,6 +19,7 @@ import javax.inject.Inject; +import com.gentics.mesh.event.EventBusStore; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.StringUtils; import org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder; @@ -163,6 +164,9 @@ public abstract class AbstractBootstrapInitializer implements BootstrapInitializ @Inject public EventBusLivenessManager eventbusLiveness; + @Inject + public EventBusStore eventBusStore; + // TODO: Changing the role name or deleting the role would cause code that utilizes this field to break. // This is however a rare case. 
protected HibRole anonymousRole; @@ -535,7 +539,6 @@ protected String getLocalIpForRoutedRemoteIP(String destination) { */ public void initVertx(MeshOptions options) { VertxOptions vertxOptions = new VertxOptions(); - vertxOptions.getEventBusOptions().setClustered(options.getClusterOptions().isEnabled()); vertxOptions.setWorkerPoolSize(options.getVertxOptions().getWorkerPoolSize()); vertxOptions.setEventLoopPoolSize(options.getVertxOptions().getEventPoolSize()); @@ -549,7 +552,7 @@ public void initVertx(MeshOptions options) { vertxOptions.setPreferNativeTransport(true); System.setProperty("vertx.cacheDirBase", options.getTempDirectory()); Vertx vertx = null; - if (vertxOptions.getEventBusOptions().isClustered()) { + if (options.getClusterOptions().isEnabled()) { log.info("Creating clustered Vert.x instance"); vertx = createClusteredVertx(options, vertxOptions); } else { @@ -563,6 +566,7 @@ public void initVertx(MeshOptions options) { } this.vertx = vertx; + this.eventBusStore.setEventBus(vertx.eventBus()); } /** @@ -993,7 +997,7 @@ protected Vertx createClusteredVertx(MeshOptions options, VertxOptions vertxOpti try { return fut.get(getClusteredVertxInitializationTimeoutInSeconds(), SECONDS); } catch (Exception e) { - throw new RuntimeException("Error while creating clusterd Vert.x instance"); + throw new RuntimeException("Error while creating clusterd Vert.x instance", e); } } diff --git a/core/src/main/java/com/gentics/mesh/cli/MeshImpl.java b/core/src/main/java/com/gentics/mesh/cli/MeshImpl.java index fa4de8fbdb..226847f018 100644 --- a/core/src/main/java/com/gentics/mesh/cli/MeshImpl.java +++ b/core/src/main/java/com/gentics/mesh/cli/MeshImpl.java @@ -15,6 +15,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import io.vertx.core.http.*; import org.apache.commons.lang3.StringUtils; import com.gentics.mesh.Mesh; @@ -29,8 +30,6 @@ import io.reactivex.Completable; import io.vertx.core.MultiMap; import io.vertx.core.Vertx; 
-import io.vertx.core.http.HttpClientOptions; -import io.vertx.core.http.HttpClientRequest; import io.vertx.core.impl.launcher.commands.VersionCommand; import io.vertx.core.json.JsonObject; import io.vertx.core.logging.Logger; @@ -205,45 +204,59 @@ private void checkSystemRequirements() { public void invokeUpdateCheck() { String currentVersion = Mesh.getPlainVersion(); log.info("Checking for updates.."); - HttpClientRequest request = getVertx().createHttpClient(new HttpClientOptions().setSsl(true).setTrustAll(false)).get(443, "getmesh.io", - "/api/updatecheck?v=" + Mesh.getPlainVersion(), rh -> { - int code = rh.statusCode(); - if (code < 200 || code >= 299) { - log.error("Update check failed with status code {" + code + "}"); - } else { - rh.bodyHandler(bh -> { - JsonObject info = bh.toJsonObject(); - String latestVersion = info.getString("latest"); - - if (currentVersion.contains("-SNAPSHOT")) { - log.warn("You are using a SNAPSHOT version {" + currentVersion - + "}. This is potentially dangerous because this version has never been officially released."); - log.info("The latest version of Gentics Mesh is {" + latestVersion + "}"); - } else { - int result = VersionUtil.compareVersions(latestVersion, currentVersion); - if (result == 0) { - log.info("Great! You are using the latest version"); - } else if (result > 0) { - log.warn("Your Gentics Mesh version is outdated. 
You are using {" + currentVersion + "} but version {" - + latestVersion + "} is available."); - } - } - }); - } - }); - MultiMap headers = request.headers(); - headers.set("content-type", "application/json"); - String hostname = getHostname(); - if (!isEmpty(hostname)) { - headers.set("X-Hostname", hostname); - } - request.exceptionHandler(err -> { - log.info("Failed to check for updates."); - log.debug("Reason for failed update check", err); - }); - request.end(); + RequestOptions requestOptions = new RequestOptions(); + requestOptions.setMethod(HttpMethod.GET); + requestOptions.setSsl(true); + requestOptions.setHost("getmesh.io/api/updatecheck?v=" + Mesh.getPlainVersion()); + getVertx().createHttpClient(new HttpClientOptions().setSsl(true).setTrustAll(false)) + .request(HttpMethod.GET, 443, "getmesh.io", "/api/updatecheck?v=" + Mesh.getPlainVersion(), ar -> { + if (ar.succeeded()) { + HttpClientRequest req = ar.result(); + + MultiMap headers = req.headers(); + headers.set("content-type", "application/json"); + String hostname = getHostname(); + if (!isEmpty(hostname)) { + headers.set("X-Hostname", hostname); + } + req.send(ar2 -> { + if (ar2.succeeded()) { + HttpClientResponse response = ar2.result(); + int code = response.statusCode(); + if (code < 200 || code >= 299) { + log.error("Update check failed with status code {" + code + "}"); + } else { + response.bodyHandler(bh -> { + JsonObject info = bh.toJsonObject(); + String latestVersion = info.getString("latest"); + + if (currentVersion.contains("-SNAPSHOT")) { + log.warn("You are using a SNAPSHOT version {" + currentVersion + + "}. This is potentially dangerous because this version has never been officially released."); + log.info("The latest version of Gentics Mesh is {" + latestVersion + "}"); + } else { + int result = VersionUtil.compareVersions(latestVersion, currentVersion); + if (result == 0) { + log.info("Great! 
You are using the latest version"); + } else if (result > 0) { + log.warn("Your Gentics Mesh version is outdated. You are using {" + currentVersion + "} but version {" + + latestVersion + "} is available."); + } + } + }); + } + } else { + log.info("Failed to check for updates."); + log.debug("Reason for failed update check", ar2.cause()); + } + }); + } else { + log.info("Failed to check for updates."); + log.debug("Reason for failed update check", ar.cause()); + } + }); } /** diff --git a/core/src/main/java/com/gentics/mesh/core/data/impl/MeshAuthUserImpl.java b/core/src/main/java/com/gentics/mesh/core/data/impl/MeshAuthUserImpl.java index c2b0983e14..e95442547a 100644 --- a/core/src/main/java/com/gentics/mesh/core/data/impl/MeshAuthUserImpl.java +++ b/core/src/main/java/com/gentics/mesh/core/data/impl/MeshAuthUserImpl.java @@ -2,6 +2,7 @@ import java.util.Objects; +import io.vertx.ext.auth.authorization.Authorization; import org.apache.commons.lang.NotImplementedException; import com.gentics.mesh.core.data.dao.UserDao; @@ -97,7 +98,17 @@ public void setAuthProvider(AuthProvider authProvider) { } @Override - public User isAuthorized(String authority, Handler> resultHandler) { + public User merge(User user) { + throw new NotImplementedException(); + } + + @Override + public JsonObject attributes() { + throw new NotImplementedException(); + } + + @Override + public User isAuthorized(Authorization authorization, Handler> handler) { throw new NotImplementedException("Please use the MeshAuthUserImpl method instead."); } diff --git a/core/src/main/java/com/gentics/mesh/core/endpoint/eventbus/EventbusEndpoint.java b/core/src/main/java/com/gentics/mesh/core/endpoint/eventbus/EventbusEndpoint.java index 307cb00997..6d1d7021f2 100644 --- a/core/src/main/java/com/gentics/mesh/core/endpoint/eventbus/EventbusEndpoint.java +++ b/core/src/main/java/com/gentics/mesh/core/endpoint/eventbus/EventbusEndpoint.java @@ -13,6 +13,7 @@ import io.vertx.ext.auth.User; import 
io.vertx.ext.bridge.BridgeEventType; import io.vertx.ext.bridge.PermittedOptions; +import io.vertx.ext.web.Router; import io.vertx.ext.web.handler.sockjs.SockJSBridgeOptions; import io.vertx.ext.web.handler.sockjs.SockJSHandler; import io.vertx.ext.web.handler.sockjs.SockJSHandlerOptions; @@ -47,13 +48,17 @@ public void registerEndPoints() { } private void addEventBusHandler() { - SockJSHandler handler = null; - if (localRouter != null) { + secureAll(); + InternalEndpointRoute endpoint = createRoute(); + endpoint.setRAMLPath("/"); + endpoint.description("This endpoint provides a sockjs compliant websocket which can be used to interface with the vert.x eventbus."); + + if (!isRamlGeneratorContext()) { SockJSHandlerOptions sockJSoptions = new SockJSHandlerOptions().setHeartbeatInterval(2000); - handler = SockJSHandler.create(vertx, sockJSoptions); + SockJSHandler handler = SockJSHandler.create(vertx, sockJSoptions); SockJSBridgeOptions bridgeOptions = new SockJSBridgeOptions(); for (MeshEvent event : MeshEvent.publicEvents()) { - // TODO ensure that clients can't fire internal mesh events. + // TODO ensure that clients can't fire internal mesh events. bridgeOptions.addInboundPermitted(new PermittedOptions().setAddress(event.address)); bridgeOptions.addOutboundPermitted(new PermittedOptions().setAddress(event.address)); } @@ -61,7 +66,7 @@ private void addEventBusHandler() { bridgeOptions.addInboundPermitted(new PermittedOptions().setAddressRegex("custom.*")); bridgeOptions.addOutboundPermitted(new PermittedOptions().setAddressRegex("custom.*")); - handler.bridge(bridgeOptions, event -> { + Router brigdeRoute = handler.bridge(bridgeOptions, event -> { if (log.isDebugEnabled()) { if (event.type() == BridgeEventType.SOCKET_CREATED) { // TODO maybe it would be useful to send a reply to the user. @@ -76,14 +81,16 @@ private void addEventBusHandler() { log.debug("Eventbridge creation. 
User was authenticated: " + isAuthenticated); event.complete(isAuthenticated); }); - } - - secureAll(); - InternalEndpointRoute endpoint = createRoute(); - endpoint.setRAMLPath("/"); - endpoint.description("This endpoint provides a sockjs complient websocket which can be used to interface with the vert.x eventbus."); - endpoint.path("/*").handler(handler); + endpoint.path("/*").subRouter(brigdeRoute); + } } + /** + * Returns whether the method is called from during the documentation generation context. + * @return + */ + private boolean isRamlGeneratorContext() { + return localRouter == null; + } } diff --git a/core/src/main/java/com/gentics/mesh/core/endpoint/node/BinaryUploadHandlerImpl.java b/core/src/main/java/com/gentics/mesh/core/endpoint/node/BinaryUploadHandlerImpl.java index a9df72a7ec..c346422591 100644 --- a/core/src/main/java/com/gentics/mesh/core/endpoint/node/BinaryUploadHandlerImpl.java +++ b/core/src/main/java/com/gentics/mesh/core/endpoint/node/BinaryUploadHandlerImpl.java @@ -162,7 +162,7 @@ public void handleUpdateField(InternalActionContext ac, String nodeUuid, String throw error(BAD_REQUEST, "upload_error_no_version"); } - Set fileUploads = ac.getFileUploads(); + List fileUploads = ac.getFileUploads(); if (fileUploads.isEmpty()) { throw error(BAD_REQUEST, "node_error_no_binarydata_found"); } diff --git a/core/src/main/java/com/gentics/mesh/core/endpoint/node/S3BinaryMetadataExtractionHandlerImpl.java b/core/src/main/java/com/gentics/mesh/core/endpoint/node/S3BinaryMetadataExtractionHandlerImpl.java index ace5cf888d..0a3eb1efcf 100644 --- a/core/src/main/java/com/gentics/mesh/core/endpoint/node/S3BinaryMetadataExtractionHandlerImpl.java +++ b/core/src/main/java/com/gentics/mesh/core/endpoint/node/S3BinaryMetadataExtractionHandlerImpl.java @@ -164,6 +164,11 @@ public String charSet() { // TODO Auto-generated method stub return "UTF-8"; } + + @Override + public boolean cancel() { + return false; + } }; ctx.setFileUpload(fileUpload); 
ctx.setS3ObjectKey(nodeUuid + "/" + fieldName); diff --git a/core/src/main/java/com/gentics/mesh/core/endpoint/user/UserTokenAuthHandler.java b/core/src/main/java/com/gentics/mesh/core/endpoint/user/UserTokenAuthHandler.java index 29e319d41a..f98f27c389 100644 --- a/core/src/main/java/com/gentics/mesh/core/endpoint/user/UserTokenAuthHandler.java +++ b/core/src/main/java/com/gentics/mesh/core/endpoint/user/UserTokenAuthHandler.java @@ -21,9 +21,9 @@ import io.vertx.core.AsyncResult; import io.vertx.core.Handler; -import io.vertx.core.json.JsonObject; +import io.vertx.ext.auth.User; import io.vertx.ext.web.RoutingContext; -import io.vertx.ext.web.handler.impl.AuthHandlerImpl; +import io.vertx.ext.web.handler.impl.AuthenticationHandlerImpl; /** * The user token authentication handler grants access to routes by validating the provides token query parameter value. @@ -33,7 +33,7 @@ * this handler fails the {@link MeshJWTAuthHandler} should try to extract the JWT token from the cookie and load the correct user. 
*/ @Singleton -public class UserTokenAuthHandler extends AuthHandlerImpl { +public class UserTokenAuthHandler extends AuthenticationHandlerImpl { public static final int DEFAULT_MAX_TOKEN_AGE_IN_MINS = 30; private Database db; @@ -45,7 +45,7 @@ public UserTokenAuthHandler(MeshJWTAuthProvider authProvider, Database db) { } @Override - public void parseCredentials(RoutingContext context, Handler> handler) { + public void authenticate(RoutingContext routingContext, Handler> handler) { // Not needed } diff --git a/core/src/main/java/com/gentics/mesh/rest/MeshLocalClientImpl.java b/core/src/main/java/com/gentics/mesh/rest/MeshLocalClientImpl.java index bc623d54d3..2702272a10 100644 --- a/core/src/main/java/com/gentics/mesh/rest/MeshLocalClientImpl.java +++ b/core/src/main/java/com/gentics/mesh/rest/MeshLocalClientImpl.java @@ -1211,6 +1211,11 @@ public String charSet() { // TODO Auto-generated method stub return null; } + + @Override + public boolean cancel() { + return false; + } }); fieldAPIHandler.handleUpdateField(ac, nodeUuid, fieldKey, attributes); diff --git a/core/src/main/java/com/gentics/mesh/rest/MeshLocalRequestImpl.java b/core/src/main/java/com/gentics/mesh/rest/MeshLocalRequestImpl.java index a55091b0ec..932a532e8b 100644 --- a/core/src/main/java/com/gentics/mesh/rest/MeshLocalRequestImpl.java +++ b/core/src/main/java/com/gentics/mesh/rest/MeshLocalRequestImpl.java @@ -3,6 +3,7 @@ import java.util.List; import java.util.Map; +import io.vertx.reactivex.SingleHelper; import org.apache.commons.lang.NotImplementedException; import com.gentics.mesh.rest.client.MeshRequest; @@ -28,7 +29,7 @@ public MeshLocalRequestImpl(Future future) { @Override public Single toSingle() { - return new io.vertx.reactivex.core.Future(future).rxOnComplete(); + return SingleHelper.toSingle(future::onComplete); } @Override diff --git a/databases/orientdb/pom.xml b/databases/orientdb/pom.xml index 0b1bf29732..aa85cc4d81 100644 --- a/databases/orientdb/pom.xml +++ 
b/databases/orientdb/pom.xml @@ -49,8 +49,8 @@ test - io.vertx - vertx-hazelcast + com.gentics.mesh + hazelcast3-cluster-manager diff --git a/databases/orientdb/src/main/java/com/syncleus/ferma/ext/orientdb3/OrientDBTx.java b/databases/orientdb/src/main/java/com/syncleus/ferma/ext/orientdb3/OrientDBTx.java index 1055e2191f..af8a94c9ae 100644 --- a/databases/orientdb/src/main/java/com/syncleus/ferma/ext/orientdb3/OrientDBTx.java +++ b/databases/orientdb/src/main/java/com/syncleus/ferma/ext/orientdb3/OrientDBTx.java @@ -5,6 +5,7 @@ import javax.inject.Inject; +import dagger.Lazy; import org.springframework.security.crypto.password.PasswordEncoder; import com.gentics.mesh.Mesh; @@ -93,7 +94,11 @@ public class OrientDBTx extends AbstractTx { private final CommonTxData txData; private final ContextDataRegistry contextDataRegistry; private final OrientDBDaoCollection daos; - private final CacheCollection caches; + /** + * We provide a lazy instance, otherwise we risk prematurely subscribing to the event bus in certain bootstrapping + * scenarios (mesh clustered + init cluster flag set to true) + */ + private final Lazy caches; private final SecurityUtils security; private final Binaries binaries; private final S3Binaries s3binaries; @@ -102,9 +107,9 @@ public class OrientDBTx extends AbstractTx { @Inject public OrientDBTx(OrientDBMeshOptions options, Database db, OrientDBBootstrapInitializer boot, - OrientDBDaoCollection daos, CacheCollection caches, SecurityUtils security, OrientStorage provider, - TypeResolver typeResolver, MetricsService metrics, PermissionRoots permissionRoots, - ContextDataRegistry contextDataRegistry, S3Binaries s3binaries, Binaries binaries, CommonTxData txData) { + OrientDBDaoCollection daos, Lazy caches, SecurityUtils security, OrientStorage provider, + TypeResolver typeResolver, MetricsService metrics, PermissionRoots permissionRoots, + ContextDataRegistry contextDataRegistry, S3Binaries s3binaries, Binaries binaries, CommonTxData txData) { 
this.db = db; this.boot = boot; this.typeResolver = typeResolver; @@ -324,7 +329,7 @@ public S3Binaries s3binaries() { @Override public PermissionCache permissionCache() { - return caches.permissionCache(); + return caches.get().permissionCache(); } @Override diff --git a/distributed-coordinator/pom.xml b/distributed-coordinator/pom.xml index 8cc4442e10..706dc7bcd5 100644 --- a/distributed-coordinator/pom.xml +++ b/distributed-coordinator/pom.xml @@ -38,6 +38,11 @@ io.vertx vertx-web + + io.vertx + vertx-http-proxy + ${vertx.version} + com.hazelcast hazelcast diff --git a/distributed-coordinator/src/main/java/com/gentics/mesh/distributed/coordinator/MasterElector.java b/distributed-coordinator/src/main/java/com/gentics/mesh/distributed/coordinator/MasterElector.java index c55781341a..a462a2fb1d 100644 --- a/distributed-coordinator/src/main/java/com/gentics/mesh/distributed/coordinator/MasterElector.java +++ b/distributed-coordinator/src/main/java/com/gentics/mesh/distributed/coordinator/MasterElector.java @@ -106,7 +106,7 @@ public void start() { /** * Check whether the instance that runs this code is the elected master. - * + * * @return */ public boolean isMaster() { @@ -134,7 +134,7 @@ public void setMaster() { /** * Each instance in the cluster will call the elect master method when the structure of the cluster changes. The master election runs in a locked manner and * is terminated as soon as one node in the cluster got elected. 
- * + * * @return Elected member */ private void electMaster() { @@ -145,8 +145,8 @@ private void electMaster() { try { log.info("Locked for master election"); Optional foundMaster = cluster.getMembers().stream() - .filter(m -> isMaster(m)) - .findFirst(); + .filter(m -> isMaster(m)) + .findFirst(); boolean hasMaster = foundMaster.isPresent(); boolean isElectible = isElectable(localMember()); if (!hasMaster && isElectible) { @@ -156,8 +156,8 @@ private void electMaster() { localMember().setBooleanAttribute(MASTER, true); log.info("Cluster node was elected as new master"); } else if (cluster.getMembers().stream() - .filter(m -> isMaster(m)) - .count() > 1) { + .filter(m -> isMaster(m)) + .count() > 1) { log.info("Detected multiple masters in the cluster, giving up the master flag"); giveUpMasterFlag(); } @@ -169,7 +169,7 @@ private void electMaster() { /** * Check whether the member is allowed to be elected as master - * + * * @param m * @return */ @@ -236,16 +236,16 @@ public void memberAdded(MembershipEvent membershipEvent) { hazelcast.get().getLifecycleService().addLifecycleListener(event -> { log.info(String.format("Lifecycle state changed to %s", event.getState())); switch (event.getState()) { - case MERGING: - merging = true; - break; - case MERGED: - // when the instance merged into a cluster, we need to elect a new master (to avoid multimaster situations) - merging = false; - electMaster(); - break; - default: - break; + case MERGING: + merging = true; + break; + case MERGED: + // when the instance merged into a cluster, we need to elect a new master (to avoid multimaster situations) + merging = false; + electMaster(); + break; + default: + break; } }); @@ -273,8 +273,8 @@ public void memberAdded(MembershipEvent membershipEvent) { protected void findCurrentMaster() { Cluster cluster = hazelcast.get().getCluster(); Optional master = cluster.getMembers().stream() - .filter(m -> isMaster(m)) - .findFirst(); + .filter(m -> isMaster(m)) + .findFirst(); if 
(master.isPresent()) { masterMember = master.get(); log.info("Updated master member {" + masterMember.getStringAttribute(MESH_NODE_NAME_ATTR) + "}"); @@ -296,7 +296,7 @@ private void giveUpMasterFlag() { /** * Check whether the given member currently is the master - * + * * @param member * member * @return true for the master @@ -307,7 +307,7 @@ private static boolean isMaster(Member member) { /** * Check whether the given instance is the local instance. - * + * * @param member * @return */ @@ -317,7 +317,7 @@ public boolean isLocal(Member member) { /** * Return the hazelcast member for this instance. - * + * * @return */ public Member localMember() { @@ -348,7 +348,7 @@ public MasterServer getMasterMember() { /** * Let the handler accept the object from the given message, if the message was not published from the local node - * + * * @param msg * message * @param handler @@ -363,7 +363,7 @@ private void executeIfNotFromLocal(Message msg, Consumer handler) { /** * Let the handler accept the object from the given message, if the message was published from the local node - * + * * @param msg * message * @param handler @@ -378,7 +378,7 @@ private void executeIfFromLocal(Message msg, Consumer handler) { /** * Check whether the local member is electable. 
- * + * * @return */ public boolean isElectable() { diff --git a/distributed-coordinator/src/main/java/com/gentics/mesh/distributed/coordinator/proxy/ClusterEnabledRequestDelegatorImpl.java b/distributed-coordinator/src/main/java/com/gentics/mesh/distributed/coordinator/proxy/ClusterEnabledRequestDelegatorImpl.java index 50e923345f..ebd3274862 100644 --- a/distributed-coordinator/src/main/java/com/gentics/mesh/distributed/coordinator/proxy/ClusterEnabledRequestDelegatorImpl.java +++ b/distributed-coordinator/src/main/java/com/gentics/mesh/distributed/coordinator/proxy/ClusterEnabledRequestDelegatorImpl.java @@ -26,8 +26,8 @@ import io.vertx.core.http.HttpServerResponse; import io.vertx.core.logging.Logger; import io.vertx.core.logging.LoggerFactory; -import io.vertx.core.streams.Pump; import io.vertx.ext.web.RoutingContext; +import io.vertx.httpproxy.ProxyRequest; /** * @see RequestDelegator @@ -133,44 +133,31 @@ public boolean canWrite() { @Override public void redirectToMaster(RoutingContext rc) { HttpServerRequest request = rc.request(); - String requestURI = request.uri(); - HttpMethod method = request.method(); - HttpServerResponse response = rc.response(); MasterServer master = coordinator.getMasterMember(); String host = master.getHost(); int port = master.getPort(); - if (log.isDebugEnabled()) { log.debug("Forwarding request to master {" + master.toString() + "}"); } - @SuppressWarnings("deprecation") - HttpClientRequest forwardRequest = httpClient.request(method, port, host, requestURI, forwardResponse -> { - response.setChunked(true); - response.setStatusCode(forwardResponse.statusCode()); - response.putHeader(MESH_FORWARDED_FROM_HEADER, master.getName()); - forwardHeaders(response, forwardResponse); - printHeaders("Forward response headers", response.headers()); - Pump.pump(forwardResponse, response) - .setWriteQueueMaxSize(8192) - .start(); - forwardResponse.endHandler(v -> response.end()); - }); - - forwardHeaders(request, forwardRequest); - 
forwardRequest.putHeader(MESH_DIRECT_HEADER, "true"); - forwardRequest.setChunked(true); - - if (request.isEnded()) { - log.warn("Request to be proxied is already read"); - proxyEndHandler(forwardRequest, rc.getBody()); - } else { - request.exceptionHandler(e -> log.error("Could not forward request to Mesh: {}", e, e.getMessage())) - .endHandler(v -> proxyEndHandler(forwardRequest, null)); - Pump.pump(request, forwardRequest) - .setWriteQueueMaxSize(8192) - .start(); - } + ProxyRequest proxyRequest = ProxyRequest.reverseProxy(request); + proxyRequest.putHeader(MESH_DIRECT_HEADER, "true"); + + httpClient.request(proxyRequest.getMethod(), port, host, proxyRequest.getURI()) + .compose(proxyRequest::send) + .onSuccess(proxyResponse -> { + // Send the proxy response + proxyResponse.putHeader(MESH_FORWARDED_FROM_HEADER, master.getName()); + proxyResponse.send(); + }) + .onFailure(err -> { + // Release the request + proxyRequest.release(); + + // Send error + request.response().setStatusCode(500) + .send(); + }); } @Override diff --git a/hazelcast3-cluster-manager/README.md b/hazelcast3-cluster-manager/README.md new file mode 100644 index 0000000000..35eb30fd27 --- /dev/null +++ b/hazelcast3-cluster-manager/README.md @@ -0,0 +1,6 @@ +# Hazelcast 3 Cluster Manager + +Fork of [Vert.x Hazelcast](https://github.com/vert-x3/vertx-hazelcast) implementing Vert.x 4 Cluster SPI using Hazelcast 3. + +This was created since Vert.x Hazelcast cluster manager supports only Hazelcast 4, and OrientDB doesn't support +Hazelcast 4 at the moment of writing. 
diff --git a/hazelcast3-cluster-manager/pom.xml b/hazelcast3-cluster-manager/pom.xml new file mode 100644 index 0000000000..8fa4101d07 --- /dev/null +++ b/hazelcast3-cluster-manager/pom.xml @@ -0,0 +1,243 @@ + + + 4.0.0 + + mesh + com.gentics.mesh + 1.10.0-SNAPSHOT + + + hazelcast3-cluster-manager + Vert.x Hazelcast Cluster Manager + + + 4.3.2 + 3.12.2 + 1.5.1 + ${skip.vertx-hazelcast} + ${project.basedir}/src/main/resources/META-INF/MANIFEST.MF + + + + + + io.vertx + vertx-dependencies + ${stack.version} + pom + import + + + + + + + + io.vertx + vertx-core + + + io.vertx + vertx-health-check + true + + + io.vertx + vertx-web + true + + + com.hazelcast + hazelcast + ${hazelcast.version} + + + + io.vertx + vertx-docgen + provided + + + io.vertx + vertx-codegen + provided + + + + junit + junit + 4.12 + test + + + io.vertx + vertx-core + test-jar + test + + + io.vertx + vertx-web + test-jar + test + + + io.vertx + vertx-service-discovery + test + + + io.vertx + vertx-service-proxy + test + + + io.vertx + vertx-service-discovery + test-jar + test + + + org.assertj + assertj-core + 3.3.0 + test + + + com.jayway.awaitility + awaitility + 1.7.0 + test + + + com.hazelcast + hazelcast-client + ${hazelcast.version} + test + + + ch.qos.logback + logback-classic + 1.1.7 + test + + + + + + + + org.bsc.maven + maven-processor-plugin + + + generate-sources + + + ${hazelcast.version} + + + + + + + maven-surefire-plugin + + false + + PARANOID + ${project.build.directory} + ${project.version} + true + io.vertx.core.logging.SLF4JLogDelegateFactory + slf4j + + + -Xmx1200M + 1 + true + + + + maven-failsafe-plugin + 2.19.1 + + false + + PARANOID + ${project.build.directory} + ${project.version} + true + io.vertx.core.logging.SLF4JLogDelegateFactory + slf4j + + + -Xmx1200M + 1 + true + + + + + + + maven-surefire-plugin + + ${skipUnitTests} + + **/it/**/*Test.java + + + + + maven-failsafe-plugin + + + lite-members + + integration-test + verify + + integration-test + + + 
**/it/litemembers/*Test.java + + + + + + + + + + + coverage + + + + + maven-surefire-plugin + + false + + PARANOID + ${project.build.directory} + ${project.version} + true + + + + -Xmx1200M + 1 + true + + + + + + + + \ No newline at end of file diff --git a/hazelcast3-cluster-manager/src/main/java/examples/Examples.java b/hazelcast3-cluster-manager/src/main/java/examples/Examples.java new file mode 100644 index 0000000000..4911d7a06f --- /dev/null +++ b/hazelcast3-cluster-manager/src/main/java/examples/Examples.java @@ -0,0 +1,130 @@ +/* + * Copyright 2014 Red Hat, Inc. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * and Apache License v2.0 which accompanies this distribution. + * + * The Eclipse Public License is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * The Apache License v2.0 is available at + * http://www.opensource.org/licenses/apache2.0.php + * + * You may elect to redistribute this code under either of these licenses. 
+ */ + +package examples; + +import com.hazelcast.config.Config; +import com.hazelcast.core.HazelcastInstance; +import io.vertx.core.Handler; +import io.vertx.core.Promise; +import io.vertx.core.Vertx; +import io.vertx.core.VertxOptions; +import io.vertx.core.spi.cluster.ClusterManager; +import io.vertx.ext.healthchecks.HealthCheckHandler; +import io.vertx.ext.healthchecks.HealthChecks; +import io.vertx.ext.healthchecks.Status; +import io.vertx.ext.web.Router; +import io.vertx.spi.cluster.hazelcast.ClusterHealthCheck; +import io.vertx.spi.cluster.hazelcast.ConfigUtil; +import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; + +/** + * @author Tim Fox + */ +public class Examples { + + public void example1() { + + ClusterManager mgr = new HazelcastClusterManager(); + + VertxOptions options = new VertxOptions().setClusterManager(mgr); + + Vertx.clusteredVertx(options, res -> { + if (res.succeeded()) { + Vertx vertx = res.result(); + } else { + // failed! + } + }); + } + + public void example2() { + + Config hazelcastConfig = new Config(); + + // Now set some stuff on the config (omitted) + + ClusterManager mgr = new HazelcastClusterManager(hazelcastConfig); + + VertxOptions options = new VertxOptions().setClusterManager(mgr); + + Vertx.clusteredVertx(options, res -> { + if (res.succeeded()) { + Vertx vertx = res.result(); + } else { + // failed! + } + }); + } + + public void customizeDefaultConfig() { + Config hazelcastConfig = ConfigUtil.loadConfig(); + + hazelcastConfig.getGroupConfig() + .setName("my-cluster-name"); + + ClusterManager mgr = new HazelcastClusterManager(hazelcastConfig); + + VertxOptions options = new VertxOptions().setClusterManager(mgr); + + Vertx.clusteredVertx(options, res -> { + if (res.succeeded()) { + Vertx vertx = res.result(); + } else { + // failed! 
+ } + }); + } + + public void example3(HazelcastInstance hazelcastInstance) { + ClusterManager mgr = new HazelcastClusterManager(hazelcastInstance); + VertxOptions options = new VertxOptions().setClusterManager(mgr); + Vertx.clusteredVertx(options, res -> { + if (res.succeeded()) { + Vertx vertx = res.result(); + } else { + // failed! + } + }); + } + + public void healthCheck(Vertx vertx) { + Handler> procedure = ClusterHealthCheck.createProcedure(vertx); + HealthChecks checks = HealthChecks.create(vertx).register("cluster-health", procedure); + } + + public void healthCheckHandler(Vertx vertx, HealthChecks checks) { + Router router = Router.router(vertx); + router.get("/readiness").handler(HealthCheckHandler.createWithHealthChecks(checks)); + } + + public void liteMemberConfig() { + Config hazelcastConfig = ConfigUtil.loadConfig() + .setLiteMember(true); + + ClusterManager mgr = new HazelcastClusterManager(hazelcastConfig); + + VertxOptions options = new VertxOptions().setClusterManager(mgr); + + Vertx.clusteredVertx(options, res -> { + if (res.succeeded()) { + Vertx vertx = res.result(); + } else { + // failed! + } + }); + } +} diff --git a/hazelcast3-cluster-manager/src/main/java/examples/package-info.java b/hazelcast3-cluster-manager/src/main/java/examples/package-info.java new file mode 100644 index 0000000000..8bb8f7b89b --- /dev/null +++ b/hazelcast3-cluster-manager/src/main/java/examples/package-info.java @@ -0,0 +1,23 @@ +/* + * Copyright 2014 Red Hat, Inc. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * and Apache License v2.0 which accompanies this distribution. + * + * The Eclipse Public License is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * The Apache License v2.0 is available at + * http://www.opensource.org/licenses/apache2.0.php + * + * You may elect to redistribute this code under either of these licenses. 
+ */ + +/** + * @author Julien Viet + */ +@Source +package examples; + +import io.vertx.docgen.Source; \ No newline at end of file diff --git a/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/ClusterHealthCheck.java b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/ClusterHealthCheck.java new file mode 100644 index 0000000000..cd982c1221 --- /dev/null +++ b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/ClusterHealthCheck.java @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2011-2018 Contributors to the Eclipse Foundation + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 + * which is available at https://www.apache.org/licenses/LICENSE-2.0. + * + * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 + */ +package io.vertx.spi.cluster.hazelcast; + +import com.hazelcast.core.PartitionService; +import io.vertx.codegen.annotations.VertxGen; +import io.vertx.core.Handler; +import io.vertx.core.Promise; +import io.vertx.core.Vertx; +import io.vertx.core.impl.VertxInternal; +import io.vertx.ext.healthchecks.Status; + +import java.util.Objects; + +/** + * A helper to create Vert.x cluster {@link io.vertx.ext.healthchecks.HealthChecks} procedures. + */ +@VertxGen +public interface ClusterHealthCheck { + + /** + * Creates a ready-to-use Vert.x cluster {@link io.vertx.ext.healthchecks.HealthChecks} procedure. 
+ * + * @param vertx the instance of Vert.x, must not be {@code null} + * @return a Vert.x cluster {@link io.vertx.ext.healthchecks.HealthChecks} procedure + */ + static Handler> createProcedure(Vertx vertx) { + Objects.requireNonNull(vertx); + return future -> { + VertxInternal vertxInternal = (VertxInternal) vertx; + HazelcastClusterManager clusterManager = (HazelcastClusterManager) vertxInternal.getClusterManager(); + PartitionService partitionService = clusterManager.getHazelcastInstance().getPartitionService(); + future.complete(new Status().setOk(partitionService.isClusterSafe())); + }; + } +} diff --git a/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/ConfigUtil.java b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/ConfigUtil.java new file mode 100644 index 0000000000..b4895b4911 --- /dev/null +++ b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/ConfigUtil.java @@ -0,0 +1,106 @@ +/* + * Copyright 2018 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package io.vertx.spi.cluster.hazelcast; + +import com.hazelcast.config.Config; +import com.hazelcast.config.XmlConfigBuilder; +import io.vertx.core.impl.logging.Logger; +import io.vertx.core.impl.logging.LoggerFactory; + +import java.io.*; + +/** + * @author Thomas Segismont + */ +public class ConfigUtil { + + private static final Logger log = LoggerFactory.getLogger(ConfigUtil.class); + + // Hazelcast config file + private static final String DEFAULT_CONFIG_FILE = "default-cluster.xml"; + private static final String CONFIG_FILE = "cluster.xml"; + + /** + * Loads Hazelcast config XML and transform it into a {@link Config} object. + * + * The content is read from: + *
    + *
  1. the location denoted by the {@code vertx.hazelcast.config} sysprop, if present, or
  2. + *
  3. the {@code cluster.xml} file on the classpath, if present, or
  4. + *
  5. the default config file
  6. + *
+ * + * @return a config object + */ + public static Config loadConfig() { + Config cfg = null; + try (InputStream is = getConfigStream(); + InputStream bis = new BufferedInputStream(is)) { + cfg = new XmlConfigBuilder(bis).build(); + } catch (IOException ex) { + log.error("Failed to read config", ex); + } + return cfg; + } + + private static InputStream getConfigStream() { + InputStream is = getConfigStreamFromSystemProperty(); + if (is == null) { + is = getConfigStreamFromClasspath(CONFIG_FILE, DEFAULT_CONFIG_FILE); + } + return is; + } + + private static InputStream getConfigStreamFromSystemProperty() { + String configProp = System.getProperty("vertx.hazelcast.config"); + InputStream is = null; + if (configProp != null) { + if (configProp.startsWith("classpath:")) { + return getConfigStreamFromClasspath(configProp.substring("classpath:".length()), CONFIG_FILE); + } + File cfgFile = new File(configProp); + if (cfgFile.exists()) { + try { + is = new FileInputStream(cfgFile); + } catch (FileNotFoundException ex) { + log.warn("Failed to open file '" + configProp + "' defined in 'vertx.hazelcast.config'. 
Continuing " + + "classpath search for " + CONFIG_FILE); + } + } + } + return is; + } + + private static InputStream getConfigStreamFromClasspath(String configFile, String defaultConfig) { + InputStream is = null; + ClassLoader ctxClsLoader = Thread.currentThread().getContextClassLoader(); + if (ctxClsLoader != null) { + is = ctxClsLoader.getResourceAsStream(configFile); + } + if (is == null) { + is = ConfigUtil.class.getClassLoader().getResourceAsStream(configFile); + if (is == null) { + is = ConfigUtil.class.getClassLoader().getResourceAsStream(defaultConfig); + } + } + return is; + } + + private ConfigUtil() { + // Utility class + } +} diff --git a/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/HazelcastClusterManager.java b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/HazelcastClusterManager.java new file mode 100644 index 0000000000..918431cbfc --- /dev/null +++ b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/HazelcastClusterManager.java @@ -0,0 +1,425 @@ +/* + * Copyright (c) 2011-2013 The original author or authors + * ------------------------------------------------------ + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * and Apache License v2.0 which accompanies this distribution. + * + * The Eclipse Public License is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * The Apache License v2.0 is available at + * http://www.opensource.org/licenses/apache2.0.php + * + * You may elect to redistribute this code under either of these licenses. 
+ */ + +package io.vertx.spi.cluster.hazelcast; + +import com.hazelcast.config.Config; +import com.hazelcast.core.*; +import io.vertx.core.Promise; +import io.vertx.core.Vertx; +import io.vertx.core.VertxException; +import io.vertx.core.impl.VertxInternal; +import io.vertx.core.impl.logging.Logger; +import io.vertx.core.impl.logging.LoggerFactory; +import io.vertx.core.shareddata.AsyncMap; +import io.vertx.core.shareddata.Counter; +import io.vertx.core.shareddata.Lock; +import io.vertx.core.spi.cluster.*; +import io.vertx.spi.cluster.hazelcast.impl.*; + +import java.util.*; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.TimeUnit; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.NANOSECONDS; + +/** + * A cluster manager that uses Hazelcast + * + * @author Tim Fox + */ +public class HazelcastClusterManager implements ClusterManager, MembershipListener, LifecycleListener { + + private static final Logger log = LoggerFactory.getLogger(HazelcastClusterManager.class); + + private static final String LOCK_SEMAPHORE_PREFIX = "__vertx."; + private static final String NODE_ID_ATTRIBUTE = "__vertx.nodeId"; + + private VertxInternal vertx; + private NodeSelector nodeSelector; + + private HazelcastInstance hazelcast; + private String nodeId; + private NodeInfo nodeInfo; + private SubsMapHelper subsMapHelper; + private IMap nodeInfoMap; + private String membershipListenerId; + private String lifecycleListenerId; + private boolean customHazelcastCluster; + private Set nodeIds = new HashSet<>(); + + private NodeListener nodeListener; + private volatile boolean active; + + private Config conf; + + private ExecutorService lockReleaseExec; + + /** + * Constructor - gets config from classpath + */ + public HazelcastClusterManager() { + } + + /** + * Constructor - config supplied + * + * @param conf 
Hazelcast config, not null + */ + public HazelcastClusterManager(Config conf) { + Objects.requireNonNull(conf, "The Hazelcast config cannot be null."); + this.conf = conf; + } + + public HazelcastClusterManager(HazelcastInstance instance) { + Objects.requireNonNull(instance, "The Hazelcast instance cannot be null."); + hazelcast = instance; + customHazelcastCluster = true; + } + + @Override + public void init(Vertx vertx, NodeSelector nodeSelector) { + this.vertx = (VertxInternal) vertx; + this.nodeSelector = nodeSelector; + } + + @Override + public void join(Promise promise) { + vertx.executeBlocking(prom -> { + if (!active) { + active = true; + + lockReleaseExec = Executors.newCachedThreadPool(r -> new Thread(r, "vertx-hazelcast-service-release-lock-thread")); + + // The hazelcast instance has not been passed using the constructor. + if (!customHazelcastCluster) { + if (conf == null) { + conf = loadConfig(); + if (conf == null) { + log.warn("Cannot find cluster configuration on 'vertx.hazelcast.config' system property, on the classpath, " + + "or specified programmatically. 
Using default hazelcast configuration"); + conf = new Config(); + } + } + + // We have our own shutdown hook and need to ensure ours runs before Hazelcast is shutdown + conf.setProperty("hazelcast.shutdownhook.enabled", "false"); + + hazelcast = Hazelcast.newHazelcastInstance(conf); + } + + Member localMember = hazelcast.getCluster().getLocalMember(); + nodeId = localMember.getUuid(); + localMember.setStringAttribute(NODE_ID_ATTRIBUTE, nodeId); + membershipListenerId = hazelcast.getCluster().addMembershipListener(this); + lifecycleListenerId = hazelcast.getLifecycleService().addLifecycleListener(this); + + subsMapHelper = new SubsMapHelper(vertx, hazelcast, nodeSelector); + nodeInfoMap = hazelcast.getMap("__vertx.nodeInfo"); + + prom.complete(); + } + }, promise); + } + + @Override + public String getNodeId() { + return nodeId; + } + + @Override + public List getNodes() { + List list = new ArrayList<>(); + for (Member member : hazelcast.getCluster().getMembers()) { + String nodeIdAttribute = member.getStringAttribute(NODE_ID_ATTRIBUTE); + list.add(nodeIdAttribute != null ? 
nodeIdAttribute : member.getUuid()); + } + return list; + } + + @Override + public void nodeListener(NodeListener listener) { + this.nodeListener = listener; + } + + @Override + public void setNodeInfo(NodeInfo nodeInfo, Promise promise) { + synchronized (this) { + this.nodeInfo = nodeInfo; + } + HazelcastNodeInfo value = new HazelcastNodeInfo(nodeInfo); + vertx.executeBlocking(prom -> { + nodeInfoMap.put(nodeId, value); + prom.complete(); + }, false, promise); + } + + @Override + public synchronized NodeInfo getNodeInfo() { + return nodeInfo; + } + + @Override + public void getNodeInfo(String nodeId, Promise promise) { + vertx.executeBlocking(prom -> { + HazelcastNodeInfo value = nodeInfoMap.get(nodeId); + if (value != null) { + prom.complete(value.unwrap()); + } else { + promise.fail("Not a member of the cluster"); + } + }, false, promise); + } + + @Override + public void getAsyncMap(String name, Promise> promise) { + promise.complete(new HazelcastAsyncMap<>(vertx, hazelcast.getMap(name))); + } + + @Override + public Map getSyncMap(String name) { + return hazelcast.getMap(name); + } + + @Override + public void getLockWithTimeout(String name, long timeout, Promise promise) { + vertx.executeBlocking(prom -> { + ISemaphore iSemaphore = hazelcast.getSemaphore(LOCK_SEMAPHORE_PREFIX + name); + boolean locked = false; + long remaining = timeout; + do { + long start = System.nanoTime(); + try { + locked = iSemaphore.tryAcquire(remaining, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + // OK continue + } + remaining = remaining - MILLISECONDS.convert(System.nanoTime() - start, NANOSECONDS); + } while (!locked && remaining > 0); + if (locked) { + prom.complete(new HazelcastLock(iSemaphore, lockReleaseExec)); + } else { + throw new VertxException("Timed out waiting to get lock " + name); + } + }, false, promise); + } + + @Override + public void getCounter(String name, Promise promise) { + promise.complete(new HazelcastCounter(vertx, 
hazelcast.getAtomicLong(name))); + } + + @Override + public void leave(Promise promise) { + vertx.executeBlocking(prom -> { + // We need to synchronized on the cluster manager instance to avoid other call to happen while leaving the + // cluster, typically, memberRemoved and memberAdded + synchronized (HazelcastClusterManager.this) { + if (active) { + try { + active = false; + lockReleaseExec.shutdown(); + subsMapHelper.close(); + boolean left = hazelcast.getCluster().removeMembershipListener(membershipListenerId); + if (!left) { + log.warn("No membership listener"); + } + hazelcast.getLifecycleService().removeLifecycleListener(lifecycleListenerId); + + // Do not shutdown the cluster if we are not the owner. + while (!customHazelcastCluster && hazelcast.getLifecycleService().isRunning()) { + try { + // This can sometimes throw java.util.concurrent.RejectedExecutionException so we retry. + hazelcast.getLifecycleService().shutdown(); + } catch (RejectedExecutionException ignore) { + log.debug("Rejected execution of the shutdown operation, retrying"); + } + try { + Thread.sleep(1); + } catch (InterruptedException t) { + // Manage the interruption in another handler. + Thread.currentThread().interrupt(); + } + } + + if (customHazelcastCluster) { + hazelcast.getCluster().getLocalMember().removeAttribute(NODE_ID_ATTRIBUTE); + } + + } catch (Throwable t) { + prom.fail(t); + } + } + } + prom.complete(); + }, promise); + } + + @Override + public synchronized void memberAdded(MembershipEvent membershipEvent) { + if (!active) { + return; + } + Member member = membershipEvent.getMember(); + String attribute = member.getStringAttribute(NODE_ID_ATTRIBUTE); + String nid = attribute != null ? 
attribute : member.getUuid(); + try { + if (nodeListener != null) { + nodeIds.add(nid); + nodeListener.nodeAdded(nid); + } + } catch (Throwable t) { + log.error("Failed to handle memberAdded", t); + } + } + + @Override + public synchronized void memberRemoved(MembershipEvent membershipEvent) { + if (!active) { + return; + } + Member member = membershipEvent.getMember(); + String attribute = member.getStringAttribute(NODE_ID_ATTRIBUTE); + String nid = attribute != null ? attribute : member.getUuid(); + try { + membersRemoved(Collections.singleton(nid)); + } catch (Throwable t) { + log.error("Failed to handle memberRemoved", t); + } + } + + private synchronized void membersRemoved(Set ids) { + cleanSubs(ids); + cleanNodeInfos(ids); + nodeInfoMap.put(nodeId, new HazelcastNodeInfo(getNodeInfo())); + nodeSelector.registrationsLost(); + republishOwnSubs(); + if (nodeListener != null) { + nodeIds.removeAll(ids); + ids.forEach(nodeListener::nodeLeft); + } + } + + private void cleanSubs(Set ids) { + subsMapHelper.removeAllForNodes(ids); + } + + private void cleanNodeInfos(Set ids) { + ids.forEach(nodeInfoMap::remove); + } + + private void republishOwnSubs() { + vertx.executeBlocking(prom -> { + subsMapHelper.republishOwnSubs(); + prom.complete(); + }, false); + } + + @Override + public synchronized void stateChanged(LifecycleEvent lifecycleEvent) { + if (!active) { + return; + } + // Safeguard to make sure members list is OK after a partition merge + if (lifecycleEvent.getState() == LifecycleEvent.LifecycleState.MERGED) { + final List currentNodes = getNodes(); + Set newNodes = new HashSet<>(currentNodes); + newNodes.removeAll(nodeIds); + Set removedMembers = new HashSet<>(nodeIds); + removedMembers.removeAll(currentNodes); + for (String nodeId : newNodes) { + nodeListener.nodeAdded(nodeId); + } + membersRemoved(removedMembers); + nodeIds.retainAll(currentNodes); + } + } + + @Override + public boolean isActive() { + return active; + } + + @Override + public void 
addRegistration(String address, RegistrationInfo registrationInfo, Promise promise) { + SubsOpSerializer serializer = SubsOpSerializer.get(vertx.getOrCreateContext()); + serializer.execute(subsMapHelper::put, address, registrationInfo, promise); + } + + @Override + public void removeRegistration(String address, RegistrationInfo registrationInfo, Promise promise) { + SubsOpSerializer serializer = SubsOpSerializer.get(vertx.getOrCreateContext()); + serializer.execute(subsMapHelper::remove, address, registrationInfo, promise); + } + + @Override + public void getRegistrations(String address, Promise> promise) { + vertx.executeBlocking(prom -> { + prom.complete(subsMapHelper.get(address)); + }, false, promise); + } + + @Override + public void memberAttributeChanged(MemberAttributeEvent memberAttributeEvent) { + } + + /** + * Get the Hazelcast config. + * + * @return a config object + */ + public Config getConfig() { + return conf; + } + + /** + * Set the Hazelcast config. + * + * @param config a config object + */ + public void setConfig(Config config) { + this.conf = config; + } + + /** + * Load Hazelcast config XML and transform it into a {@link Config} object. + * The content is read from: + *
+ * <ol>
+ * <li>the location denoted by the {@code vertx.hazelcast.config} sysprop, if present, or</li>
+ * <li>the {@code cluster.xml} file on the classpath, if present, or</li>
+ * <li>the default config file</li>
+ * </ol>
+ * <p>
+ * The cluster manager uses this method to load the config when the node joins the cluster, if no config was provided upon creation.
+ * <p>
+ * You may use this method to get a base config and customize it before the node joins the cluster.
+ * In this case, don't forget to invoke {@link #setConfig(Config)} after you applied your changes.
+ * + * @return a config object + */ + public Config loadConfig() { + return ConfigUtil.loadConfig(); + } + + public HazelcastInstance getHazelcastInstance() { + return hazelcast; + } +} diff --git a/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/ConversionUtils.java b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/ConversionUtils.java new file mode 100644 index 0000000000..0cfe8cd289 --- /dev/null +++ b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/ConversionUtils.java @@ -0,0 +1,90 @@ +package io.vertx.spi.cluster.hazelcast.impl; + +import com.hazelcast.nio.ObjectDataInput; +import com.hazelcast.nio.ObjectDataOutput; +import com.hazelcast.nio.serialization.DataSerializable; +import io.vertx.core.buffer.Buffer; +import io.vertx.core.shareddata.impl.ClusterSerializable; + +import java.io.IOException; + +public class ConversionUtils { + + @SuppressWarnings("unchecked") + public static T convertParam(T obj) { + if (obj instanceof ClusterSerializable) { + ClusterSerializable cobj = (ClusterSerializable) obj; + return (T) (new DataSerializableHolder(cobj)); + } else { + return obj; + } + } + + @SuppressWarnings("unchecked") + public static T convertReturn(Object obj) { + if (obj instanceof DataSerializableHolder) { + DataSerializableHolder cobj = (DataSerializableHolder) obj; + return (T) cobj.clusterSerializable(); + } else { + return (T) obj; + } + } + + @SuppressWarnings("unchecked") + private static final class DataSerializableHolder implements DataSerializable { + + private ClusterSerializable clusterSerializable; + + public DataSerializableHolder() { + } + + private DataSerializableHolder(ClusterSerializable clusterSerializable) { + this.clusterSerializable = clusterSerializable; + } + + @Override + public void writeData(ObjectDataOutput objectDataOutput) throws IOException { + objectDataOutput.writeUTF(clusterSerializable.getClass().getName()); + Buffer buffer = 
Buffer.buffer(); + clusterSerializable.writeToBuffer(buffer); + byte[] bytes = buffer.getBytes(); + objectDataOutput.writeInt(bytes.length); + objectDataOutput.write(bytes); + } + + @Override + public void readData(ObjectDataInput objectDataInput) throws IOException { + String className = objectDataInput.readUTF(); + int length = objectDataInput.readInt(); + byte[] bytes = new byte[length]; + objectDataInput.readFully(bytes); + try { + Class clazz = Thread.currentThread().getContextClassLoader().loadClass(className); + clusterSerializable = (ClusterSerializable) clazz.newInstance(); + clusterSerializable.readFromBuffer(0, Buffer.buffer(bytes)); + } catch (Exception e) { + throw new IllegalStateException("Failed to load class " + e.getMessage(), e); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof DataSerializableHolder)) return false; + DataSerializableHolder that = (DataSerializableHolder) o; + if (clusterSerializable != null ? !clusterSerializable.equals(that.clusterSerializable) : that.clusterSerializable != null) { + return false; + } + return true; + } + + @Override + public int hashCode() { + return clusterSerializable != null ? clusterSerializable.hashCode() : 0; + } + + public ClusterSerializable clusterSerializable() { + return clusterSerializable; + } + } +} diff --git a/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HandlerCallBackAdapter.java b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HandlerCallBackAdapter.java new file mode 100644 index 0000000000..e01c35daed --- /dev/null +++ b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HandlerCallBackAdapter.java @@ -0,0 +1,42 @@ +/* + * Copyright 2020 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.vertx.spi.cluster.hazelcast.impl; + +import com.hazelcast.core.ExecutionCallback; +import io.vertx.core.Promise; + +/** + * @author Jaromir Hamala + */ +public class HandlerCallBackAdapter implements ExecutionCallback { + + private final Promise promise; + + public HandlerCallBackAdapter(Promise promise) { + this.promise = promise; + } + + @Override + public void onResponse(V v) { + promise.complete(v); + } + + @Override + public void onFailure(Throwable throwable) { + promise.fail(throwable); + } +} diff --git a/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastAsyncMap.java b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastAsyncMap.java new file mode 100644 index 0000000000..b9a896b75b --- /dev/null +++ b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastAsyncMap.java @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2011-2013 The original author or authors + * ------------------------------------------------------ + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * and Apache License v2.0 which accompanies this distribution. + * + * The Eclipse Public License is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * The Apache License v2.0 is available at + * http://www.opensource.org/licenses/apache2.0.php + * + * You may elect to redistribute this code under either of these licenses. 
+ */ + +package io.vertx.spi.cluster.hazelcast.impl; + +import com.hazelcast.core.IMap; +import io.vertx.core.Future; +import io.vertx.core.impl.VertxInternal; +import io.vertx.core.impl.future.PromiseInternal; +import io.vertx.core.shareddata.AsyncMap; + +import java.util.*; +import java.util.Map.Entry; +import java.util.concurrent.TimeUnit; + +import static io.vertx.spi.cluster.hazelcast.impl.ConversionUtils.convertParam; +import static io.vertx.spi.cluster.hazelcast.impl.ConversionUtils.convertReturn; + +public class HazelcastAsyncMap implements AsyncMap { + + private final VertxInternal vertx; + private final IMap map; + + public HazelcastAsyncMap(VertxInternal vertx, IMap map) { + this.vertx = vertx; + this.map = map; + } + + @Override + public Future get(K k) { + K kk = convertParam(k); + PromiseInternal promise = vertx.promise(); + map.getAsync(kk).andThen(new HandlerCallBackAdapter<>(promise)); + return promise.future().map(ConversionUtils::convertReturn); + } + + @Override + public Future put(K k, V v) { + K kk = convertParam(k); + V vv = convertParam(v); + PromiseInternal promise = vertx.promise(); + map.setAsync(kk, HazelcastServerID.convertServerID(vv)).andThen(new HandlerCallBackAdapter<>(promise)); + return promise.future(); + } + + @Override + public Future putIfAbsent(K k, V v) { + K kk = convertParam(k); + V vv = convertParam(v); + return vertx.executeBlocking(fut -> { + fut.complete(convertReturn(map.putIfAbsent(kk, HazelcastServerID.convertServerID(vv)))); + }, false); + } + + @Override + public Future put(K k, V v, long ttl) { + K kk = convertParam(k); + V vv = convertParam(v); + return vertx.executeBlocking(fut -> { + map.set(kk, HazelcastServerID.convertServerID(vv), ttl, TimeUnit.MILLISECONDS); + fut.complete(); + }, false); + } + + @Override + public Future putIfAbsent(K k, V v, long ttl) { + K kk = convertParam(k); + V vv = convertParam(v); + return vertx.executeBlocking(fut -> fut.complete(convertReturn(map.putIfAbsent(kk, 
HazelcastServerID.convertServerID(vv), + ttl, TimeUnit.MILLISECONDS))), false); + } + + @Override + public Future remove(K k) { + K kk = convertParam(k); + PromiseInternal promise = vertx.promise(); + map.removeAsync(kk).andThen(new HandlerCallBackAdapter<>(promise)); + return promise.future().map(ConversionUtils::convertReturn); + } + + @Override + public Future removeIfPresent(K k, V v) { + K kk = convertParam(k); + V vv = convertParam(v); + return vertx.executeBlocking(fut -> fut.complete(map.remove(kk, vv)), false); + } + + @Override + public Future replace(K k, V v) { + K kk = convertParam(k); + V vv = convertParam(v); + return vertx.executeBlocking(fut -> fut.complete(convertReturn(map.replace(kk, vv))), false); + } + + @Override + public Future replaceIfPresent(K k, V oldValue, V newValue) { + K kk = convertParam(k); + V vv = convertParam(oldValue); + V vvv = convertParam(newValue); + return vertx.executeBlocking(fut -> fut.complete(map.replace(kk, vv, vvv)), false); + } + + @Override + public Future clear() { + return vertx.executeBlocking(fut -> { + map.clear(); + fut.complete(); + }, false); + } + + @Override + public Future size() { + return vertx.executeBlocking(fut -> fut.complete(map.size()), false); + } + + @Override + public Future> keys() { + return vertx.executeBlocking(fut -> { + Set set = new HashSet<>(); + for (K kk : map.keySet()) { + K k = ConversionUtils.convertReturn(kk); + set.add(k); + } + fut.complete(set); + }, false); + } + + @Override + public Future> values() { + return vertx.executeBlocking(fut -> { + List list = new ArrayList<>(); + for (V vv : map.values()) { + V v = ConversionUtils.convertReturn(vv); + list.add(v); + } + fut.complete(list); + }, false); + } + + @Override + public Future> entries() { + return vertx.executeBlocking(fut -> { + Map result = new HashMap<>(); + for (Entry entry : map.entrySet()) { + K k = ConversionUtils.convertReturn(entry.getKey()); + V v = ConversionUtils.convertReturn(entry.getValue()); + 
result.put(k, v); + } + fut.complete(result); + }, false); + } +} diff --git a/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastCounter.java b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastCounter.java new file mode 100644 index 0000000000..28affc77f6 --- /dev/null +++ b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastCounter.java @@ -0,0 +1,129 @@ +/* + * Copyright 2020 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package io.vertx.spi.cluster.hazelcast.impl; + +import com.hazelcast.core.IAtomicLong; +import io.vertx.core.AsyncResult; +import io.vertx.core.Future; +import io.vertx.core.Handler; +import io.vertx.core.Promise; +import io.vertx.core.impl.VertxInternal; +import io.vertx.core.shareddata.Counter; + +import java.util.Objects; + +public class HazelcastCounter implements Counter { + + private final VertxInternal vertx; + private final IAtomicLong atomicLong; + + public HazelcastCounter(VertxInternal vertx, IAtomicLong atomicLong) { + this.vertx = vertx; + this.atomicLong = atomicLong; + } + + @Override + public Future get() { + Promise promise = vertx.promise(); + atomicLong.getAsync().andThen(new HandlerCallBackAdapter<>(promise)); + return promise.future(); + } + + @Override + public Future incrementAndGet() { + Promise promise = vertx.promise(); + atomicLong.incrementAndGetAsync().andThen(new HandlerCallBackAdapter<>(promise)); + return promise.future(); + } + + @Override + public Future getAndIncrement() { + Promise promise = vertx.promise(); + atomicLong.getAndIncrementAsync().andThen(new HandlerCallBackAdapter<>(promise)); + return promise.future(); + } + + @Override + public Future decrementAndGet() { + Promise promise = vertx.promise(); + atomicLong.decrementAndGetAsync().andThen(new HandlerCallBackAdapter<>(promise)); + return promise.future(); + } + + @Override + public Future addAndGet(long value) { + Promise promise = vertx.promise(); + atomicLong.addAndGetAsync(value).andThen(new HandlerCallBackAdapter<>(promise)); + return promise.future(); + } + + @Override + public Future getAndAdd(long value) { + Promise promise = vertx.promise(); + atomicLong.getAndAddAsync(value).andThen(new HandlerCallBackAdapter<>(promise)); + return promise.future(); + } + + @Override + public Future compareAndSet(long expected, long value) { + Promise promise = vertx.promise(); + atomicLong.compareAndSetAsync(expected, value).andThen(new 
HandlerCallBackAdapter<>(promise)); + return promise.future(); + } + + @Override + public void get(Handler> resultHandler) { + Objects.requireNonNull(resultHandler, "resultHandler"); + get().onComplete(resultHandler); + } + + @Override + public void incrementAndGet(Handler> resultHandler) { + Objects.requireNonNull(resultHandler, "resultHandler"); + incrementAndGet().onComplete(resultHandler); + } + + @Override + public void getAndIncrement(Handler> resultHandler) { + Objects.requireNonNull(resultHandler, "resultHandler"); + getAndIncrement().onComplete(resultHandler); + } + + @Override + public void decrementAndGet(Handler> resultHandler) { + Objects.requireNonNull(resultHandler, "resultHandler"); + decrementAndGet().onComplete(resultHandler); + } + + @Override + public void addAndGet(long value, Handler> resultHandler) { + Objects.requireNonNull(resultHandler, "resultHandler"); + addAndGet(value).onComplete(resultHandler); + } + + @Override + public void getAndAdd(long value, Handler> resultHandler) { + Objects.requireNonNull(resultHandler, "resultHandler"); + getAndAdd(value).onComplete(resultHandler); + } + + @Override + public void compareAndSet(long expected, long value, Handler> resultHandler) { + Objects.requireNonNull(resultHandler, "resultHandler"); + compareAndSet(expected, value).onComplete(resultHandler); + } +} diff --git a/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastLock.java b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastLock.java new file mode 100644 index 0000000000..f1af8da122 --- /dev/null +++ b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastLock.java @@ -0,0 +1,42 @@ +/* + * Copyright 2020 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.vertx.spi.cluster.hazelcast.impl; + +import com.hazelcast.core.ISemaphore; +import io.vertx.core.shareddata.Lock; + +import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicBoolean; + +public class HazelcastLock implements Lock { + + private final ISemaphore semaphore; + private final Executor lockReleaseExec; + private final AtomicBoolean released = new AtomicBoolean(); + + public HazelcastLock(ISemaphore semaphore, Executor lockReleaseExec) { + this.semaphore = semaphore; + this.lockReleaseExec = lockReleaseExec; + } + + @Override + public void release() { + if (released.compareAndSet(false, true)) { + lockReleaseExec.execute(semaphore::release); + } + } +} diff --git a/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastNodeInfo.java b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastNodeInfo.java new file mode 100644 index 0000000000..376808562c --- /dev/null +++ b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastNodeInfo.java @@ -0,0 +1,61 @@ +/* + * Copyright 2020 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.vertx.spi.cluster.hazelcast.impl; + +import com.hazelcast.nio.ObjectDataInput; +import com.hazelcast.nio.ObjectDataOutput; +import com.hazelcast.nio.serialization.DataSerializable; +import io.vertx.core.buffer.Buffer; +import io.vertx.core.json.JsonObject; +import io.vertx.core.spi.cluster.NodeInfo; + +import java.io.IOException; + +/** + * @author Thomas Segismont + */ +public class HazelcastNodeInfo implements DataSerializable { + + private NodeInfo nodeInfo; + + public HazelcastNodeInfo() { + } + + public HazelcastNodeInfo(NodeInfo nodeInfo) { + this.nodeInfo = nodeInfo; + } + + @Override + public void writeData(ObjectDataOutput dataOutput) throws IOException { + dataOutput.writeUTF(nodeInfo.host()); + dataOutput.writeInt(nodeInfo.port()); + JsonObject metadata = nodeInfo.metadata(); + dataOutput.writeByteArray(metadata != null ? metadata.toBuffer().getBytes() : null); + } + + @Override + public void readData(ObjectDataInput dataInput) throws IOException { + String host = dataInput.readUTF(); + int port = dataInput.readInt(); + byte[] bytes = dataInput.readByteArray(); + nodeInfo = new NodeInfo(host, port, bytes != null ? 
new JsonObject(Buffer.buffer(bytes)) : null); + } + + public NodeInfo unwrap() { + return nodeInfo; + } +} diff --git a/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastRegistrationInfo.java b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastRegistrationInfo.java new file mode 100644 index 0000000000..34d8d4c4ad --- /dev/null +++ b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastRegistrationInfo.java @@ -0,0 +1,76 @@ +/* + * Copyright 2020 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package io.vertx.spi.cluster.hazelcast.impl; + +import com.hazelcast.nio.ObjectDataInput; +import com.hazelcast.nio.ObjectDataOutput; +import com.hazelcast.nio.serialization.DataSerializable; +import io.vertx.core.spi.cluster.RegistrationInfo; + +import java.io.IOException; +import java.util.Objects; + +/** + * @author Thomas Segismont + */ +public class HazelcastRegistrationInfo implements DataSerializable { + + private RegistrationInfo registrationInfo; + + public HazelcastRegistrationInfo() { + } + + public HazelcastRegistrationInfo(RegistrationInfo registrationInfo) { + this.registrationInfo = Objects.requireNonNull(registrationInfo); + } + + public RegistrationInfo unwrap() { + return registrationInfo; + } + + @Override + public void writeData(ObjectDataOutput dataOutput) throws IOException { + dataOutput.writeUTF(registrationInfo.nodeId()); + dataOutput.writeLong(registrationInfo.seq()); + dataOutput.writeBoolean(registrationInfo.localOnly()); + } + + @Override + public void readData(ObjectDataInput dataInput) throws IOException { + registrationInfo = new RegistrationInfo(dataInput.readUTF(), dataInput.readLong(), dataInput.readBoolean()); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + HazelcastRegistrationInfo that = (HazelcastRegistrationInfo) o; + + return registrationInfo.equals(that.registrationInfo); + } + + @Override + public int hashCode() { + return registrationInfo.hashCode(); + } + + @Override + public String toString() { + return registrationInfo.toString(); + } +} diff --git a/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastServerID.java b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastServerID.java new file mode 100644 index 0000000000..cb3e186f85 --- /dev/null +++ 
b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/HazelcastServerID.java @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2011-2013 The original author or authors + * ------------------------------------------------------ + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * and Apache License v2.0 which accompanies this distribution. + * + * The Eclipse Public License is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * The Apache License v2.0 is available at + * http://www.opensource.org/licenses/apache2.0.php + * + * You may elect to redistribute this code under either of these licenses. + */ + +package io.vertx.spi.cluster.hazelcast.impl; + +import com.hazelcast.nio.ObjectDataInput; +import com.hazelcast.nio.ObjectDataOutput; +import com.hazelcast.nio.serialization.DataSerializable; +import io.vertx.core.net.impl.ServerID; + +import java.io.IOException; + +/** + * @author Tim Fox + */ +public class HazelcastServerID extends ServerID implements DataSerializable { + + public HazelcastServerID() { + } + + public HazelcastServerID(ServerID serverID) { + super(serverID.port, serverID.host); + } + + @Override + public void writeData(ObjectDataOutput dataOutput) throws IOException { + dataOutput.writeInt(port); + dataOutput.writeUTF(host); + } + + @Override + public void readData(ObjectDataInput dataInput) throws IOException { + port = dataInput.readInt(); + host = dataInput.readUTF(); + } + + // We replace any ServerID instances with HazelcastServerID - this allows them to be serialized more optimally using + // DataSerializable + public static V convertServerID(V val) { + if (val.getClass() == ServerID.class) { + ServerID sid = (ServerID)val; + HazelcastServerID hsid = new HazelcastServerID(sid); + return (V)hsid; + } else { + return val; + } + } + +} diff --git 
a/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/SubsMapHelper.java b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/SubsMapHelper.java new file mode 100644 index 0000000000..2e580d1c45 --- /dev/null +++ b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/SubsMapHelper.java @@ -0,0 +1,168 @@ +/* + * Copyright 2020 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package io.vertx.spi.cluster.hazelcast.impl; + +import com.hazelcast.core.*; +import io.vertx.core.impl.VertxInternal; +import io.vertx.core.impl.logging.Logger; +import io.vertx.core.impl.logging.LoggerFactory; +import io.vertx.core.spi.cluster.NodeSelector; +import io.vertx.core.spi.cluster.RegistrationInfo; +import io.vertx.core.spi.cluster.RegistrationUpdateEvent; + +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CopyOnWriteArraySet; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +/** + * @author Thomas Segismont + */ +public class SubsMapHelper implements EntryListener { + + private static final Logger log = LoggerFactory.getLogger(SubsMapHelper.class); + + private final VertxInternal vertx; + private final MultiMap map; + private final NodeSelector nodeSelector; + private final String listenerId; + + private final ConcurrentMap> ownSubs = new ConcurrentHashMap<>(); + private final ReadWriteLock republishLock = new ReentrantReadWriteLock(); + + public SubsMapHelper(VertxInternal vertx, HazelcastInstance hazelcast, NodeSelector nodeSelector) { + this.vertx = vertx; + map = hazelcast.getMultiMap("__vertx.subs"); + this.nodeSelector = nodeSelector; + listenerId = map.addEntryListener(this, false); + } + + public List get(String address) { + Lock readLock = republishLock.readLock(); + readLock.lock(); + try { + List list = new ArrayList<>(); + for (HazelcastRegistrationInfo registrationInfo : map.get(address)) { + list.add(registrationInfo.unwrap()); + } + return list; + } finally { + readLock.unlock(); + } + } + + public void put(String address, RegistrationInfo registrationInfo) { + Lock readLock = republishLock.readLock(); + readLock.lock(); + try { + ownSubs.compute(address, (add, curr) -> { + Set res = curr != null ? 
curr : new CopyOnWriteArraySet<>(); + res.add(registrationInfo); + return res; + }); + map.put(address, new HazelcastRegistrationInfo(registrationInfo)); + } finally { + readLock.unlock(); + } + } + + public void remove(String address, RegistrationInfo registrationInfo) { + Lock readLock = republishLock.readLock(); + readLock.lock(); + try { + ownSubs.computeIfPresent(address, (add, curr) -> { + curr.remove(registrationInfo); + return curr.isEmpty() ? null : curr; + }); + map.remove(address, new HazelcastRegistrationInfo(registrationInfo)); + } finally { + readLock.unlock(); + } + } + + public void removeAllForNodes(Set nodeIds) { + for (Map.Entry entry : map.entrySet()) { + HazelcastRegistrationInfo registrationInfo = entry.getValue(); + if (nodeIds.contains(registrationInfo.unwrap().nodeId())) { + map.remove(entry.getKey(), registrationInfo); + } + } + } + + public void republishOwnSubs() { + Lock writeLock = republishLock.writeLock(); + writeLock.lock(); + try { + for (Map.Entry> entry : ownSubs.entrySet()) { + String address = entry.getKey(); + for (RegistrationInfo registrationInfo : entry.getValue()) { + map.put(address, new HazelcastRegistrationInfo(registrationInfo)); + } + } + } finally { + writeLock.unlock(); + } + } + + @Override + public void entryAdded(EntryEvent event) { + fireRegistrationUpdateEvent(event); + } + + private void fireRegistrationUpdateEvent(EntryEvent event) { + String address = event.getKey(); + vertx.>executeBlocking(prom -> { + prom.complete(get(address)); + }, false, ar -> { + if (ar.succeeded()) { + nodeSelector.registrationsUpdated(new RegistrationUpdateEvent(address, ar.result())); + } else { + log.trace("A failure occured while retrieving the updated registrations", ar.cause()); + nodeSelector.registrationsUpdated(new RegistrationUpdateEvent(address, Collections.emptyList())); + } + }); + } + + @Override + public void entryEvicted(EntryEvent event) { + } + + @Override + public void entryRemoved(EntryEvent event) { + 
fireRegistrationUpdateEvent(event); + } + + @Override + public void entryUpdated(EntryEvent event) { + fireRegistrationUpdateEvent(event); + } + + @Override + public void mapCleared(MapEvent event) { + } + + @Override + public void mapEvicted(MapEvent event) { + } + + public void close() { + map.removeEntryListener(listenerId); + } +} diff --git a/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/SubsOpSerializer.java b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/SubsOpSerializer.java new file mode 100644 index 0000000000..766130a475 --- /dev/null +++ b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/impl/SubsOpSerializer.java @@ -0,0 +1,62 @@ +/* + * Copyright 2020 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package io.vertx.spi.cluster.hazelcast.impl; + +import io.vertx.core.Promise; +import io.vertx.core.impl.ContextInternal; +import io.vertx.core.impl.TaskQueue; +import io.vertx.core.impl.VertxInternal; +import io.vertx.core.spi.cluster.RegistrationInfo; + +import java.util.concurrent.ConcurrentMap; +import java.util.function.BiConsumer; + +/** + * @author Thomas Segismont + */ +public class SubsOpSerializer { + + private final VertxInternal vertx; + private final TaskQueue taskQueue; + + private SubsOpSerializer(VertxInternal vertx) { + this.vertx = vertx; + taskQueue = new TaskQueue(); + } + + public static SubsOpSerializer get(ContextInternal context) { + ConcurrentMap contextData = context.contextData(); + SubsOpSerializer instance = (SubsOpSerializer) contextData.get(SubsOpSerializer.class); + if (instance == null) { + SubsOpSerializer candidate = new SubsOpSerializer(context.owner()); + SubsOpSerializer previous = (SubsOpSerializer) contextData.putIfAbsent(SubsOpSerializer.class, candidate); + instance = previous == null ? 
candidate : previous; + } + return instance; + } + + public void execute(BiConsumer op, String address, RegistrationInfo registrationInfo, Promise promise) { + taskQueue.execute(() -> { + try { + op.accept(address, registrationInfo); + promise.complete(); + } catch (Exception e) { + promise.fail(e); + } + }, vertx.getWorkerPool().executor()); + } +} diff --git a/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/package-info.java b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/package-info.java new file mode 100644 index 0000000000..57e51b83cd --- /dev/null +++ b/hazelcast3-cluster-manager/src/main/java/io/vertx/spi/cluster/hazelcast/package-info.java @@ -0,0 +1,14 @@ +/* + * Copyright (c) 2011-2018 Contributors to the Eclipse Foundation + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 + * which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 + */ +@ModuleGen(name = "vertx-hazelcast", groupPackage = "io.vertx") +package io.vertx.spi.cluster.hazelcast; + +import io.vertx.codegen.annotations.ModuleGen; diff --git a/hazelcast3-cluster-manager/src/main/resources/META-INF/MANIFEST.MF b/hazelcast3-cluster-manager/src/main/resources/META-INF/MANIFEST.MF new file mode 100644 index 0000000000..83c0133545 --- /dev/null +++ b/hazelcast3-cluster-manager/src/main/resources/META-INF/MANIFEST.MF @@ -0,0 +1,2 @@ +Automatic-Module-Name: io.vertx.clustermanager.hazelcast + diff --git a/hazelcast3-cluster-manager/src/main/resources/META-INF/services/io.vertx.core.spi.VertxServiceProvider b/hazelcast3-cluster-manager/src/main/resources/META-INF/services/io.vertx.core.spi.VertxServiceProvider new file mode 100644 index 0000000000..9cd23bd519 --- /dev/null +++ b/hazelcast3-cluster-manager/src/main/resources/META-INF/services/io.vertx.core.spi.VertxServiceProvider @@ -0,0 +1 @@ +io.vertx.spi.cluster.hazelcast.HazelcastClusterManager \ No newline at end of file diff --git a/hazelcast3-cluster-manager/src/main/resources/default-cluster.xml b/hazelcast3-cluster-manager/src/main/resources/default-cluster.xml new file mode 100644 index 0000000000..1ea74aebbe --- /dev/null +++ b/hazelcast3-cluster-manager/src/main/resources/default-cluster.xml @@ -0,0 +1,169 @@ + + + + + + 5 + + + + dev + + http://localhost:8080/mancenter + + 5701 + + + + + 0 + + + + 224.2.2.3 + 54327 + + + 192.168.1.28 + + + my-access-key + my-secret-key + + us-west-1 + + ec2.amazonaws.com + + hazelcast-sg + type + hz-nodes + + + + 10.10.1.* + + + + + + PBEWithMD5AndDES + + thesalt + + thepass + + 19 + + + + + 16 + + 0 + + + + 1 + SET + + + + + + 1 + + 0 + + 0 + + NONE + + 0 + + 25 + + com.hazelcast.map.merge.LatestUpdateMapMergePolicy + + + + + 1 + + + + + 1 + + + diff --git a/hazelcast3-cluster-manager/src/test/java/io/vertx/Lifecycle.java b/hazelcast3-cluster-manager/src/test/java/io/vertx/Lifecycle.java 
new file mode 100644 index 0000000000..cbf6771317 --- /dev/null +++ b/hazelcast3-cluster-manager/src/test/java/io/vertx/Lifecycle.java @@ -0,0 +1,95 @@ +/* + * Copyright 2018 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.vertx; + +import com.hazelcast.core.HazelcastInstance; +import io.vertx.core.Vertx; +import io.vertx.core.impl.VertxInternal; +import io.vertx.core.impl.logging.Logger; +import io.vertx.core.impl.logging.LoggerFactory; +import io.vertx.core.spi.cluster.ClusterManager; +import io.vertx.core.spi.cluster.WrappedClusterManager; +import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; + +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static java.util.concurrent.TimeUnit.*; + +/** + * @author Thomas Segismont + */ +public class Lifecycle { + + private static final Logger log = LoggerFactory.getLogger(Lifecycle.class); + + public static void closeClustered(List clustered) throws Exception { + for (Vertx vertx : clustered) { + VertxInternal vertxInternal = (VertxInternal) vertx; + + HazelcastClusterManager clusterManager = getHazelcastClusterManager(vertxInternal.getClusterManager()); + + if (clusterManager != null) { + HazelcastInstance hazelcastInstance = clusterManager.getHazelcastInstance(); + + SECONDS.sleep(2); // Make sure rebalancing has been triggered + + long start = System.currentTimeMillis(); + try { 
+ while (!hazelcastInstance.getPartitionService().isClusterSafe() + && System.currentTimeMillis() - start < MILLISECONDS.convert(2, MINUTES)) { + MILLISECONDS.sleep(100); + } + } catch (Exception ignore) { + } + } + + CountDownLatch latch = new CountDownLatch(1); + vertxInternal.close(ar -> { + if (ar.failed()) { + log.error("Failed to shutdown vert.x", ar.cause()); + } + latch.countDown(); + }); + latch.await(2, TimeUnit.MINUTES); + } + } + + private static HazelcastClusterManager getHazelcastClusterManager(ClusterManager cm) { + if (cm == null) { + return null; + } + if (cm instanceof WrappedClusterManager) { + return getHazelcastClusterManager(((WrappedClusterManager) cm).getDelegate()); + } + if (cm instanceof HazelcastClusterManager) { + return (HazelcastClusterManager) cm; + } + throw new ClassCastException("Unexpected cluster manager implementation: " + cm.getClass()); + } + + public static void closeDataNodes(List dataNodes) throws Exception { + for (HazelcastInstance dataNode : dataNodes) { + dataNode.getLifecycleService().terminate(); + } + } + + private Lifecycle() { + // Utility + } +} diff --git a/hazelcast3-cluster-manager/src/test/java/io/vertx/LoggingTestWatcher.java b/hazelcast3-cluster-manager/src/test/java/io/vertx/LoggingTestWatcher.java new file mode 100644 index 0000000000..33e9a76c54 --- /dev/null +++ b/hazelcast3-cluster-manager/src/test/java/io/vertx/LoggingTestWatcher.java @@ -0,0 +1,31 @@ +/* + * Copyright 2018 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.vertx; + +import org.junit.rules.TestWatcher; +import org.junit.runner.Description; + +/** + * @author Thomas Segismont + */ +public class LoggingTestWatcher extends TestWatcher { + + @Override + protected void starting(Description description) { + System.out.printf("Running %s#%s %s", description.getClassName(), description.getMethodName(), System.getProperty("line.separator")); + } +} diff --git a/hazelcast3-cluster-manager/src/test/java/io/vertx/core/HazelcastComplexHATest.java b/hazelcast3-cluster-manager/src/test/java/io/vertx/core/HazelcastComplexHATest.java new file mode 100644 index 0000000000..5bd319dcbd --- /dev/null +++ b/hazelcast3-cluster-manager/src/test/java/io/vertx/core/HazelcastComplexHATest.java @@ -0,0 +1,48 @@ +/* + * Copyright 2018 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package io.vertx.core; + +import io.vertx.Lifecycle; +import io.vertx.core.spi.cluster.ClusterManager; +import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; + +import java.math.BigInteger; +import java.util.List; +import java.util.Random; + +/** + * @author Tim Fox + */ +public class HazelcastComplexHATest extends ComplexHATest { + + @Override + public void setUp() throws Exception { + Random random = new Random(); + System.setProperty("vertx.hazelcast.test.group.name", new BigInteger(128, random).toString(32)); + super.setUp(); + } + + @Override + protected ClusterManager getClusterManager() { + return new HazelcastClusterManager(); + } + + @Override + protected void closeClustered(List clustered) throws Exception { + Lifecycle.closeClustered(clustered); + } +} diff --git a/hazelcast3-cluster-manager/src/test/java/io/vertx/core/HazelcastHATest.java b/hazelcast3-cluster-manager/src/test/java/io/vertx/core/HazelcastHATest.java new file mode 100644 index 0000000000..4b0f75c8fc --- /dev/null +++ b/hazelcast3-cluster-manager/src/test/java/io/vertx/core/HazelcastHATest.java @@ -0,0 +1,60 @@ +/* + * Copyright 2018 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package io.vertx.core; + +import io.vertx.Lifecycle; +import io.vertx.LoggingTestWatcher; +import io.vertx.core.spi.cluster.ClusterManager; +import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; +import org.junit.Rule; + +import java.math.BigInteger; +import java.util.List; +import java.util.Random; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +/** + * @author Tim Fox + */ +public class HazelcastHATest extends HATest { + + @Rule + public LoggingTestWatcher watchman = new LoggingTestWatcher(); + + @Override + public void setUp() throws Exception { + Random random = new Random(); + System.setProperty("vertx.hazelcast.test.group.name", new BigInteger(128, random).toString(32)); + super.setUp(); + } + + @Override + protected ClusterManager getClusterManager() { + return new HazelcastClusterManager(); + } + + @Override + protected void closeClustered(List clustered) throws Exception { + Lifecycle.closeClustered(clustered); + } + + @Override + protected void awaitLatch(CountDownLatch latch) throws InterruptedException { + assertTrue(latch.await(30, TimeUnit.SECONDS)); + } +} diff --git a/hazelcast3-cluster-manager/src/test/java/io/vertx/core/ProgrammaticHazelcastClusterManagerTest.java b/hazelcast3-cluster-manager/src/test/java/io/vertx/core/ProgrammaticHazelcastClusterManagerTest.java new file mode 100644 index 0000000000..14d95ad20e --- /dev/null +++ b/hazelcast3-cluster-manager/src/test/java/io/vertx/core/ProgrammaticHazelcastClusterManagerTest.java @@ -0,0 +1,254 @@ +/* + * Copyright 2018 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.vertx.core; + +import com.hazelcast.config.Config; +import com.hazelcast.config.GroupConfig; +import com.hazelcast.core.Hazelcast; +import com.hazelcast.core.HazelcastInstance; +import io.vertx.LoggingTestWatcher; +import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; +import io.vertx.test.core.AsyncTestBase; +import org.junit.AfterClass; +import org.junit.Rule; +import org.junit.Test; + +import java.math.BigInteger; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.atomic.AtomicReference; + +/** + * @author Tim Fox + */ +public class ProgrammaticHazelcastClusterManagerTest extends AsyncTestBase { + + static { + // this is only checked once every 10 seconds by Hazelcast on client disconnect + System.setProperty("hazelcast.client.max.no.heartbeat.seconds", "9"); + } + + @Rule + public LoggingTestWatcher watchman = new LoggingTestWatcher(); + + @Override + public void setUp() throws Exception { + Random random = new Random(); + System.setProperty("vertx.hazelcast.test.group.name", new BigInteger(128, random).toString(32)); + super.setUp(); + } + + @Test + public void testProgrammaticSetConfig() throws Exception { + Config config = createConfig(); + HazelcastClusterManager mgr = new HazelcastClusterManager(); + mgr.setConfig(config); + testProgrammatic(mgr, config); + } + + @Test + public void testProgrammaticSetWithConstructor() throws Exception { + Config config = createConfig(); + HazelcastClusterManager mgr = new HazelcastClusterManager(config); + testProgrammatic(mgr, config); + } + + @Test + 
public void testCustomHazelcastInstance() throws Exception { + HazelcastInstance instance = Hazelcast.newHazelcastInstance(createConfig()); + HazelcastClusterManager mgr = new HazelcastClusterManager(instance); + testProgrammatic(mgr, instance.getConfig()); + } + + private Config createConfig() { + return new Config() + .setProperty("hazelcast.wait.seconds.before.join", "0") + .setProperty("hazelcast.local.localAddress", "127.0.0.1") + .setGroupConfig(new GroupConfig() + .setName(System.getProperty("vertx.hazelcast.test.group.name"))); + } + + private void testProgrammatic(HazelcastClusterManager mgr, Config config) throws Exception { + mgr.setConfig(config); + assertEquals(config, mgr.getConfig()); + VertxOptions options = new VertxOptions().setClusterManager(mgr); + Vertx.clusteredVertx(options, res -> { + assertTrue(res.succeeded()); + assertNotNull(mgr.getHazelcastInstance()); + res.result().close(res2 -> { + assertTrue(res2.succeeded()); + testComplete(); + }); + }); + await(); + } + + @Test + public void testEventBusWhenUsingACustomHazelcastInstance() throws Exception { + HazelcastInstance instance1 = Hazelcast.newHazelcastInstance(createConfig()); + HazelcastInstance instance2 = Hazelcast.newHazelcastInstance(createConfig()); + + HazelcastClusterManager mgr1 = new HazelcastClusterManager(instance1); + HazelcastClusterManager mgr2 = new HazelcastClusterManager(instance2); + VertxOptions options1 = new VertxOptions().setClusterManager(mgr1); + options1.getEventBusOptions().setHost("127.0.0.1"); + VertxOptions options2 = new VertxOptions().setClusterManager(mgr2); + options2.getEventBusOptions().setHost("127.0.0.1"); + + AtomicReference vertx1 = new AtomicReference<>(); + AtomicReference vertx2 = new AtomicReference<>(); + + Vertx.clusteredVertx(options1, res -> { + assertTrue(res.succeeded()); + assertNotNull(mgr1.getHazelcastInstance()); + res.result().eventBus().consumer("news", message -> { + assertNotNull(message); + 
assertTrue(message.body().equals("hello")); + testComplete(); + }); + vertx1.set(res.result()); + }); + + assertWaitUntil(() -> vertx1.get() != null); + + Vertx.clusteredVertx(options2, res -> { + assertTrue(res.succeeded()); + assertNotNull(mgr2.getHazelcastInstance()); + vertx2.set(res.result()); + res.result().eventBus().send("news", "hello"); + }); + + await(); + + vertx1.get().close(ar -> vertx1.set(null)); + vertx2.get().close(ar -> vertx2.set(null)); + + assertTrue(instance1.getLifecycleService().isRunning()); + assertTrue(instance2.getLifecycleService().isRunning()); + + assertWaitUntil(() -> vertx1.get() == null && vertx2.get() == null); + + instance1.shutdown(); + instance2.shutdown(); + + } + + @Test + public void testSharedDataUsingCustomHazelcast() throws Exception { + HazelcastInstance instance1 = Hazelcast.newHazelcastInstance(createConfig()); + HazelcastInstance instance2 = Hazelcast.newHazelcastInstance(createConfig()); + + HazelcastClusterManager mgr1 = new HazelcastClusterManager(instance1); + HazelcastClusterManager mgr2 = new HazelcastClusterManager(instance2); + VertxOptions options1 = new VertxOptions().setClusterManager(mgr1); + options1.getEventBusOptions().setHost("127.0.0.1"); + VertxOptions options2 = new VertxOptions().setClusterManager(mgr2); + options2.getEventBusOptions().setHost("127.0.0.1"); + + AtomicReference vertx1 = new AtomicReference<>(); + AtomicReference vertx2 = new AtomicReference<>(); + + Vertx.clusteredVertx(options1, res -> { + assertTrue(res.succeeded()); + assertNotNull(mgr1.getHazelcastInstance()); + res.result().sharedData().getClusterWideMap("mymap1", ar -> { + ar.result().put("news", "hello", v -> { + vertx1.set(res.result()); + }); + }); + }); + + assertWaitUntil(() -> vertx1.get() != null); + + Vertx.clusteredVertx(options2, res -> { + assertTrue(res.succeeded()); + assertNotNull(mgr2.getHazelcastInstance()); + vertx2.set(res.result()); + res.result().sharedData().getClusterWideMap("mymap1", ar -> { + 
ar.result().get("news", r -> { + assertEquals("hello", r.result()); + testComplete(); + }); + }); + }); + + await(); + + vertx1.get().close(ar -> vertx1.set(null)); + vertx2.get().close(ar -> vertx2.set(null)); + + assertWaitUntil(() -> vertx1.get() == null && vertx2.get() == null); + + // be sure stopping vertx did not cause or require our custom hazelcast to shutdown + + assertTrue(instance1.getLifecycleService().isRunning()); + assertTrue(instance2.getLifecycleService().isRunning()); + + instance1.shutdown(); + instance2.shutdown(); + + } + + @Test + public void testThatExternalHZInstanceCanBeShutdown() { + // This instance won't be used by vert.x + HazelcastInstance instance = Hazelcast.newHazelcastInstance(createConfig()); + String nodeID = instance.getCluster().getLocalMember().getUuid(); + + HazelcastClusterManager mgr = new HazelcastClusterManager(createConfig()); + VertxOptions options = new VertxOptions().setClusterManager(mgr); + options.getEventBusOptions().setHost("127.0.0.1"); + + AtomicReference vertx1 = new AtomicReference<>(); + + Vertx.clusteredVertx(options, res -> { + assertTrue(res.succeeded()); + assertNotNull(mgr.getHazelcastInstance()); + res.result().sharedData().getClusterWideMap("mymap1", ar -> { + ar.result().put("news", "hello", v -> { + vertx1.set(res.result()); + }); + }); + }); + + assertWaitUntil(() -> vertx1.get() != null); + int size = mgr.getNodes().size(); + assertTrue(mgr.getNodes().contains(nodeID)); + + // Retrieve the value inserted by vert.x + Map map = instance.getMap("mymap1"); + Map anotherMap = instance.getMap("mymap2"); + assertEquals(map.get("news"), "hello"); + map.put("another-key", "stuff"); + anotherMap.put("another-key", "stuff"); + map.remove("news"); + map.remove("another-key"); + anotherMap.remove("another-key"); + + instance.shutdown(); + + assertWaitUntil(() -> mgr.getNodes().size() == size - 1); + vertx1.get().close(ar -> vertx1.set(null)); + + assertWaitUntil(() -> vertx1.get() == null); + } + + 
@AfterClass + public static void afterTests() { + System.clearProperty("hazelcast.client.max.no.heartbeat.seconds"); + } +} diff --git a/hazelcast3-cluster-manager/src/test/java/io/vertx/core/eventbus/HazelcastClusteredEventbusTest.java b/hazelcast3-cluster-manager/src/test/java/io/vertx/core/eventbus/HazelcastClusteredEventbusTest.java new file mode 100644 index 0000000000..31dd9a914b --- /dev/null +++ b/hazelcast3-cluster-manager/src/test/java/io/vertx/core/eventbus/HazelcastClusteredEventbusTest.java @@ -0,0 +1,54 @@ +/* + * Copyright 2018 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package io.vertx.core.eventbus; + +import io.vertx.Lifecycle; +import io.vertx.LoggingTestWatcher; +import io.vertx.core.Vertx; +import io.vertx.core.spi.cluster.ClusterManager; +import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; +import org.junit.Rule; + +import java.math.BigInteger; +import java.util.List; +import java.util.Random; + +/** + * @author Tim Fox + */ +public class HazelcastClusteredEventbusTest extends ClusteredEventBusTest { + + @Rule + public LoggingTestWatcher watchman = new LoggingTestWatcher(); + + @Override + public void setUp() throws Exception { + Random random = new Random(); + System.setProperty("vertx.hazelcast.test.group.name", new BigInteger(128, random).toString(32)); + super.setUp(); + } + + @Override + protected ClusterManager getClusterManager() { + return new HazelcastClusterManager(); + } + + @Override + protected void closeClustered(List clustered) throws Exception { + Lifecycle.closeClustered(clustered); + } +} diff --git a/hazelcast3-cluster-manager/src/test/java/io/vertx/core/eventbus/HazelcastFaultToleranceTest.java b/hazelcast3-cluster-manager/src/test/java/io/vertx/core/eventbus/HazelcastFaultToleranceTest.java new file mode 100644 index 0000000000..45203354a6 --- /dev/null +++ b/hazelcast3-cluster-manager/src/test/java/io/vertx/core/eventbus/HazelcastFaultToleranceTest.java @@ -0,0 +1,63 @@ +/* + * Copyright 2018 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.vertx.core.eventbus; + +import io.vertx.core.spi.cluster.ClusterManager; +import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; + +import java.math.BigInteger; +import java.util.Arrays; +import java.util.List; +import java.util.Random; + +/** + * @author Thomas Segismont + */ +public class HazelcastFaultToleranceTest extends FaultToleranceTest { + + private String groupName; + + @Override + public void setUp() throws Exception { + Random random = new Random(); + groupName = new BigInteger(128, random).toString(32); + System.setProperty("vertx.hazelcast.test.group.name", groupName); + super.setUp(); + } + + @Override + protected ClusterManager getClusterManager() { + return new HazelcastClusterManager(); + } + + @Override + protected List getExternalNodeSystemProperties() { + return Arrays.asList( + "-Dvertx.logger-delegate-factory-class-name=io.vertx.core.logging.SLF4JLogDelegateFactory", + "-Dhazelcast.logging.type=slf4j", + "-Djava.net.preferIPv4Stack=true", + "-Dvertx.hazelcast.test.group.name=" + groupName + ); + } + + @Override + protected void afterNodesKilled() throws Exception { + super.afterNodesKilled(); + // Additional wait to make sure all nodes noticed the shutdowns + Thread.sleep(30_000); + } +} diff --git a/hazelcast3-cluster-manager/src/test/java/io/vertx/core/eventbus/HazelcastNodeInfoTest.java b/hazelcast3-cluster-manager/src/test/java/io/vertx/core/eventbus/HazelcastNodeInfoTest.java new file mode 100644 index 0000000000..4db9f81142 --- /dev/null +++ b/hazelcast3-cluster-manager/src/test/java/io/vertx/core/eventbus/HazelcastNodeInfoTest.java @@ -0,0 +1,54 @@ +/* + * Copyright 2018 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.vertx.core.eventbus; + +import io.vertx.Lifecycle; +import io.vertx.LoggingTestWatcher; +import io.vertx.core.Vertx; +import io.vertx.core.spi.cluster.ClusterManager; +import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; +import org.junit.Rule; + +import java.math.BigInteger; +import java.util.List; +import java.util.Random; + +/** + * @author Tim Fox + */ +public class HazelcastNodeInfoTest extends NodeInfoTest { + + @Rule + public LoggingTestWatcher watchman = new LoggingTestWatcher(); + + @Override + public void setUp() throws Exception { + Random random = new Random(); + System.setProperty("vertx.hazelcast.test.group.name", new BigInteger(128, random).toString(32)); + super.setUp(); + } + + @Override + protected ClusterManager getClusterManager() { + return new HazelcastClusterManager(); + } + + @Override + protected void closeClustered(List clustered) throws Exception { + Lifecycle.closeClustered(clustered); + } +} diff --git a/hazelcast3-cluster-manager/src/test/java/io/vertx/core/shareddata/HazelcastClusteredAsyncMapTest.java b/hazelcast3-cluster-manager/src/test/java/io/vertx/core/shareddata/HazelcastClusteredAsyncMapTest.java new file mode 100644 index 0000000000..4fd5cc5039 --- /dev/null +++ b/hazelcast3-cluster-manager/src/test/java/io/vertx/core/shareddata/HazelcastClusteredAsyncMapTest.java @@ -0,0 +1,91 @@ +/* + * Copyright 2018 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.vertx.core.shareddata; + +import io.vertx.Lifecycle; +import io.vertx.LoggingTestWatcher; +import io.vertx.core.Vertx; +import io.vertx.core.spi.cluster.ClusterManager; +import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; +import org.junit.Ignore; +import org.junit.Rule; +import org.junit.Test; + +import java.math.BigInteger; +import java.util.List; +import java.util.Random; + +/** + * @author Tim Fox + */ +public class HazelcastClusteredAsyncMapTest extends ClusteredAsyncMapTest { + + @Rule + public LoggingTestWatcher watchman = new LoggingTestWatcher(); + + @Override + public void setUp() throws Exception { + Random random = new Random(); + System.setProperty("vertx.hazelcast.test.group.name", new BigInteger(128, random).toString(32)); + super.setUp(); + } + + @Override + protected ClusterManager getClusterManager() { + return new HazelcastClusterManager(); + } + + @Override + protected void closeClustered(List clustered) throws Exception { + Lifecycle.closeClustered(clustered); + } + + @Override + @Test + @Ignore("Hazelcast removes the binding even if a new entry is added without ttl") + public void testMapPutTtlThenPut() { + super.testMapPutTtlThenPut(); + } + + @Override + @Test + @Ignore + public void testMapReplaceIfPresentTtl() { + super.testMapReplaceIfPresentTtl(); + } + + @Override + @Test + @Ignore + public void testMapReplaceIfPresentTtlWhenNotPresent() { + super.testMapReplaceIfPresentTtlWhenNotPresent(); + } + + @Override + @Test + @Ignore + public void testMapReplaceTtl() { + super.testMapReplaceTtl(); + } 
+ + @Override + @Test + @Ignore + public void testMapReplaceTtlWithPreviousValue() { + super.testMapReplaceTtlWithPreviousValue(); + } +} diff --git a/hazelcast3-cluster-manager/src/test/java/io/vertx/core/shareddata/HazelcastClusteredAsynchronousLockTest.java b/hazelcast3-cluster-manager/src/test/java/io/vertx/core/shareddata/HazelcastClusteredAsynchronousLockTest.java new file mode 100644 index 0000000000..55dde1745d --- /dev/null +++ b/hazelcast3-cluster-manager/src/test/java/io/vertx/core/shareddata/HazelcastClusteredAsynchronousLockTest.java @@ -0,0 +1,67 @@ +/* + * Copyright 2018 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package io.vertx.core.shareddata; + +import io.vertx.Lifecycle; +import io.vertx.LoggingTestWatcher; +import io.vertx.core.Vertx; +import io.vertx.core.spi.cluster.ClusterManager; +import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; +import org.junit.Rule; +import org.junit.Test; + +import java.math.BigInteger; +import java.util.List; +import java.util.Random; + +/** + * @author Tim Fox + */ +public class HazelcastClusteredAsynchronousLockTest extends ClusteredAsynchronousLockTest { + + @Rule + public LoggingTestWatcher watchman = new LoggingTestWatcher(); + + @Override + public void setUp() throws Exception { + Random random = new Random(); + System.setProperty("vertx.hazelcast.test.group.name", new BigInteger(128, random).toString(32)); + super.setUp(); + } + + @Override + protected ClusterManager getClusterManager() { + return new HazelcastClusterManager(); + } + + @Override + protected void closeClustered(List clustered) throws Exception { + Lifecycle.closeClustered(clustered); + } + + @Override + @Test + public void testLockReleasedForClosedNode() throws Exception { + super.testLockReleasedForClosedNode(); + } + + @Override + @Test + public void testLockReleasedForKilledNode() throws Exception { + super.testLockReleasedForKilledNode(); + } +} diff --git a/hazelcast3-cluster-manager/src/test/java/io/vertx/core/shareddata/HazelcastClusteredSharedCounterTest.java b/hazelcast3-cluster-manager/src/test/java/io/vertx/core/shareddata/HazelcastClusteredSharedCounterTest.java new file mode 100644 index 0000000000..38123763a6 --- /dev/null +++ b/hazelcast3-cluster-manager/src/test/java/io/vertx/core/shareddata/HazelcastClusteredSharedCounterTest.java @@ -0,0 +1,54 @@ +/* + * Copyright 2018 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.vertx.core.shareddata; + +import io.vertx.Lifecycle; +import io.vertx.LoggingTestWatcher; +import io.vertx.core.Vertx; +import io.vertx.core.spi.cluster.ClusterManager; +import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; +import org.junit.Rule; + +import java.math.BigInteger; +import java.util.List; +import java.util.Random; + +/** + * @author Tim Fox + */ +public class HazelcastClusteredSharedCounterTest extends ClusteredSharedCounterTest { + + @Rule + public LoggingTestWatcher watchman = new LoggingTestWatcher(); + + @Override + public void setUp() throws Exception { + Random random = new Random(); + System.setProperty("vertx.hazelcast.test.group.name", new BigInteger(128, random).toString(32)); + super.setUp(); + } + + @Override + protected ClusterManager getClusterManager() { + return new HazelcastClusterManager(); + } + + @Override + protected void closeClustered(List clustered) throws Exception { + Lifecycle.closeClustered(clustered); + } +} diff --git a/hazelcast3-cluster-manager/src/test/java/io/vertx/ext/web/sstore/HazelcastClusteredSessionHandlerTest.java b/hazelcast3-cluster-manager/src/test/java/io/vertx/ext/web/sstore/HazelcastClusteredSessionHandlerTest.java new file mode 100644 index 0000000000..f5944b2350 --- /dev/null +++ b/hazelcast3-cluster-manager/src/test/java/io/vertx/ext/web/sstore/HazelcastClusteredSessionHandlerTest.java @@ -0,0 +1,59 @@ +/* + * Copyright 2018 Red Hat, Inc. 
+ * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.vertx.ext.web.sstore; + +import io.vertx.Lifecycle; +import io.vertx.LoggingTestWatcher; +import io.vertx.core.Vertx; +import io.vertx.core.spi.cluster.ClusterManager; +import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; +import org.junit.Rule; + +import java.math.BigInteger; +import java.util.List; +import java.util.Random; + +/** + * @author Thomas Segismont + */ +public class HazelcastClusteredSessionHandlerTest extends ClusteredSessionHandlerTest { + + static { + System.setProperty("hazelcast.wait.seconds.before.join", "0"); + System.setProperty("hazelcast.local.localAddress", "127.0.0.1"); + } + + @Rule + public LoggingTestWatcher watchman = new LoggingTestWatcher(); + + @Override + public void setUp() throws Exception { + Random random = new Random(); + System.setProperty("vertx.hazelcast.test.group.name", new BigInteger(128, random).toString(32)); + super.setUp(); + } + + @Override + protected ClusterManager getClusterManager() { + return new HazelcastClusterManager(); + } + + @Override + protected void closeClustered(List clustered) throws Exception { + Lifecycle.closeClustered(clustered); + } +} diff --git a/hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastClusteredAsyncMapTest.java b/hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastClusteredAsyncMapTest.java new file mode 100644 index 
0000000000..ce5c2b8128 --- /dev/null +++ b/hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastClusteredAsyncMapTest.java @@ -0,0 +1,109 @@ +/* + * Copyright 2018 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.vertx.it.litemembers; + +import com.hazelcast.core.Hazelcast; +import com.hazelcast.core.HazelcastInstance; +import io.vertx.Lifecycle; +import io.vertx.LoggingTestWatcher; +import io.vertx.core.Vertx; +import io.vertx.core.shareddata.ClusteredAsyncMapTest; +import io.vertx.core.spi.cluster.ClusterManager; +import io.vertx.spi.cluster.hazelcast.ConfigUtil; +import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; +import org.junit.Ignore; +import org.junit.Rule; +import org.junit.Test; + +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +/** + * @author Thomas Segismont + */ +public class HazelcastClusteredAsyncMapTest extends ClusteredAsyncMapTest { + + private static final int DATA_NODES = Integer.getInteger("litemembers.datanodes.count", 1); + + @Rule + public LoggingTestWatcher watchman = new LoggingTestWatcher(); + + private List dataNodes = new ArrayList<>(); + + @Override + public void setUp() throws Exception { + Random random = new Random(); + System.setProperty("vertx.hazelcast.test.group.name", new BigInteger(128, random).toString(32)); +for (int i = 0; i < DATA_NODES; i++) { + 
dataNodes.add(Hazelcast.newHazelcastInstance(ConfigUtil.loadConfig())); +} + super.setUp(); + } + + @Override + protected ClusterManager getClusterManager() { + return new HazelcastClusterManager(ConfigUtil.loadConfig().setLiteMember(true)); + } + + @Override + protected void tearDown() throws Exception { + super.tearDown(); + Lifecycle.closeDataNodes(dataNodes); + } + + @Override + protected void closeClustered(List clustered) throws Exception { + Lifecycle.closeClustered(clustered); + } + + @Override + @Test + @Ignore("Hazelcast removes the binding even if a new entry is added without ttl") + public void testMapPutTtlThenPut() { + super.testMapPutTtlThenPut(); + } + + @Override + @Test + @Ignore + public void testMapReplaceIfPresentTtl() { + super.testMapReplaceIfPresentTtl(); + } + + @Override + @Test + @Ignore + public void testMapReplaceIfPresentTtlWhenNotPresent() { + super.testMapReplaceIfPresentTtlWhenNotPresent(); + } + + @Override + @Test + @Ignore + public void testMapReplaceTtl() { + super.testMapReplaceTtl(); + } + + @Override + @Test + @Ignore + public void testMapReplaceTtlWithPreviousValue() { + super.testMapReplaceTtlWithPreviousValue(); + } +} diff --git a/hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastClusteredAsynchronousLockTest.java b/hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastClusteredAsynchronousLockTest.java new file mode 100644 index 0000000000..b134b61e7e --- /dev/null +++ b/hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastClusteredAsynchronousLockTest.java @@ -0,0 +1,85 @@ +/* + * Copyright 2018 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. 
You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.vertx.it.litemembers; + +import com.hazelcast.core.Hazelcast; +import com.hazelcast.core.HazelcastInstance; +import io.vertx.Lifecycle; +import io.vertx.LoggingTestWatcher; +import io.vertx.core.Vertx; +import io.vertx.core.shareddata.ClusteredAsynchronousLockTest; +import io.vertx.core.spi.cluster.ClusterManager; +import io.vertx.spi.cluster.hazelcast.ConfigUtil; +import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; +import org.junit.Rule; +import org.junit.Test; + +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +/** + * @author Thomas Segismont + */ +public class HazelcastClusteredAsynchronousLockTest extends ClusteredAsynchronousLockTest { + + private static final int DATA_NODES = Integer.getInteger("litemembers.datanodes.count", 1); + + @Rule + public LoggingTestWatcher watchman = new LoggingTestWatcher(); + + private List dataNodes = new ArrayList<>(); + + @Override + public void setUp() throws Exception { + Random random = new Random(); + System.setProperty("vertx.hazelcast.test.group.name", new BigInteger(128, random).toString(32)); +for (int i = 0; i < DATA_NODES; i++) { + dataNodes.add(Hazelcast.newHazelcastInstance(ConfigUtil.loadConfig())); +} + super.setUp(); + } + + @Override + protected ClusterManager getClusterManager() { + return new HazelcastClusterManager(ConfigUtil.loadConfig().setLiteMember(true)); + } + + @Override + protected void tearDown() throws Exception { + super.tearDown(); + Lifecycle.closeDataNodes(dataNodes); + } + + @Override + 
protected void closeClustered(List clustered) throws Exception { + Lifecycle.closeClustered(clustered); + } + + @Override + @Test + public void testLockReleasedForClosedNode() throws Exception { + super.testLockReleasedForClosedNode(); + } + + @Override + @Test + public void testLockReleasedForKilledNode() throws Exception { + super.testLockReleasedForKilledNode(); + } +} diff --git a/hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastClusteredEventbusTest.java b/hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastClusteredEventbusTest.java new file mode 100644 index 0000000000..e829a18b92 --- /dev/null +++ b/hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastClusteredEventbusTest.java @@ -0,0 +1,72 @@ +/* + * Copyright 2018 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package io.vertx.it.litemembers; + +import com.hazelcast.core.Hazelcast; +import com.hazelcast.core.HazelcastInstance; +import io.vertx.Lifecycle; +import io.vertx.LoggingTestWatcher; +import io.vertx.core.Vertx; +import io.vertx.core.eventbus.ClusteredEventBusTest; +import io.vertx.core.spi.cluster.ClusterManager; +import io.vertx.spi.cluster.hazelcast.ConfigUtil; +import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; +import org.junit.Rule; + +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +/** + * @author Thomas Segismont + */ +public class HazelcastClusteredEventbusTest extends ClusteredEventBusTest { + + private static final int DATA_NODES = Integer.getInteger("litemembers.datanodes.count", 1); + + @Rule + public LoggingTestWatcher watchman = new LoggingTestWatcher(); + + private List dataNodes = new ArrayList<>(); + + @Override + public void setUp() throws Exception { + Random random = new Random(); + System.setProperty("vertx.hazelcast.test.group.name", new BigInteger(128, random).toString(32)); +for (int i = 0; i < DATA_NODES; i++) { + dataNodes.add(Hazelcast.newHazelcastInstance(ConfigUtil.loadConfig())); +} + super.setUp(); + } + + @Override + protected ClusterManager getClusterManager() { + return new HazelcastClusterManager(ConfigUtil.loadConfig().setLiteMember(true)); + } + + @Override + protected void tearDown() throws Exception { + super.tearDown(); + Lifecycle.closeDataNodes(dataNodes); + } + + @Override + protected void closeClustered(List clustered) throws Exception { + Lifecycle.closeClustered(clustered); + } +} diff --git a/hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastClusteredSharedCounterTest.java b/hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastClusteredSharedCounterTest.java new file mode 100644 index 0000000000..0908cb39d0 --- /dev/null +++ 
b/hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastClusteredSharedCounterTest.java @@ -0,0 +1,72 @@ +/* + * Copyright 2018 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.vertx.it.litemembers; + +import com.hazelcast.core.Hazelcast; +import com.hazelcast.core.HazelcastInstance; +import io.vertx.Lifecycle; +import io.vertx.LoggingTestWatcher; +import io.vertx.core.Vertx; +import io.vertx.core.shareddata.ClusteredSharedCounterTest; +import io.vertx.core.spi.cluster.ClusterManager; +import io.vertx.spi.cluster.hazelcast.ConfigUtil; +import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; +import org.junit.Rule; + +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +/** + * @author Thomas Segismont + */ +public class HazelcastClusteredSharedCounterTest extends ClusteredSharedCounterTest { + + private static final int DATA_NODES = Integer.getInteger("litemembers.datanodes.count", 1); + + @Rule + public LoggingTestWatcher watchman = new LoggingTestWatcher(); + + private List dataNodes = new ArrayList<>(); + + @Override + public void setUp() throws Exception { + Random random = new Random(); + System.setProperty("vertx.hazelcast.test.group.name", new BigInteger(128, random).toString(32)); +for (int i = 0; i < DATA_NODES; i++) { + dataNodes.add(Hazelcast.newHazelcastInstance(ConfigUtil.loadConfig())); +} + 
super.setUp(); + } + + @Override + protected ClusterManager getClusterManager() { + return new HazelcastClusterManager(ConfigUtil.loadConfig().setLiteMember(true)); + } + + @Override + protected void tearDown() throws Exception { + super.tearDown(); + Lifecycle.closeDataNodes(dataNodes); + } + + @Override + protected void closeClustered(List clustered) throws Exception { + Lifecycle.closeClustered(clustered); + } +} diff --git a/hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastComplexHATest.java b/hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastComplexHATest.java new file mode 100644 index 0000000000..fd0896ee93 --- /dev/null +++ b/hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastComplexHATest.java @@ -0,0 +1,67 @@ +/* + * Copyright 2018 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package io.vertx.it.litemembers; + +import com.hazelcast.core.Hazelcast; +import com.hazelcast.core.HazelcastInstance; +import io.vertx.Lifecycle; +import io.vertx.core.ComplexHATest; +import io.vertx.core.Vertx; +import io.vertx.core.spi.cluster.ClusterManager; +import io.vertx.spi.cluster.hazelcast.ConfigUtil; +import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; + +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +/** + * @author Thomas Segismont + */ +public class HazelcastComplexHATest extends ComplexHATest { + + private static final int DATA_NODES = Integer.getInteger("litemembers.datanodes.count", 1); + + private List dataNodes = new ArrayList<>(); + + @Override + public void setUp() throws Exception { + Random random = new Random(); + System.setProperty("vertx.hazelcast.test.group.name", new BigInteger(128, random).toString(32)); +for (int i = 0; i < DATA_NODES; i++) { + dataNodes.add(Hazelcast.newHazelcastInstance(ConfigUtil.loadConfig())); +} + super.setUp(); + } + + @Override + protected ClusterManager getClusterManager() { + return new HazelcastClusterManager(ConfigUtil.loadConfig().setLiteMember(true)); + } + + @Override + protected void tearDown() throws Exception { + super.tearDown(); + Lifecycle.closeDataNodes(dataNodes); + } + + @Override + protected void closeClustered(List clustered) throws Exception { + Lifecycle.closeClustered(clustered); + } +} diff --git a/hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastHATest.java b/hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastHATest.java new file mode 100644 index 0000000000..fdd101f995 --- /dev/null +++ b/hazelcast3-cluster-manager/src/test/java/io/vertx/it/litemembers/HazelcastHATest.java @@ -0,0 +1,79 @@ +/* + * Copyright 2018 Red Hat, Inc. 
+ * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.vertx.it.litemembers; + +import com.hazelcast.core.Hazelcast; +import com.hazelcast.core.HazelcastInstance; +import io.vertx.Lifecycle; +import io.vertx.LoggingTestWatcher; +import io.vertx.core.HATest; +import io.vertx.core.Vertx; +import io.vertx.core.spi.cluster.ClusterManager; +import io.vertx.spi.cluster.hazelcast.ConfigUtil; +import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; +import org.junit.Rule; + +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +/** + * @author Thomas Segismont + */ +public class HazelcastHATest extends HATest { + + private static final int DATA_NODES = Integer.getInteger("litemembers.datanodes.count", 1); + + @Rule + public LoggingTestWatcher watchman = new LoggingTestWatcher(); + + private List dataNodes = new ArrayList<>(); + + @Override + public void setUp() throws Exception { + Random random = new Random(); + System.setProperty("vertx.hazelcast.test.group.name", new BigInteger(128, random).toString(32)); +for (int i = 0; i < DATA_NODES; i++) { + dataNodes.add(Hazelcast.newHazelcastInstance(ConfigUtil.loadConfig())); +} + super.setUp(); + } + + @Override + protected ClusterManager getClusterManager() { + return new 
HazelcastClusterManager(ConfigUtil.loadConfig().setLiteMember(true)); + } + + @Override + protected void tearDown() throws Exception { + super.tearDown(); + Lifecycle.closeDataNodes(dataNodes); + } + + @Override + protected void closeClustered(List clustered) throws Exception { + Lifecycle.closeClustered(clustered); + } + + @Override + protected void awaitLatch(CountDownLatch latch) throws InterruptedException { + assertTrue(latch.await(30, TimeUnit.SECONDS)); + } +} diff --git a/hazelcast3-cluster-manager/src/test/java/io/vertx/servicediscovery/impl/HazelcastDiscoveryImplClusteredTest.java b/hazelcast3-cluster-manager/src/test/java/io/vertx/servicediscovery/impl/HazelcastDiscoveryImplClusteredTest.java new file mode 100644 index 0000000000..6014782313 --- /dev/null +++ b/hazelcast3-cluster-manager/src/test/java/io/vertx/servicediscovery/impl/HazelcastDiscoveryImplClusteredTest.java @@ -0,0 +1,57 @@ +/* + * Copyright 2018 Red Hat, Inc. + * + * Red Hat licenses this file to you under the Apache License, version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package io.vertx.servicediscovery.impl; + +import io.vertx.LoggingTestWatcher; +import io.vertx.core.Vertx; +import io.vertx.core.VertxOptions; +import io.vertx.servicediscovery.ServiceDiscoveryOptions; +import io.vertx.spi.cluster.hazelcast.HazelcastClusterManager; +import org.junit.Before; +import org.junit.Rule; + +import java.math.BigInteger; +import java.util.Random; + +import static com.jayway.awaitility.Awaitility.await; + +/** + * @author Thomas Segismont + */ +public class HazelcastDiscoveryImplClusteredTest extends DiscoveryImplTestBase { + + static { + System.setProperty("hazelcast.wait.seconds.before.join", "0"); + System.setProperty("hazelcast.local.localAddress", "127.0.0.1"); + } + + @Rule + public LoggingTestWatcher watchman = new LoggingTestWatcher(); + + @Before + public void setUp() { + Random random = new Random(); + System.setProperty("vertx.hazelcast.test.group.name", new BigInteger(128, random).toString(32)); +VertxOptions options = new VertxOptions() + .setClusterManager(new HazelcastClusterManager()); + Vertx.clusteredVertx(options, ar -> { + vertx = ar.result(); + }); + await().until(() -> vertx != null); + discovery = new DiscoveryImpl(vertx, new ServiceDiscoveryOptions()); + } +} diff --git a/hazelcast3-cluster-manager/src/test/resources/cluster.xml b/hazelcast3-cluster-manager/src/test/resources/cluster.xml new file mode 100644 index 0000000000..25cc750bc3 --- /dev/null +++ b/hazelcast3-cluster-manager/src/test/resources/cluster.xml @@ -0,0 +1,120 @@ + + + + + + 0 + 127.0.0.1 + + + + ${vertx.hazelcast.test.group.name} + + http://localhost:8080/mancenter + + + + + + + + 16 + + 0 + + + + 1 + SET + + + + + + 1 + + 0 + + 0 + + NONE + + 0 + + 25 + + com.hazelcast.map.merge.LatestUpdateMapMergePolicy + + + + + 1 + + + + + 1 + + + diff --git a/hazelcast3-cluster-manager/src/test/resources/logback-test.xml b/hazelcast3-cluster-manager/src/test/resources/logback-test.xml new file mode 100644 index 0000000000..abb46dcf37 --- /dev/null +++ 
b/hazelcast3-cluster-manager/src/test/resources/logback-test.xml @@ -0,0 +1,31 @@ + + + + + + + %d [%thread] %-5level %logger{36} - %msg%n + + + + + + + + + + diff --git a/mdm/api/src/main/java/com/gentics/mesh/context/InternalActionContext.java b/mdm/api/src/main/java/com/gentics/mesh/context/InternalActionContext.java index 9d21a92f40..9050437b22 100644 --- a/mdm/api/src/main/java/com/gentics/mesh/context/InternalActionContext.java +++ b/mdm/api/src/main/java/com/gentics/mesh/context/InternalActionContext.java @@ -1,5 +1,6 @@ package com.gentics.mesh.context; +import java.util.List; import java.util.Set; import com.gentics.mesh.core.data.user.HibUser; @@ -14,8 +15,8 @@ import io.vertx.core.AsyncResult; import io.vertx.core.Handler; import io.vertx.core.MultiMap; +import io.vertx.core.http.Cookie; import io.vertx.core.spi.logging.LogDelegate; -import io.vertx.ext.web.Cookie; import io.vertx.ext.web.FileUpload; /** @@ -67,11 +68,11 @@ public interface InternalActionContext extends ActionContext, ParameterProviderC void send(HttpResponseStatus status); /** - * Return the set of fileuploads that are accessible through the context. + * Return the list of fileuploads that are accessible through the context. * * @return */ - Set getFileUploads(); + List getFileUploads(); /** * Return the request headers. 
diff --git a/mdm/common/src/main/java/com/gentics/mesh/context/impl/InternalRoutingActionContextImpl.java b/mdm/common/src/main/java/com/gentics/mesh/context/impl/InternalRoutingActionContextImpl.java index 1625410996..557816cc12 100644 --- a/mdm/common/src/main/java/com/gentics/mesh/context/impl/InternalRoutingActionContextImpl.java +++ b/mdm/common/src/main/java/com/gentics/mesh/context/impl/InternalRoutingActionContextImpl.java @@ -4,10 +4,7 @@ import static io.vertx.core.http.HttpHeaders.CACHE_CONTROL; import static io.vertx.core.http.HttpHeaders.CONTENT_TYPE; -import java.util.Collections; -import java.util.Locale; -import java.util.Map; -import java.util.Set; +import java.util.*; import java.util.concurrent.ConcurrentHashMap; import com.gentics.mesh.context.AbstractInternalActionContext; @@ -20,11 +17,11 @@ import io.netty.handler.codec.http.HttpResponseStatus; import io.vertx.core.MultiMap; +import io.vertx.core.http.Cookie; import io.vertx.core.http.HttpHeaders; import io.vertx.core.http.HttpServerResponse; import io.vertx.core.logging.Logger; import io.vertx.core.logging.LoggerFactory; -import io.vertx.ext.web.Cookie; import io.vertx.ext.web.FileUpload; import io.vertx.ext.web.RoutingContext; import io.vertx.ext.web.Session; @@ -148,7 +145,7 @@ public Locale getLocale() { } @Override - public Set getFileUploads() { + public List getFileUploads() { return rc.fileUploads(); } diff --git a/mdm/common/src/main/java/com/gentics/mesh/context/impl/LocalActionContextImpl.java b/mdm/common/src/main/java/com/gentics/mesh/context/impl/LocalActionContextImpl.java index 518c397b63..0129977d98 100644 --- a/mdm/common/src/main/java/com/gentics/mesh/context/impl/LocalActionContextImpl.java +++ b/mdm/common/src/main/java/com/gentics/mesh/context/impl/LocalActionContextImpl.java @@ -2,12 +2,8 @@ import static com.gentics.mesh.rest.client.AbstractMeshRestHttpClient.getQuery; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Locale; -import 
java.util.Map; +import java.util.*; import java.util.Map.Entry; -import java.util.Set; import com.gentics.mesh.MeshVersion; import com.gentics.mesh.cli.BootstrapInitializer; @@ -28,8 +24,8 @@ import io.vertx.core.Future; import io.vertx.core.MultiMap; import io.vertx.core.Promise; +import io.vertx.core.http.Cookie; import io.vertx.core.spi.logging.LogDelegate; -import io.vertx.ext.web.Cookie; import io.vertx.ext.web.FileUpload; /** @@ -50,7 +46,7 @@ public class LocalActionContextImpl extends AbstractInternalActionContext imp private HttpResponseStatus responseStatus; private Promise promise = Promise.promise(); private Class classOfResponse; - private Set fileUploads = new HashSet<>(); + private List fileUploads = new ArrayList<>(); private LogDelegate securityLogger = new DummyLogger(); private BootstrapInitializer boot; @@ -205,7 +201,7 @@ public void setProject(String projectName) { } @Override - public Set getFileUploads() { + public List getFileUploads() { return fileUploads; } diff --git a/mdm/common/src/main/java/com/gentics/mesh/context/impl/NodeMigrationActionContextImpl.java b/mdm/common/src/main/java/com/gentics/mesh/context/impl/NodeMigrationActionContextImpl.java index f9f037996c..95c46e7096 100644 --- a/mdm/common/src/main/java/com/gentics/mesh/context/impl/NodeMigrationActionContextImpl.java +++ b/mdm/common/src/main/java/com/gentics/mesh/context/impl/NodeMigrationActionContextImpl.java @@ -1,11 +1,6 @@ package com.gentics.mesh.context.impl; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Locale; -import java.util.Map; -import java.util.Objects; -import java.util.Set; +import java.util.*; import com.gentics.mesh.context.AbstractInternalActionContext; import com.gentics.mesh.context.NodeMigrationActionContext; @@ -21,7 +16,7 @@ import io.netty.handler.codec.http.HttpResponseStatus; import io.vertx.core.MultiMap; -import io.vertx.ext.web.Cookie; +import io.vertx.core.http.Cookie; import io.vertx.ext.web.FileUpload; /** @@ 
-126,7 +121,7 @@ public MeshAuthUser getMeshAuthUser() { } @Override - public Set getFileUploads() { + public List getFileUploads() { return null; } diff --git a/mdm/common/src/main/java/com/gentics/mesh/rest/InternalEndpointRoute.java b/mdm/common/src/main/java/com/gentics/mesh/rest/InternalEndpointRoute.java index fc3880cdc5..5f8307c211 100644 --- a/mdm/common/src/main/java/com/gentics/mesh/rest/InternalEndpointRoute.java +++ b/mdm/common/src/main/java/com/gentics/mesh/rest/InternalEndpointRoute.java @@ -4,6 +4,7 @@ import java.util.List; import java.util.Map; +import io.vertx.ext.web.Router; import org.codehaus.jettison.json.JSONObject; import org.raml.model.MimeType; import org.raml.model.Response; @@ -60,6 +61,13 @@ public interface InternalEndpointRoute extends Comparable */ InternalEndpointRoute handler(Handler requestHandler); + /** + * Create a sub router + * @param router + * @return + */ + InternalEndpointRoute subRouter(Router router); + /** * Wrapper for {@link Route#last()} * diff --git a/mdm/orientdb-wrapper/src/main/java/com/gentics/mesh/cli/OrientDBBootstrapInitializerImpl.java b/mdm/orientdb-wrapper/src/main/java/com/gentics/mesh/cli/OrientDBBootstrapInitializerImpl.java index 0ee1aabd18..984f815284 100644 --- a/mdm/orientdb-wrapper/src/main/java/com/gentics/mesh/cli/OrientDBBootstrapInitializerImpl.java +++ b/mdm/orientdb-wrapper/src/main/java/com/gentics/mesh/cli/OrientDBBootstrapInitializerImpl.java @@ -159,15 +159,9 @@ protected void initCluster(MeshOptions options, PostProcessFlags flags, boolean // handles the clustering. db.setupConnectionPool(); - // TODO find a better way around the chicken and the egg issues. - // Vert.x is currently needed for eventQueueBatch creation. - // This process fails if vert.x has not been made accessible during local data setup. 
- vertx = Vertx.vertx(); initLocalData(flags, options, false); db.closeConnectionPool(); db.shutdown(); - vertx.close(); - vertx = null; // Start OrientDB Server which will open the previously created db and init hazelcast db.clusterManager().startAndSync(); @@ -175,6 +169,9 @@ protected void initCluster(MeshOptions options, PostProcessFlags flags, boolean // Now since hazelcast is ready we can create Vert.x initVertx(options); + // update the event bus store with the clustered event bus + eventBusStore.setEventBus(vertx.eventBus()); + // Setup the connection pool in order to allow transactions to be used db.setupConnectionPool(); diff --git a/plugin-api/src/main/java/com/gentics/mesh/plugin/PluginContext.java b/plugin-api/src/main/java/com/gentics/mesh/plugin/PluginContext.java index eb1a6807f0..95fbf7c533 100644 --- a/plugin-api/src/main/java/com/gentics/mesh/plugin/PluginContext.java +++ b/plugin-api/src/main/java/com/gentics/mesh/plugin/PluginContext.java @@ -2,6 +2,7 @@ import static io.vertx.core.http.HttpHeaders.AUTHORIZATION; +import java.nio.charset.Charset; import java.util.List; import java.util.Map; import java.util.Set; @@ -11,13 +12,13 @@ import com.gentics.mesh.http.HttpConstants; import com.gentics.mesh.plugin.env.PluginEnvironment; import com.gentics.mesh.rest.client.MeshRestClient; - import io.reactivex.annotations.Nullable; import io.vertx.core.AsyncResult; import io.vertx.core.Handler; import io.vertx.core.MultiMap; import io.vertx.core.Vertx; import io.vertx.core.buffer.Buffer; +import io.vertx.core.http.Cookie; import io.vertx.core.http.HttpHeaders; import io.vertx.core.http.HttpMethod; import io.vertx.core.http.HttpServerRequest; @@ -25,13 +26,7 @@ import io.vertx.core.json.JsonArray; import io.vertx.core.json.JsonObject; import io.vertx.ext.auth.User; -import io.vertx.ext.web.Cookie; -import io.vertx.ext.web.FileUpload; -import io.vertx.ext.web.Locale; -import io.vertx.ext.web.ParsedHeaderValues; -import io.vertx.ext.web.Route; -import 
io.vertx.ext.web.RoutingContext; -import io.vertx.ext.web.Session; +import io.vertx.ext.web.*; /** * Wrapper for the regular Vert.x routing context. @@ -178,6 +173,11 @@ public T get(String key) { return rc.get(key); } + @Override + public T get(String s, T t) { + return rc.get(s, t); + } + @Override public T remove(String key) { return rc.remove(key); @@ -209,13 +209,13 @@ public String normalisedPath() { } @Override - public @Nullable Cookie getCookie(String name) { - return rc.getCookie(name); + public String normalizedPath() { + return rc.normalizedPath(); } @Override - public RoutingContext addCookie(Cookie cookie) { - return rc.addCookie(cookie); + public @Nullable Cookie getCookie(String name) { + return rc.getCookie(name); } @Override @@ -228,11 +228,6 @@ public int cookieCount() { return rc.cookieCount(); } - @Override - public Set cookies() { - return rc.cookies(); - } - @Override public @Nullable String getBodyAsString() { return rc.getBodyAsString(); @@ -243,6 +238,16 @@ public Set cookies() { return rc.getBodyAsString(encoding); } + @Override + public @io.vertx.codegen.annotations.Nullable JsonObject getBodyAsJson(int i) { + return rc.getBodyAsJson(i); + } + + @Override + public @io.vertx.codegen.annotations.Nullable JsonArray getBodyAsJsonArray(int i) { + return rc.getBodyAsJsonArray(i); + } + @Override public @Nullable JsonObject getBodyAsJson() { return rc.getBodyAsJson(); @@ -259,7 +264,12 @@ public Set cookies() { } @Override - public Set fileUploads() { + public RequestBody body() { + return rc.body(); + } + + @Override + public List fileUploads() { return rc.fileUploads(); } @@ -348,11 +358,6 @@ public void reroute(HttpMethod method, String path) { rc.reroute(method, path); } - @Override - public List acceptableLocales() { - return rc.acceptableLocales(); - } - @Override public Map pathParams() { return rc.pathParams(); @@ -368,6 +373,11 @@ public MultiMap queryParams() { return rc.queryParams(); } + @Override + public MultiMap 
queryParams(Charset charset) { + return rc.queryParams(charset); + } + @Override public @Nullable List queryParam(String query) { return rc.queryParam(query); diff --git a/pom.xml b/pom.xml index 1eeb3c57ce..22862a3b3b 100644 --- a/pom.xml +++ b/pom.xml @@ -47,7 +47,7 @@ com.gentics.vertx maven-vertx-parent - 3.9.12.0 + 4.3.2.0 @@ -95,7 +95,8 @@ doc tests performance-tests - + hazelcast3-cluster-manager + diff --git a/services/jwt-auth/src/main/java/com/gentics/mesh/auth/oauth2/MeshOAuth2ServiceImpl.java b/services/jwt-auth/src/main/java/com/gentics/mesh/auth/oauth2/MeshOAuth2ServiceImpl.java index 567a728ddd..122fa6871b 100644 --- a/services/jwt-auth/src/main/java/com/gentics/mesh/auth/oauth2/MeshOAuth2ServiceImpl.java +++ b/services/jwt-auth/src/main/java/com/gentics/mesh/auth/oauth2/MeshOAuth2ServiceImpl.java @@ -59,7 +59,6 @@ import io.vertx.core.logging.Logger; import io.vertx.core.logging.LoggerFactory; import io.vertx.ext.auth.User; -import io.vertx.ext.auth.jwt.impl.JWTUser; import io.vertx.ext.web.Route; import io.vertx.ext.web.RoutingContext; import io.vertx.ext.web.handler.JWTAuthHandler; @@ -168,8 +167,8 @@ public void secure(Route route) { rc.next(); return; } - if (user instanceof JWTUser) { - JWTUser token = (JWTUser) user; + if (user instanceof User) { + User token = (User) user; List plugins = authPluginRegistry.getPlugins(); JsonObject decodedToken = token.principal(); diff --git a/tests/common/src/main/java/com/gentics/mesh/mock/MockingLoggerRule.java b/tests/common/src/main/java/com/gentics/mesh/mock/MockingLoggerRule.java index b16dc3488b..3991a6490a 100644 --- a/tests/common/src/main/java/com/gentics/mesh/mock/MockingLoggerRule.java +++ b/tests/common/src/main/java/com/gentics/mesh/mock/MockingLoggerRule.java @@ -48,6 +48,11 @@ public LogDelegate get(String name) { return mocks.computeIfAbsent(name, key -> mock(LogDelegate.class)); } + @Override + public boolean isAvailable() { + return true; + } + @Override public LogDelegate 
createDelegate(String name) { return get(name); diff --git a/tests/tests-admin-gui/src/main/java/com/gentics/mesh/verticle/admin/AdminGUIEndpointTest.java b/tests/tests-admin-gui/src/main/java/com/gentics/mesh/verticle/admin/AdminGUIEndpointTest.java index ca3bd9dbb4..bc43314166 100644 --- a/tests/tests-admin-gui/src/main/java/com/gentics/mesh/verticle/admin/AdminGUIEndpointTest.java +++ b/tests/tests-admin-gui/src/main/java/com/gentics/mesh/verticle/admin/AdminGUIEndpointTest.java @@ -10,6 +10,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import io.vertx.core.http.HttpClientResponse; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; @@ -20,7 +21,6 @@ import com.gentics.mesh.test.context.AbstractMeshTest; import io.vertx.core.http.HttpClient; -import io.vertx.core.http.HttpClientRequest; @MeshTestSetting(testSize = TestSize.EMPTY, startServer = false) public class AdminGUIEndpointTest extends AbstractMeshTest { @@ -43,16 +43,18 @@ public void testAdminConfigRendering() throws InterruptedException, ExecutionExc HttpClient client = createHttpClient(); CompletableFuture future = new CompletableFuture<>(); - HttpClientRequest request = client.request(GET, "/mesh-ui-v1/mesh-ui-config.js", rh -> { - rh.bodyHandler(bh -> { - if (rh.statusCode() == 200) { - future.complete(bh.toString()); - } else { - future.completeExceptionally(new Exception("Status code wrong {" + rh.statusCode() + "}")); - } - }); - }); - request.end(); + client.request(GET, "/mesh-ui-v1/mesh-ui-config.js") + .compose(request -> request.send()) + .onComplete(rh -> { + HttpClientResponse response = rh.result(); + response.bodyHandler(bh -> { + if (response.statusCode() == 200) { + future.complete(bh.toString()); + } else { + future.completeExceptionally(new Exception("Status code wrong {" + response.statusCode() + "}")); + } + }); + }); String response = future.get(10, TimeUnit.SECONDS); // String expectedUrl = "localhost:" + port; 
@@ -67,10 +69,11 @@ public void testAdminConfigRendering() throws InterruptedException, ExecutionExc public void testRedirect() throws InterruptedException, ExecutionException { HttpClient client = createHttpClient(); CompletableFuture future = new CompletableFuture<>(); - HttpClientRequest request = client.request(GET, "/", rh -> { - future.complete(rh.getHeader("Location")); - }); - request.end(); + client.request(GET, "/") + .compose(request -> request.send()) + .onComplete(rh -> { + future.complete(rh.result().getHeader("Location")); + }); assertEquals("/mesh-ui/", future.get()); } } diff --git a/tests/tests-core/src/main/java/com/gentics/mesh/cache/EventAwareCacheTest.java b/tests/tests-core/src/main/java/com/gentics/mesh/cache/EventAwareCacheTest.java index 11b559a6a4..00c06a2583 100644 --- a/tests/tests-core/src/main/java/com/gentics/mesh/cache/EventAwareCacheTest.java +++ b/tests/tests-core/src/main/java/com/gentics/mesh/cache/EventAwareCacheTest.java @@ -8,6 +8,7 @@ import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; +import com.gentics.mesh.event.EventBusStore; import org.junit.Test; import com.gentics.mesh.cache.impl.EventAwareCacheImpl; @@ -25,6 +26,8 @@ public class EventAwareCacheTest extends AbstractMeshTest { public void testCustomHandler() { MeshOptions options = getTestContext().getOptions(); options.getMonitoringOptions().setEnabled(false); + EventBusStore eventBusStore = new EventBusStore(); + eventBusStore.setEventBus(vertx().eventBus()); EventAwareCache USER_STATE_CACHE = new EventAwareCacheImpl.Builder() .maxSize(15_000) .events(USER_UPDATED) @@ -40,7 +43,7 @@ public void testCustomHandler() { .setMetricsService(mock(MetricsService.class)) .meshOptions(options) .name("testcache") - .vertx(vertx()) + .eventBusStore(eventBusStore) .build(); // Set some values to the cache diff --git a/tests/tests-core/src/main/java/com/gentics/mesh/core/graphql/GraphQLEndpointTest.java 
b/tests/tests-core/src/main/java/com/gentics/mesh/core/graphql/GraphQLEndpointTest.java index faf79b4565..9fa809112b 100644 --- a/tests/tests-core/src/main/java/com/gentics/mesh/core/graphql/GraphQLEndpointTest.java +++ b/tests/tests-core/src/main/java/com/gentics/mesh/core/graphql/GraphQLEndpointTest.java @@ -15,6 +15,7 @@ import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.function.Consumer; import java.util.stream.Collector; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -93,7 +94,6 @@ import com.gentics.mesh.parameter.impl.VersioningParametersImpl; import com.gentics.mesh.rest.client.MeshRestClient; import com.gentics.mesh.test.context.AbstractMeshTest; -import com.hazelcast.util.function.Consumer; import io.reactivex.Completable; import io.reactivex.Single; diff --git a/tests/tests-core/src/main/java/com/gentics/mesh/core/project/ProjectInfoEndpointTest.java b/tests/tests-core/src/main/java/com/gentics/mesh/core/project/ProjectInfoEndpointTest.java index fb4e900a60..61ed44b88c 100644 --- a/tests/tests-core/src/main/java/com/gentics/mesh/core/project/ProjectInfoEndpointTest.java +++ b/tests/tests-core/src/main/java/com/gentics/mesh/core/project/ProjectInfoEndpointTest.java @@ -35,6 +35,6 @@ public void testReadUnknownProjectByName() { */ @Test public void testReadProjectWithEndSlash() { - call(() -> client().get("/" + encodeSegment(PROJECT_NAME) + "/", ProjectResponse.class), NOT_FOUND); + call(() -> client().get("/" + encodeSegment(PROJECT_NAME) + "/", ProjectResponse.class)); } } diff --git a/tests/tests-core/src/main/java/com/gentics/mesh/core/rest/MeshRestAPITest.java b/tests/tests-core/src/main/java/com/gentics/mesh/core/rest/MeshRestAPITest.java index 32178c97b6..c41493ddca 100644 --- a/tests/tests-core/src/main/java/com/gentics/mesh/core/rest/MeshRestAPITest.java +++ b/tests/tests-core/src/main/java/com/gentics/mesh/core/rest/MeshRestAPITest.java @@ -28,12 +28,13 @@ public void 
test404Response() throws Exception { HttpClient client = vertx().createHttpClient(options); CompletableFuture future = new CompletableFuture<>(); - HttpClientRequest request = client.request(HttpMethod.POST, MeshVersion.CURRENT_API_BASE_PATH + "/test", rh -> { - rh.bodyHandler(bh -> { - future.complete(bh.toString()); - }); - }); - request.end(); + client.request(HttpMethod.POST, MeshVersion.CURRENT_API_BASE_PATH + "/test") + .compose(request -> request.send()) + .onComplete(response -> { + response.result().bodyHandler(bh -> { + future.complete(bh.toString()); + }); + }); String response = future.get(1, TimeUnit.SECONDS); assertTrue("The response string should not contain any html specific characters but it was {" + response + "} ", diff --git a/tests/tests-core/src/main/java/com/gentics/mesh/core/utilities/AbstractValidateSchemaTest.java b/tests/tests-core/src/main/java/com/gentics/mesh/core/utilities/AbstractValidateSchemaTest.java index 2e566769da..df4936ab8b 100644 --- a/tests/tests-core/src/main/java/com/gentics/mesh/core/utilities/AbstractValidateSchemaTest.java +++ b/tests/tests-core/src/main/java/com/gentics/mesh/core/utilities/AbstractValidateSchemaTest.java @@ -7,6 +7,7 @@ import java.util.ArrayList; import java.util.List; +import io.vertx.ext.web.handler.HttpException; import org.junit.Assert; import org.junit.Test; @@ -14,7 +15,6 @@ import io.vertx.core.json.JsonArray; import io.vertx.core.json.JsonObject; -import io.vertx.ext.web.handler.impl.HttpStatusException; import okhttp3.Response; public abstract class AbstractValidateSchemaTest extends AbstractMeshTest { @@ -339,7 +339,7 @@ public void testInvalidSchemas() { try { Response r = this.httpPost(this.path, schema).execute(); if (r.code() != 200) { - throw new HttpStatusException(r.code()); + throw new HttpException(r.code()); } JsonObject obj = new JsonObject(r.body().string()); assertEquals("Status should be invalid", obj.getString("status"), "INVALID"); @@ -416,7 +416,7 @@ public void 
testValidSchemas() { try { Response r = this.httpPost(this.path, schema).execute(); if (r.code() != 200) { - throw new HttpStatusException(r.code()); + throw new HttpException(r.code()); } JsonObject obj = new JsonObject(r.body().string()); assertEquals("Status should be valid", obj.getString("status"), "VALID"); diff --git a/tests/tests-core/src/main/java/com/gentics/mesh/core/webrootfield/WebRootFieldTypeTest.java b/tests/tests-core/src/main/java/com/gentics/mesh/core/webrootfield/WebRootFieldTypeTest.java index 1c501b8e22..d211c5c5db 100644 --- a/tests/tests-core/src/main/java/com/gentics/mesh/core/webrootfield/WebRootFieldTypeTest.java +++ b/tests/tests-core/src/main/java/com/gentics/mesh/core/webrootfield/WebRootFieldTypeTest.java @@ -11,12 +11,7 @@ import java.io.File; import java.io.IOException; -import java.util.Arrays; -import java.util.Calendar; -import java.util.HashSet; -import java.util.List; -import java.util.Optional; -import java.util.Set; +import java.util.*; import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -577,13 +572,13 @@ private void testMicronodeList(boolean fieldShouldExist, boolean contentShouldEx valuesSet.add(micronode.getFields().getStringField("lastName").getString()); }); - json.forEach(result -> { - JsonObject o = (JsonObject) result; - assertTrue(valuesSet.remove(o.getJsonObject("fields").getString("firstName"))); - assertTrue(valuesSet.remove(o.getJsonObject("fields").getString("lastName"))); + for (Iterator iterator = json.iterator(); iterator.hasNext(); ) { + JsonObject result = (JsonObject) iterator.next(); + assertTrue(valuesSet.remove(result.getJsonObject("fields").getString("firstName"))); + assertTrue(valuesSet.remove(result.getJsonObject("fields").getString("lastName"))); - json.remove(result); - }); + iterator.remove(); + } assertTrue(valuesSet.isEmpty()); }; @@ -677,12 +672,11 @@ private void testNodeList(boolean fieldShouldExist, boolean contentShouldExist) Set 
valuesSet = list.stream().map(node -> node.getUuid()).collect(Collectors.toSet()); - json.forEach(result -> { - JsonObject o = (JsonObject) result; - assertTrue(valuesSet.remove(o.getString("uuid"))); - - json.remove(result); - }); + for (Iterator iterator = json.iterator(); iterator.hasNext(); ) { + JsonObject result = (JsonObject) iterator.next(); + assertTrue(valuesSet.remove(result.getString("uuid"))); + iterator.remove(); + } assertTrue(valuesSet.isEmpty()); };