diff --git a/LTS-CHANGELOG.adoc b/LTS-CHANGELOG.adoc index 147189d4fc..0994de33c0 100644 --- a/LTS-CHANGELOG.adoc +++ b/LTS-CHANGELOG.adoc @@ -18,13 +18,23 @@ The LTS changelog lists releases which are only accessible via a commercial subs All fixes and changes in LTS releases will be released the next minor release. Changes from LTS 1.4.x will be included in release 1.5.0. [[v1.9.14]] -== 1.9.14 (TBD) +== 1.9.14 (19.04.2023) + +icon:check[] Core: The name of a Mesh user is now forced to be unique on a database level, to prevent creating users with an already existing username. The duplicated username detection mechanism has also been improved. + +CAUTION: Duplicate usernames must be removed before the update, otherwise Mesh will fail to start! icon:check[] Monitoring: Calling the `/health/ready` endpoint while restoring an OrientDB backup would block the request. If too many requests were blocked in this way, Mesh would be considered not live any more. This has been fixed now, during a restore, the endpoint `/health/ready` will now fail immediately while the liveness status will be maintained. icon:check[] Monitoring: Failing calls to any `/health/...` endpoints will no longer log the whole stacktrace, since this does not contain useful information. +icon:check[] Core: Migration jobs have been made more robust: Migration jobs will now be aborted, in cases where the storage is no longer ready to be written to +(e.g. write quorum is not reached, or the storage is read-only due to insufficient disk space available). A periodic check (interval can be configured via setting `migrationTriggerInterval`) +will continue processing of aborted jobs. + +icon:check[] Clustering: when running in CUD coordination mode, the requests to trigger job migration were not delegated to the current master. This has been fixed now. 
+ [[v1.9.13]] == 1.9.13 (22.03.2023) @@ -55,13 +65,23 @@ icon:check[] Core: The node migration process has been improved to reduce resour icon:check[] Core: Corner case of updating the webroot info might throw a false conflict exception, when the segment field value is reset for a schema. This has been fixed. [[v1.8.21]] -== 1.8.21 (TBD) +== 1.8.21 (19.04.2023) + +icon:check[] Core: The name of a Mesh user is now forced to be unique on a database level, to prevent creating users with an already existing username. The duplicated username detection mechanism has also been improved. + +CAUTION: Duplicate usernames must be removed before the update, otherwise Mesh will fail to start! icon:check[] Monitoring: Calling the `/health/ready` endpoint while restoring an OrientDB backup would block the request. If too many requests were blocked in this way, Mesh would be considered not live any more. This has been fixed now, during a restore, the endpoint `/health/ready` will now fail immediately while the liveness status will be maintained. icon:check[] Monitoring: Failing calls to any `/health/...` endpoints will no longer log the whole stacktrace, since this does not contain useful information. +icon:check[] Core: Migration jobs have been made more robust: Migration jobs will now be aborted, in cases where the storage is no longer ready to be written to +(e.g. write quorum is not reached, or the storage is read-only due to insufficient disk space available). A periodic check (interval can be configured via setting `migrationTriggerInterval`) +will continue processing of aborted jobs. + +icon:check[] Clustering: when running in CUD coordination mode, the requests to trigger job migration were not delegated to the current master. This has been fixed now. 
+ [[v1.8.20]] == 1.8.20 (22.03.2023) diff --git a/api/src/main/java/com/gentics/mesh/etc/config/MeshOptions.java b/api/src/main/java/com/gentics/mesh/etc/config/MeshOptions.java index 68e739813d..60ffe8b2bc 100644 --- a/api/src/main/java/com/gentics/mesh/etc/config/MeshOptions.java +++ b/api/src/main/java/com/gentics/mesh/etc/config/MeshOptions.java @@ -24,6 +24,7 @@ public abstract class MeshOptions implements Option { public static final String DEFAULT_DIRECTORY_NAME = "graphdb"; public static final int DEFAULT_MAX_DEPTH = 10; public static final int DEFAULT_PLUGIN_TIMEOUT = 120; + public static final long DEFAULT_MIGRATION_TRIGGER_INTERVAL = 60_000; public static final String MESH_DEFAULT_LANG_ENV = "MESH_DEFAULT_LANG"; public static final String MESH_LANGUAGES_FILE_PATH_ENV = "MESH_LANGUAGES_FILE_PATH"; @@ -39,7 +40,9 @@ public abstract class MeshOptions implements Option { public static final String MESH_INITIAL_ADMIN_PASSWORD_ENV = "MESH_INITIAL_ADMIN_PASSWORD"; public static final String MESH_INITIAL_ADMIN_PASSWORD_FORCE_RESET_ENV = "MESH_INITIAL_ADMIN_PASSWORD_FORCE_RESET"; public static final String MESH_MAX_PURGE_BATCH_SIZE = "MESH_MAX_PURGE_BATCH_SIZE"; - private static final String MESH_MAX_MIGRATION_BATCH_SIZE = "MESH_MAX_MIGRATION_BATCH_SIZE"; + public static final String MESH_MAX_MIGRATION_BATCH_SIZE = "MESH_MAX_MIGRATION_BATCH_SIZE"; + public static final String MESH_MIGRATION_TRIGGER_INTERVAL = "MESH_MIGRATION_TRIGGER_INTERVAL"; + // TODO remove this setting. There should not be a default max depth. 
This is no longer needed once we remove the expand all parameter private int defaultMaxDepth = DEFAULT_MAX_DEPTH; @@ -143,6 +146,11 @@ public abstract class MeshOptions implements Option { @EnvironmentVariable(name = MESH_MAX_MIGRATION_BATCH_SIZE, description = "Override the maximum migration batch size") private int migrationMaxBatchSize = 50; + @JsonProperty(required = false) + @JsonPropertyDescription("Interval in ms for the automatic migration job trigger. Setting this to a non-positive value will disable automatic job triggering. Default: " + DEFAULT_MIGRATION_TRIGGER_INTERVAL + " ms.") + @EnvironmentVariable(name = MESH_MIGRATION_TRIGGER_INTERVAL, description = "Override the migration trigger interval") + private long migrationTriggerInterval = DEFAULT_MIGRATION_TRIGGER_INTERVAL; + @JsonProperty(required = true) @JsonPropertyDescription("GraphQL options.") private GraphQLOptions graphQLOptions = new GraphQLOptions(); @@ -520,6 +528,20 @@ public void setMigrationMaxBatchSize(int migrationMaxBatchSize) { this.migrationMaxBatchSize = migrationMaxBatchSize; } + /** + * Get the automatic job migration trigger interval in ms. + * @return interval in ms + */ + public long getMigrationTriggerInterval() { + return migrationTriggerInterval; + } + + @Setter + public MeshOptions setMigrationTriggerInterval(long migrationTriggerInterval) { + this.migrationTriggerInterval = migrationTriggerInterval; + return this; + } + /** * Validate this and the nested options. 
*/ diff --git a/core/src/main/java/com/gentics/mesh/core/migration/AbstractMigrationHandler.java b/core/src/main/java/com/gentics/mesh/core/migration/AbstractMigrationHandler.java index 36fb402b1b..827fc53de5 100644 --- a/core/src/main/java/com/gentics/mesh/core/migration/AbstractMigrationHandler.java +++ b/core/src/main/java/com/gentics/mesh/core/migration/AbstractMigrationHandler.java @@ -35,6 +35,7 @@ import com.gentics.mesh.core.rest.node.FieldMap; import com.gentics.mesh.core.rest.node.FieldMapImpl; import com.gentics.mesh.core.rest.node.field.Field; +import com.gentics.mesh.distributed.RequestDelegator; import com.gentics.mesh.etc.config.MeshOptions; import com.gentics.mesh.event.EventQueueBatch; import com.gentics.mesh.metric.MetricsService; @@ -61,13 +62,19 @@ public abstract class AbstractMigrationHandler extends AbstractHandler implement protected final MeshOptions options; + private final RequestDelegator delegator; + + private final boolean clusteringEnabled; + public AbstractMigrationHandler(Database db, BinaryUploadHandlerImpl binaryFieldHandler, MetricsService metrics, - Provider batchProvider, MeshOptions options) { + Provider batchProvider, MeshOptions options, RequestDelegator delegator) { this.db = db; this.binaryFieldHandler = binaryFieldHandler; this.metrics = metrics; this.batchProvider = batchProvider; this.options = options; + this.delegator = delegator; + clusteringEnabled = this.options.getClusterOptions().isEnabled(); } /** @@ -136,6 +143,28 @@ protected List migrateLoop(Queue containers, EventCauseInfo ca sqb.setCause(cause); int pollCount = options.getMigrationMaxBatchSize(); while (!containers.isEmpty()) { + // check whether the database is ready for the migration + if (db.isReadOnly(false)) { + errorsDetected.add(new MigrationAbortedException("Database is read-only.")); + return errorsDetected; + } + if (clusteringEnabled && db.clusterManager().isClusterTopologyLocked()) { + errorsDetected.add(new MigrationAbortedException("Cluster is 
locked due to topology change.")); + return errorsDetected; + } + if (clusteringEnabled && !db.clusterManager().isWriteQuorumReached()) { + errorsDetected.add(new MigrationAbortedException("Write quorum not reached.")); + return errorsDetected; + } + if (clusteringEnabled && !db.clusterManager().isLocalNodeOnline()) { + errorsDetected.add(new MigrationAbortedException("Local node is not online.")); + return errorsDetected; + } + if (clusteringEnabled && !delegator.isMaster()) { + errorsDetected.add(new MigrationAbortedException("Instance is not the master.")); + return errorsDetected; + } + List containerList = CollectionUtil.pollMany(containers, pollCount); try { // Each container migration has its own search queue batch which is then combined with other batch entries. diff --git a/core/src/main/java/com/gentics/mesh/core/migration/impl/BranchMigrationImpl.java b/core/src/main/java/com/gentics/mesh/core/migration/impl/BranchMigrationImpl.java index 97e2ac9428..b7ea1d46de 100644 --- a/core/src/main/java/com/gentics/mesh/core/migration/impl/BranchMigrationImpl.java +++ b/core/src/main/java/com/gentics/mesh/core/migration/impl/BranchMigrationImpl.java @@ -33,6 +33,7 @@ import com.gentics.mesh.core.migration.BranchMigration; import com.gentics.mesh.core.rest.event.node.BranchMigrationCause; import com.gentics.mesh.core.result.Result; +import com.gentics.mesh.distributed.RequestDelegator; import com.gentics.mesh.etc.config.MeshOptions; import com.gentics.mesh.event.EventQueueBatch; import com.gentics.mesh.metric.MetricsService; @@ -51,8 +52,9 @@ public class BranchMigrationImpl extends AbstractMigrationHandler implements Bra private static final Logger log = LoggerFactory.getLogger(BranchMigrationImpl.class); @Inject - public BranchMigrationImpl(Database db, BinaryUploadHandlerImpl nodeFieldAPIHandler, MetricsService metrics, Provider batchProvider, MeshOptions options) { - super(db, nodeFieldAPIHandler, metrics, batchProvider, options); + public 
BranchMigrationImpl(Database db, BinaryUploadHandlerImpl nodeFieldAPIHandler, MetricsService metrics, + Provider batchProvider, MeshOptions options, RequestDelegator delegator) { + super(db, nodeFieldAPIHandler, metrics, batchProvider, options, delegator); } @Override @@ -112,7 +114,11 @@ public Completable migrateBranch(BranchMigrationContext context) { log.error("Encountered migration error.", error); } } - result = Completable.error(new CompositeException(errorsDetected)); + if (errorsDetected.size() == 1) { + result = Completable.error(errorsDetected.get(0)); + } else { + result = Completable.error(new CompositeException(errorsDetected)); + } } return result; }); diff --git a/core/src/main/java/com/gentics/mesh/core/migration/impl/MicronodeMigrationImpl.java b/core/src/main/java/com/gentics/mesh/core/migration/impl/MicronodeMigrationImpl.java index 33a3c1dce1..7b87a5c186 100644 --- a/core/src/main/java/com/gentics/mesh/core/migration/impl/MicronodeMigrationImpl.java +++ b/core/src/main/java/com/gentics/mesh/core/migration/impl/MicronodeMigrationImpl.java @@ -42,6 +42,7 @@ import com.gentics.mesh.core.rest.schema.FieldSchemaContainer; import com.gentics.mesh.core.rest.schema.ListFieldSchema; import com.gentics.mesh.core.verticle.handler.WriteLock; +import com.gentics.mesh.distributed.RequestDelegator; import com.gentics.mesh.etc.config.MeshOptions; import com.gentics.mesh.event.EventQueueBatch; import com.gentics.mesh.metric.MetricsService; @@ -62,8 +63,10 @@ public class MicronodeMigrationImpl extends AbstractMigrationHandler implements private final WriteLock writeLock; @Inject - public MicronodeMigrationImpl(Database db, BinaryUploadHandlerImpl binaryFieldHandler, MetricsService metrics, Provider batchProvider, WriteLock writeLock, MeshOptions options) { - super(db, binaryFieldHandler, metrics, batchProvider, options); + public MicronodeMigrationImpl(Database db, BinaryUploadHandlerImpl binaryFieldHandler, MetricsService metrics, + Provider batchProvider, 
WriteLock writeLock, MeshOptions options, + RequestDelegator delegator) { + super(db, binaryFieldHandler, metrics, batchProvider, options, delegator); this.writeLock = writeLock; } @@ -130,7 +133,11 @@ public Completable migrateMicronodes(MicronodeMigrationContext context) { log.error("Encountered migration error.", error); } } - result = Completable.error(new CompositeException(errorsDetected)); + if (errorsDetected.size() == 1) { + result = Completable.error(errorsDetected.get(0)); + } else { + result = Completable.error(new CompositeException(errorsDetected)); + } } return result; }); diff --git a/core/src/main/java/com/gentics/mesh/core/migration/impl/NodeMigrationImpl.java b/core/src/main/java/com/gentics/mesh/core/migration/impl/NodeMigrationImpl.java index 03330ac103..e885f314f3 100644 --- a/core/src/main/java/com/gentics/mesh/core/migration/impl/NodeMigrationImpl.java +++ b/core/src/main/java/com/gentics/mesh/core/migration/impl/NodeMigrationImpl.java @@ -46,10 +46,12 @@ import com.gentics.mesh.core.endpoint.migration.MigrationStatusHandler; import com.gentics.mesh.core.endpoint.node.BinaryUploadHandlerImpl; import com.gentics.mesh.core.migration.AbstractMigrationHandler; +import com.gentics.mesh.core.migration.MigrationAbortedException; import com.gentics.mesh.core.migration.NodeMigration; import com.gentics.mesh.core.rest.common.FieldContainer; import com.gentics.mesh.core.rest.event.node.SchemaMigrationCause; import com.gentics.mesh.core.verticle.handler.WriteLock; +import com.gentics.mesh.distributed.RequestDelegator; import com.gentics.mesh.etc.config.MeshOptions; import com.gentics.mesh.event.EventQueueBatch; import com.gentics.mesh.metric.MetricsService; @@ -74,8 +76,8 @@ public class NodeMigrationImpl extends AbstractMigrationHandler implements NodeM @Inject public NodeMigrationImpl(Database db, BinaryUploadHandlerImpl nodeFieldAPIHandler, MetricsService metrics, Provider batchProvider, - WriteLock writeLock, MeshOptions options) { - super(db, 
nodeFieldAPIHandler, metrics, batchProvider, options); + WriteLock writeLock, MeshOptions options, RequestDelegator delegator) { + super(db, nodeFieldAPIHandler, metrics, batchProvider, options, delegator); migrationGauge = metrics.longGauge(NODE_MIGRATION_PENDING); this.writeLock = writeLock; } @@ -227,7 +229,18 @@ public Completable migrateNodes(NodeMigrationActionContext context) { migrationGauge.decrementAndGet(); } }); - } while (batchSize > 0 && currentBatch > 0 && currentBatch >= batchSize); + + // when containers is not empty, something bad happened and we need to let the migration fail immediately + if (!containers.isEmpty()) { + if (errorsDetected.size() > 1) { + return Completable.error(new CompositeException(errorsDetected)); + } else if (errorsDetected.size() == 1) { + return Completable.error(errorsDetected.get(0)); + } else { + return Completable.error(new MigrationAbortedException("Not all containers of the current batch were migrated.")); + } + } + } while (batchSize > 0 && currentBatch > 0 && currentBatch >= batchSize); // TODO prepare errors. 
They should be easy to understand and to grasp Completable result = Completable.complete(); @@ -237,7 +250,11 @@ public Completable migrateNodes(NodeMigrationActionContext context) { log.error("Encountered migration error.", error); } } - result = Completable.error(new CompositeException(errorsDetected)); + if (errorsDetected.size() == 1) { + result = Completable.error(errorsDetected.get(0)); + } else { + result = Completable.error(new CompositeException(errorsDetected)); + } } return result; }); diff --git a/core/src/main/java/com/gentics/mesh/core/verticle/job/JobWorkerVerticleImpl.java b/core/src/main/java/com/gentics/mesh/core/verticle/job/JobWorkerVerticleImpl.java index a67bd424b0..3ad67846d7 100644 --- a/core/src/main/java/com/gentics/mesh/core/verticle/job/JobWorkerVerticleImpl.java +++ b/core/src/main/java/com/gentics/mesh/core/verticle/job/JobWorkerVerticleImpl.java @@ -8,6 +8,8 @@ import com.gentics.mesh.cli.BootstrapInitializer; import com.gentics.mesh.core.db.Database; import com.gentics.mesh.core.jobs.JobProcessor; +import com.gentics.mesh.distributed.RequestDelegator; +import com.gentics.mesh.etc.config.MeshOptions; import com.gentics.mesh.verticle.AbstractJobVerticle; import dagger.Lazy; @@ -35,12 +37,39 @@ public class JobWorkerVerticleImpl extends AbstractJobVerticle implements JobWor private Lazy boot; private JobProcessor jobProcessor; private Database db; + private final RequestDelegator delegator; + private final boolean clusteringEnabled; @Inject - public JobWorkerVerticleImpl(Database db, Lazy boot, JobProcessor jobProcessor) { + public JobWorkerVerticleImpl(Database db, Lazy boot, JobProcessor jobProcessor, + MeshOptions options, RequestDelegator delegator) { this.db = db; this.boot = boot; this.jobProcessor = jobProcessor; + this.delegator = delegator; + this.clusteringEnabled = options.getClusterOptions().isEnabled(); + } + + @Override + public void start() throws Exception { + super.start(); + + long migrationTriggerInterval = 
boot.get().mesh().getOptions().getMigrationTriggerInterval(); + + if (migrationTriggerInterval > 0) { + vertx.setPeriodic(migrationTriggerInterval, id -> { + if (!isCurrentMaster()) { + log.debug("Not invoking job processing, because instance is not the current master"); + } else if(!isDatabaseReadyForJobs()) { + log.debug("Not invoking job processing, because instance is not ready to process jobs"); + } else if (jobProcessor.isProcessing()) { + log.debug("Not invoking job processing, because jobs are currently processed"); + } else { + log.debug("Invoke job processing"); + vertx.eventBus().publish(getJobAdress(), null); + } + }); + } } @Override @@ -58,4 +87,32 @@ public Completable executeJob(Message message) { return Completable.defer(() -> jobProcessor.process()); } + /** + * Check whether the instance is currently the master + * @return true for the master (or clustering not enabled) + */ + private boolean isCurrentMaster() { + if (clusteringEnabled) { + return delegator.isMaster(); + } else { + return true; + } + } + + /** + * Check whether the database is ready to process jobs. When clustering is enabled, this will check whether + *
    + * <ol>
    + * <li>The local database is online</li>
    + * <li>The write quorum is reached</li>
    + * <li>The cluster is not locked due to topology changes</li>
    + * </ol>
    + *
+ * @return true when the database is ready for job processing + */ + private boolean isDatabaseReadyForJobs() { + if (clusteringEnabled) { + return db.clusterManager().isLocalNodeOnline() && db.clusterManager().isWriteQuorumReached() && !db.clusterManager().isClusterTopologyLocked(); + } else { + return true; + } + } } diff --git a/distributed-coordinator/src/main/java/com/gentics/mesh/distributed/coordinator/proxy/ClusterEnabledRequestDelegatorImpl.java b/distributed-coordinator/src/main/java/com/gentics/mesh/distributed/coordinator/proxy/ClusterEnabledRequestDelegatorImpl.java index 50e923345f..fe094a0126 100644 --- a/distributed-coordinator/src/main/java/com/gentics/mesh/distributed/coordinator/proxy/ClusterEnabledRequestDelegatorImpl.java +++ b/distributed-coordinator/src/main/java/com/gentics/mesh/distributed/coordinator/proxy/ClusterEnabledRequestDelegatorImpl.java @@ -42,6 +42,8 @@ public class ClusterEnabledRequestDelegatorImpl implements RequestDelegator { private final HttpClient httpClient; private final MeshOptions options; + private static final Set blacklistPathPatternSet = createBlacklistPatternSet(); + private static final Set whiteListPathPatternSet = createWhitelistPatternSet(); @Inject @@ -75,7 +77,7 @@ public void handle(RoutingContext rc) { return; } - if (isWhitelisted(path)) { + if (isWhitelisted(path) && !isBlacklisted(path)) { if (log.isDebugEnabled()) { log.debug("URI {" + requestURI + "} with method {" + method.name() + "} is whitelisted. 
Skipping delegation"); } @@ -226,6 +228,26 @@ private void forwardHeaders(HttpServerRequest request, HttpClientRequest forward } + /** + * Check whether the given path is blacklisted (meaning: should be delegated to the master, even if also whitelisted) + * @param path path to check + * @return true iff the path is blacklisted + */ + public static boolean isBlacklisted(String path) { + for (Pattern pattern : blacklistPathPatternSet) { + Matcher m = pattern.matcher(path); + if (m.matches()) { + return true; + } + } + return false; + } + + /** + * Check whether the given path is whitelisted (meaning: should not be delegated to the master) + * @param path path to check + * @return true iff the path is whitelisted + */ public static boolean isWhitelisted(String path) { for (Pattern pattern : whiteListPathPatternSet) { Matcher m = pattern.matcher(path); @@ -236,6 +258,13 @@ public static boolean isWhitelisted(String path) { return false; } + private static Set createBlacklistPatternSet() { + Set patterns = new HashSet<>(); + patterns.add(Pattern.compile("/api/v[0-9]+/admin/processJobs")); + patterns.add(Pattern.compile("/api/v[0-9]+/admin/jobs/.*/process")); + return patterns; + } + private static Set createWhitelistPatternSet() { Set patterns = new HashSet<>(); patterns.add(Pattern.compile("/api/v[0-9]+/?")); diff --git a/doc/src/main/docs/generated/models/mesh-config.example.yml b/doc/src/main/docs/generated/models/mesh-config.example.yml index b4004c0ce1..4d7b16dc68 100644 --- a/doc/src/main/docs/generated/models/mesh-config.example.yml +++ b/doc/src/main/docs/generated/models/mesh-config.example.yml @@ -43,6 +43,7 @@ nodeName: null startInReadOnly: false versionPurgeMaxBatchSize: 10 migrationMaxBatchSize: 50 +migrationTriggerInterval: 60000 httpServer: port: 8080 sslPort: 8443 diff --git a/doc/src/main/docs/generated/tables/MeshOptions.adoc-include b/doc/src/main/docs/generated/tables/MeshOptions.adoc-include index 8bca11fa2a..e088de67de 100644 --- 
a/doc/src/main/docs/generated/tables/MeshOptions.adoc-include +++ b/doc/src/main/docs/generated/tables/MeshOptions.adoc-include @@ -57,6 +57,11 @@ | integer | The maximum amount of entities to be migrated in a single transaction. This setting affects schema, microschema and branch migrations +| migrationTriggerInterval +| false +| integer +| Interval in ms for the automatic migration job trigger. Setting this to a non-positive value will disable automatic job triggering. Default: 60000 ms. + | monitoring | false | object diff --git a/doc/src/main/docs/generated/tables/mesh-db-revs.adoc-include b/doc/src/main/docs/generated/tables/mesh-db-revs.adoc-include index 8be641db59..5dce68c19e 100644 --- a/doc/src/main/docs/generated/tables/mesh-db-revs.adoc-include +++ b/doc/src/main/docs/generated/tables/mesh-db-revs.adoc-include @@ -5,7 +5,7 @@ | Database revision -| *1.10.3* +| *1.10.4* | 6d5ccff3 | *1.9.13* diff --git a/doc/src/main/docs/generated/tables/mesh-env.adoc-include b/doc/src/main/docs/generated/tables/mesh-env.adoc-include index 5dc88e0737..3a59d02cf8 100644 --- a/doc/src/main/docs/generated/tables/mesh-env.adoc-include +++ b/doc/src/main/docs/generated/tables/mesh-env.adoc-include @@ -74,6 +74,9 @@ | *MESH_MONITORING_JVM_METRICS_ENABLED* | Override the configured JVM metrics enabled flag. +| *MESH_MIGRATION_TRIGGER_INTERVAL* +| Override the migration trigger interval + | *MESH_GRAPH_EXPORT_DIRECTORY* | Override the graph database export directory. 
diff --git a/mdm/api/src/main/java/com/gentics/mesh/core/jobs/JobProcessor.java b/mdm/api/src/main/java/com/gentics/mesh/core/jobs/JobProcessor.java index 18f760e17a..c12b1b052c 100644 --- a/mdm/api/src/main/java/com/gentics/mesh/core/jobs/JobProcessor.java +++ b/mdm/api/src/main/java/com/gentics/mesh/core/jobs/JobProcessor.java @@ -12,4 +12,10 @@ public interface JobProcessor { * @return completable */ Completable process(); + + /** + * Check whether jobs are currently being processed + * @return true when jobs are processed, false if not + */ + boolean isProcessing(); } diff --git a/mdm/common/src/main/java/com/gentics/mesh/core/data/dao/PersistingUserDao.java b/mdm/common/src/main/java/com/gentics/mesh/core/data/dao/PersistingUserDao.java index 094b194626..acbbbe26cb 100644 --- a/mdm/common/src/main/java/com/gentics/mesh/core/data/dao/PersistingUserDao.java +++ b/mdm/common/src/main/java/com/gentics/mesh/core/data/dao/PersistingUserDao.java @@ -154,7 +154,11 @@ default HibUser create(String username, HibUser creator) { * @param creator * @return */ - default HibUser init(HibUser user, String username, HibUser creator) { + private HibUser init(HibUser user, String username, HibUser creator) { + HibUser conflicting = findByUsername(username); + if (conflicting != null && !conflicting.getUuid().equals(user.getUuid())) { + throw conflict(conflicting.getUuid(), username, "user_conflicting_username"); + } user.setUsername(username); user.enable(); user.generateBucketId(); @@ -497,12 +501,6 @@ default HibUser create(InternalActionContext ac, EventQueueBatch batch, String u throw error(FORBIDDEN, "error_missing_perm", userRoot.getUuid(), CREATE_PERM.getRestPerm().getName()); } String groupUuid = requestModel.getGroupUuid(); - String userName = requestModel.getUsername(); - HibUser conflictingUser = findByUsername(userName); - if (conflictingUser != null) { - throw conflict(conflictingUser.getUuid(), userName, "user_conflicting_username"); - } - HibUser user = 
create(requestModel.getUsername(), requestUser, uuid); user.setFirstname(requestModel.getFirstname()); user.setUsername(requestModel.getUsername()); diff --git a/mdm/common/src/main/java/com/gentics/mesh/core/jobs/JobProcessorImpl.java b/mdm/common/src/main/java/com/gentics/mesh/core/jobs/JobProcessorImpl.java index c09f7b2daa..a7004b6caa 100644 --- a/mdm/common/src/main/java/com/gentics/mesh/core/jobs/JobProcessorImpl.java +++ b/mdm/common/src/main/java/com/gentics/mesh/core/jobs/JobProcessorImpl.java @@ -35,6 +35,8 @@ public class JobProcessorImpl implements JobProcessor { final Map jobProcessors; private Database db; + private boolean processing = false; + @Inject public JobProcessorImpl(Map jobProcessors, Database db) { this.jobProcessors = jobProcessors; @@ -47,7 +49,16 @@ public Completable process() { .map(this::process) .collect(Collectors.toList()); - return Completable.concat(jobsActions); + return Completable.concat(jobsActions).doOnSubscribe(d -> { + processing = true; + }).doAfterTerminate(() -> { + processing = false; + }); + } + + @Override + public boolean isProcessing() { + return processing; } private List getJobsToExecute() { diff --git a/mdm/common/src/main/java/com/gentics/mesh/core/migration/MigrationAbortedException.java b/mdm/common/src/main/java/com/gentics/mesh/core/migration/MigrationAbortedException.java new file mode 100644 index 0000000000..71b1809c45 --- /dev/null +++ b/mdm/common/src/main/java/com/gentics/mesh/core/migration/MigrationAbortedException.java @@ -0,0 +1,19 @@ +package com.gentics.mesh.core.migration; + +/** + * Exception that is thrown when a migration has been aborted + */ +public class MigrationAbortedException extends Exception { + /** + * Serial Version UUID + */ + private static final long serialVersionUID = 2877461998431596437L; + + /** + * Create instance + * @param reason reason for the abort + */ + public MigrationAbortedException(String reason) { + super(String.format("The migration job has been aborted. 
Reason: %s", reason)); + } +} diff --git a/mdm/common/src/main/java/com/gentics/mesh/core/migration/impl/MigrationStatusHandlerImpl.java b/mdm/common/src/main/java/com/gentics/mesh/core/migration/impl/MigrationStatusHandlerImpl.java index 2598c27eb6..1cfbd5b49b 100644 --- a/mdm/common/src/main/java/com/gentics/mesh/core/migration/impl/MigrationStatusHandlerImpl.java +++ b/mdm/common/src/main/java/com/gentics/mesh/core/migration/impl/MigrationStatusHandlerImpl.java @@ -16,6 +16,7 @@ import com.gentics.mesh.core.db.Database; import com.gentics.mesh.core.db.Tx; import com.gentics.mesh.core.endpoint.migration.MigrationStatusHandler; +import com.gentics.mesh.core.migration.MigrationAbortedException; import com.gentics.mesh.core.rest.job.JobStatus; import com.gentics.mesh.core.rest.job.JobWarningList; @@ -118,13 +119,17 @@ public MigrationStatusHandler done(JobWarningList warnings) { * @param failureMessage */ public MigrationStatusHandler error(Throwable error, String failureMessage) { - HibJob job = getJob(); - setStatus(FAILED); - log.error("Error handling migration", error); - - job.setStopTimestamp(); - job.setError(error); - commit(job); + if (error instanceof MigrationAbortedException) { + log.error("Migration has been aborted", error); + } else { + HibJob job = getJob(); + setStatus(FAILED); + log.error("Error handling migration", error); + + job.setStopTimestamp(); + job.setError(error); + commit(job); + } return this; } diff --git a/mdm/orientdb-wrapper/src/main/java/com/gentics/mesh/core/data/impl/UserImpl.java b/mdm/orientdb-wrapper/src/main/java/com/gentics/mesh/core/data/impl/UserImpl.java index 39ce72d2c4..160a8f5462 100644 --- a/mdm/orientdb-wrapper/src/main/java/com/gentics/mesh/core/data/impl/UserImpl.java +++ b/mdm/orientdb-wrapper/src/main/java/com/gentics/mesh/core/data/impl/UserImpl.java @@ -7,6 +7,7 @@ import static com.gentics.mesh.core.data.relationship.GraphRelationships.HAS_USER; import static 
com.gentics.mesh.core.data.util.HibClassConverter.toGraph; import static com.gentics.mesh.madl.index.EdgeIndexDefinition.edgeIndex; +import static com.gentics.mesh.madl.index.VertexIndexDefinition.vertexIndex; import java.util.Optional; import java.util.Spliterator; @@ -41,6 +42,7 @@ import com.gentics.mesh.core.result.Result; import com.gentics.mesh.core.result.TraversalResult; import com.gentics.mesh.event.EventQueueBatch; +import com.gentics.mesh.madl.field.FieldType; import com.gentics.mesh.parameter.PagingParameters; import com.gentics.mesh.util.ETag; import com.syncleus.ferma.traversals.VertexTraversal; @@ -88,6 +90,10 @@ public class UserImpl extends AbstractMeshCoreVertex implements Us public static void init(TypeHandler type, IndexHandler index) { type.createVertexType(UserImpl.class, MeshVertexImpl.class); index.createIndex(edgeIndex(ASSIGNED_TO_ROLE).withOut()); + // TODO this may affect a lot of user, so we'd play fair here and check before applying this. + index.createIndex(vertexIndex(UserImpl.class) + .withField(USERNAME_PROPERTY_KEY, FieldType.STRING) + .unique()); } @Override diff --git a/services/jwt-auth/src/main/java/com/gentics/mesh/auth/oauth2/MeshOAuth2ServiceImpl.java b/services/jwt-auth/src/main/java/com/gentics/mesh/auth/oauth2/MeshOAuth2ServiceImpl.java index 567a728ddd..8615ad33d1 100644 --- a/services/jwt-auth/src/main/java/com/gentics/mesh/auth/oauth2/MeshOAuth2ServiceImpl.java +++ b/services/jwt-auth/src/main/java/com/gentics/mesh/auth/oauth2/MeshOAuth2ServiceImpl.java @@ -230,7 +230,7 @@ protected Single syncUser(RoutingContext rc, JsonObject token) { .flatMapSingleElement(user -> db.singleTx(user.getDelegate()::getUuid).flatMap(uuid -> { // Compare the stored and current token id to see whether the current token is different. // In that case a sync must be invoked. 
- String lastSeenTokenId = TOKEN_ID_LOG.getIfPresent(user.getDelegate().getUuid()); + String lastSeenTokenId = TOKEN_ID_LOG.getIfPresent(uuid); if (lastSeenTokenId == null || !lastSeenTokenId.equals(cachingId)) { return assertReadOnlyDeactivated().andThen(db.singleTx(tx -> { HibUser admin = tx.userDao().findByUsername("admin");