heads, @Nonnull FlowNodeVisitor visitor) {
+ visitAll(heads, null, visitor);
+ }
+}
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/BlockChunkFinder.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/BlockChunkFinder.java
new file mode 100644
index 00000000..d7bbe21d
--- /dev/null
+++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/BlockChunkFinder.java
@@ -0,0 +1,30 @@
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import org.jenkinsci.plugins.workflow.graph.BlockEndNode;
+import org.jenkinsci.plugins.workflow.graph.BlockStartNode;
+import org.jenkinsci.plugins.workflow.graph.FlowNode;
+
+import javax.annotation.CheckForNull;
+import javax.annotation.Nonnull;
+
+/**
+ * Matches start and end of a block. Any block start or end node is treated as a chunk boundary.
+ * @author Sam Van Oort
+ */
+public class BlockChunkFinder implements ChunkFinder {
+
+ @Override
+ public boolean isStartInsideChunk() {
+ return false;
+ }
+
+ @Override
+ public boolean isChunkStart(@Nonnull FlowNode current, @CheckForNull FlowNode previous) {
+ return current instanceof BlockStartNode;
+ }
+
+ @Override
+ public boolean isChunkEnd(@Nonnull FlowNode current, @CheckForNull FlowNode previous) {
+ return current instanceof BlockEndNode;
+ }
+}
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ChunkFinder.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ChunkFinder.java
new file mode 100644
index 00000000..f326ecf8
--- /dev/null
+++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ChunkFinder.java
@@ -0,0 +1,35 @@
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import org.jenkinsci.plugins.workflow.graph.FlowNode;
+
+import javax.annotation.CheckForNull;
+import javax.annotation.Nonnull;
+
+/**
+ * Used to define the start and end of a {@link FlowChunk} to split a {@link org.jenkinsci.plugins.workflow.flow.FlowExecution}
+ * (For use with a {@link SimpleChunkVisitor} in the {@link ForkScanner#visitSimpleChunks(SimpleChunkVisitor, ChunkFinder)}
+ * @author Sam Van Oort
+ */
+public interface ChunkFinder {
+
+ /** If true, a chunk is implicitly created whenever we begin */
+ boolean isStartInsideChunk();
+
+ /**
+ * Test if the current node is the start of a new chunk (inclusive)
+ * @param current Node to test for being a start, it will begin the chunk and be included
+ * @param previous Previous node, to use in testing chunk
+ * @return True if current node is the beginning of chunk
+ */
+ boolean isChunkStart(@Nonnull FlowNode current, @CheckForNull FlowNode previous);
+
+ /**
+ * Test if the current node is the end of a chunk (inclusive)
+ * @param current Node to test for being end
+ * For a block, the {@link org.jenkinsci.plugins.workflow.graph.BlockEndNode}
+ *
+ * For a legacy stage or marker, this will be first node of new stage (previous is the marker)
+ * @param previous Previous node, to use in testing chunk
+ * @return True if current is the end of a chunk (inclusive)
+ */
+ boolean isChunkEnd(@Nonnull FlowNode current, @CheckForNull FlowNode previous);
+}
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java
new file mode 100644
index 00000000..7fd3e9ae
--- /dev/null
+++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java
@@ -0,0 +1,106 @@
+/*
+ * The MIT License
+ *
+ * Copyright (c) 2016, CloudBees, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import org.jenkinsci.plugins.workflow.graph.BlockStartNode;
+import org.jenkinsci.plugins.workflow.graph.FlowNode;
+
+import javax.annotation.Nonnull;
+import javax.annotation.concurrent.NotThreadSafe;
+import java.util.ArrayDeque;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+
+/** Does a simple and somewhat efficient depth-first search of all FlowNodes in the DAG.
+ *
+ *
+ * <p>Iteration order: depth-first search, revisiting parallel branches once done.
+ * With parallel branches, the first branch is explored, then remaining branches are explored in reverse order.
+ *
+ *
+ * <p>The behavior is analogous to {@link org.jenkinsci.plugins.workflow.graph.FlowGraphWalker} but faster.
+ * @author Sam Van Oort
+ */
+@NotThreadSafe
+public class DepthFirstScanner extends AbstractFlowScanner {
+
+ protected ArrayDeque<FlowNode> queue;
+
+ protected HashSet<FlowNode> visited = new HashSet<FlowNode>();
+
+ protected void reset() {
+ if (this.queue == null) {
+ this.queue = new ArrayDeque<FlowNode>();
+ } else {
+ this.queue.clear();
+ }
+ this.visited.clear();
+ this.myCurrent = null;
+ this.myNext = null;
+ }
+
+ @Override
+ protected void setHeads(@Nonnull Collection<FlowNode> heads) {
+ Iterator<FlowNode> it = heads.iterator();
+ if (it.hasNext()) {
+ FlowNode f = it.next();
+ myCurrent = f;
+ myNext = f;
+ }
+ while (it.hasNext()) {
+ queue.add(it.next());
+ }
+ }
+
+ @Override
+ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection<FlowNode> blackList) {
+ FlowNode output = null;
+
+ // Walk through parents of current node
+ List<FlowNode> parents = current.getParents(); // Can't be null
+ for (FlowNode f : parents) {
+ // Only ParallelStep nodes may be visited multiple times... but we can't just filter those
+ // because that's in workflow-cps plugin which depends on this one.
+ if (!blackList.contains(f) && !(f instanceof BlockStartNode && visited.contains(f))) {
+ if (output == null ) {
+ output = f; // Do direct assignment rather than needless push/pop
+ } else {
+ queue.push(f);
+ }
+ }
+ }
+
+ if (output == null && queue.size() > 0) {
+ output = queue.pop();
+ }
+
+ // Only BlockStartNodes, specifically ParallelStep can be the parent of multiple child nodes
+ // Thus they're the only nodes we need to avoid visiting multiple times by recording the visit
+ if (output instanceof BlockStartNode) {
+ visited.add(output);
+ }
+ return output;
+ }
+}
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java
new file mode 100644
index 00000000..2eacc197
--- /dev/null
+++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java
@@ -0,0 +1,43 @@
+/*
+ * The MIT License
+ *
+ * Copyright (c) 2016, CloudBees, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import com.google.common.base.Predicate;
+
+import javax.annotation.Nonnull;
+import java.util.Iterator;
+
+/** Iterator that may be navigated through a filtered wrapper.
+ *
+ * As a rule, assume that returned Filterators wrap an iterator and pass calls to it.
+ * Thus the iterator position will change if next() is called on the filtered versions.
+ * Note also: you may filter a filterator, if needed.
+ * @author Sam Van Oort
+ */
+public interface Filterator<T> extends Iterator<T> {
+ /** Returns a filtered view of the iterator, which calls the iterator until matches are found */
+ @Nonnull
+ public Filterator<T> filter(@Nonnull Predicate<T> matchCondition);
+}
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java
new file mode 100644
index 00000000..d620c551
--- /dev/null
+++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java
@@ -0,0 +1,93 @@
+/*
+ * The MIT License
+ *
+ * Copyright (c) 2016, CloudBees, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import com.google.common.base.Predicate;
+
+import javax.annotation.Nonnull;
+import javax.annotation.concurrent.NotThreadSafe;
+import java.util.Iterator;
+
+/** Filters an iterator against a match predicate by wrapping an iterator
+ * @author Sam Van Oort
+ */
+@NotThreadSafe
+class FilteratorImpl<T> implements Filterator<T> {
+ private boolean hasNext = false;
+ private T nextVal = null;
+ private Iterator<T> wrapped = null;
+ private Predicate<T> matchCondition = null;
+
+ public FilteratorImpl<T> filter(Predicate<T> matchCondition) {
+ return new FilteratorImpl<T>(this, matchCondition);
+ }
+
+ public FilteratorImpl(@Nonnull Iterator<T> it, @Nonnull Predicate<T> matchCondition) {
+ this.wrapped = it;
+ this.matchCondition = matchCondition;
+
+ while(it.hasNext()) {
+ T val = it.next();
+ if (matchCondition.apply(val)) {
+ this.nextVal = val;
+ hasNext = true;
+ break;
+ }
+ }
+ }
+
+ @Override
+ public boolean hasNext() {
+ return hasNext;
+ }
+
+ @Override
+ public T next() {
+ T returnVal = nextVal;
+ T nextMatch = null;
+
+ boolean foundMatch = false;
+ while(wrapped.hasNext()) {
+ nextMatch = wrapped.next();
+ if (matchCondition.apply(nextMatch)) {
+ foundMatch = true;
+ break;
+ }
+ }
+ if (foundMatch) {
+ this.nextVal = nextMatch;
+ this.hasNext = true;
+ } else {
+ this.nextVal = null;
+ this.hasNext = false;
+ }
+ return returnVal;
+ }
+
+ @Override
+ public void remove() {
+ wrapped.remove();
+ }
+}
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java
new file mode 100644
index 00000000..6bce6d0c
--- /dev/null
+++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java
@@ -0,0 +1,53 @@
+/*
+ * The MIT License
+ *
+ * Copyright (c) 2016, CloudBees, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import org.jenkinsci.plugins.workflow.graph.FlowNode;
+
+import javax.annotation.Nonnull;
+
+/**
+ * Common container interface for a series of {@link FlowNode}s with a logical start and end.
+ * We use this because every plugin has a different way of storing info about the nodes.
+ *
+ *
+ * <p>Common uses:
+ *
+ * - A single FlowNode (when coupling with timing/status APIs)
+ * - A block (with a {@link org.jenkinsci.plugins.workflow.graph.BlockStartNode} and {@link org.jenkinsci.plugins.workflow.graph.BlockEndNode})
+ * - A linear run of marked nodes (such as a legacy stage)
+ * - A parallel block (special case of block)
+ * - A parallel branch within a parallel block
+ * - A mix of types in sequence, such as nested structures
+ *
+ *
+ * @author Sam Van Oort
+ */
+public interface FlowChunk {
+ @Nonnull
+ FlowNode getFirstNode();
+
+ @Nonnull
+ FlowNode getLastNode();
+}
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkWithContext.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkWithContext.java
new file mode 100644
index 00000000..1285808b
--- /dev/null
+++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkWithContext.java
@@ -0,0 +1,17 @@
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import org.jenkinsci.plugins.workflow.graph.FlowNode;
+
+import javax.annotation.CheckForNull;
+
+/** FlowChunk with information about what comes before/after */
+public interface FlowChunkWithContext extends FlowChunk {
+
+ /** Return the node before this chunk, or null if it is the beginning */
+ @CheckForNull
+ FlowNode getNodeBefore();
+
+ /** Return the node after this chunk, or null if it is the end */
+ @CheckForNull
+ FlowNode getNodeAfter();
+}
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java
new file mode 100644
index 00000000..790963e0
--- /dev/null
+++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java
@@ -0,0 +1,46 @@
+/*
+ * The MIT License
+ *
+ * Copyright (c) 2016, CloudBees, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import org.jenkinsci.plugins.workflow.graph.FlowNode;
+
+import javax.annotation.Nonnull;
+import java.util.Collection;
+
+/**
+ * Interface used when examining a pipeline FlowNode graph node by node, and terminating when a condition is met
+ *
+ * This is intended to couple with {@link AbstractFlowScanner#visitAll(Collection, FlowNodeVisitor)}
+ * @author Sam Van Oort
+ */
+public interface FlowNodeVisitor {
+ /**
+ * Visit the flow node, and indicate if we should continue analysis
+ *
+ * @param f Node to visit
+ * @return False if we should stop visiting nodes
+ */
+ boolean visit(@Nonnull FlowNode f);
+}
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java
new file mode 100644
index 00000000..a1f256f2
--- /dev/null
+++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java
@@ -0,0 +1,81 @@
+/*
+ * The MIT License
+ *
+ * Copyright (c) 2016, CloudBees, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import com.google.common.base.Predicate;
+import com.google.common.base.Predicates;
+import hudson.model.Action;
+import org.jenkinsci.plugins.workflow.actions.ErrorAction;
+import org.jenkinsci.plugins.workflow.actions.LabelAction;
+import org.jenkinsci.plugins.workflow.actions.LogAction;
+import org.jenkinsci.plugins.workflow.actions.StageAction;
+import org.jenkinsci.plugins.workflow.actions.ThreadNameAction;
+import org.jenkinsci.plugins.workflow.actions.WorkspaceAction;
+import org.jenkinsci.plugins.workflow.graph.BlockStartNode;
+import org.jenkinsci.plugins.workflow.graph.FlowNode;
+
+import javax.annotation.Nonnull;
+
+/**
+ * Library of common functionality when analyzing/walking flow graphs
+ * @author Sam Van Oort
+ */
+public final class FlowScanningUtils {
+
+ /** Prevent instantiation */
+ private FlowScanningUtils() {}
+
+ /**
+ * Create a predicate that will match on all FlowNodes having a specific action present
+ * @param actionClass Action class to look for
+ * @return Predicate that will match when FlowNode has the action given
+ */
+ @Nonnull
+ public static Predicate<FlowNode> hasActionPredicate(@Nonnull final Class<? extends Action> actionClass) {
+ return new Predicate<FlowNode>() {
+ @Override
+ public boolean apply(FlowNode input) {
+ return (input != null && input.getAction(actionClass) != null);
+ }
+ };
+ }
+
+ // Default predicates, which may be used for common conditions
+ public static final Predicate<FlowNode> MATCH_BLOCK_START = (Predicate)Predicates.instanceOf(BlockStartNode.class);
+
+ /**
+ * Returns all {@link BlockStartNode}s enclosing the given FlowNode, starting from the inside out.
+ * This is useful if we want to obtain information about its scope, such as the workspace, parallel branch, or label.
+ * Warning: while this is efficient for one node, batch operations are far more efficient when handling many nodes.
+ * @param f {@link FlowNode} to start from.
+ * @return Iterator that returns all enclosing BlockStartNodes from the inside out.
+ */
+ @Nonnull
+ public static Filterator<FlowNode> fetchEnclosingBlocks(@Nonnull FlowNode f) {
+ LinearBlockHoppingScanner scanner = new LinearBlockHoppingScanner();
+ scanner.setup(f);
+ return scanner.filter(MATCH_BLOCK_START);
+ }
+}
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java
new file mode 100644
index 00000000..f6c2dfbf
--- /dev/null
+++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java
@@ -0,0 +1,602 @@
+/*
+ * The MIT License
+ *
+ * Copyright (c) 2016, CloudBees, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import com.google.common.base.Predicate;
+import com.google.common.base.Predicates;
+import org.jenkinsci.plugins.workflow.actions.ThreadNameAction;
+import org.jenkinsci.plugins.workflow.graph.BlockEndNode;
+import org.jenkinsci.plugins.workflow.graph.BlockStartNode;
+import org.jenkinsci.plugins.workflow.graph.FlowEndNode;
+import org.jenkinsci.plugins.workflow.graph.FlowNode;
+
+import javax.annotation.CheckForNull;
+import javax.annotation.Nonnull;
+import javax.annotation.concurrent.NotThreadSafe;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Set;
+
+/**
+ * Scanner that will scan down all forks when we hit parallel blocks before continuing, but generally runs in linear order
+ * Think of it as the opposite of {@link DepthFirstScanner}.
+ *
+ *
+ * <p>This is a fairly efficient way to visit all FlowNodes, and provides three useful guarantees:
+ *
+ * - Every FlowNode is visited, and visited EXACTLY ONCE (not true for LinearScanner)
+ * - All parallel branches are visited before we move past the parallel block (not true for DepthFirstScanner)
+ * - For EVERY block, the BlockEndNode is visited before the BlockStartNode (not true for DepthFirstScanner, with parallels)
+ *
+ *
+ * The big advantages of this approach:
+ *
+ * - Blocks are visited in the order they end (no backtracking) - helps with working a block at a time
+ * - Points are visited in linear order within a block (easy to use for analysis)
+ * - Minimal state information needed
+ * - Branch information is available for use here
+ *
+ *
+ * @author Sam Van Oort
+ */
+@NotThreadSafe
+public class ForkScanner extends AbstractFlowScanner {
+
+ @CheckForNull
+ public NodeType getCurrentType() {
+ return currentType;
+ }
+
+ @CheckForNull
+ public NodeType getNextType() {
+ return nextType;
+ }
+
+ /** Used to recognize special nodes */
+ public enum NodeType {
+ /** Not any of the parallel types */
+ NORMAL,
+ /**{@link BlockStartNode} starting a parallel block */
+ PARALLEL_START,
+ /**{@link BlockEndNode} ending a parallel block */
+ PARALLEL_END,
+ /**{@link BlockStartNode} starting a branch of a parallel */
+ PARALLEL_BRANCH_START,
+ /**{@link BlockEndNode} ending a parallel block... or last executed nodes */
+ PARALLEL_BRANCH_END,
+ }
+
+ // Last element in stack is end of myCurrent parallel start, first is myCurrent start
+ ArrayDeque parallelBlockStartStack = new ArrayDeque();
+
+ /** FlowNode that will terminate the myCurrent parallel block */
+ FlowNode currentParallelStartNode = null;
+
+ ParallelBlockStart currentParallelStart = null;
+
+ private boolean walkingFromFinish = false;
+
+ NodeType currentType = null;
+ NodeType nextType = null;
+
+ public ForkScanner() {
+
+ }
+
+ public ForkScanner(@Nonnull Collection<FlowNode> heads) {
+ this.setup(heads);
+ }
+
+ public ForkScanner(@Nonnull Collection<FlowNode> heads, @Nonnull Collection<FlowNode> blackList) {
+ this.setup(heads, blackList);
+ }
+
+ @Override
+ protected void reset() {
+ parallelBlockStartStack.clear();
+ currentParallelStart = null;
+ currentParallelStartNode = null;
+ myCurrent = null;
+ myNext = null;
+ }
+
+
+ // A bit of a dirty hack, but it works around the fact that we need trivial access to classes from workflow-cps
+ // For this and only this test. So, we load them from a context that is aware of them.
+ // Ex: workflow-cps can automatically set this correctly. Not perfectly graceful but it works.
+ private static Predicate<FlowNode> parallelStartPredicate = Predicates.alwaysFalse();
+
+ // Invoke this passing a test against the ParallelStep conditions
+ public static void setParallelStartPredicate(@Nonnull Predicate<FlowNode> pred) {
+ parallelStartPredicate = pred;
+ }
+
+ // Needed because the *next* node might be a parallel start if we start in middle and we don't know it
+ public static boolean isParallelStart(@CheckForNull FlowNode f) {
+ return parallelStartPredicate.apply(f);
+ }
+
+ // Needed because the *next* node might be a parallel end and we don't know it from a normal one
+ public static boolean isParallelEnd(@CheckForNull FlowNode f) {
+ return f != null && f instanceof BlockEndNode && (f.getParents().size()>1 || isParallelStart(((BlockEndNode) f).getStartNode()));
+ }
+
+ /** If true, we are walking from the flow end node and have a complete view of the flow
+ * Needed because there are implications when not walking from a finished flow (blocks without a {@link BlockEndNode})*/
+ public boolean isWalkingFromFinish() {
+ return walkingFromFinish;
+ }
+
+ /** Tracks state for parallel blocks, so we can ensure all are visited and know the branch starting point */
+ static class ParallelBlockStart {
+ BlockStartNode forkStart; // This is the node with child branches
+ ArrayDeque<FlowNode> unvisited = new ArrayDeque<FlowNode>(); // Remaining branches of this that we have not visited yet
+
+ ParallelBlockStart(BlockStartNode forkStart) {
+ this.forkStart = forkStart;
+ }
+
+ /** Strictly for internal use in the least common ancestor problem */
+ ParallelBlockStart() {}
+ }
+
+ interface FlowPiece { // Mostly a marker
+ /** If true, this is not a fork and has no following forks */
+ boolean isLeaf();
+ }
+
+ /** Linear (no parallels) run of FLowNodes */
+ // TODO see if this can be replaced with a FlowChunk acting as a container class for a list of FlowNodes
+ static class FlowSegment implements FlowPiece {
+ ArrayList<FlowNode> visited = new ArrayList<FlowNode>();
+ FlowPiece after;
+ boolean isLeaf = true;
+
+ @Override
+ public boolean isLeaf() {
+ return isLeaf;
+ }
+
+ /**
+ * We have discovered a forking node intersecting our FlowSegment in the middle or meeting at the end
+ * Now we need to split the flow, or pull out the fork point and make both branches follow it
+ * @param nodeMapping Mapping of BlockStartNodes to flowpieces (forks or segments)
+ * @param joinPoint Node where the branches intersect/meet (fork point)
+ * @param joiningBranch Flow piece that is joining this
+ * @throws IllegalStateException When you try to split a segment on a node that it doesn't contain, or invalid graph structure
+ * @return Recreated fork
+ */
+ Fork split(@Nonnull HashMap<FlowNode, FlowPiece> nodeMapping, @Nonnull BlockStartNode joinPoint, @Nonnull FlowPiece joiningBranch) {
+ int index = visited.lastIndexOf(joinPoint); // Fork will be closer to end, so this is better than indexOf
+ Fork newFork = new Fork(joinPoint);
+
+ if (index < 0) {
+ throw new IllegalStateException("Tried to split a segment where the node doesn't exist in this segment");
+ } else if (index == this.visited.size()-1) { // We forked just off the most recent node
+ newFork.following.add(this);
+ newFork.following.add(joiningBranch);
+ this.visited.remove(index);
+ } else if (index == 0) {
+ throw new IllegalStateException("We have a cyclic graph or heads that are not separate branches!");
+ } else { // Splitting at some midpoint within the segment, everything before becomes part of the following
+ // Execute the split: create a new fork at the fork point, and shuffle the part of the flow after it
+ // to a new segment and add that to the fork.
+
+ FlowSegment newSegment = new FlowSegment();
+ newSegment.after = this.after;
+ newSegment.visited.addAll(this.visited.subList(0, index));
+ newFork.following.add(newSegment);
+ newFork.following.add(joiningBranch);
+ this.after = newFork;
+ this.isLeaf = false;
+
+ // Remove the part before the fork point
+ this.visited.subList(0, index+1).clear();
+ for (FlowNode n : newSegment.visited) {
+ nodeMapping.put(n, newSegment);
+ }
+ }
+ nodeMapping.put(joinPoint, newFork);
+ return newFork;
+ }
+
+ public void add(FlowNode f) {
+ this.visited.add(f);
+ }
+ }
+
+ /** Internal class used for constructing the LeastCommonAncestor structure */
+ // TODO see if this can be replaced with a FlowChunk acting as a container class for parallels
+ // I.E. ParallelMemoryFlowChunk or similar
+ static class Fork extends ParallelBlockStart implements FlowPiece {
+ List<FlowPiece> following = new ArrayList<FlowPiece>();
+
+ @Override
+ public boolean isLeaf() {
+ return false;
+ }
+
+ public Fork(BlockStartNode forkNode) {
+ this.forkStart = forkNode;
+ }
+ }
+
+ /** Does a conversion of the fork container class to a set of block starts */
+ ArrayDeque<ParallelBlockStart> convertForksToBlockStarts(ArrayDeque<Fork> parallelForks) {
+ // Walk through and convert forks to parallel block starts, and find heads that point to them
+ ArrayDeque<ParallelBlockStart> output = new ArrayDeque<ParallelBlockStart>();
+ for (Fork f : parallelForks) {
+ // Do processing to assign heads to flowsegments
+ ParallelBlockStart start = new ParallelBlockStart();
+ start.forkStart = f.forkStart;
+ start.unvisited = new ArrayDeque<FlowNode>();
+
+ // Add the nodes to the parallel starts here
+ for (FlowPiece fp : f.following) {
+ if (fp.isLeaf()) { // Forks are never leaves
+ start.unvisited.add(((FlowSegment)fp).visited.get(0));
+ }
+ }
+ output.add(start);
+ }
+ return output;
+ }
+
+ /**
+ * Create the necessary information about parallel blocks in order to provide flowscanning from inside incomplete parallel branches
+ * This works by walking back to construct the tree of parallel blocks covering all heads back to the Least Common Ancestor of all heads
+ * (the top parallel block). One by one, as branches join, we remove them from the list of live pieces and replace with their common ancestor.
+ *
+ * The core algorithm is simple in theory but the many cases render the implementation quite complex. In gist:
+ *
+ * - We track FlowPieces, which are Forks (where branches merge) and FlowSegments (where there's an unforked sequence of nodes)
+ * - A map of FlowNode to its containing FlowPiece is created
+ * - For each head we start a new FlowSegment and create an iterator of all enclosing blocks (all we need for this)
+ * - We do a series of passes through all iterators looking to see if the parent of any given piece maps to an existing FlowPiece
+ *
+ * - Where there are no mappings, we add another node to the FlowSegment
+ * - Where an existing piece exists, if it's a Fork, we add the current piece on as a new branch
+ * - Where an existing piece exists if it's a FlowSegment, we create a fork:
+ *
+ * - If we're joining at the most recent point, create a Fork with both branches following it, and replace that item's ForkSegment in the piece list with a Fork
+ * - If joining midway through, split the segment and create a fork as needed
+ *
+ * - When two pieces join together, we remove one from the list
+ * - When we're down to a single piece, all heads have merged to a common ancestor and we're done
+ *
+ * - Each time we merge a branch in, we need to remove an entry from enclosing blocks & live pieces
+ *
+ *
+ * There are some assumptions you need to know about to understand why this works:
+ *
+ * - None of the pieces have multiple parents, since we only look at enclosing blocks (only BlockEndNodes for a parallel block have multiple parents)
+ * - No cycles exist in the graph
+ * - Flow graphs are correctly constructed
+ * - Heads are all separate branches
+ *
+ *
+ * @param heads the current heads, one per live branch, to walk back from
+ */
+ ArrayDeque leastCommonAncestor(@Nonnull final Set heads) {
+ // Maps each seen FlowNode to the FlowPiece currently containing it
+ HashMap branches = new HashMap();
+ // One enclosing-block iterator per live branch; kept in lockstep with livePieces
+ ArrayList> iterators = new ArrayList>();
+ ArrayList livePieces = new ArrayList();
+
+ ArrayDeque parallelForks = new ArrayDeque(); // Tracks the discovered forks in order of encounter
+
+ Predicate notAHead = new Predicate() { // Filter out pre-existing heads
+ Collection checkHeads = convertToFastCheckable(heads);
+
+ @Override
+ public boolean apply(FlowNode input) { return !(checkHeads.contains(input)); }
+ };
+
+ // Seed one FlowSegment (and one block iterator) per head
+ for (FlowNode f : heads) {
+ iterators.add(FlowScanningUtils.fetchEnclosingBlocks(f).filter(notAHead)); // We can do this because Parallels always meet at a BlockStartNode
+ FlowSegment b = new FlowSegment();
+ b.add(f);
+ livePieces.add(b);
+ branches.put(f, b);
+ }
+
+ // Walk through, merging flownodes one-by-one until everything has merged to one ancestor
+ while (iterators.size() > 1) {
+ ListIterator> itIterator = iterators.listIterator();
+ ListIterator pieceIterator = livePieces.listIterator();
+
+ while (itIterator.hasNext()) {
+ Filterator blockStartIterator = itIterator.next();
+ FlowPiece myPiece = pieceIterator.next(); //Safe because we always remove/add with both iterators at once
+
+ // Welp we hit the end of a branch
+ if (!blockStartIterator.hasNext()) {
+ pieceIterator.remove();
+ itIterator.remove();
+ continue;
+ }
+
+ FlowNode nextBlockStart = blockStartIterator.next();
+
+ // Look for cases where two branches merge together
+ FlowPiece existingPiece = branches.get(nextBlockStart);
+ if (existingPiece == null && myPiece instanceof FlowSegment) { // No merge, just add to segment
+ ((FlowSegment) myPiece).add(nextBlockStart);
+ branches.put(nextBlockStart, myPiece);
+ } else if (existingPiece == null && myPiece instanceof Fork) { // No merge, we had a fork. Start a segment preceding the fork
+ FlowSegment newSegment = new FlowSegment();
+ newSegment.isLeaf = false;
+ newSegment.add(nextBlockStart);
+ newSegment.after = myPiece;
+ pieceIterator.remove();
+ pieceIterator.add(newSegment);
+ branches.put(nextBlockStart, newSegment);
+ } else if (existingPiece != null) { // Always not null. We're merging into another thing, we're going to eliminate a branch
+ if (existingPiece instanceof Fork) {
+ ((Fork) existingPiece).following.add(myPiece);
+ } else { // Split a flow segment so it forks against this one
+ Fork f = ((FlowSegment) existingPiece).split(branches, (BlockStartNode)nextBlockStart, myPiece);
+ // If we split the existing segment at its end, we created a fork replacing its latest node
+ // Thus we must replace the piece with the fork ahead of it
+ if (f.following.contains(existingPiece) ) {
+ int headIndex = livePieces.indexOf(existingPiece);
+ livePieces.set(headIndex, f);
+ }
+ parallelForks.add(f);
+ }
+
+ // Merging removes the piece & its iterator from heads
+ itIterator.remove();
+ pieceIterator.remove();
+ }
+ }
+ }
+
+ // If we hit issues with the ordering of blocks by depth, apply a sorting to the parallels by depth
+ return convertForksToBlockStarts(parallelForks);
+ }
+
+ /**
+  * Initialize iteration state from the given heads.
+  * With multiple heads we must be inside parallel branches: compute the parallel-block
+  * structure back to the least common ancestor, then start from the first unvisited branch head.
+  * With one head, we simply classify the starting node.
+  */
+ @Override
+ protected void setHeads(@Nonnull Collection heads) {
+ if (heads.size() > 1) {
+ // LinkedHashSet preserves the caller's head ordering while deduplicating
+ parallelBlockStartStack = leastCommonAncestor(new LinkedHashSet(heads));
+ currentParallelStart = parallelBlockStartStack.pop();
+ currentParallelStartNode = currentParallelStart.forkStart;
+ myCurrent = currentParallelStart.unvisited.pop();
+ myNext = myCurrent;
+ nextType = NodeType.PARALLEL_BRANCH_END;
+ walkingFromFinish = false;
+ } else {
+ FlowNode f = heads.iterator().next();
+ walkingFromFinish = f instanceof FlowEndNode;
+ myCurrent = f;
+ myNext = f;
+ if (isParallelEnd(f)) {
+ nextType = NodeType.PARALLEL_END;
+ } else if (isParallelStart(f)) {
+ nextType = NodeType.PARALLEL_START;
+ } else {
+ nextType = NodeType.NORMAL;
+ }
+ }
+ // No node has been visited yet, so there is no current type
+ currentType = null;
+ }
+
+ /**
+ * Return the node that begins the current parallel head
+ * @return The FlowNode that marks current parallel start, or null if not inside a parallel block
+ */
+ @CheckForNull
+ public FlowNode getCurrentParallelStartNode() {
+ return currentParallelStartNode;
+ }
+
+
+ /** Return number of levels deep we are in parallel blocks (0 when not inside any parallel block).
+  *  Depth = the current parallel start plus all enclosing starts pushed on the stack. */
+ public int getParallelDepth() {
+ return (currentParallelStart == null) ? 0 : 1 + parallelBlockStartStack.size();
+ }
+
+ /**
+ * Invoked when we start entering a parallel block (walking from head of the flow, so we see the block end first)
+ * @param endNode Node where parents merge (final end node for the parallel block)
+ * @param parents Parent nodes that end here
+ * @param blackList Nodes that must not be visited; blacklisted parents are excluded from the branches
+ * @return FlowNode myNext node to visit, or null if all parent branches were blacklisted
+ */
+ FlowNode hitParallelEnd(BlockEndNode endNode, List parents, Collection blackList) {
+ BlockStartNode start = endNode.getStartNode();
+
+ // Collect the non-blacklisted branch ends to visit
+ ArrayDeque branches = new ArrayDeque();
+ for (FlowNode f : parents) {
+ if (!blackList.contains(f)) {
+ branches.add(f);
+ }
+ }
+
+ FlowNode output = null;
+ if (branches.size() > 0) { // Push another branch start
+ ParallelBlockStart parallelBlockStart = new ParallelBlockStart(start);
+ // Visit the first branch now; the rest are queued as unvisited
+ output = branches.pop();
+ parallelBlockStart.unvisited = branches;
+
+ // Save the enclosing parallel block (if any) so we can return to it when this one completes
+ if (currentParallelStart != null) {
+ parallelBlockStartStack.push(currentParallelStart);
+ }
+ currentParallelStart = parallelBlockStart;
+ currentParallelStartNode = start;
+ }
+ return output;
+ }
+
+ /**
+ * Invoked when we complete parallel block, walking from the head (so encountered after the end)
+ * @return FlowNode if we're the last node (the parallel block's start node), otherwise null
+ * @throws IllegalStateException if invoked with no parallel block being tracked
+ */
+ FlowNode hitParallelStart() {
+ FlowNode output = null;
+
+ if (currentParallelStart != null) {
+ if (currentParallelStart.unvisited.isEmpty()) { // Strip off a completed branch
+ // We finished a nested set of parallel branches, visit the head and move up a level
+ output = currentParallelStartNode;
+
+ if (parallelBlockStartStack.size() > 0) {
+ // Finished a nested parallel block, move up a level
+ currentParallelStart = parallelBlockStartStack.pop();
+ currentParallelStartNode = currentParallelStart.forkStart;
+ } else { // At the top level, not inside any parallel block
+ currentParallelStart = null;
+ currentParallelStartNode = null;
+ }
+ }
+ } else {
+ throw new IllegalStateException("Hit a BlockStartNode with multiple children, and no record of the start!");
+ }
+
+ // Handle cases where the BlockStartNode for the parallel block is blackListed
+ return (output != null && !myBlackList.contains(output)) ? output : null;
+ }
+
+ /** Advances iteration, promoting the pre-computed nextType to currentType before moving on. */
+ @Override
+ public FlowNode next() {
+ currentType = nextType;
+ FlowNode output = super.next();
+ return output;
+ }
+
+ /**
+  * Compute the next node to visit, handling parallel-block boundaries.
+  * Also sets nextType as a side effect so visitSimpleChunks can classify the node.
+  * Returns null when iteration is exhausted.
+  */
+ @Override
+ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection blackList) {
+ FlowNode output = null;
+
+ // First we look at the parents of the current node if present
+ List parents = current.getParents();
+ if (parents.isEmpty()) {
+ // welp, we're done with this node, guess we consult the queue?
+ } else if (parents.size() == 1) {
+ FlowNode p = parents.get(0);
+ if (p == currentParallelStartNode) {
+ // Terminating a parallel scan
+ FlowNode temp = hitParallelStart();
+ if (temp != null) { // Start node for current parallel block now that it is done
+ nextType = NodeType.PARALLEL_START;
+ return temp;
+ }
+ } else if (!blackList.contains(p)) {
+ // ThreadNameAction identifies the start of an individual parallel branch
+ if (p instanceof BlockStartNode && p.getAction(ThreadNameAction.class) != null) {
+ nextType = NodeType.PARALLEL_BRANCH_START;
+ } else if (ForkScanner.isParallelEnd(p)) {
+ nextType = NodeType.PARALLEL_END;
+ } else {
+ nextType = NodeType.NORMAL;
+ }
+ return p;
+ }
+ } else if (current instanceof BlockEndNode && parents.size() > 1) {
+ // We must be a BlockEndNode that begins this
+ BlockEndNode end = ((BlockEndNode) current);
+ FlowNode possibleOutput = hitParallelEnd(end, parents, blackList); // What if output is block but other branches aren't?
+ if (possibleOutput != null) {
+ nextType = NodeType.PARALLEL_BRANCH_END;
+ return possibleOutput;
+ }
+ } else {
+ throw new IllegalStateException("Found a FlowNode with multiple parents that isn't the end of a block! "+ this.myCurrent);
+ }
+
+ // Parent chain exhausted or blacklisted: fall back to the next unvisited parallel branch, if any
+ if (currentParallelStart != null && currentParallelStart.unvisited.size() > 0) {
+ output = currentParallelStart.unvisited.pop();
+ nextType = NodeType.PARALLEL_BRANCH_END;
+ }
+ if (output == null) {
+ nextType = null;
+ }
+ return output;
+ }
+
+ /** Convenience: construct a ForkScanner over the given heads (excluding blacklisted nodes) and run the chunk visitor. */
+ public static void visitSimpleChunks(@Nonnull Collection heads, @Nonnull Collection blacklist, @Nonnull SimpleChunkVisitor visitor, @Nonnull ChunkFinder finder) {
+ ForkScanner scanner = new ForkScanner();
+ scanner.setup(heads, blacklist);
+ scanner.visitSimpleChunks(visitor, finder);
+ }
+
+ /** Convenience: construct a ForkScanner over the given heads (no blacklist) and run the chunk visitor. */
+ public static void visitSimpleChunks(@Nonnull Collection heads, @Nonnull SimpleChunkVisitor visitor, @Nonnull ChunkFinder finder) {
+ ForkScanner scanner = new ForkScanner();
+ scanner.setup(heads);
+ scanner.visitSimpleChunks(visitor, finder);
+ }
+
+ /** Walk through flows, splitting into chunks per the ChunkFinder and firing the
+  *  SimpleChunkVisitor callbacks for chunk boundaries, atom nodes, and parallel events.
+  *  Iteration runs from the heads backwards, so chunk ends are seen before chunk starts. */
+ public void visitSimpleChunks(@Nonnull SimpleChunkVisitor visitor, @Nonnull ChunkFinder finder) {
+ FlowNode prev = null;
+ // If the first node we see is inside a chunk, synthesize a chunk-end for it
+ if (finder.isStartInsideChunk() && hasNext()) {
+ visitor.chunkEnd(this.myNext, null, this);
+ }
+ while(hasNext()) {
+ // prev is the node visited just before myCurrent, or null on the first iteration
+ prev = (myCurrent != myNext) ? myCurrent : null;
+ FlowNode f = next();
+
+ boolean boundary = false;
+ if (finder.isChunkStart(myCurrent, prev)) {
+ visitor.chunkStart(myCurrent, myNext, this);
+ boundary = true;
+ }
+ if (finder.isChunkEnd(myCurrent, prev)) {
+ visitor.chunkEnd(myCurrent, prev, this);
+ boundary = true;
+ }
+ if (!boundary) {
+ visitor.atomNode(myNext, f, prev, this);
+ }
+
+ // Trigger on parallels
+ // NOTE(review): currentType is set by next(); a null currentType here would NPE in the
+ // switch — appears unreachable after a successful next(), but confirm.
+ switch (currentType) {
+ case NORMAL:
+ break;
+ case PARALLEL_END:
+ visitor.parallelEnd(this.currentParallelStartNode, myCurrent, this);
+ break;
+ case PARALLEL_START:
+ visitor.parallelStart(myCurrent, prev, this);
+ break;
+ case PARALLEL_BRANCH_END:
+ visitor.parallelBranchEnd(this.currentParallelStartNode, myCurrent, this);
+ break;
+ case PARALLEL_BRANCH_START:
+ // Needed because once we hit the start of the last branch, the next node is our currentParallelStart
+ FlowNode parallelStart = (nextType == NodeType.PARALLEL_START) ? myNext : this.currentParallelStartNode;
+ visitor.parallelBranchStart(parallelStart, myCurrent, this);
+ break;
+ default:
+ throw new IllegalStateException("Unhandled type for current node");
+ }
+ }
+ }
+
+}
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelledChunkFinder.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelledChunkFinder.java
new file mode 100644
index 00000000..11806abf
--- /dev/null
+++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelledChunkFinder.java
@@ -0,0 +1,46 @@
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import org.jenkinsci.plugins.workflow.actions.LabelAction;
+import org.jenkinsci.plugins.workflow.graph.BlockEndNode;
+import org.jenkinsci.plugins.workflow.graph.BlockStartNode;
+import org.jenkinsci.plugins.workflow.graph.FlowNode;
+
+import javax.annotation.CheckForNull;
+import javax.annotation.Nonnull;
+
+/**
+ * Splits a flow execution into {@link FlowChunk}s whenever you have a label.
+ * This works for labelled blocks or single-step labels.
+ *
+ * Useful for collecting stages and parallel branches.
+ * @author Sam Van Oort
+ */
+public class LabelledChunkFinder implements ChunkFinder {
+
+    /** The first node encountered is treated as already inside a chunk,
+     *  since a label opens a chunk that runs until the next label. */
+    @Override
+    public boolean isStartInsideChunk() {
+        return true;
+    }
+
+    /** Start is anywhere with a {@link LabelAction} */
+    @Override
+    public boolean isChunkStart(@Nonnull FlowNode current, @CheckForNull FlowNode previous) {
+        LabelAction la = current.getAction(LabelAction.class);
+        return la != null;
+    }
+
+    /** End is where the previous node is a chunk start
+     * or this is a {@link BlockEndNode} whose {@link BlockStartNode} has a label action */
+    @Override
+    public boolean isChunkEnd(@Nonnull FlowNode current, @CheckForNull FlowNode previous) {
+        if (previous == null) {
+            return false;
+        }
+        // A labelled block's end node closes the chunk opened by its start node
+        if (current instanceof BlockEndNode) {
+            BlockStartNode bsn = ((BlockEndNode) current).getStartNode();
+            if (isChunkStart(bsn, null)) {
+                return true;
+            }
+        }
+        // Walking backwards: if the node after this one starts a chunk, this node ends the previous chunk
+        return isChunkStart(previous, null);
+    }
+}
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java
new file mode 100644
index 00000000..2bf779b6
--- /dev/null
+++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java
@@ -0,0 +1,121 @@
+/*
+ * The MIT License
+ *
+ * Copyright (c) 2016, CloudBees, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import org.jenkinsci.plugins.workflow.graph.BlockEndNode;
+import org.jenkinsci.plugins.workflow.graph.BlockStartNode;
+import org.jenkinsci.plugins.workflow.graph.FlowNode;
+
+import javax.annotation.CheckForNull;
+import javax.annotation.Nonnull;
+import javax.annotation.concurrent.NotThreadSafe;
+import java.util.Collection;
+import java.util.List;
+
+/**
+ * Extension of {@link LinearScanner} that skips nested blocks at the current level, useful for finding enclosing blocks.
+ * ONLY use this with nodes inside the flow graph, never the last node of a completed flow (it will jump over the whole flow).
+ *
+ * This is useful where you only care about {@link FlowNode}s that precede this one or are part of an enclosing scope (within a Block).
+ *
+ *
+ * Specifically:
+ *
+ * - Where a {@link BlockEndNode} is encountered, the scanner will jump to the {@link BlockStartNode} and go to its first parent.
+ * - The only case where you visit branches of a parallel block is if you begin inside it.
+ *
+ *
+ * Specific use cases:
+ *
+ * - Finding out the executor workspace used to run a FlowNode
+ * - Finding the start of the parallel block enclosing the current node
+ * - Locating the label applying to a given FlowNode (if any) if using labelled blocks
+ *
+ *
+ * @author Sam Van Oort
+ */
+@NotThreadSafe
+public class LinearBlockHoppingScanner extends LinearScanner {
+
+    /** Sets up iteration; returns false if there is nothing to iterate
+     *  (including the case where block-hopping from the head lands on nothing). */
+ @Override
+ public boolean setup(@CheckForNull Collection heads, @CheckForNull Collection blackList) {
+ boolean possiblyStartable = super.setup(heads, blackList);
+ return possiblyStartable && myCurrent != null; // In case we start at an end block
+ }
+
+    /** Starts from the first head only, after hopping over any block it ends. */
+ @Override
+ protected void setHeads(@Nonnull Collection heads) {
+ if (heads.size() > 0) {
+ this.myCurrent = jumpBlockScan(heads.iterator().next(), myBlackList);
+ this.myNext = this.myCurrent;
+ }
+ }
+
+ /** Keeps jumping over blocks until we hit the first node preceding a block;
+  *  returns null if the walk dead-ends on a blacklisted node or a node with no usable parents. */
+ @CheckForNull
+ protected FlowNode jumpBlockScan(@CheckForNull FlowNode node, @Nonnull Collection blacklistNodes) {
+ FlowNode candidate = node;
+
+ // Find the first candidate node preceding a block... and filtering by blacklist
+ while (candidate != null && candidate instanceof BlockEndNode) {
+ // Hop from the block's end directly to its start, skipping the block interior
+ candidate = ((BlockEndNode) candidate).getStartNode();
+ if (blacklistNodes.contains(candidate)) {
+ return null;
+ }
+ List parents = candidate.getParents();
+ if (parents == null || parents.size() == 0) {
+ return null;
+ }
+ boolean foundNode = false;
+ for (FlowNode f : parents) {
+ if (!blacklistNodes.contains(f)) {
+ candidate = f; // Loop again b/c could be BlockEndNode
+ foundNode = true;
+ break;
+ }
+ }
+ if (!foundNode) {
+ return null;
+ }
+ }
+
+ return candidate;
+ }
+
+    /** Returns the first non-blacklisted parent, hopping over any block that parent ends. */
+ @Override
+ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection blackList) {
+ if (current == null) {
+ return null;
+ }
+ List parents = current.getParents();
+ if (parents != null && parents.size() > 0) {
+ for (FlowNode f : parents) {
+ if (!blackList.contains(f)) {
+ return (f instanceof BlockEndNode) ? jumpBlockScan(f, blackList) : f;
+ }
+ }
+ }
+ return null;
+ }
+}
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java
new file mode 100644
index 00000000..a7326812
--- /dev/null
+++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java
@@ -0,0 +1,80 @@
+/*
+ * The MIT License
+ *
+ * Copyright (c) 2016, CloudBees, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import org.jenkinsci.plugins.workflow.graph.FlowNode;
+
+import javax.annotation.Nonnull;
+import javax.annotation.concurrent.NotThreadSafe;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Scans through the flow graph in strictly linear fashion, visiting only the first branch in parallel blocks.
+ *
+ * Iteration order: depth-ONLY, meaning we walk through parents and only follow the first parent of each {@link FlowNode}
+ * This means that where are parallel branches, we will only visit a partial set of {@link FlowNode}s in the directed acyclic graph.
+ *
+ *
+ * Use case: we don't care about parallel branches or know they don't exist, we just want to walk through the top-level blocks.
+ *
+ *
+ * This is the fastest and simplest way to walk a flow, because you only care about a single node at a time.
+ * Nuance: where there are multiple parent nodes (in a parallel block), and one is blacklisted, we'll find the first non-blacklisted one.
+ * @author Sam Van Oort
+ */
+@NotThreadSafe
+public class LinearScanner extends AbstractFlowScanner {
+
+    /** Clears all iteration state; the blacklist is reset to an immutable empty set. */
+    @Override
+    protected void reset() {
+        this.myCurrent = null;
+        this.myNext = null;
+        // emptySet() rather than the raw EMPTY_SET constant
+        this.myBlackList = Collections.emptySet();
+    }
+
+    /** Begins iteration from the first head only — other heads (parallel branches) are ignored. */
+    @Override
+    protected void setHeads(@Nonnull Collection heads) {
+        if (heads.size() > 0) {
+            this.myCurrent = heads.iterator().next();
+            this.myNext = this.myCurrent;
+        }
+    }
+
+    /**
+     * Returns the first non-blacklisted parent of the current node, or null when exhausted.
+     * With multiple parents (parallel block), only the first acceptable one is followed.
+     */
+    @Override
+    protected FlowNode next(FlowNode current, @Nonnull Collection blackList) {
+        if (current == null) {
+            return null;
+        }
+        List parents = current.getParents();
+        if (parents != null && parents.size() > 0) {
+            for (FlowNode f : parents) {
+                if (!blackList.contains(f)) {
+                    return f;
+                }
+            }
+        }
+        return null;
+    }
+}
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java
new file mode 100644
index 00000000..43bba815
--- /dev/null
+++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java
@@ -0,0 +1,99 @@
+/*
+ * The MIT License
+ *
+ * Copyright (c) 2016, CloudBees, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import org.jenkinsci.plugins.workflow.graph.FlowNode;
+
+import javax.annotation.CheckForNull;
+import javax.annotation.Nonnull;
+
+/**
+ * FlowChunk that holds direct references to the {@link FlowNode} instances and context info
+ * This makes it easy to use in analysis and visualizations, but inappropriate to retain in caches, etc
+ * @author Sam Van Oort
+ */
+public class MemoryFlowChunk implements FlowChunkWithContext {
+    protected FlowNode firstNode = null;
+    protected FlowNode lastNode = null;
+    protected FlowNode nodeBefore = null;
+    protected FlowNode nodeAfter = null;
+    private long pauseTimeMillis = 0;
+
+    /**
+     * @param before node immediately preceding the chunk, or null if the chunk starts the flow
+     * @param firstNode first node inside the chunk
+     * @param lastNode last node inside the chunk
+     * @param nodeAfter node immediately following the chunk, or null if the chunk ends the flow
+     */
+    public MemoryFlowChunk(@CheckForNull FlowNode before, @Nonnull FlowNode firstNode, @Nonnull FlowNode lastNode, @CheckForNull FlowNode nodeAfter) {
+        this.setNodeBefore(before);
+        this.setFirstNode(firstNode);
+        this.setLastNode(lastNode);
+        // BUGFIX: was setNodeAfter(lastNode), which silently discarded the nodeAfter argument
+        this.setNodeAfter(nodeAfter);
+    }
+
+    public MemoryFlowChunk() {
+
+    }
+
+    @Override
+    public FlowNode getFirstNode() {
+        return firstNode;
+    }
+
+    public void setFirstNode(FlowNode firstNode) {
+        this.firstNode = firstNode;
+    }
+
+
+    @Override
+    public FlowNode getLastNode() {
+        return lastNode;
+    }
+
+    public void setLastNode(FlowNode lastNode) {
+        this.lastNode = lastNode;
+    }
+
+    @Override
+    public FlowNode getNodeBefore() {
+        return nodeBefore;
+    }
+
+    public void setNodeBefore(FlowNode nodeBefore) {
+        this.nodeBefore = nodeBefore;
+    }
+
+    @Override
+    public FlowNode getNodeAfter() {
+        return nodeAfter;
+    }
+
+    public void setNodeAfter(FlowNode nodeAfter) {
+        this.nodeAfter = nodeAfter;
+    }
+
+    /** Cumulative pause time (milliseconds) attributed to this chunk. */
+    public long getPauseTimeMillis() {
+        return pauseTimeMillis;
+    }
+
+    public void setPauseTimeMillis(long pauseTimeMillis) {
+        this.pauseTimeMillis = pauseTimeMillis;
+    }
+}
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelFlowChunk.java
new file mode 100644
index 00000000..024d7a5c
--- /dev/null
+++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelFlowChunk.java
@@ -0,0 +1,17 @@
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import javax.annotation.Nonnull;
+import java.util.Map;
+
+/**
+ * FlowChunk mapping to the block from a Parallel step (with parallel branches inside)
+ */
+public interface ParallelFlowChunk extends FlowChunk {
+
+    /** Returns the branches of a parallel flow chunk, mapped by branch name and parallel branch block */
+    @Nonnull
+    Map getBranches();
+
+    /** Registers (or replaces) the chunk for the named parallel branch.
+     *  NOTE(review): generics appear stripped in this patch; ChunkType is presumably a type
+     *  parameter (e.g. {@code ChunkType extends FlowChunk}) declared on the interface — confirm upstream.
+     *  The stray {@code @Nonnull} on this void method was removed: void returns cannot be annotated for nullity. */
+    void setBranch(@Nonnull String branchName, @Nonnull ChunkType branchBlock);
+}
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java
new file mode 100644
index 00000000..9a136ade
--- /dev/null
+++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java
@@ -0,0 +1,63 @@
+/*
+ * The MIT License
+ *
+ * Copyright (c) 2016, CloudBees, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import org.jenkinsci.plugins.workflow.graph.FlowNode;
+
+import javax.annotation.CheckForNull;
+import javax.annotation.Nonnull;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+/**
+ * Corresponds to a parallel block, acts as an in-memory container that can plug into status/timing APIs
+ * @author Sam Van Oort
+ */
+public class ParallelMemoryFlowChunk extends MemoryFlowChunk implements ParallelFlowChunk {
+
+    // LinkedHashMap to preserve insert order
+    private LinkedHashMap branches = new LinkedHashMap();
+
+    public ParallelMemoryFlowChunk(@Nonnull FlowNode firstNode, @Nonnull FlowNode lastNode) {
+        super(null, firstNode, lastNode, null);
+    }
+
+    public ParallelMemoryFlowChunk(@CheckForNull FlowNode nodeBefore, @Nonnull FlowNode firstNode, @Nonnull FlowNode lastNode, @CheckForNull FlowNode nodeAfter) {
+        super(nodeBefore, firstNode, lastNode, nodeAfter);
+    }
+
+    /** Registers (or replaces) the chunk for the named parallel branch. */
+    @Override
+    public void setBranch(@Nonnull String branchName, @Nonnull MemoryFlowChunk branchBlock) {
+        branches.put(branchName, branchBlock);
+    }
+
+    /** @return unmodifiable view of the branches, in insertion order */
+    @Override
+    @Nonnull
+    public Map getBranches() {
+        return Collections.unmodifiableMap(branches);
+    }
+
+}
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java
new file mode 100644
index 00000000..24278e6a
--- /dev/null
+++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java
@@ -0,0 +1,121 @@
+/*
+ * The MIT License
+ *
+ * Copyright (c) 2016, CloudBees, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import org.jenkinsci.plugins.workflow.graph.BlockEndNode;
+import org.jenkinsci.plugins.workflow.graph.BlockStartNode;
+import org.jenkinsci.plugins.workflow.graph.FlowNode;
+
+import javax.annotation.CheckForNull;
+import javax.annotation.Nonnull;
+
+/**
+ * This visitor's callbacks are invoked as we walk through a pipeline flow graph, and it splits it into chunks.
+ * A {@link ForkScanner#visitSimpleChunks(SimpleChunkVisitor, ChunkFinder)} creates these FlowChunks using a {@link ChunkFinder} to define the chunk boundaries.
+ *
+ *
+ * Implementations get to decide how to use and handle chunks.
+ *
+ * At a minimum they should handle:
+ *
+ * - Unbalanced numbers of chunk start/end calls (for incomplete flows)
+ * - A chunk end with no beginning (runs to start of flow, or never began)
+ * - A chunk start with no end (ex: a block that hasn't completed running)
+ * - Other starts/ends before we hit the closing one (nesting)
+ * - Atom nodes not within the current Chunk (visitor is responsible for handling state)
+ *
+ *
+ * Important implementation note: multiple callbacks can be invoked for a single node depending on its type.
+ * For example, we may capture parallels as chunks.
+ *
+ *
+ * Callbacks reporting on chunk/parallel information:
+ *
+ * - {@link #chunkStart(FlowNode, FlowNode, ForkScanner)} is called on the current node when we hit start of a boundary (inclusive)
+ * - {@link #chunkEnd(FlowNode, FlowNode, ForkScanner)} is called when we hit end of a boundary (inclusive)
+ * - {@link #atomNode(FlowNode, FlowNode, FlowNode, ForkScanner)} called when a node is neither start nor end.
+ * - All the parallel methods are used to report on parallel status - helpful when we need to deal with parallels internal to chunks.
+ *
+ *
+ * @author Sam Van Oort
+ */
+public interface SimpleChunkVisitor {
+
+ /**
+ * Called when hitting the start of a chunk.
+ * @param startNode First node in chunk (marker), included in node
+ * @param beforeBlock First node before chunk (null if none exist)
+ * @param scanner Forkscanner used (for state tracking)
+ */
+ void chunkStart(@Nonnull FlowNode startNode, @CheckForNull FlowNode beforeBlock, @Nonnull ForkScanner scanner);
+
+ /**
+ * Called when hitting the end of a chunk.
+ * @param endNode Last node in chunk
+ * @param afterChunk Node after chunk (null if we are on the last node)
+ * @param scanner Forkscanner used (for state tracking)
+ */
+ void chunkEnd(@Nonnull FlowNode endNode, @CheckForNull FlowNode afterChunk, @Nonnull ForkScanner scanner);
+
+ /**
+ * Notifies that we've hit the start of a parallel block (the point where it branches out).
+ * @param parallelStartNode The {@link org.jenkinsci.plugins.workflow.graph.BlockStartNode} beginning it, next will be branches
+ * @param branchNode {@link org.jenkinsci.plugins.workflow.graph.BlockStartNode} for one of the branches (it will be labelled)
+ * @param scanner ForkScanner used
+ */
+ void parallelStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchNode, @Nonnull ForkScanner scanner);
+
+ /**
+ * Notifies that we've seen the end of a parallel block
+ * @param parallelStartNode First node of parallel ({@link BlockStartNode} before the branches)
+ * @param parallelEndNode Last node of parallel ({@link BlockEndNode})
+ * @param scanner ForkScanner used (for state tracking)
+ */
+ void parallelEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode parallelEndNode, @Nonnull ForkScanner scanner);
+
+ /**
+ * Hit the start of a parallel branch
+ * @param parallelStartNode First node of parallel (BlockStartNode before the branches)
+ * @param branchStartNode BlockStartNode beginning the branch (this will have the ThreadNameAction giving its name)
+ * @param scanner ForkScanner used (for state tracking)
+ */
+ void parallelBranchStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchStartNode, @Nonnull ForkScanner scanner);
+
+ /**
+ * Hit the end of a parallel branch.
+ * May not be invoked if we're inside an in-progress parallel
+ * @param parallelStartNode First node of parallel (BlockStartNode before the branches)
+ * @param branchEndNode Final node of the branch (may be BlockEndNode if done, otherwise just the last one executed)
+ * @param scanner ForkScanner used (for state tracking)
+ */
+ void parallelBranchEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchEndNode, @Nonnull ForkScanner scanner);
+
+ /**
+ * Called for a flownode neither start nor end.
+ * Ways you may want to use this: accumulate pause time, collect errors, etc.
+ * Note: invocations don't guarantee whether or not you're within a marked chunk.
+ * @param before Node before the current
+ * @param atomNode The node itself
+ * @param after Node after the current
+ * @param scan Reference to our forkscanner, if we want to poke at the state within
+ */
+ void atomNode(@CheckForNull FlowNode before, @Nonnull FlowNode atomNode, @CheckForNull FlowNode after, @Nonnull ForkScanner scan);
+}
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardChunkVisitor.java
new file mode 100644
index 00000000..2e9495bf
--- /dev/null
+++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardChunkVisitor.java
@@ -0,0 +1,65 @@
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import org.jenkinsci.plugins.workflow.graph.FlowNode;
+
+import javax.annotation.CheckForNull;
+import javax.annotation.Nonnull;
+
+/**
+ * Simple handler for linear {@link FlowChunk}s (basic stages, etc), and designed to be extended.
+ * Note: only tracks one chunk at a time, so it won't handle nesting or parallels.
+ * Specifically, it will reset with each chunk start.
+ * Extend {@link #handleChunkDone(MemoryFlowChunk)} to gather up final chunks.
+ * Extend {@link #atomNode(FlowNode, FlowNode, FlowNode, ForkScanner)} to gather data about nodes in a chunk.
+ * @author Sam Van Oort
+ */
+public class StandardChunkVisitor implements SimpleChunkVisitor {
+
+ // Single chunk accumulator, reused (reset) between chunks -- hence no support for nesting/parallels.
+ protected MemoryFlowChunk chunk = new MemoryFlowChunk();
+
+
+ /** Override me to do something once the chunk is finished (such as add it to a list).
+ * Note: the chunk will be mutated directly, so you need to copy it if you want to do something.
+ */
+ protected void handleChunkDone(@Nonnull MemoryFlowChunk chunk) {
+ // NO-OP initially
+ }
+
+ /** Clears all state on the shared chunk instance so it can be reused for the next chunk. */
+ protected void resetChunk(@Nonnull MemoryFlowChunk chunk) {
+ chunk.setFirstNode(null);
+ chunk.setLastNode(null);
+ chunk.setNodeBefore(null);
+ chunk.setNodeAfter(null);
+ chunk.setPauseTimeMillis(0);
+ }
+
+ // The scanners iterate from the flow heads backwards (see node orderings in FlowScannerTest),
+ // so chunkEnd() fires before chunkStart() for a given chunk: by the time we see the start,
+ // the end fields are already populated and the chunk can be flushed and reset.
+ @Override
+ public void chunkStart(@Nonnull FlowNode startNode, @CheckForNull FlowNode beforeBlock, @Nonnull ForkScanner scanner) {
+ chunk.setNodeBefore(beforeBlock);
+ chunk.setFirstNode(startNode);
+ handleChunkDone(chunk);
+ resetChunk(chunk);
+ }
+
+ @Override
+ public void chunkEnd(@Nonnull FlowNode endNode, @CheckForNull FlowNode afterChunk, @Nonnull ForkScanner scanner) {
+ chunk.setLastNode(endNode);
+ chunk.setNodeAfter(afterChunk);
+ }
+
+ // Parallel events are deliberately ignored: this visitor only tracks one linear chunk at a time.
+ @Override
+ public void parallelStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchNode, @Nonnull ForkScanner scanner) {}
+
+ @Override
+ public void parallelEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode parallelEndNode, @Nonnull ForkScanner scanner) {}
+
+ @Override
+ public void parallelBranchStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchStartNode, @Nonnull ForkScanner scanner) {}
+
+ @Override
+ public void parallelBranchEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchEndNode, @Nonnull ForkScanner scanner) {}
+
+ /** Extend me to do something with nodes inside a chunk */
+ @Override
+ public void atomNode(@CheckForNull FlowNode before, @Nonnull FlowNode atomNode, @CheckForNull FlowNode after, @Nonnull ForkScanner scan) {}
+}
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/package-info.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/package-info.java
new file mode 100644
index 00000000..63374edc
--- /dev/null
+++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/package-info.java
@@ -0,0 +1,11 @@
+/**
+ * Provides a library of methods to work with and analyze the graph of {@link org.jenkinsci.plugins.workflow.graph.FlowNode}s produced from a pipeline execution.
+ *
+ *
+ * The core APIs are described in the javadocs for {@link org.jenkinsci.plugins.workflow.graphanalysis.AbstractFlowScanner}
+ * But in general it provides for iteration through the Directed Acyclic Graph (DAG) of a flow, filtering, search for matches, and
+ * visiting all nodes via internal iteration.
+ *
+ *
+ * Static methods and a few implementations are also provided in {@link org.jenkinsci.plugins.workflow.graphanalysis.FlowScanningUtils}.
+ */
+
+package org.jenkinsci.plugins.workflow.graphanalysis;
\ No newline at end of file
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/pickles/Pickle.java b/src/main/java/org/jenkinsci/plugins/workflow/pickles/Pickle.java
index 48c4362b..26219fd2 100644
--- a/src/main/java/org/jenkinsci/plugins/workflow/pickles/Pickle.java
+++ b/src/main/java/org/jenkinsci/plugins/workflow/pickles/Pickle.java
@@ -50,7 +50,7 @@ public ListenableFuture> rehydrate() {
* An implementation should return quickly and avoid acquiring locks in this method itself (as opposed to the future).
* {@link ListenableFuture#cancel} should be implemented if possible.
* @param owner an owner handle on which you may, for example, call {@link FlowExecutionOwner#getListener}
- * @return a future on which {@link ListenableFuture#cancel} might be called; also polite to override {@link ListenableFuture#toString} for diagnostics
+ * @return a future on which {@link ListenableFuture#cancel(boolean)} might be called; also polite to override the {@link Object#toString} method for diagnostics
*/
public ListenableFuture> rehydrate(FlowExecutionOwner owner) {
if (Util.isOverridden(Pickle.class, getClass(), "rehydrate")) {
diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java
new file mode 100644
index 00000000..9ddaed42
--- /dev/null
+++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java
@@ -0,0 +1,464 @@
+/*
+ * The MIT License
+ *
+ * Copyright (c) 2016, CloudBees, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import com.google.common.base.Predicate;
+import com.google.common.base.Predicates;
+import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition;
+import org.jenkinsci.plugins.workflow.flow.FlowExecution;
+import org.jenkinsci.plugins.workflow.graph.FlowNode;
+import org.jenkinsci.plugins.workflow.job.WorkflowJob;
+import org.jenkinsci.plugins.workflow.job.WorkflowRun;
+import org.junit.Assert;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.jvnet.hudson.test.BuildWatcher;
+import org.jvnet.hudson.test.JenkinsRule;
+
+import java.util.AbstractSet;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.NoSuchElementException;
+
+// Slightly dirty but it removes a ton of FlowTestUtils.* class qualifiers
+import static org.jenkinsci.plugins.workflow.graphanalysis.FlowTestUtils.*;
+
+/**
+ * Tests for all the core parts of graph analysis except the ForkScanner internals, which are complex enough to merit their own tests
+ * @author Sam Van Oort
+ */
+public class FlowScannerTest {
+
+ @ClassRule
+ public static BuildWatcher buildWatcher = new BuildWatcher();
+
+ @Rule public JenkinsRule r = new JenkinsRule();
+
+
+ /** Tests the core logic separately from each implementation's scanner */
+ @Test
+ public void testAbstractScanner() throws Exception {
+ WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "SimpleLinear");
+ job.setDefinition(new CpsFlowDefinition(
+ "sleep 2 \n" +
+ "echo 'donothing'\n" +
+ "echo 'doitagain'"
+ ));
+
+ /** Flow structure (ID - type)
2 - FlowStartNode
3 - SleepStep
4 - EchoStep
5 - EchoStep
6 - FlowEndNode
+ */
+
+ WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0));
+ FlowExecution exec = b.getExecution();
+ List heads = exec.getCurrentHeads();
+ FlowNode intermediateNode = exec.getNode("4");
+ AbstractFlowScanner linear = new LinearScanner();
+
+ // ## Bunch of tests for convertToFastCheckable ##
+ Assert.assertEquals(Collections.EMPTY_SET, linear.convertToFastCheckable(null));
+ Assert.assertEquals(Collections.EMPTY_SET, linear.convertToFastCheckable(new ArrayList()));
+
+ Collection coll = linear.convertToFastCheckable(Arrays.asList(intermediateNode));
+ Assert.assertTrue("Singleton set used for one element", coll instanceof AbstractSet);
+ Assert.assertEquals(1, coll.size());
+
+ Collection multipleItems = Arrays.asList(exec.getNode("3"), exec.getNode("2"));
+ coll = linear.convertToFastCheckable(multipleItems);
+ Assert.assertTrue("Original used for short list", coll instanceof List);
+ Assert.assertEquals(2, coll.size());
+
+ coll = linear.convertToFastCheckable(new LinkedHashSet(multipleItems));
+ Assert.assertTrue("Original used where set", coll instanceof LinkedHashSet);
+
+ multipleItems = new ArrayList();
+ for (int i=0; i < 3; i++) {
+ multipleItems.add(intermediateNode);
+ }
+ coll = linear.convertToFastCheckable(multipleItems);
+ Assert.assertTrue("Original used for short list", coll instanceof List);
+ Assert.assertEquals(3, coll.size());
+
+ multipleItems = new ArrayList();
+ for (int i=0; i < 10; i++) {
+ multipleItems.add(intermediateNode);
+ }
+ coll = linear.convertToFastCheckable(multipleItems);
+ // NOTE(review): assertion message is stale -- this actually verifies a HashSet is used for longer lists
+ Assert.assertTrue("Original used for short list", coll instanceof HashSet);
+ Assert.assertEquals(1, coll.size());
+
+
+ // Setup, return false if no nodes to iterate, else true
+ FlowNode lastNode = heads.get(0);
+ FlowNode nullNode = null;
+ Collection nullColl = null;
+
+ Assert.assertTrue(linear.setup(heads, null));
+ Assert.assertTrue(linear.setup(heads, Collections.EMPTY_SET));
+ Assert.assertFalse(linear.setup(nullColl, heads));
+ Assert.assertFalse(linear.setup(nullColl, null));
+ Assert.assertFalse(linear.setup(heads, heads));
+ Assert.assertTrue(linear.setup(heads));
+ Assert.assertFalse(linear.setup(nullColl));
+ Assert.assertFalse(linear.setup(Collections.EMPTY_SET));
+ Assert.assertTrue(linear.setup(lastNode));
+ Assert.assertTrue(linear.setup(lastNode, nullColl));
+ Assert.assertFalse(linear.setup(nullNode));
+ Assert.assertFalse(linear.setup(nullNode, heads));
+ Assert.assertFalse(linear.setup(nullNode, nullColl));
+ Assert.assertTrue(linear.setup(Arrays.asList(intermediateNode, lastNode), Collections.singleton(intermediateNode)));
+ Assert.assertEquals(lastNode, linear.myCurrent);
+
+ // First match, with no blacklist
+ // NOTE(review): 'ids' is unused
+ int[] ids = {6, 5, 4, 3, 2};
+ FlowNode firstEchoNode = exec.getNode("5");
+ FlowExecution nullExecution = null;
+
+ Assert.assertEquals(firstEchoNode, linear.findFirstMatch(heads, Collections.EMPTY_LIST, MATCH_ECHO_STEP));
+ Assert.assertEquals(firstEchoNode, linear.findFirstMatch(heads, MATCH_ECHO_STEP));
+ Assert.assertEquals(firstEchoNode, linear.findFirstMatch(lastNode, MATCH_ECHO_STEP));
+ Assert.assertEquals(firstEchoNode, linear.findFirstMatch(exec, MATCH_ECHO_STEP));
+ Assert.assertEquals(null, linear.findFirstMatch(nullColl, MATCH_ECHO_STEP));
+ Assert.assertEquals(null, linear.findFirstMatch(Collections.EMPTY_SET, MATCH_ECHO_STEP));
+ Assert.assertEquals(null, linear.findFirstMatch(nullNode, MATCH_ECHO_STEP));
+ Assert.assertEquals(null, linear.findFirstMatch(nullExecution, MATCH_ECHO_STEP));
+
+
+ // Filtered nodes
+ assertNodeOrder("Filtered echo nodes", linear.filteredNodes(heads, MATCH_ECHO_STEP), 5, 4);
+ assertNodeOrder("Filtered echo nodes", linear.filteredNodes(heads, Collections.singleton(intermediateNode), MATCH_ECHO_STEP), 5);
+ Assert.assertEquals(0, linear.filteredNodes(heads, null, (Predicate) Predicates.alwaysFalse()).size());
+ Assert.assertEquals(0, linear.filteredNodes(nullNode, MATCH_ECHO_STEP).size());
+ Assert.assertEquals(0, linear.filteredNodes(Collections.EMPTY_SET, MATCH_ECHO_STEP).size());
+
+ // Same filter using the filterator
+ linear.setup(heads);
+ ArrayList collected = new ArrayList();
+ Filterator filt = linear.filter(MATCH_ECHO_STEP);
+ while (filt.hasNext()) {
+ collected.add(filt.next());
+ }
+ assertNodeOrder("Filterator filtered echo nodes", collected, 5, 4);
+
+
+ // Visitor pattern tests
+ FlowTestUtils.CollectingVisitor visitor = new FlowTestUtils.CollectingVisitor();
+ // NOTE(review): passes a null visitor over an empty set; presumably meant to pass 'visitor' --
+ // the assertion below only confirms the visitor was never invoked. Verify intent.
+ linear.visitAll(Collections.EMPTY_SET, null);
+ Assert.assertEquals(0, visitor.getVisited().size());
+
+ linear.visitAll(heads, visitor);
+ assertNodeOrder("Visiting all nodes", visitor.getVisited(), 6, 5, 4, 3, 2);
+
+ // And visiting with blacklist
+ visitor.visited.clear();
+ linear.visitAll(heads, Collections.singleton(intermediateNode), visitor);
+ assertNodeOrder("Visiting all nodes with blacklist", visitor.getVisited(), 6, 5);
+
+ // Tests for edge cases of the various basic APIs
+ linear.myNext = null;
+ Assert.assertFalse(linear.hasNext());
+ try {
+ linear.next();
+ Assert.fail("Should throw NoSuchElement exception");
+ } catch (NoSuchElementException nsee) {
+ // Passing case
+ }
+ Assert.assertTrue(linear.iterator() == linear);
+ try {
+ linear.remove();
+ Assert.fail("Should throw UnsupportedOperation exception");
+ } catch (UnsupportedOperationException usoe) {
+ // Passing case
+ }
+ }
+
+ /** Tests the basic scan algorithm, predicate use, start/stop nodes */
+ @Test
+ public void testSimpleScan() throws Exception {
+ WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "Convoluted");
+ job.setDefinition(new CpsFlowDefinition(
+ "sleep 2 \n" +
+ "echo 'donothing'\n" +
+ "echo 'doitagain'"
+ ));
+
+ /** Flow structure (ID - type)
2 - FlowStartNode
3 - SleepStep
4 - EchoStep
5 - EchoStep
6 - FlowEndNode
+ */
+
+ WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0));
+ FlowExecution exec = b.getExecution();
+ AbstractFlowScanner[] scans = {new LinearScanner(),
+ new DepthFirstScanner(),
+ new ForkScanner()
+ };
+
+ List heads = exec.getCurrentHeads();
+
+ // Iteration tests
+ for (AbstractFlowScanner scan : scans) {
+ System.out.println("Iteration test with scanner: " + scan.getClass());
+ scan.setup(heads, null);
+ assertNodeOrder("Testing full scan for scanner " + scan.getClass(), scan, 6, 5, 4, 3, 2);
+ Assert.assertFalse(scan.hasNext());
+
+ // Blacklist tests
+ scan.setup(heads, Collections.singleton(exec.getNode("4")));
+ assertNodeOrder("Testing full scan for scanner " + scan.getClass(), scan, 6, 5);
+ FlowNode f = scan.findFirstMatch(heads, Collections.singleton(exec.getNode("6")), (Predicate)Predicates.alwaysTrue());
+ Assert.assertNull(f);
+ }
+ }
+
+ /** Tests the basic scan algorithm where blocks are involved */
+ @Test
+ public void testBasicScanWithBlock() throws Exception {
+ WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "Convoluted");
+ job.setDefinition(new CpsFlowDefinition(
+ "echo 'first'\n" +
+ "timeout(time: 10, unit: 'SECONDS') {\n" +
+ " echo 'second'\n" +
+ " echo 'third'\n" +
+ "}\n" +
+ "sleep 1"
+ ));
+ /** Flow structure (ID - type)
2 - FlowStartNode
3 - EchoStep
4 - TimeoutStep
5 - TimeoutStep with BodyInvocationAction
6 - EchoStep
7 - EchoStep
8 - StepEndNode (BlockEndNode), startId=5
9 - StepEndNode (BlockEndNode), startId = 4
10 - SleepStep
11 - FlowEndNode
+ */
+
+ WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0));
+ Predicate matchEchoStep = FlowTestUtils.predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep");
+ FlowExecution exec = b.getExecution();
+ Collection heads = exec.getCurrentHeads();
+
+ // Linear analysis
+ LinearScanner linearScanner = new LinearScanner();
+ linearScanner.setup(heads);
+ assertNodeOrder("Linear scan with block", linearScanner, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2);
+ linearScanner.setup(exec.getNode("7"));
+ assertNodeOrder("Linear scan with block from middle ", linearScanner, 7, 6, 5, 4, 3, 2);
+
+ LinearBlockHoppingScanner linearBlockHoppingScanner = new LinearBlockHoppingScanner();
+
+ // // Test block jump core
+ FlowNode headCandidate = exec.getNode("8");
+ Assert.assertEquals(exec.getNode("4"), linearBlockHoppingScanner.jumpBlockScan(headCandidate, Collections.EMPTY_SET));
+ Assert.assertTrue("Setup should return true if we can iterate", linearBlockHoppingScanner.setup(headCandidate, null));
+
+ // Test the actual iteration
+ linearBlockHoppingScanner.setup(heads);
+ Assert.assertFalse(linearBlockHoppingScanner.hasNext());
+ linearBlockHoppingScanner.setup(exec.getNode("8"));
+ assertNodeOrder("Hopping over one block", linearBlockHoppingScanner, 4, 3, 2);
+ linearBlockHoppingScanner.setup(exec.getNode("7"));
+ assertNodeOrder("Hopping over one block", linearBlockHoppingScanner, 7, 6, 5, 4, 3, 2);
+
+ // Test the black list in combination with hopping
+ linearBlockHoppingScanner.setup(exec.getNode("8"), Collections.singleton(exec.getNode("5")));
+ Assert.assertFalse(linearBlockHoppingScanner.hasNext());
+ linearBlockHoppingScanner.setup(exec.getNode("8"), Collections.singleton(exec.getNode("4")));
+ Assert.assertFalse(linearBlockHoppingScanner.hasNext());
+ }
+
+
+ /** And the parallel case */
+ @Test
+ public void testParallelScan() throws Exception {
+ WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "Convoluted");
+ job.setDefinition(new CpsFlowDefinition(
+ "echo 'first'\n" +
+ "def steps = [:]\n" +
+ "steps['1'] = {\n" +
+ " echo 'do 1 stuff'\n" +
+ "}\n" +
+ "steps['2'] = {\n" +
+ " echo '2a'\n" +
+ " echo '2b'\n" +
+ "}\n" +
+ "parallel steps\n" +
+ "echo 'final'"
+ ));
+
+ /** Flow structure (ID - type)
2 - FlowStartNode (BlockStartNode)
3 - Echostep
4 - ParallelStep (StepStartNode) (start branches)
6 - ParallelStep (StepStartNode) (start branch 1), ParallelLabelAction with branchname=1
7 - ParallelStep (StepStartNode) (start branch 2), ParallelLabelAction with branchname=2
8 - EchoStep, (branch 1) parent=6
9 - StepEndNode, (end branch 1) startId=6, parentId=8
10 - EchoStep, (branch 2) parentId=7
11 - EchoStep, (branch 2) parentId = 10
12 - StepEndNode (end branch 2) startId=7 parentId=11,
13 - StepEndNode (close branches), parentIds = 9,12, startId=4
14 - EchoStep
15 - FlowEndNode (BlockEndNode)
+ */
+
+ WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0));
+ FlowExecution exec = b.getExecution();
+ Collection heads = b.getExecution().getCurrentHeads();
+
+ AbstractFlowScanner scanner = new LinearScanner();
+ scanner.setup(heads);
+ assertNodeOrder("Linear", scanner, 15, 14, 13, 9, 8, 6, 4, 3, 2);
+ scanner.setup(heads, Collections.singleton(exec.getNode("9")));
+ assertNodeOrder("Linear", scanner, 15, 14, 13, 12, 11, 10, 7, 4, 3, 2);
+
+
+ // Depth first scanner and with blacklist
+ scanner = new DepthFirstScanner();
+ scanner.setup(heads);
+ assertNodeOrder("Depth first", scanner, 15, 14, 13, 9, 8, 6, 4, 3, 2, 12, 11, 10, 7);
+ scanner.setup(heads, Collections.singleton(exec.getNode("9")));
+ assertNodeOrder("Linear", scanner, 15, 14, 13, 12, 11, 10, 7, 4, 3, 2);
+
+ scanner.setup(Arrays.asList(exec.getNode("9"), exec.getNode("12")));
+ assertNodeOrder("Depth-first scanner from inside parallels", scanner, 9, 8, 6, 4, 3, 2, 12, 11, 10, 7);
+
+ // We're going to test the ForkScanner in more depth since this is its natural use
+ scanner = new ForkScanner();
+ scanner.setup(heads);
+ assertNodeOrder("ForkedScanner", scanner, 15, 14, 13, 9, 8, 6, 12, 11, 10, 7, 4, 3, 2);
+ scanner.setup(heads, Collections.singleton(exec.getNode("9")));
+ assertNodeOrder("ForkedScanner", scanner, 15, 14, 13, 12, 11, 10, 7, 4, 3, 2);
+
+ // Test forkscanner midflow
+ scanner.setup(exec.getNode("14"));
+ assertNodeOrder("ForkedScanner", scanner, 14, 13, 9, 8, 6, 12, 11, 10, 7, 4, 3, 2);
+
+ // Test forkscanner inside a parallel
+
+ List startingPoints = Arrays.asList(exec.getNode("9"), exec.getNode("12"));
+ scanner.setup(startingPoints);
+ assertNodeOrder("ForkedScanner", scanner, 9, 8, 6, 12, 11, 10, 7, 4, 3, 2);
+
+ startingPoints = Arrays.asList(exec.getNode("9"), exec.getNode("11"));
+ scanner.setup(startingPoints);
+ assertNodeOrder("ForkedScanner", scanner, 9, 8, 6, 11, 10, 7, 4, 3, 2);
+
+
+ // Filtering at different points within branches
+ List blackList = Arrays.asList(exec.getNode("6"), exec.getNode("7"));
+ Assert.assertEquals(4, scanner.filteredNodes(heads, blackList, MATCH_ECHO_STEP).size());
+ Assert.assertEquals(4, scanner.filteredNodes(heads, Collections.singletonList(exec.getNode("4")), MATCH_ECHO_STEP).size());
+ blackList = Arrays.asList(exec.getNode("6"), exec.getNode("10"));
+ Assert.assertEquals(3, scanner.filteredNodes(heads, blackList, MATCH_ECHO_STEP).size());
+ }
+
+ @Test
+ public void testNestedParallelScan() throws Exception {
+ WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "Convoluted");
+ job.setDefinition(new CpsFlowDefinition(
+ "echo 'first'\n" +
+ "def steps = [:]\n" +
+ "steps['1'] = {\n" +
+ " echo 'do 1 stuff'\n" +
+ "}\n" +
+ "steps['2'] = {\n" +
+ " echo '2a'\n" +
+ " def nested = [:]\n" +
+ " nested['2-1'] = {\n" +
+ " echo 'do 2-1'\n" +
+ " } \n" +
+ " nested['2-2'] = {\n" +
+ " sleep 1\n" +
+ " echo '2 section 2'\n" +
+ " }\n" +
+ " echo '2b'\n" +
+ " parallel nested\n" +
+ "}\n" +
+ "parallel steps\n" +
+ "echo 'final'"
+ ));
+
+ /** Parallel nested in parallel (ID-type)
+ * 2 - FlowStartNode (BlockStartNode)
+ * 3 - Echostep
+ * 4 - ParallelStep (stepstartnode)
+ * 6 - ParallelStep (StepStartNode) (start branch 1), ParallelLabelAction with branchname=1
+ * 7 - ParallelStep (StepStartNode) (start branch 2), ParallelLabelAction with branchname=2
+ * 8 - EchoStep (branch #1) - parentId=6
+ * 9 - StepEndNode (end branch #1) - startId=6
+ * 10 - EchoStep - parentId=7
+ * 11 - EchoStep
+ * 12 - ParallelStep (StepStartNode) - start inner parallel
+ * 14 - ParallelStep (StepStartNode) (start branch 2-1), parentId=12, ParallelLabellAction with branchName=2-1
+ * 15 - ParallelStep (StepStartNode) (start branch 2-2), parentId=12, ParallelLabelAction with branchName=2-2
+ * 16 - Echo (Branch2-1), parentId=14
+ * 17 - StepEndNode (end branch 2-1), parentId=16, startId=14
+ * 18 - SleepStep (branch 2-2) parentId=15
+ * 19 - EchoStep (branch 2-2)
+ * 20 - StepEndNode (end branch 2-2), startId=15
+ * 21 - StepEndNode (end inner parallel), parentIds=17,20, startId=12
+ * 22 - StepEndNode (end parallel #2), parent=21, startId=7
+ * 23 - StepEndNode (end outer parallel), parentIds=9,22, startId=4
+ * 24 - Echo
+ * 25 - FlowEndNode
+ */
+
+ WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0));
+ FlowExecution exec = b.getExecution();
+ Collection heads = b.getExecution().getCurrentHeads();
+
+ // Basic test of DepthFirstScanner
+ AbstractFlowScanner scanner = new DepthFirstScanner();
+ Collection matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP);
+ Assert.assertEquals(7, matches.size());
+
+
+ // We're going to test the ForkScanner in more depth since this is its natural use
+ scanner = new ForkScanner();
+ matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP);
+ Assert.assertEquals(7, matches.size());
+
+ heads = Arrays.asList(exec.getNode("20"), exec.getNode("17"), exec.getNode("9"));
+ matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP);
+ Assert.assertEquals(6, matches.size()); // 6 echo steps reachable from heads 20, 17, 9 (the final echo, node 24, is not)
+ }
+}
\ No newline at end of file
diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowTestUtils.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowTestUtils.java
new file mode 100644
index 00000000..16bcbb0c
--- /dev/null
+++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowTestUtils.java
@@ -0,0 +1,108 @@
+package org.jenkinsci.plugins.workflow.graphanalysis;/*
+ * The MIT License
+ *
+ * Copyright (c) 2016, CloudBees, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+import com.google.common.base.Predicate;
+import org.jenkinsci.plugins.workflow.cps.nodes.StepAtomNode;
+import org.jenkinsci.plugins.workflow.flow.FlowExecution;
+import org.jenkinsci.plugins.workflow.graph.FlowNode;
+import org.jenkinsci.plugins.workflow.steps.StepDescriptor;
+import org.junit.Assert;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+
+/**
+ * Utilities for testing flow scanning
+ * @author Sam Van Oort
+ */
+public class FlowTestUtils {
+
+    /** Returns a predicate matching {@link StepAtomNode}s whose {@link StepDescriptor} id equals the given id. */
+    public static Predicate<FlowNode> predicateMatchStepDescriptor(@Nonnull final String descriptorId) {
+        return new Predicate<FlowNode>() {
+            @Override
+            public boolean apply(FlowNode input) {
+                if (input instanceof StepAtomNode) {
+                    StepAtomNode san = (StepAtomNode) input;
+                    // Descriptor can be null, e.g. if the defining plugin is not installed
+                    StepDescriptor sd = san.getDescriptor();
+                    return sd != null && descriptorId.equals(sd.getId());
+                }
+                return false;
+            }
+        };
+    }
+
+    /** Visitor that records every node visited, in visit order, and never halts the scan. */
+    public static final class CollectingVisitor implements FlowNodeVisitor {
+        ArrayList<FlowNode> visited = new ArrayList<FlowNode>();
+
+        @Override
+        public boolean visit(@Nonnull FlowNode f) {
+            visited.add(f);
+            return true; // keep scanning
+        }
+
+        /** Clear recorded nodes so the visitor can be reused. */
+        public void reset() {
+            this.visited.clear();
+        }
+
+        public ArrayList<FlowNode> getVisited() {
+            return visited;
+        }
+    }
+
+    /** Matches {@code echo} step nodes. */
+    public static Predicate<FlowNode> MATCH_ECHO_STEP = predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep");
+
+    /** Assert node ordering using their ids */
+    public static void assertNodeOrder(String description, Iterable<? extends FlowNode> nodes, String... nodeIds) {
+        ArrayList<String> realIds = new ArrayList<String>();
+        for (FlowNode f : nodes) {
+            Assert.assertNotNull(f);
+            realIds.add(f.getId());
+        }
+        Assert.assertArrayEquals(description, nodeIds, realIds.toArray());
+    }
+
+    /** Assert node ordering using iotas (integer ids) for their ids.
+     *  NOTE(review): this method and {@link #addNodesById} were fused into one corrupted line in the
+     *  original patch; bodies reconstructed from the surrounding overloads -- confirm against upstream. */
+    public static void assertNodeOrder(String description, Iterable<? extends FlowNode> nodes, int... nodeIds) {
+        String[] nodeIdStrings = new String[nodeIds.length];
+        for (int i = 0; i < nodeIds.length; i++) {
+            nodeIdStrings[i] = Integer.toString(nodeIds[i]);
+        }
+        assertNodeOrder(description, nodes, nodeIdStrings);
+    }
+
+    /** Load the nodes with the given integer ids from the execution and add them to the collection. */
+    public static void addNodesById(Collection<FlowNode> coll, FlowExecution exec, int... iotas) {
+        try {
+            for (int nodeId : iotas) {
+                coll.add(exec.getNode(Integer.toString(nodeId)));
+            }
+        } catch (IOException ioe) {
+            // getNode does I/O against flow-graph storage; surface failures loudly rather than swallowing
+            throw new IllegalStateException("Failed to load node by id", ioe);
+        }
+    }
+}
diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java
new file mode 100644
index 00000000..3a531418
--- /dev/null
+++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java
@@ -0,0 +1,518 @@
+/*
+ * The MIT License
+ *
+ * Copyright (c) 2016, CloudBees, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import com.google.common.base.Predicate;
+import com.google.common.base.Predicates;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition;
+import org.jenkinsci.plugins.workflow.cps.nodes.StepStartNode;
+import org.jenkinsci.plugins.workflow.cps.steps.ParallelStep;
+import org.jenkinsci.plugins.workflow.flow.FlowExecution;
+import org.jenkinsci.plugins.workflow.graph.BlockStartNode;
+import org.jenkinsci.plugins.workflow.graph.FlowGraphWalker;
+import org.jenkinsci.plugins.workflow.graph.FlowNode;
+import org.jenkinsci.plugins.workflow.job.WorkflowJob;
+import org.jenkinsci.plugins.workflow.job.WorkflowRun;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.jvnet.hudson.test.BuildWatcher;
+import org.jvnet.hudson.test.JenkinsRule;
+import org.junit.Assert;
+
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Set;
+
+// Slightly dirty but it removes a ton of FlowTestUtils.* class qualifiers
+import static org.jenkinsci.plugins.workflow.graphanalysis.FlowTestUtils.*;
+
+/**
+ * Tests for internals of ForkScanner
+ */
+public class ForkScannerTest {
+ // Echoes running-build console output to the test log
+ @ClassRule
+ public static BuildWatcher buildWatcher = new BuildWatcher();
+
+ // Fresh Jenkins instance per test method
+ @Rule
+ public JenkinsRule r = new JenkinsRule();
+
+    /** Returns a predicate matching {@link TestVisitor.CallEntry}s of the given callback type. */
+    public static Predicate<TestVisitor.CallEntry> predicateForCallEntryType(final TestVisitor.CallType type) {
+        return new Predicate<TestVisitor.CallEntry>() {
+            @Override
+            public boolean apply(TestVisitor.CallEntry input) {
+                // Entries with no type never match; the final parameter is captured directly
+                // (the original copied it into a redundant field).
+                return input.type != null && input.type == type;
+            }
+        };
+    }
+
+ /** Flow structure (ID - type)
+ 2 - FlowStartNode (BlockStartNode)
+ 3 - EchoStep
+ 4 - ParallelStep (StepStartNode) (start branches)
+ 6 - ParallelStep (StepStartNode) (start branch 1), ParallelLabelAction with branchName=1
+ 7 - ParallelStep (StepStartNode) (start branch 2), ParallelLabelAction with branchName=2
+ 8 - EchoStep, (branch 1) parent=6
+ 9 - StepEndNode, (end branch 1) startId=6, parentId=8
+ 10 - EchoStep, (branch 2) parentId=7
+ 11 - EchoStep, (branch 2) parentId = 10
+ 12 - StepEndNode (end branch 2) startId=7 parentId=11,
+ 13 - StepEndNode (close branches), parentIds = 9,12, startId=4
+ 14 - EchoStep
+ 15 - FlowEndNode (BlockEndNode)
+ */
+ // Completed single-level parallel build, created in setUp()
+ WorkflowRun SIMPLE_PARALLEL_RUN;
+
+ /** Parallel nested in parallel (ID-type)
+ * 2 - FlowStartNode (BlockStartNode)
+ * 3 - EchoStep
+ * 4 - ParallelStep (StepStartNode)
+ * 6 - ParallelStep (StepStartNode) (start branch 1), ParallelLabelAction with branchName=1
+ * 7 - ParallelStep (StepStartNode) (start branch 2), ParallelLabelAction with branchName=2
+ * 8 - EchoStep (branch #1) - parentId=6
+ * 9 - StepEndNode (end branch #1) - startId=6
+ * 10 - EchoStep - parentId=7
+ * 11 - EchoStep
+ * 12 - ParallelStep (StepStartNode) - start inner parallel
+ * 14 - ParallelStep (StepStartNode) (start branch 2-1), parentId=12, ParallelLabelAction with branchName=2-1
+ * 15 - ParallelStep (StepStartNode) (start branch 2-2), parentId=12, ParallelLabelAction with branchName=2-2
+ * 16 - Echo (Branch2-1), parentId=14
+ * 17 - StepEndNode (end branch 2-1), parentId=16, startId=14
+ * 18 - SleepStep (branch 2-2) parentId=15
+ * 19 - EchoStep (branch 2-2)
+ * 20 - StepEndNode (end branch 2-2), startId=15
+ * 21 - StepEndNode (end inner parallel ), parentIds=17,20, startId=12
+ * 22 - StepEndNode (end parallel #2), parent=21, startId=7
+ * 23 - StepEndNode (end outer parallel), parentIds=9,22, startId=4
+ * 24 - Echo
+ * 25 - FlowEndNode
+ */
+ // Completed nested-parallel build, created in setUp()
+ WorkflowRun NESTED_PARALLEL_RUN;
+
+ // Builds the two reference pipelines once per test; node ids are documented on the fields above.
+ @Before
+ public void setUp() throws Exception {
+ r.jenkins.getInjector().injectMembers(this);
+
+ // Single-level parallel: branch '1' has one echo, branch '2' has two
+ WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "SimpleParallel");
+ job.setDefinition(new CpsFlowDefinition(
+ "echo 'first'\n" +
+ "def steps = [:]\n" +
+ "steps['1'] = {\n" +
+ " echo 'do 1 stuff'\n" +
+ "}\n" +
+ "steps['2'] = {\n" +
+ " echo '2a'\n" +
+ " echo '2b'\n" +
+ "}\n" +
+ "parallel steps\n" +
+ "echo 'final'"
+ ));
+ WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0));
+ this.SIMPLE_PARALLEL_RUN = b;
+
+ // Branch '2' itself runs an inner two-branch parallel
+ job = r.jenkins.createProject(WorkflowJob.class, "NestedParallel");
+ job.setDefinition(new CpsFlowDefinition(
+ "echo 'first'\n" +
+ "def steps = [:]\n" +
+ "steps['1'] = {\n" +
+ " echo 'do 1 stuff'\n" +
+ "}\n" +
+ "steps['2'] = {\n" +
+ " echo '2a'\n" +
+ " echo '2b'\n" +
+ " def nested = [:]\n" +
+ " nested['2-1'] = {\n" +
+ " echo 'do 2-1'\n" +
+ " } \n" +
+ " nested['2-2'] = {\n" +
+ " sleep 1\n" +
+ " echo '2 section 2'\n" +
+ " }\n" +
+ " parallel nested\n" +
+ "}\n" +
+ "parallel steps\n" +
+ "echo 'final'"
+ ));
+ b = r.assertBuildStatusSuccess(job.scheduleBuild2(0));
+ this.NESTED_PARALLEL_RUN = b;
+ }
+
+    /** Matches the {@link StepStartNode} that begins a parallel block (as opposed to per-branch starts). */
+    public static Predicate<FlowNode> PARALLEL_START_PREDICATE = new Predicate<FlowNode>() {
+        @Override
+        public boolean apply(FlowNode input) {
+            // instanceof rejects null by itself; additionally guard against a null descriptor
+            // (possible if the defining plugin is missing), which previously risked an NPE.
+            return input instanceof StepStartNode
+                    && ((StepStartNode) input).getDescriptor() != null
+                    && ((StepStartNode) input).getDescriptor().getClass() == ParallelStep.DescriptorImpl.class;
+        }
+    };
+
+ /** Walks {@link #SIMPLE_PARALLEL_RUN} and checks the scanner's internal parallel-tracking state step by step. */
+ @Test
+ public void testForkedScanner() throws Exception {
+ FlowExecution exec = SIMPLE_PARALLEL_RUN.getExecution();
+ Collection heads = SIMPLE_PARALLEL_RUN.getExecution().getCurrentHeads();
+
+ // Initial case: setup from the real heads, no parallel state should be populated yet
+ ForkScanner scanner = new ForkScanner();
+ scanner.setup(heads, null);
+ ForkScanner.setParallelStartPredicate(PARALLEL_START_PREDICATE);
+ Assert.assertNull(scanner.currentParallelStart);
+ Assert.assertNull(scanner.currentParallelStartNode);
+ Assert.assertNotNull(scanner.parallelBlockStartStack);
+ Assert.assertEquals(0, scanner.parallelBlockStartStack.size());
+ Assert.assertTrue(scanner.isWalkingFromFinish());
+
+ // Fork case: start from node 13 (the parallel-end node) and iterate backwards
+ scanner.setup(exec.getNode("13"));
+ Assert.assertFalse(scanner.isWalkingFromFinish());
+ Assert.assertEquals(null, scanner.currentType);
+ Assert.assertEquals(ForkScanner.NodeType.PARALLEL_END, scanner.nextType);
+ Assert.assertEquals("13", scanner.next().getId());
+ Assert.assertNotNull(scanner.parallelBlockStartStack);
+ Assert.assertEquals(0, scanner.parallelBlockStartStack.size());
+ Assert.assertEquals(exec.getNode("4"), scanner.currentParallelStartNode);
+
+ // One branch remains unvisited; fork start is node 4
+ ForkScanner.ParallelBlockStart start = scanner.currentParallelStart;
+ Assert.assertEquals(1, start.unvisited.size());
+ Assert.assertEquals(exec.getNode("4"), start.forkStart);
+
+ // Walk branch 1 in reverse (9 -> 8 -> 6) checking current/next node type transitions
+ Assert.assertEquals(exec.getNode("9"), scanner.next());
+ Assert.assertEquals(ForkScanner.NodeType.PARALLEL_BRANCH_END, scanner.getCurrentType());
+ Assert.assertEquals(ForkScanner.NodeType.NORMAL, scanner.getNextType());
+ Assert.assertEquals(exec.getNode("8"), scanner.next());
+ Assert.assertEquals(ForkScanner.NodeType.NORMAL, scanner.getCurrentType());
+ Assert.assertEquals(ForkScanner.NodeType.PARALLEL_BRANCH_START, scanner.getNextType());
+ Assert.assertEquals(exec.getNode("6"), scanner.next());
+ Assert.assertEquals(ForkScanner.NodeType.PARALLEL_BRANCH_START, scanner.getCurrentType());
+ Assert.assertEquals(ForkScanner.NodeType.PARALLEL_BRANCH_END, scanner.getNextType());
+ // Scanner hops to branch 2's end (node 12) after finishing branch 1
+ FlowNode f = scanner.next();
+ Assert.assertEquals(ForkScanner.NodeType.PARALLEL_BRANCH_END, scanner.getCurrentType());
+ Assert.assertEquals(ForkScanner.NodeType.NORMAL, scanner.getNextType());
+ Assert.assertEquals(exec.getNode("12"), f);
+
+ // Now we test the least common ancestor bits
+ }
+
+ /** Reference the flow graphs in {@link #SIMPLE_PARALLEL_RUN} and {@link #NESTED_PARALLEL_RUN} */
+ @Test
+ public void testFlowSegmentSplit() throws Exception {
+ FlowExecution exec = SIMPLE_PARALLEL_RUN.getExecution();
+
+ /* Flow structure (ID - type)
+ 2 - FlowStartNode (BlockStartNode)
+ 3 - EchoStep
+ 4 - ParallelStep (StepStartNode) (start branches)
+ 6 - ParallelStep (StepStartNode) (start branch 1), ParallelLabelAction with branchName=1
+ 7 - ParallelStep (StepStartNode) (start branch 2), ParallelLabelAction with branchName=2
+ 8 - EchoStep, (branch 1) parent=6
+ 9 - StepEndNode, (end branch 1) startId=6, parentId=8
+ 10 - EchoStep, (branch 2) parentId=7
+ 11 - EchoStep, (branch 2) parentId = 10
+ 12 - StepEndNode (end branch 2) startId=7 parentId=11,
+ 13 - StepEndNode (close branches), parentIds = 9,12, startId=4
+ 14 - EchoStep
+ 15 - FlowEndNode (BlockEndNode)
+ */
+
+ HashMap nodeMap = new HashMap();
+ ForkScanner.FlowSegment mainBranch = new ForkScanner.FlowSegment();
+ ForkScanner.FlowSegment sideBranch = new ForkScanner.FlowSegment();
+ FlowNode BRANCH1_END = exec.getNode("9");
+ FlowNode BRANCH2_END = exec.getNode("12");
+ FlowNode START_PARALLEL = exec.getNode("4");
+
+ // Branch 1: we're going to run one flownode beyond the start of the parallel branch and then split
+ mainBranch.add(BRANCH1_END);
+ mainBranch.add(exec.getNode("8"));
+ mainBranch.add(exec.getNode("6"));
+ mainBranch.add(exec.getNode("4"));
+ mainBranch.add(exec.getNode("3")); // FlowNode beyond the fork point
+ for (FlowNode f : mainBranch.visited) {
+ nodeMap.put(f, mainBranch);
+ }
+ assertNodeOrder("Visited nodes", mainBranch.visited, 9, 8, 6, 4, 3);
+
+ // Branch 2
+ sideBranch.add(BRANCH2_END);
+ sideBranch.add(exec.getNode("11"));
+ sideBranch.add(exec.getNode("10"));
+ sideBranch.add(exec.getNode("7"));
+ for (FlowNode f : sideBranch.visited) {
+ nodeMap.put(f, sideBranch);
+ }
+ assertNodeOrder("Visited nodes", sideBranch.visited, 12, 11, 10, 7);
+
+ // Split mainBranch at fork node 4: nodes after the fork move into a new segment
+ ForkScanner.Fork forked = mainBranch.split(nodeMap, (BlockStartNode)exec.getNode("4"), sideBranch);
+ ForkScanner.FlowSegment splitSegment = (ForkScanner.FlowSegment)nodeMap.get(BRANCH1_END); // New branch
+ Assert.assertNull(splitSegment.after);
+ assertNodeOrder("Branch 1 split after fork", splitSegment.visited, 9, 8, 6);
+
+ // Just the single node before the fork
+ Assert.assertEquals(forked, mainBranch.after);
+ assertNodeOrder("Head of flow, pre-fork", mainBranch.visited, 3);
+
+ // Fork point maps to the Fork object, which is followed by both post-fork segments
+ Assert.assertEquals(forked, nodeMap.get(START_PARALLEL));
+ ForkScanner.FlowPiece[] follows = {splitSegment, sideBranch};
+ Assert.assertArrayEquals(follows, forked.following.toArray());
+
+ // Branch 2 is untouched by the split
+ Assert.assertEquals(sideBranch, nodeMap.get(BRANCH2_END));
+ assertNodeOrder("Branch 2", sideBranch.visited, 12, 11, 10, 7);
+
+ // Test splitting right at a fork point: we should get a fork and the main branch should become a follower
+ // along with the side branch (branch2)
+ nodeMap.clear();
+ mainBranch = new ForkScanner.FlowSegment();
+ sideBranch = new ForkScanner.FlowSegment();
+ mainBranch.visited.add(exec.getNode("6"));
+ mainBranch.visited.add(START_PARALLEL);
+ sideBranch.visited.add(exec.getNode("7"));
+ for (FlowNode f : mainBranch.visited) {
+ nodeMap.put(f, mainBranch);
+ }
+ nodeMap.put(exec.getNode("7"), sideBranch);
+
+ forked = mainBranch.split(nodeMap, (BlockStartNode)exec.getNode("4"), sideBranch);
+ follows = new ForkScanner.FlowSegment[2];
+ follows[0] = mainBranch;
+ follows[1] = sideBranch;
+ Assert.assertArrayEquals(follows, forked.following.toArray());
+ assertNodeOrder("Branch1", mainBranch.visited, 6);
+ Assert.assertNull(mainBranch.after);
+ assertNodeOrder("Branch2", sideBranch.visited, 7);
+ Assert.assertNull(sideBranch.after);
+ Assert.assertEquals(forked, nodeMap.get(START_PARALLEL));
+ Assert.assertEquals(mainBranch, nodeMap.get(exec.getNode("6")));
+ Assert.assertEquals(sideBranch, nodeMap.get(exec.getNode("7")));
+ }
+
+ /** Parallel branches with empty bodies must still produce well-formed start/end nodes. */
+ @Test
+ public void testEmptyParallel() throws Exception {
+ WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "EmptyParallel");
+ job.setDefinition(new CpsFlowDefinition(
+ "parallel 'empty1': {}, 'empty2':{} \n" +
+ "echo 'done' "
+ ));
+ WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0));
+ ForkScanner scan = new ForkScanner();
+
+ // All nodes: flow start/end, parallel start/end, 2 branch starts/ends, echo = 9
+ List outputs = scan.filteredNodes(b.getExecution().getCurrentHeads(), (Predicate) Predicates.alwaysTrue());
+ Assert.assertEquals(9, outputs.size());
+ }
+
+ /** Reference the flow graphs in {@link #SIMPLE_PARALLEL_RUN} and {@link #NESTED_PARALLEL_RUN} */
+ @Test
+ public void testLeastCommonAncestor() throws Exception {
+ FlowExecution exec = SIMPLE_PARALLEL_RUN.getExecution();
+
+ ForkScanner scan = new ForkScanner();
+ // Starts at the ends of the parallel branches
+ Set heads = new LinkedHashSet(Arrays.asList(exec.getNode("12"), exec.getNode("9")));
+ ArrayDeque starts = scan.leastCommonAncestor(heads);
+ Assert.assertEquals(1, starts.size());
+
+ // Both branch heads converge on the single fork at node 4
+ ForkScanner.ParallelBlockStart start = starts.peek();
+ Assert.assertEquals(2, start.unvisited.size());
+ Assert.assertEquals(exec.getNode("4"), start.forkStart);
+ Assert.assertArrayEquals(heads.toArray(), start.unvisited.toArray());
+
+ // Ensure no issues with single start triggering least common ancestor
+ heads = new LinkedHashSet(Arrays.asList(exec.getNode("4")));
+ scan.setup(heads);
+ Assert.assertNull(scan.currentParallelStart);
+ Assert.assertTrue(scan.parallelBlockStartStack == null || scan.parallelBlockStartStack.isEmpty());
+
+ // Empty fork: heads are the branch-start nodes themselves
+ heads = new LinkedHashSet(Arrays.asList(exec.getNode("6"), exec.getNode("7")));
+ starts = scan.leastCommonAncestor(heads);
+ Assert.assertEquals(1, starts.size());
+ ForkScanner.ParallelBlockStart pbs = starts.pop();
+ Assert.assertEquals(exec.getNode("4"), pbs.forkStart);
+ Assert.assertEquals(2, pbs.unvisited.size());
+ Assert.assertTrue(pbs.unvisited.contains(exec.getNode("6")));
+ Assert.assertTrue(pbs.unvisited.contains(exec.getNode("7")));
+
+ // Now we do the same with the nested run
+ exec = NESTED_PARALLEL_RUN.getExecution();
+ heads = new LinkedHashSet(Arrays.asList(exec.getNode("9"), exec.getNode("17"), exec.getNode("20")));
+
+ // Problem: we get a parallel start with the same flowsegment in the following for more than one parallel start
+ starts = scan.leastCommonAncestor(heads);
+ Assert.assertEquals(2, starts.size());
+ ForkScanner.ParallelBlockStart inner = starts.getFirst();
+ ForkScanner.ParallelBlockStart outer = starts.getLast();
+
+ // Inner parallel (fork 12) owns branch ends 17 and 20
+ Assert.assertEquals(2, inner.unvisited.size());
+ Assert.assertEquals(exec.getNode("12"), inner.forkStart);
+
+ // Outer parallel (fork 4) only has branch end 9 left unvisited
+ Assert.assertEquals(1, outer.unvisited.size());
+ Assert.assertEquals(exec.getNode("9"), outer.unvisited.peek());
+ Assert.assertEquals(exec.getNode("4"), outer.forkStart);
+
+ // Re-running with fresh heads gives the same structure (no stale state)
+ heads = new LinkedHashSet(Arrays.asList(exec.getNode("9"), exec.getNode("17"), exec.getNode("20")));
+ starts = scan.leastCommonAncestor(heads);
+ Assert.assertEquals(2, starts.size());
+ }
+
+ /** For nodes, see {@link #SIMPLE_PARALLEL_RUN} */
+ @Test
+ public void testSimpleVisitor() throws Exception {
+ ForkScanner.setParallelStartPredicate(PARALLEL_START_PREDICATE);
+ FlowExecution exec = this.SIMPLE_PARALLEL_RUN.getExecution();
+ ForkScanner f = new ForkScanner();
+ f.setup(exec.getCurrentHeads());
+ TestVisitor visitor = new TestVisitor();
+
+ f.visitSimpleChunks(visitor, new BlockChunkFinder());
+
+ // 13 calls for chunk/atoms, 6 for parallels
+ Assert.assertEquals(19, visitor.calls.size());
+
+ // Visiting runs in reverse: calls.get(0) is the LAST node of the flow.
+ // End has nothing after it, just last node (15)
+ TestVisitor.CallEntry last = new TestVisitor.CallEntry(TestVisitor.CallType.CHUNK_END, 15, -1, -1, -1);
+ last.assertEquals(visitor.calls.get(0));
+
+ // Start has nothing before it, just the first node (2)
+ TestVisitor.CallEntry first = new TestVisitor.CallEntry(TestVisitor.CallType.CHUNK_START, 2, -1, -1, -1);
+ first.assertEquals(visitor.calls.get(18));
+
+ int chunkStartCount = Iterables.size(Iterables.filter(visitor.calls, predicateForCallEntryType(TestVisitor.CallType.CHUNK_START)));
+ int chunkEndCount = Iterables.size(Iterables.filter(visitor.calls, predicateForCallEntryType(TestVisitor.CallType.CHUNK_END)));
+ Assert.assertEquals(4, chunkStartCount);
+ Assert.assertEquals(4, chunkEndCount);
+
+ // Verify the AtomNode calls are correct: before < atom < after ids, last slot unused
+ List < TestVisitor.CallEntry > atomNodeCalls = Lists.newArrayList(Iterables.filter(visitor.calls, predicateForCallEntryType(TestVisitor.CallType.ATOM_NODE)));
+ Assert.assertEquals(5, atomNodeCalls.size());
+ for (TestVisitor.CallEntry ce : atomNodeCalls) {
+ int beforeId = ce.ids[0];
+ int atomNodeId = ce.ids[1];
+ int afterId = ce.ids[2];
+ int alwaysEmpty = ce.ids[3];
+ Assert.assertTrue(ce+" beforeNodeId <= 0: "+beforeId, beforeId > 0);
+ Assert.assertTrue(ce + " atomNodeId <= 0: " + atomNodeId, atomNodeId > 0);
+ Assert.assertTrue(ce+" afterNodeId <= 0: "+afterId, afterId > 0);
+ Assert.assertEquals(-1, alwaysEmpty);
+ Assert.assertTrue(ce + "AtomNodeId >= afterNodeId", atomNodeId < afterId);
+ Assert.assertTrue(ce+ "beforeNodeId >= atomNodeId", beforeId < atomNodeId);
+ }
+
+ // Everything that isn't an atom/chunk callback is a parallel callback
+ List parallelCalls = Lists.newArrayList(Iterables.filter(visitor.calls, new Predicate() {
+ @Override
+ public boolean apply(TestVisitor.CallEntry input) {
+ return input.type != null
+ && input.type != TestVisitor.CallType.ATOM_NODE
+ && input.type != TestVisitor.CallType.CHUNK_START
+ && input.type != TestVisitor.CallType.CHUNK_END;
+ }
+ }));
+ Assert.assertEquals(6, parallelCalls.size());
+ // Start to end
+ new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_END, 4, 13).assertEquals(parallelCalls.get(0));
+
+ //Tests for parallel handling
+ // Start to end, in reverse order
+
+ new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_END, 4, 9).assertEquals(parallelCalls.get(1));
+ new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_START, 4, 6).assertEquals(parallelCalls.get(2));
+ new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_END, 4, 12).assertEquals(parallelCalls.get(3));
+
+ new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_START, 4, 7).assertEquals(parallelCalls.get(4));
+ new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_START, 4, 7).assertEquals(parallelCalls.get(5));
+
+ }
+
+ /** Checks for off-by one cases with multiple parallel, and with the leastCommonAncestor */
+ @Test
+ public void testTripleParallel() throws Exception {
+ WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "TripleParallel");
+ job.setDefinition(new CpsFlowDefinition(
+ "stage 'test'\n"+ // Id 3, Id 2 before that has the FlowStartNode
+ "parallel 'unit':{\n" + // Id 4 starts parallel, Id 7 is the block start for the unit branch
+ " echo \"Unit testing...\"\n" + // Id 10
+ "},'integration':{\n" + // Id 11 is unit branch end, Id 8 is the branch start for integration branch
+ " echo \"Integration testing...\"\n" + // Id 12
+ "}, 'ui':{\n" + // Id 13 is integration branch end, Id 9 is branch start for UI branch
+ " echo \"UI testing...\"\n" + // Id 14
+ "}" // Node 15 is UI branch end node, Node 16 is Parallel End node, Node 17 is FlowEndNode
+ ));
+ WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0));
+
+ ForkScanner.setParallelStartPredicate(PARALLEL_START_PREDICATE);
+ FlowExecution exec = b.getExecution();
+ ForkScanner f = new ForkScanner();
+ f.setup(exec.getCurrentHeads());
+ TestVisitor visitor = new TestVisitor();
+ f.visitSimpleChunks(visitor, new BlockChunkFinder());
+
+ // Three branches => 3 branch-start + 3 branch-end callbacks
+ ArrayList parallels = Lists.newArrayList(Iterables.filter(visitor.calls,
+ Predicates.or(
+ predicateForCallEntryType(TestVisitor.CallType.PARALLEL_BRANCH_START),
+ predicateForCallEntryType(TestVisitor.CallType.PARALLEL_BRANCH_END))
+ )
+ );
+ Assert.assertEquals(6, parallels.size());
+
+ // Visiting from partially completed branches
+ // Verify we still get appropriate parallels callbacks for a branch end
+ // even if in-progress and no explicit end node
+ ArrayList ends = new ArrayList();
+ ends.add(exec.getNode("11"));
+ ends.add(exec.getNode("12"));
+ ends.add(exec.getNode("14"));
+ visitor = new TestVisitor();
+ f.setup(ends);
+ f.visitSimpleChunks(visitor, new BlockChunkFinder());
+ parallels = Lists.newArrayList(Iterables.filter(visitor.calls,
+ Predicates.or(
+ predicateForCallEntryType(TestVisitor.CallType.PARALLEL_BRANCH_START),
+ predicateForCallEntryType(TestVisitor.CallType.PARALLEL_BRANCH_END))
+ )
+ );
+ Assert.assertEquals(6, parallels.size());
+ Assert.assertEquals(17, visitor.calls.size());
+
+ // Test the least common ancestor implementation with triplicate
+ FlowNode[] branchHeads = {exec.getNode("7"), exec.getNode("8"), exec.getNode("9")};
+ ArrayDeque starts = f.leastCommonAncestor(new HashSet(Arrays.asList(branchHeads)));
+ Assert.assertEquals(1, starts.size());
+ ForkScanner.ParallelBlockStart pbs = starts.pop();
+ Assert.assertEquals(exec.getNode("4"), pbs.forkStart);
+ Assert.assertEquals(3, pbs.unvisited.size());
+ Assert.assertTrue(pbs.unvisited.contains(exec.getNode("7")));
+ Assert.assertTrue(pbs.unvisited.contains(exec.getNode("8")));
+ Assert.assertTrue(pbs.unvisited.contains(exec.getNode("9")));
+ }
+}
diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestVisitor.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestVisitor.java
new file mode 100644
index 00000000..cb0f59ea
--- /dev/null
+++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestVisitor.java
@@ -0,0 +1,110 @@
+package org.jenkinsci.plugins.workflow.graphanalysis;
+
+import org.jenkinsci.plugins.workflow.graph.FlowNode;
+import org.junit.Assert;
+
+import javax.annotation.CheckForNull;
+import javax.annotation.Nonnull;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Test visitor class, tracks invocations of methods
+ */
+public class TestVisitor implements SimpleChunkVisitor {
+
+    /** The kinds of callback a {@link SimpleChunkVisitor} receives, one per visitor method. */
+    public enum CallType {
+        ATOM_NODE,
+        CHUNK_START,
+        CHUNK_END,
+        PARALLEL_START,
+        PARALLEL_END,
+        PARALLEL_BRANCH_START,
+        PARALLEL_BRANCH_END
+    }
+
+    /**
+     * Records a single visitor invocation: its {@link CallType} plus up to four node ids.
+     * Unused slots (and null nodes) are stored as -1.
+     * NOTE(review): the body of this class was corrupted in the original patch; it has been
+     * reconstructed from its usage in ForkScannerTest (int/node constructors, assertEquals,
+     * toString) -- confirm against the upstream revision.
+     */
+    public static class CallEntry {
+        CallType type;
+        int[] ids = {-1, -1, -1, -1};
+
+        /** Store the ids of the given nodes in order; a null node is recorded as -1. */
+        public void setIds(FlowNode... nodes) {
+            for (int i = 0; i < nodes.length; i++) {
+                ids[i] = (nodes[i] == null) ? -1 : Integer.parseInt(nodes[i].getId());
+            }
+        }
+
+        public CallEntry(CallType type, FlowNode... nodes) {
+            this.type = type;
+            this.setIds(nodes);
+        }
+
+        public CallEntry(CallType type, int... vals) {
+            this.type = type;
+            for (int i = 0; i < vals.length; i++) {
+                ids[i] = vals[i];
+            }
+        }
+
+        /** Assert that the given (actual) entry has the same type and ids as this (expected) one. */
+        public void assertEquals(CallEntry test) {
+            Assert.assertNotNull(test);
+            Assert.assertEquals(this.type, test.type);
+            Assert.assertArrayEquals(this.ids, test.ids);
+        }
+
+        @Override
+        public String toString() {
+            return "CallEntry: " + type + " ids: " + Arrays.toString(ids);
+        }
+    }
+
+    /** Every callback received, in invocation order (inspected extensively by ForkScannerTest). */
+    public List<CallEntry> calls = new ArrayList<CallEntry>();
+
+    @Override
+    public void chunkStart(@Nonnull FlowNode startNode, @CheckForNull FlowNode beforeBlock, @Nonnull ForkScanner scanner) {
+        calls.add(new CallEntry(CallType.CHUNK_START, startNode, beforeBlock));
+    }
+
+    @Override
+    public void chunkEnd(@Nonnull FlowNode endNode, @CheckForNull FlowNode afterChunk, @Nonnull ForkScanner scanner) {
+        calls.add(new CallEntry(CallType.CHUNK_END, endNode, afterChunk));
+    }
+
+    @Override
+    public void parallelStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchNode, @Nonnull ForkScanner scanner) {
+        calls.add(new CallEntry(CallType.PARALLEL_START, parallelStartNode, branchNode));
+    }
+
+    @Override
+    public void parallelEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode parallelEndNode, @Nonnull ForkScanner scanner) {
+        calls.add(new CallEntry(CallType.PARALLEL_END, parallelStartNode, parallelEndNode));
+    }
+
+    @Override
+    public void parallelBranchStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchStartNode, @Nonnull ForkScanner scanner) {
+        calls.add(new CallEntry(CallType.PARALLEL_BRANCH_START, parallelStartNode, branchStartNode));
+    }
+
+    @Override
+    public void parallelBranchEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchEndNode, @Nonnull ForkScanner scanner) {
+        calls.add(new CallEntry(CallType.PARALLEL_BRANCH_END, parallelStartNode, branchEndNode));
+    }
+
+    @Override
+    public void atomNode(@CheckForNull FlowNode before, @Nonnull FlowNode atomNode, @CheckForNull FlowNode after, @Nonnull ForkScanner scan) {
+        calls.add(new CallEntry(CallType.ATOM_NODE, before, atomNode, after));
+    }
+}