diff --git a/README.md b/README.md
index b6473f12..75c8f300 100644
--- a/README.md
+++ b/README.md
@@ -67,7 +67,7 @@ To use the template, run the following command(s):
docker run -p 8080:8080 -p 7300:7300 --env-file=config.env -it -v ${PWD}/genesis.json:/app/genesis.json ghcr.io/base-org/pessimism:latest
```
-**Note**: If you want to bootstrap the application and run specific heuristics/pipelines upon start, update config.env `BOOTSTRAP_PATH` value to the location of your genesis.json file then run
+**Note**: If you want to bootstrap the application and run specific heuristics/paths upon start, update the `BOOTSTRAP_PATH` value in config.env to the location of your genesis.json file, then run
### Building and Running New Images
@@ -143,7 +143,6 @@ A bootstrap config file is used to define the initial state of the pessimism ser
[
{
"network": "layer1",
- "pipeline_type": "live",
"type": "contract_event",
"start_height": null,
"alerting_params": {
@@ -157,7 +156,6 @@ A bootstrap config file is used to define the initial state of the pessimism ser
},
{
"network": "layer1",
- "pipeline_type": "live",
"type": "balance_enforcement",
"start_height": null,
"alerting_params": {
diff --git a/cmd/main.go b/cmd/main.go
index 4cf76507..0a13a2a7 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -95,7 +95,7 @@ func RunPessimism(_ *cli.Context) error {
return err
}
- logger.Info("Received bootstrapped session UUIDs", zap.Any(logging.SUUIDKey, ids))
+ logger.Info("Received bootstrapped session UUIDs", zap.Any(logging.Session, ids))
logger.Debug("Application state successfully bootstrapped")
}
diff --git a/config.env.template b/config.env.template
index ad26bd20..01ab5772 100644
--- a/config.env.template
+++ b/config.env.template
@@ -2,7 +2,7 @@
L1_RPC_ENDPOINT=
L2_RPC_ENDPOINT=
-# Oracle Geth Block Poll Intervals (ms)
+# Chain Poll Intervals (ms)
L1_POLL_INTERVAL=5000
L2_POLL_INTERVAL=5000
@@ -39,4 +39,4 @@ ENABLE_METRICS=1 # 0 to disable, 1 to enable
METRICS_READ_HEADER_TIMEOUT=60
# Concurrency Management
-MAX_PIPELINE_COUNT=10
\ No newline at end of file
+MAX_PATH_COUNT=10
\ No newline at end of file
diff --git a/docs/Gemfile.lock b/docs/Gemfile.lock
index e677d3be..6b381259 100644
--- a/docs/Gemfile.lock
+++ b/docs/Gemfile.lock
@@ -82,7 +82,7 @@ GEM
octokit (~> 4.0)
public_suffix (>= 3.0, < 5.0)
typhoeus (~> 1.3)
- html-pipeline (2.14.3)
+ html-pipeline (2.14.3)
activesupport (>= 2)
nokogiri (>= 1.4)
http_parser.rb (0.8.0)
@@ -125,7 +125,7 @@ GEM
jekyll-include-cache (0.2.1)
jekyll (>= 3.7, < 5.0)
jekyll-mentions (1.6.0)
- html-pipeline (~> 2.3)
+ html-pipeline (~> 2.3)
jekyll (>= 3.7, < 5.0)
jekyll-optional-front-matter (0.3.2)
jekyll (>= 3.0, < 5.0)
@@ -194,7 +194,7 @@ GEM
listen (~> 3.0)
jemoji (0.12.0)
gemoji (~> 3.0)
- html-pipeline (~> 2.2)
+ html-pipeline (~> 2.2)
jekyll (>= 3.0, < 5.0)
kramdown (2.3.2)
rexml
diff --git a/docs/architecture/alerting.markdown b/docs/architecture/alerting.markdown
index 188dc9b6..6097f6c6 100644
--- a/docs/architecture/alerting.markdown
+++ b/docs/architecture/alerting.markdown
@@ -24,7 +24,7 @@ end
subgraph AM["Alerting Manager"]
alertingRelay --> |Alert|EL
- EL[eventLoop] --> |Alert SUUID|AS["AlertStore"]
+ EL[eventLoop] --> |Alert UUID|AS["AlertStore"]
AS --> |Alert Policy|EL
EL --> |Submit alert|SR["SeverityRouter"]
SR --> SH["Slack"]
@@ -48,7 +48,7 @@ An `Alert` type stores all necessary metadata for external consumption by a down
The alert store is a persistent storage layer that is used to store alerting entries. As of now, the alert store only supports configurable alert destinations for each alerting entry. Ie:
```
- (SUUID) --> (AlertDestination)
+ (UUID) --> (AlertDestination)
```
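+
+A minimal Go sketch of that mapping, assuming a simple in-memory store (the concrete `AlertStore` implementation and type names in the codebase may differ):
+
+```
+// alertStore is an illustrative in-memory mapping of alert UUIDs to destinations.
+type alertStore struct {
+    dests map[core.UUID]core.AlertDestination
+}
+
+// AddAlertDestination records which destination an alert's UUID should route to.
+func (s *alertStore) AddAlertDestination(id core.UUID, dest core.AlertDestination) {
+    s.dests[id] = dest
+}
+```
+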
### Alert Destinations
@@ -77,7 +77,7 @@ Done! You should now see any generated alerts being forwarded to your specified
#### PagerDuty
-The PagerDuty alert destination is a configurable destination that allows alerts to be sent to a specific PagerDuty services via the use of integration keys. Pessimism also uses the SUUID associated with an alert as a deduplication key for PagerDuty. This is done to ensure that PagerDuty will not be spammed with duplicate or incidents.
+The PagerDuty alert destination is a configurable destination that allows alerts to be sent to specific PagerDuty services via integration keys. Pessimism also uses the UUID associated with an alert as a deduplication key for PagerDuty. This ensures that PagerDuty will not be spammed with duplicate incidents.
### Alert CoolDowns
@@ -88,7 +88,6 @@ An example of this is shown below:
```json
{
"network": "layer1",
- "pipeline_type": "live",
"type": "balance_enforcement",
"start_height": null,
"alerting_params": {
diff --git a/docs/architecture/api.markdown b/docs/architecture/api.markdown
index e5f0eb09..34b58ecc 100644
--- a/docs/architecture/api.markdown
+++ b/docs/architecture/api.markdown
@@ -12,7 +12,7 @@ Currently, interactive endpoint documentation is hosted via [Swagger UI](https:/
### Configuration
-The API can be customly configured using environment variables stored in a `config.env` file. The following environment variables are used to configure the API:
+The API can be configured using environment variables stored in a `config.env` file. The following environment variables are used to configure the API:
- `SERVER_HOST`: The host address to serve the API on (eg. `localhost`)
- `SERVER_PORT`: The port to serve the API on (eg. `8080`)
@@ -20,9 +20,9 @@ The API can be customly configured using environment variables stored in a `conf
- `SERVER_READ_TIMEOUT`: The read timeout second duration for the server (eg. `10`)
- `SERVER_WRITE_TIMEOUT`: The write timeout second duration for the server (eg. `10`)
-### Components
+### Processes
-The Pessimism API is broken down into the following constituent components:
+The Pessimism API is broken down into the following constituent processes:
- `handlers`: The handlers package contains the HTTP handlers for the API. Each handler is responsible for handling a specific endpoint and is responsible for parsing the request, calling the appropriate service method, and renders a response.
- `service`: The service package contains the business logic for the API. The service is responsible for handling calls to the core Pessimism subsystems and is responsible for validating incoming requests.
diff --git a/docs/architecture/architecture.markdown b/docs/architecture/architecture.markdown
index d017867c..55010e21 100644
--- a/docs/architecture/architecture.markdown
+++ b/docs/architecture/architecture.markdown
@@ -8,8 +8,8 @@ permalink: /architecture
There are *three subsystems* that drive Pessimism’s architecture:
-1. [ETL](./etl.markdown) - Modularized data extraction system for retrieving and processing external chain data in the form of a DAG known as the Pipeline DAG
-2. [Risk Engine](./engine.markdown) - Logical execution platform that runs a set of heuristics on the data funneled from the Pipeline DAG
+1. [ETL](./etl.markdown) - Modularized data extraction system for retrieving and processing external chain data in the form of a DAG known as the Path DAG
+2. [Risk Engine](./engine.markdown) - Logical execution platform that runs a set of heuristics on the data funneled from the Path DAG
3. [Alerting](./alerting.markdown) - Alerting system that is used to notify users of heuristic failures
These systems will be accessible by a client through the use of a JSON-RPC API that has unilateral access to all three primary subsystems.
@@ -23,7 +23,7 @@ The API will be supported to allow Pessimism users via client to:
## Diagram
The following diagram illustrates the core interaction flow between the three primary subsystems, API, and external data sources:
-![high level component diagram](../assets/images/high_level_diagram.png)
+![high level process diagram](../assets/images/high_level_diagram.png)
## Shared State
diff --git a/docs/architecture/engine.markdown b/docs/architecture/engine.markdown
index 456a0f2f..d401a345 100644
--- a/docs/architecture/engine.markdown
+++ b/docs/architecture/engine.markdown
@@ -22,7 +22,7 @@ graph LR;
subgraph A["Engine Manager"]
C(eventLoop) -.-> C;
C --> |input|D("execute()")
- D --> |input.PUUID|E[SessionStore]
+ D --> |input.PathID|E[SessionStore]
E --> |"[]sessions"|D
D --> |"[]sessions"|F[Risk Engine]
F --> |"[]outcome"|D
@@ -45,28 +45,13 @@ The ETL publishes `Heuristic Input` to the Risk Engine using a relay channel. Th
## Heuristic Session
-A heuristic session refers to the execution and representation of a single heuristic. A heuristic session is uniquely identified by a `SUUID` and is associated with a single `PUUID`. A heuristic session is created by the `EngineManager` when a user requests to run an active session. The `EngineManager` will create a new `HeuristicSession` and pass it to the `RiskEngine` to be executed. The `RiskEngine` will then execute the heuristic session and return an `InvalidationOutcome` to the `EngineManager`. The `EngineManager` will then create an `Alert` using the `InvalidationOutcome` and publish it to the Alerting system.
-
-## Session UUID (SUUID)
-
-The SUUID is a unique identifier that is used to identify a specific heuristic session. The SUUID is generated by the `EngineManager` when a user requests to run a new heuristic session. The SUUID is used to uniquely identify a specific heuristic session. This allows the `EngineManager` to perform operations on a specific heuristic session such as removing it or updating it.
-
-A `SUUID` constitutes of both a unique `UUID` and a `PID`.
-
-A `SessionPID` is encoded using the following 3 byte array sequence:
-
-```
- 0 1 2 3
- |-----------|-----------|-----------|
- network pipeline heuristic
- type type type
-```
+A heuristic session refers to the execution and representation of a single heuristic. A heuristic session is uniquely identified by a `UUID` and is associated with a single `PathID`. A heuristic session is created by the `EngineManager` when a user requests to run an active session. The `EngineManager` will create a new `HeuristicSession` and pass it to the `RiskEngine` to be executed. The `RiskEngine` will then execute the heuristic session and return an `InvalidationOutcome` to the `EngineManager`. The `EngineManager` will then create an `Alert` using the `InvalidationOutcome` and publish it to the Alerting system.
## Heuristic Input
The heuristic input is a struct that contains the following fields:
-* `PUUID` - The ID of the heuristic that the input data is intended for
+* `PathID` - The ID of the path that the input data is intended for
* `Input` - Transit data that was generated by the ETL
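+
+As a rough Go sketch of that struct (field names here are illustrative assumptions, not the exact repo definition):
+
+```
+// HeuristicInput pairs ETL transit data with the path that produced it.
+type HeuristicInput struct {
+    PathID core.PathID // path the input data is intended for
+    Input  core.Event  // transit data emitted by the ETL
+}
+```
+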
## Heuristic
@@ -80,17 +65,17 @@ Every hardcoded heuristic must implement an `Heuristic` interface to be compatib
```
type Heuristic interface {
Addressing() bool
- InputType() core.RegisterType
- Invalidate(core.TransitData) (*core.InvalOutcome, bool, error)
- SUUID() core.SUUID
- SetSUUID(core.SUUID)
+ InputType() core.TopicType
+ Invalidate(core.Event) (*core.InvalOutcome, bool, error)
+ UUID() core.UUID
+ SetUUID(core.UUID)
}
```
### Heuristic Input Type
-The heuristic input type is a `RegisterType` that defines the type of data that the heuristic will receive as input. The heuristic input type is defined by the `InputType()` method of the `Heuristic` interface. The heuristic input type is used by the `RiskEngine` to determine if the input data is compatible with the heuristic. If the input data is not compatible with the heuristic, the `RiskEngine` will not execute the heuristic and will return an error.
+The heuristic input type is a `TopicType` that defines the type of data the heuristic will receive as input. It is defined by the `InputType()` method of the `Heuristic` interface and is used by the `RiskEngine` to determine whether the input data is compatible with the heuristic. If the input data is not compatible, the `RiskEngine` will not execute the heuristic and will return an error.
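+
+A hedged sketch of that compatibility check (not the engine's actual code; the `Type` field and package names are assumptions for illustration):
+
+```
+// checkCompat is an illustrative guard run before a heuristic is executed.
+func checkCompat(h heuristic.Heuristic, e core.Event) error {
+    if e.Type != h.InputType() {
+        return fmt.Errorf("incompatible input type for heuristic %s", h.UUID())
+    }
+    return nil
+}
+```
+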
### Addressing
diff --git a/docs/architecture/etl.markdown b/docs/architecture/etl.markdown
index 17ba0ce3..3c0f0042 100644
--- a/docs/architecture/etl.markdown
+++ b/docs/architecture/etl.markdown
@@ -8,27 +8,26 @@ permalink: /architecture/etl
{% endraw %}
-The Pessimism ETL is a generalized abstraction for a DAG-based component system that continuously transforms chain data into inputs for consumption by a Risk Engine in the form of intertwined data “pipelines”. This DAG based representation of ETL operations is done to ensure that the application can optimally scale to support many active heuristics. This design allows for the reuse of modularized ETL components and de-duplication of conflicting pipelines under certain key logical circumstances.
+The Pessimism ETL is a generalized abstraction for a DAG-based process system that continuously transforms chain data into inputs for consumption by a Risk Engine in the form of intertwined data “paths”. This DAG-based representation of ETL operations is done to ensure that the application can optimally scale to support many active heuristics. This design allows for the reuse of modularized ETL processes and de-duplication of conflicting paths under certain key logical circumstances.
-## Component
+## Process
-A component refers to a graph node within the ETL system. Every component performs some operation for transforming data from any data source into a consumable input for the Risk Engine to ingest.
-Currently, there are three total component types:
+A process refers to a graph node within the ETL system. Every process performs some operation for transforming data from any data source into a consumable input for the Risk Engine to ingest.
+Currently, there are two process types:
-1. `Pipe` - Used to perform local arbitrary computations _(e.g. Extracting L1Withdrawal transactions from a block)_
-2. `Oracle` - Used to poll and collect data from some third-party source _(e.g. Querying real-time account balance amounts from an op-geth execution client)_
-3. `Aggregator` - Used to synchronize events between asynchronous data sources _(e.g. Synchronizing L1/L2 blocks to understand real-time changes in bridging TVL)_
+1. `Subscriber` - Used to perform local arbitrary computations _(e.g. Extracting L1Withdrawal transactions from a block)_
+2. `Reader` - Used to poll and collect data from some third-party source _(e.g. Querying real-time account balance amounts from an op-geth execution client)_
### Inter-Connectivity
-The diagram below showcases how interactivity between components occurs:
+The diagram below showcases how interactivity between processes occurs:
{% raw %}
graph LR;
- A((Component0)) -->|dataX| C[Ingress];
+ A((Process0)) -->|dataX| C[Ingress];
- subgraph B["Component1"]
+ subgraph B["Process1"]
C --> D[ingressHandler];
D --> |dataX| E(eventLoop);
E --> |dataY| F[egressHandler];
@@ -36,42 +35,42 @@ graph LR;
F --> |dataY| H[egress1];
end
- G --> K((Component2));
- H --> J((Component3));
+ G --> K((Process2));
+ H --> J((Process3));
{% endraw %}
#### Egress Handler
-All component types use an `egressHandler` struct for routing transit data to actively subscribed downstream ETL components.
+All process types use an `egressHandler` struct for routing transit data to actively subscribed downstream ETL processes.
{% raw %}
flowchart TD;
- Component-->|Transit_Data|A[Egress0];
- Component-->|Transit_Data|B[Egress1];
+ Process-->|Transit_Data|A[Egress0];
+ Process-->|Transit_Data|B[Egress1];
{% endraw %}
#### Ingress Handler
-All component types also use an `ingressHandler` struct for ingesting active transit data from upstream ETL components.
+All process types also use an `ingressHandler` struct for ingesting active transit data from upstream ETL processes.
-### Component UUID (CUUID)
+### Process ID
-All components have a UUID that stores critical identification data. Component IDs are used by higher order abstractions to:
+All processes have an ID that stores critical identification data. Process IDs are used by higher order abstractions to:
-* Represent a component DAG
-* Understand when component duplicates occur in the system
+* Represent a process DAG
+* Understand when duplicate processes are generated
-Component UUID's constitute of both a randomly generated `UUID` and a deterministic `PID`. This is done to ensure uniqueness of each component instance while also ensuring collision based properties so that components can be reused when viable.
+Process IDs consist of both a randomly generated `UUID` and a deterministic `PID`. This is done to ensure uniqueness of each process instance while also ensuring collision-based properties so that processes can be reused when viable.
-A `ComponentPID` is encoded using the following four byte sequence:
+A `ProcIdentifier` is encoded using the following four-byte sequence:
```
0 1 2 3 4
|--------|--------|--------|--------|
- network pipeline component register
+ network path process register
id type type type
```
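+
+As a minimal sketch of how such a four-byte identifier could be packed (purely illustrative; the concrete encoding helpers in the repo may differ):
+
+```
+// encodePID packs the network, path, process, and register type bytes into
+// the four-byte sequence described above.
+func encodePID(network, pathType, procType, registerType byte) [4]byte {
+    return [4]byte{network, pathType, procType, registerType}
+}
+```
+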
@@ -79,17 +78,17 @@ A `ComponentPID` is encoded using the following four byte sequence:
**NOTE - State handling policies by management abstractions has yet to be properly fleshed out**
-### Pipe
+### Subscriber
-Pipes are used to perform arbitrary transformations on some provided upstream input data.
+Subscribers are used to perform arbitrary transformations on some provided upstream input data.
Once input data processing has been completed, the output data is then submitted to its respective destination(s).
#### Attributes
-* An `ActivityState` channel with a pipeline manager
-* Ingress handler that other components can write to
+* An `ActivityState` channel with a path manager
+* Ingress handler that other processes can write to
* `TransformFunc` - A processing function that performs some data translation/transformation on respective inputs
-* An `egressHandler` that stores dependencies to write to (i.e. Other pipeline components, heuristic engine)
+* An `egressHandler` that stores dependencies to write to (i.e. Other path processes, heuristic engine)
* A specified output data type
#### Example Use Case(s)
@@ -97,20 +96,19 @@ Once input data processing has been completed, the output data is then submitted
* Generating opcode traces for some EVM transaction
* Parsing emitted events from a transaction
-### Oracle
+### Reader
-Oracles are responsible for collecting data from some external third party _(e.g. L1 geth node, L2 rollup node, etc.)_. As of now, oracle's are configurable through the use of a standard `OracleDefinition` interface that allows developers to write arbitrary oracle logic.
+Readers are responsible for collecting data from some external third party _(e.g. L1 geth node, L2 rollup node, etc.)_. As of now, readers are configurable through the use of a standard `OracleDefinition` interface that allows developers to write arbitrary reader logic.
The following key interface functions are supported/enforced:
* `ReadRoutine` - Routine used for reading/polling real-time data for some arbitrarily configured data source
-* `BackTestRoutine` - _Optional_ routine used for sequentially backtesting from some starting to ending block heights.
-Unlike other components, `Oracles` actually employ _2 go routines_ to safely operate. This is because the definition routines are run as a separate go routine with a communication channel to the actual `Oracle` event loop. This is visualized below:
+Unlike other processes, `Readers` employ _two goroutines_ to operate safely. This is because the definition routines are run as a separate goroutine with a communication channel to the actual `Reader` event loop. This is visualized below:
{% raw %}
graph LR;
- subgraph A[Oracle]
+ subgraph A[Reader]
B[eventLoop]-->|channel|ODefRoutine;
B[eventLoop]-->|context|ODefRoutine;
B-->B;
@@ -120,9 +118,9 @@ graph LR;
#### Attributes
-* A communication channel with the pipeline manager
+* A communication channel with the path manager
* Poller/subscription logic that performs real-time data reads on some third-party source
-* An `egressHandler` that stores dependencies to write to (i.e. Other pipeline components, heuristic engine)
+* An `egressHandler` that stores dependencies to write to (i.e. Other path processes, heuristic engine)
* A specified output data type
* _(Optional)_ Interface with some storage (postgres, mongo, etc.) to persist lively extracted data
@@ -136,14 +134,14 @@ graph LR;
### (TBD) Aggregator
-**NOTE - This component type is still in-development**
-Aggregators are used to solve the problem where a pipe or a heuristic input will require multiple sources of data to perform an execution sequence. Since aggregators are subscribing to more than one data stream with different output frequencies, they must employ a synchronization policy for collecting and propagating multi-data inputs within a highly asynchronous environment.
+**NOTE - This process type is still in-development**
+Aggregators are used to solve the problem where a Subscriber or a heuristic input will require multiple sources of data to perform an execution sequence. Since aggregators are subscribing to more than one data stream with different output frequencies, they must employ a synchronization policy for collecting and propagating multi-data inputs within a highly asynchronous environment.
#### Attributes
-* Able to read heterogenous transit data from an arbitrary number of component ingresses
+* Able to read heterogeneous transit data from an arbitrary number of process ingresses
* A synchronization policy that defines how different transit data from multiple ingress streams will be aggregated into a collectively bound single piece of data
-* EgressHandler to handle downstream transit data routing to other components or destinations
+* EgressHandler to handle downstream transit data routing to other processes or destinations
#### Single Value Subscription
@@ -176,18 +174,18 @@ This should be extendable to any number of heterogenous data sources.
## Registry
-A registry submodule is used to store all ETL data register definitions that provide the blueprint for a unique ETL component type. A register definition consists of:
+A registry submodule is used to store all ETL data register definitions that provide the blueprint for a unique ETL process type. A register definition consists of:
-* `DataType` - The output data type of the component node. This is used for data serialization/deserialization by both the ETL and Risk Engine subsystems.
-* `ComponentType` - The type of component being invoked (_e.g. Oracle_).
-* `ComponentConstructor` - Constructor function used to create unique component instances. All components must implement the `Component` interface.
-* `Dependencies` - Ordered slice of data register dependencies that are necessary for the component to operate. For example, a component that requires a geth block would have a dependency list of `[geth.block]`. This dependency list is used to ensure that the ETL can properly construct a component graph that satisfies all component dependencies.
+* `DataType` - The output data type of the process node. This is used for data serialization/deserialization by both the ETL and Risk Engine subsystems.
+* `ProcessType` - The type of process being invoked (_e.g. Reader_).
+* `ProcessConstructor` - Constructor function used to create unique process instances. All processes must implement the `Process` interface.
+* `Dependencies` - Ordered slice of data register dependencies that are necessary for the process to operate. For example, a process that requires a geth block would have a dependency list of `[geth.block]`. This dependency list is used to ensure that the ETL can properly construct a process graph that satisfies all process dependencies.
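+
+Roughly, a register definition can be pictured as the following Go struct (field names and types here are assumptions for illustration, not the exact repo definitions):
+
+```
+// DataRegister sketches the blueprint fields described above.
+type DataRegister struct {
+    DataType           core.TopicType   // output data type of the process node
+    ProcessType        core.ProcessType // e.g. a reader
+    ProcessConstructor any              // constructor returning a Process instance
+    Dependencies       []core.TopicType // ordered upstream register dependencies
+}
+```
+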
## Addressing
-Some component's require knowledge of a specific address to properly function. For example, an oracle that polls a geth node for native ETH balance amounts would need knowledge of the address to poll. To support this, the ETL leverages a shared state store between the ETL and Risk Engine subsystems.
+Some processes require knowledge of a specific address to properly function. For example, a reader that polls a geth node for native ETH balance amounts would need knowledge of the address to poll. To support this, the ETL leverages a shared state store between the ETL and Risk Engine subsystems.
-Shown below is how the ETL and Risk Engine interact with the shared state store using a `BalanceOracle` component as an example:
+Shown below is how the ETL and Risk Engine interact with the shared state store using a `BalanceOracle` process as an example:
{% raw %}
@@ -198,8 +196,8 @@ graph LR;
subgraph EM["Engine Subsystem"]
- SessionHander --> |"Set(PUUID, address)"|state
- SessionHander --> |"Delete(PUUID, address)"|state
+ SessionHander --> |"Set(PathID, address)"|state
+ SessionHander --> |"Delete(PathID, address)"|state
end
subgraph ETL["ETL Subsystem"]
@@ -210,7 +208,7 @@ graph LR;
GETH --> |"{4} []balance"|BO
BO("Balance
- Oracle") --> |"{1} Get(PUUID)"|state
+ Reader") --> |"{1} Get(PathID)"|state
BO -."eventLoop()".-> BO
state --> |"{2} []address"|BO
@@ -218,33 +216,33 @@ graph LR;
{% endraw %}
-### Geth Block Oracle Register
+### Geth Block Reader Register
-A `GethBlock` register refers to a block output extracted from a go-ethereum node. This register is used for creating `Oracle` components that poll and extract block data from a go-ethereum node in real-time.
+A `BlockHeader` register refers to a block output extracted from a go-ethereum node. This register is used for creating `Reader` processes that poll and extract block data from a go-ethereum node in real-time.
-### Geth Account Balance Oracle Register
+### Geth Account Balance Reader Register
-An `AccountBalance` register refers to a native ETH balance output extracted from a go-ethereum node. This register is used for creating `Oracle` components that poll and extract native ETH balance data for some state persisted addresses from a go-ethereum node in real-time.
-Unlike, the `GethBlock` register, this register requires knowledge of an address set that's shared with the risk engine to properly function and is therefore addressable. Because of this, any heuristic that uses this register must also be addressable.
+An `AccountBalance` register refers to a native ETH balance output extracted from a go-ethereum node. This register is used for creating `Reader` processes that poll and extract native ETH balance data for some state persisted addresses from a go-ethereum node in real-time.
+Unlike the `BlockHeader` register, this register requires knowledge of an address set that's shared with the risk engine to properly function and is therefore addressable. Because of this, any heuristic that uses this register must also be addressable.
## Managed ETL
-### Component Graph
+### Process Graph
-The ETL uses a `ComponentGraph` construct to represent and store critical component inter-connectivity data _(ie. component node entries and graph edges)_.
+The ETL uses a `ProcessGraph` construct to represent and store critical process inter-connectivity data _(ie. process node entries and graph edges)_.
-A graph edge is represented as a binded communication path between two arbitrary component nodes (`c1`, `c2`). Adding an edge from some component (`c1`) to some downstream component (`c2`) results in `c1` having a path to the ingress of `c2` in its [egress handler](#egress-handler). This would look something like:
+A graph edge is represented as a bound communication path between two arbitrary process nodes (`c1`, `c2`). Adding an edge from some process (`c1`) to some downstream process (`c2`) results in `c1` having a path to the ingress of `c2` in its [egress handler](#egress-handler). This would look something like:
{% raw %}
graph TB;
subgraph "\nEdge"
- subgraph A[component0]
+ subgraph A[process0]
B[egressHandler];
end
- subgraph D[component1]
+ subgraph D[process1]
B -.-> |egress| C(ingressHandler)
end
@@ -256,69 +254,69 @@ graph TB;
{% endraw %}
-**NOTE:** The component graph used in the ETL is represented as a _DAG_ (Directed Acyclic Graph), meaning that no bipartite edge relationships should exist between two components (`c1`, `c2`) where `c1-->c2` && `c2-->c1`. While there are no explicit checks for this in the code software, it should be impossible given that all components declare entrypoint register dependencies within their metadata, meaning that a component could only be susceptible to bipartite connectivity in the circumstance where a component registry definition declares inversal input->output of an existing component.
+**NOTE:** The process graph used in the ETL is represented as a _DAG_ (Directed Acyclic Graph), meaning that no bipartite edge relationships should exist between two processes (`c1`, `c2`) where `c1-->c2` && `c2-->c1`. While there are no explicit checks for this in the software, it should be impossible given that all processes declare entrypoint register dependencies within their metadata, meaning that a process could only be susceptible to bipartite connectivity in the circumstance where a process registry definition declares the inverse input->output of an existing process.
-### Pipeline
+### Path
-Pipelines are used to represent some full component path in a DAG based `ComponentGraph`. A pipeline is a sequence of components that are connected together in a way to express meaningful ETL operations for extracting some heuristic input for consumption by the Risk Engine.
+Paths are used to represent some full process path in a DAG-based `ProcessGraph`. A path is a sequence of processes that are connected to express meaningful ETL operations for extracting some heuristic input for consumption by the Risk Engine.
-### Pipeline States
+### Path States
-* `Backfill` - Backfill denotes that the pipeline is currently performing a backfill operation. This means the pipeline is sequentially reading data from some starting height to the most recent block height. This is useful for building state dependent pipelines that require some knowledge of prior history to make live assessments. For example, detecting imbalances between the native ETH deposit supply on the L1 portal contract and the TVL unlocked on the L2 chain would require indexing the prior history of L1 deposits to construct correct supply values.
-* `Live` - Live denotes that the pipeline is currently performing live operations. This means the pipeline is reading data from the most recent block height.
-* `Stopped` - Stopped denotes that the pipeline is currently not performing any operations. This means the pipeline is neither reading nor processing any data.
-* `Paused` - Paused denotes that the pipeline is currently not performing any operations. This means the pipeline is neither reading nor processing any data. The difference between `Stopped` and `Paused` is that a `Paused` pipeline can be resumed at any time while a `Stopped` pipeline must be restarted.
-* `Error` - Error denotes that the pipeline is currently in an error state. This means the pipeline is neither reading nor processing any data. The difference between `Stopped` and `Error` is that an `Error` pipeline can be resumed at any time while a `Stopped` pipeline must be restarted.
+* `Backfill` - Backfill denotes that the path is currently performing a backfill operation. This means the path is sequentially reading data from some starting height to the most recent block height. This is useful for building state dependent paths that require some knowledge of prior history to make live assessments. For example, detecting imbalances between the native ETH deposit supply on the L1 portal contract and the TVL unlocked on the L2 chain would require indexing the prior history of L1 deposits to construct correct supply values.
+* `Live` - Live denotes that the path is currently performing live operations. This means the path is reading data from the most recent block height.
+* `Stopped` - Stopped denotes that the path is currently not performing any operations. This means the path is neither reading nor processing any data.
+* `Paused` - Paused denotes that the path is currently not performing any operations. This means the path is neither reading nor processing any data. The difference between `Stopped` and `Paused` is that a `Paused` path can be resumed at any time while a `Stopped` path must be restarted.
+* `Error` - Error denotes that the path is currently in an error state. This means the path is neither reading nor processing any data. The difference between `Stopped` and `Error` is that an `Error` path can be resumed at any time while a `Stopped` path must be restarted.
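+
+Purely as an illustration, the states above can be pictured as a simple Go enumeration (not the repo's actual type):
+
+```
+// PathState is an illustrative enumeration of the path states described above.
+type PathState int
+
+const (
+    Backfill PathState = iota
+    Live
+    Stopped
+    Paused
+    Error
+)
+```
+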
-### Pipeline Types
+### Path Types
-There are two types of pipelines:
+There are two types of paths:
**Live**
-A live pipeline is a pipeline that is actively running and performing ETL operations on some data fetched in real-time. For example, a live pipeline could be used to extract newly curated block data from a go-ethereum node.
+A live path is a path that is actively running and performing ETL operations on some data fetched in real-time. For example, a live path could be used to extract newly curated block data from a go-ethereum node.
**Backtest**
-A backtest pipeline is a pipeline that is used to sequentially backtest some component sequence from some starting to ending block height. For example, a backtest pipeline could be used to backtest a _balance_enforcement_ heuristic between L1 block heights `0` to `1000`.
+A backtest path is a path that is used to sequentially backtest some process sequence from some starting to ending block height. For example, a backtest path could be used to backtest a _balance_enforcement_ heuristic between L1 block heights `0` to `1000`.
-### Pipeline UUID (PUUID)
+### Path ID (PathID)
-All pipelines have a PUUID that stores critical identification data. Pipeline UUIDs are used by higher order abstractions to:
+All paths have a `PathID` that stores critical identification data. Path IDs are used by higher order abstractions to:
* Route heuristic inputs between the ETL and Risk Engine
-* Understand when pipeline collisions between `PIDs` occur
+* Understand when path collisions between `PIDs` occur
-Pipeline UUID's constitute of both a randomly generated `UUID` and a deterministic `PID`. This is done to ensure uniqueness of each component instance while also ensuring collision based properties so that overlapping components can be deduplicated when viable.
+Path IDs consist of both a randomly generated `UUID` and a deterministic `PID`. This is done to ensure uniqueness of each path instance while also ensuring collision-based properties so that overlapping paths can be deduplicated when viable.
-A `PipelinePID` is encoded using the following 9 byte array sequence:
+A `PathPID` is encoded using the following nine-byte array sequence:
```
0 1 5 9
|--------|----------------------------------------|----------------------------------------|
- Pipeline first pipeline path last pipeline path
- type component PID sequence component PID sequence
+    Path          first path               last path
+    type          process PID sequence     process PID sequence
```
### Collision Analysis
**NOTE - This section is still in-development**
-Pipeline collisions occur when two pipelines with the same `PID` are generated. This can occur when two pipelines have identical component sequences and valid stateful properties.
+Path collisions occur when two paths with the same `PID` are generated. This can occur when two paths have identical process sequences and valid stateful properties.
-For some pipeline collision to occur between two pipelines (`P0`, `P1`), the following properties must hold true:
+For some path collision to occur between two paths (`P0`, `P1`), the following properties must hold true:
1. `P0` must have the same `PID` as `P1`
-2. `P0` and `P1` must be live pipelines that aren't performing backtests or backfilling operations
+2. `P0` and `P1` must be live paths that aren't performing backtests or backfilling operations
-Once a collision is detected, the ETL will attempt to deduplicate the pipeline by:
+Once a collision is detected, the ETL will attempt to deduplicate the path by:
1. Stopping the event loop of `P1`
-2. Removing the `PID` of `P1` from the pipeline manager
+2. Removing the `PID` of `P1` from the path manager
3. Merging shared state from `P1` to `P0`
## ETL Manager
+`EtlManager` is used for connecting lower-level objects (_Process Graph, Path_) together to express meaningful ETL administration logic, i.e.:
+`EtlManager` is used for connecting lower-level objects (_Process Graph, Path_) together in a way to express meaningful ETL administration logic; ie:
-* Creating a new pipeline
-* Removing a pipeline
-* Merging some pipelines
-* Updating a pipeline
+* Creating a new path
+* Removing a path
+* Merging some paths
+* Updating a path
diff --git a/docs/heuristics.markdown b/docs/heuristics.markdown
index 8685bf2f..a267506e 100644
--- a/docs/heuristics.markdown
+++ b/docs/heuristics.markdown
@@ -27,7 +27,6 @@ curl --location --request POST 'http://localhost:8080/v0/heuristic' \
"method": "run",
"params": {
"network": "layer1",
- "pipeline_type": "live",
"type": "balance_enforcement",
"start_height": null,
"alert_destination": "slack",
@@ -62,7 +61,6 @@ curl --location --request POST 'http://localhost:8080/v0/heuristic' \
"method": "run",
"params": {
"network": "layer1",
- "pipeline_type": "live",
"type": "contract_event",
"start_height": null,
"alert_destination": "slack",
@@ -101,7 +99,6 @@ curl --location --request POST 'http://localhost:8080/v0/heuristic' \
"method": "run",
"params": {
"network": "layer1",
- "pipeline_type": "live",
"type": "withdrawal_safety",
"start_height": null,
"alert_destination": "slack",
@@ -116,7 +113,7 @@ curl --location --request POST 'http://localhost:8080/v0/heuristic' \
**NOTE:** This heuristic requires an active RPC connection to both L1 and L2 networks. Furthermore, the Pessimism implementation of fault-detector assumes that a submitted L2 output on L1 will correspond to a canonical block on L2.
-The hardcoded `fault_detector` heuristic scans for active `OutputProposed` events on an L1 Output Oracle contract. Once an event is detected, the heuristic implementation proceeds to reconstruct a local state output for the corresponding L2 block. If there is a mismatch between the L1 output and the local state output, the heuristic alerts.
+The hardcoded `fault_detector` heuristic scans for active `OutputProposed` events on an L1 Output Oracle contract. Once an event is detected, the heuristic implementation proceeds to reconstruct a local state output for the corresponding L2 block. If there is a mismatch between the L1 output and the local state output, the heuristic alerts.
### Parameters
@@ -134,7 +131,6 @@ curl --location --request POST 'http://localhost:8080/v0/heuristic' \
"method": "run",
"params": {
"network": "layer1",
- "pipeline_type": "live",
"type": "fault_detector",
"start_height": null,
"alert_destination": "slack",
diff --git a/docs/telemetry.markdown b/docs/telemetry.markdown
index 4e9bcac0..030f4fb3 100644
--- a/docs/telemetry.markdown
+++ b/docs/telemetry.markdown
@@ -24,12 +24,12 @@ which can be pasted directly below to keep current system metric documentation u
| METRIC | DESCRIPTION | LABELS | TYPE |
|-------------------------------------------|--------------------------------------------------------|----------------------------------------|---------|
| pessimism_up | 1 if the service is up | | gauge |
-| pessimism_heuristics_active_heuristics | Number of active heuristics | heuristic,network,pipeline | gauge |
-| pessimism_etl_active_pipelines | Number of active pipelines | pipeline,network | gauge |
+| pessimism_heuristics_active_heuristics | Number of active heuristics | heuristic,network,path | gauge |
+| pessimism_etl_active_paths | Number of active paths | path,network | gauge |
| pessimism_heuristics_heuristic_runs_total | Number of times a specific heuristic has been run | network,heuristic | counter |
-| pessimism_alerts_generated_total | Number of total alerts generated for a given heuristic | network,heuristic,pipeline,destination | counter |
+| pessimism_alerts_generated_total | Number of total alerts generated for a given heuristic | network,heuristic,path,destination | counter |
| pessimism_node_errors_total | Number of node errors caught | node | counter |
| pessimism_block_latency | Millisecond latency of block processing | network | gauge |
-| pessimism_pipeline_latency | Millisecond latency of pipeline processing | puuid | gauge |
+| pessimism_path_latency | Millisecond latency of path processing | PathID | gauge |
| pessimism_heuristic_execution_time | Nanosecond time of heuristic execution | heuristic | gauge |
| pessimism_heuristic_errors_total | Number of errors generated by heuristic executions | heuristic | counter |
diff --git a/e2e/alerting_test.go b/e2e/alerting_test.go
index b66ec088..1df8f596 100644
--- a/e2e/alerting_test.go
+++ b/e2e/alerting_test.go
@@ -28,7 +28,6 @@ func TestMultiDirectiveRouting(t *testing.T) {
ids, err := ts.App.BootStrap([]*models.SessionRequestParams{{
Network: core.Layer1.String(),
- PType: core.Live.String(),
HeuristicType: core.ContractEvent.String(),
StartHeight: nil,
EndHeight: nil,
@@ -65,8 +64,8 @@ func TestMultiDirectiveRouting(t *testing.T) {
// Wait for Pessimism to process the newly emitted event and send a notification to the mocked Slack
// and PagerDuty servers.
require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
- pUUID := ids[0].PUUID
- height, err := ts.Subsystems.PipelineHeight(pUUID)
+ id := ids[0].PathID
+ height, err := ts.Subsystems.PathHeight(id)
if err != nil {
return false, err
}
@@ -91,17 +90,16 @@ func TestMultiDirectiveRouting(t *testing.T) {
// balance enforcement heuristic session on L2 network with a cooldown.
func TestCoolDown(t *testing.T) {
- ts := e2e.CreateL2TestSuite(t)
+ ts := e2e.CreateSysTestSuite(t)
defer ts.Close()
- alice := ts.L2Cfg.Secrets.Addresses().Alice
- bob := ts.L2Cfg.Secrets.Addresses().Bob
+ alice := ts.Cfg.Secrets.Addresses().Alice
+ bob := ts.Cfg.Secrets.Addresses().Bob
alertMsg := "one baby to another says:"
// Deploy a balance enforcement heuristic session for Alice using a cooldown.
- _, err := ts.App.BootStrap([]*models.SessionRequestParams{{
+ ids, err := ts.App.BootStrap([]*models.SessionRequestParams{{
Network: core.Layer2.String(),
- PType: core.Live.String(),
HeuristicType: core.BalanceEnforcement.String(),
StartHeight: nil,
EndHeight: nil,
@@ -120,21 +118,21 @@ func TestCoolDown(t *testing.T) {
require.NoError(t, err, "Failed to bootstrap balance enforcement heuristic session")
// Get Alice's balance.
- aliceAmt, err := ts.L2Geth.L2Client.BalanceAt(context.Background(), alice, nil)
+ aliceAmt, err := ts.L2Client.BalanceAt(context.Background(), alice, nil)
require.NoError(t, err, "Failed to get Alice's balance")
// Determine the gas cost of the transaction.
gasAmt := 1_000_001
bigAmt := big.NewInt(1_000_001)
- gasPrice := big.NewInt(int64(ts.L2Cfg.DeployConfig.L2GenesisBlockGasLimit))
+ gasPrice := big.NewInt(int64(ts.Cfg.DeployConfig.L2GenesisBlockGasLimit))
gasCost := gasPrice.Mul(gasPrice, bigAmt)
- signer := types.LatestSigner(ts.L2Geth.L2ChainConfig)
+ signer := types.LatestSigner(ts.Sys.L2GenesisCfg.Config)
// Create a transaction from Alice to Bob that will drain almost all of Alice's ETH.
- drainAliceTx := types.MustSignNewTx(ts.L2Cfg.Secrets.Alice, signer, &types.DynamicFeeTx{
- ChainID: big.NewInt(int64(ts.L2Cfg.DeployConfig.L2ChainID)),
+ drainAliceTx := types.MustSignNewTx(ts.Cfg.Secrets.Alice, signer, &types.DynamicFeeTx{
+ ChainID: big.NewInt(int64(ts.Cfg.DeployConfig.L2ChainID)),
Nonce: 0,
GasTipCap: big.NewInt(100),
GasFeeCap: big.NewInt(100000),
@@ -145,12 +143,21 @@ func TestCoolDown(t *testing.T) {
Data: nil,
})
- // Send the transaction to drain Alice's account of almost all ETH.
- _, err = ts.L2Geth.AddL2Block(context.Background(), drainAliceTx)
- require.NoError(t, err, "Failed to create L2 block with transaction")
+ err = ts.L2Client.SendTransaction(context.Background(), drainAliceTx)
+ require.NoError(t, err)
+
+ receipt, err := wait.ForReceipt(context.Background(), ts.L2Client, drainAliceTx.Hash(), types.ReceiptStatusSuccessful)
+ require.NoError(t, err)
+
+ require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
+ id := ids[0].PathID
+ height, err := ts.Subsystems.PathHeight(id)
+ if err != nil {
+ return false, err
+ }
- // Wait for Pessimism to process the balance change and send a notification to the mocked Slack server.
- time.Sleep(2 * time.Second)
+ return height != nil && height.Uint64() > receipt.BlockNumber.Uint64(), nil
+ }))
// Check that the balance enforcement was triggered using the mocked server cache.
posts := ts.TestSlackSvr.SlackAlerts()
diff --git a/e2e/etl_test.go b/e2e/etl_test.go
new file mode 100644
index 00000000..ff127313
--- /dev/null
+++ b/e2e/etl_test.go
@@ -0,0 +1,13 @@
+package e2e_test
+
+// TODO - Test that backfilling heuristics alert
+// func TestBackfill(t *testing.T) {
+
+// // 1 - Move the L1 chain forward 10 blocks and create system transaction
+
+// // 2 - Move the L1 chain forward another 10 blocks
+
+// // 3 - Wire pessimism to start backfilling a system tx heuristic from height 0
+
+// // 4 - Assert that pessimism detected the system tx event
+// }
diff --git a/e2e/heuristic_test.go b/e2e/heuristic_test.go
index b78d6dad..1930c061 100644
--- a/e2e/heuristic_test.go
+++ b/e2e/heuristic_test.go
@@ -31,17 +31,16 @@ import (
// balance enforcement heuristic session on L2 network.
func TestBalanceEnforcement(t *testing.T) {
- ts := e2e.CreateL2TestSuite(t)
+ ts := e2e.CreateSysTestSuite(t)
defer ts.Close()
- alice := ts.L2Cfg.Secrets.Addresses().Alice
- bob := ts.L2Cfg.Secrets.Addresses().Bob
+ alice := ts.Cfg.Secrets.Addresses().Alice
+ bob := ts.Cfg.Secrets.Addresses().Bob
alertMsg := "one baby to another says:"
// Deploy a balance enforcement heuristic session for Alice.
- _, err := ts.App.BootStrap([]*models.SessionRequestParams{{
+ ids, err := ts.App.BootStrap([]*models.SessionRequestParams{{
Network: core.Layer2.String(),
- PType: core.Live.String(),
HeuristicType: core.BalanceEnforcement.String(),
StartHeight: nil,
EndHeight: nil,
@@ -58,21 +57,21 @@ func TestBalanceEnforcement(t *testing.T) {
require.NoError(t, err, "Failed to bootstrap balance enforcement heuristic session")
// Get Alice's balance.
- aliceAmt, err := ts.L2Geth.L2Client.BalanceAt(context.Background(), alice, nil)
+ aliceAmt, err := ts.L2Client.BalanceAt(context.Background(), alice, nil)
require.NoError(t, err, "Failed to get Alice's balance")
// Determine the gas cost of the transaction.
gasAmt := 1_000_001
bigAmt := big.NewInt(1_000_001)
- gasPrice := big.NewInt(int64(ts.L2Cfg.DeployConfig.L2GenesisBlockGasLimit))
+ gasPrice := big.NewInt(int64(ts.Cfg.DeployConfig.L2GenesisBlockGasLimit))
gasCost := gasPrice.Mul(gasPrice, bigAmt)
- signer := types.LatestSigner(ts.L2Geth.L2ChainConfig)
+ signer := types.LatestSigner(ts.Sys.L2GenesisCfg.Config)
// Create a transaction from Alice to Bob that will drain almost all of Alice's ETH.
- drainAliceTx := types.MustSignNewTx(ts.L2Cfg.Secrets.Alice, signer, &types.DynamicFeeTx{
- ChainID: big.NewInt(int64(ts.L2Cfg.DeployConfig.L2ChainID)),
+ drainAliceTx := types.MustSignNewTx(ts.Cfg.Secrets.Alice, signer, &types.DynamicFeeTx{
+ ChainID: big.NewInt(int64(ts.Cfg.DeployConfig.L2ChainID)),
Nonce: 0,
GasTipCap: big.NewInt(100),
GasFeeCap: big.NewInt(100000),
@@ -86,11 +85,23 @@ func TestBalanceEnforcement(t *testing.T) {
require.Equal(t, len(ts.TestPagerDutyServer.PagerDutyAlerts()), 0, "No alerts should be sent before the transaction is sent")
// Send the transaction to drain Alice's account of almost all ETH.
- _, err = ts.L2Geth.AddL2Block(context.Background(), drainAliceTx)
+
+ err = ts.L2Client.SendTransaction(context.Background(), drainAliceTx)
+ require.NoError(t, err)
+
+ receipt, err := wait.ForReceipt(context.Background(), ts.L2Client, drainAliceTx.Hash(), types.ReceiptStatusSuccessful)
require.NoError(t, err, "Failed to create L2 block with transaction")
// Wait for Pessimism to process the balance change and send a notification to the mocked Slack server.
- time.Sleep(1 * time.Second)
+ require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
+ id := ids[0].PathID
+ height, err := ts.Subsystems.PathHeight(id)
+ if err != nil {
+ return false, err
+ }
+
+ return height != nil && height.Uint64() > receipt.BlockNumber.Uint64(), nil
+ }))
// Check that the balance enforcement was triggered using the mocked server cache.
pdMsgs := ts.TestPagerDutyServer.PagerDutyAlerts()
@@ -100,12 +111,12 @@ func TestBalanceEnforcement(t *testing.T) {
assert.Contains(t, pdMsgs[0].Payload.Summary, "balance_enforcement", "Balance enforcement alert was not sent")
// Get Bobs's balance.
- bobAmt, err := ts.L2Geth.L2Client.BalanceAt(context.Background(), bob, nil)
+ bobAmt, err := ts.L2Client.BalanceAt(context.Background(), bob, nil)
require.NoError(t, err, "Failed to get Alice's balance")
// Create a transaction to send the ETH back to Alice.
- drainBobTx := types.MustSignNewTx(ts.L2Cfg.Secrets.Bob, signer, &types.DynamicFeeTx{
- ChainID: big.NewInt(int64(ts.L2Cfg.DeployConfig.L2ChainID)),
+ drainBobTx := types.MustSignNewTx(ts.Cfg.Secrets.Bob, signer, &types.DynamicFeeTx{
+ ChainID: big.NewInt(int64(ts.Cfg.DeployConfig.L2ChainID)),
Nonce: 0,
GasTipCap: big.NewInt(100),
GasFeeCap: big.NewInt(100000),
@@ -116,11 +127,22 @@ func TestBalanceEnforcement(t *testing.T) {
})
// Send the transaction to re-disperse the ETH from Bob back to Alice.
- _, err = ts.L2Geth.AddL2Block(context.Background(), drainBobTx)
- require.NoError(t, err, "Failed to create L2 block with transaction")
+ err = ts.L2Client.SendTransaction(context.Background(), drainBobTx)
+ require.NoError(t, err)
- // Wait for Pessimism to process the balance change.
- time.Sleep(1 * time.Second)
+ receipt, err = wait.ForReceipt(context.Background(), ts.L2Client, drainBobTx.Hash(), types.ReceiptStatusSuccessful)
+ require.NoError(t, err)
+
+ // Wait for Pessimism to process the balance change and send a notification to the mocked Slack server.
+ require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
+ id := ids[0].PathID
+ height, err := ts.Subsystems.PathHeight(id)
+ if err != nil {
+ return false, err
+ }
+
+ return height != nil && height.Uint64() > receipt.BlockNumber.Uint64(), nil
+ }))
// Empty the mocked PagerDuty server cache.
ts.TestPagerDutyServer.ClearAlerts()
@@ -129,7 +151,7 @@ func TestBalanceEnforcement(t *testing.T) {
time.Sleep(1 * time.Second)
// Ensure that no new alerts were sent.
- assert.Equal(t, len(ts.TestPagerDutyServer.Payloads), 0, "No alerts should be sent after the transaction is sent")
+ assert.Equal(t, 0, len(ts.TestPagerDutyServer.Payloads))
}
// TestContractEvent ... Tests the E2E flow of a single
@@ -146,7 +168,6 @@ func TestContractEvent(t *testing.T) {
// Deploy a contract event heuristic session for the L1 system config address.
ids, err := ts.App.BootStrap([]*models.SessionRequestParams{{
Network: core.Layer1.String(),
- PType: core.Live.String(),
HeuristicType: core.ContractEvent.String(),
StartHeight: nil,
EndHeight: nil,
@@ -185,8 +206,8 @@ func TestContractEvent(t *testing.T) {
// Wait for Pessimism to process the newly emitted event and send a notification to the mocked Slack server.
require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
- pUUID := ids[0].PUUID
- height, err := ts.Subsystems.PipelineHeight(pUUID)
+ id := ids[0].PathID
+ height, err := ts.Subsystems.PathHeight(id)
if err != nil {
return false, err
}
@@ -223,7 +244,6 @@ func TestWithdrawalSafetyAllInvariants(t *testing.T) {
ids, err := ts.App.BootStrap([]*models.SessionRequestParams{
{
Network: core.Layer1.String(),
- PType: core.Live.String(),
HeuristicType: core.WithdrawalSafety.String(),
StartHeight: nil,
EndHeight: nil,
@@ -282,8 +302,8 @@ func TestWithdrawalSafetyAllInvariants(t *testing.T) {
// Wait for Pessimism to process the proven withdrawal and send a notification to the mocked Slack server.
require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
- pUUID := ids[0].PUUID
- height, err := ts.Subsystems.PipelineHeight(pUUID)
+ id := ids[0].PathID
+ height, err := ts.Subsystems.PathHeight(id)
if err != nil {
return false, err
}
@@ -317,8 +337,8 @@ func TestWithdrawalSafetyAllInvariants(t *testing.T) {
// // Wait for Pessimism to process the finalized withdrawal and send a notification to the mocked Slack server.
// require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
- // pUUID := ids[0].PUUID
- // height, err := ts.Subsystems.PipelineHeight(pUUID)
+ // id := ids[0].PathID
+ // height, err := ts.Subsystems.PathHeight(id)
// if err != nil {
// return false, err
// }
@@ -343,7 +363,6 @@ func TestWithdrawalSafetyNoInvariants(t *testing.T) {
ids, err := ts.App.BootStrap([]*models.SessionRequestParams{
{
Network: core.Layer1.String(),
- PType: core.Live.String(),
HeuristicType: core.WithdrawalSafety.String(),
StartHeight: nil,
EndHeight: nil,
@@ -403,8 +422,8 @@ func TestWithdrawalSafetyNoInvariants(t *testing.T) {
// Wait for Pessimism to process the proven withdrawal and send a notification to the mocked Slack server.
require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
- pUUID := ids[0].PUUID
- height, err := ts.Subsystems.PipelineHeight(pUUID)
+ id := ids[0].PathID
+ height, err := ts.Subsystems.PathHeight(id)
if err != nil {
return false, err
}
@@ -439,7 +458,6 @@ func TestFaultDetector(t *testing.T) {
// Deploys a fault detector heuristic session instance using the locally spun-up Op-Stack chain
ids, err := ts.App.BootStrap([]*models.SessionRequestParams{{
Network: core.Layer1.String(),
- PType: core.Live.String(),
HeuristicType: core.FaultDetector.String(),
StartHeight: big.NewInt(0),
EndHeight: nil,
@@ -471,8 +489,8 @@ func TestFaultDetector(t *testing.T) {
require.Nil(t, err)
require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
- pUUID := ids[0].PUUID
- height, err := ts.Subsystems.PipelineHeight(pUUID)
+ id := ids[0].PathID
+ height, err := ts.Subsystems.PathHeight(id)
if err != nil {
return false, err
}
diff --git a/e2e/setup.go b/e2e/setup.go
index 2499e7e3..22a17ea7 100644
--- a/e2e/setup.go
+++ b/e2e/setup.go
@@ -17,9 +17,9 @@ import (
"github.com/base-org/pessimism/internal/metrics"
"github.com/base-org/pessimism/internal/mocks"
"github.com/base-org/pessimism/internal/state"
- "github.com/golang/mock/gomock"
-
"github.com/base-org/pessimism/internal/subsystem"
+ ix_node "github.com/ethereum-optimism/optimism/indexer/node"
+ "github.com/golang/mock/gomock"
op_e2e "github.com/ethereum-optimism/optimism/op-e2e"
"github.com/ethereum/go-ethereum/ethclient"
@@ -50,88 +50,6 @@ type SysTestSuite struct {
L2Client *ethclient.Client
}
-// L2TestSuite ... Stores all the information needed to run an e2e L2Geth test
-type L2TestSuite struct {
- t *testing.T
-
- L2Geth *op_e2e.OpGeth
- L2Cfg *op_e2e.SystemConfig
-
- App *app.Application
- AppCfg *config.Config
- Close func()
-
- TestSlackSvr *TestSlackServer
- TestPagerDutyServer *TestPagerDutyServer
-}
-
-// CreateSysTestSuite ... Creates a new L2Geth test suite
-func CreateL2TestSuite(t *testing.T) *L2TestSuite {
- ctx := context.Background()
- nodeCfg := op_e2e.DefaultSystemConfig(t)
- logging.New(core.Development)
-
- node, err := op_e2e.NewOpGeth(t, ctx, &nodeCfg)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(os.Getenv("ENABLE_ROLLUP_LOGS")) == 0 {
- t.Log("set env 'ENABLE_ROLLUP_LOGS' to show rollup logs")
- for name, logger := range nodeCfg.Loggers {
- t.Logf("discarding logs for %s", name)
- logger.SetHandler(log.DiscardHandler())
- }
- }
-
- ss := state.NewMemState()
-
- bundle := &client.Bundle{
- L1Client: node.L2Client,
- L2Client: node.L2Client,
- }
- ctx = app.InitializeContext(ctx, ss, bundle)
-
- appCfg := DefaultTestConfig()
-
- slackServer := NewTestSlackServer("127.0.0.1", 0)
-
- pagerdutyServer := NewTestPagerDutyServer("127.0.0.1", 0)
-
- slackURL := fmt.Sprintf("http://127.0.0.1:%d", slackServer.Port)
- pagerdutyURL := fmt.Sprintf("http://127.0.0.1:%d", pagerdutyServer.Port)
-
- appCfg.AlertConfig.PagerdutyAlertEventsURL = pagerdutyURL
- appCfg.AlertConfig.RoutingParams = DefaultRoutingParams(core.StringFromEnv(slackURL))
-
- pess, kill, err := app.NewPessimismApp(ctx, appCfg)
- if err != nil {
- t.Fatal(err)
- }
-
- if err := pess.Start(); err != nil {
- t.Fatal(err)
- }
-
- go pess.ListenForShutdown(kill)
-
- return &L2TestSuite{
- t: t,
- L2Geth: node,
- L2Cfg: &nodeCfg,
- App: pess,
- Close: func() {
- kill()
- node.Close()
- slackServer.Close()
- pagerdutyServer.Close()
- },
- AppCfg: appCfg,
- TestSlackSvr: slackServer,
- TestPagerDutyServer: pagerdutyServer,
- }
-}
-
// CreateSysTestSuite ... Creates a new SysTestSuite
func CreateSysTestSuite(t *testing.T) *SysTestSuite {
t.Log("Creating system test suite")
@@ -165,7 +83,19 @@ func CreateSysTestSuite(t *testing.T) *SysTestSuite {
ctrl := gomock.NewController(t)
ixClient := mocks.NewMockIxClient(ctrl)
+ l2NodeClient, err := ix_node.DialEthClient(sys.EthInstances["sequencer"].HTTPEndpoint(), metrics.NoopMetrics)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ l1NodeClient, err := ix_node.DialEthClient(sys.EthInstances["l1"].HTTPEndpoint(), metrics.NoopMetrics)
+ if err != nil {
+ t.Fatal(err)
+ }
+
bundle := &client.Bundle{
+ L1Node: l1NodeClient,
+ L2Node: l2NodeClient,
L1Client: sys.Clients["l1"],
L2Client: sys.Clients["sequencer"],
L2Geth: gethClient,
@@ -226,7 +156,7 @@ func CreateSysTestSuite(t *testing.T) *SysTestSuite {
func DefaultTestConfig() *config.Config {
l1PollInterval := 900
l2PollInterval := 300
- maxPipelines := 10
+ maxPaths := 10
workerCount := 4
return &config.Config{
@@ -249,9 +179,9 @@ func DefaultTestConfig() *config.Config {
Port: 0,
},
SystemConfig: &subsystem.Config{
- MaxPipelineCount: maxPipelines,
- L2PollInterval: l2PollInterval,
- L1PollInterval: l1PollInterval,
+ MaxPathCount: maxPaths,
+ L2PollInterval: l2PollInterval,
+ L1PollInterval: l1PollInterval,
},
}
}
diff --git a/genesis.example.json b/genesis.example.json
index ad244e09..823434c5 100644
--- a/genesis.example.json
+++ b/genesis.example.json
@@ -1,7 +1,6 @@
[
{
"network": "layer1",
- "pipeline_type": "live",
"type": "contract_event",
"start_height": null,
"alerting_params": {
@@ -15,7 +14,6 @@
},
{
"network": "layer1",
- "pipeline_type": "live",
"type": "balance_enforcement",
"start_height": null,
"alerting_params": {
diff --git a/internal/alert/cooldown _test.go b/internal/alert/cooldown _test.go
index d59ca557..bb99eac3 100644
--- a/internal/alert/cooldown _test.go
+++ b/internal/alert/cooldown _test.go
@@ -21,15 +21,15 @@ func Test_CoolDown(t *testing.T) {
construction: alert.NewCoolDownHandler,
testFunc: func(t *testing.T, cdh alert.CoolDownHandler) {
// Add a cooldown for one second
- cdh.Add(core.NilSUUID(), time.Duration(1_000_000_000))
+ cdh.Add(core.UUID{}, time.Duration(1_000_000_000))
- cooled := cdh.IsCoolDown(core.NilSUUID())
+ cooled := cdh.IsCoolDown(core.UUID{})
assert.True(t, cooled)
// Sleep for one second
time.Sleep(1_000_000_000)
cdh.Update()
- cooled = cdh.IsCoolDown(core.NilSUUID())
+ cooled = cdh.IsCoolDown(core.UUID{})
assert.False(t, cooled)
},
},
diff --git a/internal/alert/cooldown.go b/internal/alert/cooldown.go
index b7f42c74..f9d73689 100644
--- a/internal/alert/cooldown.go
+++ b/internal/alert/cooldown.go
@@ -8,40 +8,40 @@ import (
// CoolDownHandler ... Interface for the cool down handler
type CoolDownHandler interface {
- Add(suuid core.SUUID, coolDownTime time.Duration)
+ Add(id core.UUID, coolDownTime time.Duration)
Update()
- IsCoolDown(suuid core.SUUID) bool
+ IsCoolDown(id core.UUID) bool
}
// coolDownHandler ... Implementation of CoolDownHandler
type coolDownHandler struct {
- sessions map[core.SUUID]time.Time
+ sessions map[core.UUID]time.Time
}
// NewCoolDownHandler ... Initializer
func NewCoolDownHandler() CoolDownHandler {
return &coolDownHandler{
- sessions: make(map[core.SUUID]time.Time),
+ sessions: make(map[core.UUID]time.Time),
}
}
// Add ... Adds a session to the cool down handler
-func (cdh *coolDownHandler) Add(sUUID core.SUUID, coolDownTime time.Duration) {
- cdh.sessions[sUUID] = time.Now().Add(coolDownTime)
+func (cdh *coolDownHandler) Add(id core.UUID, coolDownTime time.Duration) {
+ cdh.sessions[id] = time.Now().Add(coolDownTime)
}
// Update ... Updates the cool down handler
func (cdh *coolDownHandler) Update() {
- for sUUID, t := range cdh.sessions {
+ for id, t := range cdh.sessions {
if t.Before(time.Now()) {
- delete(cdh.sessions, sUUID)
+ delete(cdh.sessions, id)
}
}
}
// IsCoolDown ... Checks if the session is in cool down
-func (cdh *coolDownHandler) IsCoolDown(sUUID core.SUUID) bool {
- if t, ok := cdh.sessions[sUUID]; ok {
+func (cdh *coolDownHandler) IsCoolDown(id core.UUID) bool {
+ if t, ok := cdh.sessions[id]; ok {
return t.After(time.Now())
}
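
For illustration only, a minimal usage sketch of the now UUID-keyed cool-down handler; the function name, package name, and 30-second window below are placeholders, not part of this change.

```go
package example

import (
	"time"

	"github.com/base-org/pessimism/internal/alert"
	"github.com/base-org/pessimism/internal/core"
)

func suppressRepeats() {
	cdh := alert.NewCoolDownHandler()
	id := core.NewUUID()

	// Suppress repeat alerts for this heuristic for 30 seconds.
	cdh.Add(id, 30*time.Second)

	_ = cdh.IsCoolDown(id) // true while the window is active

	// Periodic housekeeping drops expired entries.
	cdh.Update()
}
```
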
diff --git a/internal/alert/interpolator.go b/internal/alert/interpolator.go
index 925edf22..c5998a6a 100644
--- a/internal/alert/interpolator.go
+++ b/internal/alert/interpolator.go
@@ -39,36 +39,26 @@ const (
`
)
-// Interpolator ... Interface for interpolating messages
-type Interpolator interface {
- InterpolateSlackMessage(sev core.Severity, sUUID core.SUUID, content string, msg string) string
- InterpolatePagerDutyMessage(sUUID core.SUUID, message string) string
-}
-
-// interpolator ... Interpolator implementation
-type interpolator struct{}
+type Interpolator struct{}
-// NewInterpolator ... Initializer
-func NewInterpolator() Interpolator {
- return &interpolator{}
+func NewInterpolator() *Interpolator {
+ return &Interpolator{}
}
-// InterpolateSlackMessage ... Interpolates a slack message with the given heuristic session UUID and message
-func (*interpolator) InterpolateSlackMessage(sev core.Severity, sUUID core.SUUID, content string, msg string) string {
+func (*Interpolator) SlackMessage(a core.Alert, msg string) string {
return fmt.Sprintf(SlackMsgFmt,
- sev.Symbol(),
- sUUID.PID.HeuristicType().String(),
- sUUID.PID.Network(),
- cases.Title(language.English).String(sev.String()),
- sUUID.String(),
- fmt.Sprintf(CodeBlockFmt, content),
+ a.Sev.Symbol(),
+ a.HT.String(),
+ a.Net.String(),
+ cases.Title(language.English).String(a.Sev.String()),
+ a.HeuristicID.String(),
+ fmt.Sprintf(CodeBlockFmt, a.Content),
msg)
}
-// InterpolatePagerDutyMessage ... Interpolates a pagerduty message with the given heuristic session UUID and message
-func (*interpolator) InterpolatePagerDutyMessage(sUUID core.SUUID, message string) string {
+func (*Interpolator) PagerDutyMessage(a core.Alert) string {
return fmt.Sprintf(PagerDutyMsgFmt,
- sUUID.PID.HeuristicType().String(),
- sUUID.PID.Network(),
- message)
+ a.HT.String(),
+ a.Net.String(),
+ a.Content)
}
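
A minimal sketch of the slimmed-down interpolator API, assuming the `core.Alert` fields introduced later in this change; the alert values and helper name are illustrative placeholders.

```go
package example

import (
	"fmt"

	"github.com/base-org/pessimism/internal/alert"
	"github.com/base-org/pessimism/internal/core"
)

func formatAlert() {
	a := core.Alert{
		Net:         core.Layer1,
		HT:          core.BalanceEnforcement,
		Sev:         core.HIGH,
		HeuristicID: core.NewUUID(),
		Content:     "balance dropped below threshold",
	}

	i := new(alert.Interpolator)
	fmt.Println(i.SlackMessage(a, "see runbook for next steps"))
	fmt.Println(i.PagerDutyMessage(a))
}
```
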
diff --git a/internal/alert/interpolator_test.go b/internal/alert/interpolator_test.go
index a570ae32..4c4bc9b5 100644
--- a/internal/alert/interpolator_test.go
+++ b/internal/alert/interpolator_test.go
@@ -8,14 +8,14 @@ import (
"github.com/stretchr/testify/assert"
)
-func Test_InterpolatePagerDutyMessage(t *testing.T) {
- sUUID := core.NilSUUID()
+func TestPagerDutyMessage(t *testing.T) {
- msg := "Test alert"
+ a := core.Alert{
+ HeuristicID: core.UUID{},
+ Content: "Test alert",
+ }
expected := "\n\tHeuristic Triggered: unknown\n\tNetwork: unknown\n\tAssessment: \n\tTest alert\n\t"
-
- actual := alert.NewInterpolator().InterpolatePagerDutyMessage(sUUID, msg)
-
- assert.Equal(t, expected, actual, "should be equal")
+ actual := new(alert.Interpolator).PagerDutyMessage(a)
+ assert.Equal(t, expected, actual)
}
diff --git a/internal/alert/manager.go b/internal/alert/manager.go
index 2397eafb..7f2b43f1 100644
--- a/internal/alert/manager.go
+++ b/internal/alert/manager.go
@@ -16,7 +16,7 @@ import (
// Manager ... Interface for alert manager
type Manager interface {
- AddSession(core.SUUID, *core.AlertPolicy) error
+ AddSession(core.UUID, *core.AlertPolicy) error
Transit() chan core.Alert
core.Subsystem
@@ -36,7 +36,7 @@ type alertManager struct {
cfg *Config
store Store
- interpolator Interpolator
+ interpolator *Interpolator
cdHandler CoolDownHandler
cm RoutingDirectory
@@ -59,7 +59,7 @@ func NewManager(ctx context.Context, cfg *Config, cm RoutingDirectory) Manager {
cm: cm,
cancel: cancel,
- interpolator: NewInterpolator(),
+ interpolator: new(Interpolator),
store: NewStore(),
alertTransit: make(chan core.Alert),
metrics: metrics.WithContext(ctx),
@@ -70,8 +70,8 @@ func NewManager(ctx context.Context, cfg *Config, cm RoutingDirectory) Manager {
}
// AddSession ... Adds a heuristic session to the alert manager store
-func (am *alertManager) AddSession(sUUID core.SUUID, policy *core.AlertPolicy) error {
- return am.store.AddAlertPolicy(sUUID, policy)
+func (am *alertManager) AddSession(id core.UUID, policy *core.AlertPolicy) error {
+ return am.store.AddAlertPolicy(id, policy)
}
// Transit ... Returns inter-subsystem transit channel for receiving alerts
@@ -82,7 +82,7 @@ func (am *alertManager) Transit() chan core.Alert {
// handleSlackPost ... Handles posting an alert to slack channels
func (am *alertManager) handleSlackPost(alert core.Alert, policy *core.AlertPolicy) error {
- slackClients := am.cm.GetSlackClients(alert.Criticality)
+ slackClients := am.cm.GetSlackClients(alert.Sev)
if slackClients == nil {
am.logger.Warn("No slack clients defined for criticality", zap.Any("alert", alert))
return nil
@@ -90,8 +90,8 @@ func (am *alertManager) handleSlackPost(alert core.Alert, policy *core.AlertPoli
// Create event trigger
event := &client.AlertEventTrigger{
- Message: am.interpolator.InterpolateSlackMessage(alert.Criticality, alert.SUUID, alert.Content, policy.Msg),
- Severity: alert.Criticality,
+ Message: am.interpolator.SlackMessage(alert, policy.Msg),
+ Severity: alert.Sev,
}
for _, sc := range slackClients {
@@ -112,7 +112,7 @@ func (am *alertManager) handleSlackPost(alert core.Alert, policy *core.AlertPoli
// handlePagerDutyPost ... Handles posting an alert to pagerduty
func (am *alertManager) handlePagerDutyPost(alert core.Alert) error {
- pdClients := am.cm.GetPagerDutyClients(alert.Criticality)
+ pdClients := am.cm.GetPagerDutyClients(alert.Sev)
if pdClients == nil {
am.logger.Warn("No pagerduty clients defined for criticality", zap.Any("alert", alert))
@@ -120,9 +120,9 @@ func (am *alertManager) handlePagerDutyPost(alert core.Alert) error {
}
event := &client.AlertEventTrigger{
- Message: am.interpolator.InterpolatePagerDutyMessage(alert.SUUID, alert.Content),
- DedupKey: alert.PUUID,
- Severity: alert.Criticality,
+ Message: am.interpolator.PagerDutyMessage(alert),
+ DedupKey: alert.PathID,
+ Severity: alert.Sev,
}
for _, pdc := range pdClients {
@@ -164,28 +164,28 @@ func (am *alertManager) EventLoop() error {
case alert := <-am.alertTransit: // Upstream alert
// 1. Fetch alert policy
- policy, err := am.store.GetAlertPolicy(alert.SUUID)
+ policy, err := am.store.GetAlertPolicy(alert.HeuristicID)
if err != nil {
am.logger.Error("Could not determine alerting destination", zap.Error(err))
continue
}
// 2. Check if alert is in cool down
- if policy.HasCoolDown() && am.cdHandler.IsCoolDown(alert.SUUID) {
+ if policy.HasCoolDown() && am.cdHandler.IsCoolDown(alert.HeuristicID) {
am.logger.Debug("Alert is in cool down",
- zap.String(logging.SUUIDKey, alert.SUUID.String()))
+ zap.String(logging.UUID, alert.HeuristicID.String()))
continue
}
// 3. Log & propagate alert
am.logger.Info("received alert",
- zap.String(logging.SUUIDKey, alert.SUUID.String()))
+ zap.String(logging.UUID, alert.HeuristicID.String()))
am.HandleAlert(alert, policy)
// 4. Add alert to cool down if applicable
if policy.HasCoolDown() {
- am.cdHandler.Add(alert.SUUID, time.Duration(policy.CoolDown)*time.Second)
+ am.cdHandler.Add(alert.HeuristicID, time.Duration(policy.CoolDown)*time.Second)
}
}
}
@@ -193,7 +193,7 @@ func (am *alertManager) EventLoop() error {
// HandleAlert ... Handles the alert propagation logic
func (am *alertManager) HandleAlert(alert core.Alert, policy *core.AlertPolicy) {
- alert.Criticality = policy.Severity()
+ alert.Sev = policy.Severity()
if err := am.handleSlackPost(alert, policy); err != nil {
am.logger.Error("could not post to slack", zap.Error(err))
diff --git a/internal/alert/manager_test.go b/internal/alert/manager_test.go
index b7373a51..db92e3d1 100644
--- a/internal/alert/manager_test.go
+++ b/internal/alert/manager_test.go
@@ -16,7 +16,7 @@ import (
"github.com/stretchr/testify/assert"
)
-func Test_EventLoop(t *testing.T) {
+func TestEventLoop(t *testing.T) {
cfg := &config.Config{
AlertConfig: &alert.Config{
@@ -54,15 +54,15 @@ func Test_EventLoop(t *testing.T) {
cm.SetSlackClients([]client.SlackClient{mocks.NewMockSlackClient(c)}, core.LOW)
alert := core.Alert{
- Criticality: core.LOW,
- SUUID: core.NilSUUID(),
+ Sev: core.LOW,
+ HeuristicID: core.UUID{},
}
policy := &core.AlertPolicy{
Sev: core.LOW.String(),
Msg: "test",
}
- err := am.AddSession(core.NilSUUID(), policy)
+ err := am.AddSession(core.UUID{}, policy)
assert.Nil(t, err)
for _, cli := range cm.GetSlackClients(core.LOW) {
@@ -78,10 +78,10 @@ func Test_EventLoop(t *testing.T) {
ingress <- alert
time.Sleep(1 * time.Second)
- testid := core.MakeSUUID(1, 1, 1)
+ id := core.NewUUID()
alert = core.Alert{
- Criticality: core.UNKNOWN,
- SUUID: testid,
+ Sev: core.UNKNOWN,
+ HeuristicID: id,
}
ingress <- alert
time.Sleep(1 * time.Second)
@@ -108,15 +108,15 @@ func Test_EventLoop(t *testing.T) {
cm.SetPagerDutyClients([]client.PagerDutyClient{mocks.NewMockPagerDutyClient(c)}, core.MEDIUM)
alert := core.Alert{
- Criticality: core.MEDIUM,
- SUUID: core.NilSUUID(),
+ Sev: core.MEDIUM,
+ HeuristicID: core.UUID{},
}
policy := &core.AlertPolicy{
Sev: core.MEDIUM.String(),
Msg: "test",
}
- err := am.AddSession(core.NilSUUID(), policy)
+ err := am.AddSession(core.UUID{}, policy)
assert.Nil(t, err)
for _, cli := range cm.GetPagerDutyClients(core.MEDIUM) {
@@ -132,10 +132,10 @@ func Test_EventLoop(t *testing.T) {
ingress <- alert
time.Sleep(1 * time.Second)
- testid := core.MakeSUUID(1, 1, 1)
+ id := core.NewUUID()
alert = core.Alert{
- Criticality: core.UNKNOWN,
- SUUID: testid,
+ Sev: core.UNKNOWN,
+ HeuristicID: id,
}
ingress <- alert
time.Sleep(1 * time.Second)
@@ -163,14 +163,14 @@ func Test_EventLoop(t *testing.T) {
cm.SetPagerDutyClients([]client.PagerDutyClient{mocks.NewMockPagerDutyClient(c), mocks.NewMockPagerDutyClient(c)}, core.HIGH)
alert := core.Alert{
- Criticality: core.HIGH,
- SUUID: core.NilSUUID(),
+ Sev: core.HIGH,
+ HeuristicID: core.UUID{},
}
policy := &core.AlertPolicy{
Sev: core.HIGH.String(),
Msg: "test",
}
- err := am.AddSession(core.NilSUUID(), policy)
+ err := am.AddSession(core.UUID{}, policy)
assert.Nil(t, err)
for _, cli := range cm.GetPagerDutyClients(core.HIGH) {
@@ -195,10 +195,10 @@ func Test_EventLoop(t *testing.T) {
}
ingress <- alert
time.Sleep(1 * time.Second)
- testid := core.MakeSUUID(1, 1, 1)
+ id := core.NewUUID()
alert = core.Alert{
- Criticality: core.UNKNOWN,
- SUUID: testid,
+ Sev: core.UNKNOWN,
+ HeuristicID: id,
}
ingress <- alert
time.Sleep(1 * time.Second)
diff --git a/internal/alert/store.go b/internal/alert/store.go
index 518b192f..a2a30b06 100644
--- a/internal/alert/store.go
+++ b/internal/alert/store.go
@@ -10,39 +10,39 @@ import (
// NOTE - This is a simple in-memory store, using this interface
// we can easily swap it out for a persistent store
type Store interface {
- AddAlertPolicy(core.SUUID, *core.AlertPolicy) error
- GetAlertPolicy(sUUID core.SUUID) (*core.AlertPolicy, error)
+ AddAlertPolicy(core.UUID, *core.AlertPolicy) error
+ GetAlertPolicy(id core.UUID) (*core.AlertPolicy, error)
}
// store ... Alert store implementation
// Used to store critical alerting metadata (ie. alert destination, message, etc.)
type store struct {
- defMap map[core.SUUID]*core.AlertPolicy
+ defMap map[core.UUID]*core.AlertPolicy
}
// NewStore ... Initializer
func NewStore() Store {
return &store{
- defMap: make(map[core.SUUID]*core.AlertPolicy),
+ defMap: make(map[core.UUID]*core.AlertPolicy),
}
}
// AddAlertPolicy ... Adds an alert policy for the given heuristic session UUID
// NOTE - There can only be one alert destination per heuristic session UUID
-func (am *store) AddAlertPolicy(sUUID core.SUUID, policy *core.AlertPolicy) error {
- if _, exists := am.defMap[sUUID]; exists {
- return fmt.Errorf("alert destination already exists for heuristic session %s", sUUID.String())
+func (am *store) AddAlertPolicy(id core.UUID, policy *core.AlertPolicy) error {
+ if _, exists := am.defMap[id]; exists {
+ return fmt.Errorf("alert destination already exists for heuristic %s", id.String())
}
- am.defMap[sUUID] = policy
+ am.defMap[id] = policy
return nil
}
-// GetAlertPolicy ... Returns the alert destination for the given heuristic session UUID
-func (am *store) GetAlertPolicy(sUUID core.SUUID) (*core.AlertPolicy, error) {
- dest, exists := am.defMap[sUUID]
+// GetAlertPolicy ... Returns the alert destination for the given heuristic UUID
+func (am *store) GetAlertPolicy(id core.UUID) (*core.AlertPolicy, error) {
+ dest, exists := am.defMap[id]
if !exists {
- return nil, fmt.Errorf("alert destination does not exist for heuristic session %s", sUUID.String())
+ return nil, fmt.Errorf("alert destination does not exist for heuristic %s", id.String())
}
return dest, nil
diff --git a/internal/alert/store_test.go b/internal/alert/store_test.go
index 694f3dcd..2a2b49a5 100644
--- a/internal/alert/store_test.go
+++ b/internal/alert/store_test.go
@@ -10,7 +10,7 @@ import (
"github.com/stretchr/testify/assert"
)
-func Test_Store(t *testing.T) {
+func TestStore(t *testing.T) {
var tests = []struct {
name string
description string
@@ -22,18 +22,18 @@ func Test_Store(t *testing.T) {
testLogic: func(t *testing.T) {
am := alert.NewStore()
- sUUID := core.MakeSUUID(core.Layer1, core.Live, core.BalanceEnforcement)
+ id := core.UUID{}
policy := &core.AlertPolicy{
Msg: "test message",
Dest: core.Slack.String(),
}
- err := am.AddAlertPolicy(sUUID, policy)
- assert.NoError(t, err, "failed to add Alert Policy")
+ err := am.AddAlertPolicy(id, policy)
+ assert.NoError(t, err)
- actualPolicy, err := am.GetAlertPolicy(sUUID)
- assert.NoError(t, err, "failed to get Alert Policy")
- assert.Equal(t, policy, actualPolicy, "Alert Policy mismatch")
+ actualPolicy, err := am.GetAlertPolicy(id)
+ assert.NoError(t, err)
+ assert.Equal(t, policy, actualPolicy)
},
},
{
@@ -42,17 +42,17 @@ func Test_Store(t *testing.T) {
testLogic: func(t *testing.T) {
am := alert.NewStore()
- sUUID := core.MakeSUUID(core.Layer1, core.Live, core.BalanceEnforcement)
+ id := core.UUID{}
policy := &core.AlertPolicy{
Dest: core.Slack.String(),
}
- err := am.AddAlertPolicy(sUUID, policy)
- assert.NoError(t, err, "failed to add Alert Policy")
+ err := am.AddAlertPolicy(id, policy)
+ assert.NoError(t, err)
// add again
- err = am.AddAlertPolicy(sUUID, policy)
- assert.Error(t, err, "failed to add Alert Policy")
+ err = am.AddAlertPolicy(id, policy)
+ assert.Error(t, err)
},
},
{
@@ -60,7 +60,7 @@ func Test_Store(t *testing.T) {
description: "Test NewStore logic",
testLogic: func(t *testing.T) {
am := alert.NewStore()
- assert.NotNil(t, am, "failed to instantiate alert store")
+ assert.NotNil(t, am)
},
},
}
diff --git a/internal/api/handlers/handlers_test.go b/internal/api/handlers/handlers_test.go
index c4db0388..5447cae3 100644
--- a/internal/api/handlers/handlers_test.go
+++ b/internal/api/handlers/handlers_test.go
@@ -2,23 +2,13 @@ package handlers_test
import (
"context"
- "fmt"
"testing"
"github.com/base-org/pessimism/internal/api/handlers"
- "github.com/base-org/pessimism/internal/core"
"github.com/base-org/pessimism/internal/mocks"
"github.com/golang/mock/gomock"
)
-func testSUUID1() core.SUUID {
- return core.MakeSUUID(1, 1, 1)
-}
-
-func testError1() error {
- return fmt.Errorf("test error 1")
-}
-
type testSuite struct {
mockSvc mocks.MockService
diff --git a/internal/api/handlers/heuristic.go b/internal/api/handlers/heuristic.go
index 875aaf48..4074f2cd 100644
--- a/internal/api/handlers/heuristic.go
+++ b/internal/api/handlers/heuristic.go
@@ -29,7 +29,7 @@ func (ph *PessimismHandler) RunHeuristic(w http.ResponseWriter, r *http.Request)
return
}
- sUUID, err := ph.service.ProcessHeuristicRequest(body)
+ id, err := ph.service.ProcessHeuristicRequest(body)
if err != nil {
logging.WithContext(ph.ctx).
Error("Could not process heuristic request", zap.Error(err))
@@ -38,5 +38,5 @@ func (ph *PessimismHandler) RunHeuristic(w http.ResponseWriter, r *http.Request)
return
}
- renderHeuristicResponse(w, r, models.NewSessionAcceptedResp(sUUID))
+ renderHeuristicResponse(w, r, models.NewSessionAcceptedResp(id))
}
diff --git a/internal/api/handlers/heuristic_test.go b/internal/api/handlers/heuristic_test.go
index 5e566c60..e0167cff 100644
--- a/internal/api/handlers/heuristic_test.go
+++ b/internal/api/handlers/heuristic_test.go
@@ -16,7 +16,7 @@ import (
"github.com/stretchr/testify/assert"
)
-func Test_ProcessHeuristicRequest(t *testing.T) {
+func TestHeuristicRequest(t *testing.T) {
var tests = []struct {
name string
@@ -67,7 +67,7 @@ func Test_ProcessHeuristicRequest(t *testing.T) {
ts.mockSvc.EXPECT().
ProcessHeuristicRequest(gomock.Any()).
- Return(core.NilSUUID(), testError1()).
+ Return(core.UUID{}, fmt.Errorf("test")).
Times(1)
return ts
@@ -98,7 +98,7 @@ func Test_ProcessHeuristicRequest(t *testing.T) {
},
{
name: "Process Heuristic Success",
- description: "When a heuristic is successfully processed, a suuid should be rendered",
+ description: "When a heuristic is successfully processed, a new UUID should be rendered",
function: "RunHeuristic",
constructionLogic: func() testSuite {
@@ -106,7 +106,7 @@ func Test_ProcessHeuristicRequest(t *testing.T) {
ts.mockSvc.EXPECT().
ProcessHeuristicRequest(gomock.Any()).
- Return(testSUUID1(), nil).
+ Return(core.UUID{}, nil).
Times(1)
return ts
@@ -135,7 +135,7 @@ func Test_ProcessHeuristicRequest(t *testing.T) {
assert.Equal(t, actualResp.Status, models.OK)
assert.Equal(t, actualResp.Code, http.StatusAccepted)
- assert.Contains(t, actualResp.Result[logging.SUUIDKey], testSUUID1().PID.String())
+ assert.Contains(t, actualResp.Result[logging.UUID], core.UUID{}.String())
},
},
}
diff --git a/internal/api/models/heuristic.go b/internal/api/models/heuristic.go
index c3f5a3b3..d5f92013 100644
--- a/internal/api/models/heuristic.go
+++ b/internal/api/models/heuristic.go
@@ -42,7 +42,6 @@ const (
// SessionRequestParams ... Request params for heuristic operation
type SessionRequestParams struct {
Network string `json:"network"`
- PType string `json:"pipeline_type"`
HeuristicType string `json:"type"`
StartHeight *big.Int `json:"start_height"`
@@ -73,11 +72,6 @@ func (hrp *SessionRequestParams) NetworkType() core.Network {
return core.StringToNetwork(hrp.Network)
}
-// PipelineType ... Returns the pipeline type
-func (hrp *SessionRequestParams) PipelineType() core.PipelineType {
- return core.StringToPipelineType(hrp.PType)
-}
-
// Heuristic ... Returns the heuristic type
func (hrp *SessionRequestParams) Heuristic() core.HeuristicType {
return core.StringToHeuristicType(hrp.HeuristicType)
@@ -87,13 +81,13 @@ func (hrp *SessionRequestParams) AlertPolicy() *core.AlertPolicy {
return hrp.AlertingParams
}
-// GeneratePipelineConfig ... Generates a pipeline config using the request params
-func (hrp *SessionRequestParams) GeneratePipelineConfig(pollInterval time.Duration,
- regType core.RegisterType) *core.PipelineConfig {
- return &core.PipelineConfig{
- Network: hrp.NetworkType(),
- DataType: regType,
- PipelineType: hrp.PipelineType(),
+// NewPathCfg ... Generates a path config using the request params
+func (hrp *SessionRequestParams) NewPathCfg(pollInterval time.Duration,
+ regType core.TopicType) *core.PathConfig {
+ return &core.PathConfig{
+ Network: hrp.NetworkType(),
+ DataType: regType,
+ PathType: core.Live,
ClientConfig: &core.ClientConfig{
Network: hrp.NetworkType(),
PollInterval: pollInterval,
@@ -109,7 +103,7 @@ func (hrp *SessionRequestParams) SessionConfig() *core.SessionConfig {
AlertPolicy: hrp.AlertPolicy(),
Type: hrp.Heuristic(),
Params: hrp.Params(),
- PT: hrp.PipelineType(),
+ PT: core.Live,
}
}
diff --git a/internal/api/models/heuristic_test.go b/internal/api/models/heuristic_test.go
index f56fa184..e8842daf 100644
--- a/internal/api/models/heuristic_test.go
+++ b/internal/api/models/heuristic_test.go
@@ -16,7 +16,6 @@ func Test_SessionRequestParams(t *testing.T) {
"test": "test",
},
Network: core.Layer1.String(),
- PType: core.Live.String(),
HeuristicType: core.BalanceEnforcement.String(),
}
@@ -30,18 +29,14 @@ func Test_SessionRequestParams(t *testing.T) {
n := irp.NetworkType()
assert.Equal(t, n, core.Layer1)
- // Ensure that pipeline type is set correctly
- pt := irp.PipelineType()
- assert.Equal(t, pt, core.Live)
-
// Ensure that heuristic type is set correctly
it := irp.Heuristic()
assert.Equal(t, it, core.BalanceEnforcement)
- // Ensure that the pipeline config is set correctly
- pConfig := irp.GeneratePipelineConfig(0, 0)
+ // Ensure that the path config is set correctly
+ pConfig := irp.NewPathCfg(0, 0)
assert.Equal(t, pConfig.Network, core.Layer1)
- assert.Equal(t, pConfig.PipelineType, core.Live)
+ assert.Equal(t, pConfig.PathType, core.Live)
sConfig := irp.SessionConfig()
assert.Equal(t, sConfig.Type, core.BalanceEnforcement)
diff --git a/internal/api/models/models.go b/internal/api/models/models.go
index 94aed10e..675d32f7 100644
--- a/internal/api/models/models.go
+++ b/internal/api/models/models.go
@@ -8,11 +8,11 @@ import (
)
// NewSessionAcceptedResp ...Returns a heuristic response with status accepted
-func NewSessionAcceptedResp(id core.SUUID) *SessionResponse {
+func NewSessionAcceptedResp(id core.UUID) *SessionResponse {
return &SessionResponse{
Status: OK,
Code: http.StatusAccepted,
- Result: Result{logging.SUUIDKey: id.String()},
+ Result: Result{logging.UUID: id.String()},
}
}
diff --git a/internal/api/service/heuristic.go b/internal/api/service/heuristic.go
index 431017b9..4f87eb13 100644
--- a/internal/api/service/heuristic.go
+++ b/internal/api/service/heuristic.go
@@ -6,33 +6,33 @@ import (
)
// ProcessHeuristicRequest ... Processes a heuristic request type
-func (svc *PessimismService) ProcessHeuristicRequest(ir *models.SessionRequestBody) (core.SUUID, error) {
+func (svc *PessimismService) ProcessHeuristicRequest(ir *models.SessionRequestBody) (core.UUID, error) {
if ir.MethodType() == models.Run { // Deploy heuristic session
return svc.RunHeuristicSession(&ir.Params)
}
// TODO - Add support for other method types (ie. delete. update)
- return core.NilSUUID(), nil
+ return core.UUID{}, nil
}
// RunHeuristicSession ... Runs a heuristic session provided
-func (svc *PessimismService) RunHeuristicSession(params *models.SessionRequestParams) (core.SUUID, error) {
- pConfig, err := svc.m.BuildPipelineCfg(params)
+func (svc *PessimismService) RunHeuristicSession(params *models.SessionRequestParams) (core.UUID, error) {
+ pConfig, err := svc.m.BuildPathCfg(params)
if err != nil {
- return core.NilSUUID(), err
+ return core.UUID{}, err
}
sConfig := params.SessionConfig()
deployCfg, err := svc.m.BuildDeployCfg(pConfig, sConfig)
if err != nil {
- return core.NilSUUID(), err
+ return core.UUID{}, err
}
- sUUID, err := svc.m.RunSession(deployCfg)
+ id, err := svc.m.RunHeuristic(deployCfg)
if err != nil {
- return core.NilSUUID(), err
+ return core.UUID{}, err
}
- return sUUID, nil
+ return id, nil
}
diff --git a/internal/api/service/heuristic_test.go b/internal/api/service/heuristic_test.go
index b2f6a5e0..2bc3cab2 100644
--- a/internal/api/service/heuristic_test.go
+++ b/internal/api/service/heuristic_test.go
@@ -16,7 +16,7 @@ func testErr() error {
}
func Test_RunHeuristicSession(t *testing.T) {
- testSUUID := core.MakeSUUID(1, 1, 1)
+ id := core.UUID{}
ctrl := gomock.NewController(t)
@@ -26,7 +26,6 @@ func Test_RunHeuristicSession(t *testing.T) {
Method: "run",
Params: models.SessionRequestParams{
Network: "layer1",
- PType: "live",
HeuristicType: "contract_event",
StartHeight: nil,
EndHeight: nil,
@@ -46,7 +45,7 @@ func Test_RunHeuristicSession(t *testing.T) {
ts := createTestSuite(ctrl)
ts.mockSub.EXPECT().
- BuildPipelineCfg(&defaultBody.Params).
+ BuildPathCfg(&defaultBody.Params).
Return(nil, nil).
Times(1)
@@ -56,8 +55,8 @@ func Test_RunHeuristicSession(t *testing.T) {
Times(1)
ts.mockSub.EXPECT().
- RunSession(testCfg).
- Return(testSUUID, nil).
+ RunHeuristic(testCfg).
+ Return(id, nil).
Times(1)
return ts
@@ -65,19 +64,19 @@ func Test_RunHeuristicSession(t *testing.T) {
testLogic: func(t *testing.T, ts *testSuite) {
testParams := defaultBody.Clone()
- actualSUUID, err := ts.apiSvc.ProcessHeuristicRequest(testParams)
+ id, err := ts.apiSvc.ProcessHeuristicRequest(testParams)
assert.NoError(t, err)
- assert.Equal(t, testSUUID, actualSUUID)
+ assert.Equal(t, core.UUID{}, id)
},
},
{
- name: "Failure when building pipeline config",
+ name: "Failure when building path config",
constructionLogic: func() *testSuite {
ts := createTestSuite(ctrl)
ts.mockSub.EXPECT().
- BuildPipelineCfg(&defaultBody.Params).
+ BuildPathCfg(&defaultBody.Params).
Return(nil, testErr()).
Times(1)
return ts
@@ -85,10 +84,10 @@ func Test_RunHeuristicSession(t *testing.T) {
testLogic: func(t *testing.T, ts *testSuite) {
testParams := defaultBody.Clone()
- actualSUUID, err := ts.apiSvc.ProcessHeuristicRequest(testParams)
+ id, err := ts.apiSvc.ProcessHeuristicRequest(testParams)
assert.Error(t, err)
- assert.Equal(t, core.NilSUUID(), actualSUUID)
+ assert.Equal(t, core.UUID{}, id)
},
},
{
@@ -97,7 +96,7 @@ func Test_RunHeuristicSession(t *testing.T) {
ts := createTestSuite(ctrl)
ts.mockSub.EXPECT().
- BuildPipelineCfg(&defaultBody.Params).
+ BuildPathCfg(&defaultBody.Params).
Return(nil, nil).
Times(1)
@@ -111,10 +110,10 @@ func Test_RunHeuristicSession(t *testing.T) {
testLogic: func(t *testing.T, ts *testSuite) {
testParams := defaultBody.Clone()
- actualSUUID, err := ts.apiSvc.ProcessHeuristicRequest(testParams)
+ id, err := ts.apiSvc.ProcessHeuristicRequest(testParams)
assert.Error(t, err)
- assert.Equal(t, core.NilSUUID(), actualSUUID)
+ assert.Equal(t, core.UUID{}, id)
},
},
{
@@ -123,7 +122,7 @@ func Test_RunHeuristicSession(t *testing.T) {
ts := createTestSuite(ctrl)
ts.mockSub.EXPECT().
- BuildPipelineCfg(&defaultBody.Params).
+ BuildPathCfg(&defaultBody.Params).
Return(nil, nil).
Times(1)
@@ -133,8 +132,8 @@ func Test_RunHeuristicSession(t *testing.T) {
Times(1)
ts.mockSub.EXPECT().
- RunSession(testCfg).
- Return(core.NilSUUID(), testErr()).
+ RunHeuristic(testCfg).
+ Return(core.UUID{}, testErr()).
Times(1)
return ts
@@ -142,10 +141,10 @@ func Test_RunHeuristicSession(t *testing.T) {
testLogic: func(t *testing.T, ts *testSuite) {
testParams := defaultBody.Clone()
- actualSUUID, err := ts.apiSvc.ProcessHeuristicRequest(testParams)
+ id, err := ts.apiSvc.ProcessHeuristicRequest(testParams)
assert.Error(t, err)
- assert.Equal(t, core.NilSUUID(), actualSUUID)
+ assert.Equal(t, core.UUID{}, id)
},
},
}
diff --git a/internal/api/service/service.go b/internal/api/service/service.go
index 5c05d21d..05598114 100644
--- a/internal/api/service/service.go
+++ b/internal/api/service/service.go
@@ -12,8 +12,8 @@ import (
// Service ... Interface for API service
type Service interface {
- ProcessHeuristicRequest(ir *models.SessionRequestBody) (core.SUUID, error)
- RunHeuristicSession(params *models.SessionRequestParams) (core.SUUID, error)
+ ProcessHeuristicRequest(ir *models.SessionRequestBody) (core.UUID, error)
+ RunHeuristicSession(params *models.SessionRequestParams) (core.UUID, error)
CheckHealth() *models.HealthCheck
CheckETHRPCHealth(n core.Network) bool
diff --git a/internal/app/app.go b/internal/app/app.go
index 51b1fca6..75e8eb75 100644
--- a/internal/app/app.go
+++ b/internal/app/app.go
@@ -74,35 +74,35 @@ func (a *Application) End() <-chan os.Signal {
}
// BootStrap ... Bootstraps the application
-func (a *Application) BootStrap(sessions []*BootSession) ([]*core.HeuristicID, error) {
+func (a *Application) BootStrap(sessions []*BootSession) ([]core.SessionID, error) {
logger := logging.WithContext(a.ctx)
- ids := make([]*core.HeuristicID, 0, len(sessions))
+ ids := make([]core.SessionID, 0, len(sessions))
for _, session := range sessions {
- pConfig, err := a.Subsystems.BuildPipelineCfg(session)
+ pConfig, err := a.Subsystems.BuildPathCfg(session)
if err != nil {
return nil, err
}
sConfig := session.SessionConfig()
- deployCfg, err := a.Subsystems.BuildDeployCfg(pConfig, sConfig)
+ cfg, err := a.Subsystems.BuildDeployCfg(pConfig, sConfig)
if err != nil {
return nil, err
}
- sUUID, err := a.Subsystems.RunSession(deployCfg)
+ id, err := a.Subsystems.RunHeuristic(cfg)
if err != nil {
return nil, err
}
- ids = append(ids, &core.HeuristicID{
- SUUID: sUUID,
- PUUID: deployCfg.PUUID,
+ ids = append(ids, core.SessionID{
+ HeuristicID: id,
+ PathID: cfg.PathID,
})
logger.Info("heuristic session started",
- zap.String(logging.SUUIDKey, sUUID.String()))
+ zap.String(logging.Session, cfg.PathID.String()))
}
return ids, nil
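
A sketch of bootstrapping against the new `BootStrap` signature, mirroring the e2e tests earlier in this change; it assumes a started `*app.Application`, and the session parameters, policy values, and function name are placeholders.

```go
package example

import (
	"fmt"
	"log"

	"github.com/base-org/pessimism/internal/api/models"
	"github.com/base-org/pessimism/internal/app"
	"github.com/base-org/pessimism/internal/core"
)

func bootstrap(pess *app.Application) {
	ids, err := pess.BootStrap([]*models.SessionRequestParams{{
		Network:       core.Layer1.String(),
		HeuristicType: core.BalanceEnforcement.String(),
		AlertingParams: &core.AlertPolicy{
			Sev: core.HIGH.String(),
			Msg: "bootstrapped session",
		},
	}})
	if err != nil {
		log.Fatal(err)
	}

	for _, id := range ids {
		// Each bootstrapped session now exposes both its heuristic UUID
		// and the ID of the ETL path backing it.
		fmt.Println(id.HeuristicID.String(), id.PathID.String())
	}
}
```
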
diff --git a/internal/app/init.go b/internal/app/init.go
index a479664c..3704319b 100644
--- a/internal/app/init.go
+++ b/internal/app/init.go
@@ -12,7 +12,7 @@ import (
"github.com/base-org/pessimism/internal/core"
"github.com/base-org/pessimism/internal/engine"
e_registry "github.com/base-org/pessimism/internal/engine/registry"
- "github.com/base-org/pessimism/internal/etl/pipeline"
+ "github.com/base-org/pessimism/internal/etl"
"github.com/base-org/pessimism/internal/etl/registry"
"github.com/base-org/pessimism/internal/logging"
"github.com/base-org/pessimism/internal/metrics"
@@ -77,20 +77,21 @@ func InitializeAlerting(ctx context.Context, cfg *config.Config) (alert.Manager,
}
// InitializeETL ... Performs dependency injection to build etl struct
-func InitializeETL(ctx context.Context, transit chan core.HeuristicInput) pipeline.Manager {
- compRegistry := registry.NewRegistry()
- analyzer := pipeline.NewAnalyzer(compRegistry)
- store := pipeline.NewEtlStore()
- dag := pipeline.NewComponentGraph()
+func InitializeETL(ctx context.Context, transit chan core.HeuristicInput) etl.ETL {
+ r := registry.New()
+ analyzer := etl.NewAnalyzer(r)
+ store := etl.NewStore()
+ dag := etl.NewGraph()
- return pipeline.NewManager(ctx, analyzer, compRegistry, store, dag, transit)
+ return etl.New(ctx, analyzer, r, store, dag, transit)
}
// InitializeEngine ... Performs dependency injection to build engine struct
func InitializeEngine(ctx context.Context, cfg *config.Config, transit chan core.Alert) engine.Manager {
- store := engine.NewSessionStore()
- am := engine.NewAddressingMap()
+ store := engine.NewStore()
+ am := engine.NewAddressMap()
re := engine.NewHardCodedEngine(transit)
+
it := e_registry.NewHeuristicTable()
return engine.NewManager(ctx, cfg.EngineConfig, re, am, store, it, transit)
@@ -98,7 +99,7 @@ func InitializeEngine(ctx context.Context, cfg *config.Config, transit chan core
// NewPessimismApp ... Performs dependency injection to build app struct
func NewPessimismApp(ctx context.Context, cfg *config.Config) (*Application, func(), error) {
- mSvr, mShutDown, err := InitializeMetrics(ctx, cfg)
+ stats, shutDown, err := InitializeMetrics(ctx, cfg)
if err != nil {
return nil, nil, err
}
@@ -120,11 +121,11 @@ func NewPessimismApp(ctx context.Context, cfg *config.Config) (*Application, fun
appShutDown := func() {
shutDown()
- mShutDown()
+ shutDown()
if err := m.Shutdown(); err != nil {
logging.WithContext(ctx).Error("error shutting down subsystems", zap.Error(err))
}
}
- return New(ctx, cfg, m, svr, mSvr), appShutDown, nil
+ return New(ctx, cfg, m, svr, stats), appShutDown, nil
}
diff --git a/internal/client/alert.go b/internal/client/alert.go
index 5ba32dc7..e1a7f133 100644
--- a/internal/client/alert.go
+++ b/internal/client/alert.go
@@ -1,4 +1,4 @@
-//go:generate mockgen -package mocks --destination ../../mocks/alert_client.go . AlertClient
+//go:generate mockgen -package mocks --destination ../mocks/alert_client.go . AlertClient
package client
@@ -18,7 +18,7 @@ type AlertClient interface {
type AlertEventTrigger struct {
Message string
Severity core.Severity
- DedupKey core.PUUID
+ DedupKey core.PathID
}
// AlertAPIResponse ... A standardized response for alert clients
diff --git a/internal/client/alert_test.go b/internal/client/alert_test.go
index 611d92de..3252f9b4 100644
--- a/internal/client/alert_test.go
+++ b/internal/client/alert_test.go
@@ -13,14 +13,14 @@ func TestToPagerDutyEvent(t *testing.T) {
alert := &client.AlertEventTrigger{
Message: "test",
Severity: core.HIGH,
- DedupKey: core.NilPUUID(),
+ DedupKey: core.PathID{},
}
- sPuuid := alert.DedupKey.String()
+ sPathID := alert.DedupKey.String()
res := alert.ToPagerdutyEvent()
assert.Equal(t, core.Critical, res.Severity)
assert.Equal(t, "test", res.Message)
- assert.Equal(t, sPuuid, res.DedupKey)
+ assert.Equal(t, sPathID, res.DedupKey)
alert.Severity = core.MEDIUM
res = alert.ToPagerdutyEvent()
diff --git a/internal/client/client.go b/internal/client/client.go
index 01ac37bb..808fd25c 100644
--- a/internal/client/client.go
+++ b/internal/client/client.go
@@ -7,6 +7,7 @@ import (
"github.com/base-org/pessimism/internal/core"
"github.com/base-org/pessimism/internal/logging"
ix_client "github.com/ethereum-optimism/optimism/indexer/client"
+ ix_node "github.com/ethereum-optimism/optimism/indexer/node"
"go.uber.org/zap"
)
@@ -22,7 +23,9 @@ type Config struct {
type Bundle struct {
IxClient IxClient
L1Client EthClient
+ L1Node ix_node.EthClient
L2Client EthClient
+ L2Node ix_node.EthClient
L2Geth GethClient
}
@@ -36,12 +39,24 @@ func NewBundle(ctx context.Context, cfg *Config) (*Bundle, error) {
return nil, err
}
+ l1NodeClient, err := NewNodeClient(ctx, cfg.L1RpcEndpoint)
+ if err != nil {
+ logger.Fatal("Error creating L1 node client", zap.Error(err))
+ return nil, err
+ }
+
l2Client, err := NewEthClient(ctx, cfg.L2RpcEndpoint)
if err != nil {
logger.Fatal("Error creating L1 client", zap.Error(err))
return nil, err
}
+ l2NodeClient, err := NewNodeClient(ctx, cfg.L2RpcEndpoint)
+ if err != nil {
+ logger.Fatal("Error creating L2 node client", zap.Error(err))
+ return nil, err
+ }
+
l2Geth, err := NewGethClient(cfg.L2RpcEndpoint)
if err != nil {
logger.Fatal("Error creating L2 GETH client", zap.Error(err))
@@ -54,9 +69,11 @@ func NewBundle(ctx context.Context, cfg *Config) (*Bundle, error) {
}
return &Bundle{
- IxClient: ixClient,
L1Client: l1Client,
+ L1Node: l1NodeClient,
L2Client: l2Client,
+ L2Node: l2NodeClient,
+ IxClient: ixClient,
L2Geth: l2Geth,
}, nil
}
@@ -71,6 +88,20 @@ func FromContext(ctx context.Context) (*Bundle, error) {
return b, nil
}
+// NodeClient ... Returns the indexer node client for the supplied network
+func (b *Bundle) NodeClient(n core.Network) (ix_node.EthClient, error) {
+ switch n {
+ case core.Layer1:
+ return b.L1Node, nil
+
+ case core.Layer2:
+ return b.L2Node, nil
+
+ default:
+ return nil, fmt.Errorf("invalid network supplied")
+ }
+}
+
// FromNetwork ... Retrieves an eth client from the context
func FromNetwork(ctx context.Context, n core.Network) (EthClient, error) {
bundle, err := FromContext(ctx)
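
A sketch of how the new per-network node clients might be consumed, using the `NodeClient` accessor above together with the header-by-number method declared in `eth.go` below; the package and function names are illustrative only.

```go
package example

import (
	"context"

	"github.com/base-org/pessimism/internal/client"
	"github.com/base-org/pessimism/internal/core"
	"github.com/ethereum/go-ethereum/core/types"
)

// latestHeader resolves the indexer node client for the requested network
// from the context-scoped bundle and reads the newest block header.
func latestHeader(ctx context.Context, n core.Network) (*types.Header, error) {
	bundle, err := client.FromContext(ctx)
	if err != nil {
		return nil, err
	}

	node, err := bundle.NodeClient(n)
	if err != nil {
		return nil, err
	}

	// A nil height requests the latest header.
	return node.BlockHeaderByNumber(nil)
}
```
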
diff --git a/internal/client/eth.go b/internal/client/eth.go
index 8c4d71c6..3d7760b1 100644
--- a/internal/client/eth.go
+++ b/internal/client/eth.go
@@ -1,28 +1,19 @@
-//go:generate mockgen -package mocks --destination ../mocks/eth_client.go . EthClient
+//go:generate mockgen -package mocks --destination ../mocks/eth_client.go . EthClient,NodeClient
package client
-/*
- NOTE
- eth client docs: https://pkg.go.dev/github.com/ethereum/go-ethereum/ethclient
- eth api docs: https://geth.ethereum.org/docs/rpc/server
-*/
-
import (
"context"
"math/big"
+ "github.com/base-org/pessimism/internal/metrics"
+ ix_node "github.com/ethereum-optimism/optimism/indexer/node"
"github.com/ethereum/go-ethereum"
-
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
)
-// TODO (#20) : Introduce optional Retry-able EthClient
-
-// EthClient ... Provides interface wrapper for ethClient functions
-// Useful for mocking go-ethereum json rpc client logic
type EthClient interface {
CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error)
CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error)
@@ -36,7 +27,24 @@ type EthClient interface {
ch chan<- types.Log) (ethereum.Subscription, error)
}
+type NodeClient interface {
+ BlockHeaderByNumber(*big.Int) (*types.Header, error)
+ BlockHeaderByHash(common.Hash) (*types.Header, error)
+ BlockHeadersByRange(*big.Int, *big.Int) ([]types.Header, error)
+
+ TxByHash(common.Hash) (*types.Transaction, error)
+
+ StorageHash(common.Address, *big.Int) (common.Hash, error)
+ FilterLogs(ethereum.FilterQuery) ([]types.Log, error)
+}
+
// NewEthClient ... Initializer
func NewEthClient(ctx context.Context, rawURL string) (EthClient, error) {
return ethclient.DialContext(ctx, rawURL)
}
+
+func NewNodeClient(ctx context.Context, rpcURL string) (NodeClient, error) {
+ stats := metrics.WithContext(ctx)
+
+ return ix_node.DialEthClient(rpcURL, stats)
+}
diff --git a/internal/common/common_test.go b/internal/common/common_test.go
index 4e284383..b41c4a86 100644
--- a/internal/common/common_test.go
+++ b/internal/common/common_test.go
@@ -6,7 +6,6 @@ import (
"github.com/base-org/pessimism/internal/common"
"github.com/base-org/pessimism/internal/common/math"
- "github.com/base-org/pessimism/internal/core"
geth_common "github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/assert"
@@ -34,32 +33,3 @@ func Test_SliceToAddresses(t *testing.T) {
[]geth_common.Address{geth_common.HexToAddress("0x00000000"), geth_common.HexToAddress("0x00000001")})
}
-
-// Test_DLQ ... Tests all DLQ functionality
-func Test_DLQ(t *testing.T) {
- dlq := common.NewTransitDLQ(5)
-
- // A. Add 5 elements and test size
- for i := 0; i < 5; i++ {
- td := core.NewTransitData(core.RegisterType(0), nil)
-
- err := dlq.Add(&td)
- assert.NoError(t, err)
- }
-
- // B. Add 6th element and test error
- td := core.NewTransitData(core.RegisterType(0), nil)
- err := dlq.Add(&td)
-
- assert.Error(t, err)
-
- // C. Pop 1 element and test size
- elem, err := dlq.Pop()
- assert.Equal(t, elem.Type, core.RegisterType(0))
- assert.NoError(t, err)
-
- // D. Pop all elements and test size
- entries := dlq.PopAll()
- assert.Equal(t, len(entries), 4)
- assert.True(t, dlq.Empty(), true)
-}
diff --git a/internal/common/dlq.go b/internal/common/dlq.go
deleted file mode 100644
index 03cf290d..00000000
--- a/internal/common/dlq.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package common
-
-import (
- "fmt"
-
- "github.com/base-org/pessimism/internal/core"
-)
-
-const (
- dlqFullMsg = "the dead letter queue is full with %d elements"
- dlqEmptyMsg = "the dead letter queue is empty"
-)
-
-// DLQ ... Dead Letter Queue construct
-// Used to store ETL events
-// that failed to be processed
-type DLQ[E any] struct {
- size int
- dlq []*E
-}
-
-// NewTransitDLQ ... Initializer
-func NewTransitDLQ(size int) *DLQ[core.TransitData] {
- return &DLQ[core.TransitData]{
- size: size,
- dlq: make([]*core.TransitData, 0, size),
- }
-}
-
-// Add ... Adds an entry to the DLQ if it is not full
-func (d *DLQ[E]) Add(entry *E) error {
- if len(d.dlq) >= d.size {
- return fmt.Errorf(dlqFullMsg, d.size)
- }
-
- d.dlq = append(d.dlq, entry)
- return nil
-}
-
-// Pop ... Removes the first element from the DLQ,
-// typically for re-processing
-func (d *DLQ[E]) Pop() (*E, error) {
- if len(d.dlq) == 0 {
- return nil, fmt.Errorf(dlqEmptyMsg)
- }
-
- entry := d.dlq[0]
- d.dlq = d.dlq[1:]
- return entry, nil
-}
-
-// PopAll ... Removes all elements from the DLQ,
-// typically for re-processing
-func (d *DLQ[E]) PopAll() []*E {
- entries := d.dlq
- d.dlq = make([]*E, 0, d.size)
- return entries
-}
-
-// Empty ... Checks if the DLQ is empty
-func (d *DLQ[E]) Empty() bool {
- return len(d.dlq) == 0
-}
-
-// Size ... Returns the size of the DLQ
-func (d *DLQ[E]) Size() int {
- return len(d.dlq)
-}
-
-// Full ... Checks if the DLQ is full
-func (d *DLQ[E]) Full() bool {
- return len(d.dlq) >= d.size
-}
diff --git a/internal/config/config.go b/internal/config/config.go
index 482bba3a..064c75db 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -89,9 +89,9 @@ func NewConfig(fileName core.FilePath) *Config {
},
SystemConfig: &subsystem.Config{
- MaxPipelineCount: getEnvInt("MAX_PIPELINE_COUNT"),
- L1PollInterval: getEnvInt("L1_POLL_INTERVAL"),
- L2PollInterval: getEnvInt("L2_POLL_INTERVAL"),
+ MaxPathCount: getEnvInt("MAX_PATH_COUNT"),
+ L1PollInterval: getEnvInt("L1_POLL_INTERVAL"),
+ L2PollInterval: getEnvInt("L2_POLL_INTERVAL"),
},
}
diff --git a/internal/core/alert.go b/internal/core/alert.go
index 589d2e3a..8829a55b 100644
--- a/internal/core/alert.go
+++ b/internal/core/alert.go
@@ -109,11 +109,13 @@ func (s Severity) ToPagerDutySev() PagerDutySeverity {
// Alert ... An alert
type Alert struct {
- Criticality Severity
- PUUID PUUID
- SUUID SUUID
+ Net Network
+ HT HeuristicType
+ Sev Severity
+ PathID PathID
+ HeuristicID UUID
Timestamp time.Time
- Ptype PipelineType
+ PathType PathType
Content string
}
diff --git a/internal/core/alert_test.go b/internal/core/alert_test.go
index 5d710231..3ed8289a 100644
--- a/internal/core/alert_test.go
+++ b/internal/core/alert_test.go
@@ -15,7 +15,7 @@ func TestStringToSev(t *testing.T) {
assert.Equal(t, core.StringToSev(""), core.UNKNOWN)
}
-func TestSeverity_String(t *testing.T) {
+func TestSevString(t *testing.T) {
assert.Equal(t, core.LOW.String(), "low")
assert.Equal(t, core.MEDIUM.String(), "medium")
assert.Equal(t, core.HIGH.String(), "high")
@@ -23,7 +23,6 @@ func TestSeverity_String(t *testing.T) {
}
func TestToPagerDutySev(t *testing.T) {
-
assert.Equal(t, core.LOW.ToPagerDutySev(), core.PagerDutySeverity("warning"))
assert.Equal(t, core.MEDIUM.ToPagerDutySev(), core.PagerDutySeverity("error"))
assert.Equal(t, core.HIGH.ToPagerDutySev(), core.PagerDutySeverity("critical"))
diff --git a/internal/core/config.go b/internal/core/config.go
index 3c18945a..d812faef 100644
--- a/internal/core/config.go
+++ b/internal/core/config.go
@@ -3,9 +3,14 @@ package core
import (
"math/big"
"time"
+
+ "github.com/ethereum-optimism/optimism/op-service/retry"
)
-// ClientConfig ... Configuration passed through to an oracle component constructor
+func RetryStrategy() *retry.ExponentialStrategy {
+ return &retry.ExponentialStrategy{Min: 1000, Max: 20_000, MaxJitter: 250}
+}
+
type ClientConfig struct {
Network Network
PollInterval time.Duration
@@ -14,29 +19,21 @@ type ClientConfig struct {
EndHeight *big.Int
}
-// SessionConfig ... Configuration passed through to a session constructor
type SessionConfig struct {
Network Network
- PT PipelineType
+ PT PathType
AlertPolicy *AlertPolicy
Type HeuristicType
Params *SessionParams
}
-// PipelineConfig ... Configuration passed through to a pipeline constructor
-type PipelineConfig struct {
+type PathConfig struct {
Network Network
- DataType RegisterType
- PipelineType PipelineType
+ DataType TopicType
+ PathType PathType
ClientConfig *ClientConfig
}
-// Backfill ... Returns true if the oracle is configured to backfill
func (oc *ClientConfig) Backfill() bool {
return oc.StartHeight != nil
}
-
-// Backtest ... Returns true if the oracle is configured to backtest
-func (oc *ClientConfig) Backtest() bool {
- return oc.EndHeight != nil
-}
diff --git a/internal/core/constants.go b/internal/core/constants.go
index bb0fd2ce..edef5960 100644
--- a/internal/core/constants.go
+++ b/internal/core/constants.go
@@ -21,7 +21,7 @@ const (
Clients
)
-// Network ... Represents the network for which a pipeline's oracle
+// Network ... Represents the network for which a path's reader
// is subscribed to.
type Network uint8
diff --git a/internal/core/core.go b/internal/core/core.go
index 20d7b3b1..c7f4cab2 100644
--- a/internal/core/core.go
+++ b/internal/core/core.go
@@ -12,94 +12,86 @@ import (
"github.com/ethereum/go-ethereum/common"
)
-// TransitOption ... Option used to initialize transit data
-type TransitOption = func(*TransitData)
+// RelayOption ... Option used to initialize an event
+type RelayOption = func(*Event)
// WithAddress ... Injects address to transit data
-func WithAddress(address common.Address) TransitOption {
- return func(td *TransitData) {
- td.Address = address
+func WithAddress(address common.Address) RelayOption {
+ return func(e *Event) {
+ e.Address = address
}
}
// WithOriginTS ... Injects origin timestamp to transit data
-func WithOriginTS(t time.Time) TransitOption {
- return func(td *TransitData) {
- td.OriginTS = t
+func WithOriginTS(t time.Time) RelayOption {
+ return func(e *Event) {
+ e.OriginTS = t
}
}
-// TransitData ... Standardized type used for data inter-communication
-// between all ETL components and Risk Engine
-type TransitData struct {
+type Event struct {
OriginTS time.Time
Timestamp time.Time
Network Network
- Type RegisterType
+ Type TopicType
Address common.Address
Value any
}
-// NewTransitData ... Initializes transit data with supplied options
-// NOTE - transit data is used as a standard data representation
-// for communication between all ETL components and the risk engine
-func NewTransitData(rt RegisterType, val any, opts ...TransitOption) TransitData {
- td := TransitData{
+func NewEvent(rt TopicType, val any, opts ...RelayOption) Event {
+ e := Event{
Timestamp: time.Now(),
Type: rt,
Value: val,
}
for _, opt := range opts { // Apply options
- opt(&td)
+ opt(&e)
}
- return td
+ return e
}
-// Addressed ... Indicates whether the transit data has an
-// associated address field
-func (td *TransitData) Addressed() bool {
- return td.Address != common.Address{0}
+// Addressed ... Indicates whether the event is addressed
+func (e *Event) Addressed() bool {
+ return e.Address != common.Address{0}
}
// NewTransitChannel ... Builds new transit channel
-func NewTransitChannel() chan TransitData {
- return make(chan TransitData)
+func NewTransitChannel() chan Event {
+ return make(chan Event)
}
-// HeuristicInput ... Standardized type used to supply
-// the Risk Engine
type HeuristicInput struct {
- PUUID PUUID
- Input TransitData
+ PathID PathID
+ Input Event
}
// ExecInputRelay ... Represents an inter-subsystem
-// relay used to bind final ETL pipeline outputs to risk engine inputs
+// relay used to bind final ETL path outputs to risk engine inputs
type ExecInputRelay struct {
- pUUID PUUID
- outChan chan HeuristicInput
+ PathID PathID
+ relay chan HeuristicInput
}
// NewEngineRelay ... Initializer
-func NewEngineRelay(pUUID PUUID, outChan chan HeuristicInput) *ExecInputRelay {
+func NewEngineRelay(id PathID, relay chan HeuristicInput) *ExecInputRelay {
return &ExecInputRelay{
- pUUID: pUUID,
- outChan: outChan,
+ PathID: id,
+ relay: relay,
}
}
-// RelayTransitData ... Creates heuristic input from transit data to send to risk engine
-func (eir *ExecInputRelay) RelayTransitData(td TransitData) error {
+// RelayEvent ... Creates a heuristic input from an event and sends it to the risk engine
+func (eir *ExecInputRelay) RelayEvent(e Event) error {
hi := HeuristicInput{
- PUUID: eir.pUUID,
- Input: td,
+ PathID: eir.PathID,
+ Input: e,
}
- eir.outChan <- hi
+ eir.relay <- hi
return nil
}
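
A sketch of the renamed relay flow: a path's terminal output is wrapped into a `HeuristicInput` and handed to the risk engine via an `ExecInputRelay`. The function name is a placeholder and the nil event payload stands in for a real block header or log.

```go
package example

import (
	"github.com/base-org/pessimism/internal/core"
)

// relayToEngine forwards one event from a path to the engine feed channel.
func relayToEngine(pathID core.PathID, feed chan core.HeuristicInput) error {
	relay := core.NewEngineRelay(pathID, feed)

	// Placeholder event; real events carry block headers, logs, etc.
	e := core.NewEvent(core.BlockHeader, nil)

	return relay.RelayEvent(e)
}
```
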
diff --git a/internal/core/core_test.go b/internal/core/core_test.go
index f877c0bf..69caca8e 100644
--- a/internal/core/core_test.go
+++ b/internal/core/core_test.go
@@ -10,42 +10,42 @@ import (
"github.com/stretchr/testify/assert"
)
-func Test_TransitData(t *testing.T) {
+func Test_Event(t *testing.T) {
// Verify construction
- td := core.NewTransitData(
- core.GethBlock,
+ e := core.NewEvent(
+ core.BlockHeader,
nil,
)
- assert.NotNil(t, td, "TransitData should not be nil")
- assert.NotNil(t, td.Timestamp, "TransitData timestamp should not be nil")
+ assert.NotNil(t, e, "Event should not be nil")
+ assert.NotNil(t, e.Timestamp, "Event timestamp should not be nil")
// Verify addressing
- addressed := td.Addressed()
- assert.False(t, addressed, "TransitData should not be addressed")
+ addressed := e.Addressed()
+ assert.False(t, addressed, "Event should not be addressed")
- td.Address = common.HexToAddress("0x456")
- addressed = td.Addressed()
- assert.True(t, addressed, "TransitData should be addressed")
+ e.Address = common.HexToAddress("0x456")
+ addressed = e.Addressed()
+ assert.True(t, addressed, "Event should be addressed")
}
func Test_EngineRelay(t *testing.T) {
- outChan := make(chan core.HeuristicInput)
+ feed := make(chan core.HeuristicInput)
- eir := core.NewEngineRelay(core.NilPUUID(), outChan)
- dummyTD := core.NewTransitData(core.AccountBalance, nil)
+ eir := core.NewEngineRelay(core.PathID{}, feed)
+ dummyTD := core.NewEvent(core.BlockHeader, nil)
// Verify relay and wrapping
go func() {
- _ = eir.RelayTransitData(dummyTD)
+ _ = eir.RelayEvent(dummyTD)
}()
- heurInput := <-outChan
+ args := <-feed
- assert.NotNil(t, heurInput, "HeuristicInput should not be nil")
- assert.Equal(t, heurInput.PUUID, core.NilPUUID(), "HeuristicInput PUUID should be nil")
- assert.Equal(t, heurInput.Input, dummyTD, "HeuristicInput Input should be dummyTD")
+ assert.NotNil(t, args)
+ assert.Equal(t, args.PathID, core.PathID{})
+ assert.Equal(t, args.Input, dummyTD)
}
func Test_SessionParams(t *testing.T) {
diff --git a/internal/core/etl.go b/internal/core/etl.go
index b5724956..9ce0a066 100644
--- a/internal/core/etl.go
+++ b/internal/core/etl.go
@@ -1,67 +1,29 @@
package core
-// ComponentType ... Denotes the ETL component type
-type ComponentType uint8
+// ProcessType ... Denotes the ETL process type
+type ProcessType uint8
const (
- Oracle ComponentType = iota + 1
- Pipe
- Aggregator
+ Read ProcessType = iota + 1
+ Subscribe
)
-// String ... Converts the component type to a string
-func (ct ComponentType) String() string {
+// String ... Converts the process type to a string
+func (ct ProcessType) String() string {
switch ct {
- case Oracle:
- return "oracle"
+ case Read:
+ return "reader"
- case Pipe:
- return "pipe"
-
- case Aggregator:
- return "aggregator"
+ case Subscribe:
+ return "subscriber"
}
return UnknownType
}
-// PipelineType ...
-type PipelineType uint8
+// PathType ...
+type PathType uint8
const (
- Backtest PipelineType = iota + 1
- Live
- MockTest
+ Live PathType = iota + 1
)
-
-// StringToPipelineType ... Converts a string to a pipeline type
-func StringToPipelineType(stringType string) PipelineType {
- switch stringType {
- case "backtest":
- return Backtest
-
- case "live":
- return Live
-
- case "mocktest":
- return MockTest
- }
-
- return PipelineType(0)
-}
-
-// String ... Converts the pipeline type to a string
-func (pt PipelineType) String() string {
- switch pt {
- case Backtest:
- return "backtest"
-
- case Live:
- return "live"
-
- case MockTest:
- return "mocktest"
- }
-
- return UnknownType
-}
diff --git a/internal/core/id.go b/internal/core/id.go
index 77fac971..6c52f7ac 100644
--- a/internal/core/id.go
+++ b/internal/core/id.go
@@ -12,18 +12,12 @@ type UUID struct {
uuid.UUID
}
-// newUUID ... Constructor
-func newUUID() UUID {
+func NewUUID() UUID {
return UUID{
uuid.New(),
}
}
-// nilUUID ... Returns a zero'd out 16 byte array
-func nilUUID() UUID {
- return UUID{[16]byte{0}}
-}
-
// ShortString ... Short string representation for easier
// debugging and ensuring conformance with pessimism specific abstractions
// https://pkg.go.dev/github.com/google/UUID#UUID.String
@@ -42,187 +36,110 @@ func (id UUID) ShortString() string {
uid[7])
}
-// ComponentPID ... Component Primary ID
-type ComponentPID [4]byte
+type ProcIdentifier [4]byte
// Represents a non-deterministic ID that's assigned to
-// every uniquely constructed ETL component
-type CUUID struct {
- PID ComponentPID
+// every uniquely constructed ETL process
+type ProcessID struct {
+ ID ProcIdentifier
UUID UUID
}
// Used for local lookups to look for active collisions
-type PipelinePID [9]byte
+type PathIdentifier [9]byte
// Represents a non-deterministic ID that's assigned to
-// every uniquely constructed ETL pipeline
-type PUUID struct {
- PID PipelinePID
+// every uniquely constructed ETL path
+type PathID struct {
+ ID PathIdentifier
UUID UUID
}
-// PipelineType ... Returns pipeline type decoding from encoded pid byte
-func (uuid PUUID) PipelineType() PipelineType {
- return PipelineType(uuid.PID[0])
-}
-
-func (uuid PUUID) NetworkType() Network {
- return Network(uuid.PID[1])
-}
-
-// SessionPID ... Heuristic session Primary ID
-type SessionPID [3]byte
-
-// Represents a non-deterministic ID that's assigned to
-// every uniquely constructed heuristic session
-type SUUID struct {
- PID SessionPID
- UUID UUID
-}
-
-// Network ... Returns network decoding from encoded pid byte
-func (pid SessionPID) Network() Network {
- return Network(pid[0])
-}
-
-// HeuristicType ... Returns heuristic type decoding from encoded pid byte
-func (pid SessionPID) HeuristicType() HeuristicType {
- return HeuristicType(pid[2])
-}
-
-// NOTE - This is useful for error handling with functions that
-// also return a ComponentID
-// NilCUUID ... Returns a zero'd out or empty component UUID
-func NilCUUID() CUUID {
- return CUUID{
- PID: ComponentPID{0},
- UUID: nilUUID(),
- }
-}
-
-// NilPUUID ... Returns a zero'd out or empty pipeline UUID
-func NilPUUID() PUUID {
- return PUUID{
- PID: PipelinePID{0},
- UUID: nilUUID(),
- }
+func (id PathID) Equal(other PathID) bool {
+ return id.ID == other.ID
}
-// NilSUUID ... Returns a zero'd out or empty heuristic UUID
-func NilSUUID() SUUID {
- return SUUID{
- PID: SessionPID{0},
- UUID: nilUUID(),
- }
+func (id PathID) NetworkType() Network {
+ return Network(id.ID[1])
}
-// MakeCUUID ... Constructs a component PID sequence & random UUID
-func MakeCUUID(pt PipelineType, ct ComponentType, rt RegisterType, n Network) CUUID {
- cID := ComponentPID{
+// MakeProcessID ... Constructs a process identifier sequence & random UUID
+func MakeProcessID(pt PathType, ct ProcessType, tt TopicType, n Network) ProcessID {
+ cID := ProcIdentifier{
byte(n),
byte(pt),
byte(ct),
- byte(rt),
+ byte(tt),
}
- return CUUID{
- PID: cID,
- UUID: newUUID(),
+ return ProcessID{
+ ID: cID,
+ UUID: NewUUID(),
}
}
-// MakePUUID ... Constructs a pipeline PID sequence & random UUID
-func MakePUUID(pt PipelineType, firstCID, lastCID CUUID) PUUID {
- cID1, cID2 := firstCID.PID, lastCID.PID
+func MakePathID(pt PathType, proc1, proc2 ProcessID) PathID {
+ id1, id2 := proc1.ID, proc2.ID
- pID := PipelinePID{
+ pathID := PathIdentifier{
byte(pt),
- cID1[0],
- cID1[1],
- cID1[2],
- cID1[3],
- cID2[0],
- cID2[1],
- cID2[2],
- cID2[3],
+ id1[0],
+ id1[1],
+ id1[2],
+ id1[3],
+ id2[0],
+ id2[1],
+ id2[2],
+ id2[3],
}
- return PUUID{
- PID: pID,
- UUID: newUUID(),
+ return PathID{
+ ID: pathID,
+ UUID: NewUUID(),
}
}
-// MakeSUUID ... Constructs a heuristic PID sequence & random UUID
-func MakeSUUID(n Network, pt PipelineType, ht HeuristicType) SUUID {
- pID := SessionPID{
- byte(n),
- byte(pt),
- byte(ht),
- }
-
- return SUUID{
- PID: pID,
- UUID: newUUID(),
- }
-}
-
-// String ... Returns string representation of a component PID
-func (pid ComponentPID) String() string {
- return fmt.Sprintf("%s:%s:%s:%s",
+// String ... Returns string representation of a process identifier
+func (pid ProcIdentifier) String() string {
+ return fmt.Sprintf("%s:%s:%s",
Network(pid[0]).String(),
- PipelineType(pid[1]).String(),
- ComponentType(pid[2]).String(),
- RegisterType(pid[3]).String(),
+ ProcessType(pid[2]).String(),
+ TopicType(pid[3]).String(),
)
}
-// String ... Returns string representation of a component UUID
-func (uuid CUUID) String() string {
- return fmt.Sprintf("%s::%s",
- uuid.PID.String(),
- uuid.UUID.ShortString(),
- )
+func (id ProcessID) String() string {
+ return id.UUID.ShortString()
}
-// Type ... Returns component type byte value from component UUID
-func (uuid CUUID) Type() ComponentType {
- return ComponentType(uuid.PID[2])
+func (id ProcessID) Identifier() string {
+ return id.ID.String()
+}
+func (id ProcessID) Type() ProcessType {
+ return ProcessType(id.ID[2])
}
-// String ... Returns string representation of a pipeline PID
-func (pid PipelinePID) String() string {
- pt := PipelineType(pid[0]).String()
- cID1 := ComponentPID(*(*[4]byte)(pid[1:5])).String()
- cID2 := ComponentPID(*(*[4]byte)(pid[5:9])).String()
+func (id PathIdentifier) String() string {
+ first := ProcIdentifier(*(*[4]byte)(id[1:5])).String()
+ last := ProcIdentifier(*(*[4]byte)(id[5:9])).String()
- return fmt.Sprintf("%s::%s::%s", pt, cID1, cID2)
+ return fmt.Sprintf("%s::%s", first, last)
}
-// String ... Returns string representation of a pipeline UUID
-func (uuid PUUID) String() string {
- return fmt.Sprintf("%s:::%s",
- uuid.PID.String(), uuid.UUID.ShortString(),
- )
+func (id PathID) Network() Network {
+ return Network(id.ID[1])
}
-// String ... Returns string representation of a heuristic session PID
-func (pid SessionPID) String() string {
- return fmt.Sprintf("%s:%s:%s",
- Network(pid[0]).String(),
- PipelineType(pid[1]).String(),
- HeuristicType(pid[2]).String(),
- )
+func (id PathID) String() string {
+ return id.UUID.ShortString()
}
-// String ... Returns string representation of a heuristic session UUID
-func (uuid SUUID) String() string {
- return fmt.Sprintf("%s::%s",
- uuid.PID.String(), uuid.UUID.ShortString())
+func (id PathID) Identifier() string {
+ return id.ID.String()
}
-type HeuristicID struct {
- PUUID PUUID
- SUUID SUUID
+type SessionID struct {
+ PathID PathID
+ ProcID ProcessID
+ HeuristicID UUID
}
diff --git a/internal/core/id_test.go b/internal/core/id_test.go
index 09f0b588..28f8d8e5 100644
--- a/internal/core/id_test.go
+++ b/internal/core/id_test.go
@@ -7,41 +7,29 @@ import (
"github.com/stretchr/testify/assert"
)
-func Test_Component_ID(t *testing.T) {
+func TestProcessID(t *testing.T) {
- expectedPID := core.ComponentPID([4]byte{1, 1, 1, 1})
- actualID := core.MakeCUUID(1, 1, 1, 1)
+ expected := core.ProcIdentifier([4]byte{1, 1, 1, 1})
+ actual := core.MakeProcessID(1, 1, 1, 1)
- assert.Equal(t, expectedPID, actualID.PID)
+ assert.Equal(t, expected, actual.ID)
- expectedStr := "layer1:backtest:oracle:account_balance"
- actualStr := actualID.PID.String()
+ expectedStr := "layer1:reader:block_header"
+ actualStr := actual.Identifier()
assert.Equal(t, expectedStr, actualStr)
}
-func Test_Pipeline_ID(t *testing.T) {
- expectedID := core.PipelinePID([9]byte{1, 1, 1, 1, 1, 1, 1, 1, 1})
- actualID := core.MakePUUID(1,
- core.MakeCUUID(1, 1, 1, 1),
- core.MakeCUUID(1, 1, 1, 1))
+func TestPathID(t *testing.T) {
+ expected := core.PathIdentifier([9]byte{1, 1, 1, 1, 1, 1, 1, 1, 1})
+ actual := core.MakePathID(1,
+ core.MakeProcessID(1, 1, 1, 1),
+ core.MakeProcessID(1, 1, 1, 1))
- assert.Equal(t, expectedID, actualID.PID)
+ assert.Equal(t, expected, actual.ID)
- expectedStr := "backtest::layer1:backtest:oracle:account_balance::layer1:backtest:oracle:account_balance"
- actualStr := actualID.PID.String()
-
- assert.Equal(t, expectedStr, actualStr)
-}
-
-func Test_HeuristicSession_ID(t *testing.T) {
- expectedID := core.SessionPID([3]byte{1, 2, 1})
- actualID := core.MakeSUUID(1, 2, 1)
-
- assert.Equal(t, expectedID, actualID.PID)
-
- expectedStr := "layer1:live:balance_enforcement"
- actualStr := actualID.PID.String()
+ expectedStr := "layer1:reader:block_header::layer1:reader:block_header"
+ actualStr := actual.Identifier()
assert.Equal(t, expectedStr, actualStr)
}
diff --git a/internal/core/register.go b/internal/core/register.go
deleted file mode 100644
index a5e5e76d..00000000
--- a/internal/core/register.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package core
-
-// RegisterType ... One byte register type enum
-type RegisterType uint8
-
-const (
- AccountBalance RegisterType = iota + 1
- GethBlock
- EventLog
-)
-
-// String ... Returns string representation of a
-// register enum
-func (rt RegisterType) String() string {
- switch rt {
- case AccountBalance:
- return "account_balance"
-
- case GethBlock:
- return "geth_block"
-
- case EventLog:
- return "event_log"
- }
-
- return UnknownType
-}
-
-// DataRegister ... Represents an ETL subsytem data type that
-// can be produced and consumed by heterogenous components
-type DataRegister struct {
- Addressing bool
- Sk *StateKey
-
- DataType RegisterType
- ComponentType ComponentType
- ComponentConstructor interface{}
- Dependencies []RegisterType
-}
-
-// StateKey ... Returns a cloned state key for a data register
-func (dr *DataRegister) StateKey() *StateKey {
- return dr.Sk.Clone()
-}
-
-// Stateful ... Indicates whether the data register has statefulness
-func (dr *DataRegister) Stateful() bool {
- return dr.Sk != nil
-}
-
-// RegisterDependencyPath ... Represents an inclusive acyclic sequential
-// path of data register dependencies
-type RegisterDependencyPath struct {
- Path []*DataRegister
-}
-
-// GeneratePUUID ... Generates a PUUID for an existing dependency path
-// provided an enumerated pipeline and network type
-func (rdp RegisterDependencyPath) GeneratePUUID(pt PipelineType, n Network) PUUID {
- firstComp, lastComp := rdp.Path[0], rdp.Path[len(rdp.Path)-1]
- firstUUID := MakeCUUID(pt, firstComp.ComponentType, firstComp.DataType, n)
- lastUUID := MakeCUUID(pt, lastComp.ComponentType, lastComp.DataType, n)
-
- return MakePUUID(pt, firstUUID, lastUUID)
-}
diff --git a/internal/core/state.go b/internal/core/state.go
index b678e2b4..76cfb4ef 100644
--- a/internal/core/state.go
+++ b/internal/core/state.go
@@ -4,28 +4,24 @@ import (
"fmt"
)
-// StateKey ... Represents a key in the state store
type StateKey struct {
Nesting bool
- Prefix RegisterType
+ Prefix TopicType
ID string
- PUUID *PUUID
+ PathID *PathID
}
-// Clone ... Returns a copy of the state key
func (sk *StateKey) Clone() *StateKey {
return &StateKey{
Nesting: sk.Nesting,
Prefix: sk.Prefix,
ID: sk.ID,
- PUUID: sk.PUUID,
+ PathID: sk.PathID,
}
}
-// MakeStateKey ... Builds a minimal state key using
-// a prefix and key
-func MakeStateKey(pre RegisterType, id string, nest bool) *StateKey {
+func MakeStateKey(pre TopicType, id string, nest bool) *StateKey {
return &StateKey{
Nesting: nest,
Prefix: pre,
@@ -33,30 +29,25 @@ func MakeStateKey(pre RegisterType, id string, nest bool) *StateKey {
}
}
-// IsNested ... Indicates whether the state key is nested
-// NOTE - This is used to determine if the state key maps
-// to a value slice of state keys in the state store (ie. nested)
func (sk *StateKey) IsNested() bool {
return sk.Nesting
}
-// SetPUUID ... Adds a pipeline UUID to the state key prefix
-func (sk *StateKey) SetPUUID(pUUID PUUID) error {
- if sk.PUUID != nil {
- return fmt.Errorf("state key already has a pipeline UUID %s", sk.PUUID.String())
+func (sk *StateKey) SetPathID(id PathID) error {
+ if sk.PathID != nil {
+		return fmt.Errorf("state key already has a PathID %s", sk.PathID.String())
}
- sk.PUUID = &pUUID
+ sk.PathID = &id
return nil
}
-// String ... Returns a string representation of the state key
func (sk StateKey) String() string {
- pUUID := ""
+ id := ""
- if sk.PUUID != nil {
- pUUID = sk.PUUID.String()
+ if sk.PathID != nil {
+ id = sk.PathID.String()
}
- return fmt.Sprintf("%s-%s-%s", pUUID, sk.Prefix, sk.ID)
+ return fmt.Sprintf("%s-%s-%s", id, sk.Prefix, sk.ID)
}
diff --git a/internal/core/state_test.go b/internal/core/state_test.go
index b1259338..a95fe30f 100644
--- a/internal/core/state_test.go
+++ b/internal/core/state_test.go
@@ -7,19 +7,20 @@ import (
"github.com/stretchr/testify/assert"
)
-func Test_StateKey(t *testing.T) {
+func TestStateKey(t *testing.T) {
sk := core.MakeStateKey(0, "testId", false)
- puuid := core.NilPUUID()
- puuid.PID[0] = 1
+ id := core.PathID{}
+ id.ID[0] = 1
// Successfully set the key
- err := sk.SetPUUID(puuid)
+ err := sk.SetPathID(id)
assert.NoError(t, err)
- assert.Contains(t, sk.String(), puuid.String())
+ assert.Contains(t, sk.String(), id.String())
- pUUID2 := core.NilPUUID()
+ // Fail to set key again
+ id = core.PathID{}
- err = sk.SetPUUID(pUUID2)
- assert.Error(t, err, "cannot set puuid more than once")
+ err = sk.SetPathID(id)
+ assert.Error(t, err, "cannot set PathID more than once")
}
diff --git a/internal/core/topic.go b/internal/core/topic.go
new file mode 100644
index 00000000..5366d287
--- /dev/null
+++ b/internal/core/topic.go
@@ -0,0 +1,51 @@
+package core
+
+type TopicType uint8
+
+const (
+ BlockHeader TopicType = iota + 1
+ Log
+)
+
+func (rt TopicType) String() string {
+ switch rt {
+ case BlockHeader:
+ return "block_header"
+
+ case Log:
+ return "log"
+ }
+
+ return UnknownType
+}
+
+type DataTopic struct {
+ Addressing bool
+ Sk *StateKey
+
+ DataType TopicType
+ ProcessType ProcessType
+ Constructor interface{}
+ Dependencies []TopicType
+}
+
+func (dt *DataTopic) StateKey() *StateKey {
+ return dt.Sk.Clone()
+}
+
+func (dt *DataTopic) Stateful() bool {
+ return dt.Sk != nil
+}
+
+// Represents an inclusive acyclic sequential path of data topic dependencies
+type TopicPath struct {
+ Path []*DataTopic
+}
+
+func (tp TopicPath) GeneratePathID(pt PathType, n Network) PathID {
+ proc1, proc2 := tp.Path[0], tp.Path[len(tp.Path)-1]
+ id1 := MakeProcessID(pt, proc1.ProcessType, proc1.DataType, n)
+ id2 := MakeProcessID(pt, proc2.ProcessType, proc2.DataType, n)
+
+ return MakePathID(pt, id1, id2)
+}
diff --git a/internal/engine/addressing.go b/internal/engine/addressing.go
index c25bbcc6..904bd37c 100644
--- a/internal/engine/addressing.go
+++ b/internal/engine/addressing.go
@@ -7,60 +7,50 @@ import (
"github.com/ethereum/go-ethereum/common"
)
-// AddressingMap ... Interface for mapping addresses to session UUIDs
-type AddressingMap interface {
- GetSUUIDsByPair(address common.Address, pUUID core.PUUID) ([]core.SUUID, error)
- Insert(addr common.Address, pUUID core.PUUID, sUUID core.SUUID) error
+type AddressMap struct {
+ m map[common.Address]map[core.PathID][]core.UUID
}
-// addressingMap ... Implementation of AddressingMap
-type addressingMap struct {
- m map[common.Address]map[core.PUUID][]core.SUUID
+func NewAddressMap() *AddressMap {
+ return &AddressMap{
+ m: make(map[common.Address]map[core.PathID][]core.UUID),
+ }
}
-// GetSessionUUIDsByPair ... Gets the session UUIDs by the pair of address and pipeline UUID
-func (am *addressingMap) GetSUUIDsByPair(address common.Address, pUUID core.PUUID) ([]core.SUUID, error) {
+func (am *AddressMap) Get(address common.Address, id core.PathID) ([]core.UUID, error) {
if _, found := am.m[address]; !found {
- return []core.SUUID{}, fmt.Errorf("address provided is not tracked %s", address.String())
+ return []core.UUID{}, fmt.Errorf("address provided is not tracked %s", address.String())
}
- if _, found := am.m[address][pUUID]; !found {
- return []core.SUUID{}, fmt.Errorf("PUUID provided is not tracked %s", pUUID.String())
+ if _, found := am.m[address][id]; !found {
+ return []core.UUID{}, fmt.Errorf("id provided is not tracked %s", id.String())
}
- return am.m[address][pUUID], nil
+ return am.m[address][id], nil
}
-// Insert ... Inserts a new entry into the addressing map
-func (am *addressingMap) Insert(addr common.Address, pUUID core.PUUID, sUUID core.SUUID) error {
+func (am *AddressMap) Insert(addr common.Address, id core.PathID, uuid core.UUID) error {
// 1. Check if address exists; create nested entry & return if not
if _, found := am.m[addr]; !found {
- am.m[addr] = make(map[core.PUUID][]core.SUUID)
- am.m[addr][pUUID] = []core.SUUID{sUUID}
+ am.m[addr] = make(map[core.PathID][]core.UUID)
+ am.m[addr][id] = []core.UUID{uuid}
return nil
}
- // 2. Check if pipeline UUID exists; create entry & return if not
- if _, found := am.m[addr][pUUID]; !found {
- am.m[addr][pUUID] = []core.SUUID{sUUID}
+	// 2. Check if PathID exists; create entry & return if not
+ if _, found := am.m[addr][id]; !found {
+ am.m[addr][id] = []core.UUID{uuid}
return nil
}
// 3. Ensure that entry doesn't already exist
- for _, entry := range am.m[addr][pUUID] {
- if entry == sUUID {
+ for _, entry := range am.m[addr][id] {
+ if entry == uuid {
return fmt.Errorf("entry already exists")
}
}
// 4. Append entry and return
- am.m[addr][pUUID] = append(am.m[addr][pUUID], sUUID)
+ am.m[addr][id] = append(am.m[addr][id], uuid)
return nil
}
-
-// NewAddressingMap ... Initializer
-func NewAddressingMap() AddressingMap {
- return &addressingMap{
- m: make(map[common.Address]map[core.PUUID][]core.SUUID),
- }
-}
diff --git a/internal/engine/addressing_test.go b/internal/engine/addressing_test.go
index efbc9de3..25871ef3 100644
--- a/internal/engine/addressing_test.go
+++ b/internal/engine/addressing_test.go
@@ -10,38 +10,38 @@ import (
)
var (
- testPUUID = core.MakePUUID(0,
- core.MakeCUUID(core.Live, 0, 0, 0),
- core.MakeCUUID(core.Live, 0, 0, 0))
+ pathID = core.MakePathID(0,
+ core.MakeProcessID(core.Live, 0, 0, 0),
+ core.MakeProcessID(core.Live, 0, 0, 0))
)
-func Test_GetSUUIDsByPair(t *testing.T) {
- am := engine.NewAddressingMap()
+func TestGetUUIDs(t *testing.T) {
+ am := engine.NewAddressMap()
- sUUID := core.NilSUUID()
+ id1 := core.NewUUID()
+ id2 := core.NewUUID()
address := common.HexToAddress("0x24")
- err := am.Insert(address, testPUUID, sUUID)
- assert.NoError(t, err, "should not error")
+ err := am.Insert(address, pathID, id1)
+ assert.NoError(t, err)
// Test for found
- ids, err := am.GetSUUIDsByPair(address, testPUUID)
- assert.NoError(t, err, "should not error")
- assert.Equal(t, core.NilSUUID(), ids[0], "should be equal")
+ ids, err := am.Get(address, pathID)
+ assert.NoError(t, err)
+ assert.Equal(t, id1, ids[0])
- // Test for found with multiple entries
- sUUID2 := core.MakeSUUID(0, 0, 1)
- err = am.Insert(address, testPUUID, sUUID2)
- assert.NoError(t, err, "should not error")
+ // Test for multiple
+ err = am.Insert(address, pathID, id2)
+ assert.NoError(t, err)
- ids, err = am.GetSUUIDsByPair(address, testPUUID)
- assert.NoError(t, err, "should not error")
- assert.Len(t, ids, 2, "should have length of 2")
- assert.Contains(t, ids, sUUID, "should contain sUUID")
- assert.Contains(t, ids, sUUID2, "should contain sUUID2")
+ ids, err = am.Get(address, pathID)
+ assert.NoError(t, err)
+ assert.Len(t, ids, 2)
+ assert.Contains(t, ids, id1)
+ assert.Contains(t, ids, id2)
// Test for not found
- ids, err = am.GetSUUIDsByPair(address, core.NilPUUID())
+ ids, err = am.Get(address, core.PathID{})
assert.Error(t, err, "should error")
assert.Empty(t, ids, "should be empty")
}
diff --git a/internal/engine/engine.go b/internal/engine/engine.go
index ede6a1fa..6369e750 100644
--- a/internal/engine/engine.go
+++ b/internal/engine/engine.go
@@ -32,8 +32,8 @@ type ExecInput struct {
// RiskEngine ... Execution engine interface
type RiskEngine interface {
Type() Type
- Execute(context.Context, core.TransitData,
- heuristic.Heuristic) *heuristic.ActivationSet
+ Execute(ctx context.Context, data core.Event,
+ h heuristic.Heuristic) (*heuristic.ActivationSet, error)
AddWorkerIngress(chan ExecInput)
EventLoop(context.Context)
}
@@ -63,24 +63,24 @@ func (hce *hardCodedEngine) AddWorkerIngress(ingress chan ExecInput) {
}
// Execute ... Executes the heuristic
-func (hce *hardCodedEngine) Execute(ctx context.Context, data core.TransitData,
- h heuristic.Heuristic) *heuristic.ActivationSet {
+func (hce *hardCodedEngine) Execute(ctx context.Context, data core.Event,
+ h heuristic.Heuristic) (*heuristic.ActivationSet, error) {
logger := logging.WithContext(ctx)
logger.Debug("Performing heuristic assessment",
- zap.String(logging.SUUIDKey, h.SUUID().String()))
- activationSet, err := h.Assess(data)
+ zap.String(logging.UUID, h.ID().ShortString()))
+ as, err := h.Assess(data)
if err != nil {
logger.Error("Failed to perform activation option for heuristic", zap.Error(err),
- zap.String("heuristic_type", h.SUUID().PID.HeuristicType().String()))
+			zap.String("heuristic_type", h.Type().String()))
metrics.WithContext(ctx).
RecordAssessmentError(h)
- return heuristic.NoActivations()
+ return nil, err
}
- return activationSet
+ return as, nil
}
// EventLoop ... Event loop for the risk engine
@@ -93,39 +93,38 @@ func (hce *hardCodedEngine) EventLoop(ctx context.Context) {
logger.Info("Risk engine event loop cancelled")
return
- case execInput := <-hce.heuristicIn: // Heuristic input received
+ case args := <-hce.heuristicIn: // Heuristic input received
logger.Debug("Heuristic input received",
- zap.String(logging.SUUIDKey, execInput.h.SUUID().String()))
- // (1) Execute heuristic with retry strategy
+ zap.String(logging.UUID, args.h.ID().ShortString()))
+
start := time.Now()
- var actSet *heuristic.ActivationSet
+ as, err := retry.Do[*heuristic.ActivationSet](ctx, 10, core.RetryStrategy(),
+ func() (*heuristic.ActivationSet, error) {
+ metrics.WithContext(ctx).RecordHeuristicRun(args.hi.PathID.Network(), args.h)
+ return hce.Execute(ctx, args.hi.Input, args.h)
+ })
- retryStrategy := &retry.ExponentialStrategy{Min: 1000, Max: 20_000, MaxJitter: 250}
- if _, err := retry.Do[any](ctx, 10, retryStrategy, func() (any, error) {
- actSet = hce.Execute(ctx, execInput.hi.Input, execInput.h)
- metrics.WithContext(ctx).RecordHeuristicRun(execInput.h)
- metrics.WithContext(ctx).RecordInvExecutionTime(execInput.h, float64(time.Since(start).Nanoseconds()))
- // a-ok!
- return 0, nil
- }); err != nil {
+ if err != nil {
logger.Error("Failed to execute heuristic", zap.Error(err))
- metrics.WithContext(ctx).RecordAssessmentError(execInput.h)
+ metrics.WithContext(ctx).RecordAssessmentError(args.h)
}
- // (2) Send alerts for respective activations
- if actSet.Activated() {
- for _, act := range actSet.Entries() {
+ metrics.WithContext(ctx).RecordAssessmentTime(args.h, float64(time.Since(start).Nanoseconds()))
+			if as != nil && as.Activated() {
+ for _, act := range as.Entries() {
alert := core.Alert{
- Timestamp: act.TimeStamp,
- SUUID: execInput.h.SUUID(),
- Content: act.Message,
- PUUID: execInput.hi.PUUID,
- Ptype: execInput.hi.PUUID.PipelineType(),
+ Timestamp: act.TimeStamp,
+ HeuristicID: args.h.ID(),
+ HT: args.h.Type(),
+ Content: act.Message,
+ PathID: args.hi.PathID,
+ Net: args.hi.PathID.Network(),
}
logger.Warn("Heuristic alert",
- zap.String(logging.SUUIDKey, execInput.h.SUUID().String()),
+ zap.String(logging.UUID, args.h.ID().ShortString()),
+				zap.String("heuristic_type", args.h.Type().String()),
zap.String("message", act.Message))
hce.alertEgress <- alert
diff --git a/internal/engine/engine_test.go b/internal/engine/engine_test.go
index 998f7f95..d0bdc680 100644
--- a/internal/engine/engine_test.go
+++ b/internal/engine/engine_test.go
@@ -33,7 +33,7 @@ func testErr() error {
return fmt.Errorf("test error")
}
-func Test_HardCodedEngine(t *testing.T) {
+func TestHardCodedEngine(t *testing.T) {
var tests = []struct {
name string
test func(t *testing.T, ts *testSuite)
@@ -41,39 +41,47 @@ func Test_HardCodedEngine(t *testing.T) {
{
name: "Activation Failure From Error",
test: func(t *testing.T, ts *testSuite) {
- td := core.TransitData{}
+ e := core.Event{}
- ts.mockHeuristic.EXPECT().Assess(td).
- Return(heuristic.NoActivations(), testErr()).Times(1)
+ ts.mockHeuristic.EXPECT().Assess(gomock.Any()).
+ Return(heuristic.NoActivations(), testErr()).AnyTimes()
- ts.mockHeuristic.EXPECT().SUUID().
- Return(core.NilSUUID()).Times(2)
+ ts.mockHeuristic.EXPECT().ID().
+ Return(core.UUID{}).AnyTimes()
- as := ts.re.Execute(context.Background(), td, ts.mockHeuristic)
- assert.False(t, as.Activated())
+ ts.mockHeuristic.EXPECT().TopicType().
+ Return(core.BlockHeader).AnyTimes()
+ as, err := ts.re.Execute(context.Background(), e, ts.mockHeuristic)
+ assert.Nil(t, as)
+ assert.NotNil(t, err)
}},
{
name: "Successful Activation",
test: func(t *testing.T, ts *testSuite) {
- td := core.TransitData{}
+ e := core.Event{}
expectedOut := heuristic.NewActivationSet().Add(
&heuristic.Activation{
Message: "20 inch blade on the Impala",
})
- ts.mockHeuristic.EXPECT().Assess(td).
+ ts.mockHeuristic.EXPECT().Assess(e).
Return(expectedOut, nil).Times(1)
- ts.mockHeuristic.EXPECT().SUUID().
- Return(core.NilSUUID()).Times(1)
+ ts.mockHeuristic.EXPECT().ID().
+ Return(core.UUID{}).Times(1)
- as := ts.re.Execute(context.Background(), td, ts.mockHeuristic)
+ ts.mockHeuristic.EXPECT().TopicType().
+ Return(core.BlockHeader).AnyTimes()
+
+ as, err := ts.re.Execute(context.Background(), e, ts.mockHeuristic)
+ assert.Nil(t, err)
assert.NotNil(t, as)
assert.True(t, as.Activated())
assert.Equal(t, expectedOut, as)
- }},
+ },
+ },
}
for i, test := range tests {
diff --git a/internal/engine/heuristic/config.go b/internal/engine/heuristic/config.go
index c5d7d73b..acaf75b8 100644
--- a/internal/engine/heuristic/config.go
+++ b/internal/engine/heuristic/config.go
@@ -8,7 +8,7 @@ type DeployConfig struct {
StateKey *core.StateKey
Network core.Network
- PUUID core.PUUID
+ PathID core.PathID
Reuse bool
HeuristicType core.HeuristicType
diff --git a/internal/engine/heuristic/heuristic.go b/internal/engine/heuristic/heuristic.go
index c183ca56..322b126a 100644
--- a/internal/engine/heuristic/heuristic.go
+++ b/internal/engine/heuristic/heuristic.go
@@ -16,34 +16,31 @@ const (
// HardCoded ... Hard coded execution type (ie native application code)
HardCoded ExecutionType = iota
- invalidInTypeErr = "invalid input type provided for heuristic. expected %s, got %s"
+ invalidTopicErr = "invalid input type provided for heuristic. expected %s, got %s"
)
-// Heuristic ... Interface that all heuristic implementations must adhere to
type Heuristic interface {
- InputType() core.RegisterType
- ValidateInput(core.TransitData) error
- Assess(td core.TransitData) (*ActivationSet, error)
- SUUID() core.SUUID
- SetSUUID(core.SUUID)
+ TopicType() core.TopicType
+ Validate(core.Event) error
+ Assess(e core.Event) (*ActivationSet, error)
+ Type() core.HeuristicType
+ ID() core.UUID
+ SetID(core.UUID)
}
-// BaseHeuristicOpt ... Functional option for BaseHeuristic
-type BaseHeuristicOpt = func(bi *BaseHeuristic) *BaseHeuristic
+type BaseHeuristicOpt = func(bh *BaseHeuristic) *BaseHeuristic
-// BaseHeuristic ... Base heuristic implementation
type BaseHeuristic struct {
- sUUID core.SUUID
- inType core.RegisterType
+ ht core.HeuristicType
+ id core.UUID
+ topic core.TopicType
}
-// NewBaseHeuristic ... Initializer for BaseHeuristic
-// This is a base type that's inherited by all hardcoded
-// heuristic implementations
-func NewBaseHeuristic(inType core.RegisterType,
+func New(topic core.TopicType, t core.HeuristicType,
opts ...BaseHeuristicOpt) Heuristic {
bi := &BaseHeuristic{
- inType: inType,
+ ht: t,
+ topic: topic,
}
for _, opt := range opts {
@@ -53,36 +50,34 @@ func NewBaseHeuristic(inType core.RegisterType,
return bi
}
-// SUUID ... Returns the heuristic session UUID
-func (bi *BaseHeuristic) SUUID() core.SUUID {
- return bi.sUUID
+func (bi *BaseHeuristic) Type() core.HeuristicType {
+ return bi.ht
}
-// InputType ... Returns the input type for the heuristic
-func (bi *BaseHeuristic) InputType() core.RegisterType {
- return bi.inType
+func (bi *BaseHeuristic) ID() core.UUID {
+ return bi.id
}
-// Assess ... Determines if a heuristic activation has occurred; defaults to no-op
-func (bi *BaseHeuristic) Assess(_ core.TransitData) (*ActivationSet, error) {
+func (bi *BaseHeuristic) TopicType() core.TopicType {
+ return bi.topic
+}
+
+func (bi *BaseHeuristic) Assess(_ core.Event) (*ActivationSet, error) {
return NoActivations(), nil
}
-// SetSUUID ... Sets the heuristic session UUID
-func (bi *BaseHeuristic) SetSUUID(sUUID core.SUUID) {
- bi.sUUID = sUUID
+func (bi *BaseHeuristic) SetID(id core.UUID) {
+ bi.id = id
}
-// ValidateInput ... Validates the input type for the heuristic
-func (bi *BaseHeuristic) ValidateInput(td core.TransitData) error {
- if td.Type != bi.InputType() {
- return fmt.Errorf(invalidInTypeErr, bi.InputType(), td.Type)
+func (bi *BaseHeuristic) Validate(e core.Event) error {
+ if e.Type != bi.TopicType() {
+ return fmt.Errorf(invalidTopicErr, bi.TopicType(), e.Type)
}
return nil
}
-// Activation ... Represents an activation event
type Activation struct {
TimeStamp time.Time
Message string
@@ -98,28 +93,23 @@ func NewActivationSet() *ActivationSet {
}
}
-// Len ... Returns the number of activations in the set
func (as *ActivationSet) Len() int {
return len(as.acts)
}
-// Add ... Adds an activation to the set
func (as *ActivationSet) Add(a *Activation) *ActivationSet {
as.acts = append(as.acts, a)
return as
}
-// Entries ... Returns the activations in the set
func (as *ActivationSet) Entries() []*Activation {
return as.acts
}
-// Activated ... Returns true if the activation set is not empty
func (as *ActivationSet) Activated() bool {
return as.Len() > 0
}
-// NoActivations ... Returns an empty activation set
func NoActivations() *ActivationSet {
return NewActivationSet()
}
diff --git a/internal/engine/heuristic/heuristic_test.go b/internal/engine/heuristic/heuristic_test.go
index d2030628..cdbb8710 100644
--- a/internal/engine/heuristic/heuristic_test.go
+++ b/internal/engine/heuristic/heuristic_test.go
@@ -9,30 +9,31 @@ import (
)
func Test_BaseHeuristic(t *testing.T) {
- testSUUID := core.MakeSUUID(1, 1, 1)
- bi := heuristic.NewBaseHeuristic(core.RegisterType(0))
+ expected := core.NewUUID()
+ h := heuristic.New(core.TopicType(0), core.BalanceEnforcement)
- // Test SUUID
- bi.SetSUUID(testSUUID)
- actualSUUID := bi.SUUID()
+ h.SetID(expected)
+ actual := h.ID()
- assert.Equal(t, testSUUID, actualSUUID, "SUUIDs should match")
+ assert.Equal(t, expected, actual)
- // Test InputType
- actualInputType := bi.InputType()
- assert.Equal(t, core.RegisterType(0), actualInputType, "Input types should match")
+ tt := h.TopicType()
+ assert.Equal(t, core.TopicType(0), tt)
// Test validate
- err := bi.ValidateInput(core.TransitData{
- Type: core.RegisterType(0),
+ err := h.Validate(core.Event{
+ Type: core.TopicType(0),
})
- assert.Nil(t, err, "Error should be nil")
+ assert.Nil(t, err)
- err = bi.ValidateInput(core.TransitData{
- Type: core.RegisterType(1),
+ err = h.Validate(core.Event{
+ Type: core.TopicType(1),
})
- assert.NotNil(t, err, "Error should not be nil")
+ assert.NotNil(t, err)
+
+ ht := h.Type()
+ assert.Equal(t, core.BalanceEnforcement, ht)
}
diff --git a/internal/engine/manager.go b/internal/engine/manager.go
index 49d9e401..ce471494 100644
--- a/internal/engine/manager.go
+++ b/internal/engine/manager.go
@@ -22,19 +22,19 @@ type Config struct {
// Manager ... Engine manager interface
type Manager interface {
- GetInputType(ht core.HeuristicType) (core.RegisterType, error)
+ GetInputType(ht core.HeuristicType) (core.TopicType, error)
Transit() chan core.HeuristicInput
- DeleteHeuristicSession(core.SUUID) (core.SUUID, error)
- DeployHeuristicSession(cfg *heuristic.DeployConfig) (core.SUUID, error)
+ DeleteHeuristicSession(core.UUID) (core.UUID, error)
+ DeployHeuristic(cfg *heuristic.DeployConfig) (core.UUID, error)
core.Subsystem
}
/*
NOTE - Manager will need to understand
- when pipeline changes occur that require remapping
- heuristic sessions to other pipelines
+ when path changes occur that require remapping
+ heuristic sessions to other paths
*/
// engineManager ... Engine management abstraction
@@ -51,14 +51,14 @@ type engineManager struct {
metrics metrics.Metricer
engine RiskEngine
- addresser AddressingMap
- store SessionStore
+ addressing *AddressMap
+ store *Store
heuristics registry.HeuristicTable
}
// NewManager ... Initializer
-func NewManager(ctx context.Context, cfg *Config, engine RiskEngine, addr AddressingMap,
- store SessionStore, it registry.HeuristicTable, alertEgress chan core.Alert) Manager {
+func NewManager(ctx context.Context, cfg *Config, engine RiskEngine, addr *AddressMap,
+ store *Store, it registry.HeuristicTable, alertEgress chan core.Alert) Manager {
ctx, cancel := context.WithCancel(ctx)
em := &engineManager{
@@ -68,7 +68,7 @@ func NewManager(ctx context.Context, cfg *Config, engine RiskEngine, addr Addres
etlIngress: make(chan core.HeuristicInput),
workerEgress: make(chan ExecInput),
engine: engine,
- addresser: addr,
+ addressing: addr,
store: store,
heuristics: it,
metrics: metrics.WithContext(ctx),
@@ -92,18 +92,15 @@ func (em *engineManager) Transit() chan core.HeuristicInput {
}
// DeleteHeuristicSession ... Deletes a heuristic session
-func (em *engineManager) DeleteHeuristicSession(_ core.SUUID) (core.SUUID, error) {
- return core.NilSUUID(), nil
+func (em *engineManager) DeleteHeuristicSession(_ core.UUID) (core.UUID, error) {
+ return core.UUID{}, nil
}
-// updateSharedState ... Updates the shared state store
-// with contextual information about the heuristic session
-// to the ETL (e.g. address, events)
func (em *engineManager) updateSharedState(params *core.SessionParams,
- sk *core.StateKey, pUUID core.PUUID) error {
- err := sk.SetPUUID(pUUID)
- // PUUID already exists in key but is different than the one we want
- if err != nil && sk.PUUID != &pUUID {
+ sk *core.StateKey, id core.PathID) error {
+ err := sk.SetPathID(id)
+ // PathID already exists in key but is different than the one we want
+ if err != nil && sk.PathID != &id {
return err
}
@@ -125,7 +122,7 @@ func (em *engineManager) updateSharedState(params *core.SessionParams,
Nesting: false,
Prefix: sk.Prefix,
ID: params.Address().String(),
- PUUID: &pUUID,
+ PathID: &id,
}
err = state.InsertUnique(em.ctx, innerKey, argStr)
@@ -136,57 +133,55 @@ func (em *engineManager) updateSharedState(params *core.SessionParams,
}
logging.WithContext(em.ctx).Debug("Setting to state store",
- zap.String(logging.PUUIDKey, pUUID.String()),
+ zap.String(logging.Path, id.String()),
zap.String(logging.AddrKey, params.Address().String()))
return nil
}
-// DeployHeuristicSession ... Deploys a heuristic session to be processed by the engine
-func (em *engineManager) DeployHeuristicSession(cfg *heuristic.DeployConfig) (core.SUUID, error) {
- reg, exists := em.heuristics[cfg.HeuristicType]
+func (em *engineManager) DeployHeuristic(cfg *heuristic.DeployConfig) (core.UUID, error) {
+ h, exists := em.heuristics[cfg.HeuristicType]
if !exists {
- return core.NilSUUID(), fmt.Errorf("heuristic type %s not found", cfg.HeuristicType)
+ return core.UUID{}, fmt.Errorf("heuristic type %s not found", cfg.HeuristicType)
}
- if reg.PrepareValidate != nil { // Prepare & validate the heuristic params for stateful consumption
- err := reg.PrepareValidate(cfg.Params)
+ if h.PrepareValidate != nil {
+ err := h.PrepareValidate(cfg.Params)
if err != nil {
- return core.NilSUUID(), err
+ return core.UUID{}, err
}
}
- // Build heuristic instance using constructor function from register definition
- h, err := reg.Constructor(em.ctx, cfg.Params)
+ id := core.NewUUID()
+	// Build heuristic instance using the constructor function from the data topic definition
+ instance, err := h.Constructor(em.ctx, cfg.Params)
if err != nil {
- return core.NilSUUID(), err
+ return core.UUID{}, err
}
- // Generate session UUID and set it to the heuristic
- sUUID := core.MakeSUUID(cfg.Network, cfg.PUUID.PipelineType(), cfg.HeuristicType)
- h.SetSUUID(sUUID)
+ instance.SetID(id)
- err = em.store.AddSession(sUUID, cfg.PUUID, h)
+ err = em.store.AddSession(id, cfg.PathID, instance)
if err != nil {
- return core.NilSUUID(), err
+ return core.UUID{}, err
}
// Shared subsystem state management
if cfg.Stateful {
- err = em.addresser.Insert(cfg.Params.Address(), cfg.PUUID, sUUID)
+ err = em.addressing.Insert(cfg.Params.Address(), cfg.PathID, id)
if err != nil {
- return core.NilSUUID(), err
+ return core.UUID{}, err
}
- err = em.updateSharedState(cfg.Params, cfg.StateKey, cfg.PUUID)
+ err = em.updateSharedState(cfg.Params, cfg.StateKey, cfg.PathID)
if err != nil {
- return core.NilSUUID(), err
+ return core.UUID{}, err
}
}
- em.metrics.IncActiveHeuristics(cfg.HeuristicType, cfg.Network, cfg.PUUID.PipelineType())
+ em.metrics.IncActiveHeuristics(cfg.HeuristicType, cfg.Network)
- return sUUID, nil
+ return id, nil
}
// EventLoop ... Event loop for the engine manager
@@ -209,7 +204,7 @@ func (em *engineManager) EventLoop() error {
}
// GetInputType ... Returns the register input type for the heuristic type
-func (em *engineManager) GetInputType(ht core.HeuristicType) (core.RegisterType, error) {
+func (em *engineManager) GetInputType(ht core.HeuristicType) (core.TopicType, error) {
val, exists := em.heuristics[ht]
if !exists {
return 0, fmt.Errorf("heuristic type %s not found", ht)
@@ -224,33 +219,31 @@ func (em *engineManager) Shutdown() error {
return nil
}
-// executeHeuristics ... Executes all heuristics associated with the input etl pipeline
func (em *engineManager) executeHeuristics(ctx context.Context, data core.HeuristicInput) {
- if data.Input.Addressed() { // Address based heuristic
+ if data.Input.Addressed() {
em.executeAddressHeuristics(ctx, data)
- } else { // Non Address based heuristic
+ } else {
em.executeNonAddressHeuristics(ctx, data)
}
}
-// executeAddressHeuristics ... Executes all address specific heuristics associated with the input etl pipeline
func (em *engineManager) executeAddressHeuristics(ctx context.Context, data core.HeuristicInput) {
logger := logging.WithContext(ctx)
- ids, err := em.addresser.GetSUUIDsByPair(data.Input.Address, data.PUUID)
+ ids, err := em.addressing.Get(data.Input.Address, data.PathID)
if err != nil {
- logger.Error("Could not fetch heuristics by address:pipeline",
+ logger.Error("Could not fetch heuristics by address:path",
zap.Error(err),
- zap.String(logging.PUUIDKey, data.PUUID.String()))
+ zap.String(logging.Path, data.PathID.String()))
return
}
- for _, sUUID := range ids {
- h, err := em.store.GetInstanceByUUID(sUUID)
+ for _, id := range ids {
+ h, err := em.store.GetHeuristic(id)
if err != nil {
- logger.Error("Could not find session by heuristic sUUID",
+ logger.Error("Could not find session by heuristic id",
zap.Error(err),
- zap.String(logging.PUUIDKey, sUUID.String()))
+				zap.String(logging.UUID, id.String()))
continue
}
@@ -258,27 +251,24 @@ func (em *engineManager) executeAddressHeuristics(ctx context.Context, data core
}
}
-// executeNonAddressHeuristics ... Executes all non address specific heuristics associated with the input etl pipeline
func (em *engineManager) executeNonAddressHeuristics(ctx context.Context, data core.HeuristicInput) {
logger := logging.WithContext(ctx)
- // Fetch all session UUIDs associated with the pipeline
- sUUIDs, err := em.store.GetSUUIDsByPUUID(data.PUUID)
+ ids, err := em.store.GetIDs(data.PathID)
if err != nil {
- logger.Error("Could not fetch heuristics for pipeline",
+ logger.Error("Could not fetch heuristics for path",
zap.Error(err),
- zap.String(logging.PUUIDKey, data.PUUID.String()))
+ zap.String(logging.Path, data.PathID.String()))
}
- // Fetch all heuristics for a slice of SUUIDs
- heuristics, err := em.store.GetInstancesByUUIDs(sUUIDs)
+ heuristics, err := em.store.GetHeuristics(ids)
if err != nil {
- logger.Error("Could not fetch heuristics for pipeline",
+ logger.Error("Could not fetch heuristics for path",
zap.Error(err),
- zap.String(logging.PUUIDKey, data.PUUID.String()))
+ zap.String(logging.Path, data.PathID.String()))
}
- for _, h := range heuristics { // Execute all heuristics associated with the pipeline
+ for _, h := range heuristics { // Execute all heuristics associated with the path
em.executeHeuristic(ctx, data, h)
}
}
diff --git a/internal/engine/manager_test.go b/internal/engine/manager_test.go
deleted file mode 100644
index eaa14962..00000000
--- a/internal/engine/manager_test.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package engine_test
-
-import (
- "context"
- "sync"
- "testing"
- "time"
-
- "github.com/base-org/pessimism/internal/core"
- "github.com/base-org/pessimism/internal/engine"
- "github.com/base-org/pessimism/internal/engine/heuristic"
- "github.com/base-org/pessimism/internal/engine/registry"
- "github.com/base-org/pessimism/internal/state"
- "github.com/ethereum/go-ethereum/common"
- "github.com/stretchr/testify/assert"
-)
-
-func Test_EventLoop(t *testing.T) {
- // Setup test dependencies
- alertChan := make(chan core.Alert)
- testPUUID := core.NilPUUID()
-
- ctx := context.Background()
- ss := state.NewMemState()
-
- ctx = context.WithValue(ctx, core.State, ss)
-
- em := engine.NewManager(ctx,
- &engine.Config{WorkerCount: 1},
- engine.NewHardCodedEngine(alertChan),
- engine.NewAddressingMap(),
- engine.NewSessionStore(),
- registry.NewHeuristicTable(),
- alertChan,
- )
-
- ingress := em.Transit()
-
- // Spinup event loop routine w/ closure
- wg := sync.WaitGroup{}
- wg.Add(1)
- go func() {
- defer wg.Done()
- _ = em.EventLoop()
- }()
-
- defer func() {
- _ = em.Shutdown()
- wg.Wait()
- }()
-
- isp := core.NewSessionParams()
- isp.SetValue("address", common.HexToAddress("0x69").String())
- isp.SetValue("upper", 420)
-
- // Deploy heuristic session
- deployCfg := &heuristic.DeployConfig{
- HeuristicType: core.BalanceEnforcement,
- Network: core.Layer1,
- Stateful: true,
- StateKey: &core.StateKey{},
- AlertingPolicy: &core.AlertPolicy{
- Dest: core.Slack.String(),
- },
- Params: isp,
- PUUID: testPUUID,
- }
-
- suuid, err := em.DeployHeuristicSession(deployCfg)
- assert.NoError(t, err)
- assert.NotNil(t, suuid)
-
- // Construct heuristic input
- hi := core.HeuristicInput{
- PUUID: testPUUID,
- Input: core.TransitData{
- Type: core.AccountBalance,
- Address: common.HexToAddress("0x69"),
- Value: float64(666),
- },
- }
-
- // Send heuristic input to event loop
- ingress <- hi
- ticker := time.NewTicker(1 * time.Second)
-
- // Receive alert from event loop
- select {
- case <-ticker.C:
- assert.FailNow(t, "Timed out waiting for alert data")
-
- case alert := <-alertChan:
- assert.NotNil(t, alert)
- assert.Equal(t, alert.PUUID, testPUUID)
- }
-}
diff --git a/internal/engine/registry/balance_enforce.go b/internal/engine/registry/balance_enforce.go
index 293a258f..dea18810 100644
--- a/internal/engine/registry/balance_enforce.go
+++ b/internal/engine/registry/balance_enforce.go
@@ -1,13 +1,19 @@
package registry
import (
+ "context"
"encoding/json"
"fmt"
"time"
+ "github.com/base-org/pessimism/internal/common/math"
+
+ "github.com/base-org/pessimism/internal/client"
"github.com/base-org/pessimism/internal/core"
"github.com/base-org/pessimism/internal/engine/heuristic"
"github.com/base-org/pessimism/internal/logging"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
"go.uber.org/zap"
)
@@ -25,6 +31,7 @@ func (bi *BalanceInvConfig) Unmarshal(isp *core.SessionParams) error {
// BalanceHeuristic ...
type BalanceHeuristic struct {
+ ctx context.Context
cfg *BalanceInvConfig
heuristic.Heuristic
@@ -41,40 +48,48 @@ const reportMsg = `
`
// NewBalanceHeuristic ... Initializer
-func NewBalanceHeuristic(cfg *BalanceInvConfig) (heuristic.Heuristic, error) {
+func NewBalanceHeuristic(ctx context.Context, cfg *BalanceInvConfig) (heuristic.Heuristic, error) {
return &BalanceHeuristic{
+ ctx: ctx,
cfg: cfg,
- Heuristic: heuristic.NewBaseHeuristic(core.AccountBalance),
+ Heuristic: heuristic.New(core.BlockHeader, core.BalanceEnforcement),
}, nil
}
// Assess ... Checks if the balance is within the bounds
// specified in the config
-func (bi *BalanceHeuristic) Assess(td core.TransitData) (*heuristic.ActivationSet, error) {
- logging.NoContext().Debug("Checking activation for balance heuristic", zap.String("data", fmt.Sprintf("%v", td)))
+func (bi *BalanceHeuristic) Assess(e core.Event) (*heuristic.ActivationSet, error) {
+ logging.NoContext().Debug("Checking activation for balance heuristic", zap.String("data", fmt.Sprintf("%v", e)))
+
+ header, ok := e.Value.(types.Header)
+ if !ok {
+ return nil, fmt.Errorf(couldNotCastErr, "BlockHeader")
+ }
- // 1. Validate and extract balance input
- err := bi.ValidateInput(td)
+ client, err := client.FromNetwork(bi.ctx, e.Network)
if err != nil {
return nil, err
}
- balance, ok := td.Value.(float64)
- if !ok {
- return nil, fmt.Errorf(couldNotCastErr, "float64")
+ // See if a tx changed the balance for the address
+ balance, err := client.BalanceAt(bi.ctx, common.HexToAddress(bi.cfg.Address), header.Number)
+ if err != nil {
+ return nil, err
}
+ ethBalance, _ := math.WeiToEther(balance).Float64()
+
activated := false
// 2. Assess if balance > upper bound
if bi.cfg.UpperBound != nil &&
- *bi.cfg.UpperBound < balance {
+ *bi.cfg.UpperBound < ethBalance {
activated = true
}
// 3. Assess if balance < lower bound
if bi.cfg.LowerBound != nil &&
- *bi.cfg.LowerBound > balance {
+ *bi.cfg.LowerBound > ethBalance {
activated = true
}
@@ -94,7 +109,7 @@ func (bi *BalanceHeuristic) Assess(td core.TransitData) (*heuristic.ActivationSe
lower = "-∞"
}
- msg := fmt.Sprintf(reportMsg, balance, upper, lower, bi.SUUID(), bi.cfg.Address)
+	msg := fmt.Sprintf(reportMsg, ethBalance, upper, lower, bi.ID(), bi.cfg.Address)
return heuristic.NewActivationSet().Add(
&heuristic.Activation{
diff --git a/internal/engine/registry/balance_enforce_test.go b/internal/engine/registry/balance_enforce_test.go
index e9976448..82901313 100644
--- a/internal/engine/registry/balance_enforce_test.go
+++ b/internal/engine/registry/balance_enforce_test.go
@@ -1,10 +1,17 @@
package registry_test
import (
+ "context"
+ "math/big"
"testing"
"github.com/base-org/pessimism/internal/core"
"github.com/base-org/pessimism/internal/engine/registry"
+ "github.com/base-org/pessimism/internal/mocks"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+
+ "github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
)
@@ -12,7 +19,9 @@ func Test_Balance_Assess(t *testing.T) {
upper := float64(5)
lower := float64(1)
- bi, err := registry.NewBalanceHeuristic(
+ ctx, ms := mocks.Context(context.Background(), gomock.NewController(t))
+
+ bi, err := registry.NewBalanceHeuristic(ctx,
®istry.BalanceInvConfig{
Address: "0x123",
UpperBound: &upper,
@@ -21,31 +30,50 @@ func Test_Balance_Assess(t *testing.T) {
assert.NoError(t, err)
+ num := big.NewInt(1)
// No activation
- testData1 := core.TransitData{
- Type: core.AccountBalance,
- Value: float64(3),
+ testData1 := core.Event{
+ Network: core.Layer1,
+ Type: core.BlockHeader,
+ Value: types.Header{
+ Number: num,
+ },
}
+ ms.MockL1.EXPECT().
+ BalanceAt(ctx, common.HexToAddress("0x123"), num).Return(big.NewInt(3000000000000000000), nil).Times(1)
as, err := bi.Assess(testData1)
assert.NoError(t, err)
assert.False(t, as.Activated())
// Upper bound activation
- testData2 := core.TransitData{
- Type: core.AccountBalance,
- Value: float64(6),
+ num = num.Add(num, big.NewInt(1))
+ testData2 := core.Event{
+ Network: core.Layer1,
+ Type: core.BlockHeader,
+ Value: types.Header{
+ Number: num,
+ },
}
+ ms.MockL1.EXPECT().
+ BalanceAt(ctx, common.HexToAddress("0x123"), num).Return(big.NewInt(6000000000000000000), nil).Times(1)
+
as, err = bi.Assess(testData2)
assert.NoError(t, err)
assert.True(t, as.Activated())
+ num = num.Add(num, big.NewInt(1))
// Lower bound activation
- testData3 := core.TransitData{
- Type: core.AccountBalance,
- Value: float64(0.1),
+ testData3 := core.Event{
+ Network: core.Layer1,
+ Type: core.BlockHeader,
+ Value: types.Header{
+ Number: num,
+ },
}
+ ms.MockL1.EXPECT().
+ BalanceAt(ctx, common.HexToAddress("0x123"), num).Return(big.NewInt(600000000000000000), nil).Times(1)
as, err = bi.Assess(testData3)
assert.NoError(t, err)
diff --git a/internal/engine/registry/contract_event.go b/internal/engine/registry/contract_event.go
index c8d8d0e6..0dc17da8 100644
--- a/internal/engine/registry/contract_event.go
+++ b/internal/engine/registry/contract_event.go
@@ -54,24 +54,24 @@ func NewEventHeuristic(cfg *EventInvConfig) heuristic.Heuristic {
cfg: cfg,
sigs: sigs,
- Heuristic: heuristic.NewBaseHeuristic(core.EventLog),
+ Heuristic: heuristic.New(core.Log, core.ContractEvent),
}
}
// Assess ... Checks if the balance is within the bounds
// specified in the config
-func (ei *EventHeuristic) Assess(td core.TransitData) (*heuristic.ActivationSet, error) {
+func (ei *EventHeuristic) Assess(e core.Event) (*heuristic.ActivationSet, error) {
// 1. Validate and extract the log event from the transit data
- err := ei.ValidateInput(td)
+ err := ei.Validate(e)
if err != nil {
return nil, err
}
- if td.Address != common.HexToAddress(ei.cfg.Address) {
- return nil, fmt.Errorf(invalidAddrErr, ei.cfg.Address, td.Address.String())
+ if e.Address != common.HexToAddress(ei.cfg.Address) {
+ return nil, fmt.Errorf(invalidAddrErr, ei.cfg.Address, e.Address.String())
}
- log, success := td.Value.(types.Log)
+ log, success := e.Value.(types.Log)
if !success {
return nil, fmt.Errorf(couldNotCastErr, "types.Log")
}
diff --git a/internal/engine/registry/contract_event_test.go b/internal/engine/registry/contract_event_test.go
index d6e6e92d..c1415007 100644
--- a/internal/engine/registry/contract_event_test.go
+++ b/internal/engine/registry/contract_event_test.go
@@ -28,15 +28,15 @@ func Test_Event_Log_Heuristic(t *testing.T) {
hash := crypto.Keccak256Hash([]byte("0x420"))
- td := core.TransitData{
- Type: core.EventLog,
+ e := core.Event{
+ Type: core.Log,
Address: common.HexToAddress("0x0000000000000000000000000000000000000420"),
Value: types.Log{
Topics: []common.Hash{hash},
},
}
- as, err := ei.Assess(td)
+ as, err := ei.Assess(e)
assert.NoError(t, err)
assert.NotNil(t, as)
@@ -55,15 +55,15 @@ func Test_Event_Log_Heuristic(t *testing.T) {
hash := crypto.Keccak256Hash([]byte("0x420"))
- td := core.TransitData{
- Type: core.EventLog,
+ e := core.Event{
+ Type: core.Log,
Address: common.HexToAddress("0x0000000000000000000000000000000000000069"),
Value: types.Log{
Topics: []common.Hash{hash},
},
}
- as, err := ei.Assess(td)
+ as, err := ei.Assess(e)
assert.Error(t, err)
assert.Nil(t, as)
@@ -81,15 +81,15 @@ func Test_Event_Log_Heuristic(t *testing.T) {
hash := crypto.Keccak256Hash([]byte("0x420"))
- td := core.TransitData{
- Type: core.EventLog,
+ e := core.Event{
+ Type: core.Log,
Address: common.HexToAddress("0x0000000000000000000000000000000000000420"),
Value: types.Log{
Topics: []common.Hash{hash},
},
}
- as, err := ei.Assess(td)
+ as, err := ei.Assess(e)
assert.NoError(t, err)
assert.NotNil(t, as)
diff --git a/internal/engine/registry/fault_detector.go b/internal/engine/registry/fault_detector.go
index 5578c841..3bf6cb55 100644
--- a/internal/engine/registry/fault_detector.go
+++ b/internal/engine/registry/fault_detector.go
@@ -56,8 +56,8 @@ func blockToInfo(b *types.Block) blockInfo {
return blockInfo{b}
}
-// faultDetectorInv ... faultDetectorInv implementation
-type faultDetectorInv struct {
+// faultDetection ... faultDetection implementation
+type faultDetection struct {
cfg *FaultDetectorCfg
l2tol1MessagePasser common.Address
@@ -84,7 +84,7 @@ func NewFaultDetector(ctx context.Context, cfg *FaultDetectorCfg) (heuristic.Heu
return nil, err
}
- return &faultDetectorInv{
+ return &faultDetection{
cfg: cfg,
l2OutputOracleFilter: outputOracle,
@@ -94,26 +94,26 @@ func NewFaultDetector(ctx context.Context, cfg *FaultDetectorCfg) (heuristic.Heu
l2Client: bundle.L2Client,
l2GethClient: bundle.L2Geth,
- Heuristic: heuristic.NewBaseHeuristic(core.EventLog),
+ Heuristic: heuristic.New(core.Log, core.FaultDetector),
}, nil
}
// Assess ... Performs the fault detection heuristic logic
-func (fd *faultDetectorInv) Assess(td core.TransitData) (*heuristic.ActivationSet, error) {
+func (fd *faultDetection) Assess(e core.Event) (*heuristic.ActivationSet, error) {
logging.NoContext().Debug("Checking activation for fault detector heuristic",
- zap.String("data", fmt.Sprintf("%v", td)))
+ zap.String("data", fmt.Sprintf("%v", e)))
// 1. Validate and extract data input
- err := fd.ValidateInput(td)
+ err := fd.Validate(e)
if err != nil {
return nil, err
}
- if td.Address.String() != fd.cfg.L2OutputOracle {
- return nil, fmt.Errorf(invalidAddrErr, td.Address.String(), fd.cfg.L2OutputOracle)
+ if e.Address.String() != fd.cfg.L2OutputOracle {
+ return nil, fmt.Errorf(invalidAddrErr, e.Address.String(), fd.cfg.L2OutputOracle)
}
- log, success := td.Value.(types.Log)
+ log, success := e.Value.(types.Log)
if !success {
return nil, fmt.Errorf(couldNotCastErr, "types.Log")
}
@@ -153,7 +153,7 @@ func (fd *faultDetectorInv) Assess(td core.TransitData) (*heuristic.ActivationSe
if expectedStateRoot != actualStateRoot {
return heuristic.NewActivationSet().Add(&heuristic.Activation{
TimeStamp: time.Now(),
- Message: fmt.Sprintf(faultDetectMsg, fd.cfg.L2OutputOracle, fd.cfg.L2ToL1Address, fd.SUUID(), log.TxHash),
+ Message: fmt.Sprintf(faultDetectMsg, fd.cfg.L2OutputOracle, fd.cfg.L2ToL1Address, fd.ID(), log.TxHash),
}), nil
}
diff --git a/internal/engine/registry/fault_detector_test.go b/internal/engine/registry/fault_detector_test.go
index 94211cc4..e4495c2a 100644
--- a/internal/engine/registry/fault_detector_test.go
+++ b/internal/engine/registry/fault_detector_test.go
@@ -85,12 +85,12 @@ func Test_FaultDetector(t *testing.T) {
Return(nil, assert.AnError).
Times(1)
- td := core.TransitData{
- Type: core.EventLog,
+ e := core.Event{
+ Type: core.Log,
Value: testLog,
}
- as, err := ts.fd.Assess(td)
+ as, err := ts.fd.Assess(e)
assert.Nil(t, as)
assert.Error(t, err)
@@ -111,12 +111,12 @@ func Test_FaultDetector(t *testing.T) {
Return(nil, testErr()).
Times(1)
- td := core.TransitData{
- Type: core.EventLog,
+ e := core.Event{
+ Type: core.Log,
Value: testLog,
}
- as, err := ts.fd.Assess(td)
+ as, err := ts.fd.Assess(e)
assert.Error(t, err)
assert.Nil(t, as)
@@ -146,12 +146,12 @@ func Test_FaultDetector(t *testing.T) {
}, nil).
Times(1)
- td := core.TransitData{
- Type: core.EventLog,
+ e := core.Event{
+ Type: core.Log,
Value: testLog,
}
- as, err := ts.fd.Assess(td)
+ as, err := ts.fd.Assess(e)
assert.NotNil(t, as)
assert.True(t, as.Activated())
assert.NoError(t, err)
diff --git a/internal/engine/registry/registry.go b/internal/engine/registry/registry.go
index 1b23980e..9b5bcdce 100644
--- a/internal/engine/registry/registry.go
+++ b/internal/engine/registry/registry.go
@@ -17,7 +17,7 @@ type HeuristicTable map[core.HeuristicType]*InvRegister
type InvRegister struct {
PrepareValidate func(*core.SessionParams) error
Policy core.ChainSubscription
- InputType core.RegisterType
+ InputType core.TopicType
Constructor func(ctx context.Context, isp *core.SessionParams) (heuristic.Heuristic, error)
}
@@ -27,25 +27,25 @@ func NewHeuristicTable() HeuristicTable {
core.BalanceEnforcement: {
PrepareValidate: ValidateAddressing,
Policy: core.BothNetworks,
- InputType: core.AccountBalance,
+ InputType: core.BlockHeader,
Constructor: constructBalanceEnforcement,
},
core.ContractEvent: {
- PrepareValidate: ValidateEventTracking,
+ PrepareValidate: ValidateTracking,
Policy: core.BothNetworks,
- InputType: core.EventLog,
+ InputType: core.Log,
Constructor: constructEventInv,
},
core.FaultDetector: {
PrepareValidate: FaultDetectionPrepare,
Policy: core.OnlyLayer1,
- InputType: core.EventLog,
+ InputType: core.Log,
Constructor: constructFaultDetector,
},
core.WithdrawalSafety: {
PrepareValidate: WithdrawHeuristicPrep,
Policy: core.OnlyLayer1,
- InputType: core.EventLog,
+ InputType: core.Log,
Constructor: constructWithdrawalSafety,
},
}
@@ -66,7 +66,7 @@ func constructEventInv(_ context.Context, isp *core.SessionParams) (heuristic.He
}
// constructBalanceEnforcement ... Constructs a balance heuristic instance
-func constructBalanceEnforcement(_ context.Context, isp *core.SessionParams) (heuristic.Heuristic, error) {
+func constructBalanceEnforcement(ctx context.Context, isp *core.SessionParams) (heuristic.Heuristic, error) {
cfg := &BalanceInvConfig{}
err := cfg.Unmarshal(isp)
@@ -74,7 +74,7 @@ func constructBalanceEnforcement(_ context.Context, isp *core.SessionParams) (he
return nil, err
}
- return NewBalanceHeuristic(cfg)
+ return NewBalanceHeuristic(ctx, cfg)
}
// constructFaultDetector ... Constructs a fault detector heuristic instance
@@ -110,8 +110,8 @@ func constructWithdrawalSafety(ctx context.Context, isp *core.SessionParams) (he
return NewWithdrawalSafetyHeuristic(ctx, cfg)
}
-// ValidateEventTracking ... Ensures that an address and nested args exist in the session params
-func ValidateEventTracking(cfg *core.SessionParams) error {
+// ValidateTracking ... Ensures that an address and nested args exist in the session params
+func ValidateTracking(cfg *core.SessionParams) error {
err := ValidateAddressing(cfg)
if err != nil {
return err
@@ -148,7 +148,7 @@ func ValidateNoTopicsExist(cfg *core.SessionParams) error {
// WithdrawHeuristicPrep ... Ensures that the l2 to l1 message passer exists
// and performs a "hack" operation to set the address key as the l2tol1MessagePasser
-// address for upstream ETL components (ie. event log) to know which L1 address to
+// address for upstream ETL processes (ie. event log) to know which L1 address to
// query for events
func WithdrawHeuristicPrep(cfg *core.SessionParams) error {
l1Portal, err := cfg.Value(core.L1Portal)
diff --git a/internal/engine/registry/registry_test.go b/internal/engine/registry/registry_test.go
index 22ff62c2..ff384fab 100644
--- a/internal/engine/registry/registry_test.go
+++ b/internal/engine/registry/registry_test.go
@@ -22,15 +22,15 @@ func Test_AddressPreprocess(t *testing.T) {
func Test_EventPreprocess(t *testing.T) {
isp := core.NewSessionParams()
- err := registry.ValidateEventTracking(isp)
+ err := registry.ValidateTracking(isp)
assert.Error(t, err, "failure should occur when no address is provided")
isp.SetValue(logging.AddrKey, "0x69")
- err = registry.ValidateEventTracking(isp)
+ err = registry.ValidateTracking(isp)
assert.Error(t, err, "failure should occur when no event is provided")
isp.SetNestedArg("transfer(address,address,uint256)")
- err = registry.ValidateEventTracking(isp)
+ err = registry.ValidateTracking(isp)
assert.Nil(t, err, "no error should occur when nested args are provided")
}
diff --git a/internal/engine/registry/withdrawal_safety.go b/internal/engine/registry/withdrawal_safety.go
index e7abc0d1..7db36617 100644
--- a/internal/engine/registry/withdrawal_safety.go
+++ b/internal/engine/registry/withdrawal_safety.go
@@ -124,24 +124,24 @@ func NewWithdrawalSafetyHeuristic(ctx context.Context, cfg *WithdrawalSafetyCfg)
ixClient: clients.IxClient,
l1Client: clients.L1Client,
- Heuristic: heuristic.NewBaseHeuristic(core.EventLog),
+ Heuristic: heuristic.New(core.Log, core.WithdrawalSafety),
}, nil
}
// Assess ...
-func (wsh *WithdrawalSafetyHeuristic) Assess(td core.TransitData) (*heuristic.ActivationSet, error) {
+func (wsh *WithdrawalSafetyHeuristic) Assess(e core.Event) (*heuristic.ActivationSet, error) {
// TODO - Support running from withdrawal finalized events as well
// 1. Validate input
logging.NoContext().Debug("Checking activation for withdrawal enforcement heuristic",
- zap.String("data", fmt.Sprintf("%v", td)))
+ zap.String("data", fmt.Sprintf("%v", e)))
- err := wsh.ValidateInput(td)
+ err := wsh.Validate(e)
if err != nil {
return nil, err
}
- log, success := td.Value.(types.Log)
+ log, success := e.Value.(types.Log)
if !success {
return nil, fmt.Errorf(couldNotCastErr, "types.Log")
}
@@ -224,7 +224,7 @@ func (wsh *WithdrawalSafetyHeuristic) Assess(td core.TransitData) (*heuristic.Ac
&heuristic.Activation{
TimeStamp: time.Now(),
Message: fmt.Sprintf(WithdrawalSafetyMsg, msg, wsh.cfg.L1PortalAddress, wsh.cfg.L2ToL1Address,
- wsh.SUUID(), log.TxHash.String(), corrWithdrawal.TransactionHash, math.WeiToEther(withdrawalWEI).String()),
+ wsh.ID(), log.TxHash.String(), corrWithdrawal.TransactionHash, math.WeiToEther(withdrawalWEI).String()),
},
), nil
}
diff --git a/internal/engine/store.go b/internal/engine/store.go
index c9ac2e68..92d4c36c 100644
--- a/internal/engine/store.go
+++ b/internal/engine/store.go
@@ -7,34 +7,24 @@ import (
"github.com/base-org/pessimism/internal/engine/heuristic"
)
-// SessionStore ...
-type SessionStore interface {
- AddSession(sUUID core.SUUID, pID core.PUUID, h heuristic.Heuristic) error
- GetInstanceByUUID(sUUID core.SUUID) (heuristic.Heuristic, error)
- GetInstancesByUUIDs(sUUIDs []core.SUUID) ([]heuristic.Heuristic, error)
- GetSUUIDsByPUUID(pUUID core.PUUID) ([]core.SUUID, error)
+type Store struct {
+ ids map[core.PathID][]core.UUID
+ instanceMap map[core.UUID]heuristic.Heuristic // no duplicates
}
-// sessionStore ...
-type sessionStore struct {
- idMap map[core.PUUID][]core.SUUID
- instanceMap map[core.SUUID]heuristic.Heuristic // no duplicates
-}
-
-// NewSessionStore ... Initializer
-func NewSessionStore() SessionStore {
- return &sessionStore{
- instanceMap: make(map[core.SUUID]heuristic.Heuristic),
- idMap: make(map[core.PUUID][]core.SUUID),
+// NewStore ... Initializer
+func NewStore() *Store {
+ return &Store{
+ instanceMap: make(map[core.UUID]heuristic.Heuristic),
+ ids: make(map[core.PathID][]core.UUID),
}
}
-// GetInstancesByUUIDs ... Fetches in-order all heuristics associated with a set of session UUIDs
-func (ss *sessionStore) GetInstancesByUUIDs(sUUIDs []core.SUUID) ([]heuristic.Heuristic, error) {
- heuristics := make([]heuristic.Heuristic, len(sUUIDs))
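+// GetHeuristics ... Fetches in-order all heuristics associated with a set of UUIDs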
+func (s *Store) GetHeuristics(ids []core.UUID) ([]heuristic.Heuristic, error) {
+ heuristics := make([]heuristic.Heuristic, len(ids))
- for i, uuid := range sUUIDs {
- session, err := ss.GetInstanceByUUID(uuid)
+ for i, id := range ids {
+ session, err := s.GetHeuristic(id)
if err != nil {
return nil, err
}
@@ -45,40 +35,36 @@ func (ss *sessionStore) GetInstancesByUUIDs(sUUIDs []core.SUUID) ([]heuristic.He
return heuristics, nil
}
-// GetInstanceByUUID .... Fetches heuristic session by SUUID
-func (ss *sessionStore) GetInstanceByUUID(sUUID core.SUUID) (heuristic.Heuristic, error) {
- if entry, found := ss.instanceMap[sUUID]; found {
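+// GetHeuristic ... Fetches a heuristic session by UUID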
+func (s *Store) GetHeuristic(id core.UUID) (heuristic.Heuristic, error) {
+ if entry, found := s.instanceMap[id]; found {
return entry, nil
}
return nil, fmt.Errorf("heuristic UUID doesn't exists in store heuristic mapping")
}
-// GetSUUIDsByPUUID ... Returns all heuristic session ids associated with pipeline
-func (ss *sessionStore) GetSUUIDsByPUUID(pUUID core.PUUID) ([]core.SUUID, error) {
- if sessionIDs, found := ss.idMap[pUUID]; found {
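+// GetIDs ... Returns all heuristic session UUIDs associated with a path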
+func (s *Store) GetIDs(id core.PathID) ([]core.UUID, error) {
+ if sessionIDs, found := s.ids[id]; found {
return sessionIDs, nil
}
- return nil, fmt.Errorf("pipeline UUID doesn't exists in store heuristic mapping")
+ return nil, fmt.Errorf("path UUID doesn't exists in store heuristic mapping")
}
-// AddSession ... Adds a heuristic session to the store
-func (ss *sessionStore) AddSession(sUUID core.SUUID,
- pUUID core.PUUID, h heuristic.Heuristic) error {
- if _, found := ss.instanceMap[sUUID]; found {
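+// AddSession ... Adds a heuristic session to the store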
+func (s *Store) AddSession(uuid core.UUID,
+ id core.PathID, h heuristic.Heuristic) error {
+ if _, found := s.instanceMap[uuid]; found {
return fmt.Errorf("heuristic UUID already exists in store pid mapping")
}
- if _, found := ss.idMap[pUUID]; !found {
- ss.idMap[pUUID] = make([]core.SUUID, 0)
+ if _, found := s.ids[id]; !found {
+ s.ids[id] = make([]core.UUID, 0)
}
- ss.instanceMap[sUUID] = h
- ss.idMap[pUUID] = append(ss.idMap[pUUID], sUUID)
+ s.instanceMap[uuid] = h
+ s.ids[id] = append(s.ids[id], uuid)
return nil
}
-// RemoveInvSession ... Removes an existing heuristic session from the store
-func (ss *sessionStore) RemoveInvSession(_ core.SUUID,
- _ core.PUUID, _ heuristic.Heuristic) error {
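+// RemoveInvSession ... Removes an existing heuristic session from the store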
+func (s *Store) RemoveInvSession(_ core.UUID,
+ _ core.PathID, _ heuristic.Heuristic) error {
return nil
}
diff --git a/internal/engine/store_test.go b/internal/engine/store_test.go
index c76abbaa..edbf22d0 100644
--- a/internal/engine/store_test.go
+++ b/internal/engine/store_test.go
@@ -11,131 +11,130 @@ import (
)
func TestSessionStore(t *testing.T) {
- sUUID1 := core.MakeSUUID(core.Layer1, core.Live, core.HeuristicType(0))
- sUUID2 := core.MakeSUUID(core.Layer2, core.Live, core.HeuristicType(0))
- pUUID1 := core.NilPUUID()
+ id1 := core.NewUUID()
+ id2 := core.NewUUID()
var tests = []struct {
name string
function string
- constructor func() engine.SessionStore
- testFunc func(t *testing.T, ss engine.SessionStore)
+ constructor func() *engine.Store
+ testFunc func(t *testing.T, ss *engine.Store)
}{
{
name: "Successful Retrieval",
- constructor: func() engine.SessionStore {
- ss := engine.NewSessionStore()
+ constructor: func() *engine.Store {
+ ss := engine.NewStore()
- h := heuristic.NewBaseHeuristic(core.RegisterType(0))
- h.SetSUUID(sUUID1)
+ h := heuristic.New(core.TopicType(0), core.BalanceEnforcement)
+ h.SetID(id1)
- _ = ss.AddSession(sUUID1, pUUID1, h)
+ _ = ss.AddSession(id1, core.PathID{}, h)
return ss
},
- testFunc: func(t *testing.T, ss engine.SessionStore) {
+ testFunc: func(t *testing.T, ss *engine.Store) {
// Ensure that the heuristic is retrievable
- h, err := ss.GetInstanceByUUID(sUUID1)
+ h, err := ss.GetHeuristic(id1)
assert.NoError(t, err)
- assert.Equal(t, h.SUUID(), sUUID1)
+ assert.Equal(t, h.ID(), id1)
- // Ensure that pipeline UUIDs are retrievable
- sUUIDs, err := ss.GetSUUIDsByPUUID(pUUID1)
+ // Ensure that path UUIDs are retrievable
+ ids, err := ss.GetIDs(core.PathID{})
assert.NoError(t, err)
- assert.Equal(t, sUUIDs, []core.SUUID{sUUID1})
+ assert.Equal(t, ids, []core.UUID{id1})
},
},
{
name: "Successful Retrieval with Multiple Heuristics",
- constructor: func() engine.SessionStore {
- ss := engine.NewSessionStore()
+ constructor: func() *engine.Store {
+ ss := engine.NewStore()
- h := heuristic.NewBaseHeuristic(core.RegisterType(0))
- h.SetSUUID(sUUID1)
+ h := heuristic.New(core.TopicType(0), core.BalanceEnforcement)
+ h.SetID(id1)
- _ = ss.AddSession(sUUID1, pUUID1, h)
+ _ = ss.AddSession(id1, core.PathID{}, h)
- h2 := heuristic.NewBaseHeuristic(core.RegisterType(0))
- h2.SetSUUID(sUUID2)
+ h2 := heuristic.New(core.TopicType(0), core.BalanceEnforcement)
+ h2.SetID(id2)
- _ = ss.AddSession(sUUID2, pUUID1, h2)
+ _ = ss.AddSession(id2, core.PathID{}, h2)
return ss
},
- testFunc: func(t *testing.T, ss engine.SessionStore) {
+ testFunc: func(t *testing.T, ss *engine.Store) {
// Ensure that the first inserted heuristic is retrievable
- h, err := ss.GetInstanceByUUID(sUUID1)
+ h, err := ss.GetHeuristic(id1)
assert.NoError(t, err)
- assert.Equal(t, h.SUUID(), sUUID1)
+ assert.Equal(t, h.ID(), id1)
// Ensure that the second inserted heuristic is retrievable
- h2, err := ss.GetInstanceByUUID(sUUID2)
+ h2, err := ss.GetHeuristic(id2)
assert.NoError(t, err)
- assert.Equal(t, h2.SUUID(), sUUID2)
+ assert.Equal(t, h2.ID(), id2)
- // Ensure that pipeline UUIDs are retrievable
- sUUIDs, err := ss.GetSUUIDsByPUUID(pUUID1)
+ // Ensure that path UUIDs are retrievable
+ ids, err := ss.GetIDs(core.PathID{})
assert.NoError(t, err)
- assert.Equal(t, sUUIDs, []core.SUUID{sUUID1, sUUID2})
+ assert.Equal(t, ids, []core.UUID{id1, id2})
// Ensure that both heuristics are retrievable at once
- hs, err := ss.GetInstancesByUUIDs([]core.SUUID{sUUID1, sUUID2})
+ hs, err := ss.GetHeuristics([]core.UUID{id1, id2})
assert.NoError(t, err)
assert.Equal(t, hs, []heuristic.Heuristic{h, h2})
},
},
{
name: "Successful Retrieval",
- constructor: func() engine.SessionStore {
- ss := engine.NewSessionStore()
+ constructor: func() *engine.Store {
+ ss := engine.NewStore()
- h := heuristic.NewBaseHeuristic(core.RegisterType(0))
- h.SetSUUID(sUUID1)
+ h := heuristic.New(core.TopicType(0), core.BalanceEnforcement)
+ h.SetID(id1)
- _ = ss.AddSession(sUUID1, pUUID1, h)
+ _ = ss.AddSession(id1, core.PathID{}, h)
return ss
},
- testFunc: func(t *testing.T, ss engine.SessionStore) {
+ testFunc: func(t *testing.T, ss *engine.Store) {
// Ensure that the heuristic is retrievable
- h, err := ss.GetInstanceByUUID(sUUID1)
+ h, err := ss.GetHeuristic(id1)
assert.NoError(t, err)
- assert.Equal(t, h.SUUID(), sUUID1)
+ assert.Equal(t, h.ID(), id1)
- // Ensure that pipeline UUIDs are retrievable
- sUUIDs, err := ss.GetSUUIDsByPUUID(pUUID1)
+ // Ensure that path UUIDs are retrievable
+ ids, err := ss.GetIDs(core.PathID{})
assert.NoError(t, err)
- assert.Equal(t, sUUIDs, []core.SUUID{sUUID1})
+ assert.Equal(t, ids, []core.UUID{id1})
},
},
{
name: "Failed Retrieval",
- constructor: func() engine.SessionStore {
- ss := engine.NewSessionStore()
+ constructor: func() *engine.Store {
+ ss := engine.NewStore()
- h := heuristic.NewBaseHeuristic(core.RegisterType(0))
- _ = ss.AddSession(sUUID1, pUUID1, h)
+ h := heuristic.New(core.TopicType(0), core.BalanceEnforcement)
+ _ = ss.AddSession(id1, core.PathID{}, h)
return ss
},
- testFunc: func(t *testing.T, ss engine.SessionStore) {
- h, err := ss.GetInstanceByUUID(sUUID2)
+ testFunc: func(t *testing.T, ss *engine.Store) {
+ h, err := ss.GetHeuristic(id2)
assert.Nil(t, h)
assert.Error(t, err)
},
},
{
- name: "Failed Add with Duplicate SUUIDs",
- constructor: func() engine.SessionStore {
- ss := engine.NewSessionStore()
+ name: "Failed Add with Duplicate IDs",
+ constructor: func() *engine.Store {
+ ss := engine.NewStore()
- h := heuristic.NewBaseHeuristic(core.RegisterType(0))
- _ = ss.AddSession(sUUID1, pUUID1, h)
+ h := heuristic.New(core.TopicType(0), core.BalanceEnforcement)
+ _ = ss.AddSession(id1, core.PathID{}, h)
return ss
},
- testFunc: func(t *testing.T, ss engine.SessionStore) {
- // Ensure that only one suuid can exist in the store
- err := ss.AddSession(sUUID1, pUUID1, heuristic.NewBaseHeuristic(core.RegisterType(0)))
+ testFunc: func(t *testing.T, ss *engine.Store) {
+ // Ensure that a duplicate UUID cannot be added to the store
+ err := ss.AddSession(id1, core.PathID{}, heuristic.New(core.TopicType(0), core.BalanceEnforcement))
assert.Error(t, err)
},
},
diff --git a/internal/etl/pipeline/analysis.go b/internal/etl/analysis.go
similarity index 57%
rename from internal/etl/pipeline/analysis.go
rename to internal/etl/analysis.go
index 7c37e6a1..a6f72071 100644
--- a/internal/etl/pipeline/analysis.go
+++ b/internal/etl/analysis.go
@@ -1,53 +1,53 @@
-package pipeline
+package etl
import (
"github.com/base-org/pessimism/internal/core"
"github.com/base-org/pessimism/internal/etl/registry"
)
-// Analyzer ... Interface for analyzing pipelines
+// Analyzer ... Interface for analyzing paths
type Analyzer interface {
- Mergable(p1 Pipeline, p2 Pipeline) bool
- // MergePipelines(ctx context.Context, p1 Pipeline, p2 Pipeline) (Pipeline, error)
+ Mergable(p1 Path, p2 Path) bool
+ // MergePaths(ctx context.Context, p1 Path, p2 Path) (Path, error)
}
// analyzer ... Implementation of Analyzer
type analyzer struct {
- dRegistry registry.Registry
+ r *registry.Registry
}
// NewAnalyzer ... Initializer
-func NewAnalyzer(dRegistry registry.Registry) Analyzer {
+func NewAnalyzer(r *registry.Registry) Analyzer {
return &analyzer{
- dRegistry: dRegistry,
+ r: r,
}
}
-// Mergable ... Returns true if pipelines can be merged or deduped
-func (a *analyzer) Mergable(p1 Pipeline, p2 Pipeline) bool {
- // Invalid if pipelines are not the same length
- if len(p1.Components()) != len(p2.Components()) {
+// Mergable ... Returns true if paths can be merged or deduped
+func (a *analyzer) Mergable(path1 Path, path2 Path) bool {
+ // Invalid if paths are not the same length
+ if len(path1.Processes()) != len(path2.Processes()) {
return false
}
- // Invalid if pipelines are not live
- if p1.Config().PipelineType != core.Live ||
- p2.Config().PipelineType != core.Live {
+ // Invalid if paths are not live
+ if path1.Config().PathType != core.Live ||
+ path2.Config().PathType != core.Live {
return false
}
- // Invalid if either pipeline requires a backfill
- // NOTE - This is a temporary solution to prevent live backfills on two pipelines
+ // Invalid if either path requires a backfill
+ // NOTE - This is a temporary solution to prevent live backfills on two paths
// from being merged.
- // In the future, this should only check the current state of each pipeline
+ // In the future, this should only check the current state of each path
// to ensure that the backfill has been completed for both.
- if p1.Config().ClientConfig.Backfill() ||
- p2.Config().ClientConfig.Backfill() {
+ if path1.Config().ClientConfig.Backfill() ||
+ path2.Config().ClientConfig.Backfill() {
return false
}
- // Invalid if pipelines do not share the same PID
- if p1.UUID().PID != p2.UUID().PID {
+ // Invalid if paths do not share the same PID
+ if path1.UUID().ID != path2.UUID().ID {
return false
}
@@ -56,18 +56,18 @@ func (a *analyzer) Mergable(p1 Pipeline, p2 Pipeline) bool {
// NOTE - This is intentionally commented out for now as its not in-use.
-// // MergePipelines ... Merges two pipelines into one (p1 --merge-> p2)
-// func (a *analyzer) MergePipelines(ctx context.Context, p1 Pipeline, p2 Pipeline) (Pipeline, error) {
-// for i, compi := range p1.Components() {
-// compj := p2.Components()[i]
+// // MergePaths ... Merges two paths into one (p1 --merge-> p2)
+// func (a *analyzer) MergePaths(ctx context.Context, p1 Path, p2 Path) (Path, error) {
+// for i, compi := range p1.Processes() {
+// compj := p2.Processes()[i]
-// reg, err := a.dRegistry.GetRegister(compi.OutputType())
+// reg, err := a.r.GetDataTopic(compi.OutputType())
// if err != nil {
// return nil, err
// }
// if reg.Stateful() { // Merge state items from compi into compj
-// err = a.mergeComponentState(ctx, compi, compj, p1.UUID(), p2.UUID())
+// err = a.mergeProcessState(ctx, compi, compj, p1.UUID(), p2.UUID())
// if err != nil {
// return nil, err
// }
@@ -76,9 +76,9 @@ func (a *analyzer) Mergable(p1 Pipeline, p2 Pipeline) bool {
// return p2, nil
// }
-// // mergeComponentState ... Merges state items from p2 into p1
-// func (a *analyzer) mergeComponentState(ctx context.Context, compi, compj component.Component,
-// p1, p2 core.PUUID) error {
+// // mergeProcessState ... Merges state items from p2 into p1
+// func (a *analyzer) mergeProcessState(ctx context.Context, compi, compj process.Process,
+// p1, p2 core.PathID) error {
// ss, err := state.FromContext(ctx)
// if err != nil {
// return err
@@ -107,8 +107,8 @@ func (a *analyzer) Mergable(p1 Pipeline, p2 Pipeline) bool {
// }
// // MergeNestedStateKeys ... Merges nested state keys from p1 into p2
+// func (a *analyzer) MergeNestedStateKeys(ctx context.Context, c1, c2 process.Process,
-// p1, p2 core.PUUID, ss state.Store) error {
+// func (a *analyzer) MergeNestedStateKeys(ctx context.Context, c1, c2 processProcess,
+// p1, p2 core.PathID, ss state.Store) error {
// items, err := ss.GetSlice(ctx, c1.StateKey())
// if err != nil {
// return err
@@ -118,13 +118,13 @@ func (a *analyzer) Mergable(p1 Pipeline, p2 Pipeline) bool {
// key1 := &core.StateKey{
// Prefix: c1.OutputType(),
// ID: item,
-// PUUID: &p1,
+// PathID: &p1,
// }
// key2 := &core.StateKey{
// Prefix: c2.OutputType(),
// ID: item,
-// PUUID: &p2,
+// PathID: &p2,
// }
// nestedValues, err := ss.GetSlice(ctx, key1)
diff --git a/internal/etl/analysis_test.go b/internal/etl/analysis_test.go
new file mode 100644
index 00000000..2465ed51
--- /dev/null
+++ b/internal/etl/analysis_test.go
@@ -0,0 +1,91 @@
+package etl_test
+
+import (
+ "context"
+ "testing"
+
+ "github.com/base-org/pessimism/internal/core"
+ "github.com/base-org/pessimism/internal/etl"
+ "github.com/base-org/pessimism/internal/etl/process"
+ "github.com/base-org/pessimism/internal/etl/registry"
+ "github.com/base-org/pessimism/internal/mocks"
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_Mergable(t *testing.T) {
+ var tests = []struct {
+ name string
+ description string
+ construction func() etl.Analyzer
+ test func(t *testing.T, a etl.Analyzer)
+ }{
+ {
+ name: "Successful Path Merge",
+ description: "Mergable function should return true if paths are mergable",
+ construction: func() etl.Analyzer {
+ r := registry.New()
+ return etl.NewAnalyzer(r)
+ },
+ test: func(t *testing.T, a etl.Analyzer) {
+ // Setup test paths
+ mockOracle, err := mocks.NewReader(context.Background(), core.BlockHeader)
+ assert.NoError(t, err)
+
+ processes := []process.Process{mockOracle}
+ id1 := core.MakePathID(0, core.MakeProcessID(core.Live, 0, 0, 0), core.MakeProcessID(core.Live, 0, 0, 0))
+ id2 := core.MakePathID(0, core.MakeProcessID(core.Live, 0, 0, 0), core.MakeProcessID(core.Live, 0, 0, 0))
+
+ testCfg := &core.PathConfig{
+ PathType: core.Live,
+ ClientConfig: &core.ClientConfig{},
+ }
+
+ p1, err := etl.NewPath(testCfg, id1, processes)
+ assert.NoError(t, err)
+
+ p2, err := etl.NewPath(testCfg, id2, processes)
+ assert.NoError(t, err)
+
+ assert.True(t, a.Mergable(p1, p2))
+ },
+ },
+ {
+ name: "Failure Path Merge",
+ description: "Mergable function should return false when PID's do not match",
+ construction: func() etl.Analyzer {
+ r := registry.New()
+ return etl.NewAnalyzer(r)
+ },
+ test: func(t *testing.T, a etl.Analyzer) {
+ // Setup test paths
+ reader, err := mocks.NewReader(context.Background(), core.BlockHeader)
+ assert.NoError(t, err)
+
+ processes := []process.Process{reader}
+ id1 := core.MakePathID(0, core.MakeProcessID(core.Live, 1, 0, 0), core.MakeProcessID(core.Live, 0, 0, 0))
+ id2 := core.MakePathID(0, core.MakeProcessID(core.Live, 0, 0, 0), core.MakeProcessID(core.Live, 0, 0, 0))
+
+ testCfg := &core.PathConfig{
+ PathType: core.Live,
+ ClientConfig: &core.ClientConfig{},
+ }
+
+ p1, err := etl.NewPath(testCfg, id1, processes)
+ assert.NoError(t, err)
+
+ p2, err := etl.NewPath(testCfg, id2, processes)
+ assert.NoError(t, err)
+
+ assert.False(t, a.Mergable(p1, p2))
+ },
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ a := test.construction()
+ test.test(t, a)
+ })
+ }
+
+}
diff --git a/internal/etl/component/aggregator.go b/internal/etl/component/aggregator.go
deleted file mode 100644
index eb923b58..00000000
--- a/internal/etl/component/aggregator.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package component
-
-// TODO(#12): No Aggregation Component Support
-type Aggregator struct{}
diff --git a/internal/etl/component/component.go b/internal/etl/component/component.go
deleted file mode 100644
index 05bcd216..00000000
--- a/internal/etl/component/component.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package component
-
-import (
- "sync"
-
- "github.com/base-org/pessimism/internal/core"
-)
-
-const (
- killSig = 0
-)
-
-// Component ... Generalized interface that all pipeline components must adhere to
-type Component interface {
- /*
- NOTE - Storing the PUUID assumes that one component
- can only be a part of one pipeline at a time. This could be
- problematic if we want to have a component be a part of multiple
- pipelines at once. In that case, we would need to store a slice
- of PUUIDs instead.
- */
- // PUUID ... Returns component's PUUID
- PUUID() core.PUUID
-
- // UUID ...
- UUID() core.CUUID
- // Type ... Returns component enum type
- Type() core.ComponentType
-
- // AddRelay ... Adds an engine relay to component egress routing
- AddRelay(relay *core.ExecInputRelay) error
-
- // AddEgress ...
- AddEgress(core.CUUID, chan core.TransitData) error
- // RemoveEgress ...
- RemoveEgress(core.CUUID) error
-
- // Close ... Signifies a component to stop operating
- Close() error
-
- // EventLoop ... Component driver function; spun up as separate go routine
- EventLoop() error
-
- // GetIngress ... Returns component ingress channel for some register type value
- GetIngress(rt core.RegisterType) (chan core.TransitData, error)
-
- // OutputType ... Returns component output data type
- OutputType() core.RegisterType
-
- StateKey() *core.StateKey
-
- // TODO(#24): Add Internal Component Activity State Tracking
- ActivityState() ActivityState
-}
-
-// metaData ... Component agnostic struct that stores component metadata and routing state
-type metaData struct {
- id core.CUUID
- pUUID core.PUUID
-
- cType core.ComponentType
- output core.RegisterType
- state ActivityState
-
- inTypes []core.RegisterType
-
- closeChan chan int
- stateChan chan StateChange
- sk *core.StateKey
-
- *ingressHandler
- *egressHandler
-
- *sync.RWMutex
-}
-
-// newMetaData ... Initializer
-func newMetaData(ct core.ComponentType, ot core.RegisterType) *metaData {
- return &metaData{
- id: core.NilCUUID(),
- pUUID: core.NilPUUID(),
-
- cType: ct,
- egressHandler: newEgressHandler(),
- ingressHandler: newIngressHandler(),
- state: Inactive,
- closeChan: make(chan int),
- stateChan: make(chan StateChange),
- output: ot,
- RWMutex: &sync.RWMutex{},
- }
-}
-
-// ActivityState ... Returns component current activity state
-func (meta *metaData) ActivityState() ActivityState {
- return meta.state
-}
-
-// StateKey ... Returns component's state key
-func (meta *metaData) StateKey() *core.StateKey {
- return meta.sk
-}
-
-// UUID ... Returns component's CUUID
-func (meta *metaData) UUID() core.CUUID {
- return meta.id
-}
-
-// UUID ... Returns component's PUUID
-// NOTE - This currently assumes that component collisions are impossible
-func (meta *metaData) PUUID() core.PUUID {
- return meta.pUUID
-}
-
-// Type ... Returns component's type
-func (meta *metaData) Type() core.ComponentType {
- return meta.cType
-}
-
-// OutputType ... Returns component's data output type
-func (meta *metaData) OutputType() core.RegisterType {
- return meta.output
-}
-
-// emitStateChange ... Emits a stateChange event to stateChan
-func (meta *metaData) emitStateChange(as ActivityState) {
- event := StateChange{
- ID: meta.id,
- From: meta.state,
- To: as,
- }
-
- meta.state = as
- meta.stateChan <- event // Send to upstream consumers
-}
-
-// Option ... Component type agnostic option
-type Option = func(*metaData)
-
-// WithCUUID ... Passes component UUID to component metadata field
-func WithCUUID(id core.CUUID) Option {
- return func(meta *metaData) {
- meta.id = id
- }
-}
-
-// WithPUUID ... Passes component PUUID to component metadata field
-func WithPUUID(pUUID core.PUUID) Option {
- return func(meta *metaData) {
- meta.pUUID = pUUID
- }
-}
-
-// WithEventChan ... Passes state channel to component metadata field
-func WithEventChan(sc chan StateChange) Option {
- return func(md *metaData) {
- md.stateChan = sc
- }
-}
-
-// WithInTypes ... Passes input types to component metadata field
-func WithInTypes(its []core.RegisterType) Option {
- return func(md *metaData) {
- md.inTypes = its
- }
-}
-
-// WithStateKey ... Passes state key to component metadata field
-func WithStateKey(key *core.StateKey) Option {
- return func(md *metaData) {
- md.sk = key
- }
-}
diff --git a/internal/etl/component/egress.go b/internal/etl/component/egress.go
deleted file mode 100644
index aecc9029..00000000
--- a/internal/etl/component/egress.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package component
-
-import (
- "fmt"
-
- "github.com/base-org/pessimism/internal/core"
-)
-
-// egressHandler ... Used to route transit data from a component to it's respective edge components.
-// Also used to manage egresses or "edge routes" for some component.
-type egressHandler struct {
- egresses map[core.ComponentPID]chan core.TransitData
-
- relay *core.ExecInputRelay
-}
-
-// newEgress ... Initializer
-func newEgressHandler() *egressHandler {
- return &egressHandler{
- egresses: make(map[core.ComponentPID]chan core.TransitData),
- relay: nil,
- }
-}
-
-// PathEnd ... Returns true if no egresses exist and an engine relay exists, false otherwise
-func (eh *egressHandler) PathEnd() bool {
- return len(eh.egresses) == 0 && eh.HasEngineRelay()
-}
-
-// Send ... Sends single piece of transitData to all innner mapping value channels
-func (eh *egressHandler) Send(td core.TransitData) error {
- if len(eh.egresses) == 0 && !eh.HasEngineRelay() {
- return fmt.Errorf(egressNotExistErr)
- }
-
- if eh.HasEngineRelay() {
- if err := eh.relay.RelayTransitData(td); err != nil {
- return err
- }
- }
-
- // NOTE - Consider introducing a fail safe timeout to ensure that freezing on clogged chanel buffers is recognized
- for _, channel := range eh.egresses {
- channel <- td
- }
-
- return nil
-}
-
-// SendBatch ... Sends slice of transitData to all innner mapping value channels
-func (eh *egressHandler) SendBatch(dataSlice []core.TransitData) error {
- // NOTE - Consider introducing a fail safe timeout to ensure that freezing on clogged chanel buffers is recognized
- for _, data := range dataSlice {
- // NOTE - Does it make sense to fail loudly here?
-
- if err := eh.Send(data); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// AddEgress ... Inserts a new egress given an ID and channel; fail on key collision
-func (eh *egressHandler) AddEgress(componentID core.CUUID, outChan chan core.TransitData) error {
- if _, found := eh.egresses[componentID.PID]; found {
- return fmt.Errorf(egressAlreadyExistsErr, componentID.String())
- }
-
- eh.egresses[componentID.PID] = outChan
- return nil
-}
-
-// RemoveEgress ... Removes an egress given an ID; fail if no key found
-func (eh *egressHandler) RemoveEgress(componentID core.CUUID) error {
- if _, found := eh.egresses[componentID.PID]; !found {
- return fmt.Errorf(egressNotFoundErr, componentID.PID.String())
- }
-
- delete(eh.egresses, componentID.PID)
- return nil
-}
-
-// HasEngineRelay ... Returns true if engine relay exists, false otherwise
-func (eh *egressHandler) HasEngineRelay() bool {
- return eh.relay != nil
-}
-
-// AddRelay ... Adds a relay assuming no existing ones
-func (eh *egressHandler) AddRelay(relay *core.ExecInputRelay) error {
- if eh.HasEngineRelay() {
- return fmt.Errorf(engineEgressExistsErr)
- }
-
- eh.relay = relay
- return nil
-}
diff --git a/internal/etl/component/egress_test.go b/internal/etl/component/egress_test.go
deleted file mode 100644
index 97ff26a8..00000000
--- a/internal/etl/component/egress_test.go
+++ /dev/null
@@ -1,243 +0,0 @@
-package component
-
-import (
- "fmt"
- "testing"
- "time"
-
- "github.com/base-org/pessimism/internal/core"
- "github.com/stretchr/testify/assert"
-)
-
-func Test_Add_Remove_Egress(t *testing.T) {
- var tests = []struct {
- name string
- description string
-
- constructionLogic func() *egressHandler
- testLogic func(*testing.T, *egressHandler)
- }{
- {
- name: "Successful Multi Add Test",
- description: "When multiple egresses are passed to AddEgress function, they should successfully be added to the egress mapping",
-
- constructionLogic: func() *egressHandler {
- handler := newEgressHandler()
- return handler
- },
-
- testLogic: func(t *testing.T, eh *egressHandler) {
-
- for _, id := range []core.CUUID{
- core.MakeCUUID(1, 54, 43, 32),
- core.MakeCUUID(2, 54, 43, 32),
- core.MakeCUUID(3, 54, 43, 32),
- core.MakeCUUID(4, 54, 43, 32)} {
- outChan := make(chan core.TransitData)
- err := eh.AddEgress(id, outChan)
-
- assert.NoError(t, err, "Ensuring that no error when adding new egress")
-
- _, exists := eh.egresses[id.PID]
- assert.True(t, exists, "Ensuring that key exists")
- }
- },
- },
- {
- name: "Failed Add Test",
- description: "When existing direcegresstive is passed to AddEgress function it should fail to be added to the egress mapping",
-
- constructionLogic: func() *egressHandler {
- id := core.MakeCUUID(1, 54, 43, 32)
- outChan := make(chan core.TransitData)
-
- handler := newEgressHandler()
- if err := handler.AddEgress(id, outChan); err != nil {
- panic(err)
- }
-
- return handler
- },
-
- testLogic: func(t *testing.T, eh *egressHandler) {
- id := core.MakeCUUID(1, 54, 43, 32)
- outChan := make(chan core.TransitData)
- err := eh.AddEgress(id, outChan)
-
- assert.Error(t, err, "Error was not generated when adding conflicting egresses with same ID")
- assert.Equal(t, err.Error(), fmt.Sprintf(egressAlreadyExistsErr, id.String()), "Ensuring that returned error is a not found type")
- },
- },
- {
- name: "Successful Remove Test",
- description: "When existing egress is passed to RemoveEgress function, it should be removed from mapping",
-
- constructionLogic: func() *egressHandler {
- id := core.MakeCUUID(1, 54, 43, 32)
- outChan := make(chan core.TransitData)
-
- handler := newEgressHandler()
- if err := handler.AddEgress(id, outChan); err != nil {
- panic(err)
- }
-
- return handler
- },
-
- testLogic: func(t *testing.T, eh *egressHandler) {
-
- err := eh.RemoveEgress(core.MakeCUUID(1, 54, 43, 32))
-
- assert.NoError(t, err, "Ensuring that no error is thrown when removing an existing egress")
-
- _, exists := eh.egresses[core.MakeCUUID(1, 54, 43, 32).PID]
- assert.False(t, exists, "Ensuring that key is removed from mapping")
- },
- }, {
- name: "Failed Remove Test",
- description: "When non-existing egress key is passed to RemoveEgress function, an error should be returned",
-
- constructionLogic: func() *egressHandler {
- id := core.MakeCUUID(1, 54, 43, 32)
- outChan := make(chan core.TransitData)
-
- handler := newEgressHandler()
- if err := handler.AddEgress(id, outChan); err != nil {
- panic(err)
- }
-
- return handler
- },
-
- testLogic: func(t *testing.T, eh *egressHandler) {
-
- cID := core.MakeCUUID(69, 69, 69, 69)
- err := eh.RemoveEgress(cID)
-
- assert.Error(t, err, "Ensuring that an error is thrown when trying to remove a non-existent egress")
- assert.Equal(t, err.Error(), fmt.Sprintf(egressNotFoundErr, cID.PID.String()))
- },
- },
- {
- name: "Passed Engine Egress Test",
- description: "When a relay is passed to AddRelay, it should be used during transit operations",
-
- constructionLogic: newEgressHandler,
-
- testLogic: func(t *testing.T, eh *egressHandler) {
- relayChan := make(chan core.HeuristicInput)
-
- pUUID := core.NilPUUID()
-
- relay := core.NewEngineRelay(pUUID, relayChan)
-
- handler := newEgressHandler()
-
- err := handler.AddRelay(relay)
- assert.NoError(t, err)
-
- testData := core.TransitData{Network: 2, Value: "goodbye closed-source blocksec monitoring"}
- expectedInput := core.HeuristicInput{
- PUUID: pUUID,
- Input: testData,
- }
-
- go func(t *testing.T) {
- assert.NoError(t, handler.Send(testData))
- }(t)
-
- actualInput := <-relayChan
-
- assert.Equal(t, actualInput, expectedInput)
-
- },
- },
- {
- name: "Failed Engine Egress Test",
- description: "When relay already exists and AddRelay function is called, an error should be returned",
-
- constructionLogic: func() *egressHandler {
- relayChan := make(chan core.HeuristicInput)
-
- pUUID := core.NilPUUID()
-
- relay := core.NewEngineRelay(pUUID, relayChan)
-
- handler := newEgressHandler()
-
- if err := handler.AddRelay(relay); err != nil {
- panic(err)
- }
-
- return handler
- },
-
- testLogic: func(t *testing.T, eh *egressHandler) {
- relayChan := make(chan core.HeuristicInput)
-
- pUUID := core.NilPUUID()
-
- relay := core.NewEngineRelay(pUUID, relayChan)
-
- err := eh.AddRelay(relay)
-
- assert.Error(t, err)
- },
- },
- }
-
- for i, tc := range tests {
- t.Run(fmt.Sprintf("%d-%s", i, tc.name), func(t *testing.T) {
- testegress := tc.constructionLogic()
- tc.testLogic(t, testegress)
- })
-
- }
-}
-
-func Test_Transit_Output(t *testing.T) {
- testHandler := newEgressHandler()
-
- var egresses = []struct {
- channel chan core.TransitData
- id core.CUUID
- }{
- {
- channel: make(chan core.TransitData, 1),
- id: core.MakeCUUID(3, 54, 43, 32),
- },
- {
- channel: make(chan core.TransitData, 1),
- id: core.MakeCUUID(1, 54, 43, 32),
- },
- {
- channel: make(chan core.TransitData, 1),
- id: core.MakeCUUID(1, 2, 43, 32),
- },
- {
- channel: make(chan core.TransitData, 1),
- id: core.MakeCUUID(1, 4, 43, 32),
- },
- }
-
- for _, egress := range egresses {
- err := testHandler.AddEgress(egress.id, egress.channel)
- assert.NoError(t, err, "Received error when trying to add egress")
- }
-
- expectedOutput := core.TransitData{
- Timestamp: time.Date(1969, time.April, 1, 4, 20, 0, 0, time.Local),
- Type: 3,
- Value: 0x42069,
- }
-
- err := testHandler.Send(expectedOutput)
- assert.NoError(t, err, "Receieved error when trying to transit output")
-
- for _, egress := range egresses {
- actualOutput := <-egress.channel
-
- assert.Equal(t, actualOutput, expectedOutput, "Ensuring transited data is actually returned on channels used by egress")
- }
-
-}
diff --git a/internal/etl/component/ingress.go b/internal/etl/component/ingress.go
deleted file mode 100644
index b618e472..00000000
--- a/internal/etl/component/ingress.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package component
-
-import (
- "fmt"
-
- "github.com/base-org/pessimism/internal/core"
-)
-
-// ingressHandler ... Used to manage ingresses for some component
-// NOTE: An edge is only possible between two components (C0, C1) where C0 -> C1
-// if C0.outGresses[registerType] ⊆ C1.ingresses
-type ingressHandler struct {
- ingreses map[core.RegisterType]chan core.TransitData
-}
-
-// newIngressHandler ... Initializer
-func newIngressHandler() *ingressHandler {
- return &ingressHandler{
- ingreses: make(map[core.RegisterType]chan core.TransitData),
- }
-}
-
-// GetIngress ... Fetches ingress channel for some register type
-func (ih *ingressHandler) GetIngress(rt core.RegisterType) (chan core.TransitData, error) {
- val, found := ih.ingreses[rt]
- if !found {
- return nil, fmt.Errorf(ingressNotFoundErr, rt.String())
- }
-
- return val, nil
-}
-
-// createIngress ... Creates ingress channel for some register type
-func (ih *ingressHandler) createIngress(rt core.RegisterType) error {
- if _, found := ih.ingreses[rt]; found {
- return fmt.Errorf(ingressAlreadyExistsErr, rt.String())
- }
-
- ih.ingreses[rt] = core.NewTransitChannel()
-
- return nil
-}
diff --git a/internal/etl/component/oracle.go b/internal/etl/component/oracle.go
deleted file mode 100644
index 17980d2a..00000000
--- a/internal/etl/component/oracle.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package component
-
-import (
- "context"
- "math/big"
- "sync"
- "time"
-
- "github.com/base-org/pessimism/internal/core"
- "github.com/base-org/pessimism/internal/logging"
- "github.com/base-org/pessimism/internal/metrics"
- "go.uber.org/zap"
-)
-
-// OracleDefinition ... Provides a generalized interface for developers to bind their own functionality to
-type OracleDefinition interface {
- BackTestRoutine(ctx context.Context, componentChan chan core.TransitData,
- startHeight *big.Int, endHeight *big.Int) error
- ReadRoutine(ctx context.Context, componentChan chan core.TransitData) error
- Height() (*big.Int, error)
-}
-
-// Oracle ... Component used to represent a data source reader; E.g, Eth block indexing, interval API polling
-type Oracle struct {
- ctx context.Context
-
- definition OracleDefinition
- oracleChannel chan core.TransitData
-
- wg *sync.WaitGroup
-
- *metaData
-}
-
-// NewOracle ... Initializer
-func NewOracle(ctx context.Context, outType core.RegisterType,
- od OracleDefinition, opts ...Option) (Component, error) {
- o := &Oracle{
- ctx: ctx,
- definition: od,
- oracleChannel: core.NewTransitChannel(),
- wg: &sync.WaitGroup{},
-
- metaData: newMetaData(core.Oracle, outType),
- }
-
- for _, opt := range opts {
- opt(o.metaData)
- }
-
- logging.WithContext(ctx).Info("Constructed component",
- zap.String(logging.CUUIDKey, o.metaData.id.String()))
-
- return o, nil
-}
-
-// Height ... Returns the current block height of the oracle
-func (o *Oracle) Height() (*big.Int, error) {
- return o.definition.Height()
-}
-
-// Close ... This function is called at the end when processes related to oracle need to shut down
-func (o *Oracle) Close() error {
- logging.WithContext(o.ctx).
- Info("Waiting for oracle definition go routines to finish",
- zap.String(logging.CUUIDKey, o.id.String()))
- o.closeChan <- killSig
-
- o.wg.Wait()
- logging.WithContext(o.ctx).Info("Oracle definition go routines have exited",
- zap.String(logging.CUUIDKey, o.id.String()))
- return nil
-}
-
-// EventLoop ... Component loop that actively waits and transits register data
-// from a channel that the definition's read routine writes to
-func (o *Oracle) EventLoop() error {
- // TODO(#24) - Add Internal Component Activity State Tracking
-
- logger := logging.WithContext(o.ctx)
-
- logger.Debug("Starting component event loop",
- zap.String(logging.CUUIDKey, o.id.String()))
-
- o.wg.Add(1)
-
- routineCtx, cancel := context.WithCancel(o.ctx)
- // o.emitStateChange(Live)
-
- // Spawn definition read routine
- go func() {
- defer o.wg.Done()
- if err := o.definition.ReadRoutine(routineCtx, o.oracleChannel); err != nil {
- logger.Error("Received error from read routine",
- zap.String(logging.CUUIDKey, o.id.String()),
- zap.Error(err))
- }
- }()
-
- for {
- select {
- case registerData := <-o.oracleChannel:
- logger.Debug("Sending data",
- zap.String(logging.CUUIDKey, o.id.String()))
-
- if err := o.egressHandler.Send(registerData); err != nil {
- logger.Error(transitErr, zap.String("ID", o.id.String()))
- }
-
- if o.egressHandler.PathEnd() {
- latency := float64(time.Since(registerData.OriginTS).Milliseconds())
- metrics.WithContext(o.ctx).
- RecordPipelineLatency(o.pUUID, latency)
- }
-
- case <-o.closeChan:
- logger.Debug("Received component shutdown signal",
- zap.String(logging.CUUIDKey, o.id.String()))
-
- // o.emitStateChange(Terminated)
- logger.Debug("Closing component channel and context",
- zap.String(logging.CUUIDKey, o.id.String()))
- close(o.oracleChannel)
- cancel() // End definition routine
-
- logger.Debug("Component shutdown success",
- zap.String(logging.CUUIDKey, o.id.String()))
- return nil
- }
- }
-}
diff --git a/internal/etl/component/pipe.go b/internal/etl/component/pipe.go
deleted file mode 100644
index 78086b23..00000000
--- a/internal/etl/component/pipe.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package component
-
-import (
- "context"
- "time"
-
- "github.com/base-org/pessimism/internal/core"
- "github.com/base-org/pessimism/internal/logging"
- "github.com/base-org/pessimism/internal/metrics"
- "go.uber.org/zap"
-)
-
-type PipeDefinition interface {
- Transform(ctx context.Context, data core.TransitData) ([]core.TransitData, error)
-}
-
-// Pipe ... Component used to represent any arbitrary computation; pipes must can read from all component types
-// E.G. (ORACLE || CONVEYOR || PIPE) -> PIPE
-
-type Pipe struct {
- ctx context.Context
- inType core.RegisterType
-
- def PipeDefinition
-
- *metaData
-}
-
-// NewPipe ... Initializer
-func NewPipe(ctx context.Context, pd PipeDefinition, inType core.RegisterType,
- outType core.RegisterType, opts ...Option) (Component, error) {
- // TODO - Validate inTypes size
-
- pipe := &Pipe{
- ctx: ctx,
- def: pd,
- inType: inType,
-
- metaData: newMetaData(core.Pipe, outType),
- }
-
- if err := pipe.createIngress(inType); err != nil {
- return nil, err
- }
-
- for _, opt := range opts {
- opt(pipe.metaData)
- }
-
- return pipe, nil
-}
-
-// Close ... Shuts down component by emitting a kill signal to a close channel
-func (p *Pipe) Close() error {
- p.closeChan <- killSig
-
- return nil
-}
-
-// EventLoop ... Driver loop for component that actively subscribes
-// to an input channel where transit data is read, transformed, and transitte
-// to downstream components
-func (p *Pipe) EventLoop() error {
- logger := logging.WithContext(p.ctx)
-
- logger.Info("Starting event loop",
- zap.String("ID", p.id.String()),
- )
-
- inChan, err := p.GetIngress(p.inType)
- if err != nil {
- return err
- }
-
- for {
- select {
- case inputData := <-inChan:
- // 1. Transform the input data to some output data
- // NOTE ... Continuing to process data even if there is an error
- // is done in the event of partial processing where some data
- // may be successfully processed and some may not
- outputData, err := p.def.Transform(p.ctx, inputData)
- if err != nil {
- // TODO - Introduce metrics service (`prometheus`) call
- logger.Error(err.Error(), zap.String("ID", p.id.String()))
- }
-
- // 2. Determine if component is at the end of a pipeline, emit metrics if so
- if p.egressHandler.PathEnd() {
- latency := float64(time.Since(inputData.OriginTS).Milliseconds())
-
- metrics.WithContext(p.ctx).
- RecordPipelineLatency(p.pUUID,
- latency)
- }
-
- // 3. Verify that some output data was produced and continue if not
- length := len(outputData)
- logger.Debug("Received transformation output data",
- zap.String("ID", p.id.String()),
- zap.Int("Length", length))
-
- if length == 0 {
- continue
- }
-
- // 4. Batch send output data to subscribed downstream components
- logger.Debug("Sending data batch",
- zap.String("ID", p.id.String()),
- zap.String("Type", p.OutputType().String()))
-
- if err := p.egressHandler.SendBatch(outputData); err != nil {
- logger.Error(transitErr, zap.String("ID", p.id.String()))
- }
-
- // Manager is telling us to shutdown
- case <-p.closeChan:
- logger.Debug("Received component shutdown signal",
- zap.String("ID", p.id.String()))
-
- // p.emitStateChange(Terminated)
-
- return nil
- }
- }
-}
diff --git a/internal/etl/component/types.go b/internal/etl/component/types.go
deleted file mode 100644
index 2dac9d88..00000000
--- a/internal/etl/component/types.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package component
-
-import (
- "context"
-
- "github.com/base-org/pessimism/internal/core"
-)
-
-type ActivityState int
-
-const (
- Inactive ActivityState = iota
- Live
- Terminated
-)
-
-func (as ActivityState) String() string {
- switch as {
- case Inactive:
- return "inactive"
-
- case Live:
- return "live"
-
- case Terminated:
- return "terminated"
- }
-
- return "unknown"
-}
-
-// StateChange ... Represents a component state change event
-// that is processed by component management logic to determine
-// proper pipeline states and where to deduplicate
-type StateChange struct {
- ID core.CUUID
-
- From ActivityState // S
- To ActivityState // S'
-}
-
-// EgressHandler specific errors
-const (
- engineEgressExistsErr = "engine egress already exists"
- egressAlreadyExistsErr = "%s egress key already exists within component router mapping"
- egressNotFoundErr = "no egress key %s exists within component router mapping"
- egressNotExistErr = "received transit request with 0 out channels to write to"
-
- transitErr = "received transit error: %s"
-)
-
-// IngressHandler specific errors
-const (
- ingressAlreadyExistsErr = "ingress already exists for %s"
- ingressNotFoundErr = "ingress not found for %s"
-)
-
-type (
- // OracleConstructorFunc ... Type declaration that a registry oracle component constructor must adhere to
- OracleConstructorFunc = func(context.Context, *core.ClientConfig, ...Option) (Component, error)
-
- // PipeConstructorFunc ... Type declaration that a registry pipe component constructor must adhere to
- PipeConstructorFunc = func(context.Context, *core.ClientConfig, ...Option) (Component, error)
-)
-
-// OracleType ...
-type OracleType = string
-
-const (
- // BackTestOracle ... Represents an oracle used for backtesting some heuristic
- BacktestOracle OracleType = "backtest"
- // LiveOracle ... Represents an oracle used for powering some live heuristic
- LiveOracle OracleType = "live"
-)
diff --git a/internal/etl/etl.go b/internal/etl/etl.go
new file mode 100644
index 00000000..76523b5c
--- /dev/null
+++ b/internal/etl/etl.go
@@ -0,0 +1,282 @@
+//go:generate mockgen -package mocks --destination ../mocks/etl.go . ETL
+
+package etl
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "sync"
+
+ "github.com/base-org/pessimism/internal/core"
+ "github.com/base-org/pessimism/internal/etl/process"
+ "github.com/base-org/pessimism/internal/etl/registry"
+
+ "github.com/base-org/pessimism/internal/logging"
+ "github.com/base-org/pessimism/internal/metrics"
+
+ "go.uber.org/zap"
+)
+
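+// ETL ... Interface for the ETL subsystem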
+type ETL interface {
+ CreateProcess(cc *core.ClientConfig, id core.ProcessID, pathID core.PathID,
+ dt *core.DataTopic) (process.Process, error)
+ GetStateKey(rt core.TopicType) (*core.StateKey, bool, error)
+ GetBlockHeight(id core.PathID) (*big.Int, error)
+ CreateProcessPath(cfg *core.PathConfig) (core.PathID, bool, error)
+ Run(id core.PathID) error
+ ActiveCount() int
+
+ core.Subsystem
+}
+type etl struct {
+ ctx context.Context
+ cancel context.CancelFunc
+
+ analyzer Analyzer
+ dag *Graph
+ store *Store
+ metrics metrics.Metricer
+
+ egress chan core.HeuristicInput
+
+ registry *registry.Registry
+ wg sync.WaitGroup
+}
+
+// New ... Initializer
+func New(ctx context.Context, analyzer Analyzer, r *registry.Registry,
+ store *Store, dag *Graph, eo chan core.HeuristicInput) ETL {
+ ctx, cancel := context.WithCancel(ctx)
+ stats := metrics.WithContext(ctx)
+ return &etl{
+ analyzer: analyzer,
+ ctx: ctx,
+ cancel: cancel,
+ dag: dag,
+ store: store,
+ registry: r,
+ egress: eo,
+ metrics: stats,
+ wg: sync.WaitGroup{},
+ }
+}
+
+// GetDataTopic ... Returns a data topic for a given topic type
+func (etl *etl) GetDataTopic(rt core.TopicType) (*core.DataTopic, error) {
+ return etl.registry.GetDataTopic(rt)
+}
+
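+// CreateProcessPath ... Constructs a process path for the given config, returning an existing path ID when one can be reused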
+func (etl *etl) CreateProcessPath(cfg *core.PathConfig) (core.PathID, bool, error) {
+ // NOTE - If some of these early sub-system operations succeed but lower function
+ // code logic fails, then a rollback will need to be triggered to undo prior applied state operations
+ logger := logging.WithContext(etl.ctx)
+
+ depPath, err := etl.registry.TopicPath(cfg.DataType)
+ if err != nil {
+ return core.PathID{}, false, err
+ }
+
+ id := depPath.GeneratePathID(cfg.PathType, cfg.Network)
+
+ processes, err := etl.topicPath(cfg, id, depPath)
+ if err != nil {
+ return core.PathID{}, false, err
+ }
+
+ logger.Debug("Constructing path",
+ zap.String(logging.Path, id.String()))
+
+ path, err := NewPath(cfg, id, processes)
+ if err != nil {
+ return core.PathID{}, false, err
+ }
+
+ mergeID, err := etl.getMergePath(id, path)
+ if err != nil {
+ return core.PathID{}, false, err
+ }
+
+ nilID := core.PathID{}
+ if mergeID != nilID { // An existing path can be reused
+ return mergeID, true, nil
+ }
+
+ // Bind communication route between path and risk engine
+ if err := path.AddEngineRelay(etl.egress); err != nil {
+ return core.PathID{}, false, err
+ }
+
+ // Add path object to the store
+ etl.store.AddPath(id, path)
+
+ return id, false, nil
+}
+
+// Run ... Adds a path's processes to the DAG and runs them
+func (etl *etl) Run(id core.PathID) error {
+ // 1. Get path from store
+ path, err := etl.store.GetPathByID(id)
+ if err != nil {
+ return err
+ }
+
+ // 2. Add path processes to graph
+ if err := etl.dag.AddMany(path.Processes()); err != nil {
+ return err
+ }
+
+ logging.WithContext(etl.ctx).Info("Running path",
+ zap.String(logging.Path, id.String()))
+
+ // 3. Run processes
+ path.Run(&etl.wg)
+ etl.metrics.IncActivePaths(id.NetworkType())
+ return nil
+}
+
+// EventLoop ... Driver loop that runs as a separate go routine
+func (etl *etl) EventLoop() error {
+ logger := logging.WithContext(etl.ctx)
+
+ for {
+ <-etl.ctx.Done()
+ logger.Info("Shutting down ETL")
+ return nil
+ }
+}
+
+// Shutdown ... Shuts down all paths
+func (etl *etl) Shutdown() error {
+ etl.cancel()
+ logger := logging.WithContext(etl.ctx)
+
+ for _, p := range etl.store.Paths() {
+ logger.Info("Shutting down path",
+ zap.String(logging.Path, p.UUID().String()))
+
+ if err := p.Close(); err != nil {
+ logger.Error("Failed to close path",
+ zap.String(logging.Path, p.UUID().String()))
+ return err
+ }
+ etl.metrics.DecActivePaths(p.UUID().NetworkType())
+ }
+ logger.Debug("Waiting for all process routines to end")
+ etl.wg.Wait()
+
+ return nil
+}
+
+// ActiveCount ... Returns the number of active paths
+func (etl *etl) ActiveCount() int {
+ return etl.store.ActiveCount()
+}
+
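+// topicPath ... Builds the ordered set of processes for a path from its topic dependency path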
+func (etl *etl) topicPath(cfg *core.PathConfig, pathID core.PathID,
+ depPath core.TopicPath) ([]process.Process, error) {
+ processes := make([]process.Process, 0)
+
+ for _, register := range depPath.Path {
+ id := core.MakeProcessID(cfg.PathType, register.ProcessType, register.DataType, cfg.Network)
+
+ p, err := etl.CreateProcess(cfg.ClientConfig, id, pathID, register)
+ if err != nil {
+ return []process.Process{}, err
+ }
+
+ processes = append(processes, p)
+ }
+
+ return processes, nil
+}
+
+// getMergePath ... Returns a path ID if a merging opportunity exists
+func (etl *etl) getMergePath(id core.PathID, path Path) (core.PathID, error) {
+ paths := etl.store.GetExistingPaths(id)
+
+ for _, id := range paths {
+ p, err := etl.store.GetPathByID(id)
+ if err != nil {
+ return core.PathID{}, err
+ }
+
+ if etl.analyzer.Mergable(path, p) { // Deploy heuristics to existing paths instead
+ // This is a bit hacky since we aren't actually merging the paths
+ return p.UUID(), nil
+ }
+ }
+
+ return core.PathID{}, nil
+}
+
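+// CreateProcess ... Constructs a single process for the given data topic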
+func (etl *etl) CreateProcess(cc *core.ClientConfig, id core.ProcessID, pathID core.PathID,
+ dt *core.DataTopic) (process.Process, error) {
+ logging.WithContext(etl.ctx).Debug("constructing process",
+ zap.String("type", dt.ProcessType.String()),
+ zap.String("register_type", dt.DataType.String()))
+
+ // embed options to avoid constructor boilerplate
+ opts := []process.Option{process.WithID(id), process.WithPathID(pathID)}
+
+ if dt.Stateful() {
+ // Propagate state key to process so that it can be used
+ // by the process's definition logic
+ sk := dt.StateKey()
+ err := sk.SetPathID(pathID)
+ if err != nil {
+ return nil, err
+ }
+
+ opts = append(opts, process.WithStateKey(sk))
+ }
+
+ switch dt.ProcessType {
+ case core.Read:
+ init, success := dt.Constructor.(process.Constructor)
+ if !success {
+ return nil, fmt.Errorf(couldNotCastErr, core.Read.String())
+ }
+
+ return init(etl.ctx, cc, opts...)
+
+ case core.Subscribe:
+ init, success := dt.Constructor.(process.Constructor)
+ if !success {
+ return nil, fmt.Errorf(couldNotCastErr, core.Subscribe.String())
+ }
+
+ return init(etl.ctx, cc, opts...)
+
+ default:
+ return nil, fmt.Errorf(unknownCompType, dt.ProcessType.String())
+ }
+}
+
+// GetStateKey ... Returns a state key for a given topic type
+func (etl *etl) GetStateKey(rt core.TopicType) (*core.StateKey, bool, error) {
+ dr, err := etl.registry.GetDataTopic(rt)
+ if err != nil {
+ return nil, false, err
+ }
+
+ if dr.Stateful() {
+ return dr.StateKey(), true, nil
+ }
+
+ return nil, false, nil
+}
+
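+// GetBlockHeight ... Returns the current block height for a given path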
+func (etl *etl) GetBlockHeight(id core.PathID) (*big.Int, error) {
+ path, err := etl.store.GetPathByID(id)
+ if err != nil {
+ return nil, err
+ }
+
+ height, err := path.BlockHeight()
+ if err != nil {
+ return nil, err
+ }
+
+ return height, nil
+}
diff --git a/internal/etl/etl_test.go b/internal/etl/etl_test.go
new file mode 100644
index 00000000..a662a17a
--- /dev/null
+++ b/internal/etl/etl_test.go
@@ -0,0 +1,191 @@
+package etl_test
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/base-org/pessimism/internal/core"
+
+ "github.com/base-org/pessimism/internal/etl"
+ "github.com/base-org/pessimism/internal/mocks"
+
+ "github.com/stretchr/testify/assert"
+)
+
+var (
+ id1 = core.MakeProcessID(69, 69, 69, 69)
+ id2 = core.MakeProcessID(42, 42, 42, 42)
+)
+
+func Test_Graph(t *testing.T) {
+ var tests = []struct {
+ name string
+ function string
+ description string
+
+ constructionLogic func() *etl.Graph
+ testLogic func(*testing.T, *etl.Graph)
+ }{
+ {
+ name: "Successful Process Node Insertion",
+ function: "AddProcess",
+ description: "When a process is added to the graph, it should persist within the graph's edge mapping",
+
+ constructionLogic: etl.NewGraph,
+ testLogic: func(t *testing.T, g *etl.Graph) {
+ id := core.MakeProcessID(69, 69, 69, 69)
+
+ process, err := mocks.NewSubscriber(context.Background(), core.BlockHeader, core.BlockHeader)
+ assert.NoError(t, err)
+
+ err = g.Add(id, process)
+ assert.NoError(t, err, "Process addition should resolve to Nil")
+
+ actualProcess, err := g.GetProcess(id)
+ assert.NoError(t, err, "Process retrieval should resolve to Nil")
+
+ assert.Equal(t, process, actualProcess)
+
+ edges := g.Edges()
+
+ assert.Contains(t, edges, id)
+
+ assert.Len(t, edges[id], 0, "No edges should exist yet")
+
+ },
+ },
+ {
+ name: "Failed Cyclic Edge Addition",
+ function: "addEdge",
+ description: "When an edge between two processes already exists (A->B), then an inverted edge (B->A) should not be possible",
+
+ constructionLogic: func() *etl.Graph {
+ g := etl.NewGraph()
+
+ comp1, err := mocks.NewReader(context.Background(), core.BlockHeader)
+ if err != nil {
+ panic(err)
+ }
+
+ if err = g.Add(id1, comp1); err != nil {
+ panic(err)
+ }
+
+ comp2, err := mocks.NewSubscriber(context.Background(), core.BlockHeader, core.BlockHeader)
+ if err != nil {
+ panic(err)
+ }
+
+ if err = g.Add(id2, comp2); err != nil {
+ panic(err)
+ }
+
+ if err = g.Subscribe(id1, id2); err != nil {
+ panic(err)
+ }
+
+ return g
+ },
+
+ testLogic: func(t *testing.T, g *etl.Graph) {
+ err := g.Subscribe(id2, id1)
+ assert.Error(t, err)
+
+ },
+ },
+ {
+ name: "Failed Duplicate Edge Addition",
+ function: "AddEdge",
+ description: "When a unique edge exists between two processes (A->B), a new edge should not be possible",
+
+ constructionLogic: func() *etl.Graph {
+ g := etl.NewGraph()
+
+ comp1, err := mocks.NewReader(context.Background(), core.BlockHeader)
+ if err != nil {
+ panic(err)
+ }
+
+ if err = g.Add(id1, comp1); err != nil {
+ panic(err)
+ }
+
+ comp2, err := mocks.NewSubscriber(context.Background(), core.BlockHeader, core.BlockHeader)
+ if err != nil {
+ panic(err)
+ }
+
+ if err = g.Add(id2, comp2); err != nil {
+ panic(err)
+ }
+
+ if err = g.Subscribe(id1, id2); err != nil {
+ panic(err)
+ }
+
+ return g
+ },
+
+ testLogic: func(t *testing.T, g *etl.Graph) {
+ err := g.Subscribe(id1, id2)
+ assert.Error(t, err)
+
+ },
+ },
+ {
+ name: "Successful Edge Addition",
+ function: "AddEdge",
+ description: "When two processes are inserted, an edge should be possible between them",
+
+ constructionLogic: func() *etl.Graph {
+ g := etl.NewGraph()
+
+ comp1, err := mocks.NewReader(context.Background(), core.BlockHeader)
+ if err != nil {
+ panic(err)
+ }
+
+ if err = g.Add(id1, comp1); err != nil {
+ panic(err)
+ }
+
+ comp2, err := mocks.NewSubscriber(context.Background(), core.BlockHeader, core.BlockHeader)
+ if err != nil {
+ panic(err)
+ }
+
+ if err = g.Add(id2, comp2); err != nil {
+ panic(err)
+ }
+
+ return g
+ },
+
+ testLogic: func(t *testing.T, g *etl.Graph) {
+ comp1, _ := g.GetProcess(id1)
+
+ err := g.Subscribe(id1, id2)
+ assert.NoError(t, err)
+
+ err = comp1.AddSubscriber(id2, core.NewTransitChannel())
+ assert.Error(t, err)
+
+ assert.True(t, g.Exists(id1))
+ assert.True(t, g.Exists(id2))
+
+ edgeMap := g.Edges()
+ assert.Contains(t, edgeMap[id1], id2, "ID1 should have a mapped edge to ID2")
+
+ },
+ },
+ }
+
+ for i, tc := range tests {
+ t.Run(fmt.Sprintf("%d-%s-%s", i, tc.function, tc.name), func(t *testing.T) {
+ testRouter := tc.constructionLogic()
+ tc.testLogic(t, testRouter)
+ })
+
+ }
+}
diff --git a/internal/etl/graph.go b/internal/etl/graph.go
new file mode 100644
index 00000000..98f3cafd
--- /dev/null
+++ b/internal/etl/graph.go
@@ -0,0 +1,145 @@
+package etl
+
+import (
+ "fmt"
+
+ "github.com/base-org/pessimism/internal/core"
+ "github.com/base-org/pessimism/internal/etl/process"
+)
+
+type node struct {
+ p process.Process
+ edges map[core.ProcessID]interface{}
+ outType core.TopicType
+}
+
+func newNode(c process.Process, rt core.TopicType) *node {
+ return &node{
+ p: c,
+ outType: rt,
+ edges: make(map[core.ProcessID]interface{}),
+ }
+}
+
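+// NewGraph ... Initializer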
+func NewGraph() *Graph {
+ return &Graph{
+ edgeMap: make(map[core.ProcessID]*node),
+ }
+}
+
+// Graph ... Represents a directed acyclic process graph (DAG)
+type Graph struct {
+ edgeMap map[core.ProcessID]*node
+}
+
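+// Exists ... Returns true if a process node exists in the graph for the given ID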
+func (graph *Graph) Exists(id core.ProcessID) bool {
+ _, exists := graph.edgeMap[id]
+ return exists
+}
+
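+// GetProcess ... Fetches a process from the graph by ID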
+func (graph *Graph) GetProcess(id core.ProcessID) (process.Process, error) {
+ if graph.Exists(id) {
+ return graph.edgeMap[id].p, nil
+ }
+
+ return nil, fmt.Errorf(procNotFoundErr, id)
+}
+
+/*
+NOTE - There is no check preventing a cyclic edge from being added, meaning
+ a caller could create an edge B->A when edge A->B already exists.
+ This would contradict the acyclic property of a DAG, but in practice it
+ is avoided because every process declares its entrypoint register
+ dependencies; a cycle could therefore only arise in the case where a
+ process declares the exact inverse input->output of an existing
+ process.
+*/
+
+// Subscribe ... Adds a subscription (edge) between two preconstructed process nodes
+func (graph *Graph) Subscribe(from, to core.ProcessID) error {
+ fromNode, found := graph.edgeMap[from]
+ if !found {
+ return fmt.Errorf(procNotFoundErr, from.String())
+ }
+
+ toNode, found := graph.edgeMap[to]
+ if !found {
+ return fmt.Errorf(procNotFoundErr, to.String())
+ }
+
+ if _, exists := fromNode.edges[toNode.p.ID()]; exists {
+ return fmt.Errorf(edgeExistsErr, from.String(), to.String())
+ }
+
+ relay, err := toNode.p.GetRelay(fromNode.outType)
+ if err != nil {
+ return err
+ }
+
+ if err := fromNode.p.AddSubscriber(to, relay); err != nil {
+ return err
+ }
+
+ // Update edge mapping with new link
+ graph.edgeMap[from].edges[to] = nil
+
+ return nil
+}
+
+// TODO(#23): Manager DAG process Removal Support
+// RemoveEdge ... Removes an edge from the graph
+func (graph *Graph) RemoveEdge(_, _ core.ProcessID) error {
+ return nil
+}
+
+// TODO(#23): Manager DAG process Removal Support
+// Remove ... Removes a process from the graph
+func (graph *Graph) Remove(_ core.ProcessID) error {
+ return nil
+}
+
+func (graph *Graph) Add(id core.ProcessID, p process.Process) error {
+ if _, exists := graph.edgeMap[id]; exists {
+ return fmt.Errorf(procExistsErr, id)
+ }
+
+ graph.edgeMap[id] = newNode(p, p.EmitType())
+
+ return nil
+}
+
+func (graph *Graph) AddMany(processes []process.Process) error {
+ // Add all process entries to graph
+ for _, p := range processes {
+ if err := graph.Add(p.ID(), p); err != nil {
+ return err
+ }
+ }
+
+ // Add edges between processes
+ for i := 1; i < len(processes); i++ {
+ err := graph.Subscribe(processes[i].ID(), processes[i-1].ID())
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (graph *Graph) Edges() map[core.ProcessID][]core.ProcessID {
+ idMap := make(map[core.ProcessID][]core.ProcessID, len(graph.edgeMap))
+
+ for id, entry := range graph.edgeMap {
+ edges := make([]core.ProcessID, len(entry.edges))
+
+ i := 0
+ for edge := range entry.edges {
+ edges[i] = edge
+ i++
+ }
+
+ idMap[id] = edges
+ }
+
+ return idMap
+}
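
A minimal usage sketch of the new `Graph` API, mirroring the construction logic in `graph_test.go` above. The mock constructors come from the repository's internal `mocks` helpers; the IDs and arguments are illustrative only.

```go
package main

import (
	"context"
	"fmt"

	"github.com/base-org/pessimism/internal/core"
	"github.com/base-org/pessimism/internal/etl"
	"github.com/base-org/pessimism/internal/mocks"
)

func main() {
	g := etl.NewGraph()

	// Illustrative process IDs; argument semantics follow the tests above.
	readerID := core.MakeProcessID(1, 1, 1, 1)
	subID := core.MakeProcessID(1, 1, 1, 2)

	// Assumed mock process constructors from the internal test helpers.
	reader, _ := mocks.NewReader(context.Background(), core.BlockHeader)
	sub, _ := mocks.NewSubscriber(context.Background(), core.BlockHeader, core.BlockHeader)

	// Register both processes, then add a reader -> subscriber edge.
	_ = g.Add(readerID, reader)
	_ = g.Add(subID, sub)
	_ = g.Subscribe(readerID, subID)

	// Edges() exposes the adjacency mapping for inspection.
	fmt.Println(g.Edges()[readerID]) // contains subID
}
```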
diff --git a/internal/etl/manager_test.go b/internal/etl/manager_test.go
new file mode 100644
index 00000000..87fa8c6f
--- /dev/null
+++ b/internal/etl/manager_test.go
@@ -0,0 +1,124 @@
+package etl
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/base-org/pessimism/internal/core"
+ "github.com/base-org/pessimism/internal/etl/registry"
+ "github.com/base-org/pessimism/internal/mocks"
+ "github.com/base-org/pessimism/internal/state"
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestETL(t *testing.T) {
+ var tests = []struct {
+ name string
+ function string
+ description string
+
+ constructionLogic func() ETL
+ testLogic func(t *testing.T, m ETL)
+ }{
+ {
+ name: "Success - Subscription Process",
+ function: "CreateProcess",
+
+ constructionLogic: func() ETL {
+ r := registry.New()
+ ctrl := gomock.NewController(t)
+
+ ctx, _ := mocks.Context(context.Background(), ctrl)
+
+ ctx = context.WithValue(ctx, core.State, state.NewMemState())
+
+ return New(ctx, NewAnalyzer(r), r, NewStore(), NewGraph(), nil)
+ },
+
+ testLogic: func(t *testing.T, etl ETL) {
+ id := core.MakeProcessID(1, 1, 1, 1)
+
+ register, err := registry.New().GetDataTopic(core.BlockHeader)
+
+ assert.NoError(t, err)
+
+ cc := &core.ClientConfig{
+ Network: core.Layer1,
+ }
+ p, err := etl.CreateProcess(cc, id, core.PathID{}, register)
+ assert.NoError(t, err)
+
+ assert.Equal(t, p.ID(), id)
+ assert.Equal(t, p.Type(), register.ProcessType)
+ assert.Equal(t, p.EmitType(), register.DataType)
+
+ },
+ },
+ {
+ name: "Successful Path Creation",
+ function:    "CreateProcessPath",
+ description: "CreateProcessPath should reuse an existing path when an equivalent one already exists",
+
+ constructionLogic: func() ETL {
+ reg := registry.New()
+ ctrl := gomock.NewController(t)
+
+ ctx, ms := mocks.Context(context.Background(), ctrl)
+
+ ms.MockL1Node.EXPECT().BlockHeaderByNumber(gomock.Any()).Return(nil, fmt.Errorf("keep going")).AnyTimes()
+
+ ctx = context.WithValue(ctx, core.State, state.NewMemState())
+
+ return New(ctx, NewAnalyzer(reg), reg, NewStore(), NewGraph(), nil)
+ },
+
+ testLogic: func(t *testing.T, etl ETL) {
+ pCfg := &core.PathConfig{
+ Network: core.Layer1,
+ DataType: core.Log,
+ PathType: core.Live,
+ ClientConfig: &core.ClientConfig{
+ Network: core.Layer1,
+ PollInterval: time.Hour * 1,
+ },
+ }
+
+ id1, reuse, err := etl.CreateProcessPath(pCfg)
+ assert.NoError(t, err)
+ assert.False(t, reuse)
+ assert.NotEqual(t, id1, core.PathID{})
+
+ // Now create a new path with the same config
+ // & ensure that the previous path is reused
+
+ id2, reuse, err := etl.CreateProcessPath(pCfg)
+ assert.NoError(t, err)
+ assert.True(t, reuse)
+ assert.Equal(t, id1, id2)
+
+ // Now run the path
+ err = etl.Run(id1)
+ assert.NoError(t, err)
+
+ // Ensure shutdown works
+ go func() {
+ _ = etl.EventLoop()
+ }()
+ err = etl.Shutdown()
+ assert.NoError(t, err)
+ },
+ },
+ }
+
+ for i, tc := range tests {
+ t.Run(fmt.Sprintf("%d-%s-%s", i, tc.function, tc.name), func(t *testing.T) {
+ testEtl := tc.constructionLogic()
+ tc.testLogic(t, testEtl)
+ })
+
+ }
+
+}
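
For context, a condensed sketch of how the renamed `ETL` interface is expected to be driven outside of tests. Constructor calls and configuration values mirror `manager_test.go` above; in a real deployment the context would also carry configured L1/L2 clients rather than mocks.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/base-org/pessimism/internal/core"
	"github.com/base-org/pessimism/internal/etl"
	"github.com/base-org/pessimism/internal/etl/registry"
	"github.com/base-org/pessimism/internal/state"
)

func main() {
	r := registry.New()

	// The ETL expects an in-memory state store on the context, as in the tests.
	ctx := context.WithValue(context.Background(), core.State, state.NewMemState())

	// nil engine egress channel, matching the test construction above.
	e := etl.New(ctx, etl.NewAnalyzer(r), r, etl.NewStore(), etl.NewGraph(), nil)

	cfg := &core.PathConfig{
		Network:  core.Layer1,
		DataType: core.Log,
		PathType: core.Live,
		ClientConfig: &core.ClientConfig{
			Network:      core.Layer1,
			PollInterval: time.Minute,
		},
	}

	id, reused, err := e.CreateProcessPath(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Only run the path if it wasn't merged into an existing one.
	if !reused {
		if err := e.Run(id); err != nil {
			log.Fatal(err)
		}
	}

	go func() { _ = e.EventLoop() }()

	// ... later, on shutdown:
	if err := e.Shutdown(); err != nil {
		log.Fatal(err)
	}
}
```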
diff --git a/internal/etl/path.go b/internal/etl/path.go
new file mode 100644
index 00000000..8dc8495a
--- /dev/null
+++ b/internal/etl/path.go
@@ -0,0 +1,135 @@
+package etl
+
+import (
+ "fmt"
+ "math/big"
+ "sync"
+
+ "github.com/base-org/pessimism/internal/core"
+ "github.com/base-org/pessimism/internal/etl/process"
+ "github.com/base-org/pessimism/internal/logging"
+ "go.uber.org/zap"
+)
+
+// Process path
+type Path interface {
+ BlockHeight() (*big.Int, error)
+ Config() *core.PathConfig
+ Processes() []process.Process
+ UUID() core.PathID
+ State() ActivityState
+
+ Close() error
+ Run(wg *sync.WaitGroup)
+ AddEngineRelay(engineChan chan core.HeuristicInput) error
+}
+
+type path struct {
+ id core.PathID
+ cfg *core.PathConfig
+
+ state ActivityState
+
+ processes []process.Process
+}
+
+// NewPath ... Initializer
+func NewPath(cfg *core.PathConfig, id core.PathID, procs []process.Process) (Path, error) {
+ if len(procs) == 0 {
+ return nil, fmt.Errorf(emptyPathError)
+ }
+
+ p := &path{
+ cfg: cfg,
+ id: id,
+ processes: procs,
+ state: INACTIVE,
+ }
+
+ return p, nil
+}
+
+func (path *path) State() ActivityState {
+ return path.state
+}
+
+func (path *path) Config() *core.PathConfig {
+ return path.cfg
+}
+
+func (path *path) Processes() []process.Process {
+ return path.processes
+}
+
+func (path *path) UUID() core.PathID {
+ return path.id
+}
+
+func (path *path) BlockHeight() (*big.Int, error) {
+ // We assume that all paths have a chain reader as their last process
+ p := path.processes[len(path.processes)-1]
+ cr, ok := p.(*process.ChainReader)
+ if !ok {
+ return nil, fmt.Errorf("could not cast process to chain reader")
+ }
+
+ return cr.Height()
+}
+
+// AddEngineRelay ... Adds a relay to the path that forces it to send transformed heuristic input
+// to a risk engine
+func (path *path) AddEngineRelay(engineChan chan core.HeuristicInput) error {
+ p := path.processes[0]
+ eir := core.NewEngineRelay(path.id, engineChan)
+
+ logging.NoContext().Debug("Adding engine relay to path",
+ zap.String(logging.Process, p.ID().String()),
+ zap.String(logging.Path, p.PathID().String()))
+
+ return p.AddEngineRelay(eir)
+}
+
+// Run ... Spawns process event loops
+func (path *path) Run(wg *sync.WaitGroup) {
+ for _, p := range path.processes {
+ wg.Add(1)
+
+ go func(p process.Process, wg *sync.WaitGroup) {
+ defer wg.Done()
+
+ logging.NoContext().
+ Debug("Starting process",
+ zap.String(logging.Process, p.ID().String()),
+ zap.String(logging.Path, path.id.String()))
+
+ if err := p.EventLoop(); err != nil {
+ // NOTE - Consider killing the entire path if one process fails
+ // Otherwise dangling processes will be left in a running state
+ logging.NoContext().Error("Obtained error from event loop", zap.Error(err),
+ zap.String(logging.Process, p.ID().String()),
+ zap.String(logging.Path, path.id.String()))
+ path.state = CRASHED
+ }
+ }(p, wg)
+ }
+
+ path.state = ACTIVE
+}
+
+// Close ... Closes all processes in the path
+func (path *path) Close() error {
+ for _, p := range path.processes {
+ if p.ActivityState() != process.Terminated {
+ logging.NoContext().
+ Debug("Shutting down path process",
+ zap.String(logging.Process, p.ID().String()),
+ zap.String(logging.Path, path.id.String()))
+
+ if err := p.Close(); err != nil {
+ return err
+ }
+ }
+ }
+ path.state = TERMINATED
+ return nil
+}
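
A rough lifecycle sketch for the new `Path` type: construct, bind an engine relay, run, and close. It uses a single mock reader process from the internal test helpers, so it is illustrative rather than production wiring.

```go
package main

import (
	"context"
	"sync"

	"github.com/base-org/pessimism/internal/core"
	"github.com/base-org/pessimism/internal/etl"
	"github.com/base-org/pessimism/internal/etl/process"
	"github.com/base-org/pessimism/internal/mocks"
)

func main() {
	// A mock reader stands in for a real chain reader process here.
	reader, err := mocks.NewReader(context.Background(), core.BlockHeader)
	if err != nil {
		panic(err)
	}

	p, err := etl.NewPath(&core.PathConfig{}, core.PathID{}, []process.Process{reader})
	if err != nil {
		panic(err)
	}

	// Bind the path's output to a risk engine ingress channel.
	relay := make(chan core.HeuristicInput)
	if err := p.AddEngineRelay(relay); err != nil {
		panic(err)
	}

	// Spawn the per-process event loops, then shut them down.
	wg := &sync.WaitGroup{}
	p.Run(wg)

	if err := p.Close(); err != nil {
		panic(err)
	}
	wg.Wait()
}
```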
diff --git a/internal/etl/path_test.go b/internal/etl/path_test.go
new file mode 100644
index 00000000..87f1003e
--- /dev/null
+++ b/internal/etl/path_test.go
@@ -0,0 +1,119 @@
+package etl_test
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "testing"
+
+ "github.com/base-org/pessimism/internal/core"
+ "github.com/base-org/pessimism/internal/etl"
+ "github.com/base-org/pessimism/internal/etl/process"
+ "github.com/base-org/pessimism/internal/mocks"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestPath(t *testing.T) {
+ var tests = []struct {
+ name string
+ function string
+
+ constructionLogic func() etl.Path
+ testLogic func(t *testing.T, path etl.Path)
+ }{
+ {
+ name: "Successful Construction",
+ function: "NewPath",
+ constructionLogic: func() etl.Path {
+ sub, _ := mocks.NewSubscriber(
+ context.Background(),
+ core.BlockHeader,
+ core.Log)
+
+ testO, _ := mocks.NewReader(
+ context.Background(),
+ core.BlockHeader)
+
+ path, err := etl.NewPath(
+ nil,
+ core.PathID{},
+ []process.Process{sub, testO})
+
+ if err != nil {
+ panic(err)
+ }
+
+ return path
+ },
+ testLogic: func(t *testing.T, path etl.Path) {
+
+ assert.Equal(t, path.Processes()[0].EmitType(), core.Log)
+ assert.Equal(t, path.Processes()[1].EmitType(), core.BlockHeader)
+ },
+ },
+ {
+ name: "Successful Run",
+ function: "AddEngineRelay",
+ constructionLogic: func() etl.Path {
+
+ testO, _ := mocks.NewReader(
+ context.Background(),
+ core.BlockHeader)
+
+ pl, err := etl.NewPath(
+ nil,
+ core.PathID{},
+ []process.Process{testO})
+
+ if err != nil {
+ panic(err)
+ }
+
+ return pl
+ },
+ testLogic: func(t *testing.T, pl etl.Path) {
+
+ relay := make(chan core.HeuristicInput)
+ err := pl.AddEngineRelay(relay)
+ assert.NoError(t, err)
+ },
+ },
+ {
+ name: "Successful Run",
+ function: "RunPath",
+ constructionLogic: func() etl.Path {
+
+ testO, _ := mocks.NewReader(
+ context.Background(),
+ core.BlockHeader)
+
+ pl, err := etl.NewPath(
+ nil,
+ core.PathID{},
+ []process.Process{testO})
+
+ if err != nil {
+ panic(err)
+ }
+
+ return pl
+ },
+ testLogic: func(t *testing.T, pl etl.Path) {
+ assert.Equal(t, pl.State(), etl.INACTIVE, "Path should be inactive")
+
+ wg := &sync.WaitGroup{}
+ pl.Run(wg)
+
+ assert.Equal(t, pl.State(), etl.ACTIVE, "Path should be active")
+ },
+ },
+ }
+
+ for i, tc := range tests {
+ t.Run(fmt.Sprintf("%d-%s-%s", i, tc.function, tc.name), func(t *testing.T) {
+ path := tc.constructionLogic()
+ tc.testLogic(t, path)
+ })
+
+ }
+}
diff --git a/internal/etl/pipeline/analysis_test.go b/internal/etl/pipeline/analysis_test.go
deleted file mode 100644
index 99b6e055..00000000
--- a/internal/etl/pipeline/analysis_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package pipeline_test
-
-import (
- "context"
- "testing"
-
- "github.com/base-org/pessimism/internal/core"
- "github.com/base-org/pessimism/internal/etl/component"
- "github.com/base-org/pessimism/internal/etl/pipeline"
- "github.com/base-org/pessimism/internal/etl/registry"
- "github.com/base-org/pessimism/internal/mocks"
- "github.com/stretchr/testify/assert"
-)
-
-func Test_Mergable(t *testing.T) {
- var tests = []struct {
- name string
- function string
- description string
- testConstructor func() pipeline.Analyzer
- testLogic func(t *testing.T, a pipeline.Analyzer)
- }{
- {
- name: "Successful Pipeline Merge",
- function: "Mergable",
- description: "Mergable function should return true if pipelines are mergable",
- testConstructor: func() pipeline.Analyzer {
- dRegistry := registry.NewRegistry()
- return pipeline.NewAnalyzer(dRegistry)
- },
- testLogic: func(t *testing.T, a pipeline.Analyzer) {
- // Setup test pipelines
- mockOracle, err := mocks.NewDummyOracle(context.Background(), core.GethBlock)
- assert.NoError(t, err)
-
- comps := []component.Component{mockOracle}
- testPUUID := core.MakePUUID(0, core.MakeCUUID(core.Live, 0, 0, 0), core.MakeCUUID(core.Live, 0, 0, 0))
- testPUUID2 := core.MakePUUID(0, core.MakeCUUID(core.Live, 0, 0, 0), core.MakeCUUID(core.Live, 0, 0, 0))
-
- testCfg := &core.PipelineConfig{
- PipelineType: core.Live,
- ClientConfig: &core.ClientConfig{},
- }
-
- p1, err := pipeline.NewPipeline(testCfg, testPUUID, comps)
- assert.NoError(t, err)
-
- p2, err := pipeline.NewPipeline(testCfg, testPUUID2, comps)
- assert.NoError(t, err)
-
- assert.True(t, a.Mergable(p1, p2))
- },
- },
- {
- name: "Failure Pipeline Merge",
- function: "Mergable",
- description: "Mergable function should return false when PID's do not match",
- testConstructor: func() pipeline.Analyzer {
- dRegistry := registry.NewRegistry()
- return pipeline.NewAnalyzer(dRegistry)
- },
- testLogic: func(t *testing.T, a pipeline.Analyzer) {
- // Setup test pipelines
- mockOracle, err := mocks.NewDummyOracle(context.Background(), core.GethBlock)
- assert.NoError(t, err)
-
- comps := []component.Component{mockOracle}
- testPUUID := core.MakePUUID(0, core.MakeCUUID(core.Backtest, 0, 0, 0), core.MakeCUUID(core.Live, 0, 0, 0))
- testPUUID2 := core.MakePUUID(0, core.MakeCUUID(core.Live, 0, 0, 0), core.MakeCUUID(core.Live, 0, 0, 0))
-
- testCfg := &core.PipelineConfig{
- PipelineType: core.Live,
- ClientConfig: &core.ClientConfig{},
- }
-
- p1, err := pipeline.NewPipeline(testCfg, testPUUID, comps)
- assert.NoError(t, err)
-
- p2, err := pipeline.NewPipeline(testCfg, testPUUID2, comps)
- assert.NoError(t, err)
-
- assert.False(t, a.Mergable(p1, p2))
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- a := test.testConstructor()
- test.testLogic(t, a)
- })
- }
-
-}
diff --git a/internal/etl/pipeline/graph.go b/internal/etl/pipeline/graph.go
deleted file mode 100644
index 00b3254b..00000000
--- a/internal/etl/pipeline/graph.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package pipeline
-
-import (
- "fmt"
-
- "github.com/base-org/pessimism/internal/core"
- "github.com/base-org/pessimism/internal/etl/component"
- "github.com/base-org/pessimism/internal/logging"
- "go.uber.org/zap"
-)
-
-// ComponentGraph ...
-type ComponentGraph interface {
- ComponentExists(cUIID core.CUUID) bool
- GetComponent(cUIID core.CUUID) (component.Component, error)
- AddEdge(cUUID1, cUUID2 core.CUUID) error
- AddComponent(cUIID core.CUUID, comp component.Component) error
- AddComponents(cSlice []component.Component) error
-
- Edges() map[core.CUUID][]core.CUUID // Useful for testing
-
- // TODO(#23): Manager DAG Component Removal Support
- RemoveEdge(_, _ core.CUUID) error
- RemoveComponent(_ core.CUUID) error
-}
-
-// cNode ... Used to store critical component graph entry data
-type cNode struct {
- comp component.Component
- edges map[core.CUUID]interface{}
- outType core.RegisterType
-}
-
-// newNode ... Intitializer for graph node entry; stores critical routing information
-// & component metadata
-func newNode(c component.Component, rt core.RegisterType) *cNode {
- return &cNode{
- comp: c,
- outType: rt,
- edges: make(map[core.CUUID]interface{}),
- }
-}
-
-// cGraph ... Represents a directed acyclic component graph (DAG)
-type cGraph struct {
- edgeMap map[core.CUUID]*cNode
-}
-
-// NewComponentGraph ... Initializer
-func NewComponentGraph() ComponentGraph {
- return &cGraph{
- edgeMap: make(map[core.CUUID]*cNode, 0),
- }
-}
-
-// componentExists ... Returns true if component node already exists for UUID, false otherwise
-func (graph *cGraph) ComponentExists(cUIID core.CUUID) bool {
- _, exists := graph.edgeMap[cUIID]
- return exists
-}
-
-// getComponent ... Returns a component entry for some component ID
-func (graph *cGraph) GetComponent(cUIID core.CUUID) (component.Component, error) {
- if graph.ComponentExists(cUIID) {
- return graph.edgeMap[cUIID].comp, nil
- }
-
- return nil, fmt.Errorf(cUUIDNotFoundErr, cUIID)
-}
-
-/*
-NOTE - There is no check to ensure that a cyclic edge is being added, meaning
- a caller could create an edge between B->A assuming edge A->B already exists.
- This would contradict the acyclic assumption of a DAG but is fortunately
- circumnavigated since all components declare entrypoint register dependencies,
- meaning that component could only be susceptible to bipartite connectivity
- in the circumstance where a component declares inverse input->output of an
- existing component.
-*/
-
-// TODO(#30): Pipeline Collisions Occur When They Shouldn't
-// addEdge ... Adds edge between two preconstructed constructed component nodes
-func (graph *cGraph) AddEdge(cUUID1, cUUID2 core.CUUID) error {
- entry1, found := graph.edgeMap[cUUID1]
- if !found {
- return fmt.Errorf(cUUIDNotFoundErr, cUUID1.String())
- }
-
- entry2, found := graph.edgeMap[cUUID2]
- if !found {
- return fmt.Errorf(cUUIDNotFoundErr, cUUID2.String())
- }
-
- logging.NoContext().
- Debug("Adding edge between components",
- zap.String("from", entry1.comp.UUID().String()),
- zap.String("to", entry2.comp.UUID().String()))
-
- // Edge already exists edgecase (No pun)
- if _, exists := entry1.edges[entry2.comp.UUID()]; exists {
- return fmt.Errorf(edgeExistsErr, cUUID1.String(), cUUID2.String())
- }
-
- c2Ingress, err := entry2.comp.GetIngress(entry1.outType)
- if err != nil {
- return err
- }
-
- if err := entry1.comp.AddEgress(cUUID2, c2Ingress); err != nil {
- return err
- }
-
- // Update edge mapping with new link
- graph.edgeMap[cUUID1].edges[cUUID2] = nil
-
- return nil
-}
-
-// TODO(#23): Manager DAG Component Removal Support
-// removeEdge ... Removes an edge from the graph
-func (graph *cGraph) RemoveEdge(_, _ core.CUUID) error {
- return nil
-}
-
-// TODO(#23): Manager DAG Component Removal Support
-// removeComponent ... Removes a component from the graph
-func (graph *cGraph) RemoveComponent(_ core.CUUID) error {
- return nil
-}
-
-// AddComponent ... Adds component node entry to edge mapping
-func (graph *cGraph) AddComponent(cUIID core.CUUID, comp component.Component) error {
- if _, exists := graph.edgeMap[cUIID]; exists {
- return fmt.Errorf(cUUIDExistsErr, cUIID)
- }
-
- graph.edgeMap[cUIID] = newNode(comp, comp.OutputType())
-
- return nil
-}
-
-// AddComponents ... Inserts all components from some slice into edge mapping
-func (graph *cGraph) AddComponents(components []component.Component) error {
- // Add all component entries to graph
- for _, c := range components {
- if err := graph.AddComponent(c.UUID(), c); err != nil {
- return err
- }
- }
-
- // Add edges between components
- for i := 1; i < len(components); i++ {
- err := graph.AddEdge(components[i].UUID(), components[i-1].UUID())
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// Edges ... Returns a representation of all graph edges between component UUIDs
-func (graph *cGraph) Edges() map[core.CUUID][]core.CUUID {
- uuidMap := make(map[core.CUUID][]core.CUUID, len(graph.edgeMap))
-
- for cUIID, cEntry := range graph.edgeMap {
- cEdges := make([]core.CUUID, len(cEntry.edges))
-
- i := 0
- for edge := range cEntry.edges {
- cEdges[i] = edge
- i++
- }
-
- uuidMap[cUIID] = cEdges
- }
-
- return uuidMap
-}
diff --git a/internal/etl/pipeline/graph_test.go b/internal/etl/pipeline/graph_test.go
deleted file mode 100644
index 0223f56a..00000000
--- a/internal/etl/pipeline/graph_test.go
+++ /dev/null
@@ -1,191 +0,0 @@
-package pipeline_test
-
-import (
- "context"
- "fmt"
- "testing"
-
- "github.com/base-org/pessimism/internal/core"
-
- pl "github.com/base-org/pessimism/internal/etl/pipeline"
- "github.com/base-org/pessimism/internal/mocks"
-
- "github.com/stretchr/testify/assert"
-)
-
-var (
- testCUUID1 = core.MakeCUUID(69, 69, 69, 69)
- testCUUID2 = core.MakeCUUID(42, 42, 42, 42)
-)
-
-func Test_Graph(t *testing.T) {
- var tests = []struct {
- name string
- function string
- description string
-
- constructionLogic func() pl.ComponentGraph
- testLogic func(*testing.T, pl.ComponentGraph)
- }{
- {
- name: "Successful Component Node Insertion",
- function: "AddComponent",
- description: "When a component is added to the graph, it should persist within the graph's edge mapping",
-
- constructionLogic: pl.NewComponentGraph,
- testLogic: func(t *testing.T, g pl.ComponentGraph) {
- cUUID := core.MakeCUUID(69, 69, 69, 69)
-
- component, err := mocks.NewDummyPipe(context.Background(), core.GethBlock, core.AccountBalance)
- assert.NoError(t, err)
-
- err = g.AddComponent(cUUID, component)
- assert.NoError(t, err, "Component addition should resolve to Nil")
-
- actualComponent, err := g.GetComponent(cUUID)
- assert.NoError(t, err, "Component retrieval should resolve to Nil")
-
- assert.Equal(t, component, actualComponent)
-
- edges := g.Edges()
-
- assert.Contains(t, edges, cUUID)
-
- assert.Len(t, edges[cUUID], 0, "No edges should exist yet")
-
- },
- },
- {
- name: "Failed Cyclic Edge Addition",
- function: "addEdge",
- description: "When an edge between two components already exists (A->B), then an inverted edge (B->A) should not be possible",
-
- constructionLogic: func() pl.ComponentGraph {
- g := pl.NewComponentGraph()
-
- comp1, err := mocks.NewDummyOracle(context.Background(), core.GethBlock)
- if err != nil {
- panic(err)
- }
-
- if err = g.AddComponent(testCUUID1, comp1); err != nil {
- panic(err)
- }
-
- comp2, err := mocks.NewDummyPipe(context.Background(), core.GethBlock, core.AccountBalance)
- if err != nil {
- panic(err)
- }
-
- if err = g.AddComponent(testCUUID2, comp2); err != nil {
- panic(err)
- }
-
- if err = g.AddEdge(testCUUID1, testCUUID2); err != nil {
- panic(err)
- }
-
- return g
- },
-
- testLogic: func(t *testing.T, g pl.ComponentGraph) {
- err := g.AddEdge(testCUUID2, testCUUID1)
- assert.Error(t, err)
-
- },
- },
- {
- name: "Failed Duplicate Edge Addition",
- function: "AddEdge",
- description: "When a unique edge exists between two components (A->B), a new edge should not be possible",
-
- constructionLogic: func() pl.ComponentGraph {
- g := pl.NewComponentGraph()
-
- comp1, err := mocks.NewDummyOracle(context.Background(), core.GethBlock)
- if err != nil {
- panic(err)
- }
-
- if err = g.AddComponent(testCUUID1, comp1); err != nil {
- panic(err)
- }
-
- comp2, err := mocks.NewDummyPipe(context.Background(), core.GethBlock, core.AccountBalance)
- if err != nil {
- panic(err)
- }
-
- if err = g.AddComponent(testCUUID2, comp2); err != nil {
- panic(err)
- }
-
- if err = g.AddEdge(testCUUID1, testCUUID2); err != nil {
- panic(err)
- }
-
- return g
- },
-
- testLogic: func(t *testing.T, g pl.ComponentGraph) {
- err := g.AddEdge(testCUUID1, testCUUID2)
- assert.Error(t, err)
-
- },
- },
- {
- name: "Successful Edge Addition",
- function: "AddEdge",
- description: "When two components are inserted, an edge should be possible between them",
-
- constructionLogic: func() pl.ComponentGraph {
- g := pl.NewComponentGraph()
-
- comp1, err := mocks.NewDummyOracle(context.Background(), core.GethBlock)
- if err != nil {
- panic(err)
- }
-
- if err = g.AddComponent(testCUUID1, comp1); err != nil {
- panic(err)
- }
-
- comp2, err := mocks.NewDummyPipe(context.Background(), core.GethBlock, core.AccountBalance)
- if err != nil {
- panic(err)
- }
-
- if err = g.AddComponent(testCUUID2, comp2); err != nil {
- panic(err)
- }
-
- return g
- },
-
- testLogic: func(t *testing.T, g pl.ComponentGraph) {
- comp1, _ := g.GetComponent(testCUUID1)
-
- err := g.AddEdge(testCUUID1, testCUUID2)
- assert.NoError(t, err)
-
- err = comp1.AddEgress(testCUUID2, core.NewTransitChannel())
- assert.Error(t, err, "Error should be returned when trying to add existing outgress of component2 to component1 ingress")
-
- assert.True(t, g.ComponentExists(testCUUID1))
- assert.True(t, g.ComponentExists(testCUUID2))
-
- edgeMap := g.Edges()
- assert.Contains(t, edgeMap[testCUUID1], testCUUID2, "ID1 should have a mapped edge to ID2")
-
- },
- },
- }
-
- for i, tc := range tests {
- t.Run(fmt.Sprintf("%d-%s-%s", i, tc.function, tc.name), func(t *testing.T) {
- testRouter := tc.constructionLogic()
- tc.testLogic(t, testRouter)
- })
-
- }
-}
diff --git a/internal/etl/pipeline/manager.go b/internal/etl/pipeline/manager.go
deleted file mode 100644
index a3327997..00000000
--- a/internal/etl/pipeline/manager.go
+++ /dev/null
@@ -1,296 +0,0 @@
-//go:generate mockgen -package mocks --destination ../../mocks/etl_manager.go --mock_names Manager=EtlManager . Manager
-
-package pipeline
-
-import (
- "context"
- "fmt"
- "math/big"
- "sync"
-
- "github.com/base-org/pessimism/internal/core"
- "github.com/base-org/pessimism/internal/etl/component"
- "github.com/base-org/pessimism/internal/etl/registry"
- "github.com/base-org/pessimism/internal/logging"
- "github.com/base-org/pessimism/internal/metrics"
-
- "go.uber.org/zap"
-)
-
-// Manager ... ETL manager interface
-type Manager interface {
- InferComponent(cc *core.ClientConfig, cUUID core.CUUID, pUUID core.PUUID,
- register *core.DataRegister) (component.Component, error)
- GetStateKey(rt core.RegisterType) (*core.StateKey, bool, error)
- GetPipelineHeight(id core.PUUID) (*big.Int, error)
- CreateDataPipeline(cfg *core.PipelineConfig) (core.PUUID, bool, error)
- RunPipeline(pID core.PUUID) error
- ActiveCount() int
-
- core.Subsystem
-}
-
-// etlManager ... ETL manager
-type etlManager struct {
- ctx context.Context
- cancel context.CancelFunc
-
- analyzer Analyzer
- dag ComponentGraph
- store EtlStore
- metrics metrics.Metricer
-
- egress chan core.HeuristicInput
-
- registry registry.Registry
- wg sync.WaitGroup
-}
-
-// NewManager ... Initializer
-func NewManager(ctx context.Context, analyzer Analyzer, cRegistry registry.Registry,
- store EtlStore, dag ComponentGraph,
- eo chan core.HeuristicInput) Manager {
- ctx, cancel := context.WithCancel(ctx)
- stats := metrics.WithContext(ctx)
-
- m := &etlManager{
- analyzer: analyzer,
- ctx: ctx,
- cancel: cancel,
- dag: dag,
- store: store,
- registry: cRegistry,
- egress: eo,
- metrics: stats,
- wg: sync.WaitGroup{},
- }
-
- return m
-}
-
-// GetRegister ... Returns a data register for a given register type
-func (em *etlManager) GetRegister(rt core.RegisterType) (*core.DataRegister, error) {
- return em.registry.GetRegister(rt)
-}
-
-// CreateDataPipeline ... Creates an ETL data pipeline provided a pipeline configuration
-// Returns a pipeline UUID and a boolean indicating if the pipeline was reused
-func (em *etlManager) CreateDataPipeline(cfg *core.PipelineConfig) (core.PUUID, bool, error) {
- // NOTE - If some of these early sub-system operations succeed but lower function
- // code logic fails, then some rollback will need be triggered to undo prior applied state operations
- logger := logging.WithContext(em.ctx)
-
- depPath, err := em.registry.GetDependencyPath(cfg.DataType)
- if err != nil {
- return core.NilPUUID(), false, err
- }
-
- pUUID := depPath.GeneratePUUID(cfg.PipelineType, cfg.Network)
-
- components, err := em.getComponents(cfg, pUUID, depPath)
- if err != nil {
- return core.NilPUUID(), false, err
- }
-
- logger.Debug("Constructing pipeline",
- zap.String(logging.PUUIDKey, pUUID.String()))
-
- pipeline, err := NewPipeline(cfg, pUUID, components)
- if err != nil {
- return core.NilPUUID(), false, err
- }
-
- mPUUID, err := em.getMergeUUID(pUUID, pipeline)
- if err != nil {
- return core.NilPUUID(), false, err
- }
-
- if mPUUID != core.NilPUUID() { // A pipeline can be reused
- return mPUUID, true, nil
- }
-
- // Bind communication route between pipeline and risk engine
- if err := pipeline.AddEngineRelay(em.egress); err != nil {
- return core.NilPUUID(), false, err
- }
-
- // Add pipeline object to the store
- em.store.AddPipeline(pUUID, pipeline)
-
- return pUUID, false, nil
-}
-
-// RunPipeline ... Runs pipeline session for some provided pUUID
-func (em *etlManager) RunPipeline(pUUID core.PUUID) error {
- // 1. Get pipeline from store
- pipeline, err := em.store.GetPipelineFromPUUID(pUUID)
- if err != nil {
- return err
- }
-
- // 2. Add pipeline components to the component graph
- if err := em.dag.AddComponents(pipeline.Components()); err != nil {
- return err
- }
-
- logging.WithContext(em.ctx).Info("Running pipeline",
- zap.String(logging.PUUIDKey, pUUID.String()))
-
- // 3. Run pipeline
- pipeline.Run(&em.wg)
-
- // Pipeline successfully created, increment for type and network
- em.metrics.IncActivePipelines(pUUID.PipelineType(), pUUID.NetworkType())
- return nil
-}
-
-// EventLoop ... Driver ran as separate go routine
-func (em *etlManager) EventLoop() error {
- logger := logging.WithContext(em.ctx)
-
- for {
- <-em.ctx.Done()
- logger.Info("Received shutdown request")
- return nil
- }
-}
-
-// Shutdown ... Shuts down all pipelines
-func (em *etlManager) Shutdown() error {
- em.cancel()
- logger := logging.WithContext(em.ctx)
-
- for _, pl := range em.store.GetAllPipelines() {
- logger.Info("Shutting down pipeline",
- zap.String(logging.PUUIDKey, pl.UUID().String()))
-
- if err := pl.Close(); err != nil {
- logger.Error("Failed to close pipeline",
- zap.String(logging.PUUIDKey, pl.UUID().String()))
- return err
- }
- em.metrics.DecActivePipelines(pl.UUID().PipelineType(), pl.UUID().NetworkType())
- }
- logger.Debug("Waiting for all component routines to end")
- em.wg.Wait()
-
- return nil
-}
-
-// ActiveCount ... Returns the number of active pipelines
-func (em *etlManager) ActiveCount() int {
- return em.store.ActiveCount()
-}
-
-// getComponents ... Returns all components provided a slice of register definitions
-func (em *etlManager) getComponents(cfg *core.PipelineConfig, pUUID core.PUUID,
- depPath core.RegisterDependencyPath) ([]component.Component, error) {
- components := make([]component.Component, 0)
-
- for _, register := range depPath.Path {
- cUUID := core.MakeCUUID(cfg.PipelineType, register.ComponentType, register.DataType, cfg.Network)
-
- c, err := em.InferComponent(cfg.ClientConfig, cUUID, pUUID, register)
- if err != nil {
- return []component.Component{}, err
- }
-
- components = append(components, c)
- }
-
- return components, nil
-}
-
-// getMergeUUID ... Returns a pipeline UUID if a merging opportunity exists
-func (em *etlManager) getMergeUUID(pUUID core.PUUID, pipeline Pipeline) (core.PUUID, error) {
- pipelines := em.store.GetExistingPipelinesByPID(pUUID.PID)
-
- for _, pl := range pipelines {
- p, err := em.store.GetPipelineFromPUUID(pl)
- if err != nil {
- return core.NilPUUID(), err
- }
-
- if em.analyzer.Mergable(pipeline, p) { // Deploy heuristics to existing pipelines instead
- // This is a bit hacky since we aren't actually merging the pipelines
- return p.UUID(), nil
- }
- }
-
- return core.NilPUUID(), nil
-}
-
-// InferComponent ... Constructs a component provided a data register definition
-func (em *etlManager) InferComponent(cc *core.ClientConfig, cUUID core.CUUID, pUUID core.PUUID,
- register *core.DataRegister) (component.Component, error) {
- logging.WithContext(em.ctx).Debug("constructing component",
- zap.String("type", register.ComponentType.String()),
- zap.String("register_type", register.DataType.String()))
-
- // Embed options to avoid constructor boilerplate
- opts := []component.Option{component.WithCUUID(cUUID), component.WithPUUID(pUUID)}
-
- if register.Stateful() {
- // Propagate state key to component so that it can be used
- // by the component's definition logic
- sk := register.StateKey()
- err := sk.SetPUUID(pUUID)
- if err != nil {
- return nil, err
- }
-
- opts = append(opts, component.WithStateKey(sk))
- }
-
- switch register.ComponentType {
- case core.Oracle:
- init, success := register.ComponentConstructor.(component.OracleConstructorFunc)
- if !success {
- return nil, fmt.Errorf(fmt.Sprintf(couldNotCastErr, core.Oracle.String()))
- }
-
- return init(em.ctx, cc, opts...)
-
- case core.Pipe:
- init, success := register.ComponentConstructor.(component.PipeConstructorFunc)
- if !success {
- return nil, fmt.Errorf(fmt.Sprintf(couldNotCastErr, core.Pipe.String()))
- }
-
- return init(em.ctx, cc, opts...)
-
- case core.Aggregator:
- return nil, fmt.Errorf(noAggregatorErr)
-
- default:
- return nil, fmt.Errorf(unknownCompType, register.ComponentType.String())
- }
-}
-
-// GetStateKey ... Returns a state key provided a register type
-func (em *etlManager) GetStateKey(rt core.RegisterType) (*core.StateKey, bool, error) {
- dr, err := em.registry.GetRegister(rt)
- if err != nil {
- return nil, false, err
- }
-
- if dr.Stateful() {
- return dr.StateKey(), true, nil
- }
-
- return nil, false, nil
-}
-
-func (em *etlManager) GetPipelineHeight(id core.PUUID) (*big.Int, error) {
- pipeline, err := em.store.GetPipelineFromPUUID(id)
- if err != nil {
- return nil, err
- }
-
- height, err := pipeline.BlockHeight()
- if err != nil {
- return nil, err
- }
-
- return height, nil
-}
diff --git a/internal/etl/pipeline/manager_test.go b/internal/etl/pipeline/manager_test.go
deleted file mode 100644
index 611a4405..00000000
--- a/internal/etl/pipeline/manager_test.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package pipeline
-
-import (
- "context"
- "fmt"
- "testing"
- "time"
-
- "github.com/base-org/pessimism/internal/core"
- "github.com/base-org/pessimism/internal/etl/registry"
- "github.com/base-org/pessimism/internal/mocks"
- "github.com/base-org/pessimism/internal/state"
- "github.com/golang/mock/gomock"
- "github.com/stretchr/testify/assert"
-)
-
-func Test_Manager(t *testing.T) {
- var tests = []struct {
- name string
- function string
- description string
-
- constructionLogic func() Manager
- testLogic func(t *testing.T, m Manager)
- }{
- {
- name: "Successful Pipe Component Construction",
- function: "inferComponent",
- description: "inferComponent function should generate pipe component instance provided valid params",
-
- constructionLogic: func() Manager {
- reg := registry.NewRegistry()
- ctrl := gomock.NewController(t)
-
- ctx, _ := mocks.Context(context.Background(), ctrl)
-
- ctx = context.WithValue(ctx, core.State, state.NewMemState())
-
- return NewManager(ctx, NewAnalyzer(reg), reg, NewEtlStore(), NewComponentGraph(), nil)
- },
-
- testLogic: func(t *testing.T, m Manager) {
- cUUID := core.MakeCUUID(1, 1, 1, 1)
-
- register, err := registry.NewRegistry().GetRegister(core.GethBlock)
-
- assert.NoError(t, err)
-
- cc := &core.ClientConfig{
- Network: core.Layer1,
- }
- c, err := m.InferComponent(cc, cUUID, core.NilPUUID(), register)
- assert.NoError(t, err)
-
- assert.Equal(t, c.UUID(), cUUID)
- assert.Equal(t, c.Type(), register.ComponentType)
- assert.Equal(t, c.OutputType(), register.DataType)
-
- },
- },
- {
- name: "Successful Pipeline Creations",
- function: "CreateDataPipeline",
- description: "CreateDataPipeline should reuse existing pipeline if it exists",
-
- constructionLogic: func() Manager {
- reg := registry.NewRegistry()
- ctrl := gomock.NewController(t)
-
- ctx, _ := mocks.Context(context.Background(), ctrl)
-
- ctx = context.WithValue(ctx, core.State, state.NewMemState())
-
- return NewManager(ctx, NewAnalyzer(reg), reg, NewEtlStore(), NewComponentGraph(), nil)
- },
-
- testLogic: func(t *testing.T, m Manager) {
- pCfg := &core.PipelineConfig{
- Network: core.Layer1,
- DataType: core.EventLog,
- PipelineType: core.Live,
- ClientConfig: &core.ClientConfig{
- Network: core.Layer1,
- PollInterval: time.Hour * 1,
- },
- }
-
- pUUID1, reuse, err := m.CreateDataPipeline(pCfg)
- assert.NoError(t, err)
- assert.False(t, reuse)
- assert.NotEqual(t, pUUID1, core.NilPUUID())
-
- // Now create a new pipeline with the same config
- // & ensure that the previous pipeline is reused
-
- pUUID2, reuse, err := m.CreateDataPipeline(pCfg)
- assert.NoError(t, err)
- assert.True(t, reuse)
- assert.Equal(t, pUUID1, pUUID2)
-
- // Now run the pipeline
- err = m.RunPipeline(pUUID1)
- assert.NoError(t, err)
-
- // Ensure shutdown works
- go func() {
- _ = m.EventLoop()
- }()
- err = m.Shutdown()
- assert.NoError(t, err)
- },
- },
- }
-
- for i, tc := range tests {
- t.Run(fmt.Sprintf("%d-%s-%s", i, tc.function, tc.name), func(t *testing.T) {
- testPipeline := tc.constructionLogic()
- tc.testLogic(t, testPipeline)
- })
-
- }
-
-}
diff --git a/internal/etl/pipeline/pipeline.go b/internal/etl/pipeline/pipeline.go
deleted file mode 100644
index 5609ad25..00000000
--- a/internal/etl/pipeline/pipeline.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package pipeline
-
-import (
- "fmt"
- "math/big"
- "sync"
-
- "github.com/base-org/pessimism/internal/core"
- "github.com/base-org/pessimism/internal/etl/component"
- "github.com/base-org/pessimism/internal/logging"
- "go.uber.org/zap"
-)
-
-// Pipeline ... Pipeline interface
-type Pipeline interface {
- BlockHeight() (*big.Int, error)
- Config() *core.PipelineConfig
- Components() []component.Component
- UUID() core.PUUID
- State() ActivityState
-
- Close() error
- Run(wg *sync.WaitGroup)
- AddEngineRelay(engineChan chan core.HeuristicInput) error
-}
-
-// pipeline ... Pipeline implementation
-type pipeline struct {
- id core.PUUID
- cfg *core.PipelineConfig
-
- state ActivityState
-
- components []component.Component
-}
-
-// NewPipeline ... Initializer
-func NewPipeline(cfg *core.PipelineConfig, pUUID core.PUUID, comps []component.Component) (Pipeline, error) {
- if len(comps) == 0 {
- return nil, fmt.Errorf(emptyPipelineError)
- }
-
- pl := &pipeline{
- cfg: cfg,
- id: pUUID,
- components: comps,
- state: INACTIVE,
- }
-
- return pl, nil
-}
-
-// State ... Returns pipeline state
-func (pl *pipeline) State() ActivityState {
- return pl.state
-}
-
-// Config ... Returns pipeline config
-func (pl *pipeline) Config() *core.PipelineConfig {
- return pl.cfg
-}
-
-// Components ... Returns slice of all constituent components
-func (pl *pipeline) Components() []component.Component {
- return pl.components
-}
-
-// UUID ... Returns pipeline UUID
-func (pl *pipeline) UUID() core.PUUID {
- return pl.id
-}
-
-func (pl *pipeline) BlockHeight() (*big.Int, error) {
- // We assume that all pipelines have an oracle as their last component
- comp := pl.components[len(pl.components)-1]
- oracle, ok := comp.(*component.Oracle)
- if !ok {
- return nil, fmt.Errorf("could not cast component to oracle")
- }
-
- return oracle.Height()
-}
-
-// AddEngineRelay ... Adds a relay to the pipeline that forces it to send transformed heuristic input
-// to a risk engine
-func (pl *pipeline) AddEngineRelay(engineChan chan core.HeuristicInput) error {
- lastComponent := pl.components[0]
- eir := core.NewEngineRelay(pl.id, engineChan)
-
- logging.NoContext().Debug("Adding engine relay to pipeline",
- zap.String(logging.CUUIDKey, lastComponent.UUID().String()),
- zap.String(logging.PUUIDKey, pl.id.String()))
-
- return lastComponent.AddRelay(eir)
-}
-
-// Run ... Spawns and manages component event loops
-// for some pipeline
-func (pl *pipeline) Run(wg *sync.WaitGroup) {
- for _, comp := range pl.components {
- wg.Add(1)
-
- go func(c component.Component, wg *sync.WaitGroup) {
- defer wg.Done()
-
- logging.NoContext().
- Debug("Attempting to start component event loop",
- zap.String(logging.CUUIDKey, c.UUID().String()),
- zap.String(logging.PUUIDKey, pl.id.String()))
-
- if err := c.EventLoop(); err != nil {
- // NOTE - Consider killing the entire pipeline if one component fails
- // Otherwise dangling components will be left in a running state
- logging.NoContext().Error("Obtained error from event loop", zap.Error(err),
- zap.String(logging.CUUIDKey, c.UUID().String()),
- zap.String(logging.PUUIDKey, pl.id.String()))
- pl.state = CRASHED
- }
- }(comp, wg)
- }
-
- pl.state = ACTIVE
-}
-
-// Close ... Closes all components in the pipeline
-func (pl *pipeline) Close() error {
- for _, comp := range pl.components {
- if comp.ActivityState() != component.Terminated {
- logging.NoContext().
- Debug("Shutting down pipeline component",
- zap.String(logging.CUUIDKey, comp.UUID().String()),
- zap.String(logging.PUUIDKey, pl.id.String()))
-
- if err := comp.Close(); err != nil {
- return err
- }
- }
- }
- pl.state = TERMINATED
- return nil
-}
diff --git a/internal/etl/pipeline/pipeline_test.go b/internal/etl/pipeline/pipeline_test.go
deleted file mode 100644
index bcc0e22c..00000000
--- a/internal/etl/pipeline/pipeline_test.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package pipeline_test
-
-import (
- "context"
- "fmt"
- "sync"
- "testing"
-
- "github.com/base-org/pessimism/internal/core"
- "github.com/base-org/pessimism/internal/etl/component"
- "github.com/base-org/pessimism/internal/etl/pipeline"
- "github.com/base-org/pessimism/internal/mocks"
- "github.com/stretchr/testify/assert"
-)
-
-func Test_Pipeline(t *testing.T) {
- var tests = []struct {
- name string
- function string
-
- constructionLogic func() pipeline.Pipeline
- testLogic func(t *testing.T, pl pipeline.Pipeline)
- }{
- {
- name: "Successful Construction",
- function: "NewPipeline",
- constructionLogic: func() pipeline.Pipeline {
- testPipe, _ := mocks.NewDummyPipe(
- context.Background(),
- core.GethBlock,
- core.EventLog)
-
- testO, _ := mocks.NewDummyOracle(
- context.Background(),
- core.GethBlock)
-
- pl, err := pipeline.NewPipeline(
- nil,
- core.NilPUUID(),
- []component.Component{testPipe, testO})
-
- if err != nil {
- panic(err)
- }
-
- return pl
- },
- testLogic: func(t *testing.T, pl pipeline.Pipeline) {
-
- assert.Equal(t, pl.Components()[0].OutputType(), core.EventLog)
- assert.Equal(t, pl.Components()[1].OutputType(), core.GethBlock)
- },
- },
- {
- name: "Successful Run",
- function: "AddEngineRelay",
- constructionLogic: func() pipeline.Pipeline {
-
- testO, _ := mocks.NewDummyOracle(
- context.Background(),
- core.GethBlock)
-
- pl, err := pipeline.NewPipeline(
- nil,
- core.NilPUUID(),
- []component.Component{testO})
-
- if err != nil {
- panic(err)
- }
-
- return pl
- },
- testLogic: func(t *testing.T, pl pipeline.Pipeline) {
-
- relay := make(chan core.HeuristicInput)
- err := pl.AddEngineRelay(relay)
- assert.NoError(t, err)
- },
- },
- {
- name: "Successful Run",
- function: "RunPipeline",
- constructionLogic: func() pipeline.Pipeline {
-
- testO, _ := mocks.NewDummyOracle(
- context.Background(),
- core.GethBlock)
-
- pl, err := pipeline.NewPipeline(
- nil,
- core.NilPUUID(),
- []component.Component{testO})
-
- if err != nil {
- panic(err)
- }
-
- return pl
- },
- testLogic: func(t *testing.T, pl pipeline.Pipeline) {
- assert.Equal(t, pl.State(), pipeline.INACTIVE, "Pipeline should be inactive")
-
- wg := &sync.WaitGroup{}
- pl.Run(wg)
-
- assert.Equal(t, pl.State(), pipeline.ACTIVE, "Pipeline should be active")
- },
- },
- }
-
- for i, tc := range tests {
- t.Run(fmt.Sprintf("%d-%s-%s", i, tc.function, tc.name), func(t *testing.T) {
- testPipeline := tc.constructionLogic()
- tc.testLogic(t, testPipeline)
- })
-
- }
-}
diff --git a/internal/etl/pipeline/store.go b/internal/etl/pipeline/store.go
deleted file mode 100644
index a1b80daf..00000000
--- a/internal/etl/pipeline/store.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package pipeline
-
-import (
- "fmt"
-
- "github.com/base-org/pessimism/internal/core"
-)
-
-// TODO(#48): Pipeline Analysis Functionality
-// EtlStore ... Interface used to define all etl storage based functions
-type EtlStore interface {
- AddComponentLink(cID core.CUUID, pID core.PUUID)
- AddPipeline(id core.PUUID, pl Pipeline)
- ActiveCount() int
- GetAllPipelines() []Pipeline
- GetExistingPipelinesByPID(pPID core.PipelinePID) []core.PUUID
- GetPUUIDs(cID core.CUUID) ([]core.PUUID, error)
- GetPipelineFromPUUID(pUUID core.PUUID) (Pipeline, error)
-}
-
-// pipelineEntry ... value entry for some
-// pipeline with necessary metadata
-type pipelineEntry struct {
- id core.PUUID
- p Pipeline
-}
-
-type pipelineMap = map[core.PipelinePID][]pipelineEntry
-
-// etlStore ... Stores critical pipeline information
-//
-// pipeLines - Mapping used for storing all existing pipelines
-// compPipelines - Mapping used for storing all component-->[]PID entries
-type etlStore struct {
- pipelines pipelineMap
- compPipelines map[core.CUUID][]core.PUUID
-}
-
-// NewEtlStore ... Initializer
-func NewEtlStore() EtlStore {
- return &etlStore{
- compPipelines: make(map[core.CUUID][]core.PUUID),
- pipelines: make(pipelineMap),
- }
-}
-
-/*
-Note - PUUIDs can only conflict
- when pipeLineType = Live && activityState = Active
-*/
-
-// addComponentLink ... Creates an entry for some new C_UUID:P_UUID mapping
-func (store *etlStore) AddComponentLink(cUUID core.CUUID, pUUID core.PUUID) {
- // EDGE CASE - C_UUID:P_UUID pair already exists
- if _, found := store.compPipelines[cUUID]; !found { // Create slice
- store.compPipelines[cUUID] = make([]core.PUUID, 0)
- }
-
- store.compPipelines[cUUID] = append(store.compPipelines[cUUID], pUUID)
-}
-
-// addPipeline ... Creates and stores a new pipeline entry
-func (store *etlStore) AddPipeline(pUUID core.PUUID, pl Pipeline) {
- entry := pipelineEntry{
- id: pUUID,
- p: pl,
- }
-
- entrySlice, found := store.pipelines[pUUID.PID]
- if !found {
- entrySlice = make([]pipelineEntry, 0)
- }
-
- entrySlice = append(entrySlice, entry)
-
- store.pipelines[pUUID.PID] = entrySlice
-
- for _, comp := range pl.Components() {
- store.AddComponentLink(comp.UUID(), pUUID)
- }
-}
-
-// GetPUUIDs ... Returns all entry PIDs for some CID
-func (store *etlStore) GetPUUIDs(cID core.CUUID) ([]core.PUUID, error) {
- pIDs, found := store.compPipelines[cID]
-
- if !found {
- return []core.PUUID{}, fmt.Errorf("could not find key for %s", cID)
- }
-
- return pIDs, nil
-}
-
-// getPipelineByPID ... Returns pipeline store provided some PID
-func (store *etlStore) GetPipelineFromPUUID(pUUID core.PUUID) (Pipeline, error) {
- if _, found := store.pipelines[pUUID.PID]; !found {
- return nil, fmt.Errorf(pIDNotFoundErr, pUUID.String())
- }
-
- for _, plEntry := range store.pipelines[pUUID.PID] {
- if plEntry.id.UUID == pUUID.UUID {
- return plEntry.p, nil
- }
- }
-
- return nil, fmt.Errorf(uuidNotFoundErr)
-}
-
-// GetExistingPipelinesByPID ... Returns existing pipelines for some PID value
-func (store *etlStore) GetExistingPipelinesByPID(pPID core.PipelinePID) []core.PUUID {
- entries, exists := store.pipelines[pPID]
- if !exists {
- return []core.PUUID{}
- }
-
- pUUIDs := make([]core.PUUID, len(entries))
-
- for i, entry := range entries {
- pUUIDs[i] = entry.id
- }
-
- return pUUIDs
-}
-
-// Count ... Returns the number of active pipelines
-func (store *etlStore) ActiveCount() int {
- count := 0
-
- for _, entrySlice := range store.pipelines {
- for _, entry := range entrySlice {
- if entry.p.State() == ACTIVE {
- count++
- }
- }
- }
-
- return count
-}
-
-// GetAllPipelines ... Returns all existing/current pipelines
-func (store *etlStore) GetAllPipelines() []Pipeline {
- pipeLines := make([]Pipeline, 0)
-
- for _, entrySlice := range store.pipelines {
- for _, entry := range entrySlice {
- pipeLines = append(pipeLines, entry.p)
- }
- }
-
- return pipeLines
-}
diff --git a/internal/etl/pipeline/store_test.go b/internal/etl/pipeline/store_test.go
deleted file mode 100644
index 06b9ed22..00000000
--- a/internal/etl/pipeline/store_test.go
+++ /dev/null
@@ -1,272 +0,0 @@
-package pipeline_test
-
-import (
- "context"
- "fmt"
- "testing"
-
- "github.com/base-org/pessimism/internal/core"
- "github.com/base-org/pessimism/internal/etl/component"
- pl "github.com/base-org/pessimism/internal/etl/pipeline"
- "github.com/base-org/pessimism/internal/mocks"
- "github.com/stretchr/testify/assert"
-)
-
-var (
- nilPID = core.MakePUUID(0, core.NilCUUID(), core.NilCUUID())
-
- cID1 = core.MakeCUUID(0, 0, 0, 0)
- cID2 = core.MakeCUUID(0, 0, 0, 0)
-)
-
-// getTestPipeLine ... Returns a test pipeline
-func getTestPipeLine(ctx context.Context) pl.Pipeline {
-
- c1, err := mocks.NewDummyOracle(ctx, core.GethBlock, component.WithCUUID(cID2))
- if err != nil {
- panic(err)
- }
-
- c2, err := mocks.NewDummyPipe(ctx, core.GethBlock, core.EventLog, component.WithCUUID(cID1))
- if err != nil {
- panic(err)
- }
-
- comps := []component.Component{
- c1,
- c2,
- }
-
- pipeLine, err := pl.NewPipeline(&core.PipelineConfig{}, core.NilPUUID(), comps)
- if err != nil {
- panic(err)
- }
-
- return pipeLine
-}
-
-func Test_EtlStore(t *testing.T) {
- var tests = []struct {
- name string
- function string
- description string
-
- constructionLogic func() pl.EtlStore
- testLogic func(*testing.T, pl.EtlStore)
- }{
- {
- name: "Successful Add When PID Already Exists",
- function: "addPipeline",
- description: "",
-
- constructionLogic: func() pl.EtlStore {
- ctx := context.Background()
-
- testRegistry := pl.NewEtlStore()
- testPipeLine := getTestPipeLine(ctx)
-
- testRegistry.AddPipeline(core.NilPUUID(), testPipeLine)
- return testRegistry
- },
- testLogic: func(t *testing.T, store pl.EtlStore) {
- ctx := context.Background()
- testPipeLine := getTestPipeLine(ctx)
-
- pID2 := core.MakePUUID(
- 0,
- core.MakeCUUID(0, 0, 0, 1),
- core.MakeCUUID(0, 0, 0, 1),
- )
-
- store.AddPipeline(pID2, testPipeLine)
-
- for _, comp := range testPipeLine.Components() {
- pIDs, err := store.GetPUUIDs(comp.UUID())
-
- assert.NoError(t, err)
- assert.Len(t, pIDs, 2)
- assert.Equal(t, pIDs[0].PID, nilPID.PID)
- assert.Equal(t, pIDs[1].PID, pID2.PID)
- }
-
- },
- },
- {
- name: "Successful Add When PID Does Not Exists",
- function: "addPipeline",
- description: "",
-
- constructionLogic: func() pl.EtlStore {
- pr := pl.NewEtlStore()
- return pr
- },
- testLogic: func(t *testing.T, store pl.EtlStore) {
- ctx := context.Background()
- testPipeLine := getTestPipeLine(ctx)
-
- pID := core.MakePUUID(
- 0,
- core.MakeCUUID(0, 0, 0, 1),
- core.MakeCUUID(0, 0, 0, 1),
- )
-
- store.AddPipeline(pID, testPipeLine)
-
- for _, comp := range testPipeLine.Components() {
- pIDs, err := store.GetPUUIDs(comp.UUID())
-
- assert.NoError(t, err)
- assert.Len(t, pIDs, 1)
- assert.Equal(t, pIDs[0], pID)
- }
-
- },
- },
- {
- name: "Successful Retrieval When CID Is Valid Key",
- function: "getPipeLineIDs",
- description: "",
-
- constructionLogic: pl.NewEtlStore,
- testLogic: func(t *testing.T, store pl.EtlStore) {
- cID := core.MakeCUUID(0, 0, 0, 0)
- pID := core.MakePUUID(0, cID, cID)
-
- store.AddComponentLink(cID, pID)
-
- expectedIDs := []core.PUUID{pID}
- actualIDs, err := store.GetPUUIDs(cID)
-
- assert.NoError(t, err)
- assert.Equal(t, expectedIDs, actualIDs)
-
- },
- },
- {
- name: "Failed Retrieval When CID Is Invalid Key",
- function: "getPipeLineIDs",
- description: "",
-
- constructionLogic: pl.NewEtlStore,
- testLogic: func(t *testing.T, store pl.EtlStore) {
- cID := core.MakeCUUID(0, 0, 0, 0)
-
- _, err := store.GetPUUIDs(cID)
-
- assert.Error(t, err)
- },
- },
- {
- name: "Failed Retrieval When PID Is Invalid Key",
- function: "getPipeline",
- description: "",
-
- constructionLogic: pl.NewEtlStore,
- testLogic: func(t *testing.T, store pl.EtlStore) {
- cID := core.MakeCUUID(0, 0, 0, 0)
- pID := core.MakePUUID(0, cID, cID)
-
- _, err := store.GetPipelineFromPUUID(pID)
- assert.Error(t, err)
-
- },
- }, {
- name: "Failed Retrieval When Matching UUID Cannot Be Found",
- function: "getPipeline",
- description: "",
-
- constructionLogic: func() pl.EtlStore {
- store := pl.NewEtlStore()
- return store
- },
- testLogic: func(t *testing.T, store pl.EtlStore) {
- cID := core.MakeCUUID(0, 0, 0, 0)
- pID := core.MakePUUID(0, cID, cID)
-
- pLine := getTestPipeLine(context.Background())
-
- store.AddPipeline(pID, pLine)
-
- pID2 := core.MakePUUID(0, cID, cID)
- _, err := store.GetPipelineFromPUUID(pID2)
-
- assert.Error(t, err)
-
- },
- }, {
- name: "Successful Retrieval",
- function: "getPipeline",
- description: "",
-
- constructionLogic: func() pl.EtlStore {
- store := pl.NewEtlStore()
- return store
- },
- testLogic: func(t *testing.T, store pl.EtlStore) {
- cID := core.MakeCUUID(0, 0, 0, 0)
- pID := core.MakePUUID(0, cID, cID)
-
- expectedPline := getTestPipeLine(context.Background())
-
- store.AddPipeline(pID, expectedPline)
-
- actualPline, err := store.GetPipelineFromPUUID(pID)
-
- assert.NoError(t, err)
- assert.Equal(t, expectedPline, actualPline)
- },
- },
- {
- name: "Successful Pipeline Fetch",
- function: "getAllPipelines",
- description: "",
-
- constructionLogic: func() pl.EtlStore {
- store := pl.NewEtlStore()
- return store
- },
- testLogic: func(t *testing.T, store pl.EtlStore) {
- cID := core.MakeCUUID(0, 0, 0, 0)
- pID := core.MakePUUID(0, cID, cID)
-
- expectedPline := getTestPipeLine(context.Background())
-
- store.AddPipeline(pID, expectedPline)
-
- pipelines := store.GetAllPipelines()
-
- assert.Len(t, pipelines, 1)
- assert.Equal(t, pipelines[0], expectedPline)
- },
- },
- {
- name: "Successful Active Count Call",
- function: "ActiveCount",
- description: "",
-
- constructionLogic: func() pl.EtlStore {
- store := pl.NewEtlStore()
- return store
- },
- testLogic: func(t *testing.T, store pl.EtlStore) {
- cID := core.MakeCUUID(0, 0, 0, 0)
- pID := core.MakePUUID(0, cID, cID)
-
- expectedPline := getTestPipeLine(context.Background())
-
- store.AddPipeline(pID, expectedPline)
-
- count := store.ActiveCount()
- assert.Equal(t, count, 0)
- },
- },
- }
-
- for i, tc := range tests {
- t.Run(fmt.Sprintf("%d-%s-%s", i, tc.function, tc.name), func(t *testing.T) {
- testRouter := tc.constructionLogic()
- tc.testLogic(t, testRouter)
- })
-
- }
-}
diff --git a/internal/etl/pipeline/types.go b/internal/etl/pipeline/types.go
deleted file mode 100644
index 9c4abfb1..00000000
--- a/internal/etl/pipeline/types.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package pipeline
-
-type ActivityState uint8
-
-const (
- INACTIVE ActivityState = iota
- ACTIVE
- CRASHED
- TERMINATED
-)
-
-func (as ActivityState) String() string {
- switch as {
- case INACTIVE:
- return "inactive"
-
- case ACTIVE:
- return "active"
-
- case CRASHED:
- return "crashed"
-
- case TERMINATED:
- return "terminated"
- }
-
- return "unknown"
-}
-
-const (
- // EtlStore error constants
- couldNotCastErr = "could not cast component initializer function to %s constructor type"
- pIDNotFoundErr = "could not find pipeline ID for %s"
- uuidNotFoundErr = "could not find matching UUID for pipeline entry"
-
- // ComponentGraph error constants
- cUUIDNotFoundErr = "component with ID %s does not exist within component graph"
- cUUIDExistsErr = "component with ID %s already exists in component graph"
- edgeExistsErr = "edge already exists from (%s) to (%s) in component graph"
-
- emptyPipelineError = "pipeline must contain at least one component"
- // Manager error constants
- unknownCompType = "unknown component type %s provided"
-
- noAggregatorErr = "aggregator component has yet to be implemented"
-)
diff --git a/internal/etl/process/process.go b/internal/etl/process/process.go
new file mode 100644
index 00000000..9292a618
--- /dev/null
+++ b/internal/etl/process/process.go
@@ -0,0 +1,145 @@
+package process
+
+import (
+ "sync"
+
+ "github.com/base-org/pessimism/internal/core"
+)
+
+const (
+ killSig = 0
+)
+
+type Process interface {
+ /*
+ NOTE - Storing the PathID assumes that one process
+ can only be a part of one path at a time. This could be
+ problematic if we want to have a process be a part of multiple
+ paths at once. In that case, we would need to store a slice
+ of PathIDs instead.
+ */
+ Close() error
+ EventLoop() error
+
+ AddSubscriber(id core.ProcessID, outChan chan core.Event) error
+ SetState(as ActivityState)
+
+ AddRelay(tt core.TopicType) error
+ GetRelay(tt core.TopicType) (chan core.Event, error)
+
+ AddEngineRelay(relay *core.ExecInputRelay) error
+
+ ID() core.ProcessID
+ PathID() core.PathID
+
+ Type() core.ProcessType
+ EmitType() core.TopicType
+ StateKey() *core.StateKey
+ // TODO(#24): Add Internal Process Activity State Tracking
+ ActivityState() ActivityState
+}
+
+// Process state
+type State struct {
+ id core.ProcessID
+ pathID core.PathID
+
+ procType core.ProcessType
+ publType core.TopicType
+ as ActivityState
+
+ relay chan StateChange
+ close chan int
+
+ sk *core.StateKey
+
+ *topics
+ *subscribers
+
+ *sync.RWMutex
+}
+
+func newState(pt core.ProcessType, tt core.TopicType) *State {
+ return &State{
+ id: core.ProcessID{},
+ pathID: core.PathID{},
+
+ as: Inactive,
+ procType: pt,
+ publType: tt,
+
+ close: make(chan int),
+ relay: make(chan StateChange),
+ subscribers: &subscribers{
+ subs: make(map[core.ProcIdentifier]chan core.Event),
+ },
+ topics: &topics{
+ relays: make(map[core.TopicType]chan core.Event),
+ },
+ RWMutex: &sync.RWMutex{},
+ }
+}
+
+func (s *State) ActivityState() ActivityState {
+ return s.as
+}
+
+func (s *State) SetState(as ActivityState) {
+ s.as = as
+}
+
+func (s *State) StateKey() *core.StateKey {
+ return s.sk
+}
+
+func (s *State) ID() core.ProcessID {
+ return s.id
+}
+
+func (s *State) PathID() core.PathID {
+ return s.pathID
+}
+
+func (s *State) Type() core.ProcessType {
+ return s.procType
+}
+
+func (s *State) EmitType() core.TopicType {
+ return s.publType
+}
+
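+// emit ... Transitions the process to the given activity state and relays the change upstream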
+func (s *State) emit(as ActivityState) {
+ event := StateChange{
+ ID: s.id,
+ From: s.as,
+ To: as,
+ }
+
+ s.as = as
+ s.relay <- event // Send to upstream consumers
+}
+
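+// Option ... Functional option used to configure process State at construction time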
+type Option = func(*State)
+
+func WithID(id core.ProcessID) Option {
+ return func(meta *State) {
+ meta.id = id
+ }
+}
+func WithPathID(id core.PathID) Option {
+ return func(s *State) {
+ s.pathID = id
+ }
+}
+
+func WithEventChan(sc chan StateChange) Option {
+ return func(s *State) {
+ s.relay = sc
+ }
+}
+
+func WithStateKey(sk *core.StateKey) Option {
+ return func(s *State) {
+ s.sk = sk
+ }
+}
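+
+// Example (illustrative) - constructors in this package apply options against the embedded State:
+//
+//	p, err := NewReader(ctx, core.BlockHeader, routine, WithID(id), WithPathID(pathID))
+//
+// where routine is assumed to implement Routine, id is a core.ProcessID, and pathID is a core.PathID.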
diff --git a/internal/etl/component/component_test.go b/internal/etl/process/process_test.go
similarity index 50%
rename from internal/etl/component/component_test.go
rename to internal/etl/process/process_test.go
index 8e465b14..94d9ea6b 100644
--- a/internal/etl/component/component_test.go
+++ b/internal/etl/process/process_test.go
@@ -1,4 +1,4 @@
-package component
+package process
import (
"fmt"
@@ -8,37 +8,37 @@ import (
"github.com/stretchr/testify/assert"
)
-func Test_MetaData(t *testing.T) {
+func Test_State(t *testing.T) {
var tests = []struct {
name string
description string
function string
- constructionLogic func() *metaData
- testLogic func(*testing.T, *metaData)
+ constructionLogic func() *State
+ testLogic func(*testing.T, *State)
}{
{
name: "Test State Change Emit",
- description: "When emitStateChange is called, a new state should be state for metadata and sent in channel",
+			description: "When emit is called, the new state should be set on State and sent over the relay channel",
function: "emitStateChange",
- constructionLogic: func() *metaData {
- return newMetaData(0, 0)
+ constructionLogic: func() *State {
+ return newState(0, 0)
},
- testLogic: func(t *testing.T, md *metaData) {
+ testLogic: func(t *testing.T, s *State) {
go func() {
- // Simulate a component ending itself
- md.emitStateChange(Terminated)
+ // Simulate a process ending itself
+ s.emit(Terminated)
}()
- sChange := <-md.stateChan
+ s1 := <-s.relay
- assert.Equal(t, sChange.From, Inactive)
- assert.Equal(t, sChange.To, Terminated)
- assert.Equal(t, sChange.ID, core.NilCUUID())
+ assert.Equal(t, s1.From, Inactive)
+ assert.Equal(t, s1.To, Terminated)
+ assert.Equal(t, s1.ID, core.ProcessID{})
},
},
}
diff --git a/internal/etl/process/reader.go b/internal/etl/process/reader.go
new file mode 100644
index 00000000..84b846ce
--- /dev/null
+++ b/internal/etl/process/reader.go
@@ -0,0 +1,112 @@
+package process
+
+import (
+ "context"
+ "math/big"
+ "sync"
+ "time"
+
+ "github.com/base-org/pessimism/internal/core"
+ "github.com/base-org/pessimism/internal/logging"
+ "github.com/base-org/pessimism/internal/metrics"
+ "go.uber.org/zap"
+)
+
+// Routine ... Interface for a chain-reading routine that polls for events and reports its current height
+type Routine interface {
+ Loop(ctx context.Context, processChan chan core.Event) error
+ Height() (*big.Int, error)
+}
+
+// ChainReader ... Read process that runs a Routine and relays its events to subscribers
+type ChainReader struct {
+ ctx context.Context
+
+ routine Routine
+ jobEvents chan core.Event
+
+ wg *sync.WaitGroup
+
+ *State
+}
+
+// NewReader ... Initializer
+func NewReader(ctx context.Context, outType core.TopicType,
+ r Routine, opts ...Option) (Process, error) {
+ cr := &ChainReader{
+ ctx: ctx,
+ routine: r,
+ jobEvents: make(chan core.Event),
+ wg: &sync.WaitGroup{},
+ State: newState(core.Read, outType),
+ }
+
+ for _, opt := range opts {
+ opt(cr.State)
+ }
+
+ logging.WithContext(ctx).Info("Constructed process",
+ zap.String(logging.Process, cr.State.id.String()))
+
+ return cr, nil
+}
+
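+// Height ... Returns the current block height tracked by the underlying read routine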
+func (cr *ChainReader) Height() (*big.Int, error) {
+ return cr.routine.Height()
+}
+
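+// Close ... Sends a kill signal to the event loop and blocks until the read routine exits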
+func (cr *ChainReader) Close() error {
+ cr.close <- killSig
+ cr.wg.Wait()
+ return nil
+}
+
+// EventLoop ... Runs the read routine and forwards its events to subscribers until a close signal is received
+func (cr *ChainReader) EventLoop() error {
+ // TODO(#24) - Add Internal Process Activity State Tracking
+
+ logger := logging.WithContext(cr.ctx)
+
+ logger.Debug("Starting process job",
+ zap.String(logging.Process, cr.id.String()))
+
+ cr.wg.Add(1)
+
+ jobCtx, cancel := context.WithCancel(cr.ctx)
+
+ // Run job
+ go func() {
+ defer cr.wg.Done()
+ if err := cr.routine.Loop(jobCtx, cr.jobEvents); err != nil {
+ logger.Error("Received error from read routine",
+ zap.String(logging.Process, cr.id.String()),
+ zap.Error(err))
+ }
+ }()
+
+ for {
+ select {
+ case event := <-cr.jobEvents:
+ logger.Debug("Sending event to subscribers",
+ zap.String(logging.Process, cr.id.String()),
+ zap.String("event", event.Type.String()))
+
+ if err := cr.subscribers.Publish(event); err != nil {
+				logger.Error(relayErr, zap.String(logging.Process, cr.id.String()))
+ }
+
+ if cr.subscribers.None() {
+ latency := float64(time.Since(event.OriginTS).Milliseconds())
+ metrics.WithContext(cr.ctx).
+ RecordPathLatency(cr.PathID(), latency)
+ }
+
+ case <-cr.close:
+ logger.Debug("Shutting down process",
+ zap.String(logging.Process, cr.id.String()))
+ close(cr.jobEvents)
+ cancel()
+ return nil
+ }
+ }
+}
diff --git a/internal/etl/process/subscriber.go b/internal/etl/process/subscriber.go
new file mode 100644
index 00000000..dfd7dfb4
--- /dev/null
+++ b/internal/etl/process/subscriber.go
@@ -0,0 +1,109 @@
+package process
+
+import (
+ "context"
+ "time"
+
+ "github.com/base-org/pessimism/internal/core"
+ "github.com/base-org/pessimism/internal/logging"
+ "github.com/base-org/pessimism/internal/metrics"
+ "go.uber.org/zap"
+)
+
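+// Subscription ... Transformation logic run by a Subscriber against each consumed event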
+type Subscription interface {
+ Run(ctx context.Context, data core.Event) ([]core.Event, error)
+}
+
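+// Subscriber ... Process that consumes events from a topic relay and runs a Subscription against them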
+type Subscriber struct {
+ ctx context.Context
+ tt core.TopicType
+
+ spt Subscription
+
+ *State
+}
+
+// NewSubscriber ... Initializer
+func NewSubscriber(ctx context.Context, s Subscription, tt core.TopicType,
+ outType core.TopicType, opts ...Option) (Process, error) {
+ sub := &Subscriber{
+ ctx: ctx,
+ spt: s,
+ tt: tt,
+
+ State: newState(core.Subscribe, outType),
+ }
+
+ if err := sub.AddRelay(tt); err != nil {
+ return nil, err
+ }
+
+ for _, opt := range opts {
+ opt(sub.State)
+ }
+
+ return sub, nil
+}
+
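+// Close ... Signals the subscriber event loop to shut down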
+func (sub *Subscriber) Close() error {
+ sub.close <- killSig
+
+ return nil
+}
+
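+// EventLoop ... Consumes events from the topic relay, runs the subscription,
+// and publishes any resulting events downstream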
+func (sub *Subscriber) EventLoop() error {
+ logger := logging.WithContext(sub.ctx)
+
+ logger.Info("Starting event loop",
+ zap.String("ID", sub.id.String()),
+ )
+
+ relay, err := sub.GetRelay(sub.tt)
+ if err != nil {
+ return err
+ }
+
+ for {
+ select {
+ case event := <-relay:
+
+ events, err := sub.spt.Run(sub.ctx, event)
+ if err != nil {
+ logger.Error(err.Error(), zap.String("ID", sub.id.String()))
+ }
+
+ if sub.subscribers.None() {
+ latency := float64(time.Since(event.OriginTS).Milliseconds())
+
+ metrics.WithContext(sub.ctx).
+ RecordPathLatency(sub.pathID, latency)
+ }
+
+ length := len(events)
+ logger.Debug("Received publisher events",
+ zap.String(logging.Process, sub.id.String()),
+ zap.Int("Length", length))
+
+ if length == 0 {
+ continue
+ }
+
+ logger.Debug("Sending data batch",
+ zap.String("ID", sub.id.String()),
+ zap.String("Type", sub.EmitType().String()))
+
+ if err := sub.subscribers.PublishBatch(events); err != nil {
+ logger.Error(relayErr, zap.String("ID", sub.id.String()))
+ }
+
+ // Manager is telling us to shutdown
+ case <-sub.close:
+ logger.Debug("Process shutdown signal",
+ zap.String("ID", sub.id.String()))
+
+ // p.emitStateChange(Terminated)
+
+ return nil
+ }
+ }
+}
diff --git a/internal/etl/component/pipe_test.go b/internal/etl/process/subscriber_test.go
similarity index 76%
rename from internal/etl/component/pipe_test.go
rename to internal/etl/process/subscriber_test.go
index 9acfcefc..13e07c3f 100644
--- a/internal/etl/component/pipe_test.go
+++ b/internal/etl/process/subscriber_test.go
@@ -1,4 +1,4 @@
-package component_test
+package process_test
import (
"context"
@@ -15,22 +15,22 @@ import (
"github.com/stretchr/testify/assert"
)
-func Test_Pipe_Event_Flow(t *testing.T) {
+func TestSubscription(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ts := time.Date(1969, time.April, 1, 4, 20, 0, 0, time.Local)
- // Setup component dependencies
- testID := core.MakeCUUID(6, 9, 6, 9)
+ // Setup process dependencies
+ id := core.MakeProcessID(6, 9, 6, 9)
- outputChan := make(chan core.TransitData)
+ topics := make(chan core.Event)
- // Construct test component
- testPipe, err := mocks.NewDummyPipe(ctx, core.GethBlock, core.EventLog)
+ // Construct test process
+ path, err := mocks.NewSubscriber(ctx, core.BlockHeader, core.Log)
assert.NoError(t, err)
- err = testPipe.AddEgress(testID, outputChan)
+ err = path.AddSubscriber(id, topics)
assert.NoError(t, err)
// Encoded value taken from https://github.com/ethereum/go-ethereum/blob/master/core/types/block_test.go#L36
@@ -40,41 +40,40 @@ func Test_Pipe_Event_Flow(t *testing.T) {
err = rlp.DecodeBytes(blockEnc, &block)
assert.NoError(t, err)
- // Start component event loop on separate go routine
go func() {
- if err := testPipe.EventLoop(); err != nil {
- log.Printf("Got error from testPipe event loop %s", err.Error())
+ if err := path.EventLoop(); err != nil {
+ log.Printf("Got error from path event loop %s", err.Error())
}
}()
wg := sync.WaitGroup{}
- inputData := core.TransitData{
+ e := core.Event{
Timestamp: ts,
- Type: core.GethBlock,
+ Type: core.BlockHeader,
Value: block,
}
- var outputData core.TransitData
+ var outputData core.Event
- // Spawn listener routine that reads for output from testPipe
+ // Spawn listener routine that reads for output from path
wg.Add(1)
go func() {
defer wg.Done()
// Read first value from channel and return
- for output := range outputChan {
+ for output := range topics {
outputData = output
return
}
}()
- entryChan, err := testPipe.GetIngress(core.GethBlock)
+ relay, err := path.GetRelay(core.BlockHeader)
assert.NoError(t, err)
- entryChan <- inputData
+ relay <- e
- // Wait for pipe to transform block data into a transaction slice
+ // Wait for subscription to transform block data into a transaction slice
wg.Wait()
assert.NotNil(t, outputData)
diff --git a/internal/etl/process/subscribers.go b/internal/etl/process/subscribers.go
new file mode 100644
index 00000000..eadb083f
--- /dev/null
+++ b/internal/etl/process/subscribers.go
@@ -0,0 +1,80 @@
+package process
+
+import (
+ "fmt"
+
+ "github.com/base-org/pessimism/internal/core"
+)
+
+type subscribers struct {
+ subs map[core.ProcIdentifier]chan core.Event
+
+ relay *core.ExecInputRelay
+}
+
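+// None ... True when no downstream process subscribers exist and only the
+// engine relay is set (i.e., the path terminates at this process)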
+func (s *subscribers) None() bool {
+ return len(s.subs) == 0 && s.HasEngineRelay()
+}
+
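+// Publish ... Sends an event to the engine relay (if configured) and to every
+// subscriber channel; errors if there is nowhere to publish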
+func (s *subscribers) Publish(e core.Event) error {
+ if len(s.subs) == 0 && !s.HasEngineRelay() {
+ return fmt.Errorf(noSubErr)
+ }
+
+ if s.HasEngineRelay() {
+ if err := s.relay.RelayEvent(e); err != nil {
+ return err
+ }
+ }
+
+	// NOTE - Consider introducing a fail-safe timeout so that freezing on clogged channel buffers is detected
+ for _, channel := range s.subs {
+ channel <- e
+ }
+
+ return nil
+}
+
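+// PublishBatch ... Publishes each event in the slice sequentially, returning on the first failure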
+func (s *subscribers) PublishBatch(dataSlice []core.Event) error {
+	// NOTE - Consider introducing a fail-safe timeout so that freezing on clogged channel buffers is detected
+ for _, data := range dataSlice {
+ // NOTE - Does it make sense to fail loudly here?
+
+ if err := s.Publish(data); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
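+// AddSubscriber ... Registers a subscriber channel keyed by the process identifier; errors on duplicates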
+func (s *subscribers) AddSubscriber(id core.ProcessID, topic chan core.Event) error {
+ if _, found := s.subs[id.ID]; found {
+ return fmt.Errorf(subExistsErr, id.String())
+ }
+
+ s.subs[id.ID] = topic
+ return nil
+}
+
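+// RemoveSubscriber ... Removes the subscriber entry for the given process ID; errors if none exists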
+func (s *subscribers) RemoveSubscriber(id core.ProcessID) error {
+ if _, found := s.subs[id.ID]; !found {
+ return fmt.Errorf(subNotFound, id.ID.String())
+ }
+
+ delete(s.subs, id.ID)
+ return nil
+}
+
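+// HasEngineRelay ... True if an engine relay has been configured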
+func (s *subscribers) HasEngineRelay() bool {
+ return s.relay != nil
+}
+
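+// AddEngineRelay ... Sets the engine relay; errors if one already exists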
+func (s *subscribers) AddEngineRelay(relay *core.ExecInputRelay) error {
+ if s.HasEngineRelay() {
+ return fmt.Errorf(engineRelayExists)
+ }
+
+ s.relay = relay
+ return nil
+}
diff --git a/internal/etl/process/subscribers_test.go b/internal/etl/process/subscribers_test.go
new file mode 100644
index 00000000..f2f021fc
--- /dev/null
+++ b/internal/etl/process/subscribers_test.go
@@ -0,0 +1,248 @@
+package process
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/base-org/pessimism/internal/core"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestAddRemoveSubscription(t *testing.T) {
+ var tests = []struct {
+ name string
+ description string
+
+ construction func() *subscribers
+ test func(*testing.T, *subscribers)
+ }{
+ {
+ name: "Successful Multi Add Test",
+ description: "Many subscriptions should be addable",
+
+ construction: func() *subscribers {
+ return &subscribers{
+ subs: make(map[core.ProcIdentifier]chan core.Event),
+ }
+ },
+
+ test: func(t *testing.T, s *subscribers) {
+
+ for _, id := range []core.ProcessID{
+ core.MakeProcessID(1, 54, 43, 32),
+ core.MakeProcessID(2, 54, 43, 32),
+ core.MakeProcessID(3, 54, 43, 32),
+ core.MakeProcessID(4, 54, 43, 32)} {
+ outChan := make(chan core.Event)
+ err := s.AddSubscriber(id, outChan)
+
+					assert.NoError(t, err, "Ensuring that no error occurs when adding a new sub")
+
+ _, exists := s.subs[id.ID]
+ assert.True(t, exists, "Ensuring that key exists")
+ }
+ },
+ },
+ {
+ name: "Failed Add Test",
+ description: "Duplicate subscribers cannot exist",
+
+ construction: func() *subscribers {
+ id := core.MakeProcessID(1, 54, 43, 32)
+ outChan := make(chan core.Event)
+
+ s := &subscribers{
+ subs: make(map[core.ProcIdentifier]chan core.Event),
+ }
+ if err := s.AddSubscriber(id, outChan); err != nil {
+ panic(err)
+ }
+
+ return s
+ },
+
+ test: func(t *testing.T, s *subscribers) {
+ id := core.MakeProcessID(1, 54, 43, 32)
+ outChan := make(chan core.Event)
+ err := s.AddSubscriber(id, outChan)
+
+ assert.Error(t, err, "Error was not generated when adding conflicting subs with same ID")
+				assert.Equal(t, err.Error(), fmt.Sprintf(subExistsErr, id.String()), "Ensuring that the returned error indicates a duplicate subscriber")
+ },
+ },
+ {
+ name: "Successful Remove Test",
+ description: "Subscribers should be removable",
+
+ construction: func() *subscribers {
+ id := core.MakeProcessID(1, 54, 43, 32)
+ outChan := make(chan core.Event)
+
+ s := &subscribers{
+ subs: make(map[core.ProcIdentifier]chan core.Event),
+ }
+ if err := s.AddSubscriber(id, outChan); err != nil {
+ panic(err)
+ }
+
+ return s
+ },
+
+ test: func(t *testing.T, s *subscribers) {
+ id := core.MakeProcessID(1, 54, 43, 32)
+ err := s.RemoveSubscriber(id)
+
+ assert.NoError(t, err, "Ensuring that no error is thrown when removing an existing sub")
+
+ _, exists := s.subs[core.MakeProcessID(1, 54, 43, 32).ID]
+ assert.False(t, exists, "Ensuring that key is removed from mapping")
+ },
+ }, {
+ name: "Failed Remove Test",
+ description: "Unknown keys should not be removable",
+
+ construction: func() *subscribers {
+ id := core.MakeProcessID(1, 54, 43, 32)
+ outChan := make(chan core.Event)
+
+ s := &subscribers{
+ subs: make(map[core.ProcIdentifier]chan core.Event),
+ }
+ if err := s.AddSubscriber(id, outChan); err != nil {
+ panic(err)
+ }
+
+ return s
+ },
+
+ test: func(t *testing.T, s *subscribers) {
+
+ id := core.MakeProcessID(69, 69, 69, 69)
+ err := s.RemoveSubscriber(id)
+
+ assert.Error(t, err, "Ensuring that an error is thrown when trying to remove a non-existent sub")
+ assert.Equal(t, err.Error(), fmt.Sprintf(subNotFound, id.Identifier()))
+ },
+ },
+ {
+ name: "Passed Engine Test",
+			description: "When a relay is passed to AddEngineRelay, it should be used during publish operations",
+
+ construction: func() *subscribers {
+ return &subscribers{}
+ },
+ test: func(t *testing.T, s *subscribers) {
+ relayChan := make(chan core.HeuristicInput)
+
+ PathID := core.PathID{}
+
+ relay := core.NewEngineRelay(PathID, relayChan)
+
+ err := s.AddEngineRelay(relay)
+ assert.NoError(t, err)
+
+ testData := core.Event{Network: 2, Value: "goodbye closed-source blocksec monitoring"}
+ expectedInput := core.HeuristicInput{
+ PathID: PathID,
+ Input: testData,
+ }
+
+ go func(t *testing.T) {
+ assert.NoError(t, s.Publish(testData))
+ }(t)
+
+ actualInput := <-relayChan
+
+ assert.Equal(t, actualInput, expectedInput)
+
+ },
+ },
+ {
+ name: "Failed Engine Test",
+			description: "When a relay already exists and AddEngineRelay is called, an error should be returned",
+
+ construction: func() *subscribers {
+ relayChan := make(chan core.HeuristicInput)
+
+ relay := core.NewEngineRelay(core.PathID{}, relayChan)
+ s := &subscribers{
+ subs: make(map[core.ProcIdentifier]chan core.Event),
+ }
+
+ if err := s.AddEngineRelay(relay); err != nil {
+ panic(err)
+ }
+
+ return s
+ },
+
+ test: func(t *testing.T, s *subscribers) {
+ relayChan := make(chan core.HeuristicInput)
+
+ relay := core.NewEngineRelay(core.PathID{}, relayChan)
+
+ err := s.AddEngineRelay(relay)
+
+ assert.Error(t, err)
+ },
+ },
+ }
+
+ for i, tc := range tests {
+ t.Run(fmt.Sprintf("%d-%s", i, tc.name), func(t *testing.T) {
+ subs := tc.construction()
+ tc.test(t, subs)
+ })
+
+ }
+}
+
+func TestPublishToSubscribers(t *testing.T) {
+ s := &subscribers{
+ subs: make(map[core.ProcIdentifier]chan core.Event),
+ }
+
+ var subs = []struct {
+ channel chan core.Event
+ id core.ProcessID
+ }{
+ {
+ channel: make(chan core.Event, 1),
+ id: core.MakeProcessID(3, 54, 43, 32),
+ },
+ {
+ channel: make(chan core.Event, 1),
+ id: core.MakeProcessID(1, 54, 43, 32),
+ },
+ {
+ channel: make(chan core.Event, 1),
+ id: core.MakeProcessID(1, 2, 43, 32),
+ },
+ {
+ channel: make(chan core.Event, 1),
+ id: core.MakeProcessID(1, 4, 43, 32),
+ },
+ }
+
+ for _, sub := range subs {
+ err := s.AddSubscriber(sub.id, sub.channel)
+ assert.NoError(t, err, "Received error when trying to add sub")
+ }
+
+ expected := core.Event{
+ Timestamp: time.Date(1969, time.April, 1, 4, 20, 0, 0, time.Local),
+ Type: 3,
+ Value: 0x42069,
+ }
+
+ err := s.Publish(expected)
+	assert.NoError(t, err, "Received error when trying to publish output")
+
+ for _, sub := range subs {
+ actual := <-sub.channel
+
+		assert.Equal(t, actual, expected, "Ensuring published data is returned on each subscriber channel")
+ }
+
+}
diff --git a/internal/etl/process/topics.go b/internal/etl/process/topics.go
new file mode 100644
index 00000000..b3620997
--- /dev/null
+++ b/internal/etl/process/topics.go
@@ -0,0 +1,30 @@
+package process
+
+import (
+ "fmt"
+
+ "github.com/base-org/pessimism/internal/core"
+)
+
+type topics struct {
+ relays map[core.TopicType]chan core.Event
+}
+
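+// GetRelay ... Returns the relay channel for the given topic type; errors if not found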
+func (p *topics) GetRelay(rt core.TopicType) (chan core.Event, error) {
+ val, found := p.relays[rt]
+ if !found {
+ return nil, fmt.Errorf(topicNotFoundErr, rt.String())
+ }
+
+ return val, nil
+}
+
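+// AddRelay ... Creates a relay channel for the given topic type; errors if one already exists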
+func (p *topics) AddRelay(rt core.TopicType) error {
+ if _, found := p.relays[rt]; found {
+ return fmt.Errorf(topicExistsErr, rt.String())
+ }
+
+ p.relays[rt] = core.NewTransitChannel()
+
+ return nil
+}
diff --git a/internal/etl/component/ingress_test.go b/internal/etl/process/topics_test.go
similarity index 51%
rename from internal/etl/component/ingress_test.go
rename to internal/etl/process/topics_test.go
index 6f1d03bc..53937e8d 100644
--- a/internal/etl/component/ingress_test.go
+++ b/internal/etl/process/topics_test.go
@@ -1,4 +1,4 @@
-package component
+package process
import (
"fmt"
@@ -8,26 +8,27 @@ import (
"github.com/stretchr/testify/assert"
)
-func Test_Add_Remove_Ingress(t *testing.T) {
+func TestAddRemoveTopic(t *testing.T) {
var tests = []struct {
name string
description string
- constructionLogic func() *ingressHandler
- testLogic func(*testing.T, *ingressHandler)
+ construct func() *topics
+ test func(*testing.T, *topics)
}{
{
name: "Successful Add Test",
description: "When a register type is passed to AddIngress function, it should be successfully added to handler ingress mapping",
- constructionLogic: func() *ingressHandler {
- handler := newIngressHandler()
- return handler
+ construct: func() *topics {
+ return &topics{
+ relays: make(map[core.TopicType]chan core.Event),
+ }
},
- testLogic: func(t *testing.T, ih *ingressHandler) {
+ test: func(t *testing.T, p *topics) {
- err := ih.createIngress(core.GethBlock)
+ err := p.AddRelay(core.BlockHeader)
assert.NoError(t, err, "geth.block register should added as an egress")
},
@@ -36,20 +37,22 @@ func Test_Add_Remove_Ingress(t *testing.T) {
name: "Failed Add Test",
description: "When the same register type is added twice to AddIngress function, the second add should fail with key collisions",
- constructionLogic: func() *ingressHandler {
- handler := newIngressHandler()
- if err := handler.createIngress(core.GethBlock); err != nil {
- panic(err)
+ construct: func() *topics {
+ p := &topics{
+ relays: make(map[core.TopicType]chan core.Event),
}
- return handler
+ if err := p.AddRelay(core.BlockHeader); err != nil {
+ panic(err)
+ }
+ return p
},
- testLogic: func(t *testing.T, ih *ingressHandler) {
- err := ih.createIngress(core.GethBlock)
+ test: func(t *testing.T, p *topics) {
+ err := p.AddRelay(core.BlockHeader)
assert.Error(t, err, "geth.block register should fail to be added")
- assert.Equal(t, err.Error(), fmt.Sprintf(ingressAlreadyExistsErr, core.GethBlock.String()))
+ assert.Equal(t, err.Error(), fmt.Sprintf(topicExistsErr, core.BlockHeader.String()))
},
},
@@ -57,8 +60,8 @@ func Test_Add_Remove_Ingress(t *testing.T) {
for i, tc := range tests {
t.Run(fmt.Sprintf("%d-%s", i, tc.name), func(t *testing.T) {
- testIngress := tc.constructionLogic()
- tc.testLogic(t, testIngress)
+ publisher := tc.construct()
+ tc.test(t, publisher)
})
}
diff --git a/internal/etl/process/types.go b/internal/etl/process/types.go
new file mode 100644
index 00000000..b6d3cfc5
--- /dev/null
+++ b/internal/etl/process/types.go
@@ -0,0 +1,56 @@
+package process
+
+import (
+ "context"
+
+ "github.com/base-org/pessimism/internal/core"
+)
+
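+// ActivityState ... Represents a process's runtime activity state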
+type ActivityState int
+
+const (
+ Inactive ActivityState = iota
+ Live
+ Terminated
+)
+
+func (as ActivityState) String() string {
+ switch as {
+ case Inactive:
+ return "inactive"
+
+ case Live:
+ return "live"
+
+ case Terminated:
+ return "terminated"
+ }
+
+ return "unknown"
+}
+
+// StateChange ... Denotes a process activity state transition
+type StateChange struct {
+ ID core.ProcessID
+
+	From ActivityState // Prior state (S)
+	To   ActivityState // New state (S')
+}
+
+const (
+	engineRelayExists = "engine relay already exists"
+ subExistsErr = "%s subscriber already exists"
+ subNotFound = "no subscriber with key %s exists"
+ noSubErr = "no subscribers to notify"
+
+ relayErr = "received relay error: %s"
+)
+
+const (
+ topicExistsErr = "topic already exists for %s"
+ topicNotFoundErr = "topic not found for %s"
+)
+
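+// Constructor ... Function type for constructing a Process from a client config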
+type (
+ Constructor = func(context.Context, *core.ClientConfig, ...Option) (Process, error)
+)
diff --git a/internal/etl/registry/oracle/account_balance.go b/internal/etl/registry/oracle/account_balance.go
deleted file mode 100644
index 82ff2ea9..00000000
--- a/internal/etl/registry/oracle/account_balance.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package oracle
-
-import (
- "context"
- "fmt"
- "math/big"
- "time"
-
- "github.com/base-org/pessimism/internal/client"
- "github.com/base-org/pessimism/internal/common/math"
- "github.com/base-org/pessimism/internal/core"
- "github.com/base-org/pessimism/internal/etl/component"
- "github.com/base-org/pessimism/internal/logging"
- "github.com/base-org/pessimism/internal/state"
- "github.com/ethereum/go-ethereum/common"
-
- "go.uber.org/zap"
-)
-
-// TODO(#21): Verify config validity during Oracle construction
-// AddressBalanceODef ... Address register oracle definition used to drive oracle component
-type AddressBalanceODef struct {
- pUUID core.PUUID
- cfg *core.ClientConfig
- client client.EthClient
- currHeight *big.Int
- sk *core.StateKey
-}
-
-func (oracle *AddressBalanceODef) Height() (*big.Int, error) {
- return oracle.currHeight, nil
-}
-
-// NewAddressBalanceODef ... Initializer for address.balance oracle definition
-func NewAddressBalanceODef(cfg *core.ClientConfig, client client.EthClient,
- h *big.Int) *AddressBalanceODef {
- return &AddressBalanceODef{
- cfg: cfg,
- client: client,
- currHeight: h,
- }
-}
-
-// NewAddressBalanceOracle ... Initializer for address.balance oracle component
-func NewAddressBalanceOracle(ctx context.Context, cfg *core.ClientConfig,
- opts ...component.Option) (component.Component, error) {
- client, err := client.FromNetwork(ctx, cfg.Network)
- if err != nil {
- return nil, err
- }
-
- od := NewAddressBalanceODef(cfg, client, nil)
- o, err := component.NewOracle(ctx, core.GethBlock, od, opts...)
- if err != nil {
- return nil, err
- }
-
- od.sk = o.StateKey().Clone()
- return o, nil
-}
-
-// BackTestRoutine ...
-// NOTE - This oracle does not support backtesting
-// TODO (#59) : Add account balance backtesting support
-func (oracle *AddressBalanceODef) BackTestRoutine(_ context.Context, _ chan core.TransitData,
- _ *big.Int, _ *big.Int) error {
- return fmt.Errorf(noBackTestSupportError)
-}
-
-// ReadRoutine ... Sequentially polls go-ethereum compatible execution
-// client for address (EOA, Contract) native balance amounts
-func (oracle *AddressBalanceODef) ReadRoutine(ctx context.Context, componentChan chan core.TransitData) error {
- stateStore, err := state.FromContext(ctx)
- if err != nil {
- return err
- }
-
- ticker := time.NewTicker(oracle.cfg.PollInterval * time.Millisecond) //nolint:durationcheck // inapplicable
- for {
- select {
- case <-ticker.C: // Polling
- ts := time.Now()
- logging.NoContext().Debug("Getting addresess",
- zap.String(logging.PUUIDKey, oracle.pUUID.String()))
-
- // Get addresses from shared state store for pipeline uuid
-
- addresses, err := stateStore.GetSlice(ctx, oracle.sk)
- if err != nil {
- logging.WithContext(ctx).Error(err.Error())
- continue
- }
-
- for _, address := range addresses {
- // Convert to go-ethereum address type
- gethAddress := common.HexToAddress(address)
- logging.NoContext().Debug("Balance query",
- zap.String(logging.AddrKey, gethAddress.String()))
-
- // Get balance using go-ethereum client
- weiBalance, err := oracle.client.BalanceAt(ctx, gethAddress, nil)
- if err != nil {
- logging.WithContext(ctx).Error(err.Error())
- continue
- }
-
- // Convert wei to ether
- // NOTE - There is a possibility of precision loss here
- // TODO (#58) : Verify precision loss
- ethBalance, _ := math.WeiToEther(weiBalance).Float64()
-
- // Send parsed float64 balance value to downstream component channel
- componentChan <- core.NewTransitData(core.AccountBalance, ethBalance,
- core.WithAddress(gethAddress), core.WithOriginTS(ts))
- }
-
- case <-ctx.Done(): // Shutdown
- return nil
- }
- }
-}
diff --git a/internal/etl/registry/oracle/geth_block.go b/internal/etl/registry/oracle/geth_block.go
deleted file mode 100644
index 03cfc5ed..00000000
--- a/internal/etl/registry/oracle/geth_block.go
+++ /dev/null
@@ -1,277 +0,0 @@
-package oracle
-
-import (
- "context"
- "errors"
- "math/big"
- "time"
-
- "github.com/base-org/pessimism/internal/client"
- "github.com/base-org/pessimism/internal/core"
- "github.com/base-org/pessimism/internal/etl/component"
- "github.com/base-org/pessimism/internal/logging"
- "github.com/base-org/pessimism/internal/metrics"
- "github.com/ethereum/go-ethereum/core/types"
- "go.uber.org/zap"
-)
-
-const (
- notFoundMsg = "not found"
-)
-
-// TODO(#21): Verify config validity during Oracle construction
-// GethBlockODef ...GethBlock register oracle definition used to drive oracle component
-type GethBlockODef struct {
- cUUID core.CUUID
- pUUID core.PUUID
- cfg *core.ClientConfig
- client client.EthClient
- currHeight *big.Int
-
- stats metrics.Metricer
-}
-
-// NewGethBlockODef ... Initializer for geth.block oracle definition
-func NewGethBlockODef(cfg *core.ClientConfig, client client.EthClient,
- h *big.Int, stats metrics.Metricer) *GethBlockODef {
- return &GethBlockODef{
- cfg: cfg,
- client: client,
- currHeight: h,
- stats: stats,
- }
-}
-
-// NewGethBlockOracle ... Initializer for geth.block oracle component
-func NewGethBlockOracle(ctx context.Context, cfg *core.ClientConfig,
- opts ...component.Option) (component.Component, error) {
- client, err := client.FromNetwork(ctx, cfg.Network)
- if err != nil {
- return nil, err
- }
-
- od := NewGethBlockODef(cfg, client, nil, metrics.WithContext(ctx))
-
- oracle, err := component.NewOracle(ctx, core.GethBlock, od, opts...)
- if err != nil {
- return nil, err
- }
-
- od.cUUID = oracle.UUID()
- od.pUUID = oracle.PUUID()
- return oracle, nil
-}
-
-func (oracle *GethBlockODef) Height() (*big.Int, error) {
- return oracle.currHeight, nil
-}
-
-// getCurrentHeightFromNetwork ... Gets the current height of the network and will not quit until found
-func (oracle *GethBlockODef) getCurrentHeightFromNetwork(ctx context.Context) *types.Header {
- for {
- header, err := oracle.client.HeaderByNumber(ctx, nil)
- if err != nil {
- oracle.stats.RecordNodeError(oracle.cfg.Network)
- logging.WithContext(ctx).Error("problem fetching current height from network", zap.Error(err))
- continue
- }
- return header
- }
-}
-
-// BackTestRoutine ...
-func (oracle *GethBlockODef) BackTestRoutine(ctx context.Context, componentChan chan core.TransitData,
- startHeight *big.Int, endHeight *big.Int) error {
- if endHeight.Cmp(startHeight) < 0 {
- return errors.New("start height cannot be more than the end height")
- }
-
- currentHeader := oracle.getCurrentHeightFromNetwork(ctx)
-
- if startHeight.Cmp(currentHeader.Number) == 1 {
- return errors.New("start height cannot be more than the latest height from network")
- }
-
- ticker := time.NewTicker(oracle.cfg.PollInterval * time.Millisecond) //nolint:durationcheck // inapplicable
- height := startHeight
-
- for {
- select {
- case <-ticker.C:
-
- headerAsInterface, err := oracle.fetchData(ctx, height, core.FetchHeader)
- headerAsserted, headerAssertedOk := headerAsInterface.(*types.Header)
-
- if err != nil || !headerAssertedOk {
- logging.WithContext(ctx).Error("problem fetching or asserting header", zap.NamedError("headerFetch", err),
- zap.Bool("headerAsserted", headerAssertedOk))
- oracle.stats.RecordNodeError(oracle.cfg.Network)
- continue
- }
-
- blockAsInterface, err := oracle.fetchData(ctx, headerAsserted.Number, core.FetchBlock)
- blockAsserted, blockAssertedOk := blockAsInterface.(*types.Block)
-
- if err != nil || !blockAssertedOk {
- // logging.WithContext(ctx).Error("problem fetching or asserting block", zap.NamedError("blockFetch", err),
- // zap.Bool("blockAsserted", blockAssertedOk))
- oracle.stats.RecordNodeError(oracle.cfg.Network)
- continue
- }
-
- componentChan <- core.TransitData{
- OriginTS: time.Now(),
- Timestamp: time.Now(),
- Type: core.GethBlock,
- Value: *blockAsserted,
- }
-
- if height.Cmp(endHeight) == 0 {
- logging.WithContext(ctx).Info("Completed back-test routine.")
- return nil
- }
-
- height.Add(height, big.NewInt(1))
-
- case <-ctx.Done():
- return nil
- }
- }
-}
-
-// getHeightToProcess ...
-//
-// Check if current height is nil, if it is, then check if starting height is provided:
-// 1. if start height is provided, use that number as the current height
-// 2. if not, then sending nil as current height means use the latest
-// if current height is not nil, skip all above steps and continue iterating.
-// At the end, if the end height is specified and not nil, if its met, it returns once done.
-// Start Height and End Height is inclusive in fetching blocks.
-func (oracle *GethBlockODef) getHeightToProcess(ctx context.Context) *big.Int {
- if oracle.currHeight == nil {
- if oracle.cfg.StartHeight != nil {
- logging.WithContext(ctx).Info("Using provided starting height for poller processing", zap.Int64("StartHeight",
- oracle.cfg.StartHeight.Int64()))
- return oracle.cfg.StartHeight
- }
- logging.WithContext(ctx).Info("Starting pipeline syncing from latest block",
- zap.String(logging.CUUIDKey, oracle.cUUID.String()))
- return nil
- }
- return oracle.currHeight
-}
-
-// fetchHeaderWithRetry ... retry for specified number of times.
-// Not an exponent backoff, but a simpler method which retries sooner
-func (oracle *GethBlockODef) fetchData(ctx context.Context, height *big.Int,
- fetchType core.FetchType) (interface{}, error) {
- if fetchType == core.FetchHeader {
- return oracle.client.HeaderByNumber(ctx, height)
- }
- return oracle.client.BlockByNumber(ctx, height)
-}
-
-func validHeightParams(start, end *big.Int) error {
- if end != nil && start == nil {
- return errors.New("cannot start with latest block height with end height configured")
- }
-
- if end != nil && start != nil &&
- end.Cmp(start) < 0 {
- return errors.New("start height cannot be more than the end height")
- }
-
- return nil
-}
-
-// ReadRoutine ... Sequentially polls go-ethereum compatible execution
-// client using monotonic block height variable for block metadata
-// & writes block metadata to output listener components
-func (oracle *GethBlockODef) ReadRoutine(ctx context.Context, componentChan chan core.TransitData) error {
- // NOTE - Might need improvements in future as the project takes shape.
-
- // Now fetching current height from the network
- // currentHeader := oracle.getCurrentHeightFromNetwork(ctx)
-
- // if oracle.cfg.StartHeight.Cmp(currentHeader.Number) == 1 {
- // return errors.New("start height cannot be more than the latest height from network")
- // }
-
- if err := validHeightParams(oracle.cfg.StartHeight, oracle.cfg.EndHeight); err != nil {
- return err
- }
-
- logging.WithContext(ctx).
- Debug("Starting poll routine", zap.Duration("poll_interval", oracle.cfg.PollInterval),
- zap.String(logging.CUUIDKey, oracle.cUUID.String()))
-
- ticker := time.NewTicker(oracle.cfg.PollInterval * time.Millisecond) //nolint:durationcheck // inapplicable
- for {
- select {
- case <-ticker.C:
- opStart := time.Now()
-
- height := oracle.getHeightToProcess(ctx)
- if height != nil {
- logging.WithContext(ctx).Debug("Polling block for processing",
- zap.Int("Height", int(height.Int64())),
- zap.String(logging.CUUIDKey, oracle.cUUID.String()))
- }
-
- headerAsInterface, err := oracle.fetchData(ctx, height, core.FetchHeader)
- headerAsserted, headerAssertedOk := headerAsInterface.(*types.Header)
-
- // Ensure err is indicative of block not existing yet
- if err != nil && err.Error() == notFoundMsg {
- continue
- }
-
- if err != nil || !headerAssertedOk {
- logging.WithContext(ctx).Error("problem fetching or asserting header", zap.NamedError("headerFetch", err),
- zap.Bool("headerAsserted", headerAssertedOk), zap.String(logging.CUUIDKey, oracle.cUUID.String()))
- oracle.stats.RecordNodeError(oracle.cfg.Network)
- continue
- }
-
- blockAsInterface, err := oracle.fetchData(ctx, headerAsserted.Number, core.FetchBlock)
- block, blockAssertedOk := blockAsInterface.(*types.Block)
-
- if err != nil || !blockAssertedOk {
- logging.WithContext(ctx).Error("problem fetching or asserting block", zap.NamedError("blockFetch", err),
- zap.Bool("blockAsserted", blockAssertedOk), zap.String(logging.CUUIDKey, oracle.cUUID.String()))
- oracle.stats.RecordNodeError(oracle.cfg.Network)
- continue
- }
-
- blockTS := time.Unix(int64(block.Time()), 0)
- oracle.stats.RecordBlockLatency(oracle.cfg.Network, float64(time.Since(blockTS).Milliseconds()))
-
- componentChan <- core.TransitData{
- OriginTS: opStart,
- Timestamp: time.Now(),
- Type: core.GethBlock,
- Value: *block,
- }
-
- // check has to be done here to include the end height block
- if oracle.cfg.EndHeight != nil && height.Cmp(oracle.cfg.EndHeight) == 0 {
- return nil
- }
-
- if height != nil {
- height.Add(height, big.NewInt(1))
- } else {
- height = new(big.Int).Add(headerAsserted.Number, big.NewInt(1))
- }
-
- logging.NoContext().Debug("New height", zap.Int("Height", int(height.Int64())),
- zap.String(logging.CUUIDKey, oracle.cUUID.String()))
-
- oracle.currHeight = height
-
- case <-ctx.Done():
- logging.NoContext().Info("Geth.block oracle routine ending", zap.String(logging.CUUIDKey, oracle.cUUID.String()))
- return nil
- }
- }
-}
diff --git a/internal/etl/registry/oracle/geth_block_test.go b/internal/etl/registry/oracle/geth_block_test.go
deleted file mode 100644
index 73fe2f6f..00000000
--- a/internal/etl/registry/oracle/geth_block_test.go
+++ /dev/null
@@ -1,424 +0,0 @@
-package oracle
-
-import (
- "context"
- "fmt"
- "math/big"
- "testing"
-
- "github.com/base-org/pessimism/internal/core"
- "github.com/base-org/pessimism/internal/logging"
- "github.com/base-org/pessimism/internal/metrics"
- "github.com/base-org/pessimism/internal/mocks"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/trie"
- gomock "github.com/golang/mock/gomock"
- "github.com/stretchr/testify/assert"
-)
-
-func Test_GetCurrentHeightFromNetwork(t *testing.T) {
-
- ctx, cancel := context.WithCancel(context.Background())
- logging.New(core.Development)
- defer cancel()
-
- // setup mock
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
- testObj := mocks.NewMockEthClient(ctrl)
-
- header := types.Header{
- ParentHash: common.HexToHash("0x123456789"),
- Number: big.NewInt(5),
- }
- // setup expectations
- testObj.
- EXPECT().
- HeaderByNumber(gomock.Any(), gomock.Any()).
- Return(&header, nil)
-
- od := &GethBlockODef{cfg: &core.ClientConfig{
- NumOfRetries: 3,
- }, currHeight: nil, client: testObj}
-
- assert.Equal(t, od.getCurrentHeightFromNetwork(ctx).Number, header.Number)
-}
-
-func Test_GetHeightToProcess(t *testing.T) {
-
- ctx, cancel := context.WithCancel(context.Background())
- logging.New(core.Development)
- defer cancel()
-
- // setup mock
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
- testObj := mocks.NewMockEthClient(ctrl)
-
- header := types.Header{
- ParentHash: common.HexToHash("0x123456789"),
- Number: big.NewInt(5),
- }
- testObj.
- EXPECT().
- HeaderByNumber(gomock.Any(), gomock.Any()).
- Return(&header, nil).
- AnyTimes()
-
- od := &GethBlockODef{cfg: &core.ClientConfig{
- NumOfRetries: 3,
- }, currHeight: big.NewInt(123), client: testObj}
-
- assert.Equal(t, od.getHeightToProcess(ctx), big.NewInt(123))
-
- od.currHeight = nil
- od.cfg.StartHeight = big.NewInt(123)
- assert.Equal(t, od.getHeightToProcess(ctx), big.NewInt(123))
-
- od.currHeight = nil
- od.cfg.StartHeight = nil
- assert.Nil(t, od.getHeightToProcess(ctx))
-}
-
-func Test_Backroutine(t *testing.T) {
- logging.New(core.Development)
- var tests = []struct {
- name string
- description string
-
- constructionLogic func() (*GethBlockODef, chan core.TransitData)
- testLogic func(*testing.T, *GethBlockODef, chan core.TransitData)
- }{
-
- {
- name: "Current network height check",
- description: "Check if network height check is less than starting height",
-
- constructionLogic: func() (*GethBlockODef, chan core.TransitData) {
- // setup mock
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
- testObj := mocks.NewMockEthClient(ctrl)
-
- header := types.Header{
- ParentHash: common.HexToHash("0x123456789"),
- Number: big.NewInt(5),
- }
- // setup expectationss
- testObj.
- EXPECT().
- HeaderByNumber(gomock.Any(), gomock.Any()).
- Return(&header, nil).
- AnyTimes()
-
- od := &GethBlockODef{cfg: &core.ClientConfig{
- NumOfRetries: 3,
- }, currHeight: nil, client: testObj}
-
- outChan := make(chan core.TransitData)
-
- return od, outChan
- },
-
- testLogic: func(t *testing.T, od *GethBlockODef, outChan chan core.TransitData) {
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- err := od.BackTestRoutine(ctx, outChan, big.NewInt(7), big.NewInt(10))
- assert.Error(t, err)
- assert.EqualError(t, err, "start height cannot be more than the latest height from network")
- },
- },
- {
- name: "Successful Height check",
- description: "Ending height cannot be less than the Starting height",
-
- constructionLogic: func() (*GethBlockODef, chan core.TransitData) {
- // setup mock
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
- testObj := mocks.NewMockEthClient(ctrl)
-
- od := &GethBlockODef{cfg: &core.ClientConfig{
- NumOfRetries: 3,
- }, currHeight: nil, client: testObj}
-
- outChan := make(chan core.TransitData)
-
- return od, outChan
- },
-
- testLogic: func(t *testing.T, od *GethBlockODef, outChan chan core.TransitData) {
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- err := od.BackTestRoutine(ctx, outChan, big.NewInt(2), big.NewInt(1))
- assert.Error(t, err)
- assert.EqualError(t, err, "start height cannot be more than the end height")
- },
- },
- // Leaving this here to help devs test infinite loops
- //
- //{
- // name: "Header fetch retry exceeded error check",
- // description: "Check if the header fetch retry fails after 3 retries, total 4 tries.",
- //
- // constructionLogic: func() (*GethBlockODef, chan core.TransitData) {
- // testObj := new(EthClientMocked)
- //
- // // setup expectations
- // testObj.On("DialContext", mock.Anything, "pass test").Return(nil)
- // testObj.On("HeaderByNumber", mock.Anything, mock.Anything).Return(nil, errors.New("no header for you"))
- //
- // od := &GethBlockODef{cfg: &core.ClientConfig{
- // RPCEndpoint: "pass test",
- // NumOfRetries: 3,
- // }, currHeight: nil, client: testObj}
- //
- // outChan := make(chan core.TransitData)
- // return od, outChan
- // },
- //
- // testLogic: func(t *testing.T, od *GethBlockODef, outChan chan core.TransitData) {
- //
- // ctx, cancel := context.WithCancel(context.Background())
- // defer cancel()
- //
- // err := od.BackTestRoutine(ctx, outChan, big.NewInt(1), big.NewInt(2))
- // assert.Error(t, err)
- // assert.EqualError(t, err, "no header for you")
- // },
- // },
- {
- name: "Backroutine happy path test",
- description: "Backroutine works and channel should have 4 messages waiting.",
-
- constructionLogic: func() (*GethBlockODef, chan core.TransitData) {
- // setup mock
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
- testObj := mocks.NewMockEthClient(ctrl)
-
- header := types.Header{
- ParentHash: common.HexToHash("0x123456789"),
- Number: big.NewInt(7),
- }
- block := types.NewBlock(&header, nil, nil, nil, trie.NewStackTrie(nil))
- // setup expectations
- testObj.
- EXPECT().
- HeaderByNumber(gomock.Any(), gomock.Any()).
- Return(&header, nil).
- AnyTimes()
- testObj.
- EXPECT().
- BlockByNumber(gomock.Any(), gomock.Any()).
- Return(block, nil).
- AnyTimes()
-
- od := &GethBlockODef{cfg: &core.ClientConfig{
- NumOfRetries: 3,
- PollInterval: 1000,
- }, currHeight: nil, client: testObj}
-
- outChan := make(chan core.TransitData, 2)
-
- return od, outChan
- },
-
- testLogic: func(t *testing.T, od *GethBlockODef, outChan chan core.TransitData) {
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- err := od.BackTestRoutine(ctx, outChan, big.NewInt(5), big.NewInt(6))
- assert.NoError(t, err)
- close(outChan)
-
- for m := range outChan {
- val := m.Value.(types.Block) //nolint:errcheck // converting to type from any for getting internal values
- assert.Equal(t, val.ParentHash(), common.HexToHash("0x123456789"))
- }
- },
- },
- }
-
- for i, tc := range tests {
- t.Run(fmt.Sprintf("%d-%s", i, tc.name), func(t *testing.T) {
- od, outChan := tc.constructionLogic()
- tc.testLogic(t, od, outChan)
- })
-
- }
-}
-
-func Test_ReadRoutine(t *testing.T) {
- logging.New(core.Development)
- var tests = []struct {
- name string
- description string
-
- constructionLogic func() (*GethBlockODef, chan core.TransitData)
- testLogic func(*testing.T, *GethBlockODef, chan core.TransitData)
- }{
-
- {
- name: "Successful Height check 1",
- description: "Ending height cannot be less than the Starting height",
-
- constructionLogic: func() (*GethBlockODef, chan core.TransitData) {
- // setup mock
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
- testObj := mocks.NewMockEthClient(ctrl)
-
- od := &GethBlockODef{cfg: &core.ClientConfig{
- StartHeight: big.NewInt(2),
- EndHeight: big.NewInt(1),
- NumOfRetries: 3,
- PollInterval: 1000,
- }, currHeight: nil, client: testObj}
- outChan := make(chan core.TransitData)
- return od, outChan
- },
- testLogic: func(t *testing.T, od *GethBlockODef, outChan chan core.TransitData) {
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- err := od.ReadRoutine(ctx, outChan)
- assert.Error(t, err)
- assert.EqualError(t, err, "start height cannot be more than the end height")
- },
- },
- {
- name: "Successful Height check 2",
- description: "Cannot have start height nil, i.e, latest block and end height configured",
-
- constructionLogic: func() (*GethBlockODef, chan core.TransitData) {
- // setup mock
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
- testObj := mocks.NewMockEthClient(ctrl)
-
- od := &GethBlockODef{cfg: &core.ClientConfig{
- StartHeight: nil,
- EndHeight: big.NewInt(1),
- NumOfRetries: 3,
- PollInterval: 1000,
- }, currHeight: nil, client: testObj, stats: metrics.NoopMetrics}
- outChan := make(chan core.TransitData)
- return od, outChan
- },
-
- testLogic: func(t *testing.T, od *GethBlockODef, outChan chan core.TransitData) {
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- err := od.ReadRoutine(ctx, outChan)
- assert.Error(t, err)
- assert.EqualError(t, err, "cannot start with latest block height with end height configured")
- },
- },
- {
- description: "Making sure that number of blocks fetched matches the assumption. Number of messages should be 5, in the channel",
-
- constructionLogic: func() (*GethBlockODef, chan core.TransitData) {
- // setup mock
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
- testObj := mocks.NewMockEthClient(ctrl)
-
- header := types.Header{
- ParentHash: common.HexToHash("0x123456789"),
- Number: big.NewInt(7),
- }
- block := types.NewBlock(&header, nil, nil, nil, trie.NewStackTrie(nil))
-
- testObj.
- EXPECT().
- HeaderByNumber(gomock.Any(), gomock.Any()).
- Return(&header, nil).
- AnyTimes()
- testObj.
- EXPECT().
- BlockByNumber(gomock.Any(), gomock.Any()).
- Return(block, nil).
- AnyTimes()
-
- od := &GethBlockODef{cfg: &core.ClientConfig{
- StartHeight: big.NewInt(1),
- EndHeight: big.NewInt(5),
- NumOfRetries: 3,
- PollInterval: 1000,
- }, currHeight: nil, client: testObj, stats: metrics.NoopMetrics}
- outChan := make(chan core.TransitData, 10)
- return od, outChan
- },
-
- testLogic: func(t *testing.T, od *GethBlockODef, outChan chan core.TransitData) {
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- err := od.ReadRoutine(ctx, outChan)
- assert.NoError(t, err)
- close(outChan)
- assert.Equal(t, len(outChan), 5)
- },
- },
- // Leaving this here to help devs test infinite loops
- //
- //{
- // name: "Latest block check",
- // description: "Making sure that number of blocks fetched matches the assumption. Number of messages should be 5, in the channel",
- //
- // constructionLogic: func() (*GethBlockODef, chan core.TransitData) {
- // testObj := new(EthClientMocked)
- // header := types.Header{
- // ParentHash: common.HexToHash("0x123456789"),
- // Number: big.NewInt(1),
- // }
- // block := types.NewBlock(&header, nil, nil, nil, trie.NewStackTrie(nil))
- // // setup expectations
- // testObj.On("DialContext", mock.Anything, "pass test").Return(nil)
- // testObj.On("HeaderByNumber", mock.Anything, mock.Anything).Return(&header, nil)
- // testObj.On("BlockByNumber", mock.Anything, mock.Anything).Return(block, nil)
- //
- // od := &GethBlockODef{cfg: &core.ClientConfig{
- // RPCEndpoint: "pass test",
- // StartHeight: nil,
- // EndHeight: nil,
- // NumOfRetries: 3,
- // PollInterval: 1000,
-
- // }, currHeight: nil, client: testObj}
- // outChan := make(chan core.TransitData, 10)
- // return od, outChan
- // },
- //
- // testLogic: func(t *testing.T, od *GethBlockODef, outChan chan core.TransitData) {
- //
- // ctx, cancel := context.WithCancel(context.Background())
- // defer cancel()
- //
- // err := od.ReadRoutine(ctx, outChan)
- // assert.NoError(t, err)
- // close(outChan)
- // assert.Equal(t, len(outChan), 5)
- // },
- // },
- }
-
- for i, tc := range tests {
- t.Run(fmt.Sprintf("%d-%s", i, tc.name), func(t *testing.T) {
- od, outChan := tc.constructionLogic()
- tc.testLogic(t, od, outChan)
- })
-
- }
-}
diff --git a/internal/etl/registry/oracle/types.go b/internal/etl/registry/oracle/types.go
deleted file mode 100644
index f685bc90..00000000
--- a/internal/etl/registry/oracle/types.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package oracle
-
-const (
- noBackTestSupportError = "backtest routine is unimplemented"
-)
diff --git a/internal/etl/registry/pipe/event_log.go b/internal/etl/registry/pipe/event_log.go
deleted file mode 100644
index b1cba92b..00000000
--- a/internal/etl/registry/pipe/event_log.go
+++ /dev/null
@@ -1,258 +0,0 @@
-package pipe
-
-import (
- "context"
- "fmt"
-
- "github.com/base-org/pessimism/internal/client"
- p_common "github.com/base-org/pessimism/internal/common"
- "github.com/base-org/pessimism/internal/core"
- "github.com/base-org/pessimism/internal/etl/component"
- "github.com/base-org/pessimism/internal/logging"
- "github.com/base-org/pessimism/internal/metrics"
- "github.com/base-org/pessimism/internal/state"
- "github.com/ethereum/go-ethereum"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto"
-
- "go.uber.org/zap"
-)
-
-const (
- // dlqMaxSize ... Max size of the DLQ
- // NOTE ... This could be made configurable via env vars
- // or some other mechanism if needed
- dlqMaxSize = 100
-)
-
-// EventDefinition ... Represents the stateful definition of the event log pipe component
-// Used to transform block data into event logs (block->events)
-// Uses a DLQ to reprocess failed queries & state store to get events to monitor
-type EventDefinition struct {
- client client.EthClient
- dlq *p_common.DLQ[core.TransitData]
-
- pUUID core.PUUID
- ss state.Store
-
- SK *core.StateKey
-}
-
-// NewEventDefinition ... Initializes the event log pipe definition
-func NewEventDefinition(ctx context.Context, n core.Network) (*EventDefinition, error) {
- // 1. Load dependencies from context
- client, err := client.FromNetwork(ctx, n)
- if err != nil {
- return nil, err
- }
-
- ss, err := state.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- // 2. Construct the pipe definition
- ed := &EventDefinition{
- dlq: p_common.NewTransitDLQ(dlqMaxSize),
- client: client,
- ss: ss,
- }
- return ed, nil
-}
-
-// NewEventParserPipe ... Initializer
-func NewEventParserPipe(ctx context.Context, cfg *core.ClientConfig,
- opts ...component.Option) (component.Component, error) {
- // 1. Construct the pipe definition
- ed, err := NewEventDefinition(ctx, cfg.Network)
- if err != nil {
- return nil, err
- }
-
- // 2. Embed the definition into a generic pipe construction
- p, err := component.NewPipe(ctx, ed, core.GethBlock, core.EventLog, opts...)
- if err != nil {
- return nil, err
- }
-
- // 3. Set the post component construction fields on the definition
- // There's likely a more extensible way to construct this definition fields
- // given that they're used by component implementations across the ETL
- ed.SK = p.StateKey().Clone()
- ed.pUUID = p.PUUID()
- return p, nil
-}
-
-// getEventsToMonitor ... Gets the smart contract events to monitor from the state store
-func (ed *EventDefinition) getTopics(ctx context.Context,
- addresses []string, ss state.Store) [][]common.Hash {
- sigs := make([]common.Hash, 0)
-
- // 1. Iterate over addresses and construct nested state keys
- // to lookup the associated events to monitor
- for _, address := range addresses {
- innerKey := &core.StateKey{
- Nesting: false,
- Prefix: ed.SK.Prefix,
- ID: address,
- PUUID: ed.SK.PUUID,
- }
-
- // 1.1 Attempt to fetch the events to monitor from the state store
- // and continue if there is an error
- events, err := ss.GetSlice(ctx, innerKey)
- if err != nil {
- logging.WithContext(ctx).Error("Failed to get events to monitor",
- zap.String(logging.PUUIDKey, ed.pUUID.String()),
- zap.Error(err))
- continue
- }
-
- // 1.2 Compute signatures for the function declaration strings
- for _, event := range events {
- sigs = append(sigs, crypto.Keccak256Hash([]byte(event)))
- }
- }
-
- // 2. Construct the topics slice to be used in the filter query
- // via populating the first index of 2D Topics slice with the event signatures to monitor
- topics := make([][]common.Hash, 1)
- topics[0] = sigs
-
- return topics
-}
-
-// Transform ... Attempts to reprocess previously failed queries first
-// before attempting to process the current block data
-func (ed *EventDefinition) Transform(ctx context.Context, td core.TransitData) ([]core.TransitData, error) {
- logger := logging.WithContext(ctx)
- // 1. Check to see if there are any failed queries to reprocess
- // If failures occur again, add the caller (Transform)
- // function input to the DLQ and return
- var (
- tds []core.TransitData
- err error
- )
-
- if !ed.dlq.Empty() {
- logger.Debug("Attempting to reprocess failed queries",
- zap.Int("dlq_size", ed.dlq.Size()))
-
- tds, err = ed.attemptDLQ(ctx)
- // NOTE ... Returning here is intentional to ensure that block events
- // downstream are processed in the sequential order for which they came in
- if err != nil {
- err = ed.dlq.Add(&td)
- if err != nil {
- return tds, err
- }
- }
- logger.Debug("Successfully reprocessed failed queries",
- zap.String(logging.PUUIDKey, ed.pUUID.String()))
- }
-
- // 2. If there are no failed queries, then process the current block data
- // and add a data input to the DLQ if it fails for reprocessing next block
- tds2, err := ed.transformFunc(ctx, td)
- if err != nil {
- if ed.dlq.Full() {
- // NOTE ... If the DLQ is full, then we pop the oldest entry
- // to make room for the new entry
- lostVal, _ := ed.dlq.Pop()
- logger.Warn("DLQ is full, popping oldest entry",
- zap.String(logging.PUUIDKey, ed.pUUID.String()),
- zap.Any("lost_value", lostVal))
-
- metrics.WithContext(ctx).
- IncMissedBlock(ed.pUUID)
- }
-
- _ = ed.dlq.Add(&td)
- logging.WithContext(ctx).Error("Failed to process block data",
- zap.Int("dlq_size", ed.dlq.Size()))
-
- return tds, err
- }
-
- // 3. Concatenate the results from the failed queries and the current block data
- // and return
- tds = append(tds, tds2...)
- return tds, nil
-}
-
-// attemptDLQ ... Attempts to reprocess previously failed queries
-func (ed *EventDefinition) attemptDLQ(ctx context.Context) ([]core.TransitData, error) {
- failedInputs := ed.dlq.PopAll()
-
- // 1. Attempt to reprocess failed inputs
- tds := make([]core.TransitData, 0)
- for _, td := range failedInputs {
- result, err := ed.transformFunc(ctx, *td)
- // 2. If the reprocessing fails, then the function will return an error
- if err != nil {
- err = ed.dlq.Add(td)
- if err != nil {
- return tds, err
- }
- // NOTE ... Returning here is intentional to ensure that block events
- // downstream are processed in the sequential order for which they came in
- return tds, err
- }
-
- // 3. If the reprocessing succeeds, append result to return slice
- tds = append(tds, result...)
- }
-
- return tds, nil
-}
-
-// transformFunc ... Gets the events from the block, filters them and
-// returns them if they are in the list of events to monitor
-func (ed *EventDefinition) transformFunc(ctx context.Context, td core.TransitData) ([]core.TransitData, error) {
- // 1. Convert arbitrary transit data to go-ethereum compatible block type
- block, success := td.Value.(types.Block)
- if !success {
- return []core.TransitData{}, fmt.Errorf("could not convert to block")
- }
-
- // 2. Fetch the addresses and events to monitor for
- logging.NoContext().Debug("Getting addresses",
- zap.String(logging.PUUIDKey, ed.pUUID.String()))
-
- addresses, err := ed.ss.GetSlice(ctx, ed.SK)
- if err != nil {
- return []core.TransitData{}, err
- }
-
- topics := ed.getTopics(ctx, addresses, ed.ss)
- hash := block.Header().Hash()
-
- // 3. Construct and execute a filter query on the provided block hash
- // to get the relevant logs
- query := ethereum.FilterQuery{
- BlockHash: &hash,
- Addresses: p_common.SliceToAddresses(addresses),
- Topics: topics,
- }
-
- logs, err := ed.client.FilterLogs(context.Background(), query)
- if err != nil {
- return []core.TransitData{}, err
- }
-
- // 4. See if there are any logs to process
- if len(logs) == 0 {
- return []core.TransitData{}, nil
- }
-
- // 5. Convert the logs to transit data and return them
- result := make([]core.TransitData, 0)
- for _, log := range logs {
- result = append(result,
- core.NewTransitData(core.EventLog, log, core.WithAddress(log.Address),
- core.WithOriginTS(td.OriginTS)))
- }
-
- return result, nil
-}
diff --git a/internal/etl/registry/pipe/event_log_test.go b/internal/etl/registry/pipe/event_log_test.go
deleted file mode 100644
index f8c278e2..00000000
--- a/internal/etl/registry/pipe/event_log_test.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package pipe_test
-
-import (
- "context"
- "fmt"
- "testing"
-
- "github.com/base-org/pessimism/internal/core"
- "github.com/base-org/pessimism/internal/etl/registry/pipe"
- "github.com/base-org/pessimism/internal/state"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/golang/mock/gomock"
- "github.com/stretchr/testify/assert"
-
- "github.com/base-org/pessimism/internal/etl/component"
- "github.com/base-org/pessimism/internal/mocks"
-)
-
-// testSuite ... Test suite for the event log pipe
-type testSuite struct {
- ctx context.Context
- def component.PipeDefinition
- mockSuite *mocks.MockSuite
-}
-
-// defConstructor ... Default constructor for the test suite
-func defConstructor(t *testing.T) *testSuite {
- ctrl := gomock.NewController(t)
- ctx, suite := mocks.Context(context.Background(), ctrl)
-
- // Populate the state store with the events to monitor
- // NOTE - There's likely a more extensible way to handle nested keys in the state store
- _ = state.InsertUnique(ctx, &core.StateKey{
- Nesting: true,
- }, "0x00000000")
-
- innerKey := &core.StateKey{
- Nesting: false,
- ID: "0x00000000",
- }
-
- _ = state.InsertUnique(ctx, innerKey, "transfer(address,address,uint256)")
-
- ed, err := pipe.NewEventDefinition(ctx, core.Layer1)
- if err != nil {
- t.Fatal(err)
- }
-
- nilKey := &core.StateKey{}
- ed.SK = nilKey
-
- return &testSuite{
- ctx: ctx,
- def: ed,
- mockSuite: suite,
- }
-}
-
-// TestEventLogPipe ... Tests the event log pipe
-func TestEventLogPipe(t *testing.T) {
- var tests = []struct {
- name string
- constructor func(t *testing.T) *testSuite
- runner func(t *testing.T, suite *testSuite)
- }{
- {
- name: "Error when failed FilterQuery",
- constructor: defConstructor,
- runner: func(t *testing.T, suite *testSuite) {
- suite.mockSuite.MockL1.EXPECT().FilterLogs(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("unknown block"))
-
- _, err := suite.def.Transform(suite.ctx, core.TransitData{
- Value: *types.NewBlockWithHeader(&types.Header{})})
- assert.Error(t, err)
- },
- },
- {
- name: "No Error When Successful Filter Query",
- constructor: func(t *testing.T) *testSuite {
- ts := defConstructor(t)
-
- return ts
- },
- runner: func(t *testing.T, suite *testSuite) {
- suite.mockSuite.MockL1.EXPECT().FilterLogs(gomock.Any(), gomock.Any()).Return(nil, nil)
-
- tds, err := suite.def.Transform(suite.ctx, core.TransitData{
- Value: *types.NewBlockWithHeader(&types.Header{}),
- })
- assert.NoError(t, err)
- assert.Empty(t, tds)
- },
- },
- {
- name: "DLQ Retry When Failed Filter Query",
- constructor: func(t *testing.T) *testSuite {
- ts := defConstructor(t)
-
- return ts
- },
- runner: func(t *testing.T, suite *testSuite) {
- // 1. Fail the first filter query and assert that the DLQ is populated
- suite.mockSuite.MockL1.EXPECT().
- FilterLogs(gomock.Any(), gomock.Any()).
- Return(nil, fmt.Errorf("unknown block"))
-
- tds, err := suite.def.Transform(suite.ctx, core.TransitData{
- Value: *types.NewBlockWithHeader(&types.Header{}),
- })
- assert.Error(t, err)
- assert.Empty(t, tds)
-
- log1 := types.Log{
- Address: common.HexToAddress("0x0"),
- }
-
- log2 := types.Log{
- Address: common.HexToAddress("0x1"),
- }
-
- // 2. Retry the filter query and assert that the DLQ is empty
- suite.mockSuite.MockL1.EXPECT().
- FilterLogs(gomock.Any(), gomock.Any()).
- Return([]types.Log{log1}, nil)
-
- suite.mockSuite.MockL1.EXPECT().
- FilterLogs(gomock.Any(), gomock.Any()).
- Return([]types.Log{log2}, nil)
-
- tds, err = suite.def.Transform(suite.ctx, core.TransitData{
- Value: *types.NewBlockWithHeader(&types.Header{}),
- })
-
- assert.NoError(t, err)
- assert.NotEmpty(t, tds)
-
- actualLog1, ok := tds[0].Value.(types.Log)
- assert.True(t, ok)
-
- actualLog2, ok := tds[1].Value.(types.Log)
- assert.True(t, ok)
-
- assert.Equal(t, actualLog1, log1)
- assert.Equal(t, actualLog2, log2)
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- suite := tt.constructor(t)
- tt.runner(t, suite)
- })
- }
-
-}
diff --git a/internal/etl/registry/readers.go b/internal/etl/registry/readers.go
new file mode 100644
index 00000000..469c08f9
--- /dev/null
+++ b/internal/etl/registry/readers.go
@@ -0,0 +1,150 @@
+package registry
+
+import (
+ "context"
+ "math/big"
+ "time"
+
+ "github.com/base-org/pessimism/internal/client"
+ "github.com/base-org/pessimism/internal/core"
+ "github.com/base-org/pessimism/internal/etl/process"
+ "github.com/base-org/pessimism/internal/logging"
+ ix_node "github.com/ethereum-optimism/optimism/indexer/node"
+ "github.com/ethereum/go-ethereum/core/types"
+ "go.uber.org/zap"
+)
+
+const (
+ // This could be configurable in the future
+ batchSize = 100
+)
+
+type HeaderTraversal struct {
+ n core.Network
+ id core.ProcessID
+ pathID core.PathID
+
+ client ix_node.EthClient
+ traversal *ix_node.HeaderTraversal
+ pollInterval time.Duration
+
+ // TODO - Add height metric
+ // stats metrics.Metricer
+}
+
+func NewHeaderTraversal(ctx context.Context, cfg *core.ClientConfig,
+ opts ...process.Option) (process.Process, error) {
+ clients, err := client.FromContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ node, err := clients.NodeClient(cfg.Network)
+ if err != nil {
+ return nil, err
+ }
+
+ var startHeader *types.Header
+ if cfg.EndHeight != nil {
+ header, err := node.BlockHeaderByNumber(cfg.EndHeight)
+ if err != nil {
+ return nil, err
+ }
+
+ startHeader = header
+ }
+
+ // TODO - Support network confirmation counts
+
+ ht := &HeaderTraversal{
+ n: cfg.Network,
+ client: node,
+ traversal: ix_node.NewHeaderTraversal(node, startHeader, big.NewInt(0)),
+ pollInterval: cfg.PollInterval,
+ }
+
+ reader, err := process.NewReader(ctx, core.BlockHeader, ht, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ ht.id = reader.ID()
+ ht.pathID = reader.PathID()
+ return reader, nil
+}
+
+// Height ... Current block height
+func (ht *HeaderTraversal) Height() (*big.Int, error) {
+ return ht.traversal.LastHeader().Number, nil
+}
+
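+// Backfill ... Fetches block headers from start toward end in batchSize chunks and forwards each header to the consumer channel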
+func (ht *HeaderTraversal) Backfill(start, end *big.Int, consumer chan core.Event) error {
+ for i := start; i.Cmp(end) < 0; i.Add(i, big.NewInt(batchSize)) {
+ end := big.NewInt(0).Add(i, big.NewInt(batchSize))
+
+ headers, err := ht.client.BlockHeadersByRange(i, end)
+ if err != nil {
+ return err
+ }
+
+ for _, header := range headers {
+ consumer <- core.Event{
+ Timestamp: time.Now(),
+ Type: core.BlockHeader,
+ Value: header,
+ }
+ }
+ }
+
+ return nil
+}
+
+// Loop ... Backfills from the last seen header when one exists, then polls for new chain headers and forwards them to the consumer channel
+func (ht *HeaderTraversal) Loop(ctx context.Context, consumer chan core.Event) error {
+ ticker := time.NewTicker(1 * time.Second)
+
+ recent, err := ht.client.BlockHeaderByNumber(nil)
+ if err != nil {
+ logging.WithContext(ctx).Error("Failed to get latest header", zap.Error(err))
+ }
+
+ // backfill if provided starting header
+ if ht.traversal.LastHeader() != nil {
+ err = ht.Backfill(ht.traversal.LastHeader().Number, recent.Number, consumer)
+ if err != nil {
+ return err
+ }
+ } else {
+ ht.traversal = ix_node.NewHeaderTraversal(ht.client, recent, big.NewInt(0))
+ }
+
+ for {
+ select {
+ case <-ticker.C:
+
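+			// Fetch the current chain head and emit the next batch of finalized headers once it advances past the last seen header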
+ header, err := ht.client.BlockHeaderByNumber(nil)
+ if err != nil {
+ return err
+ }
+
+ if header.Number.Cmp(ht.traversal.LastHeader().Number) > 0 {
+ headers, err := ht.traversal.NextFinalizedHeaders(batchSize)
+ if err != nil {
+ return err
+ }
+
+ for _, header := range headers {
+ consumer <- core.Event{
+ Network: ht.n,
+ Timestamp: time.Now(),
+ Type: core.BlockHeader,
+ Value: header,
+ }
+ }
+ }
+
+ case <-ctx.Done():
+ return nil
+ }
+ }
+}
diff --git a/internal/etl/registry/registry.go b/internal/etl/registry/registry.go
index 3d591e0d..5ae602da 100644
--- a/internal/etl/registry/registry.go
+++ b/internal/etl/registry/registry.go
@@ -4,118 +4,89 @@ import (
"fmt"
"github.com/base-org/pessimism/internal/core"
- "github.com/base-org/pessimism/internal/etl/registry/oracle"
- "github.com/base-org/pessimism/internal/etl/registry/pipe"
)
const (
- noEntryErr = "could not find entry in registry for encoded register type %s"
+ noEntryErr = "could not find data topic type %s"
)
-// Registry ... Interface for registry
-type Registry interface {
- GetDependencyPath(rt core.RegisterType) (core.RegisterDependencyPath, error)
- GetRegister(rt core.RegisterType) (*core.DataRegister, error)
+type Registry struct {
+ topics map[core.TopicType]*core.DataTopic
}
-// componentRegistry ... Registry implementation
-type componentRegistry struct {
- registers map[core.RegisterType]*core.DataRegister
-}
-
-// NewRegistry ... Instantiates a new hardcoded registry
-// that contains all extractable ETL data types
-func NewRegistry() Registry {
- registers := map[core.RegisterType]*core.DataRegister{
- core.GethBlock: {
- Addressing: false,
- DataType: core.GethBlock,
- ComponentType: core.Oracle,
- ComponentConstructor: oracle.NewGethBlockOracle,
+func New() *Registry {
+ topics := map[core.TopicType]*core.DataTopic{
+ core.BlockHeader: {
+ Addressing: false,
+ DataType: core.BlockHeader,
+ ProcessType: core.Read,
+ Constructor: NewHeaderTraversal,
Dependencies: noDeps(),
Sk: noState(),
},
- core.AccountBalance: {
- Addressing: true,
- DataType: core.AccountBalance,
- ComponentType: core.Oracle,
- ComponentConstructor: oracle.NewAddressBalanceOracle,
- Dependencies: noDeps(),
- Sk: &core.StateKey{
- Nesting: false,
- Prefix: core.AccountBalance,
- ID: core.AddressKey,
- PUUID: nil,
- },
- },
- core.EventLog: {
- Addressing: true,
- DataType: core.EventLog,
- ComponentType: core.Pipe,
- ComponentConstructor: pipe.NewEventParserPipe,
+ core.Log: {
+ Addressing: true,
+ DataType: core.Log,
+ ProcessType: core.Subscribe,
+ Constructor: NewLogSubscriber,
- Dependencies: makeDeps(core.GethBlock),
+ Dependencies: makeDeps(core.BlockHeader),
Sk: &core.StateKey{
Nesting: true,
- Prefix: core.EventLog,
+ Prefix: core.Log,
ID: core.AddressKey,
- PUUID: nil,
+ PathID: nil,
},
},
}
- return &componentRegistry{registers}
+ return &Registry{topics}
}
-// makeDeps ... Makes dependency slice
-func makeDeps(types ...core.RegisterType) []core.RegisterType {
- deps := make([]core.RegisterType, len(types))
+func makeDeps(types ...core.TopicType) []core.TopicType {
+ deps := make([]core.TopicType, len(types))
copy(deps, types)
return deps
}
-// noDeps ... Returns empty dependency slice
-func noDeps() []core.RegisterType {
- return []core.RegisterType{}
+func noDeps() []core.TopicType {
+ return []core.TopicType{}
}
-// noState ... Returns empty state key, indicating no state dependencies
-// for cross subsystem communication (i.e. ETL -> Risk Engine)
func noState() *core.StateKey {
return nil
}
-// GetDependencyPath ... Returns in-order slice of ETL pipeline path
-func (cr *componentRegistry) GetDependencyPath(rt core.RegisterType) (core.RegisterDependencyPath, error) {
- destRegister, err := cr.GetRegister(rt)
+// TopicPath ... Returns the in-order topic dependency path for the ETL
+func (r *Registry) TopicPath(tt core.TopicType) (core.TopicPath, error) {
+ topic, err := r.GetDataTopic(tt)
if err != nil {
- return core.RegisterDependencyPath{}, err
+ return core.TopicPath{}, err
}
- registers := make([]*core.DataRegister, len(destRegister.Dependencies)+1)
+ topics := make([]*core.DataTopic, len(topic.Dependencies)+1)
- registers[0] = destRegister
+ topics[0] = topic
- for i, depType := range destRegister.Dependencies {
- depRegister, err := cr.GetRegister(depType)
+ for i, depType := range topic.Dependencies {
+ depRegister, err := r.GetDataTopic(depType)
if err != nil {
- return core.RegisterDependencyPath{}, err
+ return core.TopicPath{}, err
}
- registers[i+1] = depRegister
+ topics[i+1] = depRegister
}
- return core.RegisterDependencyPath{Path: registers}, nil
+ return core.TopicPath{Path: topics}, nil
}
-// GetRegister ... Returns a data register provided an enum type
-func (cr *componentRegistry) GetRegister(rt core.RegisterType) (*core.DataRegister, error) {
- if _, exists := cr.registers[rt]; !exists {
- return nil, fmt.Errorf(noEntryErr, rt)
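+// GetDataTopic ... Returns a data topic provided a topic type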
+func (r *Registry) GetDataTopic(tt core.TopicType) (*core.DataTopic, error) {
+ if _, exists := r.topics[tt]; !exists {
+ return nil, fmt.Errorf(noEntryErr, tt)
}
- return cr.registers[rt], nil
+ return r.topics[tt], nil
}
diff --git a/internal/etl/registry/registry_test.go b/internal/etl/registry/registry_test.go
index 0084c95d..82c40c0d 100644
--- a/internal/etl/registry/registry_test.go
+++ b/internal/etl/registry/registry_test.go
@@ -9,25 +9,23 @@ import (
"github.com/stretchr/testify/assert"
)
-func Test_ComponentRegistry(t *testing.T) {
+func TestProcessRegistry(t *testing.T) {
var tests = []struct {
name string
function string
description string
- constructionLogic func() registry.Registry
- testLogic func(*testing.T, registry.Registry)
+ test func(*testing.T, *registry.Registry)
}{
{
name: "Fetch Failure",
- function: "GetRegister",
+ function: "GetDataTopic",
description: "When trying to get an invalid register, an error should be returned",
- constructionLogic: registry.NewRegistry,
- testLogic: func(t *testing.T, testRegistry registry.Registry) {
+ test: func(t *testing.T, r *registry.Registry) {
- invalidType := core.RegisterType(255)
- register, err := testRegistry.GetRegister(invalidType)
+ invalidType := core.TopicType(255)
+ register, err := r.GetDataTopic(invalidType)
assert.Error(t, err)
assert.Nil(t, register)
@@ -35,42 +33,40 @@ func Test_ComponentRegistry(t *testing.T) {
},
{
name: "Fetch Success",
- function: "GetRegister",
+ function: "GetDataTopic",
description: "When trying to get a register provided a valid register type, a register should be returned",
- constructionLogic: registry.NewRegistry,
- testLogic: func(t *testing.T, testRegistry registry.Registry) {
+ test: func(t *testing.T, r *registry.Registry) {
- reg, err := testRegistry.GetRegister(core.GethBlock)
+ reg, err := r.GetDataTopic(core.BlockHeader)
assert.NoError(t, err)
assert.NotNil(t, reg)
- assert.Equal(t, reg.DataType, core.GethBlock)
+ assert.Equal(t, reg.DataType, core.BlockHeader)
},
},
{
name: "Fetch Dependency Path Success",
- function: "GetRegister",
+ function: "GetDataTopic",
description: "When trying to get a register dependency path provided a valid register type, a path should be returned",
- constructionLogic: registry.NewRegistry,
- testLogic: func(t *testing.T, testRegistry registry.Registry) {
+ test: func(t *testing.T, r *registry.Registry) {
- path, err := testRegistry.GetDependencyPath(core.EventLog)
+ path, err := r.TopicPath(core.Log)
assert.NoError(t, err)
assert.Len(t, path.Path, 2)
- assert.Equal(t, path.Path[1].DataType, core.GethBlock)
- assert.Equal(t, path.Path[0].DataType, core.EventLog)
+ assert.Equal(t, path.Path[1].DataType, core.BlockHeader)
+ assert.Equal(t, path.Path[0].DataType, core.Log)
},
},
}
for i, tc := range tests {
t.Run(fmt.Sprintf("%d-%s-%s", i, tc.function, tc.name), func(t *testing.T) {
- testRouter := tc.constructionLogic()
- tc.testLogic(t, testRouter)
+ r := registry.New()
+ tc.test(t, r)
})
}
diff --git a/internal/etl/registry/subscription_test.go b/internal/etl/registry/subscription_test.go
new file mode 100644
index 00000000..1966eaa0
--- /dev/null
+++ b/internal/etl/registry/subscription_test.go
@@ -0,0 +1,103 @@
+package registry_test
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/base-org/pessimism/internal/core"
+ "github.com/base-org/pessimism/internal/state"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+
+ "github.com/base-org/pessimism/internal/etl/process"
+ "github.com/base-org/pessimism/internal/etl/registry"
+ "github.com/base-org/pessimism/internal/mocks"
+)
+
+// testSuite ... Test suite for the event log subscription
+type testSuite struct {
+ ctx context.Context
+ def process.Subscription
+ mockSuite *mocks.MockSuite
+}
+
+// defConstructor ... Default constructor for the test suite
+func defConstructor(t *testing.T) *testSuite {
+ ctrl := gomock.NewController(t)
+ ctx, suite := mocks.Context(context.Background(), ctrl)
+
+ // Populate the state store with the events to monitor
+ // NOTE - There's likely a more extensible way to handle nested keys in the state store
+ _ = state.InsertUnique(ctx, &core.StateKey{
+ Nesting: true,
+ }, "0x00000000")
+
+ innerKey := &core.StateKey{
+ Nesting: false,
+ ID: "0x00000000",
+ }
+
+ _ = state.InsertUnique(ctx, innerKey, "transfer(address,address,uint256)")
+
+ subscript, err := registry.NewLogSubscript(ctx, core.Layer1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ subscript.SK = &core.StateKey{}
+
+ return &testSuite{
+ ctx: ctx,
+ def: subscript,
+ mockSuite: suite,
+ }
+}
+
+// TestLogSubscription ... Tests the event log subscription
+func TestLogSubscription(t *testing.T) {
+ var tests = []struct {
+ name string
+ constructor func(t *testing.T) *testSuite
+ runner func(t *testing.T, suite *testSuite)
+ }{
+ {
+ name: "Error when failed FilterQuery",
+ constructor: defConstructor,
+ runner: func(t *testing.T, ts *testSuite) {
+ ts.mockSuite.MockL1.EXPECT().FilterLogs(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("unknown block")).
+ Times(10)
+
+ _, err := ts.def.Run(ts.ctx, core.Event{
+ Value: types.Header{}})
+ assert.Error(t, err)
+ },
+ },
+ {
+ name: "No Error When Successful Filter Query",
+ constructor: func(t *testing.T) *testSuite {
+ ts := defConstructor(t)
+
+ return ts
+ },
+ runner: func(t *testing.T, ts *testSuite) {
+ ts.mockSuite.MockL1.EXPECT().FilterLogs(gomock.Any(), gomock.Any()).Return(nil, nil)
+
+ tds, err := ts.def.Run(ts.ctx, core.Event{
+ Value: types.Header{},
+ })
+ assert.NoError(t, err)
+ assert.Empty(t, tds)
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ suite := tt.constructor(t)
+ tt.runner(t, suite)
+ })
+ }
+
+}
diff --git a/internal/etl/registry/subscriptions.go b/internal/etl/registry/subscriptions.go
new file mode 100644
index 00000000..2220de31
--- /dev/null
+++ b/internal/etl/registry/subscriptions.go
@@ -0,0 +1,158 @@
+package registry
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/base-org/pessimism/internal/client"
+ p_common "github.com/base-org/pessimism/internal/common"
+ "github.com/base-org/pessimism/internal/core"
+ "github.com/base-org/pessimism/internal/etl/process"
+ "github.com/base-org/pessimism/internal/logging"
+ "github.com/base-org/pessimism/internal/state"
+ "github.com/ethereum-optimism/optimism/op-service/retry"
+ "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+
+ "go.uber.org/zap"
+)
+
+type LogSubscription struct {
+ PathID core.PathID
+ SK *core.StateKey
+
+ client client.EthClient
+ ss state.Store
+}
+
+func NewLogSubscript(ctx context.Context, n core.Network) (*LogSubscription, error) {
+ client, err := client.FromNetwork(ctx, n)
+ if err != nil {
+ return nil, err
+ }
+
+ ss, err := state.FromContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ sub := &LogSubscription{
+ client: client,
+ ss: ss,
+ }
+ return sub, nil
+}
+
+func NewLogSubscriber(ctx context.Context, cfg *core.ClientConfig,
+ opts ...process.Option) (process.Process, error) {
+ s, err := NewLogSubscript(ctx, cfg.Network)
+ if err != nil {
+ return nil, err
+ }
+
+ p, err := process.NewSubscriber(ctx, s, core.BlockHeader, core.Log, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ s.SK = p.StateKey().Clone()
+ s.PathID = p.PathID()
+ return p, nil
+}
+
+// Fetch smart contract events to monitor
+func (sub *LogSubscription) getTopics(ctx context.Context,
+ addresses []string, ss state.Store) [][]common.Hash {
+ sigs := make([]common.Hash, 0)
+
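+	// Collect the event signatures registered in state for each monitored address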
+ for _, address := range addresses {
+ innerKey := &core.StateKey{
+ Nesting: false,
+ Prefix: sub.SK.Prefix,
+ ID: address,
+ PathID: sub.SK.PathID,
+ }
+
+ events, err := ss.GetSlice(ctx, innerKey)
+ if err != nil {
+ logging.WithContext(ctx).Error("Failed to get events to monitor",
+ zap.String(logging.Path, sub.PathID.String()),
+ zap.Error(err))
+ continue
+ }
+
+ for _, event := range events {
+ sigs = append(sigs, crypto.Keccak256Hash([]byte(event)))
+ }
+ }
+
+ // populate event signatures to monitor
+ topics := make([][]common.Hash, 1)
+ topics[0] = sigs
+
+ return topics
+}
+
+func (sub *LogSubscription) Run(ctx context.Context, e core.Event) ([]core.Event, error) {
+ logger := logging.WithContext(ctx)
+ events, err := sub.transformEvents(ctx, e)
+ if err != nil {
+ logger.Error("Failed to process block data",
+ zap.String(logging.Path, sub.PathID.String()),
+ zap.Error(err))
+
+ return nil, err
+ }
+
+ return events, nil
+}
+
+func (sub *LogSubscription) transformEvents(ctx context.Context, e core.Event) ([]core.Event, error) {
+ header, success := e.Value.(types.Header)
+ if !success {
+ return []core.Event{}, fmt.Errorf("could not convert to header")
+ }
+
+ logging.NoContext().Debug("Getting addresses",
+ zap.String(logging.Path, sub.PathID.String()))
+
+ addresses, err := sub.ss.GetSlice(ctx, sub.SK)
+ if err != nil {
+ return []core.Event{}, err
+ }
+
+ topics := sub.getTopics(ctx, addresses, sub.ss)
+ hash := header.Hash()
+
+ // Construct and execute a filter query on the provided block hash
+ // to get the relevant logs
+ query := ethereum.FilterQuery{
+ BlockHash: &hash,
+ Addresses: p_common.SliceToAddresses(addresses),
+ Topics: topics,
+ }
+
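+	// Retry the log filter query up to 10 times using the shared retry strategy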
+ logs, err := retry.Do[[]types.Log](ctx, 10, core.RetryStrategy(), func() ([]types.Log, error) {
+ return sub.client.FilterLogs(context.Background(), query)
+ })
+
+ if err != nil {
+		logging.WithContext(ctx).Error("Failed to filter event logs", zap.Error(err))
+ return []core.Event{}, err
+ }
+
+ if len(logs) == 0 {
+ return []core.Event{}, nil
+ }
+
+ result := make([]core.Event, 0)
+ for _, log := range logs {
+ result = append(result,
+ core.NewEvent(core.Log, log, core.WithAddress(log.Address),
+ core.WithOriginTS(e.OriginTS)))
+ }
+
+ return result, nil
+}
diff --git a/internal/etl/store.go b/internal/etl/store.go
new file mode 100644
index 00000000..441cd5d8
--- /dev/null
+++ b/internal/etl/store.go
@@ -0,0 +1,125 @@
+package etl
+
+import (
+ "fmt"
+
+ "github.com/base-org/pessimism/internal/core"
+)
+
+type Entry struct {
+ id core.PathID
+ p Path
+}
+
+type Store struct {
+ paths map[core.PathIdentifier][]Entry
+ procToPath map[core.ProcessID][]core.PathID
+}
+
+// NewStore ... Initializer
+func NewStore() *Store {
+ return &Store{
+ procToPath: make(map[core.ProcessID][]core.PathID),
+ paths: make(map[core.PathIdentifier][]Entry),
+ }
+}
+
+/*
+Note - PathIDs can only conflict
+ when PathType = Live && activityState = Active
+*/
+
+// Link ... Creates an entry for a new ProcessID:PathID mapping
+func (store *Store) Link(id1 core.ProcessID, id2 core.PathID) {
+	// EDGE CASE - ProcessID:PathID pair already exists
+ if _, found := store.procToPath[id1]; !found { // Create slice
+ store.procToPath[id1] = make([]core.PathID, 0)
+ }
+
+ store.procToPath[id1] = append(store.procToPath[id1], id2)
+}
+
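+// AddPath ... Stores a new path entry and links each of its processes to the path ID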
+func (store *Store) AddPath(id core.PathID, path Path) {
+ entry := Entry{
+ id: id,
+ p: path,
+ }
+
+ entrySlice, found := store.paths[id.ID]
+ if !found {
+ entrySlice = make([]Entry, 0)
+ }
+
+ entrySlice = append(entrySlice, entry)
+
+ store.paths[id.ID] = entrySlice
+
+ for _, p := range path.Processes() {
+ store.Link(p.ID(), id)
+ }
+}
+
+func (store *Store) GetPathIDs(cID core.ProcessID) ([]core.PathID, error) {
+ pIDs, found := store.procToPath[cID]
+
+ if !found {
+ return []core.PathID{}, fmt.Errorf("could not find key for %s", cID)
+ }
+
+ return pIDs, nil
+}
+
+func (store *Store) GetPathByID(id core.PathID) (Path, error) {
+ if _, found := store.paths[id.ID]; !found {
+ return nil, fmt.Errorf(pIDNotFoundErr, id.String())
+ }
+
+ for _, p := range store.paths[id.ID] {
+ if p.id.UUID == id.UUID {
+ return p.p, nil
+ }
+ }
+
+ return nil, fmt.Errorf(uuidNotFoundErr)
+}
+
+func (store *Store) GetExistingPaths(id core.PathID) []core.PathID {
+ entries, exists := store.paths[id.ID]
+ if !exists {
+ return []core.PathID{}
+ }
+
+ ids := make([]core.PathID, len(entries))
+ for i, entry := range entries {
+ ids[i] = entry.id
+ }
+
+ return ids
+}
+
+// ActiveCount ... Returns the number of active paths
+func (store *Store) ActiveCount() int {
+ count := 0
+
+ for _, entrySlice := range store.paths {
+ for _, entry := range entrySlice {
+ if entry.p.State() == ACTIVE {
+ count++
+ }
+ }
+ }
+
+ return count
+}
+
+func (store *Store) Paths() []Path {
+ paths := make([]Path, 0)
+
+ for _, entrySlice := range store.paths {
+ for _, entry := range entrySlice {
+ paths = append(paths, entry.p)
+ }
+ }
+
+ return paths
+}
diff --git a/internal/etl/store_test.go b/internal/etl/store_test.go
new file mode 100644
index 00000000..ea00bd82
--- /dev/null
+++ b/internal/etl/store_test.go
@@ -0,0 +1,272 @@
+package etl_test
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/base-org/pessimism/internal/core"
+ "github.com/base-org/pessimism/internal/etl"
+
+ "github.com/base-org/pessimism/internal/etl/process"
+ "github.com/base-org/pessimism/internal/mocks"
+ "github.com/stretchr/testify/assert"
+)
+
+var (
+ nilID = core.MakePathID(0, core.ProcessID{}, core.ProcessID{})
+
+ cID1 = core.MakeProcessID(0, 0, 0, 0)
+ cID2 = core.MakeProcessID(0, 0, 0, 0)
+)
+
+func getTestPath(ctx context.Context) etl.Path {
+
+ p1, err := mocks.NewReader(ctx, core.BlockHeader, process.WithID(cID2))
+ if err != nil {
+ panic(err)
+ }
+
+ p2, err := mocks.NewSubscriber(ctx, core.BlockHeader, core.Log, process.WithID(cID1))
+ if err != nil {
+ panic(err)
+ }
+
+ procs := []process.Process{
+ p1,
+ p2,
+ }
+
+ path, err := etl.NewPath(&core.PathConfig{}, core.PathID{}, procs)
+ if err != nil {
+ panic(err)
+ }
+
+ return path
+}
+
+func TestStore(t *testing.T) {
+ var tests = []struct {
+ name string
+ function string
+ description string
+
+ constructionLogic func() *etl.Store
+ testLogic func(*testing.T, *etl.Store)
+ }{
+ {
+ name: "Successful Add When PID Already Exists",
+ function: "AddPath",
+ description: "",
+
+ constructionLogic: func() *etl.Store {
+ ctx := context.Background()
+
+ store := etl.NewStore()
+ testPath := getTestPath(ctx)
+
+ store.AddPath(core.PathID{}, testPath)
+ return store
+ },
+ testLogic: func(t *testing.T, store *etl.Store) {
+ ctx := context.Background()
+ testPath := getTestPath(ctx)
+
+ pID2 := core.MakePathID(
+ 0,
+ core.MakeProcessID(0, 0, 0, 1),
+ core.MakeProcessID(0, 0, 0, 1),
+ )
+
+ store.AddPath(pID2, testPath)
+
+ for _, p := range testPath.Processes() {
+ ids, err := store.GetPathIDs(p.ID())
+
+ assert.NoError(t, err)
+ assert.Len(t, ids, 2)
+ assert.Equal(t, ids[0].ID, nilID.ID)
+ assert.Equal(t, ids[1].ID, pID2.ID)
+ }
+
+ },
+ },
+ {
+			name:              "Successful Add When PID Does Not Exist",
+ function: "AddPath",
+ description: "",
+
+ constructionLogic: func() *etl.Store {
+ pr := etl.NewStore()
+ return pr
+ },
+ testLogic: func(t *testing.T, store *etl.Store) {
+ ctx := context.Background()
+ testPath := getTestPath(ctx)
+
+ pID := core.MakePathID(
+ 0,
+ core.MakeProcessID(0, 0, 0, 1),
+ core.MakeProcessID(0, 0, 0, 1),
+ )
+
+ store.AddPath(pID, testPath)
+
+ for _, p := range testPath.Processes() {
+ ids, err := store.GetPathIDs(p.ID())
+
+ assert.NoError(t, err)
+ assert.Len(t, ids, 1)
+ assert.Equal(t, ids[0], pID)
+ }
+
+ },
+ },
+ {
+ name: "Successful Retrieval When CID Is Valid Key",
+			function:          "GetPathIDs",
+ description: "",
+
+ constructionLogic: etl.NewStore,
+ testLogic: func(t *testing.T, store *etl.Store) {
+ cID := core.MakeProcessID(0, 0, 0, 0)
+ pID := core.MakePathID(0, cID, cID)
+
+ store.Link(cID, pID)
+
+ expectedIDs := []core.PathID{pID}
+ actualIDs, err := store.GetPathIDs(cID)
+
+ assert.NoError(t, err)
+ assert.Equal(t, expectedIDs, actualIDs)
+
+ },
+ },
+ {
+ name: "Failed Retrieval When CID Is Invalid Key",
+			function:          "GetPathIDs",
+ description: "",
+
+ constructionLogic: etl.NewStore,
+ testLogic: func(t *testing.T, store *etl.Store) {
+ cID := core.MakeProcessID(0, 0, 0, 0)
+
+ _, err := store.GetPathIDs(cID)
+
+ assert.Error(t, err)
+ },
+ },
+ {
+ name: "Failed Retrieval When PID Is Invalid Key",
+			function:          "GetPathByID",
+ description: "",
+
+ constructionLogic: etl.NewStore,
+ testLogic: func(t *testing.T, store *etl.Store) {
+ cID := core.MakeProcessID(0, 0, 0, 0)
+ pID := core.MakePathID(0, cID, cID)
+
+ _, err := store.GetPathByID(pID)
+ assert.Error(t, err)
+
+ },
+ }, {
+ name: "Failed Retrieval When Matching UUID Cannot Be Found",
+			function:          "GetPathByID",
+ description: "",
+
+ constructionLogic: func() *etl.Store {
+ store := etl.NewStore()
+ return store
+ },
+ testLogic: func(t *testing.T, store *etl.Store) {
+ cID := core.MakeProcessID(0, 0, 0, 0)
+ pID := core.MakePathID(0, cID, cID)
+
+ pLine := getTestPath(context.Background())
+
+ store.AddPath(pID, pLine)
+
+ pID2 := core.MakePathID(0, cID, cID)
+ _, err := store.GetPathByID(pID2)
+
+ assert.Error(t, err)
+
+ },
+ }, {
+ name: "Successful Retrieval",
+			function:          "GetPathByID",
+ description: "",
+
+ constructionLogic: func() *etl.Store {
+ store := etl.NewStore()
+ return store
+ },
+ testLogic: func(t *testing.T, store *etl.Store) {
+ cID := core.MakeProcessID(0, 0, 0, 0)
+ pID := core.MakePathID(0, cID, cID)
+
+ expected := getTestPath(context.Background())
+
+ store.AddPath(pID, expected)
+
+ actualPline, err := store.GetPathByID(pID)
+
+ assert.NoError(t, err)
+ assert.Equal(t, expected, actualPline)
+ },
+ },
+ {
+			name:              "Successful Path Fetch",
+ function: "Paths",
+ description: "",
+
+ constructionLogic: func() *etl.Store {
+ store := etl.NewStore()
+ return store
+ },
+ testLogic: func(t *testing.T, store *etl.Store) {
+ cID := core.MakeProcessID(0, 0, 0, 0)
+ pID := core.MakePathID(0, cID, cID)
+
+ expected := getTestPath(context.Background())
+
+ store.AddPath(pID, expected)
+
+ paths := store.Paths()
+
+ assert.Len(t, paths, 1)
+ assert.Equal(t, paths[0], expected)
+ },
+ },
+ {
+ name: "Successful Active Count Call",
+ function: "ActiveCount",
+ description: "",
+
+ constructionLogic: func() *etl.Store {
+ store := etl.NewStore()
+ return store
+ },
+ testLogic: func(t *testing.T, store *etl.Store) {
+ cID := core.MakeProcessID(0, 0, 0, 0)
+ pID := core.MakePathID(0, cID, cID)
+
+ expected := getTestPath(context.Background())
+
+ store.AddPath(pID, expected)
+
+ count := store.ActiveCount()
+ assert.Equal(t, count, 0)
+ },
+ },
+ }
+
+ for i, tc := range tests {
+ t.Run(fmt.Sprintf("%d-%s-%s", i, tc.function, tc.name), func(t *testing.T) {
+ testRouter := tc.constructionLogic()
+ tc.testLogic(t, testRouter)
+ })
+
+ }
+}
diff --git a/internal/etl/types.go b/internal/etl/types.go
new file mode 100644
index 00000000..62949696
--- /dev/null
+++ b/internal/etl/types.go
@@ -0,0 +1,44 @@
+package etl
+
+type ActivityState uint8
+
+const (
+ INACTIVE ActivityState = iota
+ ACTIVE
+ CRASHED
+ TERMINATED
+)
+
+func (as ActivityState) String() string {
+ switch as {
+ case INACTIVE:
+ return "inactive"
+
+ case ACTIVE:
+ return "active"
+
+ case CRASHED:
+ return "crashed"
+
+ case TERMINATED:
+ return "terminated"
+ }
+
+ return "unknown"
+}
+
+const (
+	// ETL store error constants
+ couldNotCastErr = "could not cast process initializer function to %s constructor type"
+ pIDNotFoundErr = "could not find path ID for %s"
+ uuidNotFoundErr = "could not find matching UUID for path entry"
+
+ // ProcessGraph error constants
+ procNotFoundErr = "process with ID %s does not exist within dag"
+ procExistsErr = "process with ID %s already exists in dag"
+ edgeExistsErr = "edge already exists from (%s) to (%s) in dag"
+
+ emptyPathError = "path must contain at least one process"
+ // Manager error constants
+ unknownCompType = "unknown process type %s provided"
+)
diff --git a/internal/logging/logger.go b/internal/logging/logger.go
index a7f7b062..92addfaa 100644
--- a/internal/logging/logger.go
+++ b/internal/logging/logger.go
@@ -26,9 +26,10 @@ const (
const (
AddrKey LogKey = "address"
- CUUIDKey LogKey = "cuuid"
- PUUIDKey LogKey = "puuid"
- SUUIDKey LogKey = "suuid"
+ Process LogKey = "process_id"
+ Path LogKey = "path_id"
+ UUID LogKey = "uuid"
+ Session LogKey = "session_id"
)
// NOTE - Logger is set to Nop as default to avoid redundant testing
@@ -133,7 +134,7 @@ type stringJSONEncoder struct {
}
// NewStringJSONEncoder returns an encoder that encodes the JSON log dict as a string
-// so the log processing pipeline can correctly process logs with nested JSON.
+// so the log processing path can correctly process logs with nested JSON.
func NewStringJSONEncoder(cfg zapcore.EncoderConfig) (zapcore.Encoder, error) {
return newStringJSONEncoder(cfg), nil
}
diff --git a/internal/metrics/metrics.go b/internal/metrics/metrics.go
index 611ec7ed..3f64036d 100644
--- a/internal/metrics/metrics.go
+++ b/internal/metrics/metrics.go
@@ -8,6 +8,7 @@ import (
"github.com/base-org/pessimism/internal/core"
"github.com/base-org/pessimism/internal/engine/heuristic"
"github.com/base-org/pessimism/internal/logging"
+ "github.com/ethereum/go-ethereum/rpc"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
@@ -18,6 +19,7 @@ const (
metricsNamespace = "pessimism"
SubsystemHeuristics = "heuristics"
SubsystemEtl = "etl"
+ batchMethod = "batch"
)
// serverShutdownTimeout ... Timeout for shutting down the metrics server
@@ -33,36 +35,40 @@ type Config struct {
// Metricer ... Interface for metrics
type Metricer interface {
- IncMissedBlock(pUUID core.PUUID)
- IncActiveHeuristics(ht core.HeuristicType, network core.Network, pipelineType core.PipelineType)
- IncActivePipelines(pipelineType core.PipelineType, network core.Network)
- DecActivePipelines(pipelineType core.PipelineType, network core.Network)
+ IncMissedBlock(id core.PathID)
+ IncActiveHeuristics(ht core.HeuristicType, network core.Network)
+ IncActivePaths(network core.Network)
+ DecActivePaths(network core.Network)
RecordBlockLatency(network core.Network, latency float64)
- RecordHeuristicRun(heuristic heuristic.Heuristic)
+ RecordHeuristicRun(n core.Network, h heuristic.Heuristic)
RecordAlertGenerated(alert core.Alert, dest core.AlertDestination, clientName string)
RecordNodeError(network core.Network)
- RecordPipelineLatency(pUUID core.PUUID, latency float64)
+ RecordPathLatency(id core.PathID, latency float64)
RecordAssessmentError(h heuristic.Heuristic)
- RecordInvExecutionTime(h heuristic.Heuristic, latency float64)
+ RecordAssessmentTime(h heuristic.Heuristic, latency float64)
RecordUp()
Start()
Shutdown(ctx context.Context) error
+ RecordRPCClientRequest(method string) func(err error)
+ RecordRPCClientBatchRequest(b []rpc.BatchElem) func(err error)
Document() []DocumentedMetric
}
// Metrics ... Metrics struct
type Metrics struct {
- Up prometheus.Gauge
- ActivePipelines *prometheus.GaugeVec
- ActiveHeuristics *prometheus.GaugeVec
- HeuristicRuns *prometheus.CounterVec
- AlertsGenerated *prometheus.CounterVec
- NodeErrors *prometheus.CounterVec
- MissedBlocks *prometheus.CounterVec
- BlockLatency *prometheus.GaugeVec
- PipelineLatency *prometheus.GaugeVec
- InvExecutionTime *prometheus.GaugeVec
- HeuristicErrors *prometheus.CounterVec
+ rpcClientRequestsTotal *prometheus.CounterVec
+ rpcClientRequestDurationSeconds *prometheus.HistogramVec
+ Up prometheus.Gauge
+ ActivePaths *prometheus.GaugeVec
+ ActiveHeuristics *prometheus.GaugeVec
+ HeuristicRuns *prometheus.CounterVec
+ AlertsGenerated *prometheus.CounterVec
+ NodeErrors *prometheus.CounterVec
+ MissedBlocks *prometheus.CounterVec
+ BlockLatency *prometheus.GaugeVec
+ PathLatency *prometheus.GaugeVec
+ InvExecutionTime *prometheus.GaugeVec
+ HeuristicErrors *prometheus.CounterVec
registry *prometheus.Registry
factory Factory
@@ -93,6 +99,23 @@ func New(ctx context.Context, cfg *Config) (Metricer, func(), error) {
factory := With(registry)
stats = &Metrics{
+ rpcClientRequestsTotal: factory.NewCounterVec(prometheus.CounterOpts{
+ Namespace: metricsNamespace,
+ Subsystem: SubsystemEtl,
+ Name: "requests_total",
+ Help: "Total RPC requests initiated by the RPC client",
+ }, []string{
+ "method",
+ }),
+ rpcClientRequestDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: metricsNamespace,
+ Subsystem: SubsystemEtl,
+ Name: "request_duration_seconds",
+ Buckets: []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10},
+ Help: "Histogram of RPC client request durations",
+ }, []string{
+ "method",
+ }),
Up: factory.NewGauge(prometheus.GaugeOpts{
Namespace: metricsNamespace,
Name: "up",
@@ -103,14 +126,14 @@ func New(ctx context.Context, cfg *Config) (Metricer, func(), error) {
Help: "Number of active heuristics",
Namespace: metricsNamespace,
Subsystem: SubsystemHeuristics,
- }, []string{"heuristic", "network", "pipeline"}),
+ }, []string{"heuristic", "network"}),
- ActivePipelines: factory.NewGaugeVec(prometheus.GaugeOpts{
- Name: "active_pipelines",
- Help: "Number of active pipelines",
+ ActivePaths: factory.NewGaugeVec(prometheus.GaugeOpts{
+ Name: "active_paths",
+ Help: "Number of active paths",
Namespace: metricsNamespace,
Subsystem: SubsystemEtl,
- }, []string{"pipeline", "network"}),
+ }, []string{"network"}),
HeuristicRuns: factory.NewCounterVec(prometheus.CounterOpts{
Name: "heuristic_runs_total",
@@ -123,7 +146,7 @@ func New(ctx context.Context, cfg *Config) (Metricer, func(), error) {
Name: "alerts_generated_total",
Help: "Number of total alerts generated for a given heuristic",
Namespace: metricsNamespace,
- }, []string{"network", "heuristic", "pipeline", "severity", "destination", "client_name"}),
+ }, []string{"network", "heuristic", "path", "severity", "destination", "client_name"}),
NodeErrors: factory.NewCounterVec(prometheus.CounterOpts{
Name: "node_errors_total",
@@ -136,11 +159,11 @@ func New(ctx context.Context, cfg *Config) (Metricer, func(), error) {
Namespace: metricsNamespace,
}, []string{"network"}),
- PipelineLatency: factory.NewGaugeVec(prometheus.GaugeOpts{
- Name: "pipeline_latency",
- Help: "Millisecond latency of pipeline processing",
+ PathLatency: factory.NewGaugeVec(prometheus.GaugeOpts{
+ Name: "path_latency",
+ Help: "Millisecond latency of path processing",
Namespace: metricsNamespace,
- }, []string{"puuid"}),
+ }, []string{"path_id"}),
InvExecutionTime: factory.NewGaugeVec(prometheus.GaugeOpts{
Name: "heuristic_execution_time",
Help: "Nanosecond time of heuristic execution",
@@ -155,7 +178,7 @@ func New(ctx context.Context, cfg *Config) (Metricer, func(), error) {
Name: "missed_blocks_total",
Help: "Number of missed blocks",
Namespace: metricsNamespace,
- }, []string{"puuid"}),
+ }, []string{"path_id"}),
registry: registry,
factory: factory,
@@ -183,74 +206,97 @@ func (m *Metrics) RecordUp() {
// RecordAssessmentError ... Increments the number of errors generated by heuristic executions
func (m *Metrics) RecordAssessmentError(h heuristic.Heuristic) {
- ht := h.SUUID().PID.HeuristicType().String()
+ ht := h.Type().String()
m.HeuristicErrors.WithLabelValues(ht).Inc()
}
-// RecordInvExecutionTime ... Records the time it took to execute a heuristic
-func (m *Metrics) RecordInvExecutionTime(h heuristic.Heuristic, latency float64) {
- ht := h.SUUID().PID.HeuristicType().String()
+// RecordAssessmentTime ... Records the time it took to execute a heuristic
+func (m *Metrics) RecordAssessmentTime(h heuristic.Heuristic, latency float64) {
+ ht := h.Type().String()
m.InvExecutionTime.WithLabelValues(ht).Set(latency)
}
// IncMissedBlock ... Increments the number of missed blocks
-func (m *Metrics) IncMissedBlock(pUUID core.PUUID) {
- m.MissedBlocks.WithLabelValues(pUUID.String()).Inc()
+func (m *Metrics) IncMissedBlock(id core.PathID) {
+ m.MissedBlocks.WithLabelValues(id.String()).Inc()
}
// IncActiveHeuristics ... Increments the number of active heuristics
-func (m *Metrics) IncActiveHeuristics(ht core.HeuristicType, n core.Network,
- pipelineType core.PipelineType) {
- m.ActiveHeuristics.WithLabelValues(ht.String(), n.String(), pipelineType.String()).Inc()
+func (m *Metrics) IncActiveHeuristics(ht core.HeuristicType, n core.Network) {
+ m.ActiveHeuristics.WithLabelValues(ht.String(), n.String()).Inc()
}
-// IncActivePipelines ... Increments the number of active pipelines
-func (m *Metrics) IncActivePipelines(pt core.PipelineType, n core.Network) {
- m.ActivePipelines.WithLabelValues(pt.String(), n.String()).Inc()
+// IncActivePaths ... Increments the number of active paths
+func (m *Metrics) IncActivePaths(n core.Network) {
+ m.ActivePaths.WithLabelValues(n.String()).Inc()
}
-// DecActivePipelines ... Decrements the number of active pipelines
-func (m *Metrics) DecActivePipelines(pt core.PipelineType, n core.Network) {
- m.ActivePipelines.WithLabelValues(pt.String(), n.String()).Dec()
+// DecActivePaths ... Decrements the number of active paths
+func (m *Metrics) DecActivePaths(n core.Network) {
+ m.ActivePaths.WithLabelValues(n.String()).Dec()
}
// RecordHeuristicRun ... Records that a given heuristic has been run
-func (m *Metrics) RecordHeuristicRun(h heuristic.Heuristic) {
- net := h.SUUID().PID.Network().String()
- ht := h.SUUID().PID.HeuristicType().String()
+func (m *Metrics) RecordHeuristicRun(n core.Network, h heuristic.Heuristic) {
+ net := n.String()
+ ht := h.Type().String()
m.HeuristicRuns.WithLabelValues(net, ht).Inc()
}
-// RecordAlertGenerated ... Records that an alert has been generated for a given heuristic
func (m *Metrics) RecordAlertGenerated(alert core.Alert, dest core.AlertDestination, clientName string) {
- net := alert.SUUID.PID.Network().String()
- h := alert.SUUID.PID.HeuristicType().String()
- pipeline := alert.Ptype.String()
- sev := alert.Criticality.String()
- m.AlertsGenerated.WithLabelValues(net, h, pipeline, sev, dest.String(), clientName).Inc()
+ net := alert.PathID.Network().String()
+ h := alert.HT.String()
+ sev := alert.Sev.String()
+
+ m.AlertsGenerated.WithLabelValues(net, h, sev, dest.String(), clientName).Inc()
}
-// RecordNodeError ... Records that an error has been caught for a given node
func (m *Metrics) RecordNodeError(n core.Network) {
m.NodeErrors.WithLabelValues(n.String()).Inc()
}
-// RecordBlockLatency ... Records the latency of block processing
func (m *Metrics) RecordBlockLatency(n core.Network, latency float64) {
m.BlockLatency.WithLabelValues(n.String()).Set(latency)
}
-// RecordPipelineLatency ... Records the latency of pipeline processing
-func (m *Metrics) RecordPipelineLatency(pUUID core.PUUID, latency float64) {
- m.PipelineLatency.WithLabelValues(pUUID.String()).Set(latency)
+func (m *Metrics) RecordPathLatency(id core.PathID, latency float64) {
+ m.PathLatency.WithLabelValues(id.String()).Set(latency)
+}
+
+func (m *Metrics) RecordRPCClientRequest(method string) func(err error) {
+ m.rpcClientRequestsTotal.WithLabelValues(method).Inc()
+ // timer := prometheus.NewTimer(m.rpcClientRequestDurationSeconds.WithLabelValues(method))
+ // return func(err error) {
+ // m.recordRPCClientResponse(method, err)
+ // timer.ObserveDuration()
+ // }
+
+	// Return a no-op callback so call sites can invoke it safely while duration timing is disabled
+	return func(err error) {}
+}
+
+func (m *Metrics) RecordRPCClientBatchRequest(b []rpc.BatchElem) func(err error) {
+ m.rpcClientRequestsTotal.WithLabelValues(batchMethod).Add(float64(len(b)))
+ for _, elem := range b {
+ m.rpcClientRequestsTotal.WithLabelValues(elem.Method).Inc()
+ }
+
+ // timer := prometheus.NewTimer(m.rpcClientRequestDurationSeconds.WithLabelValues(batchMethod))
+ // return func(err error) {
+ // m.recordRPCClientResponse(batchMethod, err)
+ // timer.ObserveDuration()
+
+ // // Record errors for individual requests
+ // for _, elem := range b {
+ // m.recordRPCClientResponse(elem.Method, elem.Error)
+ // }
+ // }
+	// Return a no-op callback so call sites can invoke it safely while duration timing is disabled
+	return func(err error) {}
}
-// Shutdown ... Shuts down the metrics server
func (m *Metrics) Shutdown(ctx context.Context) error {
return m.server.Shutdown(ctx)
}
-// Document ... Returns a list of documented metrics
func (m *Metrics) Document() []DocumentedMetric {
return m.factory.Document()
}
@@ -259,19 +305,25 @@ type noopMetricer struct{}
var NoopMetrics Metricer = new(noopMetricer)
-func (n *noopMetricer) IncMissedBlock(_ core.PUUID) {}
-func (n *noopMetricer) RecordUp() {}
-func (n *noopMetricer) IncActiveHeuristics(_ core.HeuristicType, _ core.Network, _ core.PipelineType) {
+func (n *noopMetricer) IncMissedBlock(_ core.PathID) {}
+func (n *noopMetricer) RecordUp() {}
+func (n *noopMetricer) IncActiveHeuristics(_ core.HeuristicType, _ core.Network) {
}
-func (n *noopMetricer) RecordInvExecutionTime(_ heuristic.Heuristic, _ float64) {}
-func (n *noopMetricer) IncActivePipelines(_ core.PipelineType, _ core.Network) {}
-func (n *noopMetricer) DecActivePipelines(_ core.PipelineType, _ core.Network) {}
-func (n *noopMetricer) RecordHeuristicRun(_ heuristic.Heuristic) {}
+func (n *noopMetricer) RecordAssessmentTime(_ heuristic.Heuristic, _ float64) {}
+func (n *noopMetricer) IncActivePaths(_ core.Network) {}
+func (n *noopMetricer) DecActivePaths(_ core.Network) {}
+func (n *noopMetricer) RecordHeuristicRun(_ core.Network, _ heuristic.Heuristic) {}
func (n *noopMetricer) RecordAlertGenerated(_ core.Alert, _ core.AlertDestination, _ string) {}
func (n *noopMetricer) RecordNodeError(_ core.Network) {}
func (n *noopMetricer) RecordBlockLatency(_ core.Network, _ float64) {}
-func (n *noopMetricer) RecordPipelineLatency(_ core.PUUID, _ float64) {}
+func (n *noopMetricer) RecordPathLatency(_ core.PathID, _ float64) {}
func (n *noopMetricer) RecordAssessmentError(_ heuristic.Heuristic) {}
+func (n *noopMetricer) RecordRPCClientRequest(_ string) func(err error) {
+ return func(err error) {}
+}
+func (n *noopMetricer) RecordRPCClientBatchRequest(_ []rpc.BatchElem) func(err error) {
+ return func(err error) {}
+}
func (n *noopMetricer) Shutdown(_ context.Context) error {
return nil
diff --git a/internal/mocks/alert_client.go b/internal/mocks/alert_client.go
index 939026de..1aae3778 100644
--- a/internal/mocks/alert_client.go
+++ b/internal/mocks/alert_client.go
@@ -1,14 +1,14 @@
// Code generated by MockGen. DO NOT EDIT.
-// Source: github.com/base-org/pessimism/internal/client/alert_client (interfaces: AlertClient)
+// Source: github.com/base-org/pessimism/internal/client (interfaces: AlertClient)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
- alert_client "github.com/base-org/pessimism/internal/client"
reflect "reflect"
+ client "github.com/base-org/pessimism/internal/client"
gomock "github.com/golang/mock/gomock"
)
@@ -35,11 +35,25 @@ func (m *MockAlertClient) EXPECT() *MockAlertClientMockRecorder {
return m.recorder
}
+// GetName mocks base method.
+func (m *MockAlertClient) GetName() string {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetName")
+ ret0, _ := ret[0].(string)
+ return ret0
+}
+
+// GetName indicates an expected call of GetName.
+func (mr *MockAlertClientMockRecorder) GetName() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetName", reflect.TypeOf((*MockAlertClient)(nil).GetName))
+}
+
// PostEvent mocks base method.
-func (m *MockAlertClient) PostEvent(arg0 context.Context, arg1 *alert_client.AlertEventTrigger) (*alert_client.AlertAPIResponse, error) {
+func (m *MockAlertClient) PostEvent(arg0 context.Context, arg1 *client.AlertEventTrigger) (*client.AlertAPIResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PostEvent", arg0, arg1)
- ret0, _ := ret[0].(*alert_client.AlertAPIResponse)
+ ret0, _ := ret[0].(*client.AlertAPIResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
diff --git a/internal/mocks/alert_manager.go b/internal/mocks/alert_manager.go
index 61ca42a6..7456bf33 100644
--- a/internal/mocks/alert_manager.go
+++ b/internal/mocks/alert_manager.go
@@ -35,7 +35,7 @@ func (m *AlertManager) EXPECT() *AlertManagerMockRecorder {
}
// AddSession mocks base method.
-func (m *AlertManager) AddSession(arg0 core.SUUID, arg1 *core.AlertPolicy) error {
+func (m *AlertManager) AddSession(arg0 core.UUID, arg1 *core.AlertPolicy) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddSession", arg0, arg1)
ret0, _ := ret[0].(error)
diff --git a/internal/mocks/api_service.go b/internal/mocks/api_service.go
index c7edc8d3..a3c6880e 100644
--- a/internal/mocks/api_service.go
+++ b/internal/mocks/api_service.go
@@ -64,10 +64,10 @@ func (mr *MockServiceMockRecorder) CheckHealth() *gomock.Call {
}
// ProcessHeuristicRequest mocks base method.
-func (m *MockService) ProcessHeuristicRequest(arg0 *models.SessionRequestBody) (core.SUUID, error) {
+func (m *MockService) ProcessHeuristicRequest(arg0 *models.SessionRequestBody) (core.UUID, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ProcessHeuristicRequest", arg0)
- ret0, _ := ret[0].(core.SUUID)
+ ret0, _ := ret[0].(core.UUID)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -79,10 +79,10 @@ func (mr *MockServiceMockRecorder) ProcessHeuristicRequest(arg0 interface{}) *go
}
// RunHeuristicSession mocks base method.
-func (m *MockService) RunHeuristicSession(arg0 *models.SessionRequestParams) (core.SUUID, error) {
+func (m *MockService) RunHeuristicSession(arg0 *models.SessionRequestParams) (core.UUID, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RunHeuristicSession", arg0)
- ret0, _ := ret[0].(core.SUUID)
+ ret0, _ := ret[0].(core.UUID)
ret1, _ := ret[1].(error)
return ret0, ret1
}
diff --git a/internal/mocks/context.go b/internal/mocks/context.go
index c49b3fb0..c565ff70 100644
--- a/internal/mocks/context.go
+++ b/internal/mocks/context.go
@@ -13,8 +13,10 @@ type MockSuite struct {
Ctrl *gomock.Controller
Bundle *client.Bundle
MockIndexer *MockIxClient
+ MockL1Node *MockNodeClient
MockL1 *MockEthClient
MockL2 *MockEthClient
+ MockL2Node *MockNodeClient
SS state.Store
}
@@ -23,13 +25,16 @@ func Context(ctx context.Context, ctrl *gomock.Controller) (context.Context, *Mo
// 1. Construct mocked bundle
mockedClient := NewMockEthClient(ctrl)
mockedIndexer := NewMockIxClient(ctrl)
+ mockedNode := NewMockNodeClient(ctrl)
ss := state.NewMemState()
bundle := &client.Bundle{
IxClient: mockedIndexer,
L1Client: mockedClient,
+ L1Node: mockedNode,
L2Client: mockedClient,
+ L2Node: mockedNode,
}
// 2. Bind to context
@@ -42,7 +47,9 @@ func Context(ctx context.Context, ctrl *gomock.Controller) (context.Context, *Mo
Bundle: bundle,
MockIndexer: mockedIndexer,
MockL1: mockedClient,
+ MockL1Node: mockedNode,
MockL2: mockedClient,
+ MockL2Node: mockedNode,
SS: ss,
}
diff --git a/internal/mocks/engine_manager.go b/internal/mocks/engine_manager.go
index 3debe1b9..589c56ee 100644
--- a/internal/mocks/engine_manager.go
+++ b/internal/mocks/engine_manager.go
@@ -36,10 +36,10 @@ func (m *EngineManager) EXPECT() *EngineManagerMockRecorder {
}
// DeleteHeuristicSession mocks base method.
-func (m *EngineManager) DeleteHeuristicSession(arg0 core.SUUID) (core.SUUID, error) {
+func (m *EngineManager) DeleteHeuristicSession(arg0 core.UUID) (core.UUID, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteHeuristicSession", arg0)
- ret0, _ := ret[0].(core.SUUID)
+ ret0, _ := ret[0].(core.UUID)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -50,19 +50,19 @@ func (mr *EngineManagerMockRecorder) DeleteHeuristicSession(arg0 interface{}) *g
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteHeuristicSession", reflect.TypeOf((*EngineManager)(nil).DeleteHeuristicSession), arg0)
}
-// DeployHeuristicSession mocks base method.
-func (m *EngineManager) DeployHeuristicSession(arg0 *heuristic.DeployConfig) (core.SUUID, error) {
+// DeployHeuristic mocks base method.
+func (m *EngineManager) DeployHeuristic(arg0 *heuristic.DeployConfig) (core.UUID, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "DeployHeuristicSession", arg0)
- ret0, _ := ret[0].(core.SUUID)
+ ret := m.ctrl.Call(m, "DeployHeuristic", arg0)
+ ret0, _ := ret[0].(core.UUID)
ret1, _ := ret[1].(error)
return ret0, ret1
}
-// DeployHeuristicSession indicates an expected call of DeployHeuristicSession.
-func (mr *EngineManagerMockRecorder) DeployHeuristicSession(arg0 interface{}) *gomock.Call {
+// DeployHeuristic indicates an expected call of DeployHeuristic.
+func (mr *EngineManagerMockRecorder) DeployHeuristic(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeployHeuristicSession", reflect.TypeOf((*EngineManager)(nil).DeployHeuristicSession), arg0)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeployHeuristic", reflect.TypeOf((*EngineManager)(nil).DeployHeuristic), arg0)
}
// EventLoop mocks base method.
@@ -80,10 +80,10 @@ func (mr *EngineManagerMockRecorder) EventLoop() *gomock.Call {
}
// GetInputType mocks base method.
-func (m *EngineManager) GetInputType(arg0 core.HeuristicType) (core.RegisterType, error) {
+func (m *EngineManager) GetInputType(arg0 core.HeuristicType) (core.TopicType, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetInputType", arg0)
- ret0, _ := ret[0].(core.RegisterType)
+ ret0, _ := ret[0].(core.TopicType)
ret1, _ := ret[1].(error)
return ret0, ret1
}
diff --git a/internal/mocks/eth_client.go b/internal/mocks/eth_client.go
index 07210a0b..f33e2061 100644
--- a/internal/mocks/eth_client.go
+++ b/internal/mocks/eth_client.go
@@ -1,5 +1,5 @@
// Code generated by MockGen. DO NOT EDIT.
-// Source: github.com/base-org/pessimism/internal/client (interfaces: EthClient)
+// Source: github.com/base-org/pessimism/internal/client (interfaces: EthClient,NodeClient)
// Package mocks is a generated GoMock package.
package mocks
@@ -142,3 +142,116 @@ func (mr *MockEthClientMockRecorder) SubscribeFilterLogs(arg0, arg1, arg2 interf
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeFilterLogs", reflect.TypeOf((*MockEthClient)(nil).SubscribeFilterLogs), arg0, arg1, arg2)
}
+
+// MockNodeClient is a mock of NodeClient interface.
+type MockNodeClient struct {
+ ctrl *gomock.Controller
+ recorder *MockNodeClientMockRecorder
+}
+
+// MockNodeClientMockRecorder is the mock recorder for MockNodeClient.
+type MockNodeClientMockRecorder struct {
+ mock *MockNodeClient
+}
+
+// NewMockNodeClient creates a new mock instance.
+func NewMockNodeClient(ctrl *gomock.Controller) *MockNodeClient {
+ mock := &MockNodeClient{ctrl: ctrl}
+ mock.recorder = &MockNodeClientMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockNodeClient) EXPECT() *MockNodeClientMockRecorder {
+ return m.recorder
+}
+
+// BlockHeaderByHash mocks base method.
+func (m *MockNodeClient) BlockHeaderByHash(arg0 common.Hash) (*types.Header, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "BlockHeaderByHash", arg0)
+ ret0, _ := ret[0].(*types.Header)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// BlockHeaderByHash indicates an expected call of BlockHeaderByHash.
+func (mr *MockNodeClientMockRecorder) BlockHeaderByHash(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockHeaderByHash", reflect.TypeOf((*MockNodeClient)(nil).BlockHeaderByHash), arg0)
+}
+
+// BlockHeaderByNumber mocks base method.
+func (m *MockNodeClient) BlockHeaderByNumber(arg0 *big.Int) (*types.Header, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "BlockHeaderByNumber", arg0)
+ ret0, _ := ret[0].(*types.Header)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// BlockHeaderByNumber indicates an expected call of BlockHeaderByNumber.
+func (mr *MockNodeClientMockRecorder) BlockHeaderByNumber(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockHeaderByNumber", reflect.TypeOf((*MockNodeClient)(nil).BlockHeaderByNumber), arg0)
+}
+
+// BlockHeadersByRange mocks base method.
+func (m *MockNodeClient) BlockHeadersByRange(arg0, arg1 *big.Int) ([]types.Header, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "BlockHeadersByRange", arg0, arg1)
+ ret0, _ := ret[0].([]types.Header)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// BlockHeadersByRange indicates an expected call of BlockHeadersByRange.
+func (mr *MockNodeClientMockRecorder) BlockHeadersByRange(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockHeadersByRange", reflect.TypeOf((*MockNodeClient)(nil).BlockHeadersByRange), arg0, arg1)
+}
+
+// FilterLogs mocks base method.
+func (m *MockNodeClient) FilterLogs(arg0 ethereum.FilterQuery) ([]types.Log, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "FilterLogs", arg0)
+ ret0, _ := ret[0].([]types.Log)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// FilterLogs indicates an expected call of FilterLogs.
+func (mr *MockNodeClientMockRecorder) FilterLogs(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FilterLogs", reflect.TypeOf((*MockNodeClient)(nil).FilterLogs), arg0)
+}
+
+// StorageHash mocks base method.
+func (m *MockNodeClient) StorageHash(arg0 common.Address, arg1 *big.Int) (common.Hash, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StorageHash", arg0, arg1)
+ ret0, _ := ret[0].(common.Hash)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StorageHash indicates an expected call of StorageHash.
+func (mr *MockNodeClientMockRecorder) StorageHash(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageHash", reflect.TypeOf((*MockNodeClient)(nil).StorageHash), arg0, arg1)
+}
+
+// TxByHash mocks base method.
+func (m *MockNodeClient) TxByHash(arg0 common.Hash) (*types.Transaction, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TxByHash", arg0)
+ ret0, _ := ret[0].(*types.Transaction)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// TxByHash indicates an expected call of TxByHash.
+func (mr *MockNodeClientMockRecorder) TxByHash(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TxByHash", reflect.TypeOf((*MockNodeClient)(nil).TxByHash), arg0)
+}
diff --git a/internal/mocks/etl.go b/internal/mocks/etl.go
new file mode 100644
index 00000000..f431cbff
--- /dev/null
+++ b/internal/mocks/etl.go
@@ -0,0 +1,155 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/base-org/pessimism/internal/etl (interfaces: ETL)
+
+// Package mocks is a generated GoMock package.
+package mocks
+
+import (
+ big "math/big"
+ reflect "reflect"
+
+ core "github.com/base-org/pessimism/internal/core"
+ process "github.com/base-org/pessimism/internal/etl/process"
+ gomock "github.com/golang/mock/gomock"
+)
+
+// MockETL is a mock of ETL interface.
+type MockETL struct {
+ ctrl *gomock.Controller
+ recorder *MockETLMockRecorder
+}
+
+// MockETLMockRecorder is the mock recorder for MockETL.
+type MockETLMockRecorder struct {
+ mock *MockETL
+}
+
+// NewMockETL creates a new mock instance.
+func NewMockETL(ctrl *gomock.Controller) *MockETL {
+ mock := &MockETL{ctrl: ctrl}
+ mock.recorder = &MockETLMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockETL) EXPECT() *MockETLMockRecorder {
+ return m.recorder
+}
+
+// ActiveCount mocks base method.
+func (m *MockETL) ActiveCount() int {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ActiveCount")
+ ret0, _ := ret[0].(int)
+ return ret0
+}
+
+// ActiveCount indicates an expected call of ActiveCount.
+func (mr *MockETLMockRecorder) ActiveCount() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActiveCount", reflect.TypeOf((*MockETL)(nil).ActiveCount))
+}
+
+// CreateProcess mocks base method.
+func (m *MockETL) CreateProcess(arg0 *core.ClientConfig, arg1 core.ProcessID, arg2 core.PathID, arg3 *core.DataTopic) (process.Process, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateProcess", arg0, arg1, arg2, arg3)
+ ret0, _ := ret[0].(process.Process)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// CreateProcess indicates an expected call of CreateProcess.
+func (mr *MockETLMockRecorder) CreateProcess(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateProcess", reflect.TypeOf((*MockETL)(nil).CreateProcess), arg0, arg1, arg2, arg3)
+}
+
+// CreateProcessPath mocks base method.
+func (m *MockETL) CreateProcessPath(arg0 *core.PathConfig) (core.PathID, bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateProcessPath", arg0)
+ ret0, _ := ret[0].(core.PathID)
+ ret1, _ := ret[1].(bool)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
+}
+
+// CreateProcessPath indicates an expected call of CreateProcessPath.
+func (mr *MockETLMockRecorder) CreateProcessPath(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateProcessPath", reflect.TypeOf((*MockETL)(nil).CreateProcessPath), arg0)
+}
+
+// EventLoop mocks base method.
+func (m *MockETL) EventLoop() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "EventLoop")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// EventLoop indicates an expected call of EventLoop.
+func (mr *MockETLMockRecorder) EventLoop() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EventLoop", reflect.TypeOf((*MockETL)(nil).EventLoop))
+}
+
+// GetBlockHeight mocks base method.
+func (m *MockETL) GetBlockHeight(arg0 core.PathID) (*big.Int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetBlockHeight", arg0)
+ ret0, _ := ret[0].(*big.Int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetBlockHeight indicates an expected call of GetBlockHeight.
+func (mr *MockETLMockRecorder) GetBlockHeight(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockHeight", reflect.TypeOf((*MockETL)(nil).GetBlockHeight), arg0)
+}
+
+// GetStateKey mocks base method.
+func (m *MockETL) GetStateKey(arg0 core.TopicType) (*core.StateKey, bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetStateKey", arg0)
+ ret0, _ := ret[0].(*core.StateKey)
+ ret1, _ := ret[1].(bool)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
+}
+
+// GetStateKey indicates an expected call of GetStateKey.
+func (mr *MockETLMockRecorder) GetStateKey(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStateKey", reflect.TypeOf((*MockETL)(nil).GetStateKey), arg0)
+}
+
+// Run mocks base method.
+func (m *MockETL) Run(arg0 core.PathID) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Run", arg0)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Run indicates an expected call of Run.
+func (mr *MockETLMockRecorder) Run(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockETL)(nil).Run), arg0)
+}
+
+// Shutdown mocks base method.
+func (m *MockETL) Shutdown() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Shutdown")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Shutdown indicates an expected call of Shutdown.
+func (mr *MockETLMockRecorder) Shutdown() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockETL)(nil).Shutdown))
+}
diff --git a/internal/mocks/etl_manager.go b/internal/mocks/etl_manager.go
deleted file mode 100644
index 7687210b..00000000
--- a/internal/mocks/etl_manager.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Code generated by MockGen. DO NOT EDIT.
-// Source: github.com/base-org/pessimism/internal/etl/pipeline (interfaces: Manager)
-
-// Package mocks is a generated GoMock package.
-package mocks
-
-import (
- big "math/big"
- reflect "reflect"
-
- core "github.com/base-org/pessimism/internal/core"
- component "github.com/base-org/pessimism/internal/etl/component"
- gomock "github.com/golang/mock/gomock"
-)
-
-// EtlManager is a mock of Manager interface.
-type EtlManager struct {
- ctrl *gomock.Controller
- recorder *EtlManagerMockRecorder
-}
-
-// EtlManagerMockRecorder is the mock recorder for EtlManager.
-type EtlManagerMockRecorder struct {
- mock *EtlManager
-}
-
-// NewEtlManager creates a new mock instance.
-func NewEtlManager(ctrl *gomock.Controller) *EtlManager {
- mock := &EtlManager{ctrl: ctrl}
- mock.recorder = &EtlManagerMockRecorder{mock}
- return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use.
-func (m *EtlManager) EXPECT() *EtlManagerMockRecorder {
- return m.recorder
-}
-
-// ActiveCount mocks base method.
-func (m *EtlManager) ActiveCount() int {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "ActiveCount")
- ret0, _ := ret[0].(int)
- return ret0
-}
-
-// ActiveCount indicates an expected call of ActiveCount.
-func (mr *EtlManagerMockRecorder) ActiveCount() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActiveCount", reflect.TypeOf((*EtlManager)(nil).ActiveCount))
-}
-
-// CreateDataPipeline mocks base method.
-func (m *EtlManager) CreateDataPipeline(arg0 *core.PipelineConfig) (core.PUUID, bool, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "CreateDataPipeline", arg0)
- ret0, _ := ret[0].(core.PUUID)
- ret1, _ := ret[1].(bool)
- ret2, _ := ret[2].(error)
- return ret0, ret1, ret2
-}
-
-// CreateDataPipeline indicates an expected call of CreateDataPipeline.
-func (mr *EtlManagerMockRecorder) CreateDataPipeline(arg0 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateDataPipeline", reflect.TypeOf((*EtlManager)(nil).CreateDataPipeline), arg0)
-}
-
-// EventLoop mocks base method.
-func (m *EtlManager) EventLoop() error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "EventLoop")
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// EventLoop indicates an expected call of EventLoop.
-func (mr *EtlManagerMockRecorder) EventLoop() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EventLoop", reflect.TypeOf((*EtlManager)(nil).EventLoop))
-}
-
-// GetPipelineHeight mocks base method.
-func (m *EtlManager) GetPipelineHeight(arg0 core.PUUID) (*big.Int, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetPipelineHeight", arg0)
- ret0, _ := ret[0].(*big.Int)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// GetPipelineHeight indicates an expected call of GetPipelineHeight.
-func (mr *EtlManagerMockRecorder) GetPipelineHeight(arg0 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPipelineHeight", reflect.TypeOf((*EtlManager)(nil).GetPipelineHeight), arg0)
-}
-
-// GetStateKey mocks base method.
-func (m *EtlManager) GetStateKey(arg0 core.RegisterType) (*core.StateKey, bool, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetStateKey", arg0)
- ret0, _ := ret[0].(*core.StateKey)
- ret1, _ := ret[1].(bool)
- ret2, _ := ret[2].(error)
- return ret0, ret1, ret2
-}
-
-// GetStateKey indicates an expected call of GetStateKey.
-func (mr *EtlManagerMockRecorder) GetStateKey(arg0 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStateKey", reflect.TypeOf((*EtlManager)(nil).GetStateKey), arg0)
-}
-
-// InferComponent mocks base method.
-func (m *EtlManager) InferComponent(arg0 *core.ClientConfig, arg1 core.CUUID, arg2 core.PUUID, arg3 *core.DataRegister) (component.Component, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "InferComponent", arg0, arg1, arg2, arg3)
- ret0, _ := ret[0].(component.Component)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// InferComponent indicates an expected call of InferComponent.
-func (mr *EtlManagerMockRecorder) InferComponent(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InferComponent", reflect.TypeOf((*EtlManager)(nil).InferComponent), arg0, arg1, arg2, arg3)
-}
-
-// RunPipeline mocks base method.
-func (m *EtlManager) RunPipeline(arg0 core.PUUID) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "RunPipeline", arg0)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// RunPipeline indicates an expected call of RunPipeline.
-func (mr *EtlManagerMockRecorder) RunPipeline(arg0 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunPipeline", reflect.TypeOf((*EtlManager)(nil).RunPipeline), arg0)
-}
-
-// Shutdown mocks base method.
-func (m *EtlManager) Shutdown() error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Shutdown")
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// Shutdown indicates an expected call of Shutdown.
-func (mr *EtlManagerMockRecorder) Shutdown() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*EtlManager)(nil).Shutdown))
-}
diff --git a/internal/mocks/heuristic.go b/internal/mocks/heuristic.go
index dadb02af..57c191cd 100644
--- a/internal/mocks/heuristic.go
+++ b/internal/mocks/heuristic.go
@@ -36,7 +36,7 @@ func (m *MockHeuristic) EXPECT() *MockHeuristicMockRecorder {
}
// Assess mocks base method.
-func (m *MockHeuristic) Assess(arg0 core.TransitData) (*heuristic.ActivationSet, error) {
+func (m *MockHeuristic) Assess(arg0 core.Event) (*heuristic.ActivationSet, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Assess", arg0)
ret0, _ := ret[0].(*heuristic.ActivationSet)
@@ -50,56 +50,70 @@ func (mr *MockHeuristicMockRecorder) Assess(arg0 interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Assess", reflect.TypeOf((*MockHeuristic)(nil).Assess), arg0)
}
-// InputType mocks base method.
-func (m *MockHeuristic) InputType() core.RegisterType {
+// ID mocks base method.
+func (m *MockHeuristic) ID() core.UUID {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "InputType")
- ret0, _ := ret[0].(core.RegisterType)
+ ret := m.ctrl.Call(m, "ID")
+ ret0, _ := ret[0].(core.UUID)
return ret0
}
-// InputType indicates an expected call of InputType.
-func (mr *MockHeuristicMockRecorder) InputType() *gomock.Call {
+// ID indicates an expected call of ID.
+func (mr *MockHeuristicMockRecorder) ID() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InputType", reflect.TypeOf((*MockHeuristic)(nil).InputType))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockHeuristic)(nil).ID))
}
-// SUUID mocks base method.
-func (m *MockHeuristic) SUUID() core.SUUID {
+// SetID mocks base method.
+func (m *MockHeuristic) SetID(arg0 core.UUID) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "SUUID")
- ret0, _ := ret[0].(core.SUUID)
+ m.ctrl.Call(m, "SetID", arg0)
+}
+
+// SetID indicates an expected call of SetID.
+func (mr *MockHeuristicMockRecorder) SetID(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetID", reflect.TypeOf((*MockHeuristic)(nil).SetID), arg0)
+}
+
+// TopicType mocks base method.
+func (m *MockHeuristic) TopicType() core.TopicType {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TopicType")
+ ret0, _ := ret[0].(core.TopicType)
return ret0
}
-// SUUID indicates an expected call of SUUID.
-func (mr *MockHeuristicMockRecorder) SUUID() *gomock.Call {
+// TopicType indicates an expected call of TopicType.
+func (mr *MockHeuristicMockRecorder) TopicType() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SUUID", reflect.TypeOf((*MockHeuristic)(nil).SUUID))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TopicType", reflect.TypeOf((*MockHeuristic)(nil).TopicType))
}
-// SetSUUID mocks base method.
-func (m *MockHeuristic) SetSUUID(arg0 core.SUUID) {
+// Type mocks base method.
+func (m *MockHeuristic) Type() core.HeuristicType {
m.ctrl.T.Helper()
- m.ctrl.Call(m, "SetSUUID", arg0)
+ ret := m.ctrl.Call(m, "Type")
+ ret0, _ := ret[0].(core.HeuristicType)
+ return ret0
}
-// SetSUUID indicates an expected call of SetSUUID.
-func (mr *MockHeuristicMockRecorder) SetSUUID(arg0 interface{}) *gomock.Call {
+// Type indicates an expected call of Type.
+func (mr *MockHeuristicMockRecorder) Type() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSUUID", reflect.TypeOf((*MockHeuristic)(nil).SetSUUID), arg0)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Type", reflect.TypeOf((*MockHeuristic)(nil).Type))
}
-// ValidateInput mocks base method.
-func (m *MockHeuristic) ValidateInput(arg0 core.TransitData) error {
+// Validate mocks base method.
+func (m *MockHeuristic) Validate(arg0 core.Event) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "ValidateInput", arg0)
+ ret := m.ctrl.Call(m, "Validate", arg0)
ret0, _ := ret[0].(error)
return ret0
}
-// ValidateInput indicates an expected call of ValidateInput.
-func (mr *MockHeuristicMockRecorder) ValidateInput(arg0 interface{}) *gomock.Call {
+// Validate indicates an expected call of Validate.
+func (mr *MockHeuristicMockRecorder) Validate(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateInput", reflect.TypeOf((*MockHeuristic)(nil).ValidateInput), arg0)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Validate", reflect.TypeOf((*MockHeuristic)(nil).Validate), arg0)
}
diff --git a/internal/mocks/oracle.go b/internal/mocks/oracle.go
deleted file mode 100644
index 1f42eb9f..00000000
--- a/internal/mocks/oracle.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package mocks
-
-import (
- "context"
- "math/big"
-
- "github.com/base-org/pessimism/internal/core"
- "github.com/base-org/pessimism/internal/etl/component"
-)
-
-type mockOracleDefinition struct {
-}
-
-func (md *mockOracleDefinition) ConfigureRoutine(core.PUUID) error {
- return nil
-}
-
-func (md *mockOracleDefinition) BackTestRoutine(_ context.Context, _ chan core.TransitData,
- _ *big.Int, _ *big.Int) error {
- return nil
-}
-
-func (md *mockOracleDefinition) ReadRoutine(_ context.Context, _ chan core.TransitData) error {
- return nil
-}
-
-func (md *mockOracleDefinition) Height() (*big.Int, error) {
- return big.NewInt(0), nil
-}
-
-// NewDummyOracle ... Takes in a register type that specifies the mocked output type
-// Useful for testing inter-component connectivity and higher level component management abstractions
-func NewDummyOracle(ctx context.Context, ot core.RegisterType, opts ...component.Option) (component.Component, error) {
- od := &mockOracleDefinition{}
-
- return component.NewOracle(ctx, ot, od, opts...)
-}
diff --git a/internal/mocks/pipe.go b/internal/mocks/pipe.go
deleted file mode 100644
index 3c9a8578..00000000
--- a/internal/mocks/pipe.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package mocks
-
-import (
- "context"
-
- "github.com/base-org/pessimism/internal/core"
- "github.com/base-org/pessimism/internal/etl/component"
-)
-
-// mockPipeDefinition ... Mocked pipe definition struct
-type mockPipeDefinition struct {
-}
-
-// Transform ... Mocked transform function that returns an empty slice
-func (md *mockPipeDefinition) Transform(_ context.Context, td core.TransitData) ([]core.TransitData, error) {
- return []core.TransitData{td}, nil
-}
-
-// NewDummyPipe ... Takes in a register type that specifies the mocked output type
-// Useful for testing inter-component connectivity and higher level component management abstractions
-func NewDummyPipe(ctx context.Context, it core.RegisterType, ot core.RegisterType,
- opts ...component.Option) (component.Component, error) {
- pd := &mockPipeDefinition{}
-
- return component.NewPipe(ctx, pd, it, ot, opts...)
-}
diff --git a/internal/mocks/process.go b/internal/mocks/process.go
new file mode 100644
index 00000000..1f9db1f0
--- /dev/null
+++ b/internal/mocks/process.go
@@ -0,0 +1,49 @@
+package mocks
+
+import (
+ "context"
+ big "math/big"
+
+ "github.com/base-org/pessimism/internal/core"
+ "github.com/base-org/pessimism/internal/etl/process"
+)
+
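+// mockSubscription ... Mocked subscription definition used for testing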
+type mockSubscription struct {
+}
+
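+// Run ... Mocked run function that echoes the input event as its only output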
+func (ms *mockSubscription) Run(_ context.Context, e core.Event) ([]core.Event, error) {
+ return []core.Event{e}, nil
+}
+
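+// NewSubscriber ... Takes in topic types that specify the mocked input and output types; useful for testing inter-process connectivity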
+func NewSubscriber(ctx context.Context, it core.TopicType, ot core.TopicType,
+ opts ...process.Option) (process.Process, error) {
+ ms := &mockSubscription{}
+
+ return process.NewSubscriber(ctx, ms, it, ot, opts...)
+}
+
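+// mockTraversal ... Mocked traversal definition used for testing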
+type mockTraversal struct {
+}
+
+func (md *mockTraversal) ConfigureRoutine(core.PathID) error {
+ return nil
+}
+
+func (md *mockTraversal) Loop(_ context.Context, _ chan core.Event) error {
+ return nil
+}
+
+func (md *mockTraversal) Height() (*big.Int, error) {
+ return big.NewInt(0), nil
+}
+
+// NewReader ... Takes in a topic type that specifies the mocked output type; useful for testing inter-process connectivity
+func NewReader(ctx context.Context, ot core.TopicType, opts ...process.Option) (process.Process, error) {
+ mt := &mockTraversal{}
+
+ return process.NewReader(ctx, ot, mt, opts...)
+}
diff --git a/internal/mocks/subsystem.go b/internal/mocks/subsystem.go
index 23dd1dfe..b72d414e 100644
--- a/internal/mocks/subsystem.go
+++ b/internal/mocks/subsystem.go
@@ -38,7 +38,7 @@ func (m *SubManager) EXPECT() *SubManagerMockRecorder {
}
// BuildDeployCfg mocks base method.
-func (m *SubManager) BuildDeployCfg(arg0 *core.PipelineConfig, arg1 *core.SessionConfig) (*heuristic.DeployConfig, error) {
+func (m *SubManager) BuildDeployCfg(arg0 *core.PathConfig, arg1 *core.SessionConfig) (*heuristic.DeployConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BuildDeployCfg", arg0, arg1)
ret0, _ := ret[0].(*heuristic.DeployConfig)
@@ -52,34 +52,34 @@ func (mr *SubManagerMockRecorder) BuildDeployCfg(arg0, arg1 interface{}) *gomock
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildDeployCfg", reflect.TypeOf((*SubManager)(nil).BuildDeployCfg), arg0, arg1)
}
-// BuildPipelineCfg mocks base method.
-func (m *SubManager) BuildPipelineCfg(arg0 *models.SessionRequestParams) (*core.PipelineConfig, error) {
+// BuildPathCfg mocks base method.
+func (m *SubManager) BuildPathCfg(arg0 *models.SessionRequestParams) (*core.PathConfig, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "BuildPipelineCfg", arg0)
- ret0, _ := ret[0].(*core.PipelineConfig)
+ ret := m.ctrl.Call(m, "BuildPathCfg", arg0)
+ ret0, _ := ret[0].(*core.PathConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
-// BuildPipelineCfg indicates an expected call of BuildPipelineCfg.
-func (mr *SubManagerMockRecorder) BuildPipelineCfg(arg0 interface{}) *gomock.Call {
+// BuildPathCfg indicates an expected call of BuildPathCfg.
+func (mr *SubManagerMockRecorder) BuildPathCfg(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildPipelineCfg", reflect.TypeOf((*SubManager)(nil).BuildPipelineCfg), arg0)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildPathCfg", reflect.TypeOf((*SubManager)(nil).BuildPathCfg), arg0)
}
-// RunSession mocks base method.
-func (m *SubManager) RunSession(arg0 *heuristic.DeployConfig) (core.SUUID, error) {
+// RunHeuristic mocks base method.
+func (m *SubManager) RunHeuristic(arg0 *heuristic.DeployConfig) (core.UUID, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "RunSession", arg0)
- ret0, _ := ret[0].(core.SUUID)
+ ret := m.ctrl.Call(m, "RunHeuristic", arg0)
+ ret0, _ := ret[0].(core.UUID)
ret1, _ := ret[1].(error)
return ret0, ret1
}
-// RunSession indicates an expected call of RunSession.
-func (mr *SubManagerMockRecorder) RunSession(arg0 interface{}) *gomock.Call {
+// RunHeuristic indicates an expected call of RunHeuristic.
+func (mr *SubManagerMockRecorder) RunHeuristic(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunSession", reflect.TypeOf((*SubManager)(nil).RunSession), arg0)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunHeuristic", reflect.TypeOf((*SubManager)(nil).RunHeuristic), arg0)
}
// Shutdown mocks base method.
diff --git a/internal/subsystem/constants.go b/internal/subsystem/constants.go
index e83c6ee8..21192ce4 100644
--- a/internal/subsystem/constants.go
+++ b/internal/subsystem/constants.go
@@ -3,5 +3,5 @@ package subsystem
const (
networkNotFoundErr = "could not find endpoint for network %s"
- maxPipelineErr = "max etl pipeline count reached: %d"
+ maxPathErr = "max etl path count reached: %d"
)
diff --git a/internal/subsystem/manager.go b/internal/subsystem/manager.go
index 0564b244..5dc3a83d 100644
--- a/internal/subsystem/manager.go
+++ b/internal/subsystem/manager.go
@@ -14,7 +14,7 @@ import (
"github.com/base-org/pessimism/internal/core"
"github.com/base-org/pessimism/internal/engine"
"github.com/base-org/pessimism/internal/engine/heuristic"
- "github.com/base-org/pessimism/internal/etl/pipeline"
+ "github.com/base-org/pessimism/internal/etl"
"github.com/base-org/pessimism/internal/logging"
"github.com/base-org/pessimism/internal/metrics"
"go.uber.org/zap"
@@ -22,9 +22,9 @@ import (
// Config ... Used to store necessary API service config values
type Config struct {
- MaxPipelineCount int
- L1PollInterval int
- L2PollInterval int
+ MaxPathCount int
+ L1PollInterval int
+ L2PollInterval int
}
// GetPollInterval ... Returns config poll-interval for network type
@@ -42,9 +42,9 @@ func (cfg *Config) GetPollInterval(n core.Network) (time.Duration, error) {
}
type Subsystem interface {
- BuildDeployCfg(pConfig *core.PipelineConfig, sConfig *core.SessionConfig) (*heuristic.DeployConfig, error)
- BuildPipelineCfg(params *models.SessionRequestParams) (*core.PipelineConfig, error)
- RunSession(cfg *heuristic.DeployConfig) (core.SUUID, error)
+ BuildDeployCfg(pConfig *core.PathConfig, sConfig *core.SessionConfig) (*heuristic.DeployConfig, error)
+ BuildPathCfg(params *models.SessionRequestParams) (*core.PathConfig, error)
+ RunHeuristic(cfg *heuristic.DeployConfig) (core.UUID, error)
// Orchestration
StartEventRoutines(ctx context.Context)
Shutdown() error
@@ -55,7 +55,7 @@ type Manager struct {
cfg *Config
ctx context.Context
- etl pipeline.Manager
+ etl etl.ETL
eng engine.Manager
alert alert.Manager
stats metrics.Metricer
@@ -64,7 +64,7 @@ type Manager struct {
}
// NewManager ... Initializer for the subsystem manager
-func NewManager(ctx context.Context, cfg *Config, etl pipeline.Manager, eng engine.Manager,
+func NewManager(ctx context.Context, cfg *Config, etl etl.ETL, eng engine.Manager,
a alert.Manager,
) *Manager {
return &Manager{
@@ -126,8 +126,8 @@ func (m *Manager) StartEventRoutines(ctx context.Context) {
}()
}
-// BuildDeployCfg ... Builds a deploy config provided a pipeline & session config
-func (m *Manager) BuildDeployCfg(pConfig *core.PipelineConfig,
+// BuildDeployCfg ... Builds a deploy config provided a path & session config
+func (m *Manager) BuildDeployCfg(pConfig *core.PathConfig,
sConfig *core.SessionConfig) (*heuristic.DeployConfig, error) {
// 1. Fetch state key using risk engine input register type
sk, stateful, err := m.etl.GetStateKey(pConfig.DataType)
@@ -135,18 +135,18 @@ func (m *Manager) BuildDeployCfg(pConfig *core.PipelineConfig,
return nil, err
}
- // 2. Create data pipeline
- pUUID, reuse, err := m.etl.CreateDataPipeline(pConfig)
+ // 2. Create data path
+ id, reuse, err := m.etl.CreateProcessPath(pConfig)
if err != nil {
return nil, err
}
logging.WithContext(m.ctx).
- Info("Created etl pipeline", zap.String(logging.PUUIDKey, pUUID.String()))
+ Info("Created etl path", zap.String(logging.Path, id.String()))
// 3. Create a deploy config
return &heuristic.DeployConfig{
- PUUID: pUUID,
+ PathID: id,
Reuse: reuse,
HeuristicType: sConfig.Type,
Params: sConfig.Params,
@@ -157,42 +157,42 @@ func (m *Manager) BuildDeployCfg(pConfig *core.PipelineConfig,
}, nil
}
-// RunSession ... Runs a heuristic session
-func (m *Manager) RunSession(cfg *heuristic.DeployConfig) (core.SUUID, error) {
- // 1. Verify that pipeline constraints are met
+// RunHeuristic ... Runs a heuristic session
+func (m *Manager) RunHeuristic(cfg *heuristic.DeployConfig) (core.UUID, error) {
+ // 1. Verify that path constraints are met
// NOTE - Consider introducing a config validation step or module
if !cfg.Reuse && m.etlLimitReached() {
- return core.NilSUUID(), fmt.Errorf(maxPipelineErr, m.cfg.MaxPipelineCount)
+ return core.UUID{}, fmt.Errorf(maxPathErr, m.cfg.MaxPathCount)
}
// 2. Deploy heuristic session to risk engine
- sUUID, err := m.eng.DeployHeuristicSession(cfg)
+ id, err := m.eng.DeployHeuristic(cfg)
if err != nil {
- return core.NilSUUID(), err
+ return core.UUID{}, err
}
logging.WithContext(m.ctx).
- Info("Deployed heuristic session to risk engine", zap.String(logging.SUUIDKey, sUUID.String()))
+ Info("Deployed heuristic session to risk engine", zap.String(logging.UUID, id.ShortString()))
// 3. Add session to alert manager
- err = m.alert.AddSession(sUUID, cfg.AlertingPolicy)
+ err = m.alert.AddSession(id, cfg.AlertingPolicy)
if err != nil {
- return core.NilSUUID(), err
+ return core.UUID{}, err
}
- // 4. Run pipeline if not reused
+ // 4. Run path if not reused
if cfg.Reuse {
- return sUUID, nil
+ return id, nil
}
- if err = m.etl.RunPipeline(cfg.PUUID); err != nil { // Spin-up pipeline components
- return core.NilSUUID(), err
+ if err = m.etl.Run(cfg.PathID); err != nil { // Spin-up path processes
+ return core.UUID{}, err
}
- return sUUID, nil
+ return id, nil
}
-// BuildPipelineCfg ... Builds a pipeline config provided a set of heuristic request params
-func (m *Manager) BuildPipelineCfg(params *models.SessionRequestParams) (*core.PipelineConfig, error) {
+// BuildPathCfg ... Builds a path config provided a set of heuristic request params
+func (m *Manager) BuildPathCfg(params *models.SessionRequestParams) (*core.PathConfig, error) {
inType, err := m.eng.GetInputType(params.Heuristic())
if err != nil {
return nil, err
@@ -203,10 +203,10 @@ func (m *Manager) BuildPipelineCfg(params *models.SessionRequestParams) (*core.P
return nil, err
}
- return &core.PipelineConfig{
- Network: params.NetworkType(),
- DataType: inType,
- PipelineType: params.PipelineType(),
+ return &core.PathConfig{
+ Network: params.NetworkType(),
+ DataType: inType,
+ PathType: core.Live,
ClientConfig: &core.ClientConfig{
Network: params.NetworkType(),
PollInterval: pollInterval,
@@ -216,11 +216,11 @@ func (m *Manager) BuildPipelineCfg(params *models.SessionRequestParams) (*core.P
}, nil
}
-// etlLimitReached ... Returns true if the ETL pipeline count is at or above the max
+// etlLimitReached ... Returns true if the ETL path count is at or above the max
func (m *Manager) etlLimitReached() bool {
- return m.etl.ActiveCount() >= m.cfg.MaxPipelineCount
+ return m.etl.ActiveCount() >= m.cfg.MaxPathCount
}
-func (m *Manager) PipelineHeight(pUUID core.PUUID) (*big.Int, error) {
- return m.etl.GetPipelineHeight(pUUID)
+func (m *Manager) PathHeight(id core.PathID) (*big.Int, error) {
+ return m.etl.GetBlockHeight(id)
}
diff --git a/internal/subsystem/manager_test.go b/internal/subsystem/manager_test.go
index fabd8f40..91100148 100644
--- a/internal/subsystem/manager_test.go
+++ b/internal/subsystem/manager_test.go
@@ -19,40 +19,40 @@ func testErr() error {
}
type testSuite struct {
- subsys *subsystem.Manager
+ sys *subsystem.Manager
- mockEtl *mocks.EtlManager
- mockEng *mocks.EngineManager
- mockAlrt *mocks.AlertManager
- mockCtrl *gomock.Controller
+ mockETL *mocks.MockETL
+ mockENG *mocks.EngineManager
+ mockAlert *mocks.AlertManager
+ mockCtrl *gomock.Controller
}
func createTestSuite(t *testing.T) *testSuite {
ctrl := gomock.NewController(t)
- etlMock := mocks.NewEtlManager(ctrl)
+ etlMock := mocks.NewMockETL(ctrl)
engMock := mocks.NewEngineManager(ctrl)
alrtMock := mocks.NewAlertManager(ctrl)
cfg := &subsystem.Config{
- MaxPipelineCount: 10,
+ MaxPathCount: 10,
}
- subsys := subsystem.NewManager(context.Background(), cfg, etlMock, engMock, alrtMock)
+ sys := subsystem.NewManager(context.Background(), cfg, etlMock, engMock, alrtMock)
return &testSuite{
- subsys: subsys,
- mockEtl: etlMock,
- mockEng: engMock,
- mockAlrt: alrtMock,
- mockCtrl: ctrl,
+ sys: sys,
+ mockETL: etlMock,
+ mockENG: engMock,
+ mockAlert: alrtMock,
+ mockCtrl: ctrl,
}
}
-func Test_BuildDeployCfg(t *testing.T) {
- pConfig := &core.PipelineConfig{
+func TestBuildDeployCfg(t *testing.T) {
+ pConfig := &core.PathConfig{
Network: core.Layer1,
- DataType: core.GethBlock,
- PipelineType: core.Live,
+ DataType: core.BlockHeader,
+ PathType: core.Live,
ClientConfig: nil,
}
@@ -73,35 +73,35 @@ func Test_BuildDeployCfg(t *testing.T) {
constructor: func(t *testing.T) *testSuite {
ts := createTestSuite(t)
- ts.mockEtl.EXPECT().GetStateKey(pConfig.DataType).
+ ts.mockETL.EXPECT().GetStateKey(pConfig.DataType).
Return(nil, false, testErr()).
Times(1)
return ts
},
testLogic: func(t *testing.T, ts *testSuite) {
- actualCfg, err := ts.subsys.BuildDeployCfg(pConfig, sConfig)
+ actualCfg, err := ts.sys.BuildDeployCfg(pConfig, sConfig)
assert.Error(t, err)
assert.Nil(t, actualCfg)
},
},
{
- name: "Failure when creating data pipeline",
+ name: "Failure when creating data path",
constructor: func(t *testing.T) *testSuite {
ts := createTestSuite(t)
- ts.mockEtl.EXPECT().GetStateKey(pConfig.DataType).
+ ts.mockETL.EXPECT().GetStateKey(pConfig.DataType).
Return(nil, false, nil).
Times(1)
- ts.mockEtl.EXPECT().CreateDataPipeline(gomock.Any()).
- Return(core.NilPUUID(), false, testErr()).
+ ts.mockETL.EXPECT().CreateProcessPath(gomock.Any()).
+ Return(core.PathID{}, false, testErr()).
Times(1)
return ts
},
testLogic: func(t *testing.T, ts *testSuite) {
- actualCfg, err := ts.subsys.BuildDeployCfg(pConfig, sConfig)
+ actualCfg, err := ts.sys.BuildDeployCfg(pConfig, sConfig)
assert.Error(t, err)
assert.Nil(t, actualCfg)
},
@@ -116,13 +116,13 @@ func Test_BuildDeployCfg(t *testing.T) {
}
}
-func Test_RunSession(t *testing.T) {
- testSUUID := core.MakeSUUID(1, 1, 1)
+func TestRunHeuristic(t *testing.T) {
+ id := core.NewUUID()
testCfg := &heuristic.DeployConfig{
Stateful: false,
StateKey: nil,
Network: core.Layer1,
- PUUID: core.NilPUUID(),
+ PathID: core.PathID{},
Reuse: false,
HeuristicType: core.BalanceEnforcement,
@@ -140,44 +140,44 @@ func Test_RunSession(t *testing.T) {
name: "Failure when deploying heuristic session",
constructor: func(t *testing.T) *testSuite {
ts := createTestSuite(t)
- ts.mockEtl.EXPECT().
+ ts.mockETL.EXPECT().
ActiveCount().Return(1).
Times(1)
- ts.mockEng.EXPECT().DeployHeuristicSession(testCfg).
- Return(core.NilSUUID(), testErr()).
+ ts.mockENG.EXPECT().DeployHeuristic(testCfg).
+ Return(core.UUID{}, testErr()).
Times(1)
return ts
},
testLogic: func(t *testing.T, ts *testSuite) {
- actualSUUID, err := ts.subsys.RunSession(testCfg)
+ id, err := ts.sys.RunHeuristic(testCfg)
assert.Error(t, err)
- assert.Equal(t, core.NilSUUID(), actualSUUID)
+ assert.Equal(t, core.UUID{}, id)
},
},
{
name: "Failure when adding heuristic session to alerting system",
constructor: func(t *testing.T) *testSuite {
ts := createTestSuite(t)
- ts.mockEtl.EXPECT().
+ ts.mockETL.EXPECT().
ActiveCount().Return(1).
Times(1)
- ts.mockEng.EXPECT().DeployHeuristicSession(testCfg).
- Return(testSUUID, nil).
+ ts.mockENG.EXPECT().DeployHeuristic(testCfg).
+ Return(id, nil).
Times(1)
- ts.mockAlrt.EXPECT().AddSession(testSUUID, testCfg.AlertingPolicy).
+ ts.mockAlert.EXPECT().AddSession(id, testCfg.AlertingPolicy).
Return(testErr()).
Times(1)
return ts
},
testLogic: func(t *testing.T, ts *testSuite) {
- actualSUUID, err := ts.subsys.RunSession(testCfg)
+ id, err := ts.sys.RunHeuristic(testCfg)
assert.Error(t, err)
- assert.Equal(t, core.NilSUUID(), actualSUUID)
+ assert.Equal(t, core.UUID{}, id)
},
},
{
@@ -185,28 +185,28 @@ func Test_RunSession(t *testing.T) {
constructor: func(t *testing.T) *testSuite {
ts := createTestSuite(t)
- ts.mockEtl.EXPECT().
+ ts.mockETL.EXPECT().
ActiveCount().Return(1).
Times(1)
- ts.mockEng.EXPECT().DeployHeuristicSession(testCfg).
- Return(testSUUID, nil).
+ ts.mockENG.EXPECT().DeployHeuristic(testCfg).
+ Return(id, nil).
Times(1)
- ts.mockAlrt.EXPECT().AddSession(testSUUID, testCfg.AlertingPolicy).
+ ts.mockAlert.EXPECT().AddSession(id, testCfg.AlertingPolicy).
Return(nil).
Times(1)
- ts.mockEtl.EXPECT().RunPipeline(testCfg.PUUID).
+ ts.mockETL.EXPECT().Run(testCfg.PathID).
Return(nil).
Times(1)
return ts
},
testLogic: func(t *testing.T, ts *testSuite) {
- actualSUUID, err := ts.subsys.RunSession(testCfg)
+			actual, err := ts.sys.RunHeuristic(testCfg)
assert.NoError(t, err)
- assert.Equal(t, testSUUID, actualSUUID)
+			assert.Equal(t, id, actual)
},
},
{
@@ -214,11 +214,11 @@ func Test_RunSession(t *testing.T) {
constructor: func(t *testing.T) *testSuite {
ts := createTestSuite(t)
- ts.mockEng.EXPECT().DeployHeuristicSession(testCfg).
- Return(testSUUID, nil).
+ ts.mockENG.EXPECT().DeployHeuristic(testCfg).
+ Return(id, nil).
Times(1)
- ts.mockAlrt.EXPECT().AddSession(testSUUID, testCfg.AlertingPolicy).
+ ts.mockAlert.EXPECT().AddSession(id, testCfg.AlertingPolicy).
Return(nil).
Times(1)
@@ -226,17 +226,17 @@ func Test_RunSession(t *testing.T) {
},
testLogic: func(t *testing.T, ts *testSuite) {
testCfg.Reuse = true
- actualSUUID, err := ts.subsys.RunSession(testCfg)
+			actual, err := ts.sys.RunHeuristic(testCfg)
assert.NoError(t, err)
- assert.Equal(t, testSUUID, actualSUUID)
+			assert.Equal(t, id, actual)
},
},
{
- name: "Failure when active pipeline count is reached",
+ name: "Failure when active path count is reached",
constructor: func(t *testing.T) *testSuite {
ts := createTestSuite(t)
- ts.mockEtl.EXPECT().
+ ts.mockETL.EXPECT().
ActiveCount().Return(10).
Times(1)
@@ -244,9 +244,9 @@ func Test_RunSession(t *testing.T) {
},
testLogic: func(t *testing.T, ts *testSuite) {
testCfg.Reuse = false
- actualSUUID, err := ts.subsys.RunSession(testCfg)
+ id, err := ts.sys.RunHeuristic(testCfg)
assert.Error(t, err)
- assert.Equal(t, core.NilSUUID(), actualSUUID)
+ assert.Equal(t, core.UUID{}, id)
},
},
}
@@ -259,7 +259,7 @@ func Test_RunSession(t *testing.T) {
}
}
-func Test_BuildPipelineCfg(t *testing.T) {
+func TestBuildPathCfg(t *testing.T) {
var tests = []struct {
name string
@@ -270,8 +270,8 @@ func Test_BuildPipelineCfg(t *testing.T) {
name: "Failure when getting input type",
constructor: func(t *testing.T) *testSuite {
ts := createTestSuite(t)
- ts.mockEng.EXPECT().GetInputType(core.BalanceEnforcement).
- Return(core.AccountBalance, testErr()).
+ ts.mockENG.EXPECT().GetInputType(core.BalanceEnforcement).
+ Return(core.BlockHeader, testErr()).
Times(1)
return ts
@@ -279,11 +279,10 @@ func Test_BuildPipelineCfg(t *testing.T) {
testLogic: func(t *testing.T, ts *testSuite) {
testParams := &models.SessionRequestParams{
Network: core.Layer1.String(),
- PType: core.Live.String(),
HeuristicType: core.BalanceEnforcement.String(),
}
- cfg, err := ts.subsys.BuildPipelineCfg(testParams)
+ cfg, err := ts.sys.BuildPathCfg(testParams)
assert.Error(t, err)
assert.Nil(t, cfg)
},
@@ -292,8 +291,8 @@ func Test_BuildPipelineCfg(t *testing.T) {
name: "Failure when getting poll interval for invalid network",
constructor: func(t *testing.T) *testSuite {
ts := createTestSuite(t)
- ts.mockEng.EXPECT().GetInputType(core.BalanceEnforcement).
- Return(core.AccountBalance, nil).
+ ts.mockENG.EXPECT().GetInputType(core.BalanceEnforcement).
+ Return(core.BlockHeader, nil).
Times(1)
return ts
@@ -301,11 +300,10 @@ func Test_BuildPipelineCfg(t *testing.T) {
testLogic: func(t *testing.T, ts *testSuite) {
testParams := &models.SessionRequestParams{
Network: "layer0",
- PType: core.Live.String(),
HeuristicType: core.BalanceEnforcement.String(),
}
- cfg, err := ts.subsys.BuildPipelineCfg(testParams)
+ cfg, err := ts.sys.BuildPathCfg(testParams)
assert.Error(t, err)
assert.Nil(t, cfg)
},
@@ -314,8 +312,8 @@ func Test_BuildPipelineCfg(t *testing.T) {
name: "Success with valid params",
constructor: func(t *testing.T) *testSuite {
ts := createTestSuite(t)
- ts.mockEng.EXPECT().GetInputType(core.BalanceEnforcement).
- Return(core.AccountBalance, nil).
+ ts.mockENG.EXPECT().GetInputType(core.BalanceEnforcement).
+ Return(core.BlockHeader, nil).
Times(1)
return ts
@@ -323,16 +321,15 @@ func Test_BuildPipelineCfg(t *testing.T) {
testLogic: func(t *testing.T, ts *testSuite) {
testParams := &models.SessionRequestParams{
Network: core.Layer1.String(),
- PType: core.Live.String(),
HeuristicType: core.BalanceEnforcement.String(),
}
- cfg, err := ts.subsys.BuildPipelineCfg(testParams)
+ cfg, err := ts.sys.BuildPathCfg(testParams)
assert.NoError(t, err)
assert.NotNil(t, cfg)
assert.Equal(t, core.Layer1, cfg.Network)
- assert.Equal(t, core.Live, cfg.PipelineType)
+ assert.Equal(t, core.Live, cfg.PathType)
},
},
}