From f78302270300358319cb3986973674d849bb8451 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bruno=20Fran=C3=A7a?= Date: Tue, 12 Nov 2024 22:08:55 +0000 Subject: [PATCH 1/6] docs(consensus): Restored decentralization instructions (#3254) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Restored previously deleted instructions on how to enable p2p syncing. It should remain until we deprecate JSON RPC syncing. --- core/node/consensus/src/en.rs | 2 +- docs/announcements/attester_commitee.md | 2 +- .../external-node/10_decentralization.md | 91 +++++++++++++++++++ .../prepared_configs/mainnet-config.env | 2 +- .../testnet-sepolia-config.env | 2 +- 5 files changed, 95 insertions(+), 4 deletions(-) create mode 100644 docs/guides/external-node/10_decentralization.md diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index ec8d3c19b54a..6f4d80233be4 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -179,7 +179,7 @@ impl EN { tracing::warn!("\ WARNING: this node is using ZKsync API synchronization, which will be deprecated soon. \ Please follow this instruction to switch to p2p synchronization: \ - https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/09_decentralization.md"); + https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/10_decentralization.md"); let res: ctx::Result<()> = scope::run!(ctx, |ctx, s| async { // Update sync state in the background. s.spawn_bg(self.fetch_state_loop(ctx)); diff --git a/docs/announcements/attester_commitee.md b/docs/announcements/attester_commitee.md index 84ff8aa5be6d..148e51a4f976 100644 --- a/docs/announcements/attester_commitee.md +++ b/docs/announcements/attester_commitee.md @@ -36,7 +36,7 @@ Participants can leave the committee at any time.
The only action that is required to participate is to share your attester public key with the Main Node operator (by opening an issue in this repo or using any other communication channel). You can find it in the comment in the `consensus_secrets.yaml` file (that was - in most cases - generated by the tool described -[here](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/09_decentralization.md#generating-secrets)) +[here](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/10_decentralization.md#generating-secrets)) > [!WARNING] > diff --git a/docs/guides/external-node/10_decentralization.md b/docs/guides/external-node/10_decentralization.md new file mode 100644 index 000000000000..41f59486bef6 --- /dev/null +++ b/docs/guides/external-node/10_decentralization.md @@ -0,0 +1,91 @@ +# Decentralization + +In the default setup, the ZKsync node will fetch data from the ZKsync API endpoint maintained by Matter Labs. To reduce +the reliance on this centralized endpoint we have developed a decentralized p2p networking stack (aka gossipnet) which +will eventually be used instead of ZKsync API for synchronizing data. + +On the gossipnet, the data integrity will be protected by the BFT (byzantine fault-tolerant) consensus algorithm +(currently data is signed just by the main node though). + +## Enabling gossipnet on your node + +> [!NOTE] +> +> Because the data transmitted over the gossipnet is signed by the main node (and eventually by the consensus quorum), +> the signatures need to be backfilled to the node's local storage the first time you switch from centralized (ZKsync +> API based) synchronization to the decentralized (gossipnet based) synchronization (this is a one-time thing). With the +> current implementation it may take a couple of hours and gets faster the more nodes you add to the +> `gossip_static_outbound` list (see below). We are working to remove this inconvenience. 
+ +> [!NOTE] +> +> The minimal supported server version for this is +> [24.11.0](https://github.com/matter-labs/zksync-era/releases/tag/core-v24.11.0) + +### Generating secrets + +Each participant node of the gossipnet has to have an identity (a public/secret key pair). When running your node for +the first time, generate the secrets by running: + +``` +docker run --entrypoint /usr/bin/zksync_external_node "matterlabs/external-node:2.0-v24.12.0" generate-secrets > consensus_secrets.yaml +chmod 600 consensus_secrets.yaml +``` + +> [!NOTE] +> +> NEVER reveal the secret keys used by your node. Otherwise, someone can impersonate your node on the gossipnet. If you +> suspect that your secret key has been leaked, you can generate fresh keys using the same tool. +> +> If you want someone else to connect to your node, give them your PUBLIC key instead. Both public and secret keys are +> present in the `consensus_secrets.yaml` (public keys are in comments). + +### Preparing configuration file + +Copy the template of the consensus configuration file (for +[mainnet](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml) +or +[testnet](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml) +). + +> [!NOTE] +> +> You need to fill in the `public_addr` field. This is the address that will (not implemented yet) be advertised over +> gossipnet to other nodes, so that they can establish connections to your node. If you don't want to expose your node +> to the public internet, you can use IP in your local network. 
+ +Currently the config contains the following fields (refer to config +[schema](https://github.com/matter-labs/zksync-era/blob/990676c5f84afd2ff8cd337f495c82e8d1f305a4/core/lib/protobuf_config/src/proto/core/consensus.proto#L66) +for more details): + +- `server_addr` - local TCP socket address that the node should listen on for incoming connections. Note that this is an + additional TCP port that will be opened by the node. +- `public_addr` - the public address of your node that will be advertised over the gossipnet. +- `max_payload_size` - limit (in bytes) on the size of the ZKsync ERA block received from the gossipnet. This protects + your node from getting DoS'ed by too large network messages. Use the value from the template. +- `gossip_dynamic_inbound_limit` - maximal number of unauthenticated concurrent inbound connections that can be + established to your node. This is a DDoS protection measure. +- `gossip_static_outbound` - list of trusted peers that your node should always try to connect to. The template contains + the nodes maintained by Matter Labs, but you can add more if you know any. Note that the list contains both the network + address AND the public key of the node - this prevents spoofing attacks. + +### Setting environment variables + +Uncomment (or add) the following lines in your `.env` config: + +``` +EN_CONSENSUS_CONFIG_PATH=... +EN_CONSENSUS_SECRETS_PATH=... +``` + +These variables should point to your consensus config and secrets files that we have just created. Tweak the paths to +the files if you have placed them differently.
+ +### Add `--enable-consensus` flag to your entry point command + +For the consensus configuration to take effect you have to add `--enable-consensus` flag to the command line when +running the node, for example: + +``` +docker run "matterlabs/external-node:2.0-v24.12.0" --enable-consensus +``` diff --git a/docs/guides/external-node/prepared_configs/mainnet-config.env b/docs/guides/external-node/prepared_configs/mainnet-config.env index bce812084665..eac24f4ab7ed 100644 --- a/docs/guides/external-node/prepared_configs/mainnet-config.env +++ b/docs/guides/external-node/prepared_configs/mainnet-config.env @@ -70,7 +70,7 @@ RUST_LOG=zksync_core=debug,zksync_dal=info,zksync_eth_client=info,zksync_merkle_ RUST_BACKTRACE=full RUST_LIB_BACKTRACE=1 -# Settings related to gossip network, see `09_decentralization.md` +# Settings related to gossip network, see `10_decentralization.md` #EN_CONSENSUS_CONFIG_PATH=./mainnet_consensus_config.yaml #EN_CONSENSUS_SECRETS_PATH=./consensus_secrets.yaml diff --git a/docs/guides/external-node/prepared_configs/testnet-sepolia-config.env b/docs/guides/external-node/prepared_configs/testnet-sepolia-config.env index 182012e2850c..c8f855b4a4a2 100644 --- a/docs/guides/external-node/prepared_configs/testnet-sepolia-config.env +++ b/docs/guides/external-node/prepared_configs/testnet-sepolia-config.env @@ -70,7 +70,7 @@ RUST_LOG=zksync_core=debug,zksync_dal=info,zksync_eth_client=info,zksync_merkle_ RUST_BACKTRACE=full RUST_LIB_BACKTRACE=1 -# Settings related to gossip network, see `09_decentralization.md` +# Settings related to gossip network, see `10_decentralization.md` #EN_CONSENSUS_CONFIG_PATH=./testnet_consensus_config.yaml #EN_CONSENSUS_SECRETS_PATH=./consensus_secrets.yaml From 4afbc31712769ca8d1ada22c8d8254ce942f74bf Mon Sep 17 00:00:00 2001 From: Manuel Mauro Date: Wed, 13 Nov 2024 02:40:55 +0100 Subject: [PATCH 2/6] docs(zkstack_cli): update docs to mention foundry zksync (#3134) MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Update docs to mention foundry zksync, and other changes to the CLI. ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --------- Co-authored-by: Daniyar Itegulov Co-authored-by: Devashish Dixit Co-authored-by: Artur Puzio Co-authored-by: Sarah Schwartz <58856580+sarahschwartz@users.noreply.github.com> Co-authored-by: Antonio --- docs/guides/advanced/01_initialization.md | 46 +-- docs/guides/advanced/05_how_call_works.md | 2 +- docs/guides/advanced/91_docker_and_ci.md | 4 +- docs/guides/development.md | 227 ++++++++------ docs/guides/launch.md | 345 +++++++--------------- docs/guides/setup-dev.md | 75 +++-- prover/docs/03_launch.md | 77 +++-- 7 files changed, 351 insertions(+), 425 deletions(-) diff --git a/docs/guides/advanced/01_initialization.md b/docs/guides/advanced/01_initialization.md index 79c33434d3b5..2bc4a9c3a459 100644 --- a/docs/guides/advanced/01_initialization.md +++ b/docs/guides/advanced/01_initialization.md @@ -1,4 +1,4 @@ -# ZKsync deeper dive +# ZKsync Deeper Dive The goal of this doc is to show you some more details on how ZKsync works internally. @@ -7,18 +7,22 @@ system). Now let's take a look at what's inside: -### Initialization (zk init) +### Initialization -Let's take a deeper look into what `zk init` does. +Let's take a deeper look into what `zkstack ecosystem init` does. -#### zk tool +#### ZK Stack CLI -`zk` itself is implemented in typescript (you can see the code in `infrastructure` directory). If you change anything -there, make sure to run `zk` (that compiles this code), before re-running `zk init`. +`zkstack` itself is implemented in Rust (you can see the code in `/zkstack_cli` directory). 
If you change anything +there, make sure to run `zkstackup --local` from the root folder (that compiles and installs this code), before +re-running any `zkstack` command. -#### zk init +#### Containers -As first step, it gets the docker images for postgres and reth. +The first step to initialize a ZK Stack ecosystem is to run the command `zkstack containers`. This command gets the +docker images for `postgres` and `reth`. If the `--observability` option is passed to the command, or the corresponding +option is selected in the interactive prompt, then Prometheus, Grafana and other observability-related images are +downloaded and run. Reth (one of the Ethereum clients) will be used to setup our own copy of L1 chain (that our local ZKsync would use). @@ -26,11 +30,19 @@ Postgres is one of the two databases, that is used by ZKsync (the other one is R stored in postgres (blocks, transactions etc) - while RocksDB is only storing the state (Tree & Map) - and it used by VM. -Then we compile JS packages (these include our web3 sdk, tools and testing infrastructure). +#### Ecosystem -Then L1 & L2 contracts. +The next step is to run the command `zkstack ecosystem init`. -And now we're ready to start setting up the system. +This command: + +- Collects and finalize the ecosystem configuration. +- Builds and deploys L1 & L2 contracts. +- Initializes each chain defined in the `/chains` folder. (Currently, a single chain `era` is defined there, but you can + create your own chains running `zkstack chain create`). +- Sets up observability. +- Runs the genesis process. +- Initializes the database. #### Postgres @@ -83,8 +95,8 @@ If everything goes well, you should see that L1 blocks are being produced. Now we can start the main server: -```shell -zk server +```bash +zkstack server ``` This will actually run a cargo binary (`zksync_server`). @@ -96,7 +108,7 @@ Currently we don't send any transactions there (so the logs might be empty). 
But you should see some initial blocks in postgres: -``` +```sql select * from miniblocks; ``` @@ -107,7 +119,7 @@ Let's finish this article, by taking a look at our L1: We will use the `web3` tool to communicate with the L1, have a look at [02_deposits.md](02_deposits.md) for installation instructions. You can check that you're a (localnet) crypto trillionaire, by running: -```shell +```bash ./web3 --rpc-url http://localhost:8545 balance 0x36615Cf349d7F6344891B1e7CA7C72883F5dc049 ``` @@ -120,14 +132,14 @@ In order to communicate with L2 (our ZKsync) - we have to deploy multiple contra Ethereum). You can look on the `deployL1.log` file - to see the list of contracts that were deployed and their accounts. First thing in the file, is the deployer/governor wallet - this is the account that can change, freeze and unfreeze the -contracts (basically the owner). You can also verify (using the getBalance method above), that is has a lot of tokens. +contracts (basically the owner). You can verify the token balance using the `getBalance` method above. Then, there are a bunch of contracts (CRATE2_FACTOR, DIAMOND_PROXY, L1_ALLOW_LIST etc etc) - for each one, the file contains the address. 
You can quickly verify that they were really deployed, by calling: -```shell +```bash ./web3 --rpc-url http://localhost:8545 address XXX ``` diff --git a/docs/guides/advanced/05_how_call_works.md b/docs/guides/advanced/05_how_call_works.md index 5b9458ddce8e..0126c5349e90 100644 --- a/docs/guides/advanced/05_how_call_works.md +++ b/docs/guides/advanced/05_how_call_works.md @@ -12,7 +12,7 @@ Since the 'call' method is only for reading data, all the calculations will happ ### Calling the 'call' method If you need to make calls quickly, you can use the 'cast' binary from the -[foundry](https://github.com/foundry-rs/foundry) suite: +[Foundry ZKsync](https://foundry-book.zksync.io/getting-started/installation) suite: ```shell= cast call 0x23DF7589897C2C9cBa1C3282be2ee6a938138f10 "myfunction()()" --rpc-url http://localhost:3050 diff --git a/docs/guides/advanced/91_docker_and_ci.md b/docs/guides/advanced/91_docker_and_ci.md index ff1c7843b8b1..885d3155dd6c 100644 --- a/docs/guides/advanced/91_docker_and_ci.md +++ b/docs/guides/advanced/91_docker_and_ci.md @@ -64,8 +64,8 @@ zk After this, you can run any commands you need. -When you see a command like `ci_run zk contract build` in the CI - this simply means that it executed -`zk contract build` inside that docker container. +When you see a command like `ci_run zkstack dev contracts` in the CI - this simply means that it executed +`zkstack dev contracts` inside that docker container. **IMPORTANT** - by default, docker is running in the mode, where it does NOT persist the changes. So if you exit that shell, all the changes will be removed (so when you restart, you'll end up in the same pristine condition). You can diff --git a/docs/guides/development.md b/docs/guides/development.md index c859017848b5..fb8dd44a6c7a 100644 --- a/docs/guides/development.md +++ b/docs/guides/development.md @@ -1,82 +1,87 @@ # Development guide -This document covers development-related actions in ZKsync. 
+This document outlines the steps for setting up and working with ZKsync. -## Initializing the project +## Prerequisites -To setup the main toolkit, `zk`, simply run: +If you haven't already, install the prerequisites as described in [Install Dependencies](./setup-dev.md). -``` -zk -``` +## Installing the local ZK Stack CLI -You may also configure autocompletion for your shell via: +To set up local development, begin by installing +[ZK Stack CLI](https://github.com/matter-labs/zksync-era/blob/main/zkstack_cli/README.md). From the project's root +directory, run the following commands: -``` -zk completion install +```bash +cd ./zkstack_cli/zkstackup +./install --local ``` -Once all the dependencies were installed, project can be initialized: +This installs `zkstackup` in your user binaries directory (e.g., `$HOME/.local/bin/`) and adds it to your `PATH`. -``` -zk init +After installation, open a new terminal or reload your shell profile. From the project's root directory, you can now +run: + +```bash +zkstackup --local ``` -This command will do the following: +This command installs `zkstack` from the current source directory. -- Generate `$ZKSYNC_HOME/etc/env/target/dev.env` file with settings for the applications. -- Initialize docker containers with `reth` Ethereum node for local development. -- Download and unpack files for cryptographical backend. -- Generate required smart contracts. -- Compile all the smart contracts. -- Deploy smart contracts to the local Ethereum network. -- Create “genesis block” for server. +You can proceed to verify the installation and start familiarizing with the CLI by running: -Initializing may take pretty long, but many steps (such as downloading & unpacking keys and initializing containers) are -required to be done only once. +```bash +zkstack --help +``` -Usually, it is a good idea to do `zk init` once after each merge to the `main` branch (as application setup may change). 
+> NOTE: Whenever you want to update you local installation with your changes, just rerun: +> +> ```bash +> zkstackup --local +> ``` +> +> You might find convenient to add this alias to your shell profile: +> +> `alias zkstackup='zkstackup --path /path/to/zksync-era'` -Additionally, there is a subcommand `zk clean` to remove previously generated data. Examples: +## Configure Ecosystem +The project root directory includes configuration files for an ecosystem with a single chain, `era`. To initialize the +ecosystem, first start the required containers: + +```bash +zkstack containers ``` -zk clean --all # Remove generated configs, database and backups. -zk clean --config # Remove configs only. -zk clean --database # Remove database. -zk clean --backups # Remove backups. -zk clean --database --backups # Remove database *and* backups, but not configs. -``` -**When do you need it?** +Next, run: + +```bash +zkstack ecosystem init +``` -1. If you have an initialized database and want to run `zk init`, you have to remove the database first. -2. If after getting new functionality from the `main` branch your code stopped working and `zk init` doesn't help, you - may try removing `$ZKSYNC_HOME/etc/env/target/dev.env` and running `zk init` once again. This may help if the - application configuration has changed. +These commands will guide you through the configuration options for setting up the ecosystem. -If you don’t need all of the `zk init` functionality, but just need to start/stop containers, use the following -commands: +> NOTE: For local development only. You can also use the development defaults by supplying the `--dev` flag. -``` -zk up # Set up `reth` and `postgres` containers -zk down # Shut down `reth` and `postgres` containers -``` +Initialization may take some time, but key steps (such as downloading and unpacking keys or setting up containers) only +need to be completed once. 
-## Reinitializing +To see more detailed output, you can run commands with the `--verbose` flag. -When actively changing something that affects infrastructure (for example, contracts code), you normally don't need the -whole `init` functionality, as it contains many external steps (e.g. deploying ERC20 tokens) which don't have to be -redone. +## Cleanup -For this case, there is an additional command: +To clean up the local ecosystem (e.g., removing containers and clearing the contract cache), run: -``` -zk reinit +```bash +zkstack dev clean all ``` -This command does the minimal subset of `zk init` actions required to "reinitialize" the network. It assumes that -`zk init` was called in the current environment before. If `zk reinit` doesn't work for you, you may want to run -`zk init` instead. +You can then reinitialize the ecosystem as described in the [Configure Ecosystem](#configure-ecosystem) section. + +```bash +zkstack containers +zkstack ecosystem init +``` ## Committing changes @@ -85,64 +90,108 @@ the workspace initialization process. These hooks will not allow to commit the c Currently the following criteria are checked: -- Rust code should always be formatted via `cargo fmt`. -- Other code should always be formatted via `zk fmt`. -- Dummy Prover should not be staged for commit (see below for the explanation). +- Code must be formatted via `zkstack dev fmt`. +- Code must be linted via `zkstack dev lint`. -## Using Dummy Prover +## Testing -By default, the chosen prover is a "dummy" one, meaning that it doesn't actually compute proofs but rather uses mocks to -avoid expensive computations in the development environment. +ZKstack CLI offers multiple subcommands to run specific integration and unit test: -To switch dummy prover to real prover, one must change `dummy_verifier` to `false` in `contracts.toml` for your env -(most likely, `etc/env/base/contracts.toml`) and run `zk init` to redeploy smart contracts. 
+```bash +zkstack dev test --help +``` -## Testing +```bash +Usage: zkstack dev test [OPTIONS] -- Running the `rust` unit-tests: +Commands: + integration Run integration tests + fees Run fees test + revert Run revert tests + recovery Run recovery tests + upgrade Run upgrade tests + build Build all test dependencies + rust Run unit-tests, accepts optional cargo test flags + l1-contracts Run L1 contracts tests + prover Run prover tests + wallet Print test wallets information + loadtest Run loadtest + help Print this message or the help of the given subcommand(s) +``` - ``` - zk test rust - ``` +### Running unit tests -- Running a specific `rust` unit-test: +You can run unit tests for the Rust crates in the project by running: - ``` - zk test rust --package --lib ::tests:: - # e.g. zk test rust --package zksync_core --lib eth_sender::tests::resend_each_block - ``` +```bash +zkstack dev test rust +``` -- Running the integration test: +### Running integration tests - ``` - zk server # Has to be run in the 1st terminal - zk test i server # Has to be run in the 2nd terminal - ``` +Running integration tests is more complex. Some tests require a running server, while others need the system to be in a +specific state. Please refer to our CI scripts +[ci-core-reusable.yml](https://github.com/matter-labs/zksync-era/blob/main/.github/workflows/ci-core-reusable.yml) to +have a better understanding of the process. -- Running the benchmarks: +### Running load tests - ``` - zk f cargo bench - ``` +The current load test implementation only supports the legacy bridge. To use it, you need to create a new chain with +legacy bridge support: -- Running the loadtest: +```bash +zkstack chain create --legacy-bridge +zkstack chain init +``` - ``` - zk server # Has to be run in the 1st terminal - zk prover # Has to be run in the 2nd terminal if you want to use real prover, otherwise it's not required. 
- zk run loadtest # Has to be run in the 3rd terminal - ``` +After initializing the chain with a legacy bridge, you can run the load test against it. + +```bash +zkstack dev test loadtest +``` + +> WARNING: Never use legacy bridges in non-testing environments. ## Contracts -### Re-build contracts +### Build contracts +Run: + +```bash +zkstack dev contracts --help ``` -zk contract build -``` -### Publish source code on etherscan +to see all the options. + +### Publish source code on Etherscan +#### Verifier Options + +Most commands interacting with smart contracts support the same verification options as Foundry's `forge` command. Just +double check if the following options are available in the subcommand: + +```bash +--verifier -- Verifier to use +--verifier-api-key -- Verifier API key +--verifier-url -- Verifier URL, if using a custom provider ``` -zk contract publish + +#### Using Foundry + +You can use `foundry` to verify the source code of the contracts. + +```bash +forge verify-contract ``` + +Verifies a smart contract on a chosen verification provider. + +You must provide: + +- The contract address +- The contract name or the path to the contract. +- In case of Etherscan verification, you must also provide: + - Your Etherscan API key, either by passing it as an argument or setting `ETHERSCAN_API_KEY` + +For more information check [Foundry's documentation](https://book.getfoundry.sh/reference/forge/forge-verify-contract). 
diff --git a/docs/guides/launch.md b/docs/guides/launch.md index 10c0b10f5d84..52872a53cf2a 100644 --- a/docs/guides/launch.md +++ b/docs/guides/launch.md @@ -10,42 +10,43 @@ Prepare dev environment prerequisites: see ## Setup local dev environment -Setup: +Run the required containers with: -``` -zk # installs and builds zk itself -zk init +```bash +zkstack containers ``` -If you face any other problems with the `zk init` command, go to the -[Troubleshooting](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/launch.md#troubleshooting) section at -the end of this file. There are solutions for some common error cases. +Setup: + +```bash +zkstack ecosystem init +``` To completely reset the dev environment: - Stop services: - ``` - zk down + ```bash + zkstack dev clean all ``` - Repeat the setup procedure above -If `zk init` has already been executed, and now you only need to start docker containers (e.g. after reboot), simply -launch: - -``` -zk up -``` + ```bash + zkstack containers + zkstack ecosystem init + ``` ### Run observability stack If you want to run [Dockprom](https://github.com/stefanprodan/dockprom/) stack (Prometheus, Grafana) alongside other -containers - add `--run-observability` parameter during initialisation. +containers - add `--observability` parameter during initialisation. +```bash +zkstack containers --observability ``` -zk init --run-observability -``` + +or select `yes` when prompted during the interactive execution of the command. That will also provision Grafana with [era-observability](https://github.com/matter-labs/era-observability/tree/main/dashboards) dashboards. You can then @@ -54,290 +55,148 @@ access it at `http://127.0.0.1:3000/` under credentials `admin/admin`. > If you don't see any data displayed on the Grafana dashboards - try setting the timeframe to "Last 30 minutes". You > will also have to have `jq` installed on your system. 
-## (Re)deploy db and contracts +## Ecosystem Configuration -``` -zk contract redeploy -``` +The ecosystem configuration is spread across multiple files and directories: -## Environment configurations +1. Root level: -Env config files are held in `etc/env/target/` + - `ZkStack.yaml`: Main configuration file for the entire ecosystem. -List configurations: +2. `configs/` directory: -``` -zk env -``` + - `apps/`: + - `portal_config.json`: Configuration for the portal application. + - `contracts.yaml`: Defines smart contract settings and addresses. + - `erc20.yaml`: Configuration for ERC20 tokens. + - `initial_deployments.yaml`: Specifies initial ERC20 token deployments. + - `wallets.yaml`: Contains wallet configurations. -Switch between configurations: +3. `chains//` directory: -``` -zk env -``` + - `artifacts/`: Contains build/execution artifacts. + - `configs/`: Chain-specific configuration files. + - `contracts.yaml`: Chain-specific smart contract settings. + - `external_node.yaml`: Configuration for external nodes. + - `general.yaml`: General chain configuration. + - `genesis.yaml`: Genesis configuration for the chain. + - `secrets.yaml`: Secrets and private keys for the chain. + - `wallets.yaml`: Wallet configurations for the chain. + - `db/main/`: Database files for the chain. + - `ZkStack.yaml`: Chain-specific ZkStack configuration. + +These configuration files are automatically generated during the ecosystem initialization (`zkstack ecosystem init`) and +chain initialization (`zkstack chain init`) processes. They control various aspects of the ZKsync ecosystem, including: + +- Network settings +- Smart contract deployments +- Token configurations +- Database settings +- Application/Service-specific parameters -Default configuration is `dev.env`, which is generated automatically from `dev.env.example` during `zk init` command -execution. 
+It's important to note that while these files can be manually edited, any changes may be overwritten if the ecosystem or +chain is reinitialized. Always back up your modifications and exercise caution when making direct changes to these +files. + +For specific configuration needs, it's recommended to use the appropriate `zkstack` commands or consult the +documentation for safe ways to customize your setup. ## Build and run server Run server: +```bash +zkstack server ``` -zk server + +The server's configuration files can be found in `/chains//configs` directory. These files are created when +running `zkstack chain init` command. + +### Modifying configuration files manually + +To manually modify configuration files: + +1. Locate the relevant config file in `/chains//configs` +2. Open the file in a text editor +3. Make necessary changes, following the existing format +4. Save the file +5. Restart the relevant services for changes to take effect: + +```bash +zkstack server ``` -Server is configured using env files in `./etc/env` directory. After the first initialization, file -`./etc/env/target/dev.env`will be created. By default, this file is copied from the `./etc/env/target/dev.env.example` -template. +> NOTE: Manual changes to configuration files may be overwritten if the ecosystem is reinitialized or the chain is +> reinitialized. -Make sure you have environment variables set right, you can check it by running: `zk env`. You should see `* dev` in -output. +> WARNING: Some properties, such as ports, may require manual modification across different configuration files to +> ensure consistency and avoid conflicts. 
## Running server using Google cloud storage object store instead of default In memory store -Get the service_account.json file containing the GCP credentials from kubernetes secret for relevant environment(stage2/ -testnet2) add that file to the default location ~/gcloud/service_account.json or update object_store.toml with the file -location +Get the `service_account.json` file containing the GCP credentials from kubernetes secret for relevant +environment(stage2/ testnet2) add that file to the default location `~/gcloud/service_account.json` or update +`object_store.toml` with the file location -``` -zk server +```bash +zkstack prover init --bucket-base-url={url} --credentials-file={path/to/service_account.json} ``` ## Running prover server -Running on machine without GPU +Running on a machine with GPU -```shell -zk f cargo +nightly run --release --bin zksync_prover +```bash +zkstack prover run --component=prover ``` -Running on machine with GPU - -```shell -zk f cargo +nightly run --features gpu --release --bin zksync_prover -``` +> NOTE: Running on machine without GPU is currently not supported by `zkstack`. 
## Running the verification key generator -```shell +```bash # ensure that the setup_2^26.key in the current directory, the file can be download from https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key # To generate all verification keys cargo run --release --bin zksync_verification_key_generator - - ``` ## Generating binary verification keys for existing json verification keys -```shell +```bash cargo run --release --bin zksync_json_to_binary_vk_converter -- -o /path/to/output-binary-vk ``` ## Generating commitment for existing verification keys -```shell +```bash cargo run --release --bin zksync_commitment_generator ``` ## Running the contract verifier -```shell -# To process fixed number of jobs -cargo run --release --bin zksync_contract_verifier -- --jobs-number X - -# To run until manual exit -zk contract_verifier +```bash +zkstack contract-verifier run ``` ## Troubleshooting -### SSL error: certificate verify failed - -**Problem**. `zk init` fails with the following error: - -``` -Initializing download: https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2%5E20.key -SSL error: certificate verify failed -``` - -**Solution**. Make sure that the version of `axel` on your computer is `2.17.10` or higher. - -### rmSync is not a function - -**Problem**. `zk init` fails with the following error: +### Connection Refused -``` -fs_1.default.rmSync is not a function -``` - -**Solution**. Make sure that the version of `node.js` installed on your computer is `14.14.0` or higher. - -### Invalid bytecode: () - -**Problem**. 
`zk init` fails with an error similar to: - -``` -Running `target/release/zksync_server --genesis` -2023-04-05T14:23:40.291277Z INFO zksync_core::genesis: running regenesis -thread 'main' panicked at 'Invalid bytecode: ()', core/lib/utils/src/bytecode.rs:159:10 -stack backtrace: - 0: 0x104551410 - std::backtrace_rs::backtrace::libunwind::trace::hf9c5171f212b04e2 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/../../backtrace/src/backtrace/libunwind.rs:93:5 - 1: 0x104551410 - std::backtrace_rs::backtrace::trace_unsynchronized::h179003f6ec753118 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/../../backtrace/src/backtrace/mod.rs:66:5 - 2: 0x104551410 - std::sys_common::backtrace::_print_fmt::h92d38f701cf42b17 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/sys_common/backtrace.rs:65:5 - 3: 0x104551410 - ::fmt::hb33e6e8152f78c95 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/sys_common/backtrace.rs:44:22 - 4: 0x10456cdb0 - core::fmt::write::hd33da007f7a27e39 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/core/src/fmt/mod.rs:1208:17 - 5: 0x10454b41c - std::io::Write::write_fmt::h7edc10723862001e - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/io/mod.rs:1682:15 - 6: 0x104551224 - std::sys_common::backtrace::_print::h5e00f05f436af01f - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/sys_common/backtrace.rs:47:5 - 7: 0x104551224 - std::sys_common::backtrace::print::h895ee35b3f17b334 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/sys_common/backtrace.rs:34:9 - 8: 0x104552d84 - std::panicking::default_hook::{{closure}}::h3b7ee083edc2ea3e - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:267:22 - 9: 0x104552adc - std::panicking::default_hook::h4e7c2c28eba716f5 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:286:9 - 10: 0x1045533a8 - 
std::panicking::rust_panic_with_hook::h1672176227032c45 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:688:13 - 11: 0x1045531c8 - std::panicking::begin_panic_handler::{{closure}}::h0b2d072f9624d32e - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:579:13 - 12: 0x104551878 - std::sys_common::backtrace::__rust_end_short_backtrace::he9abda779115b93c - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/sys_common/backtrace.rs:137:18 - 13: 0x104552f24 - rust_begin_unwind - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:575:5 - 14: 0x1045f89c0 - core::panicking::panic_fmt::h23ae44661fec0889 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/core/src/panicking.rs:64:14 - 15: 0x1045f8ce0 - core::result::unwrap_failed::h414a6cbb12b1e143 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/core/src/result.rs:1791:5 - 16: 0x103f79a30 - zksync_utils::bytecode::hash_bytecode::h397dd7c5b6202bf4 - 17: 0x103e47e78 - zksync_contracts::BaseSystemContracts::load_from_disk::h0e2da8f63292ac46 - 18: 0x102d885a0 - zksync_core::genesis::ensure_genesis_state::{{closure}}::h5143873f2c337e11 - 19: 0x102d7dee0 - zksync_core::genesis_init::{{closure}}::h4e94f3d4ad984788 - 20: 0x102d9c048 - zksync_server::main::{{closure}}::h3fe943a3627d31e1 - 21: 0x102d966f8 - tokio::runtime::park::CachedParkThread::block_on::h2f2fdf7edaf08470 - 22: 0x102df0dd4 - tokio::runtime::runtime::Runtime::block_on::h1fd1d83272a23194 - 23: 0x102e21470 - zksync_server::main::h500621fd4d160768 - 24: 0x102d328f0 - std::sys_common::backtrace::__rust_begin_short_backtrace::h52973e519e2e8a0d - 25: 0x102e08ea8 - std::rt::lang_start::{{closure}}::hbd395afe0ab3b799 - 26: 0x10454508c - core::ops::function::impls:: for &F>::call_once::ha1c2447b9b665e13 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/core/src/ops/function.rs:606:13 - 27: 0x10454508c - 
std::panicking::try::do_call::ha57d6d1e9532dc1f - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:483:40 - 28: 0x10454508c - std::panicking::try::hca0526f287961ecd - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:447:19 - 29: 0x10454508c - std::panic::catch_unwind::hdcaa7fa896e0496a - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panic.rs:137:14 - 30: 0x10454508c - std::rt::lang_start_internal::{{closure}}::h142ec071d3766871 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/rt.rs:148:48 - 31: 0x10454508c - std::panicking::try::do_call::h95f5e55d6f048978 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:483:40 - 32: 0x10454508c - std::panicking::try::h0fa00e2f7b4a5c64 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:447:19 - 33: 0x10454508c - std::panic::catch_unwind::h1765f149814d4d3e - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panic.rs:137:14 - 34: 0x10454508c - std::rt::lang_start_internal::h00a235e820a7f01c - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/rt.rs:148:20 - 35: 0x102e21578 - _main -Error: Genesis is not needed (either Postgres DB or tree's Rocks DB is not empty) -``` - -**Description**. This means that your bytecode config file has an empty entry: `"bytecode": "0x"`. This happens because -your `zksync-2-dev/etc/system-contracts/package.json`'s dependency on `"@matterlabs/hardhat-zksync-solc"` is outdated. -We don't expect this error to happen as we've updated to latest version which fixes the problem. - -**Solution**. 
Update your dependency and reinit: +#### Problem +```bash +error sending request for url (http://127.0.0.1:8545/): error trying to connect: tcp connect error: Connection refused (os error 61) ``` -yarn add -D @matterlabs/hardhat-zksync-solc # in the system-contracts folder -zk clean --all && zk init -``` - -On the run, it moved from: - -``` - "@matterlabs/hardhat-zksync-solc": "^0.3.14-beta.3", -``` - -to: - -``` - "@matterlabs/hardhat-zksync-solc": "^0.3.15", -``` - -### Error: Bytecode length in 32-byte words must be odd -**Problem**. `zk init` fails with an error similar to: +#### Description -``` -Successfully generated Typechain artifacts! -Error: Error: Bytecode length in 32-byte words must be odd - at hashL2Bytecode (/Users/emilluta/code/zksync-2-dev/contracts/zksync/src/utils.ts:29:15) - at computeL2Create2Address (/Users/emilluta/code/zksync-2-dev/contracts/zksync/src/utils.ts:53:26) - at /Users/emilluta/code/zksync-2-dev/contracts/zksync/src/compileAndDeployLibs.ts:50:63 - at step (/Users/emilluta/code/zksync-2-dev/contracts/zksync/src/compileAndDeployLibs.ts:33:23) - at Object.next (/Users/emilluta/code/zksync-2-dev/contracts/zksync/src/compileAndDeployLibs.ts:14:53) - at fulfilled (/Users/emilluta/code/zksync-2-dev/contracts/zksync/src/compileAndDeployLibs.ts:5:58) -error Command failed with exit code 1. -info Visit https://yarnpkg.com/en/docs/cli/run for documentation about this command. -error Command failed. -Exit code: 1 -Command: /Users/emilluta/.nvm/versions/node/v16.19.1/bin/node -Arguments: /opt/homebrew/Cellar/yarn/1.22.19/libexec/lib/cli.js compile-and-deploy-libs -Directory: /Users/emilluta/code/zksync-2-dev/contracts/zksync -Output: - -info Visit https://yarnpkg.com/en/docs/cli/workspace for documentation about this command. -error Command failed with exit code 1. -info Visit https://yarnpkg.com/en/docs/cli/run for documentation about this command. -Error: Child process exited with code 1 -``` - -**Description**. 
This means that your bytecode config file has an empty entry: `"bytecode": "0x"`. This happens because -your `zksync-2-dev/contracts/zksync/package.json`'s dependency on `"@matterlabs/hardhat-zksync-solc"` is outdated. We -don't expect this error to happen as we've updated to latest version which fixes the problem. - -**Solution**. Update your dependency and reinit: - -``` -yarn add -D @matterlabs/hardhat-zksync-solc # in the system-contracts folder -zk clean --all && zk init -``` - -On the run, it moved from: - -``` - "@matterlabs/hardhat-zksync-solc": "^0.3.14-beta.3", -``` - -to: - -``` - "@matterlabs/hardhat-zksync-solc": "^0.3.15", -``` - -### Error: Cannot read properties of undefined (reading 'compilerPath') - -**Problem**. `zk init` fails with an error similar to the following: - -```text -Yarn project directory: /Users//Projects/zksync-era/contracts/system-contracts -Error: Cannot read properties of undefined (reading 'compilerPath') -error Command failed with exit code 1. -``` +It appears that no containers are currently running, which is likely the reason you're encountering this error. -**Description**. The compiler downloader -[could not verify](https://github.com/NomicFoundation/hardhat/blob/0d850d021f3ab33b59b1ea2ae70d1e659e579e40/packages/hardhat-core/src/internal/solidity/compiler/downloader.ts#L336-L383) -that the Solidity compiler it downloaded actually works. +#### Solution -**Solution**. Delete the cached `*.does.not.work` file to run the check again: +Ensure that the necessary containers have been started and are functioning correctly to resolve the issue. -```sh -# NOTE: Compiler version, commit hash may differ. 
-rm $HOME/Library/Caches/hardhat-nodejs/compilers-v2/macosx-amd64/solc-macosx-amd64-v0.8.20+commit.a1b79de6.does.not.work +```bash +zkstack containers ``` diff --git a/docs/guides/setup-dev.md b/docs/guides/setup-dev.md index 4eef211cd3d1..a74f976c2b73 100644 --- a/docs/guides/setup-dev.md +++ b/docs/guides/setup-dev.md @@ -14,20 +14,20 @@ git config --global url."https://".insteadOf git:// # Rust curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh + # NVM curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.5/install.sh | bash + # All necessary stuff sudo apt-get update -sudo apt-get install build-essential pkg-config cmake clang lldb lld libssl-dev postgresql apt-transport-https ca-certificates curl software-properties-common +sudo apt-get install -y build-essential pkg-config cmake clang lldb lld libssl-dev libpq-dev apt-transport-https ca-certificates curl software-properties-common + # Install docker curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable" sudo apt install docker-ce sudo usermod -aG docker ${USER} -# Stop default postgres (as we'll use the docker one) -sudo systemctl stop postgresql -sudo systemctl disable postgresql # Start docker. sudo systemctl start docker @@ -45,9 +45,9 @@ cargo install cargo-nextest # SQL tools cargo install sqlx-cli --version 0.8.1 -# Foundry -curl -L https://foundry.paradigm.xyz | bash -foundryup --branch master +# Foundry ZKsync +curl -L https://raw.githubusercontent.com/matter-labs/foundry-zksync/main/install-foundry-zksync | bash +foundryup-zksync --branch master # Non CUDA (GPU) setup, can be skipped if the machine has a CUDA installed for provers # Don't do that if you intend to run provers on your machine. Check the prover docs for a setup instead. 
@@ -60,24 +60,24 @@ cd zksync-era git submodule update --init --recursive ``` -Don't forget to [add env variables](#Environment) and look at [tips](#tips). +Don't forget to look at [tips](#tips). ## Supported operating systems -ZKsync currently can be launched on any \*nix operating system (e.g. any linux distribution or MacOS). +ZKsync currently can be launched on any \*nix operating system (e.g. any linux distribution or macOS). If you're using Windows, then make sure to use WSL 2. Additionally, if you are going to use WSL 2, make sure that your project is located in the _linux filesystem_, since accessing NTFS partitions from within WSL is very slow. -If you're using MacOS with an ARM processor (e.g. M1/M2), make sure that you are working in the _native_ environment -(e.g. your terminal and IDE don't run in Rosetta, and your toolchain is native). Trying to work with ZKsync code via +If you're using macOS with an ARM processor (e.g. M1/M2), make sure that you are working in the _native_ environment +(e.g., your terminal and IDE don't run in Rosetta, and your toolchain is native). Trying to work with ZKsync code via Rosetta may cause problems that are hard to spot and debug, so make sure to check everything before you start. If you are a NixOS user or would like to have a reproducible environment, skip to the section about `nix`. -## `Docker` +## Docker Install `docker`. It is recommended to follow the instructions from the [official site](https://docs.docker.com/install/). @@ -117,13 +117,13 @@ at this step. If logging out does not resolve the issue, restarting the computer should. -## `Node` & `Yarn` +## Node.js & Yarn 1. Install `Node` (requires version `v20`). The recommended way is via [nvm](https://github.com/nvm-sh/nvm). 2. Install `yarn`. Can be done via `npm install -g yarn`. Make sure to get version 1.22.19 - you can change the version by running `yarn set version 1.22.19`. -## `clang` +## clang In order to compile RocksDB, you must have LLVM available. 
On debian-based linux it can be installed as follows: @@ -133,12 +133,12 @@ On debian-based linux: sudo apt-get install build-essential pkg-config cmake clang lldb lld ``` -On mac: +On macOS: You need to have an up-to-date `Xcode`. You can install it directly from `App Store`. With Xcode command line tools, you get the Clang compiler installed by default. Thus, having XCode you don't need to install `clang`. -## `OpenSSL` +## OpenSSL Install OpenSSL: @@ -154,9 +154,9 @@ On debian-based linux: sudo apt-get install libssl-dev ``` -## `Rust` +## Rust -Install the latest `rust` version. +Install `Rust`'s toolchain version reported in `/rust-toolchain.toml` (also a later stable version should work). Instructions can be found on the [official site](https://www.rust-lang.org/tools/install). @@ -167,7 +167,7 @@ rustc --version rustc 1.xx.y (xxxxxx 20xx-yy-zz) # Output may vary depending on actual version of rust ``` -If you are using MacOS with ARM processor (e.g. M1/M2), make sure that you use an `aarch64` toolchain. For example, when +If you are using macOS with ARM processor (e.g. M1/M2), make sure that you use an `aarch64` toolchain. For example, when you run `rustup show`, you should see a similar input: ```bash @@ -190,25 +190,26 @@ If you see `x86_64` mentioned in the output, probably you're running (or used to that's the case, you should probably change the way you run terminal, and/or reinstall your IDE, and then reinstall the Rust toolchain as well. -## Postgres +## PostgreSQL Client Library -Install the latest postgres: +For development purposes, you typically only need the PostgreSQL client library, not the full server installation. 
+Here's how to install it: -On mac: +On macOS: ```bash -brew install postgresql@14 +brew install libpq ``` -On debian-based linux: +On Debian-based Linux: ```bash -sudo apt-get install postgresql +sudo apt-get install libpq-dev ``` ### Cargo nextest -[cargo-nextest](https://nexte.st/) is the next-generation test runner for Rust projects. `zk test rust` uses +[cargo-nextest](https://nexte.st/) is the next-generation test runner for Rust projects. `zkstack dev test rust` uses `cargo nextest` by default. ```bash @@ -236,10 +237,13 @@ enable nix-ld. Go to the zksync folder and run `nix develop`. After it finishes, you are in a shell that has all the dependencies. -## Foundry +## Foundry ZKsync + +ZKSync depends on Foundry ZKsync (which is a specialized fork of Foundry, tailored for ZKsync). Please follow this +[installation guide](https://foundry-book.zksync.io/getting-started/installation) to get started with Foundry ZKsync. -[Foundry](https://book.getfoundry.sh/getting-started/installation) can be utilized for deploying smart contracts. For -commands related to deployment, you can pass flags for Foundry integration. +Foundry ZKsync can also be used for deploying smart contracts. For commands related to deployment, you can pass flags +for Foundry integration. ## Non-GPU setup @@ -266,17 +270,6 @@ RUSTFLAGS as env var, or pass it in `config.toml` (either project level or globa rustflags = ["--cfg=no_cuda"] ``` -## Environment - -Edit the lines below and add them to your shell profile file (e.g. 
`~/.bash_profile`, `~/.zshrc`): - -```bash -# Add path here: -export ZKSYNC_HOME=/path/to/zksync - -export PATH=$ZKSYNC_HOME/bin:$PATH -``` - ## Tips ### Tip: `mold` @@ -294,7 +287,7 @@ export RUSTFLAGS='-C link-arg=-fuse-ld=/usr/local/bin/mold' export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="clang" ``` -## Tip: Speeding up building `RocksDB` +### Tip: Speeding up building `RocksDB` By default, each time you compile `rocksdb` crate, it will compile required C++ sources from scratch. It can be avoided by using precompiled versions of library, and it will significantly improve your build times. diff --git a/prover/docs/03_launch.md b/prover/docs/03_launch.md index 0465d888f612..fcddf93174b9 100644 --- a/prover/docs/03_launch.md +++ b/prover/docs/03_launch.md @@ -2,37 +2,25 @@ ## Preparing -First, run the following command: +First, create a new chain with prover mode `GPU`: -``` -zk env prover-local +```bash +zkstack chain create --prover-mode gpu ``` -It will create a config similar to `dev`, but with: +It will create a config similar to `era`, but with: - Proof sending mode set to `OnlyRealProofs` - Prover mode set to `Local` instead of `GCS`. -You can always switch back to dev config via `zk env dev`. - -**Important:** If you change environments, you have to do `zk init` again. - -## Enter the prover workspace - -All the commands for binaries in the prover workspace must be done from the prover folder: - -``` -cd $ZKSYNC_HOME/prover -``` - ## Key generation This operation should only be done once; if you already generated keys, you can skip it. The following command will generate the required keys: -``` -zk f cargo run --features gpu --release --bin key_generator -- generate-sk-gpu all --recompute-if-missing +```bash +zkstack prover setup-keys ``` With that, you should be ready to run the prover. @@ -40,20 +28,20 @@ With that, you should be ready to run the prover. ## Running Important! 
Generating a proof takes a lot of time, so if you just want to see whether you can generate a proof, do it -against clean sequencer state (e.g. right after `zk init`). +against clean sequencer state (e.g. right after `zkstack chain init`). We will be running a bunch of binaries, it's recommended to run each in a separate terminal. ### Server -``` -zk server --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,da_dispatcher,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip +```bash +zkstack server --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,da_dispatcher,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip ``` -### Proof data handler +### Prover gateway -``` -zk f cargo run --release --bin zksync_prover_fri_gateway +```bash +zkstack prover run --component=gateway ``` Then wait until the first job is picked up. Prover gateway has to insert protocol information into the database, and @@ -63,8 +51,8 @@ until it happens, witness generators will panic and won't be able to start. Once a job is created, start witness generators: -``` -zk f cargo run --release --bin zksync_witness_generator -- --all_rounds +```bash +zkstack prover run --component=witness-generator --round=all-rounds ``` `--all_rounds` means that witness generator will produce witnesses of all kinds. You can run a witness generator for @@ -72,22 +60,47 @@ each round separately, but it's mostly useful in production environments. ### Witness vector generator -``` -zk f cargo run --release --bin zksync_witness_vector_generator -- --threads 10 +```bash +zkstack prover run --component=witness-vector-generator --threads 10 ``` WVG prepares inputs for prover, and it's a single-threaded time-consuming operation. You may run several jobs by changing the `threads` parameter. The exact amount of WVGs needed to "feed" one prover depends on CPU/GPU specs, but a ballpark estimate (useful for local development) is 10 WVGs per prover. 
+> NOTE: The WVG thread typically uses approximately 10GB of RAM. + ### Prover -``` -zk f cargo run --features "gpu" --release --bin zksync_prover_fri +```bash +zkstack prover run --component=prover ``` Prover can prove any kinds of circuits, so you only need a single instance. +### Prover job monitor + +You can start the prover job monitor by specifying its component as follows. + +```bash +zkstack prover run --component=prover-job-monitor +``` + +### Insert protocol version in prover database + +Before running the prover, you can insert the protocol version in the prover database by executing the following +command: + +```bash +zkstack dev prover insert-version --version --snark-wrapper= +``` + +To query this information, use the following command: + +```bash +zkstack dev prover info +``` + ### Proof compressor ⚠️ Both prover and proof compressor require 24GB of VRAM, and currently it's not possible to make them use different @@ -96,8 +109,8 @@ GPU. So unless you have a GPU with 48GB of VRAM, you won't be able to run both a You should wait until the proof is generated, and once you see in the server logs that it tries to find available compressor, you can shut the prover down, and run the proof compressor: -``` -zk f cargo run --features "gpu" --release --bin zksync_proof_fri_compressor +```bash +zkstack prover run --component=compressor ``` Once the proof is compressed, proof gateway will see that and will send the generated proof back to core. From 0a8869839a8873463ed402c354d31a5ffac2e605 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Wed, 13 Nov 2024 17:08:48 +0700 Subject: [PATCH 3/6] chore(consensus): updated EN config templates (#3269) Removed the default gossip outbound peers list, obsoleted in favor of seed_peers. Bumped EN image version in docker file. 
--- .../configs/mainnet_consensus_config.yaml | 6 ------ .../configs/testnet_consensus_config.yaml | 6 ------ .../mainnet-external-node-docker-compose.yml | 4 ++-- .../prepared_configs/mainnet_consensus_config.yaml | 10 ---------- .../prepared_configs/testnet_consensus_config.yaml | 10 ---------- 5 files changed, 2 insertions(+), 34 deletions(-) diff --git a/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml b/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml index f2a0ce318757..08f5861daa83 100644 --- a/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml +++ b/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml @@ -3,9 +3,3 @@ public_addr: '127.0.0.1:3054' debug_page_addr: '0.0.0.0:5000' max_payload_size: 5000000 gossip_dynamic_inbound_limit: 100 -gossip_static_outbound: - # preconfigured ENs owned by Matterlabs that you can connect to - - key: 'node:public:ed25519:68d29127ab03408bf5c838553b19c32bdb3aaaae9bf293e5e078c3a0d265822a' - addr: 'external-node-consensus-mainnet.zksync.dev:3054' - - key: 'node:public:ed25519:b521e1bb173d04bc83d46b859d1296378e94a40427a6beb9e7fdd17cbd934c11' - addr: 'external-node-moby-consensus-mainnet.zksync.dev:3054' diff --git a/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml b/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml index a5f752fe405a..08f5861daa83 100644 --- a/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml +++ b/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml @@ -3,9 +3,3 @@ public_addr: '127.0.0.1:3054' debug_page_addr: '0.0.0.0:5000' max_payload_size: 5000000 gossip_dynamic_inbound_limit: 100 -gossip_static_outbound: - # preconfigured ENs owned by Matterlabs that you can connect to - - key: 
'node:public:ed25519:4a94067664e7b8d0927ab1443491dab71a1d0c63f861099e1852f2b6d0831c3e' - addr: 'external-node-consensus-sepolia.zksync.dev:3054' - - key: 'node:public:ed25519:cfbbebc74127099680584f07a051a2573e2dd7463abdd000d31aaa44a7985045' - addr: 'external-node-moby-consensus-sepolia.zksync.dev:3054' diff --git a/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml b/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml index 9c8c5bb31425..5ee9de187bf0 100644 --- a/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml +++ b/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml @@ -52,7 +52,7 @@ services: # Generation of consensus secrets. # The secrets are generated iff the secrets file doesn't already exist. generate-secrets: - image: "matterlabs/external-node:2.0-v24.16.0" + image: "matterlabs/external-node:2.0-v25.1.0" entrypoint: [ "/configs/generate_secrets.sh", @@ -61,7 +61,7 @@ services: volumes: - ./configs:/configs external-node: - image: "matterlabs/external-node:2.0-v24.16.0" + image: "matterlabs/external-node:2.0-v25.1.0" entrypoint: [ "/usr/bin/entrypoint.sh", diff --git a/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml b/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml index be37aaf29329..08347a14efa0 100644 --- a/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml +++ b/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml @@ -2,13 +2,3 @@ server_addr: '0.0.0.0:3054' public_addr: ':3054' max_payload_size: 5000000 gossip_dynamic_inbound_limit: 100 -gossip_static_outbound: - # preconfigured ENs owned by Matterlabs that you can connect to - - key: 'node:public:ed25519:68d29127ab03408bf5c838553b19c32bdb3aaaae9bf293e5e078c3a0d265822a' - addr: 'external-node-consensus-mainnet.zksync.dev:3054' - - key: 
'node:public:ed25519:b521e1bb173d04bc83d46b859d1296378e94a40427a6beb9e7fdd17cbd934c11' - addr: 'external-node-moby-consensus-mainnet.zksync.dev:3054' - - key: 'node:public:ed25519:45d23515008b5121484eb774507df63ff4ce9f4b65e6a03b7c9ec4e0474d3044' - addr: 'consensus-mainnet-1.zksync-nodes.com:3054' - - key: 'node:public:ed25519:c278bb0831e8d0dcd3aaf0b7af7c3dca048d50b28c578ceecce61a412986b883' - addr: 'consensus-mainnet-2.zksync-nodes.com:3054' diff --git a/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml b/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml index 8d2551c07087..08347a14efa0 100644 --- a/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml +++ b/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml @@ -2,13 +2,3 @@ server_addr: '0.0.0.0:3054' public_addr: ':3054' max_payload_size: 5000000 gossip_dynamic_inbound_limit: 100 -gossip_static_outbound: - # preconfigured ENs owned by Matterlabs that you can connect to - - key: 'node:public:ed25519:4a94067664e7b8d0927ab1443491dab71a1d0c63f861099e1852f2b6d0831c3e' - addr: 'external-node-consensus-sepolia.zksync.dev:3054' - - key: 'node:public:ed25519:cfbbebc74127099680584f07a051a2573e2dd7463abdd000d31aaa44a7985045' - addr: 'external-node-moby-consensus-sepolia.zksync.dev:3054' - - key: 'node:public:ed25519:f48616db5965ada49dcbd51b1de11068a27c9886c900d3522607f16dff2e66fc' - addr: 'consensus-sepolia-1.zksync-nodes.com:3054' - - key: 'node:public:ed25519:3789d49293792755a9c1c2a7ed9b0e210e92994606dcf76388b5635d7ed676cb' - addr: 'consensus-sepolia-2.zksync-nodes.com:3054' From 726203bab540e3d6ada10b6bc12bd3c09220d895 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Wed, 13 Nov 2024 12:40:03 +0100 Subject: [PATCH 4/6] feat(consensus): fallback json rpc syncing for consensus (#3211) if for any reason p2p syncing is behind, json RPC syncing will activate. 
This is a protective measure for while we deploy changes to the consensus algorithm. Fixes BFT-516 --- Cargo.lock | 2 + core/node/consensus/Cargo.toml | 1 + core/node/consensus/src/en.rs | 94 ++++++++++++------------ core/node/consensus/src/lib.rs | 1 + core/node/consensus/src/metrics.rs | 13 ++++ core/node/consensus/src/storage/store.rs | 8 +- core/node/consensus/src/testonly.rs | 39 +--------- core/node/consensus/src/tests/mod.rs | 65 +--------------- core/node/node_sync/Cargo.toml | 1 + core/node/node_sync/src/sync_state.rs | 21 +++--- 10 files changed, 85 insertions(+), 160 deletions(-) create mode 100644 core/node/consensus/src/metrics.rs diff --git a/Cargo.lock b/Cargo.lock index eb93300b1729..75591bca7293 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11499,6 +11499,7 @@ dependencies = [ "thiserror", "tokio", "tracing", + "vise", "zksync_concurrency", "zksync_config", "zksync_consensus_bft", @@ -11699,6 +11700,7 @@ dependencies = [ "vise", "zksync_concurrency", "zksync_config", + "zksync_consensus_roles", "zksync_contracts", "zksync_dal", "zksync_eth_client", diff --git a/core/node/consensus/Cargo.toml b/core/node/consensus/Cargo.toml index 120d355da9a8..1d6b80f475e7 100644 --- a/core/node/consensus/Cargo.toml +++ b/core/node/consensus/Cargo.toml @@ -42,6 +42,7 @@ thiserror.workspace = true tracing.workspace = true tokio.workspace = true semver.workspace = true +vise.workspace = true [dev-dependencies] zksync_node_genesis.workspace = true diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index 6f4d80233be4..e417b68cf2cb 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -17,13 +17,14 @@ use zksync_web3_decl::{ use super::{config, storage::Store, ConsensusConfig, ConsensusSecrets}; use crate::{ + metrics::METRICS, registry, storage::{self, ConnectionPool}, }; -/// If less than TEMPORARY_FETCHER_THRESHOLD certificates are missing, -/// the temporary fetcher will stop fetching blocks. 
-pub(crate) const TEMPORARY_FETCHER_THRESHOLD: u64 = 10; +/// Whenever more than FALLBACK_FETCHER_THRESHOLD certificates are missing, +/// the fallback fetcher is active. +pub(crate) const FALLBACK_FETCHER_THRESHOLD: u64 = 10; /// External node. pub(super) struct EN { @@ -115,11 +116,9 @@ impl EN { let store = store.clone(); async { let store = store; - self.temporary_block_fetcher(ctx, &store).await?; - tracing::info!( - "temporary block fetcher finished, switching to p2p fetching only" - ); - Ok(()) + self.fallback_block_fetcher(ctx, &store) + .await + .wrap("fallback_block_fetcher()") } }); @@ -191,7 +190,7 @@ impl EN { .new_payload_queue(ctx, actions, self.sync_state.clone()) .await .wrap("new_fetcher_cursor()")?; - self.fetch_blocks(ctx, &mut payload_queue, None).await + self.fetch_blocks(ctx, &mut payload_queue).await }) .await; match res { @@ -362,9 +361,14 @@ impl EN { } /// Fetches (with retries) the given block from the main node. - async fn fetch_block(&self, ctx: &ctx::Ctx, n: L2BlockNumber) -> ctx::Result { + async fn fetch_block( + &self, + ctx: &ctx::Ctx, + n: validator::BlockNumber, + ) -> ctx::Result { const RETRY_INTERVAL: time::Duration = time::Duration::seconds(5); - + let n = L2BlockNumber(n.0.try_into().context("overflow")?); + METRICS.fetch_block.inc(); loop { match ctx.wait(self.client.sync_l2_block(n, true)).await? { Ok(Some(block)) => return Ok(block.try_into()?), @@ -376,9 +380,8 @@ impl EN { } } - /// Fetches blocks from the main node directly, until the certificates - /// are backfilled. This allows for smooth transition from json RPC to p2p block syncing. - pub(crate) async fn temporary_block_fetcher( + /// Fetches blocks from the main node directly whenever the EN is lagging behind too much. 
+ pub(crate) async fn fallback_block_fetcher( &self, ctx: &ctx::Ctx, store: &Store, @@ -386,66 +389,63 @@ impl EN { const MAX_CONCURRENT_REQUESTS: usize = 30; scope::run!(ctx, |ctx, s| async { let (send, mut recv) = ctx::channel::bounded(MAX_CONCURRENT_REQUESTS); - s.spawn(async { - let Some(mut next) = store.next_block(ctx).await? else { - return Ok(()); - }; - while store.persisted().borrow().next().0 + TEMPORARY_FETCHER_THRESHOLD < next.0 { - let n = L2BlockNumber(next.0.try_into().context("overflow")?); - self.sync_state.wait_for_main_node_block(ctx, n).await?; - send.send(ctx, s.spawn(self.fetch_block(ctx, n))).await?; + // TODO: metrics. + s.spawn::<()>(async { + let send = send; + let is_lagging = + |main| main >= store.persisted().borrow().next() + FALLBACK_FETCHER_THRESHOLD; + let mut next = store.next_block(ctx).await.wrap("next_block()")?; + loop { + // Wait until p2p syncing is lagging. + self.sync_state + .wait_for_main_node_block(ctx, is_lagging) + .await?; + // Determine the next block to fetch and wait for it to be available. + next = next.max(store.next_block(ctx).await.wrap("next_block()")?); + self.sync_state + .wait_for_main_node_block(ctx, |main| main >= next) + .await?; + // Fetch the block asynchronously. + send.send(ctx, s.spawn(self.fetch_block(ctx, next))).await?; next = next.next(); } - drop(send); - Ok(()) }); - while let Ok(block) = recv.recv_or_disconnected(ctx).await? { + loop { + let block = recv.recv(ctx).await?; store .queue_next_fetched_block(ctx, block.join(ctx).await?) .await .wrap("queue_next_fetched_block()")?; } - Ok(()) }) .await } - /// Fetches blocks from the main node in range `[cursor.next()..end)`. + /// Fetches blocks starting with `queue.next()`. 
async fn fetch_blocks( &self, ctx: &ctx::Ctx, queue: &mut storage::PayloadQueue, - end: Option, ) -> ctx::Result<()> { const MAX_CONCURRENT_REQUESTS: usize = 30; - let first = queue.next(); - let mut next = first; + let mut next = queue.next(); scope::run!(ctx, |ctx, s| async { let (send, mut recv) = ctx::channel::bounded(MAX_CONCURRENT_REQUESTS); - s.spawn(async { + s.spawn::<()>(async { let send = send; - while end.map_or(true, |end| next < end) { - let n = L2BlockNumber(next.0.try_into().context("overflow")?); - self.sync_state.wait_for_main_node_block(ctx, n).await?; - send.send(ctx, s.spawn(self.fetch_block(ctx, n))).await?; + loop { + self.sync_state + .wait_for_main_node_block(ctx, |main| main >= next) + .await?; + send.send(ctx, s.spawn(self.fetch_block(ctx, next))).await?; next = next.next(); } - Ok(()) }); - while end.map_or(true, |end| queue.next() < end) { + loop { let block = recv.recv(ctx).await?.join(ctx).await?; queue.send(block).await.context("queue.send()")?; } - Ok(()) }) - .await?; - // If fetched anything, wait for the last block to be stored persistently. - if first < queue.next() { - self.pool - .wait_for_payload(ctx, queue.next().prev().unwrap()) - .await - .wrap("wait_for_payload()")?; - } - Ok(()) + .await } } diff --git a/core/node/consensus/src/lib.rs b/core/node/consensus/src/lib.rs index 8bf078120aa9..d89aa5f5e829 100644 --- a/core/node/consensus/src/lib.rs +++ b/core/node/consensus/src/lib.rs @@ -9,6 +9,7 @@ mod abi; mod config; mod en; pub mod era; +mod metrics; mod mn; mod registry; mod storage; diff --git a/core/node/consensus/src/metrics.rs b/core/node/consensus/src/metrics.rs new file mode 100644 index 000000000000..f53bb9320917 --- /dev/null +++ b/core/node/consensus/src/metrics.rs @@ -0,0 +1,13 @@ +//! Consensus related metrics. + +#[derive(Debug, vise::Metrics)] +#[metrics(prefix = "zksync_node_consensus")] +pub(crate) struct Metrics { + /// Number of blocks that has been fetched via JSON-RPC. 
+ /// It is used only as a fallback when the p2p syncing is disabled or falling behind. + /// so it shouldn't be increasing under normal circumstances if p2p syncing is enabled. + pub fetch_block: vise::Counter, +} + +#[vise::register] +pub(super) static METRICS: vise::Global = vise::Global::new(); diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs index 154509e97b14..c42e78658dc2 100644 --- a/core/node/consensus/src/storage/store.rs +++ b/core/node/consensus/src/storage/store.rs @@ -114,14 +114,12 @@ impl Store { } /// Number of the next block to queue. - pub(crate) async fn next_block( - &self, - ctx: &ctx::Ctx, - ) -> ctx::OrCanceled> { + pub(crate) async fn next_block(&self, ctx: &ctx::Ctx) -> ctx::Result { Ok(sync::lock(ctx, &self.block_payloads) .await? .as_ref() - .map(|p| p.next())) + .context("payload_queue not set")? + .next()) } /// Queues the next block. diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index ef4226c915f0..faa895c86c71 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -45,10 +45,7 @@ use zksync_types::{ }; use zksync_web3_decl::client::{Client, DynClient, L2}; -use crate::{ - en, - storage::{ConnectionPool, Store}, -}; +use crate::{en, storage::ConnectionPool}; /// Fake StateKeeper for tests. #[derive(Debug)] @@ -413,40 +410,6 @@ impl StateKeeper { .await } - pub async fn run_temporary_fetcher( - self, - ctx: &ctx::Ctx, - client: Box>, - ) -> ctx::Result<()> { - scope::run!(ctx, |ctx, s| async { - let payload_queue = self - .pool - .connection(ctx) - .await - .wrap("connection()")? - .new_payload_queue(ctx, self.actions_sender, self.sync_state.clone()) - .await - .wrap("new_payload_queue()")?; - let (store, runner) = Store::new( - ctx, - self.pool.clone(), - Some(payload_queue), - Some(client.clone()), - ) - .await - .wrap("Store::new()")?; - s.spawn_bg(async { Ok(runner.run(ctx).await?) 
}); - en::EN { - pool: self.pool.clone(), - client, - sync_state: self.sync_state.clone(), - } - .temporary_block_fetcher(ctx, &store) - .await - }) - .await - } - /// Runs consensus node for the external node. pub async fn run_consensus( self, diff --git a/core/node/consensus/src/tests/mod.rs b/core/node/consensus/src/tests/mod.rs index 663ccab49904..efb8d14314c8 100644 --- a/core/node/consensus/src/tests/mod.rs +++ b/core/node/consensus/src/tests/mod.rs @@ -16,7 +16,7 @@ use zksync_types::ProtocolVersionId; use zksync_web3_decl::namespaces::EnNamespaceClient as _; use crate::{ - en::TEMPORARY_FETCHER_THRESHOLD, + en::FALLBACK_FETCHER_THRESHOLD, mn::run_main_node, storage::{ConnectionPool, Store}, testonly, @@ -665,7 +665,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV // Test temporary fetcher fetching blocks if a lot of certs are missing. #[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test] -async fn test_temporary_fetcher(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_fallback_fetcher(from_snapshot: bool, version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); @@ -705,7 +705,7 @@ async fn test_temporary_fetcher(from_snapshot: bool, version: ProtocolVersionId) s.spawn_bg(runner.run(ctx)); s.spawn_bg(node.run_fetcher(ctx, client.clone())); validator - .push_random_blocks(rng, account, TEMPORARY_FETCHER_THRESHOLD as usize + 1) + .push_random_blocks(rng, account, FALLBACK_FETCHER_THRESHOLD as usize + 1) .await; node_pool .wait_for_payload(ctx, validator.last_block()) @@ -715,58 +715,7 @@ async fn test_temporary_fetcher(from_snapshot: bool, version: ProtocolVersionId) .await .unwrap(); - tracing::info!( - "Run p2p fetcher. Blocks should be fetched by the temporary fetcher anyway." 
- ); - scope::run!(ctx, |ctx, s| async { - let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; - s.spawn_bg(runner.run(ctx)); - s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg.clone())); - validator.push_random_blocks(rng, account, 5).await; - node_pool - .wait_for_payload(ctx, validator.last_block()) - .await?; - Ok(()) - }) - .await - .unwrap(); - Ok(()) - }) - .await - .unwrap(); -} - -// Test that temporary fetcher terminates once enough blocks have certs. -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] -#[tokio::test] -async fn test_temporary_fetcher_termination(from_snapshot: bool, version: ProtocolVersionId) { - zksync_concurrency::testonly::abort_on_panic(); - let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); - let rng = &mut ctx.rng(); - let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); - let node_cfg = validator_cfg.new_fullnode(rng); - let account = &mut Account::random(); - - scope::run!(ctx, |ctx, s| async { - tracing::info!("Spawn validator."); - let validator_pool = ConnectionPool::test(from_snapshot, version).await; - let (mut validator, runner) = - testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; - s.spawn_bg(runner.run(ctx)); - s.spawn_bg(run_main_node( - ctx, - validator_cfg.config.clone(), - validator_cfg.secrets.clone(), - validator_pool.clone(), - )); - // API server needs at least 1 L1 batch to start. - validator.seal_batch().await; - let client = validator.connect(ctx).await?; - - let node_pool = ConnectionPool::test(from_snapshot, version).await; - - // Run the EN so the consensus is initialized on EN and wait for it to sync. + tracing::info!("Run p2p fetcher. 
Blocks should be fetched by the fallback fetcher anyway."); scope::run!(ctx, |ctx, s| async { let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); @@ -779,12 +728,6 @@ async fn test_temporary_fetcher_termination(from_snapshot: bool, version: Protoc }) .await .unwrap(); - - // Run the temporary fetcher. It should terminate immediately, since EN is synced. - let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; - s.spawn_bg(runner.run(ctx)); - node.run_temporary_fetcher(ctx, client).await?; - Ok(()) }) .await diff --git a/core/node/node_sync/Cargo.toml b/core/node/node_sync/Cargo.toml index 9c5b0c000700..e42cbff85806 100644 --- a/core/node/node_sync/Cargo.toml +++ b/core/node/node_sync/Cargo.toml @@ -24,6 +24,7 @@ zksync_health_check.workspace = true zksync_utils.workspace = true zksync_eth_client.workspace = true zksync_concurrency.workspace = true +zksync_consensus_roles.workspace = true vise.workspace = true zksync_vm_executor.workspace = true diff --git a/core/node/node_sync/src/sync_state.rs b/core/node/node_sync/src/sync_state.rs index f8a2fe00ec09..1ffec757c9b1 100644 --- a/core/node/node_sync/src/sync_state.rs +++ b/core/node/node_sync/src/sync_state.rs @@ -4,6 +4,7 @@ use async_trait::async_trait; use serde::Serialize; use tokio::sync::watch; use zksync_concurrency::{ctx, sync}; +use zksync_consensus_roles::validator; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_health_check::{CheckHealth, Health, HealthStatus}; use zksync_shared_metrics::EN_METRICS; @@ -50,18 +51,20 @@ impl SyncState { .unwrap(); } + /// Waits until the main node block is greater or equal to the given block number. + /// Returns the current main node block number. 
pub async fn wait_for_main_node_block( &self, ctx: &ctx::Ctx, - want: L2BlockNumber, - ) -> ctx::OrCanceled<()> { - sync::wait_for( - ctx, - &mut self.0.subscribe(), - |inner| matches!(inner.main_node_block, Some(got) if got >= want), - ) - .await?; - Ok(()) + pred: impl Fn(validator::BlockNumber) -> bool, + ) -> ctx::OrCanceled { + sync::wait_for_some(ctx, &mut self.0.subscribe(), |inner| { + inner + .main_node_block + .map(|n| validator::BlockNumber(n.0.into())) + .filter(|n| pred(*n)) + }) + .await } pub fn set_main_node_block(&self, block: L2BlockNumber) { From 54e4b007b2d32d86b2701b01cd3bef3b3bc97087 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Wed, 13 Nov 2024 17:55:03 +0200 Subject: [PATCH 5/6] fix(vm): Do not require experimental VM config (#3270) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Uses default experimental VM config when initializing the main node. ## Why ❔ Currently, the experimental VM config is required in certain cases, which may lead to panics during node initialization. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
--- core/bin/zksync_server/src/node_builder.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 32478ede5bf8..794c847a24d5 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -325,7 +325,11 @@ impl MainNodeBuilder { latest_values_cache_size: rpc_config.latest_values_cache_size() as u64, latest_values_max_block_lag: rpc_config.latest_values_max_block_lag(), }; - let vm_config = try_load_config!(self.configs.experimental_vm_config); + let vm_config = self + .configs + .experimental_vm_config + .clone() + .unwrap_or_default(); // On main node we always use master pool sink. self.node.add_layer(MasterPoolSinkLayer); @@ -597,7 +601,11 @@ impl MainNodeBuilder { } fn add_vm_playground_layer(mut self) -> anyhow::Result { - let vm_config = try_load_config!(self.configs.experimental_vm_config); + let vm_config = self + .configs + .experimental_vm_config + .clone() + .unwrap_or_default(); self.node.add_layer(VmPlaygroundLayer::new( vm_config.playground, self.genesis_config.l2_chain_id, From 8620a8e9b97a31e4db13f39912759c3db3111a11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bruno=20Fran=C3=A7a?= Date: Thu, 14 Nov 2024 02:21:06 +0000 Subject: [PATCH 6/6] fix: Fixes build issues related to foundy-zksync (#3272) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Docker build script checked for wrong forge version (foundry-zksync reports a forge version 0.0.2) - Dev setup guide stated the wrong command to install foundry-zksync (current one wouldn't work) --- docker/Makefile | 2 +- docs/guides/setup-dev.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/Makefile b/docker/Makefile index 4e0ca51f904e..19d5fee0907f 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -7,7 +7,7 @@ NODE_VERSION_MIN=20.17.0 YARN_VERSION_MIN=1.22.19 
RUST_VERSION=nightly-2024-08-01 SQLX_CLI_VERSION=0.8.1 -FORGE_MIN_VERSION=0.2.0 +FORGE_MIN_VERSION=0.0.2 # Versions and packages checks check-nodejs: diff --git a/docs/guides/setup-dev.md b/docs/guides/setup-dev.md index a74f976c2b73..43350ac3314d 100644 --- a/docs/guides/setup-dev.md +++ b/docs/guides/setup-dev.md @@ -47,7 +47,7 @@ cargo install sqlx-cli --version 0.8.1 # Foundry ZKsync curl -L https://raw.githubusercontent.com/matter-labs/foundry-zksync/main/install-foundry-zksync | bash -foundryup-zksync --branch master +foundryup-zksync # Non CUDA (GPU) setup, can be skipped if the machine has a CUDA installed for provers # Don't do that if you intend to run provers on your machine. Check the prover docs for a setup instead.