From c53ab45e19ce99b5c207cf9f4f2ef0c0c0c72bdb Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 24 Nov 2022 17:29:09 +0600 Subject: [PATCH 01/73] Add in-protocol deposit processing --- specs/deposits/beacon-chain.md | 416 +++++++++++++++++++++++++++++++++ 1 file changed, 416 insertions(+) create mode 100644 specs/deposits/beacon-chain.md diff --git a/specs/deposits/beacon-chain.md b/specs/deposits/beacon-chain.md new file mode 100644 index 0000000000..bb6ebd2e8c --- /dev/null +++ b/specs/deposits/beacon-chain.md @@ -0,0 +1,416 @@ +# DepositEIP -- The Beacon Chain + +## Table of contents + + + + + +- [Introduction](#introduction) +- [Preset](#preset) + - [State list lengths](#state-list-lengths) + - [Execution](#execution) +- [Containers](#containers) + - [New containers](#new-containers) + - [`DepositReceipt`](#depositreceipt) + - [`IndexedDepositData`](#indexeddepositdata) + - [Extended Containers](#extended-containers) + - [`ExecutionPayload`](#executionpayload) + - [`ExecutionPayloadHeader`](#executionpayloadheader) + - [`BeaconState`](#beaconstate) +- [Beacon chain state transition function](#beacon-chain-state-transition-function) + - [Epoch processing](#epoch-processing) + - [Helper functions](#helper-functions) + - [New `get_validator_from_indexed_deposit_data`](#new-get_validator_from_indexed_deposit_data) + - [New `apply_pending_deposit`](#new-apply_pending_deposit) + - [New `process_pending_deposits`](#new-process_pending_deposits) + - [Block processing](#block-processing) + - [New `process_deposit_receipts`](#new-process_deposit_receipts) + - [Modified `process_execution_payload`](#modified-process_execution_payload) + - [Modified `process_operations`](#modified-process_operations) +- [Testing](#testing) + + + + +## Introduction + +This is the beacon chain specification of in-protocol deposits processing mechanism. +This mechanism relies on the changes proposed by the corresponding EIP. 
+ +*Note:* This specification is under development and should be used with care. + +## Preset + +### State list lengths + +| Name | Value | +| - | - | +| `PENDING_DEPOSITS_LIMIT` | `2**32` (= 4,294,967,296) | + +### Execution + +| Name | Value | Description | +| - | - | - | +| `MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD` | `uint64(2**10)` (= 1,024) | Maximum number of deposit receipts allowed in each payload | + +## Containers + +### New containers + +#### `DepositReceipt` + +```python +class DepositReceipt(Container): + pubkey: BLSPubkey + withdrawal_credentials: Bytes32 + amount: Gwei + signature: BLSSignature + index: uint64 +``` + +#### `IndexedDepositData` + +```python +class IndexedDepositData(Container): + pubkey: BLSPubkey + withdrawal_credentials: Bytes32 + amount: Gwei + index: uint64 + epoch: Epoch +``` + +### Extended Containers + +#### `ExecutionPayload` + +```python +class ExecutionPayload(Container): + # Execution block header fields + parent_hash: Hash32 + fee_recipient: ExecutionAddress + state_root: Bytes32 + receipts_root: Bytes32 + logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM] + prev_randao: Bytes32 + block_number: uint64 + gas_limit: uint64 + gas_used: uint64 + timestamp: uint64 + extra_data: ByteList[MAX_EXTRA_DATA_BYTES] + base_fee_per_gas: uint256 + # Extra payload fields + block_hash: Hash32 + transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD] + withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD] + deposit_receipts: List[DepositReceipt, MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD] # [New in DepositEIP] +``` + +#### `ExecutionPayloadHeader` + +```python +class ExecutionPayloadHeader(Container): + # Execution block header fields + parent_hash: Hash32 + fee_recipient: ExecutionAddress + state_root: Bytes32 + receipts_root: Bytes32 + logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM] + prev_randao: Bytes32 + block_number: uint64 + gas_limit: uint64 + gas_used: uint64 + timestamp: uint64 + extra_data: ByteList[MAX_EXTRA_DATA_BYTES] + base_fee_per_gas: 
uint256 + # Extra payload fields + block_hash: Hash32 + transactions_root: Root + withdrawals_root: Root + deposit_receipts_root: Root # [New in DepositEIP] +``` + +#### `BeaconState` + +```python +class BeaconState(Container): + # Versioning + genesis_time: uint64 + genesis_validators_root: Root + slot: Slot + fork: Fork + # History + latest_block_header: BeaconBlockHeader + block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] + # Eth1 + eth1_data: Eth1Data + eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH] + eth1_deposit_index: uint64 + # Registry + validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] + balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] + # Randomness + randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR] + # Slashings + slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances + # Participation + previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + # Finality + justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] # Bit set for every recent justified epoch + previous_justified_checkpoint: Checkpoint + current_justified_checkpoint: Checkpoint + finalized_checkpoint: Checkpoint + # Inactivity + inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT] + # Sync + current_sync_committee: SyncCommittee + next_sync_committee: SyncCommittee + # Execution + latest_execution_payload_header: ExecutionPayloadHeader + # Withdrawals + next_withdrawal_index: WithdrawalIndex + next_withdrawal_validator_index: ValidatorIndex + # DepositsEIP + pending_deposits: List[IndexedDepositData, PENDING_DEPOSITS_LIMIT] +``` + +## Beacon chain state transition function + +### Epoch processing + +```python +def process_epoch(state: BeaconState) -> None: + 
process_justification_and_finalization(state) + process_inactivity_updates(state) + process_rewards_and_penalties(state) + # Run before registry and after finality updates + process_pending_deposits(state) # [New in DepositsEIP] + process_registry_updates(state) + process_slashings(state) + process_eth1_data_reset(state) + process_effective_balance_updates(state) + process_slashings_reset(state) + process_randao_mixes_reset(state) + process_historical_roots_update(state) + process_participation_flag_updates(state) + process_sync_committee_updates(state) +``` + +#### Helper functions + +##### New `get_validator_from_indexed_deposit_data` + +```python +def get_validator_from_indexed_deposit_data(indexed_deposit_data: IndexedDepositData) -> Validator: + amount = indexed_deposit_data.amount + effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) + + return Validator( + pubkey=indexed_deposit_data.pubkey, + withdrawal_credentials=indexed_deposit_data.withdrawal_credentials, + activation_eligibility_epoch=FAR_FUTURE_EPOCH, + activation_epoch=FAR_FUTURE_EPOCH, + exit_epoch=FAR_FUTURE_EPOCH, + withdrawable_epoch=FAR_FUTURE_EPOCH, + effective_balance=effective_balance, + ) +``` + +##### New `apply_pending_deposit` + +```python +def apply_pending_deposit(state: BeaconState, indexed_deposit_data: IndexedDepositData) -> None: + pubkey = indexed_deposit_data.pubkey + amount = indexed_deposit_data.amount + validator_pubkeys = [v.pubkey for v in state.validators] + if pubkey not in validator_pubkeys: + # Add validator and balance entries + state.validators.append(get_validator_from_indexed_deposit_data(indexed_deposit_data)) + state.balances.append(amount) + else: + # Increase balance by deposit amount + index = ValidatorIndex(validator_pubkeys.index(pubkey)) + increase_balance(state, index, amount) +``` + +#### New `process_pending_deposits` + +```python +def process_pending_deposits(state: BeaconState) -> None: + finalized_epoch = 
state.finalized_checkpoint.epoch + + next_pending_deposit_index = 0 + for pending_deposit in state.pending_deposits: + # Apply only finalized deposits + if pending_deposit.epoch > finalized_epoch + break + + # Skip already applied deposits + if pending_deposit.index >= state.eth1_deposit_index: + apply_pending_deposit(state, pending_deposit) + state.eth1_deposit_index += 1 + + next_pending_deposit_index += 1 + + state.pending_deposit = state.pending_deposit[next_pending_deposit_index:] +``` + +### Block processing + +```python +def process_block(state: BeaconState, block: BeaconBlock) -> None: + process_block_header(state, block) + if is_execution_enabled(state, block.body): + process_withdrawals(state, block.body.execution_payload) + process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [Modified in DepositsEIP] + process_deposit_receipts(state, block.body.execution_payload) # [New in DepositsEIP] + process_randao(state, block.body) + process_eth1_data(state, block.body) + process_operations(state, block.body) # [Modified in DepositsEIP] + process_sync_aggregate(state, block.body.sync_aggregate) +``` + +#### New `process_deposit_receipts` + +```python +def process_deposit_receipts(state: BeaconState, payload: ExecutionPayload) -> None: + current_epoch = get_current_epoch(state) + + for deposit_receipt in payload.deposit_receipts: + if pubkey not in validator_pubkeys: + # Verify the deposit signature (proof of possession) which is not checked by the deposit contract + deposit_message = DepositMessage( + pubkey=deposit_receipt.pubkey, + withdrawal_credentials=deposit_receipt.withdrawal_credentials, + amount=deposit_receipt.amount, + ) + domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks + signing_root = compute_signing_root(deposit_message, domain) + if not bls.Verify(pubkey, signing_root, deposit.data.signature): + continue + + pending_deposit = IndexedDepositData( + 
pubkey=deposit_receipt.pubkey, + withdrawal_credentials=deposit_receipt.withdrawal_credentials, + amount=deposit_receipt.amount, + index=deposit_receipt.index, + epoch=current_epoch, + ) + state.pending_deposits.append(pending_deposit) +``` + +#### Modified `process_execution_payload` + +*Note*: The function `process_execution_payload` is modified to use the new `ExecutionPayloadHeader` type. + +```python +def process_execution_payload(state: BeaconState, payload: ExecutionPayload, execution_engine: ExecutionEngine) -> None: + # Verify consistency of the parent hash with respect to the previous execution payload header + if is_merge_transition_complete(state): + assert payload.parent_hash == state.latest_execution_payload_header.block_hash + # Verify prev_randao + assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state)) + # Verify timestamp + assert payload.timestamp == compute_timestamp_at_slot(state, state.slot) + # Verify the execution payload is valid + assert execution_engine.notify_new_payload(payload) + # Cache execution payload header + state.latest_execution_payload_header = ExecutionPayloadHeader( + parent_hash=payload.parent_hash, + fee_recipient=payload.fee_recipient, + state_root=payload.state_root, + receipts_root=payload.receipts_root, + logs_bloom=payload.logs_bloom, + prev_randao=payload.prev_randao, + block_number=payload.block_number, + gas_limit=payload.gas_limit, + gas_used=payload.gas_used, + timestamp=payload.timestamp, + extra_data=payload.extra_data, + base_fee_per_gas=payload.base_fee_per_gas, + block_hash=payload.block_hash, + transactions_root=hash_tree_root(payload.transactions), + withdrawals_root=hash_tree_root(payload.withdrawals), + deposit_receipts_root=hash_tree_root(payload.deposit_receipts), # [New in DepositsEIP] + ) +``` + +#### Modified `process_operations` + +*Note*: The function `process_operations` is modified to process `BLSToExecutionChange` operations included in the block. 
+ +```python +def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: + # Verify that outstanding deposits are processed up to the maximum number of deposits + unprocessed_deposits_count = max(0, state.eth1_data.deposit_count - state.eth1_deposit_index) # [New in DepositsEIP] + assert len(body.deposits) == min(MAX_DEPOSITS, unprocessed_deposits_count) # [Modified in DepositsEIP] + + def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: + for operation in operations: + fn(state, operation) + + for_ops(body.proposer_slashings, process_proposer_slashing) + for_ops(body.attester_slashings, process_attester_slashing) + for_ops(body.attestations, process_attestation) + for_ops(body.deposits, process_deposit) + for_ops(body.voluntary_exits, process_voluntary_exit) + for_ops(body.bls_to_execution_changes, process_bls_to_execution_change) # [New in Capella] +``` + +## Testing + +*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure DepositsEIP testing only. +Modifications include: +1. Use `DEPOSITS_EIP_FORK_VERSION` as the previous and current fork version. +2. Utilize the DepositsEIP `BeaconBlockBody` when constructing the initial `latest_block_header`. 
+ +```python +def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, + eth1_timestamp: uint64, + deposits: Sequence[Deposit], + execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader() + ) -> BeaconState: + fork = Fork( + previous_version=CAPELLA_FORK_VERSION, # [Modified in Capella] for testing only + current_version=CAPELLA_FORK_VERSION, # [Modified in Capella] + epoch=GENESIS_EPOCH, + ) + state = BeaconState( + genesis_time=eth1_timestamp + GENESIS_DELAY, + fork=fork, + eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), + latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), + randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy + ) + + # Process deposits + leaves = list(map(lambda deposit: deposit.data, deposits)) + for index, deposit in enumerate(deposits): + deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1]) + state.eth1_data.deposit_root = hash_tree_root(deposit_data_list) + process_deposit(state, deposit) + + # Process activations + for index, validator in enumerate(state.validators): + balance = state.balances[index] + validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) + if validator.effective_balance == MAX_EFFECTIVE_BALANCE: + validator.activation_eligibility_epoch = GENESIS_EPOCH + validator.activation_epoch = GENESIS_EPOCH + + # Set genesis validators root for domain separation and chain versioning + state.genesis_validators_root = hash_tree_root(state.validators) + + # Fill in sync committees + # Note: A duplicate committee is assigned for the current and next committee at genesis + state.current_sync_committee = get_next_sync_committee(state) + state.next_sync_committee = get_next_sync_committee(state) + + # Initialize the execution payload header + state.latest_execution_payload_header = execution_payload_header + + return 
state +``` From b3c771c46db891812422e113e227b39c6757620c Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 7 Dec 2022 12:05:24 +0600 Subject: [PATCH 02/73] Preserve deposits per epoch boundary --- specs/deposits/beacon-chain.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/specs/deposits/beacon-chain.md b/specs/deposits/beacon-chain.md index bb6ebd2e8c..7e8b5bf82d 100644 --- a/specs/deposits/beacon-chain.md +++ b/specs/deposits/beacon-chain.md @@ -244,10 +244,14 @@ def process_pending_deposits(state: BeaconState) -> None: next_pending_deposit_index = 0 for pending_deposit in state.pending_deposits: + # Preserve deposits per epoch boundary + if next_pending_deposit_index >= MAX_DEPOSITS * SLOTS_PER_EPOCH: + break + # Apply only finalized deposits - if pending_deposit.epoch > finalized_epoch + if pending_deposit.epoch > finalized_epoch: break - + # Skip already applied deposits if pending_deposit.index >= state.eth1_deposit_index: apply_pending_deposit(state, pending_deposit) From 5ea983ac33e16963b845318fbfd1f178a516a8cb Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 7 Dec 2022 22:25:22 +0600 Subject: [PATCH 03/73] Fix toc, add more comments --- specs/deposits/beacon-chain.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/specs/deposits/beacon-chain.md b/specs/deposits/beacon-chain.md index 7e8b5bf82d..40d9864c2d 100644 --- a/specs/deposits/beacon-chain.md +++ b/specs/deposits/beacon-chain.md @@ -22,7 +22,7 @@ - [Epoch processing](#epoch-processing) - [Helper functions](#helper-functions) - [New `get_validator_from_indexed_deposit_data`](#new-get_validator_from_indexed_deposit_data) - - [New `apply_pending_deposit`](#new-apply_pending_deposit) + - [New `apply_indexed_deposit_data`](#new-apply_indexed_deposit_data) - [New `process_pending_deposits`](#new-process_pending_deposits) - [Block processing](#block-processing) - [New `process_deposit_receipts`](#new-process_deposit_receipts) @@ 
-219,10 +219,10 @@ def get_validator_from_indexed_deposit_data(indexed_deposit_data: IndexedDeposit ) ``` -##### New `apply_pending_deposit` +##### New `apply_indexed_deposit_data` ```python -def apply_pending_deposit(state: BeaconState, indexed_deposit_data: IndexedDepositData) -> None: +def apply_indexed_deposit_data(state: BeaconState, indexed_deposit_data: IndexedDepositData) -> None: pubkey = indexed_deposit_data.pubkey amount = indexed_deposit_data.amount validator_pubkeys = [v.pubkey for v in state.validators] @@ -254,7 +254,7 @@ def process_pending_deposits(state: BeaconState) -> None: # Skip already applied deposits if pending_deposit.index >= state.eth1_deposit_index: - apply_pending_deposit(state, pending_deposit) + apply_indexed_deposit_data(state, pending_deposit) state.eth1_deposit_index += 1 next_pending_deposit_index += 1 @@ -348,8 +348,9 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe ```python def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: - # Verify that outstanding deposits are processed up to the maximum number of deposits + # Prevent potential underflow that is introduced by the mix of two deposit processing flows unprocessed_deposits_count = max(0, state.eth1_data.deposit_count - state.eth1_deposit_index) # [New in DepositsEIP] + # Verify that outstanding deposits are processed up to the maximum number of deposits assert len(body.deposits) == min(MAX_DEPOSITS, unprocessed_deposits_count) # [Modified in DepositsEIP] def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: From 8cc293c8697376bb594cca9f753f0b93c2aef634 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 7 Dec 2022 22:29:21 +0600 Subject: [PATCH 04/73] Fix wording --- specs/deposits/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deposits/beacon-chain.md b/specs/deposits/beacon-chain.md index 40d9864c2d..76423d656d 100644 --- 
a/specs/deposits/beacon-chain.md +++ b/specs/deposits/beacon-chain.md @@ -348,7 +348,7 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe ```python def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: - # Prevent potential underflow that is introduced by the mix of two deposit processing flows + # Prevent potential underflow introduced by mixing two deposit processing flows unprocessed_deposits_count = max(0, state.eth1_data.deposit_count - state.eth1_deposit_index) # [New in DepositsEIP] # Verify that outstanding deposits are processed up to the maximum number of deposits assert len(body.deposits) == min(MAX_DEPOSITS, unprocessed_deposits_count) # [Modified in DepositsEIP] From 9d2a8f7d63f8a720c7723f04190972fc6e542e96 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Mon, 19 Dec 2022 17:47:56 +0600 Subject: [PATCH 05/73] Give deposits EIP a name --- specs/{deposits => eip6110}/beacon-chain.md | 28 ++++++++++----------- 1 file changed, 14 insertions(+), 14 deletions(-) rename specs/{deposits => eip6110}/beacon-chain.md (95%) diff --git a/specs/deposits/beacon-chain.md b/specs/eip6110/beacon-chain.md similarity index 95% rename from specs/deposits/beacon-chain.md rename to specs/eip6110/beacon-chain.md index 76423d656d..d1b64f5a68 100644 --- a/specs/deposits/beacon-chain.md +++ b/specs/eip6110/beacon-chain.md @@ -1,4 +1,4 @@ -# DepositEIP -- The Beacon Chain +# EIP-6110 -- The Beacon Chain ## Table of contents @@ -36,7 +36,7 @@ ## Introduction This is the beacon chain specification of in-protocol deposits processing mechanism. -This mechanism relies on the changes proposed by the corresponding EIP. +This mechanism relies on the changes proposed by [EIP-6110](http://eips.ethereum.org/EIPS/eip-6110). *Note:* This specification is under development and should be used with care. 
@@ -103,7 +103,7 @@ class ExecutionPayload(Container): block_hash: Hash32 transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD] withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD] - deposit_receipts: List[DepositReceipt, MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD] # [New in DepositEIP] + deposit_receipts: List[DepositReceipt, MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD] # [New in EIP-6110] ``` #### `ExecutionPayloadHeader` @@ -127,7 +127,7 @@ class ExecutionPayloadHeader(Container): block_hash: Hash32 transactions_root: Root withdrawals_root: Root - deposit_receipts_root: Root # [New in DepositEIP] + deposit_receipts_root: Root # [New in EIP-6110] ``` #### `BeaconState` @@ -173,7 +173,7 @@ class BeaconState(Container): # Withdrawals next_withdrawal_index: WithdrawalIndex next_withdrawal_validator_index: ValidatorIndex - # DepositsEIP + # EIP-6110 pending_deposits: List[IndexedDepositData, PENDING_DEPOSITS_LIMIT] ``` @@ -187,7 +187,7 @@ def process_epoch(state: BeaconState) -> None: process_inactivity_updates(state) process_rewards_and_penalties(state) # Run before registry and after finality updates - process_pending_deposits(state) # [New in DepositsEIP] + process_pending_deposits(state) # [New in EIP-6110] process_registry_updates(state) process_slashings(state) process_eth1_data_reset(state) @@ -269,11 +269,11 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None: process_block_header(state, block) if is_execution_enabled(state, block.body): process_withdrawals(state, block.body.execution_payload) - process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [Modified in DepositsEIP] - process_deposit_receipts(state, block.body.execution_payload) # [New in DepositsEIP] + process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [Modified in EIP-6110] + process_deposit_receipts(state, block.body.execution_payload) # [New in EIP-6110] process_randao(state, block.body) process_eth1_data(state, block.body) - 
process_operations(state, block.body) # [Modified in DepositsEIP] + process_operations(state, block.body) # [Modified in EIP-6110] process_sync_aggregate(state, block.body.sync_aggregate) ``` @@ -338,7 +338,7 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe block_hash=payload.block_hash, transactions_root=hash_tree_root(payload.transactions), withdrawals_root=hash_tree_root(payload.withdrawals), - deposit_receipts_root=hash_tree_root(payload.deposit_receipts), # [New in DepositsEIP] + deposit_receipts_root=hash_tree_root(payload.deposit_receipts), # [New in EIP-6110] ) ``` @@ -349,9 +349,9 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe ```python def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: # Prevent potential underflow introduced by mixing two deposit processing flows - unprocessed_deposits_count = max(0, state.eth1_data.deposit_count - state.eth1_deposit_index) # [New in DepositsEIP] + unprocessed_deposits_count = max(0, state.eth1_data.deposit_count - state.eth1_deposit_index) # [New in EIP-6110] # Verify that outstanding deposits are processed up to the maximum number of deposits - assert len(body.deposits) == min(MAX_DEPOSITS, unprocessed_deposits_count) # [Modified in DepositsEIP] + assert len(body.deposits) == min(MAX_DEPOSITS, unprocessed_deposits_count) # [Modified in EIP-6110] def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: for operation in operations: @@ -367,10 +367,10 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: ## Testing -*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure DepositsEIP testing only. +*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure EIP-6110 testing only. Modifications include: 1. Use `DEPOSITS_EIP_FORK_VERSION` as the previous and current fork version. -2. 
Utilize the DepositsEIP `BeaconBlockBody` when constructing the initial `latest_block_header`. +2. Utilize the EIP-6110 `BeaconBlockBody` when constructing the initial `latest_block_header`. ```python def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, From 48f120c90e77dadcbce51d8ac08df0e23b5e6db1 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 21 Dec 2022 19:19:13 +0600 Subject: [PATCH 06/73] Set a higher limit to deposit receipts list --- specs/eip6110/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/eip6110/beacon-chain.md b/specs/eip6110/beacon-chain.md index d1b64f5a68..cf9c046af3 100644 --- a/specs/eip6110/beacon-chain.md +++ b/specs/eip6110/beacon-chain.md @@ -52,7 +52,7 @@ This mechanism relies on the changes proposed by [EIP-6110](http://eips.ethereum | Name | Value | Description | | - | - | - | -| `MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD` | `uint64(2**10)` (= 1,024) | Maximum number of deposit receipts allowed in each payload | +| `MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD` | `uint64(2**13)` (= 8,192) | Maximum number of deposit receipts allowed in each payload | ## Containers From 6eb118d34afad8f8e01d2164e5d8d06ea0cae663 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 22 Dec 2022 16:58:34 +0600 Subject: [PATCH 07/73] Fix finality check in deposit processing --- specs/eip6110/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/eip6110/beacon-chain.md b/specs/eip6110/beacon-chain.md index cf9c046af3..4a95fa95f7 100644 --- a/specs/eip6110/beacon-chain.md +++ b/specs/eip6110/beacon-chain.md @@ -249,7 +249,7 @@ def process_pending_deposits(state: BeaconState) -> None: break # Apply only finalized deposits - if pending_deposit.epoch > finalized_epoch: + if pending_deposit.epoch >= finalized_epoch: break # Skip already applied deposits From 368e70d9bef4f29a16f1c2f7018fe68eaeeb7b06 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 2 Feb 2023 14:47:28 +1100 Subject: 
[PATCH 08/73] Remove sending empty blobs sidecar responses --- specs/deneb/p2p-interface.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index b1ff8b9226..f6b1050314 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -235,8 +235,9 @@ The following blobs sidecars, where they exist, MUST be sent in consecutive orde Clients MAY limit the number of blobs sidecars in the response. -An empty `BlobSidecar` is one that does not contain any blobs, but contains non-zero `beacon_block_root`, `beacon_block_slot` and a valid `kzg_aggregated_proof`. -Clients MAY NOT want to consider empty `BlobSidecar`s in rate limiting logic. +Slots that do not contain known blobs MUST be skipped, mimicking the behaviour +of the `BlocksByRange` request. Only response chunks with known blobs should +therefore be sent. The response MUST contain no more than `count` blobs sidecars. From 7637158a2fe8c54d36b1f69dd2932c5de9a2794e Mon Sep 17 00:00:00 2001 From: Ben Edgington Date: Wed, 15 Feb 2023 11:39:33 +0000 Subject: [PATCH 09/73] Change get_latest_attesting_balances() to get_weight() --- specs/phase0/fork-choice.md | 9 ++++----- .../core/pyspec/eth2spec/test/helpers/optimistic_sync.py | 2 +- .../eth2spec/test/phase0/fork_choice/test_on_block.py | 8 ++++---- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index f2ccc24b9d..3176c1cd5d 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -18,7 +18,7 @@ - [`get_current_slot`](#get_current_slot) - [`compute_slots_since_epoch_start`](#compute_slots_since_epoch_start) - [`get_ancestor`](#get_ancestor) - - [`get_latest_attesting_balance`](#get_latest_attesting_balance) + - [`get_weight`](#get_weight) - [`filter_block_tree`](#filter_block_tree) - [`get_filtered_block_tree`](#get_filtered_block_tree) - [`get_head`](#get_head) @@ -174,10 +174,10 @@ 
def get_ancestor(store: Store, root: Root, slot: Slot) -> Root: return root ``` -#### `get_latest_attesting_balance` +#### `get_weight` ```python -def get_latest_attesting_balance(store: Store, root: Root) -> Gwei: +def get_weight(store: Store, root: Root) -> Gwei: state = store.checkpoint_states[store.justified_checkpoint] active_indices = get_active_validator_indices(state, get_current_epoch(state)) attestation_score = Gwei(sum( @@ -197,7 +197,6 @@ def get_latest_attesting_balance(store: Store, root: Root) -> Gwei: committee_weight = get_total_active_balance(state) // SLOTS_PER_EPOCH proposer_score = (committee_weight * PROPOSER_SCORE_BOOST) // 100 return attestation_score + proposer_score - ``` #### `filter_block_tree` @@ -270,7 +269,7 @@ def get_head(store: Store) -> Root: return head # Sort by latest attesting balance with ties broken lexicographically # Ties broken by favoring block with lexicographically higher root - head = max(children, key=lambda root: (get_latest_attesting_balance(store, root), root)) + head = max(children, key=lambda root: (get_weight(store, root), root)) ``` #### `should_update_justified_checkpoint` diff --git a/tests/core/pyspec/eth2spec/test/helpers/optimistic_sync.py b/tests/core/pyspec/eth2spec/test/helpers/optimistic_sync.py index 6f42aa9bad..816c7a10b7 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/optimistic_sync.py +++ b/tests/core/pyspec/eth2spec/test/helpers/optimistic_sync.py @@ -177,7 +177,7 @@ def get_opt_head_block_root(spec, mega_store): return head # Sort by latest attesting balance with ties broken lexicographically # Ties broken by favoring block with lexicographically higher root - head = max(children, key=lambda root: (spec.get_latest_attesting_balance(store, root), root)) + head = max(children, key=lambda root: (spec.get_weight(store, root), root)) def is_invalidated(mega_store, block_root): diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py 
b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py index eede246302..23514b325b 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py @@ -729,14 +729,14 @@ def test_proposer_boost(spec, state): on_tick_and_append_step(spec, store, time, test_steps) yield from add_block(spec, store, signed_block, test_steps) assert store.proposer_boost_root == spec.hash_tree_root(block) - assert spec.get_latest_attesting_balance(store, spec.hash_tree_root(block)) > 0 + assert spec.get_weight(store, spec.hash_tree_root(block)) > 0 # Ensure that boost is removed after slot is over time = (store.genesis_time + block.slot * spec.config.SECONDS_PER_SLOT + spec.config.SECONDS_PER_SLOT) on_tick_and_append_step(spec, store, time, test_steps) assert store.proposer_boost_root == spec.Root() - assert spec.get_latest_attesting_balance(store, spec.hash_tree_root(block)) == 0 + assert spec.get_weight(store, spec.hash_tree_root(block)) == 0 next_slots(spec, state, 3) block = build_empty_block_for_next_slot(spec, state) @@ -747,14 +747,14 @@ def test_proposer_boost(spec, state): on_tick_and_append_step(spec, store, time, test_steps) yield from add_block(spec, store, signed_block, test_steps) assert store.proposer_boost_root == spec.hash_tree_root(block) - assert spec.get_latest_attesting_balance(store, spec.hash_tree_root(block)) > 0 + assert spec.get_weight(store, spec.hash_tree_root(block)) > 0 # Ensure that boost is removed after slot is over time = (store.genesis_time + block.slot * spec.config.SECONDS_PER_SLOT + spec.config.SECONDS_PER_SLOT) on_tick_and_append_step(spec, store, time, test_steps) assert store.proposer_boost_root == spec.Root() - assert spec.get_latest_attesting_balance(store, spec.hash_tree_root(block)) == 0 + assert spec.get_weight(store, spec.hash_tree_root(block)) == 0 test_steps.append({ 'checks': { From d5c7474d4d50982f9b6b714b0ba2a03103d0e1be Mon Sep 17 
00:00:00 2001 From: Mikhail Kalinin Date: Tue, 21 Feb 2023 17:31:27 +0600 Subject: [PATCH 10/73] Move EIP6110 to features --- specs/{ => _features}/eip6110/beacon-chain.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename specs/{ => _features}/eip6110/beacon-chain.md (100%) diff --git a/specs/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md similarity index 100% rename from specs/eip6110/beacon-chain.md rename to specs/_features/eip6110/beacon-chain.md From 9391f3ccfca279ec6d02e9b689b5b77c949da1e1 Mon Sep 17 00:00:00 2001 From: kasey Date: Tue, 21 Feb 2023 21:48:21 -0600 Subject: [PATCH 11/73] fix MAX_REQUEST_BLOBS_SIDECARS typo --- specs/deneb/p2p-interface.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index ea29eb7f4b..378a65fdc2 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -183,7 +183,7 @@ Request Content: ``` ( - List[BlobIdentifier, MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK] + List[BlobIdentifier, MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK] ) ``` @@ -191,7 +191,7 @@ Response Content: ``` ( - List[BlobSidecar, MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK] + List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK] ) ``` @@ -202,7 +202,7 @@ It may be less in the case that the responding peer is missing blocks or sidecar The response is unsigned, i.e. `BlobSidecar`, as the signature of the beacon block proposer may not be available beyond the initial distribution via gossip. -No more than `MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK` may be requested at a time. +No more than `MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK` may be requested at a time. `BlobSidecarsByRoot` is primarily used to recover recent blobs (e.g. when receiving a block with a transaction whose corresponding blob is missing). 
From 08c7287387d456b96bfe952ab4952c6be1af1612 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 22 Feb 2023 18:33:05 +0600 Subject: [PATCH 12/73] Get rid of pending_deposits queue --- specs/_features/eip6110/beacon-chain.md | 254 ++++++++++++------------ 1 file changed, 130 insertions(+), 124 deletions(-) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index 4a95fa95f7..3c81cfb891 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -7,6 +7,8 @@ - [Introduction](#introduction) +- [Constants](#constants) + - [Misc](#misc) - [Preset](#preset) - [State list lengths](#state-list-lengths) - [Execution](#execution) @@ -19,15 +21,12 @@ - [`ExecutionPayloadHeader`](#executionpayloadheader) - [`BeaconState`](#beaconstate) - [Beacon chain state transition function](#beacon-chain-state-transition-function) - - [Epoch processing](#epoch-processing) - - [Helper functions](#helper-functions) - - [New `get_validator_from_indexed_deposit_data`](#new-get_validator_from_indexed_deposit_data) - - [New `apply_indexed_deposit_data`](#new-apply_indexed_deposit_data) - - [New `process_pending_deposits`](#new-process_pending_deposits) - [Block processing](#block-processing) - - [New `process_deposit_receipts`](#new-process_deposit_receipts) - - [Modified `process_execution_payload`](#modified-process_execution_payload) - [Modified `process_operations`](#modified-process_operations) + - [New `get_validator_from_deposit_receipt`](#new-get_validator_from_deposit_receipt) + - [New `process_deposit_receipt`](#new-process_deposit_receipt) + - [Modified `process_deposit`](#modified-process_deposit) + - [Modified `process_execution_payload`](#modified-process_execution_payload) - [Testing](#testing) @@ -40,6 +39,16 @@ This mechanism relies on the changes proposed by [EIP-6110](http://eips.ethereum *Note:* This specification is under development and should be used with care. 
+## Constants + +The following values are (non-configurable) constants used throughout the specification. + +### Misc + +| Name | Value | +| - | - | +| `NOT_SET_DEPOSIT_RECEIPT_START_INDEX` | `2**64 - 1` | + ## Preset ### State list lengths @@ -174,43 +183,66 @@ class BeaconState(Container): next_withdrawal_index: WithdrawalIndex next_withdrawal_validator_index: ValidatorIndex # EIP-6110 - pending_deposits: List[IndexedDepositData, PENDING_DEPOSITS_LIMIT] + deposit_receipt_start_index: uint64 + deposit_receipt_next_index: uint64 ``` ## Beacon chain state transition function -### Epoch processing +### Block processing ```python -def process_epoch(state: BeaconState) -> None: - process_justification_and_finalization(state) - process_inactivity_updates(state) - process_rewards_and_penalties(state) - # Run before registry and after finality updates - process_pending_deposits(state) # [New in EIP-6110] - process_registry_updates(state) - process_slashings(state) - process_eth1_data_reset(state) - process_effective_balance_updates(state) - process_slashings_reset(state) - process_randao_mixes_reset(state) - process_historical_roots_update(state) - process_participation_flag_updates(state) - process_sync_committee_updates(state) +def process_block(state: BeaconState, block: BeaconBlock) -> None: + process_block_header(state, block) + if is_execution_enabled(state, block.body): + process_withdrawals(state, block.body.execution_payload) + process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [Modified in EIP-6110] + process_randao(state, block.body) + process_eth1_data(state, block.body) + process_operations(state, block.body) # [Modified in EIP-6110] + process_sync_aggregate(state, block.body.sync_aggregate) ``` -#### Helper functions +#### Modified `process_operations` + +*Note*: The function `process_operations` is modified to process `DepositReceipt` operations included in the payload. 
+
+```python
+def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
+    # Prevent potential underflow introduced by mixing two deposit processing flows
+    unprocessed_deposits_count = max(0, state.eth1_data.deposit_count - state.eth1_deposit_index)  # [New in EIP-6110]
+    # Verify that outstanding deposits are processed up to the maximum number of deposits
+    assert len(body.deposits) == min(MAX_DEPOSITS, unprocessed_deposits_count)  # [Modified in EIP-6110]
+
+    def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
+        for operation in operations:
+            fn(state, operation)
+
+    for_ops(body.proposer_slashings, process_proposer_slashing)
+    for_ops(body.attester_slashings, process_attester_slashing)
+    for_ops(body.attestations, process_attestation)
+    for_ops(body.deposits, process_deposit)  # [Modified in EIP-6110]
+    for_ops(body.voluntary_exits, process_voluntary_exit)
+    for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
+
+    # [New in EIP-6110]
+    if is_execution_enabled(state, body):
+        for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt)
+        # Signify the end of transition to in-protocol deposits logic
+        if state.eth1_deposit_index >= state.deposit_receipt_start_index:
+            state.eth1_deposit_index = state.deposit_receipt_next_index
+```
 
-##### New `get_validator_from_indexed_deposit_data`
+#### New `get_validator_from_deposit_receipt`
 
 ```python
-def get_validator_from_indexed_deposit_data(indexed_deposit_data: IndexedDepositData) -> Validator:
-    amount = indexed_deposit_data.amount
+def get_validator_from_deposit_receipt(deposit_receipt: DepositReceipt) -> Validator:
+    amount = deposit_receipt.amount
     effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
 
     return Validator(
-        pubkey=indexed_deposit_data.pubkey,
-        withdrawal_credentials=indexed_deposit_data.withdrawal_credentials,
+        pubkey=deposit_receipt.pubkey,
+        withdrawal_credentials=deposit_receipt.withdrawal_credentials,
         activation_eligibility_epoch=FAR_FUTURE_EPOCH,
         activation_epoch=FAR_FUTURE_EPOCH,
         exit_epoch=FAR_FUTURE_EPOCH,
@@ -219,91 +251,87 @@ def get_validator_from_indexed_deposit_data(indexed_deposit_data: IndexedDeposit
     )
 ```
 
-##### New `apply_indexed_deposit_data`
+#### New `process_deposit_receipt`
 
 ```python
-def apply_indexed_deposit_data(state: BeaconState, indexed_deposit_data: IndexedDepositData) -> None:
-    pubkey = indexed_deposit_data.pubkey
-    amount = indexed_deposit_data.amount
-    validator_pubkeys = [v.pubkey for v in state.validators]
+def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) -> None:
+    # Set deposit receipt start index
+    if state.deposit_receipt_start_index == NOT_SET_DEPOSIT_RECEIPT_START_INDEX:
+        state.deposit_receipt_start_index = deposit_receipt.index
+
+    state.deposit_receipt_next_index = deposit_receipt.index + 1
+
+    pubkey = deposit_receipt.pubkey
+    amount = deposit_receipt.amount
+    validator_pubkeys = [validator.pubkey for validator in state.validators]
     if pubkey not in validator_pubkeys:
-        # Add validator and balance entries
-        state.validators.append(get_validator_from_indexed_deposit_data(indexed_deposit_data))
-        state.balances.append(amount)
+        # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
+        deposit_message = DepositMessage(
+            pubkey=deposit_receipt.pubkey,
+            withdrawal_credentials=deposit_receipt.withdrawal_credentials,
+            amount=deposit_receipt.amount,
+        )
+        domain = compute_domain(DOMAIN_DEPOSIT)  # Fork-agnostic domain since deposits are valid across forks
+        signing_root = compute_signing_root(deposit_message, domain)
+        # Initialize validator if the deposit signature is valid
+        if bls.Verify(pubkey, signing_root, deposit_receipt.signature):
+            state.validators.append(get_validator_from_deposit_receipt(deposit_receipt))
+            state.balances.append(amount)
+            
state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000)) + state.current_epoch_participation.append(ParticipationFlags(0b0000_0000)) + state.inactivity_scores.append(uint64(0)) else: # Increase balance by deposit amount index = ValidatorIndex(validator_pubkeys.index(pubkey)) increase_balance(state, index, amount) ``` -#### New `process_pending_deposits` - -```python -def process_pending_deposits(state: BeaconState) -> None: - finalized_epoch = state.finalized_checkpoint.epoch - - next_pending_deposit_index = 0 - for pending_deposit in state.pending_deposits: - # Preserve deposits per epoch boundary - if next_pending_deposit_index >= MAX_DEPOSITS * SLOTS_PER_EPOCH: - break - - # Apply only finalized deposits - if pending_deposit.epoch >= finalized_epoch: - break +#### Modified `process_deposit` - # Skip already applied deposits - if pending_deposit.index >= state.eth1_deposit_index: - apply_indexed_deposit_data(state, pending_deposit) - state.eth1_deposit_index += 1 - - next_pending_deposit_index += 1 - - state.pending_deposit = state.pending_deposit[next_pending_deposit_index:] -``` - -### Block processing +*Note*: The function `process_deposit` is modified to prevent deposits from being processed in the second time (due to `process_deposit_receipt`). 
```python -def process_block(state: BeaconState, block: BeaconBlock) -> None: - process_block_header(state, block) - if is_execution_enabled(state, block.body): - process_withdrawals(state, block.body.execution_payload) - process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [Modified in EIP-6110] - process_deposit_receipts(state, block.body.execution_payload) # [New in EIP-6110] - process_randao(state, block.body) - process_eth1_data(state, block.body) - process_operations(state, block.body) # [Modified in EIP-6110] - process_sync_aggregate(state, block.body.sync_aggregate) -``` - -#### New `process_deposit_receipts` +def process_deposit(state: BeaconState, deposit: Deposit) -> None: + # Skip already processed deposits + if state.eth1_deposit_index >= state.deposit_receipt_start_index: + return + + # Verify the Merkle branch + assert is_valid_merkle_branch( + leaf=hash_tree_root(deposit.data), + branch=deposit.proof, + depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in + index=state.eth1_deposit_index, + root=state.eth1_data.deposit_root, + ) -```python -def process_deposit_receipts(state: BeaconState, payload: ExecutionPayload) -> None: - current_epoch = get_current_epoch(state) - - for deposit_receipt in payload.deposit_receipts: - if pubkey not in validator_pubkeys: - # Verify the deposit signature (proof of possession) which is not checked by the deposit contract - deposit_message = DepositMessage( - pubkey=deposit_receipt.pubkey, - withdrawal_credentials=deposit_receipt.withdrawal_credentials, - amount=deposit_receipt.amount, - ) - domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks - signing_root = compute_signing_root(deposit_message, domain) - if not bls.Verify(pubkey, signing_root, deposit.data.signature): - continue - - pending_deposit = IndexedDepositData( - pubkey=deposit_receipt.pubkey, - withdrawal_credentials=deposit_receipt.withdrawal_credentials, - 
amount=deposit_receipt.amount, - index=deposit_receipt.index, - epoch=current_epoch, + # Deposits must be processed in order + state.eth1_deposit_index += 1 + + pubkey = deposit.data.pubkey + amount = deposit.data.amount + validator_pubkeys = [validator.pubkey for validator in state.validators] + if pubkey not in validator_pubkeys: + # Verify the deposit signature (proof of possession) which is not checked by the deposit contract + deposit_message = DepositMessage( + pubkey=deposit.data.pubkey, + withdrawal_credentials=deposit.data.withdrawal_credentials, + amount=deposit.data.amount, ) - state.pending_deposits.append(pending_deposit) + domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks + signing_root = compute_signing_root(deposit_message, domain) + # Initialize validator if the deposit signature is valid + if bls.Verify(pubkey, signing_root, deposit.data.signature): + state.validators.append(get_validator_from_deposit(deposit)) + state.balances.append(amount) + # [New in Altair] + state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000)) + state.current_epoch_participation.append(ParticipationFlags(0b0000_0000)) + state.inactivity_scores.append(uint64(0)) + else: + # Increase balance by deposit amount + index = ValidatorIndex(validator_pubkeys.index(pubkey)) + increase_balance(state, index, amount) ``` #### Modified `process_execution_payload` @@ -342,29 +370,6 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe ) ``` -#### Modified `process_operations` - -*Note*: The function `process_operations` is modified to process `BLSToExecutionChange` operations included in the block. 
- -```python -def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: - # Prevent potential underflow introduced by mixing two deposit processing flows - unprocessed_deposits_count = max(0, state.eth1_data.deposit_count - state.eth1_deposit_index) # [New in EIP-6110] - # Verify that outstanding deposits are processed up to the maximum number of deposits - assert len(body.deposits) == min(MAX_DEPOSITS, unprocessed_deposits_count) # [Modified in EIP-6110] - - def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: - for operation in operations: - fn(state, operation) - - for_ops(body.proposer_slashings, process_proposer_slashing) - for_ops(body.attester_slashings, process_attester_slashing) - for_ops(body.attestations, process_attestation) - for_ops(body.deposits, process_deposit) - for_ops(body.voluntary_exits, process_voluntary_exit) - for_ops(body.bls_to_execution_changes, process_bls_to_execution_change) # [New in Capella] -``` - ## Testing *Note*: The function `initialize_beacon_state_from_eth1` is modified for pure EIP-6110 testing only. 
@@ -389,6 +394,7 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy + deposit_receipt_start_index = NOT_SET_DEPOSIT_RECEIPT_START_INDEX, ) # Process deposits From 837233a1be3350032a058df6730f0af910e05179 Mon Sep 17 00:00:00 2001 From: Henri DF Date: Wed, 22 Feb 2023 16:50:56 +0100 Subject: [PATCH 13/73] Fix reference to block->sidecar (This was probably a cut-n-paste from block validation) --- specs/deneb/p2p-interface.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index ea29eb7f4b..840ea18630 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -108,9 +108,9 @@ This topic is used to propagate signed blob sidecars, one for each sidecar index The following validations MUST pass before forwarding the `sidecar` on the network, assuming the alias `sidecar = signed_blob_sidecar.message`: - _[REJECT]_ The sidecar is for the correct topic -- i.e. `sidecar.index` matches the topic `{index}`. -- _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `sidecar.slot <= current_slot` (a client MAY queue future blocks for processing at the appropriate slot). +- _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `sidecar.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot). - _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. 
validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` -- _[IGNORE]_ The blob's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue blocks for processing once the parent block is retrieved). +- _[IGNORE]_ The blob's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). - _[REJECT]_ The blob's block's parent (defined by `sidecar.block_parent_root`) passes validation. - _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the `sidecar.proposer_index` pubkey. - _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.block_root, sidecar.index)`. From d8111d7d3b9a021d3f4c6ea85a81c467669b55f2 Mon Sep 17 00:00:00 2001 From: Henri DF Date: Wed, 22 Feb 2023 16:51:56 +0100 Subject: [PATCH 14/73] Refer to "sidecar" consistently --- specs/deneb/p2p-interface.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 840ea18630..52ad411f89 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -110,8 +110,8 @@ The following validations MUST pass before forwarding the `sidecar` on the netwo - _[REJECT]_ The sidecar is for the correct topic -- i.e. `sidecar.index` matches the topic `{index}`. - _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `sidecar.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot). - _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. 
validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` -- _[IGNORE]_ The blob's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). -- _[REJECT]_ The blob's block's parent (defined by `sidecar.block_parent_root`) passes validation. +- _[IGNORE]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). +- _[REJECT]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) passes validation. - _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the `sidecar.proposer_index` pubkey. - _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.block_root, sidecar.index)`. - _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`). From 970da9efd2b5b1bdd46dda6b7f676e7d8cffdc90 Mon Sep 17 00:00:00 2001 From: Henri DF Date: Wed, 22 Feb 2023 17:15:39 +0100 Subject: [PATCH 15/73] Clean up max request blobs constants The spec currently defines `MAX_REQUEST_BLOB_SIDECARS` as the "maximum number of blob sidecars in a single request", but then later in the RPC description defines the max is `MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK`. Clean this up by defining `MAX_REQUEST_BLOB_SIDECARS` to be the actual max. 
---
 specs/deneb/p2p-interface.md | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md
index ea29eb7f4b..22d8cf94c4 100644
--- a/specs/deneb/p2p-interface.md
+++ b/specs/deneb/p2p-interface.md
@@ -38,7 +38,7 @@ The specification of these changes continues in the same format as the network s
 | Name | Value | Description |
 |------------------------------------------|-----------------------------------|---------------------------------------------------------------------|
 | `MAX_REQUEST_BLOCKS_DENEB` | `2**7` (= 128) | Maximum number of blocks in a single request |
-| `MAX_REQUEST_BLOB_SIDECARS` | `2**7` (= 128) | Maximum number of blob sidecars in a single request |
+| `MAX_REQUEST_BLOB_SIDECARS` | `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK` | Maximum number of blob sidecars in a single request |
 | `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve blob sidecars |
 
 ## Containers
@@ -183,7 +183,7 @@ Request Content:
 
 ```
 (
-  List[BlobIdentifier, MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK]
+  List[BlobIdentifier, MAX_REQUEST_BLOB_SIDECARS]
 )
 ```
 
@@ -191,7 +191,7 @@ Response Content:
 
 ```
 (
-  List[BlobSidecar, MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK]
+  List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS]
 )
 ```
 
@@ -202,7 +202,7 @@ It may be less in the case that the responding peer is missing blocks or sidecar
 
 The response is unsigned, i.e. `BlobSidecar`, as the signature of the beacon block proposer may not be available beyond the initial distribution via gossip.
 
-No more than `MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK` may be requested at a time.
+No more than `MAX_REQUEST_BLOB_SIDECARS` may be requested at a time.
 
 `BlobSidecarsByRoot` is primarily used to recover recent blobs (e.g. when receiving a block with a transaction whose corresponding blob is missing).
@@ -239,7 +239,7 @@ Request Content: Response Content: ``` ( - List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK] + List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS] ) ``` @@ -274,7 +274,7 @@ to be fully compliant with `BlobSidecarsByRange` requests. participating in the networking immediately, other peers MAY disconnect and/or temporarily ban such an un-synced or semi-synced client. -Clients MUST respond with at least the blob sidecars of the first blob-carrying block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK` sidecars. +Clients MUST respond with at least the blob sidecars of the first blob-carrying block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS` sidecars. Clients MUST include all blob sidecars of each block from which they include blob sidecars. From 23c10cfd7fc7c72127b2a7bc2f7e3561c978b511 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 23 Feb 2023 13:53:15 +0600 Subject: [PATCH 16/73] Remove state.deposit_receipt_next_index variable --- specs/_features/eip6110/beacon-chain.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index 3c81cfb891..df92fb103a 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -184,7 +184,6 @@ class BeaconState(Container): next_withdrawal_validator_index: ValidatorIndex # EIP-6110 deposit_receipt_start_index: uint64 - deposit_receipt_next_index: uint64 ``` ## Beacon chain state transition function @@ -228,9 +227,6 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: # [New in EIP-6110] if is_execution_enabled(state, body): for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt) - # Signify the end of transition to in-protocol deposits logic - if state.eth1_deposit_index >= state.deposit_receipt_start_index 
- state.eth1_deposit_index = state.deposit_receipt_next_index ``` #### New `get_validator_from_deposit_receipt` @@ -259,7 +255,9 @@ def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) if state.deposit_receipt_start_index == NOT_SET_DEPOSIT_RECEIPT_START_INDEX: state.deposit_receipt_start_index = deposit_receipt.index - state.deposit_receipt_next_index = deposit_receipt.index + 1 + # Signify the end of transition to in-protocol deposit logic + if state.eth1_deposit_index >= state.deposit_receipt_start_index + state.eth1_deposit_index = deposit_receipt.index + 1 pubkey = deposit_receipt.pubkey amount = deposit_receipt.amount @@ -294,6 +292,7 @@ def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Skip already processed deposits if state.eth1_deposit_index >= state.deposit_receipt_start_index: + state.eth1_deposit_index += 1 return # Verify the Merkle branch From b22c89244a88a20853c82fbc2df00af545728e81 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 23 Feb 2023 14:09:01 +0600 Subject: [PATCH 17/73] Cosmetic renaming --- specs/_features/eip6110/beacon-chain.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index df92fb103a..e7e0630b06 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -47,7 +47,7 @@ The following values are (non-configurable) constants used throughout the specif | Name | Value | | - | - | -| `NOT_SET_DEPOSIT_RECEIPT_START_INDEX` | `2**64 - 1` | +| `NOT_SET_DEPOSIT_RECEIPTS_START_INDEX` | `2**64 - 1` | ## Preset @@ -183,7 +183,7 @@ class BeaconState(Container): next_withdrawal_index: WithdrawalIndex next_withdrawal_validator_index: ValidatorIndex # EIP-6110 - deposit_receipt_start_index: uint64 + deposit_receipts_start_index: uint64 ``` ## Beacon chain 
state transition function @@ -252,11 +252,11 @@ def get_validator_from_deposit_receipt(deposit_receipt: DepositReceipt) -> Valid ```python def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) -> None: # Set deposit receipt start index - if state.deposit_receipt_start_index == NOT_SET_DEPOSIT_RECEIPT_START_INDEX: - state.deposit_receipt_start_index = deposit_receipt.index + if state.deposit_receipts_start_index == NOT_SET_DEPOSIT_RECEIPTS_START_INDEX: + state.deposit_receipts_start_index = deposit_receipt.index # Signify the end of transition to in-protocol deposit logic - if state.eth1_deposit_index >= state.deposit_receipt_start_index + if state.eth1_deposit_index >= state.deposit_receipts_start_index state.eth1_deposit_index = deposit_receipt.index + 1 pubkey = deposit_receipt.pubkey @@ -291,7 +291,7 @@ def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) ```python def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Skip already processed deposits - if state.eth1_deposit_index >= state.deposit_receipt_start_index: + if state.eth1_deposit_index >= state.deposit_receipts_start_index: state.eth1_deposit_index += 1 return @@ -393,7 +393,7 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy - deposit_receipt_start_index = NOT_SET_DEPOSIT_RECEIPT_START_INDEX, + deposit_receipts_start_index = NOT_SET_DEPOSIT_RECEIPTS_START_INDEX, ) # Process deposits From a1daac098ce4458acad75d0385a89e7146d3d20b Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 23 Feb 2023 22:34:32 +0800 Subject: [PATCH 18/73] Make EIP-6110 executable and fix linter errors --- .gitignore | 1 + setup.py | 29 ++++- specs/_features/eip6110/beacon-chain.md 
| 16 +-- specs/_features/eip6110/fork.md | 142 ++++++++++++++++++++++++ 4 files changed, 176 insertions(+), 12 deletions(-) create mode 100644 specs/_features/eip6110/fork.md diff --git a/.gitignore b/.gitignore index c49e6c006c..c56a658ce2 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,7 @@ tests/core/pyspec/eth2spec/altair/ tests/core/pyspec/eth2spec/bellatrix/ tests/core/pyspec/eth2spec/capella/ tests/core/pyspec/eth2spec/deneb/ +tests/core/pyspec/eth2spec/eip6110/ # coverage reports .htmlcov diff --git a/setup.py b/setup.py index 9c5488f126..b1158e440f 100644 --- a/setup.py +++ b/setup.py @@ -47,6 +47,7 @@ def installPackage(package: str): BELLATRIX = 'bellatrix' CAPELLA = 'capella' DENEB = 'deneb' +EIP6110 = 'eip6110' # The helper functions that are used when defining constants @@ -667,9 +668,22 @@ def hardcoded_custom_type_dep_constants(cls, spec_object) -> str: return {**super().hardcoded_custom_type_dep_constants(spec_object), **constants} +# +# EIP6110SpecBuilder +# +class EIP6110SpecBuilder(CapellaSpecBuilder): + fork: str = EIP6110 + + @classmethod + def imports(cls, preset_name: str): + return super().imports(preset_name) + f''' +from eth2spec.capella import {preset_name} as capella +''' + + spec_builders = { builder.fork: builder - for builder in (Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder) + for builder in (Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder, EIP6110SpecBuilder) } @@ -968,14 +982,14 @@ def finalize_options(self): if len(self.md_doc_paths) == 0: print("no paths were specified, using default markdown file paths for pyspec" " build (spec fork: %s)" % self.spec_fork) - if self.spec_fork in (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB): + if self.spec_fork in (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110): self.md_doc_paths = """ specs/phase0/beacon-chain.md specs/phase0/fork-choice.md specs/phase0/validator.md 
specs/phase0/weak-subjectivity.md """ - if self.spec_fork in (ALTAIR, BELLATRIX, CAPELLA, DENEB): + if self.spec_fork in (ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110): self.md_doc_paths += """ specs/altair/light-client/full-node.md specs/altair/light-client/light-client.md @@ -987,7 +1001,7 @@ def finalize_options(self): specs/altair/validator.md specs/altair/p2p-interface.md """ - if self.spec_fork in (BELLATRIX, CAPELLA, DENEB): + if self.spec_fork in (BELLATRIX, CAPELLA, DENEB, EIP6110): self.md_doc_paths += """ specs/bellatrix/beacon-chain.md specs/bellatrix/fork.md @@ -996,7 +1010,7 @@ def finalize_options(self): specs/bellatrix/p2p-interface.md sync/optimistic.md """ - if self.spec_fork in (CAPELLA, DENEB): + if self.spec_fork in (CAPELLA, DENEB, EIP6110): self.md_doc_paths += """ specs/capella/light-client/fork.md specs/capella/light-client/full-node.md @@ -1021,6 +1035,11 @@ def finalize_options(self): specs/deneb/p2p-interface.md specs/deneb/validator.md """ + if self.spec_fork == EIP6110: + self.md_doc_paths += """ + specs/_features/eip6110/beacon-chain.md + specs/_features/eip6110/fork.md + """ if len(self.md_doc_paths) == 0: raise Exception('no markdown files specified, and spec fork "%s" is unknown', self.spec_fork) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index e7e0630b06..1daa44fb14 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -182,6 +182,8 @@ class BeaconState(Container): # Withdrawals next_withdrawal_index: WithdrawalIndex next_withdrawal_validator_index: ValidatorIndex + # Deep history valid from Capella onwards + historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT] # EIP-6110 deposit_receipts_start_index: uint64 ``` @@ -256,7 +258,7 @@ def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) state.deposit_receipts_start_index = deposit_receipt.index # Signify the end of transition to in-protocol 
deposit logic - if state.eth1_deposit_index >= state.deposit_receipts_start_index + if state.eth1_deposit_index >= state.deposit_receipts_start_index: state.eth1_deposit_index = deposit_receipt.index + 1 pubkey = deposit_receipt.pubkey @@ -265,15 +267,15 @@ def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) if pubkey not in validator_pubkeys: # Verify the deposit signature (proof of possession) which is not checked by the deposit contract deposit_message = DepositMessage( - pubkey=deposit.data.pubkey, - withdrawal_credentials=deposit.data.withdrawal_credentials, - amount=deposit.data.amount, + pubkey=deposit_receipt.pubkey, + withdrawal_credentials=deposit_receipt.withdrawal_credentials, + amount=deposit_receipt.amount, ) domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks signing_root = compute_signing_root(deposit_message, domain) # Initialize validator if the deposit signature is valid - if bls.Verify(pubkey, signing_root, deposit.data.signature): - state.validators.append(get_validator_from_deposit_receipt(deposit)) + if bls.Verify(pubkey, signing_root, deposit_receipt.signature): + state.validators.append(get_validator_from_deposit_receipt(deposit_receipt)) state.balances.append(amount) state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000)) state.current_epoch_participation.append(ParticipationFlags(0b0000_0000)) @@ -393,7 +395,7 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy - deposit_receipts_start_index = NOT_SET_DEPOSIT_RECEIPTS_START_INDEX, + deposit_receipts_start_index=NOT_SET_DEPOSIT_RECEIPTS_START_INDEX, ) # Process deposits diff --git a/specs/_features/eip6110/fork.md 
b/specs/_features/eip6110/fork.md new file mode 100644 index 0000000000..3a8de1b8dc --- /dev/null +++ b/specs/_features/eip6110/fork.md @@ -0,0 +1,142 @@ +# EIP-6110 -- Fork Logic + +**Notice**: This document is a work-in-progress for researchers and implementers. + +## Table of contents + + + + +- [Introduction](#introduction) +- [Configuration](#configuration) +- [Helper functions](#helper-functions) + - [Misc](#misc) + - [Modified `compute_fork_version`](#modified-compute_fork_version) +- [Fork to Deneb](#fork-to-deneb) + - [Fork trigger](#fork-trigger) + - [Upgrading the state](#upgrading-the-state) + + + +## Introduction + +This document describes the process of Deneb upgrade. + +## Configuration + +Warning: this configuration is not definitive. + +| Name | Value | +| - | - | +| `EIP6110_FORK_VERSION` | `Version('0x05000000')` | +| `EIP6110_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** | + +## Helper functions + +### Misc + +#### Modified `compute_fork_version` + +```python +def compute_fork_version(epoch: Epoch) -> Version: + """ + Return the fork version at the given ``epoch``. + """ + if epoch >= EIP6110_FORK_EPOCH: + return EIP6110_FORK_VERSION + if epoch >= CAPELLA_FORK_EPOCH: + return CAPELLA_FORK_VERSION + if epoch >= BELLATRIX_FORK_EPOCH: + return BELLATRIX_FORK_VERSION + if epoch >= ALTAIR_FORK_EPOCH: + return ALTAIR_FORK_VERSION + return GENESIS_FORK_VERSION +``` + +## Fork to Deneb + +### Fork trigger + +TBD. This fork is defined for testing purposes, the EIP may be combined with other consensus-layer upgrade. +For now, we assume the condition will be triggered at epoch `EIP6110_FORK_EPOCH`. + +Note that for the pure Deneb networks, we don't apply `upgrade_to_eip6110` since it starts with Deneb version logic. + +### Upgrading the state + +If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == EIP6110_FORK_EPOCH`, +an irregular state change is made to upgrade to EIP-6110. 
+ +```python +def upgrade_to_eip6110(pre: capella.BeaconState) -> BeaconState: + epoch = capella.get_current_epoch(pre) + latest_execution_payload_header = ExecutionPayloadHeader( + parent_hash=pre.latest_execution_payload_header.parent_hash, + fee_recipient=pre.latest_execution_payload_header.fee_recipient, + state_root=pre.latest_execution_payload_header.state_root, + receipts_root=pre.latest_execution_payload_header.receipts_root, + logs_bloom=pre.latest_execution_payload_header.logs_bloom, + prev_randao=pre.latest_execution_payload_header.prev_randao, + block_number=pre.latest_execution_payload_header.block_number, + gas_limit=pre.latest_execution_payload_header.gas_limit, + gas_used=pre.latest_execution_payload_header.gas_used, + timestamp=pre.latest_execution_payload_header.timestamp, + extra_data=pre.latest_execution_payload_header.extra_data, + base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas, + block_hash=pre.latest_execution_payload_header.block_hash, + transactions_root=pre.latest_execution_payload_header.transactions_root, + withdrawals_root=pre.latest_execution_payload_header.withdrawals_root, + deposit_receipts_root=Root(), # [New in EIP-6110] + ) + post = BeaconState( + # Versioning + genesis_time=pre.genesis_time, + genesis_validators_root=pre.genesis_validators_root, + slot=pre.slot, + fork=Fork( + previous_version=pre.fork.current_version, + current_version=EIP6110_FORK_VERSION, # [Modified in EIP-6110] + epoch=epoch, + ), + # History + latest_block_header=pre.latest_block_header, + block_roots=pre.block_roots, + state_roots=pre.state_roots, + historical_roots=pre.historical_roots, + # Eth1 + eth1_data=pre.eth1_data, + eth1_data_votes=pre.eth1_data_votes, + eth1_deposit_index=pre.eth1_deposit_index, + # Registry + validators=pre.validators, + balances=pre.balances, + # Randomness + randao_mixes=pre.randao_mixes, + # Slashings + slashings=pre.slashings, + # Participation + 
previous_epoch_participation=pre.previous_epoch_participation, + current_epoch_participation=pre.current_epoch_participation, + # Finality + justification_bits=pre.justification_bits, + previous_justified_checkpoint=pre.previous_justified_checkpoint, + current_justified_checkpoint=pre.current_justified_checkpoint, + finalized_checkpoint=pre.finalized_checkpoint, + # Inactivity + inactivity_scores=pre.inactivity_scores, + # Sync + current_sync_committee=pre.current_sync_committee, + next_sync_committee=pre.next_sync_committee, + # Execution-layer + latest_execution_payload_header=latest_execution_payload_header, # [Modified in EIP-6110] + # Withdrawals + next_withdrawal_index=pre.next_withdrawal_index, + next_withdrawal_validator_index=pre.next_withdrawal_validator_index, + # Deep history valid from Capella onwards + historical_summaries=pre.historical_summaries, + # EIP-6110 + deposit_receipts_start_index=0, # [New in EIP-6110] + ) + + return post +``` From e7035dacf5f1f20fddfd0a2b45ae97e0bcf7e449 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 23 Feb 2023 22:46:55 +0800 Subject: [PATCH 19/73] Remove the outdated statement --- specs/deneb/fork.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/specs/deneb/fork.md b/specs/deneb/fork.md index 1ace26c7f5..23b3f23c7b 100644 --- a/specs/deneb/fork.md +++ b/specs/deneb/fork.md @@ -64,8 +64,6 @@ Note that for the pure Deneb networks, we don't apply `upgrade_to_deneb` since i ### Upgrading the state -Since the `deneb.BeaconState` format is equal to the `capella.BeaconState` format, we only have to update `BeaconState.fork`. 
- ```python def upgrade_to_deneb(pre: capella.BeaconState) -> BeaconState: epoch = capella.get_current_epoch(pre) @@ -82,10 +80,10 @@ def upgrade_to_deneb(pre: capella.BeaconState) -> BeaconState: timestamp=pre.latest_execution_payload_header.timestamp, extra_data=pre.latest_execution_payload_header.extra_data, base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas, - excess_data_gas=uint256(0), # [New in Deneb] block_hash=pre.latest_execution_payload_header.block_hash, transactions_root=pre.latest_execution_payload_header.transactions_root, withdrawals_root=pre.latest_execution_payload_header.withdrawals_root, + excess_data_gas=uint256(0), # [New in Deneb] ) post = BeaconState( # Versioning From 7d6831ec8691607b1b4b30d4b1b9cd0af69b16a5 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 23 Feb 2023 21:23:52 +0600 Subject: [PATCH 20/73] Fix initialize_beacon_state_from_eth1 definition --- specs/_features/eip6110/beacon-chain.md | 9 +++++---- specs/_features/eip6110/fork.md | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index 1daa44fb14..62a8664769 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -375,8 +375,9 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe *Note*: The function `initialize_beacon_state_from_eth1` is modified for pure EIP-6110 testing only. Modifications include: -1. Use `DEPOSITS_EIP_FORK_VERSION` as the previous and current fork version. +1. Use `EIP6110_FORK_VERSION` as the previous and current fork version. 2. Utilize the EIP-6110 `BeaconBlockBody` when constructing the initial `latest_block_header`. +3. Add `deposit_receipts_start_index` variable to the genesis state initialization. 
```python def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, @@ -385,8 +386,8 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader() ) -> BeaconState: fork = Fork( - previous_version=CAPELLA_FORK_VERSION, # [Modified in Capella] for testing only - current_version=CAPELLA_FORK_VERSION, # [Modified in Capella] + previous_version=EIP6110_FORK_VERSION, # [Modified in EIP6110] for testing only + current_version=EIP6110_FORK_VERSION, # [Modified in EIP6110] epoch=GENESIS_EPOCH, ) state = BeaconState( @@ -395,7 +396,7 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy - deposit_receipts_start_index=NOT_SET_DEPOSIT_RECEIPTS_START_INDEX, + deposit_receipts_start_index=NOT_SET_DEPOSIT_RECEIPTS_START_INDEX, # [New in EIP6110] ) # Process deposits diff --git a/specs/_features/eip6110/fork.md b/specs/_features/eip6110/fork.md index 3a8de1b8dc..6e89788872 100644 --- a/specs/_features/eip6110/fork.md +++ b/specs/_features/eip6110/fork.md @@ -135,7 +135,7 @@ def upgrade_to_eip6110(pre: capella.BeaconState) -> BeaconState: # Deep history valid from Capella onwards historical_summaries=pre.historical_summaries, # EIP-6110 - deposit_receipts_start_index=0, # [New in EIP-6110] + deposit_receipts_start_index=NOT_SET_DEPOSIT_RECEIPTS_START_INDEX, # [New in EIP-6110] ) return post From 703fdfc7c7cc5945fa8d451498164b1e641b625a Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 23 Feb 2023 21:31:19 +0600 Subject: [PATCH 21/73] Fix linter --- specs/_features/eip6110/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/_features/eip6110/beacon-chain.md 
b/specs/_features/eip6110/beacon-chain.md index 62a8664769..827637221c 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -396,7 +396,7 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy - deposit_receipts_start_index=NOT_SET_DEPOSIT_RECEIPTS_START_INDEX, # [New in EIP6110] + deposit_receipts_start_index=NOT_SET_DEPOSIT_RECEIPTS_START_INDEX, # [New in EIP6110] ) # Process deposits From fda0eae70af88658f1bff373e3bb7bb7b2fb8c67 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 23 Feb 2023 23:41:57 +0800 Subject: [PATCH 22/73] Add EIP6110 to pylint and mypy scope --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index d4259b2fe9..b17baf7c89 100644 --- a/Makefile +++ b/Makefile @@ -142,8 +142,8 @@ codespell: lint: pyspec . venv/bin/activate; cd $(PY_SPEC_DIR); \ flake8 --config $(LINTER_CONFIG_FILE) ./eth2spec \ - && pylint --rcfile $(LINTER_CONFIG_FILE) ./eth2spec/phase0 ./eth2spec/altair ./eth2spec/bellatrix ./eth2spec/capella ./eth2spec/deneb \ - && mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair -p eth2spec.bellatrix -p eth2spec.capella -p eth2spec.deneb + && pylint --rcfile $(LINTER_CONFIG_FILE) ./eth2spec/phase0 ./eth2spec/altair ./eth2spec/bellatrix ./eth2spec/capella ./eth2spec/deneb ./eth2spec/eip6110 \ + && mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair -p eth2spec.bellatrix -p eth2spec.capella -p eth2spec.deneb -p eth2spec.eip6110 lint_generators: pyspec . 
venv/bin/activate; cd $(TEST_GENERATORS_DIR); \ From 9d690a4cb298b34da59960cdf24891608b817ad0 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 24 Feb 2023 17:58:10 +0800 Subject: [PATCH 23/73] Fix typo --- specs/_features/eip6110/fork.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/_features/eip6110/fork.md b/specs/_features/eip6110/fork.md index 6e89788872..a932ecb24e 100644 --- a/specs/_features/eip6110/fork.md +++ b/specs/_features/eip6110/fork.md @@ -12,7 +12,7 @@ - [Helper functions](#helper-functions) - [Misc](#misc) - [Modified `compute_fork_version`](#modified-compute_fork_version) -- [Fork to Deneb](#fork-to-deneb) +- [Fork to EIP-6110](#fork-to-eip-6110) - [Fork trigger](#fork-trigger) - [Upgrading the state](#upgrading-the-state) @@ -20,7 +20,7 @@ ## Introduction -This document describes the process of Deneb upgrade. +This document describes the process of EIP-6110 upgrade. ## Configuration @@ -53,14 +53,14 @@ def compute_fork_version(epoch: Epoch) -> Version: return GENESIS_FORK_VERSION ``` -## Fork to Deneb +## Fork to EIP-6110 ### Fork trigger TBD. This fork is defined for testing purposes, the EIP may be combined with other consensus-layer upgrade. For now, we assume the condition will be triggered at epoch `EIP6110_FORK_EPOCH`. -Note that for the pure Deneb networks, we don't apply `upgrade_to_eip6110` since it starts with Deneb version logic. +Note that for the pure EIP-6110 networks, we don't apply `upgrade_to_eip6110` since it starts with EIP-6110 version logic. ### Upgrading the state From 136c78ddc77416ae55ed89f231dbfb4ad58b58d1 Mon Sep 17 00:00:00 2001 From: henridf Date: Fri, 24 Feb 2023 14:07:16 +0100 Subject: [PATCH 24/73] Update fork-choice.md Fix outdated (likely a Bellatrix cut-paste) description of change. 
--- specs/deneb/fork-choice.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index e93eb54faf..91ff3c4b1a 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -63,7 +63,7 @@ def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZ ### `on_block` -*Note*: The only modification is the addition of the verification of transition block conditions. +*Note*: The only modification is the addition of the blob data availability check. ```python def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: From 0879c46a34ee03b23dddbb0d83f9cd179cacac51 Mon Sep 17 00:00:00 2001 From: Enrico Del Fante Date: Mon, 27 Feb 2023 20:12:31 +0100 Subject: [PATCH 25/73] Add `blob_sidecar` gossip rule for parent slot Similarly to the check we do on Block gossip, we should check slot consistency with the parent block, so we can independently reject wrong block and blob_sidecar when the rule is violated. --- specs/deneb/p2p-interface.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 2e77fa98fa..1898f40267 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -112,6 +112,7 @@ The following validations MUST pass before forwarding the `sidecar` on the netwo - _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` - _[IGNORE]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). - _[REJECT]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) passes validation. +- _[REJECT]_ The sidecar is from a higher slot than the sidecar's block's parent (defined by `sidecar.block_parent_root`). 
- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the `sidecar.proposer_index` pubkey. - _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.block_root, sidecar.index)`. - _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`). From de5be633996fca73355a84200f21c58bfaafa656 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Tue, 28 Feb 2023 16:14:47 +0600 Subject: [PATCH 26/73] Apply suggestions from code review Co-authored-by: Hsiao-Wei Wang --- specs/_features/eip6110/beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index 827637221c..7fe2218ae9 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -47,7 +47,7 @@ The following values are (non-configurable) constants used throughout the specif | Name | Value | | - | - | -| `NOT_SET_DEPOSIT_RECEIPTS_START_INDEX` | `2**64 - 1` | +| `NOT_SET_DEPOSIT_RECEIPTS_START_INDEX` | `uint64(2**64 - 1)` | ## Preset @@ -292,6 +292,7 @@ def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) ```python def process_deposit(state: BeaconState, deposit: Deposit) -> None: + # [New in EIP-6110] # Skip already processed deposits if state.eth1_deposit_index >= state.deposit_receipts_start_index: state.eth1_deposit_index += 1 @@ -325,7 +326,6 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: if bls.Verify(pubkey, signing_root, deposit.data.signature): state.validators.append(get_validator_from_deposit(deposit)) state.balances.append(amount) - # [New in Altair] state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000)) state.current_epoch_participation.append(ParticipationFlags(0b0000_0000)) 
state.inactivity_scores.append(uint64(0)) From fae77eb53dc45be4e2a346cc5a57e7f6a1e007e9 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Tue, 28 Feb 2023 16:36:46 +0600 Subject: [PATCH 27/73] Apply @hwwhww suggestions --- specs/_features/eip6110/beacon-chain.md | 122 ++++++++++-------------- 1 file changed, 53 insertions(+), 69 deletions(-) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index 7fe2218ae9..160eee6585 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -10,12 +10,10 @@ - [Constants](#constants) - [Misc](#misc) - [Preset](#preset) - - [State list lengths](#state-list-lengths) - [Execution](#execution) - [Containers](#containers) - [New containers](#new-containers) - [`DepositReceipt`](#depositreceipt) - - [`IndexedDepositData`](#indexeddepositdata) - [Extended Containers](#extended-containers) - [`ExecutionPayload`](#executionpayload) - [`ExecutionPayloadHeader`](#executionpayloadheader) @@ -23,7 +21,8 @@ - [Beacon chain state transition function](#beacon-chain-state-transition-function) - [Block processing](#block-processing) - [Modified `process_operations`](#modified-process_operations) - - [New `get_validator_from_deposit_receipt`](#new-get_validator_from_deposit_receipt) + - [New `get_validator_from_deposit_data`](#new-get_validator_from_deposit_data) + - [New `apply_deposit`](#new-apply_deposit) - [New `process_deposit_receipt`](#new-process_deposit_receipt) - [Modified `process_deposit`](#modified-process_deposit) - [Modified `process_execution_payload`](#modified-process_execution_payload) @@ -51,12 +50,6 @@ The following values are (non-configurable) constants used throughout the specif ## Preset -### State list lengths - -| Name | Value | -| - | - | -| `PENDING_DEPOSITS_LIMIT` | `2**32` (= 4,294,967,296) | - ### Execution | Name | Value | Description | @@ -78,17 +71,6 @@ class DepositReceipt(Container): index: uint64 ``` -#### 
`IndexedDepositData` - -```python -class IndexedDepositData(Container): - pubkey: BLSPubkey - withdrawal_credentials: Bytes32 - amount: Gwei - index: uint64 - epoch: Epoch -``` - ### Extended Containers #### `ExecutionPayload` @@ -211,9 +193,11 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None: ```python def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: # Prevent potential underflow introduced by mixing two deposit processing flows - unprocessed_deposits_count = max(0, state.eth1_data.deposit_count - state.eth1_deposit_index) # [New in EIP-6110] - # Verify that outstanding deposits are processed up to the maximum number of deposits - assert len(body.deposits) == min(MAX_DEPOSITS, unprocessed_deposits_count) # [Modified in EIP-6110] + # [New in EIP-6110] + if state.eth1_data.deposit_count > state.eth1_deposit_index: + assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index) + else: + assert len(body.deposits) == 0 def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: for operation in operations: @@ -231,16 +215,15 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt) ``` -#### New `get_validator_from_deposit_receipt` +#### New `get_validator_from_deposit_data` ```python -def get_validator_from_deposit_receipt(deposit_receipt: DepositReceipt) -> Validator: - amount = deposit_receipt.amount +def get_validator_from_deposit_data(pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64) -> Validator: effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) return Validator( - pubkey=deposit_receipt.pubkey, - withdrawal_credentials=deposit_receipt.withdrawal_credentials, + pubkey=pubkey, + withdrawal_credentials=withdrawal_credentials, activation_eligibility_epoch=FAR_FUTURE_EPOCH, 
activation_epoch=FAR_FUTURE_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, @@ -249,33 +232,28 @@ def get_validator_from_deposit_receipt(deposit_receipt: DepositReceipt) -> Valid ) ``` -#### New `process_deposit_receipt` +#### New `apply_deposit` ```python -def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) -> None: - # Set deposit receipt start index - if state.deposit_receipts_start_index == NOT_SET_DEPOSIT_RECEIPTS_START_INDEX: - state.deposit_receipts_start_index = deposit_receipt.index - - # Signify the end of transition to in-protocol deposit logic - if state.eth1_deposit_index >= state.deposit_receipts_start_index: - state.eth1_deposit_index = deposit_receipt.index + 1 - - pubkey = deposit_receipt.pubkey - amount = deposit_receipt.amount +def apply_deposit(state: BeaconState, + pubkey: BLSPubkey, + withdrawal_credentials: Bytes32, + amount: uint64, + signature: BLSSignature, + ) -> None: validator_pubkeys = [validator.pubkey for validator in state.validators] if pubkey not in validator_pubkeys: # Verify the deposit signature (proof of possession) which is not checked by the deposit contract deposit_message = DepositMessage( - pubkey=deposit_receipt.pubkey, - withdrawal_credentials=deposit_receipt.withdrawal_credentials, - amount=deposit_receipt.amount, + pubkey=pubkey, + withdrawal_credentials=withdrawal_credentials, + amount=amount, ) domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks signing_root = compute_signing_root(deposit_message, domain) # Initialize validator if the deposit signature is valid - if bls.Verify(pubkey, signing_root, deposit_receipt.signature): - state.validators.append(get_validator_from_deposit_receipt(deposit_receipt)) + if bls.Verify(pubkey, signing_root, signature): + state.validators.append(get_validator_from_deposit_data(pubkey, withdrawal_credentials, amount)) state.balances.append(amount) 
state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000)) state.current_epoch_participation.append(ParticipationFlags(0b0000_0000)) @@ -286,9 +264,31 @@ def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) increase_balance(state, index, amount) ``` + +#### New `process_deposit_receipt` + +```python +def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) -> None: + # Set deposit receipt start index + if state.deposit_receipts_start_index == NOT_SET_DEPOSIT_RECEIPTS_START_INDEX: + state.deposit_receipts_start_index = deposit_receipt.index + + # Signify the end of transition to in-protocol deposit logic + if state.eth1_deposit_index >= state.deposit_receipts_start_index: + state.eth1_deposit_index = deposit_receipt.index + 1 + + apply_deposit( + state=state, + pubkey=deposit_receipt.pubkey, + withdrawal_credentials=deposit_receipt.withdrawal_credentials, + amount=deposit_receipt.amount, + signature=deposit_receipt.signature, + ) +``` + #### Modified `process_deposit` -*Note*: The function `process_deposit` is modified to prevent deposits from being processed in the second time (due to `process_deposit_receipt`). +*Note*: The function `process_deposit` is modified to prevent deposits from being processed the second time (due to `process_deposit_receipt`). 
```python def process_deposit(state: BeaconState, deposit: Deposit) -> None: @@ -310,29 +310,13 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Deposits must be processed in order state.eth1_deposit_index += 1 - pubkey = deposit.data.pubkey - amount = deposit.data.amount - validator_pubkeys = [validator.pubkey for validator in state.validators] - if pubkey not in validator_pubkeys: - # Verify the deposit signature (proof of possession) which is not checked by the deposit contract - deposit_message = DepositMessage( - pubkey=deposit.data.pubkey, - withdrawal_credentials=deposit.data.withdrawal_credentials, - amount=deposit.data.amount, - ) - domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks - signing_root = compute_signing_root(deposit_message, domain) - # Initialize validator if the deposit signature is valid - if bls.Verify(pubkey, signing_root, deposit.data.signature): - state.validators.append(get_validator_from_deposit(deposit)) - state.balances.append(amount) - state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000)) - state.current_epoch_participation.append(ParticipationFlags(0b0000_0000)) - state.inactivity_scores.append(uint64(0)) - else: - # Increase balance by deposit amount - index = ValidatorIndex(validator_pubkeys.index(pubkey)) - increase_balance(state, index, amount) + apply_deposit( + state=state, + pubkey=deposit.data.pubkey, + withdrawal_credentials=deposit.data.withdrawal_credentials, + amount=deposit.data.amount, + signature=deposit.data.signature, + ) ``` #### Modified `process_execution_payload` From 7bb65f88d91f5f00ec0d19baf665f0f59c9134e0 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Tue, 28 Feb 2023 17:18:07 +0600 Subject: [PATCH 28/73] Cosmetic fix --- specs/_features/eip6110/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md 
index 160eee6585..4c7ca790d5 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -192,8 +192,8 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None: ```python def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: - # Prevent potential underflow introduced by mixing two deposit processing flows # [New in EIP-6110] + # Prevent potential underflow introduced by mixing two deposit processing flows if state.eth1_data.deposit_count > state.eth1_deposit_index: assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index) else: From 195babdf3d55c6661dad4ce658b4679063cb33e7 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 24 Feb 2023 17:56:38 +0800 Subject: [PATCH 29/73] Refactoring the specs list. Avoid listing specs again and again. --- Makefile | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/Makefile b/Makefile index d4259b2fe9..1bb78cb0c7 100644 --- a/Makefile +++ b/Makefile @@ -33,6 +33,12 @@ MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) \ $(wildcard $(SPEC_DIR)/_features/sharding/*.md) \ $(wildcard $(SSZ_DIR)/*.md) +ALL_EXECUTABLE_SPECS = phase0 altair bellatrix capella deneb +# The parameters for commands. Use `foreach` to avoid listing specs again. 
+COVERAGE_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), --cov=eth2spec.$S.$(TEST_PRESET_TYPE)) +PYLINT_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), ./eth2spec/$S) +MYPY_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), -p eth2spec.$S) + COV_HTML_OUT=.htmlcov COV_HTML_OUT_DIR=$(PY_SPEC_DIR)/$(COV_HTML_OUT) COV_INDEX_FILE=$(COV_HTML_OUT_DIR)/index.html @@ -63,15 +69,14 @@ partial_clean: rm -f .coverage rm -rf $(PY_SPEC_DIR)/.pytest_cache rm -rf $(DEPOSIT_CONTRACT_TESTER_DIR)/.pytest_cache - rm -rf $(ETH2SPEC_MODULE_DIR)/phase0 - rm -rf $(ETH2SPEC_MODULE_DIR)/altair - rm -rf $(ETH2SPEC_MODULE_DIR)/bellatrix - rm -rf $(ETH2SPEC_MODULE_DIR)/capella - rm -rf $(ETH2SPEC_MODULE_DIR)/deneb rm -rf $(COV_HTML_OUT_DIR) rm -rf $(TEST_REPORT_DIR) rm -rf eth2spec.egg-info dist build - rm -rf build + rm -rf build; + @for spec_name in $(ALL_EXECUTABLE_SPECS) ; do \ + echo $$spec_name; \ + rm -rf $(ETH2SPEC_MODULE_DIR)/$$spec_name; \ + done clean: partial_clean rm -rf venv @@ -105,12 +110,12 @@ install_test: # Testing against `minimal` or `mainnet` config by default test: pyspec . venv/bin/activate; cd $(PY_SPEC_DIR); \ - python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.$(TEST_PRESET_TYPE) --cov=eth2spec.altair.$(TEST_PRESET_TYPE) --cov=eth2spec.bellatrix.$(TEST_PRESET_TYPE) --cov=eth2spec.capella.$(TEST_PRESET_TYPE) --cov=eth2spec.deneb.$(TEST_PRESET_TYPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec + python3 -m pytest -n 4 --disable-bls $(COVERAGE_SCOPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec # Testing against `minimal` or `mainnet` config by default find_test: pyspec . 
venv/bin/activate; cd $(PY_SPEC_DIR); \ - python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.$(TEST_PRESET_TYPE) --cov=eth2spec.altair.$(TEST_PRESET_TYPE) --cov=eth2spec.bellatrix.$(TEST_PRESET_TYPE) --cov=eth2spec.capella.$(TEST_PRESET_TYPE) --cov=eth2spec.deneb.$(TEST_PRESET_TYPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec + python3 -m pytest -k=$(K) --disable-bls $(COVERAGE_SCOPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec citest: pyspec mkdir -p $(TEST_REPORT_DIR); @@ -119,7 +124,7 @@ ifdef fork python3 -m pytest -n 16 --bls-type=milagro --preset=$(TEST_PRESET_TYPE) --fork=$(fork) --junitxml=test-reports/test_results.xml eth2spec else . venv/bin/activate; cd $(PY_SPEC_DIR); \ - python3 -m pytest -n 16 --bls-type=milagro --preset=$(TEST_PRESET_TYPE) --junitxml=test-reports/test_results.xml eth2spec + python3 -m pytest -n 16 --bls-type=milagro --preset=$(TEST_PRESET_TYPE) --junitxml=test-reports/test_results.xml eth2spec endif @@ -137,13 +142,11 @@ check_toc: $(MARKDOWN_FILES:=.toc) codespell: codespell . --skip "./.git,./venv,$(PY_SPEC_DIR)/.mypy_cache" -I .codespell-whitelist -# TODO: add future protocol upgrade patch packages to linting. -# NOTE: we use `pylint` just for catching unused arguments in spec code lint: pyspec . venv/bin/activate; cd $(PY_SPEC_DIR); \ flake8 --config $(LINTER_CONFIG_FILE) ./eth2spec \ - && pylint --rcfile $(LINTER_CONFIG_FILE) ./eth2spec/phase0 ./eth2spec/altair ./eth2spec/bellatrix ./eth2spec/capella ./eth2spec/deneb \ - && mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair -p eth2spec.bellatrix -p eth2spec.capella -p eth2spec.deneb + && pylint --rcfile $(LINTER_CONFIG_FILE) $(PYLINT_SCOPE) \ + && mypy --config-file $(LINTER_CONFIG_FILE) $(MYPY_SCOPE) lint_generators: pyspec . 
venv/bin/activate; cd $(TEST_GENERATORS_DIR); \ From 1f3249407a9521827c24455d8a010b2c27c3aed6 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 28 Feb 2023 23:37:12 +0800 Subject: [PATCH 30/73] Full wildcard search `MARKDOWN_FILES` --- Makefile | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 1bb78cb0c7..371a3ecf8a 100644 --- a/Makefile +++ b/Makefile @@ -23,14 +23,10 @@ GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENER # To check generator matching: #$(info $$GENERATOR_TARGETS is [${GENERATOR_TARGETS}]) -MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) \ - $(wildcard $(SPEC_DIR)/altair/*.md) $(wildcard $(SPEC_DIR)/altair/**/*.md) \ - $(wildcard $(SPEC_DIR)/bellatrix/*.md) \ - $(wildcard $(SPEC_DIR)/capella/*.md) $(wildcard $(SPEC_DIR)/capella/**/*.md) \ - $(wildcard $(SPEC_DIR)/deneb/*.md) $(wildcard $(SPEC_DIR)/deneb/**/*.md) \ - $(wildcard $(SPEC_DIR)/_features/custody/*.md) \ - $(wildcard $(SPEC_DIR)/_features/das/*.md) \ - $(wildcard $(SPEC_DIR)/_features/sharding/*.md) \ +MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/*/*.md) \ + $(wildcard $(SPEC_DIR)/*/*/*.md) \ + $(wildcard $(SPEC_DIR)/_features/*/*.md) \ + $(wildcard $(SPEC_DIR)/_features/*/*/*.md) \ $(wildcard $(SSZ_DIR)/*.md) ALL_EXECUTABLE_SPECS = phase0 altair bellatrix capella deneb From a236770b077af8107b0afd4054781f6e1856e31f Mon Sep 17 00:00:00 2001 From: terencechain Date: Tue, 28 Feb 2023 15:17:40 -0800 Subject: [PATCH 31/73] EIP4844: Use `MAX_REQUEST_BLOB_SIDECARS` --- specs/deneb/p2p-interface.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 2e77fa98fa..660448b525 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -183,7 +183,7 @@ Request Content: ``` ( - List[BlobIdentifier, MAX_REQUEST_BLOBS_SIDECARS] + List[BlobIdentifier, MAX_REQUEST_BLOB_SIDECARS] ) ``` @@ -191,7 +191,7 @@ 
Response Content: ``` ( - List[BlobSidecar, MAX_REQUEST_BLOBS_SIDECARS] + List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS] ) ``` @@ -202,7 +202,7 @@ It may be less in the case that the responding peer is missing blocks or sidecar The response is unsigned, i.e. `BlobSidecar`, as the signature of the beacon block proposer may not be available beyond the initial distribution via gossip. -No more than `MAX_REQUEST_BLOBS_SIDECARS` may be requested at a time. +No more than `MAX_REQUEST_BLOB_SIDECARS` may be requested at a time. `BlobSidecarsByRoot` is primarily used to recover recent blobs (e.g. when receiving a block with a transaction whose corresponding blob is missing). From 3259922a9eb59c30c826e0986afbc07bfc296b2c Mon Sep 17 00:00:00 2001 From: Stefan Bratanov Date: Wed, 1 Mar 2023 17:10:58 +0000 Subject: [PATCH 32/73] change usage of MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS --- specs/deneb/fork-choice.md | 2 +- specs/deneb/validator.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index 91ff3c4b1a..830c487645 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -47,7 +47,7 @@ The block MUST NOT be considered valid until all valid `Blob`s have been downloa def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool: # `retrieve_blobs_and_proofs` is implementation and context dependent # It returns all the blobs for the given block root, and raises an exception if not available - # Note: the p2p network does not guarantee sidecar retrieval outside of `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` + # Note: the p2p network does not guarantee sidecar retrieval outside of `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` blobs, proofs = retrieve_blobs_and_proofs(beacon_block_root) # For testing, `retrieve_blobs_and_proofs` returns ("TEST", "TEST"). 
diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index b29330ce57..77edb957f8 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -118,7 +118,7 @@ def get_blob_sidecar_signature(state: BeaconState, After publishing the peers on the network may request the sidecar through sync-requests, or a local user may be interested. -The validator MUST hold on to sidecars for `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` epochs and serve when capable, +The validator MUST hold on to sidecars for `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` epochs and serve when capable, to ensure the data-availability of these blobs throughout the network. -After `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` nodes MAY prune the sidecars and/or stop serving them. +After `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` nodes MAY prune the sidecars and/or stop serving them. From c445fa9b3762f665e08a2d558c80abf78178dc0d Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 2 Mar 2023 15:50:08 +0600 Subject: [PATCH 33/73] Apply suggestions from code review Co-authored-by: Danny Ryan --- specs/_features/eip6110/beacon-chain.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index 4c7ca790d5..ddaca21a24 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -46,7 +46,7 @@ The following values are (non-configurable) constants used throughout the specif | Name | Value | | - | - | -| `NOT_SET_DEPOSIT_RECEIPTS_START_INDEX` | `uint64(2**64 - 1)` | +| `UNSET_DEPOSIT_RECEIPTS_START_INDEX` | `uint64(2**64 - 1)` | ## Preset @@ -166,7 +166,7 @@ class BeaconState(Container): next_withdrawal_validator_index: ValidatorIndex # Deep history valid from Capella onwards historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT] - # EIP-6110 + # [New in EIP-6110] deposit_receipts_start_index: uint64 ``` @@ -239,8 +239,7 @@ def 
apply_deposit(state: BeaconState, pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64, - signature: BLSSignature, - ) -> None: + signature: BLSSignature) -> None: validator_pubkeys = [validator.pubkey for validator in state.validators] if pubkey not in validator_pubkeys: # Verify the deposit signature (proof of possession) which is not checked by the deposit contract From 13f3654296546f63a1b60c5b9263a6516125f5e5 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 2 Mar 2023 17:29:22 +0600 Subject: [PATCH 34/73] Apply suggestions from @djrtwo --- specs/_features/eip6110/beacon-chain.md | 100 ++---------------------- specs/_features/eip6110/fork.md | 2 +- specs/altair/beacon-chain.md | 36 +++------ specs/phase0/beacon-chain.md | 60 ++++++++------ 4 files changed, 58 insertions(+), 140 deletions(-) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index ddaca21a24..95655d32b3 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -21,10 +21,7 @@ - [Beacon chain state transition function](#beacon-chain-state-transition-function) - [Block processing](#block-processing) - [Modified `process_operations`](#modified-process_operations) - - [New `get_validator_from_deposit_data`](#new-get_validator_from_deposit_data) - - [New `apply_deposit`](#new-apply_deposit) - [New `process_deposit_receipt`](#new-process_deposit_receipt) - - [Modified `process_deposit`](#modified-process_deposit) - [Modified `process_execution_payload`](#modified-process_execution_payload) - [Testing](#testing) @@ -36,7 +33,7 @@ This is the beacon chain specification of in-protocol deposits processing mechanism. This mechanism relies on the changes proposed by [EIP-6110](http://eips.ethereum.org/EIPS/eip-6110). -*Note:* This specification is under development and should be used with care. 
+*Note:* This specification is built upon [Capella](../../capella/beacon_chain.md) and is under active development. ## Constants @@ -192,10 +189,11 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None: ```python def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: - # [New in EIP-6110] - # Prevent potential underflow introduced by mixing two deposit processing flows - if state.eth1_data.deposit_count > state.eth1_deposit_index: - assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index) + # [Modified in EIP-6110] + # Disable former deposit mechanism once all prior deposits are processed + eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index) + if state.eth1_deposit_index < eth1_deposit_index_limit: + assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index) else: assert len(body.deposits) == 0 @@ -215,61 +213,12 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt) ``` -#### New `get_validator_from_deposit_data` - -```python -def get_validator_from_deposit_data(pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64) -> Validator: - effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) - - return Validator( - pubkey=pubkey, - withdrawal_credentials=withdrawal_credentials, - activation_eligibility_epoch=FAR_FUTURE_EPOCH, - activation_epoch=FAR_FUTURE_EPOCH, - exit_epoch=FAR_FUTURE_EPOCH, - withdrawable_epoch=FAR_FUTURE_EPOCH, - effective_balance=effective_balance, - ) -``` - -#### New `apply_deposit` - -```python -def apply_deposit(state: BeaconState, - pubkey: BLSPubkey, - withdrawal_credentials: Bytes32, - amount: uint64, - signature: BLSSignature) -> None: - validator_pubkeys = [validator.pubkey for validator in state.validators] - if pubkey not in 
validator_pubkeys: - # Verify the deposit signature (proof of possession) which is not checked by the deposit contract - deposit_message = DepositMessage( - pubkey=pubkey, - withdrawal_credentials=withdrawal_credentials, - amount=amount, - ) - domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks - signing_root = compute_signing_root(deposit_message, domain) - # Initialize validator if the deposit signature is valid - if bls.Verify(pubkey, signing_root, signature): - state.validators.append(get_validator_from_deposit_data(pubkey, withdrawal_credentials, amount)) - state.balances.append(amount) - state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000)) - state.current_epoch_participation.append(ParticipationFlags(0b0000_0000)) - state.inactivity_scores.append(uint64(0)) - else: - # Increase balance by deposit amount - index = ValidatorIndex(validator_pubkeys.index(pubkey)) - increase_balance(state, index, amount) -``` - - #### New `process_deposit_receipt` ```python def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) -> None: # Set deposit receipt start index - if state.deposit_receipts_start_index == NOT_SET_DEPOSIT_RECEIPTS_START_INDEX: + if state.deposit_receipts_start_index == UNSET_DEPOSIT_RECEIPTS_START_INDEX: state.deposit_receipts_start_index = deposit_receipt.index # Signify the end of transition to in-protocol deposit logic @@ -285,39 +234,6 @@ def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) ) ``` -#### Modified `process_deposit` - -*Note*: The function `process_deposit` is modified to prevent deposits from being processed the second time (due to `process_deposit_receipt`). 
- -```python -def process_deposit(state: BeaconState, deposit: Deposit) -> None: - # [New in EIP-6110] - # Skip already processed deposits - if state.eth1_deposit_index >= state.deposit_receipts_start_index: - state.eth1_deposit_index += 1 - return - - # Verify the Merkle branch - assert is_valid_merkle_branch( - leaf=hash_tree_root(deposit.data), - branch=deposit.proof, - depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in - index=state.eth1_deposit_index, - root=state.eth1_data.deposit_root, - ) - - # Deposits must be processed in order - state.eth1_deposit_index += 1 - - apply_deposit( - state=state, - pubkey=deposit.data.pubkey, - withdrawal_credentials=deposit.data.withdrawal_credentials, - amount=deposit.data.amount, - signature=deposit.data.signature, - ) -``` - #### Modified `process_execution_payload` *Note*: The function `process_execution_payload` is modified to use the new `ExecutionPayloadHeader` type. @@ -379,7 +295,7 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy - deposit_receipts_start_index=NOT_SET_DEPOSIT_RECEIPTS_START_INDEX, # [New in EIP6110] + deposit_receipts_start_index=UNSET_DEPOSIT_RECEIPTS_START_INDEX, # [New in EIP6110] ) # Process deposits diff --git a/specs/_features/eip6110/fork.md b/specs/_features/eip6110/fork.md index a932ecb24e..b08661e5fa 100644 --- a/specs/_features/eip6110/fork.md +++ b/specs/_features/eip6110/fork.md @@ -135,7 +135,7 @@ def upgrade_to_eip6110(pre: capella.BeaconState) -> BeaconState: # Deep history valid from Capella onwards historical_summaries=pre.historical_summaries, # EIP-6110 - deposit_receipts_start_index=NOT_SET_DEPOSIT_RECEIPTS_START_INDEX, # [New in EIP-6110] + 
deposit_receipts_start_index=UNSET_DEPOSIT_RECEIPTS_START_INDEX, # [New in EIP-6110] ) return post diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md index fe71a5ff83..58dfad608a 100644 --- a/specs/altair/beacon-chain.md +++ b/specs/altair/beacon-chain.md @@ -43,7 +43,7 @@ - [Modified `slash_validator`](#modified-slash_validator) - [Block processing](#block-processing) - [Modified `process_attestation`](#modified-process_attestation) - - [Modified `process_deposit`](#modified-process_deposit) + - [Modified `apply_deposit`](#modified-apply_deposit) - [Sync aggregate processing](#sync-aggregate-processing) - [Epoch processing](#epoch-processing) - [Justification and finalization](#justification-and-finalization) @@ -489,39 +489,29 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: increase_balance(state, get_beacon_proposer_index(state), proposer_reward) ``` -#### Modified `process_deposit` +#### Modified `apply_deposit` -*Note*: The function `process_deposit` is modified to initialize `inactivity_scores`, `previous_epoch_participation`, and `current_epoch_participation`. +*Note*: The function `apply_deposit` is modified to initialize `inactivity_scores`, `previous_epoch_participation`, and `current_epoch_participation`. 
```python -def process_deposit(state: BeaconState, deposit: Deposit) -> None: - # Verify the Merkle branch - assert is_valid_merkle_branch( - leaf=hash_tree_root(deposit.data), - branch=deposit.proof, - depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in - index=state.eth1_deposit_index, - root=state.eth1_data.deposit_root, - ) - - # Deposits must be processed in order - state.eth1_deposit_index += 1 - - pubkey = deposit.data.pubkey - amount = deposit.data.amount +def apply_deposit(state: BeaconState, + pubkey: BLSPubkey, + withdrawal_credentials: Bytes32, + amount: uint64, + signature: BLSSignature) -> None: validator_pubkeys = [validator.pubkey for validator in state.validators] if pubkey not in validator_pubkeys: # Verify the deposit signature (proof of possession) which is not checked by the deposit contract deposit_message = DepositMessage( - pubkey=deposit.data.pubkey, - withdrawal_credentials=deposit.data.withdrawal_credentials, - amount=deposit.data.amount, + pubkey=pubkey, + withdrawal_credentials=withdrawal_credentials, + amount=amount, ) domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks signing_root = compute_signing_root(deposit_message, domain) # Initialize validator if the deposit signature is valid - if bls.Verify(pubkey, signing_root, deposit.data.signature): - state.validators.append(get_validator_from_deposit(deposit)) + if bls.Verify(pubkey, signing_root, signature): + state.validators.append(get_validator_from_deposit(pubkey, withdrawal_credentials, amount)) state.balances.append(amount) # [New in Altair] state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000)) diff --git a/specs/phase0/beacon-chain.md b/specs/phase0/beacon-chain.md index 7e14fa951a..3794cd6be3 100644 --- a/specs/phase0/beacon-chain.md +++ b/specs/phase0/beacon-chain.md @@ -1835,13 +1835,12 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: ##### Deposits 
```python -def get_validator_from_deposit(deposit: Deposit) -> Validator: - amount = deposit.data.amount +def get_validator_from_deposit(pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64) -> Validator: effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) return Validator( - pubkey=deposit.data.pubkey, - withdrawal_credentials=deposit.data.withdrawal_credentials, + pubkey=pubkey, + withdrawal_credentials=withdrawal_credentials, activation_eligibility_epoch=FAR_FUTURE_EPOCH, activation_epoch=FAR_FUTURE_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, @@ -1851,36 +1850,26 @@ def get_validator_from_deposit(deposit: Deposit) -> Validator: ``` ```python -def process_deposit(state: BeaconState, deposit: Deposit) -> None: - # Verify the Merkle branch - assert is_valid_merkle_branch( - leaf=hash_tree_root(deposit.data), - branch=deposit.proof, - depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in - index=state.eth1_deposit_index, - root=state.eth1_data.deposit_root, - ) - - # Deposits must be processed in order - state.eth1_deposit_index += 1 - - pubkey = deposit.data.pubkey - amount = deposit.data.amount +def apply_deposit(state: BeaconState, + pubkey: BLSPubkey, + withdrawal_credentials: Bytes32, + amount: uint64, + signature: BLSSignature) -> None: validator_pubkeys = [v.pubkey for v in state.validators] if pubkey not in validator_pubkeys: # Verify the deposit signature (proof of possession) which is not checked by the deposit contract deposit_message = DepositMessage( - pubkey=deposit.data.pubkey, - withdrawal_credentials=deposit.data.withdrawal_credentials, - amount=deposit.data.amount, + pubkey=pubkey, + withdrawal_credentials=withdrawal_credentials, + amount=amount, ) domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks signing_root = compute_signing_root(deposit_message, domain) - if not bls.Verify(pubkey, signing_root, deposit.data.signature): + if not 
bls.Verify(pubkey, signing_root, signature): return # Add validator and balance entries - state.validators.append(get_validator_from_deposit(deposit)) + state.validators.append(get_validator_from_deposit(pubkey, withdrawal_credentials, amount)) state.balances.append(amount) else: # Increase balance by deposit amount @@ -1888,6 +1877,29 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: increase_balance(state, index, amount) ``` +```python +def process_deposit(state: BeaconState, deposit: Deposit) -> None: + # Verify the Merkle branch + assert is_valid_merkle_branch( + leaf=hash_tree_root(deposit.data), + branch=deposit.proof, + depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in + index=state.eth1_deposit_index, + root=state.eth1_data.deposit_root, + ) + + # Deposits must be processed in order + state.eth1_deposit_index += 1 + + apply_deposit( + state=state, + pubkey=deposit.data.pubkey, + withdrawal_credentials=deposit.data.withdrawal_credentials, + amount=deposit.data.amount, + signature=deposit.data.signature, + ) +``` + ##### Voluntary exits ```python From 00557c56492171dbaa41853e5c658b0789ff3206 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 2 Mar 2023 17:31:12 +0600 Subject: [PATCH 35/73] Remove unnecessary eth1_deposit_index bump --- specs/_features/eip6110/beacon-chain.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index 95655d32b3..70a72a5f45 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -221,10 +221,6 @@ def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) if state.deposit_receipts_start_index == UNSET_DEPOSIT_RECEIPTS_START_INDEX: state.deposit_receipts_start_index = deposit_receipt.index - # Signify the end of transition to in-protocol deposit logic - if state.eth1_deposit_index >= state.deposit_receipts_start_index: - 
state.eth1_deposit_index = deposit_receipt.index + 1 - apply_deposit( state=state, pubkey=deposit_receipt.pubkey, From 86fb82b221474cc89387fa6436806507b3849d88 Mon Sep 17 00:00:00 2001 From: dankrad Date: Thu, 2 Mar 2023 20:49:10 +0000 Subject: [PATCH 36/73] Test generators for kzg-4844 libraries (#3274) Arkworks integration and test generators for kzg-4844 libraries --- Makefile | 4 +- setup.py | 1 + specs/deneb/polynomial-commitments.md | 15 +- tests/core/pyspec/eth2spec/test/conftest.py | 11 +- tests/core/pyspec/eth2spec/utils/bls.py | 223 ++++++- tests/formats/kzg/README.md | 15 + tests/formats/kzg/blob_to_kzg_commitment.md | 21 + tests/formats/kzg/compute_blob_kzg_proof.md | 21 + tests/formats/kzg/compute_kzg_proof.md | 23 + tests/formats/kzg/verify_blob_kzg_proof.md | 23 + .../kzg/verify_blob_kzg_proof_batch.md | 23 + tests/formats/kzg/verify_kzg_proof.md | 25 + tests/generators/kzg_4844/README.md | 3 + tests/generators/kzg_4844/main.py | 579 ++++++++++++++++++ tests/generators/kzg_4844/requirements.txt | 2 + 15 files changed, 951 insertions(+), 38 deletions(-) create mode 100644 tests/formats/kzg/README.md create mode 100644 tests/formats/kzg/blob_to_kzg_commitment.md create mode 100644 tests/formats/kzg/compute_blob_kzg_proof.md create mode 100644 tests/formats/kzg/compute_kzg_proof.md create mode 100644 tests/formats/kzg/verify_blob_kzg_proof.md create mode 100644 tests/formats/kzg/verify_blob_kzg_proof_batch.md create mode 100644 tests/formats/kzg/verify_kzg_proof.md create mode 100644 tests/generators/kzg_4844/README.md create mode 100644 tests/generators/kzg_4844/main.py create mode 100644 tests/generators/kzg_4844/requirements.txt diff --git a/Makefile b/Makefile index 371a3ecf8a..cd18256e9d 100644 --- a/Makefile +++ b/Makefile @@ -117,10 +117,10 @@ citest: pyspec mkdir -p $(TEST_REPORT_DIR); ifdef fork . 
venv/bin/activate; cd $(PY_SPEC_DIR); \ - python3 -m pytest -n 16 --bls-type=milagro --preset=$(TEST_PRESET_TYPE) --fork=$(fork) --junitxml=test-reports/test_results.xml eth2spec + python3 -m pytest -n 16 --bls-type=fastest --preset=$(TEST_PRESET_TYPE) --fork=$(fork) --junitxml=test-reports/test_results.xml eth2spec else . venv/bin/activate; cd $(PY_SPEC_DIR); \ - python3 -m pytest -n 16 --bls-type=milagro --preset=$(TEST_PRESET_TYPE) --junitxml=test-reports/test_results.xml eth2spec + python3 -m pytest -n 16 --bls-type=fastest --preset=$(TEST_PRESET_TYPE) --junitxml=test-reports/test_results.xml eth2spec endif diff --git a/setup.py b/setup.py index 9c5488f126..cf030c5492 100644 --- a/setup.py +++ b/setup.py @@ -1174,5 +1174,6 @@ def run(self): RUAMEL_YAML_VERSION, "lru-dict==1.1.8", MARKO_VERSION, + "py_arkworks_bls12381==0.3.4", ] ) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 61e22e1820..7b65b44b62 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -273,7 +273,7 @@ def g1_lincomb(points: Sequence[KZGCommitment], scalars: Sequence[BLSFieldElemen BLS multiscalar multiplication. This function can be optimized using Pippenger's algorithm and variants. 
""" assert len(points) == len(scalars) - result = bls.Z1 + result = bls.Z1() for x, a in zip(points, scalars): result = bls.add(result, bls.multiply(bls.bytes48_to_G1(x), a)) return KZGCommitment(bls.G1_to_bytes48(result)) @@ -323,7 +323,7 @@ def evaluate_polynomial_in_evaluation_form(polynomial: Polynomial, a = BLSFieldElement(int(polynomial[i]) * int(roots_of_unity_brp[i]) % BLS_MODULUS) b = BLSFieldElement((int(BLS_MODULUS) + int(z) - int(roots_of_unity_brp[i])) % BLS_MODULUS) result += int(div(a, b) % BLS_MODULUS) - result = result * int(pow(z, width, BLS_MODULUS) - 1) * int(inverse_width) + result = result * int(BLS_MODULUS + pow(z, width, BLS_MODULUS) - 1) * int(inverse_width) return BLSFieldElement(result % BLS_MODULUS) ``` @@ -371,10 +371,10 @@ def verify_kzg_proof_impl(commitment: KZGCommitment, Verify KZG proof that ``p(z) == y`` where ``p(z)`` is the polynomial represented by ``polynomial_kzg``. """ # Verify: P - y = Q * (X - z) - X_minus_z = bls.add(bls.bytes96_to_G2(KZG_SETUP_G2[1]), bls.multiply(bls.G2, BLS_MODULUS - z)) - P_minus_y = bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1, BLS_MODULUS - y)) + X_minus_z = bls.add(bls.bytes96_to_G2(KZG_SETUP_G2[1]), bls.multiply(bls.G2(), (BLS_MODULUS - z) % BLS_MODULUS)) + P_minus_y = bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1(), (BLS_MODULUS - y) % BLS_MODULUS)) return bls.pairing_check([ - [P_minus_y, bls.neg(bls.G2)], + [P_minus_y, bls.neg(bls.G2())], [bls.bytes48_to_G1(proof), X_minus_z] ]) ``` @@ -415,14 +415,14 @@ def verify_kzg_proof_batch(commitments: Sequence[KZGCommitment], proofs, [BLSFieldElement((int(z) * int(r_power)) % BLS_MODULUS) for z, r_power in zip(zs, r_powers)], ) - C_minus_ys = [bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1, BLS_MODULUS - y)) + C_minus_ys = [bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1(), (BLS_MODULUS - y) % BLS_MODULUS)) for commitment, y in zip(commitments, ys)] C_minus_y_as_KZGCommitments = 
[KZGCommitment(bls.G1_to_bytes48(x)) for x in C_minus_ys] C_minus_y_lincomb = g1_lincomb(C_minus_y_as_KZGCommitments, r_powers) return bls.pairing_check([ [bls.bytes48_to_G1(proof_lincomb), bls.neg(bls.bytes96_to_G2(KZG_SETUP_G2[1]))], - [bls.add(bls.bytes48_to_G1(C_minus_y_lincomb), bls.bytes48_to_G1(proof_z_lincomb)), bls.G2] + [bls.add(bls.bytes48_to_G1(C_minus_y_lincomb), bls.bytes48_to_G1(proof_z_lincomb)), bls.G2()] ]) ``` @@ -561,3 +561,4 @@ def verify_blob_kzg_proof_batch(blobs: Sequence[Blob], return verify_kzg_proof_batch(commitments, evaluation_challenges, ys, proofs) ``` + diff --git a/tests/core/pyspec/eth2spec/test/conftest.py b/tests/core/pyspec/eth2spec/test/conftest.py index a5f19e20cb..3026b48eb7 100644 --- a/tests/core/pyspec/eth2spec/test/conftest.py +++ b/tests/core/pyspec/eth2spec/test/conftest.py @@ -44,8 +44,11 @@ def pytest_addoption(parser): help="bls-default: make tests that are not dependent on BLS run without BLS" ) parser.addoption( - "--bls-type", action="store", type=str, default="py_ecc", choices=["py_ecc", "milagro"], - help="bls-type: use 'pyecc' or 'milagro' implementation for BLS" + "--bls-type", action="store", type=str, default="py_ecc", choices=["py_ecc", "milagro", "arkworks", "fastest"], + help=( + "bls-type: use specified BLS implementation;" + "fastest: use milagro for signatures and arkworks for everything else (e.g. 
KZG)" + ) ) @@ -88,5 +91,9 @@ def bls_type(request): bls_utils.use_py_ecc() elif bls_type == "milagro": bls_utils.use_milagro() + elif bls_type == "arkworks": + bls_utils.use_arkworks() + elif bls_type == "fastest": + bls_utils.use_fastest() else: raise Exception(f"unrecognized bls type: {bls_type}") diff --git a/tests/core/pyspec/eth2spec/utils/bls.py b/tests/core/pyspec/eth2spec/utils/bls.py index aa060f4f9a..7ea22be46d 100644 --- a/tests/core/pyspec/eth2spec/utils/bls.py +++ b/tests/core/pyspec/eth2spec/utils/bls.py @@ -1,28 +1,49 @@ from py_ecc.bls import G2ProofOfPossession as py_ecc_bls from py_ecc.bls.g2_primatives import signature_to_G2 as _signature_to_G2 from py_ecc.optimized_bls12_381 import ( # noqa: F401 - G1, - G2, - Z1, - Z2, - FQ, - add, - multiply, - neg, - pairing, - final_exponentiate, - FQ12 + G1 as py_ecc_G1, + G2 as py_ecc_G2, + Z1 as py_ecc_Z1, + add as py_ecc_add, + multiply as py_ecc_mul, + neg as py_ecc_neg, + pairing as py_ecc_pairing, + final_exponentiate as py_ecc_final_exponentiate, + FQ12 as py_ecc_GT, ) from py_ecc.bls.g2_primitives import ( # noqa: F401 - G1_to_pubkey as G1_to_bytes48, - pubkey_to_G1 as bytes48_to_G1, - G2_to_signature as G2_to_bytes96, - signature_to_G2 as bytes96_to_G2, + G1_to_pubkey as py_ecc_G1_to_bytes48, + pubkey_to_G1 as py_ecc_bytes48_to_G1, + G2_to_signature as py_ecc_G2_to_bytes96, + signature_to_G2 as py_ecc_bytes96_to_G2, +) +from py_arkworks_bls12381 import ( + G1Point as arkworks_G1, + G2Point as arkworks_G2, + Scalar as arkworks_Scalar, + GT as arkworks_GT, ) import milagro_bls_binding as milagro_bls # noqa: F401 for BLS switching option +import py_arkworks_bls12381 as arkworks_bls # noqa: F401 for BLS switching option + + +class fastest_bls: + G1 = arkworks_G1 + G2 = arkworks_G2 + Scalar = arkworks_Scalar + GT = arkworks_GT + _AggregatePKs = milagro_bls._AggregatePKs + Sign = milagro_bls.Sign + Verify = milagro_bls.Verify + Aggregate = milagro_bls.Aggregate + AggregateVerify = 
milagro_bls.AggregateVerify + FastAggregateVerify = milagro_bls.FastAggregateVerify + SkToPk = milagro_bls.SkToPk + + # Flag to make BLS active or not. Used for testing, do not ignore BLS in production unless you know what you are doing. bls_active = True @@ -43,6 +64,14 @@ def use_milagro(): bls = milagro_bls +def use_arkworks(): + """ + Shortcut to use Arkworks as BLS library + """ + global bls + bls = arkworks_bls + + def use_py_ecc(): """ Shortcut to use Py-ecc as BLS library @@ -51,6 +80,14 @@ def use_py_ecc(): bls = py_ecc_bls +def use_fastest(): + """ + Shortcut to use Milagro for signatures and Arkworks for other BLS operations + """ + global bls + bls = fastest_bls + + def only_with_bls(alt_return=None): """ Decorator factory to make a function only run when BLS is active. Otherwise return the default. @@ -68,7 +105,10 @@ def entry(*args, **kw): @only_with_bls(alt_return=True) def Verify(PK, message, signature): try: - result = bls.Verify(PK, message, signature) + if bls == arkworks_bls: # no signature API in arkworks + result = py_ecc_bls.Verify(PK, message, signature) + else: + result = bls.Verify(PK, message, signature) except Exception: result = False finally: @@ -78,7 +118,10 @@ def Verify(PK, message, signature): @only_with_bls(alt_return=True) def AggregateVerify(pubkeys, messages, signature): try: - result = bls.AggregateVerify(list(pubkeys), list(messages), signature) + if bls == arkworks_bls: # no signature API in arkworks + result = py_ecc_bls.AggregateVerify(list(pubkeys), list(messages), signature) + else: + result = bls.AggregateVerify(list(pubkeys), list(messages), signature) except Exception: result = False finally: @@ -88,7 +131,10 @@ def AggregateVerify(pubkeys, messages, signature): @only_with_bls(alt_return=True) def FastAggregateVerify(pubkeys, message, signature): try: - result = bls.FastAggregateVerify(list(pubkeys), message, signature) + if bls == arkworks_bls: # no signature API in arkworks + result = 
py_ecc_bls.FastAggregateVerify(list(pubkeys), message, signature) + else: + result = bls.FastAggregateVerify(list(pubkeys), message, signature) except Exception: result = False finally: @@ -97,12 +143,16 @@ def FastAggregateVerify(pubkeys, message, signature): @only_with_bls(alt_return=STUB_SIGNATURE) def Aggregate(signatures): + if bls == arkworks_bls: # no signature API in arkworks + return py_ecc_bls.Aggregate(signatures) return bls.Aggregate(signatures) @only_with_bls(alt_return=STUB_SIGNATURE) def Sign(SK, message): - if bls == py_ecc_bls: + if bls == arkworks_bls: # no signature API in arkworks + return py_ecc_bls.Sign(SK, message) + elif bls == py_ecc_bls: return bls.Sign(SK, message) else: return bls.Sign(SK.to_bytes(32, 'big'), message) @@ -121,24 +171,143 @@ def AggregatePKs(pubkeys): # milagro_bls._AggregatePKs checks KeyValidate internally pass + if bls == arkworks_bls: # no signature API in arkworks + return py_ecc_bls._AggregatePKs(list(pubkeys)) + return bls._AggregatePKs(list(pubkeys)) @only_with_bls(alt_return=STUB_SIGNATURE) def SkToPk(SK): - if bls == py_ecc_bls: - return bls.SkToPk(SK) + if bls == py_ecc_bls or bls == arkworks_bls: # no signature API in arkworks + return py_ecc_bls.SkToPk(SK) else: return bls.SkToPk(SK.to_bytes(32, 'big')) def pairing_check(values): - p_q_1, p_q_2 = values - final_exponentiation = final_exponentiate( - pairing(p_q_1[1], p_q_1[0], final_exponentiate=False) - * pairing(p_q_2[1], p_q_2[0], final_exponentiate=False) - ) - return final_exponentiation == FQ12.one() + if bls == arkworks_bls or bls == fastest_bls: + p_q_1, p_q_2 = values + g1s = [p_q_1[0], p_q_2[0]] + g2s = [p_q_1[1], p_q_2[1]] + return arkworks_GT.multi_pairing(g1s, g2s) == arkworks_GT.one() + else: + p_q_1, p_q_2 = values + final_exponentiation = py_ecc_final_exponentiate( + py_ecc_pairing(p_q_1[1], p_q_1[0], final_exponentiate=False) + * py_ecc_pairing(p_q_2[1], p_q_2[0], final_exponentiate=False) + ) + return final_exponentiation == py_ecc_GT.one() 
+ + +def add(lhs, rhs): + """ + Performs point addition of `lhs` and `rhs`. + The points can either be in G1 or G2. + """ + if bls == arkworks_bls or bls == fastest_bls: + return lhs + rhs + return py_ecc_add(lhs, rhs) + + +def multiply(point, scalar): + """ + Performs Scalar multiplication between + `point` and `scalar`. + `point` can either be in G1 or G2 + """ + if bls == arkworks_bls or bls == fastest_bls: + int_as_bytes = scalar.to_bytes(32, 'little') + scalar = arkworks_Scalar.from_le_bytes(int_as_bytes) + return point * scalar + return py_ecc_mul(point, scalar) + + +def neg(point): + """ + Returns the point negation of `point` + `point` can either be in G1 or G2 + """ + if bls == arkworks_bls or bls == fastest_bls: + return -point + return py_ecc_neg(point) + + +def Z1(): + """ + Returns the identity point in G1 + """ + if bls == arkworks_bls or bls == fastest_bls: + return arkworks_G1.identity() + return py_ecc_Z1 + + +def G1(): + """ + Returns the chosen generator point in G1 + """ + if bls == arkworks_bls or bls == fastest_bls: + return arkworks_G1() + return py_ecc_G1 + + +def G2(): + """ + Returns the chosen generator point in G2 + """ + if bls == arkworks_bls or bls == fastest_bls: + return arkworks_G2() + return py_ecc_G2 + + +def G1_to_bytes48(point): + """ + Serializes a point in G1. + Returns a bytearray of size 48 as + we use the compressed format + """ + if bls == arkworks_bls or bls == fastest_bls: + return bytes(point.to_compressed_bytes()) + return py_ecc_G1_to_bytes48(point) + + +def G2_to_bytes96(point): + """ + Serializes a point in G2. + Returns a bytearray of size 96 as + we use the compressed format + """ + if bls == arkworks_bls or bls == fastest_bls: + return bytes(point.to_compressed_bytes()) + return py_ecc_G2_to_bytes96(point) + + +def bytes48_to_G1(bytes48): + """ + Deserializes a purported compressed serialized + point in G1. 
+ - No subgroup checks are performed + - If the bytearray is not a valid serialization + of a point in G1, then this method will raise + an exception + """ + if bls == arkworks_bls or bls == fastest_bls: + return arkworks_G1.from_compressed_bytes_unchecked(bytes48) + return py_ecc_bytes48_to_G1(bytes48) + + +def bytes96_to_G2(bytes96): + """ + Deserializes a purported compressed serialized + point in G2. + - No subgroup checks are performed + - If the bytearray is not a valid serialization + of a point in G2, then this method will raise + an exception + """ + if bls == arkworks_bls or bls == fastest_bls: + return arkworks_G2.from_compressed_bytes_unchecked(bytes96) + return py_ecc_bytes96_to_G2(bytes96) @only_with_bls(alt_return=True) diff --git a/tests/formats/kzg/README.md b/tests/formats/kzg/README.md new file mode 100644 index 0000000000..b5bd720393 --- /dev/null +++ b/tests/formats/kzg/README.md @@ -0,0 +1,15 @@ +# KZG tests + +A test type for KZG libraries. Tests all the public interfaces that a KZG library required to implement EIP-4844 needs to provide, as defined in `polynomial-commitments.md`. + +We do not recommend rolling your own crypto or using an untested KZG library. + +The KZG test suite runner has the following handlers: + +- [`blob_to_kzg_commitment`](./blob_to_kzg_commitment.md) +- [`compute_kzg_proof`](./compute_kzg_proof.md) +- [`verify_kzg_proof`](./verify_kzg_proof.md) +- [`compute_blob_kzg_proof`](./compute_blob_kzg_proof.md) +- [`verify_blob_kzg_proof`](./verify_blob_kzg_proof.md) +- [`verify_blob_kzg_proof_batch`](./verify_blob_kzg_proof_batch.md) + diff --git a/tests/formats/kzg/blob_to_kzg_commitment.md b/tests/formats/kzg/blob_to_kzg_commitment.md new file mode 100644 index 0000000000..dbb1556a1d --- /dev/null +++ b/tests/formats/kzg/blob_to_kzg_commitment.md @@ -0,0 +1,21 @@ +# Test format: Blob to KZG commitment + +Compute the KZG commitment for a given `blob`. 
+ +## Test case format + +The test data is declared in a `data.yaml` file: + +```yaml +input: + blob: Blob -- the data blob +output: KZGCommitment -- The KZG commitment +``` + +- `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`. + +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. + +## Condition + +The `blob_to_kzg_commitment` handler should compute the KZG commitment for `blob`, and the result should match the expected `output`. If the blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element) it should error, i.e. the output should be `null`. diff --git a/tests/formats/kzg/compute_blob_kzg_proof.md b/tests/formats/kzg/compute_blob_kzg_proof.md new file mode 100644 index 0000000000..512f60ecb3 --- /dev/null +++ b/tests/formats/kzg/compute_blob_kzg_proof.md @@ -0,0 +1,21 @@ +# Test format: Compute blob KZG proof + +Compute the blob KZG proof for a given `blob`, that helps with quickly verifying that the KZG commitment for the blob is correct. + +## Test case format + +The test data is declared in a `data.yaml` file: + +```yaml +input: + blob: Blob -- the data blob +output: KZGProof -- The blob KZG proof +``` + +- `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`. + +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. + +## Condition + +The `compute_blob_kzg_proof` handler should compute the blob KZG proof for `blob`, and the result should match the expected `output`. If the blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element) it should error, i.e. the output should be `null`. 
diff --git a/tests/formats/kzg/compute_kzg_proof.md b/tests/formats/kzg/compute_kzg_proof.md new file mode 100644 index 0000000000..bba13638f8 --- /dev/null +++ b/tests/formats/kzg/compute_kzg_proof.md @@ -0,0 +1,23 @@ +# Test format: Compute KZG proof + +Compute the KZG proof for a given `blob` and an evaluation point `z`. + +## Test case format + +The test data is declared in a `data.yaml` file: + +```yaml +input: + blob: Blob -- the data blob representing a polynomial + z: Bytes32 -- bytes encoding the BLS field element at which the polynomial should be evaluated +output: KZGProof -- The KZG proof +``` + +- `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`. +- `z` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`. + +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. + +## Condition + +The `compute_kzg_proof` handler should compute the KZG proof for evaluating the polynomial represented by `blob` at `z`, and the result should match the expected `output`. If the blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element) or `z` is not a valid BLS field element, it should error, i.e. the output should be `null`. 
diff --git a/tests/formats/kzg/verify_blob_kzg_proof.md b/tests/formats/kzg/verify_blob_kzg_proof.md new file mode 100644 index 0000000000..dd0bcda5a9 --- /dev/null +++ b/tests/formats/kzg/verify_blob_kzg_proof.md @@ -0,0 +1,23 @@ +# Test format: Verify blob KZG proof + +Use the blob KZG proof to verify that the KZG commitment for a given `blob` is correct + +## Test case format + +The test data is declared in a `data.yaml` file: + +```yaml +input: + blob: Blob -- the data blob + commitment: KZGCommitment -- the KZG commitment to the data blob + proof: KZGProof -- The KZG proof +output: bool -- true (valid proof) or false (incorrect proof) +``` + +- `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`. + +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. + +## Condition + +The `verify_blob_kzg_proof` handler should verify that `commitment` is a correct KZG commitment to `blob` by using the blob KZG proof `proof`, and the result should match the expected `output`. If the commitment or proof is invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve) or `blob` is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element), it should error, i.e. the output should be `null`. 
diff --git a/tests/formats/kzg/verify_blob_kzg_proof_batch.md b/tests/formats/kzg/verify_blob_kzg_proof_batch.md new file mode 100644 index 0000000000..3bcc74d6bb --- /dev/null +++ b/tests/formats/kzg/verify_blob_kzg_proof_batch.md @@ -0,0 +1,23 @@ +# Test format: Verify blob KZG proof batch + +Use the blob KZG proofs to verify that the KZG commitments for given `blob`s are correct. + +## Test case format + +The test data is declared in a `data.yaml` file: + +```yaml +input: + blobs: List[Blob] -- the data blobs + commitments: List[KZGCommitment] -- the KZG commitments to the data blobs + proofs: List[KZGProof] -- The KZG proofs +output: bool -- true (all proofs are valid) or false (some proofs incorrect) +``` + +- `blob`s here are encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`. + +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. + +## Condition + +The `verify_blob_kzg_proof_batch` handler should verify that `commitments` are correct KZG commitments to `blobs` by using the blob KZG proofs `proofs`, and the result should match the expected `output`. If any of the commitments or proofs are invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve) or any blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element), it should error, i.e. the output should be `null`. diff --git a/tests/formats/kzg/verify_kzg_proof.md b/tests/formats/kzg/verify_kzg_proof.md new file mode 100644 index 0000000000..143466b66f --- /dev/null +++ b/tests/formats/kzg/verify_kzg_proof.md @@ -0,0 +1,25 @@ +# Test format: Verify KZG proof + +Verify the KZG proof for a given `blob` and an evaluation point `z` that claims to result in a value of `y`. 
+ +## Test case format + +The test data is declared in a `data.yaml` file: + +```yaml +input: + commitment: KZGCommitment -- the KZG commitment to the data blob + z: Bytes32 -- bytes encoding the BLS field element at which the polynomial should be evaluated + y: Bytes32 -- the claimed result of the evaluation + proof: KZGProof -- The KZG proof +output: bool -- true (valid proof) or false (incorrect proof) +``` + +- `z` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`. +- `y` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`. + +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. + +## Condition + +The `verify_kzg_proof` handler should verify the KZG proof for evaluating the polynomial represented by `blob` at `z` resulting in the value `y`, and the result should match the expected `output`. If the commitment or proof is invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve) or `z` or `y` are not a valid BLS field element, it should error, i.e. the output should be `null`. 
diff --git a/tests/generators/kzg_4844/README.md b/tests/generators/kzg_4844/README.md new file mode 100644 index 0000000000..ab81a85e86 --- /dev/null +++ b/tests/generators/kzg_4844/README.md @@ -0,0 +1,3 @@ +# KZG 4844 Test Generator + +These tests are specific to the KZG API required for implementing EIP-4844 \ No newline at end of file diff --git a/tests/generators/kzg_4844/main.py b/tests/generators/kzg_4844/main.py new file mode 100644 index 0000000000..616e2cc461 --- /dev/null +++ b/tests/generators/kzg_4844/main.py @@ -0,0 +1,579 @@ +""" +KZG 4844 test vectors generator +""" + +from hashlib import sha256 +from typing import Tuple, Iterable, Any, Callable, Dict + +from eth_utils import ( + encode_hex, + int_to_big_endian, +) + +from eth2spec.utils import bls +from eth2spec.test.helpers.constants import DENEB +from eth2spec.test.helpers.typing import SpecForkName +from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing +from eth2spec.deneb import spec + + +def expect_exception(func, *args): + try: + func(*args) + except Exception: + pass + else: + raise Exception("should have raised exception") + + +def field_element_bytes(x): + return int.to_bytes(x % spec.BLS_MODULUS, 32, "little") + + +def encode_hex_list(a): + return [encode_hex(x) for x in a] + + +def bls_add_one(x): + """ + Adds "one" (actually bls.G1()) to a compressed group element. + Useful to compute definitely incorrect proofs. 
+ """ + return bls.G1_to_bytes48( + bls.add(bls.bytes48_to_G1(x), bls.G1()) + ) + + +def evaluate_blob_at(blob, z): + return field_element_bytes( + spec.evaluate_polynomial_in_evaluation_form(spec.blob_to_polynomial(blob), spec.bytes_to_bls_field(z)) + ) + + +G1 = bls.G1_to_bytes48(bls.G1()) +P1_NOT_IN_G1 = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + + "0123456789abcdef0123456789abcdef0123456789abcdef") +P1_NOT_ON_CURVE = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + + "0123456789abcdef0123456789abcdef0123456789abcde0") +BLS_MODULUS_BYTES = spec.BLS_MODULUS.to_bytes(32, spec.ENDIANNESS) + +BLOB_ALL_ZEROS = spec.Blob() +BLOB_RANDOM_VALID1 = spec.Blob(b''.join([field_element_bytes(pow(2, n + 256, spec.BLS_MODULUS)) for n in range(4096)])) +BLOB_RANDOM_VALID2 = spec.Blob(b''.join([field_element_bytes(pow(3, n + 256, spec.BLS_MODULUS)) for n in range(4096)])) +BLOB_RANDOM_VALID3 = spec.Blob(b''.join([field_element_bytes(pow(5, n + 256, spec.BLS_MODULUS)) for n in range(4096)])) +BLOB_ALL_MODULUS_MINUS_ONE = spec.Blob(b''.join([field_element_bytes(spec.BLS_MODULUS - 1) for n in range(4096)])) +BLOB_ALMOST_ZERO = spec.Blob(b''.join([field_element_bytes(1 if n == 3211 else 0) for n in range(4096)])) +BLOB_INVALID = spec.Blob(b'\xFF' * 4096 * 32) +BLOB_INVALID_CLOSE = spec.Blob(b''.join( + [BLS_MODULUS_BYTES if n == 2111 else field_element_bytes(0) for n in range(4096)] +)) + +VALID_BLOBS = [BLOB_ALL_ZEROS, BLOB_RANDOM_VALID1, BLOB_RANDOM_VALID2, + BLOB_RANDOM_VALID3, BLOB_ALL_MODULUS_MINUS_ONE, BLOB_ALMOST_ZERO] +INVALID_BLOBS = [BLOB_INVALID, BLOB_INVALID_CLOSE] +VALID_ZS = [field_element_bytes(x) for x in [0, 1, 2, pow(5, 1235, spec.BLS_MODULUS), + spec.BLS_MODULUS - 1, spec.ROOTS_OF_UNITY[1]]] +INVALID_ZS = [x.to_bytes(32, spec.ENDIANNESS) for x in [spec.BLS_MODULUS, 2**256 - 1, 2**256 - 2**128]] + + +def hash(x): + return sha256(x).digest() + + +def int_to_hex(n: int, byte_length: int = None) -> str: + byte_value = 
int_to_big_endian(n) + if byte_length: + byte_value = byte_value.rjust(byte_length, b'\x00') + return encode_hex(byte_value) + + +def case01_blob_to_kzg_commitment(): + # Valid cases + for blob in VALID_BLOBS: + commitment = spec.blob_to_kzg_commitment(blob) + identifier = f'{encode_hex(hash(blob))}' + yield f'blob_to_kzg_commitment_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + }, + 'output': encode_hex(commitment) + } + + # Edge case: Invalid blobs + for blob in INVALID_BLOBS: + identifier = f'{encode_hex(hash(blob))}' + expect_exception(spec.blob_to_kzg_commitment, blob) + yield f'blob_to_kzg_commitment_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob) + }, + 'output': None + } + + +def case02_compute_kzg_proof(): + # Valid cases + for blob in VALID_BLOBS: + for z in VALID_ZS: + proof = spec.compute_kzg_proof(blob, z) + identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' + yield f'compute_kzg_proof_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + 'z': encode_hex(z), + }, + 'output': encode_hex(proof) + } + + # Edge case: Invalid blobs + for blob in INVALID_BLOBS: + z = VALID_ZS[0] + expect_exception(spec.compute_kzg_proof, blob, z) + identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' + yield f'compute_kzg_proof_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + 'z': encode_hex(z), + }, + 'output': None + } + + # Edge case: Invalid z + for z in INVALID_ZS: + blob = VALID_BLOBS[4] + expect_exception(spec.compute_kzg_proof, blob, z) + identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' + yield f'compute_kzg_proof_case_invalid_z_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + 'z': encode_hex(z), + }, + 'output': None + } + + +def case03_verify_kzg_proof(): + # Valid 
cases + for blob in VALID_BLOBS: + for z in VALID_ZS: + proof = spec.compute_kzg_proof(blob, z) + commitment = spec.blob_to_kzg_commitment(blob) + y = evaluate_blob_at(blob, z) + assert spec.verify_kzg_proof(commitment, z, y, proof) + identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' + yield f'verify_kzg_proof_case_correct_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': True + } + + # Incorrect proofs + for blob in VALID_BLOBS: + for z in VALID_ZS: + proof = bls_add_one(spec.compute_kzg_proof(blob, z)) + commitment = spec.blob_to_kzg_commitment(blob) + y = evaluate_blob_at(blob, z) + assert not spec.verify_kzg_proof(commitment, z, y, proof) + identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' + yield f'verify_kzg_proof_case_incorrect_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': False + } + + # Edge case: Invalid z + for z in INVALID_ZS: + blob, validz = VALID_BLOBS[4], VALID_ZS[1] + proof = spec.compute_kzg_proof(blob, validz) + commitment = spec.blob_to_kzg_commitment(blob) + y = VALID_ZS[3] + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' + yield f'verify_kzg_proof_case_invalid_z_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid y + blob, z = VALID_BLOBS[1], VALID_ZS[1] + proof = spec.compute_kzg_proof(blob, z) + commitment = spec.blob_to_kzg_commitment(blob) + y = INVALID_ZS[0] + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + yield 'verify_kzg_proof_case_invalid_y', { + 
'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid proof, not in G1 + blob, z = VALID_BLOBS[2], VALID_ZS[0] + proof = P1_NOT_IN_G1 + commitment = spec.blob_to_kzg_commitment(blob) + y = VALID_ZS[1] + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + yield 'verify_kzg_proof_case_proof_not_in_G1', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid proof, not on curve + blob, z = VALID_BLOBS[3], VALID_ZS[1] + proof = P1_NOT_ON_CURVE + commitment = spec.blob_to_kzg_commitment(blob) + y = VALID_ZS[1] + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + yield 'verify_kzg_proof_case_proof_not_on_curve', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid commitment, not in G1 + blob, z = VALID_BLOBS[4], VALID_ZS[3] + proof = spec.compute_kzg_proof(blob, z) + commitment = P1_NOT_IN_G1 + y = VALID_ZS[2] + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + yield 'verify_kzg_proof_case_commitment_not_in_G1', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid commitment, not on curve + blob, z = VALID_BLOBS[1], VALID_ZS[4] + proof = spec.compute_kzg_proof(blob, z) + commitment = P1_NOT_ON_CURVE + y = VALID_ZS[3] + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + yield 'verify_kzg_proof_case_commitment_not_on_curve', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + + +def case04_compute_blob_kzg_proof(): 
+ # Valid cases + for blob in VALID_BLOBS: + proof = spec.compute_blob_kzg_proof(blob) + identifier = f'{encode_hex(hash(blob))}' + yield f'compute_blob_kzg_proof_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + }, + 'output': encode_hex(proof) + } + + # Edge case: Invalid blob + for blob in INVALID_BLOBS: + expect_exception(spec.compute_blob_kzg_proof, blob) + identifier = f'{encode_hex(hash(blob))}' + yield f'compute_blob_kzg_proof_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + }, + 'output': None + } + + +def case05_verify_blob_kzg_proof(): + # Valid cases + for blob in VALID_BLOBS: + proof = spec.compute_blob_kzg_proof(blob) + commitment = spec.blob_to_kzg_commitment(blob) + assert spec.verify_blob_kzg_proof(blob, commitment, proof) + identifier = f'{encode_hex(hash(blob))}' + yield f'verify_blob_kzg_proof_case_correct_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': True + } + + # Incorrect proofs + for blob in VALID_BLOBS: + proof = bls_add_one(spec.compute_blob_kzg_proof(blob)) + commitment = spec.blob_to_kzg_commitment(blob) + assert not spec.verify_blob_kzg_proof(blob, commitment, proof) + identifier = f'{encode_hex(hash(blob))}' + yield f'verify_blob_kzg_proof_case_incorrect_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': False + } + + # Edge case: Invalid proof, not in G1 + blob = VALID_BLOBS[2] + proof = P1_NOT_IN_G1 + commitment = G1 + expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) + yield 'verify_blob_kzg_proof_case_proof_not_in_G1', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': 
encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid proof, not on curve + blob = VALID_BLOBS[1] + proof = P1_NOT_ON_CURVE + commitment = G1 + expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) + yield 'verify_blob_kzg_proof_case_proof_not_on_curve', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid commitment, not in G1 + blob = VALID_BLOBS[0] + proof = G1 + commitment = P1_NOT_IN_G1 + expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) + yield 'verify_blob_kzg_proof_case_commitment_not_in_G1', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid commitment, not on curve + blob = VALID_BLOBS[2] + proof = G1 + commitment = P1_NOT_ON_CURVE + expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) + yield 'verify_blob_kzg_proof_case_commitment_not_on_curve', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid blob + for blob in INVALID_BLOBS: + proof = G1 + commitment = G1 + expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) + identifier = f'{encode_hex(hash(blob))}' + yield f'verify_blob_kzg_proof_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': None + } + + +def case06_verify_blob_kzg_proof_batch(): + # Valid cases + proofs = [] + commitments = [] + for blob in VALID_BLOBS: + proofs.append(spec.compute_blob_kzg_proof(blob)) + commitments.append(spec.blob_to_kzg_commitment(blob)) + + for i in range(len(proofs)): + assert spec.verify_blob_kzg_proof_batch(VALID_BLOBS[:i], commitments[:i], proofs[:i]) + 
identifier = f'{encode_hex(hash(b"".join(VALID_BLOBS[:i])))}' + yield f'verify_blob_kzg_proof_batch_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS[:i]), + 'commitments': encode_hex_list(commitments[:i]), + 'proofs': encode_hex_list(proofs[:i]), + }, + 'output': True + } + + # Incorrect proof + proofs_incorrect = [bls_add_one(proofs[0])] + proofs[1:] + assert not spec.verify_blob_kzg_proof_batch(VALID_BLOBS, commitments, proofs_incorrect) + yield 'verify_blob_kzg_proof_batch_case_invalid_proof', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS), + 'commitments': encode_hex_list(commitments), + 'proofs': encode_hex_list(proofs_incorrect), + }, + 'output': False + } + + # Edge case: Invalid proof, not in G1 + proofs_invalid_notG1 = [P1_NOT_IN_G1] + proofs[1:] + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_notG1) + yield 'verify_blob_kzg_proof_batch_case_proof_not_in_G1', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS), + 'commitments': encode_hex_list(commitments), + 'proofs': encode_hex_list(proofs_invalid_notG1), + }, + 'output': None + } + + # Edge case: Invalid proof, not on curve + proofs_invalid_notCurve = proofs[:1] + [P1_NOT_ON_CURVE] + proofs[2:] + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_notCurve) + yield 'verify_blob_kzg_proof_batch_case_proof_not_on_curve', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS), + 'commitments': encode_hex_list(commitments), + 'proofs': encode_hex_list(proofs_invalid_notCurve), + }, + 'output': None + } + + # Edge case: Invalid commitment, not in G1 + commitments_invalid_notG1 = commitments[:2] + [P1_NOT_IN_G1] + commitments[3:] + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, commitments_invalid_notG1) + yield 'verify_blob_kzg_proof_batch_case_commitment_not_in_G1', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS), + 
'commitments': encode_hex_list(commitments_invalid_notG1), + 'proofs': encode_hex_list(proofs), + }, + 'output': None + } + + # Edge case: Invalid commitment, not on curve + commitments_invalid_notCurve = commitments[:3] + [P1_NOT_ON_CURVE] + commitments[4:] + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, commitments_invalid_notCurve) + yield 'verify_blob_kzg_proof_batch_case_not_on_curve', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS), + 'commitments': encode_hex_list(commitments_invalid_notCurve), + 'proofs': encode_hex_list(proofs), + }, + 'output': None + } + + # Edge case: Invalid blob + blobs_invalid = VALID_BLOBS[:4] + [BLOB_INVALID] + VALID_BLOBS[5:] + expect_exception(spec.verify_blob_kzg_proof_batch, blobs_invalid, commitments, proofs) + yield 'verify_blob_kzg_proof_batch_case_invalid_blob', { + 'input': { + 'blobs': encode_hex_list(blobs_invalid), + 'commitments': encode_hex_list(commitments), + 'proofs': encode_hex_list(proofs), + }, + 'output': None + } + + # Edge case: Blob length different + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS[:-1], commitments, proofs) + yield 'verify_blob_kzg_proof_batch_case_blob_length_different', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS[:-1]), + 'commitments': encode_hex_list(commitments), + 'proofs': encode_hex_list(proofs), + }, + 'output': None + } + + # Edge case: Commitment length different + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments[:-1], proofs) + yield 'verify_blob_kzg_proof_batch_case_commitment_length_different', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS), + 'commitments': encode_hex_list(commitments[:-1]), + 'proofs': encode_hex_list(proofs), + }, + 'output': None + } + + # Edge case: Proof length different + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs[:-1]) + yield 'verify_blob_kzg_proof_batch_case_proof_length_different', { + 'input': { + 'blobs': 
encode_hex_list(VALID_BLOBS), + 'commitments': encode_hex_list(commitments), + 'proofs': encode_hex_list(proofs[:-1]), + }, + 'output': None + } + + +def create_provider(fork_name: SpecForkName, + handler_name: str, + test_case_fn: Callable[[], Iterable[Tuple[str, Dict[str, Any]]]]) -> gen_typing.TestProvider: + + def prepare_fn() -> None: + # Nothing to load / change in spec. Maybe in future forks. + # Put the tests into the general config category, to not require any particular configuration. + return + + def cases_fn() -> Iterable[gen_typing.TestCase]: + for data in test_case_fn(): + (case_name, case_content) = data + yield gen_typing.TestCase( + fork_name=fork_name, + preset_name='general', + runner_name='kzg', + handler_name=handler_name, + suite_name='small', + case_name=case_name, + case_fn=lambda: [('data', 'data', case_content)] + ) + + return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn) + + +if __name__ == "__main__": + bls.use_arkworks() + gen_runner.run_generator("kzg", [ + # DENEB + create_provider(DENEB, 'blob_to_kzg_commitment', case01_blob_to_kzg_commitment), + create_provider(DENEB, 'compute_kzg_proof', case02_compute_kzg_proof), + create_provider(DENEB, 'verify_kzg_proof', case03_verify_kzg_proof), + create_provider(DENEB, 'compute_blob_kzg_proof', case04_compute_blob_kzg_proof), + create_provider(DENEB, 'verify_blob_kzg_proof', case05_verify_blob_kzg_proof), + create_provider(DENEB, 'verify_blob_kzg_proof_batch', case06_verify_blob_kzg_proof_batch), + ]) diff --git a/tests/generators/kzg_4844/requirements.txt b/tests/generators/kzg_4844/requirements.txt new file mode 100644 index 0000000000..1822486863 --- /dev/null +++ b/tests/generators/kzg_4844/requirements.txt @@ -0,0 +1,2 @@ +pytest>=4.4 +../../../[generator] From 1b4840c96704a1f1bcaa9d37df4d9c368ad2d047 Mon Sep 17 00:00:00 2001 From: kevaundray Date: Sat, 4 Mar 2023 19:20:01 +0000 Subject: [PATCH 37/73] Fix comment for `evaluate_polynomial_in_evaluation_form` to reflect 
that it can now also be used in the domain --- specs/deneb/polynomial-commitments.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 7b65b44b62..1b9de42a3b 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -303,8 +303,10 @@ def compute_powers(x: BLSFieldElement, n: uint64) -> Sequence[BLSFieldElement]: def evaluate_polynomial_in_evaluation_form(polynomial: Polynomial, z: BLSFieldElement) -> BLSFieldElement: """ - Evaluate a polynomial (in evaluation form) at an arbitrary point ``z`` that is not in the domain. - Uses the barycentric formula: + Evaluate a polynomial (in evaluation form) at an arbitrary point ``z``. + - When ``z`` is in the domain, the evaluation can be found by indexing the polynomial at the + position that ``z`` is in the domain. + - When ``z`` is not in the domain, the barycentric formula is used: f(z) = (z**WIDTH - 1) / WIDTH * sum_(i=0)^WIDTH (f(DOMAIN[i]) * DOMAIN[i]) / (z - DOMAIN[i]) """ width = len(polynomial) From ca8a51fcf9b6b14135126d82e687010b0b4a57d5 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Mon, 6 Mar 2023 22:56:17 +0000 Subject: [PATCH 38/73] More CI tests for polynomial commitments --- .../test_polynomial_commitments.py | 98 +++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py index 67dce5c5b3..b4904580ba 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py @@ -9,6 +9,24 @@ get_poly_in_both_forms, eval_poly_in_coeff_form, ) +from eth2spec.utils import bls + + +BLS_MODULUS = bls.curve_order + 
+ +def bls_add_one(x): + """ + Adds "one" (actually bls.G1()) to a compressed group element. + Useful to compute definitely incorrect proofs. + """ + return bls.G1_to_bytes48( + bls.add(bls.bytes48_to_G1(x), bls.G1()) + ) + + +def field_element_bytes(x): + return int.to_bytes(x % BLS_MODULUS, 32, "little") @with_deneb_and_later @@ -18,12 +36,53 @@ def test_verify_kzg_proof(spec, state): blob = get_sample_blob(spec) commitment = spec.blob_to_kzg_commitment(blob) polynomial = spec.blob_to_polynomial(blob) + proof = spec.compute_kzg_proof(blob, field_element_bytes(x)) + + y = spec.evaluate_polynomial_in_evaluation_form(polynomial, x) + assert spec.verify_kzg_proof(commitment, field_element_bytes(x), field_element_bytes(y), proof) + + +@with_deneb_and_later +@spec_state_test +def test_verify_kzg_proof_incorrect_proof(spec, state): + x = 3465 + blob = get_sample_blob(spec) + commitment = spec.blob_to_kzg_commitment(blob) + polynomial = spec.blob_to_polynomial(blob) + proof = spec.compute_kzg_proof(blob, field_element_bytes(x)) + proof = bls_add_one(proof) + + y = spec.evaluate_polynomial_in_evaluation_form(polynomial, x) + assert not spec.verify_kzg_proof(commitment, field_element_bytes(x), field_element_bytes(y), proof) + + +@with_deneb_and_later +@spec_state_test +def test_verify_kzg_proof_impl(spec, state): + x = spec.BLS_MODULUS - 1 + blob = get_sample_blob(spec) + commitment = spec.blob_to_kzg_commitment(blob) + polynomial = spec.blob_to_polynomial(blob) proof = spec.compute_kzg_proof_impl(polynomial, x) y = spec.evaluate_polynomial_in_evaluation_form(polynomial, x) assert spec.verify_kzg_proof_impl(commitment, x, y, proof) +@with_deneb_and_later +@spec_state_test +def test_verify_kzg_proof_impl_incorrect_proof(spec, state): + x = 324561 + blob = get_sample_blob(spec) + commitment = spec.blob_to_kzg_commitment(blob) + polynomial = spec.blob_to_polynomial(blob) + proof = spec.compute_kzg_proof_impl(polynomial, x) + proof = bls_add_one(proof) + + y = 
spec.evaluate_polynomial_in_evaluation_form(polynomial, x) + assert not spec.verify_kzg_proof_impl(commitment, x, y, proof) + + @with_deneb_and_later @spec_state_test def test_barycentric_outside_domain(spec, state): @@ -107,3 +166,42 @@ def test_compute_kzg_proof_within_domain(spec, state): y = spec.evaluate_polynomial_in_evaluation_form(polynomial, z) assert spec.verify_kzg_proof_impl(commitment, z, y, proof) + + +@with_deneb_and_later +@spec_state_test +def test_verify_blob_kzg_proof(spec, state): + """ + Create and verify KZG proof that p(z) == y + where z is in the domain of our KZG scheme (i.e. a relevant root of unity). + """ + blob = get_sample_blob(spec) + commitment = spec.blob_to_kzg_commitment(blob) + proof = spec.compute_blob_kzg_proof(blob) + + assert spec.verify_blob_kzg_proof(blob, commitment, proof) + + +@with_deneb_and_later +@spec_state_test +def test_verify_blob_kzg_proof_incorrect_proof(spec, state): + """ + Create and verify KZG proof that p(z) == y + where z is in the domain of our KZG scheme (i.e. a relevant root of unity). 
+ """ + blob = get_sample_blob(spec) + commitment = spec.blob_to_kzg_commitment(blob) + proof = spec.compute_blob_kzg_proof(blob) + proof = bls_add_one(proof) + + assert not spec.verify_blob_kzg_proof(blob, commitment, proof) + + +@with_deneb_and_later +@spec_state_test +def test_validate_kzg_g1(spec, state): + """ + Verify that `validate_kzg_g1` allows the neutral element in G1 + """ + + spec.validate_kzg_g1(bls.G1_to_bytes48(bls.Z1())) From 661cca59c065af32cd6ede1ef673be6093ed26e1 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 7 Mar 2023 16:52:09 +0800 Subject: [PATCH 39/73] Import `curve_order as BLS_MODULUS` --- .../polynomial_commitments/test_polynomial_commitments.py | 2 +- tests/core/pyspec/eth2spec/utils/bls.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py index b4904580ba..9b60f7e7c4 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py @@ -12,7 +12,7 @@ from eth2spec.utils import bls -BLS_MODULUS = bls.curve_order +BLS_MODULUS = bls.BLS_MODULUS def bls_add_one(x): diff --git a/tests/core/pyspec/eth2spec/utils/bls.py b/tests/core/pyspec/eth2spec/utils/bls.py index 7ea22be46d..7dd9597ebe 100644 --- a/tests/core/pyspec/eth2spec/utils/bls.py +++ b/tests/core/pyspec/eth2spec/utils/bls.py @@ -12,6 +12,7 @@ FQ12 as py_ecc_GT, ) from py_ecc.bls.g2_primitives import ( # noqa: F401 + curve_order as BLS_MODULUS, G1_to_pubkey as py_ecc_G1_to_bytes48, pubkey_to_G1 as py_ecc_bytes48_to_G1, G2_to_signature as py_ecc_G2_to_bytes96, From 81ab7de44a6815f388f4af0292ce9e9f11544197 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Tue, 7 Mar 2023 10:54:17 +0000 Subject: [PATCH 
40/73] Add unit tests for validate_kzg_g1 and bytes_to_bls_field --- .../test_polynomial_commitments.py | 101 ++++++++++++++++-- 1 file changed, 94 insertions(+), 7 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py index 9b60f7e7c4..a13ce68518 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py @@ -3,6 +3,7 @@ from eth2spec.test.context import ( spec_state_test, with_deneb_and_later, + expect_assertion_error ) from eth2spec.test.helpers.sharding import ( get_sample_blob, @@ -10,9 +11,13 @@ eval_poly_in_coeff_form, ) from eth2spec.utils import bls +from eth2spec.utils.bls import BLS_MODULUS - -BLS_MODULUS = bls.BLS_MODULUS +G1 = bls.G1_to_bytes48(bls.G1()) +P1_NOT_IN_G1 = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + + "0123456789abcdef0123456789abcdef0123456789abcdef") +P1_NOT_ON_CURVE = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + + "0123456789abcdef0123456789abcdef0123456789abcde0") def bls_add_one(x): @@ -32,6 +37,9 @@ def field_element_bytes(x): @with_deneb_and_later @spec_state_test def test_verify_kzg_proof(spec, state): + """ + Test the wrapper functions (taking bytes arguments) for computing and verifying KZG proofs. + """ x = 3 blob = get_sample_blob(spec) commitment = spec.blob_to_kzg_commitment(blob) @@ -45,6 +53,9 @@ def test_verify_kzg_proof(spec, state): @with_deneb_and_later @spec_state_test def test_verify_kzg_proof_incorrect_proof(spec, state): + """ + Test the wrapper function `verify_kzg_proof` fails on an incorrect proof. 
+ """ x = 3465 blob = get_sample_blob(spec) commitment = spec.blob_to_kzg_commitment(blob) @@ -59,6 +70,9 @@ def test_verify_kzg_proof_incorrect_proof(spec, state): @with_deneb_and_later @spec_state_test def test_verify_kzg_proof_impl(spec, state): + """ + Test the implementation functions (taking field element arguments) for computing and verifying KZG proofs. + """ x = spec.BLS_MODULUS - 1 blob = get_sample_blob(spec) commitment = spec.blob_to_kzg_commitment(blob) @@ -72,6 +86,9 @@ def test_verify_kzg_proof_impl(spec, state): @with_deneb_and_later @spec_state_test def test_verify_kzg_proof_impl_incorrect_proof(spec, state): + """ + Test the implementation function `verify_kzg_proof` fails on an incorrect proof. + """ x = 324561 blob = get_sample_blob(spec) commitment = spec.blob_to_kzg_commitment(blob) @@ -172,8 +189,7 @@ def test_compute_kzg_proof_within_domain(spec, state): @spec_state_test def test_verify_blob_kzg_proof(spec, state): """ - Create and verify KZG proof that p(z) == y - where z is in the domain of our KZG scheme (i.e. a relevant root of unity). + Test the functions to compute and verify a blob KZG proof """ blob = get_sample_blob(spec) commitment = spec.blob_to_kzg_commitment(blob) @@ -186,8 +202,7 @@ def test_verify_blob_kzg_proof(spec, state): @spec_state_test def test_verify_blob_kzg_proof_incorrect_proof(spec, state): """ - Create and verify KZG proof that p(z) == y - where z is in the domain of our KZG scheme (i.e. a relevant root of unity). 
+ Check that `verify_blob_kzg_proof` fails on an incorrect proof """ blob = get_sample_blob(spec) commitment = spec.blob_to_kzg_commitment(blob) @@ -199,9 +214,81 @@ def test_verify_blob_kzg_proof_incorrect_proof(spec, state): @with_deneb_and_later @spec_state_test -def test_validate_kzg_g1(spec, state): +def test_validate_kzg_g1_generator(spec, state): + """ + Verify that `validate_kzg_g1` allows the generator G1 + """ + + spec.validate_kzg_g1(bls.G1_to_bytes48(bls.G1())) + + +@with_deneb_and_later +@spec_state_test +def test_validate_kzg_g1_neutral_element(spec, state): """ Verify that `validate_kzg_g1` allows the neutral element in G1 """ spec.validate_kzg_g1(bls.G1_to_bytes48(bls.Z1())) + + +@with_deneb_and_later +@spec_state_test +def test_validate_kzg_g1_not_in_g1(spec, state): + """ + Verify that `validate_kzg_g1` fails on point not in G1 + """ + + expect_assertion_error(lambda: spec.validate_kzg_g1(P1_NOT_IN_G1)) + + +@with_deneb_and_later +@spec_state_test +def test_validate_kzg_g1_not_on_curve(spec, state): + """ + Verify that `validate_kzg_g1` fails on point not in G1 + """ + + expect_assertion_error(lambda: spec.validate_kzg_g1(P1_NOT_ON_CURVE)) + + +@with_deneb_and_later +@spec_state_test +def test_bytes_to_bls_field_zero(spec, state): + """ + Verify that `bytes_to_bls_field` handles zero + """ + + spec.bytes_to_bls_field(b"\0" * 32) + + +@with_deneb_and_later +@spec_state_test +def test_bytes_to_bls_field_modulus_minus_one(spec, state): + """ + Verify that `bytes_to_bls_field` handles modulus minus one + """ + + spec.bytes_to_bls_field((BLS_MODULUS - 1).to_bytes(spec.BYTES_PER_FIELD_ELEMENT, spec.ENDIANNESS)) + + +@with_deneb_and_later +@spec_state_test +def test_bytes_to_bls_field_modulus(spec, state): + """ + Verify that `bytes_to_bls_field` fails on BLS modulus + """ + + expect_assertion_error(lambda: spec.bytes_to_bls_field( + BLS_MODULUS.to_bytes(spec.BYTES_PER_FIELD_ELEMENT, spec.ENDIANNESS) + )) + + +@with_deneb_and_later +@spec_state_test +def 
test_bytes_to_bls_field_max(spec, state): + """ + Verify that `bytes_to_bls_field` fails on 2**256 - 1 + """ + + expect_assertion_error(lambda: spec.bytes_to_bls_field(b"\xFF" * 32)) From cce82b4938c6f05fc137e07739f5fdcca6ce71c7 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Tue, 7 Mar 2023 10:56:16 +0000 Subject: [PATCH 41/73] Remove spec. for getting BLS_MODULUS --- .../polynomial_commitments/test_polynomial_commitments.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py index a13ce68518..91115a299a 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py @@ -73,7 +73,7 @@ def test_verify_kzg_proof_impl(spec, state): """ Test the implementation functions (taking field element arguments) for computing and verifying KZG proofs. 
""" - x = spec.BLS_MODULUS - 1 + x = BLS_MODULUS - 1 blob = get_sample_blob(spec) commitment = spec.blob_to_kzg_commitment(blob) polynomial = spec.blob_to_polynomial(blob) @@ -119,9 +119,9 @@ def test_barycentric_outside_domain(spec, state): for _ in range(n_samples): # Get a random evaluation point and make sure it's not a root of unity - z = rng.randint(0, spec.BLS_MODULUS - 1) + z = rng.randint(0, BLS_MODULUS - 1) while z in roots_of_unity_brp: - z = rng.randint(0, spec.BLS_MODULUS - 1) + z = rng.randint(0, BLS_MODULUS - 1) # Get p(z) by evaluating poly in coefficient form p_z_coeff = eval_poly_in_coeff_form(spec, poly_coeff, z) From 15033d28b9f0f3a05a26de6ec093d55a7860a305 Mon Sep 17 00:00:00 2001 From: dankrad Date: Tue, 7 Mar 2023 17:50:56 +0000 Subject: [PATCH 42/73] Modify compute_[blob_]kzg_proof to remove superfluous computations (#3280) Add parameter `commitment` to `compute_blob_kzg_proof` and output `y` to `compute_kzg_proof` --- specs/deneb/polynomial-commitments.md | 17 ++--- specs/deneb/validator.md | 2 +- .../test_polynomial_commitments.py | 6 +- tests/formats/kzg/compute_blob_kzg_proof.md | 2 + tests/formats/kzg/compute_kzg_proof.md | 5 +- tests/generators/kzg_4844/main.py | 62 +++++++++++++------ 6 files changed, 62 insertions(+), 32 deletions(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 1b9de42a3b..24ae08822b 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -431,14 +431,15 @@ def verify_kzg_proof_batch(commitments: Sequence[KZGCommitment], #### `compute_kzg_proof` ```python -def compute_kzg_proof(blob: Blob, z: Bytes32) -> KZGProof: +def compute_kzg_proof(blob: Blob, z: Bytes32) -> Tuple[KZGProof, Bytes32]: """ Compute KZG proof at point `z` for the polynomial represented by `blob`. Do this by computing the quotient polynomial in evaluation form: q(x) = (p(x) - p(z)) / (x - z). Public method. 
""" polynomial = blob_to_polynomial(blob) - return compute_kzg_proof_impl(polynomial, bytes_to_bls_field(z)) + proof, y = compute_kzg_proof_impl(polynomial, bytes_to_bls_field(z)) + return proof, y.to_bytes(BYTES_PER_FIELD_ELEMENT, ENDIANNESS) ``` #### `compute_quotient_eval_within_domain` @@ -472,7 +473,7 @@ def compute_quotient_eval_within_domain(z: BLSFieldElement, #### `compute_kzg_proof_impl` ```python -def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> KZGProof: +def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> Tuple[KZGProof, BLSFieldElement]: """ Helper function for `compute_kzg_proof()` and `compute_blob_kzg_proof()`. """ @@ -496,21 +497,23 @@ def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> KZGPro # Compute: q(x_i) = (p(x_i) - p(z)) / (x_i - z). quotient_polynomial[i] = div(a, b) - return KZGProof(g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), quotient_polynomial)) + return KZGProof(g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), quotient_polynomial)), y ``` #### `compute_blob_kzg_proof` ```python -def compute_blob_kzg_proof(blob: Blob) -> KZGProof: +def compute_blob_kzg_proof(blob: Blob, commitment_bytes: Bytes48) -> KZGProof: """ Given a blob, return the KZG proof that is used to verify it against the commitment. + This method does not verify that the commitment is correct with respect to `blob`. Public method. 
""" - commitment = blob_to_kzg_commitment(blob) + commitment = bytes_to_kzg_commitment(commitment_bytes) polynomial = blob_to_polynomial(blob) evaluation_challenge = compute_challenge(blob, commitment) - return compute_kzg_proof_impl(polynomial, evaluation_challenge) + proof, _ = compute_kzg_proof_impl(polynomial, evaluation_challenge) + return proof ``` #### `verify_blob_kzg_proof` diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index 77edb957f8..50ab832e00 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -96,7 +96,7 @@ def get_blob_sidecars(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[Blo block_parent_root=block.parent_root, blob=blob, kzg_commitment=block.body.blob_kzg_commitments[index], - kzg_proof=compute_blob_kzg_proof(blob), + kzg_proof=compute_blob_kzg_proof(blob, block.body.blob_kzg_commitments[index]), ) for index, blob in enumerate(blobs) ] diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py index 67dce5c5b3..e1e67d639a 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py @@ -18,9 +18,8 @@ def test_verify_kzg_proof(spec, state): blob = get_sample_blob(spec) commitment = spec.blob_to_kzg_commitment(blob) polynomial = spec.blob_to_polynomial(blob) - proof = spec.compute_kzg_proof_impl(polynomial, x) + proof, y = spec.compute_kzg_proof_impl(polynomial, x) - y = spec.evaluate_polynomial_in_evaluation_form(polynomial, x) assert spec.verify_kzg_proof_impl(commitment, x, y, proof) @@ -103,7 +102,6 @@ def test_compute_kzg_proof_within_domain(spec, state): roots_of_unity_brp = spec.bit_reversal_permutation(spec.ROOTS_OF_UNITY) for i, z in enumerate(roots_of_unity_brp): - proof = 
spec.compute_kzg_proof_impl(polynomial, z) + proof, y = spec.compute_kzg_proof_impl(polynomial, z) - y = spec.evaluate_polynomial_in_evaluation_form(polynomial, z) assert spec.verify_kzg_proof_impl(commitment, z, y, proof) diff --git a/tests/formats/kzg/compute_blob_kzg_proof.md b/tests/formats/kzg/compute_blob_kzg_proof.md index 512f60ecb3..62fce37231 100644 --- a/tests/formats/kzg/compute_blob_kzg_proof.md +++ b/tests/formats/kzg/compute_blob_kzg_proof.md @@ -9,10 +9,12 @@ The test data is declared in a `data.yaml` file: ```yaml input: blob: Blob -- the data blob + commitment: Bytes48 -- the commitment to the blob output: KZGProof -- The blob KZG proof ``` - `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`. +- `commitment` here is encoded as a string: hexadecimal encoding of `48` bytes, prefixed with `0x`. All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. diff --git a/tests/formats/kzg/compute_kzg_proof.md b/tests/formats/kzg/compute_kzg_proof.md index bba13638f8..0713d50d81 100644 --- a/tests/formats/kzg/compute_kzg_proof.md +++ b/tests/formats/kzg/compute_kzg_proof.md @@ -10,14 +10,15 @@ The test data is declared in a `data.yaml` file: input: blob: Blob -- the data blob representing a polynomial z: Bytes32 -- bytes encoding the BLS field element at which the polynomial should be evaluated -output: KZGProof -- The KZG proof +output: Tuple[KZGProof, Bytes32] -- The KZG proof and the value y = f(z) ``` - `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`. - `z` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`. +- `y` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`. All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. 
## Condition -The `compute_kzg_proof` handler should compute the KZG proof for evaluating the polynomial represented by `blob` at `z`, and the result should match the expected `output`. If the blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element) or `z` is not a valid BLS field element, it should error, i.e. the output should be `null`. +The `compute_kzg_proof` handler should compute the KZG proof as well as the value `y` for evaluating the polynomial represented by `blob` at `z`, and the result should match the expected `output`. If the blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element) or `z` is not a valid BLS field element, it should error, i.e. the output should be `null`. diff --git a/tests/generators/kzg_4844/main.py b/tests/generators/kzg_4844/main.py index 616e2cc461..65f6405e52 100644 --- a/tests/generators/kzg_4844/main.py +++ b/tests/generators/kzg_4844/main.py @@ -115,14 +115,14 @@ def case02_compute_kzg_proof(): # Valid cases for blob in VALID_BLOBS: for z in VALID_ZS: - proof = spec.compute_kzg_proof(blob, z) + proof, y = spec.compute_kzg_proof(blob, z) identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' yield f'compute_kzg_proof_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob), 'z': encode_hex(z), }, - 'output': encode_hex(proof) + 'output': (encode_hex(proof), encode_hex(y)) } # Edge case: Invalid blobs @@ -156,9 +156,8 @@ def case03_verify_kzg_proof(): # Valid cases for blob in VALID_BLOBS: for z in VALID_ZS: - proof = spec.compute_kzg_proof(blob, z) + proof, y = spec.compute_kzg_proof(blob, z) commitment = spec.blob_to_kzg_commitment(blob) - y = evaluate_blob_at(blob, z) assert spec.verify_kzg_proof(commitment, z, y, proof) identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' yield f'verify_kzg_proof_case_correct_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { @@ 
-174,9 +173,9 @@ def case03_verify_kzg_proof(): # Incorrect proofs for blob in VALID_BLOBS: for z in VALID_ZS: - proof = bls_add_one(spec.compute_kzg_proof(blob, z)) + proof_orig, y = spec.compute_kzg_proof(blob, z) + proof = bls_add_one(proof_orig) commitment = spec.blob_to_kzg_commitment(blob) - y = evaluate_blob_at(blob, z) assert not spec.verify_kzg_proof(commitment, z, y, proof) identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' yield f'verify_kzg_proof_case_incorrect_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { @@ -192,9 +191,8 @@ def case03_verify_kzg_proof(): # Edge case: Invalid z for z in INVALID_ZS: blob, validz = VALID_BLOBS[4], VALID_ZS[1] - proof = spec.compute_kzg_proof(blob, validz) + proof, y = spec.compute_kzg_proof(blob, validz) commitment = spec.blob_to_kzg_commitment(blob) - y = VALID_ZS[3] expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' yield f'verify_kzg_proof_case_invalid_z_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { @@ -209,7 +207,7 @@ def case03_verify_kzg_proof(): # Edge case: Invalid y blob, z = VALID_BLOBS[1], VALID_ZS[1] - proof = spec.compute_kzg_proof(blob, z) + proof, _ = spec.compute_kzg_proof(blob, z) commitment = spec.blob_to_kzg_commitment(blob) y = INVALID_ZS[0] expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) @@ -257,9 +255,8 @@ def case03_verify_kzg_proof(): # Edge case: Invalid commitment, not in G1 blob, z = VALID_BLOBS[4], VALID_ZS[3] - proof = spec.compute_kzg_proof(blob, z) + proof, y = spec.compute_kzg_proof(blob, z) commitment = P1_NOT_IN_G1 - y = VALID_ZS[2] expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) yield 'verify_kzg_proof_case_commitment_not_in_G1', { 'input': { @@ -273,9 +270,8 @@ def case03_verify_kzg_proof(): # Edge case: Invalid commitment, not on curve blob, z = VALID_BLOBS[1], VALID_ZS[4] - proof = spec.compute_kzg_proof(blob, z) + proof, y = spec.compute_kzg_proof(blob, z) 
commitment = P1_NOT_ON_CURVE - y = VALID_ZS[3] expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) yield 'verify_kzg_proof_case_commitment_not_on_curve', { 'input': { @@ -291,32 +287,62 @@ def case03_verify_kzg_proof(): def case04_compute_blob_kzg_proof(): # Valid cases for blob in VALID_BLOBS: - proof = spec.compute_blob_kzg_proof(blob) + commitment = spec.blob_to_kzg_commitment(blob) + proof = spec.compute_blob_kzg_proof(blob, commitment) identifier = f'{encode_hex(hash(blob))}' yield f'compute_blob_kzg_proof_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), }, 'output': encode_hex(proof) } # Edge case: Invalid blob for blob in INVALID_BLOBS: - expect_exception(spec.compute_blob_kzg_proof, blob) + commitment = G1 + expect_exception(spec.compute_blob_kzg_proof, blob, commitment) identifier = f'{encode_hex(hash(blob))}' yield f'compute_blob_kzg_proof_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), }, 'output': None } + # Edge case: Invalid commitment, not in G1 + commitment = P1_NOT_IN_G1 + blob = VALID_BLOBS[1] + expect_exception(spec.compute_blob_kzg_proof, blob, commitment) + identifier = f'{encode_hex(hash(blob))}' + yield 'compute_blob_kzg_proof_case_invalid_commitment_not_in_G1', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + }, + 'output': None + } + + # Edge case: Invalid commitment, not on curve + commitment = P1_NOT_ON_CURVE + blob = VALID_BLOBS[1] + expect_exception(spec.compute_blob_kzg_proof, blob, commitment) + identifier = f'{encode_hex(hash(blob))}' + yield 'compute_blob_kzg_proof_case_invalid_commitment_not_on_curve', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + }, + 'output': None + } + def case05_verify_blob_kzg_proof(): # Valid cases for blob in VALID_BLOBS: - proof = 
spec.compute_blob_kzg_proof(blob) commitment = spec.blob_to_kzg_commitment(blob) + proof = spec.compute_blob_kzg_proof(blob, commitment) assert spec.verify_blob_kzg_proof(blob, commitment, proof) identifier = f'{encode_hex(hash(blob))}' yield f'verify_blob_kzg_proof_case_correct_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { @@ -330,8 +356,8 @@ def case05_verify_blob_kzg_proof(): # Incorrect proofs for blob in VALID_BLOBS: - proof = bls_add_one(spec.compute_blob_kzg_proof(blob)) commitment = spec.blob_to_kzg_commitment(blob) + proof = bls_add_one(spec.compute_blob_kzg_proof(blob, commitment)) assert not spec.verify_blob_kzg_proof(blob, commitment, proof) identifier = f'{encode_hex(hash(blob))}' yield f'verify_blob_kzg_proof_case_incorrect_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { @@ -420,8 +446,8 @@ def case06_verify_blob_kzg_proof_batch(): proofs = [] commitments = [] for blob in VALID_BLOBS: - proofs.append(spec.compute_blob_kzg_proof(blob)) commitments.append(spec.blob_to_kzg_commitment(blob)) + proofs.append(spec.compute_blob_kzg_proof(blob, commitments[-1])) for i in range(len(proofs)): assert spec.verify_blob_kzg_proof_batch(VALID_BLOBS[:i], commitments[:i], proofs[:i]) From ccfe576dcc466a129da19d13f0418700af6515ab Mon Sep 17 00:00:00 2001 From: Justin Traglia <95511699+jtraglia@users.noreply.github.com> Date: Tue, 7 Mar 2023 14:56:55 -0700 Subject: [PATCH 43/73] Add KZG tests for invalid input lengths (#3282) --- specs/deneb/polynomial-commitments.md | 32 +++- tests/generators/kzg_4844/main.py | 264 ++++++++++++++++++++++---- 2 files changed, 256 insertions(+), 40 deletions(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 24ae08822b..edf2062b2a 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -72,7 +72,10 @@ Public functions MUST accept raw bytes as input and perform the required cryptog | Name | Value | Notes | | - | - | - | | 
`BLS_MODULUS` | `52435875175126190479447740508185965837690552500527637822603658699938581184513` | Scalar field modulus of BLS12-381 | +| `BYTES_PER_COMMITMENT` | `uint64(48)` | The number of bytes in a KZG commitment | +| `BYTES_PER_PROOF` | `uint64(48)` | The number of bytes in a KZG proof | | `BYTES_PER_FIELD_ELEMENT` | `uint64(32)` | Bytes used to encode a BLS scalar field element | +| `BYTES_PER_BLOB` | `uint64(BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB)` | The number of bytes in a blob | | `G1_POINT_AT_INFINITY` | `Bytes48(b'\xc0' + b'\x00' * 47)` | Serialized form of the point at infinity on the G1 group | @@ -340,6 +343,7 @@ def blob_to_kzg_commitment(blob: Blob) -> KZGCommitment: """ Public method. """ + assert len(blob) == BYTES_PER_BLOB return g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), blob_to_polynomial(blob)) ``` @@ -347,17 +351,22 @@ def blob_to_kzg_commitment(blob: Blob) -> KZGCommitment: ```python def verify_kzg_proof(commitment_bytes: Bytes48, - z: Bytes32, - y: Bytes32, + z_bytes: Bytes32, + y_bytes: Bytes32, proof_bytes: Bytes48) -> bool: """ Verify KZG proof that ``p(z) == y`` where ``p(z)`` is the polynomial represented by ``polynomial_kzg``. Receives inputs as bytes. Public method. 
""" + assert len(commitment_bytes) == BYTES_PER_COMMITMENT + assert len(z_bytes) == BYTES_PER_FIELD_ELEMENT + assert len(y_bytes) == BYTES_PER_FIELD_ELEMENT + assert len(proof_bytes) == BYTES_PER_PROOF + return verify_kzg_proof_impl(bytes_to_kzg_commitment(commitment_bytes), - bytes_to_bls_field(z), - bytes_to_bls_field(y), + bytes_to_bls_field(z_bytes), + bytes_to_bls_field(y_bytes), bytes_to_kzg_proof(proof_bytes)) ``` @@ -431,14 +440,16 @@ def verify_kzg_proof_batch(commitments: Sequence[KZGCommitment], #### `compute_kzg_proof` ```python -def compute_kzg_proof(blob: Blob, z: Bytes32) -> Tuple[KZGProof, Bytes32]: +def compute_kzg_proof(blob: Blob, z_bytes: Bytes32) -> Tuple[KZGProof, Bytes32]: """ Compute KZG proof at point `z` for the polynomial represented by `blob`. Do this by computing the quotient polynomial in evaluation form: q(x) = (p(x) - p(z)) / (x - z). Public method. """ + assert len(blob) == BYTES_PER_BLOB + assert len(z_bytes) == BYTES_PER_FIELD_ELEMENT polynomial = blob_to_polynomial(blob) - proof, y = compute_kzg_proof_impl(polynomial, bytes_to_bls_field(z)) + proof, y = compute_kzg_proof_impl(polynomial, bytes_to_bls_field(z_bytes)) return proof, y.to_bytes(BYTES_PER_FIELD_ELEMENT, ENDIANNESS) ``` @@ -509,6 +520,8 @@ def compute_blob_kzg_proof(blob: Blob, commitment_bytes: Bytes48) -> KZGProof: This method does not verify that the commitment is correct with respect to `blob`. Public method. """ + assert len(blob) == BYTES_PER_BLOB + assert len(commitment_bytes) == BYTES_PER_COMMITMENT commitment = bytes_to_kzg_commitment(commitment_bytes) polynomial = blob_to_polynomial(blob) evaluation_challenge = compute_challenge(blob, commitment) @@ -527,6 +540,10 @@ def verify_blob_kzg_proof(blob: Blob, Public method. 
""" + assert len(blob) == BYTES_PER_BLOB + assert len(commitment_bytes) == BYTES_PER_COMMITMENT + assert len(proof_bytes) == BYTES_PER_PROOF + commitment = bytes_to_kzg_commitment(commitment_bytes) polynomial = blob_to_polynomial(blob) @@ -556,6 +573,9 @@ def verify_blob_kzg_proof_batch(blobs: Sequence[Blob], commitments, evaluation_challenges, ys, proofs = [], [], [], [] for blob, commitment_bytes, proof_bytes in zip(blobs, commitments_bytes, proofs_bytes): + assert len(blob) == BYTES_PER_BLOB + assert len(commitment_bytes) == BYTES_PER_COMMITMENT + assert len(proof_bytes) == BYTES_PER_PROOF commitment = bytes_to_kzg_commitment(commitment_bytes) commitments.append(commitment) polynomial = blob_to_polynomial(blob) diff --git a/tests/generators/kzg_4844/main.py b/tests/generators/kzg_4844/main.py index 65f6405e52..699d1f369a 100644 --- a/tests/generators/kzg_4844/main.py +++ b/tests/generators/kzg_4844/main.py @@ -27,7 +27,11 @@ def expect_exception(func, *args): def field_element_bytes(x): - return int.to_bytes(x % spec.BLS_MODULUS, 32, "little") + return int.to_bytes(x % spec.BLS_MODULUS, 32, spec.ENDIANNESS) + + +def field_element_bytes_unchecked(x): + return int.to_bytes(x, 32, spec.ENDIANNESS) def encode_hex_list(a): @@ -67,13 +71,30 @@ def evaluate_blob_at(blob, z): BLOB_INVALID_CLOSE = spec.Blob(b''.join( [BLS_MODULUS_BYTES if n == 2111 else field_element_bytes(0) for n in range(4096)] )) +BLOB_INVALID_LENGTH_PLUS_ONE = BLOB_RANDOM_VALID1 + b"\x00" +BLOB_INVALID_LENGTH_MINUS_ONE = BLOB_RANDOM_VALID1[:-1] VALID_BLOBS = [BLOB_ALL_ZEROS, BLOB_RANDOM_VALID1, BLOB_RANDOM_VALID2, BLOB_RANDOM_VALID3, BLOB_ALL_MODULUS_MINUS_ONE, BLOB_ALMOST_ZERO] -INVALID_BLOBS = [BLOB_INVALID, BLOB_INVALID_CLOSE] -VALID_ZS = [field_element_bytes(x) for x in [0, 1, 2, pow(5, 1235, spec.BLS_MODULUS), - spec.BLS_MODULUS - 1, spec.ROOTS_OF_UNITY[1]]] -INVALID_ZS = [x.to_bytes(32, spec.ENDIANNESS) for x in [spec.BLS_MODULUS, 2**256 - 1, 2**256 - 2**128]] +INVALID_BLOBS = [BLOB_INVALID, 
BLOB_INVALID_CLOSE, BLOB_INVALID_LENGTH_PLUS_ONE, BLOB_INVALID_LENGTH_MINUS_ONE] + +FE_VALID1 = field_element_bytes(0) +FE_VALID2 = field_element_bytes(1) +FE_VALID3 = field_element_bytes(2) +FE_VALID4 = field_element_bytes(pow(5, 1235, spec.BLS_MODULUS)) +FE_VALID5 = field_element_bytes(spec.BLS_MODULUS - 1) +FE_VALID6 = field_element_bytes(spec.ROOTS_OF_UNITY[1]) +VALID_FIELD_ELEMENTS = [FE_VALID1, FE_VALID2, FE_VALID3, FE_VALID4, FE_VALID5, FE_VALID6] + +FE_INVALID_EQUAL_TO_MODULUS = field_element_bytes_unchecked(spec.BLS_MODULUS) +FE_INVALID_MODULUS_PLUS_ONE = field_element_bytes_unchecked(spec.BLS_MODULUS + 1) +FE_INVALID_UINT256_MAX = field_element_bytes_unchecked(2**256 - 1) +FE_INVALID_UINT256_MID = field_element_bytes_unchecked(2**256 - 2**128) +FE_INVALID_LENGTH_PLUS_ONE = VALID_FIELD_ELEMENTS[0] + b"\x00" +FE_INVALID_LENGTH_MINUS_ONE = VALID_FIELD_ELEMENTS[0][:-1] +INVALID_FIELD_ELEMENTS = [FE_INVALID_EQUAL_TO_MODULUS, FE_INVALID_MODULUS_PLUS_ONE, + FE_INVALID_UINT256_MAX, FE_INVALID_UINT256_MID, + FE_INVALID_LENGTH_PLUS_ONE, FE_INVALID_LENGTH_MINUS_ONE] def hash(x): @@ -114,7 +135,7 @@ def case01_blob_to_kzg_commitment(): def case02_compute_kzg_proof(): # Valid cases for blob in VALID_BLOBS: - for z in VALID_ZS: + for z in VALID_FIELD_ELEMENTS: proof, y = spec.compute_kzg_proof(blob, z) identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' yield f'compute_kzg_proof_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { @@ -127,7 +148,7 @@ def case02_compute_kzg_proof(): # Edge case: Invalid blobs for blob in INVALID_BLOBS: - z = VALID_ZS[0] + z = VALID_FIELD_ELEMENTS[0] expect_exception(spec.compute_kzg_proof, blob, z) identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' yield f'compute_kzg_proof_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { @@ -139,7 +160,7 @@ def case02_compute_kzg_proof(): } # Edge case: Invalid z - for z in INVALID_ZS: + for z in INVALID_FIELD_ELEMENTS: blob = VALID_BLOBS[4] 
expect_exception(spec.compute_kzg_proof, blob, z) identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' @@ -155,7 +176,7 @@ def case02_compute_kzg_proof(): def case03_verify_kzg_proof(): # Valid cases for blob in VALID_BLOBS: - for z in VALID_ZS: + for z in VALID_FIELD_ELEMENTS: proof, y = spec.compute_kzg_proof(blob, z) commitment = spec.blob_to_kzg_commitment(blob) assert spec.verify_kzg_proof(commitment, z, y, proof) @@ -172,7 +193,7 @@ def case03_verify_kzg_proof(): # Incorrect proofs for blob in VALID_BLOBS: - for z in VALID_ZS: + for z in VALID_FIELD_ELEMENTS: proof_orig, y = spec.compute_kzg_proof(blob, z) proof = bls_add_one(proof_orig) commitment = spec.blob_to_kzg_commitment(blob) @@ -189,8 +210,8 @@ def case03_verify_kzg_proof(): } # Edge case: Invalid z - for z in INVALID_ZS: - blob, validz = VALID_BLOBS[4], VALID_ZS[1] + for z in INVALID_FIELD_ELEMENTS: + blob, validz = VALID_BLOBS[4], VALID_FIELD_ELEMENTS[1] proof, y = spec.compute_kzg_proof(blob, validz) commitment = spec.blob_to_kzg_commitment(blob) expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) @@ -206,12 +227,29 @@ def case03_verify_kzg_proof(): } # Edge case: Invalid y - blob, z = VALID_BLOBS[1], VALID_ZS[1] - proof, _ = spec.compute_kzg_proof(blob, z) + for y in INVALID_FIELD_ELEMENTS: + blob, z = VALID_BLOBS[4], VALID_FIELD_ELEMENTS[1] + proof, _ = spec.compute_kzg_proof(blob, z) + commitment = spec.blob_to_kzg_commitment(blob) + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + identifier = f'{encode_hex(hash(blob))}_{encode_hex(y)}' + yield f'verify_kzg_proof_case_invalid_y_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid proof, not in G1 + blob, z = VALID_BLOBS[2], VALID_FIELD_ELEMENTS[0] + proof = P1_NOT_IN_G1 commitment = spec.blob_to_kzg_commitment(blob) - y = INVALID_ZS[0] 
+ y = VALID_FIELD_ELEMENTS[1] expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) - yield 'verify_kzg_proof_case_invalid_y', { + yield 'verify_kzg_proof_case_proof_not_in_G1', { 'input': { 'commitment': encode_hex(commitment), 'z': encode_hex(z), @@ -221,13 +259,13 @@ def case03_verify_kzg_proof(): 'output': None } - # Edge case: Invalid proof, not in G1 - blob, z = VALID_BLOBS[2], VALID_ZS[0] - proof = P1_NOT_IN_G1 + # Edge case: Invalid proof, not on curve + blob, z = VALID_BLOBS[3], VALID_FIELD_ELEMENTS[1] + proof = P1_NOT_ON_CURVE commitment = spec.blob_to_kzg_commitment(blob) - y = VALID_ZS[1] + y = VALID_FIELD_ELEMENTS[1] expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) - yield 'verify_kzg_proof_case_proof_not_in_G1', { + yield 'verify_kzg_proof_case_proof_not_on_curve', { 'input': { 'commitment': encode_hex(commitment), 'z': encode_hex(z), @@ -237,13 +275,31 @@ def case03_verify_kzg_proof(): 'output': None } - # Edge case: Invalid proof, not on curve - blob, z = VALID_BLOBS[3], VALID_ZS[1] - proof = P1_NOT_ON_CURVE + # Edge case: Invalid proof, too few bytes + blob = VALID_BLOBS[1] commitment = spec.blob_to_kzg_commitment(blob) - y = VALID_ZS[1] + z = VALID_FIELD_ELEMENTS[4] + proof, y = spec.compute_kzg_proof(blob, z) + proof = proof[:-1] expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) - yield 'verify_kzg_proof_case_proof_not_on_curve', { + yield 'verify_kzg_proof_case_proof_too_few_bytes', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid proof, too many bytes + blob = VALID_BLOBS[1] + commitment = spec.blob_to_kzg_commitment(blob) + z = VALID_FIELD_ELEMENTS[4] + proof, y = spec.compute_kzg_proof(blob, z) + proof = proof + b"\x00" + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + yield 'verify_kzg_proof_case_proof_too_many_bytes', { 'input': { 'commitment': 
encode_hex(commitment), 'z': encode_hex(z), @@ -254,7 +310,7 @@ def case03_verify_kzg_proof(): } # Edge case: Invalid commitment, not in G1 - blob, z = VALID_BLOBS[4], VALID_ZS[3] + blob, z = VALID_BLOBS[4], VALID_FIELD_ELEMENTS[3] proof, y = spec.compute_kzg_proof(blob, z) commitment = P1_NOT_IN_G1 expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) @@ -269,7 +325,7 @@ def case03_verify_kzg_proof(): } # Edge case: Invalid commitment, not on curve - blob, z = VALID_BLOBS[1], VALID_ZS[4] + blob, z = VALID_BLOBS[1], VALID_FIELD_ELEMENTS[4] proof, y = spec.compute_kzg_proof(blob, z) commitment = P1_NOT_ON_CURVE expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) @@ -283,6 +339,38 @@ def case03_verify_kzg_proof(): 'output': None } + # Edge case: Invalid commitment, too few bytes + blob = VALID_BLOBS[1] + commitment = spec.blob_to_kzg_commitment(blob)[:-1] + z = VALID_FIELD_ELEMENTS[4] + proof, y = spec.compute_kzg_proof(blob, z) + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + yield 'verify_kzg_proof_case_commitment_too_few_bytes', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid commitment, too many bytes + blob = VALID_BLOBS[1] + commitment = spec.blob_to_kzg_commitment(blob) + b"\x00" + z = VALID_FIELD_ELEMENTS[4] + proof, y = spec.compute_kzg_proof(blob, z) + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + yield 'verify_kzg_proof_case_commitment_too_many_bytes', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + def case04_compute_blob_kzg_proof(): # Valid cases @@ -397,6 +485,34 @@ def case05_verify_blob_kzg_proof(): 'output': None } + # Edge case: Invalid proof, too few bytes + blob = VALID_BLOBS[1] + commitment = spec.blob_to_kzg_commitment(blob) + proof = 
spec.compute_blob_kzg_proof(blob, commitment)[:-1] + expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) + yield 'verify_blob_kzg_proof_case_proof_too_few_bytes', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid proof, too many bytes + blob = VALID_BLOBS[1] + commitment = spec.blob_to_kzg_commitment(blob) + proof = spec.compute_blob_kzg_proof(blob, commitment) + b"\x00" + expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) + yield 'verify_blob_kzg_proof_case_proof_too_many_bytes', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': None + } + # Edge case: Invalid commitment, not in G1 blob = VALID_BLOBS[0] proof = G1 @@ -425,6 +541,36 @@ def case05_verify_blob_kzg_proof(): 'output': None } + # Edge case: Invalid commitment, too few bytes + blob = VALID_BLOBS[1] + commitment = spec.blob_to_kzg_commitment(blob) + proof = spec.compute_blob_kzg_proof(blob, commitment) + commitment = commitment[:-1] + expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) + yield 'verify_blob_kzg_proof_case_commitment_too_few_bytes', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid commitment, too many bytes + blob = VALID_BLOBS[1] + commitment = spec.blob_to_kzg_commitment(blob) + proof = spec.compute_blob_kzg_proof(blob, commitment) + commitment = commitment + b"\x00" + expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) + yield 'verify_blob_kzg_proof_case_commitment_too_many_bytes', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': None + } + # Edge case: Invalid blob for blob in INVALID_BLOBS: proof = G1 @@ -473,6 +619,20 @@ def 
case06_verify_blob_kzg_proof_batch(): 'output': False } + # Edge case: Invalid blobs + for blob in INVALID_BLOBS: + blobs_invalid = VALID_BLOBS[:4] + [blob] + VALID_BLOBS[5:] + expect_exception(spec.verify_blob_kzg_proof_batch, blobs_invalid, commitments, proofs) + identifier = f'{encode_hex(hash(blob))}' + yield f'verify_blob_kzg_proof_batch_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blobs': encode_hex_list(blobs_invalid), + 'commitments': encode_hex_list(commitments), + 'proofs': encode_hex_list(proofs), + }, + 'output': None + } + # Edge case: Invalid proof, not in G1 proofs_invalid_notG1 = [P1_NOT_IN_G1] + proofs[1:] expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_notG1) @@ -497,6 +657,30 @@ def case06_verify_blob_kzg_proof_batch(): 'output': None } + # Edge case: Invalid proof, too few bytes + proofs_invalid_tooFewBytes = proofs[:1] + [proofs[1][:-1]] + proofs[2:] + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_tooFewBytes) + yield 'verify_blob_kzg_proof_batch_case_proof_too_few_bytes', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS), + 'commitments': encode_hex_list(commitments), + 'proofs': encode_hex_list(proofs_invalid_tooFewBytes), + }, + 'output': None + } + + # Edge case: Invalid proof, too many bytes + proofs_invalid_tooManyBytes = proofs[:1] + [proofs[1] + b"\x00"] + proofs[2:] + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_tooManyBytes) + yield 'verify_blob_kzg_proof_batch_case_proof_too_many_bytes', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS), + 'commitments': encode_hex_list(commitments), + 'proofs': encode_hex_list(proofs_invalid_tooManyBytes), + }, + 'output': None + } + # Edge case: Invalid commitment, not in G1 commitments_invalid_notG1 = commitments[:2] + [P1_NOT_IN_G1] + commitments[3:] expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, 
commitments, commitments_invalid_notG1) @@ -521,13 +705,25 @@ def case06_verify_blob_kzg_proof_batch(): 'output': None } - # Edge case: Invalid blob - blobs_invalid = VALID_BLOBS[:4] + [BLOB_INVALID] + VALID_BLOBS[5:] - expect_exception(spec.verify_blob_kzg_proof_batch, blobs_invalid, commitments, proofs) - yield 'verify_blob_kzg_proof_batch_case_invalid_blob', { + # Edge case: Invalid commitment, too few bytes + commitments_invalid_tooFewBytes = commitments[:3] + [commitments[3][:-1]] + commitments[4:] + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, commitments_invalid_tooFewBytes) + yield 'verify_blob_kzg_proof_batch_case_too_few_bytes', { 'input': { - 'blobs': encode_hex_list(blobs_invalid), - 'commitments': encode_hex_list(commitments), + 'blobs': encode_hex_list(VALID_BLOBS), + 'commitments': encode_hex_list(commitments_invalid_tooFewBytes), + 'proofs': encode_hex_list(proofs), + }, + 'output': None + } + + # Edge case: Invalid commitment, too many bytes + commitments_invalid_tooManyBytes = commitments[:3] + [commitments[3] + b"\x00"] + commitments[4:] + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, commitments_invalid_tooManyBytes) + yield 'verify_blob_kzg_proof_batch_case_too_many_bytes', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS), + 'commitments': encode_hex_list(commitments_invalid_tooManyBytes), 'proofs': encode_hex_list(proofs), }, 'output': None From b4c130a4a25bd50299c3fbb67bc0b8723db26c01 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Tue, 7 Mar 2023 21:57:45 +0000 Subject: [PATCH 44/73] Remove state from unit tests --- .../test_polynomial_commitments.py | 88 +++++++++++-------- 1 file changed, 53 insertions(+), 35 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py index 91115a299a..8ad552056c 
100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py @@ -1,7 +1,8 @@ import random from eth2spec.test.context import ( - spec_state_test, + spec_test, + single_phase, with_deneb_and_later, expect_assertion_error ) @@ -35,8 +36,9 @@ def field_element_bytes(x): @with_deneb_and_later -@spec_state_test -def test_verify_kzg_proof(spec, state): +@spec_test +@single_phase +def test_verify_kzg_proof(spec): """ Test the wrapper functions (taking bytes arguments) for computing and verifying KZG proofs. """ @@ -51,8 +53,9 @@ def test_verify_kzg_proof(spec, state): @with_deneb_and_later -@spec_state_test -def test_verify_kzg_proof_incorrect_proof(spec, state): +@spec_test +@single_phase +def test_verify_kzg_proof_incorrect_proof(spec): """ Test the wrapper function `verify_kzg_proof` fails on an incorrect proof. """ @@ -68,8 +71,9 @@ def test_verify_kzg_proof_incorrect_proof(spec, state): @with_deneb_and_later -@spec_state_test -def test_verify_kzg_proof_impl(spec, state): +@spec_test +@single_phase +def test_verify_kzg_proof_impl(spec): """ Test the implementation functions (taking field element arguments) for computing and verifying KZG proofs. """ @@ -84,8 +88,9 @@ def test_verify_kzg_proof_impl(spec, state): @with_deneb_and_later -@spec_state_test -def test_verify_kzg_proof_impl_incorrect_proof(spec, state): +@spec_test +@single_phase +def test_verify_kzg_proof_impl_incorrect_proof(spec): """ Test the implementation function `verify_kzg_proof` fails on an incorrect proof. 
""" @@ -101,8 +106,9 @@ def test_verify_kzg_proof_impl_incorrect_proof(spec, state): @with_deneb_and_later -@spec_state_test -def test_barycentric_outside_domain(spec, state): +@spec_test +@single_phase +def test_barycentric_outside_domain(spec): """ Test barycentric formula correctness by using it to evaluate a polynomial at a bunch of points outside its domain (the roots of unity). @@ -134,8 +140,9 @@ def test_barycentric_outside_domain(spec, state): @with_deneb_and_later -@spec_state_test -def test_barycentric_within_domain(spec, state): +@spec_test +@single_phase +def test_barycentric_within_domain(spec): """ Test barycentric formula correctness by using it to evaluate a polynomial at all the points of its domain (the roots of unity). @@ -166,8 +173,9 @@ def test_barycentric_within_domain(spec, state): @with_deneb_and_later -@spec_state_test -def test_compute_kzg_proof_within_domain(spec, state): +@spec_test +@single_phase +def test_compute_kzg_proof_within_domain(spec): """ Create and verify KZG proof that p(z) == y where z is in the domain of our KZG scheme (i.e. a relevant root of unity). 
@@ -186,8 +194,9 @@ def test_compute_kzg_proof_within_domain(spec, state): @with_deneb_and_later -@spec_state_test -def test_verify_blob_kzg_proof(spec, state): +@spec_test +@single_phase +def test_verify_blob_kzg_proof(spec): """ Test the functions to compute and verify a blob KZG proof """ @@ -199,8 +208,9 @@ def test_verify_blob_kzg_proof(spec, state): @with_deneb_and_later -@spec_state_test -def test_verify_blob_kzg_proof_incorrect_proof(spec, state): +@spec_test +@single_phase +def test_verify_blob_kzg_proof_incorrect_proof(spec): """ Check that `verify_blob_kzg_proof` fails on an incorrect proof """ @@ -213,8 +223,9 @@ def test_verify_blob_kzg_proof_incorrect_proof(spec, state): @with_deneb_and_later -@spec_state_test -def test_validate_kzg_g1_generator(spec, state): +@spec_test +@single_phase +def test_validate_kzg_g1_generator(spec): """ Verify that `validate_kzg_g1` allows the generator G1 """ @@ -223,8 +234,9 @@ def test_validate_kzg_g1_generator(spec, state): @with_deneb_and_later -@spec_state_test -def test_validate_kzg_g1_neutral_element(spec, state): +@spec_test +@single_phase +def test_validate_kzg_g1_neutral_element(spec): """ Verify that `validate_kzg_g1` allows the neutral element in G1 """ @@ -233,8 +245,9 @@ def test_validate_kzg_g1_neutral_element(spec, state): @with_deneb_and_later -@spec_state_test -def test_validate_kzg_g1_not_in_g1(spec, state): +@spec_test +@single_phase +def test_validate_kzg_g1_not_in_g1(spec): """ Verify that `validate_kzg_g1` fails on point not in G1 """ @@ -243,8 +256,9 @@ def test_validate_kzg_g1_not_in_g1(spec, state): @with_deneb_and_later -@spec_state_test -def test_validate_kzg_g1_not_on_curve(spec, state): +@spec_test +@single_phase +def test_validate_kzg_g1_not_on_curve(spec): """ Verify that `validate_kzg_g1` fails on point not in G1 """ @@ -253,8 +267,9 @@ def test_validate_kzg_g1_not_on_curve(spec, state): @with_deneb_and_later -@spec_state_test -def test_bytes_to_bls_field_zero(spec, state): +@spec_test 
+@single_phase +def test_bytes_to_bls_field_zero(spec): """ Verify that `bytes_to_bls_field` handles zero """ @@ -263,8 +278,9 @@ def test_bytes_to_bls_field_zero(spec, state): @with_deneb_and_later -@spec_state_test -def test_bytes_to_bls_field_modulus_minus_one(spec, state): +@spec_test +@single_phase +def test_bytes_to_bls_field_modulus_minus_one(spec): """ Verify that `bytes_to_bls_field` handles modulus minus one """ @@ -273,8 +289,9 @@ def test_bytes_to_bls_field_modulus_minus_one(spec, state): @with_deneb_and_later -@spec_state_test -def test_bytes_to_bls_field_modulus(spec, state): +@spec_test +@single_phase +def test_bytes_to_bls_field_modulus(spec): """ Verify that `bytes_to_bls_field` fails on BLS modulus """ @@ -285,8 +302,9 @@ def test_bytes_to_bls_field_modulus(spec, state): @with_deneb_and_later -@spec_state_test -def test_bytes_to_bls_field_max(spec, state): +@spec_test +@single_phase +def test_bytes_to_bls_field_max(spec): """ Verify that `bytes_to_bls_field` fails on 2**256 - 1 """ From 43e714e60f142d5951bc68eab55e5a6a801f0a39 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Wed, 8 Mar 2023 15:34:56 +0100 Subject: [PATCH 45/73] Check correct fork version in LC sync protocol - Sync committee is determined by signature_slot - Signature fork version is determined by max(signature_slot, 1) - 1 - Attested block fork version can be anything < signature_slot Old logic incorrectly derived signature fork version from signature_slot and did not subtract a slot. Extended tests to check this edge case. 
--- specs/altair/light-client/sync-protocol.md | 3 ++- .../eth2spec/test/altair/light_client/test_sync.py | 13 +++++-------- .../pyspec/eth2spec/test/helpers/fork_transition.py | 12 ++++++++++-- .../pyspec/eth2spec/test/helpers/light_client.py | 2 +- 4 files changed, 18 insertions(+), 12 deletions(-) diff --git a/specs/altair/light-client/sync-protocol.md b/specs/altair/light-client/sync-protocol.md index f1b012e981..8cc048219f 100644 --- a/specs/altair/light-client/sync-protocol.md +++ b/specs/altair/light-client/sync-protocol.md @@ -387,7 +387,8 @@ def validate_light_client_update(store: LightClientStore, pubkey for (bit, pubkey) in zip(sync_aggregate.sync_committee_bits, sync_committee.pubkeys) if bit ] - fork_version = compute_fork_version(compute_epoch_at_slot(update.signature_slot)) + fork_version_slot = max(update.signature_slot, 1) - 1 + fork_version = compute_fork_version(compute_epoch_at_slot(fork_version_slot)) domain = compute_domain(DOMAIN_SYNC_COMMITTEE, fork_version, genesis_validators_root) signing_root = compute_signing_root(update.attested_header.beacon, domain) assert bls.FastAggregateVerify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature) diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py index 63bec26b09..d33e68961d 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py @@ -668,10 +668,9 @@ def run_test_single_fork(spec, phases, state, fork): # Upgrade to post-fork spec, attested block is still before the fork attested_block = block.copy() attested_state = state.copy() - state, _ = do_fork(state, spec, phases[fork], fork_epoch, with_block=False) + sync_aggregate, _ = get_sync_aggregate(phases[fork], state) + state, block = do_fork(state, spec, phases[fork], fork_epoch, sync_aggregate=sync_aggregate) spec = phases[fork] - 
sync_aggregate, _ = get_sync_aggregate(spec, state) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) assert test.store.finalized_header.beacon.slot == finalized_state.slot assert test.store.next_sync_committee == finalized_state.next_sync_committee @@ -755,18 +754,16 @@ def run_test_multi_fork(spec, phases, state, fork_1, fork_2): # ..., attested is from `fork_1`, ... fork_1_epoch = getattr(phases[fork_1].config, fork_1.upper() + '_FORK_EPOCH') transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_1_epoch) - 1) - state, _ = do_fork(state, spec, phases[fork_1], fork_1_epoch, with_block=False) + state, attested_block = do_fork(state, spec, phases[fork_1], fork_1_epoch) spec = phases[fork_1] - attested_block = state_transition_with_full_block(spec, state, True, True) attested_state = state.copy() # ..., and signature is from `fork_2` fork_2_epoch = getattr(phases[fork_2].config, fork_2.upper() + '_FORK_EPOCH') transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_2_epoch) - 1) - state, _ = do_fork(state, spec, phases[fork_2], fork_2_epoch, with_block=False) + sync_aggregate, _ = get_sync_aggregate(phases[fork_2], state) + state, block = do_fork(state, spec, phases[fork_2], fork_2_epoch, sync_aggregate=sync_aggregate) spec = phases[fork_2] - sync_aggregate, _ = get_sync_aggregate(spec, state) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) # Check that update applies yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) diff --git a/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py b/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py index 96d0d20dcd..241c7dc37e 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py +++ 
b/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py @@ -47,6 +47,7 @@ def _set_operations_by_dict(block, operation_dict): def _state_transition_and_sign_block_at_slot(spec, state, + sync_aggregate=None, operation_dict=None): """ Cribbed from ``transition_unsigned_block`` helper @@ -61,6 +62,8 @@ def _state_transition_and_sign_block_at_slot(spec, Thus use dict to pass operations. """ block = build_empty_block(spec, state) + if sync_aggregate is not None: + block.body.sync_aggregate = sync_aggregate if operation_dict: _set_operations_by_dict(block, operation_dict) @@ -141,7 +144,7 @@ def state_transition_across_slots_with_ignoring_proposers(spec, next_slot(spec, state) -def do_fork(state, spec, post_spec, fork_epoch, with_block=True, operation_dict=None): +def do_fork(state, spec, post_spec, fork_epoch, with_block=True, sync_aggregate=None, operation_dict=None): spec.process_slots(state, state.slot + 1) assert state.slot % spec.SLOTS_PER_EPOCH == 0 @@ -172,7 +175,12 @@ def do_fork(state, spec, post_spec, fork_epoch, with_block=True, operation_dict= assert state.fork.current_version == post_spec.config.DENEB_FORK_VERSION if with_block: - return state, _state_transition_and_sign_block_at_slot(post_spec, state, operation_dict=operation_dict) + return state, _state_transition_and_sign_block_at_slot( + post_spec, + state, + sync_aggregate=sync_aggregate, + operation_dict=operation_dict, + ) else: return state, None diff --git a/tests/core/pyspec/eth2spec/test/helpers/light_client.py b/tests/core/pyspec/eth2spec/test/helpers/light_client.py index 215d174fc8..ceca145e94 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/light_client.py +++ b/tests/core/pyspec/eth2spec/test/helpers/light_client.py @@ -31,7 +31,7 @@ def get_sync_aggregate(spec, state, num_participants=None, signature_slot=None): sync_committee_signature = compute_aggregate_sync_committee_signature( spec, signature_state, - signature_slot, + max(signature_slot, 1) - 1, 
committee_indices[:num_participants], ) sync_aggregate = spec.SyncAggregate( From 54c7df5bbf3bd550d9a4361a081ae78c6e049d3e Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Wed, 8 Mar 2023 15:42:56 +0100 Subject: [PATCH 46/73] Fix lint --- specs/altair/light-client/sync-protocol.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/altair/light-client/sync-protocol.md b/specs/altair/light-client/sync-protocol.md index 8cc048219f..baef684c62 100644 --- a/specs/altair/light-client/sync-protocol.md +++ b/specs/altair/light-client/sync-protocol.md @@ -387,7 +387,7 @@ def validate_light_client_update(store: LightClientStore, pubkey for (bit, pubkey) in zip(sync_aggregate.sync_committee_bits, sync_committee.pubkeys) if bit ] - fork_version_slot = max(update.signature_slot, 1) - 1 + fork_version_slot = max(update.signature_slot, Slot(1)) - Slot(1) fork_version = compute_fork_version(compute_epoch_at_slot(fork_version_slot)) domain = compute_domain(DOMAIN_SYNC_COMMITTEE, fork_version, genesis_validators_root) signing_root = compute_signing_root(update.attested_header.beacon, domain) From 7f74a08a6c4734e2d87c07e65dbc4a2624c16ab6 Mon Sep 17 00:00:00 2001 From: Ben Edgington Date: Thu, 9 Mar 2023 11:07:01 +0000 Subject: [PATCH 47/73] Fix trailing whitespace --- specs/phase0/fork-choice.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index 3176c1cd5d..8681975ca9 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -388,7 +388,7 @@ def on_tick(store: Store, time: uint64) -> None: # Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain if store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch: - finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) ancestor_at_finalized_slot = 
get_ancestor(store, store.best_justified_checkpoint.root, finalized_slot) if ancestor_at_finalized_slot == store.finalized_checkpoint.root: store.justified_checkpoint = store.best_justified_checkpoint From 0da79bdbfd127be24102b1dc0454f5ba200499d5 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 9 Mar 2023 21:05:07 +0600 Subject: [PATCH 48/73] Provide validator guide for EIP6110 --- specs/_features/eip6110/validator.md | 42 ++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 specs/_features/eip6110/validator.md diff --git a/specs/_features/eip6110/validator.md b/specs/_features/eip6110/validator.md new file mode 100644 index 0000000000..dcaaf11041 --- /dev/null +++ b/specs/_features/eip6110/validator.md @@ -0,0 +1,42 @@ +# EIP-6110 -- Honest Validator + +## Table of contents + + + + + +- [Introduction](#introduction) +- [Prerequisites](#prerequisites) +- [Block proposal](#block-proposal) + - [Deposits](#deposits) + + + + +## Introduction + +This document represents the changes to be made in the code of an "honest validator" to implement EIP-6110. + +## Prerequisites + +This document is an extension of the [Capella -- Honest Validator](../../capella/validator.md) guide. +All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden. + +All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [EIP-6110](./beacon-chain.md) are requisite for this document and used throughout. +Please see related Beacon Chain doc before continuing and use them as a reference throughout. 
+ +## Block proposal + +### Deposits + +The expected number of deposits MUST be changed from `min(MAX_DEPOSITS, eth1_data.deposit_count - state.eth1_deposit_index)` to the result of the following function: + +```python +def get_eth1_deposit_count(state: BeaconState) -> uint64: + eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index) + if state.eth1_deposit_index < eth1_deposit_index_limit: + return min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index) + else: + return 0 +``` From 92f8c5cf6ba4647aa2893bce0f08572666aefbf8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marius=20Kj=C3=A6rstad?= Date: Thu, 9 Mar 2023 23:06:30 +0100 Subject: [PATCH 49/73] Update disclosure page and email for reporting bugs Update disclosure page and email for reporting bugs --- SECURITY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SECURITY.md b/SECURITY.md index e46fab4de1..2101ea1554 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -8,4 +8,4 @@ Please see [Releases](https://github.com/ethereum/consensus-specs/releases/). We **Please do not file a public ticket** mentioning the vulnerability. -To find out how to disclose a vulnerability in the Ethereum Consensus Layer visit [https://eth2bounty.ethereum.org](https://eth2bounty.ethereum.org) or email eth2bounty@ethereum.org. Please read the [disclosure page](https://eth2bounty.ethereum.org) for more information about publicly disclosed security vulnerabilities. +To find out how to disclose a vulnerability in the Ethereum Consensus Layer visit [https://ethereum.org/bug-bounty](https://ethereum.org/bug-bounty) or email bounty@ethereum.org. Please read the [disclosure page](https://ethereum.org/bug-bounty) for more information about publicly disclosed security vulnerabilities. 
From a5333a1d108838b3bb20ec91759616b8bb4ab5be Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Fri, 10 Mar 2023 11:43:05 +0000 Subject: [PATCH 50/73] Remove useless test_kzg.py --- .../eth2spec/test/deneb/unittests/test_kzg.py | 21 ------------------- 1 file changed, 21 deletions(-) delete mode 100644 tests/core/pyspec/eth2spec/test/deneb/unittests/test_kzg.py diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/test_kzg.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/test_kzg.py deleted file mode 100644 index 71bfae8b89..0000000000 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/test_kzg.py +++ /dev/null @@ -1,21 +0,0 @@ - -from eth2spec.test.helpers.constants import ( - DENEB, - MINIMAL, -) -from eth2spec.test.helpers.sharding import ( - get_sample_blob, -) -from eth2spec.test.context import ( - with_phases, - spec_state_test, - with_presets, -) - - -@with_phases([DENEB]) -@spec_state_test -@with_presets([MINIMAL]) -def test_blob_to_kzg_commitment(spec, state): - blob = get_sample_blob(spec) - spec.blob_to_kzg_commitment(blob) From 6b69450992f5133a0d716930147598fdaf56503f Mon Sep 17 00:00:00 2001 From: kevaundray Date: Sat, 11 Mar 2023 15:44:42 +0000 Subject: [PATCH 51/73] fix typo in type of KZG_SETUP_LAGRANGE --- specs/deneb/polynomial-commitments.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index edf2062b2a..57496eb020 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -105,7 +105,7 @@ but reusing the `mainnet` settings in public networks is a critical security req | `KZG_SETUP_G2_LENGTH` | `65` | | `KZG_SETUP_G1` | `Vector[G1Point, FIELD_ELEMENTS_PER_BLOB]`, contents TBD | | `KZG_SETUP_G2` | `Vector[G2Point, KZG_SETUP_G2_LENGTH]`, contents TBD | -| `KZG_SETUP_LAGRANGE` | `Vector[KZGCommitment, FIELD_ELEMENTS_PER_BLOB]`, contents TBD | +| `KZG_SETUP_LAGRANGE` | `Vector[G1Point, 
FIELD_ELEMENTS_PER_BLOB]`, contents TBD | ## Helper functions From 5e74c5141138cadf7c339031cac1aefc91f8b8bc Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Sat, 11 Mar 2023 22:34:41 +0000 Subject: [PATCH 52/73] Tests for validate_blobs_and_kzg_commitments and fix --- specs/deneb/validator.md | 2 +- .../deneb/unittests/validator/__init__.py | 0 .../unittests/validator/test_validator.py | 113 ++++++++++++++++++ 3 files changed, 114 insertions(+), 1 deletion(-) create mode 100644 tests/core/pyspec/eth2spec/test/deneb/unittests/validator/__init__.py create mode 100644 tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index 50ab832e00..6519ec6e69 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -72,7 +72,7 @@ def validate_blobs_and_kzg_commitments(execution_payload: ExecutionPayload, # Optionally sanity-check that the KZG commitments match the blobs (as produced by the execution engine) assert len(blob_kzg_commitments) == len(blobs) - assert [blob_to_kzg_commitment(blob) == commitment for blob, commitment in zip(blobs, blob_kzg_commitments)] + assert all(blob_to_kzg_commitment(blob) == commitment for blob, commitment in zip(blobs, blob_kzg_commitments)) ``` 3. If valid, set `block.body.blob_kzg_commitments = blob_kzg_commitments`. 
diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/__init__.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py new file mode 100644 index 0000000000..48a01f624d --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py @@ -0,0 +1,113 @@ +import random + +from eth2spec.test.context import ( + spec_state_test, + with_deneb_and_later, + expect_assertion_error +) +from eth2spec.test.helpers.execution_payload import ( + compute_el_block_hash, +) +from eth2spec.test.helpers.sharding import ( + get_sample_opaque_tx, +) +from eth2spec.test.helpers.block import ( + build_empty_block_for_next_slot +) +from eth2spec.test.helpers.state import ( + state_transition_and_sign_block, +) +from eth2spec.utils import bls +from eth2spec.utils.bls import BLS_MODULUS + +G1 = bls.G1_to_bytes48(bls.G1()) +P1_NOT_IN_G1 = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + + "0123456789abcdef0123456789abcdef0123456789abcdef") +P1_NOT_ON_CURVE = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + + "0123456789abcdef0123456789abcdef0123456789abcde0") + + +def bls_add_one(x): + """ + Adds "one" (actually bls.G1()) to a compressed group element. + Useful to compute definitely incorrect proofs. 
+ """ + return bls.G1_to_bytes48( + bls.add(bls.bytes48_to_G1(x), bls.G1()) + ) + + +def field_element_bytes(x): + return int.to_bytes(x % BLS_MODULUS, 32, "little") + + +@with_deneb_and_later +@spec_state_test +def test_validate_blobs_and_kzg_commitments(spec, state): + """ + Test `validate_blobs_and_kzg_commitments` + """ + blob_count = 4 + block = build_empty_block_for_next_slot(spec, state) + opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count) + block.body.blob_kzg_commitments = blob_kzg_commitments + block.body.execution_payload.transactions = [opaque_tx] + block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) + # state_transition_and_sign_block(spec, state, block) + + blob_sidecars = spec.get_blob_sidecars(block, blobs) + blobs = [sidecar.blob for sidecar in blob_sidecars] + + spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, + blobs, + blob_kzg_commitments) + + +@with_deneb_and_later +@spec_state_test +def test_validate_blobs_and_kzg_commitments_missing_blob(spec, state): + """ + Test `validate_blobs_and_kzg_commitments` + """ + blob_count = 4 + block = build_empty_block_for_next_slot(spec, state) + opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count) + block.body.blob_kzg_commitments = blob_kzg_commitments + block.body.execution_payload.transactions = [opaque_tx] + block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) + # state_transition_and_sign_block(spec, state, block) + + blob_sidecars = spec.get_blob_sidecars(block, blobs) + blobs = [sidecar.blob for sidecar in blob_sidecars][:-1] + + expect_assertion_error(lambda: + spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, + blobs, + blob_kzg_commitments) + ) + + +@with_deneb_and_later +@spec_state_test +def test_validate_blobs_and_kzg_commitments_incorrect_blob(spec, state): + """ + Test 
`validate_blobs_and_kzg_commitments` + """ + blob_count = 4 + block = build_empty_block_for_next_slot(spec, state) + opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count) + block.body.blob_kzg_commitments = blob_kzg_commitments + block.body.execution_payload.transactions = [opaque_tx] + block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) + # state_transition_and_sign_block(spec, state, block) + + blob_sidecars = spec.get_blob_sidecars(block, blobs) + blobs = [sidecar.blob for sidecar in blob_sidecars] + + blobs[1] = spec.Blob(blobs[1][:13] + bytes([(blobs[1][13] + 1) % 256]) + blobs[1][14:]) + + expect_assertion_error(lambda: + spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, + blobs, + blob_kzg_commitments) + ) \ No newline at end of file From 29b5309f7df8166ef25803c6542116c69ffa1203 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Sat, 11 Mar 2023 23:02:35 +0000 Subject: [PATCH 53/73] Add proofs to validate_blobs_and_kzg_commitments --- specs/deneb/validator.md | 9 +++-- .../unittests/validator/test_validator.py | 39 +++++++++++++++++-- 2 files changed, 40 insertions(+), 8 deletions(-) diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index 6519ec6e69..d2209986e6 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -44,7 +44,7 @@ Note: This API is *unstable*. `get_blobs_and_kzg_commitments` and `get_payload` Implementers may also retrieve blobs individually per transaction. ```python -def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> Tuple[Sequence[BLSFieldElement], Sequence[KZGCommitment]]: +def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> Tuple[Sequence[BLSFieldElement], Sequence[KZGCommitment], Sequence[KZGProof]]: # pylint: disable=unused-argument ... 
``` @@ -66,13 +66,14 @@ use the `payload_id` to retrieve `blobs` and `blob_kzg_commitments` via `get_blo ```python def validate_blobs_and_kzg_commitments(execution_payload: ExecutionPayload, blobs: Sequence[Blob], - blob_kzg_commitments: Sequence[KZGCommitment]) -> None: + blob_kzg_commitments: Sequence[KZGCommitment], + blob_kzg_proofs: Sequence[KZGProof]) -> None: # Optionally sanity-check that the KZG commitments match the versioned hashes in the transactions assert verify_kzg_commitments_against_transactions(execution_payload.transactions, blob_kzg_commitments) # Optionally sanity-check that the KZG commitments match the blobs (as produced by the execution engine) - assert len(blob_kzg_commitments) == len(blobs) - assert all(blob_to_kzg_commitment(blob) == commitment for blob, commitment in zip(blobs, blob_kzg_commitments)) + assert len(blob_kzg_commitments) == len(blobs) == len(blob_kzg_proofs) + assert verify_blob_kzg_proof_batch(blobs, blob_kzg_commitments, blob_kzg_proofs) ``` 3. If valid, set `block.body.blob_kzg_commitments = blob_kzg_commitments`. 
diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py index 48a01f624d..33b216e876 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py @@ -53,14 +53,15 @@ def test_validate_blobs_and_kzg_commitments(spec, state): block.body.blob_kzg_commitments = blob_kzg_commitments block.body.execution_payload.transactions = [opaque_tx] block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) - # state_transition_and_sign_block(spec, state, block) blob_sidecars = spec.get_blob_sidecars(block, blobs) blobs = [sidecar.blob for sidecar in blob_sidecars] + proofs = [sidecar.kzg_proof for sidecar in blob_sidecars] spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, blobs, - blob_kzg_commitments) + blob_kzg_commitments, + proofs) @with_deneb_and_later @@ -79,11 +80,39 @@ def test_validate_blobs_and_kzg_commitments_missing_blob(spec, state): blob_sidecars = spec.get_blob_sidecars(block, blobs) blobs = [sidecar.blob for sidecar in blob_sidecars][:-1] + proofs = [sidecar.kzg_proof for sidecar in blob_sidecars] + + expect_assertion_error(lambda: + spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, + blobs, + blob_kzg_commitments, + proofs) + ) + + +@with_deneb_and_later +@spec_state_test +def test_validate_blobs_and_kzg_commitments_missing_proof(spec, state): + """ + Test `validate_blobs_and_kzg_commitments` + """ + blob_count = 4 + block = build_empty_block_for_next_slot(spec, state) + opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count) + block.body.blob_kzg_commitments = blob_kzg_commitments + block.body.execution_payload.transactions = [opaque_tx] + block.body.execution_payload.block_hash = compute_el_block_hash(spec, 
block.body.execution_payload) + # state_transition_and_sign_block(spec, state, block) + + blob_sidecars = spec.get_blob_sidecars(block, blobs) + blobs = [sidecar.blob for sidecar in blob_sidecars] + proofs = [sidecar.kzg_proof for sidecar in blob_sidecars][:-1] expect_assertion_error(lambda: spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, blobs, - blob_kzg_commitments) + blob_kzg_commitments, + proofs) ) @@ -103,11 +132,13 @@ def test_validate_blobs_and_kzg_commitments_incorrect_blob(spec, state): blob_sidecars = spec.get_blob_sidecars(block, blobs) blobs = [sidecar.blob for sidecar in blob_sidecars] + proofs = [sidecar.kzg_proof for sidecar in blob_sidecars] blobs[1] = spec.Blob(blobs[1][:13] + bytes([(blobs[1][13] + 1) % 256]) + blobs[1][14:]) expect_assertion_error(lambda: spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, blobs, - blob_kzg_commitments) + blob_kzg_commitments, + proofs) ) \ No newline at end of file From 96ad61bcecb6473283e312094d23dc3378cfb796 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Sun, 12 Mar 2023 23:05:01 +0000 Subject: [PATCH 54/73] Add tests for blob sidecar signature --- specs/deneb/p2p-interface.md | 11 ++ specs/deneb/validator.md | 9 +- .../eth2spec/test/deneb/sanity/test_blocks.py | 4 +- .../fork_choice/test_validate_blobs.py | 5 +- .../test/deneb/unittests/test_offset.py | 2 +- .../unittests/validator/test_validator.py | 122 ++++++++++++------ .../pyspec/eth2spec/test/helpers/sharding.py | 5 +- 7 files changed, 105 insertions(+), 53 deletions(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 107c90b9a9..9f4c158493 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -73,6 +73,17 @@ class BlobIdentifier(Container): index: BlobIndex ``` +### Helpers + +#### `verify_sidecar_signature` + +```python +def verify_blob_sidecar_signature(state: BeaconState, signed_blob_sidecar: SignedBlobSidecar) -> bool: + proposer = 
state.validators[signed_blob_sidecar.message.proposer_index] + signing_root = compute_signing_root(signed_blob_sidecar.message, get_domain(state, DOMAIN_BLOB_SIDECAR)) + return bls.Verify(proposer.pubkey, signing_root, signed_blob_sidecar.signature) +``` + ## The gossip domain: gossipsub Some gossip meshes are upgraded in the fork of Deneb to support upgraded types. diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index d2209986e6..a21fadd085 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -44,7 +44,8 @@ Note: This API is *unstable*. `get_blobs_and_kzg_commitments` and `get_payload` Implementers may also retrieve blobs individually per transaction. ```python -def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> Tuple[Sequence[BLSFieldElement], Sequence[KZGCommitment], Sequence[KZGProof]]: +def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> \ + Tuple[Sequence[BLSFieldElement], Sequence[KZGCommitment], Sequence[KZGProof]]: # pylint: disable=unused-argument ... 
``` @@ -88,7 +89,9 @@ Blobs associated with a block are packaged into sidecar objects for distribution Each `sidecar` is obtained from: ```python -def get_blob_sidecars(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[BlobSidecar]: +def get_blob_sidecars(block: BeaconBlock, + blobs: Sequence[Blob], + blob_kzg_proofs: Sequence[KZGProof]) -> Sequence[BlobSidecar]: return [ BlobSidecar( block_root=hash_tree_root(block), @@ -97,7 +100,7 @@ def get_blob_sidecars(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[Blo block_parent_root=block.parent_root, blob=blob, kzg_commitment=block.body.blob_kzg_commitments[index], - kzg_proof=compute_blob_kzg_proof(blob, block.body.blob_kzg_commitments[index]), + kzg_proof=blob_kzg_proofs[index], ) for index, blob in enumerate(blobs) ] diff --git a/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py index c7fb708b8f..5e65dbd4ef 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py @@ -22,7 +22,7 @@ def test_one_blob(spec, state): yield 'pre', state block = build_empty_block_for_next_slot(spec, state) - opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec) + opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec) block.body.blob_kzg_commitments = blob_kzg_commitments block.body.execution_payload.transactions = [opaque_tx] block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) @@ -38,7 +38,7 @@ def test_max_blobs(spec, state): yield 'pre', state block = build_empty_block_for_next_slot(spec, state) - opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=spec.MAX_BLOBS_PER_BLOCK) + opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=spec.MAX_BLOBS_PER_BLOCK) block.body.blob_kzg_commitments = blob_kzg_commitments block.body.execution_payload.transactions = 
[opaque_tx] block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs.py index d9934c5ade..0d7bd53e52 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs.py @@ -18,14 +18,13 @@ def _run_validate_blobs(spec, state, blob_count): block = build_empty_block_for_next_slot(spec, state) - opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count) + opaque_tx, blobs, blob_kzg_commitments, kzg_proofs = get_sample_opaque_tx(spec, blob_count=blob_count) block.body.blob_kzg_commitments = blob_kzg_commitments block.body.execution_payload.transactions = [opaque_tx] block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) state_transition_and_sign_block(spec, state, block) - # Also test the proof generation in `get_blob_sidecars` - blob_sidecars = spec.get_blob_sidecars(block, blobs) + blob_sidecars = spec.get_blob_sidecars(block, blobs, kzg_proofs) blobs = [sidecar.blob for sidecar in blob_sidecars] kzg_proofs = [sidecar.kzg_proof for sidecar in blob_sidecars] spec.validate_blobs(blob_kzg_commitments, blobs, kzg_proofs) diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/test_offset.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/test_offset.py index 13150180bc..3c3b51ff1a 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/test_offset.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/test_offset.py @@ -17,7 +17,7 @@ @spec_state_test @with_presets([MINIMAL]) def test_tx_peek_blob_versioned_hashes(spec, state): - otx, blobs, commitments = get_sample_opaque_tx(spec) + otx, _, commitments, _ = get_sample_opaque_tx(spec) data_hashes = 
spec.tx_peek_blob_versioned_hashes(otx) expected = [spec.kzg_commitment_to_versioned_hash(blob_commitment) for blob_commitment in commitments] assert expected == data_hashes diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py index 33b216e876..7e5cf8bfe9 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py @@ -1,6 +1,5 @@ -import random - from eth2spec.test.context import ( + always_bls, spec_state_test, with_deneb_and_later, expect_assertion_error @@ -14,8 +13,8 @@ from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot ) -from eth2spec.test.helpers.state import ( - state_transition_and_sign_block, +from eth2spec.test.helpers.keys import ( + pubkey_to_privkey ) from eth2spec.utils import bls from eth2spec.utils.bls import BLS_MODULUS @@ -49,15 +48,11 @@ def test_validate_blobs_and_kzg_commitments(spec, state): """ blob_count = 4 block = build_empty_block_for_next_slot(spec, state) - opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count) + opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count) block.body.blob_kzg_commitments = blob_kzg_commitments block.body.execution_payload.transactions = [opaque_tx] block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) - blob_sidecars = spec.get_blob_sidecars(block, blobs) - blobs = [sidecar.blob for sidecar in blob_sidecars] - proofs = [sidecar.kzg_proof for sidecar in blob_sidecars] - spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, blobs, blob_kzg_commitments, @@ -72,21 +67,18 @@ def test_validate_blobs_and_kzg_commitments_missing_blob(spec, state): """ blob_count = 4 block = build_empty_block_for_next_slot(spec, state) - opaque_tx, 
blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count) + opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count) block.body.blob_kzg_commitments = blob_kzg_commitments block.body.execution_payload.transactions = [opaque_tx] block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) - # state_transition_and_sign_block(spec, state, block) - - blob_sidecars = spec.get_blob_sidecars(block, blobs) - blobs = [sidecar.blob for sidecar in blob_sidecars][:-1] - proofs = [sidecar.kzg_proof for sidecar in blob_sidecars] - expect_assertion_error(lambda: - spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, - blobs, - blob_kzg_commitments, - proofs) + expect_assertion_error( + lambda: spec.validate_blobs_and_kzg_commitments( + block.body.execution_payload, + blobs[:-1], + blob_kzg_commitments, + proofs + ) ) @@ -98,21 +90,18 @@ def test_validate_blobs_and_kzg_commitments_missing_proof(spec, state): """ blob_count = 4 block = build_empty_block_for_next_slot(spec, state) - opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count) + opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count) block.body.blob_kzg_commitments = blob_kzg_commitments block.body.execution_payload.transactions = [opaque_tx] block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) - # state_transition_and_sign_block(spec, state, block) - blob_sidecars = spec.get_blob_sidecars(block, blobs) - blobs = [sidecar.blob for sidecar in blob_sidecars] - proofs = [sidecar.kzg_proof for sidecar in blob_sidecars][:-1] - - expect_assertion_error(lambda: - spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, - blobs, - blob_kzg_commitments, - proofs) + expect_assertion_error( + lambda: spec.validate_blobs_and_kzg_commitments( + block.body.execution_payload, + 
blobs, + blob_kzg_commitments, + proofs[:-1] + ) ) @@ -124,21 +113,68 @@ def test_validate_blobs_and_kzg_commitments_incorrect_blob(spec, state): """ blob_count = 4 block = build_empty_block_for_next_slot(spec, state) - opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count) + opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count) block.body.blob_kzg_commitments = blob_kzg_commitments block.body.execution_payload.transactions = [opaque_tx] block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) - # state_transition_and_sign_block(spec, state, block) - - blob_sidecars = spec.get_blob_sidecars(block, blobs) - blobs = [sidecar.blob for sidecar in blob_sidecars] - proofs = [sidecar.kzg_proof for sidecar in blob_sidecars] blobs[1] = spec.Blob(blobs[1][:13] + bytes([(blobs[1][13] + 1) % 256]) + blobs[1][14:]) - expect_assertion_error(lambda: - spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, - blobs, - blob_kzg_commitments, - proofs) - ) \ No newline at end of file + expect_assertion_error( + lambda: spec.validate_blobs_and_kzg_commitments( + block.body.execution_payload, + blobs, + blob_kzg_commitments, + proofs + ) + ) + + +@with_deneb_and_later +@spec_state_test +def test_blob_sidecar_signature(spec, state): + """ + Test `get_blob_sidecar_signature` + """ + blob_count = 4 + block = build_empty_block_for_next_slot(spec, state) + opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count) + block.body.blob_kzg_commitments = blob_kzg_commitments + block.body.execution_payload.transactions = [opaque_tx] + block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) + + blob_sidecars = spec.get_blob_sidecars(block, blobs, proofs) + proposer = state.validators[blob_sidecars[1].proposer_index] + privkey = pubkey_to_privkey[proposer.pubkey] + 
sidecar_signature = spec.get_blob_sidecar_signature(state, + blob_sidecars[1], + privkey) + + signed_blob_sidecar = spec.SignedBlobSidecar(message=blob_sidecars[1], signature=sidecar_signature) + + assert spec.verify_blob_sidecar_signature(state, signed_blob_sidecar) + + +@with_deneb_and_later +@spec_state_test +@always_bls +def test_blob_sidecar_signature_incorrect(spec, state): + """ + Test `get_blob_sidecar_signature` + """ + blob_count = 4 + block = build_empty_block_for_next_slot(spec, state) + opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count) + block.body.blob_kzg_commitments = blob_kzg_commitments + block.body.execution_payload.transactions = [opaque_tx] + block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) + + blob_sidecars = spec.get_blob_sidecars(block, blobs, proofs) + + sidecar_signature = spec.get_blob_sidecar_signature(state, + blob_sidecars[1], + 123) + + signed_blob_sidecar = spec.SignedBlobSidecar(message=blob_sidecars[1], signature=sidecar_signature) + + assert not spec.verify_blob_sidecar_signature(state, signed_blob_sidecar) diff --git a/tests/core/pyspec/eth2spec/test/helpers/sharding.py b/tests/core/pyspec/eth2spec/test/helpers/sharding.py index fd60d5d3be..89f03c3c39 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/sharding.py +++ b/tests/core/pyspec/eth2spec/test/helpers/sharding.py @@ -101,13 +101,16 @@ def get_poly_in_both_forms(spec, rng=None): def get_sample_opaque_tx(spec, blob_count=1, rng=None): blobs = [] blob_kzg_commitments = [] + blob_kzg_proofs = [] blob_versioned_hashes = [] for _ in range(blob_count): blob = get_sample_blob(spec, rng) blob_commitment = spec.KZGCommitment(spec.blob_to_kzg_commitment(blob)) + blob_kzg_proof = spec.compute_blob_kzg_proof(blob, blob_commitment) blob_versioned_hash = spec.kzg_commitment_to_versioned_hash(blob_commitment) blobs.append(blob) blob_kzg_commitments.append(blob_commitment) + 
blob_kzg_proofs.append(blob_kzg_proof) blob_versioned_hashes.append(blob_versioned_hash) signed_blob_tx = SignedBlobTransaction( @@ -117,4 +120,4 @@ def get_sample_opaque_tx(spec, blob_count=1, rng=None): ) serialized_tx = serialize(signed_blob_tx) opaque_tx = spec.uint_to_bytes(spec.BLOB_TX_TYPE) + serialized_tx - return opaque_tx, blobs, blob_kzg_commitments + return opaque_tx, blobs, blob_kzg_commitments, blob_kzg_proofs From 723e8a11fe4ee025f9f4faa3352c5ca27bfb59a5 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Sun, 12 Mar 2023 23:07:19 +0000 Subject: [PATCH 55/73] Remove unused imports/functions in test_validator.py --- .../unittests/validator/test_validator.py | 22 ------------------- 1 file changed, 22 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py index 7e5cf8bfe9..07039ccfeb 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py @@ -16,28 +16,6 @@ from eth2spec.test.helpers.keys import ( pubkey_to_privkey ) -from eth2spec.utils import bls -from eth2spec.utils.bls import BLS_MODULUS - -G1 = bls.G1_to_bytes48(bls.G1()) -P1_NOT_IN_G1 = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + - "0123456789abcdef0123456789abcdef0123456789abcdef") -P1_NOT_ON_CURVE = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + - "0123456789abcdef0123456789abcdef0123456789abcde0") - - -def bls_add_one(x): - """ - Adds "one" (actually bls.G1()) to a compressed group element. - Useful to compute definitely incorrect proofs. 
- """ - return bls.G1_to_bytes48( - bls.add(bls.bytes48_to_G1(x), bls.G1()) - ) - - -def field_element_bytes(x): - return int.to_bytes(x % BLS_MODULUS, 32, "little") @with_deneb_and_later From cc284b2b60af6c857210f0e413f2ed423a95e6fa Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Sun, 12 Mar 2023 23:11:40 +0000 Subject: [PATCH 56/73] Toc --- specs/deneb/p2p-interface.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 9f4c158493..6f10f7a345 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -15,6 +15,8 @@ The specification of these changes continues in the same format as the network s - [`BlobSidecar`](#blobsidecar) - [`SignedBlobSidecar`](#signedblobsidecar) - [`BlobIdentifier`](#blobidentifier) + - [Helpers](#helpers) + - [`verify_sidecar_signature`](#verify_sidecar_signature) - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) - [Topics and messages](#topics-and-messages) - [Global topics](#global-topics) From e9551715d5ae2655189f03917d01968d290a4904 Mon Sep 17 00:00:00 2001 From: Aditya Asgaonkar Date: Thu, 9 Mar 2023 15:17:11 -0800 Subject: [PATCH 57/73] large fork choice upgrade --- presets/mainnet/phase0.yaml | 6 - presets/minimal/phase0.yaml | 6 - specs/bellatrix/fork-choice.md | 21 +- specs/deneb/fork-choice.md | 21 +- specs/phase0/fork-choice.md | 234 +++- .../eth2spec/test/helpers/attestations.py | 10 +- .../eth2spec/test/helpers/fork_choice.py | 93 +- .../test/phase0/fork_choice/test_get_head.py | 439 +++++-- .../test/phase0/fork_choice/test_on_block.py | 1119 ++++++++++++----- .../test/phase0/fork_choice/test_reorg.py | 498 ++++++++ .../phase0/fork_choice/test_withholding.py | 205 +++ .../unittests/fork_choice/test_on_block.py | 87 -- .../unittests/fork_choice/test_on_tick.py | 77 +- tests/formats/fork_choice/README.md | 5 - tests/generators/fork_choice/main.py | 2 + 15 files changed, 2163 insertions(+), 660 deletions(-) create mode 100644 
tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py create mode 100644 tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_withholding.py delete mode 100644 tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py diff --git a/presets/mainnet/phase0.yaml b/presets/mainnet/phase0.yaml index 89bb97d6a8..02bc96c8cd 100644 --- a/presets/mainnet/phase0.yaml +++ b/presets/mainnet/phase0.yaml @@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1 HYSTERESIS_UPWARD_MULTIPLIER: 5 -# Fork Choice -# --------------------------------------------------------------- -# 2**3 (= 8) -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 - - # Gwei values # --------------------------------------------------------------- # 2**0 * 10**9 (= 1,000,000,000) Gwei diff --git a/presets/minimal/phase0.yaml b/presets/minimal/phase0.yaml index 2c6fbb3691..e7028f5a42 100644 --- a/presets/minimal/phase0.yaml +++ b/presets/minimal/phase0.yaml @@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1 HYSTERESIS_UPWARD_MULTIPLIER: 5 -# Fork Choice -# --------------------------------------------------------------- -# 2**1 (= 1) -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 2 - - # Gwei values # --------------------------------------------------------------- # 2**0 * 10**9 (= 1,000,000,000) Gwei diff --git a/specs/bellatrix/fork-choice.md b/specs/bellatrix/fork-choice.md index 94d0688273..ed7d60a932 100644 --- a/specs/bellatrix/fork-choice.md +++ b/specs/bellatrix/fork-choice.md @@ -174,6 +174,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # Check the block is valid and compute the post-state state = pre_state.copy() + block_root = hash_tree_root(block) state_transition(state, signed_block, True) # [New in Bellatrix] @@ -181,9 +182,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: validate_merge_block(block) # Add new block to the store - store.blocks[hash_tree_root(block)] = block + store.blocks[block_root] = block # Add new state for this block to the 
store - store.block_states[hash_tree_root(block)] = state + store.block_states[block_root] = state # Add proposer score boost if the block is timely time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT @@ -191,15 +192,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: if get_current_slot(store) == block.slot and is_before_attesting_interval: store.proposer_boost_root = hash_tree_root(block) - # Update justified checkpoint - if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch: - if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch: - store.best_justified_checkpoint = state.current_justified_checkpoint - if should_update_justified_checkpoint(store, state.current_justified_checkpoint): - store.justified_checkpoint = state.current_justified_checkpoint - - # Update finalized checkpoint - if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch: - store.finalized_checkpoint = state.finalized_checkpoint - store.justified_checkpoint = state.current_justified_checkpoint + # Update checkpoints in store if necessary + update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) + + # Eagerly compute unrealized justification and finality. 
+ compute_pulled_up_tip(store, block_root) ``` diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index 830c487645..61714cf1a8 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -91,6 +91,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # Check the block is valid and compute the post-state state = pre_state.copy() + block_root = hash_tree_root(block) state_transition(state, signed_block, True) # Check the merge transition @@ -98,9 +99,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: validate_merge_block(block) # Add new block to the store - store.blocks[hash_tree_root(block)] = block + store.blocks[block_root] = block # Add new state for this block to the store - store.block_states[hash_tree_root(block)] = state + store.block_states[block_root] = state # Add proposer score boost if the block is timely time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT @@ -108,15 +109,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: if get_current_slot(store) == block.slot and is_before_attesting_interval: store.proposer_boost_root = hash_tree_root(block) - # Update justified checkpoint - if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch: - if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch: - store.best_justified_checkpoint = state.current_justified_checkpoint - if should_update_justified_checkpoint(store, state.current_justified_checkpoint): - store.justified_checkpoint = state.current_justified_checkpoint - - # Update finalized checkpoint - if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch: - store.finalized_checkpoint = state.finalized_checkpoint - store.justified_checkpoint = state.current_justified_checkpoint + # Update checkpoints in store if necessary + update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) + + # Eagerly 
compute unrealized justification and finality. + compute_pulled_up_tip(store, block_root) ``` diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index 8681975ca9..6cbebe507c 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -8,10 +8,10 @@ - [Introduction](#introduction) - [Fork choice](#fork-choice) - [Constant](#constant) - - [Preset](#preset) - [Configuration](#configuration) - [Helpers](#helpers) - [`LatestMessage`](#latestmessage) + - [`is_previous_epoch_justified`](#is_previous_epoch_justified) - [`Store`](#store) - [`get_forkchoice_store`](#get_forkchoice_store) - [`get_slots_since_genesis`](#get_slots_since_genesis) @@ -19,10 +19,16 @@ - [`compute_slots_since_epoch_start`](#compute_slots_since_epoch_start) - [`get_ancestor`](#get_ancestor) - [`get_weight`](#get_weight) + - [`get_voting_source`](#get_voting_source) - [`filter_block_tree`](#filter_block_tree) - [`get_filtered_block_tree`](#get_filtered_block_tree) - [`get_head`](#get_head) - - [`should_update_justified_checkpoint`](#should_update_justified_checkpoint) + - [`update_checkpoints`](#update_checkpoints) + - [`update_unrealized_checkpoints`](#update_unrealized_checkpoints) + - [Pull-up tip helpers](#pull-up-tip-helpers) + - [`compute_pulled_up_tip`](#compute_pulled_up_tip) + - [`on_tick` helpers](#on_tick-helpers) + - [`on_tick_per_slot`](#on_tick_per_slot) - [`on_attestation` helpers](#on_attestation-helpers) - [`validate_target_epoch_against_current_time`](#validate_target_epoch_against_current_time) - [`validate_on_attestation`](#validate_on_attestation) @@ -67,12 +73,6 @@ Any of the above handlers that trigger an unhandled exception (e.g. 
a failed ass | -------------------- | ----------- | | `INTERVALS_PER_SLOT` | `uint64(3)` | -### Preset - -| Name | Value | Unit | Duration | -| -------------------------------- | ------------ | :---: | :--------: | -| `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` | `2**3` (= 8) | slots | 96 seconds | - ### Configuration | Name | Value | @@ -92,8 +92,26 @@ class LatestMessage(object): root: Root ``` + +### `is_previous_epoch_justified` + +```python +def is_previous_epoch_justified(store: Store) -> bool: + current_slot = get_current_slot(store) + current_epoch = compute_epoch_at_slot(current_slot) + return store.justified_checkpoint.epoch + 1 == current_epoch +``` + + #### `Store` +The `Store` is responsible for tracking information required for the fork choice algorithm. The important fields being tracked are described below: + +- `justified_checkpoint`: the justified checkpoint being used as the starting point for the LMD GHOST fork choice algorithm. +- `finalized_checkpoint`: the highest finalized checkpoint that was seen. In general, the fork choice will consider only those blocks that are not conflicting with this checkpoint. +- `unrealized_justified_checkpoint` & `unrealized_finalized_checkpoint`: these track the highest justified & finalized checkpoints resp., without regard to whether on-chain ***realization***, i.e., FFG processing of new attestations, has occurred. This is an important distinction from `justified_checkpoint` & `finalized_checkpoint`, because they will only track the checkpoints that are realized on-chain. Note that on-chain processing of FFG information only happens at epoch boundaries. +- `unrealized_justifications`: stores a map of block root to the unrealized justified checkpoint observed in that block. 
+ ```python @dataclass class Store(object): @@ -101,13 +119,15 @@ class Store(object): genesis_time: uint64 justified_checkpoint: Checkpoint finalized_checkpoint: Checkpoint - best_justified_checkpoint: Checkpoint + unrealized_justified_checkpoint: Checkpoint + unrealized_finalized_checkpoint: Checkpoint proposer_boost_root: Root equivocating_indices: Set[ValidatorIndex] blocks: Dict[Root, BeaconBlock] = field(default_factory=dict) block_states: Dict[Root, BeaconState] = field(default_factory=dict) checkpoint_states: Dict[Checkpoint, BeaconState] = field(default_factory=dict) latest_messages: Dict[ValidatorIndex, LatestMessage] = field(default_factory=dict) + unrealized_justifications: Dict[Root, Checkpoint] = field(default_factory=dict) ``` #### `get_forkchoice_store` @@ -130,12 +150,14 @@ def get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock) - genesis_time=anchor_state.genesis_time, justified_checkpoint=justified_checkpoint, finalized_checkpoint=finalized_checkpoint, - best_justified_checkpoint=justified_checkpoint, + unrealized_justified_checkpoint=justified_checkpoint, + unrealized_finalized_checkpoint=finalized_checkpoint, proposer_boost_root=proposer_boost_root, equivocating_indices=set(), blocks={anchor_root: copy(anchor_block)}, block_states={anchor_root: copy(anchor_state)}, checkpoint_states={justified_checkpoint: copy(anchor_state)}, + unrealized_justifications={anchor_root: justified_checkpoint} ) ``` @@ -179,9 +201,12 @@ def get_ancestor(store: Store, root: Root, slot: Slot) -> Root: ```python def get_weight(store: Store, root: Root) -> Gwei: state = store.checkpoint_states[store.justified_checkpoint] - active_indices = get_active_validator_indices(state, get_current_epoch(state)) + unslashed_and_active_indices = [ + i for i in get_active_validator_indices(state, get_current_epoch(state)) + if not state.validators[i].slashed + ] attestation_score = Gwei(sum( - state.validators[i].effective_balance for i in active_indices + 
state.validators[i].effective_balance for i in unslashed_and_active_indices if (i in store.latest_messages and i not in store.equivocating_indices and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot) == root) @@ -199,8 +224,32 @@ def get_weight(store: Store, root: Root) -> Gwei: return attestation_score + proposer_score ``` +#### `get_voting_source` + +```python +def get_voting_source(store: Store, block_root: Root) -> Checkpoint: + """ + Compute the voting source checkpoint in the case that block with root ``block_root`` + is chosen as the head block + """ + block = store.blocks[block_root] + current_epoch = compute_epoch_at_slot(get_current_slot(store)) + block_epoch = compute_epoch_at_slot(block.slot) + if current_epoch > block_epoch: + # The block is from a prior epoch, the voting source will be pulled-up. + return store.unrealized_justifications[block_root] + else: + # The block is not from a prior epoch, therefore the voting source is + # not pulled up. + head_state = store.block_states[block_root] + return head_state.current_justified_checkpoint + +``` + #### `filter_block_tree` +*Note*: External calls to `filter_block_tree` (i.e., any calls that are not made by the recursive logic in this function) MUST have `block_root` as `store.justified_checkpoint`. + ```python def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconBlock]) -> bool: block = store.blocks[block_root] @@ -218,17 +267,30 @@ def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconB return True return False - # If leaf block, check finalized/justified checkpoints as matching latest. - head_state = store.block_states[block_root] + current_epoch = compute_epoch_at_slot(get_current_slot(store)) + voting_source = get_voting_source(store, block_root) + # The voting source should be at the same height as the store's justified checkpoint. 
correct_justified = ( store.justified_checkpoint.epoch == GENESIS_EPOCH - or head_state.current_justified_checkpoint == store.justified_checkpoint + or voting_source.epoch == store.justified_checkpoint.epoch ) + + # If the block should be pulled-up due to previous epoch being justified, also check + # that the unrealized justification is higher than the store's justified + # checkpoint, and the voting source is not more than two epochs ago. + if not correct_justified and is_previous_epoch_justified(store): + correct_justified = ( + store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch and + voting_source.epoch + 2 >= current_epoch + ) + + finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) correct_finalized = ( store.finalized_checkpoint.epoch == GENESIS_EPOCH - or head_state.finalized_checkpoint == store.finalized_checkpoint + or store.finalized_checkpoint.root == get_ancestor(store, block_root, finalized_slot) ) + # If expected finalized/justified, add to viable block-tree and signal viability to parent. if correct_justified and correct_finalized: blocks[block_root] = block @@ -272,25 +334,87 @@ def get_head(store: Store) -> Root: head = max(children, key=lambda root: (get_weight(store, root), root)) ``` -#### `should_update_justified_checkpoint` +#### `update_checkpoints` ```python -def should_update_justified_checkpoint(store: Store, new_justified_checkpoint: Checkpoint) -> bool: +def update_checkpoints(store: Store, justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint) -> None: """ - To address the bouncing attack, only update conflicting justified - checkpoints in the fork choice if in the early slots of the epoch. - Otherwise, delay incorporation of new justified checkpoint until next epoch boundary. 
+ Update checkpoints in store if necessary + """ + # Update justified checkpoint + if justified_checkpoint.epoch > store.justified_checkpoint.epoch: + store.justified_checkpoint = justified_checkpoint + + # Update finalized checkpoint + if finalized_checkpoint.epoch > store.finalized_checkpoint.epoch: + store.finalized_checkpoint = finalized_checkpoint +``` - See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for more detailed analysis and discussion. +#### `update_unrealized_checkpoints` + +```python +def update_unrealized_checkpoints(store: Store, unrealized_justified_checkpoint: Checkpoint, + unrealized_finalized_checkpoint: Checkpoint) -> None: """ - if compute_slots_since_epoch_start(get_current_slot(store)) < SAFE_SLOTS_TO_UPDATE_JUSTIFIED: - return True + Update unrealized checkpoints in store if necessary + """ + # Update unrealized justified checkpoint + if unrealized_justified_checkpoint.epoch > store.unrealized_justified_checkpoint.epoch: + store.unrealized_justified_checkpoint = unrealized_justified_checkpoint - justified_slot = compute_start_slot_at_epoch(store.justified_checkpoint.epoch) - if not get_ancestor(store, new_justified_checkpoint.root, justified_slot) == store.justified_checkpoint.root: - return False + # Update unrealized finalized checkpoint + if unrealized_finalized_checkpoint.epoch > store.unrealized_finalized_checkpoint.epoch: + store.unrealized_finalized_checkpoint = unrealized_finalized_checkpoint +``` + + +#### Pull-up tip helpers + +##### `compute_pulled_up_tip` + +```python +def compute_pulled_up_tip(store: Store, block_root: Root) -> None: + state = store.block_states[block_root].copy() + # Pull up the post-state of the block to the next epoch boundary + process_justification_and_finalization(state) + + # Store the unrealized justification. 
+ store.unrealized_justifications[block_root] = state.current_justified_checkpoint + + # Update unrealized checkpoints in store if necessary + update_unrealized_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) + + # If the block is from a prior epoch, apply the realized values. + block_epoch = compute_epoch_at_slot(store.blocks[block_root].slot) + current_epoch = compute_epoch_at_slot(get_current_slot(store)) + if block_epoch < current_epoch: + update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) +``` + + +#### `on_tick` helpers + +##### `on_tick_per_slot` + +```python +def on_tick_per_slot(store: Store, time: uint64) -> None: + previous_slot = get_current_slot(store) + + # update store time + store.time = time + + current_slot = get_current_slot(store) + + # Reset store.proposer_boost_root if this is a new slot + if current_slot > previous_slot: + store.proposer_boost_root = Root() - return True + # Not a new epoch, return + if not (current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0): + return + + # Pull-up justification and finalization from previous epoch + update_checkpoints(store, store.unrealized_justified_checkpoint, store.unrealized_finalized_checkpoint) ``` #### `on_attestation` helpers @@ -323,7 +447,8 @@ def validate_on_attestation(store: Store, attestation: Attestation, is_from_bloc # Check that the epoch number and slot number are matching assert target.epoch == compute_epoch_at_slot(attestation.data.slot) - # Attestations target be for a known block. If target block is unknown, delay consideration until the block is found + # Attestations target be for a known block. + # If target block is unknown, delay consideration until the block is found. assert target.root in store.blocks # Attestations must be for a known block. 
If block is unknown, delay consideration until the block is found @@ -355,7 +480,9 @@ def store_target_checkpoint_state(store: Store, target: Checkpoint) -> None: ##### `update_latest_messages` ```python -def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIndex], attestation: Attestation) -> None: +def update_latest_messages(store: Store, + attesting_indices: Sequence[ValidatorIndex], + attestation: Attestation) -> None: target = attestation.data.target beacon_block_root = attestation.data.beacon_block_root non_equivocating_attesting_indices = [i for i in attesting_indices if i not in store.equivocating_indices] @@ -371,27 +498,13 @@ def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIn ```python def on_tick(store: Store, time: uint64) -> None: - previous_slot = get_current_slot(store) - - # update store time - store.time = time - - current_slot = get_current_slot(store) - - # Reset store.proposer_boost_root if this is a new slot - if current_slot > previous_slot: - store.proposer_boost_root = Root() - - # Not a new epoch, return - if not (current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0): - return - - # Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain - if store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch: - finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) - ancestor_at_finalized_slot = get_ancestor(store, store.best_justified_checkpoint.root, finalized_slot) - if ancestor_at_finalized_slot == store.finalized_checkpoint.root: - store.justified_checkpoint = store.best_justified_checkpoint + # If the ``store.time`` falls behind, catch up slot by slot to + # ensure that every previous slot will be processed with ``on_tick_per_slot``. 
+ tick_slot = (time - store.genesis_time) // SECONDS_PER_SLOT + while get_current_slot(store) < tick_slot: + previous_time = store.genesis_time + (get_current_slot(store) + 1) * SECONDS_PER_SLOT + on_tick_per_slot(store, previous_time) + on_tick_per_slot(store, time) ``` #### `on_block` @@ -414,11 +527,12 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # Check the block is valid and compute the post-state state = pre_state.copy() + block_root = hash_tree_root(block) state_transition(state, signed_block, True) # Add new block to the store - store.blocks[hash_tree_root(block)] = block + store.blocks[block_root] = block # Add new state for this block to the store - store.block_states[hash_tree_root(block)] = state + store.block_states[block_root] = state # Add proposer score boost if the block is timely time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT @@ -426,17 +540,11 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: if get_current_slot(store) == block.slot and is_before_attesting_interval: store.proposer_boost_root = hash_tree_root(block) - # Update justified checkpoint - if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch: - if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch: - store.best_justified_checkpoint = state.current_justified_checkpoint - if should_update_justified_checkpoint(store, state.current_justified_checkpoint): - store.justified_checkpoint = state.current_justified_checkpoint + # Update checkpoints in store if necessary + update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) - # Update finalized checkpoint - if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch: - store.finalized_checkpoint = state.finalized_checkpoint - store.justified_checkpoint = state.current_justified_checkpoint + # Eagerly compute unrealized justification and finality. 
+ compute_pulled_up_tip(store, block_root) ``` #### `on_attestation` diff --git a/tests/core/pyspec/eth2spec/test/helpers/attestations.py b/tests/core/pyspec/eth2spec/test/helpers/attestations.py index c60d047b92..360e194f59 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/attestations.py +++ b/tests/core/pyspec/eth2spec/test/helpers/attestations.py @@ -187,7 +187,7 @@ def add_attestations_to_state(spec, state, attestations, slot): spec.process_attestation(state, attestation) -def _get_valid_attestation_at_slot(state, spec, slot_to_attest, participation_fn=None): +def get_valid_attestation_at_slot(state, spec, slot_to_attest, participation_fn=None): committees_per_slot = spec.get_committee_count_per_slot(state, spec.compute_epoch_at_slot(slot_to_attest)) for index in range(committees_per_slot): def participants_filter(comm): @@ -262,7 +262,7 @@ def state_transition_with_full_block(spec, if fill_cur_epoch and state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY: slot_to_attest = state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(state)): - attestations = _get_valid_attestation_at_slot( + attestations = get_valid_attestation_at_slot( state, spec, slot_to_attest, @@ -272,7 +272,7 @@ def state_transition_with_full_block(spec, block.body.attestations.append(attestation) if fill_prev_epoch: slot_to_attest = state.slot - spec.SLOTS_PER_EPOCH + 1 - attestations = _get_valid_attestation_at_slot( + attestations = get_valid_attestation_at_slot( state, spec, slot_to_attest, @@ -300,7 +300,7 @@ def state_transition_with_full_attestations_block(spec, state, fill_cur_epoch, f slots = state.slot % spec.SLOTS_PER_EPOCH for slot_offset in range(slots): target_slot = state.slot - slot_offset - attestations += _get_valid_attestation_at_slot( + attestations += get_valid_attestation_at_slot( state, spec, target_slot, @@ -311,7 +311,7 @@ def state_transition_with_full_attestations_block(spec, state, 
fill_cur_epoch, f slots = spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH for slot_offset in range(1, slots): target_slot = state.slot - (state.slot % spec.SLOTS_PER_EPOCH) - slot_offset - attestations += _get_valid_attestation_at_slot( + attestations += get_valid_attestation_at_slot( state, spec, target_slot, diff --git a/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py b/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py index bd8abd95b5..af231d87ff 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py +++ b/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py @@ -3,6 +3,7 @@ from eth2spec.test.helpers.attestations import ( next_epoch_with_attestations, next_slots_with_attestations, + state_transition_with_full_block, ) @@ -16,12 +17,13 @@ def get_anchor_root(spec, state): def tick_and_add_block(spec, store, signed_block, test_steps, valid=True, merge_block=False, block_not_found=False, is_optimistic=False): pre_state = store.block_states[signed_block.message.parent_root] - block_time = pre_state.genesis_time + signed_block.message.slot * spec.config.SECONDS_PER_SLOT if merge_block: assert spec.is_merge_transition_block(pre_state, signed_block.message.body) - if store.time < block_time: - on_tick_and_append_step(spec, store, block_time, test_steps) + block_time = pre_state.genesis_time + signed_block.message.slot * spec.config.SECONDS_PER_SLOT + while store.time < block_time: + time = pre_state.genesis_time + (spec.get_current_slot(store) + 1) * spec.config.SECONDS_PER_SLOT + on_tick_and_append_step(spec, store, time, test_steps) post_state = yield from add_block( spec, store, signed_block, test_steps, @@ -39,6 +41,11 @@ def add_attestation(spec, store, attestation, test_steps, is_from_block=False): test_steps.append({'attestation': get_attestation_file_name(attestation)}) +def add_attestations(spec, store, attestations, test_steps, is_from_block=False): + for attestation in attestations: + yield from add_attestation(spec, 
store, attestation, test_steps, is_from_block=is_from_block) + + def tick_and_run_on_attestation(spec, store, attestation, test_steps, is_from_block=False): parent_block = store.blocks[attestation.data.beacon_block_root] pre_state = store.block_states[spec.hash_tree_root(parent_block)] @@ -90,6 +97,7 @@ def get_attester_slashing_file_name(attester_slashing): def on_tick_and_append_step(spec, store, time, test_steps): spec.on_tick(store, time) test_steps.append({'tick': int(time)}) + output_store_checks(spec, store, test_steps) def run_on_block(spec, store, signed_block, valid=True): @@ -153,25 +161,7 @@ def add_block(spec, assert store.blocks[block_root] == signed_block.message assert store.block_states[block_root].hash_tree_root() == signed_block.message.state_root if not is_optimistic: - test_steps.append({ - 'checks': { - 'time': int(store.time), - 'head': get_formatted_head_output(spec, store), - 'justified_checkpoint': { - 'epoch': int(store.justified_checkpoint.epoch), - 'root': encode_hex(store.justified_checkpoint.root), - }, - 'finalized_checkpoint': { - 'epoch': int(store.finalized_checkpoint.epoch), - 'root': encode_hex(store.finalized_checkpoint.root), - }, - 'best_justified_checkpoint': { - 'epoch': int(store.best_justified_checkpoint.epoch), - 'root': encode_hex(store.best_justified_checkpoint.root), - }, - 'proposer_boost_root': encode_hex(store.proposer_boost_root), - } - }) + output_store_checks(spec, store, test_steps) return store.block_states[signed_block.message.hash_tree_root()] @@ -217,6 +207,32 @@ def get_formatted_head_output(spec, store): } +def output_head_check(spec, store, test_steps): + test_steps.append({ + 'checks': { + 'head': get_formatted_head_output(spec, store), + } + }) + + +def output_store_checks(spec, store, test_steps): + test_steps.append({ + 'checks': { + 'time': int(store.time), + 'head': get_formatted_head_output(spec, store), + 'justified_checkpoint': { + 'epoch': int(store.justified_checkpoint.epoch), + 'root': 
encode_hex(store.justified_checkpoint.root), + }, + 'finalized_checkpoint': { + 'epoch': int(store.finalized_checkpoint.epoch), + 'root': encode_hex(store.finalized_checkpoint.root), + }, + 'proposer_boost_root': encode_hex(store.proposer_boost_root), + } + }) + + def apply_next_epoch_with_attestations(spec, state, store, @@ -263,6 +279,39 @@ def apply_next_slots_with_attestations(spec, return post_state, store, last_signed_block +def is_ready_to_justify(spec, state): + """ + Check if the given ``state`` will trigger justification updates at epoch boundary. + """ + temp_state = state.copy() + spec.process_justification_and_finalization(temp_state) + return temp_state.current_justified_checkpoint.epoch > state.current_justified_checkpoint.epoch + + +def find_next_justifying_slot(spec, + state, + fill_cur_epoch, + fill_prev_epoch, + participation_fn=None): + temp_state = state.copy() + + signed_blocks = [] + justifying_slot = None + while justifying_slot is None: + signed_block = state_transition_with_full_block( + spec, + temp_state, + fill_cur_epoch, + fill_prev_epoch, + participation_fn, + ) + signed_blocks.append(signed_block) + if is_ready_to_justify(spec, temp_state): + justifying_slot = temp_state.slot + + return signed_blocks, justifying_slot + + def get_pow_block_file_name(pow_block): return f"pow_block_{encode_hex(pow_block.block_hash)}" diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py index 990c420313..2107a470a7 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py @@ -1,9 +1,9 @@ import random -from eth_utils import encode_hex from eth2spec.test.context import ( spec_state_test, with_all_phases, + with_altair_and_later, with_presets, ) from eth2spec.test.helpers.attestations import get_valid_attestation, next_epoch_with_attestations @@ -22,6 +22,8 @@ 
add_attestation, tick_and_run_on_attestation, tick_and_add_block, + output_head_check, + apply_next_epoch_with_attestations, ) from eth2spec.test.helpers.forks import ( is_post_altair, @@ -71,11 +73,7 @@ def test_chain_no_attestations(spec, state): anchor_root = get_anchor_root(spec, state) assert spec.get_head(store) == anchor_root - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - } - }) + output_head_check(spec, store, test_steps) # On receiving a block of `GENESIS_SLOT + 1` slot block_1 = build_empty_block_for_next_slot(spec, state) @@ -88,11 +86,7 @@ def test_chain_no_attestations(spec, state): yield from tick_and_add_block(spec, store, signed_block_2, test_steps) assert spec.get_head(store) == spec.hash_tree_root(block_2) - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - } - }) + output_head_check(spec, store, test_steps) yield 'steps', test_steps @@ -109,11 +103,7 @@ def test_split_tie_breaker_no_attestations(spec, state): yield 'anchor_block', anchor_block anchor_root = get_anchor_root(spec, state) assert spec.get_head(store) == anchor_root - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - } - }) + output_head_check(spec, store, test_steps) # Create block at slot 1 block_1_state = genesis_state.copy() @@ -135,11 +125,7 @@ def test_split_tie_breaker_no_attestations(spec, state): highest_root = max(spec.hash_tree_root(block_1), spec.hash_tree_root(block_2)) assert spec.get_head(store) == highest_root - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - } - }) + output_head_check(spec, store, test_steps) yield 'steps', test_steps @@ -156,11 +142,7 @@ def test_shorter_chain_but_heavier_weight(spec, state): yield 'anchor_block', anchor_block anchor_root = get_anchor_root(spec, state) assert spec.get_head(store) == anchor_root - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - } - 
}) + output_head_check(spec, store, test_steps) # build longer tree long_state = genesis_state.copy() @@ -183,11 +165,7 @@ def test_shorter_chain_but_heavier_weight(spec, state): yield from tick_and_run_on_attestation(spec, store, short_attestation, test_steps) assert spec.get_head(store) == spec.hash_tree_root(short_block) - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - } - }) + output_head_check(spec, store, test_steps) yield 'steps', test_steps @@ -203,11 +181,7 @@ def test_filtered_block_tree(spec, state): yield 'anchor_block', anchor_block anchor_root = get_anchor_root(spec, state) assert spec.get_head(store) == anchor_root - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - } - }) + output_head_check(spec, store, test_steps) # transition state past initial couple of epochs next_epoch(spec, state) @@ -227,13 +201,7 @@ def test_filtered_block_tree(spec, state): # the last block in the branch should be the head expected_head_root = spec.hash_tree_root(signed_blocks[-1].message) assert spec.get_head(store) == expected_head_root - - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - 'justified_checkpoint_root': encode_hex(store.justified_checkpoint.root), - } - }) + output_head_check(spec, store, test_steps) # # create branch containing the justified block but not containing enough on @@ -274,11 +242,7 @@ def test_filtered_block_tree(spec, state): # ensure that get_head still returns the head from the previous branch assert spec.get_head(store) == expected_head_root - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store) - } - }) + output_head_check(spec, store, test_steps) yield 'steps', test_steps @@ -295,11 +259,7 @@ def test_proposer_boost_correct_head(spec, state): yield 'anchor_block', anchor_block anchor_root = get_anchor_root(spec, state) assert spec.get_head(store) == anchor_root - test_steps.append({ - 'checks': { - 
'head': get_formatted_head_output(spec, store), - } - }) + output_head_check(spec, store, test_steps) # Build block that serves as head ONLY on timely arrival, and ONLY in that slot state_1 = genesis_state.copy() @@ -337,19 +297,14 @@ def test_proposer_boost_correct_head(spec, state): on_tick_and_append_step(spec, store, time, test_steps) assert store.proposer_boost_root == spec.Root() assert spec.get_head(store) == spec.hash_tree_root(block_2) - - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - } - }) + output_head_check(spec, store, test_steps) yield 'steps', test_steps @with_all_phases @spec_state_test -def test_discard_equivocations(spec, state): +def test_discard_equivocations_on_attester_slashing(spec, state): test_steps = [] genesis_state = state.copy() @@ -359,11 +314,7 @@ def test_discard_equivocations(spec, state): yield 'anchor_block', anchor_block anchor_root = get_anchor_root(spec, state) assert spec.get_head(store) == anchor_root - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - } - }) + output_head_check(spec, store, test_steps) # Build block that serves as head before discarding equivocations state_1 = genesis_state.copy() @@ -418,11 +369,359 @@ def test_discard_equivocations(spec, state): # The head should revert to block_2 yield from add_attester_slashing(spec, store, attester_slashing, test_steps) assert spec.get_head(store) == spec.hash_tree_root(block_2) + output_head_check(spec, store, test_steps) - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - } - }) + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_discard_equivocations_slashed_validator_censoring(spec, state): + # Check that the store does not count LMD votes from validators that are slashed in the justified state + test_steps = [] + # Initialization + store, anchor_block = 
get_genesis_forkchoice_store_and_block(spec, state) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 0 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 0 + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0 + + # We will slash all validators voting at the 2nd slot of epoch 0 + current_slot = spec.get_current_slot(store) + eqv_slot = current_slot + 1 + eqv_epoch = spec.compute_epoch_at_slot(eqv_slot) + assert eqv_slot % spec.SLOTS_PER_EPOCH == 1 + assert eqv_epoch == 0 + slashed_validators = [] + comm_count = spec.get_committee_count_per_slot(state, eqv_epoch) + for comm_index in range(comm_count): + comm = spec.get_beacon_committee(state, eqv_slot, comm_index) + slashed_validators += comm + assert len(slashed_validators) > 0 + + # Slash those validators in the state + for val_index in slashed_validators: + state.validators[val_index].slashed = True + + # Store this state as the anchor state + anchor_state = state.copy() + # Generate an anchor block with correct state root + anchor_block = spec.BeaconBlock(state_root=anchor_state.hash_tree_root()) + yield 'anchor_state', anchor_state + yield 'anchor_block', anchor_block + + # Get a new store with the anchor state & anchor block + store = spec.get_forkchoice_store(anchor_state, anchor_block) + + # Now generate the store checks + current_time = anchor_state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + # Create two competing blocks at eqv_slot + next_slots(spec, state, eqv_slot - state.slot - 1) + assert state.slot == eqv_slot - 1 + + state_1 = state.copy() + block_1 = build_empty_block_for_next_slot(spec, state_1) + signed_block_1 = state_transition_and_sign_block(spec, state_1, block_1) + + state_2 = state.copy() + block_2 = build_empty_block_for_next_slot(spec, state_2) + block_2.body.graffiti = 
block_2.body.graffiti = b'\x42' * 32 + signed_block_2 = state_transition_and_sign_block(spec, state_2, block_2) + + assert block_1.slot == block_2.slot == eqv_slot + + # Add both blocks to the store + yield from tick_and_add_block(spec, store, signed_block_1, test_steps) + yield from tick_and_add_block(spec, store, signed_block_2, test_steps) + + # Find out which block will win in tie breaking + if spec.hash_tree_root(block_1) < spec.hash_tree_root(block_2): + block_low_root = block_1.hash_tree_root() + block_low_root_post_state = state_1 + block_high_root = block_2.hash_tree_root() + else: + block_low_root = block_2.hash_tree_root() + block_low_root_post_state = state_2 + block_high_root = block_1.hash_tree_root() + assert block_low_root < block_high_root + + # Tick to next slot so proposer boost does not apply + current_time = store.genesis_time + (block_1.slot + 1) * spec.config.SECONDS_PER_SLOT + on_tick_and_append_step(spec, store, current_time, test_steps) + + # Check that block with higher root wins + assert spec.get_head(store) == block_high_root + + # Create attestation for block with lower root + attestation = get_valid_attestation(spec, block_low_root_post_state, slot=eqv_slot, index=0, signed=True) + # Check that all attesting validators were slashed in the anchor state + att_comm = spec.get_beacon_committee(block_low_root_post_state, eqv_slot, 0) + for i in att_comm: + assert anchor_state.validators[i].slashed + # Add attestation to the store + yield from add_attestation(spec, store, attestation, test_steps) + # Check that block with higher root still wins + assert spec.get_head(store) == block_high_root + output_head_check(spec, store, test_steps) + + yield 'steps', test_steps + + +@with_altair_and_later +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_voting_source_within_two_epoch(spec, state): + """ + Check that the store allows for a head block that has: + - store.voting_source[block_root].epoch != 
store.justified_checkpoint.epoch, and + - store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch, and + - store.voting_source[block_root].epoch + 2 >= current_epoch, and + - store.finalized_checkpoint.root == get_ancestor(store, block_root, finalized_slot) + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert store.finalized_checkpoint.epoch == 2 + + # Copy the state to use later + fork_state = state.copy() + + # Fill epoch 4 + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4 + assert store.finalized_checkpoint.epoch == 3 + + # Create a fork from the earlier saved state + next_epoch(spec, fork_state) + assert spec.compute_epoch_at_slot(fork_state.slot) == 5 + _, signed_blocks, fork_state = next_epoch_with_attestations(spec, fork_state, True, True) + # Only keep the blocks from epoch 5, so discard the last generated block + signed_blocks = signed_blocks[:-1] + last_fork_block = signed_blocks[-1].message + assert 
spec.compute_epoch_at_slot(last_fork_block.slot) == 5 + + # Now add the fork to the store + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4 + assert store.finalized_checkpoint.epoch == 3 + + # Check that the last block from the fork is the head + # LMD votes for the competing branch are overwritten so this fork should win + last_fork_block_root = last_fork_block.hash_tree_root() + # assert store.voting_source[last_fork_block_root].epoch != store.justified_checkpoint.epoch + assert store.unrealized_justifications[last_fork_block_root].epoch >= store.justified_checkpoint.epoch + # assert store.voting_source[last_fork_block_root].epoch + 2 >= \ + # spec.compute_epoch_at_slot(spec.get_current_slot(store)) + finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + assert store.finalized_checkpoint.root == spec.get_ancestor(store, last_fork_block_root, finalized_slot) + assert spec.get_head(store) == last_fork_block_root + + yield 'steps', test_steps + + +@with_altair_and_later +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_voting_source_beyond_two_epoch(spec, state): + """ + Check that the store doesn't allow for a head block that has: + - store.voting_source[block_root].epoch != store.justified_checkpoint.epoch, and + - store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch, and + - store.voting_source[block_root].epoch + 2 < current_epoch, and + - store.finalized_checkpoint.root == get_ancestor(store, block_root, finalized_slot) + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * 
spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert store.finalized_checkpoint.epoch == 2 + + # Copy the state to use later + fork_state = state.copy() + + # Fill epoch 4 and 5 + for _ in range(2): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5 + assert store.finalized_checkpoint.epoch == 4 + + # Create a fork from the earlier saved state + for _ in range(2): + next_epoch(spec, fork_state) + assert spec.compute_epoch_at_slot(fork_state.slot) == 6 + assert fork_state.current_justified_checkpoint.epoch == 3 + _, signed_blocks, fork_state = next_epoch_with_attestations(spec, fork_state, True, True) + # Only keep the blocks from epoch 6, so discard the last generated block + signed_blocks = signed_blocks[:-1] + last_fork_block = signed_blocks[-1].message + assert spec.compute_epoch_at_slot(last_fork_block.slot) == 6 + + # Store the head before adding the fork to the store + correct_head = spec.get_head(store) + + # Now add the fork to the store + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 + assert 
state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5 + assert store.finalized_checkpoint.epoch == 4 + + last_fork_block_root = last_fork_block.hash_tree_root() + last_fork_block_state = store.block_states[last_fork_block_root] + assert last_fork_block_state.current_justified_checkpoint.epoch == 3 + + # Check that the head is unchanged + # assert store.voting_source[last_fork_block_root].epoch != store.justified_checkpoint.epoch + assert store.unrealized_justifications[last_fork_block_root].epoch >= store.justified_checkpoint.epoch + # assert store.voting_source[last_fork_block_root].epoch + 2 < \ + # spec.compute_epoch_at_slot(spec.get_current_slot(store)) + finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + assert store.finalized_checkpoint.root == spec.get_ancestor(store, last_fork_block_root, finalized_slot) + assert spec.get_head(store) == correct_head + + yield 'steps', test_steps + + +""" +Note: +We are unable to generate test vectors that check failure of the correct_finalized condition. 
+We cannot generate a block that: +- has !correct_finalized, and +- has correct_justified, and +- is a descendant of store.justified_checkpoint.root + +The block being a descendant of store.justified_checkpoint.root is necessary because +filter_block_tree descends the tree starting at store.justified_checkpoint.root + +@with_all_phases +@spec_state_test +def test_incorrect_finalized(spec, state): + # Check that the store doesn't allow for a head block that has: + # - store.voting_source[block_root].epoch == store.justified_checkpoint.epoch, and + # - store.finalized_checkpoint.epoch != GENESIS_EPOCH, and + # - store.finalized_checkpoint.root != get_ancestor(store, block_root, finalized_slot) + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 4 + for _ in range(4): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4 + assert store.finalized_checkpoint.epoch == 3 + + # Identify the fork block as the last block in epoch 4 + fork_block_root = state.latest_block_header.parent_root + fork_block = store.blocks[fork_block_root] + assert spec.compute_epoch_at_slot(fork_block.slot) == 4 + # Copy the state to use later + fork_state = store.block_states[fork_block_root].copy() + assert spec.compute_epoch_at_slot(fork_state.slot) == 4 + assert 
fork_state.current_justified_checkpoint.epoch == 3 + assert fork_state.finalized_checkpoint.epoch == 2 + + # Create a fork from the earlier saved state + for _ in range(2): + next_epoch(spec, fork_state) + assert spec.compute_epoch_at_slot(fork_state.slot) == 6 + assert fork_state.current_justified_checkpoint.epoch == 4 + assert fork_state.finalized_checkpoint.epoch == 3 + # Fill epoch 6 + signed_blocks = [] + _, signed_blocks_1, fork_state = next_epoch_with_attestations(spec, fork_state, True, False) + signed_blocks += signed_blocks_1 + assert spec.compute_epoch_at_slot(fork_state.slot) == 7 + # Check that epoch 6 is justified in this fork - it will be used as voting source for the tip of this fork + assert fork_state.current_justified_checkpoint.epoch == 6 + assert fork_state.finalized_checkpoint.epoch == 3 + # Create a chain in epoch 7 that has new justification for epoch 7 + _, signed_blocks_2, fork_state = next_epoch_with_attestations(spec, fork_state, True, False) + # Only keep the blocks from epoch 7, so discard the last generated block + signed_blocks_2 = signed_blocks_2[:-1] + signed_blocks += signed_blocks_2 + last_fork_block = signed_blocks[-1].message + assert spec.compute_epoch_at_slot(last_fork_block.slot) == 7 + + # Now add the fork to the store + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7 + assert store.justified_checkpoint.epoch == 6 + assert store.finalized_checkpoint.epoch == 3 + + # Fill epoch 5 and 6 in the original chain + for _ in range(2): + state, store, signed_head_block = yield from apply_next_epoch_with_attestations( + spec, state, store, True, False, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 6 + assert store.finalized_checkpoint.epoch == 5 + # Store the expected 
head + head_root = signed_head_block.message.hash_tree_root() + + # Check that the head is unchanged + last_fork_block_root = last_fork_block.hash_tree_root() + assert store.voting_source[last_fork_block_root].epoch == store.justified_checkpoint.epoch + assert store.finalized_checkpoint.epoch != spec.GENESIS_EPOCH + finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + assert store.finalized_checkpoint.root != spec.get_ancestor(store, last_fork_block_root, finalized_slot) + assert spec.get_head(store) != last_fork_block_root + assert spec.get_head(store) == head_root yield 'steps', test_steps +""" diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py index 23514b325b..eaae825ab2 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py @@ -2,12 +2,16 @@ from eth_utils import encode_hex from eth2spec.utils.ssz.ssz_impl import hash_tree_root -from eth2spec.test.context import MINIMAL, spec_state_test, with_all_phases, with_presets +from eth2spec.test.context import ( + MINIMAL, + spec_state_test, + with_all_phases, + with_altair_and_later, + with_presets +) from eth2spec.test.helpers.attestations import ( next_epoch_with_attestations, next_slots_with_attestations, - state_transition_with_full_block, - state_transition_with_full_attestations_block, ) from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, @@ -22,6 +26,8 @@ tick_and_add_block, apply_next_epoch_with_attestations, apply_next_slots_with_attestations, + is_ready_to_justify, + find_next_justifying_slot, ) from eth2spec.test.helpers.state import ( next_epoch, @@ -280,301 +286,22 @@ def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state): yield 'steps', test_steps -@with_all_phases -@spec_state_test -@with_presets([MINIMAL], reason="mainnet config 
requires too many pre-generated public/private keys") -def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state): - """ - Test `should_update_justified_checkpoint`: - compute_slots_since_epoch_start(get_current_slot(store)) < SAFE_SLOTS_TO_UPDATE_JUSTIFIED - """ - test_steps = [] - # Initialization - store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) - yield 'anchor_state', state - yield 'anchor_block', anchor_block - current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time - on_tick_and_append_step(spec, store, current_time, test_steps) - assert store.time == current_time - - # Skip epoch 0 & 1 - for _ in range(2): - next_epoch(spec, state) - # Fill epoch 2 - state, store, _ = yield from apply_next_epoch_with_attestations( - spec, state, store, True, False, test_steps=test_steps) - assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0 - assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2 - # Skip epoch 3 & 4 - for _ in range(2): - next_epoch(spec, state) - # Epoch 5: Attest current epoch - state, store, _ = yield from apply_next_epoch_with_attestations( - spec, state, store, True, False, participation_fn=_drop_random_one_third, test_steps=test_steps) - assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0 - assert state.current_justified_checkpoint.epoch == 2 - assert store.justified_checkpoint.epoch == 2 - assert state.current_justified_checkpoint == store.justified_checkpoint - - # Skip epoch 6 - next_epoch(spec, state) - - pre_state = state.copy() - - # Build a block to justify epoch 5 - signed_block = state_transition_with_full_block(spec, state, True, True) - assert state.finalized_checkpoint.epoch == 0 - assert state.current_justified_checkpoint.epoch == 5 - assert state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch - assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH < 
spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED - # Run on_block - yield from tick_and_add_block(spec, store, signed_block, test_steps) - # Ensure justified_checkpoint has been changed but finality is unchanged - assert store.justified_checkpoint.epoch == 5 - assert store.justified_checkpoint == state.current_justified_checkpoint - assert store.finalized_checkpoint.epoch == pre_state.finalized_checkpoint.epoch == 0 - - yield 'steps', test_steps - - -@with_all_phases -@with_presets([MINIMAL], reason="It assumes that `MAX_ATTESTATIONS` >= 2/3 attestations of an epoch") -@spec_state_test -def test_on_block_outside_safe_slots_but_finality(spec, state): - """ - Test `should_update_justified_checkpoint` case - - compute_slots_since_epoch_start(get_current_slot(store)) > SAFE_SLOTS_TO_UPDATE_JUSTIFIED - - new_justified_checkpoint and store.justified_checkpoint.root are NOT conflicting - - Thus should_update_justified_checkpoint returns True. - - Part of this script is similar to `test_new_justified_is_later_than_store_justified`. 
- """ - test_steps = [] - # Initialization - store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) - yield 'anchor_state', state - yield 'anchor_block', anchor_block - current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time - on_tick_and_append_step(spec, store, current_time, test_steps) - assert store.time == current_time - - # Skip epoch 0 - next_epoch(spec, state) - # Fill epoch 1 to 3, attest current epoch - for _ in range(3): - state, store, _ = yield from apply_next_epoch_with_attestations( - spec, state, store, True, False, test_steps=test_steps) - assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 - assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 - - # Skip epoch 4-6 - for _ in range(3): - next_epoch(spec, state) - - # epoch 7 - state, store, _ = yield from apply_next_epoch_with_attestations( - spec, state, store, True, True, test_steps=test_steps) - assert state.finalized_checkpoint.epoch == 2 - assert state.current_justified_checkpoint.epoch == 7 - - # epoch 8, attest the first 5 blocks - state, store, _ = yield from apply_next_slots_with_attestations( - spec, state, store, 5, True, True, test_steps) - assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 - assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 7 - - # Propose a block at epoch 9, 5th slot - next_epoch(spec, state) - next_slots(spec, state, 4) - signed_block = state_transition_with_full_attestations_block(spec, state, True, True) - yield from tick_and_add_block(spec, store, signed_block, test_steps) - assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 - assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 7 - - # Propose an empty block at epoch 10, SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2 slot - # This block would trigger justification and finality updates on 
store - next_epoch(spec, state) - next_slots(spec, state, 4) - block = build_empty_block_for_next_slot(spec, state) - signed_block = state_transition_and_sign_block(spec, state, block) - assert state.finalized_checkpoint.epoch == 7 - assert state.current_justified_checkpoint.epoch == 8 - # Step time past safe slots and run on_block - if store.time < spec.compute_time_at_slot(state, signed_block.message.slot): - time = store.genesis_time + signed_block.message.slot * spec.config.SECONDS_PER_SLOT - on_tick_and_append_step(spec, store, time, test_steps) - assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED - yield from add_block(spec, store, signed_block, test_steps) - - # Ensure justified_checkpoint finality has been changed - assert store.finalized_checkpoint.epoch == 7 - assert store.finalized_checkpoint == state.finalized_checkpoint - assert store.justified_checkpoint.epoch == 8 - assert store.justified_checkpoint == state.current_justified_checkpoint - - yield 'steps', test_steps - - -@with_all_phases -@with_presets([MINIMAL], reason="It assumes that `MAX_ATTESTATIONS` >= 2/3 attestations of an epoch") -@spec_state_test -def test_new_justified_is_later_than_store_justified(spec, state): - """ - J: Justified - F: Finalized - fork_1_state (forked from genesis): - epoch - [0] <- [1] <- [2] <- [3] <- [4] - F J - - fork_2_state (forked from fork_1_state's epoch 2): - epoch - └──── [3] <- [4] <- [5] <- [6] - F J - - fork_3_state (forked from genesis): - [0] <- [1] <- [2] <- [3] <- [4] <- [5] - F J - """ - # The 1st fork, from genesis - fork_1_state = state.copy() - # The 3rd fork, from genesis - fork_3_state = state.copy() - - test_steps = [] - # Initialization - store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) - yield 'anchor_state', state - yield 'anchor_block', anchor_block - current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time - on_tick_and_append_step(spec, store, 
current_time, test_steps) - assert store.time == current_time - - # ----- Process fork_1_state - # Skip epoch 0 - next_epoch(spec, fork_1_state) - # Fill epoch 1 with previous epoch attestations - fork_1_state, store, _ = yield from apply_next_epoch_with_attestations( - spec, fork_1_state, store, False, True, test_steps=test_steps) - - # Fork `fork_2_state` at the start of epoch 2 - fork_2_state = fork_1_state.copy() - assert spec.get_current_epoch(fork_2_state) == 2 - - # Skip epoch 2 - next_epoch(spec, fork_1_state) - # # Fill epoch 3 & 4 with previous epoch attestations - for _ in range(2): - fork_1_state, store, _ = yield from apply_next_epoch_with_attestations( - spec, fork_1_state, store, False, True, test_steps=test_steps) - - assert fork_1_state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0 - assert fork_1_state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 - assert store.justified_checkpoint == fork_1_state.current_justified_checkpoint - - # ------ fork_2_state: Create a chain to set store.best_justified_checkpoint - # NOTE: The goal is to make `store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch` - all_blocks = [] - - # Proposed an empty block at epoch 2, 1st slot - block = build_empty_block_for_next_slot(spec, fork_2_state) - signed_block = state_transition_and_sign_block(spec, fork_2_state, block) - yield from tick_and_add_block(spec, store, signed_block, test_steps) - assert fork_2_state.current_justified_checkpoint.epoch == 0 - - # Skip to epoch 4 - for _ in range(2): - next_epoch(spec, fork_2_state) - assert fork_2_state.current_justified_checkpoint.epoch == 0 - - # Propose a block at epoch 4, 5th slot - # Propose a block at epoch 5, 5th slot - for _ in range(2): - next_epoch(spec, fork_2_state) - next_slots(spec, fork_2_state, 4) - signed_block = state_transition_with_full_attestations_block(spec, fork_2_state, True, True) - yield from tick_and_add_block(spec, store, 
signed_block, test_steps) - assert fork_2_state.current_justified_checkpoint.epoch == 0 - - # Propose a block at epoch 6, SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2 slot - next_epoch(spec, fork_2_state) - next_slots(spec, fork_2_state, spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2) - signed_block = state_transition_with_full_attestations_block(spec, fork_2_state, True, True) - assert fork_2_state.finalized_checkpoint.epoch == 0 - assert fork_2_state.current_justified_checkpoint.epoch == 5 - # Check SAFE_SLOTS_TO_UPDATE_JUSTIFIED - time = store.genesis_time + fork_2_state.slot * spec.config.SECONDS_PER_SLOT - on_tick_and_append_step(spec, store, time, test_steps) - assert spec.compute_slots_since_epoch_start(spec.get_current_slot(store)) >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED - # Run on_block - yield from add_block(spec, store, signed_block, test_steps) - assert store.finalized_checkpoint.epoch == 0 - assert store.justified_checkpoint.epoch == 3 - assert store.best_justified_checkpoint.epoch == 5 - - # ------ fork_3_state: Create another chain to test the - # "Update justified if new justified is later than store justified" case - all_blocks = [] - for _ in range(3): - next_epoch(spec, fork_3_state) - - # epoch 3 - _, signed_blocks, fork_3_state = next_epoch_with_attestations(spec, fork_3_state, True, True) - all_blocks += signed_blocks - assert fork_3_state.finalized_checkpoint.epoch == 0 - - # epoch 4, attest the first 5 blocks - _, blocks, fork_3_state = next_slots_with_attestations(spec, fork_3_state, 5, True, True) - all_blocks += blocks.copy() - assert fork_3_state.finalized_checkpoint.epoch == 0 - - # Propose a block at epoch 5, 5th slot - next_epoch(spec, fork_3_state) - next_slots(spec, fork_3_state, 4) - signed_block = state_transition_with_full_block(spec, fork_3_state, True, True) - all_blocks.append(signed_block.copy()) - assert fork_3_state.finalized_checkpoint.epoch == 0 - - # Propose a block at epoch 6, 5th slot - next_epoch(spec, fork_3_state) - next_slots(spec, 
fork_3_state, 4) - signed_block = state_transition_with_full_block(spec, fork_3_state, True, True) - all_blocks.append(signed_block.copy()) - assert fork_3_state.finalized_checkpoint.epoch == 3 - assert fork_3_state.current_justified_checkpoint.epoch == 4 - - # Apply blocks of `fork_3_state` to `store` - for block in all_blocks: - if store.time < spec.compute_time_at_slot(fork_2_state, block.message.slot): - time = store.genesis_time + block.message.slot * spec.config.SECONDS_PER_SLOT - on_tick_and_append_step(spec, store, time, test_steps) - yield from add_block(spec, store, block, test_steps) - - assert store.finalized_checkpoint == fork_3_state.finalized_checkpoint - assert store.justified_checkpoint == fork_3_state.current_justified_checkpoint - assert store.justified_checkpoint != store.best_justified_checkpoint - assert store.best_justified_checkpoint == fork_2_state.current_justified_checkpoint - - yield 'steps', test_steps - - +""" @with_all_phases @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): - """ - J: Justified - F: Finalized - state (forked from genesis): - epoch - [0] <- [1] <- [2] <- [3] <- [4] <- [5] - F J + # J: Justified + # F: Finalized + # state (forked from genesis): + # epoch + # [0] <- [1] <- [2] <- [3] <- [4] <- [5] + # F J + + # another_state (forked from epoch 0): + # └──── [1] <- [2] <- [3] <- [4] <- [5] + # F J - another_state (forked from epoch 0): - └──── [1] <- [2] <- [3] <- [4] <- [5] - F J - """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) @@ -631,9 +358,15 @@ def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): assert ancestor_at_finalized_slot != store.finalized_checkpoint.root assert store.finalized_checkpoint == another_state.finalized_checkpoint - assert store.justified_checkpoint == another_state.current_justified_checkpoint + + # NOTE: inconsistent 
justified/finalized checkpoints in this edge case. + # This can only happen when >1/3 validators are slashable, as this testcase requires that + # store.justified_checkpoint is higher than store.finalized_checkpoint and on a different branch. + # Ignoring this testcase for now. + assert store.justified_checkpoint != another_state.current_justified_checkpoint yield 'steps', test_steps +""" @with_all_phases @@ -701,7 +434,9 @@ def test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state): assert ancestor_at_finalized_slot == store.finalized_checkpoint.root assert store.finalized_checkpoint == another_state.finalized_checkpoint - assert store.justified_checkpoint == another_state.current_justified_checkpoint + + # NOTE: inconsistent justified/finalized checkpoints in this edge case + assert store.justified_checkpoint != another_state.current_justified_checkpoint yield 'steps', test_steps @@ -797,3 +532,797 @@ def test_proposer_boost_root_same_slot_untimely_block(spec, state): }) yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_justification_withholding(spec, state): + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + for _ in range(2): + next_epoch(spec, state) + + for _ in range(2): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert spec.get_current_epoch(state) == 4 + + # ------------ + + # Create attacker's fork that can 
justify epoch 4 + # Do not apply attacker's blocks to store + attacker_state = state.copy() + attacker_signed_blocks = [] + + while not is_ready_to_justify(spec, attacker_state): + attacker_state, signed_blocks, attacker_state = next_slots_with_attestations( + spec, attacker_state, 1, True, False) + attacker_signed_blocks += signed_blocks + + assert attacker_state.finalized_checkpoint.epoch == 2 + assert attacker_state.current_justified_checkpoint.epoch == 3 + assert spec.get_current_epoch(attacker_state) == 4 + + # ------------ + + # The honest fork sees all except the last block from attacker_signed_blocks + # Apply honest fork to store + honest_signed_blocks = attacker_signed_blocks[:-1] + assert len(honest_signed_blocks) > 0 + + for signed_block in honest_signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + + last_honest_block = honest_signed_blocks[-1].message + honest_state = store.block_states[hash_tree_root(last_honest_block)].copy() + + assert honest_state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + assert honest_state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert spec.get_current_epoch(honest_state) == 4 + + # Create & apply an honest block in epoch 5 that can justify epoch 4 + next_epoch(spec, honest_state) + assert spec.get_current_epoch(honest_state) == 5 + + honest_block = build_empty_block_for_next_slot(spec, honest_state) + honest_block.body.attestations = attacker_signed_blocks[-1].message.body.attestations + signed_block = state_transition_and_sign_block(spec, honest_state, honest_block) + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert spec.get_head(store) == hash_tree_root(honest_block) + assert is_ready_to_justify(spec, honest_state) + + # 
------------ + + # When the attacker's block is received, the honest block is still the head + # This relies on the honest block's LMD score increasing due to proposer boost + yield from tick_and_add_block(spec, store, attacker_signed_blocks[-1], test_steps) + assert store.finalized_checkpoint.epoch == 3 + assert store.justified_checkpoint.epoch == 4 + assert spec.get_head(store) == hash_tree_root(honest_block) + + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_justification_withholding_reverse_order(spec, state): + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + for _ in range(2): + next_epoch(spec, state) + + for _ in range(2): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert spec.get_current_epoch(state) == 4 + + # ------------ + + # Create attacker's fork that can justify epoch 4 + attacker_state = state.copy() + attacker_signed_blocks = [] + + while not is_ready_to_justify(spec, attacker_state): + attacker_state, signed_blocks, attacker_state = next_slots_with_attestations( + spec, attacker_state, 1, True, False) + assert len(signed_blocks) == 1 + attacker_signed_blocks += signed_blocks + yield from tick_and_add_block(spec, store, signed_blocks[0], test_steps) + + assert attacker_state.finalized_checkpoint.epoch == 2 + assert attacker_state.current_justified_checkpoint.epoch == 3 + assert 
spec.get_current_epoch(attacker_state) == 4 + attackers_head = hash_tree_root(attacker_signed_blocks[-1].message) + assert spec.get_head(store) == attackers_head + + # ------------ + + # The honest fork sees all except the last block from attacker_signed_blocks + honest_signed_blocks = attacker_signed_blocks[:-1] + assert len(honest_signed_blocks) > 0 + + last_honest_block = honest_signed_blocks[-1].message + honest_state = store.block_states[hash_tree_root(last_honest_block)].copy() + + assert honest_state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + assert honest_state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert spec.get_current_epoch(honest_state) == 4 + + # Create an honest block in epoch 5 that can justify epoch 4 + next_epoch(spec, honest_state) + assert spec.get_current_epoch(honest_state) == 5 + + honest_block = build_empty_block_for_next_slot(spec, honest_state) + honest_block.body.attestations = attacker_signed_blocks[-1].message.body.attestations + signed_block = state_transition_and_sign_block(spec, honest_state, honest_block) + assert honest_state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + assert honest_state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert is_ready_to_justify(spec, honest_state) + + # When the honest block is received, the honest block becomes the head + # This relies on the honest block's LMD score increasing due to proposer boost + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert store.finalized_checkpoint.epoch == 3 + assert store.justified_checkpoint.epoch == 4 + assert spec.get_head(store) == hash_tree_root(honest_block) + + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_justification_update_beginning_of_epoch(spec, state): + """ + Check that the store's justified checkpoint is updated when a block 
containing better justification is + revealed at the first slot of an epoch + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # Create a block that has new justification information contained within it, but don't add to store yet + another_state = state.copy() + _, signed_blocks, another_state = next_epoch_with_attestations(spec, another_state, True, False) + assert spec.compute_epoch_at_slot(another_state.slot) == 5 + assert another_state.current_justified_checkpoint.epoch == 4 + + # Tick store to the start of the next epoch + slot = spec.get_current_slot(store) + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) + current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + + # Now add the blocks & check that justification update was triggered + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.get_head(store) == signed_block.message.hash_tree_root() + assert store.justified_checkpoint.epoch == 4 + + yield 'steps', test_steps + + +@with_all_phases 
+@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_justification_update_end_of_epoch(spec, state): + """ + Check that the store's justified checkpoint is updated when a block containing better justification is + revealed at the last slot of an epoch + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # Create a block that has new justification information contained within it, but don't add to store yet + another_state = state.copy() + _, signed_blocks, another_state = next_epoch_with_attestations(spec, another_state, True, False) + assert spec.compute_epoch_at_slot(another_state.slot) == 5 + assert another_state.current_justified_checkpoint.epoch == 4 + + # Tick store to the last slot of the next epoch + slot = spec.get_current_slot(store) + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) + slot = slot + spec.SLOTS_PER_EPOCH - 1 + current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + + # Now add the blocks & check that justification update was triggered + for signed_block in signed_blocks: + yield 
from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.get_head(store) == signed_block.message.hash_tree_root() + assert store.justified_checkpoint.epoch == 4 + + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_incompatible_justification_update_start_of_epoch(spec, state): + """ + Check that the store's justified checkpoint is updated when a block containing better justification is + revealed at the start slot of an epoch, even when the better justified checkpoint is not a descendant of + the store's justified checkpoint + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + + # Copy the state to create a fork later + another_state = state.copy() + + # Fill epoch 4 and 5 + for _ in range(2): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5 + assert state.finalized_checkpoint.epoch 
== store.finalized_checkpoint.epoch == 4 + + # Create a block that has new justification information contained within it, but don't add to store yet + next_epoch(spec, another_state) + signed_blocks = [] + _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, False, False) + signed_blocks += signed_blocks_temp + assert spec.compute_epoch_at_slot(another_state.slot) == 6 + assert another_state.current_justified_checkpoint.epoch == 3 + assert another_state.finalized_checkpoint.epoch == 2 + _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, True, False) + signed_blocks += signed_blocks_temp + assert spec.compute_epoch_at_slot(another_state.slot) == 7 + assert another_state.current_justified_checkpoint.epoch == 6 + assert another_state.finalized_checkpoint.epoch == 2 + last_block_root = another_state.latest_block_header.parent_root + + # Tick store to the last slot of the next epoch + slot = another_state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) + current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 8 + + # Now add the blocks & check that justification update was triggered + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + finalized_slot = spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch) + assert spec.get_ancestor(store, last_block_root, finalized_slot) == state.finalized_checkpoint.root + justified_slot = spec.compute_start_slot_at_epoch(state.current_justified_checkpoint.epoch) + assert spec.get_ancestor(store, last_block_root, justified_slot) != state.current_justified_checkpoint.root + assert store.finalized_checkpoint.epoch == 4 + assert store.justified_checkpoint.epoch == 6 + + yield 'steps', test_steps + + +@with_all_phases 
+@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_incompatible_justification_update_end_of_epoch(spec, state): + """ + Check that the store's justified checkpoint is updated when a block containing better justification is + revealed at the last slot of an epoch, even when the better justified checkpoint is not a descendant of + the store's justified checkpoint + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + + # Copy the state to create a fork later + another_state = state.copy() + + # Fill epoch 4 and 5 + for _ in range(2): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5 + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 4 + + # Create a block that has new justification information contained within it, but don't add to store yet + next_epoch(spec, another_state) + signed_blocks = [] + _, signed_blocks_temp, 
another_state = next_epoch_with_attestations(spec, another_state, False, False) + signed_blocks += signed_blocks_temp + assert spec.compute_epoch_at_slot(another_state.slot) == 6 + assert another_state.current_justified_checkpoint.epoch == 3 + assert another_state.finalized_checkpoint.epoch == 2 + _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, True, False) + signed_blocks += signed_blocks_temp + assert spec.compute_epoch_at_slot(another_state.slot) == 7 + assert another_state.current_justified_checkpoint.epoch == 6 + assert another_state.finalized_checkpoint.epoch == 2 + last_block_root = another_state.latest_block_header.parent_root + + # Tick store to the last slot of the next epoch + slot = another_state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) + slot = slot + spec.SLOTS_PER_EPOCH - 1 + current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 8 + + # Now add the blocks & check that justification update was triggered + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + finalized_slot = spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch) + assert spec.get_ancestor(store, last_block_root, finalized_slot) == state.finalized_checkpoint.root + justified_slot = spec.compute_start_slot_at_epoch(state.current_justified_checkpoint.epoch) + assert spec.get_ancestor(store, last_block_root, justified_slot) != state.current_justified_checkpoint.root + assert store.finalized_checkpoint.epoch == 4 + assert store.justified_checkpoint.epoch == 6 + + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_justified_update_not_realized_finality(spec, state): + """ + Check that the store updates its justified checkpoint if a higher 
justified checkpoint is found that is + a descendant of the finalized checkpoint, but does not know about the finality + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # We'll make the current head block the finalized block + finalized_root = spec.get_head(store) + finalized_block = store.blocks[finalized_root] + assert spec.compute_epoch_at_slot(finalized_block.slot) == 4 + assert spec.get_head(store) == finalized_root + # Copy the post-state to use later + another_state = state.copy() + + # Create a fork that finalizes our block + for _ in range(2): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5 + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 4 + assert state.finalized_checkpoint.root == store.finalized_checkpoint.root == finalized_root + + # Create a fork for a better justification that is a descendant of the finalized block, + # but does not realize the finality. 
+ # Do not add these blocks to the store yet + next_epoch(spec, another_state) + signed_blocks = [] + _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, False, False) + signed_blocks += signed_blocks_temp + assert spec.compute_epoch_at_slot(another_state.slot) == 6 + assert another_state.current_justified_checkpoint.epoch == 3 + assert another_state.finalized_checkpoint.epoch == 2 + _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, True, False) + signed_blocks += signed_blocks_temp + assert spec.compute_epoch_at_slot(another_state.slot) == 7 + assert another_state.current_justified_checkpoint.epoch == 6 + + # Now add the blocks & check that justification update was triggered + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert store.justified_checkpoint.epoch == 6 + assert store.finalized_checkpoint.epoch == 4 + last_block = signed_blocks[-1] + last_block_root = last_block.message.hash_tree_root() + ancestor_at_finalized_slot = spec.get_ancestor(store, last_block_root, finalized_block.slot) + assert ancestor_at_finalized_slot == store.finalized_checkpoint.root + + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_justified_update_monotonic(spec, state): + """ + Check that the store does not update it's justified checkpoint with lower justified checkpoints. 
+ This testcase checks that the store's justified checkpoint remains the same even when we input a block that has: + - a higher finalized checkpoint than the store's finalized checkpoint, and + - a lower justified checkpoint than the store's justified checkpoint + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert store.finalized_checkpoint.epoch == 2 + + # We'll eventually make the current head block the finalized block + finalized_root = spec.get_head(store) + finalized_block = store.blocks[finalized_root] + assert spec.compute_epoch_at_slot(finalized_block.slot) == 4 + assert spec.get_head(store) == finalized_root + # Copy into another variable so we can use `state` later + another_state = state.copy() + + # Create a fork with justification that is a descendant of the finalized block + # Do not add these blocks to the store yet + next_epoch(spec, another_state) + signed_blocks = [] + _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, False, False) + signed_blocks += signed_blocks_temp + assert spec.compute_epoch_at_slot(another_state.slot) == 6 + assert another_state.current_justified_checkpoint.epoch == 3 + assert 
another_state.finalized_checkpoint.epoch == 2 + _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, True, False) + signed_blocks += signed_blocks_temp + assert spec.compute_epoch_at_slot(another_state.slot) == 7 + assert another_state.current_justified_checkpoint.epoch == 6 + assert another_state.finalized_checkpoint.epoch == 2 + + # Now add the blocks & check that justification update was triggered + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7 + assert store.justified_checkpoint.epoch == 6 + assert store.finalized_checkpoint.epoch == 2 + last_block = signed_blocks[-1] + last_block_root = last_block.message.hash_tree_root() + ancestor_at_finalized_slot = spec.get_ancestor(store, last_block_root, finalized_block.slot) + assert ancestor_at_finalized_slot == finalized_root + + # Create a fork with lower justification that also finalizes our chosen block + for _ in range(2): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7 + assert state.current_justified_checkpoint.epoch == 5 + # Check that store's finalized checkpoint is updated + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 4 + # Check that store's justified checkpoint is not updated + assert store.justified_checkpoint.epoch == 6 + + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_justified_update_always_if_better(spec, state): + """ + Check that the store updates it's justified checkpoint with any higher justified checkpoint. 
+ This testcase checks that the store's justified checkpoint is updated when we input a block that has: + - a lower finalized checkpoint than the store's finalized checkpoint, and + - a higher justified checkpoint than the store's justified checkpoint + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert store.finalized_checkpoint.epoch == 2 + + # We'll eventually make the current head block the finalized block + finalized_root = spec.get_head(store) + finalized_block = store.blocks[finalized_root] + assert spec.compute_epoch_at_slot(finalized_block.slot) == 4 + assert spec.get_head(store) == finalized_root + # Copy into another variable to use later + another_state = state.copy() + + # Create a fork with lower justification that also finalizes our chosen block + for _ in range(2): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5 + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 4 + + # Create a fork with higher 
justification that is a descendant of the finalized block + # Do not add these blocks to the store yet + next_epoch(spec, another_state) + signed_blocks = [] + _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, False, False) + signed_blocks += signed_blocks_temp + assert spec.compute_epoch_at_slot(another_state.slot) == 6 + assert another_state.current_justified_checkpoint.epoch == 3 + assert another_state.finalized_checkpoint.epoch == 2 + _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, True, False) + signed_blocks += signed_blocks_temp + assert spec.compute_epoch_at_slot(another_state.slot) == 7 + assert another_state.current_justified_checkpoint.epoch == 6 + assert another_state.finalized_checkpoint.epoch == 2 + + # Now add the blocks & check that justification update was triggered + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7 + assert store.justified_checkpoint.epoch == 6 + assert store.finalized_checkpoint.epoch == 4 + + yield 'steps', test_steps + + +@with_altair_and_later +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_pull_up_past_epoch_block(spec, state): + """ + Check that the store pulls-up a block from the past epoch to realize it's justification & finalization information + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + 
state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert store.finalized_checkpoint.epoch == 2 + + # Create a chain within epoch 4 that contains a justification for epoch 4 + signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True) + assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) == 4 + + # Tick store to the next epoch + next_epoch(spec, state) + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert store.finalized_checkpoint.epoch == 2 + + # Add the previously created chain to the store and check for updates + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.get_head(store) == signed_block.message.hash_tree_root() + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + assert store.justified_checkpoint.epoch == 4 + assert store.finalized_checkpoint.epoch == 3 + + yield 'steps', test_steps + + +@with_altair_and_later +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_not_pull_up_current_epoch_block(spec, state): + """ + Check that the store does not pull-up a block from the current epoch if the previous epoch is not justified + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + 
on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert store.finalized_checkpoint.epoch == 2 + + # Skip to the next epoch + next_epoch(spec, state) + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.compute_epoch_at_slot(state.slot) == 5 + + # Create a chain within epoch 5 that contains a justification for epoch 5 + signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True) + assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) == 5 + + # Add the previously created chain to the store and check that store does not apply pull-up updates + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.get_head(store) == signed_block.message.hash_tree_root() + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + assert store.justified_checkpoint.epoch == 3 + assert store.finalized_checkpoint.epoch == 2 + + yield 'steps', test_steps + + +@with_altair_and_later +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_pull_up_on_tick(spec, state): + """ + Check that the store pulls-up current epoch tips on the on_tick transition to the next epoch + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 
'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert store.finalized_checkpoint.epoch == 2 + + # Skip to the next epoch + next_epoch(spec, state) + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.compute_epoch_at_slot(state.slot) == 5 + + # Create a chain within epoch 5 that contains a justification for epoch 5 + signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True) + assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) == 5 + + # Add the previously created chain to the store and check that store does not apply pull-up updates, + # since the previous epoch was not justified + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.get_head(store) == signed_block.message.hash_tree_root() + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + assert store.justified_checkpoint.epoch == 3 + assert store.finalized_checkpoint.epoch == 2 + + # Now tick the store to the next epoch and check that pull-up tip updates were applied + next_epoch(spec, state) + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) 
+ assert spec.compute_epoch_at_slot(state.slot) == 6 + assert store.justified_checkpoint.epoch == 5 + # There's no new finality, so no finality updates expected + assert store.finalized_checkpoint.epoch == 3 + + yield 'steps', test_steps diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py new file mode 100644 index 0000000000..30f1b06c7b --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py @@ -0,0 +1,498 @@ +from eth2spec.test.context import ( + spec_state_test, + with_all_phases, + with_presets, +) +from eth2spec.test.helpers.constants import ( + MINIMAL, +) +from eth2spec.test.helpers.attestations import ( + state_transition_with_full_block, + get_valid_attestation, + get_valid_attestation_at_slot, +) +from eth2spec.test.helpers.block import ( + build_empty_block, + build_empty_block_for_next_slot, +) +from eth2spec.test.helpers.fork_choice import ( + get_genesis_forkchoice_store_and_block, + on_tick_and_append_step, + add_attestations, + tick_and_add_block, + apply_next_epoch_with_attestations, + find_next_justifying_slot, + is_ready_to_justify, +) +from eth2spec.test.helpers.state import ( + state_transition_and_sign_block, + next_epoch, + next_slot, + transition_to, +) + + +TESTING_PRESETS = [MINIMAL] + + +@with_all_phases +@spec_state_test +@with_presets(TESTING_PRESETS, reason="too slow") +def test_simple_attempted_reorg_without_enough_ffg_votes(spec, state): + """ + [Case 1] + + { epoch 4 }{ epoch 5 } + [c4]<--[a]<--[-]<--[y] + ↑____[-]<--[z] + + At c4, c3 is the latest justified checkpoint (or something earlier) + + The block y doesn't have enough votes to justify c4. + The block z also doesn't have enough votes to justify c4. 
+ """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # create block_a, it needs 2 more full blocks to justify epoch 4 + signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True) + assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) + for signed_block in signed_blocks[:-2]: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.get_head(store) == signed_block.message.hash_tree_root() + state = store.block_states[spec.get_head(store)].copy() + assert state.current_justified_checkpoint.epoch == 3 + next_slot(spec, state) + state_a = state.copy() + + # to test the "no withholding" situation, temporarily store the blocks in lists + signed_blocks_of_y = [] + signed_blocks_of_z = [] + + # add an empty block on chain y + block_y = build_empty_block_for_next_slot(spec, state) + signed_block_y = state_transition_and_sign_block(spec, state, block_y) + signed_blocks_of_y.append(signed_block_y) + + # chain y has some on-chain attestations, but not enough to justify c4 + signed_block_y = state_transition_with_full_block(spec, state, True, True) + assert not is_ready_to_justify(spec, state) + signed_blocks_of_y.append(signed_block_y) + assert store.justified_checkpoint.epoch == 3 + + 
state = state_a.copy() + signed_block_z = None + # add one block on chain z, which is not enough to justify c4 + attestation = get_valid_attestation(spec, state, slot=state.slot, signed=True) + block_z = build_empty_block_for_next_slot(spec, state) + block_z.body.attestations = [attestation] + signed_block_z = state_transition_and_sign_block(spec, state, block_z) + signed_blocks_of_z.append(signed_block_z) + + # add an empty block on chain z + block_z = build_empty_block_for_next_slot(spec, state) + signed_block_z = state_transition_and_sign_block(spec, state, block_z) + signed_blocks_of_z.append(signed_block_z) + + # ensure z couldn't justify c4 + assert not is_ready_to_justify(spec, state) + + # apply blocks to store + # (i) slot block_a.slot + 1 + signed_block_y = signed_blocks_of_y.pop(0) + yield from tick_and_add_block(spec, store, signed_block_y, test_steps) + # apply block of chain `z` + signed_block_z = signed_blocks_of_z.pop(0) + yield from tick_and_add_block(spec, store, signed_block_z, test_steps) + + # (ii) slot block_a.slot + 2 + # apply block of chain `z` + signed_block_z = signed_blocks_of_z.pop(0) + yield from tick_and_add_block(spec, store, signed_block_z, test_steps) + # apply block of chain `y` + signed_block_y = signed_blocks_of_y.pop(0) + yield from tick_and_add_block(spec, store, signed_block_y, test_steps) + # chain `y` remains the winner since it arrives earlier than `z` + assert spec.get_head(store) == signed_block_y.message.hash_tree_root() + assert len(signed_blocks_of_y) == len(signed_blocks_of_z) == 0 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + + # tick to the prior of the epoch boundary + slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) - 1 + current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert 
state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 +    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 +    # chain `y` remains the winner +    assert spec.get_head(store) == signed_block_y.message.hash_tree_root() + +    # to next block +    next_epoch(spec, state) +    current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time +    on_tick_and_append_step(spec, store, current_time, test_steps) +    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 +    assert spec.get_head(store) == signed_block_y.message.hash_tree_root() +    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + +    yield 'steps', test_steps + + +def _run_delayed_justification(spec, state, attemped_reorg, is_justifying_previous_epoch): +    """ +    """ +    test_steps = [] +    # Initialization +    store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) +    yield 'anchor_state', state +    yield 'anchor_block', anchor_block +    current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time +    on_tick_and_append_step(spec, store, current_time, test_steps) +    assert store.time == current_time + +    next_epoch(spec, state) +    on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + +    # Fill epoch 1 to 2 +    for _ in range(2): +        state, store, _ = yield from apply_next_epoch_with_attestations( +            spec, state, store, True, True, test_steps=test_steps) + +    if is_justifying_previous_epoch: +        state, store, _ = yield from apply_next_epoch_with_attestations( +            spec, state, store, False, False, test_steps=test_steps) +        assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2 +    else: +        state, store, _ = yield from apply_next_epoch_with_attestations( +            spec, state, store, True, True, test_steps=test_steps) +        assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + +    if 
is_justifying_previous_epoch: +        # try to find the block that can justify epoch 3 +        signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, False, True) +    else: +        # try to find the block that can justify epoch 4 +        signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True) + +    assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) +    for signed_block in signed_blocks: +        yield from tick_and_add_block(spec, store, signed_block, test_steps) +        assert spec.get_head(store) == signed_block.message.hash_tree_root() +    state = store.block_states[spec.get_head(store)].copy() +    if is_justifying_previous_epoch: +        assert state.current_justified_checkpoint.epoch == 2 +    else: +        assert state.current_justified_checkpoint.epoch == 3 + +    assert is_ready_to_justify(spec, state) +    state_b = state.copy() + +    # add chain y +    if is_justifying_previous_epoch: +        signed_block_y = state_transition_with_full_block(spec, state, False, True) +    else: +        signed_block_y = state_transition_with_full_block(spec, state, True, True) +    yield from tick_and_add_block(spec, store, signed_block_y, test_steps) +    assert spec.get_head(store) == signed_block_y.message.hash_tree_root() +    if is_justifying_previous_epoch: +        assert store.justified_checkpoint.epoch == 2 +    else: +        assert store.justified_checkpoint.epoch == 3 + +    # add attestations of y +    temp_state = state.copy() +    next_slot(spec, temp_state) +    attestations_for_y = list(get_valid_attestation_at_slot(temp_state, spec, signed_block_y.message.slot)) +    current_time = temp_state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time +    on_tick_and_append_step(spec, store, current_time, test_steps) +    yield from add_attestations(spec, store, attestations_for_y, test_steps) +    assert spec.get_head(store) == signed_block_y.message.hash_tree_root() + +    if attemped_reorg: +        # add chain z +        state = state_b.copy() +        slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) - 1 + 
transition_to(spec, state, slot) + block_z = build_empty_block_for_next_slot(spec, state) + assert spec.compute_epoch_at_slot(block_z.slot) == 5 + signed_block_z = state_transition_and_sign_block(spec, state, block_z) + yield from tick_and_add_block(spec, store, signed_block_z, test_steps) + else: + # next epoch + state = state_b.copy() + next_epoch(spec, state) + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + + # no reorg + assert spec.get_head(store) == signed_block_y.message.hash_tree_root() + if is_justifying_previous_epoch: + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + else: + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4 + + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets(TESTING_PRESETS, reason="too slow") +def test_simple_attempted_reorg_delayed_justification_current_epoch(spec, state): + """ + [Case 2] + + { epoch 4 }{ epoch 5 } + [c4]<--[b]<--[y] + ↑______________[z] + At c4, c3 is the latest justified checkpoint (or something earlier) + + block_b: the block that can justify c4. + z: the child of block of x at the first slot of epoch 5. + block z can reorg the chain from block y. 
+ """ + yield from _run_delayed_justification(spec, state, attemped_reorg=True, is_justifying_previous_epoch=False) + + +def _run_include_votes_of_another_empty_chain(spec, state, enough_ffg, is_justifying_previous_epoch): + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 2 + for _ in range(2): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + if is_justifying_previous_epoch: + block_a = build_empty_block_for_next_slot(spec, state) + signed_block_a = state_transition_and_sign_block(spec, state, block_a) + yield from tick_and_add_block(spec, store, signed_block_a, test_steps) + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2 + else: + # fill one more epoch + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + signed_block_a = state_transition_with_full_block(spec, state, True, True) + yield from tick_and_add_block(spec, store, signed_block_a, test_steps) + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + spec.get_head(store) == signed_block_a.message.hash_tree_root() + + state = store.block_states[spec.get_head(store)].copy() + if is_justifying_previous_epoch: + assert state.current_justified_checkpoint.epoch == 2 + else: + assert state.current_justified_checkpoint.epoch == 3 + state_a = state.copy() + + if is_justifying_previous_epoch: + # try to find the block that can 
justify epoch 3 + _, justifying_slot = find_next_justifying_slot(spec, state, False, True) + else: + # try to find the block that can justify epoch 4 + _, justifying_slot = find_next_justifying_slot(spec, state, True, True) + + last_slot_of_z = justifying_slot if enough_ffg else justifying_slot - 1 + last_slot_of_y = justifying_slot if is_justifying_previous_epoch else last_slot_of_z - 1 + + # to test the "no withholding" situation, temporarily store the blocks in lists + signed_blocks_of_y = [] + + # build an empty chain to the slot prior epoch boundary + signed_blocks_of_empty_chain = [] + states_of_empty_chain = [] + + for slot in range(state.slot + 1, last_slot_of_y + 1): + block = build_empty_block(spec, state, slot=slot) + signed_block = state_transition_and_sign_block(spec, state, block) + signed_blocks_of_empty_chain.append(signed_block) + states_of_empty_chain.append(state.copy()) + signed_blocks_of_y.append(signed_block) + + signed_block_y = signed_blocks_of_empty_chain[-1] + + # create 2/3 votes for the empty chain + attestations_for_y = [] + # target_is_current = not is_justifying_previous_epoch + attestations = list(get_valid_attestation_at_slot(state, spec, state_a.slot)) + attestations_for_y.append(attestations) + for state in states_of_empty_chain: + attestations = list(get_valid_attestation_at_slot(state, spec, state.slot)) + attestations_for_y.append(attestations) + + state = state_a.copy() + signed_block_z = None + + for slot in range(state_a.slot + 1, last_slot_of_z + 1): + # apply chain y, the empty chain + if slot <= last_slot_of_y and len(signed_blocks_of_y) > 0: + signed_block_y = signed_blocks_of_y.pop(0) + assert signed_block_y.message.slot == slot + yield from tick_and_add_block(spec, store, signed_block_y, test_steps) + + # apply chain z, a fork chain that includes these attestations_for_y + block = build_empty_block(spec, state, slot=slot) + if ( + len(attestations_for_y) > 0 and ( + (not is_justifying_previous_epoch) + or 
(is_justifying_previous_epoch and attestations_for_y[0][0].data.slot == slot - 5) + ) + ): + block.body.attestations = attestations_for_y.pop(0) + signed_block_z = state_transition_and_sign_block(spec, state, block) + if signed_block_y != signed_block_z: + yield from tick_and_add_block(spec, store, signed_block_z, test_steps) + if is_ready_to_justify(spec, state): + break + + assert spec.get_head(store) == signed_block_y.message.hash_tree_root() + + if is_justifying_previous_epoch: + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2 + else: + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + if enough_ffg: + assert is_ready_to_justify(spec, state) + else: + assert not is_ready_to_justify(spec, state) + + # to next epoch + next_epoch(spec, state) + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + + if enough_ffg: + # reorg + assert spec.get_head(store) == signed_block_z.message.hash_tree_root() + if is_justifying_previous_epoch: + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + else: + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4 + else: + # no reorg + assert spec.get_head(store) == signed_block_y.message.hash_tree_root() + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets(TESTING_PRESETS, reason="too slow") +def test_include_votes_another_empty_chain_with_enough_ffg_votes_current_epoch(spec, state): + """ + [Case 3] + """ + yield from _run_include_votes_of_another_empty_chain( + spec, state, enough_ffg=True, is_justifying_previous_epoch=False) + + +@with_all_phases +@spec_state_test +@with_presets(TESTING_PRESETS, 
reason="too slow") +def test_include_votes_another_empty_chain_without_enough_ffg_votes_current_epoch(spec, state): + """ + [Case 4] + """ + yield from _run_include_votes_of_another_empty_chain( + spec, state, enough_ffg=False, is_justifying_previous_epoch=False) + + +@with_all_phases +@spec_state_test +@with_presets(TESTING_PRESETS, reason="too slow") +def test_delayed_justification_current_epoch(spec, state): + """ + [Case 5] + + To compare with ``test_simple_attempted_reorg_delayed_justification_current_epoch``, + this is the basic case if there is no chain z + + { epoch 4 }{ epoch 5 } + [c4]<--[b]<--[y] + + At c4, c3 is the latest justified checkpoint. + + block_b: the block that can justify c4. + """ + yield from _run_delayed_justification(spec, state, attemped_reorg=False, is_justifying_previous_epoch=False) + + +@with_all_phases +@spec_state_test +@with_presets(TESTING_PRESETS, reason="too slow") +def test_delayed_justification_previous_epoch(spec, state): + """ + [Case 6] + + Similar to ``test_delayed_justification_current_epoch``, + but includes attestations during epoch N to justify checkpoint N-1. + + { epoch 3 }{ epoch 4 }{ epoch 5 } + [c3]<---------------[c4]---[b]<---------------------------------[y] + + """ + yield from _run_delayed_justification(spec, state, attemped_reorg=False, is_justifying_previous_epoch=True) + + +@with_all_phases +@spec_state_test +@with_presets(TESTING_PRESETS, reason="too slow") +def test_simple_attempted_reorg_delayed_justification_previous_epoch(spec, state): + """ + [Case 7] + + Similar to ``test_simple_attempted_reorg_delayed_justification_current_epoch``, + but includes attestations during epoch N to justify checkpoint N-1. + + { epoch 3 }{ epoch 4 }{ epoch 5 } + [c3]<---------------[c4]<--[b]<--[y] + ↑______________[z] + + At c4, c2 is the latest justified checkpoint. + + block_b: the block that can justify c3. + z: the child of block of x at the first slot of epoch 5. + block z can reorg the chain from block y. 
+ """ + yield from _run_delayed_justification(spec, state, attemped_reorg=True, is_justifying_previous_epoch=True) + + +@with_all_phases +@spec_state_test +@with_presets(TESTING_PRESETS, reason="too slow") +def test_include_votes_another_empty_chain_with_enough_ffg_votes_previous_epoch(spec, state): + """ + [Case 8] + + Similar to ``test_include_votes_another_empty_chain_with_enough_ffg_votes_current_epoch``, + but includes attestations during epoch N to justify checkpoint N-1. + + """ + yield from _run_include_votes_of_another_empty_chain( + spec, state, enough_ffg=True, is_justifying_previous_epoch=True) diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_withholding.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_withholding.py new file mode 100644 index 0000000000..61926875ad --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_withholding.py @@ -0,0 +1,205 @@ +from eth2spec.test.context import ( + spec_state_test, + with_altair_and_later, + with_presets, +) +from eth2spec.test.helpers.constants import ( + MINIMAL, +) +from eth2spec.test.helpers.attestations import ( + state_transition_with_full_block, +) +from eth2spec.test.helpers.block import ( + build_empty_block_for_next_slot, +) +from eth2spec.test.helpers.fork_choice import ( + get_genesis_forkchoice_store_and_block, + on_tick_and_append_step, + tick_and_add_block, + apply_next_epoch_with_attestations, + find_next_justifying_slot, +) +from eth2spec.test.helpers.state import ( + state_transition_and_sign_block, + next_epoch, +) + + +TESTING_PRESETS = [MINIMAL] + + +@with_altair_and_later +@spec_state_test +@with_presets(TESTING_PRESETS, reason="too slow") +def test_withholding_attack(spec, state): + """ + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + 
store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # Create the attack block that includes justifying attestations for epoch 4 + # This block is withheld & revealed only in epoch 5 + signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, False) + assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) + assert len(signed_blocks) > 1 + signed_attack_block = signed_blocks[-1] + for signed_block in signed_blocks[:-1]: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.get_head(store) == signed_block.message.hash_tree_root() + assert spec.get_head(store) == signed_blocks[-2].message.hash_tree_root() + state = store.block_states[spec.get_head(store)].copy() + assert spec.compute_epoch_at_slot(state.slot) == 4 + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # Create an honest chain in epoch 5 that includes the justifying attestations from the attack block + next_epoch(spec, state) + assert spec.compute_epoch_at_slot(state.slot) == 5 + assert state.current_justified_checkpoint.epoch == 3 + # Create two block in the honest chain with full attestations, and add to the store + for _ in range(2): + signed_block = state_transition_with_full_block(spec, state, True, False) + yield from tick_and_add_block(spec, store, signed_block, 
test_steps) + # Create final block in the honest chain that includes the justifying attestations from the attack block + honest_block = build_empty_block_for_next_slot(spec, state) + honest_block.body.attestations = signed_attack_block.message.body.attestations + signed_honest_block = state_transition_and_sign_block(spec, state, honest_block) + # Add the honest block to the store + yield from tick_and_add_block(spec, store, signed_honest_block, test_steps) + assert spec.get_head(store) == signed_honest_block.message.hash_tree_root() + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # Tick to the next slot so proposer boost is not a factor in choosing the head + current_time = (honest_block.slot + 1) * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.get_head(store) == signed_honest_block.message.hash_tree_root() + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # Upon revealing the withheld attack block, the honest block should still be the head + yield from tick_and_add_block(spec, store, signed_attack_block, test_steps) + assert spec.get_head(store) == signed_honest_block.message.hash_tree_root() + # As a side effect of the pull-up logic, the attack block is pulled up and store.justified_checkpoint is updated + assert store.justified_checkpoint.epoch == 4 + + # Even after going to the next epoch, the honest block should remain the head + slot = spec.get_current_slot(store) + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) + current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 + assert 
spec.get_head(store) == signed_honest_block.message.hash_tree_root() + + yield 'steps', test_steps + + +@with_altair_and_later +@spec_state_test +@with_presets(TESTING_PRESETS, reason="too slow") +def test_withholding_attack_unviable_honest_chain(spec, state): + """ + Checks that the withholding attack succeeds for one epoch if the honest chain has a voting source beyond + two epochs ago. + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + next_epoch(spec, state) + assert spec.compute_epoch_at_slot(state.slot) == 5 + + # Create the attack block that includes justifying attestations for epoch 5 + # This block is withheld & revealed only in epoch 6 + signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, False) + assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) + assert len(signed_blocks) > 1 + signed_attack_block = signed_blocks[-1] + for signed_block in signed_blocks[:-1]: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.get_head(store) == signed_block.message.hash_tree_root() + assert spec.get_head(store) == signed_blocks[-2].message.hash_tree_root() + state = 
store.block_states[spec.get_head(store)].copy() + assert spec.compute_epoch_at_slot(state.slot) == 5 + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # Create an honest chain in epoch 6 that includes the justifying attestations from the attack block + next_epoch(spec, state) + assert spec.compute_epoch_at_slot(state.slot) == 6 + assert state.current_justified_checkpoint.epoch == 3 + # Create two block in the honest chain with full attestations, and add to the store + for _ in range(2): + signed_block = state_transition_with_full_block(spec, state, True, False) + assert state.current_justified_checkpoint.epoch == 3 + yield from tick_and_add_block(spec, store, signed_block, test_steps) + # Create final block in the honest chain that includes the justifying attestations from the attack block + honest_block = build_empty_block_for_next_slot(spec, state) + honest_block.body.attestations = signed_attack_block.message.body.attestations + signed_honest_block = state_transition_and_sign_block(spec, state, honest_block) + honest_block_root = signed_honest_block.message.hash_tree_root() + assert state.current_justified_checkpoint.epoch == 3 + # Add the honest block to the store + yield from tick_and_add_block(spec, store, signed_honest_block, test_steps) + current_epoch = spec.compute_epoch_at_slot(spec.get_current_slot(store)) + assert current_epoch == 6 + # assert store.voting_source[honest_block_root].epoch == 3 + assert spec.get_head(store) == honest_block_root + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # Tick to the next slot so proposer boost is not a factor in choosing the head + current_time = (honest_block.slot + 1) * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) 
+ assert spec.get_head(store) == honest_block_root + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # Upon revealing the withheld attack block, it should become the head + yield from tick_and_add_block(spec, store, signed_attack_block, test_steps) + # The attack block is pulled up and store.justified_checkpoint is updated + assert store.justified_checkpoint.epoch == 5 + attack_block_root = signed_attack_block.message.hash_tree_root() + assert spec.get_head(store) == attack_block_root + + # After going to the next epoch, the honest block should become the head + slot = spec.get_current_slot(store) + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) + current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7 + # assert store.voting_source[honest_block_root].epoch == 5 + assert spec.get_head(store) == honest_block_root + + yield 'steps', test_steps diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py deleted file mode 100644 index 92382c884b..0000000000 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py +++ /dev/null @@ -1,87 +0,0 @@ -from copy import deepcopy - -from eth2spec.utils.ssz.ssz_impl import hash_tree_root -from eth2spec.test.context import ( - spec_state_test, - with_all_phases, -) -from eth2spec.test.helpers.block import ( - build_empty_block_for_next_slot, -) -from eth2spec.test.helpers.fork_choice import ( - get_genesis_forkchoice_store, - run_on_block, - apply_next_epoch_with_attestations, -) -from eth2spec.test.helpers.state import ( - next_epoch, - state_transition_and_sign_block, -) - - -@with_all_phases -@spec_state_test -def 
test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state): - """ - NOTE: test_new_justified_is_later_than_store_justified also tests best_justified_checkpoint - """ - # Initialization - store = get_genesis_forkchoice_store(spec, state) - - next_epoch(spec, state) - spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) - state, store, last_signed_block = yield from apply_next_epoch_with_attestations( - spec, state, store, True, False) - last_block_root = hash_tree_root(last_signed_block.message) - - # NOTE: Mock fictitious justified checkpoint in store - store.justified_checkpoint = spec.Checkpoint( - epoch=spec.compute_epoch_at_slot(last_signed_block.message.slot), - root=spec.Root("0x4a55535449464945440000000000000000000000000000000000000000000000") - ) - - next_epoch(spec, state) - spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) - - # Create new higher justified checkpoint not in branch of store's justified checkpoint - just_block = build_empty_block_for_next_slot(spec, state) - store.blocks[just_block.hash_tree_root()] = just_block - - # Step time past safe slots - spec.on_tick(store, store.time + spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED * spec.config.SECONDS_PER_SLOT) - assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED - - previously_finalized = store.finalized_checkpoint - previously_justified = store.justified_checkpoint - - # Add a series of new blocks with "better" justifications - best_justified_checkpoint = spec.Checkpoint(epoch=0) - for i in range(3, 0, -1): - # Mutate store - just_state = store.block_states[last_block_root] - new_justified = spec.Checkpoint( - epoch=previously_justified.epoch + i, - root=just_block.hash_tree_root(), - ) - if new_justified.epoch > best_justified_checkpoint.epoch: - best_justified_checkpoint = new_justified - - just_state.current_justified_checkpoint = new_justified - - block = 
build_empty_block_for_next_slot(spec, just_state) - signed_block = state_transition_and_sign_block(spec, deepcopy(just_state), block) - - # NOTE: Mock store so that the modified state could be accessed - parent_block = store.blocks[last_block_root].copy() - parent_block.state_root = just_state.hash_tree_root() - store.blocks[block.parent_root] = parent_block - store.block_states[block.parent_root] = just_state.copy() - assert block.parent_root in store.blocks.keys() - assert block.parent_root in store.block_states.keys() - - run_on_block(spec, store, signed_block) - - assert store.finalized_checkpoint == previously_finalized - assert store.justified_checkpoint == previously_justified - # ensure the best from the series was stored - assert store.best_justified_checkpoint == best_justified_checkpoint diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_tick.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_tick.py index 0d9f6ddf54..33d1bbac44 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_tick.py +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_tick.py @@ -18,7 +18,6 @@ def run_on_tick(spec, store, time, new_justified_checkpoint=False): assert store.time == time if new_justified_checkpoint: - assert store.justified_checkpoint == store.best_justified_checkpoint assert store.justified_checkpoint.epoch > previous_justified_checkpoint.epoch assert store.justified_checkpoint.root != previous_justified_checkpoint.root else: @@ -32,12 +31,12 @@ def test_basic(spec, state): run_on_tick(spec, store, store.time + 1) +""" @with_all_phases @spec_state_test def test_update_justified_single_on_store_finalized_chain(spec, state): store = get_genesis_forkchoice_store(spec, state) - # [Mock store.best_justified_checkpoint] # Create a block at epoch 1 next_epoch(spec, state) block = build_empty_block_for_next_slot(spec, state) @@ -58,8 +57,6 @@ def 
test_update_justified_single_on_store_finalized_chain(spec, state): state_transition_and_sign_block(spec, state, block) store.blocks[block.hash_tree_root()] = block store.block_states[block.hash_tree_root()] = state - # Mock store.best_justified_checkpoint - store.best_justified_checkpoint = state.current_justified_checkpoint.copy() run_on_tick( spec, @@ -67,6 +64,7 @@ def test_update_justified_single_on_store_finalized_chain(spec, state): store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, new_justified_checkpoint=True ) +""" @with_all_phases @@ -89,7 +87,6 @@ def test_update_justified_single_not_on_store_finalized_chain(spec, state): root=block.hash_tree_root(), ) - # [Mock store.best_justified_checkpoint] # Create a block at epoch 1 state = init_state.copy() next_epoch(spec, state) @@ -112,79 +109,9 @@ def test_update_justified_single_not_on_store_finalized_chain(spec, state): state_transition_and_sign_block(spec, state, block) store.blocks[block.hash_tree_root()] = block.copy() store.block_states[block.hash_tree_root()] = state.copy() - # Mock store.best_justified_checkpoint - store.best_justified_checkpoint = state.current_justified_checkpoint.copy() run_on_tick( spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, ) - - -@with_all_phases -@spec_state_test -def test_no_update_same_slot_at_epoch_boundary(spec, state): - store = get_genesis_forkchoice_store(spec, state) - seconds_per_epoch = spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH - - store.best_justified_checkpoint = spec.Checkpoint( - epoch=store.justified_checkpoint.epoch + 1, - root=b'\x55' * 32, - ) - - # set store time to already be at epoch boundary - store.time = seconds_per_epoch - - run_on_tick(spec, store, store.time + 1) - - -@with_all_phases -@spec_state_test -def test_no_update_not_epoch_boundary(spec, state): - store = get_genesis_forkchoice_store(spec, state) - - store.best_justified_checkpoint = spec.Checkpoint( - 
epoch=store.justified_checkpoint.epoch + 1, - root=b'\x55' * 32, - ) - - run_on_tick(spec, store, store.time + spec.config.SECONDS_PER_SLOT) - - -@with_all_phases -@spec_state_test -def test_no_update_new_justified_equal_epoch(spec, state): - store = get_genesis_forkchoice_store(spec, state) - seconds_per_epoch = spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH - - store.best_justified_checkpoint = spec.Checkpoint( - epoch=store.justified_checkpoint.epoch + 1, - root=b'\x55' * 32, - ) - - store.justified_checkpoint = spec.Checkpoint( - epoch=store.best_justified_checkpoint.epoch, - root=b'\44' * 32, - ) - - run_on_tick(spec, store, store.time + seconds_per_epoch) - - -@with_all_phases -@spec_state_test -def test_no_update_new_justified_later_epoch(spec, state): - store = get_genesis_forkchoice_store(spec, state) - seconds_per_epoch = spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH - - store.best_justified_checkpoint = spec.Checkpoint( - epoch=store.justified_checkpoint.epoch + 1, - root=b'\x55' * 32, - ) - - store.justified_checkpoint = spec.Checkpoint( - epoch=store.best_justified_checkpoint.epoch + 1, - root=b'\44' * 32, - ) - - run_on_tick(spec, store, store.time + seconds_per_epoch) diff --git a/tests/formats/fork_choice/README.md b/tests/formats/fork_choice/README.md index f79d436eb7..c94b959338 100644 --- a/tests/formats/fork_choice/README.md +++ b/tests/formats/fork_choice/README.md @@ -146,10 +146,6 @@ finalized_checkpoint: { epoch: int, -- Integer value from store.finalized_checkpoint.epoch root: string, -- Encoded 32-byte value from store.finalized_checkpoint.root } -best_justified_checkpoint: { - epoch: int, -- Integer value from store.best_justified_checkpoint.epoch - root: string, -- Encoded 32-byte value from store.best_justified_checkpoint.root -} proposer_boost_root: string -- Encoded 32-byte value from store.proposer_boost_root ``` @@ -160,7 +156,6 @@ For example: head: {slot: 32, root: 
'0xdaa1d49d57594ced0c35688a6da133abb086d191a2ebdfd736fad95299325aeb'} justified_checkpoint: {epoch: 3, root: '0xc25faab4acab38d3560864ca01e4d5cc4dc2cd473da053fbc03c2669143a2de4'} finalized_checkpoint: {epoch: 2, root: '0x40d32d6283ec11c53317a46808bc88f55657d93b95a1af920403187accf48f4f'} - best_justified_checkpoint: {epoch: 3, root: '0xc25faab4acab38d3560864ca01e4d5cc4dc2cd473da053fbc03c2669143a2de4'} proposer_boost_root: '0xdaa1d49d57594ced0c35688a6da133abb086d191a2ebdfd736fad95299325aeb' ``` diff --git a/tests/generators/fork_choice/main.py b/tests/generators/fork_choice/main.py index c106810f8e..945e687005 100644 --- a/tests/generators/fork_choice/main.py +++ b/tests/generators/fork_choice/main.py @@ -7,6 +7,8 @@ 'get_head', 'on_block', 'ex_ante', + 'reorg', + 'withholding', ]} # No additional Altair specific finality tests, yet. altair_mods = phase_0_mods From 23c5b7c66e12a1cd8ee183ac4da1017d0f494461 Mon Sep 17 00:00:00 2001 From: Aditya Asgaonkar Date: Mon, 13 Mar 2023 13:48:34 -0700 Subject: [PATCH 58/73] fix typo --- specs/phase0/fork-choice.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index 6cbebe507c..b7125e36aa 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -109,7 +109,7 @@ The `Store` is responsible for tracking information required for the fork choice - `justified_checkpoint`: the justified checkpoint being used as the starting point for the LMD GHOST fork choice algorithm. - `finalized_checkpoint`: the highest finalized checkpoint that was seen. In general, the fork choice will consider only those blocks that are not conflicting with this checkpoint. -- `unrealized_justified_checkpoint` & `unrealized_finalized_checkpoint`: these track the highest justified & finalized checkpoints resp., without regard to whether on-chain ***realization***, i.e., FFG processing of new attestations, has occured. 
This is an important distinction from `justified_checkpoint` & `finalized_checkpoint`, because they will only track the checkpoints that are realized on-chain. Note that on-chain processing of FFG information only happens at epoch boundaries. +- `unrealized_justified_checkpoint` & `unrealized_finalized_checkpoint`: these track the highest justified & finalized checkpoints resp., without regard to whether on-chain ***realization***, i.e., FFG processing of new attestations, has occurred. This is an important distinction from `justified_checkpoint` & `finalized_checkpoint`, because they will only track the checkpoints that are realized on-chain. Note that on-chain processing of FFG information only happens at epoch boundaries. - `unrealized_justifications`: stores a map of block root to the unrealized justified checkpoint observed in that block. ```python @@ -231,10 +231,10 @@ def get_voting_source(store: Store, block_root: Root) -> Checkpoint: """ Compute the voting source checkpoint in the case that block with root ``block_root`` is chosen as the head block - """ + """ block = store.blocks[block_root] current_epoch = compute_epoch_at_slot(get_current_slot(store)) - block_epoch = compute_epoch_at_slot(block.slot) + block_epoch = compute_epoch_at_slot(block.slot) if current_epoch > block_epoch: # The block is from a prior epoch, the voting source will be pulled-up. return store.unrealized_justifications[block_root] @@ -242,7 +242,7 @@ def get_voting_source(store: Store, block_root: Root) -> Checkpoint: # The block is not from a prior epoch, therefore the voting source is # not pulled up. 
head_state = store.block_states[block_root] - return head_state.current_justified_checkpoint + return head_state.current_justified_checkpoint ``` From 0ae18d86e3fee6ace2187d92da0afca78e43b75d Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 14 Mar 2023 11:22:12 -0600 Subject: [PATCH 59/73] Update specs/_features/eip6110/validator.md Co-authored-by: Hsiao-Wei Wang --- specs/_features/eip6110/validator.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/_features/eip6110/validator.md b/specs/_features/eip6110/validator.md index dcaaf11041..ae9d493a6f 100644 --- a/specs/_features/eip6110/validator.md +++ b/specs/_features/eip6110/validator.md @@ -38,5 +38,5 @@ def get_eth1_deposit_count(state: BeaconState) -> uint64: if state.eth1_deposit_index < eth1_deposit_index_limit: return min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index) else: - return 0 + return uint64(0) ``` From 3de96f7a198ebbaa0149d0f8bb0f38d0f4e2f611 Mon Sep 17 00:00:00 2001 From: Aditya Asgaonkar Date: Tue, 14 Mar 2023 13:54:57 -0700 Subject: [PATCH 60/73] Apply suggestions from code review Co-authored-by: Danny Ryan --- specs/phase0/fork-choice.md | 46 +++++++++++++++---------------------- 1 file changed, 18 insertions(+), 28 deletions(-) diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index b7125e36aa..96bb2a87a4 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -107,9 +107,9 @@ def is_previous_epoch_justified(store: Store) -> bool: The `Store` is responsible for tracking information required for the fork choice algorithm. The important fields being tracked are described below: -- `justified_checkpoint`: the justified checkpoint being used as the starting point for the LMD GHOST fork choice algorithm. -- `finalized_checkpoint`: the highest finalized checkpoint that was seen. In general, the fork choice will consider only those blocks that are not conflicting with this checkpoint. 
-- `unrealized_justified_checkpoint` & `unrealized_finalized_checkpoint`: these track the highest justified & finalized checkpoints resp., without regard to whether on-chain ***realization***, i.e., FFG processing of new attestations, has occurred. This is an important distinction from `justified_checkpoint` & `finalized_checkpoint`, because they will only track the checkpoints that are realized on-chain. Note that on-chain processing of FFG information only happens at epoch boundaries. +- `justified_checkpoint`: the justified checkpoint used as the starting point for the LMD GHOST fork choice algorithm. +- `finalized_checkpoint`: the highest known finalized checkpoint. The fork choice only considers blocks that are not conflicting with this checkpoint. +- `unrealized_justified_checkpoint` & `unrealized_finalized_checkpoint`: these track the highest justified & finalized checkpoints resp., without regard to whether on-chain ***realization*** has occurred, i.e. FFG processing of new attestations within the state transition function. This is an important distinction from `justified_checkpoint` & `finalized_checkpoint`, because they will only track the checkpoints that are realized on-chain. Note that on-chain processing of FFG information only happens at epoch boundaries. - `unrealized_justifications`: stores a map of block root to the unrealized justified checkpoint observed in that block. 
```python @@ -229,18 +229,16 @@ def get_weight(store: Store, root: Root) -> Gwei: ```python def get_voting_source(store: Store, block_root: Root) -> Checkpoint: """ - Compute the voting source checkpoint in the case that block with root ``block_root`` - is chosen as the head block + Compute the voting source checkpoint in event that block with root ``block_root`` is the head block """ block = store.blocks[block_root] current_epoch = compute_epoch_at_slot(get_current_slot(store)) block_epoch = compute_epoch_at_slot(block.slot) if current_epoch > block_epoch: - # The block is from a prior epoch, the voting source will be pulled-up. + # The block is from a prior epoch, the voting source will be pulled-up return store.unrealized_justifications[block_root] else: - # The block is not from a prior epoch, therefore the voting source is - # not pulled up. + # The block is not from a prior epoch, therefore the voting source is not pulled up head_state = store.block_states[block_root] return head_state.current_justified_checkpoint @@ -248,7 +246,7 @@ def get_voting_source(store: Store, block_root: Root) -> Checkpoint: #### `filter_block_tree` -*Note*: External calls to `filter_block_tree` (i.e., any calls that are not made by the recursive logic in this function) MUST have `block_root` as `store.justified_checkpoint`. +*Note*: External calls to `filter_block_tree` (i.e., any calls that are not made by the recursive logic in this function) MUST set `block_root` to `store.justified_checkpoint`. ```python def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconBlock]) -> bool: @@ -270,7 +268,7 @@ def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconB current_epoch = compute_epoch_at_slot(get_current_slot(store)) voting_source = get_voting_source(store, block_root) - # The voting source should be at the same height as the store's justified checkpoint. 
+ # The voting source should be at the same height as the store's justified checkpoint correct_justified = ( store.justified_checkpoint.epoch == GENESIS_EPOCH or voting_source.epoch == store.justified_checkpoint.epoch @@ -378,20 +376,16 @@ def compute_pulled_up_tip(store: Store, block_root: Root) -> None: # Pull up the post-state of the block to the next epoch boundary process_justification_and_finalization(state) - # Store the unrealized justification. store.unrealized_justifications[block_root] = state.current_justified_checkpoint - - # Update unrealized checkpoints in store if necessary update_unrealized_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) - # If the block is from a prior epoch, apply the realized values. + # If the block is from a prior epoch, apply the realized values block_epoch = compute_epoch_at_slot(store.blocks[block_root].slot) current_epoch = compute_epoch_at_slot(get_current_slot(store)) if block_epoch < current_epoch: update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) ``` - #### `on_tick` helpers ##### `on_tick_per_slot` @@ -400,21 +394,18 @@ def compute_pulled_up_tip(store: Store, block_root: Root) -> None: def on_tick_per_slot(store: Store, time: uint64) -> None: previous_slot = get_current_slot(store) - # update store time + # Update store time store.time = time current_slot = get_current_slot(store) - # Reset store.proposer_boost_root if this is a new slot + # If this is a new slot, reset store.proposer_boost_root if current_slot > previous_slot: store.proposer_boost_root = Root() - # Not a new epoch, return - if not (current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0): - return - - # Pull-up justification and finalization from previous epoch - update_checkpoints(store, store.unrealized_justified_checkpoint, store.unrealized_finalized_checkpoint) + # If a new epoch, pull-up justification and finalization from previous epoch + if 
current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0: + update_checkpoints(store, store.unrealized_justified_checkpoint, store.unrealized_finalized_checkpoint) ``` #### `on_attestation` helpers @@ -447,8 +438,7 @@ def validate_on_attestation(store: Store, attestation: Attestation, is_from_bloc # Check that the epoch number and slot number are matching assert target.epoch == compute_epoch_at_slot(attestation.data.slot) - # Attestations target be for a known block. - # If target block is unknown, delay consideration until the block is found. + # Attestation target must be for a known block. If target block is unknown, delay consideration until block is found assert target.root in store.blocks # Attestations must be for a known block. If block is unknown, delay consideration until the block is found @@ -498,8 +488,8 @@ def update_latest_messages(store: Store, ```python def on_tick(store: Store, time: uint64) -> None: - # If the ``store.time`` falls behind, catch up slot by slot to - # ensure that every previous slot will be processed with ``on_tick_per_slot``. + # If the ``store.time`` falls behind, while loop catches up slot by slot + # to ensure that every previous slot is processed with ``on_tick_per_slot`` tick_slot = (time - store.genesis_time) // SECONDS_PER_SLOT while get_current_slot(store) < tick_slot: previous_time = store.genesis_time + (get_current_slot(store) + 1) * SECONDS_PER_SLOT @@ -543,7 +533,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # Update checkpoints in store if necessary update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) - # Eagerly compute unrealized justification and finality. 
+ # Eagerly compute unrealized justification and finality compute_pulled_up_tip(store, block_root) ``` From 637ef341331bdae3773cbfa8ae1c08a4b1d99ec1 Mon Sep 17 00:00:00 2001 From: kevaundray Date: Tue, 14 Mar 2023 21:00:20 +0000 Subject: [PATCH 61/73] fix : change description for blob --- specs/deneb/polynomial-commitments.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 57496eb020..bde9627565 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -65,7 +65,7 @@ Public functions MUST accept raw bytes as input and perform the required cryptog | `KZGCommitment` | `Bytes48` | Validation: Perform [BLS standard's](https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-2.5) "KeyValidate" check but do allow the identity point | | `KZGProof` | `Bytes48` | Same as for `KZGCommitment` | | `Polynomial` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_BLOB]` | A polynomial in evaluation form | -| `Blob` | `ByteVector[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB]` | A basic blob data | +| `Blob` | `ByteVector[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB]` | A fixed-length sequence of bytes that may uniquely encode the evaluations of a polynomial | ## Constants From 1a2967a5424a53c98c423f8e44fb54086ec68312 Mon Sep 17 00:00:00 2001 From: Aditya Asgaonkar Date: Tue, 14 Mar 2023 14:49:41 -0700 Subject: [PATCH 62/73] Apply suggestions from @djrtwo --- specs/phase0/fork-choice.md | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index 96bb2a87a4..6e281d5c3d 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -189,11 +189,7 @@ def get_ancestor(store: Store, root: Root, slot: Slot) -> Root: block = store.blocks[root] if block.slot > slot: return get_ancestor(store, block.parent_root, slot) - 
elif block.slot == slot: - return root - else: - # root is older than queried slot, thus a skip slot. Return most recent root prior to slot - return root + return root ``` #### `get_weight` @@ -274,9 +270,8 @@ def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconB or voting_source.epoch == store.justified_checkpoint.epoch ) - # If the block should be pulled-up due to previous epoch being justified, also check - # that the unrealized justification is higher than the store's justified - # checkpoint, and the voting source is not more than two epochs ago. + # If the previous epoch is justified, the block should be pulled-up. In this case, check that unrealized + # justification is higher than the store and that the voting source is not more than two epochs ago if not correct_justified and is_previous_epoch_justified(store): correct_justified = ( store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch and @@ -404,7 +399,7 @@ def on_tick_per_slot(store: Store, time: uint64) -> None: store.proposer_boost_root = Root() # If a new epoch, pull-up justification and finalization from previous epoch - if current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0: + if current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0: update_checkpoints(store, store.unrealized_justified_checkpoint, store.unrealized_finalized_checkpoint) ``` @@ -470,9 +465,7 @@ def store_target_checkpoint_state(store: Store, target: Checkpoint) -> None: ##### `update_latest_messages` ```python -def update_latest_messages(store: Store, - attesting_indices: Sequence[ValidatorIndex], - attestation: Attestation) -> None: +def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIndex], attestation: Attestation) -> None: target = attestation.data.target beacon_block_root = attestation.data.beacon_block_root non_equivocating_attesting_indices = [i for i in attesting_indices if i not 
in store.equivocating_indices] From ff7a6c5d0e8ea7af80836fb808bb22e391390f6d Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Tue, 14 Mar 2023 22:14:31 +0000 Subject: [PATCH 63/73] Add description when to use verify_sidecar_signature --- specs/deneb/p2p-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 6f10f7a345..b6960db3c2 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -125,7 +125,7 @@ The following validations MUST pass before forwarding the `sidecar` on the netwo - _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` - _[IGNORE]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). - _[REJECT]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) passes validation. -- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the `sidecar.proposer_index` pubkey. +- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid as verified by `verify_sidecar_signature` - _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.block_root, sidecar.index)`. - _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`). If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message. 
From 2d4bfabceba821cd1ce3fba67322a14294c7343c Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Tue, 14 Mar 2023 22:16:41 +0000 Subject: [PATCH 64/73] Correct signature of get_blobs_and_kzg_commitments --- specs/deneb/validator.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index a21fadd085..04d8e2a409 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -45,7 +45,7 @@ Implementers may also retrieve blobs individually per transaction. ```python def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> \ - Tuple[Sequence[BLSFieldElement], Sequence[KZGCommitment], Sequence[KZGProof]]: + Tuple[Sequence[Blob], Sequence[KZGCommitment], Sequence[KZGProof]]: # pylint: disable=unused-argument ... ``` From 3e281e745770dc8282b3f9fc7cf944f39bf3f393 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Tue, 14 Mar 2023 22:22:23 +0000 Subject: [PATCH 65/73] Alternative for linter --- specs/deneb/validator.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index 04d8e2a409..b627de023e 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -44,8 +44,9 @@ Note: This API is *unstable*. `get_blobs_and_kzg_commitments` and `get_payload` Implementers may also retrieve blobs individually per transaction. ```python -def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> \ - Tuple[Sequence[Blob], Sequence[KZGCommitment], Sequence[KZGProof]]: +def get_blobs_and_kzg_commitments( + payload_id: PayloadId +) -> Tuple[Sequence[Blob], Sequence[KZGCommitment], Sequence[KZGProof]]: # pylint: disable=unused-argument ... 
``` From 8fd22ab5044b7e8dbe8b11044ee35adce9f57244 Mon Sep 17 00:00:00 2001 From: kevaundray Date: Wed, 15 Mar 2023 15:25:09 +0000 Subject: [PATCH 66/73] code review --- specs/deneb/polynomial-commitments.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index bde9627565..c48857d9e8 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -65,7 +65,7 @@ Public functions MUST accept raw bytes as input and perform the required cryptog | `KZGCommitment` | `Bytes48` | Validation: Perform [BLS standard's](https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-2.5) "KeyValidate" check but do allow the identity point | | `KZGProof` | `Bytes48` | Same as for `KZGCommitment` | | `Polynomial` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_BLOB]` | A polynomial in evaluation form | -| `Blob` | `ByteVector[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB]` | A fixed-length sequence of bytes that may uniquely encode the evaluations of a polynomial | +| `Blob` | `ByteVector[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB]` | A basic data blob | ## Constants From 5977f36fef8de792c3cf1eea5d5883adabdbe9a9 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 15 Mar 2023 10:54:49 -0600 Subject: [PATCH 67/73] minor nits from code review --- specs/deneb/p2p-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 6207cc76a8..9be028620d 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -126,7 +126,7 @@ The following validations MUST pass before forwarding the `sidecar` on the netwo - _[IGNORE]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). 
- _[REJECT]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) passes validation. - _[REJECT]_ The sidecar is from a higher slot than the sidecar's block's parent (defined by `sidecar.block_parent_root`). -- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid as verified by `verify_sidecar_signature` +- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid as verified by `verify_sidecar_signature`. - _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.block_root, sidecar.index)`. - _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`). If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message. From 67984b5665362e327871007e438d6f67ef2cd8de Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 15 Mar 2023 11:30:46 -0600 Subject: [PATCH 68/73] bump version.txt --- tests/core/pyspec/eth2spec/VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/VERSION.txt b/tests/core/pyspec/eth2spec/VERSION.txt index 99aab26b29..9b388ed89d 100644 --- a/tests/core/pyspec/eth2spec/VERSION.txt +++ b/tests/core/pyspec/eth2spec/VERSION.txt @@ -1 +1 @@ -1.3.0-rc.3 +1.3.0-rc.4 From 5a217607b0ace03fcea5214eed9074ed19fb031e Mon Sep 17 00:00:00 2001 From: Aditya Asgaonkar Date: Wed, 15 Mar 2023 16:32:57 -0700 Subject: [PATCH 69/73] shift all f.c. 
tests to altair & later --- .../test/phase0/fork_choice/test_ex_ante.py | 12 +++--- .../test/phase0/fork_choice/test_get_head.py | 19 ++++----- .../test/phase0/fork_choice/test_on_block.py | 41 +++++++++---------- .../test/phase0/fork_choice/test_reorg.py | 18 ++++---- 4 files changed, 44 insertions(+), 46 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_ex_ante.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_ex_ante.py index 0a145dfa52..15feffa83d 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_ex_ante.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_ex_ante.py @@ -1,7 +1,7 @@ from eth2spec.test.context import ( MAINNET, spec_state_test, - with_all_phases, + with_altair_and_later, with_presets, ) from eth2spec.test.helpers.attestations import ( @@ -31,7 +31,7 @@ def _apply_base_block_a(spec, state, store, test_steps): assert spec.get_head(store) == signed_block_a.message.hash_tree_root() -@with_all_phases +@with_altair_and_later @spec_state_test def test_ex_ante_vanilla(spec, state): """ @@ -118,7 +118,7 @@ def _get_greater_than_proposer_boost_score(spec, store, state, proposer_boost_ro return proposer_score // base_effective_balance + 1 -@with_all_phases +@with_altair_and_later @with_presets([MAINNET], reason="to create non-duplicate committee") @spec_state_test def test_ex_ante_attestations_is_greater_than_proposer_boost_with_boost(spec, state): @@ -191,7 +191,7 @@ def _filter_participant_set(participants): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test def test_ex_ante_sandwich_without_attestations(spec, state): """ @@ -254,7 +254,7 @@ def test_ex_ante_sandwich_without_attestations(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test def test_ex_ante_sandwich_with_honest_attestation(spec, state): """ @@ -335,7 +335,7 @@ def _filter_participant_set(participants): yield 'steps', test_steps 
-@with_all_phases +@with_altair_and_later @with_presets([MAINNET], reason="to create non-duplicate committee") @spec_state_test def test_ex_ante_sandwich_with_boost_not_sufficient(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py index 2107a470a7..f5960ff703 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py @@ -2,7 +2,6 @@ from eth2spec.test.context import ( spec_state_test, - with_all_phases, with_altair_and_later, with_presets, ) @@ -38,7 +37,7 @@ rng = random.Random(1001) -@with_all_phases +@with_altair_and_later @spec_state_test def test_genesis(spec, state): test_steps = [] @@ -62,7 +61,7 @@ def test_genesis(spec, state): yield 'description', 'meta', f"Although it's not phase 0, we may use {spec.fork} spec to start testnets." -@with_all_phases +@with_altair_and_later @spec_state_test def test_chain_no_attestations(spec, state): test_steps = [] @@ -91,7 +90,7 @@ def test_chain_no_attestations(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test def test_split_tie_breaker_no_attestations(spec, state): test_steps = [] @@ -130,7 +129,7 @@ def test_split_tie_breaker_no_attestations(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test def test_shorter_chain_but_heavier_weight(spec, state): test_steps = [] @@ -170,7 +169,7 @@ def test_shorter_chain_but_heavier_weight(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_filtered_block_tree(spec, state): @@ -247,7 +246,7 @@ def test_filtered_block_tree(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test def test_proposer_boost_correct_head(spec, state): test_steps = [] 
@@ -302,7 +301,7 @@ def test_proposer_boost_correct_head(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test def test_discard_equivocations_on_attester_slashing(spec, state): test_steps = [] @@ -374,7 +373,7 @@ def test_discard_equivocations_on_attester_slashing(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_discard_equivocations_slashed_validator_censoring(spec, state): @@ -636,7 +635,7 @@ def test_voting_source_beyond_two_epoch(spec, state): The block being a descendant of store.justified_checkpoint.root is necessary because filter_block_tree descends the tree starting at store.justified_checkpoint.root -@with_all_phases +@with_altair_and_later @spec_state_test def test_incorrect_finalized(spec, state): # Check that the store doesn't allow for a head block that has: diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py index eaae825ab2..0af7753391 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py @@ -5,7 +5,6 @@ from eth2spec.test.context import ( MINIMAL, spec_state_test, - with_all_phases, with_altair_and_later, with_presets ) @@ -47,7 +46,7 @@ def _drop_random_one_third(_slot, _index, indices): return rng.sample(sorted(indices), participant_count) -@with_all_phases +@with_altair_and_later @spec_state_test def test_basic(spec, state): test_steps = [] @@ -77,7 +76,7 @@ def test_basic(spec, state): # TODO: add tests for justified_root and finalized_root -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_on_block_checkpoints(spec, state): @@ -114,7 +113,7 @@ def test_on_block_checkpoints(spec, state): yield 'steps', test_steps -@with_all_phases 
+@with_altair_and_later @spec_state_test def test_on_block_future_block(spec, state): test_steps = [] @@ -135,7 +134,7 @@ def test_on_block_future_block(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test def test_on_block_bad_parent_root(spec, state): test_steps = [] @@ -161,7 +160,7 @@ def test_on_block_bad_parent_root(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_on_block_before_finalized(spec, state): @@ -193,7 +192,7 @@ def test_on_block_before_finalized(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_on_block_finalized_skip_slots(spec, state): @@ -240,7 +239,7 @@ def test_on_block_finalized_skip_slots(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state): @@ -287,7 +286,7 @@ def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state): """ -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): @@ -369,7 +368,7 @@ def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): """ -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state): @@ -441,7 +440,7 @@ def test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test def test_proposer_boost(spec, state): test_steps = [] @@ -500,7 +499,7 @@ def test_proposer_boost(spec, state): yield 'steps', test_steps -@with_all_phases 
+@with_altair_and_later @spec_state_test def test_proposer_boost_root_same_slot_untimely_block(spec, state): test_steps = [] @@ -534,7 +533,7 @@ def test_proposer_boost_root_same_slot_untimely_block(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_justification_withholding(spec, state): @@ -616,7 +615,7 @@ def test_justification_withholding(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_justification_withholding_reverse_order(spec, state): @@ -693,7 +692,7 @@ def test_justification_withholding_reverse_order(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_justification_update_beginning_of_epoch(spec, state): @@ -742,7 +741,7 @@ def test_justification_update_beginning_of_epoch(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_justification_update_end_of_epoch(spec, state): @@ -792,7 +791,7 @@ def test_justification_update_end_of_epoch(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_incompatible_justification_update_start_of_epoch(spec, state): @@ -868,7 +867,7 @@ def test_incompatible_justification_update_start_of_epoch(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_incompatible_justification_update_end_of_epoch(spec, state): @@ -945,7 +944,7 @@ def test_incompatible_justification_update_end_of_epoch(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def 
test_justified_update_not_realized_finality(spec, state): @@ -1018,7 +1017,7 @@ def test_justified_update_not_realized_finality(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_justified_update_monotonic(spec, state): @@ -1097,7 +1096,7 @@ def test_justified_update_monotonic(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_justified_update_always_if_better(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py index 30f1b06c7b..afff8d4f46 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py @@ -1,6 +1,6 @@ from eth2spec.test.context import ( spec_state_test, - with_all_phases, + with_altair_and_later, with_presets, ) from eth2spec.test.helpers.constants import ( @@ -35,7 +35,7 @@ TESTING_PRESETS = [MINIMAL] -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_simple_attempted_reorg_without_enough_ffg_votes(spec, state): @@ -250,7 +250,7 @@ def _run_delayed_justification(spec, state, attemped_reorg, is_justifying_previo yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_simple_attempted_reorg_delayed_justification_current_epoch(spec, state): @@ -401,7 +401,7 @@ def _run_include_votes_of_another_empty_chain(spec, state, enough_ffg, is_justif yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_include_votes_another_empty_chain_with_enough_ffg_votes_current_epoch(spec, state): @@ -412,7 +412,7 @@ def 
test_include_votes_another_empty_chain_with_enough_ffg_votes_current_epoch(s spec, state, enough_ffg=True, is_justifying_previous_epoch=False) -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_include_votes_another_empty_chain_without_enough_ffg_votes_current_epoch(spec, state): @@ -423,7 +423,7 @@ def test_include_votes_another_empty_chain_without_enough_ffg_votes_current_epoc spec, state, enough_ffg=False, is_justifying_previous_epoch=False) -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_delayed_justification_current_epoch(spec, state): @@ -443,7 +443,7 @@ def test_delayed_justification_current_epoch(spec, state): yield from _run_delayed_justification(spec, state, attemped_reorg=False, is_justifying_previous_epoch=False) -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_delayed_justification_previous_epoch(spec, state): @@ -460,7 +460,7 @@ def test_delayed_justification_previous_epoch(spec, state): yield from _run_delayed_justification(spec, state, attemped_reorg=False, is_justifying_previous_epoch=True) -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_simple_attempted_reorg_delayed_justification_previous_epoch(spec, state): @@ -483,7 +483,7 @@ def test_simple_attempted_reorg_delayed_justification_previous_epoch(spec, state yield from _run_delayed_justification(spec, state, attemped_reorg=True, is_justifying_previous_epoch=True) -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_include_votes_another_empty_chain_with_enough_ffg_votes_previous_epoch(spec, state): From c48012f674007d4ea3e99d814eca0770fe0eb155 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 16 Mar 2023 08:34:44 +0800 Subject: [PATCH 70/73] Add EIP-6110 
to README table --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 49e1c3a4d9..61600d3890 100644 --- a/README.md +++ b/README.md @@ -29,6 +29,7 @@ Features are researched and developed in parallel, and then consolidated into se | Sharding (outdated) |
  • Core
    • [Beacon Chain changes](specs/_features/sharding/beacon-chain.md)
  • Additions
    • [P2P networking](specs/_features/sharding/p2p-interface.md)
| | Custody Game (outdated) |
  • Core
    • [Beacon Chain changes](specs/_features/custody_game/beacon-chain.md)
  • Additions
    • [Honest validator guide changes](specs/_features/custody_game/validator.md)
| Dependent on sharding | | Data Availability Sampling (outdated) |
  • Core
    • [Core types and functions](specs/_features/das/das-core.md)
    • [Fork choice changes](specs/_features/das/fork-choice.md)
  • Additions
    • [P2P Networking](specs/_features/das/p2p-interface.md)
    • [Sampling process](specs/_features/das/sampling.md)
|
  • Dependent on sharding
  • [Technical explainer](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD)
| +| EIP-6110 |
  • Core
    • [Beacon Chain changes](specs/_features/eip6110//beacon-chain.md)
    • [EIP-6110 fork](specs/_features/eip6110/fork.md)
  • Additions
    • [Honest validator guide changes](specs/_features/eip6110/validator.md)
| ### Accompanying documents can be found in [specs](specs) and include: From debf51e87afe299c3c2b0ce2459b5fba859b3b00 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 16 Mar 2023 10:13:09 +0800 Subject: [PATCH 71/73] Fix `get_sample_opaque_tx` call signature --- tests/core/pyspec/eth2spec/test/helpers/sharding.py | 2 +- tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/helpers/sharding.py b/tests/core/pyspec/eth2spec/test/helpers/sharding.py index 89f03c3c39..6b913b90ec 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/sharding.py +++ b/tests/core/pyspec/eth2spec/test/helpers/sharding.py @@ -12,7 +12,7 @@ # -# Containers from Deneb +# Containers from EIP-4844 # MAX_CALLDATA_SIZE = 2**24 MAX_VERSIONED_HASHES_LIST_SIZE = 2**24 diff --git a/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py b/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py index 35ddbc330a..c164515103 100644 --- a/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py +++ b/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py @@ -235,7 +235,7 @@ def random_block_capella(spec, state, signed_blocks, scenario_state, rng=Random( def random_block_deneb(spec, state, signed_blocks, scenario_state, rng=Random(3456)): block = random_block_capella(spec, state, signed_blocks, scenario_state) # TODO: more commitments. 
blob_kzg_commitments: List[KZGCommitment, MAX_BLOBS_PER_BLOCK] - opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=1) + opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=1) block.body.execution_payload.transactions = [opaque_tx] block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) block.body.blob_kzg_commitments = blob_kzg_commitments From 8d3097be999c1692e7d29238072ec22918fdba31 Mon Sep 17 00:00:00 2001 From: Aditya Asgaonkar Date: Wed, 15 Mar 2023 19:48:16 -0700 Subject: [PATCH 72/73] remove phase 0 from f.c. test generator --- tests/generators/fork_choice/main.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/generators/fork_choice/main.py b/tests/generators/fork_choice/main.py index 945e687005..20aa393923 100644 --- a/tests/generators/fork_choice/main.py +++ b/tests/generators/fork_choice/main.py @@ -3,15 +3,14 @@ if __name__ == "__main__": - phase_0_mods = {key: 'eth2spec.test.phase0.fork_choice.test_' + key for key in [ + # Note: Fork choice tests start from Altair - there are no fork choice test for phase 0 anymore + altair_mods = {key: 'eth2spec.test.phase0.fork_choice.test_' + key for key in [ 'get_head', 'on_block', 'ex_ante', 'reorg', 'withholding', ]} - # No additional Altair specific finality tests, yet. 
- altair_mods = phase_0_mods # For merge `on_merge_block` test kind added with `pow_block_N.ssz` files with several # PowBlock's which should be resolved by `get_pow_block(hash: Hash32) -> PowBlock` function @@ -23,7 +22,6 @@ deneb_mods = capella_mods # No additional Capella specific fork choice tests all_mods = { - PHASE0: phase_0_mods, ALTAIR: altair_mods, BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, From 74a1c90bbce92228044db3d6c3e0dd7da7f89844 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 16 Mar 2023 11:34:04 +0800 Subject: [PATCH 73/73] fix lint --- tests/generators/fork_choice/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/generators/fork_choice/main.py b/tests/generators/fork_choice/main.py index 20aa393923..4456c2546b 100644 --- a/tests/generators/fork_choice/main.py +++ b/tests/generators/fork_choice/main.py @@ -1,5 +1,5 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB +from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX, CAPELLA, DENEB if __name__ == "__main__":