
Commit

Merge pull request #4078 from ethereum/dev
Release v1.5.0-beta.0
jtraglia authored Jan 10, 2025
2 parents 7deecbb + da17461 commit 9a0b3ef
Showing 47 changed files with 2,201 additions and 713 deletions.
4 changes: 2 additions & 2 deletions .circleci/config.yml
@@ -35,13 +35,13 @@ commands:
description: "Restore the cache with pyspec keys"
steps:
- restore_cached_venv:
- venv_name: v30-pyspec
+ venv_name: v32-pyspec
reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "requirements_preinstallation.txt" }}
save_pyspec_cached_venv:
description: Save a venv into a cache with pyspec keys"
steps:
- save_cached_venv:
- venv_name: v30-pyspec
+ venv_name: v32-pyspec
reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "requirements_preinstallation.txt" }}
venv_path: ./venv
jobs:
23 changes: 13 additions & 10 deletions Makefile
@@ -16,7 +16,6 @@ ALL_EXECUTABLE_SPEC_NAMES = \

# A list of fake targets.
.PHONY: \
- check_toc \
clean \
coverage \
detect_errors \
@@ -39,7 +38,6 @@ NORM = $(shell tput sgr0)

# Print target descriptions.
help:
@echo "make $(BOLD)check_toc$(NORM) -- check table of contents"
@echo "make $(BOLD)clean$(NORM) -- delete all untracked files"
@echo "make $(BOLD)coverage$(NORM) -- run pyspec tests with coverage"
@echo "make $(BOLD)detect_errors$(NORM) -- detect generator errors"
@@ -85,7 +83,7 @@ $(ETH2SPEC): setup.py | $(VENV)

# Force rebuild/install the eth2spec package.
eth2spec:
- $(MAKE) --always-make $(ETH2SPEC)
+ @$(MAKE) --always-make $(ETH2SPEC)

# Create the pyspec for all phases.
pyspec: $(VENV) setup.py
@@ -99,6 +97,8 @@ pyspec: $(VENV) setup.py
TEST_REPORT_DIR = $(PYSPEC_DIR)/test-reports

# Run pyspec tests.
+ # Note: for debugging output to show, print to stderr.
+ #
# To run a specific test, append k=<test>, eg:
# make test k=test_verify_kzg_proof
# To run tests for a specific fork, append fork=<fork>, eg:
@@ -117,6 +117,7 @@ test: $(ETH2SPEC) pyspec
@mkdir -p $(TEST_REPORT_DIR)
@$(PYTHON_VENV) -m pytest \
-n auto \
+ --capture=no \
$(MAYBE_TEST) \
$(MAYBE_FORK) \
$(PRESET) \
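
For context, a minimal sketch (not part of this diff) of the behavior the new `--capture=no` flag and the stderr note enable — the test name and message are hypothetical:

```python
import sys

def test_debug_output_example():
    # With `--capture=no`, pytest stops capturing output, so this message
    # appears live in the terminal while the test runs. Printing to stderr
    # keeps debug output separate from captured stdout.
    print("debug: inspecting intermediate state", file=sys.stderr)
    assert 1 + 1 == 2
```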
@@ -193,10 +194,6 @@ MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/*/*.md) \
$(wildcard $(SPEC_DIR)/_features/*/*/*.md) \
$(wildcard $(SSZ_DIR)/*.md)

- # Check all files and error if any ToC were modified.
- check_toc: $(MARKDOWN_FILES:=.toc)
- @[ "$$(find . -name '*.md.tmp' -print -quit)" ] && exit 1 || exit 0

# Generate ToC sections & save copy of original if modified.
%.toc:
@cp $* $*.tmp; \
@@ -209,8 +206,12 @@ check_toc: $(MARKDOWN_FILES:=.toc)
echo "\033[1;34m See $*.tmp\033[0m"; \
fi

+ # Check all files and error if any ToC were modified.
+ _check_toc: $(MARKDOWN_FILES:=.toc)
+ @[ "$$(find . -name '*.md.tmp' -print -quit)" ] && exit 1 || exit 0

# Check for mistakes.
- lint: $(ETH2SPEC) pyspec check_toc
+ lint: $(ETH2SPEC) pyspec _check_toc
@$(CODESPELL_VENV) . --skip "./.git,$(VENV),$(PYSPEC_DIR)/.mypy_cache" -I .codespell-whitelist
@$(PYTHON_VENV) -m flake8 --config $(FLAKE8_CONFIG) $(PYSPEC_DIR)/eth2spec
@$(PYTHON_VENV) -m flake8 --config $(FLAKE8_CONFIG) $(TEST_GENERATORS_DIR)
@@ -235,17 +236,19 @@ gen_list:
done

# Run one generator.
+ # This will forcibly rebuild eth2spec just in case.
# To check modules for a generator, append modcheck=true, eg:
# make gen_genesis modcheck=true
gen_%: MAYBE_MODCHECK := $(if $(filter true,$(modcheck)),--modcheck)
- gen_%: $(ETH2SPEC) pyspec
+ gen_%: eth2spec
@mkdir -p $(TEST_VECTOR_DIR)
@$(PYTHON_VENV) $(GENERATOR_DIR)/$*/main.py \
--output $(TEST_VECTOR_DIR) \
$(MAYBE_MODCHECK)

# Run all generators then check for errors.
- gen_all: $(GENERATOR_TARGETS) detect_errors
+ gen_all: $(GENERATOR_TARGETS)
+ @$(MAKE) detect_errors

# Detect errors in generators.
detect_errors: $(TEST_VECTOR_DIR)
4 changes: 1 addition & 3 deletions configs/mainnet.yaml
@@ -115,15 +115,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa
# Networking
# ---------------------------------------------------------------
# `10 * 2**20` (= 10485760, 10 MiB)
- GOSSIP_MAX_SIZE: 10485760
+ MAX_PAYLOAD_SIZE: 10485760
# `2**10` (= 1024)
MAX_REQUEST_BLOCKS: 1024
# `2**8` (= 256)
EPOCHS_PER_SUBNET_SUBSCRIPTION: 256
# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months)
MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024
- # `10 * 2**20` (=10485760, 10 MiB)
- MAX_CHUNK_SIZE: 10485760
# 5s
TTFB_TIMEOUT: 5
# 10s
4 changes: 1 addition & 3 deletions configs/minimal.yaml
@@ -116,15 +116,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890
# Networking
# ---------------------------------------------------------------
# `10 * 2**20` (= 10485760, 10 MiB)
- GOSSIP_MAX_SIZE: 10485760
+ MAX_PAYLOAD_SIZE: 10485760
# `2**10` (= 1024)
MAX_REQUEST_BLOCKS: 1024
# `2**8` (= 256)
EPOCHS_PER_SUBNET_SUBSCRIPTION: 256
# [customized] `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 272)
MIN_EPOCHS_FOR_BLOCK_REQUESTS: 272
- # `10 * 2**20` (=10485760, 10 MiB)
- MAX_CHUNK_SIZE: 10485760
# 5s
TTFB_TIMEOUT: 5
# 10s
2 changes: 1 addition & 1 deletion docker/README.md
@@ -10,7 +10,7 @@ Handy commands:

Ideally manual running of docker containers is for advanced users, we recommend the script based approach described below for most users.

- The `scripts/build_run_docker_tests.sh` script will cover most usecases. The script allows the user to configure the fork(altair/bellatrix/capella..), `$IMAGE_NAME` (specifies the container to use), preset type (mainnet/minimal), and test all forks flags. Ideally, this is the main way that users interact with the spec tests instead of running it locally with varying versions of dependencies.
+ The `scripts/build_run_docker_tests.sh` script will cover most use cases. The script allows the user to configure the fork(altair/bellatrix/capella..), `$IMAGE_NAME` (specifies the container to use), preset type (mainnet/minimal), and test all forks flags. Ideally, this is the main way that users interact with the spec tests instead of running it locally with varying versions of dependencies.

E.g:
- `./build_run_docker_tests.sh --p mainnet` will run the mainnet preset tests
23 changes: 20 additions & 3 deletions setup.py
@@ -12,7 +12,7 @@
import copy
from collections import OrderedDict
import json
- from functools import reduce
+ from functools import lru_cache

from pysetup.constants import (
# code names
@@ -70,22 +70,26 @@ def installPackage(package: str):
from marko.ext.gfm.elements import Table


+ @lru_cache(maxsize=None)
def _get_name_from_heading(heading: Heading) -> Optional[str]:
last_child = heading.children[-1]
if isinstance(last_child, CodeSpan):
return last_child.children
return None


+ @lru_cache(maxsize=None)
def _get_source_from_code_block(block: FencedCode) -> str:
return block.children[0].children.strip()


+ @lru_cache(maxsize=None)
def _get_function_name_from_source(source: str) -> str:
fn = ast.parse(source).body[0]
return fn.name


+ @lru_cache(maxsize=None)
def _get_self_type_from_source(source: str) -> Optional[str]:
fn = ast.parse(source).body[0]
args = fn.args.args
@@ -98,6 +102,7 @@ def _get_self_type_from_source(source: str) -> Optional[str]:
return args[0].annotation.id


+ @lru_cache(maxsize=None)
def _get_class_info_from_source(source: str) -> Tuple[str, Optional[str]]:
class_def = ast.parse(source).body[0]
base = class_def.bases[0]
@@ -113,12 +118,14 @@ def _get_class_info_from_source(source: str) -> Tuple[str, Optional[str]]:
return class_def.name, parent_class


+ @lru_cache(maxsize=None)
def _is_constant_id(name: str) -> bool:
if name[0] not in string.ascii_uppercase + '_':
return False
return all(map(lambda c: c in string.ascii_uppercase + '_' + string.digits, name[1:]))


+ @lru_cache(maxsize=None)
def _load_kzg_trusted_setups(preset_name):
trusted_setups_file_path = str(Path(__file__).parent) + '/presets/' + preset_name + '/trusted_setups/trusted_setup_4096.json'

@@ -130,6 +137,7 @@ def _load_kzg_trusted_setups(preset_name):

return trusted_setup_G1_monomial, trusted_setup_G1_lagrange, trusted_setup_G2_monomial

+ @lru_cache(maxsize=None)
def _load_curdleproofs_crs(preset_name):
"""
NOTE: File generated from https://github.com/asn-d6/curdleproofs/blob/8e8bf6d4191fb6a844002f75666fb7009716319b/tests/crs.rs#L53-L67
@@ -153,6 +161,7 @@ def _load_curdleproofs_crs(preset_name):
}


+ @lru_cache(maxsize=None)
def _get_eth2_spec_comment(child: LinkRefDef) -> Optional[str]:
_, _, title = child._parse_info
if not (title[0] == "(" and title[len(title)-1] == ")"):
@@ -163,6 +172,7 @@ def _get_eth2_spec_comment(child: LinkRefDef) -> Optional[str]:
return title[len(ETH2_SPEC_COMMENT_PREFIX):].strip()


+ @lru_cache(maxsize=None)
def _parse_value(name: str, typed_value: str, type_hint: Optional[str] = None) -> VariableDefinition:
comment = None
if name in ("ROOT_OF_UNITY_EXTENDED", "ROOTS_OF_UNITY_EXTENDED", "ROOTS_OF_UNITY_REDUCED"):
@@ -185,6 +195,11 @@ def _update_constant_vars_with_kzg_setups(constant_vars, preset_name):
constant_vars['KZG_SETUP_G2_MONOMIAL'] = VariableDefinition(constant_vars['KZG_SETUP_G2_MONOMIAL'].value, str(kzg_setups[2]), comment, None)


+ @lru_cache(maxsize=None)
+ def parse_markdown(content: str):
+ return gfm.parse(content)


def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], preset_name=str) -> SpecObject:
functions: Dict[str, str] = {}
protocols: Dict[str, ProtocolDefinition] = {}
Expand All @@ -198,7 +213,7 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], pr
custom_types: Dict[str, str] = {}

with open(file_name) as source_file:
- document = gfm.parse(source_file.read())
+ document = parse_markdown(source_file.read())

current_name = None
should_skip = False
@@ -326,6 +341,7 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], pr
)


+ @lru_cache(maxsize=None)
def load_preset(preset_files: Sequence[Path]) -> Dict[str, str]:
"""
Loads the a directory of preset files, merges the result into one preset.
@@ -344,6 +360,7 @@ def load_preset(preset_files: Sequence[Path]) -> Dict[str, str]:
return parse_config_vars(preset)


+ @lru_cache(maxsize=None)
def load_config(config_path: Path) -> Dict[str, str]:
"""
Loads the given configuration file.
@@ -358,7 +375,7 @@ def build_spec(fork: str,
source_files: Sequence[Path],
preset_files: Sequence[Path],
config_file: Path) -> str:
- preset = load_preset(preset_files)
+ preset = load_preset(tuple(preset_files))
config = load_config(config_file)
all_specs = [get_spec(spec, preset, config, preset_name) for spec in source_files]

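
The pattern behind the `setup.py` changes above, as a minimal sketch (the function bodies are illustrative stand-ins, not the real loaders): `functools.lru_cache` memoizes pure functions, but it hashes its arguments, which is why `build_spec` now passes `tuple(preset_files)` — a list is unhashable — to the cached `load_preset`:

```python
from functools import lru_cache
from typing import Dict, Sequence, Tuple

@lru_cache(maxsize=None)
def load_preset(preset_files: Tuple[str, ...]) -> Dict[str, str]:
    # Hypothetical stand-in: repeated calls with the same files now hit
    # the cache instead of re-reading and re-parsing each preset.
    return {name: name for name in preset_files}

def build_spec(preset_files: Sequence[str]) -> Dict[str, str]:
    # lru_cache requires hashable arguments; a list would raise TypeError,
    # so the sequence is converted to a tuple before the cached call.
    return load_preset(tuple(preset_files))
```

Because the spec builder parses the same markdown, preset, and trusted-setup inputs once per fork, caching these pure helpers avoids redundant work across builds.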
2 changes: 1 addition & 1 deletion specs/_features/custody_game/beacon-chain.md
@@ -619,7 +619,7 @@ def process_custody_slashing(state: BeaconState, signed_custody_slashing: Signed
for attester_index in attesters:
if attester_index != custody_slashing.malefactor_index:
increase_balance(state, attester_index, whistleblower_reward)
- # No special whisteblower reward: it is expected to be an attester. Others are free to slash too however.
+ # No special whistleblower reward: it is expected to be an attester. Others are free to slash too however.
else:
# The claim was false, the custody bit was correct. Slash the whistleblower that induced this work.
slash_validator(state, custody_slashing.whistleblower_index)
6 changes: 3 additions & 3 deletions specs/_features/eip7732/p2p-interface.md
@@ -130,7 +130,7 @@ The *type* of the payload of this topic changes to the (modified) `SignedBeaconB

There are no new validations for this topic. However, all validations with regards to the `ExecutionPayload` are removed:

- - _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- i.e. validate that len(body.signed_beacon_block.message.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK
+ - _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- i.e. validate that `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK`
- _[REJECT]_ The block's execution payload timestamp is correct with respect to the slot
-- i.e. `execution_payload.timestamp == compute_timestamp_at_slot(state, block.slot)`.
- If `execution_payload` verification of block's parent by an execution node is *not* complete:
@@ -151,7 +151,7 @@ This topic is used to propagate execution payload messages as `SignedExecutionPa

The following validations MUST pass before forwarding the `signed_execution_payload_envelope` on the network, assuming the alias `envelope = signed_execution_payload_envelope.message`, `payload = payload_envelope.payload`:

- - _[IGNORE]_ The envelope's block root `envelope.block_root` has been seen (via both gossip and non-gossip sources) (a client MAY queue payload for processing once the block is retrieved).
+ - _[IGNORE]_ The envelope's block root `envelope.block_root` has been seen (via gossip or non-gossip sources) (a client MAY queue payload for processing once the block is retrieved).
- _[IGNORE]_ The node has not seen another valid `SignedExecutionPayloadEnvelope` for this block root from this builder.

Let `block` be the block with `envelope.beacon_block_root`.
@@ -171,7 +171,7 @@ The following validations MUST pass before forwarding the `payload_attestation_m
- _[IGNORE]_ The message's slot is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e. `data.slot == current_slot`.
- _[REJECT]_ The message's payload status is a valid status, i.e. `data.payload_status < PAYLOAD_INVALID_STATUS`.
- _[IGNORE]_ The `payload_attestation_message` is the first valid message received from the validator with index `payload_attestation_message.validate_index`.
- - _[IGNORE]_ The message's block `data.beacon_block_root` has been seen (via both gossip and non-gossip sources) (a client MAY queue attestation for processing once the block is retrieved. Note a client might want to request payload after).
+ - _[IGNORE]_ The message's block `data.beacon_block_root` has been seen (via gossip or non-gossip sources) (a client MAY queue attestation for processing once the block is retrieved. Note a client might want to request payload after).
- _[REJECT]_ The message's block `data.beacon_block_root` passes validation.
- _[REJECT]_ The message's validator index is within the payload committee in `get_ptc(state, data.slot)`. The `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice.
- _[REJECT]_ The message's signature of `payload_attestation_message.signature` is valid with respect to the validator index.
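
A rough sketch of the corrected commitment-count check from the first hunk above (object shapes assumed for illustration, not taken from the spec code):

```python
def valid_blob_commitment_count(signed_beacon_block, max_blobs_per_block: int) -> bool:
    # The corrected path: the commitments live under message.body, rather
    # than the body.signed_beacon_block path given in the old text.
    commitments = signed_beacon_block.message.body.blob_kzg_commitments
    return len(commitments) <= max_blobs_per_block
```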
2 changes: 1 addition & 1 deletion specs/_features/whisk/beacon-chain.md
@@ -54,7 +54,7 @@ This document details the beacon chain additions and changes of to support the W
| `WHISK_PROPOSER_TRACKERS_COUNT` | `uint64(2**13)` (= 8,192) | number of proposer trackers |
| `WHISK_VALIDATORS_PER_SHUFFLE` | `uint64(2**7 - 4)` (= 124) | number of validators shuffled per shuffle step |
| `WHISK_MAX_SHUFFLE_PROOF_SIZE` | `uint64(2**15)` | max size of a shuffle proof |
- | `WHISK_MAX_OPENING_PROOF_SIZE` | `uint64(2**10)` | max size of a opening proof |
+ | `WHISK_MAX_OPENING_PROOF_SIZE` | `uint64(2**10)` | max size of an opening proof |

## Configuration

2 changes: 1 addition & 1 deletion specs/altair/light-client/full-node.md
@@ -146,7 +146,7 @@ Full nodes SHOULD provide the best derivable `LightClientUpdate` (according to `

- `LightClientUpdate` are assigned to sync committee periods based on their `attested_header.beacon.slot`
- `LightClientUpdate` are only considered if `compute_sync_committee_period_at_slot(update.attested_header.beacon.slot) == compute_sync_committee_period_at_slot(update.signature_slot)`
- - Only `LightClientUpdate` with `next_sync_committee` as selected by fork choice are provided, regardless of ranking by `is_better_update`. To uniquely identify a non-finalized sync committee fork, all of `period`, `current_sync_committee` and `next_sync_committee` need to be incorporated, as sync committees may reappear over time.
+ - Only `LightClientUpdate` with `sync_aggregate` from blocks on the canonical chain as selected by fork choice are considered, regardless of ranking by `is_better_update`. `LightClientUpdate` referring to orphaned blocks SHOULD NOT be provided.

### `create_light_client_finality_update`

4 changes: 2 additions & 2 deletions specs/altair/validator.md
@@ -295,7 +295,7 @@ The `subnet_id` is derived from the position in the sync committee such that the
*Note*: This function returns multiple deduplicated subnets if a given validator index is included multiple times in a given sync committee across multiple subcommittees.

```python
- def compute_subnets_for_sync_committee(state: BeaconState, validator_index: ValidatorIndex) -> Set[uint64]:
+ def compute_subnets_for_sync_committee(state: BeaconState, validator_index: ValidatorIndex) -> Set[SubnetID]:
next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1))
if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(next_slot_epoch):
sync_committee = state.current_sync_committee
@@ -305,7 +305,7 @@ def compute_subnets_for_sync_committee(state: BeaconState, validator_index: Vali
target_pubkey = state.validators[validator_index].pubkey
sync_committee_indices = [index for index, pubkey in enumerate(sync_committee.pubkeys) if pubkey == target_pubkey]
return set([
- uint64(index // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT))
+ SubnetID(index // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT))
for index in sync_committee_indices
])
```
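
To make the subnet arithmetic concrete, a small worked example under the mainnet values (`SYNC_COMMITTEE_SIZE = 512`, `SYNC_COMMITTEE_SUBNET_COUNT = 4` — assumed here for illustration):

```python
SYNC_COMMITTEE_SIZE = 512        # mainnet preset value (assumed)
SYNC_COMMITTEE_SUBNET_COUNT = 4  # mainnet config value (assumed)

# Each subcommittee spans 512 // 4 = 128 consecutive committee positions,
# so a validator at committee index 300 lands on subnet 300 // 128 = 2.
subnet_id = 300 // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT)
assert subnet_id == 2
```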
4 changes: 2 additions & 2 deletions specs/bellatrix/p2p-interface.md
@@ -148,8 +148,8 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
#### Why was the max gossip message size increased at Bellatrix?

With the addition of `ExecutionPayload` to `BeaconBlock`s, there is a dynamic
- field -- `transactions` -- which can validly exceed the `GOSSIP_MAX_SIZE` limit (1 MiB) put in
- place at Phase 0, so GOSSIP_MAX_SIZE has increased to 10 Mib on the network.
+ field -- `transactions` -- which can validly exceed the `MAX_PAYLOAD_SIZE` limit (1 MiB) put in
+ place at Phase 0, so MAX_PAYLOAD_SIZE has increased to 10 MiB on the network.
At the `GAS_LIMIT` (~30M) currently seen on mainnet in 2021, a single transaction
filled entirely with data at a cost of 16 gas per byte can create a valid
`ExecutionPayload` of ~2 MiB. Thus we need a size limit to at least account for
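
The ~2 MiB figure cited above follows from simple arithmetic, sketched here:

```python
GAS_LIMIT = 30_000_000      # approximate mainnet gas limit referenced above
CALLDATA_GAS_PER_BYTE = 16  # gas cost of a non-zero calldata byte

max_data_bytes = GAS_LIMIT // CALLDATA_GAS_PER_BYTE  # 1,875,000 bytes
print(max_data_bytes / 2**20)  # ~1.79 MiB of raw data, ~2 MiB with overhead
```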
