Implement EIP-7892 BPO hardforks #7521
Merged
Changes from 2 commits
Commits (8):
71ae25f  Initial Commit of EIP-7892 (ethDreamer)
fbdacc3  simplify max_blobs_per_block (ethDreamer)
36147d4  Cleanup and Address Comments (ethDreamer)
e831dc6  Merge branch 'unstable' into eip-7892 (jimmygchen)
8be32c5  Fix failing test due to missing blobs. (jimmygchen)
5b0b468  Merge branch 'unstable' into eip-7892 (jimmygchen)
f2d2f70  Fix builld (jimmygchen)
2679608  Merge branch 'eip-7892' of github.com:ethDreamer/lighthouse into eip-… (jimmygchen)
@@ -243,7 +243,7 @@ pub struct ChainSpec {
     /*
      * Networking Fulu
      */
-    max_blobs_per_block_fulu: u64,
+    blob_schedule: Vec<BPOFork>,

     /*
      * Networking Derived
@@ -653,19 +653,48 @@ impl ChainSpec {
         }
     }

-    /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for the fork at `epoch`.
+    /// Return the value of `MAX_BLOBS_PER_BLOCK` for the given `epoch`.
+    /// NOTE: this function is *technically* not spec compliant, but
+    /// I'm told this is what the other clients are doing for `devnet-0`..
     pub fn max_blobs_per_block(&self, epoch: Epoch) -> u64 {
-        self.max_blobs_per_block_by_fork(self.fork_name_at_epoch(epoch))
+        match self.fulu_fork_epoch {
+            Some(fulu_epoch) if epoch >= fulu_epoch => {
+                let mut max_blobs_per_block = self.max_blobs_per_block_electra;
+                let mut blob_schedule = self.blob_schedule.clone();
+                blob_schedule.sort_by_key(|entry| entry.epoch);
+                for entry in blob_schedule {
+                    if epoch < entry.epoch {
+                        return max_blobs_per_block;
+                    }
+                    max_blobs_per_block = entry.max_blobs_per_block;
+                }
+                max_blobs_per_block
+            }
+            _ => match self.electra_fork_epoch {
+                Some(electra_epoch) if epoch >= electra_epoch => self.max_blobs_per_block_electra,
+                _ => self.max_blobs_per_block,
+            },
+        }
     }

     /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for `fork`.
-    pub fn max_blobs_per_block_by_fork(&self, fork_name: ForkName) -> u64 {
-        if fork_name.fulu_enabled() {
-            self.max_blobs_per_block_fulu
-        } else if fork_name.electra_enabled() {
-            self.max_blobs_per_block_electra
+    // TODO(EIP-7892): remove this once we have fork-version changes on BPO forks
+    pub fn max_blobs_per_block_within_fork(&self, fork_name: ForkName) -> u64 {
+        if !fork_name.fulu_enabled() {
+            if fork_name.electra_enabled() {
+                self.max_blobs_per_block_electra
+            } else {
+                self.max_blobs_per_block
+            }
         } else {
-            self.max_blobs_per_block
+            // Find the max blobs per block in the fork schedule
+            // This logic will need to be more complex once there are forks beyond Fulu
+            let mut max_blobs_per_block = self.max_blobs_per_block_electra;
+            for entry in self.blob_schedule.iter() {
+                if entry.max_blobs_per_block > max_blobs_per_block {
+                    max_blobs_per_block = entry.max_blobs_per_block;
+                }
+            }
+            max_blobs_per_block
         }
     }

@@ -1002,7 +1031,7 @@ impl ChainSpec {
            /*
             * Networking Fulu specific
             */
-            max_blobs_per_block_fulu: default_max_blobs_per_block_fulu(),
+            blob_schedule: default_blob_schedule(),

            /*
             * Application specific
@@ -1336,7 +1365,7 @@ impl ChainSpec {
            /*
             * Networking Fulu specific
             */
-            max_blobs_per_block_fulu: default_max_blobs_per_block_fulu(),
+            blob_schedule: default_blob_schedule(),

            /*
             * Application specific
@@ -1357,6 +1386,14 @@ impl Default for ChainSpec {
     }
 }

+#[derive(arbitrary::Arbitrary, Serialize, Deserialize, Debug, PartialEq, Clone)]
+#[serde(rename_all = "UPPERCASE")]
+pub struct BPOFork {
+    epoch: Epoch,
+    #[serde(with = "serde_utils::quoted_u64")]
+    max_blobs_per_block: u64,
+}
+
 /// Exact implementation of the *config* object from the Ethereum spec (YAML/JSON).
 ///
 /// Fields relevant to hard forks after Altair should be optional so that we can continue
@@ -1557,9 +1594,9 @@ pub struct Config {
     #[serde(default = "default_custody_requirement")]
     #[serde(with = "serde_utils::quoted_u64")]
     custody_requirement: u64,
-    #[serde(default = "default_max_blobs_per_block_fulu")]
-    #[serde(with = "serde_utils::quoted_u64")]
-    max_blobs_per_block_fulu: u64,
+    #[serde(default = "default_blob_schedule")]
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    blob_schedule: Vec<BPOFork>,
 }

 fn default_bellatrix_fork_version() -> [u8; 4] {
@@ -1697,8 +1734,9 @@ const fn default_max_blobs_per_block_electra() -> u64 {
     9
 }

-const fn default_max_blobs_per_block_fulu() -> u64 {
-    12
+const fn default_blob_schedule() -> Vec<BPOFork> {
+    // TODO(EIP-7892): think about what the default should be
+    vec![]
 }

 const fn default_attestation_propagation_slot_range() -> u64 {
@@ -1937,7 +1975,7 @@ impl Config {
            data_column_sidecar_subnet_count: spec.data_column_sidecar_subnet_count,
            samples_per_slot: spec.samples_per_slot,
            custody_requirement: spec.custody_requirement,
-            max_blobs_per_block_fulu: spec.max_blobs_per_block_fulu,
+            blob_schedule: spec.blob_schedule.clone(),
        }
    }

@@ -2016,7 +2054,7 @@ impl Config {
            data_column_sidecar_subnet_count,
            samples_per_slot,
            custody_requirement,
-            max_blobs_per_block_fulu,
+            ref blob_schedule,
        } = self;

        if preset_base != E::spec_name().to_string().as_str() {
@@ -2100,7 +2138,7 @@ impl Config {
            data_column_sidecar_subnet_count,
            samples_per_slot,
            custody_requirement,
-            max_blobs_per_block_fulu,
+            blob_schedule: blob_schedule.clone(),

            ..chain_spec.clone()
        })
@@ -2287,6 +2325,102 @@ mod yaml_tests {
         assert_eq!(from, yamlconfig);
     }

+    #[test]
+    fn blob_schedule_max_blobs_per_block() {
+        let spec_contents = r#"
+        PRESET_BASE: 'mainnet'
+        MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 384
+        MIN_GENESIS_TIME: 1748264340
+        GENESIS_FORK_VERSION: 0x10355025
+        GENESIS_DELAY: 60
+        SECONDS_PER_SLOT: 12
+        SECONDS_PER_ETH1_BLOCK: 12
+        MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
+        SHARD_COMMITTEE_PERIOD: 256
+        ETH1_FOLLOW_DISTANCE: 2048
+        INACTIVITY_SCORE_BIAS: 4
+        INACTIVITY_SCORE_RECOVERY_RATE: 16
+        EJECTION_BALANCE: 16000000000
+        MIN_PER_EPOCH_CHURN_LIMIT: 4
+        CHURN_LIMIT_QUOTIENT: 65536
+        MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8
+        PROPOSER_SCORE_BOOST: 40
+        REORG_HEAD_WEIGHT_THRESHOLD: 20
+        REORG_PARENT_WEIGHT_THRESHOLD: 160
+        REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2
+        DEPOSIT_CHAIN_ID: 7042643276
+        DEPOSIT_NETWORK_ID: 7042643276
+        DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa
+
+        ALTAIR_FORK_VERSION: 0x20355025
+        ALTAIR_FORK_EPOCH: 0
+        BELLATRIX_FORK_VERSION: 0x30355025
+        BELLATRIX_FORK_EPOCH: 0
+        CAPELLA_FORK_VERSION: 0x40355025
+        CAPELLA_FORK_EPOCH: 0
+        DENEB_FORK_VERSION: 0x50355025
+        DENEB_FORK_EPOCH: 64
+        ELECTRA_FORK_VERSION: 0x60355025
+        ELECTRA_FORK_EPOCH: 128
+        FULU_FORK_VERSION: 0x70355025
+        FULU_FORK_EPOCH: 256
+        BLOB_SCHEDULE:
+          - EPOCH: 512
+            MAX_BLOBS_PER_BLOCK: 12
+          - EPOCH: 768
+            MAX_BLOBS_PER_BLOCK: 15
+          - EPOCH: 1024
+            MAX_BLOBS_PER_BLOCK: 18
+          - EPOCH: 1280
+            MAX_BLOBS_PER_BLOCK: 9
+          - EPOCH: 1584
+            MAX_BLOBS_PER_BLOCK: 20
+        "#;
+        let config: Config =
+            serde_yaml::from_str(spec_contents).expect("error while deserializing");
+        let spec =
+            ChainSpec::from_config::<MainnetEthSpec>(&config).expect("error while creating spec");
+
+        // test out max_blobs_per_block(epoch)
+        assert_eq!(
+            spec.max_blobs_per_block(Epoch::new(64)),
+            default_max_blobs_per_block()
+        );
+        assert_eq!(
+            spec.max_blobs_per_block(Epoch::new(127)),
+            default_max_blobs_per_block()
+        );
+        assert_eq!(
+            spec.max_blobs_per_block(Epoch::new(128)),
+            default_max_blobs_per_block_electra()
+        );
+        assert_eq!(
+            spec.max_blobs_per_block(Epoch::new(255)),
+            default_max_blobs_per_block_electra()
+        );
+        assert_eq!(
+            spec.max_blobs_per_block(Epoch::new(256)),
+            default_max_blobs_per_block_electra()
+        );
+        assert_eq!(
+            spec.max_blobs_per_block(Epoch::new(511)),
+            default_max_blobs_per_block_electra()
+        );
+        assert_eq!(spec.max_blobs_per_block(Epoch::new(512)), 12);
+        assert_eq!(spec.max_blobs_per_block(Epoch::new(767)), 12);
+        assert_eq!(spec.max_blobs_per_block(Epoch::new(768)), 15);
+        assert_eq!(spec.max_blobs_per_block(Epoch::new(1023)), 15);
+        assert_eq!(spec.max_blobs_per_block(Epoch::new(1024)), 18);
+        assert_eq!(spec.max_blobs_per_block(Epoch::new(1279)), 18);
+        assert_eq!(spec.max_blobs_per_block(Epoch::new(1280)), 9);
+        assert_eq!(spec.max_blobs_per_block(Epoch::new(1583)), 9);
+        assert_eq!(spec.max_blobs_per_block(Epoch::new(1584)), 20);
+        assert_eq!(
+            spec.max_blobs_per_block(Epoch::new(18446744073709551615)),
+            20
+        );
+    }
+
     #[test]
     fn apply_to_spec() {
         let mut spec = ChainSpec::minimal();
Why not sort the chainspec `blob_schedule` at construction from the config, instead of sorting it every time the function is called?
My concern was that the spec is constructed in multiple places and I wanted to ensure the blob schedule is always sorted. I've done that now by adding a wrapper `BlobSchedule` type. Let me know what you think :)
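
The `BlobSchedule` wrapper itself lands in the later commits and isn't part of this two-commit view. Below is a minimal standalone sketch of the idea, using assumed names (`BpoFork`, `BlobSchedule::new`, `max_blobs_per_block_at`) and plain `u64` epochs rather than the actual Lighthouse types: the constructor sorts once, so every construction site gets a sorted schedule and lookups never need to clone or re-sort.

```rust
// Rough sketch only, under assumed names; the real `BlobSchedule` type added
// later in this PR may differ.
#[derive(Debug, Clone, PartialEq)]
pub struct BpoFork {
    pub epoch: u64,
    pub max_blobs_per_block: u64,
}

/// Newtype wrapper whose constructor sorts entries by activation epoch,
/// so the ordering invariant holds no matter where the spec is built.
#[derive(Debug, Clone, Default, PartialEq)]
pub struct BlobSchedule(Vec<BpoFork>);

impl BlobSchedule {
    /// Sort exactly once, at construction from config.
    pub fn new(mut entries: Vec<BpoFork>) -> Self {
        entries.sort_by_key(|e| e.epoch);
        Self(entries)
    }

    /// Latest scheduled value at or before `epoch`; `fallback` is the
    /// pre-BPO maximum (e.g. the Electra value).
    pub fn max_blobs_per_block_at(&self, epoch: u64, fallback: u64) -> u64 {
        self.0
            .iter()
            .take_while(|e| e.epoch <= epoch)
            .last()
            .map_or(fallback, |e| e.max_blobs_per_block)
    }
}

fn main() {
    // Entries may arrive unsorted from config; the constructor normalises them.
    let schedule = BlobSchedule::new(vec![
        BpoFork { epoch: 768, max_blobs_per_block: 15 },
        BpoFork { epoch: 512, max_blobs_per_block: 12 },
    ]);
    assert_eq!(schedule.max_blobs_per_block_at(100, 9), 9);
    assert_eq!(schedule.max_blobs_per_block_at(600, 9), 12);
    assert_eq!(schedule.max_blobs_per_block_at(1024, 9), 15);
}
```

Encoding the invariant in the type addresses the concern above: however many places construct a `ChainSpec`, the ordering guarantee travels with the schedule.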