@@ -702,7 +702,9 @@ impl<E: EthSpec> ExecutionBlockGenerator<E> {
if fork_name.deneb_enabled() {
// get random number between 0 and Max Blobs
let mut rng = self.rng.lock();
- let max_blobs = self.spec.max_blobs_per_block_by_fork(fork_name) as usize;
+ // TODO(EIP-7892): see FIXME below
+ // FIXME: this will break with BPO forks. This function needs to calculate the epoch based on block timestamp..
+ let max_blobs = self.spec.max_blobs_per_block_within_fork(fork_name) as usize;
let num_blobs = rng.gen::<usize>() % (max_blobs + 1);
let (bundle, transactions) = generate_blobs(num_blobs, fork_name)?;
for tx in Vec::from(transactions) {
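Note: the FIXME above says this helper should eventually derive the epoch from the execution block timestamp instead of relying on a fork name. A minimal sketch of that derivation, assuming the caller has the genesis time at hand (`epoch_at_timestamp` is a hypothetical helper, not part of this PR):

    use types::{ChainSpec, Epoch, EthSpec, Slot};

    /// Map an execution payload timestamp to a beacon epoch so that
    /// `ChainSpec::max_blobs_per_block(epoch)` can pick up BPO schedule entries.
    fn epoch_at_timestamp<E: EthSpec>(
        spec: &ChainSpec,
        genesis_time: u64,
        timestamp: u64,
    ) -> Epoch {
        // Saturate so a pre-genesis timestamp maps to slot 0 rather than underflowing.
        let slots_since_genesis = timestamp.saturating_sub(genesis_time) / spec.seconds_per_slot;
        Slot::new(slots_since_genesis).epoch(E::slots_per_epoch())
    }
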
beacon_node/lighthouse_network/src/rpc/handler.rs (3 additions, 4 deletions)
@@ -28,7 +28,7 @@ use std::{
use tokio::time::{sleep, Sleep};
use tokio_util::time::{delay_queue, DelayQueue};
use tracing::{debug, trace};
- use types::{EthSpec, ForkContext};
+ use types::{EthSpec, ForkContext, Slot};

/// The number of times to retry an outbound upgrade in the case of IO errors.
const IO_ERROR_RETRIES: u8 = 3;
@@ -932,9 +932,8 @@ where
}
}
RequestType::BlobsByRange(request) => {
- let max_requested_blobs = request
- .count
- .saturating_mul(spec.max_blobs_per_block_by_fork(current_fork));
+ let epoch = Slot::new(request.start_slot).epoch(E::slots_per_epoch());
+ let max_requested_blobs = request.max_blobs_requested(epoch, spec);
let max_allowed = spec.max_request_blob_sidecars(current_fork) as u64;
if max_requested_blobs > max_allowed {
self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound {
beacon_node/lighthouse_network/src/rpc/methods.rs (3 additions, 4 deletions)
@@ -16,11 +16,10 @@ use types::blob_sidecar::BlobIdentifier;
use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES;
use types::{
blob_sidecar::BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar,
- DataColumnsByRootIdentifier, Epoch, EthSpec, Hash256, LightClientBootstrap,
+ DataColumnsByRootIdentifier, Epoch, EthSpec, ForkContext, Hash256, LightClientBootstrap,
LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList,
SignedBeaconBlock, Slot,
};
- use types::{ForkContext, ForkName};

/// Maximum length of error message.
pub type MaxErrorLen = U256;
@@ -328,8 +327,8 @@ pub struct BlobsByRangeRequest {
}

impl BlobsByRangeRequest {
- pub fn max_blobs_requested(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 {
- let max_blobs_per_block = spec.max_blobs_per_block_by_fork(current_fork);
+ pub fn max_blobs_requested(&self, epoch: Epoch, spec: &ChainSpec) -> u64 {
+ let max_blobs_per_block = spec.max_blobs_per_block(epoch);
self.count.saturating_mul(max_blobs_per_block)
}
}
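A usage sketch of the new epoch-based signature (not part of this PR; it assumes the request type is in scope as in this module, and a spec whose BLOB_SCHEDULE raises MAX_BLOBS_PER_BLOCK to 12 at epoch 512, as in the yaml_tests fixture further down):

    use types::{ChainSpec, EthSpec, MainnetEthSpec, Slot};

    // With the mainnet preset, epoch 512 starts at slot 512 * 32 = 16384.
    fn example_bound(spec: &ChainSpec) -> u64 {
        let request = BlobsByRangeRequest {
            start_slot: 512 * 32,
            count: 32,
        };
        let epoch = Slot::new(request.start_slot).epoch(MainnetEthSpec::slots_per_epoch());
        // 32 slots * 12 blobs per block = 384 sidecars at most for this request.
        request.max_blobs_requested(epoch, spec)
    }

The handler change above performs the same slot-to-epoch conversion before enforcing the max_request_blob_sidecars limit.
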
beacon_node/lighthouse_network/src/rpc/protocol.rs (8 additions, 4 deletions)
@@ -21,7 +21,7 @@ use types::{
EmptyBlock, EthSpec, EthSpecId, ForkContext, ForkName, LightClientBootstrap,
LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair,
LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, LightClientUpdate,
- MainnetEthSpec, MinimalEthSpec, Signature, SignedBeaconBlock,
+ MainnetEthSpec, MinimalEthSpec, Signature, SignedBeaconBlock, Slot,
};

// Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is
@@ -633,7 +633,8 @@ pub fn rpc_blob_limits<E: EthSpec>() -> RpcLimits {
pub fn rpc_data_column_limits<E: EthSpec>(fork_name: ForkName, spec: &ChainSpec) -> RpcLimits {
RpcLimits::new(
DataColumnSidecar::<E>::min_size(),
- DataColumnSidecar::<E>::max_size(spec.max_blobs_per_block_by_fork(fork_name) as usize),
+ // TODO(EIP-7892): fix this once we change fork-version on BPO forks
+ DataColumnSidecar::<E>::max_size(spec.max_blobs_per_block_within_fork(fork_name) as usize),
)
}

@@ -732,13 +733,16 @@ impl<E: EthSpec> RequestType<E> {
/* These functions are used in the handler for stream management */

/// Maximum number of responses expected for this request.
- pub fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 {
+ /// TODO(EIP-7892): refactor this to remove `_current_fork`
+ pub fn max_responses(&self, _current_fork: ForkName, spec: &ChainSpec) -> u64 {
match self {
RequestType::Status(_) => 1,
RequestType::Goodbye(_) => 0,
RequestType::BlocksByRange(req) => *req.count(),
RequestType::BlocksByRoot(req) => req.block_roots().len() as u64,
- RequestType::BlobsByRange(req) => req.max_blobs_requested(current_fork, spec),
+ RequestType::BlobsByRange(req) => {
+ req.max_blobs_requested(Slot::new(req.start_slot).epoch(E::slots_per_epoch()), spec)
+ }
RequestType::BlobsByRoot(req) => req.blob_ids.len() as u64,
RequestType::DataColumnsByRoot(req) => req.max_requested() as u64,
RequestType::DataColumnsByRange(req) => req.max_requested::<E>(),
common/eth2/src/lib.rs (3 additions, 1 deletion)
@@ -1296,7 +1296,9 @@ impl BeaconNodeHttpClient {
}

self.get_fork_contextual(path, |fork| {
- (fork, spec.max_blobs_per_block_by_fork(fork) as usize)
+ // TODO(EIP-7892): this will overestimate the max number of blobs
+ // It would be better if we could get an epoch passed into this function
+ (fork, spec.max_blobs_per_block_within_fork(fork) as usize)
})
.await
.map(|opt| opt.map(BeaconResponse::ForkVersioned))
consensus/types/src/chain_spec.rs (154 additions, 20 deletions)
@@ -243,7 +243,7 @@ pub struct ChainSpec {
/*
* Networking Fulu
*/
- max_blobs_per_block_fulu: u64,
+ blob_schedule: Vec<BPOFork>,

/*
* Networking Derived
@@ -653,19 +653,48 @@ impl ChainSpec {
}
}

- /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for the fork at `epoch`.
+ /// Return the value of `MAX_BLOBS_PER_BLOCK` for the given `epoch`.
+ /// NOTE: this function is *technically* not spec compliant, but
+ /// I'm told this is what the other clients are doing for `devnet-0`..
pub fn max_blobs_per_block(&self, epoch: Epoch) -> u64 {
- self.max_blobs_per_block_by_fork(self.fork_name_at_epoch(epoch))
+ match self.fulu_fork_epoch {
+ Some(fulu_epoch) if epoch >= fulu_epoch => {
+ let mut max_blobs_per_block = self.max_blobs_per_block_electra;
+ let mut blob_schedule = self.blob_schedule.clone();
[review thread]
Member: why not sort the chainspec blob_schedule at construction from the config instead of sorting it every time on function call?
Author: My concern was that the spec was constructed in multiple places and I want to ensure the blob schedule was always sorted. I've done that now by adding a wrapper BlobSchedule type. Let me know what you think :)

+ blob_schedule.sort_by_key(|entry| entry.epoch);
+ for entry in blob_schedule {
[review thread]
Member: Maybe we can do something like

    let index = self.blob_schedule.partition_point(|entry| entry.epoch <= epoch);
    if index > 0 {
        max_blobs = self.blob_schedule[index - 1].max_blobs_per_block;
    }

Author: I've updated the function to be implemented on the BlobSchedule. It's simpler now. Let me know what you think :)

+ if epoch < entry.epoch {
+ return max_blobs_per_block;
+ }
+ max_blobs_per_block = entry.max_blobs_per_block;
+ }
+ max_blobs_per_block
+ }
+ _ => match self.electra_fork_epoch {
+ Some(electra_epoch) if epoch >= electra_epoch => self.max_blobs_per_block_electra,
+ _ => self.max_blobs_per_block,
+ },
+ }
}

- /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for `fork`.
- pub fn max_blobs_per_block_by_fork(&self, fork_name: ForkName) -> u64 {
- if fork_name.fulu_enabled() {
- self.max_blobs_per_block_fulu
- } else if fork_name.electra_enabled() {
- self.max_blobs_per_block_electra
+ // TODO(EIP-7892): remove this once we have fork-version changes on BPO forks
+ pub fn max_blobs_per_block_within_fork(&self, fork_name: ForkName) -> u64 {
+ if !fork_name.fulu_enabled() {
+ if fork_name.electra_enabled() {
+ self.max_blobs_per_block_electra
+ } else {
+ self.max_blobs_per_block
+ }
} else {
- self.max_blobs_per_block
+ // Find the max blobs per block in the fork schedule
+ // This logic will need to be more complex once there are forks beyond Fulu
+ let mut max_blobs_per_block = self.max_blobs_per_block_electra;
+ for entry in self.blob_schedule.iter() {
+ if entry.max_blobs_per_block > max_blobs_per_block {
+ max_blobs_per_block = entry.max_blobs_per_block;
+ }
+ }
+ max_blobs_per_block
}
}

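The review thread above converges on a wrapper type that sorts the schedule once at construction and answers lookups with partition_point; that wrapper is not part of this revision of the diff. A minimal sketch of what it could look like inside chain_spec.rs (the names BlobSchedule::new and max_blobs_at_epoch are assumptions):

    /// Entries are kept sorted by epoch, ascending, so lookups never re-sort.
    #[derive(Clone, Debug, PartialEq)]
    pub struct BlobSchedule {
        entries: Vec<BPOFork>,
    }

    impl BlobSchedule {
        pub fn new(mut entries: Vec<BPOFork>) -> Self {
            entries.sort_by_key(|entry| entry.epoch);
            Self { entries }
        }

        /// `MAX_BLOBS_PER_BLOCK` scheduled at `epoch`, or `None` if `epoch`
        /// precedes every entry.
        pub fn max_blobs_at_epoch(&self, epoch: Epoch) -> Option<u64> {
            let index = self.entries.partition_point(|entry| entry.epoch <= epoch);
            index
                .checked_sub(1)
                .map(|i| self.entries[i].max_blobs_per_block)
        }
    }

With something like this, the post-Fulu arm of max_blobs_per_block could reduce to self.blob_schedule.max_blobs_at_epoch(epoch).unwrap_or(self.max_blobs_per_block_electra), matching the behaviour of the clone-sort-loop in this revision.
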
@@ -1002,7 +1031,7 @@ impl ChainSpec {
/*
* Networking Fulu specific
*/
- max_blobs_per_block_fulu: default_max_blobs_per_block_fulu(),
+ blob_schedule: default_blob_schedule(),

/*
* Application specific
@@ -1336,7 +1365,7 @@ impl ChainSpec {
/*
* Networking Fulu specific
*/
- max_blobs_per_block_fulu: default_max_blobs_per_block_fulu(),
+ blob_schedule: default_blob_schedule(),

/*
* Application specific
@@ -1357,6 +1386,14 @@ impl Default for ChainSpec {
}
}

+ #[derive(arbitrary::Arbitrary, Serialize, Deserialize, Debug, PartialEq, Clone)]
+ #[serde(rename_all = "UPPERCASE")]
+ pub struct BPOFork {
+ epoch: Epoch,
+ #[serde(with = "serde_utils::quoted_u64")]
+ max_blobs_per_block: u64,
+ }

/// Exact implementation of the *config* object from the Ethereum spec (YAML/JSON).
///
/// Fields relevant to hard forks after Altair should be optional so that we can continue
@@ -1557,9 +1594,9 @@ pub struct Config {
#[serde(default = "default_custody_requirement")]
#[serde(with = "serde_utils::quoted_u64")]
custody_requirement: u64,
- #[serde(default = "default_max_blobs_per_block_fulu")]
- #[serde(with = "serde_utils::quoted_u64")]
- max_blobs_per_block_fulu: u64,
+ #[serde(default = "default_blob_schedule")]
+ #[serde(skip_serializing_if = "Vec::is_empty")]
+ blob_schedule: Vec<BPOFork>,
}

fn default_bellatrix_fork_version() -> [u8; 4] {
@@ -1697,8 +1734,9 @@ const fn default_max_blobs_per_block_electra() -> u64 {
9
}

- const fn default_max_blobs_per_block_fulu() -> u64 {
- 12
+ const fn default_blob_schedule() -> Vec<BPOFork> {
+ // TODO(EIP-7892): think about what the default should be
+ vec![]
}

const fn default_attestation_propagation_slot_range() -> u64 {
@@ -1937,7 +1975,7 @@ impl Config {
data_column_sidecar_subnet_count: spec.data_column_sidecar_subnet_count,
samples_per_slot: spec.samples_per_slot,
custody_requirement: spec.custody_requirement,
- max_blobs_per_block_fulu: spec.max_blobs_per_block_fulu,
+ blob_schedule: spec.blob_schedule.clone(),
}
}

@@ -2016,7 +2054,7 @@ impl Config {
data_column_sidecar_subnet_count,
samples_per_slot,
custody_requirement,
- max_blobs_per_block_fulu,
+ ref blob_schedule,
} = self;

if preset_base != E::spec_name().to_string().as_str() {
@@ -2100,7 +2138,7 @@ impl Config {
data_column_sidecar_subnet_count,
samples_per_slot,
custody_requirement,
- max_blobs_per_block_fulu,
+ blob_schedule: blob_schedule.clone(),

..chain_spec.clone()
})
@@ -2287,6 +2325,102 @@ mod yaml_tests {
assert_eq!(from, yamlconfig);
}

#[test]
fn blob_schedule_max_blobs_per_block() {
let spec_contents = r#"
PRESET_BASE: 'mainnet'
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 384
MIN_GENESIS_TIME: 1748264340
GENESIS_FORK_VERSION: 0x10355025
GENESIS_DELAY: 60
SECONDS_PER_SLOT: 12
SECONDS_PER_ETH1_BLOCK: 12
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
SHARD_COMMITTEE_PERIOD: 256
ETH1_FOLLOW_DISTANCE: 2048
INACTIVITY_SCORE_BIAS: 4
INACTIVITY_SCORE_RECOVERY_RATE: 16
EJECTION_BALANCE: 16000000000
MIN_PER_EPOCH_CHURN_LIMIT: 4
CHURN_LIMIT_QUOTIENT: 65536
MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8
PROPOSER_SCORE_BOOST: 40
REORG_HEAD_WEIGHT_THRESHOLD: 20
REORG_PARENT_WEIGHT_THRESHOLD: 160
REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2
DEPOSIT_CHAIN_ID: 7042643276
DEPOSIT_NETWORK_ID: 7042643276
DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa

ALTAIR_FORK_VERSION: 0x20355025
ALTAIR_FORK_EPOCH: 0
BELLATRIX_FORK_VERSION: 0x30355025
BELLATRIX_FORK_EPOCH: 0
CAPELLA_FORK_VERSION: 0x40355025
CAPELLA_FORK_EPOCH: 0
DENEB_FORK_VERSION: 0x50355025
DENEB_FORK_EPOCH: 64
ELECTRA_FORK_VERSION: 0x60355025
ELECTRA_FORK_EPOCH: 128
FULU_FORK_VERSION: 0x70355025
FULU_FORK_EPOCH: 256
BLOB_SCHEDULE:
- EPOCH: 512
MAX_BLOBS_PER_BLOCK: 12
- EPOCH: 768
MAX_BLOBS_PER_BLOCK: 15
- EPOCH: 1024
MAX_BLOBS_PER_BLOCK: 18
- EPOCH: 1280
MAX_BLOBS_PER_BLOCK: 9
- EPOCH: 1584
MAX_BLOBS_PER_BLOCK: 20
"#;
let config: Config =
serde_yaml::from_str(spec_contents).expect("error while deserializing");
let spec =
ChainSpec::from_config::<MainnetEthSpec>(&config).expect("error while creating spec");

// test out max_blobs_per_block(epoch)
assert_eq!(
spec.max_blobs_per_block(Epoch::new(64)),
default_max_blobs_per_block()
);
assert_eq!(
spec.max_blobs_per_block(Epoch::new(127)),
default_max_blobs_per_block()
);
assert_eq!(
spec.max_blobs_per_block(Epoch::new(128)),
default_max_blobs_per_block_electra()
);
assert_eq!(
spec.max_blobs_per_block(Epoch::new(255)),
default_max_blobs_per_block_electra()
);
assert_eq!(
spec.max_blobs_per_block(Epoch::new(256)),
default_max_blobs_per_block_electra()
);
assert_eq!(
spec.max_blobs_per_block(Epoch::new(511)),
default_max_blobs_per_block_electra()
);
assert_eq!(spec.max_blobs_per_block(Epoch::new(512)), 12);
assert_eq!(spec.max_blobs_per_block(Epoch::new(767)), 12);
assert_eq!(spec.max_blobs_per_block(Epoch::new(768)), 15);
assert_eq!(spec.max_blobs_per_block(Epoch::new(1023)), 15);
assert_eq!(spec.max_blobs_per_block(Epoch::new(1024)), 18);
assert_eq!(spec.max_blobs_per_block(Epoch::new(1279)), 18);
assert_eq!(spec.max_blobs_per_block(Epoch::new(1280)), 9);
assert_eq!(spec.max_blobs_per_block(Epoch::new(1583)), 9);
assert_eq!(spec.max_blobs_per_block(Epoch::new(1584)), 20);
assert_eq!(
spec.max_blobs_per_block(Epoch::new(18446744073709551615)),
20
);
}

#[test]
fn apply_to_spec() {
let mut spec = ChainSpec::minimal();