@@ -180,9 +180,10 @@ type CacheConfig struct {
 	SnapshotNoBuild bool // Whether the background generation is allowed
 	SnapshotWait    bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
 
+	ChainHistoryMode history.HistoryMode
 	// This defines the cutoff block for history expiry.
 	// Blocks before this number may be unavailable in the chain database.
-	ChainHistoryMode history.HistoryMode
+	HistoryPruningCutoff uint64
 }
 
 // triedbConfig derives the configures for trie database.
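Side note on the CacheConfig hunk above: a minimal sketch of how a caller might populate the two fields it touches. The import paths and the history.KeepPostMerge constant are assumptions, as is the cutoff value; only the field names and types come from the diff.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/history" // assumed package path for history.HistoryMode
)

func main() {
	cfg := &core.CacheConfig{
		TrieCleanLimit:       256,
		TrieDirtyLimit:       256,
		SnapshotLimit:        256,
		ChainHistoryMode:     history.KeepPostMerge, // assumed constant: retain only post-merge history
		HistoryPruningCutoff: 15_537_394,            // illustrative cutoff; blocks below it may be unavailable
	}
	fmt.Printf("history mode %v, cutoff %d\n", cfg.ChainHistoryMode, cfg.HistoryPruningCutoff)
}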
@@ -391,10 +392,10 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
 	bc.processor = NewStateProcessor(chainConfig, bc.hc, bc)
 
 	genesisHeader := bc.GetHeaderByNumber(0)
-	if genesisHeader == nil {
+	bc.genesisBlock = types.NewBlockWithHeader(genesisHeader)
+	if bc.genesisBlock == nil {
 		return nil, ErrNoGenesis
 	}
-	bc.genesisBlock = types.NewBlockWithHeader(genesisHeader)
 
 	bc.currentBlock.Store(nil)
 	bc.currentSnapBlock.Store(nil)
@@ -1446,6 +1447,7 @@ func (bc *BlockChain) stopWithoutSaving() {
 	// the mutex should become available quickly. It cannot be taken again after Close has
 	// returned.
 	bc.chainmu.Close()
+	bc.wg.Wait()
 }
 
 // Stop stops the blockchain service. If any imports are currently in progress
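The bc.wg.Wait() added above pairs with the bc.wg.Add(1) / defer bc.wg.Done() that InsertReceiptChain gains further down: shutdown closes the chain mutex and then blocks until in-flight receipt inserts drain. A standalone sketch of that pattern, with illustrative names rather than geth's types:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// service mimics the shape of the change: a stop flag guarded by a mutex plus
// a WaitGroup tracking in-flight inserts.
type service struct {
	mu      sync.Mutex
	wg      sync.WaitGroup
	stopped bool
}

// do runs one unit of work unless the service is already stopping.
func (s *service) do(work func()) error {
	s.mu.Lock()
	if s.stopped {
		s.mu.Unlock()
		return errors.New("service stopped")
	}
	s.wg.Add(1) // register before releasing the lock so stop cannot miss us
	s.mu.Unlock()

	defer s.wg.Done()
	work()
	return nil
}

// stop flips the flag and then blocks until every in-flight unit has drained.
func (s *service) stop() {
	s.mu.Lock()
	s.stopped = true
	s.mu.Unlock()

	s.wg.Wait()
}

func main() {
	var s service
	_ = s.do(func() { fmt.Println("insert while running") })
	s.stop()
	fmt.Println(s.do(func() {})) // service stopped
}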
@@ -1534,36 +1536,45 @@ const (
 	SideStatTy
 )
 
-// InsertReceiptChain inserts a batch of blocks along with their receipts into
-// the database. Unlike InsertChain, this function does not verify the state root
-// in the blocks. It is used exclusively for snap sync. All the inserted blocks
-// will be regarded as canonical, chain reorg is not supported.
-//
-// The optional ancientLimit can also be specified and chain segment before that
-// will be directly stored in the ancient, getting rid of the chain migration.
+// InsertReceiptChain attempts to complete an already existing header chain with
+// transaction and receipt data.
 func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts, ancientLimit uint64) (int, error) {
-	// Verify the supplied headers before insertion without lock
-	var headers []*types.Header
-	for _, block := range blockChain {
-		headers = append(headers, block.Header())
+	// We don't require the chainMu here since we want to maximize the
+	// concurrency of header insertion and receipt insertion.
+	bc.wg.Add(1)
+	defer bc.wg.Done()
 
-		// Here we also validate that blob transactions in the block do not
-		// contain a sidecar. While the sidecar does not affect the block hash
-		// or tx hash, sending blobs within a block is not allowed.
+	var (
+		ancientBlocks, liveBlocks     types.Blocks
+		ancientReceipts, liveReceipts []types.Receipts
+	)
+	// Do a sanity check that the provided chain is actually ordered and linked
+	for i, block := range blockChain {
+		if i != 0 {
+			prev := blockChain[i-1]
+			if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
+				log.Error("Non contiguous receipt insert",
+					"number", block.Number(), "hash", block.Hash(), "parent", block.ParentHash(),
+					"prevnumber", prev.Number(), "prevhash", prev.Hash())
+				return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])",
+					i-1, prev.NumberU64(), prev.Hash().Bytes()[:4],
+					i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
+			}
+		}
+		if block.NumberU64() <= ancientLimit {
+			ancientBlocks, ancientReceipts = append(ancientBlocks, block), append(ancientReceipts, receiptChain[i])
+		} else {
+			liveBlocks, liveReceipts = append(liveBlocks, block), append(liveReceipts, receiptChain[i])
+		}
+
+		// Here we also validate that blob transactions in the block do not contain a sidecar.
+		// While the sidecar does not affect the block hash / tx hash, sending blobs within a block is not allowed.
 		for txIndex, tx := range block.Transactions() {
 			if tx.Type() == types.BlobTxType && tx.BlobTxSidecar() != nil {
 				return 0, fmt.Errorf("block #%d contains unexpected blob sidecar in tx at index %d", block.NumberU64(), txIndex)
 			}
 		}
 	}
-	if n, err := bc.hc.ValidateHeaderChain(headers); err != nil {
-		return n, err
-	}
-	// Hold the mutation lock
-	if !bc.chainmu.TryLock() {
-		return 0, errChainStopped
-	}
-	defer bc.chainmu.Unlock()
 
 	var (
 		stats = struct{ processed, ignored int32 }{}
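A standalone sketch of the two checks the new loop above performs: reject batches that are not ordered and linked, then partition them around ancientLimit. blockInfo is a simplified stand-in for *types.Block, purely for illustration.

package main

import "fmt"

type blockInfo struct {
	number           uint64
	hash, parentHash string
}

func splitByAncientLimit(chain []blockInfo, ancientLimit uint64) (ancient, live []blockInfo, err error) {
	for i, b := range chain {
		// Reject gaps and broken parent links, mirroring the sanity check above.
		if i != 0 {
			prev := chain[i-1]
			if b.number != prev.number+1 || b.parentHash != prev.hash {
				return nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d, item %d is #%d", i-1, prev.number, i, b.number)
			}
		}
		// Blocks at or below the ancient limit take the freezer path.
		if b.number <= ancientLimit {
			ancient = append(ancient, b)
		} else {
			live = append(live, b)
		}
	}
	return ancient, live, nil
}

func main() {
	chain := []blockInfo{
		{number: 90, hash: "a", parentHash: "p"},
		{number: 91, hash: "b", parentHash: "a"},
		{number: 92, hash: "c", parentHash: "b"},
	}
	ancient, live, err := splitByAncientLimit(chain, 91)
	fmt.Println(len(ancient), len(live), err) // 2 1 <nil>
}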
@@ -1611,8 +1622,11 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 	// this function only accepts canonical chain data. All side chain will be reverted
 	// eventually.
 	writeAncient := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
-		// Ensure genesis is in the ancient store
-		if blockChain[0].NumberU64() == 1 {
+		first := blockChain[0]
+		last := blockChain[len(blockChain)-1]
+
+		// Ensure genesis is in ancients.
+		if first.NumberU64() == 1 {
 			if frozen, _ := bc.db.Ancients(); frozen == 0 {
 				td := bc.genesisBlock.Difficulty()
 				writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{bc.genesisBlock}, []types.Receipts{nil}, []types.Receipts{nil}, td)
@@ -1728,12 +1742,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 		return 0, nil
 	}
 
-	// writeLive writes the blockchain and corresponding receipt chain to the active store.
-	//
-	// Notably, in different snap sync cycles, the supplied chain may partially reorganize
-	// existing local chain segments (reorg around the chain tip). The reorganized part
-	// will be included in the provided chain segment, and stale canonical markers will be
-	// silently rewritten. Therefore, no explicit reorg logic is needed.
+	// writeLive writes blockchain and corresponding receipt chain into active store.
 	writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
 		headers := make([]*types.Header, 0, len(blockChain))
 		var (
@@ -1766,8 +1775,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 				}
 			}
 			// Write all the data out into the database
-			rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
-			rawdb.WriteBlock(batch, block)
+			rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
 			rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
 
 			// Write everything belongs to the blocks into the database. So that
@@ -1800,22 +1808,18 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 		return 0, nil
 	}
 
-	// Split the supplied blocks into two groups, according to the
-	// given ancient limit.
-	index := sort.Search(len(blockChain), func(i int) bool {
-		return blockChain[i].NumberU64() >= ancientLimit
-	})
-	if index > 0 {
-		if n, err := writeAncient(blockChain[:index], receiptChain[:index]); err != nil {
+	// Write downloaded chain data and corresponding receipt chain data
+	if len(ancientBlocks) > 0 {
+		if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil {
 			if err == errInsertionInterrupted {
 				return 0, nil
 			}
 
 			return n, err
 		}
 	}
-	if index != len(blockChain) {
-		if n, err := writeLive(blockChain[index:], receiptChain[index:]); err != nil {
+	if len(liveBlocks) > 0 {
+		if n, err := writeLive(liveBlocks, liveReceipts); err != nil {
 			if err == errInsertionInterrupted {
 				return 0, nil
 			}
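One boundary detail visible in this hunk: the removed sort.Search split handed only blocks strictly below ancientLimit to writeAncient, while the partition built earlier in the function uses block.NumberU64() <= ancientLimit, so the block numbered exactly ancientLimit now lands in the ancient batch. A tiny check of the two predicates:

package main

import "fmt"

func main() {
	const ancientLimit = 100
	for _, n := range []uint64{99, 100, 101} {
		removedSplit := n < ancientLimit // sort.Search: the first block with number >= ancientLimit starts the live segment
		addedSplit := n <= ancientLimit  // explicit partition used by the replacement code
		fmt.Printf("block %d: removed=%v added=%v\n", n, removedSplit, addedSplit)
	}
}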
@@ -1837,6 +1841,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 	}
 
 	log.Debug("Imported new block receipts", context...)
+
 	return 0, nil
 }
 
@@ -3347,6 +3352,7 @@ func (bc *BlockChain) InsertHeaderChain(chain []*types.Header) (int, error) {
 	if i, err := bc.hc.ValidateHeaderChain(chain); err != nil {
 		return i, err
 	}
+
 	if !bc.chainmu.TryLock() {
 		return 0, errChainStopped
 	}
@@ -3359,74 +3365,6 @@ func (bc *BlockChain) GetChainConfig() *params.ChainConfig {
 	return bc.chainConfig
 }
 
-// InsertHeadersBeforeCutoff inserts the given headers into the ancient store
-// as they are claimed older than the configured chain cutoff point. All the
-// inserted headers are regarded as canonical and chain reorg is not supported.
-func (bc *BlockChain) InsertHeadersBeforeCutoff(headers []*types.Header) (int, error) {
-	if len(headers) == 0 {
-		return 0, nil
-	}
-	// TODO(rjl493456442): Headers before the configured cutoff have already
-	// been verified by the hash of cutoff header. Theoretically, header validation
-	// could be skipped here.
-	if n, err := bc.hc.ValidateHeaderChain(headers); err != nil {
-		return n, err
-	}
-	if !bc.chainmu.TryLock() {
-		return 0, errChainStopped
-	}
-	defer bc.chainmu.Unlock()
-
-	// Initialize the ancient store with genesis block if it's empty.
-	var (
-		frozen, _ = bc.db.Ancients()
-		first     = headers[0].Number.Uint64()
-	)
-	if first == 1 && frozen == 0 {
-		_, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{bc.genesisBlock}, []types.Receipts{nil})
-		if err != nil {
-			log.Error("Error writing genesis to ancients", "err", err)
-			return 0, err
-		}
-		log.Info("Wrote genesis to ancient store")
-	} else if frozen != first {
-		return 0, fmt.Errorf("headers are gapped with the ancient store, first: %d, ancient: %d", first, frozen)
-	}
-
-	// Write headers to the ancient store, with block bodies and receipts set to nil
-	// to ensure consistency across tables in the freezer.
-	_, err := rawdb.WriteAncientHeaderChain(bc.db, headers)
-	if err != nil {
-		return 0, err
-	}
-	if err := bc.db.Sync(); err != nil {
-		return 0, err
-	}
-	// Write hash to number mappings
-	batch := bc.db.NewBatch()
-	for _, header := range headers {
-		rawdb.WriteHeaderNumber(batch, header.Hash(), header.Number.Uint64())
-	}
-	// Write head header and head snap block flags
-	last := headers[len(headers)-1]
-	rawdb.WriteHeadHeaderHash(batch, last.Hash())
-	rawdb.WriteHeadFastBlockHash(batch, last.Hash())
-	if err := batch.Write(); err != nil {
-		return 0, err
-	}
-	// Truncate the useless chain segment (zero bodies and receipts) in the
-	// ancient store.
-	if _, err := bc.db.TruncateTail(last.Number.Uint64() + 1); err != nil {
-		return 0, err
-	}
-	// Last step update all in-memory markers
-	bc.hc.currentHeader.Store(last)
-	bc.currentSnapBlock.Store(last)
-	headHeaderGauge.Update(last.Number.Int64())
-	headFastBlockGauge.Update(last.Number.Int64())
-	return 0, nil
-}
-
 // SetBlockValidatorAndProcessorForTesting sets the current validator and processor.
 // This method can be used to force an invalid blockchain to be verified for tests.
 // This method is unsafe and should only be used before block import starts.