diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index 3a6c09d8c..e44515f0e 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -939,8 +939,8 @@ func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*t return fb.bc.GetHeaderByHash(hash), nil } -func (fb *filterBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { - return fb.backend.pendingBlock, fb.backend.pendingReceipts +func (fb *filterBackend) Pending() (*types.Block, types.Receipts, *state.StateDB) { + return fb.backend.pendingBlock, fb.backend.pendingReceipts, fb.backend.pendingState } func (fb *filterBackend) StateAt(root common.Hash) (*state.StateDB, error) { @@ -976,10 +976,6 @@ func (fb *filterBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscr return fb.bc.SubscribeLogsEvent(ch) } -func (fb *filterBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription { - return nullSubscription() -} - func (fb *filterBackend) BloomStatus() (uint64, uint64) { return 4096, 0 } func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.MatcherSession) { diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go index 96777711c..b5906ca74 100644 --- a/cmd/geth/consolecmd_test.go +++ b/cmd/geth/consolecmd_test.go @@ -70,7 +70,6 @@ func TestConsoleWelcome(t *testing.T) { Welcome to the Geth JavaScript console! 
instance: Geth/v{{gethver}}/{{goos}}-{{goarch}}/{{gover}} -coinbase: {{.Etherbase}} at block: 0 ({{niltime}}) datadir: {{.Datadir}} modules: {{apis}} @@ -131,7 +130,6 @@ func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) { attach.SetTemplateFunc("goarch", func() string { return runtime.GOARCH }) attach.SetTemplateFunc("gover", runtime.Version) attach.SetTemplateFunc("gethver", func() string { return params.VersionWithCommit("", "") }) - attach.SetTemplateFunc("etherbase", func() string { return geth.Etherbase }) attach.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)") }) @@ -144,7 +142,6 @@ func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) { Welcome to the Geth JavaScript console! instance: Geth/v{{gethver}}/{{goos}}-{{goarch}}/{{gover}} -coinbase: {{etherbase}} at block: 0 ({{niltime}}){{if ipc}} datadir: {{datadir}}{{end}} modules: {{apis}} diff --git a/cmd/geth/main.go b/cmd/geth/main.go index f29340af2..9d9468d3c 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -30,7 +30,6 @@ import ( "github.com/scroll-tech/go-ethereum/cmd/utils" "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/console/prompt" - "github.com/scroll-tech/go-ethereum/eth" "github.com/scroll-tech/go-ethereum/eth/downloader" "github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/internal/debug" @@ -122,16 +121,14 @@ var ( utils.ListenPortFlag, utils.MaxPeersFlag, utils.MaxPendingPeersFlag, - utils.MiningEnabledFlag, - utils.MinerThreadsFlag, - utils.MinerNotifyFlag, + utils.MiningEnabledFlag, // deprecated utils.LegacyMinerGasTargetFlag, utils.MinerGasLimitFlag, utils.MinerGasPriceFlag, - utils.MinerEtherbaseFlag, + utils.MinerEtherbaseFlag, // deprecated utils.MinerExtraDataFlag, 
+ utils.MinerPendingFeeRecipientFlag, utils.MinerRecommitIntervalFlag, - utils.MinerNoVerifyFlag, utils.MinerStoreSkippedTxTracesFlag, utils.MinerMaxAccountsNumFlag, utils.NATFlag, @@ -162,7 +159,6 @@ var ( utils.GpoPercentileFlag, utils.GpoMaxGasPriceFlag, utils.GpoIgnoreGasPriceFlag, - utils.MinerNotifyFullFlag, configFileFlag, utils.CatalystFlag, utils.CircuitCapacityCheckEnabledFlag, @@ -427,24 +423,24 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend) { } // Start auxiliary services if enabled - if ctx.GlobalBool(utils.MiningEnabledFlag.Name) || ctx.GlobalBool(utils.DeveloperFlag.Name) { - // Mining only makes sense if a full Ethereum node is running - if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" { - utils.Fatalf("Light clients do not support mining") - } - ethBackend, ok := backend.(*eth.EthAPIBackend) - if !ok { - utils.Fatalf("Ethereum service not running") - } - // Set the gas price to the limits from the CLI and start mining - gasprice := utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name) - ethBackend.TxPool().SetGasPrice(gasprice) - // start mining - threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name) - if err := ethBackend.StartMining(threads); err != nil { - utils.Fatalf("Failed to start mining: %v", err) - } - } + //if ctx.GlobalBool(utils.MiningEnabledFlag.Name) || ctx.GlobalBool(utils.DeveloperFlag.Name) { + // // Mining only makes sense if a full Ethereum node is running + // if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" { + // utils.Fatalf("Light clients do not support mining") + // } + // ethBackend, ok := backend.(*eth.EthAPIBackend) + // if !ok { + // utils.Fatalf("Ethereum service not running") + // } + // // Set the gas price to the limits from the CLI and start mining + // gasprice := utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name) + // ethBackend.TxPool().SetGasPrice(gasprice) + // // start mining + // threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name) + // if err := 
ethBackend.StartMining(threads); err != nil { + // utils.Fatalf("Failed to start mining: %v", err) + // } + //} } // unlockAccounts unlocks any account specifically requested. diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index a8e55573a..3678453d1 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -186,14 +186,11 @@ var AppHelpFlagGroups = []flags.FlagGroup{ Flags: []cli.Flag{ utils.MiningEnabledFlag, utils.MinerThreadsFlag, - utils.MinerNotifyFlag, - utils.MinerNotifyFullFlag, utils.MinerGasPriceFlag, utils.MinerGasLimitFlag, utils.MinerEtherbaseFlag, utils.MinerExtraDataFlag, utils.MinerRecommitIntervalFlag, - utils.MinerNoVerifyFlag, utils.MinerStoreSkippedTxTracesFlag, utils.MinerMaxAccountsNumFlag, }, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 3407eef65..72299b56e 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -19,6 +19,7 @@ package utils import ( "crypto/ecdsa" + "encoding/hex" "fmt" "io" "io/ioutil" @@ -464,22 +465,15 @@ var ( } // Miner settings - MiningEnabledFlag = cli.BoolFlag{ - Name: "mine", - Usage: "Enable mining", - } MinerThreadsFlag = cli.IntFlag{ Name: "miner.threads", Usage: "Number of CPU threads to use for mining", Value: 0, } - MinerNotifyFlag = cli.StringFlag{ - Name: "miner.notify", - Usage: "Comma separated HTTP URL list to notify of new work packages", - } - MinerNotifyFullFlag = cli.BoolFlag{ - Name: "miner.notify.full", - Usage: "Notify with pending block headers instead of work packages", + MinerPendingFeeRecipientFlag = &cli.StringFlag{ + Name: "miner.pending.feeRecipient", + Usage: "0x prefixed public address for the pending block producer (not used for actual block production)", + Value: "0", } MinerGasLimitFlag = cli.Uint64Flag{ Name: "miner.gaslimit", @@ -491,11 +485,6 @@ var ( Usage: "Minimum gas price for mining a transaction", Value: ethconfig.Defaults.Miner.GasPrice, } - MinerEtherbaseFlag = cli.StringFlag{ - Name: "miner.etherbase", - Usage: "Public address for block mining rewards 
(default = first account)", - Value: "0", - } MinerExtraDataFlag = cli.StringFlag{ Name: "miner.extradata", Usage: "Block extra data set by the miner (default = client version)", @@ -505,10 +494,6 @@ var ( Usage: "Time interval to recreate the block being mined", Value: ethconfig.Defaults.Miner.Recommit, } - MinerNoVerifyFlag = cli.BoolFlag{ - Name: "miner.noverify", - Usage: "Disable remote sealing verification", - } MinerNewBlockTimeout = &cli.DurationFlag{ Name: "miner.newblock-timeout", @@ -1235,22 +1220,23 @@ func MakeAddress(ks *keystore.KeyStore, account string) (accounts.Account, error // command line flags or from the keystore if CLI indexed. func setEtherbase(ctx *cli.Context, ks *keystore.KeyStore, cfg *ethconfig.Config) { // Extract the current etherbase - var etherbase string - if ctx.GlobalIsSet(MinerEtherbaseFlag.Name) { - etherbase = ctx.GlobalString(MinerEtherbaseFlag.Name) - } - // Convert the etherbase into an address and configure it - if etherbase != "" { - if ks != nil { - account, err := MakeAddress(ks, etherbase) - if err != nil { - Fatalf("Invalid miner etherbase: %v", err) - } - cfg.Miner.Etherbase = account.Address - } else { - Fatalf("No etherbase configured") - } + if ctx.IsSet(MinerEtherbaseFlag.Name) { + log.Warn("Option --miner.etherbase is deprecated") + return } + if !ctx.IsSet(MinerPendingFeeRecipientFlag.Name) { + return + } + addr := ctx.String(MinerPendingFeeRecipientFlag.Name) + if strings.HasPrefix(addr, "0x") || strings.HasPrefix(addr, "0X") { + addr = addr[2:] + } + b, err := hex.DecodeString(addr) + if err != nil || len(b) != common.AddressLength { + Fatalf("-%s: invalid pending block producer address %q", MinerPendingFeeRecipientFlag.Name, addr) + return + } + cfg.Miner.PendingFeeRecipient = common.BytesToAddress(b) } // MakePasswordList reads password lines from the file specified by the global --password flag. 
@@ -1530,10 +1516,9 @@ func setEthash(ctx *cli.Context, cfg *ethconfig.Config) { } func setMiner(ctx *cli.Context, cfg *miner.Config) { - if ctx.GlobalIsSet(MinerNotifyFlag.Name) { - cfg.Notify = strings.Split(ctx.GlobalString(MinerNotifyFlag.Name), ",") + if ctx.Bool(MiningEnabledFlag.Name) { + log.Warn("The flag --mine is deprecated and will be removed") } - cfg.NotifyFull = ctx.GlobalBool(MinerNotifyFullFlag.Name) if ctx.GlobalIsSet(MinerExtraDataFlag.Name) { cfg.ExtraData = []byte(ctx.GlobalString(MinerExtraDataFlag.Name)) } @@ -1546,9 +1531,6 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) { if ctx.GlobalIsSet(MinerRecommitIntervalFlag.Name) { cfg.Recommit = ctx.GlobalDuration(MinerRecommitIntervalFlag.Name) } - if ctx.GlobalIsSet(MinerNoVerifyFlag.Name) { - cfg.Noverify = ctx.GlobalBool(MinerNoVerifyFlag.Name) - } if ctx.GlobalIsSet(MinerStoreSkippedTxTracesFlag.Name) { cfg.StoreSkippedTxTraces = ctx.GlobalBool(MinerStoreSkippedTxTracesFlag.Name) } @@ -1878,9 +1860,18 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { // when we're definitely concerned with only one account. passphrase = list[0] } + // Unlock the developer account by local keystore. + var ks *keystore.KeyStore + if keystores := stack.AccountManager().Backends(keystore.KeyStoreType); len(keystores) > 0 { + ks = keystores[0].(*keystore.KeyStore) + } + if ks == nil { + Fatalf("Keystore is not available") + } + // setEtherbase has been called above, configuring the miner address from command line flags. 
- if cfg.Miner.Etherbase != (common.Address{}) { - developer = accounts.Account{Address: cfg.Miner.Etherbase} + if cfg.Miner.PendingFeeRecipient != (common.Address{}) { + developer = accounts.Account{Address: cfg.Miner.PendingFeeRecipient} } else if accs := ks.Accounts(); len(accs) > 0 { developer = ks.Accounts()[0] } else { @@ -1889,6 +1880,10 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { Fatalf("Failed to create developer account: %v", err) } } + // Make sure the address is configured as fee recipient, otherwise + // the miner will fail to start. + cfg.Miner.PendingFeeRecipient = developer.Address + if err := ks.Unlock(developer, passphrase); err != nil { Fatalf("Failed to unlock developer account: %v", err) } diff --git a/cmd/utils/flags_legacy.go b/cmd/utils/flags_legacy.go index 8bc084a54..1d5468936 100644 --- a/cmd/utils/flags_legacy.go +++ b/cmd/utils/flags_legacy.go @@ -50,6 +50,16 @@ var ( Usage: "Target gas floor for mined blocks (deprecated)", Value: ethconfig.Defaults.Miner.GasFloor, } + + MinerEtherbaseFlag = cli.StringFlag{ + Name: "miner.etherbase", + Usage: "Public address for block mining rewards (default = first account)", + Value: "0", + } + MiningEnabledFlag = &cli.BoolFlag{ + Name: "mine", + Usage: "Enable mining", + } ) // showDeprecated displays deprecated flags that will be soon removed from the codebase. diff --git a/consensus/consensus.go b/consensus/consensus.go index d4c71b6fa..c217a5124 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -117,11 +117,3 @@ type Engine interface { // Close terminates any background threads maintained by the consensus engine. Close() error } - -// PoW is a consensus engine based on proof-of-work. -type PoW interface { - Engine - - // Hashrate returns the current mining hashrate of a PoW consensus engine. 
- Hashrate() float64 -} diff --git a/console/console.go b/console/console.go index c3f662be7..9f645a7d3 100644 --- a/console/console.go +++ b/console/console.go @@ -305,9 +305,6 @@ func (c *Console) Welcome() { // Print some generic Geth metadata if res, err := c.jsre.Run(` var message = "instance: " + web3.version.node + "\n"; - try { - message += "coinbase: " + eth.coinbase + "\n"; - } catch (err) {} message += "at block: " + eth.blockNumber + " (" + new Date(1000 * eth.getBlock(eth.blockNumber).timestamp) + ")\n"; try { message += " datadir: " + admin.datadir + "\n"; diff --git a/console/console_test.go b/console/console_test.go index 772bf877e..8bfe04a77 100644 --- a/console/console_test.go +++ b/console/console_test.go @@ -101,7 +101,7 @@ func newTester(t *testing.T, confOverride func(*ethconfig.Config)) *tester { ethConf := ðconfig.Config{ Genesis: core.DeveloperGenesisBlock(15, 11_500_000, common.Address{}), Miner: miner.Config{ - Etherbase: common.HexToAddress(testAddress), + PendingFeeRecipient: common.HexToAddress(testAddress), }, Ethash: ethash.Config{ PowMode: ethash.ModeTest, @@ -174,9 +174,6 @@ func TestWelcome(t *testing.T) { if want := fmt.Sprintf("instance: %s", testInstance); !strings.Contains(output, want) { t.Fatalf("console output missing instance: have\n%s\nwant also %s", output, want) } - if want := fmt.Sprintf("coinbase: %s", testAddress); !strings.Contains(output, want) { - t.Fatalf("console output missing coinbase: have\n%s\nwant also %s", output, want) - } if want := "at block: 0"; !strings.Contains(output, want) { t.Fatalf("console output missing sync status: have\n%s\nwant also %s", output, want) } diff --git a/core/blockchain.go b/core/blockchain.go index ea871905a..823b39e5f 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -216,13 +216,13 @@ type BlockChain struct { processor Processor // Block transaction processor interface vmConfig vm.Config - shouldPreserve func(*types.Block) bool // Function used to determine whether 
should preserve the given block. + shouldPreserve func(*types.Header) bool // Function used to determine whether should preserve the given block. } // NewBlockChain returns a fully initialised block chain using information // available in the database. It initialises the default Ethereum Validator and // Processor. -func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, txLookupLimit *uint64) (*BlockChain, error) { +func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Header) bool, txLookupLimit *uint64) (*BlockChain, error) { if cacheConfig == nil { cacheConfig = defaultCacheConfig } diff --git a/eth/api.go b/eth/api.go index 14b15ed4e..c886d5a01 100644 --- a/eth/api.go +++ b/eth/api.go @@ -25,7 +25,6 @@ import ( "io" "math/big" "os" - "runtime" "strings" "time" @@ -44,112 +43,6 @@ import ( "github.com/scroll-tech/go-ethereum/trie" ) -// PublicEthereumAPI provides an API to access Ethereum full node-related -// information. -type PublicEthereumAPI struct { - e *Ethereum -} - -// NewPublicEthereumAPI creates a new Ethereum protocol API for full nodes. -func NewPublicEthereumAPI(e *Ethereum) *PublicEthereumAPI { - return &PublicEthereumAPI{e} -} - -// Etherbase is the address that mining rewards will be send to -func (api *PublicEthereumAPI) Etherbase() (common.Address, error) { - return api.e.Etherbase() -} - -// Coinbase is the address that mining rewards will be send to (alias for Etherbase) -func (api *PublicEthereumAPI) Coinbase() (common.Address, error) { - return api.Etherbase() -} - -// Hashrate returns the POW hashrate -func (api *PublicEthereumAPI) Hashrate() hexutil.Uint64 { - return hexutil.Uint64(api.e.Miner().Hashrate()) -} - -// PublicMinerAPI provides an API to control the miner. 
-// It offers only methods that operate on data that pose no security risk when it is publicly accessible. -type PublicMinerAPI struct { - e *Ethereum -} - -// NewPublicMinerAPI create a new PublicMinerAPI instance. -func NewPublicMinerAPI(e *Ethereum) *PublicMinerAPI { - return &PublicMinerAPI{e} -} - -// Mining returns an indication if this node is currently mining. -func (api *PublicMinerAPI) Mining() bool { - return api.e.IsMining() -} - -// PrivateMinerAPI provides private RPC methods to control the miner. -// These methods can be abused by external users and must be considered insecure for use by untrusted users. -type PrivateMinerAPI struct { - e *Ethereum -} - -// NewPrivateMinerAPI create a new RPC service which controls the miner of this node. -func NewPrivateMinerAPI(e *Ethereum) *PrivateMinerAPI { - return &PrivateMinerAPI{e: e} -} - -// Start starts the miner with the given number of threads. If threads is nil, -// the number of workers started is equal to the number of logical CPUs that are -// usable by this process. If mining is already running, this method adjust the -// number of threads allowed to use and updates the minimum price required by the -// transaction pool. -func (api *PrivateMinerAPI) Start(threads *int) error { - if threads == nil { - return api.e.StartMining(runtime.NumCPU()) - } - return api.e.StartMining(*threads) -} - -// Stop terminates the miner, both at the consensus engine level as well as at -// the block creation level. -func (api *PrivateMinerAPI) Stop() { - api.e.StopMining() -} - -// SetExtra sets the extra data string that is included when this miner mines a block. -func (api *PrivateMinerAPI) SetExtra(extra string) (bool, error) { - if err := api.e.Miner().SetExtra([]byte(extra)); err != nil { - return false, err - } - return true, nil -} - -// SetGasPrice sets the minimum accepted gas price for the miner. 
-func (api *PrivateMinerAPI) SetGasPrice(gasPrice hexutil.Big) bool { - api.e.lock.Lock() - api.e.gasPrice = (*big.Int)(&gasPrice) - api.e.lock.Unlock() - - api.e.txPool.SetGasPrice((*big.Int)(&gasPrice)) - return true -} - -// SetGasLimit sets the gaslimit to target towards during mining. -func (api *PrivateMinerAPI) SetGasLimit(gasLimit hexutil.Uint64) bool { - api.e.Miner().SetGasCeil(uint64(gasLimit)) - return true -} - -// SetEtherbase sets the etherbase of the miner -func (api *PrivateMinerAPI) SetEtherbase(etherbase common.Address) bool { - api.e.SetEtherbase(etherbase) - return true -} - -// SetRecommitInterval updates the interval for miner sealing work recommitting. -func (api *PrivateMinerAPI) SetRecommitInterval(interval int) { - api.e.Miner().SetRecommitInterval(time.Duration(interval) * time.Millisecond) -} - // PrivateAdminAPI is the collection of Ethereum full node-related APIs // exposed over the private admin endpoint. type PrivateAdminAPI struct { @@ -282,7 +175,7 @@ func (api *PublicDebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error // If we're dumping the pending state, we need to request // both the pending block as well as the pending state from // the miner and operate on those - _, stateDb := api.eth.miner.Pending() + _, _, stateDb := api.eth.miner.Pending() return stateDb.RawDump(opts), nil } var block *types.Block @@ -371,7 +264,7 @@ func (api *PublicDebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, sta // If we're dumping the pending state, we need to request // both the pending block as well as the pending state from // the miner and operate on those - _, stateDb = api.eth.miner.Pending() + _, _, stateDb = api.eth.miner.Pending() } else { var block *types.Block if number == rpc.LatestBlockNumber { diff --git a/eth/api_backend.go b/eth/api_backend.go index aa49b9089..746b4c349 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -35,7 +35,6 @@ import ( 
"github.com/scroll-tech/go-ethereum/eth/gasprice" "github.com/scroll-tech/go-ethereum/ethdb" "github.com/scroll-tech/go-ethereum/event" - "github.com/scroll-tech/go-ethereum/miner" "github.com/scroll-tech/go-ethereum/params" "github.com/scroll-tech/go-ethereum/rpc" ) @@ -69,7 +68,7 @@ func (b *EthAPIBackend) SetHead(number uint64) { func (b *EthAPIBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) { // Pending block is only known by the miner if number == rpc.PendingBlockNumber { - block := b.eth.miner.PendingBlock() + block, _, _ := b.eth.miner.Pending() return block.Header(), nil } // Otherwise resolve and return the block @@ -103,7 +102,7 @@ func (b *EthAPIBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*ty func (b *EthAPIBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { // Pending block is only known by the miner if number == rpc.PendingBlockNumber { - block := b.eth.miner.PendingBlock() + block, _, _ := b.eth.miner.Pending() return block, nil } // Otherwise resolve and return the block @@ -138,14 +137,14 @@ func (b *EthAPIBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash r return nil, errors.New("invalid arguments; neither block nor hash specified") } -func (b *EthAPIBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { - return b.eth.miner.PendingBlockAndReceipts() +func (b *EthAPIBackend) Pending() (*types.Block, types.Receipts, *state.StateDB) { + return b.eth.miner.Pending() } func (b *EthAPIBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error) { // Pending state is only known by the miner if number == rpc.PendingBlockNumber { - block, state := b.eth.miner.Pending() + block, _, state := b.eth.miner.Pending() return state, block.Header(), nil } // 
Otherwise resolve the block number and return its state @@ -210,10 +209,6 @@ func (b *EthAPIBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEven return b.eth.BlockChain().SubscribeRemovedLogsEvent(ch) } -func (b *EthAPIBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription { - return b.eth.miner.SubscribePendingLogs(ch) -} - func (b *EthAPIBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { return b.eth.BlockChain().SubscribeChainEvent(ch) } @@ -340,14 +335,6 @@ func (b *EthAPIBackend) CurrentHeader() *types.Header { return b.eth.blockchain.CurrentHeader() } -func (b *EthAPIBackend) Miner() *miner.Miner { - return b.eth.Miner() -} - -func (b *EthAPIBackend) StartMining(threads int) error { - return b.eth.StartMining(threads) -} - func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive, preferDisk bool) (*state.StateDB, error) { return b.eth.stateAtBlock(ctx, block, reexec, base, checkLive, preferDisk) } diff --git a/eth/api_miner.go b/eth/api_miner.go new file mode 100644 index 000000000..4c372fc6c --- /dev/null +++ b/eth/api_miner.go @@ -0,0 +1,57 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package eth + +import ( + "math/big" + + "github.com/scroll-tech/go-ethereum/common/hexutil" +) + +// MinerAPI provides an API to control the miner. +type MinerAPI struct { + e *Ethereum +} + +// NewMinerAPI creates a new MinerAPI instance. +func NewMinerAPI(e *Ethereum) *MinerAPI { + return &MinerAPI{e} +} + +// SetExtra sets the extra data string that is included when this miner mines a block. +func (api *MinerAPI) SetExtra(extra string) (bool, error) { + if err := api.e.Miner().SetExtra([]byte(extra)); err != nil { + return false, err + } + return true, nil +} + +// SetGasPrice sets the minimum accepted gas price for the miner. +func (api *MinerAPI) SetGasPrice(gasPrice hexutil.Big) bool { + api.e.lock.Lock() + api.e.gasPrice = (*big.Int)(&gasPrice) + api.e.lock.Unlock() + + api.e.txPool.SetGasPrice((*big.Int)(&gasPrice)) + return true +} + +// SetGasLimit sets the gaslimit to target towards during mining. +func (api *MinerAPI) SetGasLimit(gasLimit hexutil.Uint64) bool { + api.e.Miner().SetGasCeil(uint64(gasLimit)) + return true +} diff --git a/eth/backend.go b/eth/backend.go index 1e3698162..2f6064a67 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -30,7 +30,6 @@ import ( "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common/hexutil" "github.com/scroll-tech/go-ethereum/consensus" - "github.com/scroll-tech/go-ethereum/consensus/clique" "github.com/scroll-tech/go-ethereum/core" "github.com/scroll-tech/go-ethereum/core/bloombits" "github.com/scroll-tech/go-ethereum/core/rawdb" @@ -86,9 +85,8 @@ type Ethereum struct { APIBackend *EthAPIBackend - miner *miner.Miner - gasPrice *big.Int - etherbase common.Address + miner *miner.Miner + gasPrice *big.Int networkID uint64 netRPCService *ethapi.PublicNetAPI @@ -126,10 +124,6 @@ func New(stack *node.Node, config 
*ethconfig.Config) (*Ethereum, error) { } log.Info("Allocated trie memory caches", "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024) - // Transfer mining-related config to the ethash config. - ethashConfig := config.Ethash - ethashConfig.NotifyFull = config.Miner.NotifyFull - // Assemble the Ethereum object chainDb, err := stack.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "eth/db/chaindata/", false) if err != nil { @@ -144,16 +138,19 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb, stack.ResolvePath(config.TrieCleanCacheJournal)); err != nil { log.Error("Failed to recover state", "error", err) } + engine, err := ethconfig.CreateConsensusEngine(chainConfig, chainDb) + if err != nil { + return nil, err + } eth := &Ethereum{ config: config, chainDb: chainDb, eventMux: stack.EventMux(), accountManager: stack.AccountManager(), - engine: ethconfig.CreateConsensusEngine(stack, chainConfig, ðashConfig, config.Miner.Notify, config.Miner.Noverify, chainDb), + engine: engine, closeBloomHandler: make(chan struct{}), networkID: config.NetworkId, gasPrice: config.Miner.GasPrice, - etherbase: config.Miner.Etherbase, bloomRequests: make(chan chan *bloombits.Retrieval), bloomIndexer: core.NewBloomIndexer(chainDb, params.BloomBitsBlocks, params.BloomConfirms), p2pServer: stack.Server(), @@ -193,7 +190,11 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { MPTWitness: config.MPTWitness, } ) - eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, chainConfig, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit) + // TODO (MariusVanDerWijden) get rid of shouldPreserve in a follow-up PR + shouldPreserve := func(header *types.Header) bool { + return false + } + eth.blockchain, err = core.NewBlockChain(chainDb, 
cacheConfig, chainConfig, eth.engine, vmConfig, shouldPreserve, &config.TxLookupLimit) if err != nil { return nil, err } @@ -235,7 +236,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { return nil, err } - eth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, eth.isLocalBlock) + eth.miner = miner.New(eth, config.Miner, eth.engine) eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData)) // new batch handler @@ -313,16 +314,6 @@ func (s *Ethereum) APIs() []rpc.API { // Append all the local APIs and return return append(apis, []rpc.API{ { - Namespace: "eth", - Version: "1.0", - Service: NewPublicEthereumAPI(s), - Public: true, - }, { - Namespace: "eth", - Version: "1.0", - Service: NewPublicMinerAPI(s), - Public: true, - }, { Namespace: "eth", Version: "1.0", Service: downloader.NewPublicDownloaderAPI(s.handler.downloader, s.eventMux), @@ -330,7 +321,7 @@ func (s *Ethereum) APIs() []rpc.API { }, { Namespace: "miner", Version: "1.0", - Service: NewPrivateMinerAPI(s), + Service: NewMinerAPI(s), Public: false, }, { Namespace: "admin", @@ -363,141 +354,6 @@ func (s *Ethereum) ResetWithGenesisBlock(gb *types.Block) { s.blockchain.ResetWithGenesisBlock(gb) } -func (s *Ethereum) Etherbase() (eb common.Address, err error) { - s.lock.RLock() - etherbase := s.etherbase - s.lock.RUnlock() - - if etherbase != (common.Address{}) { - return etherbase, nil - } - return common.Address{}, fmt.Errorf("etherbase must be explicitly specified") -} - -// isLocalBlock checks whether the specified block is mined -// by local miner accounts. -// -// We regard two types of accounts as local miner account: etherbase -// and accounts specified via `txpool.locals` flag. 
-func (s *Ethereum) isLocalBlock(block *types.Block) bool { - author, err := s.engine.Author(block.Header()) - if err != nil { - log.Warn("Failed to retrieve block author", "number", block.NumberU64(), "hash", block.Hash(), "err", err) - return false - } - // Check whether the given address is etherbase. - s.lock.RLock() - etherbase := s.etherbase - s.lock.RUnlock() - if author == etherbase { - return true - } - // Check whether the given address is specified by `txpool.local` - // CLI flag. - for _, account := range s.config.TxPool.Locals { - if account == author { - return true - } - } - return false -} - -// shouldPreserve checks whether we should preserve the given block -// during the chain reorg depending on whether the author of block -// is a local account. -func (s *Ethereum) shouldPreserve(block *types.Block) bool { - // The reason we need to disable the self-reorg preserving for clique - // is it can be probable to introduce a deadlock. - // - // e.g. If there are 7 available signers - // - // r1 A - // r2 B - // r3 C - // r4 D - // r5 A [X] F G - // r6 [X] - // - // In the round5, the inturn signer E is offline, so the worst case - // is A, F and G sign the block of round5 and reject the block of opponents - // and in the round6, the last available signer B is offline, the whole - // network is stuck. - if _, ok := s.engine.(*clique.Clique); ok { - return false - } - return s.isLocalBlock(block) -} - -// SetEtherbase sets the mining reward address. -func (s *Ethereum) SetEtherbase(etherbase common.Address) { - s.lock.Lock() - s.etherbase = etherbase - s.lock.Unlock() - - s.miner.SetEtherbase(etherbase) -} - -// StartMining starts the miner with the given number of CPU threads. If mining -// is already running, this method adjust the number of threads allowed to use -// and updates the minimum price required by the transaction pool. 
-func (s *Ethereum) StartMining(threads int) error { - // Update the thread count within the consensus engine - type threaded interface { - SetThreads(threads int) - } - if th, ok := s.engine.(threaded); ok { - log.Info("Updated mining threads", "threads", threads) - if threads == 0 { - threads = -1 // Disable the miner from within - } - th.SetThreads(threads) - } - // If the miner was not running, initialize it - if !s.IsMining() { - // Propagate the initial price point to the transaction pool - s.lock.RLock() - price := s.gasPrice - s.lock.RUnlock() - s.txPool.SetGasPrice(price) - - // Configure the local mining address - eb, err := s.Etherbase() - if err != nil { - log.Error("Cannot start mining without etherbase", "err", err) - return fmt.Errorf("etherbase missing: %v", err) - } - if clique, ok := s.engine.(*clique.Clique); ok { - wallet, err := s.accountManager.Find(accounts.Account{Address: eb}) - if wallet == nil || err != nil { - log.Error("Etherbase account unavailable locally", "err", err) - return fmt.Errorf("signer missing: %v", err) - } - clique.Authorize(eb, wallet.SignData) - } - // If mining is started, we can disable the transaction rejection mechanism - // introduced to speed sync times. - atomic.StoreUint32(&s.handler.acceptTxs, 1) - - go s.miner.Start(eb) - } - return nil -} - -// StopMining terminates the miner, both at the consensus engine level as well as -// at the block creation level. 
-func (s *Ethereum) StopMining() { - // Update the thread count within the consensus engine - type threaded interface { - SetThreads(threads int) - } - if th, ok := s.engine.(threaded); ok { - th.SetThreads(-1) - } - // Stop the block creating itself - s.miner.Stop() -} - -func (s *Ethereum) IsMining() bool { return s.miner.Mining() } func (s *Ethereum) Miner() *miner.Miner { return s.miner } func (s *Ethereum) AccountManager() *accounts.Manager { return s.accountManager } @@ -517,6 +373,10 @@ func (s *Ethereum) SyncMode() downloader.SyncMode { return mode } +func (s *Ethereum) SetSynced() { + atomic.StoreUint32(&s.handler.acceptTxs, 1) +} + // Protocols returns all the currently configured // network protocols to start. func (s *Ethereum) Protocols() []p2p.Protocol { diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 92ef7ba49..74aeca1e8 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -129,15 +129,11 @@ func (api *consensusAPI) AssembleBlock(params assembleBlockParams) (*executableD pending := pool.Pending(true) - coinbase, err := api.eth.Etherbase() - if err != nil { - return nil, err - } num := parent.Number() header := &types.Header{ ParentHash: parent.Hash(), Number: num.Add(num, common.Big1), - Coinbase: coinbase, + Coinbase: common.Address{}, GasLimit: parent.GasLimit(), // Keep the gas limit constant in this prototype Extra: []byte{}, Time: params.Timestamp, @@ -150,7 +146,7 @@ func (api *consensusAPI) AssembleBlock(params assembleBlockParams) (*executableD parentL1BaseFee := fees.GetL1BaseFee(stateDb) header.BaseFee = misc.CalcBaseFee(config, parent.Header(), parentL1BaseFee) } - err = api.eth.Engine().Prepare(bc, header) + err := api.eth.Engine().Prepare(bc, header) if err != nil { return nil, err } @@ -180,7 +176,7 @@ func (api *consensusAPI) AssembleBlock(params assembleBlockParams) (*executableD // Execute the transaction env.state.SetTxContext(tx.Hash(), env.tcount) - err = env.commitTransaction(tx, coinbase) + err = 
env.commitTransaction(tx, common.Address{}) switch err { case core.ErrGasLimitReached: // Pop the current out-of-gas transaction without shifting in the next from the account diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index b0f7dbc6b..aabd8a5de 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -250,7 +250,6 @@ func startEthService(t *testing.T, genesis *core.Genesis, blocks []*types.Block) t.Fatal("cannot set canonical: ", err) } } - ethservice.SetEtherbase(testAddr) return n, ethservice } diff --git a/eth/catalyst/l2_api.go b/eth/catalyst/l2_api.go index 26e2b2061..0f5ead725 100644 --- a/eth/catalyst/l2_api.go +++ b/eth/catalyst/l2_api.go @@ -83,7 +83,8 @@ func (api *l2ConsensusAPI) AssembleL2Block(params AssembleL2BlockParams) (*Execu } start := time.Now() - block, stateDB, receipts, rc, skippedTxs, err := api.eth.Miner().BuildBlock(parent.Hash(), time.Now(), transactions) + //block, stateDB, receipts, rc, skippedTxs, err := api.eth.Miner().BuildBlock(parent.Hash(), time.Now(), transactions) + newBlockResult, err := api.eth.Miner().BuildBlock(parent.Hash(), time.Now(), transactions) if err != nil { return nil, err } @@ -93,30 +94,30 @@ func (api *l2ConsensusAPI) AssembleL2Block(params AssembleL2BlockParams) (*Execu // return nil, nil // } procTime := time.Since(start) - withdrawTrieRoot := api.writeVerified(stateDB, block, receipts, skippedTxs, procTime) + withdrawTrieRoot := api.writeVerified(newBlockResult.State, newBlockResult.Block, newBlockResult.Receipts, newBlockResult.SkippedTxs, procTime) var resRc types.RowConsumption - if rc != nil { - resRc = *rc + if newBlockResult.RowConsumption != nil { + resRc = *newBlockResult.RowConsumption } return &ExecutableL2Data{ - ParentHash: block.ParentHash(), - Number: block.NumberU64(), - Miner: block.Coinbase(), - Timestamp: block.Time(), - GasLimit: block.GasLimit(), - BaseFee: block.BaseFee(), - Transactions: encodeTransactions(block.Transactions()), - - StateRoot: 
block.Root(), - GasUsed: block.GasUsed(), - ReceiptRoot: block.ReceiptHash(), - LogsBloom: block.Bloom().Bytes(), - NextL1MessageIndex: block.Header().NextL1MsgIndex, + ParentHash: newBlockResult.Block.ParentHash(), + Number: newBlockResult.Block.NumberU64(), + Miner: newBlockResult.Block.Coinbase(), + Timestamp: newBlockResult.Block.Time(), + GasLimit: newBlockResult.Block.GasLimit(), + BaseFee: newBlockResult.Block.BaseFee(), + Transactions: encodeTransactions(newBlockResult.Block.Transactions()), + + StateRoot: newBlockResult.Block.Root(), + GasUsed: newBlockResult.Block.GasUsed(), + ReceiptRoot: newBlockResult.Block.ReceiptHash(), + LogsBloom: newBlockResult.Block.Bloom().Bytes(), + NextL1MessageIndex: newBlockResult.Block.Header().NextL1MsgIndex, WithdrawTrieRoot: withdrawTrieRoot, RowUsages: resRc, - SkippedTxs: skippedTxs, + SkippedTxs: newBlockResult.SkippedTxs, - Hash: block.Hash(), + Hash: newBlockResult.Block.Hash(), }, nil } diff --git a/eth/catalyst/l2_api_test.go b/eth/catalyst/l2_api_test.go index 0f6f93f23..6a01d8fa8 100644 --- a/eth/catalyst/l2_api_test.go +++ b/eth/catalyst/l2_api_test.go @@ -24,6 +24,7 @@ func l2ChainConfig() params.ChainConfig { config.TerminalTotalDifficulty = common.Big0 addr := common.BigToAddress(big.NewInt(123)) config.Scroll.FeeVaultAddress = &addr + config.CurieBlock = nil return config } @@ -110,8 +111,9 @@ func TestValidateL2Block(t *testing.T) { // generic case err = sendTransfer(config, ethService) require.NoError(t, err) - block, _, _, _, _, err := ethService.Miner().BuildBlock(ethService.BlockChain().CurrentHeader().Hash(), time.Now(), nil) + ret, err := ethService.Miner().BuildBlock(ethService.BlockChain().CurrentHeader().Hash(), time.Now(), nil) require.NoError(t, err) + block := ret.Block l2Data := ExecutableL2Data{ ParentHash: block.ParentHash(), Number: block.NumberU64(), @@ -168,7 +170,8 @@ func TestNewL2Block(t *testing.T) { err := sendTransfer(config, ethService) require.NoError(t, err) - block, _, _, _, _, 
err := ethService.Miner().BuildBlock(ethService.BlockChain().CurrentHeader().Hash(), time.Now(), nil) + ret, err := ethService.Miner().BuildBlock(ethService.BlockChain().CurrentHeader().Hash(), time.Now(), nil) require.NoError(t, err) + block := ret.Block l2Data := ExecutableL2Data{ ParentHash: block.ParentHash(), @@ -220,8 +223,9 @@ func TestNewSafeL2Block(t *testing.T) { err := sendTransfer(config, ethService) require.NoError(t, err) - block, _, _, _, _, err := ethService.Miner().BuildBlock(ethService.BlockChain().CurrentHeader().Hash(), time.Now(), nil) + ret, err := ethService.Miner().BuildBlock(ethService.BlockChain().CurrentHeader().Hash(), time.Now(), nil) require.NoError(t, err) + block := ret.Block l2Data := SafeL2Data{ Number: block.NumberU64(), Timestamp: block.Time(), @@ -238,7 +242,7 @@ func TestValidateL1Message(t *testing.T) { genesis, blocks := generateTestL2Chain(0) n, ethService := startEthService(t, genesis, blocks) - require.NoError(t, ethService.StartMining(0)) + //require.NoError(t, ethService.StartMining(0)) defer n.Close() for _, block := range blocks { @@ -251,8 +255,9 @@ func TestValidateL1Message(t *testing.T) { l1Txs, l1Messages := makeL1Txs(0, 10) // case: include #0, #1, fail on #2, skip it and seal the block ccc.ScheduleError(3, circuitcapacitychecker.ErrUnknown) - block, _, _, _, skippedTxs, err := api.eth.Miner().BuildBlock(ethService.BlockChain().CurrentHeader().Hash(), time.Now(), l1Txs) + ret, err := api.eth.Miner().BuildBlock(ethService.BlockChain().CurrentHeader().Hash(), time.Now(), l1Txs) require.NoError(t, err) + block := ret.Block require.EqualValues(t, 2, block.Transactions().Len()) require.EqualValues(t, 3, block.Header().NextL1MsgIndex) l2Data := ExecutableL2Data{ @@ -269,7 +274,7 @@ func TestValidateL1Message(t *testing.T) { ReceiptRoot: block.ReceiptHash(), LogsBloom: block.Bloom().Bytes(), NextL1MessageIndex: block.Header().NextL1MsgIndex, - SkippedTxs: skippedTxs, + SkippedTxs:
ret.SkippedTxs, Hash: block.Hash(), } @@ -290,16 +295,18 @@ func TestValidateL1Message(t *testing.T) { // expected: Unexpected L1 message queue index, build none transaction restL1Txs := l1Txs[2:] restL1Messages := l1Messages[2:] - block, _, _, _, _, err = ethService.Miner().BuildBlock(ethService.BlockChain().CurrentHeader().Hash(), time.Now(), restL1Txs) + ret, err = ethService.Miner().BuildBlock(ethService.BlockChain().CurrentHeader().Hash(), time.Now(), restL1Txs) require.NoError(t, err) + block = ret.Block require.EqualValues(t, 0, block.Transactions().Len()) // case: #3 - #9, skip #3, includes the rest restL1Txs = restL1Txs[1:] restL1Messages = restL1Messages[1:] ccc.ScheduleError(1, circuitcapacitychecker.ErrBlockRowConsumptionOverflow) - block, _, _, _, skippedTxs, err = ethService.Miner().BuildBlock(ethService.BlockChain().CurrentHeader().Hash(), time.Now(), restL1Txs) + ret, err = ethService.Miner().BuildBlock(ethService.BlockChain().CurrentHeader().Hash(), time.Now(), restL1Txs) require.NoError(t, err) + block = ret.Block require.EqualValues(t, 6, block.Transactions().Len()) l2Data = ExecutableL2Data{ ParentHash: block.ParentHash(), @@ -315,7 +322,7 @@ func TestValidateL1Message(t *testing.T) { ReceiptRoot: block.ReceiptHash(), LogsBloom: block.Bloom().Bytes(), NextL1MessageIndex: block.Header().NextL1MsgIndex, - SkippedTxs: skippedTxs, + SkippedTxs: ret.SkippedTxs, Hash: block.Hash(), } @@ -332,8 +339,9 @@ func TestValidateL1Message(t *testing.T) { // case: includes all l1messages from #10 l1Txs, l1Messages = makeL1Txs(10, 5) - block, _, _, _, skippedTxs, err = api.eth.Miner().BuildBlock(ethService.BlockChain().CurrentHeader().Hash(), time.Now(), l1Txs) + ret, err = api.eth.Miner().BuildBlock(ethService.BlockChain().CurrentHeader().Hash(), time.Now(), l1Txs) require.NoError(t, err) + block = ret.Block l2Data = ExecutableL2Data{ ParentHash: block.ParentHash(), Number: block.NumberU64(), @@ -348,7 +356,7 @@ func TestValidateL1Message(t *testing.T) { 
ReceiptRoot: block.ReceiptHash(), LogsBloom: block.Bloom().Bytes(), NextL1MessageIndex: block.Header().NextL1MsgIndex, - SkippedTxs: skippedTxs, + SkippedTxs: ret.SkippedTxs, Hash: block.Hash(), } diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index b7da43fd1..e97242c7a 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -34,9 +34,7 @@ import ( "github.com/scroll-tech/go-ethereum/eth/downloader" "github.com/scroll-tech/go-ethereum/eth/gasprice" "github.com/scroll-tech/go-ethereum/ethdb" - "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/miner" - "github.com/scroll-tech/go-ethereum/node" "github.com/scroll-tech/go-ethereum/params" ) @@ -84,18 +82,13 @@ var Defaults = Config{ TrieTimeout: 60 * time.Minute, SnapshotCache: 102, FilterLogCacheSize: 32, - Miner: miner.Config{ - GasCeil: 8000000, - GasPrice: big.NewInt(params.GWei), - Recommit: 3 * time.Second, - NewBlockTimeout: 3 * time.Second, - }, - TxPool: core.DefaultTxPoolConfig, - RPCGasCap: 50000000, - RPCEVMTimeout: 5 * time.Second, - GPO: FullNodeGPO, - RPCTxFeeCap: 1, // 1 ether - MaxBlockRange: -1, // Default unconfigured value: no block range limit for backward compatibility + Miner: miner.DefaultConfig, + TxPool: core.DefaultTxPoolConfig, + RPCGasCap: 50000000, + RPCEVMTimeout: 5 * time.Second, + GPO: FullNodeGPO, + RPCTxFeeCap: 1, // 1 ether + MaxBlockRange: -1, // Default unconfigured value: no block range limit for backward compatibility } func init() { @@ -222,36 +215,13 @@ type Config struct { MaxBlockRange int64 } -// CreateConsensusEngine creates a consensus engine for the given chain configuration. 
-func CreateConsensusEngine(stack *node.Node, chainConfig *params.ChainConfig, config *ethash.Config, notify []string, noverify bool, db ethdb.Database) consensus.Engine { - var engine consensus.Engine - // If proof-of-authority is requested, set it up - if chainConfig.Clique != nil { - engine = clique.New(chainConfig.Clique, db) - } else { - // Otherwise assume proof-of-work - switch config.PowMode { - case ethash.ModeFake: - log.Warn("Ethash used in fake mode") - case ethash.ModeTest: - log.Warn("Ethash used in test mode") - case ethash.ModeShared: - log.Warn("Ethash used in shared mode") - } - engine = ethash.New(ethash.Config{ - PowMode: config.PowMode, - CacheDir: stack.ResolvePath(config.CacheDir), - CachesInMem: config.CachesInMem, - CachesOnDisk: config.CachesOnDisk, - CachesLockMmap: config.CachesLockMmap, - DatasetDir: config.DatasetDir, - DatasetsInMem: config.DatasetsInMem, - DatasetsOnDisk: config.DatasetsOnDisk, - DatasetsLockMmap: config.DatasetsLockMmap, - NotifyFull: config.NotifyFull, - }, notify, noverify) - engine.(*ethash.Ethash).SetThreads(-1) // Disable CPU mining +// CreateConsensusEngine creates a consensus engine for the given chain config. +// Clique is allowed for now to live standalone, but ethash is forbidden and can +// only exist on already merged networks. 
+func CreateConsensusEngine(config *params.ChainConfig, db ethdb.Database) (consensus.Engine, error) { + // Wrap previously supported consensus engines into their post-merge counterpart + if config.Clique != nil { + return l2.New(clique.New(config.Clique, db), config), nil } - - return l2.New(engine, chainConfig) + return l2.New(ethash.NewFaker(), config), nil } diff --git a/eth/filters/filter.go b/eth/filters/filter.go index 38e1785e7..c9c82e456 100644 --- a/eth/filters/filter.go +++ b/eth/filters/filter.go @@ -284,7 +284,7 @@ func (f *Filter) checkMatches(ctx context.Context, header *types.Header) ([]*typ // pendingLogs returns the logs matching the filter criteria within the pending block. func (f *Filter) pendingLogs() ([]*types.Log, error) { - block, receipts := f.sys.backend.PendingBlockAndReceipts() + block, receipts, _ := f.sys.backend.Pending() if bloomFilter(block.Bloom(), f.addresses, f.topics) { var unfiltered []*types.Log for _, r := range receipts { diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index a5002f18f..fb62a4065 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -61,7 +61,7 @@ type Backend interface { HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) - PendingBlockAndReceipts() (*types.Block, types.Receipts) + Pending() (*types.Block, types.Receipts, *state.StateDB) StateAt(root common.Hash) (*state.StateDB, error) CurrentHeader() *types.Header @@ -70,7 +70,6 @@ type Backend interface { SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription - SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription BloomStatus() (uint64, uint64) 
ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) @@ -171,20 +170,18 @@ type EventSystem struct { lastHead *types.Header // Subscriptions - txsSub event.Subscription // Subscription for new transaction event - logsSub event.Subscription // Subscription for new log event - rmLogsSub event.Subscription // Subscription for removed log event - pendingLogsSub event.Subscription // Subscription for pending log event - chainSub event.Subscription // Subscription for new chain event + txsSub event.Subscription // Subscription for new transaction event + logsSub event.Subscription // Subscription for new log event + rmLogsSub event.Subscription // Subscription for removed log event + chainSub event.Subscription // Subscription for new chain event // Channels - install chan *subscription // install filter for event notification - uninstall chan *subscription // remove filter for event notification - txsCh chan core.NewTxsEvent // Channel to receive new transactions event - logsCh chan []*types.Log // Channel to receive new log event - pendingLogsCh chan []*types.Log // Channel to receive new log event - rmLogsCh chan core.RemovedLogsEvent // Channel to receive removed log event - chainCh chan core.ChainEvent // Channel to receive new chain event + install chan *subscription // install filter for event notification + uninstall chan *subscription // remove filter for event notification + txsCh chan core.NewTxsEvent // Channel to receive new transactions event + logsCh chan []*types.Log // Channel to receive new log event + rmLogsCh chan core.RemovedLogsEvent // Channel to receive removed log event + chainCh chan core.ChainEvent // Channel to receive new chain event } // NewEventSystem creates a new manager that listens for event on the given mux, @@ -195,16 +192,15 @@ type EventSystem struct { // or by stopping the given mux. 
func NewEventSystem(sys *FilterSystem, lightMode bool) *EventSystem { m := &EventSystem{ - sys: sys, - backend: sys.backend, - lightMode: lightMode, - install: make(chan *subscription), - uninstall: make(chan *subscription), - txsCh: make(chan core.NewTxsEvent, txChanSize), - logsCh: make(chan []*types.Log, logsChanSize), - rmLogsCh: make(chan core.RemovedLogsEvent, rmLogsChanSize), - pendingLogsCh: make(chan []*types.Log, logsChanSize), - chainCh: make(chan core.ChainEvent, chainEvChanSize), + sys: sys, + backend: sys.backend, + lightMode: lightMode, + install: make(chan *subscription), + uninstall: make(chan *subscription), + txsCh: make(chan core.NewTxsEvent, txChanSize), + logsCh: make(chan []*types.Log, logsChanSize), + rmLogsCh: make(chan core.RemovedLogsEvent, rmLogsChanSize), + chainCh: make(chan core.ChainEvent, chainEvChanSize), } // Subscribe events @@ -212,10 +208,9 @@ func NewEventSystem(sys *FilterSystem, lightMode bool) *EventSystem { m.logsSub = m.backend.SubscribeLogsEvent(m.logsCh) m.rmLogsSub = m.backend.SubscribeRemovedLogsEvent(m.rmLogsCh) m.chainSub = m.backend.SubscribeChainEvent(m.chainCh) - m.pendingLogsSub = m.backend.SubscribePendingLogsEvent(m.pendingLogsCh) // Make sure none of the subscriptions are empty - if m.txsSub == nil || m.logsSub == nil || m.rmLogsSub == nil || m.chainSub == nil || m.pendingLogsSub == nil { + if m.txsSub == nil || m.logsSub == nil || m.rmLogsSub == nil || m.chainSub == nil { log.Crit("Subscribe for event system failed") } @@ -392,12 +387,12 @@ func (es *EventSystem) SubscribePendingTxs(txs chan []*types.Transaction) *Subsc type filterIndex map[Type]map[rpc.ID]*subscription -func (es *EventSystem) handleLogs(filters filterIndex, ev []*types.Log) { - if len(ev) == 0 { +func (es *EventSystem) handleLogs(filters filterIndex, logs []*types.Log) { + if len(logs) == 0 { return } for _, f := range filters[LogsSubscription] { - matchedLogs := filterLogs(ev, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, 
f.logsCrit.Topics) + matchedLogs := filterLogs(logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics) if len(matchedLogs) > 0 { f.logs <- matchedLogs } @@ -526,7 +521,6 @@ func (es *EventSystem) eventLoop() { es.txsSub.Unsubscribe() es.logsSub.Unsubscribe() es.rmLogsSub.Unsubscribe() - es.pendingLogsSub.Unsubscribe() es.chainSub.Unsubscribe() }() @@ -543,10 +537,29 @@ func (es *EventSystem) eventLoop() { es.handleLogs(index, ev) case ev := <-es.rmLogsCh: es.handleRemovedLogs(index, ev) - case ev := <-es.pendingLogsCh: - es.handlePendingLogs(index, ev) case ev := <-es.chainCh: es.handleChainEvent(index, ev) + // If we have no pending log subscription, + // we don't need to collect any pending logs. + if len(index[PendingLogsSubscription]) == 0 { + continue + } + + // Pull the pending logs if there is a new chain head. + pendingBlock, pendingReceipts, _ := es.backend.Pending() + if pendingBlock == nil || pendingReceipts == nil { + continue + } + if pendingBlock.ParentHash() != ev.Block.Hash() { + continue + } + var logs []*types.Log + for _, receipt := range pendingReceipts { + if len(receipt.Logs) > 0 { + logs = append(logs, receipt.Logs...) 
+ } + } + es.handlePendingLogs(index, logs) case f := <-es.install: if f.typ == MinedAndPendingLogsSubscription { diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go index 8998b0d03..a58961c99 100644 --- a/eth/filters/filter_system_test.go +++ b/eth/filters/filter_system_test.go @@ -50,8 +50,9 @@ type testBackend struct { txFeed event.Feed logsFeed event.Feed rmLogsFeed event.Feed - pendingLogsFeed event.Feed chainFeed event.Feed + pendingBlock *types.Block + pendingReceipts types.Receipts } func (b *testBackend) ChainConfig() *params.ChainConfig { @@ -106,8 +107,8 @@ func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash, number uint return logs, nil } -func (b *testBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { - return nil, nil +func (b *testBackend) Pending() (*types.Block, types.Receipts, *state.StateDB) { + return b.pendingBlock, b.pendingReceipts, nil } func (b *testBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { @@ -122,10 +123,6 @@ func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscript return b.logsFeed.Subscribe(ch) } -func (b *testBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription { - return b.pendingLogsFeed.Subscribe(ch) -} - func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { return b.chainFeed.Subscribe(ch) } @@ -165,6 +162,20 @@ func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.Matc }() } +func (b *testBackend) setPending(block *types.Block, receipts types.Receipts) { + b.pendingBlock = block + b.pendingReceipts = receipts +} + +func (b *testBackend) notifyPending(logs []*types.Log) { + genesis := &core.Genesis{ + Config: params.TestChainConfig, + } + _, blocks, _ := core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), rawdb.NewMemoryDatabase(), 2, func(i int, b *core.BlockGen) {}) + b.setPending(blocks[1], []*types.Receipt{{Logs: 
logs}}) + b.chainFeed.Send(core.ChainEvent{Block: blocks[0]}) +} + func newTestFilterSystem(t testing.TB, db ethdb.Database, cfg Config) (*testBackend, *FilterSystem) { backend := &testBackend{db: db} sys := NewFilterSystem(backend, cfg) @@ -185,7 +196,7 @@ func TestBlockSubscription(t *testing.T) { api = NewFilterAPI(sys, false, ethconfig.Defaults.MaxBlockRange) genesis = (&core.Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db) chain, _ = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {}) - chainEvents = []core.ChainEvent{} + chainEvents []core.ChainEvent ) for _, blk := range chain { @@ -547,9 +558,9 @@ func TestLogFilter(t *testing.T) { if nsend := backend.logsFeed.Send(allLogs); nsend == 0 { t.Fatal("Logs event not delivered") } - if nsend := backend.pendingLogsFeed.Send(allLogs); nsend == 0 { - t.Fatal("Pending logs event not delivered") - } + + // set pending logs + backend.notifyPending(allLogs) for i, tt := range testCases { var fetched []*types.Log @@ -755,10 +766,12 @@ func TestPendingLogsSubscription(t *testing.T) { }() } - // raise events - for _, ev := range allLogs { - backend.pendingLogsFeed.Send(ev) + // set pending logs + var flattenLogs []*types.Log + for _, logs := range allLogs { + flattenLogs = append(flattenLogs, logs...) 
} + backend.notifyPending(flattenLogs) for i := range testCases { err := <-testCases[i].err diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go index 647a1b55f..0ca9d68fb 100644 --- a/eth/gasprice/feehistory.go +++ b/eth/gasprice/feehistory.go @@ -152,7 +152,7 @@ func (oracle *Oracle) resolveBlockRange(ctx context.Context, lastBlock rpc.Block ) // query either pending block or head header and set headBlock if lastBlock == rpc.PendingBlockNumber { - if pendingBlock, pendingReceipts = oracle.backend.PendingBlockAndReceipts(); pendingBlock != nil { + if pendingBlock, pendingReceipts, _ = oracle.backend.Pending(); pendingBlock != nil { lastBlock = rpc.BlockNumber(pendingBlock.NumberU64()) headBlock = lastBlock - 1 } else { diff --git a/eth/gasprice/gasprice.go b/eth/gasprice/gasprice.go index 96ee73901..04c83050a 100644 --- a/eth/gasprice/gasprice.go +++ b/eth/gasprice/gasprice.go @@ -56,7 +56,7 @@ type OracleBackend interface { HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) - PendingBlockAndReceipts() (*types.Block, types.Receipts) + Pending() (*types.Block, types.Receipts, *state.StateDB) ChainConfig() *params.ChainConfig SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription StateAt(root common.Hash) (*state.StateDB, error) diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go index 24822c4c2..3ada8a0bf 100644 --- a/eth/gasprice/gasprice_test.go +++ b/eth/gasprice/gasprice_test.go @@ -80,12 +80,13 @@ func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types. 
return b.chain.GetReceiptsByHash(hash), nil } -func (b *testBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { +func (b *testBackend) Pending() (*types.Block, types.Receipts, *state.StateDB) { if b.pending { block := b.chain.GetBlockByNumber(testHead + 1) - return block, b.chain.GetReceiptsByHash(block.Hash()) + state, _ := b.chain.StateAt(block.Root()) + return block, b.chain.GetReceiptsByHash(block.Hash()), state } - return nil, nil + return nil, nil, nil } func (b *testBackend) ChainConfig() *params.ChainConfig { diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go index f4b3bc20a..2cf2667e7 100644 --- a/ethclient/ethclient_test.go +++ b/ethclient/ethclient_test.go @@ -592,18 +592,22 @@ func testEstimateGas(t *testing.T, client *rpc.Client) { func testAtFunctions(t *testing.T, client *rpc.Client) { ec := NewClient(client) - // send a transaction for some interesting pending status + // and wait for the transaction to be included in the pending block sendTransaction(ec) - time.Sleep(100 * time.Millisecond) - // Check pending transaction count - pending, err := ec.PendingTransactionCount(context.Background()) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if pending != 1 { - t.Fatalf("unexpected pending, wanted 1 got: %v", pending) + // wait for the transaction to be included in the pending block + for { + // Check pending transaction count + pending, err := ec.PendingTransactionCount(context.Background()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if pending == 1 { + break + } + time.Sleep(100 * time.Millisecond) } + // Query balance balance, err := ec.BalanceAt(context.Background(), testAddr, nil) if err != nil { @@ -699,7 +703,7 @@ func sendTransaction(ec *Client) error { if err != nil { return err } - nonce, err := ec.PendingNonceAt(context.Background(), testAddr) + nonce, err := ec.NonceAt(context.Background(), testAddr, nil) if err != nil { return err } diff --git a/ethstats/ethstats.go 
b/ethstats/ethstats.go index 1e33b8010..66e638f73 100644 --- a/ethstats/ethstats.go +++ b/ethstats/ethstats.go @@ -42,7 +42,6 @@ import ( "github.com/scroll-tech/go-ethereum/event" "github.com/scroll-tech/go-ethereum/les" "github.com/scroll-tech/go-ethereum/log" - "github.com/scroll-tech/go-ethereum/miner" "github.com/scroll-tech/go-ethereum/node" "github.com/scroll-tech/go-ethereum/p2p" "github.com/scroll-tech/go-ethereum/rpc" @@ -75,7 +74,6 @@ type backend interface { // reporting to ethstats type fullNodeBackend interface { backend - Miner() *miner.Miner BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) CurrentBlock() *types.Block SuggestGasTipCap(ctx context.Context) (*big.Int, error) @@ -103,13 +101,14 @@ type Service struct { // websocket. // // From Gorilla websocket docs: -// Connections support one concurrent reader and one concurrent writer. -// Applications are responsible for ensuring that no more than one goroutine calls the write methods -// - NextWriter, SetWriteDeadline, WriteMessage, WriteJSON, EnableWriteCompression, SetCompressionLevel -// concurrently and that no more than one goroutine calls the read methods -// - NextReader, SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler -// concurrently. -// The Close and WriteControl methods can be called concurrently with all other methods. +// +// Connections support one concurrent reader and one concurrent writer. +// Applications are responsible for ensuring that no more than one goroutine calls the write methods +// - NextWriter, SetWriteDeadline, WriteMessage, WriteJSON, EnableWriteCompression, SetCompressionLevel +// concurrently and that no more than one goroutine calls the read methods +// - NextReader, SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler +// concurrently. 
+// The Close and WriteControl methods can be called concurrently with all other methods. type connWrapper struct { conn *websocket.Conn @@ -758,28 +757,22 @@ func (s *Service) reportPending(conn *connWrapper) error { type nodeStats struct { Active bool `json:"active"` Syncing bool `json:"syncing"` - Mining bool `json:"mining"` - Hashrate int `json:"hashrate"` Peers int `json:"peers"` GasPrice int `json:"gasPrice"` Uptime int `json:"uptime"` } -// reportStats retrieves various stats about the node at the networking and -// mining layer and reports it to the stats server. +// reportStats retrieves various stats about the node at the networking layer +// and reports it to the stats server. func (s *Service) reportStats(conn *connWrapper) error { // Gather the syncing and mining infos from the local miner instance var ( - mining bool - hashrate int syncing bool gasprice int ) // check if backend is a full node fullBackend, ok := s.backend.(fullNodeBackend) if ok { - mining = fullBackend.Miner().Mining() - hashrate = int(fullBackend.Miner().Hashrate()) sync := fullBackend.SyncProgress() syncing = fullBackend.CurrentHeader().Number.Uint64() >= sync.HighestBlock @@ -800,8 +793,6 @@ func (s *Service) reportStats(conn *connWrapper) error { "id": s.node, "stats": &nodeStats{ Active: true, - Mining: mining, - Hashrate: hashrate, Peers: s.server.PeerCount(), GasPrice: gasprice, Syncing: syncing, diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index bc12329f8..79b56573b 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -66,7 +66,7 @@ type Backend interface { BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) - PendingBlockAndReceipts() (*types.Block, 
types.Receipts) + Pending() (*types.Block, types.Receipts, *state.StateDB) StateAt(root common.Hash) (*state.StateDB, error) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) GetTd(ctx context.Context, hash common.Hash) *big.Int @@ -95,7 +95,6 @@ type Backend interface { GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription - SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription BloomStatus() (uint64, uint64) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) } diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index f86ac2b3d..d83afd598 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -604,22 +604,6 @@ const MinerJs = ` web3._extend({ property: 'miner', methods: [ - new web3._extend.Method({ - name: 'start', - call: 'miner_start', - params: 1, - inputFormatter: [null] - }), - new web3._extend.Method({ - name: 'stop', - call: 'miner_stop' - }), - new web3._extend.Method({ - name: 'setEtherbase', - call: 'miner_setEtherbase', - params: 1, - inputFormatter: [web3._extend.formatters.inputAddressFormatter] - }), new web3._extend.Method({ name: 'setExtra', call: 'miner_setExtra', @@ -637,15 +621,6 @@ web3._extend({ params: 1, inputFormatter: [web3._extend.utils.fromDecimal] }), - new web3._extend.Method({ - name: 'setRecommitInterval', - call: 'miner_setRecommitInterval', - params: 1, - }), - new web3._extend.Method({ - name: 'getHashrate', - call: 'miner_getHashrate' - }), ], properties: [] }); diff --git a/les/api_backend.go b/les/api_backend.go index 3046f6f85..05f5b39e3 100644 --- a/les/api_backend.go +++ b/les/api_backend.go @@ -134,8 +134,8 @@ func (b *LesApiBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash r return nil, errors.New("invalid arguments; neither block nor 
hash specified") } -func (b *LesApiBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { - return nil, nil +func (b *LesApiBackend) Pending() (*types.Block, types.Receipts, *state.StateDB) { + return nil, nil, nil } func (b *LesApiBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error) { diff --git a/les/client.go b/les/client.go index 85d1fe28c..6141cbc29 100644 --- a/les/client.go +++ b/les/client.go @@ -94,6 +94,10 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) { log.Info("Initialised chain configuration", "config", chainConfig) peers := newServerPeerSet() + engine, err := ethconfig.CreateConsensusEngine(chainConfig, chainDb) + if err != nil { + return nil, err + } leth := &LightEthereum{ lesCommons: lesCommons{ genesis: genesisHash, @@ -108,7 +112,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) { eventMux: stack.EventMux(), reqDist: newRequestDistributor(peers, &mclock.System{}), accountManager: stack.AccountManager(), - engine: ethconfig.CreateConsensusEngine(stack, chainConfig, &config.Ethash, nil, false, chainDb), + engine: engine, bloomRequests: make(chan chan *bloombits.Retrieval), bloomIndexer: core.NewBloomIndexer(chainDb, params.BloomBitsBlocksClient, params.HelperTrieConfirmations), p2pServer: stack.Server(), diff --git a/miner/miner.go b/miner/miner.go index f4e603a92..a200d7004 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -18,24 +18,23 @@ package miner import ( + "errors" "fmt" "math/big" "sync" + "sync/atomic" "time" - "github.com/scroll-tech/go-ethereum/rollup/circuitcapacitychecker" - "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common/hexutil" "github.com/scroll-tech/go-ethereum/consensus" "github.com/scroll-tech/go-ethereum/core" 
"github.com/scroll-tech/go-ethereum/core/state" "github.com/scroll-tech/go-ethereum/core/types" - "github.com/scroll-tech/go-ethereum/eth/downloader" "github.com/scroll-tech/go-ethereum/ethdb" - "github.com/scroll-tech/go-ethereum/event" "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/params" + "github.com/scroll-tech/go-ethereum/rollup/circuitcapacitychecker" ) // Backend wraps all methods required for mining. @@ -43,126 +42,97 @@ type Backend interface { BlockChain() *core.BlockChain TxPool() *core.TxPool ChainDb() ethdb.Database + SetSynced() } // Config is the configuration parameters of mining. type Config struct { - Etherbase common.Address `toml:",omitempty"` // Public address for block mining rewards (default = first account) - Notify []string `toml:",omitempty"` // HTTP URL list to be notified of new work packages (only useful in ethash). - NotifyFull bool `toml:",omitempty"` // Notify with pending block headers instead of work packages - ExtraData hexutil.Bytes `toml:",omitempty"` // Block extra data set by the miner - GasFloor uint64 // Target gas floor for mined blocks. - GasCeil uint64 // Target gas ceiling for mined blocks. - GasPrice *big.Int // Minimum gas price for mining a transaction - Recommit time.Duration // The time interval for miner to re-create mining work. - Noverify bool // Disable remote mining solution verification(only useful in ethash). + Etherbase common.Address `toml:"-"` // Deprecated + PendingFeeRecipient common.Address `toml:"-"` // Address for pending block rewards. + ExtraData hexutil.Bytes `toml:",omitempty"` // Block extra data set by the miner + GasFloor uint64 // Target gas floor for mined blocks. + GasCeil uint64 // Target gas ceiling for mined blocks. 
+ GasPrice *big.Int // Minimum gas price for mining a transaction + Recommit time.Duration // The time interval for miner to re-create mining work. NewBlockTimeout time.Duration // The maximum time allowance for creating a new block StoreSkippedTxTraces bool // Whether store the wrapped traces when storing a skipped tx MaxAccountsNum int // Maximum number of accounts that miner will fetch the pending transactions of when building a new block } -// Miner creates blocks and searches for proof-of-work values. -type Miner struct { - mux *event.TypeMux - worker *worker - coinbase common.Address - eth Backend - engine consensus.Engine - exitCh chan struct{} - startCh chan common.Address - stopCh chan struct{} +// DefaultConfig contains default settings for miner. +var DefaultConfig = Config{ + GasCeil: 8000000, + GasPrice: big.NewInt(params.GWei / 1000), - wg sync.WaitGroup + // The default recommit time is chosen as two seconds since + // consensus-layer usually will wait a half slot of time(6s) + // for payload generation. It should be enough for Geth to + // run 3 rounds. + Recommit: 2 * time.Second, } -func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, isLocalBlock func(block *types.Block) bool) *Miner { - miner := &Miner{ - eth: eth, - mux: mux, - engine: engine, - exitCh: make(chan struct{}), - startCh: make(chan common.Address), - stopCh: make(chan struct{}), - worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, true), - } - miner.wg.Add(1) - go miner.update() - return miner +// Miner creates blocks and searches for proof-of-work values. 
+type Miner struct { + confMu sync.RWMutex // The lock used to protect the config fields: GasCeil, GasPrice and ExtraData + config *Config + chainConfig *params.ChainConfig + engine consensus.Engine + chainDB ethdb.Database + txpool *core.TxPool + chain *core.BlockChain + pending *pending + pendingMu sync.Mutex // Lock protects the pending block + + // newBlockTimeout is the maximum timeout allowance for creating block. + // The default value is 3 seconds but node operator can set it to an arbitrarily + // large value. A large timeout allowance may cause Geth to fail creating + // a non-empty block within the specified time and eventually miss the chance to be a proposer + // in case there are some computationally expensive transactions in txpool. + newBlockTimeout time.Duration + + // Make sure the checker here is used by a single block at a time; it must be reset before being used for another block. + circuitCapacityChecker *circuitcapacitychecker.CircuitCapacityChecker + prioritizedTx *prioritizedTransaction + + getWorkCh chan *getWorkReq + exitCh chan struct{} + wg sync.WaitGroup +} -// update keeps track of the downloader events. Please be aware that this is a one shot type of update loop. -// It's entered once and as soon as `Done` or `Failed` has been broadcasted the events are unregistered and -// the loop is exited. This to prevent a major security vuln where external parties can DOS you with blocks -// and halt your mining operation for as long as the DOS continues.
-func (miner *Miner) update() { - defer miner.wg.Done() - - events := miner.mux.Subscribe(downloader.StartEvent{}, downloader.DoneEvent{}, downloader.FailedEvent{}) - defer func() { - if !events.Closed() { - events.Unsubscribe() - } - }() - - shouldStart := false - canStart := true - dlEventCh := events.Chan() - for { - select { - case ev := <-dlEventCh: - if ev == nil { - // Unsubscription done, stop listening - dlEventCh = nil - continue - } - switch ev.Data.(type) { - case downloader.StartEvent: - wasMining := miner.Mining() - miner.worker.stop() - canStart = false - if wasMining { - // Resume mining after sync was finished - shouldStart = true - log.Info("Mining aborted due to sync") - } - case downloader.FailedEvent: - canStart = true - if shouldStart { - miner.SetEtherbase(miner.coinbase) - miner.worker.start() - } - case downloader.DoneEvent: - canStart = true - if shouldStart { - miner.SetEtherbase(miner.coinbase) - miner.worker.start() - } - // Stop reacting to downloader events - events.Unsubscribe() - } - case addr := <-miner.startCh: - miner.SetEtherbase(addr) - if canStart { - miner.worker.start() - } - shouldStart = true - case <-miner.stopCh: - shouldStart = false - miner.worker.stop() - case <-miner.exitCh: - miner.worker.close() - return - } +func New(eth Backend, config Config, engine consensus.Engine) *Miner { + // Sanitize the timeout config for creating block. 
+ newBlockTimeout := config.NewBlockTimeout + if newBlockTimeout == 0 { + log.Warn("Sanitizing new block timeout to default", "provided", newBlockTimeout, "updated", 3*time.Second) + newBlockTimeout = 3 * time.Second + } + if newBlockTimeout < time.Millisecond*100 { + log.Warn("Low block timeout may cause high amount of non-full blocks", "provided", newBlockTimeout, "default", 3*time.Second) } -} -func (miner *Miner) Start(coinbase common.Address) { - miner.startCh <- coinbase -} + miner := &Miner{ + config: &config, + chainConfig: eth.BlockChain().Config(), + chainDB: eth.ChainDb(), + engine: engine, + txpool: eth.TxPool(), + chain: eth.BlockChain(), + pending: &pending{}, + circuitCapacityChecker: circuitcapacitychecker.NewCircuitCapacityChecker(true), + + newBlockTimeout: newBlockTimeout, + getWorkCh: make(chan *getWorkReq), + exitCh: make(chan struct{}), + } + miner.wg.Add(1) + go miner.generateWorkLoop() -func (miner *Miner) Stop() { - miner.stopCh <- struct{}{} + // fixme later + // short-term fix: setSynced when consensus client notifies it + // long-term fix: setSynced when snap sync completed + eth.SetSynced() + return miner } func (miner *Miner) Close() { @@ -170,107 +140,118 @@ func (miner *Miner) Close() { miner.wg.Wait() } -func (miner *Miner) Mining() bool { - return miner.worker.isRunning() -} - -func (miner *Miner) Hashrate() uint64 { - if pow, ok := miner.engine.(consensus.PoW); ok { - return uint64(pow.Hashrate()) - } - return 0 -} - func (miner *Miner) SetExtra(extra []byte) error { if uint64(len(extra)) > params.MaximumExtraDataSize { return fmt.Errorf("extra exceeds max length. %d > %v", len(extra), params.MaximumExtraDataSize) } - miner.worker.setExtra(extra) + miner.confMu.Lock() + miner.config.ExtraData = extra + miner.confMu.Unlock() return nil } -// SetRecommitInterval sets the interval for sealing work resubmitting. 
-func (miner *Miner) SetRecommitInterval(interval time.Duration) { - miner.worker.setRecommitInterval(interval) -} - -// Pending returns the currently pending block and associated state. -func (miner *Miner) Pending() (*types.Block, *state.StateDB) { - return miner.worker.pending() -} - -// PendingBlock returns the currently pending block. -// -// Note, to access both the pending block and the pending state -// simultaneously, please use Pending(), as the pending state can -// change between multiple method calls -func (miner *Miner) PendingBlock() *types.Block { - return miner.worker.pendingBlock() -} - -// PendingBlockAndReceipts returns the currently pending block and corresponding receipts. -func (miner *Miner) PendingBlockAndReceipts() (*types.Block, types.Receipts) { - return miner.worker.pendingBlockAndReceipts() -} - -func (miner *Miner) SetEtherbase(addr common.Address) { - miner.coinbase = addr - miner.worker.setEtherbase(addr) +// Pending returns the currently pending block and associated receipts, logs +// and statedb. The returned values can be nil in case the pending block is +// not initialized. +func (miner *Miner) Pending() (*types.Block, types.Receipts, *state.StateDB) { + pending := miner.getPending() + if pending == nil { + return nil, nil, nil + } + return pending.Block, pending.Receipts, pending.State.Copy() } // SetGasCeil sets the gaslimit to strive for when mining blocks post 1559. // For pre-1559 blocks, it sets the ceiling. func (miner *Miner) SetGasCeil(ceil uint64) { - miner.worker.setGasCeil(ceil) -} - -// EnablePreseal turns on the preseal mining feature. It's enabled by default. -// Note this function shouldn't be exposed to API, it's unnecessary for users -// (miners) to actually know the underlying detail. It's only for outside project -// which uses this library. 
-func (miner *Miner) EnablePreseal() { - miner.worker.enablePreseal() + miner.confMu.Lock() + miner.config.GasCeil = ceil + miner.confMu.Unlock() } -// DisablePreseal turns off the preseal mining feature. It's necessary for some -// fake consensus engine which can seal blocks instantaneously. -// Note this function shouldn't be exposed to API, it's unnecessary for users -// (miners) to actually know the underlying detail. It's only for outside project -// which uses this library. -func (miner *Miner) DisablePreseal() { - miner.worker.disablePreseal() +func (miner *Miner) GetCCC() *circuitcapacitychecker.CircuitCapacityChecker { + return miner.circuitCapacityChecker } -// SubscribePendingLogs starts delivering logs from pending transactions -// to the given channel. -func (miner *Miner) SubscribePendingLogs(ch chan<- []*types.Log) event.Subscription { - return miner.worker.pendingLogsFeed.Subscribe(ch) +func (miner *Miner) getSealingBlockAndState(params *generateParams) (*NewBlockResult, error) { + interrupt := new(int32) + timer := time.AfterFunc(params.timeout, func() { + atomic.StoreInt32(interrupt, commitInterruptTimeout) + }) + defer timer.Stop() + + req := &getWorkReq{ + interrupt: interrupt, + params: params, + result: make(chan getWorkResp), + } + select { + case miner.getWorkCh <- req: + result := <-req.result + close(req.result) + return result.ret, result.err + case <-miner.exitCh: + return nil, errors.New("miner closed") + } } -func (miner *Miner) GetSealingBlockAndState(parentHash common.Hash, timestamp time.Time, transactions types.Transactions) (*types.Block, *state.StateDB, types.Receipts, *types.RowConsumption, []*types.SkippedTransaction, error) { - return miner.worker.generateWork(&generateParams{ - parentHash: parentHash, +func (miner *Miner) BuildBlock(parentHash common.Hash, timestamp time.Time, transactions types.Transactions) (*NewBlockResult, error) { + return miner.getSealingBlockAndState(&generateParams{ timestamp: 
uint64(timestamp.Unix()), + parentHash: parentHash, transactions: transactions, - }, nil) + timeout: miner.newBlockTimeout, + }) } -func (miner *Miner) BuildBlock(parentHash common.Hash, timestamp time.Time, transactions types.Transactions) (*types.Block, *state.StateDB, types.Receipts, *types.RowConsumption, []*types.SkippedTransaction, error) { - return miner.worker.getSealingBlockAndState(parentHash, timestamp, transactions) -} +func (miner *Miner) SimulateL1Messages(parentHash common.Hash, transactions types.Transactions) ([]*types.Transaction, []*types.SkippedTransaction, error) { + if transactions.Len() == 0 { + return nil, nil, nil + } -func (miner *Miner) GetCCC() *circuitcapacitychecker.CircuitCapacityChecker { - return miner.worker.circuitCapacityChecker -} + ret, err := miner.getSealingBlockAndState(&generateParams{ + timestamp: uint64(time.Now().Unix()), + parentHash: parentHash, + coinbase: miner.config.PendingFeeRecipient, + transactions: transactions, + simulate: true, + timeout: miner.newBlockTimeout * 2, // double the timeout, in case it is blocked due to the previous work + }) + if err != nil { + return nil, nil, err + } -func (miner *Miner) MakeHeader(parent *types.Block, timestamp uint64, coinBase common.Address) (*types.Header, error) { - return miner.worker.makeHeader(parent, timestamp, coinBase) + return ret.Block.Transactions(), ret.SkippedTxs, nil } -func (miner *Miner) SimulateL1Messages(parentHash common.Hash, transactions types.Transactions) ([]*types.Transaction, []*types.SkippedTransaction, error) { - return miner.worker.simulateL1Messages(&generateParams{ - parentHash: parentHash, - timestamp: uint64(time.Now().Unix()), - transactions: transactions, - }, transactions) +// getPending retrieves the pending block based on the current head block. +// The result might be nil if pending generation is failed. 
+func (miner *Miner) getPending() *NewBlockResult { + header := miner.chain.CurrentHeader() + miner.pendingMu.Lock() + defer miner.pendingMu.Unlock() + if cached := miner.pending.resolve(header.Hash()); cached != nil { + return cached + } + + interrupt := new(int32) + timer := time.AfterFunc(miner.newBlockTimeout, func() { + atomic.StoreInt32(interrupt, commitInterruptTimeout) + }) + defer timer.Stop() + + // It may cause the `generateWork` fall into concurrent case, + // but it is ok here, as it skips CCC so that will not reset ccc unexpectedly. + ret, err := miner.generateWork(&generateParams{ + timestamp: uint64(time.Now().Unix()), + parentHash: header.Hash(), + coinbase: miner.config.PendingFeeRecipient, + skipCCC: true, + }, interrupt) + + if err != nil { + return nil + } + miner.pending.update(header.Hash(), ret) + return ret } diff --git a/miner/miner_test.go b/miner/miner_test.go index 4769d2096..5305e8e8e 100644 --- a/miner/miner_test.go +++ b/miner/miner_test.go @@ -19,8 +19,8 @@ package miner import ( "math" + "sync" "testing" - "time" "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/consensus/clique" @@ -29,7 +29,6 @@ import ( "github.com/scroll-tech/go-ethereum/core/state" "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/core/vm" - "github.com/scroll-tech/go-ethereum/eth/downloader" "github.com/scroll-tech/go-ethereum/ethdb" "github.com/scroll-tech/go-ethereum/ethdb/memorydb" "github.com/scroll-tech/go-ethereum/event" @@ -62,6 +61,8 @@ func (m *mockBackend) ChainDb() ethdb.Database { return m.chainDb } +func (m *mockBackend) SetSynced() {} + type testBlockChain struct { statedb *state.StateDB gasLimit uint64 @@ -86,162 +87,25 @@ func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) 
return bc.chainHeadFeed.Subscribe(ch) } -func TestMiner(t *testing.T) { - miner, mux := createMiner(t) - miner.Start(common.HexToAddress("0x12345")) - waitForMiningState(t, miner, true) - // Start the downloader - mux.Post(downloader.StartEvent{}) - waitForMiningState(t, miner, false) - // Stop the downloader and wait for the update loop to run - mux.Post(downloader.DoneEvent{}) - waitForMiningState(t, miner, true) - - // Subsequent downloader events after a successful DoneEvent should not cause the - // miner to start or stop. This prevents a security vulnerability - // that would allow entities to present fake high blocks that would - // stop mining operations by causing a downloader sync - // until it was discovered they were invalid, whereon mining would resume. - mux.Post(downloader.StartEvent{}) - waitForMiningState(t, miner, true) - - mux.Post(downloader.FailedEvent{}) - waitForMiningState(t, miner, true) -} - -// TestMinerDownloaderFirstFails tests that mining is only -// permitted to run indefinitely once the downloader sees a DoneEvent (success). -// An initial FailedEvent should allow mining to stop on a subsequent -// downloader StartEvent. -func TestMinerDownloaderFirstFails(t *testing.T) { - miner, mux := createMiner(t) - miner.Start(common.HexToAddress("0x12345")) - waitForMiningState(t, miner, true) - // Start the downloader - mux.Post(downloader.StartEvent{}) - waitForMiningState(t, miner, false) - - // Stop the downloader and wait for the update loop to run - mux.Post(downloader.FailedEvent{}) - waitForMiningState(t, miner, true) - - // Since the downloader hasn't yet emitted a successful DoneEvent, - // we expect the miner to stop on next StartEvent. - mux.Post(downloader.StartEvent{}) - waitForMiningState(t, miner, false) - - // Downloader finally succeeds. - mux.Post(downloader.DoneEvent{}) - waitForMiningState(t, miner, true) - - // Downloader starts again. 
- // Since it has achieved a DoneEvent once, we expect miner - // state to be unchanged. - mux.Post(downloader.StartEvent{}) - waitForMiningState(t, miner, true) - - mux.Post(downloader.FailedEvent{}) - waitForMiningState(t, miner, true) -} - -func TestMinerStartStopAfterDownloaderEvents(t *testing.T) { - miner, mux := createMiner(t) - - miner.Start(common.HexToAddress("0x12345")) - waitForMiningState(t, miner, true) - // Start the downloader - mux.Post(downloader.StartEvent{}) - waitForMiningState(t, miner, false) - - // Downloader finally succeeds. - mux.Post(downloader.DoneEvent{}) - waitForMiningState(t, miner, true) - - miner.Stop() - waitForMiningState(t, miner, false) - - miner.Start(common.HexToAddress("0x678910")) - waitForMiningState(t, miner, true) - - miner.Stop() - waitForMiningState(t, miner, false) -} - -func TestStartWhileDownload(t *testing.T) { - miner, mux := createMiner(t) - waitForMiningState(t, miner, false) - miner.Start(common.HexToAddress("0x12345")) - waitForMiningState(t, miner, true) - // Stop the downloader and wait for the update loop to run - mux.Post(downloader.StartEvent{}) - waitForMiningState(t, miner, false) - // Starting the miner after the downloader should not work - miner.Start(common.HexToAddress("0x12345")) - waitForMiningState(t, miner, false) -} - -func TestStartStopMiner(t *testing.T) { - miner, _ := createMiner(t) - waitForMiningState(t, miner, false) - miner.Start(common.HexToAddress("0x12345")) - waitForMiningState(t, miner, true) - miner.Stop() - waitForMiningState(t, miner, false) -} - -func TestCloseMiner(t *testing.T) { - miner, _ := createMiner(t) - waitForMiningState(t, miner, false) - miner.Start(common.HexToAddress("0x12345")) - waitForMiningState(t, miner, true) - // Terminate the miner and wait for the update loop to run - miner.Close() - waitForMiningState(t, miner, false) -} - -// TestMinerSetEtherbase checks that etherbase becomes set even if mining isn't -// possible at the moment -func 
TestMinerSetEtherbase(t *testing.T) { - miner, mux := createMiner(t) - // Start with a 'bad' mining address - miner.Start(common.HexToAddress("0xdead")) - waitForMiningState(t, miner, true) - // Start the downloader - mux.Post(downloader.StartEvent{}) - waitForMiningState(t, miner, false) - // Now user tries to configure proper mining address - miner.Start(common.HexToAddress("0x1337")) - // Stop the downloader and wait for the update loop to run - mux.Post(downloader.DoneEvent{}) - - waitForMiningState(t, miner, true) - // The miner should now be using the good address - if got, exp := miner.coinbase, common.HexToAddress("0x1337"); got != exp { - t.Fatalf("Wrong coinbase, got %x expected %x", got, exp) - } -} - -// waitForMiningState waits until either -// * the desired mining state was reached -// * a timeout was reached which fails the test -func waitForMiningState(t *testing.T, m *Miner, mining bool) { - t.Helper() - - var state bool - for i := 0; i < 100; i++ { - time.Sleep(10 * time.Millisecond) - if state = m.Mining(); state == mining { - return +func TestBuildPendingBlocks(t *testing.T) { + miner := createMiner(t) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + block, _, _ := miner.Pending() + if block == nil { + t.Error("Pending failed") } - } - t.Fatalf("Mining() == %t, want %t", state, mining) + }() + wg.Wait() } -func createMiner(t *testing.T) (*Miner, *event.TypeMux) { +func createMiner(t *testing.T) *Miner { // Create Ethash config config := Config{ - Etherbase: common.HexToAddress("123456789"), - MaxAccountsNum: math.MaxInt, + PendingFeeRecipient: common.HexToAddress("123456789"), + MaxAccountsNum: math.MaxInt, } // Create chainConfig memdb := memorydb.New() @@ -261,10 +125,8 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux) { statedb, _ := state.New(common.Hash{}, state.NewDatabase(chainDB), nil) blockchain := &testBlockChain{statedb, 10000000, new(event.Feed)} - pool := core.NewTxPool(testTxPoolConfig, chainConfig, 
blockchain) + pool := core.NewTxPool(core.DefaultTxPoolConfig, chainConfig, blockchain) backend := NewMockBackend(bc, pool, chainDB) - // Create event Mux - mux := new(event.TypeMux) // Create Miner - return New(backend, &config, chainConfig, mux, engine, nil), mux + return New(backend, config, engine) } diff --git a/miner/morph_worker.go b/miner/morph_worker.go new file mode 100644 index 000000000..618637b98 --- /dev/null +++ b/miner/morph_worker.go @@ -0,0 +1,812 @@ +package miner + +import ( + "errors" + "fmt" + "sync/atomic" + "time" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/consensus/misc" + "github.com/scroll-tech/go-ethereum/core" + "github.com/scroll-tech/go-ethereum/core/rawdb" + "github.com/scroll-tech/go-ethereum/core/state" + "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/metrics" + "github.com/scroll-tech/go-ethereum/params" + "github.com/scroll-tech/go-ethereum/rollup/circuitcapacitychecker" + "github.com/scroll-tech/go-ethereum/rollup/fees" + "github.com/scroll-tech/go-ethereum/rollup/tracing" +) + +var ( + errBlockInterruptedByNewHead = errors.New("new head arrived while building block") + errBlockInterruptedByRecommit = errors.New("recommit interrupt while building block") + errBlockInterruptedByTimeout = errors.New("timeout while building block") + + // Metrics for the skipped txs + l1TxGasLimitExceededCounter = metrics.NewRegisteredCounter("miner/skipped_txs/l1/gas_limit_exceeded", nil) + l1TxRowConsumptionOverflowCounter = metrics.NewRegisteredCounter("miner/skipped_txs/l1/row_consumption_overflow", nil) + l2TxRowConsumptionOverflowCounter = 
metrics.NewRegisteredCounter("miner/skipped_txs/l2/row_consumption_overflow", nil) + l1TxCccUnknownErrCounter = metrics.NewRegisteredCounter("miner/skipped_txs/l1/ccc_unknown_err", nil) + l2TxCccUnknownErrCounter = metrics.NewRegisteredCounter("miner/skipped_txs/l2/ccc_unknown_err", nil) + l1TxStrangeErrCounter = metrics.NewRegisteredCounter("miner/skipped_txs/l1/strange_err", nil) + + l2CommitTxsTimer = metrics.NewRegisteredTimer("miner/commit/txs_all", nil) + l2CommitTxTimer = metrics.NewRegisteredTimer("miner/commit/tx_all", nil) + l2CommitTxFailedTimer = metrics.NewRegisteredTimer("miner/commit/tx_all_failed", nil) + l2CommitTxTraceTimer = metrics.NewRegisteredTimer("miner/commit/tx_trace", nil) + l2CommitTxTraceStateRevertTimer = metrics.NewRegisteredTimer("miner/commit/tx_trace_state_revert", nil) + l2CommitTxCCCTimer = metrics.NewRegisteredTimer("miner/commit/tx_ccc", nil) + l2CommitTxApplyTimer = metrics.NewRegisteredTimer("miner/commit/tx_apply", nil) + + l2CommitNewWorkTimer = metrics.NewRegisteredTimer("miner/commit/new_work_all", nil) + l2CommitNewWorkL1CollectTimer = metrics.NewRegisteredTimer("miner/commit/new_work_collect_l1", nil) + l2CommitNewWorkPrepareTimer = metrics.NewRegisteredTimer("miner/commit/new_work_prepare", nil) + l2CommitNewWorkCommitUncleTimer = metrics.NewRegisteredTimer("miner/commit/new_work_uncle", nil) + l2CommitNewWorkTidyPendingTxTimer = metrics.NewRegisteredTimer("miner/commit/new_work_tidy_pending", nil) + l2CommitNewWorkCommitL1MsgTimer = metrics.NewRegisteredTimer("miner/commit/new_work_commit_l1_msg", nil) + l2CommitNewWorkPrioritizedTxCommitTimer = metrics.NewRegisteredTimer("miner/commit/new_work_prioritized", nil) + l2CommitNewWorkRemoteLocalCommitTimer = metrics.NewRegisteredTimer("miner/commit/new_work_remote_local", nil) + l2CommitNewWorkLocalPriceAndNonceTimer = metrics.NewRegisteredTimer("miner/commit/new_work_local_price_and_nonce", nil) + l2CommitNewWorkRemotePriceAndNonceTimer = 
metrics.NewRegisteredTimer("miner/commit/new_work_remote_price_and_nonce", nil) + + l2CommitTimer = metrics.NewRegisteredTimer("miner/commit/all", nil) + l2CommitTraceTimer = metrics.NewRegisteredTimer("miner/commit/trace", nil) + l2CommitCCCTimer = metrics.NewRegisteredTimer("miner/commit/ccc", nil) + l2ResultTimer = metrics.NewRegisteredTimer("miner/result/all", nil) +) + +// prioritizedTransaction represents a single transaction that +// should be processed as the first transaction in the next block. +type prioritizedTransaction struct { + blockNumber uint64 + tx *types.Transaction +} + +// environment is the worker's current environment and holds all +// information of the sealing block generation. +type environment struct { + signer types.Signer + + state *state.StateDB // apply state changes here + tcount int // tx count in cycle + blockSize common.StorageSize // approximate size of tx payload in bytes + l1TxCount int // l1 msg count in cycle + gasPool *core.GasPool // available gas used to pack transactions + + header *types.Header + txs []*types.Transaction + receipts []*types.Receipt + + // circuit capacity check related fields + skipCCC bool // skip CCC + traceEnv *tracing.TraceEnv // env for tracing + accRows *types.RowConsumption // accumulated row consumption for a block + nextL1MsgIndex uint64 // next L1 queue index to be processed + isSimulate bool +} + +const ( + commitInterruptNone int32 = iota + commitInterruptNewHead + commitInterruptResubmit + commitInterruptTimeout +) + +type getWorkResp struct { + ret *NewBlockResult + err error +} + +// getWorkReq represents a request for getting a new sealing work with provided parameters. 
+type getWorkReq struct { + interrupt *int32 + params *generateParams + result chan getWorkResp // non-blocking channel +} + +type NewBlockResult struct { + Block *types.Block + State *state.StateDB + Receipts types.Receipts + RowConsumption *types.RowConsumption + SkippedTxs []*types.SkippedTransaction +} + +// generateParams wraps various settings for generating a sealing task. +type generateParams struct { + timestamp uint64 // The timestamp for the sealing task + parentHash common.Hash // Parent block hash, empty means the latest chain head + coinbase common.Address // The fee recipient address for including transaction + transactions types.Transactions // L1Message transactions to include at the start of the block + skipCCC bool + simulate bool + timeout time.Duration +} + +func (miner *Miner) generateWorkLoop() { + defer miner.wg.Done() + for { + select { + case req := <-miner.getWorkCh: + ret, err := miner.generateWork(req.params, req.interrupt) + req.result <- getWorkResp{ + ret: ret, + err: err, + } + // System stopped + case <-miner.exitCh: + return + } + } +} + +// generateWork generates a sealing block based on the given parameters. +// TODO the state data produced by the transactions will be committed to the database, whether the block is confirmed or not. +// TODO this issue will persist until the current zktrie based database optimizes its strategy.
+func (miner *Miner) generateWork(genParams *generateParams, interrupt *int32) (*NewBlockResult, error) { + // reset circuitCapacityChecker for a new block + miner.resetCCC(genParams.skipCCC) + work, prepareErr := miner.prepareWork(genParams) + if prepareErr != nil { + return nil, prepareErr + } + + if work.gasPool == nil { + work.gasPool = new(core.GasPool).AddGas(work.header.GasLimit) + } + + fillTxErr, skippedTxs := miner.fillTransactions(work, genParams.transactions, interrupt) + if fillTxErr != nil && errors.Is(fillTxErr, errBlockInterruptedByTimeout) { + log.Warn("Block building is interrupted", "allowance", common.PrettyDuration(miner.newBlockTimeout)) + } + + // calculate rows if + // 1. it is an empty block, and + // 2. it is not a simulation, and + // 3. it does not skip CCC + if work.accRows == nil && !genParams.simulate && !genParams.skipCCC { + log.Trace( + "Worker apply ccc for empty block", + "id", miner.circuitCapacityChecker.ID, + "number", work.header.Number, + "hash", work.header.Hash().String(), + ) + var ( + traces *types.BlockTrace + err error + ) + withTimer(l2CommitTraceTimer, func() { + traces, err = work.traceEnv.GetBlockTrace(types.NewBlockWithHeader(work.header)) + }) + if err != nil { + return nil, err + } + // truncate ExecutionResults&TxStorageTraces, because we declare their lengths with a dummy tx before; + // however, we need to clean it up for an empty block + traces.ExecutionResults = traces.ExecutionResults[:0] + traces.TxStorageTraces = traces.TxStorageTraces[:0] + var accRows *types.RowConsumption + withTimer(l2CommitCCCTimer, func() { + accRows, err = miner.circuitCapacityChecker.ApplyBlock(traces) + }) + if err != nil { + return nil, err + } + log.Trace( + "Worker apply ccc for empty block result", + "id", miner.circuitCapacityChecker.ID, + "number", work.header.Number, + "hash", work.header.Hash().String(), + "accRows", accRows, + ) + work.accRows = accRows + } + + block, finalizeErr := 
miner.engine.FinalizeAndAssemble(miner.chain, work.header, work.state, work.txs, nil, work.receipts) + if finalizeErr != nil { + return nil, finalizeErr + } + return &NewBlockResult{ + Block: block, + State: work.state, + Receipts: work.receipts, + RowConsumption: work.accRows, + SkippedTxs: skippedTxs, + }, nil +} + +// prepareWork constructs the sealing task according to the given parameters, +// either based on the last chain head or specified parent. In this function +// the pending transactions are not filled yet, only the empty task returned. +func (miner *Miner) prepareWork(genParams *generateParams) (*environment, error) { + miner.confMu.RLock() + defer miner.confMu.RUnlock() + + parent := miner.chain.CurrentBlock() + if genParams.parentHash != (common.Hash{}) { + parent = miner.chain.GetBlockByHash(genParams.parentHash) + } + if parent == nil { + return nil, fmt.Errorf("missing parent") + } + + timestamp := genParams.timestamp + if parent.Time() >= genParams.timestamp { + timestamp = parent.Time() + 1 + } + var coinBase common.Address + if genParams.coinbase != (common.Address{}) { + coinBase = genParams.coinbase + } + header, err := miner.makeHeader(parent, timestamp, coinBase) + if err != nil { + return nil, err + } + + env, err := miner.makeEnv(parent, header) + if err != nil { + log.Error("Failed to create sealing context", "err", err) + return nil, err + } + env.skipCCC = genParams.skipCCC + env.isSimulate = genParams.simulate + return env, nil +} + +func (miner *Miner) makeHeader(parent *types.Block, timestamp uint64, coinBase common.Address) (*types.Header, error) { + num := parent.Number() + header := &types.Header{ + ParentHash: parent.Hash(), + Number: num.Add(num, common.Big1), + GasLimit: core.CalcGasLimit(parent.GasLimit(), miner.config.GasCeil), + Extra: miner.config.ExtraData, + Time: timestamp, + Coinbase: coinBase, + } + + // Set baseFee if we are on an EIP-1559 chain + if miner.chainConfig.IsCurie(header.Number) { + state, err := 
miner.chain.StateAt(parent.Root()) + if err != nil { + log.Error("Failed to create mining context", "err", err) + return nil, err + } + parentL1BaseFee := fees.GetL1BaseFee(state) + header.BaseFee = misc.CalcBaseFee(miner.chainConfig, parent.Header(), parentL1BaseFee) + } + // Run the consensus preparation with the default or customized consensus engine. + if err := miner.engine.Prepare(miner.chain, header); err != nil { + log.Error("Failed to prepare header for sealing", "err", err) + return nil, err + } + return header, nil +} + +func (miner *Miner) makeEnv(parent *types.Block, header *types.Header) (*environment, error) { + // Retrieve the parent state to execute on top and start a prefetcher for + // the miner to speed block sealing up a bit + stateDB, err := miner.chain.StateAt(parent.Root()) + if err != nil { + return nil, err + } + + // don't commit the state during tracing for circuit capacity checker, otherwise we cannot revert. + // and even if we don't commit the state, the `refund` value will still be correct, as explained in `CommitTransaction` + commitStateAfterApply := false + traceEnv, err := tracing.CreateTraceEnv(miner.chainConfig, miner.chain, miner.engine, miner.chainDB, stateDB, parent, + // new block with a placeholder tx, for traceEnv's ExecutionResults length & TxStorageTraces length + types.NewBlockWithHeader(header).WithBody([]*types.Transaction{types.NewTx(&types.LegacyTx{})}, nil), + commitStateAfterApply) + if err != nil { + return nil, err + } + + stateDB.StartPrefetcher("miner") + + env := &environment{ + signer: types.MakeSigner(miner.chainConfig, header.Number), + state: stateDB, + header: header, + traceEnv: traceEnv, + accRows: nil, + nextL1MsgIndex: parent.Header().NextL1MsgIndex, + } + + // Keep track of transactions which return errors so they can be removed + env.tcount = 0 + env.blockSize = 0 + env.l1TxCount = 0 + return env, nil +} + +func (miner *Miner) commitTransaction(env *environment, tx *types.Transaction, coinbase 
common.Address) ([]*types.Log, *types.BlockTrace, error) { + var accRows *types.RowConsumption + var traces *types.BlockTrace + var err error + + // do not do CCC when it is called from `commitNewWork` + if !env.skipCCC { + defer func(t0 time.Time) { + l2CommitTxTimer.Update(time.Since(t0)) + if err != nil { + l2CommitTxFailedTimer.Update(time.Since(t0)) + } + }(time.Now()) + + // do gas limit check up-front and do not run CCC if it fails + if env.gasPool.Gas() < tx.Gas() { + return nil, nil, core.ErrGasLimitReached + } + + snap := env.state.Snapshot() + + log.Trace( + "Worker apply ccc for tx", + "id", miner.circuitCapacityChecker.ID, + "txHash", tx.Hash().Hex(), + ) + + // 1. we have to check circuit capacity before `core.ApplyTransaction`, + // because if the tx can be successfully executed but circuit capacity overflows, it will be inconvenient to revert. + // 2. even if we don't commit to the state during the tracing (which means `clearJournalAndRefund` is not called during the tracing), + // the `refund` value will still be correct, because: + // 2.1 when starting handling the first tx, `state.refund` is 0 by default, + // 2.2 after tracing, the state is either committed in `core.ApplyTransaction`, or reverted, so the `state.refund` can be cleared, + // 2.3 when starting handling the following txs, `state.refund` comes as 0 + common.WithTimer(l2CommitTxTraceTimer, func() { + traces, err = env.traceEnv.GetBlockTrace( + types.NewBlockWithHeader(env.header).WithBody([]*types.Transaction{tx}, nil), + ) + }) + common.WithTimer(l2CommitTxTraceStateRevertTimer, func() { + // `env.traceEnv.State` & `env.state` share a same pointer to the state, so only need to revert `env.state` + // revert to snapshot for calling `core.ApplyMessage` again, (both `traceEnv.GetBlockTrace` & `core.ApplyTransaction` will call `core.ApplyMessage`) + env.state.RevertToSnapshot(snap) + }) + if err != nil { + return nil, nil, err + } + common.WithTimer(l2CommitTxCCCTimer, func() { + 
accRows, err = miner.circuitCapacityChecker.ApplyTransaction(traces) + }) + if err != nil { + return nil, traces, err + } + log.Trace( + "Worker apply ccc for tx result", + "id", miner.circuitCapacityChecker.ID, + "txHash", tx.Hash().Hex(), + "accRows", accRows, + ) + } + + // create new snapshot for `core.ApplyTransaction` + snap := env.state.Snapshot() + + var receipt *types.Receipt + common.WithTimer(l2CommitTxApplyTimer, func() { + receipt, err = core.ApplyTransaction(miner.chainConfig, miner.chain, &coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *miner.chain.GetVMConfig()) + }) + if err != nil { + env.state.RevertToSnapshot(snap) + + if accRows != nil { + // At this point, we have called CCC but the transaction failed in `ApplyTransaction`. + // If we skip this tx and continue to pack more, the next tx will likely fail with + // `circuitcapacitychecker.ErrUnknown`. However, at this point we cannot decide whether + // we should seal the block or skip the tx and continue, so we simply return the error. 
+ log.Error( + "GetBlockTrace passed but ApplyTransaction failed, ccc is left in inconsistent state", + "blockNumber", env.header.Number, + "txHash", tx.Hash().Hex(), + "err", err, + ) + } + + return nil, traces, err + } + env.txs = append(env.txs, tx) + env.receipts = append(env.receipts, receipt) + env.accRows = accRows + + return receipt.Logs, traces, nil +} + +func (miner *Miner) commitTransactions(env *environment, txs *types.TransactionsByPriceAndNonce, coinbase common.Address, interrupt *int32) (error, bool, []*types.SkippedTransaction) { + defer func(t0 time.Time) { + l2CommitTxsTimer.Update(time.Since(t0)) + }(time.Now()) + + var circuitCapacityReached bool + + // Short circuit if current is nil + if env == nil { + return errors.New("no env found"), circuitCapacityReached, nil + } + + gasLimit := env.header.GasLimit + if env.gasPool == nil { + env.gasPool = new(core.GasPool).AddGas(gasLimit) + } + + var ( + coalescedLogs []*types.Log + loops int64 + skippedTxs = make([]*types.SkippedTransaction, 0) + ) + +loop: + for { + + loops++ + if interrupt != nil { + if signal := atomic.LoadInt32(interrupt); signal != commitInterruptNone { + return signalToErr(signal), circuitCapacityReached, skippedTxs + } + } + if env.gasPool.Gas() < params.TxGas { + log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas) + break + } + // Retrieve the next transaction and abort if all done + tx := txs.Peek() + if tx == nil { + break + } + + // If we have collected enough transactions then we're done + // Originally we only limit l2txs count, but now strictly limit total txs number. 
+ if !miner.chainConfig.Scroll.IsValidTxCount(env.tcount + 1) { + log.Trace("Transaction count limit reached", "have", env.tcount, "want", miner.chainConfig.Scroll.MaxTxPerBlock) + break + } + if tx.IsL1MessageTx() && !env.isSimulate && tx.AsL1MessageTx().QueueIndex != env.nextL1MsgIndex { + log.Error( + "Unexpected L1 message queue index in worker", + "expected", env.nextL1MsgIndex, + "got", tx.AsL1MessageTx().QueueIndex, + ) + break + } + if !tx.IsL1MessageTx() && !miner.chainConfig.Scroll.IsValidBlockSize(env.blockSize+tx.Size()) { + log.Trace("Block size limit reached", "have", env.blockSize, "want", miner.chainConfig.Scroll.MaxTxPayloadBytesPerBlock, "tx", tx.Size()) + txs.Pop() // skip transactions from this account + continue + } + // Error may be ignored here. The error has already been checked + // during transaction acceptance in the transaction pool. + // + // We use the eip155 signer regardless of the current hf. + from, _ := types.Sender(env.signer, tx) + // Check whether the tx is replay protected. If we're not in the EIP155 hf + // phase, start ignoring the sender until we do. + if tx.Protected() && !miner.chainConfig.IsEIP155(env.header.Number) { + log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", miner.chainConfig.EIP155Block) + + txs.Pop() + continue + } + // Start executing the transaction + env.state.SetTxContext(tx.Hash(), env.tcount) + + logs, traces, err := miner.commitTransaction(env, tx, coinbase) + switch { + case errors.Is(err, core.ErrGasLimitReached) && tx.IsL1MessageTx(): + // If this block already contains some L1 messages, + // terminate here and try again in the next block. + if env.l1TxCount > 0 { + break loop + } + // A single L1 message leads to out-of-gas. Skip it. 
+ queueIndex := tx.AsL1MessageTx().QueueIndex + log.Info("Skipping L1 message", "queueIndex", queueIndex, "tx", tx.Hash().String(), "block", env.header.Number, "reason", "gas limit exceeded") + env.nextL1MsgIndex = queueIndex + 1 + txs.Shift() + + var storeTraces *types.BlockTrace + if miner.config.StoreSkippedTxTraces { + storeTraces = traces + } + skippedTxs = append(skippedTxs, &types.SkippedTransaction{ + Tx: *tx, + Reason: "gas limit exceeded", + Trace: storeTraces, + }) + l1TxGasLimitExceededCounter.Inc(1) + + case errors.Is(err, core.ErrGasLimitReached): + // Pop the current out-of-gas transaction without shifting in the next from the account + log.Trace("Gas limit exceeded for current block", "sender", from) + txs.Pop() + + case errors.Is(err, core.ErrNonceTooLow): + // New head notification data race between the transaction pool and miner, shift + log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) + txs.Shift() + + case errors.Is(err, core.ErrNonceTooHigh): + // Reorg notification data race between the transaction pool and miner, skip account = + log.Trace("Skipping account with hight nonce", "sender", from, "nonce", tx.Nonce()) + txs.Pop() + + case errors.Is(err, nil): + // Everything ok, collect the logs and shift in the next transaction from the same account + coalescedLogs = append(coalescedLogs, logs...) 
+ env.tcount++ + txs.Shift() + + if tx.IsL1MessageTx() { + queueIndex := tx.AsL1MessageTx().QueueIndex + log.Debug("Including L1 message", "queueIndex", queueIndex, "tx", tx.Hash().String()) + env.l1TxCount++ + env.nextL1MsgIndex = queueIndex + 1 + } else { + // only consider block size limit for L2 transactions + env.blockSize += tx.Size() + } + + case errors.Is(err, core.ErrTxTypeNotSupported): + // Pop the unsupported transaction without shifting in the next from the account + log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type()) + txs.Pop() + + // Circuit capacity check + case errors.Is(err, circuitcapacitychecker.ErrBlockRowConsumptionOverflow): + if env.tcount >= 1 { + // 1. Circuit capacity limit reached in a block, and it's not the first tx: + // don't pop or shift, just quit the loop immediately; + // though it might still be possible to add some "smaller" txs, + // but it's a trade-off between tracing overhead & block usage rate + log.Trace("Circuit capacity limit reached in a block", "acc_rows", env.accRows, "tx", tx.Hash().String()) + log.Info("Skipping message", "tx", tx.Hash().String(), "block", env.header.Number, "reason", "accumulated row consumption overflow") + + if !tx.IsL1MessageTx() { + // Prioritize transaction for the next block. + // If there are no new L1 messages, this transaction will be the 1st transaction in the next block, + // at which point we can definitively decide if we should skip it or not. + log.Debug("Prioritizing transaction for next block", "blockNumber", env.header.Number.Uint64()+1, "tx", tx.Hash().String()) + miner.prioritizedTx = &prioritizedTransaction{ + blockNumber: env.header.Number.Uint64() + 1, + tx: tx, + } + } + + circuitCapacityReached = true + break loop + } else { + // 2. 
Circuit capacity limit reached in a block, and it's the first tx: skip the tx + log.Trace("Circuit capacity limit reached for a single tx", "tx", tx.Hash().String()) + + if tx.IsL1MessageTx() { + // Skip L1 message transaction, + // shift to the next from the account because we shouldn't skip the entire txs from the same account + txs.Shift() + + queueIndex := tx.AsL1MessageTx().QueueIndex + log.Info("Skipping L1 message", "queueIndex", queueIndex, "tx", tx.Hash().String(), "block", env.header.Number, "reason", "row consumption overflow") + env.nextL1MsgIndex = queueIndex + 1 + l1TxRowConsumptionOverflowCounter.Inc(1) + } else { + // Skip L2 transaction and all other transactions from the same sender account + log.Info("Skipping L2 message", "tx", tx.Hash().String(), "block", env.header.Number, "reason", "first tx row consumption overflow") + txs.Pop() + miner.txpool.RemoveTx(tx.Hash(), true) + l2TxRowConsumptionOverflowCounter.Inc(1) + } + + // Reset ccc so that we can process other transactions for this block + miner.resetCCC(env.skipCCC) + log.Trace("Worker reset ccc", "id", miner.circuitCapacityChecker.ID) + circuitCapacityReached = false + + var storeTraces *types.BlockTrace + if miner.config.StoreSkippedTxTraces { + storeTraces = traces + } + skippedTxs = append(skippedTxs, &types.SkippedTransaction{ + Tx: *tx, + Reason: "row consumption overflow", + Trace: storeTraces, + }) + } + + case errors.Is(err, circuitcapacitychecker.ErrUnknown) && tx.IsL1MessageTx(): + // Circuit capacity check: unknown circuit capacity checker error for L1MessageTx, + // shift to the next from the account because we shouldn't skip the entire txs from the same account + queueIndex := tx.AsL1MessageTx().QueueIndex + log.Trace("Unknown circuit capacity checker error for L1MessageTx", "tx", tx.Hash().String(), "queueIndex", queueIndex) + log.Info("Skipping L1 message", "queueIndex", queueIndex, "tx", tx.Hash().String(), "block", env.header.Number, "reason", "unknown row consumption 
error") + env.nextL1MsgIndex = queueIndex + 1 + // TODO: propagate more info about the error from CCC + var storeTraces *types.BlockTrace + if miner.config.StoreSkippedTxTraces { + storeTraces = traces + } + skippedTxs = append(skippedTxs, &types.SkippedTransaction{ + Tx: *tx, + Reason: "unknown circuit capacity checker error", + Trace: storeTraces, + }) + l1TxCccUnknownErrCounter.Inc(1) + + // Normally we would do `txs.Shift()` here. + // However, after `ErrUnknown`, ccc might remain in an + // inconsistent state, so we cannot pack more transactions. + circuitCapacityReached = true + miner.checkCurrentTxNumWithCCC(env.tcount) + break loop + + case errors.Is(err, circuitcapacitychecker.ErrUnknown) && !tx.IsL1MessageTx(): + // Circuit capacity check: unknown circuit capacity checker error for L2MessageTx, skip the account + log.Trace("Unknown circuit capacity checker error for L2MessageTx", "tx", tx.Hash().String()) + log.Info("Skipping L2 message", "tx", tx.Hash().String(), "block", env.header.Number, "reason", "unknown row consumption error") + // TODO: propagate more info about the error from CCC + if miner.config.StoreSkippedTxTraces { + rawdb.WriteSkippedTransaction(miner.chainDB, tx, traces, "unknown circuit capacity checker error", env.header.Number.Uint64(), nil) + } else { + rawdb.WriteSkippedTransaction(miner.chainDB, tx, nil, "unknown circuit capacity checker error", env.header.Number.Uint64(), nil) + } + l2TxCccUnknownErrCounter.Inc(1) + + // Normally we would do `txs.Pop()` here. + // However, after `ErrUnknown`, ccc might remain in an + // inconsistent state, so we cannot pack more transactions. 
+ miner.txpool.RemoveTx(tx.Hash(), true) + circuitCapacityReached = true + miner.checkCurrentTxNumWithCCC(env.tcount) + break loop + + case errors.Is(err, core.ErrInsufficientFunds) || errors.Is(errors.Unwrap(err), core.ErrInsufficientFunds): + log.Trace("Skipping tx with insufficient funds", "sender", from, "tx", tx.Hash().String()) + txs.Pop() + miner.txpool.RemoveTx(tx.Hash(), true) + + default: + // Strange error, discard the transaction and get the next in line (note, the + // nonce-too-high clause will prevent us from executing in vain). + log.Debug("Transaction failed, account skipped", "hash", tx.Hash().String(), "err", err) + if tx.IsL1MessageTx() { + queueIndex := tx.AsL1MessageTx().QueueIndex + log.Info("Skipping L1 message", "queueIndex", queueIndex, "tx", tx.Hash().String(), "block", env.header.Number, "reason", "strange error", "err", err) + env.nextL1MsgIndex = queueIndex + 1 + + var storeTraces *types.BlockTrace + if miner.config.StoreSkippedTxTraces { + storeTraces = traces + } + skippedTxs = append(skippedTxs, &types.SkippedTransaction{ + Tx: *tx, + Reason: fmt.Sprintf("strange error: %v", err), + Trace: storeTraces, + }) + l1TxStrangeErrCounter.Inc(1) + } + txs.Shift() + } + } + + return nil, circuitCapacityReached, skippedTxs +} + +// fillTransactions retrieves the pending transactions from the txpool and fills them +// into the given sealing block. The transaction selection and ordering strategy can +// be customized with the plugin in the future. 
+func (miner *Miner) fillTransactions(env *environment, l1Transactions types.Transactions, interrupt *int32) (error, []*types.SkippedTransaction) { + var ( + err error + circuitCapacityReached bool + skippedTxs []*types.SkippedTransaction + ) + + defer func(env *environment) { + if env.header != nil { + env.header.NextL1MsgIndex = env.nextL1MsgIndex + } + }(env) + + if len(l1Transactions) > 0 { + l1Txs := make(map[common.Address]types.Transactions) + for _, tx := range l1Transactions { + sender, _ := types.Sender(env.signer, tx) + senderTxs, ok := l1Txs[sender] + if ok { + senderTxs = append(senderTxs, tx) + l1Txs[sender] = senderTxs + } else { + l1Txs[sender] = types.Transactions{tx} + } + } + txs := types.NewTransactionsByPriceAndNonce(env.signer, l1Txs, env.header.BaseFee) + err, circuitCapacityReached, skippedTxs = miner.commitTransactions(env, txs, env.header.Coinbase, interrupt) + if err != nil || circuitCapacityReached { + return err, skippedTxs + } + } + + // Giving up involving L2 transactions if it is simulation for L1Messages + if env.isSimulate { + return err, skippedTxs + } + + // Split the pending transactions into locals and remotes + // Fill the block with all available pending transactions. 
+ pending := miner.txpool.PendingWithMax(false, miner.config.MaxAccountsNum) + localTxs, remoteTxs := make(map[common.Address]types.Transactions), pending + for _, account := range miner.txpool.Locals() { + if txs := remoteTxs[account]; len(txs) > 0 { + delete(remoteTxs, account) + localTxs[account] = txs + } + } + + if miner.prioritizedTx != nil && env.header.Number.Uint64() > miner.prioritizedTx.blockNumber { + miner.prioritizedTx = nil + } + if miner.prioritizedTx != nil && env.header.Number.Uint64() == miner.prioritizedTx.blockNumber { + tx := miner.prioritizedTx.tx + from, _ := types.Sender(env.signer, tx) // error already checked before + txList := map[common.Address]types.Transactions{from: []*types.Transaction{tx}} + txs := types.NewTransactionsByPriceAndNonce(env.signer, txList, env.header.BaseFee) + err, circuitCapacityReached, _ = miner.commitTransactions(env, txs, env.header.Coinbase, interrupt) + if err != nil || circuitCapacityReached { + return err, skippedTxs + } + } + + if len(localTxs) > 0 { + txs := types.NewTransactionsByPriceAndNonce(env.signer, localTxs, env.header.BaseFee) + err, circuitCapacityReached, _ = miner.commitTransactions(env, txs, env.header.Coinbase, interrupt) + if err != nil || circuitCapacityReached { + return err, skippedTxs + } + } + if len(remoteTxs) > 0 { + txs := types.NewTransactionsByPriceAndNonce(env.signer, remoteTxs, env.header.BaseFee) + err, _, _ = miner.commitTransactions(env, txs, env.header.Coinbase, nil) // always return false + } + + return err, skippedTxs +} + +func (miner *Miner) resetCCC(skip bool) { + if !skip { + miner.circuitCapacityChecker.Reset() + } +} + +func (miner *Miner) checkCurrentTxNumWithCCC(expected int) { + match, got, err := miner.circuitCapacityChecker.CheckTxNum(expected) + if err != nil { + log.Error("failed to CheckTxNum in ccc", "err", err) + return + } + if !match { + log.Error("tx count in miner is different with CCC", "current env tcount", expected, "got", got) + } +} + +func 
withTimer(timer metrics.Timer, f func()) {
+	if metrics.Enabled {
+		timer.Time(f)
+	} else {
+		f()
+	}
+}
+
+// signalToErr converts the interruption signal to a concrete error type for return.
+// The given signal must be a valid interruption signal.
+func signalToErr(signal int32) error {
+	switch signal {
+	case commitInterruptNewHead:
+		return errBlockInterruptedByNewHead
+	case commitInterruptResubmit:
+		return errBlockInterruptedByRecommit
+	case commitInterruptTimeout:
+		return errBlockInterruptedByTimeout
+	default:
+		panic(fmt.Errorf("undefined signal %d", signal))
+	}
+}
diff --git a/miner/pending.go b/miner/pending.go
new file mode 100644
index 000000000..7fc80d193
--- /dev/null
+++ b/miner/pending.go
@@ -0,0 +1,67 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package miner
+
+import (
+	"sync"
+	"time"
+
+	"github.com/scroll-tech/go-ethereum/common"
+)
+
+// pendingTTL indicates the period of time a generated pending block should
+// exist to serve RPC requests before being discarded if the parent block
+// has not changed yet.
+const pendingTTL = 2 * time.Second
+
+// pending wraps a pending block with additional metadata.
+type pending struct { + created time.Time + parentHash common.Hash + result *NewBlockResult + lock sync.Mutex +} + +// resolve retrieves the cached pending result if it's available. Nothing will be +// returned if the parentHash is not matched or the result is already too old. +// +// Note, don't modify the returned payload result. +func (p *pending) resolve(parentHash common.Hash) *NewBlockResult { + p.lock.Lock() + defer p.lock.Unlock() + + if p.result == nil { + return nil + } + if parentHash != p.parentHash { + return nil + } + if time.Since(p.created) > pendingTTL { + return nil + } + return p.result +} + +// update refreshes the cached pending block with newly created one. +func (p *pending) update(parent common.Hash, result *NewBlockResult) { + p.lock.Lock() + defer p.lock.Unlock() + + p.parentHash = parent + p.result = result + p.created = time.Now() +} diff --git a/miner/stress/1559/main.go b/miner/stress/1559/main.go deleted file mode 100644 index b3772102d..000000000 --- a/miner/stress/1559/main.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2021 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// This file contains a miner stress test for eip 1559. 
-package main - -import ( - "crypto/ecdsa" - "io/ioutil" - "math/big" - "math/rand" - "os" - "os/signal" - "time" - - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/common/fdlimit" - "github.com/scroll-tech/go-ethereum/consensus/ethash" - "github.com/scroll-tech/go-ethereum/core" - "github.com/scroll-tech/go-ethereum/core/types" - "github.com/scroll-tech/go-ethereum/crypto" - "github.com/scroll-tech/go-ethereum/eth" - "github.com/scroll-tech/go-ethereum/eth/downloader" - "github.com/scroll-tech/go-ethereum/eth/ethconfig" - "github.com/scroll-tech/go-ethereum/log" - "github.com/scroll-tech/go-ethereum/miner" - "github.com/scroll-tech/go-ethereum/node" - "github.com/scroll-tech/go-ethereum/p2p" - "github.com/scroll-tech/go-ethereum/p2p/enode" - "github.com/scroll-tech/go-ethereum/params" -) - -var ( - londonBlock = big.NewInt(30) // Predefined london fork block for activating eip 1559. -) - -func main() { - log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - fdlimit.Raise(2048) - - // Generate a batch of accounts to seal and fund with - faucets := make([]*ecdsa.PrivateKey, 128) - for i := 0; i < len(faucets); i++ { - faucets[i], _ = crypto.GenerateKey() - } - // Pre-generate the ethash mining DAG so we don't race - ethash.MakeDataset(1, ethconfig.Defaults.Ethash.DatasetDir) - - // Create an Ethash network based off of the Ropsten config - genesis := makeGenesis(faucets) - - // Handle interrupts. 
- interruptCh := make(chan os.Signal, 5) - signal.Notify(interruptCh, os.Interrupt) - - var ( - stacks []*node.Node - nodes []*eth.Ethereum - enodes []*enode.Node - ) - for i := 0; i < 4; i++ { - // Start the node and wait until it's up - stack, ethBackend, err := makeMiner(genesis) - if err != nil { - panic(err) - } - defer stack.Close() - - for stack.Server().NodeInfo().Ports.Listener == 0 { - time.Sleep(250 * time.Millisecond) - } - // Connect the node to all the previous ones - for _, n := range enodes { - stack.Server().AddPeer(n) - } - // Start tracking the node and its enode - nodes = append(nodes, ethBackend) - enodes = append(enodes, stack.Server().Self()) - } - - // Iterate over all the nodes and start mining - time.Sleep(3 * time.Second) - for _, node := range nodes { - if err := node.StartMining(1); err != nil { - panic(err) - } - } - time.Sleep(3 * time.Second) - - // Start injecting transactions from the faucets like crazy - var ( - nonces = make([]uint64, len(faucets)) - - // The signer activates the 1559 features even before the fork, - // so the new 1559 txs can be created with this signer. - signer = types.LatestSignerForChainID(genesis.Config.ChainID) - ) - for { - // Stop when interrupted. - select { - case <-interruptCh: - for _, node := range stacks { - node.Close() - } - return - default: - } - - // Pick a random mining node - index := rand.Intn(len(faucets)) - backend := nodes[index%len(nodes)] - - headHeader := backend.BlockChain().CurrentHeader() - baseFee := headHeader.BaseFee - - // Create a self transaction and inject into the pool. The legacy - // and 1559 transactions can all be created by random even if the - // fork is not happened. 
- tx := makeTransaction(nonces[index], faucets[index], signer, baseFee) - if err := backend.TxPool().AddLocal(tx); err != nil { - continue - } - nonces[index]++ - - // Wait if we're too saturated - if pend, _ := backend.TxPool().Stats(); pend > 4192 { - time.Sleep(100 * time.Millisecond) - } - - // Wait if the basefee is raised too fast - if baseFee != nil && baseFee.Cmp(new(big.Int).Mul(big.NewInt(100), big.NewInt(params.GWei))) > 0 { - time.Sleep(500 * time.Millisecond) - } - } -} - -func makeTransaction(nonce uint64, privKey *ecdsa.PrivateKey, signer types.Signer, baseFee *big.Int) *types.Transaction { - // Generate legacy transaction - if rand.Intn(2) == 0 { - tx, err := types.SignTx(types.NewTransaction(nonce, crypto.PubkeyToAddress(privKey.PublicKey), new(big.Int), 21000, big.NewInt(100000000000+rand.Int63n(65536)), nil), signer, privKey) - if err != nil { - panic(err) - } - return tx - } - // Generate eip 1559 transaction - recipient := crypto.PubkeyToAddress(privKey.PublicKey) - - // Feecap and feetip are limited to 32 bytes. Offer a sightly - // larger buffer for creating both valid and invalid transactions. - var buf = make([]byte, 32+5) - rand.Read(buf) - gasTipCap := new(big.Int).SetBytes(buf) - - // If the given base fee is nil(the 1559 is still not available), - // generate a fake base fee in order to create 1559 tx forcibly. - if baseFee == nil { - baseFee = new(big.Int).SetInt64(int64(rand.Int31())) - } - // Generate the feecap, 75% valid feecap and 25% unguaranted. 
- var gasFeeCap *big.Int - if rand.Intn(4) == 0 { - rand.Read(buf) - gasFeeCap = new(big.Int).SetBytes(buf) - } else { - gasFeeCap = new(big.Int).Add(baseFee, gasTipCap) - } - return types.MustSignNewTx(privKey, signer, &types.DynamicFeeTx{ - ChainID: signer.ChainID(), - Nonce: nonce, - GasTipCap: gasTipCap, - GasFeeCap: gasFeeCap, - Gas: 21000, - To: &recipient, - Value: big.NewInt(100), - Data: nil, - AccessList: nil, - }) -} - -// makeGenesis creates a custom Ethash genesis block based on some pre-defined -// faucet accounts. -func makeGenesis(faucets []*ecdsa.PrivateKey) *core.Genesis { - genesis := core.DefaultRopstenGenesisBlock() - - genesis.Config = params.AllEthashProtocolChanges - genesis.Config.LondonBlock = londonBlock - genesis.Difficulty = params.MinimumDifficulty - - // Small gaslimit for easier basefee moving testing. - genesis.GasLimit = 8_000_000 - - genesis.Config.ChainID = big.NewInt(18) - genesis.Config.EIP150Hash = common.Hash{} - - genesis.Alloc = core.GenesisAlloc{} - for _, faucet := range faucets { - genesis.Alloc[crypto.PubkeyToAddress(faucet.PublicKey)] = core.GenesisAccount{ - Balance: new(big.Int).Exp(big.NewInt(2), big.NewInt(128), nil), - } - } - if londonBlock.Sign() == 0 { - log.Info("Enabled the eip 1559 by default") - } else { - log.Info("Registered the london fork", "number", londonBlock) - } - return genesis -} - -func makeMiner(genesis *core.Genesis) (*node.Node, *eth.Ethereum, error) { - // Define the basic configurations for the Ethereum node - datadir, _ := ioutil.TempDir("", "") - - config := &node.Config{ - Name: "geth", - Version: params.Version, - DataDir: datadir, - P2P: p2p.Config{ - ListenAddr: "0.0.0.0:0", - NoDiscovery: true, - MaxPeers: 25, - }, - UseLightweightKDF: true, - } - // Create the node and configure a full Ethereum node on it - stack, err := node.New(config) - if err != nil { - return nil, nil, err - } - ethBackend, err := eth.New(stack, ðconfig.Config{ - Genesis: genesis, - NetworkId: 
genesis.Config.ChainID.Uint64(), - SyncMode: downloader.FullSync, - DatabaseCache: 256, - DatabaseHandles: 256, - TxPool: core.DefaultTxPoolConfig, - GPO: ethconfig.Defaults.GPO, - Ethash: ethconfig.Defaults.Ethash, - Miner: miner.Config{ - Etherbase: common.Address{1}, - GasCeil: genesis.GasLimit * 11 / 10, - GasPrice: big.NewInt(1), - Recommit: time.Second, - }, - }) - if err != nil { - return nil, nil, err - } - err = stack.Start() - return stack, ethBackend, err -} diff --git a/miner/stress/clique/main.go b/miner/stress/clique/main.go deleted file mode 100644 index fd5b96962..000000000 --- a/miner/stress/clique/main.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// This file contains a miner stress test based on the Clique consensus engine. 
-package main - -import ( - "bytes" - "crypto/ecdsa" - "io/ioutil" - "math/big" - "math/rand" - "os" - "os/signal" - "time" - - "github.com/scroll-tech/go-ethereum/accounts/keystore" - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/common/fdlimit" - "github.com/scroll-tech/go-ethereum/core" - "github.com/scroll-tech/go-ethereum/core/types" - "github.com/scroll-tech/go-ethereum/crypto" - "github.com/scroll-tech/go-ethereum/eth" - "github.com/scroll-tech/go-ethereum/eth/downloader" - "github.com/scroll-tech/go-ethereum/eth/ethconfig" - "github.com/scroll-tech/go-ethereum/log" - "github.com/scroll-tech/go-ethereum/miner" - "github.com/scroll-tech/go-ethereum/node" - "github.com/scroll-tech/go-ethereum/p2p" - "github.com/scroll-tech/go-ethereum/p2p/enode" - "github.com/scroll-tech/go-ethereum/params" -) - -func main() { - log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - fdlimit.Raise(2048) - - // Generate a batch of accounts to seal and fund with - faucets := make([]*ecdsa.PrivateKey, 128) - for i := 0; i < len(faucets); i++ { - faucets[i], _ = crypto.GenerateKey() - } - sealers := make([]*ecdsa.PrivateKey, 4) - for i := 0; i < len(sealers); i++ { - sealers[i], _ = crypto.GenerateKey() - } - // Create a Clique network based off of the Rinkeby config - genesis := makeGenesis(faucets, sealers) - - // Handle interrupts. 
- interruptCh := make(chan os.Signal, 5) - signal.Notify(interruptCh, os.Interrupt) - - var ( - stacks []*node.Node - nodes []*eth.Ethereum - enodes []*enode.Node - ) - for _, sealer := range sealers { - // Start the node and wait until it's up - stack, ethBackend, err := makeSealer(genesis) - if err != nil { - panic(err) - } - defer stack.Close() - - for stack.Server().NodeInfo().Ports.Listener == 0 { - time.Sleep(250 * time.Millisecond) - } - // Connect the node to all the previous ones - for _, n := range enodes { - stack.Server().AddPeer(n) - } - // Start tracking the node and its enode - stacks = append(stacks, stack) - nodes = append(nodes, ethBackend) - enodes = append(enodes, stack.Server().Self()) - - // Inject the signer key and start sealing with it - ks := keystore.NewKeyStore(stack.KeyStoreDir(), keystore.LightScryptN, keystore.LightScryptP) - signer, err := ks.ImportECDSA(sealer, "") - if err != nil { - panic(err) - } - if err := ks.Unlock(signer, ""); err != nil { - panic(err) - } - stack.AccountManager().AddBackend(ks) - } - - // Iterate over all the nodes and start signing on them - time.Sleep(3 * time.Second) - for _, node := range nodes { - if err := node.StartMining(1); err != nil { - panic(err) - } - } - time.Sleep(3 * time.Second) - - // Start injecting transactions from the faucet like crazy - nonces := make([]uint64, len(faucets)) - for { - // Stop when interrupted. 
- select { - case <-interruptCh: - for _, node := range stacks { - node.Close() - } - return - default: - } - - // Pick a random signer node - index := rand.Intn(len(faucets)) - backend := nodes[index%len(nodes)] - - // Create a self transaction and inject into the pool - tx, err := types.SignTx(types.NewTransaction(nonces[index], crypto.PubkeyToAddress(faucets[index].PublicKey), new(big.Int), 21000, big.NewInt(100000000000), nil), types.HomesteadSigner{}, faucets[index]) - if err != nil { - panic(err) - } - if err := backend.TxPool().AddLocal(tx); err != nil { - panic(err) - } - nonces[index]++ - - // Wait if we're too saturated - if pend, _ := backend.TxPool().Stats(); pend > 2048 { - time.Sleep(100 * time.Millisecond) - } - } -} - -// makeGenesis creates a custom Clique genesis block based on some pre-defined -// signer and faucet accounts. -func makeGenesis(faucets []*ecdsa.PrivateKey, sealers []*ecdsa.PrivateKey) *core.Genesis { - // Create a Clique network based off of the Rinkeby config - genesis := core.DefaultRinkebyGenesisBlock() - genesis.GasLimit = 25000000 - - genesis.Config.ChainID = big.NewInt(18) - genesis.Config.Clique.Period = 1 - genesis.Config.EIP150Hash = common.Hash{} - - genesis.Alloc = core.GenesisAlloc{} - for _, faucet := range faucets { - genesis.Alloc[crypto.PubkeyToAddress(faucet.PublicKey)] = core.GenesisAccount{ - Balance: new(big.Int).Exp(big.NewInt(2), big.NewInt(128), nil), - } - } - // Sort the signers and embed into the extra-data section - signers := make([]common.Address, len(sealers)) - for i, sealer := range sealers { - signers[i] = crypto.PubkeyToAddress(sealer.PublicKey) - } - for i := 0; i < len(signers); i++ { - for j := i + 1; j < len(signers); j++ { - if bytes.Compare(signers[i][:], signers[j][:]) > 0 { - signers[i], signers[j] = signers[j], signers[i] - } - } - } - genesis.ExtraData = make([]byte, 32+len(signers)*common.AddressLength+65) - for i, signer := range signers { - 
copy(genesis.ExtraData[32+i*common.AddressLength:], signer[:]) - } - // Return the genesis block for initialization - return genesis -} - -func makeSealer(genesis *core.Genesis) (*node.Node, *eth.Ethereum, error) { - // Define the basic configurations for the Ethereum node - datadir, _ := ioutil.TempDir("", "") - - config := &node.Config{ - Name: "geth", - Version: params.Version, - DataDir: datadir, - P2P: p2p.Config{ - ListenAddr: "0.0.0.0:0", - NoDiscovery: true, - MaxPeers: 25, - }, - } - // Start the node and configure a full Ethereum node on it - stack, err := node.New(config) - if err != nil { - return nil, nil, err - } - // Create and register the backend - ethBackend, err := eth.New(stack, ðconfig.Config{ - Genesis: genesis, - NetworkId: genesis.Config.ChainID.Uint64(), - SyncMode: downloader.FullSync, - DatabaseCache: 256, - DatabaseHandles: 256, - TxPool: core.DefaultTxPoolConfig, - GPO: ethconfig.Defaults.GPO, - Miner: miner.Config{ - GasCeil: genesis.GasLimit * 11 / 10, - GasPrice: big.NewInt(1), - Recommit: time.Second, - }, - }) - if err != nil { - return nil, nil, err - } - - err = stack.Start() - return stack, ethBackend, err -} diff --git a/miner/stress/ethash/main.go b/miner/stress/ethash/main.go deleted file mode 100644 index b8a9d4c06..000000000 --- a/miner/stress/ethash/main.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// This file contains a miner stress test based on the Ethash consensus engine. -package main - -import ( - "crypto/ecdsa" - "io/ioutil" - "math/big" - "math/rand" - "os" - "os/signal" - "time" - - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/common/fdlimit" - "github.com/scroll-tech/go-ethereum/consensus/ethash" - "github.com/scroll-tech/go-ethereum/core" - "github.com/scroll-tech/go-ethereum/core/types" - "github.com/scroll-tech/go-ethereum/crypto" - "github.com/scroll-tech/go-ethereum/eth" - "github.com/scroll-tech/go-ethereum/eth/downloader" - "github.com/scroll-tech/go-ethereum/eth/ethconfig" - "github.com/scroll-tech/go-ethereum/log" - "github.com/scroll-tech/go-ethereum/miner" - "github.com/scroll-tech/go-ethereum/node" - "github.com/scroll-tech/go-ethereum/p2p" - "github.com/scroll-tech/go-ethereum/p2p/enode" - "github.com/scroll-tech/go-ethereum/params" -) - -func main() { - log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - fdlimit.Raise(2048) - - // Generate a batch of accounts to seal and fund with - faucets := make([]*ecdsa.PrivateKey, 128) - for i := 0; i < len(faucets); i++ { - faucets[i], _ = crypto.GenerateKey() - } - // Pre-generate the ethash mining DAG so we don't race - ethash.MakeDataset(1, ethconfig.Defaults.Ethash.DatasetDir) - - // Create an Ethash network based off of the Ropsten config - genesis := makeGenesis(faucets) - - // Handle interrupts. 
- interruptCh := make(chan os.Signal, 5) - signal.Notify(interruptCh, os.Interrupt) - - var ( - stacks []*node.Node - nodes []*eth.Ethereum - enodes []*enode.Node - ) - for i := 0; i < 4; i++ { - // Start the node and wait until it's up - stack, ethBackend, err := makeMiner(genesis) - if err != nil { - panic(err) - } - defer stack.Close() - - for stack.Server().NodeInfo().Ports.Listener == 0 { - time.Sleep(250 * time.Millisecond) - } - // Connect the node to all the previous ones - for _, n := range enodes { - stack.Server().AddPeer(n) - } - // Start tracking the node and its enode - stacks = append(stacks, stack) - nodes = append(nodes, ethBackend) - enodes = append(enodes, stack.Server().Self()) - } - - // Iterate over all the nodes and start mining - time.Sleep(3 * time.Second) - for _, node := range nodes { - if err := node.StartMining(1); err != nil { - panic(err) - } - } - time.Sleep(3 * time.Second) - - // Start injecting transactions from the faucets like crazy - nonces := make([]uint64, len(faucets)) - for { - // Stop when interrupted. - select { - case <-interruptCh: - for _, node := range stacks { - node.Close() - } - return - default: - } - - // Pick a random mining node - index := rand.Intn(len(faucets)) - backend := nodes[index%len(nodes)] - - // Create a self transaction and inject into the pool - tx, err := types.SignTx(types.NewTransaction(nonces[index], crypto.PubkeyToAddress(faucets[index].PublicKey), new(big.Int), 21000, big.NewInt(100000000000+rand.Int63n(65536)), nil), types.HomesteadSigner{}, faucets[index]) - if err != nil { - panic(err) - } - if err := backend.TxPool().AddLocal(tx); err != nil { - panic(err) - } - nonces[index]++ - - // Wait if we're too saturated - if pend, _ := backend.TxPool().Stats(); pend > 2048 { - time.Sleep(100 * time.Millisecond) - } - } -} - -// makeGenesis creates a custom Ethash genesis block based on some pre-defined -// faucet accounts. 
-func makeGenesis(faucets []*ecdsa.PrivateKey) *core.Genesis { - genesis := core.DefaultRopstenGenesisBlock() - genesis.Difficulty = params.MinimumDifficulty - genesis.GasLimit = 25000000 - - genesis.Config.ChainID = big.NewInt(18) - genesis.Config.EIP150Hash = common.Hash{} - - genesis.Alloc = core.GenesisAlloc{} - for _, faucet := range faucets { - genesis.Alloc[crypto.PubkeyToAddress(faucet.PublicKey)] = core.GenesisAccount{ - Balance: new(big.Int).Exp(big.NewInt(2), big.NewInt(128), nil), - } - } - return genesis -} - -func makeMiner(genesis *core.Genesis) (*node.Node, *eth.Ethereum, error) { - // Define the basic configurations for the Ethereum node - datadir, _ := ioutil.TempDir("", "") - - config := &node.Config{ - Name: "geth", - Version: params.Version, - DataDir: datadir, - P2P: p2p.Config{ - ListenAddr: "0.0.0.0:0", - NoDiscovery: true, - MaxPeers: 25, - }, - UseLightweightKDF: true, - } - // Create the node and configure a full Ethereum node on it - stack, err := node.New(config) - if err != nil { - return nil, nil, err - } - ethBackend, err := eth.New(stack, ðconfig.Config{ - Genesis: genesis, - NetworkId: genesis.Config.ChainID.Uint64(), - SyncMode: downloader.FullSync, - DatabaseCache: 256, - DatabaseHandles: 256, - TxPool: core.DefaultTxPoolConfig, - GPO: ethconfig.Defaults.GPO, - Ethash: ethconfig.Defaults.Ethash, - Miner: miner.Config{ - Etherbase: common.Address{1}, - GasCeil: genesis.GasLimit * 11 / 10, - GasPrice: big.NewInt(1), - Recommit: time.Second, - }, - }) - if err != nil { - return nil, nil, err - } - - err = stack.Start() - return stack, ethBackend, err -} diff --git a/miner/unconfirmed.go b/miner/unconfirmed.go deleted file mode 100644 index e1d9764fe..000000000 --- a/miner/unconfirmed.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package miner - -import ( - "container/ring" - "sync" - - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/core/types" - "github.com/scroll-tech/go-ethereum/log" -) - -// chainRetriever is used by the unconfirmed block set to verify whether a previously -// mined block is part of the canonical chain or not. -type chainRetriever interface { - // GetHeaderByNumber retrieves the canonical header associated with a block number. - GetHeaderByNumber(number uint64) *types.Header - - // GetBlockByNumber retrieves the canonical block associated with a block number. - GetBlockByNumber(number uint64) *types.Block -} - -// unconfirmedBlock is a small collection of metadata about a locally mined block -// that is placed into a unconfirmed set for canonical chain inclusion tracking. -type unconfirmedBlock struct { - index uint64 - hash common.Hash -} - -// unconfirmedBlocks implements a data structure to maintain locally mined blocks -// have not yet reached enough maturity to guarantee chain inclusion. It is -// used by the miner to provide logs to the user when a previously mined block -// has a high enough guarantee to not be reorged out of the canonical chain. 
-type unconfirmedBlocks struct { - chain chainRetriever // Blockchain to verify canonical status through - depth uint // Depth after which to discard previous blocks - blocks *ring.Ring // Block infos to allow canonical chain cross checks - lock sync.Mutex // Protects the fields from concurrent access -} - -// newUnconfirmedBlocks returns new data structure to track currently unconfirmed blocks. -func newUnconfirmedBlocks(chain chainRetriever, depth uint) *unconfirmedBlocks { - return &unconfirmedBlocks{ - chain: chain, - depth: depth, - } -} - -// Insert adds a new block to the set of unconfirmed ones. -func (set *unconfirmedBlocks) Insert(index uint64, hash common.Hash) { - // If a new block was mined locally, shift out any old enough blocks - set.Shift(index) - - // Create the new item as its own ring - item := ring.New(1) - item.Value = &unconfirmedBlock{ - index: index, - hash: hash, - } - // Set as the initial ring or append to the end - set.lock.Lock() - defer set.lock.Unlock() - - if set.blocks == nil { - set.blocks = item - } else { - set.blocks.Move(-1).Link(item) - } - // Display a log for the user to notify of a new mined block unconfirmed - log.Info("🔨 mined potential block", "number", index, "hash", hash) -} - -// Shift drops all unconfirmed blocks from the set which exceed the unconfirmed sets depth -// allowance, checking them against the canonical chain for inclusion or staleness -// report. 
-func (set *unconfirmedBlocks) Shift(height uint64) { - set.lock.Lock() - defer set.lock.Unlock() - - for set.blocks != nil { - // Retrieve the next unconfirmed block and abort if too fresh - next := set.blocks.Value.(*unconfirmedBlock) - if next.index+uint64(set.depth) > height { - break - } - // Block seems to exceed depth allowance, check for canonical status - header := set.chain.GetHeaderByNumber(next.index) - switch { - case header == nil: - log.Warn("Failed to retrieve header of mined block", "number", next.index, "hash", next.hash) - case header.Hash() == next.hash: - log.Info("🔗 block reached canonical chain", "number", next.index, "hash", next.hash) - default: - // Block is not canonical, check whether we have an uncle or a lost block - included := false - for number := next.index; !included && number < next.index+uint64(set.depth) && number <= height; number++ { - if block := set.chain.GetBlockByNumber(number); block != nil { - for _, uncle := range block.Uncles() { - if uncle.Hash() == next.hash { - included = true - break - } - } - } - } - if included { - log.Info("â‘‚ block became an uncle", "number", next.index, "hash", next.hash) - } else { - log.Info("😱 block lost", "number", next.index, "hash", next.hash) - } - } - // Drop the block out of the ring - if set.blocks.Value == set.blocks.Next().Value { - set.blocks = nil - } else { - set.blocks = set.blocks.Move(-1) - set.blocks.Unlink(1) - set.blocks = set.blocks.Move(1) - } - } -} diff --git a/miner/unconfirmed_test.go b/miner/unconfirmed_test.go deleted file mode 100644 index 1bdfe0cae..000000000 --- a/miner/unconfirmed_test.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package miner - -import ( - "testing" - - "github.com/scroll-tech/go-ethereum/core/types" -) - -// noopChainRetriever is an implementation of headerRetriever that always -// returns nil for any requested headers. -type noopChainRetriever struct{} - -func (r *noopChainRetriever) GetHeaderByNumber(number uint64) *types.Header { - return nil -} -func (r *noopChainRetriever) GetBlockByNumber(number uint64) *types.Block { - return nil -} - -// Tests that inserting blocks into the unconfirmed set accumulates them until -// the desired depth is reached, after which they begin to be dropped. 
-func TestUnconfirmedInsertBounds(t *testing.T) { - limit := uint(10) - - pool := newUnconfirmedBlocks(new(noopChainRetriever), limit) - for depth := uint64(0); depth < 2*uint64(limit); depth++ { - // Insert multiple blocks for the same level just to stress it - for i := 0; i < int(depth); i++ { - pool.Insert(depth, [32]byte{byte(depth), byte(i)}) - } - // Validate that no blocks below the depth allowance are left in - pool.blocks.Do(func(block interface{}) { - if block := block.(*unconfirmedBlock); block.index+uint64(limit) <= depth { - t.Errorf("depth %d: block %x not dropped", depth, block.hash) - } - }) - } -} - -// Tests that shifting blocks out of the unconfirmed set works both for normal -// cases as well as for corner cases such as empty sets, empty shifts or full -// shifts. -func TestUnconfirmedShifts(t *testing.T) { - // Create a pool with a few blocks on various depths - limit, start := uint(10), uint64(25) - - pool := newUnconfirmedBlocks(new(noopChainRetriever), limit) - for depth := start; depth < start+uint64(limit); depth++ { - pool.Insert(depth, [32]byte{byte(depth)}) - } - // Try to shift below the limit and ensure no blocks are dropped - pool.Shift(start + uint64(limit) - 1) - if n := pool.blocks.Len(); n != int(limit) { - t.Errorf("unconfirmed count mismatch: have %d, want %d", n, limit) - } - // Try to shift half the blocks out and verify remainder - pool.Shift(start + uint64(limit) - 1 + uint64(limit/2)) - if n := pool.blocks.Len(); n != int(limit)/2 { - t.Errorf("unconfirmed count mismatch: have %d, want %d", n, limit/2) - } - // Try to shift all the remaining blocks out and verify emptyness - pool.Shift(start + 2*uint64(limit)) - if n := pool.blocks.Len(); n != 0 { - t.Errorf("unconfirmed count mismatch: have %d, want %d", n, 0) - } - // Try to shift out from the empty set and make sure it doesn't break - pool.Shift(start + 3*uint64(limit)) - if n := pool.blocks.Len(); n != 0 { - t.Errorf("unconfirmed count mismatch: have %d, want %d", n, 
0) - } -} diff --git a/miner/worker.go b/miner/worker.go deleted file mode 100644 index cb4c7098e..000000000 --- a/miner/worker.go +++ /dev/null @@ -1,1636 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package miner - -import ( - "bytes" - "errors" - "fmt" - "math" - "math/big" - "sync" - "sync/atomic" - "time" - - mapset "github.com/deckarep/golang-set" - - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/consensus" - "github.com/scroll-tech/go-ethereum/consensus/misc" - "github.com/scroll-tech/go-ethereum/core" - "github.com/scroll-tech/go-ethereum/core/rawdb" - "github.com/scroll-tech/go-ethereum/core/state" - "github.com/scroll-tech/go-ethereum/core/types" - "github.com/scroll-tech/go-ethereum/event" - "github.com/scroll-tech/go-ethereum/log" - "github.com/scroll-tech/go-ethereum/metrics" - "github.com/scroll-tech/go-ethereum/params" - "github.com/scroll-tech/go-ethereum/rollup/circuitcapacitychecker" - 
"github.com/scroll-tech/go-ethereum/rollup/fees" - "github.com/scroll-tech/go-ethereum/rollup/tracing" - "github.com/scroll-tech/go-ethereum/trie" -) - -const ( - // resultQueueSize is the size of channel listening to sealing result. - resultQueueSize = 10 - - // txChanSize is the size of channel listening to NewTxsEvent. - // The number is referenced from the size of tx pool. - txChanSize = 4096 - - // chainHeadChanSize is the size of channel listening to ChainHeadEvent. - chainHeadChanSize = 10 - - // chainSideChanSize is the size of channel listening to ChainSideEvent. - chainSideChanSize = 10 - - // resubmitAdjustChanSize is the size of resubmitting interval adjustment channel. - resubmitAdjustChanSize = 10 - - // miningLogAtDepth is the number of confirmations before logging successful mining. - miningLogAtDepth = 7 - - // minRecommitInterval is the minimal time interval to recreate the mining block with - // any newly arrived transactions. - minRecommitInterval = 1 * time.Second - - // maxRecommitInterval is the maximum time interval to recreate the mining block with - // any newly arrived transactions. - maxRecommitInterval = 15 * time.Second - - // intervalAdjustRatio is the impact a single interval adjustment has on sealing work - // resubmitting interval. - intervalAdjustRatio = 0.1 - - // intervalAdjustBias is applied during the new resubmit interval calculation in favor of - // increasing upper limit or decreasing lower limit so that the limit can be reachable. - intervalAdjustBias = 200 * 1000.0 * 1000.0 - - // staleThreshold is the maximum depth of the acceptable stale block. 
- staleThreshold = 7 -) - -var ( - errBlockInterruptedByNewHead = errors.New("new head arrived while building block") - errBlockInterruptedByRecommit = errors.New("recommit interrupt while building block") - errBlockInterruptedByTimeout = errors.New("timeout while building block") - - // Metrics for the skipped txs - l1TxGasLimitExceededCounter = metrics.NewRegisteredCounter("miner/skipped_txs/l1/gas_limit_exceeded", nil) - l1TxRowConsumptionOverflowCounter = metrics.NewRegisteredCounter("miner/skipped_txs/l1/row_consumption_overflow", nil) - l2TxRowConsumptionOverflowCounter = metrics.NewRegisteredCounter("miner/skipped_txs/l2/row_consumption_overflow", nil) - l1TxCccUnknownErrCounter = metrics.NewRegisteredCounter("miner/skipped_txs/l1/ccc_unknown_err", nil) - l2TxCccUnknownErrCounter = metrics.NewRegisteredCounter("miner/skipped_txs/l2/ccc_unknown_err", nil) - l1TxStrangeErrCounter = metrics.NewRegisteredCounter("miner/skipped_txs/l1/strange_err", nil) - - l2CommitTxsTimer = metrics.NewRegisteredTimer("miner/commit/txs_all", nil) - l2CommitTxTimer = metrics.NewRegisteredTimer("miner/commit/tx_all", nil) - l2CommitTxFailedTimer = metrics.NewRegisteredTimer("miner/commit/tx_all_failed", nil) - l2CommitTxTraceTimer = metrics.NewRegisteredTimer("miner/commit/tx_trace", nil) - l2CommitTxTraceStateRevertTimer = metrics.NewRegisteredTimer("miner/commit/tx_trace_state_revert", nil) - l2CommitTxCCCTimer = metrics.NewRegisteredTimer("miner/commit/tx_ccc", nil) - l2CommitTxApplyTimer = metrics.NewRegisteredTimer("miner/commit/tx_apply", nil) - - l2CommitNewWorkTimer = metrics.NewRegisteredTimer("miner/commit/new_work_all", nil) - l2CommitNewWorkL1CollectTimer = metrics.NewRegisteredTimer("miner/commit/new_work_collect_l1", nil) - l2CommitNewWorkPrepareTimer = metrics.NewRegisteredTimer("miner/commit/new_work_prepare", nil) - l2CommitNewWorkCommitUncleTimer = metrics.NewRegisteredTimer("miner/commit/new_work_uncle", nil) - l2CommitNewWorkTidyPendingTxTimer = 
metrics.NewRegisteredTimer("miner/commit/new_work_tidy_pending", nil) - l2CommitNewWorkCommitL1MsgTimer = metrics.NewRegisteredTimer("miner/commit/new_work_commit_l1_msg", nil) - l2CommitNewWorkPrioritizedTxCommitTimer = metrics.NewRegisteredTimer("miner/commit/new_work_prioritized", nil) - l2CommitNewWorkRemoteLocalCommitTimer = metrics.NewRegisteredTimer("miner/commit/new_work_remote_local", nil) - l2CommitNewWorkLocalPriceAndNonceTimer = metrics.NewRegisteredTimer("miner/commit/new_work_local_price_and_nonce", nil) - l2CommitNewWorkRemotePriceAndNonceTimer = metrics.NewRegisteredTimer("miner/commit/new_work_remote_price_and_nonce", nil) - - l2CommitTimer = metrics.NewRegisteredTimer("miner/commit/all", nil) - l2CommitTraceTimer = metrics.NewRegisteredTimer("miner/commit/trace", nil) - l2CommitCCCTimer = metrics.NewRegisteredTimer("miner/commit/ccc", nil) - l2ResultTimer = metrics.NewRegisteredTimer("miner/result/all", nil) -) - -// environment is the worker's current environment and holds all of the current state information. 
-type environment struct { - signer types.Signer - - state *state.StateDB // apply state changes here - ancestors mapset.Set // ancestor set (used for checking uncle parent validity) - family mapset.Set // family set (used for checking uncle invalidity) - uncles mapset.Set // uncle set - tcount int // tx count in cycle - blockSize common.StorageSize // approximate size of tx payload in bytes - l1TxCount int // l1 msg count in cycle - gasPool *core.GasPool // available gas used to pack transactions - - header *types.Header - txs []*types.Transaction - receipts []*types.Receipt - - // circuit capacity check related fields - skipCCC bool // skip CCC when commitNewWork - traceEnv *tracing.TraceEnv // env for tracing - accRows *types.RowConsumption // accumulated row consumption for a block - nextL1MsgIndex uint64 // next L1 queue index to be processed - isSimulate bool -} - -// task contains all information for consensus engine sealing and result submitting. -type task struct { - receipts []*types.Receipt - state *state.StateDB - block *types.Block - createdAt time.Time - accRows *types.RowConsumption // accumulated row consumption in the circuit side -} - -const ( - commitInterruptNone int32 = iota - commitInterruptNewHead - commitInterruptResubmit - commitInterruptTimeout -) - -// newWorkReq represents a request for new sealing work submitting with relative interrupt notifier. -type newWorkReq struct { - interrupt *int32 - noempty bool - timestamp int64 -} - -// intervalAdjust represents a resubmitting interval adjustment. -type intervalAdjust struct { - ratio float64 - inc bool -} - -// prioritizedTransaction represents a single transaction that -// should be processed as the first transaction in the next block. -type prioritizedTransaction struct { - blockNumber uint64 - tx *types.Transaction -} - -// worker is the main object which takes care of submitting new work to consensus engine -// and gathering the sealing result. 
-type worker struct { - config *Config - chainConfig *params.ChainConfig - engine consensus.Engine - eth Backend - chain *core.BlockChain - - // Feeds - pendingLogsFeed event.Feed - - // Subscriptions - mux *event.TypeMux - txsCh chan core.NewTxsEvent - txsSub event.Subscription - chainHeadCh chan core.ChainHeadEvent - chainHeadSub event.Subscription - chainSideCh chan core.ChainSideEvent - chainSideSub event.Subscription - - // Channels - newWorkCh chan *newWorkReq - getWorkCh chan *getWorkReq - taskCh chan *task - resultCh chan *types.Block - startCh chan struct{} - exitCh chan struct{} - resubmitIntervalCh chan time.Duration - resubmitAdjustCh chan *intervalAdjust - - wg sync.WaitGroup - - current *environment // An environment for current running cycle. - localUncles map[common.Hash]*types.Block // A set of side blocks generated locally as the possible uncle blocks. - remoteUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks. - unconfirmed *unconfirmedBlocks // A set of locally mined blocks pending canonicalness confirmations. - - mu sync.RWMutex // The lock used to protect the coinbase and extra fields - coinbase common.Address - extra []byte - - pendingMu sync.RWMutex - pendingTasks map[common.Hash]*task - - snapshotMu sync.RWMutex // The lock used to protect the snapshots below - snapshotBlock *types.Block - snapshotReceipts types.Receipts - snapshotState *state.StateDB - - // atomic status counters - running int32 // The indicator whether the consensus engine is running or not. - newTxs int32 // New arrival transaction count since last sealing work submitting. - - // noempty is the flag used to control whether the feature of pre-seal empty - // block is enabled. The default value is false(pre-seal is enabled by default). 
- // But in some special scenario the consensus engine will seal blocks instantaneously, - // in this case this feature will add all empty blocks into canonical chain - // non-stop and no real transaction will be included. - noempty uint32 - - // newpayloadTimeout is the maximum timeout allowance for creating block. - // The default value is 3 seconds but node operator can set it to arbitrary - // large value. A large timeout allowance may cause Geth to fail creating - // a non-empty block within the specified time and eventually miss the chance to be a proposer - // in case there are some computation expensive transactions in txpool. - newBlockTimeout time.Duration - - // External functions - isLocalBlock func(block *types.Block) bool // Function used to determine whether the specified block is mined by local miner. - // Make sure the checker here is used by a single block one time, and must be reset for another block. - circuitCapacityChecker *circuitcapacitychecker.CircuitCapacityChecker - prioritizedTx *prioritizedTransaction - - // Test hooks - newTaskHook func(*task) // Method to call upon receiving a new sealing task. - skipSealHook func(*task) bool // Method to decide whether skipping the sealing. - fullTaskHook func() // Method to call before pushing the full sealing task. - resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval. - beforeTxHook func() // Method to call before processing a transaction. 
-} - -func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(*types.Block) bool, init bool) *worker { - worker := &worker{ - config: config, - chainConfig: chainConfig, - engine: engine, - eth: eth, - mux: mux, - chain: eth.BlockChain(), - isLocalBlock: isLocalBlock, - localUncles: make(map[common.Hash]*types.Block), - remoteUncles: make(map[common.Hash]*types.Block), - unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth), - pendingTasks: make(map[common.Hash]*task), - txsCh: make(chan core.NewTxsEvent, txChanSize), - chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize), - chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize), - newWorkCh: make(chan *newWorkReq), - getWorkCh: make(chan *getWorkReq), - taskCh: make(chan *task), - resultCh: make(chan *types.Block, resultQueueSize), - exitCh: make(chan struct{}), - startCh: make(chan struct{}, 1), - resubmitIntervalCh: make(chan time.Duration), - resubmitAdjustCh: make(chan *intervalAdjust, resubmitAdjustChanSize), - circuitCapacityChecker: circuitcapacitychecker.NewCircuitCapacityChecker(true), - } - log.Info("created new worker", "CircuitCapacityChecker ID", worker.circuitCapacityChecker.ID) - - // Subscribe NewTxsEvent for tx pool - worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh) - - // Subscribe events for blockchain - worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh) - worker.chainSideSub = eth.BlockChain().SubscribeChainSideEvent(worker.chainSideCh) - - // Sanitize recommit interval if the user-specified one is too short. - recommit := worker.config.Recommit - if recommit < minRecommitInterval { - log.Warn("Sanitizing miner recommit interval", "provided", recommit, "updated", minRecommitInterval) - recommit = minRecommitInterval - } - - // Sanitize the timeout config for creating block. 
- newBlockTimeout := worker.config.NewBlockTimeout - if newBlockTimeout == 0 { - log.Warn("Sanitizing new block timeout to default", "provided", newBlockTimeout, "updated", 3*time.Second) - newBlockTimeout = 3 * time.Second - } - if newBlockTimeout < time.Millisecond*100 { - log.Warn("Low block timeout may cause high amount of non-full blocks", "provided", newBlockTimeout, "default", 3*time.Second) - } - worker.newBlockTimeout = newBlockTimeout - - // Sanitize account fetch limit. - if worker.config.MaxAccountsNum == 0 { - log.Warn("Sanitizing miner account fetch limit", "provided", worker.config.MaxAccountsNum, "updated", math.MaxInt) - worker.config.MaxAccountsNum = math.MaxInt - } - - worker.wg.Add(4) - go worker.mainLoop() - go worker.newWorkLoop(recommit) - go worker.resultLoop() - go worker.taskLoop() - - // Submit first work to initialize pending state. - if init { - worker.startCh <- struct{}{} - } - return worker -} - -// getCCC returns a pointer to this worker's CCC instance. -// Only used in tests. -func (w *worker) getCCC() *circuitcapacitychecker.CircuitCapacityChecker { - return w.circuitCapacityChecker -} - -// setEtherbase sets the etherbase used to initialize the block coinbase field. -func (w *worker) setEtherbase(addr common.Address) { - w.mu.Lock() - defer w.mu.Unlock() - w.coinbase = addr -} - -func (w *worker) setGasCeil(ceil uint64) { - w.mu.Lock() - defer w.mu.Unlock() - w.config.GasCeil = ceil -} - -// setExtra sets the content used to initialize the block extra field. -func (w *worker) setExtra(extra []byte) { - w.mu.Lock() - defer w.mu.Unlock() - w.extra = extra -} - -// setRecommitInterval updates the interval for miner sealing work recommitting. 
-func (w *worker) setRecommitInterval(interval time.Duration) { - w.resubmitIntervalCh <- interval -} - -// disablePreseal disables pre-sealing mining feature -func (w *worker) disablePreseal() { - atomic.StoreUint32(&w.noempty, 1) -} - -// enablePreseal enables pre-sealing mining feature -func (w *worker) enablePreseal() { - atomic.StoreUint32(&w.noempty, 0) -} - -// pending returns the pending state and corresponding block. -func (w *worker) pending() (*types.Block, *state.StateDB) { - // return a snapshot to avoid contention on currentMu mutex - w.snapshotMu.RLock() - defer w.snapshotMu.RUnlock() - if w.snapshotState == nil { - return nil, nil - } - return w.snapshotBlock, w.snapshotState.Copy() -} - -// pendingBlock returns pending block. -func (w *worker) pendingBlock() *types.Block { - // return a snapshot to avoid contention on currentMu mutex - w.snapshotMu.RLock() - defer w.snapshotMu.RUnlock() - return w.snapshotBlock -} - -// pendingBlockAndReceipts returns pending block and corresponding receipts. -func (w *worker) pendingBlockAndReceipts() (*types.Block, types.Receipts) { - // return a snapshot to avoid contention on currentMu mutex - w.snapshotMu.RLock() - defer w.snapshotMu.RUnlock() - return w.snapshotBlock, w.snapshotReceipts -} - -// start sets the running status as 1 and triggers new work submitting. -func (w *worker) start() { - atomic.StoreInt32(&w.running, 1) - w.startCh <- struct{}{} -} - -// stop sets the running status as 0. -func (w *worker) stop() { - atomic.StoreInt32(&w.running, 0) -} - -// isRunning returns an indicator whether worker is running or not. -func (w *worker) isRunning() bool { - return atomic.LoadInt32(&w.running) == 1 -} - -// close terminates all background threads maintained by the worker. -// Note the worker does not support being closed multiple times. 
-func (w *worker) close() { - atomic.StoreInt32(&w.running, 0) - close(w.exitCh) - w.wg.Wait() -} - -// recalcRecommit recalculates the resubmitting interval upon feedback. -func recalcRecommit(minRecommit, prev time.Duration, target float64, inc bool) time.Duration { - var ( - prevF = float64(prev.Nanoseconds()) - next float64 - ) - if inc { - next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias) - max := float64(maxRecommitInterval.Nanoseconds()) - if next > max { - next = max - } - } else { - next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias) - min := float64(minRecommit.Nanoseconds()) - if next < min { - next = min - } - } - return time.Duration(int64(next)) -} - -// newWorkLoop is a standalone goroutine to submit new mining work upon received events. -func (w *worker) newWorkLoop(recommit time.Duration) { - defer w.wg.Done() - var ( - interrupt *int32 - minRecommit = recommit // minimal resubmit interval specified by user. - timestamp int64 // timestamp for each round of mining. - ) - - timer := time.NewTimer(0) - defer timer.Stop() - <-timer.C // discard the initial tick - - // commit aborts in-flight transaction execution with given signal and resubmits a new one. - commit := func(noempty bool, s int32) { - if interrupt != nil { - atomic.StoreInt32(interrupt, s) - } - interrupt = new(int32) - select { - case w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty, timestamp: timestamp}: - case <-w.exitCh: - return - } - // we do not need this resubmit loop to acquire higher price transactions here in our cases - // todo use morph config instead later - if !w.chainConfig.Scroll.UseZktrie { - timer.Reset(recommit) - } - atomic.StoreInt32(&w.newTxs, 0) - } - // clearPending cleans the stale pending tasks. 
- clearPending := func(number uint64) { - w.pendingMu.Lock() - for h, t := range w.pendingTasks { - if t.block.NumberU64()+staleThreshold <= number { - delete(w.pendingTasks, h) - } - } - w.pendingMu.Unlock() - } - - for { - select { - case <-w.startCh: - clearPending(w.chain.CurrentBlock().NumberU64()) - timestamp = time.Now().Unix() - commit(false, commitInterruptNewHead) - - case head := <-w.chainHeadCh: - clearPending(head.Block.NumberU64()) - timestamp = time.Now().Unix() - commit(true, commitInterruptNewHead) - - case <-timer.C: - // If mining is running resubmit a new work cycle periodically to pull in - // higher priced transactions. Disable this overhead for pending blocks. - if w.isRunning() && (w.chainConfig.Clique == nil || w.chainConfig.Clique.Period > 0) { - // Short circuit if no new transaction arrives. - if atomic.LoadInt32(&w.newTxs) == 0 { - timer.Reset(recommit) - continue - } - commit(true, commitInterruptResubmit) - } - - case interval := <-w.resubmitIntervalCh: - // Adjust resubmit interval explicitly by user. - if interval < minRecommitInterval { - log.Warn("Sanitizing miner recommit interval", "provided", interval, "updated", minRecommitInterval) - interval = minRecommitInterval - } - log.Info("Miner recommit interval update", "from", minRecommit, "to", interval) - minRecommit, recommit = interval, interval - - if w.resubmitHook != nil { - w.resubmitHook(minRecommit, recommit) - } - - case adjust := <-w.resubmitAdjustCh: - // Adjust resubmit interval by feedback. 
- if adjust.inc { - before := recommit - target := float64(recommit.Nanoseconds()) / adjust.ratio - recommit = recalcRecommit(minRecommit, recommit, target, true) - log.Trace("Increase miner recommit interval", "from", before, "to", recommit) - } else { - before := recommit - recommit = recalcRecommit(minRecommit, recommit, float64(minRecommit.Nanoseconds()), false) - log.Trace("Decrease miner recommit interval", "from", before, "to", recommit) - } - - if w.resubmitHook != nil { - w.resubmitHook(minRecommit, recommit) - } - - case <-w.exitCh: - return - } - } -} - -// mainLoop is a standalone goroutine to regenerate the sealing task based on the received event. -func (w *worker) mainLoop() { - defer w.wg.Done() - defer w.txsSub.Unsubscribe() - defer w.chainHeadSub.Unsubscribe() - defer w.chainSideSub.Unsubscribe() - defer func() { - if w.current != nil && w.current.state != nil { - w.current.state.StopPrefetcher() - } - }() - - for { - select { - case req := <-w.newWorkCh: - w.commitNewWork(req.interrupt, req.noempty, req.timestamp) - // new block created. - - case req := <-w.getWorkCh: - block, stateDB, receipt, rowConsumption, skippedTxs, err := w.generateWork(req.params, req.interrupt) - req.result <- &newBlockResult{ - err: err, - block: block, - state: stateDB, - receipts: receipt, - rowConsumption: rowConsumption, - skippedTxs: skippedTxs, - } - - case ev := <-w.chainSideCh: - // Short circuit for duplicate side blocks - if _, exist := w.localUncles[ev.Block.Hash()]; exist { - continue - } - if _, exist := w.remoteUncles[ev.Block.Hash()]; exist { - continue - } - // Add side block to possible uncle block set depending on the author. - if w.isLocalBlock != nil && w.isLocalBlock(ev.Block) { - w.localUncles[ev.Block.Hash()] = ev.Block - } else { - w.remoteUncles[ev.Block.Hash()] = ev.Block - } - // If our mining block contains less than 2 uncle blocks, - // add the new uncle block if valid and regenerate a mining block. 
- if w.isRunning() && w.current != nil && w.current.uncles.Cardinality() < 2 { - start := time.Now() - if err := w.commitUncle(w.current, ev.Block.Header()); err == nil { - var uncles []*types.Header - w.current.uncles.Each(func(item interface{}) bool { - hash, ok := item.(common.Hash) - if !ok { - return false - } - uncle, exist := w.localUncles[hash] - if !exist { - uncle, exist = w.remoteUncles[hash] - } - if !exist { - return false - } - uncles = append(uncles, uncle.Header()) - return false - }) - w.commit(uncles, nil, true, start) - } - } - - case ev := <-w.txsCh: - // Apply transactions to the pending state if we're not mining. - // - // Note all transactions received may not be continuous with transactions - // already included in the current mining block. These transactions will - // be automatically eliminated. - if !w.isRunning() && w.current != nil { - // If block is already full, abort - if gp := w.current.gasPool; gp != nil && gp.Gas() < params.TxGas { - continue - } - w.mu.RLock() - coinbase := w.coinbase - w.mu.RUnlock() - - txs := make(map[common.Address]types.Transactions) - for _, tx := range ev.Txs { - acc, _ := types.Sender(w.current.signer, tx) - txs[acc] = append(txs[acc], tx) - } - txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee) - tcount := w.current.tcount - // reset circuitCapacityChecker - w.circuitCapacityChecker.Reset() - w.commitTransactions(w.current, txset, coinbase, nil) - // Only update the snapshot if any new transactons were added - // to the pending block - if tcount != w.current.tcount { - w.updateSnapshot() - } - } else { - // Special case, if the consensus engine is 0 period clique(dev mode), - // submit mining work here since all empty submission will be rejected - // by clique. Of course the advance sealing(empty submission) is disabled. 
- if w.chainConfig.Clique != nil && w.chainConfig.Clique.Period == 0 { - w.commitNewWork(nil, true, time.Now().Unix()) - } - } - atomic.AddInt32(&w.newTxs, int32(len(ev.Txs))) - - // System stopped - case <-w.exitCh: - return - case <-w.txsSub.Err(): - return - case <-w.chainHeadSub.Err(): - return - case <-w.chainSideSub.Err(): - return - } - } -} - -// taskLoop is a standalone goroutine to fetch sealing task from the generator and -// push them to consensus engine. -func (w *worker) taskLoop() { - defer w.wg.Done() - var ( - stopCh chan struct{} - prev common.Hash - ) - - // interrupt aborts the in-flight sealing task. - interrupt := func() { - if stopCh != nil { - close(stopCh) - stopCh = nil - } - } - for { - select { - case task := <-w.taskCh: - if w.newTaskHook != nil { - w.newTaskHook(task) - } - // Reject duplicate sealing work due to resubmitting. - sealHash := w.engine.SealHash(task.block.Header()) - if sealHash == prev { - continue - } - // Interrupt previous sealing operation - interrupt() - stopCh, prev = make(chan struct{}), sealHash - - if w.skipSealHook != nil && w.skipSealHook(task) { - continue - } - w.pendingMu.Lock() - w.pendingTasks[sealHash] = task - w.pendingMu.Unlock() - - if err := w.engine.Seal(w.chain, task.block, w.resultCh, stopCh); err != nil { - log.Warn("Block sealing failed", "err", err) - w.pendingMu.Lock() - delete(w.pendingTasks, sealHash) - w.pendingMu.Unlock() - } - case <-w.exitCh: - interrupt() - return - } - } -} - -// resultLoop is a standalone goroutine to handle sealing result submitting -// and flush relative data to the database. -func (w *worker) resultLoop() { - defer w.wg.Done() - for { - select { - case block := <-w.resultCh: - // Short circuit when receiving empty result. - if block == nil { - continue - } - // Short circuit when receiving duplicate result caused by resubmitting. 
- if w.chain.HasBlock(block.Hash(), block.NumberU64()) { - continue - } - - var ( - sealhash = w.engine.SealHash(block.Header()) - hash = block.Hash() - ) - - w.pendingMu.RLock() - task, exist := w.pendingTasks[sealhash] - w.pendingMu.RUnlock() - - if !exist { - log.Error("Block found but no relative pending task", "number", block.Number(), "sealhash", sealhash, "hash", hash) - continue - } - - startTime := time.Now() - - // Different block could share same sealhash, deep copy here to prevent write-write conflict. - var ( - receipts = make([]*types.Receipt, len(task.receipts)) - logs []*types.Log - ) - for i, taskReceipt := range task.receipts { - receipt := new(types.Receipt) - receipts[i] = receipt - *receipt = *taskReceipt - - // add block location fields - receipt.BlockHash = hash - receipt.BlockNumber = block.Number() - receipt.TransactionIndex = uint(i) - - // Update the block hash in all logs since it is now available and not when the - // receipt/log of individual transactions were created. - receipt.Logs = make([]*types.Log, len(taskReceipt.Logs)) - for i, taskLog := range taskReceipt.Logs { - l := new(types.Log) - receipt.Logs[i] = l - *l = *taskLog - l.BlockHash = hash - } - logs = append(logs, receipt.Logs...) - } - - // Store circuit row consumption. - log.Trace( - "Worker write block row consumption", - "id", w.circuitCapacityChecker.ID, - "number", block.Number(), - "hash", hash.String(), - "accRows", task.accRows, - ) - rawdb.WriteBlockRowConsumption(w.eth.ChainDb(), hash, task.accRows) - // Commit block and state to database. 
- _, err := w.chain.WriteBlockWithState(block, receipts, logs, task.state, true) - if err != nil { - l2ResultTimer.Update(time.Since(startTime)) - log.Error("Failed writing block to chain", "err", err) - continue - } - log.Info("Successfully sealed new block", "number", block.Number(), "sealhash", sealhash, "hash", hash, - "elapsed", common.PrettyDuration(time.Since(task.createdAt))) - - // Broadcast the block and announce chain insertion event - w.mux.Post(core.NewMinedBlockEvent{Block: block}) - - // Insert the block into the set of pending ones to resultLoop for confirmations - w.unconfirmed.Insert(block.NumberU64(), block.Hash()) - - l2ResultTimer.Update(time.Since(startTime)) - - case <-w.exitCh: - return - } - } -} - -func (w *worker) makeEnv(parent *types.Block, header *types.Header) (*environment, error) { - // Retrieve the parent state to execute on top and start a prefetcher for - // the miner to speed block sealing up a bit - stateDB, err := w.chain.StateAt(parent.Root()) - if err != nil { - return nil, err - } - - // don't commit the state during tracing for circuit capacity checker, otherwise we cannot revert. 
- // and even if we don't commit the state, the `refund` value will still be correct, as explained in `CommitTransaction` - commitStateAfterApply := false - traceEnv, err := tracing.CreateTraceEnv(w.chainConfig, w.chain, w.engine, w.eth.ChainDb(), stateDB, parent, - // new block with a placeholder tx, for traceEnv's ExecutionResults length & TxStorageTraces length - types.NewBlockWithHeader(header).WithBody([]*types.Transaction{types.NewTx(&types.LegacyTx{})}, nil), - commitStateAfterApply) - if err != nil { - return nil, err - } - - stateDB.StartPrefetcher("miner") - - env := &environment{ - signer: types.MakeSigner(w.chainConfig, header.Number), - state: stateDB, - ancestors: mapset.NewSet(), - family: mapset.NewSet(), - uncles: mapset.NewSet(), - header: header, - traceEnv: traceEnv, - accRows: nil, - nextL1MsgIndex: parent.Header().NextL1MsgIndex, - } - // when 08 is processed ancestors contain 07 (quick block) - for _, ancestor := range w.chain.GetBlocksFromHash(parent.Hash(), 7) { - for _, uncle := range ancestor.Uncles() { - env.family.Add(uncle.Hash()) - } - env.family.Add(ancestor.Hash()) - env.ancestors.Add(ancestor.Hash()) - } - // Keep track of transactions which return errors so they can be removed - env.tcount = 0 - env.blockSize = 0 - env.l1TxCount = 0 - return env, nil -} - -// makeCurrent creates a new environment for the current cycle. -func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error { - env, err := w.makeEnv(parent, header) - if err != nil { - return err - } - - // Swap out the old work with the new one, terminating any leftover prefetcher - // processes in the mean time and starting a new one. - if w.current != nil { - w.current.discard() - } - w.current = env - // It does not need CCC for `commitNewWork`. - w.current.skipCCC = true - return nil -} - -// commitUncle adds the given block to uncle block set, returns error if failed to add. 
-func (w *worker) commitUncle(env *environment, uncle *types.Header) error { - hash := uncle.Hash() - if env.uncles.Contains(hash) { - return errors.New("uncle not unique") - } - if env.header.ParentHash == uncle.ParentHash { - return errors.New("uncle is sibling") - } - if !env.ancestors.Contains(uncle.ParentHash) { - return errors.New("uncle's parent unknown") - } - if env.family.Contains(hash) { - return errors.New("uncle already included") - } - env.uncles.Add(uncle.Hash()) - return nil -} - -// updateSnapshot updates pending snapshot block and state. -// Note this function assumes the current variable is thread safe. -func (w *worker) updateSnapshot() { - w.snapshotMu.Lock() - defer w.snapshotMu.Unlock() - - var uncles []*types.Header - w.current.uncles.Each(func(item interface{}) bool { - hash, ok := item.(common.Hash) - if !ok { - return false - } - uncle, exist := w.localUncles[hash] - if !exist { - uncle, exist = w.remoteUncles[hash] - } - if !exist { - return false - } - uncles = append(uncles, uncle.Header()) - return false - }) - - w.snapshotBlock = types.NewBlock( - w.current.header, - w.current.txs, - uncles, - w.current.receipts, - trie.NewStackTrie(nil), - ) - w.snapshotReceipts = copyReceipts(w.current.receipts) - w.snapshotState = w.current.state.Copy() -} - -func (w *worker) commitTransaction(env *environment, tx *types.Transaction, coinbase common.Address) ([]*types.Log, *types.BlockTrace, error) { - var accRows *types.RowConsumption - var traces *types.BlockTrace - var err error - - // do not do CCC checks on follower nodes, or it is called from `commitNewWork` - if w.isRunning() && !env.skipCCC { - defer func(t0 time.Time) { - l2CommitTxTimer.Update(time.Since(t0)) - if err != nil { - l2CommitTxFailedTimer.Update(time.Since(t0)) - } - }(time.Now()) - - // do gas limit check up-front and do not run CCC if it fails - if env.gasPool.Gas() < tx.Gas() { - return nil, nil, core.ErrGasLimitReached - } - - snap := env.state.Snapshot() - - log.Trace( - 
"Worker apply ccc for tx", - "id", w.circuitCapacityChecker.ID, - "txHash", tx.Hash().Hex(), - ) - - // 1. we have to check circuit capacity before `core.ApplyTransaction`, - // because if the tx can be successfully executed but circuit capacity overflows, it will be inconvenient to revert. - // 2. even if we don't commit to the state during the tracing (which means `clearJournalAndRefund` is not called during the tracing), - // the `refund` value will still be correct, because: - // 2.1 when starting handling the first tx, `state.refund` is 0 by default, - // 2.2 after tracing, the state is either committed in `core.ApplyTransaction`, or reverted, so the `state.refund` can be cleared, - // 2.3 when starting handling the following txs, `state.refund` comes as 0 - common.WithTimer(l2CommitTxTraceTimer, func() { - traces, err = env.traceEnv.GetBlockTrace( - types.NewBlockWithHeader(w.current.header).WithBody([]*types.Transaction{tx}, nil), - ) - }) - common.WithTimer(l2CommitTxTraceStateRevertTimer, func() { - // `env.traceEnv.State` & `env.state` share a same pointer to the state, so only need to revert `env.state` - // revert to snapshot for calling `core.ApplyMessage` again, (both `traceEnv.GetBlockTrace` & `core.ApplyTransaction` will call `core.ApplyMessage`) - env.state.RevertToSnapshot(snap) - }) - if err != nil { - return nil, nil, err - } - common.WithTimer(l2CommitTxCCCTimer, func() { - accRows, err = w.circuitCapacityChecker.ApplyTransaction(traces) - }) - if err != nil { - return nil, traces, err - } - log.Trace( - "Worker apply ccc for tx result", - "id", w.circuitCapacityChecker.ID, - "txHash", tx.Hash().Hex(), - "accRows", accRows, - ) - } - - // create new snapshot for `core.ApplyTransaction` - snap := env.state.Snapshot() - - var receipt *types.Receipt - common.WithTimer(l2CommitTxApplyTimer, func() { - receipt, err = core.ApplyTransaction(w.chainConfig, w.chain, &coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, 
*w.chain.GetVMConfig()) - }) - if err != nil { - env.state.RevertToSnapshot(snap) - - if accRows != nil { - // At this point, we have called CCC but the transaction failed in `ApplyTransaction`. - // If we skip this tx and continue to pack more, the next tx will likely fail with - // `circuitcapacitychecker.ErrUnknown`. However, at this point we cannot decide whether - // we should seal the block or skip the tx and continue, so we simply return the error. - log.Error( - "GetBlockTrace passed but ApplyTransaction failed, ccc is left in inconsistent state", - "blockNumber", env.header.Number, - "txHash", tx.Hash().Hex(), - "err", err, - ) - } - - return nil, traces, err - } - env.txs = append(env.txs, tx) - env.receipts = append(env.receipts, receipt) - env.accRows = accRows - - return receipt.Logs, traces, nil -} - -func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByPriceAndNonce, coinbase common.Address, interrupt *int32) (error, bool, []*types.SkippedTransaction) { - defer func(t0 time.Time) { - l2CommitTxsTimer.Update(time.Since(t0)) - }(time.Now()) - - var circuitCapacityReached bool - - // Short circuit if current is nil - if env == nil { - return errors.New("no env found"), circuitCapacityReached, nil - } - - gasLimit := env.header.GasLimit - if env.gasPool == nil { - env.gasPool = new(core.GasPool).AddGas(gasLimit) - } - - var ( - coalescedLogs []*types.Log - loops int64 - skippedTxs = make([]*types.SkippedTransaction, 0) - ) - -loop: - for { - if w.beforeTxHook != nil { - w.beforeTxHook() - } - - loops++ - if interrupt != nil { - if signal := atomic.LoadInt32(interrupt); signal != commitInterruptNone { - return signalToErr(signal), circuitCapacityReached, skippedTxs - } - } - if env.gasPool.Gas() < params.TxGas { - log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas) - break - } - // Retrieve the next transaction and abort if all done - tx := txs.Peek() - if tx == nil { - break - } - - // 
If we have collected enough transactions then we're done - // Originally we only limit l2txs count, but now strictly limit total txs number. - if !w.chainConfig.Scroll.IsValidTxCount(env.tcount + 1) { - log.Trace("Transaction count limit reached", "have", env.tcount, "want", w.chainConfig.Scroll.MaxTxPerBlock) - break - } - if tx.IsL1MessageTx() && !env.isSimulate && tx.AsL1MessageTx().QueueIndex != env.nextL1MsgIndex { - log.Error( - "Unexpected L1 message queue index in worker", - "expected", env.nextL1MsgIndex, - "got", tx.AsL1MessageTx().QueueIndex, - ) - break - } - if !tx.IsL1MessageTx() && !w.chainConfig.Scroll.IsValidBlockSize(env.blockSize+tx.Size()) { - log.Trace("Block size limit reached", "have", env.blockSize, "want", w.chainConfig.Scroll.MaxTxPayloadBytesPerBlock, "tx", tx.Size()) - txs.Pop() // skip transactions from this account - continue - } - // Error may be ignored here. The error has already been checked - // during transaction acceptance in the transaction pool. - // - // We use the eip155 signer regardless of the current hf. - from, _ := types.Sender(env.signer, tx) - // Check whether the tx is replay protected. If we're not in the EIP155 hf - // phase, start ignoring the sender until we do. - if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) { - log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block) - - txs.Pop() - continue - } - // Start executing the transaction - env.state.SetTxContext(tx.Hash(), env.tcount) - - logs, traces, err := w.commitTransaction(env, tx, coinbase) - switch { - case errors.Is(err, core.ErrGasLimitReached) && tx.IsL1MessageTx(): - // If this block already contains some L1 messages, - // terminate here and try again in the next block. - if env.l1TxCount > 0 { - break loop - } - // A single L1 message leads to out-of-gas. Skip it. 
- queueIndex := tx.AsL1MessageTx().QueueIndex - log.Info("Skipping L1 message", "queueIndex", queueIndex, "tx", tx.Hash().String(), "block", env.header.Number, "reason", "gas limit exceeded") - env.nextL1MsgIndex = queueIndex + 1 - txs.Shift() - - var storeTraces *types.BlockTrace - if w.config.StoreSkippedTxTraces { - storeTraces = traces - } - skippedTxs = append(skippedTxs, &types.SkippedTransaction{ - Tx: *tx, - Reason: "gas limit exceeded", - Trace: storeTraces, - }) - l1TxGasLimitExceededCounter.Inc(1) - - case errors.Is(err, core.ErrGasLimitReached): - // Pop the current out-of-gas transaction without shifting in the next from the account - log.Trace("Gas limit exceeded for current block", "sender", from) - txs.Pop() - - case errors.Is(err, core.ErrNonceTooLow): - // New head notification data race between the transaction pool and miner, shift - log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) - txs.Shift() - - case errors.Is(err, core.ErrNonceTooHigh): - // Reorg notification data race between the transaction pool and miner, skip account = - log.Trace("Skipping account with hight nonce", "sender", from, "nonce", tx.Nonce()) - txs.Pop() - - case errors.Is(err, nil): - // Everything ok, collect the logs and shift in the next transaction from the same account - coalescedLogs = append(coalescedLogs, logs...) 
- env.tcount++ - txs.Shift() - - if tx.IsL1MessageTx() { - queueIndex := tx.AsL1MessageTx().QueueIndex - log.Debug("Including L1 message", "queueIndex", queueIndex, "tx", tx.Hash().String()) - env.l1TxCount++ - env.nextL1MsgIndex = queueIndex + 1 - } else { - // only consider block size limit for L2 transactions - env.blockSize += tx.Size() - } - - case errors.Is(err, core.ErrTxTypeNotSupported): - // Pop the unsupported transaction without shifting in the next from the account - log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type()) - txs.Pop() - - // Circuit capacity check - case errors.Is(err, circuitcapacitychecker.ErrBlockRowConsumptionOverflow): - if env.tcount >= 1 { - // 1. Circuit capacity limit reached in a block, and it's not the first tx: - // don't pop or shift, just quit the loop immediately; - // though it might still be possible to add some "smaller" txs, - // but it's a trade-off between tracing overhead & block usage rate - log.Trace("Circuit capacity limit reached in a block", "acc_rows", env.accRows, "tx", tx.Hash().String()) - log.Info("Skipping message", "tx", tx.Hash().String(), "block", env.header.Number, "reason", "accumulated row consumption overflow") - - if !tx.IsL1MessageTx() { - // Prioritize transaction for the next block. - // If there are no new L1 messages, this transaction will be the 1st transaction in the next block, - // at which point we can definitively decide if we should skip it or not. - log.Debug("Prioritizing transaction for next block", "blockNumber", env.header.Number.Uint64()+1, "tx", tx.Hash().String()) - w.prioritizedTx = &prioritizedTransaction{ - blockNumber: env.header.Number.Uint64() + 1, - tx: tx, - } - atomic.AddInt32(&w.newTxs, int32(1)) - } - - circuitCapacityReached = true - break loop - } else { - // 2. 
Circuit capacity limit reached in a block, and it's the first tx: skip the tx - log.Trace("Circuit capacity limit reached for a single tx", "tx", tx.Hash().String()) - - if tx.IsL1MessageTx() { - // Skip L1 message transaction, - // shift to the next from the account because we shouldn't skip the entire txs from the same account - txs.Shift() - - queueIndex := tx.AsL1MessageTx().QueueIndex - log.Info("Skipping L1 message", "queueIndex", queueIndex, "tx", tx.Hash().String(), "block", env.header.Number, "reason", "row consumption overflow") - env.nextL1MsgIndex = queueIndex + 1 - l1TxRowConsumptionOverflowCounter.Inc(1) - } else { - // Skip L2 transaction and all other transactions from the same sender account - log.Info("Skipping L2 message", "tx", tx.Hash().String(), "block", env.header.Number, "reason", "first tx row consumption overflow") - txs.Pop() - w.eth.TxPool().RemoveTx(tx.Hash(), true) - l2TxRowConsumptionOverflowCounter.Inc(1) - } - - // Reset ccc so that we can process other transactions for this block - w.circuitCapacityChecker.Reset() - log.Trace("Worker reset ccc", "id", w.circuitCapacityChecker.ID) - circuitCapacityReached = false - - var storeTraces *types.BlockTrace - if w.config.StoreSkippedTxTraces { - storeTraces = traces - } - skippedTxs = append(skippedTxs, &types.SkippedTransaction{ - Tx: *tx, - Reason: "row consumption overflow", - Trace: storeTraces, - }) - } - - case errors.Is(err, circuitcapacitychecker.ErrUnknown) && tx.IsL1MessageTx(): - // Circuit capacity check: unknown circuit capacity checker error for L1MessageTx, - // shift to the next from the account because we shouldn't skip the entire txs from the same account - queueIndex := tx.AsL1MessageTx().QueueIndex - log.Trace("Unknown circuit capacity checker error for L1MessageTx", "tx", tx.Hash().String(), "queueIndex", queueIndex) - log.Info("Skipping L1 message", "queueIndex", queueIndex, "tx", tx.Hash().String(), "block", env.header.Number, "reason", "unknown row consumption 
error") - env.nextL1MsgIndex = queueIndex + 1 - // TODO: propagate more info about the error from CCC - var storeTraces *types.BlockTrace - if w.config.StoreSkippedTxTraces { - storeTraces = traces - } - skippedTxs = append(skippedTxs, &types.SkippedTransaction{ - Tx: *tx, - Reason: "unknown circuit capacity checker error", - Trace: storeTraces, - }) - l1TxCccUnknownErrCounter.Inc(1) - - // Normally we would do `txs.Shift()` here. - // However, after `ErrUnknown`, ccc might remain in an - // inconsistent state, so we cannot pack more transactions. - circuitCapacityReached = true - w.checkCurrentTxNumWithCCC(env.tcount) - break loop - - case errors.Is(err, circuitcapacitychecker.ErrUnknown) && !tx.IsL1MessageTx(): - // Circuit capacity check: unknown circuit capacity checker error for L2MessageTx, skip the account - log.Trace("Unknown circuit capacity checker error for L2MessageTx", "tx", tx.Hash().String()) - log.Info("Skipping L2 message", "tx", tx.Hash().String(), "block", env.header.Number, "reason", "unknown row consumption error") - // TODO: propagate more info about the error from CCC - if w.config.StoreSkippedTxTraces { - rawdb.WriteSkippedTransaction(w.eth.ChainDb(), tx, traces, "unknown circuit capacity checker error", env.header.Number.Uint64(), nil) - } else { - rawdb.WriteSkippedTransaction(w.eth.ChainDb(), tx, nil, "unknown circuit capacity checker error", env.header.Number.Uint64(), nil) - } - l2TxCccUnknownErrCounter.Inc(1) - - // Normally we would do `txs.Pop()` here. - // However, after `ErrUnknown`, ccc might remain in an - // inconsistent state, so we cannot pack more transactions. 
- w.eth.TxPool().RemoveTx(tx.Hash(), true) - circuitCapacityReached = true - w.checkCurrentTxNumWithCCC(env.tcount) - break loop - - case errors.Is(err, core.ErrInsufficientFunds) || errors.Is(errors.Unwrap(err), core.ErrInsufficientFunds): - log.Trace("Skipping tx with insufficient funds", "sender", from, "tx", tx.Hash().String()) - txs.Pop() - w.eth.TxPool().RemoveTx(tx.Hash(), true) - - default: - // Strange error, discard the transaction and get the next in line (note, the - // nonce-too-high clause will prevent us from executing in vain). - log.Debug("Transaction failed, account skipped", "hash", tx.Hash().String(), "err", err) - if tx.IsL1MessageTx() { - queueIndex := tx.AsL1MessageTx().QueueIndex - log.Info("Skipping L1 message", "queueIndex", queueIndex, "tx", tx.Hash().String(), "block", env.header.Number, "reason", "strange error", "err", err) - env.nextL1MsgIndex = queueIndex + 1 - - var storeTraces *types.BlockTrace - if w.config.StoreSkippedTxTraces { - storeTraces = traces - } - skippedTxs = append(skippedTxs, &types.SkippedTransaction{ - Tx: *tx, - Reason: fmt.Sprintf("strange error: %v", err), - Trace: storeTraces, - }) - l1TxStrangeErrCounter.Inc(1) - } - txs.Shift() - } - } - - if !w.isRunning() && len(coalescedLogs) > 0 { - // We don't push the pendingLogsEvent while we are mining. The reason is that - // when we are mining, the worker will regenerate a mining block every 3 seconds. - // In order to avoid pushing the repeated pendingLog, we disable the pending log pushing. - - // make a copy, the state caches the logs and these logs get "upgraded" from pending to mined - // logs by filling in the block hash when the block was mined by the local miner. This can - // cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed. 
- cpy := make([]*types.Log, len(coalescedLogs)) - for i, l := range coalescedLogs { - cpy[i] = new(types.Log) - *cpy[i] = *l - } - w.pendingLogsFeed.Send(cpy) - } - // Notify resubmit loop to decrease resubmitting interval if current interval is larger - // than the user-specified one. - if interrupt != nil { - w.resubmitAdjustCh <- &intervalAdjust{inc: false} - } - return nil, circuitCapacityReached, skippedTxs -} - -func (w *worker) checkCurrentTxNumWithCCC(expected int) { - match, got, err := w.circuitCapacityChecker.CheckTxNum(expected) - if err != nil { - log.Error("failed to CheckTxNum in ccc", "err", err) - return - } - if !match { - log.Error("tx count in miner is different with CCC", "current env tcount", expected, "got", got) - } -} - -// commitNewWork generates several new sealing tasks based on the parent block. -func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) { - w.mu.RLock() - defer w.mu.RUnlock() - - defer func(t0 time.Time) { - l2CommitNewWorkTimer.Update(time.Since(t0)) - }(time.Now()) - - tstart := time.Now() - parent := w.chain.CurrentBlock() - w.circuitCapacityChecker.Reset() - log.Trace("Worker reset ccc", "id", w.circuitCapacityChecker.ID) - - if parent.Time() >= uint64(timestamp) { - timestamp = int64(parent.Time() + 1) - } - num := parent.Number() - header := &types.Header{ - ParentHash: parent.Hash(), - Number: num.Add(num, common.Big1), - GasLimit: core.CalcGasLimit(parent.GasLimit(), w.config.GasCeil), - Extra: w.extra, - Time: uint64(timestamp), - } - // Set baseFee if we are on an EIP-1559 chain - if w.chainConfig.IsCurie(header.Number) { - state, err := w.chain.StateAt(parent.Root()) - if err != nil { - log.Error("Failed to create mining context", "err", err) - return - } - parentL1BaseFee := fees.GetL1BaseFee(state) - header.BaseFee = misc.CalcBaseFee(w.chainConfig, parent.Header(), parentL1BaseFee) - } - // Only set the coinbase if our consensus engine is running (avoid spurious block rewards) - if 
w.isRunning() { - if w.coinbase == (common.Address{}) { - log.Error("Refusing to mine without etherbase") - return - } - header.Coinbase = w.coinbase - } - - common.WithTimer(l2CommitNewWorkPrepareTimer, func() { - if err := w.engine.Prepare(w.chain, header); err != nil { - log.Error("Failed to prepare header for mining", "err", err) - return - } - }) - - // If we are care about TheDAO hard-fork check whether to override the extra-data or not - if daoBlock := w.chainConfig.DAOForkBlock; daoBlock != nil { - // Check whether the block is among the fork extra-override range - limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange) - if header.Number.Cmp(daoBlock) >= 0 && header.Number.Cmp(limit) < 0 { - // Depending whether we support or oppose the fork, override differently - if w.chainConfig.DAOForkSupport { - header.Extra = common.CopyBytes(params.DAOForkBlockExtra) - } else if bytes.Equal(header.Extra, params.DAOForkBlockExtra) { - header.Extra = []byte{} // If miner opposes, don't let it use the reserved extra-data - } - } - } - // Could potentially happen if starting to mine in an odd state. 
- err := w.makeCurrent(parent, header) - if err != nil { - log.Error("Failed to create mining context", "err", err) - return - } - // Create the current work task and check any fork transitions needed - env := w.current - if w.chainConfig.DAOForkSupport && w.chainConfig.DAOForkBlock != nil && w.chainConfig.DAOForkBlock.Cmp(header.Number) == 0 { - misc.ApplyDAOHardFork(env.state) - } - // Accumulate the uncles for the current block - uncles := make([]*types.Header, 0, 2) - commitUncles := func(blocks map[common.Hash]*types.Block) { - // Clean up stale uncle blocks first - for hash, uncle := range blocks { - if uncle.NumberU64()+staleThreshold <= header.Number.Uint64() { - delete(blocks, hash) - } - } - for hash, uncle := range blocks { - if len(uncles) == 2 { - break - } - if err := w.commitUncle(env, uncle.Header()); err != nil { - log.Trace("Possible uncle rejected", "hash", hash, "reason", err) - } else { - log.Debug("Committing new uncle to block", "hash", hash) - uncles = append(uncles, uncle.Header()) - } - } - } - - common.WithTimer(l2CommitNewWorkCommitUncleTimer, func() { - // Prefer to locally generated uncle - commitUncles(w.localUncles) - commitUncles(w.remoteUncles) - }) - - // Create an empty block based on temporary copied state for - // sealing in advance without waiting block execution finished. - if !noempty && atomic.LoadUint32(&w.noempty) == 0 { - w.commit(uncles, nil, false, tstart) - } - err, _ = w.fillTransactions(w.current, nil, interrupt) - switch { - case err == nil: - // The entire block is filled, decrease resubmit interval in case - // of current interval is larger than the user-specified one. - w.adjustResubmitInterval(&intervalAdjust{inc: false}) - - case errors.Is(err, errBlockInterruptedByRecommit): - // Notify resubmit loop to increase resubmitting interval if the - // interruption is due to frequent commits. 
- gaslimit := w.current.header.GasLimit - ratio := float64(gaslimit-w.current.gasPool.Gas()) / float64(gaslimit) - if ratio < 0.1 { - ratio = 0.1 - } - w.adjustResubmitInterval(&intervalAdjust{ - ratio: ratio, - inc: true, - }) - - case errors.Is(err, errBlockInterruptedByNewHead): - // If the block building is interrupted by newhead event, discard it - // totally. Committing the interrupted block introduces unnecessary - // delay, and possibly causes miner to mine on the previous head, - // which could result in higher uncle rate. - w.current.discard() - return - case errors.Is(err, errBlockInterruptedByTimeout): - // If the block building takes too much time, stop it, and commit the block directly - log.Warn("block building timeout") - default: - // unknown error found here, log it and stop committing block - log.Error("unknown error found", "err", err) - return - } - - w.commit(uncles, w.fullTaskHook, true, tstart) -} - -// commit runs any post-transaction state modifications, assembles the final block -// and commits new work if consensus engine is running. 
-func (w *worker) commit(uncles []*types.Header, interval func(), update bool, start time.Time) error { - defer func(t0 time.Time) { - l2CommitTimer.Update(time.Since(t0)) - }(time.Now()) - - // set w.current.accRows for empty-but-not-genesis block - if (w.current.header.Number.Uint64() != 0) && - (w.current.accRows == nil || len(*w.current.accRows) == 0) && w.isRunning() { - log.Trace( - "Worker apply ccc for empty block", - "id", w.circuitCapacityChecker.ID, - "number", w.current.header.Number, - "hash", w.current.header.Hash().String(), - ) - var traces *types.BlockTrace - var err error - common.WithTimer(l2CommitTraceTimer, func() { - traces, err = w.current.traceEnv.GetBlockTrace(types.NewBlockWithHeader(w.current.header)) - }) - if err != nil { - return err - } - // truncate ExecutionResults&TxStorageTraces, because we declare their lengths with a dummy tx before; - // however, we need to clean it up for an empty block - traces.ExecutionResults = traces.ExecutionResults[:0] - traces.TxStorageTraces = traces.TxStorageTraces[:0] - var accRows *types.RowConsumption - common.WithTimer(l2CommitCCCTimer, func() { - accRows, err = w.circuitCapacityChecker.ApplyBlock(traces) - }) - if err != nil { - return err - } - log.Trace( - "Worker apply ccc for empty block result", - "id", w.circuitCapacityChecker.ID, - "number", w.current.header.Number, - "hash", w.current.header.Hash().String(), - "accRows", accRows, - ) - w.current.accRows = accRows - } - - if w.isRunning() { - if interval != nil { - interval() - } - - // If we use zkTrie, then skip executing FinalizeAndAssemble, as it may commit the state data to database that would cause some state data produced by the transactions that has not been confirmed being flushed to disk. - if !w.chainConfig.Scroll.UseZktrie { - // Deep copy receipts here to avoid interaction between different tasks. 
- receipts := copyReceipts(w.current.receipts) - s := w.current.state.Copy() - block, err := w.engine.FinalizeAndAssemble(w.chain, w.current.header, s, w.current.txs, uncles, receipts) - if err != nil { - return err - } - // If we're post merge, just ignore - if !w.isTTDReached(block.Header()) { - select { - case w.taskCh <- &task{receipts: receipts, state: s, block: block, createdAt: time.Now(), accRows: w.current.accRows}: - w.unconfirmed.Shift(block.NumberU64() - 1) - log.Info("Commit new mining work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()), - "uncles", len(uncles), "txs", w.current.tcount, - "gas", block.GasUsed(), "fees", totalFees(block, receipts), - "elapsed", common.PrettyDuration(time.Since(start))) - - case <-w.exitCh: - log.Info("Worker has exited") - } - } - } - } - if update { - w.updateSnapshot() - } - return nil -} - -// isTTDReached returns the indicator if the given block has reached the total -// terminal difficulty for The Merge transition. -func (w *worker) isTTDReached(header *types.Header) bool { - td, ttd := w.chain.GetTd(header.ParentHash, header.Number.Uint64()-1), w.chain.Config().TerminalTotalDifficulty - return td != nil && ttd != nil && td.Cmp(ttd) >= 0 -} - -// copyReceipts makes a deep copy of the given receipts. -func copyReceipts(receipts []*types.Receipt) []*types.Receipt { - result := make([]*types.Receipt, len(receipts)) - for i, l := range receipts { - cpy := *l - result[i] = &cpy - } - return result -} - -// postSideBlock fires a side chain event, only use it for testing. -func (w *worker) postSideBlock(event core.ChainSideEvent) { - select { - case w.chainSideCh <- event: - case <-w.exitCh: - } -} - -// adjustResubmitInterval adjusts the resubmit interval. 
-func (w *worker) adjustResubmitInterval(message *intervalAdjust) { - select { - case w.resubmitAdjustCh <- message: - default: - log.Warn("the resubmitAdjustCh is full, discard the message") - } -} - -// totalFees computes total consumed miner fees in ETH. Block transactions and receipts have to have the same order. -func totalFees(block *types.Block, receipts []*types.Receipt) *big.Float { - feesWei := new(big.Int) - for i, tx := range block.Transactions() { - minerFee, _ := tx.EffectiveGasTip(block.BaseFee()) - feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), minerFee)) - } - return new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether))) -} - -// signalToErr converts the interruption signal to a concrete error type for return. -// The given signal must be a valid interruption signal. -func signalToErr(signal int32) error { - switch signal { - case commitInterruptNewHead: - return errBlockInterruptedByNewHead - case commitInterruptResubmit: - return errBlockInterruptedByRecommit - case commitInterruptTimeout: - return errBlockInterruptedByTimeout - default: - panic(fmt.Errorf("undefined signal %d", signal)) - } -} - -func withTimer(timer metrics.Timer, f func()) { - if metrics.Enabled { - timer.Time(f) - } else { - f() - } -} diff --git a/miner/worker_l2.go b/miner/worker_l2.go deleted file mode 100644 index bb7eed933..000000000 --- a/miner/worker_l2.go +++ /dev/null @@ -1,305 +0,0 @@ -package miner - -import ( - "errors" - "fmt" - "sync/atomic" - "time" - - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/consensus/misc" - "github.com/scroll-tech/go-ethereum/core" - "github.com/scroll-tech/go-ethereum/core/state" - "github.com/scroll-tech/go-ethereum/core/types" - "github.com/scroll-tech/go-ethereum/log" - 
"github.com/scroll-tech/go-ethereum/rollup/fees" -) - -// getWorkReq represents a request for getting a new sealing work with provided parameters. -type getWorkReq struct { - interrupt *int32 - params *generateParams - result chan *newBlockResult // non-blocking channel -} - -type newBlockResult struct { - block *types.Block - state *state.StateDB - receipts types.Receipts - rowConsumption *types.RowConsumption - skippedTxs []*types.SkippedTransaction - err error -} - -// generateParams wraps various of settings for generating sealing task. -type generateParams struct { - timestamp uint64 // The timstamp for sealing task - parentHash common.Hash // Parent block hash, empty means the latest chain head - coinbase common.Address // The fee recipient address for including transaction - transactions types.Transactions // L1Message transactions to include at the start of the block -} - -// prepareWork constructs the sealing task according to the given parameters, -// either based on the last chain head or specified parent. In this function -// the pending transactions are not filled yet, only the empty task returned. 
-func (w *worker) prepareWork(genParams *generateParams) (*environment, error) { - w.mu.RLock() - defer w.mu.RUnlock() - - parent := w.chain.CurrentBlock() - if genParams.parentHash != (common.Hash{}) { - parent = w.chain.GetBlockByHash(genParams.parentHash) - } - if parent == nil { - return nil, fmt.Errorf("missing parent") - } - - timestamp := genParams.timestamp - if parent.Time() >= genParams.timestamp { - timestamp = parent.Time() + 1 - } - coinBase := w.coinbase - if genParams.coinbase != (common.Address{}) { - coinBase = genParams.coinbase - } - header, err := w.makeHeader(parent, timestamp, coinBase) - if err != nil { - return nil, err - } - - env, err := w.makeEnv(parent, header) - if err != nil { - log.Error("Failed to create sealing context", "err", err) - return nil, err - } - return env, nil -} - -func (w *worker) makeHeader(parent *types.Block, timestamp uint64, coinBase common.Address) (*types.Header, error) { - num := parent.Number() - header := &types.Header{ - ParentHash: parent.Hash(), - Number: num.Add(num, common.Big1), - GasLimit: core.CalcGasLimit(parent.GasLimit(), w.config.GasCeil), - Extra: w.extra, - Time: timestamp, - Coinbase: coinBase, - } - // Set baseFee if we are on an EIP-1559 chain - if w.chainConfig.IsCurie(header.Number) { - state, err := w.chain.StateAt(parent.Root()) - if err != nil { - log.Error("Failed to create mining context", "err", err) - return nil, err - } - parentL1BaseFee := fees.GetL1BaseFee(state) - header.BaseFee = misc.CalcBaseFee(w.chainConfig, parent.Header(), parentL1BaseFee) - } - // Run the consensus preparation with the default or customized consensus engine. - if err := w.engine.Prepare(w.chain, header); err != nil { - log.Error("Failed to prepare header for sealing", "err", err) - return nil, err - } - return header, nil -} - -// fillTransactions retrieves the pending transactions from the txpool and fills them -// into the given sealing block. 
The transaction selection and ordering strategy can -// be customized with the plugin in the future. -func (w *worker) fillTransactions(env *environment, l1Transactions types.Transactions, interrupt *int32) (error, []*types.SkippedTransaction) { - var ( - err error - circuitCapacityReached bool - skippedTxs []*types.SkippedTransaction - ) - - defer func(env *environment) { - if env.header != nil { - env.header.NextL1MsgIndex = env.nextL1MsgIndex - } - }(env) - - if len(l1Transactions) > 0 { - l1Txs := make(map[common.Address]types.Transactions) - for _, tx := range l1Transactions { - sender, _ := types.Sender(env.signer, tx) - senderTxs, ok := l1Txs[sender] - if ok { - senderTxs = append(senderTxs, tx) - l1Txs[sender] = senderTxs - } else { - l1Txs[sender] = types.Transactions{tx} - } - } - txs := types.NewTransactionsByPriceAndNonce(env.signer, l1Txs, env.header.BaseFee) - err, circuitCapacityReached, skippedTxs = w.commitTransactions(env, txs, env.header.Coinbase, interrupt) - if err != nil || circuitCapacityReached { - return err, skippedTxs - } - } - - // Split the pending transactions into locals and remotes - // Fill the block with all available pending transactions. 
- pending := w.eth.TxPool().PendingWithMax(false, w.config.MaxAccountsNum) - localTxs, remoteTxs := make(map[common.Address]types.Transactions), pending - for _, account := range w.eth.TxPool().Locals() { - if txs := remoteTxs[account]; len(txs) > 0 { - delete(remoteTxs, account) - localTxs[account] = txs - } - } - - if w.prioritizedTx != nil && env.header.Number.Uint64() > w.prioritizedTx.blockNumber { - w.prioritizedTx = nil - } - if w.prioritizedTx != nil && env.header.Number.Uint64() == w.prioritizedTx.blockNumber { - tx := w.prioritizedTx.tx - from, _ := types.Sender(env.signer, tx) // error already checked before - txList := map[common.Address]types.Transactions{from: []*types.Transaction{tx}} - txs := types.NewTransactionsByPriceAndNonce(env.signer, txList, env.header.BaseFee) - err, circuitCapacityReached, _ = w.commitTransactions(env, txs, w.coinbase, interrupt) - if err != nil || circuitCapacityReached { - return err, skippedTxs - } - } - - if len(localTxs) > 0 { - txs := types.NewTransactionsByPriceAndNonce(env.signer, localTxs, env.header.BaseFee) - err, circuitCapacityReached, _ = w.commitTransactions(env, txs, env.header.Coinbase, interrupt) - if err != nil || circuitCapacityReached { - return err, skippedTxs - } - } - if len(remoteTxs) > 0 { - txs := types.NewTransactionsByPriceAndNonce(env.signer, remoteTxs, env.header.BaseFee) - err, _, _ = w.commitTransactions(env, txs, env.header.Coinbase, nil) // always return false - } - - return err, skippedTxs -} - -// generateWork generates a sealing block based on the given parameters. -// TODO the produced state data by the transactions will be commit to database, whether the block is confirmed or not. -// TODO this issue will persist until the current zktrie based database optimizes its strategy. 
-func (w *worker) generateWork(genParams *generateParams, interrupt *int32) (block *types.Block, state *state.StateDB, receipts types.Receipts, rc *types.RowConsumption, skippedTxs []*types.SkippedTransaction, err error) { - // reset circuitCapacityChecker for a new block - w.circuitCapacityChecker.Reset() - work, prepareErr := w.prepareWork(genParams) - if prepareErr != nil { - err = prepareErr - return - } - defer work.discard() - if work.gasPool == nil { - work.gasPool = new(core.GasPool).AddGas(work.header.GasLimit) - } - - fillTxErr, skippedTxs := w.fillTransactions(work, genParams.transactions, interrupt) - if fillTxErr != nil && errors.Is(fillTxErr, errBlockInterruptedByTimeout) { - log.Warn("Block building is interrupted", "allowance", common.PrettyDuration(w.newBlockTimeout)) - } - - if work.accRows == nil { - log.Trace( - "Worker apply ccc for empty block", - "id", w.circuitCapacityChecker.ID, - "number", work.header.Number, - "hash", work.header.Hash().String(), - ) - var traces *types.BlockTrace - withTimer(l2CommitTraceTimer, func() { - traces, err = work.traceEnv.GetBlockTrace(types.NewBlockWithHeader(work.header)) - }) - if err != nil { - return - } - // truncate ExecutionResults&TxStorageTraces, because we declare their lengths with a dummy tx before; - // however, we need to clean it up for an empty block - traces.ExecutionResults = traces.ExecutionResults[:0] - traces.TxStorageTraces = traces.TxStorageTraces[:0] - var accRows *types.RowConsumption - withTimer(l2CommitCCCTimer, func() { - accRows, err = w.circuitCapacityChecker.ApplyBlock(traces) - }) - if err != nil { - return - } - log.Trace( - "Worker apply ccc for empty block result", - "id", w.circuitCapacityChecker.ID, - "number", work.header.Number, - "hash", work.header.Hash().String(), - "accRows", accRows, - ) - work.accRows = accRows - } - - block, finalizeErr := w.engine.FinalizeAndAssemble(w.chain, work.header, work.state, work.txs, nil, work.receipts) - if finalizeErr != nil { - err = 
finalizeErr - return - } - return block, work.state, work.receipts, work.accRows, skippedTxs, nil -} - -func (env *environment) discard() { - if env.state == nil { - return - } - env.state.StopPrefetcher() -} - -// getSealingBlockAndState sealing a new block based on parentHash. -func (w *worker) getSealingBlockAndState(parentHash common.Hash, timestamp time.Time, transactions types.Transactions) (*types.Block, *state.StateDB, types.Receipts, *types.RowConsumption, []*types.SkippedTransaction, error) { - interrupt := new(int32) - timer := time.AfterFunc(w.newBlockTimeout, func() { - atomic.StoreInt32(interrupt, commitInterruptTimeout) - }) - defer timer.Stop() - - req := &getWorkReq{ - interrupt: interrupt, - params: &generateParams{ - parentHash: parentHash, - timestamp: uint64(timestamp.Unix()), - transactions: transactions, - }, - result: make(chan *newBlockResult, 1), - } - select { - case w.getWorkCh <- req: - result := <-req.result - return result.block, result.state, result.receipts, result.rowConsumption, result.skippedTxs, result.err - case <-w.exitCh: - return nil, nil, nil, nil, nil, errors.New("miner closed") - } -} - -func (w *worker) simulateL1Messages(genParams *generateParams, transactions types.Transactions) ([]*types.Transaction, []*types.SkippedTransaction, error) { - if transactions.Len() == 0 { - return nil, nil, nil - } - - env, err := w.prepareWork(genParams) - if err != nil { - return nil, nil, err - } - env.isSimulate = true - - l1Txs := make(map[common.Address]types.Transactions) - for _, tx := range transactions { - sender, _ := types.Sender(env.signer, tx) - senderTxs, ok := l1Txs[sender] - if ok { - senderTxs = append(senderTxs, tx) - l1Txs[sender] = senderTxs - } else { - l1Txs[sender] = types.Transactions{tx} - } - } - - txs := types.NewTransactionsByPriceAndNonce(env.signer, l1Txs, env.header.BaseFee) - _, _, skippedTxs := w.commitTransactions(env, txs, env.header.Coinbase, nil) - - return env.txs, skippedTxs, nil -} diff --git 
a/miner/worker_test.go b/miner/worker_test.go deleted file mode 100644 index 23572fc57..000000000 --- a/miner/worker_test.go +++ /dev/null @@ -1,531 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package miner - -import ( - "math" - "math/big" - "math/rand" - "sync/atomic" - "testing" - "time" - - "github.com/scroll-tech/go-ethereum/accounts" - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/consensus" - "github.com/scroll-tech/go-ethereum/consensus/clique" - "github.com/scroll-tech/go-ethereum/consensus/ethash" - "github.com/scroll-tech/go-ethereum/core" - "github.com/scroll-tech/go-ethereum/core/rawdb" - "github.com/scroll-tech/go-ethereum/core/types" - "github.com/scroll-tech/go-ethereum/core/vm" - "github.com/scroll-tech/go-ethereum/crypto" - "github.com/scroll-tech/go-ethereum/ethdb" - "github.com/scroll-tech/go-ethereum/event" - "github.com/scroll-tech/go-ethereum/params" -) - -const ( - // testCode is the testing contract binary code which 
will initialises some - // variables in constructor - testCode = "0x60806040527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0060005534801561003457600080fd5b5060fc806100436000396000f3fe6080604052348015600f57600080fd5b506004361060325760003560e01c80630c4dae8814603757806398a213cf146053575b600080fd5b603d607e565b6040518082815260200191505060405180910390f35b607c60048036036020811015606757600080fd5b81019080803590602001909291905050506084565b005b60005481565b806000819055507fe9e44f9f7da8c559de847a3232b57364adc0354f15a2cd8dc636d54396f9587a6000546040518082815260200191505060405180910390a15056fea265627a7a723058208ae31d9424f2d0bc2a3da1a5dd659db2d71ec322a17db8f87e19e209e3a1ff4a64736f6c634300050a0032" - - // testGas is the gas required for contract deployment. - testGas = 144109 -) - -var ( - // Test chain configurations - testTxPoolConfig core.TxPoolConfig - ethashChainConfig *params.ChainConfig - cliqueChainConfig *params.ChainConfig - - // Test accounts - testBankKey, _ = crypto.GenerateKey() - testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey) - testBankFunds = big.NewInt(1000000000000000000) - - testUserKey, _ = crypto.GenerateKey() - testUserAddress = crypto.PubkeyToAddress(testUserKey.PublicKey) - - // Test transactions - pendingTxs []*types.Transaction - newTxs []*types.Transaction - - testConfig = &Config{ - Recommit: time.Second, - GasCeil: params.GenesisGasLimit, - MaxAccountsNum: math.MaxInt, - } -) - -func init() { - testTxPoolConfig = core.DefaultTxPoolConfig - testTxPoolConfig.Journal = "" - ethashChainConfig = new(params.ChainConfig) - *ethashChainConfig = *params.TestChainConfig - cliqueChainConfig = new(params.ChainConfig) - *cliqueChainConfig = *params.TestChainConfig - cliqueChainConfig.Clique = ¶ms.CliqueConfig{ - Period: 10, - Epoch: 30000, - } - - signer := types.LatestSigner(params.TestChainConfig) - tx1 := types.MustSignNewTx(testBankKey, signer, &types.AccessListTx{ - ChainID: params.TestChainConfig.ChainID, - Nonce: 0, - To: 
&testUserAddress, - Value: big.NewInt(1000), - Gas: params.TxGas, - GasPrice: big.NewInt(params.InitialBaseFee), - }) - pendingTxs = append(pendingTxs, tx1) - - tx2 := types.MustSignNewTx(testBankKey, signer, &types.LegacyTx{ - Nonce: 1, - To: &testUserAddress, - Value: big.NewInt(1000), - Gas: params.TxGas, - GasPrice: big.NewInt(params.InitialBaseFee), - }) - newTxs = append(newTxs, tx2) - - rand.Seed(time.Now().UnixNano()) -} - -// testWorkerBackend implements worker.Backend interfaces and wraps all information needed during the testing. -type testWorkerBackend struct { - db ethdb.Database - txPool *core.TxPool - chain *core.BlockChain - testTxFeed event.Feed - genesis *core.Genesis - uncleBlock *types.Block -} - -func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, n int) *testWorkerBackend { - var gspec = core.Genesis{ - Config: chainConfig, - Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}}, - } - - switch e := engine.(type) { - case *clique.Clique: - gspec.ExtraData = make([]byte, 32+common.AddressLength+crypto.SignatureLength) - gspec.Timestamp = uint64(time.Now().Unix()) - copy(gspec.ExtraData[32:32+common.AddressLength], testBankAddress.Bytes()) - e.Authorize(testBankAddress, func(account accounts.Account, s string, data []byte) ([]byte, error) { - return crypto.Sign(crypto.Keccak256(data), testBankKey) - }) - case *ethash.Ethash: - default: - t.Fatalf("unexpected consensus engine type: %T", engine) - } - genesis := gspec.MustCommit(db) - - chain, _ := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec.Config, engine, vm.Config{ - Debug: true, - Tracer: vm.NewStructLogger(&vm.LogConfig{EnableMemory: true})}, nil, nil) - txpool := core.NewTxPool(testTxPoolConfig, chainConfig, chain) - - // Generate a small n-block chain and an uncle block for it - if n > 0 { - blocks, _ := core.GenerateChain(chainConfig, genesis, engine, db, n, func(i int, gen 
*core.BlockGen) { - gen.SetCoinbase(testBankAddress) - }) - if _, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("failed to insert origin chain: %v", err) - } - } - parent := genesis - if n > 0 { - parent = chain.GetBlockByHash(chain.CurrentBlock().ParentHash()) - } - blocks, _ := core.GenerateChain(chainConfig, parent, engine, db, 1, func(i int, gen *core.BlockGen) { - gen.SetCoinbase(testUserAddress) - }) - - return &testWorkerBackend{ - db: db, - chain: chain, - txPool: txpool, - genesis: &gspec, - uncleBlock: blocks[0], - } -} - -func (b *testWorkerBackend) BlockChain() *core.BlockChain { return b.chain } -func (b *testWorkerBackend) TxPool() *core.TxPool { return b.txPool } -func (b *testWorkerBackend) ChainDb() ethdb.Database { return b.db } - -func (b *testWorkerBackend) newRandomUncle() *types.Block { - var parent *types.Block - cur := b.chain.CurrentBlock() - if cur.NumberU64() == 0 { - parent = b.chain.Genesis() - } else { - parent = b.chain.GetBlockByHash(b.chain.CurrentBlock().ParentHash()) - } - blocks, _ := core.GenerateChain(b.chain.Config(), parent, b.chain.Engine(), b.db, 1, func(i int, gen *core.BlockGen) { - var addr = make([]byte, common.AddressLength) - rand.Read(addr) - gen.SetCoinbase(common.BytesToAddress(addr)) - }) - return blocks[0] -} - -func (b *testWorkerBackend) newRandomTx(creation bool) *types.Transaction { - var tx *types.Transaction - gasPrice := big.NewInt(10 * params.InitialBaseFee) - if creation { - tx, _ = types.SignTx(types.NewContractCreation(b.txPool.Nonce(testBankAddress), big.NewInt(0), testGas, gasPrice, common.FromHex(testCode)), types.HomesteadSigner{}, testBankKey) - } else { - tx, _ = types.SignTx(types.NewTransaction(b.txPool.Nonce(testBankAddress), testUserAddress, big.NewInt(1000), params.TxGas, gasPrice, nil), types.HomesteadSigner{}, testBankKey) - } - return tx -} - -func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int) (*worker, 
*testWorkerBackend) { - backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks) - backend.txPool.AddLocals(pendingTxs) - w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false) - w.setEtherbase(testBankAddress) - return w, backend -} - -func TestGenerateBlockAndImportEthash(t *testing.T) { - testGenerateBlockAndImport(t, false) -} - -func TestGenerateBlockAndImportClique(t *testing.T) { - testGenerateBlockAndImport(t, true) -} - -func testGenerateBlockAndImport(t *testing.T, isClique bool) { - var ( - engine consensus.Engine - chainConfig *params.ChainConfig - db = rawdb.NewMemoryDatabase() - ) - if isClique { - chainConfig = params.AllCliqueProtocolChanges - chainConfig.Clique = ¶ms.CliqueConfig{Period: 1, Epoch: 30000} - engine = clique.New(chainConfig.Clique, db) - } else { - chainConfig = params.AllEthashProtocolChanges - engine = ethash.NewFaker() - } - - chainConfig.LondonBlock = big.NewInt(0) - w, b := newTestWorker(t, chainConfig, engine, db, 0) - defer w.close() - - // This test chain imports the mined blocks. - db2 := rawdb.NewMemoryDatabase() - b.genesis.MustCommit(db2) - chain, _ := core.NewBlockChain(db2, nil, b.chain.Config(), engine, vm.Config{ - Debug: true, - Tracer: vm.NewStructLogger(&vm.LogConfig{EnableMemory: true, EnableReturnData: true})}, nil, nil) - defer chain.Stop() - - // Ignore empty commit here for less noise. - w.skipSealHook = func(task *task) bool { - return len(task.receipts) == 0 - } - - // Wait for mined blocks. - sub := w.mux.Subscribe(core.NewMinedBlockEvent{}) - defer sub.Unsubscribe() - - // Start mining! 
- w.start() - - for i := 0; i < 5; i++ { - b.txPool.AddLocal(b.newRandomTx(true)) - b.txPool.AddLocal(b.newRandomTx(false)) - w.postSideBlock(core.ChainSideEvent{Block: b.newRandomUncle()}) - w.postSideBlock(core.ChainSideEvent{Block: b.newRandomUncle()}) - - select { - case ev := <-sub.Chan(): - block := ev.Data.(core.NewMinedBlockEvent).Block - if _, err := chain.InsertChain([]*types.Block{block}); err != nil { - t.Fatalf("failed to insert new mined block %d: %v", block.NumberU64(), err) - } - case <-time.After(3 * time.Second): // Worker needs 1s to include new changes. - t.Fatalf("timeout") - } - } -} - -func TestEmptyWorkEthash(t *testing.T) { - testEmptyWork(t, ethashChainConfig, ethash.NewFaker()) -} -func TestEmptyWorkClique(t *testing.T) { - testEmptyWork(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, rawdb.NewMemoryDatabase())) -} - -func testEmptyWork(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) { - defer engine.Close() - - w, _ := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0) - defer w.close() - - var ( - taskIndex int - taskCh = make(chan struct{}, 2) - ) - checkEqual := func(t *testing.T, task *task, index int) { - // The first empty work without any txs included - receiptLen, balance := 0, big.NewInt(0) - if index == 1 { - // The second full work with 1 tx included - receiptLen, balance = 1, big.NewInt(1000) - } - if len(task.receipts) != receiptLen { - t.Fatalf("receipt number mismatch: have %d, want %d", len(task.receipts), receiptLen) - } - if task.state.GetBalance(testUserAddress).Cmp(balance) != 0 { - t.Fatalf("account balance mismatch: have %d, want %d", task.state.GetBalance(testUserAddress), balance) - } - } - w.newTaskHook = func(task *task) { - if task.block.NumberU64() == 1 { - checkEqual(t, task, taskIndex) - taskIndex += 1 - taskCh <- struct{}{} - } - } - w.skipSealHook = func(task *task) bool { return true } - w.fullTaskHook = func() { - time.Sleep(100 * time.Millisecond) - } 
- w.start() // Start mining! - for i := 0; i < 2; i += 1 { - select { - case <-taskCh: - case <-time.NewTimer(3 * time.Second).C: - t.Error("new task timeout") - } - } -} - -func TestStreamUncleBlock(t *testing.T) { - ethash := ethash.NewFaker() - defer ethash.Close() - - w, b := newTestWorker(t, ethashChainConfig, ethash, rawdb.NewMemoryDatabase(), 1) - defer w.close() - - var taskCh = make(chan struct{}) - - taskIndex := 0 - w.newTaskHook = func(task *task) { - if task.block.NumberU64() == 2 { - // The first task is an empty task, the second - // one has 1 pending tx, the third one has 1 tx - // and 1 uncle. - if taskIndex == 2 { - have := task.block.Header().UncleHash - want := types.CalcUncleHash([]*types.Header{b.uncleBlock.Header()}) - if have != want { - t.Errorf("uncle hash mismatch: have %s, want %s", have.Hex(), want.Hex()) - } - } - taskCh <- struct{}{} - taskIndex += 1 - } - } - w.skipSealHook = func(task *task) bool { - return true - } - w.fullTaskHook = func() { - time.Sleep(100 * time.Millisecond) - } - w.start() - - for i := 0; i < 2; i += 1 { - select { - case <-taskCh: - case <-time.NewTimer(time.Second).C: - t.Error("new task timeout") - } - } - - w.postSideBlock(core.ChainSideEvent{Block: b.uncleBlock}) - - select { - case <-taskCh: - case <-time.NewTimer(time.Second).C: - t.Error("new task timeout") - } -} - -func TestRegenerateMiningBlockEthash(t *testing.T) { - testRegenerateMiningBlock(t, ethashChainConfig, ethash.NewFaker()) -} - -func TestRegenerateMiningBlockClique(t *testing.T) { - testRegenerateMiningBlock(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, rawdb.NewMemoryDatabase())) -} - -func testRegenerateMiningBlock(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) { - defer engine.Close() - - w, b := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0) - defer w.close() - - var taskCh = make(chan struct{}) - - taskIndex := 0 - w.newTaskHook = func(task *task) { - if 
task.block.NumberU64() == 1 { - // The first task is an empty task, the second - // one has 1 pending tx, the third one has 2 txs - if taskIndex == 2 { - receiptLen, balance := 2, big.NewInt(2000) - if len(task.receipts) != receiptLen { - t.Errorf("receipt number mismatch: have %d, want %d", len(task.receipts), receiptLen) - } - if task.state.GetBalance(testUserAddress).Cmp(balance) != 0 { - t.Errorf("account balance mismatch: have %d, want %d", task.state.GetBalance(testUserAddress), balance) - } - } - taskCh <- struct{}{} - taskIndex += 1 - } - } - w.skipSealHook = func(task *task) bool { - return true - } - w.fullTaskHook = func() { - time.Sleep(100 * time.Millisecond) - } - - w.start() - // Ignore the first two works - for i := 0; i < 2; i += 1 { - select { - case <-taskCh: - case <-time.NewTimer(time.Second).C: - t.Error("new task timeout") - } - } - b.txPool.AddLocals(newTxs) - time.Sleep(time.Second) - - select { - case <-taskCh: - case <-time.NewTimer(time.Second).C: - t.Error("new task timeout") - } -} - -func TestAdjustIntervalEthash(t *testing.T) { - testAdjustInterval(t, ethashChainConfig, ethash.NewFaker()) -} - -func TestAdjustIntervalClique(t *testing.T) { - testAdjustInterval(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, rawdb.NewMemoryDatabase())) -} - -func testAdjustInterval(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) { - defer engine.Close() - - w, _ := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0) - defer w.close() - - w.skipSealHook = func(task *task) bool { - return true - } - w.fullTaskHook = func() { - time.Sleep(100 * time.Millisecond) - } - var ( - progress = make(chan struct{}, 10) - result = make([]float64, 0, 10) - index = 0 - start uint32 - ) - w.resubmitHook = func(minInterval time.Duration, recommitInterval time.Duration) { - // Short circuit if interval checking hasn't started. 
- if atomic.LoadUint32(&start) == 0 { - return - } - var wantMinInterval, wantRecommitInterval time.Duration - - switch index { - case 0: - wantMinInterval, wantRecommitInterval = 3*time.Second, 3*time.Second - case 1: - origin := float64(3 * time.Second.Nanoseconds()) - estimate := origin*(1-intervalAdjustRatio) + intervalAdjustRatio*(origin/0.8+intervalAdjustBias) - wantMinInterval, wantRecommitInterval = 3*time.Second, time.Duration(estimate)*time.Nanosecond - case 2: - estimate := result[index-1] - min := float64(3 * time.Second.Nanoseconds()) - estimate = estimate*(1-intervalAdjustRatio) + intervalAdjustRatio*(min-intervalAdjustBias) - wantMinInterval, wantRecommitInterval = 3*time.Second, time.Duration(estimate)*time.Nanosecond - case 3: - wantMinInterval, wantRecommitInterval = time.Second, time.Second - } - - // Check interval - if minInterval != wantMinInterval { - t.Errorf("resubmit min interval mismatch: have %v, want %v ", minInterval, wantMinInterval) - } - if recommitInterval != wantRecommitInterval { - t.Errorf("resubmit interval mismatch: have %v, want %v", recommitInterval, wantRecommitInterval) - } - result = append(result, float64(recommitInterval.Nanoseconds())) - index += 1 - progress <- struct{}{} - } - w.start() - - time.Sleep(time.Second) // Ensure two tasks have been summitted due to start opt - atomic.StoreUint32(&start, 1) - - w.setRecommitInterval(3 * time.Second) - select { - case <-progress: - case <-time.NewTimer(time.Second).C: - t.Error("interval reset timeout") - } - - w.resubmitAdjustCh <- &intervalAdjust{inc: true, ratio: 0.8} - select { - case <-progress: - case <-time.NewTimer(time.Second).C: - t.Error("interval reset timeout") - } - - w.resubmitAdjustCh <- &intervalAdjust{inc: false} - select { - case <-progress: - case <-time.NewTimer(time.Second).C: - t.Error("interval reset timeout") - } - - w.setRecommitInterval(500 * time.Millisecond) - select { - case <-progress: - case <-time.NewTimer(time.Second).C: - t.Error("interval 
reset timeout") - } -}