diff --git a/beacon/engine/gen_ed.go b/beacon/engine/gen_ed.go
index 0ae5a3b8f1..b2eb1dc982 100644
--- a/beacon/engine/gen_ed.go
+++ b/beacon/engine/gen_ed.go
@@ -34,6 +34,7 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
+ Deposits types.Deposits `json:"depositRequests"`
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
}
var enc ExecutableData
@@ -59,6 +60,7 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
enc.Withdrawals = e.Withdrawals
enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed)
enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas)
+ enc.Deposits = e.Deposits
enc.ExecutionWitness = e.ExecutionWitness
return json.Marshal(&enc)
}
@@ -83,6 +85,7 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
+ Deposits *types.Deposits `json:"depositRequests"`
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
}
var dec ExecutableData
@@ -157,6 +160,9 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
if dec.ExcessBlobGas != nil {
e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
}
+ if dec.Deposits != nil {
+ e.Deposits = *dec.Deposits
+ }
if dec.ExecutionWitness != nil {
e.ExecutionWitness = dec.ExecutionWitness
}
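Aside: because the new Deposits field carries no omitempty tag, the nil/empty distinction survives the JSON round trip. A minimal sketch of the resulting encoding (standard encoding/json behaviour, not code from this patch):

	// A nil deposit list encodes as "depositRequests": null, while an empty,
	// non-nil list encodes as "depositRequests": []. This is why the conversion
	// later in this change allocates an empty slice when requests are present.
	var data ExecutableData
	data.Deposits = nil                     // -> "depositRequests": null
	data.Deposits = make(types.Deposits, 0) // -> "depositRequests": []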
diff --git a/beacon/engine/types.go b/beacon/engine/types.go
index 8687a6f504..906056b0a1 100644
--- a/beacon/engine/types.go
+++ b/beacon/engine/types.go
@@ -76,6 +76,7 @@ type ExecutableData struct {
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *uint64 `json:"blobGasUsed"`
ExcessBlobGas *uint64 `json:"excessBlobGas"`
+ Deposits types.Deposits `json:"depositRequests"`
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
}
@@ -231,6 +232,19 @@ func ExecutableDataToBlock(data ExecutableData, versionedHashes []common.Hash, b
h := types.DeriveSha(types.Withdrawals(data.Withdrawals), trie.NewStackTrie(nil))
withdrawalsRoot = &h
}
+ // Compute requestsHash if any requests are non-nil.
+ var (
+ requestsHash *common.Hash
+ requests types.Requests
+ )
+ if data.Deposits != nil {
+ requests = make(types.Requests, 0)
+ for _, d := range data.Deposits {
+ requests = append(requests, types.NewRequest(d))
+ }
+ h := types.DeriveSha(requests, trie.NewStackTrie(nil))
+ requestsHash = &h
+ }
header := &types.Header{
ParentHash: data.ParentHash,
UncleHash: types.EmptyUncleHash,
@@ -251,9 +265,10 @@ func ExecutableDataToBlock(data ExecutableData, versionedHashes []common.Hash, b
ExcessBlobGas: data.ExcessBlobGas,
BlobGasUsed: data.BlobGasUsed,
ParentBeaconRoot: beaconRoot,
+ RequestsHash: requestsHash,
}
block := types.NewBlockWithHeader(header)
- block = block.WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals})
+ block = block.WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals, Requests: requests})
block = block.WithWitness(data.ExecutionWitness)
if block.Hash() != data.BlockHash {
return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", data.BlockHash, block.Hash())
@@ -296,13 +311,30 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.
bundle.Proofs = append(bundle.Proofs, hexutil.Bytes(sidecar.Proofs[j][:]))
}
}
+ setRequests(block.Requests(), data)
return &ExecutionPayloadEnvelope{ExecutionPayload: data, BlockValue: fees, BlobsBundle: &bundle, Override: false}
}
-// ExecutionPayloadBodyV1 is used in the response to GetPayloadBodiesByHashV1 and GetPayloadBodiesByRangeV1
-type ExecutionPayloadBodyV1 struct {
+// setRequests differentiates the request types and assigns them to their
+// corresponding fields in ExecutableData.
+func setRequests(requests types.Requests, data *ExecutableData) {
+ if requests != nil {
+ // If requests is non-nil, deposits are available in the block, so we return
+ // an empty slice instead of nil when there are no deposits.
+ data.Deposits = make(types.Deposits, 0)
+ }
+ for _, r := range requests {
+ if d, ok := r.Inner().(*types.Deposit); ok {
+ data.Deposits = append(data.Deposits, d)
+ }
+ }
+}
+
+// ExecutionPayloadBody is used in the response to GetPayloadBodiesByHash and GetPayloadBodiesByRange
+type ExecutionPayloadBody struct {
TransactionData []hexutil.Bytes `json:"transactions"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
+ Deposits types.Deposits `json:"depositRequests"`
}
// Client identifiers to support ClientVersionV1.
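Aside: the Deposits field of ExecutableData and the request list in the block body are two views of the same data. A minimal sketch of the forward mapping, using only helpers visible in this change (the function name is assumed, not part of the patch):

	// depositsToRequestsHash mirrors ExecutableDataToBlock above: each deposit
	// is wrapped in a generic request and the ordered list is hashed with the
	// same stack-trie derivation used for withdrawals.
	func depositsToRequestsHash(deposits types.Deposits) common.Hash {
		requests := make(types.Requests, 0, len(deposits))
		for _, d := range deposits {
			requests = append(requests, types.NewRequest(d))
		}
		return types.DeriveSha(requests, trie.NewStackTrie(nil))
	}

BlockToExecutableData goes the other way through setRequests, type-asserting each request's Inner() back to *types.Deposit.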
diff --git a/beacon/light/api/light_api.go b/beacon/light/api/light_api.go
index 45b425164f..91f66c08be 100755
--- a/beacon/light/api/light_api.go
+++ b/beacon/light/api/light_api.go
@@ -24,6 +24,7 @@ import (
"io"
"net/http"
"net/url"
+ "strconv"
"sync"
"time"
@@ -121,8 +122,8 @@ func NewBeaconLightApi(url string, customHeaders map[string]string) *BeaconLight
}
}
-func (api *BeaconLightApi) httpGet(path string) ([]byte, error) {
- uri, err := api.buildURL(path, nil)
+func (api *BeaconLightApi) httpGet(path string, params url.Values) ([]byte, error) {
+ uri, err := api.buildURL(path, params)
if err != nil {
return nil, err
}
@@ -150,17 +151,16 @@ func (api *BeaconLightApi) httpGet(path string) ([]byte, error) {
}
}
-func (api *BeaconLightApi) httpGetf(format string, params ...any) ([]byte, error) {
- return api.httpGet(fmt.Sprintf(format, params...))
-}
-
// GetBestUpdatesAndCommittees fetches and validates LightClientUpdate for given
// period and full serialized committee for the next period (committee root hash
// equals update.NextSyncCommitteeRoot).
// Note that the results are validated but the update signature should be verified
// by the caller as its validity depends on the update chain.
func (api *BeaconLightApi) GetBestUpdatesAndCommittees(firstPeriod, count uint64) ([]*types.LightClientUpdate, []*types.SerializedSyncCommittee, error) {
- resp, err := api.httpGetf("/eth/v1/beacon/light_client/updates?start_period=%d&count=%d", firstPeriod, count)
+ resp, err := api.httpGet("/eth/v1/beacon/light_client/updates", map[string][]string{
+ "start_period": {strconv.FormatUint(firstPeriod, 10)},
+ "count": {strconv.FormatUint(count, 10)},
+ })
if err != nil {
return nil, nil, err
}
@@ -197,7 +197,7 @@ func (api *BeaconLightApi) GetBestUpdatesAndCommittees(firstPeriod, count uint64
// See data structure definition here:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
func (api *BeaconLightApi) GetOptimisticUpdate() (types.OptimisticUpdate, error) {
- resp, err := api.httpGet("/eth/v1/beacon/light_client/optimistic_update")
+ resp, err := api.httpGet("/eth/v1/beacon/light_client/optimistic_update", nil)
if err != nil {
return types.OptimisticUpdate{}, err
}
@@ -250,7 +250,7 @@ func decodeOptimisticUpdate(enc []byte) (types.OptimisticUpdate, error) {
// See data structure definition here:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
func (api *BeaconLightApi) GetFinalityUpdate() (types.FinalityUpdate, error) {
- resp, err := api.httpGet("/eth/v1/beacon/light_client/finality_update")
+ resp, err := api.httpGet("/eth/v1/beacon/light_client/finality_update", nil)
if err != nil {
return types.FinalityUpdate{}, err
}
@@ -316,7 +316,7 @@ func (api *BeaconLightApi) GetHeader(blockRoot common.Hash) (types.Header, bool,
} else {
blockId = blockRoot.Hex()
}
- resp, err := api.httpGetf("/eth/v1/beacon/headers/%s", blockId)
+ resp, err := api.httpGet(fmt.Sprintf("/eth/v1/beacon/headers/%s", blockId), nil)
if err != nil {
return types.Header{}, false, false, err
}
@@ -347,7 +347,7 @@ func (api *BeaconLightApi) GetHeader(blockRoot common.Hash) (types.Header, bool,
// GetCheckpointData fetches and validates bootstrap data belonging to the given checkpoint.
func (api *BeaconLightApi) GetCheckpointData(checkpointHash common.Hash) (*types.BootstrapData, error) {
- resp, err := api.httpGetf("/eth/v1/beacon/light_client/bootstrap/0x%x", checkpointHash[:])
+ resp, err := api.httpGet(fmt.Sprintf("/eth/v1/beacon/light_client/bootstrap/0x%x", checkpointHash[:]), nil)
if err != nil {
return nil, err
}
@@ -389,7 +389,7 @@ func (api *BeaconLightApi) GetCheckpointData(checkpointHash common.Hash) (*types
}
func (api *BeaconLightApi) GetBeaconBlock(blockRoot common.Hash) (*types.BeaconBlock, error) {
- resp, err := api.httpGetf("/eth/v2/beacon/blocks/0x%x", blockRoot)
+ resp, err := api.httpGet(fmt.Sprintf("/eth/v2/beacon/blocks/0x%x", blockRoot), nil)
if err != nil {
return nil, err
}
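Aside: with the url.Values-based httpGet, query strings are encoded by net/url rather than hand-formatted with fmt.Sprintf; path segments are still rendered with fmt.Sprintf and passed as the path argument. A minimal sketch with illustrative values (not part of the patch):

	// url.Values.Encode sorts keys, so the query string is deterministic.
	params := url.Values{
		"start_period": {strconv.FormatUint(289, 10)},
		"count":        {strconv.FormatUint(4, 10)},
	}
	uri := "/eth/v1/beacon/light_client/updates?" + params.Encode()
	// uri == "/eth/v1/beacon/light_client/updates?count=4&start_period=289"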
diff --git a/build/checksums.txt b/build/checksums.txt
index 3fed6625ed..06de819c70 100644
--- a/build/checksums.txt
+++ b/build/checksums.txt
@@ -5,55 +5,56 @@
# https://github.com/ethereum/execution-spec-tests/releases/download/v2.1.0/
ca89c76851b0900bfcc3cbb9a26cbece1f3d7c64a3bed38723e914713290df6c fixtures_develop.tar.gz
-# version:golang 1.23.0
+# version:golang 1.23.1
# https://go.dev/dl/
-42b7a8e80d805daa03022ed3fde4321d4c3bf2c990a144165d01eeecd6f699c6 go1.23.0.src.tar.gz
-257f8560bb4001fb81a5e0ee84f32fecbe18d4450343c9556557d296786847b6 go1.23.0.aix-ppc64.tar.gz
-bc91d2573939a01731413fac0884c329606c1c168883692131ce772669caf27b go1.23.0.darwin-amd64.pkg
-ffd070acf59f054e8691b838f274d540572db0bd09654af851e4e76ab88403dc go1.23.0.darwin-amd64.tar.gz
-d73ae741ed449ea842238f76f4b02935277eb867689f84ace0640965b2caf700 go1.23.0.darwin-arm64.pkg
-b770812aef17d7b2ea406588e2b97689e9557aac7e646fe76218b216e2c51406 go1.23.0.darwin-arm64.tar.gz
-8fd2ab5ac8629fc97d25a056693e23f332446603dd3c2b764ccb496872004b0c go1.23.0.dragonfly-amd64.tar.gz
-2c9b76ead3c44f5b3e40e10b980075addb837f2dd05dafe7c0e4c611fd239753 go1.23.0.freebsd-386.tar.gz
-2c2252902b87ba605fdc0b12b4c860fe6553c0c5483c12cc471756ebdd8249fe go1.23.0.freebsd-amd64.tar.gz
-8ec48b8d99a515644ae00e79d093ad3b7645dcaf2a19c0a9c0d97916187f4514 go1.23.0.freebsd-arm.tar.gz
-f476bbe8efb0db18155671840545370bfb73903fec04ea897d510569dab16d9c go1.23.0.freebsd-arm64.tar.gz
-b0e254b2ea5752b4f1c69934ae43a44bbabf98e0c2843af44e1b6d12390eb551 go1.23.0.freebsd-riscv64.tar.gz
-09716dcc7a2e19891b3d1e2ea68a1aab22838fc664cdc5f82d5f8eef05db78cf go1.23.0.illumos-amd64.tar.gz
-0e8a7340c2632e6fb5088d60f95b52be1f8303143e04cd34e9b2314fafc24edd go1.23.0.linux-386.tar.gz
-905a297f19ead44780548933e0ff1a1b86e8327bb459e92f9c0012569f76f5e3 go1.23.0.linux-amd64.tar.gz
-62788056693009bcf7020eedc778cdd1781941c6145eab7688bd087bce0f8659 go1.23.0.linux-arm64.tar.gz
-0efa1338e644d7f74064fa7f1016b5da7872b2df0070ea3b56e4fef63192e35b go1.23.0.linux-armv6l.tar.gz
-dc8f723ce1a236e85c8b56d1e6749e270314e99dd41b80a58355e7ffcf9ea857 go1.23.0.linux-loong64.tar.gz
-3332cc76c73c05b3413cdecccffc29aaa3469f87db8ed9f9b784ebb527ca5352 go1.23.0.linux-mips.tar.gz
-0ed5cee92433d09fd0816ec5adfbf4b16d712944e833f6342bbe2df18f7826ae go1.23.0.linux-mips64.tar.gz
-06a579dd6d1f9a84bc43cab063e7c759a92a6d4dd01fec3d860f22a32df93406 go1.23.0.linux-mips64le.tar.gz
-d522770d32d6ee963f61331a695c4f8a730f2445b965d8d56db0a2e75c62af57 go1.23.0.linux-mipsle.tar.gz
-8c884cb4f2593d897f58ec1b0f23f303acf5c78fd101e76cb48d6cb1fe5e90e7 go1.23.0.linux-ppc64.tar.gz
-8b26e20d4d43a4d7641cddbdc0298d7ba3804d910a9e06cda7672970dbf2829d go1.23.0.linux-ppc64le.tar.gz
-a87726205f1a283247f877ccae8ce147ff4e77ac802382647ac52256eb5642c7 go1.23.0.linux-riscv64.tar.gz
-003722971de02d97131a4dca2496abdab5cb175a6ee0ed9c8227c5ae9b883e69 go1.23.0.linux-s390x.tar.gz
-b203fa2354874c66c40d828e96a6cce1f4e4db192414050a600d0a09b16cafd3 go1.23.0.netbsd-386.tar.gz
-1502c82c3ba663959df99c2cc3ca5e7a5e1a75a1495fd26bef697d63bf1f291c go1.23.0.netbsd-amd64.tar.gz
-dd50c05c7f613522c8d3d74f598bfc1862c0fee9182b738225820c9b458c7be5 go1.23.0.netbsd-arm.tar.gz
-728a94a648f9502cd6175adaac2b770acde6b26f5f92dcbd8c5a1a43cc44bb10 go1.23.0.netbsd-arm64.tar.gz
-e1ff3584778257778a4e3f0093b09044072423aebedf2015a550537853c46745 go1.23.0.openbsd-386.tar.gz
-d2e30cdb0de256360b51a43f5e551587a7369d8c248120010d5e9432f698a6e8 go1.23.0.openbsd-amd64.tar.gz
-bd5224c8a5f195f4128c866c0d418f1b61db865a1042913fd07714ed85da28db go1.23.0.openbsd-arm.tar.gz
-fc0e0af3a1b4b7168455e8492a5bb6aa96ceaf46321cef1fc04187301c058890 go1.23.0.openbsd-arm64.tar.gz
-ce7ea9343c7c2ef2700b55b80c45549ce39d164031e4d7bb98bec7ca593ed93d go1.23.0.openbsd-ppc64.tar.gz
-93b970a8a41f6c89113daaea12e39f2580038af155e823550d0a94a5502c5e2c go1.23.0.plan9-386.tar.gz
-6231862acbb6c1e02b1455b35446b9789b0b4b3230d249953e6957c393a53011 go1.23.0.plan9-amd64.tar.gz
-632bdd3a1f84b2fe691203423dd2c3f536d4ab250bb52a48e9b05ebf327ae594 go1.23.0.plan9-arm.tar.gz
-16773f85003d9e610960f9af67e00bc6c02359d7914de7224079538cc9c1e93d go1.23.0.solaris-amd64.tar.gz
-803ef1d4f431d37ac8572ad9b0b65a4f945798208cd16b7f0588960e6b0949ba go1.23.0.windows-386.msi
-09448fedec0cdf98ad12397222e0c8bfc835b1d0894c0015ced653534b8d7427 go1.23.0.windows-386.zip
-93e1cf580893303d0f6ac10647335de9f0768199d7027d8a361639cae6ab3145 go1.23.0.windows-amd64.msi
-d4be481ef73079ee0ad46081d278923aa3fd78db1b3cf147172592f73e14c1ac go1.23.0.windows-amd64.zip
-2a361c94879258309343e88c5de5df17f6425df4d74bdf7e333b7298b29f6f29 go1.23.0.windows-arm.msi
-006d93712246a672bdb57906dd5bffcab62facc36169e51a27d52340cdac661f go1.23.0.windows-arm.zip
-a876ed2bb130d9146152aaf391638abd79dcb3a4f2e9cc59b78709dcef29ced3 go1.23.0.windows-arm64.msi
-0be62073ef8f5a2d3b9adcefddf18c417dab0a7975c71488ac2694856e2ff976 go1.23.0.windows-arm64.zip
+6ee44e298379d146a5e5aa6b1c5b5d5f5d0a3365eabdd70741e6e21340ec3b0d go1.23.1.src.tar.gz
+f17f2791717c15728ec63213a014e244c35f9c8846fb29f5a1b63d0c0556f756 go1.23.1.aix-ppc64.tar.gz
+dd9e772686ed908bcff94b6144322d4e2473a7dcd7c696b7e8b6d12f23c887fd go1.23.1.darwin-amd64.pkg
+488d9e4ca3e3ed513ee4edd91bef3a2360c65fa6d6be59cf79640bf840130a58 go1.23.1.darwin-amd64.tar.gz
+be34b488157ec69d94e26e1554558219a2c90789bcb7e3686965a7f9c8cfcbe7 go1.23.1.darwin-arm64.pkg
+e223795ca340e285a760a6446ce57a74500b30e57469a4109961d36184d3c05a go1.23.1.darwin-arm64.tar.gz
+6af626176923a6ae6c5de6dc1c864f38365793c0e4ecd0d6eab847bdc23953e5 go1.23.1.dragonfly-amd64.tar.gz
+cc957c1a019702e6cdc2e257202d42799011ebc1968b6c3bcd6b1965952607d5 go1.23.1.freebsd-386.tar.gz
+a7d57781c50bb80886a8f04066791956d45aa3eea0f83070c5268b6223afb2ff go1.23.1.freebsd-amd64.tar.gz
+c7b09f3fef456048e596db9bea746eb66796aeb82885622b0388feee18f36a3e go1.23.1.freebsd-arm.tar.gz
+b05cd6a77995a0c8439d88df124811c725fb78b942d0b6dd1643529d7ba62f1f go1.23.1.freebsd-arm64.tar.gz
+56236ae70be1613f2915943b94f53c96be5bffc0719314078facd778a89bc57e go1.23.1.freebsd-riscv64.tar.gz
+8644c52df4e831202114fd67c9fcaf1f7233ad27bf945ac53fa7217cf1a0349f go1.23.1.illumos-amd64.tar.gz
+cdee2f4e2efa001f7ee75c90f2efc310b63346cfbba7b549987e9139527c6b17 go1.23.1.linux-386.tar.gz
+49bbb517cfa9eee677e1e7897f7cf9cfdbcf49e05f61984a2789136de359f9bd go1.23.1.linux-amd64.tar.gz
+faec7f7f8ae53fda0f3d408f52182d942cc89ef5b7d3d9f23ff117437d4b2d2f go1.23.1.linux-arm64.tar.gz
+6c7832c7dcd8fb6d4eb308f672a725393403c74ee7be1aeccd8a443015df99de go1.23.1.linux-armv6l.tar.gz
+649ce3856ddc808c00b14a46232eab0bf95e7911cdf497010b17d76656f5ca4e go1.23.1.linux-loong64.tar.gz
+201911048f234e5a0c51ec94b1a11d4e47062fee4398b1d2faa6c820dc026724 go1.23.1.linux-mips.tar.gz
+2bce3743df463915e45d2612f9476ffb03d0b3750b1cb3879347de08715b5fc6 go1.23.1.linux-mips64.tar.gz
+54e301f266e33431b0703136e0bbd4cf02461b1ecedd37b7cbd90cb862a98e5f go1.23.1.linux-mips64le.tar.gz
+8efd495e93d17408c0803595cdc3bf13cb28e0f957aeabd9cc18245fb8e64019 go1.23.1.linux-mipsle.tar.gz
+52bd68689095831ad9af7160844c23b28bb8d0acd268de7e300ff5f0662b7a07 go1.23.1.linux-ppc64.tar.gz
+042888cae54b5fbfd9dd1e3b6bc4a5134879777fe6497fc4c62ec394b5ecf2da go1.23.1.linux-ppc64le.tar.gz
+1a4a609f0391bea202d9095453cbfaf7368fa88a04c206bf9dd715a738664dc3 go1.23.1.linux-riscv64.tar.gz
+47dc49ad45c45e192efa0df7dc7bc5403f5f2d15b5d0dc74ef3018154b616f4d go1.23.1.linux-s390x.tar.gz
+fbfbd5efa6a5d581ea7f5e65015f927db0e52135cab057e43d39d5482da54b61 go1.23.1.netbsd-386.tar.gz
+e96e1cc5cf36113ee6099d1a7306b22cd9c3f975a36bdff954c59f104f22b853 go1.23.1.netbsd-amd64.tar.gz
+c394dfc06bfc276a591209a37e09cd39089ec9a9cc3db30b94814ce2e39eb1d4 go1.23.1.netbsd-arm.tar.gz
+b3b35d64f32821a68b3e2994032dbefb81978f2ec3f218c7a770623b82d36b8e go1.23.1.netbsd-arm64.tar.gz
+3c775c4c16c182e33c2c4ac090d9a247a93b3fb18a3df01d87d490f29599faff go1.23.1.openbsd-386.tar.gz
+5edbe53b47c57b32707fd7154536fbe9eaa79053fea01650c93b54cdba13fc0f go1.23.1.openbsd-amd64.tar.gz
+c30903dd8fa98b8aca8e9db0962ce9f55502aed93e0ef41e5ae148aaa0088de1 go1.23.1.openbsd-arm.tar.gz
+12da183489e58f9c6b357bc1b626f85ed7d4220cab31a49d6a49e6ac6a718b67 go1.23.1.openbsd-arm64.tar.gz
+9cc9aad37696a4a10c31dcec9e35a308de0b369dad354d54cf07406ac6fa7c6f go1.23.1.openbsd-ppc64.tar.gz
+e1d740dda062ce5a276a0c3ed7d8b6353238bc8ff405f63e2e3480bfd26a5ec5 go1.23.1.openbsd-riscv64.tar.gz
+da2a37f9987f01f096859230aa13ecc4ad2e7884465bce91004bc78c64435d65 go1.23.1.plan9-386.tar.gz
+fd8fff8b0697d55c4a4d02a8dc998192b80a9dc2a057647373d6ff607cad29de go1.23.1.plan9-amd64.tar.gz
+52efbc5804c1c86ba7868aa8ebbc31cc8c2a27b62a60fd57944970d48fc67525 go1.23.1.plan9-arm.tar.gz
+f54205f21e2143f2ada1bf1c00ddf64590f5139d5c3fb77cc06175f0d8cc7567 go1.23.1.solaris-amd64.tar.gz
+369a17f0cfd29e5c848e58ffe0d772da20abe334d1c7ca01dbcd55bb3db0b440 go1.23.1.windows-386.msi
+ab866f47d7be56e6b1c67f1d529bf4c23331a339fb0785f435a0552d352cb257 go1.23.1.windows-386.zip
+e99dac215ee437b9bb8f8b14bbfe0e8756882c1ed291f30818e8363bc9c047a5 go1.23.1.windows-amd64.msi
+32dedf277c86610e380e1765593edb66876f00223df71690bd6be68ee17675c0 go1.23.1.windows-amd64.zip
+23169c79dc6b54e0dffb25be6b67425ad9759392a58309bc057430a9bf4c8f6a go1.23.1.windows-arm.msi
+1a57615a09f13534f88e9f2d7efd5743535d1a5719b19e520eef965a634f8efb go1.23.1.windows-arm.zip
+313e1a543931ad8735b4df8969e00f5f4c2ef07be21f54015ede961a70263d35 go1.23.1.windows-arm64.msi
+64ad0954d2c33f556fb1018d62de091254aa6e3a94f1c8a8b16af0d3701d194e go1.23.1.windows-arm64.zip
# version:golangci 1.59.0
# https://github.com/golangci/golangci-lint/releases/
diff --git a/build/ci.go b/build/ci.go
index bdc89082e7..2932d2c9ce 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -304,7 +304,7 @@ func doTest(cmdline []string) {
gotest := tc.Go("test")
// CI needs a bit more time for the statetests (default 10m).
- gotest.Args = append(gotest.Args, "-timeout=20m")
+ gotest.Args = append(gotest.Args, "-timeout=30m")
// Enable CKZG backend in CI.
gotest.Args = append(gotest.Args, "-tags=ckzg")
diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go
index b5cc27a2b5..5cb9fa0297 100644
--- a/cmd/devp2p/internal/ethtest/suite.go
+++ b/cmd/devp2p/internal/ethtest/suite.go
@@ -849,7 +849,16 @@ func (s *Suite) TestBlobViolations(t *utesting.T) {
if code, _, err := conn.Read(); err != nil {
t.Fatalf("expected disconnect on blob violation, got err: %v", err)
} else if code != discMsg {
- t.Fatalf("expected disconnect on blob violation, got msg code: %d", code)
+ if code == protoOffset(ethProto)+eth.NewPooledTransactionHashesMsg {
+ // Sometimes an announcement of blob transaction hashes arrives before the
+ // disconnect, because blob transactions are scheduled to be fetched right away.
+ if code, _, err = conn.Read(); err != nil {
+ t.Fatalf("expected disconnect on blob violation, got err on second read: %v", err)
+ }
+ }
+ if code != discMsg {
+ t.Fatalf("expected disconnect on blob violation, got msg code: %d", code)
+ }
}
conn.Close()
}
diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go
index d240d161a9..5fd1d6a4a6 100644
--- a/cmd/evm/internal/t8ntool/execution.go
+++ b/cmd/evm/internal/t8ntool/execution.go
@@ -66,6 +66,8 @@ type ExecutionResult struct {
WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"`
CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
CurrentBlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed,omitempty"`
+ RequestsHash *common.Hash `json:"requestsRoot,omitempty"`
+ DepositRequests *types.Deposits `json:"depositRequests,omitempty"`
}
type ommer struct {
@@ -377,9 +379,31 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(&excessBlobGas)
execRs.CurrentBlobGasUsed = (*math.HexOrDecimal64)(&blobGasUsed)
}
+ if chainConfig.IsPrague(vmContext.BlockNumber, vmContext.Time) {
+ // Parse the requests from the logs
+ var allLogs []*types.Log
+ for _, receipt := range receipts {
+ allLogs = append(allLogs, receipt.Logs...)
+ }
+ requests, err := core.ParseDepositLogs(allLogs, chainConfig)
+ if err != nil {
+ return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not parse requests logs: %v", err))
+ }
+ // Calculate the requests root
+ h := types.DeriveSha(requests, trie.NewStackTrie(nil))
+ execRs.RequestsHash = &h
+ // Get the deposits from the requests
+ deposits := make(types.Deposits, 0)
+ for _, req := range requests {
+ if dep, ok := req.Inner().(*types.Deposit); ok {
+ deposits = append(deposits, dep)
+ }
+ }
+ execRs.DepositRequests = &deposits
+ }
// Re-create statedb instance with new root upon the updated database
// for accessing latest states.
- statedb, err = state.New(root, statedb.Database(), nil)
+ statedb, err = state.New(root, statedb.Database())
if err != nil {
return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not reopen state: %v", err))
}
@@ -388,8 +412,9 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
}
func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB {
- sdb := state.NewDatabaseWithConfig(db, &triedb.Config{Preimages: true})
- statedb, _ := state.New(types.EmptyRootHash, sdb, nil)
+ tdb := triedb.NewDatabase(db, &triedb.Config{Preimages: true})
+ sdb := state.NewDatabase(tdb, nil)
+ statedb, _ := state.New(types.EmptyRootHash, sdb)
for addr, a := range accounts {
statedb.SetCode(addr, a.Code)
statedb.SetNonce(addr, a.Nonce)
@@ -400,7 +425,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB
}
// Commit and re-open to start with a clean state.
root, _ := statedb.Commit(0, false)
- statedb, _ = state.New(root, sdb, nil)
+ statedb, _ = state.New(root, sdb)
return statedb
}
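Aside: the state-construction pattern applied throughout this change, in one place. A minimal sketch assuming an in-memory database (not part of the patch); the snapshot tree argument moves from state.New into state.NewDatabase:

	// Build the trie database first, wrap it (plus an optional snapshot tree)
	// in a state database, then open the state with just root and database.
	db := rawdb.NewMemoryDatabase()
	tdb := triedb.NewDatabase(db, &triedb.Config{Preimages: true})
	sdb := state.NewDatabase(tdb, nil) // nil: no snapshot tree
	statedb, err := state.New(types.EmptyRootHash, sdb)
	if err != nil {
		// handle the error
	}
	_ = statedb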
diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go
index c02f9f0590..235fed6630 100644
--- a/cmd/evm/runner.go
+++ b/cmd/evm/runner.go
@@ -155,8 +155,8 @@ func runCmd(ctx *cli.Context) error {
})
defer triedb.Close()
genesis := genesisConfig.MustCommit(db, triedb)
- sdb := state.NewDatabaseWithNodeDB(db, triedb)
- statedb, _ = state.New(genesis.Root(), sdb, nil)
+ sdb := state.NewDatabase(triedb, nil)
+ statedb, _ = state.New(genesis.Root(), sdb)
chainConfig = genesisConfig.Config
if ctx.String(SenderFlag.Name) != "" {
@@ -277,7 +277,7 @@ func runCmd(ctx *cli.Context) error {
fmt.Printf("Failed to commit changes %v\n", err)
return err
}
- dumpdb, err := state.New(root, sdb, nil)
+ dumpdb, err := state.New(root, sdb)
if err != nil {
fmt.Printf("Failed to open statedb %v\n", err)
return err
diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go
index fc2bf8223f..4514367e8a 100644
--- a/cmd/evm/staterunner.go
+++ b/cmd/evm/staterunner.go
@@ -107,7 +107,7 @@ func runStateTest(fname string, cfg vm.Config, dump bool) error {
result.Root = &root
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%#x\"}\n", root)
if dump { // Dump any state to aid debugging
- cpy, _ := state.New(root, tstate.StateDB.Database(), nil)
+ cpy, _ := state.New(root, tstate.StateDB.Database())
dump := cpy.RawDump(nil)
result.State = &dump
}
diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index ca2f00512b..018bca4a0f 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -584,7 +584,7 @@ func dump(ctx *cli.Context) error {
triedb := utils.MakeTrieDatabase(ctx, db, true, true, false) // always enable preimage lookup
defer triedb.Close()
- state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil)
+ state, err := state.New(root, state.NewDatabase(triedb, nil))
if err != nil {
return err
}
diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go
index fc66e11dca..791b3d1d20 100644
--- a/cmd/utils/cmd.go
+++ b/cmd/utils/cmd.go
@@ -262,7 +262,6 @@ func ImportHistory(chain *core.BlockChain, db ethdb.Database, dir string, networ
start = time.Now()
reported = time.Now()
imported = 0
- forker = core.NewForkChoice(chain, nil)
h = sha256.New()
buf = bytes.NewBuffer(nil)
)
@@ -305,7 +304,7 @@ func ImportHistory(chain *core.BlockChain, db ethdb.Database, dir string, networ
if err != nil {
return fmt.Errorf("error reading receipts %d: %w", it.Number(), err)
}
- if status, err := chain.HeaderChain().InsertHeaderChain([]*types.Header{block.Header()}, start, forker); err != nil {
+ if status, err := chain.HeaderChain().InsertHeaderChain([]*types.Header{block.Header()}, start); err != nil {
return fmt.Errorf("error inserting header %d: %w", it.Number(), err)
} else if status != core.CanonStatTy {
return fmt.Errorf("error inserting header %d, not canon: %v", it.Number(), status)
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 611189a2cc..f55c3b52d0 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -2210,7 +2210,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
}
}
// Disable transaction indexing/unindexing by default.
- chain, err := core.NewBlockChain(chainDb, cache, gspec, nil, engine, vmcfg, nil, nil)
+ chain, err := core.NewBlockChain(chainDb, cache, gspec, nil, engine, vmcfg, nil)
if err != nil {
Fatalf("Can't create BlockChain: %v", err)
}
diff --git a/cmd/utils/history_test.go b/cmd/utils/history_test.go
index a631eaf490..1074a358ec 100644
--- a/cmd/utils/history_test.go
+++ b/cmd/utils/history_test.go
@@ -78,7 +78,7 @@ func TestHistoryImportAndExport(t *testing.T) {
})
// Initialize BlockChain.
- chain, err := core.NewBlockChain(db, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ chain, err := core.NewBlockChain(db, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil)
if err != nil {
t.Fatalf("unable to initialize chain: %v", err)
}
@@ -171,7 +171,7 @@ func TestHistoryImportAndExport(t *testing.T) {
})
genesis.MustCommit(db2, triedb.NewDatabase(db, triedb.HashDefaults))
- imported, err := core.NewBlockChain(db2, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ imported, err := core.NewBlockChain(db2, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil)
if err != nil {
t.Fatalf("unable to initialize chain: %v", err)
}
diff --git a/consensus/clique/clique_test.go b/consensus/clique/clique_test.go
index f14c8cb5ba..bacd207493 100644
--- a/consensus/clique/clique_test.go
+++ b/consensus/clique/clique_test.go
@@ -55,7 +55,7 @@ func TestReimportMirroredState(t *testing.T) {
copy(genspec.ExtraData[extraVanity:], addr[:])
// Generate a batch of blocks, each properly signed
- chain, _ := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, genspec, nil, engine, vm.Config{}, nil, nil)
+ chain, _ := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, genspec, nil, engine, vm.Config{}, nil)
defer chain.Stop()
_, blocks, _ := core.GenerateChainWithGenesis(genspec, engine, 3, func(i int, block *core.BlockGen) {
@@ -87,7 +87,7 @@ func TestReimportMirroredState(t *testing.T) {
}
// Insert the first two blocks and make sure the chain is valid
db = rawdb.NewMemoryDatabase()
- chain, _ = core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil, nil)
+ chain, _ = core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil)
defer chain.Stop()
if _, err := chain.InsertChain(blocks[:2]); err != nil {
@@ -100,7 +100,7 @@ func TestReimportMirroredState(t *testing.T) {
// Simulate a crash by creating a new chain on top of the database, without
// flushing the dirty states out. Insert the last block, triggering a sidechain
// reimport.
- chain, _ = core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil, nil)
+ chain, _ = core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil)
defer chain.Stop()
if _, err := chain.InsertChain(blocks[2:]); err != nil {
diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go
index 4ef7a7b3ae..6c46d1db4f 100644
--- a/consensus/clique/snapshot_test.go
+++ b/consensus/clique/snapshot_test.go
@@ -458,7 +458,7 @@ func (tt *cliqueTest) run(t *testing.T) {
batches[len(batches)-1] = append(batches[len(batches)-1], block)
}
// Pass all the headers through clique and ensure tallying succeeds
- chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
+ chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create test chain: %v", err)
}
diff --git a/core/bench_test.go b/core/bench_test.go
index 97713868a5..639d36e9ae 100644
--- a/core/bench_test.go
+++ b/core/bench_test.go
@@ -195,7 +195,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
// Time the insertion of the new chain.
// State and blocks are stored in the same DB.
- chainman, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ chainman, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer chainman.Stop()
b.ReportAllocs()
b.ResetTimer()
@@ -312,7 +312,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
if err != nil {
b.Fatalf("error opening database at %v: %v", dir, err)
}
- chain, err := NewBlockChain(db, &cacheConfig, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(db, &cacheConfig, genesis, nil, ethash.NewFaker(), vm.Config{}, nil)
if err != nil {
b.Fatalf("error creating chain: %v", err)
}
diff --git a/core/block_validator.go b/core/block_validator.go
index 75f7f8a94b..a944db0bf8 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -121,14 +121,17 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
// ValidateState validates the various changes that happen after a state transition,
// such as amount of used gas, the receipt roots and the state root itself.
-func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, receipts types.Receipts, usedGas uint64, stateless bool) error {
+func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, res *ProcessResult, stateless bool) error {
+ if res == nil {
+ return fmt.Errorf("nil ProcessResult value")
+ }
header := block.Header()
- if block.GasUsed() != usedGas {
- return fmt.Errorf("invalid gas used (remote: %d local: %d)", block.GasUsed(), usedGas)
+ if block.GasUsed() != res.GasUsed {
+ return fmt.Errorf("invalid gas used (remote: %d local: %d)", block.GasUsed(), res.GasUsed)
}
// Validate the received block's bloom with the one derived from the generated receipts.
// For valid blocks this should always validate to true.
- rbloom := types.CreateBloom(receipts)
+ rbloom := types.CreateBloom(res.Receipts)
if rbloom != header.Bloom {
return fmt.Errorf("invalid bloom (remote: %x local: %x)", header.Bloom, rbloom)
}
@@ -138,10 +141,17 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
return nil
}
// The receipt Trie's root (R = (Tr [[H1, R1], ... [Hn, Rn]]))
- receiptSha := types.DeriveSha(receipts, trie.NewStackTrie(nil))
+ receiptSha := types.DeriveSha(res.Receipts, trie.NewStackTrie(nil))
if receiptSha != header.ReceiptHash {
return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash, receiptSha)
}
+ // Validate the parsed requests match the expected header value.
+ if header.RequestsHash != nil {
+ depositSha := types.DeriveSha(res.Requests, trie.NewStackTrie(nil))
+ if depositSha != *header.RequestsHash {
+ return fmt.Errorf("invalid deposit root hash (remote: %x local: %x)", *header.RequestsHash, depositSha)
+ }
+ }
// Validate the state root against the received state root and throw
// an error if they don't match.
if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
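Aside: the new caller contract, sketched from the core/blockchain.go hunks further down (not part of this file's patch); receipts, logs, gas used and requests now travel together in a *ProcessResult:

	res, err := bc.processor.Process(block, statedb, bc.vmConfig)
	if err != nil {
		return nil, err
	}
	if err := bc.validator.ValidateState(block, statedb, res, false); err != nil {
		return nil, err
	}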
diff --git a/core/block_validator_test.go b/core/block_validator_test.go
index c573ef91fa..16824467c1 100644
--- a/core/block_validator_test.go
+++ b/core/block_validator_test.go
@@ -50,7 +50,7 @@ func testHeaderVerification(t *testing.T, scheme string) {
headers[i] = block.Header()
}
// Run the header checker for blocks one-by-one, checking for both valid and invalid nonces
- chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer chain.Stop()
for i := 0; i < len(blocks); i++ {
@@ -160,7 +160,7 @@ func testHeaderVerificationForMerging(t *testing.T, isClique bool) {
postHeaders[i] = block.Header()
}
// Run the header checker for blocks one-by-one, checking for both valid and invalid nonces
- chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil)
defer chain.Stop()
// Verify the blocks before the merging
diff --git a/core/blockchain.go b/core/blockchain.go
index 5d21cc8d01..3683bf1d79 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -72,14 +72,11 @@ var (
storageUpdateTimer = metrics.NewRegisteredResettingTimer("chain/storage/updates", nil)
storageCommitTimer = metrics.NewRegisteredResettingTimer("chain/storage/commits", nil)
- snapshotAccountReadTimer = metrics.NewRegisteredResettingTimer("chain/snapshot/account/reads", nil)
- snapshotStorageReadTimer = metrics.NewRegisteredResettingTimer("chain/snapshot/storage/reads", nil)
- snapshotCommitTimer = metrics.NewRegisteredResettingTimer("chain/snapshot/commits", nil)
-
accountReadSingleTimer = metrics.NewRegisteredResettingTimer("chain/account/single/reads", nil)
storageReadSingleTimer = metrics.NewRegisteredResettingTimer("chain/storage/single/reads", nil)
- triedbCommitTimer = metrics.NewRegisteredResettingTimer("chain/triedb/commits", nil)
+ snapshotCommitTimer = metrics.NewRegisteredResettingTimer("chain/snapshot/commits", nil)
+ triedbCommitTimer = metrics.NewRegisteredResettingTimer("chain/triedb/commits", nil)
blockInsertTimer = metrics.NewRegisteredResettingTimer("chain/inserts", nil)
blockValidationTimer = metrics.NewRegisteredResettingTimer("chain/validation", nil)
@@ -220,7 +217,7 @@ type BlockChain struct {
lastWrite uint64 // Last block when the state was flushed
flushInterval atomic.Int64 // Time interval (processing time) after which to flush a state
triedb *triedb.Database // The database handler for maintaining trie nodes.
- stateCache state.Database // State database to reuse between imports (contains state cache)
+ statedb *state.CachingDB // State database to reuse between imports (contains state cache)
txIndexer *txIndexer // Transaction indexer, might be nil if not enabled
hc *HeaderChain
@@ -259,7 +256,6 @@ type BlockChain struct {
validator Validator // Block and state validator interface
prefetcher Prefetcher
processor Processor // Block transaction processor interface
- forker *ForkChoice
vmConfig vm.Config
logger *tracing.Hooks
}
@@ -267,7 +263,7 @@ type BlockChain struct {
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator
// and Processor.
-func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis, overrides *ChainOverrides, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(header *types.Header) bool, txLookupLimit *uint64) (*BlockChain, error) {
+func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis, overrides *ChainOverrides, engine consensus.Engine, vmConfig vm.Config, txLookupLimit *uint64) (*BlockChain, error) {
if cacheConfig == nil {
cacheConfig = defaultCacheConfig
}
@@ -312,8 +308,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
return nil, err
}
bc.flushInterval.Store(int64(cacheConfig.TrieTimeLimit))
- bc.forker = NewForkChoice(bc, shouldPreserve)
- bc.stateCache = state.NewDatabaseWithNodeDB(bc.db, bc.triedb)
+ bc.statedb = state.NewDatabase(bc.triedb, nil)
bc.validator = NewBlockValidator(chainConfig, bc)
bc.prefetcher = newStatePrefetcher(chainConfig, bc.hc)
bc.processor = NewStateProcessor(chainConfig, bc.hc)
@@ -451,7 +446,11 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
AsyncBuild: !bc.cacheConfig.SnapshotWait,
}
bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root)
+
+ // Re-initialize the state database with the snapshot
+ bc.statedb = state.NewDatabase(bc.triedb, bc.snaps)
}
+
// Rewind the chain in case of an incompatible config upgrade.
if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
log.Warn("Rewinding chain to upgrade configuration", "err", compat)
@@ -1243,13 +1242,6 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
// Rewind may have occurred, skip in that case.
if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 {
- reorg, err := bc.forker.ReorgNeeded(bc.CurrentSnapBlock(), head.Header())
- if err != nil {
- log.Warn("Reorg failed", "err", err)
- return false
- } else if !reorg {
- return false
- }
rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
bc.currentSnapBlock.Store(head.Header())
headFastBlockGauge.Update(int64(head.NumberU64()))
@@ -1548,42 +1540,30 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types
return NonStatTy, err
}
currentBlock := bc.CurrentBlock()
- reorg, err := bc.forker.ReorgNeeded(currentBlock, block.Header())
- if err != nil {
- return NonStatTy, err
- }
- if reorg {
- // Reorganise the chain if the parent is not the head block
- if block.ParentHash() != currentBlock.Hash() {
- if err := bc.reorg(currentBlock, block); err != nil {
- return NonStatTy, err
- }
+
+ // Reorganise the chain if the parent is not the head block
+ if block.ParentHash() != currentBlock.Hash() {
+ if err := bc.reorg(currentBlock, block); err != nil {
+ return NonStatTy, err
}
- status = CanonStatTy
- } else {
- status = SideStatTy
}
+
// Set new head.
- if status == CanonStatTy {
- bc.writeHeadBlock(block)
- }
- if status == CanonStatTy {
- bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
- if len(logs) > 0 {
- bc.logsFeed.Send(logs)
- }
- // In theory, we should fire a ChainHeadEvent when we inject
- // a canonical block, but sometimes we can insert a batch of
- // canonical blocks. Avoid firing too many ChainHeadEvents,
- // we will fire an accumulated ChainHeadEvent and disable fire
- // event here.
- if emitHeadEvent {
- bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
- }
- } else {
- bc.chainSideFeed.Send(ChainSideEvent{Block: block})
+ bc.writeHeadBlock(block)
+
+ bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
+ if len(logs) > 0 {
+ bc.logsFeed.Send(logs)
+ }
+ // In theory, we should fire a ChainHeadEvent when we inject
+ // a canonical block, but sometimes we can insert a batch of
+ // canonical blocks. To avoid firing too many ChainHeadEvents, we fire one
+ // accumulated ChainHeadEvent at the end and suppress the per-block event here.
+ if emitHeadEvent {
+ bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
}
- return status, nil
+ return CanonStatTy, nil
}
// InsertChain attempts to insert the given batch of blocks in to the canonical
@@ -1634,7 +1614,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
if bc.insertStopped() {
return 0, nil
}
-
// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
SenderCacher.RecoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number(), chain[0].Time()), chain)
@@ -1667,24 +1646,10 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
// 2. The block is stored as a sidechain, and is lying about its stateroot, and passes a stateroot
// from the canonical chain, which has not been verified.
// Skip all known blocks that are behind us.
- var (
- reorg bool
- current = bc.CurrentBlock()
- )
+ current := bc.CurrentBlock()
for block != nil && bc.skipBlock(err, it) {
- reorg, err = bc.forker.ReorgNeeded(current, block.Header())
- if err != nil {
- return it.index, err
- }
- if reorg {
- // Switch to import mode if the forker says the reorg is necessary
- // and also the block is not on the canonical chain.
- // In eth2 the forker always returns true for reorg decision (blindly trusting
- // the external consensus engine), but in order to prevent the unnecessary
- // reorgs when importing known blocks, the special case is handled here.
- if block.NumberU64() > current.Number.Uint64() || bc.GetCanonicalHash(block.NumberU64()) != block.Hash() {
- break
- }
+ if block.NumberU64() > current.Number.Uint64() || bc.GetCanonicalHash(block.NumberU64()) != block.Hash() {
+ break
}
log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash())
stats.ignored++
@@ -1803,7 +1768,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
if parent == nil {
parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
}
- statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
+ statedb, err := state.New(parent.Root, bc.statedb)
if err != nil {
return it.index, err
}
@@ -1829,7 +1794,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
var followupInterrupt atomic.Bool
if !bc.cacheConfig.TrieCleanNoPrefetch {
if followup, err := it.peek(); followup != nil && err == nil {
- throwaway, _ := state.New(parent.Root, bc.stateCache, bc.snaps)
+ throwaway, _ := state.New(parent.Root, bc.statedb)
go func(start time.Time, followup *types.Block, throwaway *state.StateDB) {
// Disable tracing for prefetcher executions.
@@ -1927,49 +1892,44 @@ func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, s
// Process block using the parent state as reference point
pstart := time.Now()
- receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
+ res, err := bc.processor.Process(block, statedb, bc.vmConfig)
if err != nil {
- bc.reportBlock(block, receipts, err)
+ bc.reportBlock(block, res, err)
return nil, err
}
ptime := time.Since(pstart)
vstart := time.Now()
- if err := bc.validator.ValidateState(block, statedb, receipts, usedGas, false); err != nil {
- bc.reportBlock(block, receipts, err)
+ if err := bc.validator.ValidateState(block, statedb, res, false); err != nil {
+ bc.reportBlock(block, res, err)
return nil, err
}
vtime := time.Since(vstart)
if witness := statedb.Witness(); witness != nil {
if err = bc.validator.ValidateWitness(witness, block.ReceiptHash(), block.Root()); err != nil {
- bc.reportBlock(block, receipts, err)
+ bc.reportBlock(block, res, err)
return nil, fmt.Errorf("cross verification failed: %v", err)
}
}
proctime := time.Since(start) // processing + validation
// Update the metrics touched during block processing and validation
- accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing)
- storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing)
- snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete(in processing)
- snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete(in processing)
-
- accountRead := statedb.SnapshotAccountReads + statedb.AccountReads // The time spent on account read
- storageRead := statedb.SnapshotStorageReads + statedb.StorageReads // The time spent on storage read
+ accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing)
+ storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing)
if statedb.AccountLoaded != 0 {
- accountReadSingleTimer.Update(accountRead / time.Duration(statedb.AccountLoaded))
+ accountReadSingleTimer.Update(statedb.AccountReads / time.Duration(statedb.AccountLoaded))
}
if statedb.StorageLoaded != 0 {
- storageReadSingleTimer.Update(storageRead / time.Duration(statedb.StorageLoaded))
+ storageReadSingleTimer.Update(statedb.StorageReads / time.Duration(statedb.StorageLoaded))
}
- accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation)
- storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation)
- accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation)
- triehash := statedb.AccountHashes // The time spent on tries hashing
- trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update
- blockExecutionTimer.Update(ptime - (accountRead + storageRead)) // The time spent on EVM processing
- blockValidationTimer.Update(vtime - (triehash + trieUpdate)) // The time spent on block validation
+ accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation)
+ storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation)
+ accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation)
+ triehash := statedb.AccountHashes // The time spent on tries hashing
+ trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update
+ blockExecutionTimer.Update(ptime - (statedb.AccountReads + statedb.StorageReads)) // The time spent on EVM processing
+ blockValidationTimer.Update(vtime - (triehash + trieUpdate)) // The time spent on block validation
// Write the block to the chain and get the status.
var (
@@ -1978,9 +1938,9 @@ func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, s
)
if !setHead {
// Don't set the head, only insert the block
- err = bc.writeBlockWithState(block, receipts, statedb)
+ err = bc.writeBlockWithState(block, res.Receipts, statedb)
} else {
- status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false)
+ status, err = bc.writeBlockAndSetHead(block, res.Receipts, res.Logs, statedb, false)
}
if err != nil {
return nil, err
@@ -1994,7 +1954,7 @@ func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, s
blockWriteTimer.Update(time.Since(wstart) - max(statedb.AccountCommits, statedb.StorageCommits) /* concurrent */ - statedb.SnapshotCommits - statedb.TrieDBCommits)
blockInsertTimer.UpdateSince(start)
- return &blockProcessingResult{usedGas: usedGas, procTime: proctime, status: status}, nil
+ return &blockProcessingResult{usedGas: res.GasUsed, procTime: proctime, status: status}, nil
}
// insertSideChain is called when an import batch hits upon a pruned ancestor
@@ -2006,9 +1966,8 @@ func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, s
// insertSideChain is only used pre-merge.
func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, error) {
var (
- externTd *big.Int
- lastBlock = block
- current = bc.CurrentBlock()
+ externTd *big.Int
+ current = bc.CurrentBlock()
)
// The first sidechain block error is already verified to be ErrPrunedAncestor.
// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
@@ -2059,22 +2018,6 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
"root", block.Root())
}
- lastBlock = block
- }
- // At this point, we've written all sidechain blocks to database. Loop ended
- // either on some other error or all were processed. If there was some other
- // error, we can ignore the rest of those blocks.
- //
- // If the externTd was larger than our local TD, we now need to reimport the previous
- // blocks to regenerate the required state
- reorg, err := bc.forker.ReorgNeeded(current, lastBlock.Header())
- if err != nil {
- return it.index, err
- }
- if !reorg {
- localTd := bc.GetTd(current.Hash(), current.Number.Uint64())
- log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
- return it.index, err
}
// Gather all the sidechain hashes (full blocks may be memory heavy)
var (
@@ -2491,7 +2434,11 @@ func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool {
}
// reportBlock logs a bad block error.
-func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
+func (bc *BlockChain) reportBlock(block *types.Block, res *ProcessResult, err error) {
+ var receipts types.Receipts
+ if res != nil {
+ receipts = res.Receipts
+ }
rawdb.WriteBadBlock(bc.db, block)
log.Error(summarizeBadBlock(block, receipts, bc.Config(), err))
}
@@ -2537,7 +2484,7 @@ func (bc *BlockChain) InsertHeaderChain(chain []*types.Header) (int, error) {
return 0, errChainStopped
}
defer bc.chainmu.Unlock()
- _, err := bc.hc.InsertHeaderChain(chain, start, bc.forker)
+ _, err := bc.hc.InsertHeaderChain(chain, start)
return 0, err
}
diff --git a/core/blockchain_insert.go b/core/blockchain_insert.go
index 49e913aada..ec3f771818 100644
--- a/core/blockchain_insert.go
+++ b/core/blockchain_insert.go
@@ -170,11 +170,6 @@ func (it *insertIterator) current() *types.Header {
return it.chain[it.index].Header()
}
-// first returns the first block in it.
-func (it *insertIterator) first() *types.Block {
- return it.chain[0]
-}
-
// remaining returns the number of remaining blocks.
func (it *insertIterator) remaining() int {
return len(it.chain) - it.index
diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go
index 8a85800dd8..6b8dffdcdc 100644
--- a/core/blockchain_reader.go
+++ b/core/blockchain_reader.go
@@ -308,7 +308,7 @@ func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
// HasState checks if state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool {
- _, err := bc.stateCache.OpenTrie(hash)
+ _, err := bc.statedb.OpenTrie(hash)
return err == nil
}
@@ -341,12 +341,9 @@ func (bc *BlockChain) stateRecoverable(root common.Hash) bool {
// If the code doesn't exist in the in-memory cache, check the storage with
// new code scheme.
func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) {
- type codeReader interface {
- ContractCodeWithPrefix(address common.Address, codeHash common.Hash) ([]byte, error)
- }
// TODO(rjl493456442) The associated account address is also required
// in Verkle scheme. Fix it once snap-sync is supported for Verkle.
- return bc.stateCache.(codeReader).ContractCodeWithPrefix(common.Address{}, hash)
+ return bc.statedb.ContractCodeWithPrefix(common.Address{}, hash)
}
// State returns a new mutable state based on the current HEAD block.
@@ -356,7 +353,7 @@ func (bc *BlockChain) State() (*state.StateDB, error) {
// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
- return state.New(root, bc.stateCache, bc.snaps)
+ return state.New(root, bc.statedb)
}
// Config retrieves the chain's fork configuration.
@@ -382,7 +379,7 @@ func (bc *BlockChain) Processor() Processor {
// StateCache returns the caching database underpinning the blockchain instance.
func (bc *BlockChain) StateCache() state.Database {
- return bc.stateCache
+ return bc.statedb
}
// GasLimit returns the gas limit of the current HEAD block.
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
index a4761f337b..07f154b4fa 100644
--- a/core/blockchain_repair_test.go
+++ b/core/blockchain_repair_test.go
@@ -1794,7 +1794,7 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
config.SnapshotLimit = 256
config.SnapshotWait = true
}
- chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("Failed to create chain: %v", err)
}
@@ -1859,7 +1859,7 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
}
defer db.Close()
- newChain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil)
+ newChain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -1931,7 +1931,7 @@ func testIssue23496(t *testing.T, scheme string) {
}
engine = ethash.NewFullFaker()
)
- chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("Failed to create chain: %v", err)
}
@@ -1981,7 +1981,7 @@ func testIssue23496(t *testing.T, scheme string) {
}
defer db.Close()
- chain, err = NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err = NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go
index 8b77f9f8b2..5a62d6b25f 100644
--- a/core/blockchain_sethead_test.go
+++ b/core/blockchain_sethead_test.go
@@ -1997,7 +1997,7 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme
config.SnapshotLimit = 256
config.SnapshotWait = true
}
- chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("Failed to create chain: %v", err)
}
@@ -2040,7 +2040,7 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme
dbconfig.HashDB = hashdb.Defaults
}
chain.triedb = triedb.NewDatabase(chain.db, dbconfig)
- chain.stateCache = state.NewDatabaseWithNodeDB(chain.db, chain.triedb)
+ chain.statedb = state.NewDatabase(chain.triedb, chain.snaps)
// Force run a freeze cycle
type freezer interface {
diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go
index 80f8035df1..45d33bb0a0 100644
--- a/core/blockchain_snapshot_test.go
+++ b/core/blockchain_snapshot_test.go
@@ -81,7 +81,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
}
engine = ethash.NewFullFaker()
)
- chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(basic.scheme), gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(basic.scheme), gspec, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("Failed to create chain: %v", err)
}
@@ -228,7 +228,7 @@ func (snaptest *snapshotTest) test(t *testing.T) {
// Restart the chain normally
chain.Stop()
- newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -270,13 +270,13 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) {
// the crash, we do restart twice here: one after the crash and one
// after the normal stop. It's used to ensure the broken snapshot
// can be detected all the time.
- newchain, err := NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err := NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
newchain.Stop()
- newchain, err = NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err = NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -313,7 +313,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) {
SnapshotLimit: 0,
StateScheme: snaptest.scheme,
}
- newchain, err := NewBlockChain(snaptest.db, cacheConfig, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err := NewBlockChain(snaptest.db, cacheConfig, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -321,7 +321,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) {
newchain.Stop()
// Restart the chain with enabling the snapshot
- newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -349,7 +349,7 @@ func (snaptest *setHeadSnapshotTest) test(t *testing.T) {
chain.SetHead(snaptest.setHead)
chain.Stop()
- newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -385,7 +385,7 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) {
SnapshotLimit: 0,
StateScheme: snaptest.scheme,
}
- newchain, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -402,7 +402,7 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) {
SnapshotWait: false, // Don't wait rebuild
StateScheme: snaptest.scheme,
}
- tmp, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+ tmp, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -411,7 +411,7 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) {
tmp.triedb.Close()
tmp.stopWithoutSaving()
- newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 4f28c6f5e6..d8c436fcd6 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -22,6 +22,7 @@ import (
"math/big"
"math/rand"
"os"
+ "path"
"sync"
"testing"
"time"
@@ -61,7 +62,7 @@ func newCanonical(engine consensus.Engine, n int, full bool, scheme string) (eth
}
)
// Initialize a fresh chain with only a genesis block
- blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil)
// Create and inject the requested chain
if n == 0 {
@@ -159,17 +160,18 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
}
return err
}
- statedb, err := state.New(blockchain.GetBlockByHash(block.ParentHash()).Root(), blockchain.stateCache, nil)
+ statedb, err := state.New(blockchain.GetBlockByHash(block.ParentHash()).Root(), blockchain.statedb)
if err != nil {
return err
}
- receipts, _, usedGas, err := blockchain.processor.Process(block, statedb, vm.Config{})
+ res, err := blockchain.processor.Process(block, statedb, vm.Config{})
if err != nil {
- blockchain.reportBlock(block, receipts, err)
+ blockchain.reportBlock(block, res, err)
return err
}
- if err = blockchain.validator.ValidateState(block, statedb, receipts, usedGas, false); err != nil {
- blockchain.reportBlock(block, receipts, err)
+ err = blockchain.validator.ValidateState(block, statedb, res, false)
+ if err != nil {
+ blockchain.reportBlock(block, res, err)
return err
}
@@ -762,7 +764,7 @@ func testFastVsFullChains(t *testing.T, scheme string) {
})
// Import the chain as an archive node for the comparison baseline
archiveDb := rawdb.NewMemoryDatabase()
- archive, _ := NewBlockChain(archiveDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ archive, _ := NewBlockChain(archiveDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer archive.Stop()
if n, err := archive.InsertChain(blocks); err != nil {
@@ -770,7 +772,7 @@ func testFastVsFullChains(t *testing.T, scheme string) {
}
// Fast import the chain as a non-archive node to test
fastDb := rawdb.NewMemoryDatabase()
- fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer fast.Stop()
headers := make([]*types.Header, len(blocks))
@@ -790,7 +792,7 @@ func testFastVsFullChains(t *testing.T, scheme string) {
}
defer ancientDb.Close()
- ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer ancient.Stop()
if n, err := ancient.InsertHeaderChain(headers); err != nil {
@@ -910,7 +912,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) {
archiveCaching.TrieDirtyDisabled = true
archiveCaching.StateScheme = scheme
- archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
if n, err := archive.InsertChain(blocks); err != nil {
t.Fatalf("failed to process block %d: %v", n, err)
}
@@ -923,7 +925,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) {
// Import the chain as a non-archive node and ensure all pointers are updated
fastDb := makeDb()
defer fastDb.Close()
- fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer fast.Stop()
headers := make([]*types.Header, len(blocks))
@@ -943,7 +945,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) {
// Import the chain as a ancient-first node and ensure all pointers are updated
ancientDb := makeDb()
defer ancientDb.Close()
- ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer ancient.Stop()
if n, err := ancient.InsertHeaderChain(headers); err != nil {
@@ -962,7 +964,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) {
// Import the chain as a light node and ensure all pointers are updated
lightDb := makeDb()
defer lightDb.Close()
- light, _ := NewBlockChain(lightDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ light, _ := NewBlockChain(lightDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
if n, err := light.InsertHeaderChain(headers); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err)
}
@@ -1035,7 +1037,7 @@ func testChainTxReorgs(t *testing.T, scheme string) {
})
// Import the chain. This runs all block validation rules.
db := rawdb.NewMemoryDatabase()
- blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
if i, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert original chain[%d]: %v", i, err)
}
@@ -1109,7 +1111,7 @@ func testLogReorgs(t *testing.T, scheme string) {
signer = types.LatestSigner(gspec.Config)
)
- blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop()
rmLogsCh := make(chan RemovedLogsEvent)
@@ -1165,7 +1167,7 @@ func testLogRebirth(t *testing.T, scheme string) {
gspec = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
signer = types.LatestSigner(gspec.Config)
engine = ethash.NewFaker()
- blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
+ blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil)
)
defer blockchain.Stop()
@@ -1246,7 +1248,7 @@ func testSideLogRebirth(t *testing.T, scheme string) {
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
gspec = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
signer = types.LatestSigner(gspec.Config)
- blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
)
defer blockchain.Stop()
@@ -1265,7 +1267,7 @@ func testSideLogRebirth(t *testing.T, scheme string) {
}
checkLogEvents(t, newLogCh, rmLogsCh, 0, 0)
- // Generate side chain with lower difficulty
+ // Generate side chain with lower difficulty. After the merge, it will be accepted even though its difficulty is lower.
genDb, sideChain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 2, func(i int, gen *BlockGen) {
if i == 1 {
tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, logCode), signer, key1)
@@ -1278,14 +1280,14 @@ func testSideLogRebirth(t *testing.T, scheme string) {
if _, err := blockchain.InsertChain(sideChain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
- checkLogEvents(t, newLogCh, rmLogsCh, 0, 0)
+ checkLogEvents(t, newLogCh, rmLogsCh, 1, 0)
- // Generate a new block based on side chain.
+ // Generate a new block based on side chain. Should not emit any events anymore.
newBlocks, _ := GenerateChain(gspec.Config, sideChain[len(sideChain)-1], ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {})
if _, err := blockchain.InsertChain(newBlocks); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
- checkLogEvents(t, newLogCh, rmLogsCh, 1, 0)
+ checkLogEvents(t, newLogCh, rmLogsCh, 0, 0)
}
func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan RemovedLogsEvent, wantNew, wantRemoved int) {
@@ -1345,7 +1347,7 @@ func testReorgSideEvent(t *testing.T, scheme string) {
}
signer = types.LatestSigner(gspec.Config)
)
- blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop()
_, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {})
@@ -1369,15 +1371,10 @@ func testReorgSideEvent(t *testing.T, scheme string) {
t.Fatalf("failed to insert chain: %v", err)
}
- // first two block of the secondary chain are for a brief moment considered
- // side chains because up to that point the first one is considered the
- // heavier chain.
expectedSideHashes := map[common.Hash]bool{
- replacementBlocks[0].Hash(): true,
- replacementBlocks[1].Hash(): true,
- chain[0].Hash(): true,
- chain[1].Hash(): true,
- chain[2].Hash(): true,
+ chain[0].Hash(): true,
+ chain[1].Hash(): true,
+ chain[2].Hash(): true,
}
i := 0
@@ -1402,7 +1399,7 @@ done:
timeout.Reset(timeoutDura)
case <-timeout.C:
- t.Fatal("Timeout. Possibly not all blocks were triggered for sideevent")
+ t.Fatalf("Timeout. Possibly not all blocks were triggered for sideevent: %v", i)
}
}
@@ -1529,7 +1526,7 @@ func testEIP155Transition(t *testing.T, scheme string) {
}
})
- blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop()
if _, err := blockchain.InsertChain(blocks); err != nil {
@@ -1622,7 +1619,7 @@ func testEIP161AccountRemoval(t *testing.T, scheme string) {
block.AddTx(tx)
})
// account must exist pre eip 161
- blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop()
if _, err := blockchain.InsertChain(types.Blocks{blocks[0]}); err != nil {
@@ -1680,7 +1677,7 @@ func testBlockchainHeaderchainReorgConsistency(t *testing.T, scheme string) {
}
// Import the canonical and fork chain side by side, verifying the current block
// and current header consistency
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1724,7 +1721,7 @@ func TestTrieForkGC(t *testing.T) {
forks[i] = fork[0]
}
// Import the canonical and fork chain side by side, forcing the trie cache to cache both
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1770,7 +1767,7 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) {
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
defer db.Close()
- chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1786,18 +1783,15 @@ func testLargeReorgTrieGC(t *testing.T, scheme string) {
if chain.HasState(shared[len(shared)-1].Root()) {
t.Fatalf("common-but-old ancestor still cache")
}
- // Import the competitor chain without exceeding the canonical's TD and ensure
- // we have not processed any of the blocks (protection against malicious blocks)
+ // Import the competitor chain without exceeding the canonical's TD.
+ // Post-merge, the side chain should be executed as well.
if _, err := chain.InsertChain(competitor[:len(competitor)-2]); err != nil {
t.Fatalf("failed to insert competitor chain: %v", err)
}
- for i, block := range competitor[:len(competitor)-2] {
- if chain.HasState(block.Root()) {
- t.Fatalf("competitor %d: low TD chain became processed", i)
- }
+ if !chain.HasState(competitor[len(competitor)-3].Root()) {
+ t.Fatalf("failed to insert low-TD chain")
}
- // Import the head of the competitor chain, triggering the reorg and ensure we
- // successfully reprocess all the stashed away blocks.
+ // Import the head of the competitor chain.
if _, err := chain.InsertChain(competitor[len(competitor)-2:]); err != nil {
t.Fatalf("failed to finalize competitor chain: %v", err)
}
@@ -1841,7 +1835,7 @@ func testBlockchainRecovery(t *testing.T, scheme string) {
t.Fatalf("failed to create temp freezer db: %v", err)
}
defer ancientDb.Close()
- ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
@@ -1861,7 +1855,7 @@ func testBlockchainRecovery(t *testing.T, scheme string) {
rawdb.WriteHeadFastBlockHash(ancientDb, midBlock.Hash())
// Reopen broken blockchain again
- ancient, _ = NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ ancient, _ = NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer ancient.Stop()
if num := ancient.CurrentBlock().Number.Uint64(); num != 0 {
t.Errorf("head block mismatch: have #%v, want #%v", num, 0)
@@ -1913,7 +1907,7 @@ func testInsertReceiptChainRollback(t *testing.T, scheme string) {
}
defer ancientDb.Close()
- ancientChain, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ ancientChain, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer ancientChain.Stop()
// Import the canonical header chain.
@@ -1980,7 +1974,7 @@ func testLowDiffLongChain(t *testing.T, scheme string) {
diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
defer diskdb.Close()
- chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2041,7 +2035,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
mergeBlock = math.MaxInt32
)
// Generate and import the canonical chain
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2195,7 +2189,7 @@ func testInsertKnownChainData(t *testing.T, typ string, scheme string) {
}
defer chaindb.Close()
- chain, err := NewBlockChain(chaindb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(chaindb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2279,10 +2273,10 @@ func testInsertKnownChainData(t *testing.T, typ string, scheme string) {
if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
t.Fatalf("failed to insert chain data: %v", err)
}
- // The head shouldn't change.
- asserter(t, blocks3[len(blocks3)-1])
+ // Post-merge, the head should change even if the total difficulty is lower.
+ asserter(t, blocks2[len(blocks2)-1])
- // Rollback the heavier chain and re-insert the longer chain again
+ // Rollback the heavier chain and re-insert the longer chain again.
chain.SetHead(rollback - 1)
if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
t.Fatalf("failed to insert chain data: %v", err)
@@ -2366,7 +2360,7 @@ func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight i
}
defer chaindb.Close()
- chain, err := NewBlockChain(chaindb, nil, genesis, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(chaindb, nil, genesis, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2480,7 +2474,7 @@ func getLongAndShortChains(scheme string) (*BlockChain, []*types.Block, []*types
genDb, longChain, _ := GenerateChainWithGenesis(genesis, engine, 80, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{1})
})
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil)
if err != nil {
return nil, nil, nil, nil, fmt.Errorf("failed to create tester chain: %v", err)
}
@@ -2656,7 +2650,7 @@ func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks in
b.ResetTimer()
for i := 0; i < b.N; i++ {
// Import the shared chain and the original canonical one
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil)
if err != nil {
b.Fatalf("failed to create tester chain: %v", err)
}
@@ -2743,7 +2737,21 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) {
// Generate and import the canonical chain
_, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*state.TriesInMemory, nil)
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil)
+ // Construct a database with freezer enabled
+ datadir := t.TempDir()
+ ancient := path.Join(datadir, "ancient")
+
+ db, err := rawdb.Open(rawdb.OpenOptions{
+ Directory: datadir,
+ AncientsDirectory: ancient,
+ Ephemeral: true,
+ })
+ if err != nil {
+ t.Fatalf("Failed to create persistent database: %v", err)
+ }
+ defer db.Close()
+
+ chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2770,7 +2778,6 @@ func testSideImportPrunedBlocks(t *testing.T, scheme string) {
if !chain.HasBlockAndState(firstNonPrunedBlock.Hash(), firstNonPrunedBlock.NumberU64()) {
t.Errorf("Block %d pruned", firstNonPrunedBlock.NumberU64())
}
- // Now re-import some old blocks
blockToReimport := blocks[5:8]
_, err = chain.InsertChain(blockToReimport)
if err != nil {
@@ -2843,7 +2850,7 @@ func testDeleteCreateRevert(t *testing.T, scheme string) {
b.AddTx(tx)
})
// Import the canonical chain
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2958,7 +2965,7 @@ func testDeleteRecreateSlots(t *testing.T, scheme string) {
// Import the canonical chain
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{
Tracer: logger.NewJSONLogger(nil, os.Stdout),
- }, nil, nil)
+ }, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -3040,7 +3047,7 @@ func testDeleteRecreateAccount(t *testing.T, scheme string) {
// Import the canonical chain
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{
Tracer: logger.NewJSONLogger(nil, os.Stdout),
- }, nil, nil)
+ }, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -3216,7 +3223,7 @@ func testDeleteRecreateSlotsAcrossManyBlocks(t *testing.T, scheme string) {
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{
//Debug: true,
//Tracer: vm.NewJSONLogger(nil, os.Stdout),
- }, nil, nil)
+ }, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -3354,7 +3361,7 @@ func testInitThenFailCreateContract(t *testing.T, scheme string) {
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{
//Debug: true,
//Tracer: vm.NewJSONLogger(nil, os.Stdout),
- }, nil, nil)
+ }, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -3441,7 +3448,7 @@ func testEIP2718Transition(t *testing.T, scheme string) {
})
// Import the canonical chain
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -3535,7 +3542,7 @@ func testEIP1559Transition(t *testing.T, scheme string) {
b.AddTx(tx)
})
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -3648,7 +3655,7 @@ func testSetCanonical(t *testing.T, scheme string) {
diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
defer diskdb.Close()
- chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -3757,7 +3764,7 @@ func testCanonicalHashMarker(t *testing.T, scheme string) {
_, forkB, _ := GenerateChainWithGenesis(gspec, engine, c.forkB, func(i int, gen *BlockGen) {})
// Initialize test chain
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -3894,7 +3901,7 @@ func testCreateThenDelete(t *testing.T, config *params.ChainConfig) {
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
//Debug: true,
//Tracer: logger.NewJSONLogger(nil, os.Stdout),
- }, nil, nil)
+ }, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -4006,7 +4013,7 @@ func TestDeleteThenCreate(t *testing.T) {
}
})
// Import the canonical chain
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -4091,7 +4098,7 @@ func TestTransientStorageReset(t *testing.T) {
})
// Initialize the blockchain with 1153 enabled.
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vmConfig, nil, nil)
+ chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vmConfig, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -4186,7 +4193,7 @@ func TestEIP3651(t *testing.T) {
b.AddTx(tx)
})
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{Tracer: logger.NewMarkdownLogger(&logger.Config{}, os.Stderr).Hooks()}, nil, nil)
+ chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{Tracer: logger.NewMarkdownLogger(&logger.Config{}, os.Stderr).Hooks()}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -4220,3 +4227,90 @@ func TestEIP3651(t *testing.T) {
t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual)
}
}
+
+func TestEIP6110(t *testing.T) {
+ var (
+ engine = beacon.NewFaker()
+
+ // A sender who makes transactions, has some funds
+ key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ addr = crypto.PubkeyToAddress(key.PublicKey)
+ funds = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether))
+ config = *params.AllEthashProtocolChanges
+ gspec = &Genesis{
+ Config: &config,
+ Alloc: types.GenesisAlloc{
+ addr: {Balance: funds},
+ config.DepositContractAddress: {
+ // Simple deposit generator, source: https://gist.github.com/lightclient/54abb2af2465d6969fa6d1920b9ad9d7
+ Code: common.Hex2Bytes("6080604052366103aa575f603067ffffffffffffffff811115610025576100246103ae565b5b6040519080825280601f01601f1916602001820160405280156100575781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f8151811061007d5761007c6103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f602067ffffffffffffffff8111156100c7576100c66103ae565b5b6040519080825280601f01601f1916602001820160405280156100f95781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f8151811061011f5761011e6103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f600867ffffffffffffffff811115610169576101686103ae565b5b6040519080825280601f01601f19166020018201604052801561019b5781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f815181106101c1576101c06103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f606067ffffffffffffffff81111561020b5761020a6103ae565b5b6040519080825280601f01601f19166020018201604052801561023d5781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f81518110610263576102626103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f600867ffffffffffffffff8111156102ad576102ac6103ae565b5b6040519080825280601f01601f1916602001820160405280156102df5781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f81518110610305576103046103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f8081819054906101000a900460ff168092919061035090610441565b91906101000a81548160ff021916908360ff160217905550507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c585858585856040516103a09594939291906104d9565b60405180910390a1005b5f80fd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52603260045260245ffd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f60ff82169050919050565b5f61044b82610435565b915060ff820361045e5761045d610408565b5b600182019050919050565b5f81519050919050565b5f82825260208201905092915050565b8281835e5f83830152505050565b5f601f19601f8301169050919050565b5f6104ab82610469565b6104b58185610473565b93506104c5818560208601610483565b6104ce81610491565b840191505092915050565b5f60a0820190508181035f8301526104f181886104a1565b9050818103602083015261050581876104a1565b9050818103604083015261051981866104a1565b9050818103606083015261052d81856104a1565b9050818103608083015261054181846104a1565b9050969550505050505056fea26469706673582212208569967e58690162d7d6fe3513d07b393b4c15e70f41505cbbfd08f53eba739364736f6c63430008190033"),
+ Nonce: 0,
+ Balance: big.NewInt(0),
+ },
+ },
+ }
+ )
+
+ gspec.Config.BerlinBlock = common.Big0
+ gspec.Config.LondonBlock = common.Big0
+ gspec.Config.TerminalTotalDifficulty = common.Big0
+ gspec.Config.TerminalTotalDifficultyPassed = true
+ gspec.Config.ShanghaiTime = u64(0)
+ gspec.Config.CancunTime = u64(0)
+ gspec.Config.PragueTime = u64(0)
+ signer := types.LatestSigner(gspec.Config)
+
+ _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
+ for i := 0; i < 5; i++ {
+ txdata := &types.DynamicFeeTx{
+ ChainID: gspec.Config.ChainID,
+ Nonce: uint64(i),
+ To: &config.DepositContractAddress,
+ Gas: 500000,
+ GasFeeCap: newGwei(5),
+ GasTipCap: big.NewInt(2),
+ AccessList: nil,
+ Data: []byte{},
+ }
+ tx := types.NewTx(txdata)
+ tx, _ = types.SignTx(tx, signer, key)
+ b.AddTx(tx)
+ }
+ })
+ chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{Tracer: logger.NewMarkdownLogger(&logger.Config{DisableStack: true}, os.Stderr).Hooks()}, nil)
+ if err != nil {
+ t.Fatalf("failed to create tester chain: %v", err)
+ }
+ defer chain.Stop()
+ if n, err := chain.InsertChain(blocks); err != nil {
+ t.Fatalf("block %d: failed to insert into chain: %v", n, err)
+ }
+
+ block := chain.GetBlockByNumber(1)
+ if len(block.Requests()) != 5 {
+ t.Fatalf("failed to retrieve deposits: have %d, want %d", len(block.Requests()), 5)
+ }
+
+ // Verify each index is correct.
+ for want, req := range block.Requests() {
+ d, ok := req.Inner().(*types.Deposit)
+ if !ok {
+ t.Fatalf("expected deposit object")
+ }
+ if got := int(d.PublicKey[0]); got != want {
+ t.Fatalf("invalid pubkey: have %d, want %d", got, want)
+ }
+ if got := int(d.WithdrawalCredentials[0]); got != want {
+ t.Fatalf("invalid withdrawal credentials: have %d, want %d", got, want)
+ }
+ if d.Amount != uint64(want) {
+ t.Fatalf("invalid amounbt: have %d, want %d", d.Amount, want)
+ }
+ if got := int(d.Signature[0]); got != want {
+ t.Fatalf("invalid signature: have %d, want %d", got, want)
+ }
+ if d.Index != uint64(want) {
+ t.Fatalf("invalid index: have %d, want %d", d.Index, want)
+ }
+ }
+}
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 6cee6fdc8a..8e75abdea0 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -346,7 +346,18 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
gen(i, b)
}
- body := types.Body{Transactions: b.txs, Uncles: b.uncles, Withdrawals: b.withdrawals}
+ var requests types.Requests
+ if config.IsPrague(b.header.Number, b.header.Time) {
+ for _, r := range b.receipts {
+ d, err := ParseDepositLogs(r.Logs, config)
+ if err != nil {
+ panic(fmt.Sprintf("failed to parse deposit log: %v", err))
+ }
+ requests = append(requests, d...)
+ }
+ }
+
+ body := types.Body{Transactions: b.txs, Uncles: b.uncles, Withdrawals: b.withdrawals, Requests: requests}
block, err := b.engine.FinalizeAndAssemble(cm, b.header, statedb, &body, b.receipts)
if err != nil {
panic(err)
@@ -368,7 +379,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
defer triedb.Close()
for i := 0; i < n; i++ {
- statedb, err := state.New(parent.Root(), state.NewDatabaseWithNodeDB(db, triedb), nil)
+ statedb, err := state.New(parent.Root(), state.NewDatabase(triedb, nil))
if err != nil {
panic(err)
}
@@ -474,7 +485,7 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine
}
for i := 0; i < n; i++ {
- statedb, err := state.New(parent.Root(), state.NewDatabaseWithNodeDB(db, trdb), nil)
+ statedb, err := state.New(parent.Root(), state.NewDatabase(trdb, nil))
if err != nil {
panic(err)
}
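
Note on the GenerateChain hunk above: deposit requests are collected by scanning each generated block's receipts with ParseDepositLogs. As a rough illustration of what such a helper is expected to do, the sketch below filters logs emitted by the configured deposit contract and converts each into a Deposit request. The name types.UnpackIntoDeposit and the exact filtering are assumptions for illustration, not taken from this patch.

package core

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

// parseDepositRequests is an illustrative stand-in for ParseDepositLogs: it keeps
// only logs emitted by the configured deposit contract and turns each one into a
// Deposit request. types.UnpackIntoDeposit is assumed to decode the DepositEvent
// log data; the real helper may differ in name and signature.
func parseDepositRequests(logs []*types.Log, config *params.ChainConfig) (types.Requests, error) {
	requests := make(types.Requests, 0)
	for _, log := range logs {
		if log.Address != config.DepositContractAddress {
			continue
		}
		d, err := types.UnpackIntoDeposit(log.Data)
		if err != nil {
			return nil, fmt.Errorf("unable to parse deposit data: %v", err)
		}
		requests = append(requests, types.NewRequest(d))
	}
	return requests, nil
}
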
diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go
index 6241f3fb69..61d09117bd 100644
--- a/core/chain_makers_test.go
+++ b/core/chain_makers_test.go
@@ -123,7 +123,7 @@ func TestGeneratePOSChain(t *testing.T) {
})
// Import the chain. This runs all block validation rules.
- blockchain, _ := NewBlockChain(db, nil, gspec, nil, beacon.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec, nil, beacon.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop()
if i, err := blockchain.InsertChain(genchain); err != nil {
@@ -238,7 +238,7 @@ func ExampleGenerateChain() {
})
// Import the chain. This runs all block validation rules.
- blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(rawdb.HashScheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(rawdb.HashScheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop()
if i, err := blockchain.InsertChain(chain); err != nil {
diff --git a/core/dao_test.go b/core/dao_test.go
index b9a899ef2f..5da9e91b03 100644
--- a/core/dao_test.go
+++ b/core/dao_test.go
@@ -50,7 +50,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
BaseFee: big.NewInt(params.InitialBaseFee),
Config: &proConf,
}
- proBc, _ := NewBlockChain(proDb, nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ proBc, _ := NewBlockChain(proDb, nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer proBc.Stop()
conDb := rawdb.NewMemoryDatabase()
@@ -62,7 +62,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
BaseFee: big.NewInt(params.InitialBaseFee),
Config: &conConf,
}
- conBc, _ := NewBlockChain(conDb, nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ conBc, _ := NewBlockChain(conDb, nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer conBc.Stop()
if _, err := proBc.InsertChain(prefix); err != nil {
@@ -74,7 +74,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Try to expand both pro-fork and non-fork chains iteratively with other camp's blocks
for i := int64(0); i < params.DAOForkExtraRange.Int64(); i++ {
// Create a pro-fork block, and try to feed into the no-fork chain
- bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil)
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().Number.Uint64()))
for j := 0; j < len(blocks)/2; j++ {
@@ -97,7 +97,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
t.Fatalf("contra-fork chain didn't accepted no-fork block: %v", err)
}
// Create a no-fork block, and try to feed into the pro-fork chain
- bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil)
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().Number.Uint64()))
for j := 0; j < len(blocks)/2; j++ {
@@ -121,7 +121,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
}
}
// Verify that contra-forkers accept pro-fork extra-datas after forking finishes
- bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer bc.Stop()
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().Number.Uint64()))
@@ -139,7 +139,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
t.Fatalf("contra-fork chain didn't accept pro-fork block post-fork: %v", err)
}
// Verify that pro-forkers accept contra-fork extra-datas after forking finishes
- bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil)
defer bc.Stop()
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().Number.Uint64()))
diff --git a/core/forkchoice.go b/core/forkchoice.go
deleted file mode 100644
index b293c851bf..0000000000
--- a/core/forkchoice.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2021 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- crand "crypto/rand"
- "errors"
- "math/big"
- mrand "math/rand"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/math"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/params"
-)
-
-// ChainReader defines a small collection of methods needed to access the local
-// blockchain during header verification. It's implemented by both blockchain
-// and lightchain.
-type ChainReader interface {
- // Config retrieves the header chain's chain configuration.
- Config() *params.ChainConfig
-
- // GetTd returns the total difficulty of a local block.
- GetTd(common.Hash, uint64) *big.Int
-}
-
-// ForkChoice is the fork chooser based on the highest total difficulty of the
-// chain(the fork choice used in the eth1) and the external fork choice (the fork
-// choice used in the eth2). This main goal of this ForkChoice is not only for
-// offering fork choice during the eth1/2 merge phase, but also keep the compatibility
-// for all other proof-of-work networks.
-type ForkChoice struct {
- chain ChainReader
- rand *mrand.Rand
-
- // preserve is a helper function used in td fork choice.
- // Miners will prefer to choose the local mined block if the
- // local td is equal to the extern one. It can be nil for light
- // client
- preserve func(header *types.Header) bool
-}
-
-func NewForkChoice(chainReader ChainReader, preserve func(header *types.Header) bool) *ForkChoice {
- // Seed a fast but crypto originating random generator
- seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
- if err != nil {
- log.Crit("Failed to initialize random seed", "err", err)
- }
- return &ForkChoice{
- chain: chainReader,
- rand: mrand.New(mrand.NewSource(seed.Int64())),
- preserve: preserve,
- }
-}
-
-// ReorgNeeded returns whether the reorg should be applied
-// based on the given external header and local canonical chain.
-// In the td mode, the new head is chosen if the corresponding
-// total difficulty is higher. In the extern mode, the trusted
-// header is always selected as the head.
-func (f *ForkChoice) ReorgNeeded(current *types.Header, extern *types.Header) (bool, error) {
- var (
- localTD = f.chain.GetTd(current.Hash(), current.Number.Uint64())
- externTd = f.chain.GetTd(extern.Hash(), extern.Number.Uint64())
- )
- if localTD == nil || externTd == nil {
- return false, errors.New("missing td")
- }
- // Accept the new header as the chain head if the transition
- // is already triggered. We assume all the headers after the
- // transition come from the trusted consensus layer.
- if ttd := f.chain.Config().TerminalTotalDifficulty; ttd != nil && ttd.Cmp(externTd) <= 0 {
- return true, nil
- }
-
- // If the total difficulty is higher than our known, add it to the canonical chain
- if diff := externTd.Cmp(localTD); diff > 0 {
- return true, nil
- } else if diff < 0 {
- return false, nil
- }
- // Local and external difficulty is identical.
- // Second clause in the if statement reduces the vulnerability to selfish mining.
- // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
- reorg := false
- externNum, localNum := extern.Number.Uint64(), current.Number.Uint64()
- if externNum < localNum {
- reorg = true
- } else if externNum == localNum {
- var currentPreserve, externPreserve bool
- if f.preserve != nil {
- currentPreserve, externPreserve = f.preserve(current), f.preserve(extern)
- }
- reorg = !currentPreserve && (externPreserve || f.rand.Float64() < 0.5)
- }
- return reorg, nil
-}
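
For reference, the rule deleted above condenses to the following paraphrase of ReorgNeeded (the miner preserve-hook tie-breaker is reduced to a plain coin flip here; this sketch documents the removed behaviour, it is not new behaviour added by the patch):

package core

import "math/big"

// reorgNeededLegacy paraphrases the deleted TD-based fork choice: once the
// terminal total difficulty is reached the external head always wins; before
// that, higher TD wins, and on an exact TD tie the shorter chain (or a coin
// flip at equal height) is preferred to blunt selfish-mining advantages.
func reorgNeededLegacy(localTD, externTD, ttd *big.Int, localNum, externNum uint64, coinFlip bool) bool {
	if ttd != nil && ttd.Cmp(externTD) <= 0 {
		return true // post-transition: trust the consensus-layer supplied head
	}
	switch externTD.Cmp(localTD) {
	case 1:
		return true
	case -1:
		return false
	}
	if externNum < localNum {
		return true
	}
	if externNum == localNum {
		return coinFlip
	}
	return false
}
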
diff --git a/core/genesis.go b/core/genesis.go
index a819f9b882..cccaa8235a 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -127,8 +127,8 @@ func hashAlloc(ga *types.GenesisAlloc, isVerkle bool) (common.Hash, error) {
}
// Create an ephemeral in-memory database for computing hash,
// all the derived states will be discarded to not pollute disk.
- db := state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), config)
- statedb, err := state.New(types.EmptyRootHash, db, nil)
+ db := rawdb.NewMemoryDatabase()
+ statedb, err := state.New(types.EmptyRootHash, state.NewDatabase(triedb.NewDatabase(db, config), nil))
if err != nil {
return common.Hash{}, err
}
@@ -147,8 +147,8 @@ func hashAlloc(ga *types.GenesisAlloc, isVerkle bool) (common.Hash, error) {
// flushAlloc is very similar with hash, but the main difference is all the
// generated states will be persisted into the given database.
-func flushAlloc(ga *types.GenesisAlloc, db ethdb.Database, triedb *triedb.Database) (common.Hash, error) {
- statedb, err := state.New(types.EmptyRootHash, state.NewDatabaseWithNodeDB(db, triedb), nil)
+func flushAlloc(ga *types.GenesisAlloc, triedb *triedb.Database) (common.Hash, error) {
+ statedb, err := state.New(types.EmptyRootHash, state.NewDatabase(triedb, nil))
if err != nil {
return common.Hash{}, err
}
@@ -447,7 +447,10 @@ func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block {
head.BaseFee = new(big.Int).SetUint64(params.InitialBaseFee)
}
}
- var withdrawals []*types.Withdrawal
+ var (
+ withdrawals []*types.Withdrawal
+ requests types.Requests
+ )
if conf := g.Config; conf != nil {
num := big.NewInt(int64(g.Number))
if conf.IsShanghai(num, g.Timestamp) {
@@ -469,8 +472,12 @@ func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block {
head.BlobGasUsed = new(uint64)
}
}
+ if conf.IsPrague(num, g.Timestamp) {
+ head.RequestsHash = &types.EmptyRequestsHash
+ requests = make(types.Requests, 0)
+ }
}
- return types.NewBlock(head, &types.Body{Withdrawals: withdrawals}, nil, trie.NewStackTrie(nil))
+ return types.NewBlock(head, &types.Body{Withdrawals: withdrawals, Requests: requests}, nil, trie.NewStackTrie(nil))
}
// Commit writes the block and state of a genesis specification to the database.
@@ -490,7 +497,7 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *triedb.Database) (*types.Blo
return nil, errors.New("can't start clique chain without signers")
}
// flush the data to disk and compute the state root
- root, err := flushAlloc(&g.Alloc, db, triedb)
+ root, err := flushAlloc(&g.Alloc, triedb)
if err != nil {
return nil, err
}
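
The Prague branch in toBlockWithRoot sets both an empty requests list and the EmptyRequestsHash header field, so the two have to agree in the same way the withdrawals analogue does. A quick test-style check of that assumption (not part of the patch; it merely verifies that the constant matches the derived hash of an empty list):

package core

import (
	"testing"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie"
)

// TestEmptyRequestsHash checks the assumption that the trie root of an empty
// request list equals types.EmptyRequestsHash, mirroring the relationship
// between EmptyWithdrawalsHash and an empty withdrawal list.
func TestEmptyRequestsHash(t *testing.T) {
	h := types.DeriveSha(make(types.Requests, 0), trie.NewStackTrie(nil))
	if h != types.EmptyRequestsHash {
		t.Fatalf("empty requests hash mismatch: have %x, want %x", h, types.EmptyRequestsHash)
	}
}
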
diff --git a/core/genesis_test.go b/core/genesis_test.go
index ad602db4be..0fee874138 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -124,7 +124,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
tdb := triedb.NewDatabase(db, newDbConfig(scheme))
oldcustomg.Commit(db, tdb)
- bc, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), &oldcustomg, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
+ bc, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), &oldcustomg, nil, ethash.NewFullFaker(), vm.Config{}, nil)
defer bc.Stop()
_, blocks, _ := GenerateChainWithGenesis(&oldcustomg, ethash.NewFaker(), 4, nil)
@@ -294,7 +294,7 @@ func TestVerkleGenesisCommit(t *testing.T) {
},
}
- expected := common.FromHex("14398d42be3394ff8d50681816a4b7bf8d8283306f577faba2d5bc57498de23b")
+ expected := common.FromHex("4a83dc39eb688dbcfaf581d60e82de18f875e38786ebce5833342011d6fef37b")
got := genesis.ToBlock().Root().Bytes()
if !bytes.Equal(got, expected) {
t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got)
diff --git a/core/headerchain.go b/core/headerchain.go
index 9ce8d11c40..c587a83fa6 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -254,7 +254,7 @@ func (hc *HeaderChain) WriteHeaders(headers []*types.Header) (int, error) {
// without the real blocks. Hence, writing headers directly should only be done
// in two scenarios: pure-header mode of operation (light clients), or properly
// separated header/block phases (non-archive clients).
-func (hc *HeaderChain) writeHeadersAndSetHead(headers []*types.Header, forker *ForkChoice) (*headerWriteResult, error) {
+func (hc *HeaderChain) writeHeadersAndSetHead(headers []*types.Header) (*headerWriteResult, error) {
inserted, err := hc.WriteHeaders(headers)
if err != nil {
return nil, err
@@ -270,15 +270,6 @@ func (hc *HeaderChain) writeHeadersAndSetHead(headers []*types.Header, forker *F
lastHeader: lastHeader,
}
)
- // Ask the fork choicer if the reorg is necessary
- if reorg, err := forker.ReorgNeeded(hc.CurrentHeader(), lastHeader); err != nil {
- return nil, err
- } else if !reorg {
- if inserted != 0 {
- result.status = SideStatTy
- }
- return result, nil
- }
// Special case, all the inserted headers are already on the canonical
// header chain, skip the reorg operation.
if hc.GetCanonicalHash(lastHeader.Number.Uint64()) == lastHash && lastHeader.Number.Uint64() <= hc.CurrentHeader().Number.Uint64() {
@@ -336,11 +327,11 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header) (int, error) {
//
// The returned 'write status' says if the inserted headers are part of the canonical chain
// or a side chain.
-func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time, forker *ForkChoice) (WriteStatus, error) {
+func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time) (WriteStatus, error) {
if hc.procInterrupt() {
return 0, errors.New("aborted")
}
- res, err := hc.writeHeadersAndSetHead(chain, forker)
+ res, err := hc.writeHeadersAndSetHead(chain)
if err != nil {
return 0, err
}
diff --git a/core/headerchain_test.go b/core/headerchain_test.go
index 25d9bfffcb..3228d82f0f 100644
--- a/core/headerchain_test.go
+++ b/core/headerchain_test.go
@@ -51,10 +51,10 @@ func verifyUnbrokenCanonchain(hc *HeaderChain) error {
return nil
}
-func testInsert(t *testing.T, hc *HeaderChain, chain []*types.Header, wantStatus WriteStatus, wantErr error, forker *ForkChoice) {
+func testInsert(t *testing.T, hc *HeaderChain, chain []*types.Header, wantStatus WriteStatus, wantErr error) {
t.Helper()
- status, err := hc.InsertHeaderChain(chain, time.Now(), forker)
+ status, err := hc.InsertHeaderChain(chain, time.Now())
if status != wantStatus {
t.Errorf("wrong write status from InsertHeaderChain: got %v, want %v", status, wantStatus)
}
@@ -83,34 +83,33 @@ func TestHeaderInsertion(t *testing.T) {
// chain B: G->A1->B1...B128
chainB := makeHeaderChain(gspec.Config, chainA[0], 128, ethash.NewFaker(), genDb, 10)
- forker := NewForkChoice(hc, nil)
// Inserting 64 headers on an empty chain, expecting
// 1 callbacks, 1 canon-status, 0 sidestatus,
- testInsert(t, hc, chainA[:64], CanonStatTy, nil, forker)
+ testInsert(t, hc, chainA[:64], CanonStatTy, nil)
// Inserting 64 identical headers, expecting
// 0 callbacks, 0 canon-status, 0 sidestatus,
- testInsert(t, hc, chainA[:64], NonStatTy, nil, forker)
+ testInsert(t, hc, chainA[:64], NonStatTy, nil)
// Inserting the same some old, some new headers
// 1 callbacks, 1 canon, 0 side
- testInsert(t, hc, chainA[32:96], CanonStatTy, nil, forker)
+ testInsert(t, hc, chainA[32:96], CanonStatTy, nil)
- // Inserting side blocks, but not overtaking the canon chain
- testInsert(t, hc, chainB[0:32], SideStatTy, nil, forker)
+ // Inserting headers from chain B, overtaking the canon chain blindly
+ testInsert(t, hc, chainB[0:32], CanonStatTy, nil)
- // Inserting more side blocks, but we don't have the parent
- testInsert(t, hc, chainB[34:36], NonStatTy, consensus.ErrUnknownAncestor, forker)
+ // Inserting more headers on chain B, but we don't have the parent
+ testInsert(t, hc, chainB[34:36], NonStatTy, consensus.ErrUnknownAncestor)
- // Inserting more sideblocks, overtaking the canon chain
- testInsert(t, hc, chainB[32:97], CanonStatTy, nil, forker)
+ // Inserting more headers on chain B, extend the canon chain
+ testInsert(t, hc, chainB[32:97], CanonStatTy, nil)
- // Inserting more A-headers, taking back the canonicality
- testInsert(t, hc, chainA[90:100], CanonStatTy, nil, forker)
+ // Inserting more headers on chain A, taking back the canonicality
+ testInsert(t, hc, chainA[90:100], CanonStatTy, nil)
// And B becomes canon again
- testInsert(t, hc, chainB[97:107], CanonStatTy, nil, forker)
+ testInsert(t, hc, chainB[97:107], CanonStatTy, nil)
// And B becomes even longer
- testInsert(t, hc, chainB[107:128], CanonStatTy, nil, forker)
+ testInsert(t, hc, chainB[107:128], CanonStatTy, nil)
}
diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go
index 859566f722..6996031be2 100644
--- a/core/rawdb/accessors_metadata.go
+++ b/core/rawdb/accessors_metadata.go
@@ -174,16 +174,3 @@ func UpdateUncleanShutdownMarker(db ethdb.KeyValueStore) {
log.Warn("Failed to write unclean-shutdown marker", "err", err)
}
}
-
-// ReadTransitionStatus retrieves the eth2 transition status from the database
-func ReadTransitionStatus(db ethdb.KeyValueReader) []byte {
- data, _ := db.Get(transitionStatusKey)
- return data
-}
-
-// WriteTransitionStatus stores the eth2 transition status to the database
-func WriteTransitionStatus(db ethdb.KeyValueWriter, data []byte) {
- if err := db.Put(transitionStatusKey, data); err != nil {
- log.Crit("Failed to store the eth2 transition status", "err", err)
- }
-}
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index 6c050ee625..1b8df958d1 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -52,13 +52,11 @@ var (
// freezerTableSize defines the maximum size of freezer data files.
const freezerTableSize = 2 * 1000 * 1000 * 1000
-// Freezer is a memory mapped append-only database to store immutable ordered
-// data into flat files:
+// Freezer is an append-only database to store immutable ordered data into
+// flat files:
//
-// - The append-only nature ensures that disk writes are minimized.
-// - The memory mapping ensures we can max out system memory for caching without
-// reserving it for go-ethereum. This would also reduce the memory requirements
-// of Geth, and thus also GC overhead.
+// - The append-only nature ensures that disk writes are minimized.
+// - The in-order data ensures that disk reads are always optimized.
type Freezer struct {
frozen atomic.Uint64 // Number of items already frozen
tail atomic.Uint64 // Number of the first stored item in the freezer
@@ -152,7 +150,7 @@ func NewFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
return freezer, nil
}
-// Close terminates the chain freezer, unmapping all the data files.
+// Close terminates the chain freezer, closing all the data files.
func (f *Freezer) Close() error {
f.writeLock.Lock()
defer f.writeLock.Unlock()
diff --git a/core/state/access_events.go b/core/state/access_events.go
index 4b6c7c7e69..2b270ccafa 100644
--- a/core/state/access_events.go
+++ b/core/state/access_events.go
@@ -94,11 +94,8 @@ func (ae *AccessEvents) Copy() *AccessEvents {
// member fields of an account.
func (ae *AccessEvents) AddAccount(addr common.Address, isWrite bool) uint64 {
var gas uint64
- gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.VersionLeafKey, isWrite)
- gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BalanceLeafKey, isWrite)
- gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.NonceLeafKey, isWrite)
- gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeKeccakLeafKey, isWrite)
- gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeSizeLeafKey, isWrite)
+ gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BasicDataLeafKey, isWrite)
+ gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, isWrite)
return gas
}
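
The consolidation above means a cold account touch now charges two leaves in a single stem instead of five. A back-of-the-envelope comparison, assuming the EIP-4762 read costs (branch 1900, chunk 200) and a fully cold access; the constant names and values below are assumptions for illustration, not taken from this patch:

package main

import "fmt"

// Illustrative witness-gas arithmetic for a cold AddAccount read, assuming one
// branch (stem) charge plus one chunk charge per touched leaf, per EIP-4762.
func main() {
	const (
		witnessBranchReadCost = 1900 // assumed EIP-4762 value
		witnessChunkReadCost  = 200  // assumed EIP-4762 value
	)
	before := witnessBranchReadCost + 5*witnessChunkReadCost // version, balance, nonce, code hash, code size
	after := witnessBranchReadCost + 2*witnessChunkReadCost  // basic data, code hash
	fmt.Println("cold account read before:", before, "gas") // 2900
	fmt.Println("cold account read after: ", after, "gas")  // 2300
}
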
@@ -107,8 +104,7 @@ func (ae *AccessEvents) AddAccount(addr common.Address, isWrite bool) uint64 {
// call to that account.
func (ae *AccessEvents) MessageCallGas(destination common.Address) uint64 {
var gas uint64
- gas += ae.touchAddressAndChargeGas(destination, zeroTreeIndex, utils.VersionLeafKey, false)
- gas += ae.touchAddressAndChargeGas(destination, zeroTreeIndex, utils.CodeSizeLeafKey, false)
+ gas += ae.touchAddressAndChargeGas(destination, zeroTreeIndex, utils.BasicDataLeafKey, false)
return gas
}
@@ -116,41 +112,43 @@ func (ae *AccessEvents) MessageCallGas(destination common.Address) uint64 {
// cold balance member fields of the caller and the callee accounts.
func (ae *AccessEvents) ValueTransferGas(callerAddr, targetAddr common.Address) uint64 {
var gas uint64
- gas += ae.touchAddressAndChargeGas(callerAddr, zeroTreeIndex, utils.BalanceLeafKey, true)
- gas += ae.touchAddressAndChargeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey, true)
+ gas += ae.touchAddressAndChargeGas(callerAddr, zeroTreeIndex, utils.BasicDataLeafKey, true)
+ gas += ae.touchAddressAndChargeGas(targetAddr, zeroTreeIndex, utils.BasicDataLeafKey, true)
+ return gas
+}
+
+// ContractCreatePreCheckGas charges the access costs incurred before a
+// contract creation is initiated. These are read accesses only, because the
+// address-collision check happens before the value transfer, so no writes
+// are guaranteed to happen at this point.
+func (ae *AccessEvents) ContractCreatePreCheckGas(addr common.Address) uint64 {
+ var gas uint64
+ gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BasicDataLeafKey, false)
+ gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, false)
return gas
}
// ContractCreateInitGas returns the access gas costs for the initialization of
// a contract creation.
-func (ae *AccessEvents) ContractCreateInitGas(addr common.Address, createSendsValue bool) uint64 {
+func (ae *AccessEvents) ContractCreateInitGas(addr common.Address) uint64 {
var gas uint64
- gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.VersionLeafKey, true)
- gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.NonceLeafKey, true)
- if createSendsValue {
- gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BalanceLeafKey, true)
- }
+ gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BasicDataLeafKey, true)
+ gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, true)
return gas
}
// AddTxOrigin adds the member fields of the sender account to the access event list,
// so that cold accesses are not charged, since they are covered by the 21000 gas.
func (ae *AccessEvents) AddTxOrigin(originAddr common.Address) {
- ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.VersionLeafKey, false)
- ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.BalanceLeafKey, true)
- ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.NonceLeafKey, true)
- ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.CodeKeccakLeafKey, false)
- ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.CodeSizeLeafKey, false)
+ ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.BasicDataLeafKey, true)
+ ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.CodeHashLeafKey, false)
}
// AddTxDestination adds the member fields of the destination account to the access event list,
// so that cold accesses are not charged, since they are covered by the 21000 gas.
func (ae *AccessEvents) AddTxDestination(addr common.Address, sendsValue bool) {
- ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.VersionLeafKey, false)
- ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BalanceLeafKey, sendsValue)
- ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.NonceLeafKey, false)
- ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeKeccakLeafKey, false)
- ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeSizeLeafKey, false)
+ ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BasicDataLeafKey, sendsValue)
+ ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, false)
}
// SlotGas returns the amount of gas to be charged for a cold storage access.
@@ -275,39 +273,12 @@ func (ae *AccessEvents) CodeChunksRangeGas(contractAddr common.Address, startPC,
return statelessGasCharged
}
-// VersionGas adds the account's version to the accessed data, and returns the
+// BasicDataGas adds the account's basic data to the accessed data, and returns the
// amount of gas that it costs.
// Note that an access in write mode implies an access in read mode, whereas an
// access in read mode does not imply an access in write mode.
-func (ae *AccessEvents) VersionGas(addr common.Address, isWrite bool) uint64 {
- return ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.VersionLeafKey, isWrite)
-}
-
-// BalanceGas adds the account's balance to the accessed data, and returns the
-// amount of gas that it costs.
-// in write mode. If false, the charged gas corresponds to an access in read mode.
-// Note that an access in write mode implies an access in read mode, whereas an access in
-// read mode does not imply an access in write mode.
-func (ae *AccessEvents) BalanceGas(addr common.Address, isWrite bool) uint64 {
- return ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BalanceLeafKey, isWrite)
-}
-
-// NonceGas adds the account's nonce to the accessed data, and returns the
-// amount of gas that it costs.
-// in write mode. If false, the charged gas corresponds to an access in read mode.
-// Note that an access in write mode implies an access in read mode, whereas an access in
-// read mode does not imply an access in write mode.
-func (ae *AccessEvents) NonceGas(addr common.Address, isWrite bool) uint64 {
- return ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.NonceLeafKey, isWrite)
-}
-
-// CodeSizeGas adds the account's code size to the accessed data, and returns the
-// amount of gas that it costs.
-// in write mode. If false, the charged gas corresponds to an access in read mode.
-// Note that an access in write mode implies an access in read mode, whereas an access in
-// read mode does not imply an access in write mode.
-func (ae *AccessEvents) CodeSizeGas(addr common.Address, isWrite bool) uint64 {
- return ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeSizeLeafKey, isWrite)
+func (ae *AccessEvents) BasicDataGas(addr common.Address, isWrite bool) uint64 {
+ return ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BasicDataLeafKey, isWrite)
}
// CodeHashGas adds the account's code hash to the accessed data, and returns the
@@ -316,5 +287,5 @@ func (ae *AccessEvents) CodeSizeGas(addr common.Address, isWrite bool) uint64 {
// Note that an access in write mode implies an access in read mode, whereas an access in
// read mode does not imply an access in write mode.
func (ae *AccessEvents) CodeHashGas(addr common.Address, isWrite bool) uint64 {
- return ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeKeccakLeafKey, isWrite)
+ return ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeHashLeafKey, isWrite)
}
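
For illustration (not part of the change), a minimal sketch of how the consolidated leaf layout behaves from a caller's perspective, assuming the exported NewAccessEvents constructor and the BasicDataGas method shown above: the first touch of an account header is cold, while a repeated read of the same basic-data leaf is warm and free.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/trie/utils"
)

func main() {
	ae := state.NewAccessEvents(utils.NewPointCache(1024))
	addr := common.HexToAddress("0x00000000000000000000000000000000000000ff")

	// Cold access to the account header: branch read + chunk read cost.
	cold := ae.BasicDataGas(addr, false)
	// Version, balance, nonce and code size now share one basic-data leaf,
	// so a second read of the header is warm and free.
	warm := ae.BasicDataGas(addr, false)
	fmt.Println("cold:", cold, "warm:", warm)
}
```
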
diff --git a/core/state/access_events_test.go b/core/state/access_events_test.go
index c8c93accfd..10630b3181 100644
--- a/core/state/access_events_test.go
+++ b/core/state/access_events_test.go
@@ -40,55 +40,43 @@ func TestAccountHeaderGas(t *testing.T) {
ae := NewAccessEvents(utils.NewPointCache(1024))
// Check cold read cost
- gas := ae.VersionGas(testAddr, false)
+ gas := ae.BasicDataGas(testAddr, false)
if want := params.WitnessBranchReadCost + params.WitnessChunkReadCost; gas != want {
t.Fatalf("incorrect gas computed, got %d, want %d", gas, want)
}
// Check warm read cost
- gas = ae.VersionGas(testAddr, false)
+ gas = ae.BasicDataGas(testAddr, false)
if gas != 0 {
t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0)
}
// Check cold read costs in the same group no longer incur the branch read cost
- gas = ae.BalanceGas(testAddr, false)
- if gas != params.WitnessChunkReadCost {
- t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessChunkReadCost)
- }
- gas = ae.NonceGas(testAddr, false)
- if gas != params.WitnessChunkReadCost {
- t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessChunkReadCost)
- }
- gas = ae.CodeSizeGas(testAddr, false)
- if gas != params.WitnessChunkReadCost {
- t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessChunkReadCost)
- }
gas = ae.CodeHashGas(testAddr, false)
if gas != params.WitnessChunkReadCost {
t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessChunkReadCost)
}
// Check cold write cost
- gas = ae.VersionGas(testAddr, true)
+ gas = ae.BasicDataGas(testAddr, true)
if want := params.WitnessBranchWriteCost + params.WitnessChunkWriteCost; gas != want {
t.Fatalf("incorrect gas computed, got %d, want %d", gas, want)
}
// Check warm write cost
- gas = ae.VersionGas(testAddr, true)
+ gas = ae.BasicDataGas(testAddr, true)
if gas != 0 {
t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0)
}
// Check a write without a read charges both read and write costs
- gas = ae.BalanceGas(testAddr2, true)
+ gas = ae.BasicDataGas(testAddr2, true)
if want := params.WitnessBranchReadCost + params.WitnessBranchWriteCost + params.WitnessChunkWriteCost + params.WitnessChunkReadCost; gas != want {
t.Fatalf("incorrect gas computed, got %d, want %d", gas, want)
}
// Check that a write followed by a read charges nothing
- gas = ae.BalanceGas(testAddr2, false)
+ gas = ae.BasicDataGas(testAddr2, false)
if gas != 0 {
t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0)
}
@@ -112,13 +100,13 @@ func TestContractCreateInitGas(t *testing.T) {
}
// Check cold read cost, without a value
- gas := ae.ContractCreateInitGas(testAddr, false)
- if want := params.WitnessBranchWriteCost + params.WitnessBranchReadCost + params.WitnessChunkWriteCost*2 + params.WitnessChunkReadCost*2; gas != want {
+ gas := ae.ContractCreateInitGas(testAddr)
+ if want := params.WitnessBranchWriteCost + params.WitnessBranchReadCost + 2*params.WitnessChunkWriteCost + 2*params.WitnessChunkReadCost; gas != want {
t.Fatalf("incorrect gas computed, got %d, want %d", gas, want)
}
// Check warm read cost
- gas = ae.ContractCreateInitGas(testAddr, false)
+ gas = ae.ContractCreateInitGas(testAddr)
if gas != 0 {
t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0)
}
@@ -131,17 +119,17 @@ func TestMessageCallGas(t *testing.T) {
// Check cold read cost, without a value
gas := ae.MessageCallGas(testAddr)
- if want := params.WitnessBranchReadCost + params.WitnessChunkReadCost*2; gas != want {
+ if want := params.WitnessBranchReadCost + params.WitnessChunkReadCost; gas != want {
t.Fatalf("incorrect gas computed, got %d, want %d", gas, want)
}
- // Check that reading the version and code size of the same account does not incur the branch read cost
- gas = ae.VersionGas(testAddr, false)
+ // Check that reading the basic data and code hash of the same account does not incur the branch read cost
+ gas = ae.BasicDataGas(testAddr, false)
if gas != 0 {
t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0)
}
- gas = ae.CodeSizeGas(testAddr, false)
- if gas != 0 {
+ gas = ae.CodeHashGas(testAddr, false)
+ if gas != params.WitnessChunkReadCost {
t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0)
}
diff --git a/core/state/database.go b/core/state/database.go
index d54417d2f9..de61dee036 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
@@ -45,29 +46,29 @@ const (
// Database wraps access to tries and contract code.
type Database interface {
+ // Reader returns a state reader associated with the specified state root.
+ Reader(root common.Hash) (Reader, error)
+
// OpenTrie opens the main account trie.
OpenTrie(root common.Hash) (Trie, error)
// OpenStorageTrie opens the storage trie of an account.
OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, trie Trie) (Trie, error)
- // CopyTrie returns an independent copy of the given trie.
- CopyTrie(Trie) Trie
-
// ContractCode retrieves a particular contract's code.
ContractCode(addr common.Address, codeHash common.Hash) ([]byte, error)
// ContractCodeSize retrieves a particular contracts code's size.
ContractCodeSize(addr common.Address, codeHash common.Hash) (int, error)
- // DiskDB returns the underlying key-value disk database.
- DiskDB() ethdb.KeyValueStore
-
// PointCache returns the cache holding points used in verkle tree key computation
PointCache() *utils.PointCache
// TrieDB returns the underlying trie database for managing trie nodes.
TrieDB() *triedb.Database
+
+ // Snapshot returns the underlying state snapshot.
+ Snapshot() *snapshot.Tree
}
// Trie is an Ethereum Merkle Patricia trie.
@@ -94,7 +95,7 @@ type Trie interface {
// UpdateAccount abstracts an account write to the trie. It encodes the
// provided account object with associated algorithm and then updates it
// in the trie with provided address.
- UpdateAccount(address common.Address, account *types.StateAccount) error
+ UpdateAccount(address common.Address, account *types.StateAccount, codeLen int) error
// UpdateStorage associates key with value in the trie. If value has length zero,
// any existing value is deleted from the trie. The value bytes must not be modified
@@ -147,47 +148,62 @@ type Trie interface {
IsVerkle() bool
}
-// NewDatabase creates a backing store for state. The returned database is safe for
-// concurrent use, but does not retain any recent trie nodes in memory. To keep some
-// historical state in memory, use the NewDatabaseWithConfig constructor.
-func NewDatabase(db ethdb.Database) Database {
- return NewDatabaseWithConfig(db, nil)
+// CachingDB is an implementation of the Database interface. It leverages both
+// the trie and the state snapshot to provide state access. It is meant to be a
+// long-lived object, holding a few internal caches that are shared between blocks.
+type CachingDB struct {
+ disk ethdb.KeyValueStore
+ triedb *triedb.Database
+ snap *snapshot.Tree
+ codeCache *lru.SizeConstrainedCache[common.Hash, []byte]
+ codeSizeCache *lru.Cache[common.Hash, int]
+ pointCache *utils.PointCache
}
-// NewDatabaseWithConfig creates a backing store for state. The returned database
-// is safe for concurrent use and retains a lot of collapsed RLP trie nodes in a
-// large memory cache.
-func NewDatabaseWithConfig(db ethdb.Database, config *triedb.Config) Database {
- return &cachingDB{
- disk: db,
- codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
+// NewDatabase creates a state database with the provided data sources.
+func NewDatabase(triedb *triedb.Database, snap *snapshot.Tree) *CachingDB {
+ return &CachingDB{
+ disk: triedb.Disk(),
+ triedb: triedb,
+ snap: snap,
codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
- triedb: triedb.NewDatabase(db, config),
+ codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
pointCache: utils.NewPointCache(pointCacheSize),
}
}
-// NewDatabaseWithNodeDB creates a state database with an already initialized node database.
-func NewDatabaseWithNodeDB(db ethdb.Database, triedb *triedb.Database) Database {
- return &cachingDB{
- disk: db,
- codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
- codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
- triedb: triedb,
- pointCache: utils.NewPointCache(pointCacheSize),
- }
+// NewDatabaseForTesting is similar to NewDatabase, but it initializes the caching
+// db with an ephemeral in-memory key-value store and the default config, for testing.
+func NewDatabaseForTesting() *CachingDB {
+ return NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil)
}
-type cachingDB struct {
- disk ethdb.KeyValueStore
- codeSizeCache *lru.Cache[common.Hash, int]
- codeCache *lru.SizeConstrainedCache[common.Hash, []byte]
- triedb *triedb.Database
- pointCache *utils.PointCache
+// Reader returns a state reader associated with the specified state root.
+func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) {
+ var readers []Reader
+
+ // Set up the state snapshot reader if available. This feature
+ // is optional and may be partially useful if it's not fully
+ // generated.
+ if db.snap != nil {
+ sr, err := newStateReader(stateRoot, db.snap)
+ if err == nil {
+ readers = append(readers, sr) // snap reader is optional
+ }
+ }
+ // Set up the trie reader, which is expected to always be available
+ // as the gatekeeper unless the state is corrupted.
+ tr, err := newTrieReader(stateRoot, db.triedb, db.pointCache)
+ if err != nil {
+ return nil, err
+ }
+ readers = append(readers, tr)
+
+ return newMultiReader(readers...)
}
// OpenTrie opens the main account trie at a specific root hash.
-func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
+func (db *CachingDB) OpenTrie(root common.Hash) (Trie, error) {
if db.triedb.IsVerkle() {
return trie.NewVerkleTrie(root, db.triedb, db.pointCache)
}
@@ -199,7 +215,7 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
}
// OpenStorageTrie opens the storage trie of an account.
-func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, self Trie) (Trie, error) {
+func (db *CachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, self Trie) (Trie, error) {
// In the verkle case, there is only one tree. But the two-tree structure
// is hardcoded in the codebase. So we need to return the same trie in this
// case.
@@ -213,20 +229,8 @@ func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Addre
return tr, nil
}
-// CopyTrie returns an independent copy of the given trie.
-func (db *cachingDB) CopyTrie(t Trie) Trie {
- switch t := t.(type) {
- case *trie.StateTrie:
- return t.Copy()
- case *trie.VerkleTrie:
- return t.Copy()
- default:
- panic(fmt.Errorf("unknown trie type %T", t))
- }
-}
-
// ContractCode retrieves a particular contract's code.
-func (db *cachingDB) ContractCode(address common.Address, codeHash common.Hash) ([]byte, error) {
+func (db *CachingDB) ContractCode(address common.Address, codeHash common.Hash) ([]byte, error) {
code, _ := db.codeCache.Get(codeHash)
if len(code) > 0 {
return code, nil
@@ -243,7 +247,7 @@ func (db *cachingDB) ContractCode(address common.Address, codeHash common.Hash)
// ContractCodeWithPrefix retrieves a particular contract's code. If the
// code can't be found in the cache, then check the existence with **new**
// db scheme.
-func (db *cachingDB) ContractCodeWithPrefix(address common.Address, codeHash common.Hash) ([]byte, error) {
+func (db *CachingDB) ContractCodeWithPrefix(address common.Address, codeHash common.Hash) ([]byte, error) {
code, _ := db.codeCache.Get(codeHash)
if len(code) > 0 {
return code, nil
@@ -258,7 +262,7 @@ func (db *cachingDB) ContractCodeWithPrefix(address common.Address, codeHash com
}
// ContractCodeSize retrieves a particular contracts code's size.
-func (db *cachingDB) ContractCodeSize(addr common.Address, codeHash common.Hash) (int, error) {
+func (db *CachingDB) ContractCodeSize(addr common.Address, codeHash common.Hash) (int, error) {
if cached, ok := db.codeSizeCache.Get(codeHash); ok {
return cached, nil
}
@@ -266,17 +270,29 @@ func (db *cachingDB) ContractCodeSize(addr common.Address, codeHash common.Hash)
return len(code), err
}
-// DiskDB returns the underlying key-value disk database.
-func (db *cachingDB) DiskDB() ethdb.KeyValueStore {
- return db.disk
-}
-
// TrieDB retrieves any intermediate trie-node caching layer.
-func (db *cachingDB) TrieDB() *triedb.Database {
+func (db *CachingDB) TrieDB() *triedb.Database {
return db.triedb
}
// PointCache returns the cache of evaluated curve points.
-func (db *cachingDB) PointCache() *utils.PointCache {
+func (db *CachingDB) PointCache() *utils.PointCache {
return db.pointCache
}
+
+// Snapshot returns the underlying state snapshot.
+func (db *CachingDB) Snapshot() *snapshot.Tree {
+ return db.snap
+}
+
+// mustCopyTrie returns a deep-copied trie.
+func mustCopyTrie(t Trie) Trie {
+ switch t := t.(type) {
+ case *trie.StateTrie:
+ return t.Copy()
+ case *trie.VerkleTrie:
+ return t.Copy()
+ default:
+ panic(fmt.Errorf("unknown trie type %T", t))
+ }
+}
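
As a usage sketch of the new wiring (assuming an ephemeral in-memory backing store and no snapshot tree, purely for illustration): the constructor now takes the trie database and snapshot tree directly, and Reader falls back to a trie-only reader when no snapshot is supplied.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/triedb"
)

func main() {
	// Wire the trie database over an in-memory key-value store; passing a nil
	// snapshot tree means CachingDB.Reader will only build the trie reader.
	tdb := triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil)
	sdb := state.NewDatabase(tdb, nil)

	// Open a reader for the empty state root; lookups resolve via the trie.
	reader, err := sdb.Reader(types.EmptyRootHash)
	if err != nil {
		panic(err)
	}
	acct, err := reader.Account(common.Address{})
	fmt.Println(acct, err) // nil account and nil error: the account does not exist
}
```
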
diff --git a/core/state/iterator_test.go b/core/state/iterator_test.go
index 73cc22490b..26456d7a89 100644
--- a/core/state/iterator_test.go
+++ b/core/state/iterator_test.go
@@ -35,7 +35,7 @@ func testNodeIteratorCoverage(t *testing.T, scheme string) {
db, sdb, ndb, root, _ := makeTestState(scheme)
ndb.Commit(root, false)
- state, err := New(root, sdb, nil)
+ state, err := New(root, sdb)
if err != nil {
t.Fatalf("failed to create state trie at %x: %v", root, err)
}
diff --git a/core/state/journal.go b/core/state/journal.go
index f180a5dae4..a2fea6b6ec 100644
--- a/core/state/journal.go
+++ b/core/state/journal.go
@@ -153,7 +153,7 @@ func (j *journal) logChange(txHash common.Hash) {
}
func (j *journal) createObject(addr common.Address) {
- j.append(createObjectChange{account: &addr})
+ j.append(createObjectChange{account: addr})
}
func (j *journal) createContract(addr common.Address) {
@@ -161,12 +161,12 @@ func (j *journal) createContract(addr common.Address) {
}
func (j *journal) destruct(addr common.Address) {
- j.append(selfDestructChange{account: &addr})
+ j.append(selfDestructChange{account: addr})
}
func (j *journal) storageChange(addr common.Address, key, prev, origin common.Hash) {
j.append(storageChange{
- account: &addr,
+ account: addr,
key: key,
prevvalue: prev,
origvalue: origin,
@@ -175,7 +175,7 @@ func (j *journal) storageChange(addr common.Address, key, prev, origin common.Ha
func (j *journal) transientStateChange(addr common.Address, key, prev common.Hash) {
j.append(transientStorageChange{
- account: &addr,
+ account: addr,
key: key,
prevalue: prev,
})
@@ -187,25 +187,25 @@ func (j *journal) refundChange(previous uint64) {
func (j *journal) balanceChange(addr common.Address, previous *uint256.Int) {
j.append(balanceChange{
- account: &addr,
+ account: addr,
prev: previous.Clone(),
})
}
func (j *journal) setCode(address common.Address) {
- j.append(codeChange{account: &address})
+ j.append(codeChange{account: address})
}
func (j *journal) nonceChange(address common.Address, prev uint64) {
j.append(nonceChange{
- account: &address,
+ account: address,
prev: prev,
})
}
func (j *journal) touchChange(address common.Address) {
j.append(touchChange{
- account: &address,
+ account: address,
})
if address == ripemd {
// Explicitly put it in the dirty-cache, which is otherwise generated from
@@ -215,50 +215,48 @@ func (j *journal) touchChange(address common.Address) {
}
func (j *journal) accessListAddAccount(addr common.Address) {
- j.append(accessListAddAccountChange{&addr})
+ j.append(accessListAddAccountChange{addr})
}
func (j *journal) accessListAddSlot(addr common.Address, slot common.Hash) {
j.append(accessListAddSlotChange{
- address: &addr,
- slot: &slot,
+ address: addr,
+ slot: slot,
})
}
type (
// Changes to the account trie.
createObjectChange struct {
- account *common.Address
+ account common.Address
}
-
// createContractChange represents an account becoming a contract-account.
// This event happens prior to executing initcode. The journal-event simply
// manages the created-flag, in order to allow same-tx destruction.
createContractChange struct {
account common.Address
}
-
selfDestructChange struct {
- account *common.Address
+ account common.Address
}
// Changes to individual accounts.
balanceChange struct {
- account *common.Address
+ account common.Address
prev *uint256.Int
}
nonceChange struct {
- account *common.Address
+ account common.Address
prev uint64
}
storageChange struct {
- account *common.Address
+ account common.Address
key common.Hash
prevvalue common.Hash
origvalue common.Hash
}
codeChange struct {
- account *common.Address
+ account common.Address
}
// Changes to other state values.
@@ -269,31 +267,31 @@ type (
txhash common.Hash
}
touchChange struct {
- account *common.Address
+ account common.Address
}
// Changes to the access list
accessListAddAccountChange struct {
- address *common.Address
+ address common.Address
}
accessListAddSlotChange struct {
- address *common.Address
- slot *common.Hash
+ address common.Address
+ slot common.Hash
}
// Changes to transient storage
transientStorageChange struct {
- account *common.Address
+ account common.Address
key, prevalue common.Hash
}
)
func (ch createObjectChange) revert(s *StateDB) {
- delete(s.stateObjects, *ch.account)
+ delete(s.stateObjects, ch.account)
}
func (ch createObjectChange) dirtied() *common.Address {
- return ch.account
+ return &ch.account
}
func (ch createObjectChange) copy() journalEntry {
@@ -317,14 +315,14 @@ func (ch createContractChange) copy() journalEntry {
}
func (ch selfDestructChange) revert(s *StateDB) {
- obj := s.getStateObject(*ch.account)
+ obj := s.getStateObject(ch.account)
if obj != nil {
obj.selfDestructed = false
}
}
func (ch selfDestructChange) dirtied() *common.Address {
- return ch.account
+ return &ch.account
}
func (ch selfDestructChange) copy() journalEntry {
@@ -339,7 +337,7 @@ func (ch touchChange) revert(s *StateDB) {
}
func (ch touchChange) dirtied() *common.Address {
- return ch.account
+ return &ch.account
}
func (ch touchChange) copy() journalEntry {
@@ -349,11 +347,11 @@ func (ch touchChange) copy() journalEntry {
}
func (ch balanceChange) revert(s *StateDB) {
- s.getStateObject(*ch.account).setBalance(ch.prev)
+ s.getStateObject(ch.account).setBalance(ch.prev)
}
func (ch balanceChange) dirtied() *common.Address {
- return ch.account
+ return &ch.account
}
func (ch balanceChange) copy() journalEntry {
@@ -364,11 +362,11 @@ func (ch balanceChange) copy() journalEntry {
}
func (ch nonceChange) revert(s *StateDB) {
- s.getStateObject(*ch.account).setNonce(ch.prev)
+ s.getStateObject(ch.account).setNonce(ch.prev)
}
func (ch nonceChange) dirtied() *common.Address {
- return ch.account
+ return &ch.account
}
func (ch nonceChange) copy() journalEntry {
@@ -379,11 +377,11 @@ func (ch nonceChange) copy() journalEntry {
}
func (ch codeChange) revert(s *StateDB) {
- s.getStateObject(*ch.account).setCode(types.EmptyCodeHash, nil)
+ s.getStateObject(ch.account).setCode(types.EmptyCodeHash, nil)
}
func (ch codeChange) dirtied() *common.Address {
- return ch.account
+ return &ch.account
}
func (ch codeChange) copy() journalEntry {
@@ -391,11 +389,11 @@ func (ch codeChange) copy() journalEntry {
}
func (ch storageChange) revert(s *StateDB) {
- s.getStateObject(*ch.account).setState(ch.key, ch.prevvalue, ch.origvalue)
+ s.getStateObject(ch.account).setState(ch.key, ch.prevvalue, ch.origvalue)
}
func (ch storageChange) dirtied() *common.Address {
- return ch.account
+ return &ch.account
}
func (ch storageChange) copy() journalEntry {
@@ -407,7 +405,7 @@ func (ch storageChange) copy() journalEntry {
}
func (ch transientStorageChange) revert(s *StateDB) {
- s.setTransientState(*ch.account, ch.key, ch.prevalue)
+ s.setTransientState(ch.account, ch.key, ch.prevalue)
}
func (ch transientStorageChange) dirtied() *common.Address {
@@ -466,7 +464,7 @@ func (ch accessListAddAccountChange) revert(s *StateDB) {
(addr) at this point, since no storage adds can remain when come upon
a single (addr) change.
*/
- s.accessList.DeleteAddress(*ch.address)
+ s.accessList.DeleteAddress(ch.address)
}
func (ch accessListAddAccountChange) dirtied() *common.Address {
@@ -480,7 +478,7 @@ func (ch accessListAddAccountChange) copy() journalEntry {
}
func (ch accessListAddSlotChange) revert(s *StateDB) {
- s.accessList.DeleteSlot(*ch.address, *ch.slot)
+ s.accessList.DeleteSlot(ch.address, ch.slot)
}
func (ch accessListAddSlotChange) dirtied() *common.Address {
diff --git a/core/state/reader.go b/core/state/reader.go
new file mode 100644
index 0000000000..6bddefc2a7
--- /dev/null
+++ b/core/state/reader.go
@@ -0,0 +1,313 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+ "errors"
+ "maps"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/state/snapshot"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/utils"
+ "github.com/ethereum/go-ethereum/triedb"
+)
+
+// Reader defines the interface for accessing accounts and storage slots
+// associated with a specific state.
+type Reader interface {
+ // Account retrieves the account associated with a particular address.
+ //
+ // - Returns a nil account if it does not exist
+ // - Returns an error only if an unexpected issue occurs
+ // - The returned account is safe to modify after the call
+ Account(addr common.Address) (*types.StateAccount, error)
+
+ // Storage retrieves the storage slot associated with a particular account
+ // address and slot key.
+ //
+ // - Returns an empty slot if it does not exist
+ // - Returns an error only if an unexpected issue occurs
+ // - The returned storage slot is safe to modify after the call
+ Storage(addr common.Address, slot common.Hash) (common.Hash, error)
+
+ // Copy returns a deep-copied state reader.
+ Copy() Reader
+}
+
+// stateReader is a wrapper over the state snapshot and implements the Reader
+// interface. It provides an efficient way to access flat state.
+type stateReader struct {
+ snap snapshot.Snapshot
+ buff crypto.KeccakState
+}
+
+// newStateReader constructs a flat state reader based on the specified state root.
+func newStateReader(root common.Hash, snaps *snapshot.Tree) (*stateReader, error) {
+ snap := snaps.Snapshot(root)
+ if snap == nil {
+ return nil, errors.New("snapshot is not available")
+ }
+ return &stateReader{
+ snap: snap,
+ buff: crypto.NewKeccakState(),
+ }, nil
+}
+
+// Account implements Reader, retrieving the account specified by the address.
+//
+// An error will be returned if the associated snapshot is already stale or
+// the requested account is not yet covered by the snapshot.
+//
+// The returned account might be nil if it does not exist.
+func (r *stateReader) Account(addr common.Address) (*types.StateAccount, error) {
+ ret, err := r.snap.Account(crypto.HashData(r.buff, addr.Bytes()))
+ if err != nil {
+ return nil, err
+ }
+ if ret == nil {
+ return nil, nil
+ }
+ acct := &types.StateAccount{
+ Nonce: ret.Nonce,
+ Balance: ret.Balance,
+ CodeHash: ret.CodeHash,
+ Root: common.BytesToHash(ret.Root),
+ }
+ if len(acct.CodeHash) == 0 {
+ acct.CodeHash = types.EmptyCodeHash.Bytes()
+ }
+ if acct.Root == (common.Hash{}) {
+ acct.Root = types.EmptyRootHash
+ }
+ return acct, nil
+}
+
+// Storage implements Reader, retrieving the storage slot specified by the
+// address and slot key.
+//
+// An error will be returned if the associated snapshot is already stale or
+// the requested storage slot is not yet covered by the snapshot.
+//
+// The returned storage slot might be empty if it does not exist.
+func (r *stateReader) Storage(addr common.Address, key common.Hash) (common.Hash, error) {
+ addrHash := crypto.HashData(r.buff, addr.Bytes())
+ slotHash := crypto.HashData(r.buff, key.Bytes())
+ ret, err := r.snap.Storage(addrHash, slotHash)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ if len(ret) == 0 {
+ return common.Hash{}, nil
+ }
+ // Perform the rlp-decode as the slot value is RLP-encoded in the state
+ // snapshot.
+ _, content, _, err := rlp.Split(ret)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ var value common.Hash
+ value.SetBytes(content)
+ return value, nil
+}
+
+// Copy implements Reader, returning a deep-copied snap reader.
+func (r *stateReader) Copy() Reader {
+ return &stateReader{
+ snap: r.snap,
+ buff: crypto.NewKeccakState(),
+ }
+}
+
+// trieReader implements the Reader interface, providing functions to access
+// state from the referenced trie.
+type trieReader struct {
+ root common.Hash // State root which uniquely represent a state
+ db *triedb.Database // Database for loading trie
+ buff crypto.KeccakState // Buffer for keccak256 hashing
+ mainTrie Trie // Main trie, resolved in constructor
+ subRoots map[common.Address]common.Hash // Set of storage roots, cached when the account is resolved
+ subTries map[common.Address]Trie // Group of storage tries, cached when it's resolved
+}
+
+// newTrieReader constructs a trie reader for the specified state. An error will
+// be returned if the trie identified by root does not exist.
+func newTrieReader(root common.Hash, db *triedb.Database, cache *utils.PointCache) (*trieReader, error) {
+ var (
+ tr Trie
+ err error
+ )
+ if !db.IsVerkle() {
+ tr, err = trie.NewStateTrie(trie.StateTrieID(root), db)
+ } else {
+ tr, err = trie.NewVerkleTrie(root, db, cache)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return &trieReader{
+ root: root,
+ db: db,
+ buff: crypto.NewKeccakState(),
+ mainTrie: tr,
+ subRoots: make(map[common.Address]common.Hash),
+ subTries: make(map[common.Address]Trie),
+ }, nil
+}
+
+// Account implements Reader, retrieving the account specified by the address.
+//
+// An error will be returned if the trie state is corrupted. A nil account
+// will be returned if it does not exist in the trie.
+func (r *trieReader) Account(addr common.Address) (*types.StateAccount, error) {
+ account, err := r.mainTrie.GetAccount(addr)
+ if err != nil {
+ return nil, err
+ }
+ if account == nil {
+ r.subRoots[addr] = types.EmptyRootHash
+ } else {
+ r.subRoots[addr] = account.Root
+ }
+ return account, nil
+}
+
+// Storage implements Reader, retrieving the storage slot specified by the
+// address and slot key.
+//
+// An error will be returned if the trie state is corrupted. An empty storage
+// slot will be returned if it does not exist in the trie.
+func (r *trieReader) Storage(addr common.Address, key common.Hash) (common.Hash, error) {
+ var (
+ tr Trie
+ found bool
+ value common.Hash
+ )
+ if r.db.IsVerkle() {
+ tr = r.mainTrie
+ } else {
+ tr, found = r.subTries[addr]
+ if !found {
+ root, ok := r.subRoots[addr]
+
+ // The storage slot is accessed without account caching. It's unexpected
+ // behavior but try to resolve the account first anyway.
+ if !ok {
+ _, err := r.Account(addr)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ root = r.subRoots[addr]
+ }
+ var err error
+ tr, err = trie.NewStateTrie(trie.StorageTrieID(r.root, crypto.HashData(r.buff, addr.Bytes()), root), r.db)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ r.subTries[addr] = tr
+ }
+ }
+ ret, err := tr.GetStorage(addr, key.Bytes())
+ if err != nil {
+ return common.Hash{}, err
+ }
+ value.SetBytes(ret)
+ return value, nil
+}
+
+// Copy implements Reader, returning a deep-copied trie reader.
+func (r *trieReader) Copy() Reader {
+ tries := make(map[common.Address]Trie)
+ for addr, tr := range r.subTries {
+ tries[addr] = mustCopyTrie(tr)
+ }
+ return &trieReader{
+ root: r.root,
+ db: r.db,
+ buff: crypto.NewKeccakState(),
+ mainTrie: mustCopyTrie(r.mainTrie),
+ subRoots: maps.Clone(r.subRoots),
+ subTries: tries,
+ }
+}
+
+// multiReader aggregates a list of Readers, providing state access by
+// leveraging all of them. The lookup priority is determined by the position
+// in the reader list.
+type multiReader struct {
+ readers []Reader // List of readers, sorted by checking priority
+}
+
+// newMultiReader constructs a multiReader instance from the given readers, which
+// are assumed to be ordered by priority. At least one reader must be supplied to
+// construct a multiReader.
+func newMultiReader(readers ...Reader) (*multiReader, error) {
+ if len(readers) == 0 {
+ return nil, errors.New("empty reader set")
+ }
+ return &multiReader{
+ readers: readers,
+ }, nil
+}
+
+// Account implements the Reader interface, retrieving the account associated with
+// a particular address.
+//
+// - Returns a nil account if it does not exist
+// - Returns an error only if an unexpected issue occurs
+// - The returned account is safe to modify after the call
+func (r *multiReader) Account(addr common.Address) (*types.StateAccount, error) {
+ var errs []error
+ for _, reader := range r.readers {
+ acct, err := reader.Account(addr)
+ if err == nil {
+ return acct, nil
+ }
+ errs = append(errs, err)
+ }
+ return nil, errors.Join(errs...)
+}
+
+// Storage implements the Reader interface, retrieving the storage slot associated
+// with a particular account address and slot key.
+//
+// - Returns an empty slot if it does not exist
+// - Returns an error only if an unexpected issue occurs
+// - The returned storage slot is safe to modify after the call
+func (r *multiReader) Storage(addr common.Address, slot common.Hash) (common.Hash, error) {
+ var errs []error
+ for _, reader := range r.readers {
+ slot, err := reader.Storage(addr, slot)
+ if err == nil {
+ return slot, nil
+ }
+ errs = append(errs, err)
+ }
+ return common.Hash{}, errors.Join(errs...)
+}
+
+// Copy implements the Reader interface, returning a deep-copied state reader.
+func (r *multiReader) Copy() Reader {
+ var readers []Reader
+ for _, reader := range r.readers {
+ readers = append(readers, reader.Copy())
+ }
+ return &multiReader{readers: readers}
+}
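
To make the priority semantics concrete, here is a small self-contained sketch of the same fallback pattern (simplified stand-in types, not geth's actual ones): readers are tried in order, the first success wins, and the snapshot-style reader only errors out when it cannot serve the request, e.g. because it is stale or not yet generated, at which point the trie-style reader takes over.

```go
package main

import (
	"errors"
	"fmt"
)

// reader is a simplified stand-in for the state.Reader interface.
type reader interface {
	get(key string) (string, error)
}

// mapReader serves only the keys it covers and errors otherwise, modelling a
// snapshot that is stale or not yet fully generated.
type mapReader map[string]string

func (m mapReader) get(key string) (string, error) {
	if v, ok := m[key]; ok {
		return v, nil
	}
	return "", errors.New("not covered by this reader")
}

// multi queries readers in priority order, returning the first successful
// answer and joining all errors if every reader fails, mirroring
// multiReader.Account and multiReader.Storage above.
type multi []reader

func (m multi) get(key string) (string, error) {
	var errs []error
	for _, r := range m {
		v, err := r.get(key)
		if err == nil {
			return v, nil
		}
		errs = append(errs, err)
	}
	return "", errors.Join(errs...)
}

func main() {
	snap := mapReader{"a": "from snapshot"}               // fast, possibly incomplete
	trie := mapReader{"a": "from trie", "b": "from trie"} // slower, authoritative
	r := multi{snap, trie}

	fmt.Println(r.get("a")) // served by the snapshot
	fmt.Println(r.get("b")) // snapshot cannot serve it, the trie answers
	fmt.Println(r.get("c")) // both fail: a joined error is returned
}
```
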
diff --git a/core/state/snapshot/disklayer.go b/core/state/snapshot/disklayer.go
index f5518a204c..76928edf07 100644
--- a/core/state/snapshot/disklayer.go
+++ b/core/state/snapshot/disklayer.go
@@ -74,6 +74,14 @@ func (dl *diskLayer) Stale() bool {
return dl.stale
}
+// markStale sets the stale flag as true.
+func (dl *diskLayer) markStale() {
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+
+ dl.stale = true
+}
+
// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
func (dl *diskLayer) Account(hash common.Hash) (*types.SlimAccount, error) {
@@ -175,3 +183,18 @@ func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
return newDiffLayer(dl, blockHash, destructs, accounts, storage)
}
+
+// stopGeneration aborts the state snapshot generation if it is currently running.
+func (dl *diskLayer) stopGeneration() {
+ dl.lock.RLock()
+ generating := dl.genMarker != nil
+ dl.lock.RUnlock()
+ if !generating {
+ return
+ }
+ if dl.genAbort != nil {
+ abort := make(chan *generatorStats)
+ dl.genAbort <- abort
+ <-abort
+ }
+}
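
The stopGeneration helper uses the same abort handshake that Disable and Rebuild previously inlined: send a reply channel to the generator and block until it acknowledges. A simplified standalone model of that handshake (not the real generator loop) looks like this:

```go
package main

import (
	"fmt"
	"time"
)

// generator models the snapshot generation loop: it works until it receives
// an abort request carrying a reply channel, then acknowledges and exits.
type generator struct {
	abort chan chan struct{}
}

func newGenerator() *generator {
	g := &generator{abort: make(chan chan struct{})}
	go func() {
		for {
			select {
			case ack := <-g.abort:
				ack <- struct{}{} // confirm shutdown, like `abort <- stats`
				return
			default:
				time.Sleep(time.Millisecond) // stand-in for generating a range
			}
		}
	}()
	return g
}

// stop mirrors diskLayer.stopGeneration: hand the generator a reply channel
// and wait for the acknowledgement before returning.
func (g *generator) stop() {
	ack := make(chan struct{})
	g.abort <- ack
	<-ack
}

func main() {
	g := newGenerator()
	g.stop()
	fmt.Println("generation aborted")
}
```
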
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index d81a628c91..6d9e163075 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -631,16 +631,10 @@ func generateAccounts(ctx *generatorContext, dl *diskLayer, accMarker []byte) er
accMarker = nil
return nil
}
- // Always reset the initial account range as 1 whenever recover from the
- // interruption. TODO(rjl493456442) can we remove it?
- var accountRange = accountCheckRange
- if len(accMarker) > 0 {
- accountRange = 1
- }
origin := common.CopyBytes(accMarker)
for {
id := trie.StateTrieID(dl.root)
- exhausted, last, err := dl.generateRange(ctx, id, rawdb.SnapshotAccountPrefix, snapAccount, origin, accountRange, onAccount, types.FullAccountRLP)
+ exhausted, last, err := dl.generateRange(ctx, id, rawdb.SnapshotAccountPrefix, snapAccount, origin, accountCheckRange, onAccount, types.FullAccountRLP)
if err != nil {
 return err // The procedure is aborted, either by external signal or internal error.
}
@@ -652,7 +646,6 @@ func generateAccounts(ctx *generatorContext, dl *diskLayer, accMarker []byte) er
ctx.removeStorageLeft()
break
}
- accountRange = accountCheckRange
}
return nil
}
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index 752f4359fb..d1ffb5cc2d 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -258,24 +258,11 @@ func (t *Tree) Disable() {
for _, layer := range t.layers {
switch layer := layer.(type) {
case *diskLayer:
-
- layer.lock.RLock()
- generating := layer.genMarker != nil
- layer.lock.RUnlock()
- if !generating {
- // Generator is already aborted or finished
- break
- }
- // If the base layer is generating, abort it
- if layer.genAbort != nil {
- abort := make(chan *generatorStats)
- layer.genAbort <- abort
- <-abort
- }
- // Layer should be inactive now, mark it as stale
- layer.lock.Lock()
- layer.stale = true
- layer.lock.Unlock()
+ // TODO this function will hang if it's called twice. Will
+ // fix it in the following PRs.
+ layer.stopGeneration()
+ layer.markStale()
+ layer.Release()
case *diffLayer:
// If the layer is a simple diff, simply mark as stale
@@ -730,16 +717,11 @@ func (t *Tree) Rebuild(root common.Hash) {
for _, layer := range t.layers {
switch layer := layer.(type) {
case *diskLayer:
- // If the base layer is generating, abort it and save
- if layer.genAbort != nil {
- abort := make(chan *generatorStats)
- layer.genAbort <- abort
- <-abort
- }
- // Layer should be inactive now, mark it as stale
- layer.lock.Lock()
- layer.stale = true
- layer.lock.Unlock()
+ // TODO this function will hang if it's called twice. Will
+ // fix it in the following PRs.
+ layer.stopGeneration()
+ layer.markStale()
+ layer.Release()
case *diffLayer:
// If the layer is a simple diff, simply mark as stale
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 5946683bc0..422badb19b 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -143,7 +143,7 @@ func (s *stateObject) getTrie() (Trie, error) {
func (s *stateObject) getPrefetchedTrie() Trie {
// If there's nothing to meaningfully return, let the user figure it out by
// pulling the trie from disk.
- if s.data.Root == types.EmptyRootHash || s.db.prefetcher == nil {
+ if (s.data.Root == types.EmptyRootHash && !s.db.db.TrieDB().IsVerkle()) || s.db.prefetcher == nil {
return nil
}
// Attempt to retrieve the trie from the prefetcher
@@ -187,52 +187,23 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
s.originStorage[key] = common.Hash{} // track the empty slot as origin value
return common.Hash{}
}
- // If no live objects are available, attempt to use snapshots
- var (
- enc []byte
- err error
- value common.Hash
- )
- if s.db.snap != nil {
- start := time.Now()
- enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes()))
- s.db.SnapshotStorageReads += time.Since(start)
-
- if len(enc) > 0 {
- _, content, _, err := rlp.Split(enc)
- if err != nil {
- s.db.setError(err)
- }
- value.SetBytes(content)
- }
- }
- // If the snapshot is unavailable or reading from it fails, load from the database.
- if s.db.snap == nil || err != nil {
- start := time.Now()
- tr, err := s.getTrie()
- if err != nil {
- s.db.setError(err)
- return common.Hash{}
- }
- val, err := tr.GetStorage(s.address, key.Bytes())
- s.db.StorageReads += time.Since(start)
+ s.db.StorageLoaded++
- if err != nil {
- s.db.setError(err)
- return common.Hash{}
- }
- value.SetBytes(val)
+ start := time.Now()
+ value, err := s.db.reader.Storage(s.address, key)
+ if err != nil {
+ s.db.setError(err)
+ return common.Hash{}
}
- // Independent of where we loaded the data from, add it to the prefetcher.
- // Whilst this would be a bit weird if snapshots are disabled, but we still
- // want the trie nodes to end up in the prefetcher too, so just push through.
+ s.db.StorageReads += time.Since(start)
+
+ // Schedule the resolved storage slots for prefetching if it's enabled.
if s.db.prefetcher != nil && s.data.Root != types.EmptyRootHash {
if err = s.db.prefetcher.prefetch(s.addrHash, s.origin.Root, s.address, [][]byte{key[:]}, true); err != nil {
log.Error("Failed to prefetch storage slot", "addr", s.address, "key", key, "err", err)
}
}
s.originStorage[key] = value
- s.db.StorageLoaded++
return value
}
@@ -527,7 +498,7 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject {
newContract: s.newContract,
}
if s.trie != nil {
- obj.trie = db.db.CopyTrie(s.trie)
+ obj.trie = mustCopyTrie(s.trie)
}
return obj
}
diff --git a/core/state/state_test.go b/core/state/state_test.go
index 9200e4abe9..9de50beb12 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -26,27 +26,25 @@ import (
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/triedb"
"github.com/holiman/uint256"
)
type stateEnv struct {
- db ethdb.Database
state *StateDB
}
func newStateEnv() *stateEnv {
- db := rawdb.NewMemoryDatabase()
- sdb, _ := New(types.EmptyRootHash, NewDatabase(db), nil)
- return &stateEnv{db: db, state: sdb}
+ sdb, _ := New(types.EmptyRootHash, NewDatabaseForTesting())
+ return &stateEnv{state: sdb}
}
func TestDump(t *testing.T) {
db := rawdb.NewMemoryDatabase()
- tdb := NewDatabaseWithConfig(db, &triedb.Config{Preimages: true})
- sdb, _ := New(types.EmptyRootHash, tdb, nil)
- s := &stateEnv{db: db, state: sdb}
+ triedb := triedb.NewDatabase(db, &triedb.Config{Preimages: true})
+ tdb := NewDatabase(triedb, nil)
+ sdb, _ := New(types.EmptyRootHash, tdb)
+ s := &stateEnv{state: sdb}
// generate a few entries
obj1 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01}))
@@ -62,7 +60,7 @@ func TestDump(t *testing.T) {
root, _ := s.state.Commit(0, false)
// check that DumpToCollector contains the state objects that are in trie
- s.state, _ = New(root, tdb, nil)
+ s.state, _ = New(root, tdb)
got := string(s.state.Dump(nil))
want := `{
"root": "71edff0130dd2385947095001c73d9e28d862fc286fca2b922ca6f6f3cddfdd2",
@@ -101,9 +99,10 @@ func TestDump(t *testing.T) {
func TestIterativeDump(t *testing.T) {
db := rawdb.NewMemoryDatabase()
- tdb := NewDatabaseWithConfig(db, &triedb.Config{Preimages: true})
- sdb, _ := New(types.EmptyRootHash, tdb, nil)
- s := &stateEnv{db: db, state: sdb}
+ triedb := triedb.NewDatabase(db, &triedb.Config{Preimages: true})
+ tdb := NewDatabase(triedb, nil)
+ sdb, _ := New(types.EmptyRootHash, tdb)
+ s := &stateEnv{state: sdb}
// generate a few entries
obj1 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01}))
@@ -119,7 +118,7 @@ func TestIterativeDump(t *testing.T) {
s.state.updateStateObject(obj1)
s.state.updateStateObject(obj2)
root, _ := s.state.Commit(0, false)
- s.state, _ = New(root, tdb, nil)
+ s.state, _ = New(root, tdb)
b := &bytes.Buffer{}
s.state.IterativeDump(nil, json.NewEncoder(b))
@@ -195,7 +194,7 @@ func TestSnapshotEmpty(t *testing.T) {
}
func TestCreateObjectRevert(t *testing.T) {
- state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ state, _ := New(types.EmptyRootHash, NewDatabaseForTesting())
addr := common.BytesToAddress([]byte("so0"))
snap := state.Snapshot()
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 300ce29a67..1a12f519a4 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -82,10 +82,8 @@ type StateDB struct {
db Database
prefetcher *triePrefetcher
trie Trie
- hasher crypto.KeccakState
logger *tracing.Hooks
- snaps *snapshot.Tree // Nil if snapshot is not available
- snap snapshot.Snapshot // Nil if snapshot is not available
+ reader Reader
// originalRoot is the pre-state root, before any changes were made.
// It will be updated when the Commit is called.
@@ -144,17 +142,15 @@ type StateDB struct {
witness *stateless.Witness
// Measurements gathered during execution for debugging purposes
- AccountReads time.Duration
- AccountHashes time.Duration
- AccountUpdates time.Duration
- AccountCommits time.Duration
- StorageReads time.Duration
- StorageUpdates time.Duration
- StorageCommits time.Duration
- SnapshotAccountReads time.Duration
- SnapshotStorageReads time.Duration
- SnapshotCommits time.Duration
- TrieDBCommits time.Duration
+ AccountReads time.Duration
+ AccountHashes time.Duration
+ AccountUpdates time.Duration
+ AccountCommits time.Duration
+ StorageReads time.Duration
+ StorageUpdates time.Duration
+ StorageCommits time.Duration
+ SnapshotCommits time.Duration
+ TrieDBCommits time.Duration
AccountLoaded int // Number of accounts retrieved from the database during the state transition
AccountUpdated int // Number of accounts updated during the state transition
@@ -165,16 +161,20 @@ type StateDB struct {
}
// New creates a new state from a given trie.
-func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) {
+func New(root common.Hash, db Database) (*StateDB, error) {
tr, err := db.OpenTrie(root)
if err != nil {
return nil, err
}
+ reader, err := db.Reader(root)
+ if err != nil {
+ return nil, err
+ }
sdb := &StateDB{
db: db,
trie: tr,
originalRoot: root,
- snaps: snaps,
+ reader: reader,
stateObjects: make(map[common.Address]*stateObject),
stateObjectsDestruct: make(map[common.Address]*stateObject),
mutations: make(map[common.Address]*mutation),
@@ -183,13 +183,9 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
journal: newJournal(),
accessList: newAccessList(),
transientStorage: newTransientStorage(),
- hasher: crypto.NewKeccakState(),
}
if db.TrieDB().IsVerkle() {
- sdb.accessEvents = NewAccessEvents(db.(*cachingDB).pointCache)
- }
- if sdb.snaps != nil {
- sdb.snap = sdb.snaps.Snapshot(root)
+ sdb.accessEvents = NewAccessEvents(db.PointCache())
}
return sdb, nil
}
@@ -204,30 +200,23 @@ func (s *StateDB) SetLogger(l *tracing.Hooks) {
// commit phase, most of the needed data is already hot.
func (s *StateDB) StartPrefetcher(namespace string, witness *stateless.Witness) {
// Terminate any previously running prefetcher
- if s.prefetcher != nil {
- s.prefetcher.terminate(false)
- s.prefetcher.report()
- s.prefetcher = nil
- }
+ s.StopPrefetcher()
+
// Enable witness collection if requested
s.witness = witness
- // If snapshots are enabled, start prefethers explicitly
- if s.snap != nil {
- s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace, witness == nil)
-
- // With the switch to the Proof-of-Stake consensus algorithm, block production
- // rewards are now handled at the consensus layer. Consequently, a block may
- // have no state transitions if it contains no transactions and no withdrawals.
- // In such cases, the account trie won't be scheduled for prefetching, leading
- // to unnecessary error logs.
- //
- // To prevent this, the account trie is always scheduled for prefetching once
- // the prefetcher is constructed. For more details, see:
- // https://github.com/ethereum/go-ethereum/issues/29880
- if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, nil, false); err != nil {
- log.Error("Failed to prefetch account trie", "root", s.originalRoot, "err", err)
- }
+ // With the switch to the Proof-of-Stake consensus algorithm, block production
+ // rewards are now handled at the consensus layer. Consequently, a block may
+ // have no state transitions if it contains no transactions and no withdrawals.
+ // In such cases, the account trie won't be scheduled for prefetching, leading
+ // to unnecessary error logs.
+ //
+ // To prevent this, the account trie is always scheduled for prefetching once
+ // the prefetcher is constructed. For more details, see:
+ // https://github.com/ethereum/go-ethereum/issues/29880
+ s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace, witness == nil)
+ if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, nil, false); err != nil {
+ log.Error("Failed to prefetch account trie", "root", s.originalRoot, "err", err)
}
}
@@ -557,7 +546,7 @@ func (s *StateDB) GetTransientState(addr common.Address, key common.Hash) common
func (s *StateDB) updateStateObject(obj *stateObject) {
// Encode the account and update the account trie
addr := obj.Address()
- if err := s.trie.UpdateAccount(addr, &obj.data); err != nil {
+ if err := s.trie.UpdateAccount(addr, &obj.data, len(obj.code)); err != nil {
s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err))
}
if obj.dirtyCode {
@@ -583,57 +572,28 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject {
if _, ok := s.stateObjectsDestruct[addr]; ok {
return nil
}
- // If no live objects are available, attempt to use snapshots
- var data *types.StateAccount
- if s.snap != nil {
- start := time.Now()
- acc, err := s.snap.Account(crypto.HashData(s.hasher, addr.Bytes()))
- s.SnapshotAccountReads += time.Since(start)
- if err == nil {
- if acc == nil {
- s.AccountLoaded++
- return nil
- }
- data = &types.StateAccount{
- Nonce: acc.Nonce,
- Balance: acc.Balance,
- CodeHash: acc.CodeHash,
- Root: common.BytesToHash(acc.Root),
- }
- if len(data.CodeHash) == 0 {
- data.CodeHash = types.EmptyCodeHash.Bytes()
- }
- if data.Root == (common.Hash{}) {
- data.Root = types.EmptyRootHash
- }
- }
+ s.AccountLoaded++
+
+ start := time.Now()
+ acct, err := s.reader.Account(addr)
+ if err != nil {
+ s.setError(fmt.Errorf("getStateObject (%x) error: %w", addr.Bytes(), err))
+ return nil
}
- // If snapshot unavailable or reading from it failed, load from the database
- if data == nil {
- start := time.Now()
- var err error
- data, err = s.trie.GetAccount(addr)
- s.AccountReads += time.Since(start)
+ s.AccountReads += time.Since(start)
- if err != nil {
- s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %w", addr.Bytes(), err))
- return nil
- }
- if data == nil {
- s.AccountLoaded++
- return nil
- }
+ // Short circuit if the account is not found
+ if acct == nil {
+ return nil
}
- // Independent of where we loaded the data from, add it to the prefetcher.
- // Whilst this would be a bit weird if snapshots are disabled, but we still
- // want the trie nodes to end up in the prefetcher too, so just push through.
+ // Schedule the resolved account for prefetching if it's enabled.
if s.prefetcher != nil {
if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, [][]byte{addr[:]}, true); err != nil {
log.Error("Failed to prefetch account", "addr", addr, "err", err)
}
}
// Insert into the live set
- obj := newObject(s, addr, data)
+ obj := newObject(s, addr, acct)
s.setStateObject(obj)
s.AccountLoaded++
return obj
@@ -688,8 +648,8 @@ func (s *StateDB) Copy() *StateDB {
// Copy all the basic fields, initialize the memory ones
state := &StateDB{
db: s.db,
- trie: s.db.CopyTrie(s.trie),
- hasher: crypto.NewKeccakState(),
+ trie: mustCopyTrie(s.trie),
+ reader: s.reader.Copy(),
originalRoot: s.originalRoot,
stateObjects: make(map[common.Address]*stateObject, len(s.stateObjects)),
stateObjectsDestruct: make(map[common.Address]*stateObject, len(s.stateObjectsDestruct)),
@@ -701,14 +661,16 @@ func (s *StateDB) Copy() *StateDB {
logs: make(map[common.Hash][]*types.Log, len(s.logs)),
logSize: s.logSize,
preimages: maps.Clone(s.preimages),
- journal: s.journal.copy(),
- // In order for the block producer to be able to use and make additions
- // to the snapshot tree, we need to copy that as well. Otherwise, any
- // block mined by ourselves will cause gaps in the tree, and force the
- // miner to operate trie-backed only.
- snaps: s.snaps,
- snap: s.snap,
+ // Do we need to copy the access list and transient storage?
+ // In practice: No. At the start of a transaction, these two lists are empty.
+ // In practice, we only ever copy state _between_ transactions/blocks, never
+ // in the middle of a transaction. However, it doesn't cost us much to copy
+ // empty lists, so we do it anyway to not blow up if we ever decide to copy them
+ // in the middle of a transaction.
+ accessList: s.accessList.Copy(),
+ transientStorage: s.transientStorage.Copy(),
+ journal: s.journal.copy(),
}
if s.witness != nil {
state.witness = s.witness.Copy()
@@ -737,14 +699,6 @@ func (s *StateDB) Copy() *StateDB {
}
state.logs[hash] = cpy
}
- // Do we need to copy the access list and transient storage?
- // In practice: No. At the start of a transaction, these two lists are empty.
- // In practice, we only ever copy state _between_ transactions/blocks, never
- // in the middle of a transaction. However, it doesn't cost us much to copy
- // empty lists, so we do it anyway to not blow up if we ever decide copy them
- // in the middle of a transaction.
- state.accessList = s.accessList.Copy()
- state.transientStorage = s.transientStorage.Copy()
return state
}
@@ -979,8 +933,8 @@ func (s *StateDB) clearJournalAndRefund() {
// of a specific account. It leverages the associated state snapshot for fast
// storage iteration and constructs trie node deletion markers by creating
// stack trie with iterated slots.
-func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, *trienode.NodeSet, error) {
- iter, err := s.snaps.StorageIterator(s.originalRoot, addrHash, common.Hash{})
+func (s *StateDB) fastDeleteStorage(snaps *snapshot.Tree, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, *trienode.NodeSet, error) {
+ iter, err := snaps.StorageIterator(s.originalRoot, addrHash, common.Hash{})
if err != nil {
return nil, nil, err
}
@@ -1058,10 +1012,11 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
// The fast approach can fail if the snapshot is not fully
// generated or is internally corrupted. Fall back to the slow
// one just in case.
- if s.snap != nil {
- slots, nodes, err = s.fastDeleteStorage(addrHash, root)
+ snaps := s.db.Snapshot()
+ if snaps != nil {
+ slots, nodes, err = s.fastDeleteStorage(snaps, addrHash, root)
}
- if s.snap == nil || err != nil {
+ if snaps == nil || err != nil {
slots, nodes, err = s.slowDeleteStorage(addr, addrHash, root)
}
if err != nil {
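The deletion path keeps its previous shape; only the snapshot tree is now fetched from the database on demand. A generic sketch of the fast-path-with-fallback pattern, with hypothetical fastDelete/slowDelete closures standing in for the snapshot- and trie-backed implementations:

```go
// Sketch: prefer the snapshot-backed fast path when a snapshot tree exists,
// and fall back to the trie-backed slow path if it is missing or fails.
func deleteWithFallback(snaps *snapshot.Tree, fastDelete, slowDelete func() error) error {
	var err error
	if snaps != nil {
		err = fastDelete()
	}
	if snaps == nil || err != nil {
		err = slowDelete()
	}
	return err
}
```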
@@ -1299,7 +1254,7 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool) (*stateU
return nil, err
}
// Commit dirty contract code if any exists
- if db := s.db.DiskDB(); db != nil && len(ret.codes) > 0 {
+ if db := s.db.TrieDB().Disk(); db != nil && len(ret.codes) > 0 {
batch := db.NewBatch()
for _, code := range ret.codes {
rawdb.WriteCode(batch, code.hash, code.blob)
@@ -1310,18 +1265,16 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool) (*stateU
}
if !ret.empty() {
// If snapshotting is enabled, update the snapshot tree with this new version
- if s.snap != nil {
- s.snap = nil
-
+ if snap := s.db.Snapshot(); snap != nil {
start := time.Now()
- if err := s.snaps.Update(ret.root, ret.originRoot, ret.destructs, ret.accounts, ret.storages); err != nil {
+ if err := snap.Update(ret.root, ret.originRoot, ret.destructs, ret.accounts, ret.storages); err != nil {
log.Warn("Failed to update snapshot tree", "from", ret.originRoot, "to", ret.root, "err", err)
}
// Keep 128 diff layers in the memory, persistent layer is 129th.
// - head layer is paired with HEAD state
// - head-1 layer is paired with HEAD-1 state
// - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state
- if err := s.snaps.Cap(ret.root, TriesInMemory); err != nil {
+ if err := snap.Cap(ret.root, TriesInMemory); err != nil {
log.Warn("Failed to cap snapshot tree", "root", ret.root, "layers", TriesInMemory, "err", err)
}
s.SnapshotCommits += time.Since(start)
@@ -1336,6 +1289,7 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool) (*stateU
s.TrieDBCommits += time.Since(start)
}
}
+ s.reader, _ = s.db.Reader(s.originalRoot)
return ret, err
}
@@ -1451,6 +1405,7 @@ func (s *StateDB) markUpdate(addr common.Address) {
s.mutations[addr].typ = update
}
+// PointCache returns the point cache used by the verkle tree.
func (s *StateDB) PointCache() *utils.PointCache {
return s.db.PointCache()
}
diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go
index 153035b9c1..90250819e3 100644
--- a/core/state/statedb_fuzz_test.go
+++ b/core/state/statedb_fuzz_test.go
@@ -197,7 +197,6 @@ func (test *stateTest) run() bool {
}
disk = rawdb.NewMemoryDatabase()
tdb = triedb.NewDatabase(disk, &triedb.Config{PathDB: pathdb.Defaults})
- sdb = NewDatabaseWithNodeDB(disk, tdb)
byzantium = rand.Intn(2) == 0
)
defer disk.Close()
@@ -217,7 +216,7 @@ func (test *stateTest) run() bool {
if i != 0 {
root = roots[len(roots)-1]
}
- state, err := New(root, sdb, snaps)
+ state, err := New(root, NewDatabase(tdb, snaps))
if err != nil {
panic(err)
}
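The test churn in this and the following files follows a single pattern: the snapshot tree moves from a StateDB constructor argument into the state database itself, with NewDatabaseForTesting as the in-memory shorthand. A sketch of the two wiring styles, using the same calls as the tests:

```go
disk := rawdb.NewMemoryDatabase()
tdb := triedb.NewDatabase(disk, triedb.HashDefaults)
snaps, _ := snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, types.EmptyRootHash)

// Production-style wiring: the snapshot tree is bound to the state database.
sdb := state.NewDatabase(tdb, snaps)
st, _ := state.New(types.EmptyRootHash, sdb)

// Shorthand for tests that only need an ephemeral in-memory state database.
st2, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
_, _ = st, st2
```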
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index a8ae6eb6d3..9441834c6a 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -19,7 +19,6 @@ package state
import (
"bytes"
"encoding/binary"
- "errors"
"fmt"
"maps"
"math"
@@ -53,8 +52,9 @@ func TestUpdateLeaks(t *testing.T) {
var (
db = rawdb.NewMemoryDatabase()
tdb = triedb.NewDatabase(db, nil)
+ sdb = NewDatabase(tdb, nil)
)
- state, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(db, tdb), nil)
+ state, _ := New(types.EmptyRootHash, sdb)
// Update it with some accounts
for i := byte(0); i < 255; i++ {
@@ -90,8 +90,8 @@ func TestIntermediateLeaks(t *testing.T) {
finalDb := rawdb.NewMemoryDatabase()
transNdb := triedb.NewDatabase(transDb, nil)
finalNdb := triedb.NewDatabase(finalDb, nil)
- transState, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(transDb, transNdb), nil)
- finalState, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(finalDb, finalNdb), nil)
+ transState, _ := New(types.EmptyRootHash, NewDatabase(transNdb, nil))
+ finalState, _ := New(types.EmptyRootHash, NewDatabase(finalNdb, nil))
modify := func(state *StateDB, addr common.Address, i, tweak byte) {
state.SetBalance(addr, uint256.NewInt(uint64(11*i)+uint64(tweak)), tracing.BalanceChangeUnspecified)
@@ -166,7 +166,7 @@ func TestIntermediateLeaks(t *testing.T) {
// https://github.com/ethereum/go-ethereum/pull/15549.
func TestCopy(t *testing.T) {
// Create a random state test to copy and modify "independently"
- orig, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ orig, _ := New(types.EmptyRootHash, NewDatabaseForTesting())
for i := byte(0); i < 255; i++ {
obj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i}))
@@ -230,8 +230,8 @@ func TestCopy(t *testing.T) {
// TestCopyWithDirtyJournal tests if Copy can correctly create an equal copied
// stateDB with a dirty journal present.
func TestCopyWithDirtyJournal(t *testing.T) {
- db := NewDatabase(rawdb.NewMemoryDatabase())
- orig, _ := New(types.EmptyRootHash, db, nil)
+ db := NewDatabaseForTesting()
+ orig, _ := New(types.EmptyRootHash, db)
// Fill up the initial states
for i := byte(0); i < 255; i++ {
@@ -241,7 +241,7 @@ func TestCopyWithDirtyJournal(t *testing.T) {
orig.updateStateObject(obj)
}
root, _ := orig.Commit(0, true)
- orig, _ = New(root, db, nil)
+ orig, _ = New(root, db)
// modify all in memory without finalizing
for i := byte(0); i < 255; i++ {
@@ -274,8 +274,8 @@ func TestCopyWithDirtyJournal(t *testing.T) {
// It then proceeds to make changes to S1. Those changes are _not_ supposed
// to affect S2. This test checks that the copy properly deep-copies the objectstate
func TestCopyObjectState(t *testing.T) {
- db := NewDatabase(rawdb.NewMemoryDatabase())
- orig, _ := New(types.EmptyRootHash, db, nil)
+ db := NewDatabaseForTesting()
+ orig, _ := New(types.EmptyRootHash, db)
// Fill up the initial states
for i := byte(0); i < 5; i++ {
@@ -527,7 +527,7 @@ func (test *snapshotTest) String() string {
func (test *snapshotTest) run() bool {
// Run all actions and create snapshots.
var (
- state, _ = New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ state, _ = New(types.EmptyRootHash, NewDatabaseForTesting())
snapshotRevs = make([]int, len(test.snapshots))
sindex = 0
checkstates = make([]*StateDB, len(test.snapshots))
@@ -699,7 +699,7 @@ func TestTouchDelete(t *testing.T) {
s := newStateEnv()
s.state.getOrNewStateObject(common.Address{})
root, _ := s.state.Commit(0, false)
- s.state, _ = New(root, s.state.db, s.state.snaps)
+ s.state, _ = New(root, s.state.db)
snapshot := s.state.Snapshot()
s.state.AddBalance(common.Address{}, new(uint256.Int), tracing.BalanceChangeUnspecified)
@@ -716,7 +716,7 @@ func TestTouchDelete(t *testing.T) {
// TestCopyOfCopy tests that modified objects are carried over to the copy, and the copy of the copy.
// See https://github.com/ethereum/go-ethereum/pull/15225#issuecomment-380191512
func TestCopyOfCopy(t *testing.T) {
- state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ state, _ := New(types.EmptyRootHash, NewDatabaseForTesting())
addr := common.HexToAddress("aaaa")
state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified)
@@ -733,8 +733,8 @@ func TestCopyOfCopy(t *testing.T) {
//
// See https://github.com/ethereum/go-ethereum/issues/20106.
func TestCopyCommitCopy(t *testing.T) {
- tdb := NewDatabase(rawdb.NewMemoryDatabase())
- state, _ := New(types.EmptyRootHash, tdb, nil)
+ tdb := NewDatabaseForTesting()
+ state, _ := New(types.EmptyRootHash, tdb)
// Create an account and check if the retrieved balance is correct
addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
@@ -787,7 +787,7 @@ func TestCopyCommitCopy(t *testing.T) {
}
// Commit state, ensure states can be loaded from disk
root, _ := state.Commit(0, false)
- state, _ = New(root, tdb, nil)
+ state, _ = New(root, tdb)
if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
t.Fatalf("state post-commit balance mismatch: have %v, want %v", balance, 42)
}
@@ -807,7 +807,7 @@ func TestCopyCommitCopy(t *testing.T) {
//
// See https://github.com/ethereum/go-ethereum/issues/20106.
func TestCopyCopyCommitCopy(t *testing.T) {
- state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ state, _ := New(types.EmptyRootHash, NewDatabaseForTesting())
// Create an account and check if the retrieved balance is correct
addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
@@ -876,8 +876,8 @@ func TestCopyCopyCommitCopy(t *testing.T) {
// TestCommitCopy tests the copy from a committed state is not fully functional.
func TestCommitCopy(t *testing.T) {
- db := NewDatabase(rawdb.NewMemoryDatabase())
- state, _ := New(types.EmptyRootHash, db, nil)
+ db := NewDatabaseForTesting()
+ state, _ := New(types.EmptyRootHash, db)
// Create an account and check if the retrieved balance is correct
addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
@@ -902,7 +902,7 @@ func TestCommitCopy(t *testing.T) {
}
root, _ := state.Commit(0, true)
- state, _ = New(root, db, nil)
+ state, _ = New(root, db)
state.SetState(addr, skey2, sval2)
state.Commit(1, true)
@@ -915,10 +915,10 @@ func TestCommitCopy(t *testing.T) {
t.Fatalf("unexpected code: have %x", code)
}
// Slots remain resolvable through the shared state reader even after commit
- if val := copied.GetState(addr, skey1); val != (common.Hash{}) {
- t.Fatalf("unexpected storage slot: have %x", sval1)
+ if val := copied.GetState(addr, skey1); val != sval1 {
+ t.Fatalf("unexpected storage slot: have %x", val)
}
- if val := copied.GetCommittedState(addr, skey1); val != (common.Hash{}) {
+ if val := copied.GetCommittedState(addr, skey1); val != sval1 {
t.Fatalf("unexpected storage slot: have %x", val)
}
// Slots cached in the stateDB, available after commit
@@ -928,9 +928,6 @@ func TestCommitCopy(t *testing.T) {
if val := copied.GetCommittedState(addr, skey2); val != sval2 {
t.Fatalf("unexpected storage slot: have %x", val)
}
- if !errors.Is(copied.Error(), trie.ErrCommitted) {
- t.Fatalf("unexpected state error, %v", copied.Error())
- }
}
// TestDeleteCreateRevert tests a weird state transition corner case that we hit
@@ -943,13 +940,13 @@ func TestCommitCopy(t *testing.T) {
// first, but the journal wiped the entire state object on create-revert.
func TestDeleteCreateRevert(t *testing.T) {
// Create an initial state with a single contract
- state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ state, _ := New(types.EmptyRootHash, NewDatabaseForTesting())
addr := common.BytesToAddress([]byte("so"))
state.SetBalance(addr, uint256.NewInt(1), tracing.BalanceChangeUnspecified)
root, _ := state.Commit(0, false)
- state, _ = New(root, state.db, state.snaps)
+ state, _ = New(root, state.db)
// Simulate self-destructing in one transaction, then create-reverting in another
state.SelfDestruct(addr)
@@ -961,7 +958,7 @@ func TestDeleteCreateRevert(t *testing.T) {
// Commit the entire state and make sure we don't crash and have the correct state
root, _ = state.Commit(0, true)
- state, _ = New(root, state.db, state.snaps)
+ state, _ = New(root, state.db)
if state.getStateObject(addr) != nil {
t.Fatalf("self-destructed contract came alive")
@@ -992,10 +989,10 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
CleanCacheSize: 0,
}}) // disable caching
}
- db := NewDatabaseWithNodeDB(memDb, tdb)
+ db := NewDatabase(tdb, nil)
var root common.Hash
- state, _ := New(types.EmptyRootHash, db, nil)
+ state, _ := New(types.EmptyRootHash, db)
addr := common.BytesToAddress([]byte("so"))
{
state.SetBalance(addr, uint256.NewInt(1), tracing.BalanceChangeUnspecified)
@@ -1009,7 +1006,7 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
tdb.Commit(root, false)
}
// Create a new state on the old root
- state, _ = New(root, db, nil)
+ state, _ = New(root, db)
// Now we clear out the memdb
it := memDb.NewIterator(nil, nil)
for it.Next() {
@@ -1042,9 +1039,8 @@ func TestStateDBAccessList(t *testing.T) {
return common.HexToHash(a)
}
- memDb := rawdb.NewMemoryDatabase()
- db := NewDatabase(memDb)
- state, _ := New(types.EmptyRootHash, db, nil)
+ db := NewDatabaseForTesting()
+ state, _ := New(types.EmptyRootHash, db)
state.accessList = newAccessList()
verifyAddrs := func(astrings ...string) {
@@ -1213,9 +1209,9 @@ func TestFlushOrderDataLoss(t *testing.T) {
// Create a state trie with many accounts and slots
var (
memdb = rawdb.NewMemoryDatabase()
- triedb = triedb.NewDatabase(memdb, nil)
- statedb = NewDatabaseWithNodeDB(memdb, triedb)
- state, _ = New(types.EmptyRootHash, statedb, nil)
+ tdb = triedb.NewDatabase(memdb, triedb.HashDefaults)
+ statedb = NewDatabase(tdb, nil)
+ state, _ = New(types.EmptyRootHash, statedb)
)
for a := byte(0); a < 10; a++ {
state.CreateAccount(common.Address{a})
@@ -1227,15 +1223,15 @@ func TestFlushOrderDataLoss(t *testing.T) {
if err != nil {
t.Fatalf("failed to commit state trie: %v", err)
}
- triedb.Reference(root, common.Hash{})
- if err := triedb.Cap(1024); err != nil {
+ tdb.Reference(root, common.Hash{})
+ if err := tdb.Cap(1024); err != nil {
t.Fatalf("failed to cap trie dirty cache: %v", err)
}
- if err := triedb.Commit(root, false); err != nil {
+ if err := tdb.Commit(root, false); err != nil {
t.Fatalf("failed to commit state trie: %v", err)
}
// Reopen the state trie from flushed disk and verify it
- state, err = New(root, NewDatabase(memdb), nil)
+ state, err = New(root, NewDatabase(triedb.NewDatabase(memdb, triedb.HashDefaults), nil))
if err != nil {
t.Fatalf("failed to reopen state trie: %v", err)
}
@@ -1249,9 +1245,8 @@ func TestFlushOrderDataLoss(t *testing.T) {
}
func TestStateDBTransientStorage(t *testing.T) {
- memDb := rawdb.NewMemoryDatabase()
- db := NewDatabase(memDb)
- state, _ := New(types.EmptyRootHash, db, nil)
+ db := NewDatabaseForTesting()
+ state, _ := New(types.EmptyRootHash, db)
key := common.Hash{0x01}
value := common.Hash{0x02}
@@ -1286,9 +1281,9 @@ func TestDeleteStorage(t *testing.T) {
var (
disk = rawdb.NewMemoryDatabase()
tdb = triedb.NewDatabase(disk, nil)
- db = NewDatabaseWithNodeDB(disk, tdb)
snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, types.EmptyRootHash)
- state, _ = New(types.EmptyRootHash, db, snaps)
+ db = NewDatabase(tdb, snaps)
+ state, _ = New(types.EmptyRootHash, db)
addr = common.HexToAddress("0x1")
)
// Initialize account and populate storage
@@ -1300,9 +1295,10 @@ func TestDeleteStorage(t *testing.T) {
state.SetState(addr, slot, value)
}
root, _ := state.Commit(0, true)
+
// Init phase done, create two states, one with snap and one without
- fastState, _ := New(root, db, snaps)
- slowState, _ := New(root, db, nil)
+ fastState, _ := New(root, NewDatabase(tdb, snaps))
+ slowState, _ := New(root, NewDatabase(tdb, nil))
obj := fastState.getOrNewStateObject(addr)
storageRoot := obj.data.Root
@@ -1340,8 +1336,8 @@ func TestStorageDirtiness(t *testing.T) {
var (
disk = rawdb.NewMemoryDatabase()
tdb = triedb.NewDatabase(disk, nil)
- db = NewDatabaseWithNodeDB(disk, tdb)
- state, _ = New(types.EmptyRootHash, db, nil)
+ db = NewDatabase(tdb, nil)
+ state, _ = New(types.EmptyRootHash, db)
addr = common.HexToAddress("0x1")
checkDirty = func(key common.Hash, value common.Hash, dirty bool) {
obj := state.getStateObject(addr)
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index b7039c9e1c..cc15422c0c 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -53,8 +53,8 @@ func makeTestState(scheme string) (ethdb.Database, Database, *triedb.Database, c
}
db := rawdb.NewMemoryDatabase()
nodeDb := triedb.NewDatabase(db, config)
- sdb := NewDatabaseWithNodeDB(db, nodeDb)
- state, _ := New(types.EmptyRootHash, sdb, nil)
+ sdb := NewDatabase(nodeDb, nil)
+ state, _ := New(types.EmptyRootHash, sdb)
// Fill it with some arbitrary data
var accounts []*testAccount
@@ -94,7 +94,7 @@ func checkStateAccounts(t *testing.T, db ethdb.Database, scheme string, root com
config.PathDB = pathdb.Defaults
}
// Check root availability and state contents
- state, err := New(root, NewDatabaseWithConfig(db, &config), nil)
+ state, err := New(root, NewDatabase(triedb.NewDatabase(db, &config), nil))
if err != nil {
t.Fatalf("failed to create state trie at %x: %v", root, err)
}
@@ -120,7 +120,7 @@ func checkStateConsistency(db ethdb.Database, scheme string, root common.Hash) e
if scheme == rawdb.PathScheme {
config.PathDB = pathdb.Defaults
}
- state, err := New(root, NewDatabaseWithConfig(db, config), nil)
+ state, err := New(root, NewDatabase(triedb.NewDatabase(db, config), nil))
if err != nil {
return err
}
diff --git a/core/state/trie_prefetcher_test.go b/core/state/trie_prefetcher_test.go
index a0a9d4110b..529b42d39c 100644
--- a/core/state/trie_prefetcher_test.go
+++ b/core/state/trie_prefetcher_test.go
@@ -31,7 +31,7 @@ import (
)
func filledStateDB() *StateDB {
- state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ state, _ := New(types.EmptyRootHash, NewDatabaseForTesting())
// Create an account and check if the retrieved balance is correct
addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
@@ -67,8 +67,11 @@ func TestUseAfterTerminate(t *testing.T) {
}
func TestVerklePrefetcher(t *testing.T) {
- db := NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), triedb.VerkleDefaults)
- state, err := New(types.EmptyRootHash, db, nil)
+ disk := rawdb.NewMemoryDatabase()
+ db := triedb.NewDatabase(disk, triedb.VerkleDefaults)
+ sdb := NewDatabase(db, nil)
+
+ state, err := New(types.EmptyRootHash, sdb)
if err != nil {
t.Fatalf("failed to initialize state: %v", err)
}
@@ -82,9 +85,9 @@ func TestVerklePrefetcher(t *testing.T) {
state.SetState(addr, skey, sval) // Change the storage trie
root, _ := state.Commit(0, true)
- state, _ = New(root, db, nil)
+ state, _ = New(root, sdb)
sRoot := state.GetStorageRoot(addr)
- fetcher := newTriePrefetcher(db, root, "", false)
+ fetcher := newTriePrefetcher(sdb, root, "", false)
// Read account
fetcher.prefetch(common.Hash{}, root, common.Address{}, [][]byte{
diff --git a/core/state_processor.go b/core/state_processor.go
index c13b696022..c74a33c378 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -53,7 +53,7 @@ func NewStateProcessor(config *params.ChainConfig, chain *HeaderChain) *StatePro
// Process returns the receipts and logs accumulated during the process and
// returns the amount of gas that was used in the process. If any of the
// transactions failed to execute due to insufficient gas it will return an error.
-func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error) {
+func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (*ProcessResult, error) {
var (
receipts types.Receipts
usedGas = new(uint64)
@@ -71,6 +71,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
var (
context vm.BlockContext
signer = types.MakeSigner(p.config, header.Number, header.Time)
+ err error
)
context = NewEVMBlockContext(header, p.chain, nil)
vmenv := vm.NewEVM(context, vm.TxContext{}, statedb, p.config, cfg)
@@ -84,21 +85,35 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
for i, tx := range block.Transactions() {
msg, err := TransactionToMessage(tx, signer, header.BaseFee)
if err != nil {
- return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err)
+ return nil, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err)
}
statedb.SetTxContext(tx.Hash(), i)
receipt, err := ApplyTransactionWithEVM(msg, p.config, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv)
if err != nil {
- return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err)
+ return nil, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err)
}
receipts = append(receipts, receipt)
allLogs = append(allLogs, receipt.Logs...)
}
+ // Read requests if Prague is enabled.
+ var requests types.Requests
+ if p.config.IsPrague(block.Number(), block.Time()) {
+ requests, err = ParseDepositLogs(allLogs, p.config)
+ if err != nil {
+ return nil, err
+ }
+ }
+
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
p.chain.engine.Finalize(p.chain, header, statedb, block.Body())
- return receipts, allLogs, *usedGas, nil
+ return &ProcessResult{
+ Receipts: receipts,
+ Requests: requests,
+ Logs: allLogs,
+ GasUsed: *usedGas,
+ }, nil
}
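Callers of Process now receive the receipts, logs, requests and gas usage bundled in a ProcessResult (defined in core/types.go further down), and ValidateState consumes that struct directly. A hedged sketch of a migrated call site, assuming processor, validator, block and statedb are in scope:

```go
res, err := processor.Process(block, statedb, vm.Config{})
if err != nil {
	return err
}
if err := validator.ValidateState(block, statedb, res, false); err != nil {
	return err
}
log.Info("Processed block", "gasUsed", res.GasUsed,
	"receipts", len(res.Receipts), "requests", len(res.Requests))
```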
// ApplyTransactionWithEVM attempts to apply a transaction to the given state database
@@ -132,9 +147,14 @@ func ApplyTransactionWithEVM(msg *Message, config *params.ChainConfig, gp *GasPo
}
*usedGas += result.UsedGas
+ return MakeReceipt(evm, result, statedb, blockNumber, blockHash, tx, *usedGas, root), nil
+}
+
+// MakeReceipt generates the receipt object for a transaction given its execution result.
+func MakeReceipt(evm *vm.EVM, result *ExecutionResult, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas uint64, root []byte) *types.Receipt {
// Create a new receipt for the transaction, storing the intermediate root and gas used
// by the tx.
- receipt = &types.Receipt{Type: tx.Type(), PostState: root, CumulativeGasUsed: *usedGas}
+ receipt := &types.Receipt{Type: tx.Type(), PostState: root, CumulativeGasUsed: usedGas}
if result.Failed() {
receipt.Status = types.ReceiptStatusFailed
} else {
@@ -149,7 +169,7 @@ func ApplyTransactionWithEVM(msg *Message, config *params.ChainConfig, gp *GasPo
}
// If the transaction created a contract, store the creation address in the receipt.
- if msg.To == nil {
+ if tx.To() == nil {
receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce())
}
@@ -165,7 +185,7 @@ func ApplyTransactionWithEVM(msg *Message, config *params.ChainConfig, gp *GasPo
receipt.BlockHash = blockHash
receipt.BlockNumber = blockNumber
receipt.TransactionIndex = uint(statedb.TxIndex())
- return receipt, err
+ return receipt
}
// ApplyTransaction attempts to apply a transaction to the given state database
@@ -239,3 +259,19 @@ func ProcessParentBlockHash(prevHash common.Hash, vmenv *vm.EVM, statedb *state.
_, _, _ = vmenv.Call(vm.AccountRef(msg.From), *msg.To, msg.Data, 30_000_000, common.U2560)
statedb.Finalise(true)
}
+
+// ParseDepositLogs extracts the EIP-6110 deposit values from logs emitted by
+// the BeaconDepositContract.
+func ParseDepositLogs(logs []*types.Log, config *params.ChainConfig) (types.Requests, error) {
+ deposits := make(types.Requests, 0)
+ for _, log := range logs {
+ if log.Address == config.DepositContractAddress {
+ d, err := types.UnpackIntoDeposit(log.Data)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse deposit data: %v", err)
+ }
+ deposits = append(deposits, types.NewRequest(d))
+ }
+ }
+ return deposits, nil
+}
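ParseDepositLogs is the EIP-6110 collection step: it filters the block's logs for the configured deposit contract address and converts each match into a request. A hedged sketch of how a caller could derive the header commitment from it (in this change requests are still committed as a trie root), assuming receipts, config and block are in scope:

```go
// Illustrative only: gather logs from the block's receipts first.
var allLogs []*types.Log
for _, receipt := range receipts {
	allLogs = append(allLogs, receipt.Logs...)
}
if config.IsPrague(block.Number(), block.Time()) {
	requests, err := ParseDepositLogs(allLogs, config)
	if err != nil {
		return err
	}
	requestsHash := types.DeriveSha(requests, trie.NewStackTrie(nil))
	_ = requestsHash // expected to match header.RequestsHash
}
```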
diff --git a/core/state_processor_test.go b/core/state_processor_test.go
index 307ab75c5b..9678f2828c 100644
--- a/core/state_processor_test.go
+++ b/core/state_processor_test.go
@@ -34,7 +34,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/triedb"
@@ -133,7 +132,7 @@ func TestStateProcessorErrors(t *testing.T) {
},
},
}
- blockchain, _ = NewBlockChain(db, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil, nil)
+ blockchain, _ = NewBlockChain(db, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil)
tooBigInitCode = [params.MaxInitCodeSize + 1]byte{}
)
@@ -293,7 +292,7 @@ func TestStateProcessorErrors(t *testing.T) {
},
},
}
- blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
)
defer blockchain.Stop()
for i, tt := range []struct {
@@ -332,7 +331,7 @@ func TestStateProcessorErrors(t *testing.T) {
},
},
}
- blockchain, _ = NewBlockChain(db, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil, nil)
+ blockchain, _ = NewBlockChain(db, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil)
)
defer blockchain.Stop()
for i, tt := range []struct {
@@ -481,13 +480,40 @@ func TestProcessVerkle(t *testing.T) {
// genesis := gspec.MustCommit(bcdb, triedb)
cacheConfig := DefaultCacheConfigWithScheme("path")
cacheConfig.SnapshotLimit = 0
- blockchain, _ := NewBlockChain(bcdb, cacheConfig, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(bcdb, cacheConfig, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil)
defer blockchain.Stop()
txCost1 := params.TxGas
txCost2 := params.TxGas
- contractCreationCost := intrinsicContractCreationGas + uint64(2039 /* execution costs */)
- codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + uint64(57444 /* execution costs */)
+ contractCreationCost := intrinsicContractCreationGas +
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* creation with value */
+ 739 /* execution costs */
+ codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas +
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (tx) */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (CREATE at pc=0x20) */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #0 */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #1 */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #2 */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #3 */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #4 */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #5 */
+ params.WitnessChunkReadCost + /* SLOAD in constructor */
+ params.WitnessChunkWriteCost + /* SSTORE in constructor */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + params.WitnessBranchReadCost + params.WitnessBranchWriteCost + /* creation (CREATE at PC=0x121) */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #0 */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #1 */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #2 */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #3 */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #4 */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* code chunk #5 */
+ params.WitnessChunkReadCost + /* SLOAD in constructor */
+ params.WitnessChunkWriteCost + /* SSTORE in constructor */
+ params.WitnessChunkReadCost + params.WitnessChunkWriteCost + /* write code hash for tx creation */
+ 15*(params.WitnessChunkReadCost+params.WitnessChunkWriteCost) + /* code chunks #0..#14 */
+ 4844 /* execution costs */
blockGasUsagesExpected := []uint64{
txCost1*2 + txCost2,
txCost1*2 + txCost2 + contractCreationCost + codeWithExtCodeCopyGas,
@@ -575,7 +601,7 @@ func TestProcessParentBlockHash(t *testing.T) {
}
}
t.Run("MPT", func(t *testing.T) {
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
test(statedb)
})
t.Run("Verkle", func(t *testing.T) {
@@ -583,7 +609,7 @@ func TestProcessParentBlockHash(t *testing.T) {
cacheConfig := DefaultCacheConfigWithScheme(rawdb.PathScheme)
cacheConfig.SnapshotLimit = 0
triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig(true))
- statedb, _ := state.New(types.EmptyVerkleHash, state.NewDatabaseWithNodeDB(db, triedb), nil)
+ statedb, _ := state.New(types.EmptyVerkleHash, state.NewDatabase(triedb, nil))
test(statedb)
})
}
diff --git a/core/state_transition.go b/core/state_transition.go
index 1a6a66a2fc..d285d03fe2 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -142,27 +142,31 @@ type Message struct {
BlobGasFeeCap *big.Int
BlobHashes []common.Hash
- // When SkipAccountChecks is true, the message nonce is not checked against the
- // account nonce in state. It also disables checking that the sender is an EOA.
+ // When SkipNonceChecks is true, the message nonce is not checked against the
+ // account nonce in state.
// This field will be set to true for operations like RPC eth_call.
- SkipAccountChecks bool
+ SkipNonceChecks bool
+
+ // When SkipFromEOACheck is true, the message sender is not checked to be an EOA.
+ SkipFromEOACheck bool
}
// TransactionToMessage converts a transaction into a Message.
func TransactionToMessage(tx *types.Transaction, s types.Signer, baseFee *big.Int) (*Message, error) {
msg := &Message{
- Nonce: tx.Nonce(),
- GasLimit: tx.Gas(),
- GasPrice: new(big.Int).Set(tx.GasPrice()),
- GasFeeCap: new(big.Int).Set(tx.GasFeeCap()),
- GasTipCap: new(big.Int).Set(tx.GasTipCap()),
- To: tx.To(),
- Value: tx.Value(),
- Data: tx.Data(),
- AccessList: tx.AccessList(),
- SkipAccountChecks: false,
- BlobHashes: tx.BlobHashes(),
- BlobGasFeeCap: tx.BlobGasFeeCap(),
+ Nonce: tx.Nonce(),
+ GasLimit: tx.Gas(),
+ GasPrice: new(big.Int).Set(tx.GasPrice()),
+ GasFeeCap: new(big.Int).Set(tx.GasFeeCap()),
+ GasTipCap: new(big.Int).Set(tx.GasTipCap()),
+ To: tx.To(),
+ Value: tx.Value(),
+ Data: tx.Data(),
+ AccessList: tx.AccessList(),
+ SkipNonceChecks: false,
+ SkipFromEOACheck: false,
+ BlobHashes: tx.BlobHashes(),
+ BlobGasFeeCap: tx.BlobGasFeeCap(),
}
// If baseFee provided, set gasPrice to effectiveGasPrice.
if baseFee != nil {
@@ -280,7 +284,7 @@ func (st *StateTransition) buyGas() error {
func (st *StateTransition) preCheck() error {
// Only check transactions that are not fake
msg := st.msg
- if !msg.SkipAccountChecks {
+ if !msg.SkipNonceChecks {
// Make sure this transaction's nonce is correct.
stNonce := st.state.GetNonce(msg.From)
if msgNonce := msg.Nonce; stNonce < msgNonce {
@@ -293,6 +297,8 @@ func (st *StateTransition) preCheck() error {
return fmt.Errorf("%w: address %v, nonce: %d", ErrNonceMax,
msg.From.Hex(), stNonce)
}
+ }
+ if !msg.SkipFromEOACheck {
// Make sure the sender is an EOA
codeHash := st.state.GetCodeHash(msg.From)
if codeHash != (common.Hash{}) && codeHash != types.EmptyCodeHash {
@@ -470,7 +476,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
// add the coinbase to the witness iff the fee is greater than 0
if rules.IsEIP4762 && fee.Sign() != 0 {
- st.evm.AccessEvents.BalanceGas(st.evm.Context.Coinbase, true)
+ st.evm.AccessEvents.AddAccount(st.evm.Context.Coinbase, true)
}
}
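Splitting SkipAccountChecks lets simulation-style callers relax the nonce check and the EOA-sender check independently, and preCheck above guards each behind its own flag. An illustrative message for an eth_call-like path (sender and target are placeholder variables):

```go
msg := &Message{
	From:             sender,
	To:               &target,
	GasLimit:         50_000_000,
	GasPrice:         new(big.Int),
	GasFeeCap:        new(big.Int),
	GasTipCap:        new(big.Int),
	Value:            new(big.Int),
	SkipNonceChecks:  true, // don't enforce the on-chain account nonce
	SkipFromEOACheck: true, // allow simulating calls sent from contract accounts
}
_ = msg
```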
diff --git a/core/stateless.go b/core/stateless.go
index 4c7e6f3102..f323cc2e8e 100644
--- a/core/stateless.go
+++ b/core/stateless.go
@@ -42,8 +42,7 @@ import (
func ExecuteStateless(config *params.ChainConfig, witness *stateless.Witness) (common.Hash, common.Hash, error) {
// Create and populate the state database to serve as the stateless backend
memdb := witness.MakeHashDB()
-
- db, err := state.New(witness.Root(), state.NewDatabaseWithConfig(memdb, triedb.HashDefaults), nil)
+ db, err := state.New(witness.Root(), state.NewDatabase(triedb.NewDatabase(memdb, triedb.HashDefaults), nil))
if err != nil {
return common.Hash{}, common.Hash{}, err
}
@@ -58,15 +57,15 @@ func ExecuteStateless(config *params.ChainConfig, witness *stateless.Witness) (c
validator := NewBlockValidator(config, nil) // No chain, we only validate the state, not the block
// Run the stateless blocks processing and self-validate certain fields
- receipts, _, usedGas, err := processor.Process(witness.Block, db, vm.Config{})
+ res, err := processor.Process(witness.Block, db, vm.Config{})
if err != nil {
return common.Hash{}, common.Hash{}, err
}
- if err = validator.ValidateState(witness.Block, db, receipts, usedGas, true); err != nil {
+ if err = validator.ValidateState(witness.Block, db, res, true); err != nil {
return common.Hash{}, common.Hash{}, err
}
// Almost everything validated, but receipt and state root needs to be returned
- receiptRoot := types.DeriveSha(receipts, trie.NewStackTrie(nil))
+ receiptRoot := types.DeriveSha(res.Receipts, trie.NewStackTrie(nil))
stateRoot := db.IntermediateRoot(config.IsEIP158(witness.Block.Number()))
return receiptRoot, stateRoot, nil
diff --git a/core/stateless/witness.go b/core/stateless/witness.go
index 7622c5eb61..0f2d6cfe07 100644
--- a/core/stateless/witness.go
+++ b/core/stateless/witness.go
@@ -101,9 +101,7 @@ func (w *Witness) AddState(nodes map[string]struct{}) {
w.lock.Lock()
defer w.lock.Unlock()
- for node := range nodes {
- w.State[node] = struct{}{}
- }
+ maps.Copy(w.State, nodes)
}
// Copy deep-copies the witness object. Witness.Block isn't deep-copied as it
diff --git a/core/tracing/CHANGELOG.md b/core/tracing/CHANGELOG.md
index cddc728fc0..e8aa3a9e2e 100644
--- a/core/tracing/CHANGELOG.md
+++ b/core/tracing/CHANGELOG.md
@@ -2,6 +2,24 @@
All notable changes to the tracing interface will be documented in this file.
+## [Unreleased]
+
+### Modified types
+
+- `GasChangeReason` has been extended with the following reason, which will be enabled only post-Verkle. There should be no gas changes with this reason prior to the fork.
+ - `GasChangeWitnessContractCollisionCheck` flags the event of adding to the witness when checking for contract address collision.
+
+## [v1.14.4]
+
+This release contained only minor extensions to the tracing interface.
+
+### Modified types
+
+- `GasChangeReason` has been extended with the following reasons that will only be active post-Verkle.
+ - `GasChangeWitnessContractInit` flags the event of adding to the witness during the contract creation initialization step.
+ - `GasChangeWitnessContractCreation` flags the event of adding to the witness during the contract creation finalization step.
+ - `GasChangeWitnessCodeChunk` flags the event of adding one or more contract code chunks to the witness.
+
## [v1.14.3]
There have been minor backwards-compatible changes to the tracing interface to explicitly mark the execution of **system** contracts. As of now the only system call updates the parent beacon block root as per [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788). Other system calls are being considered for the future hardfork.
@@ -75,6 +93,7 @@ The hooks `CaptureStart` and `CaptureEnd` have been removed. These hooks signale
- `CaptureState` -> `OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error)`. `op` is of type `byte` which can be cast to `vm.OpCode` when necessary. A `*vm.ScopeContext` is not passed anymore. It is replaced by `tracing.OpContext` which offers access to the memory, stack and current contract.
- `CaptureFault` -> `OnFault(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, depth int, err error)`. Similar to above.
-[unreleased]: https://github.com/ethereum/go-ethereum/compare/v1.14.0...master
+[unreleased]: https://github.com/ethereum/go-ethereum/compare/v1.14.8...master
[v1.14.0]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0
[v1.14.3]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.3
+[v1.14.4]: https://github.com/ethereum/go-ethereum/releases/tag/v1.14.4
diff --git a/core/tracing/hooks.go b/core/tracing/hooks.go
index aa66dc49ff..987dfa7a1e 100644
--- a/core/tracing/hooks.go
+++ b/core/tracing/hooks.go
@@ -300,12 +300,14 @@ const (
GasChangeCallStorageColdAccess GasChangeReason = 13
// GasChangeCallFailedExecution is the burning of the remaining gas when the execution failed without a revert.
GasChangeCallFailedExecution GasChangeReason = 14
- // GasChangeWitnessContractInit is the amount charged for adding to the witness during the contract creation initialization step
+ // GasChangeWitnessContractInit flags the event of adding to the witness during the contract creation initialization step.
GasChangeWitnessContractInit GasChangeReason = 15
- // GasChangeWitnessContractCreation is the amount charged for adding to the witness during the contract creation finalization step
+ // GasChangeWitnessContractCreation flags the event of adding to the witness during the contract creation finalization step.
GasChangeWitnessContractCreation GasChangeReason = 16
- // GasChangeWitnessCodeChunk is the amount charged for touching one or more contract code chunks
+ // GasChangeWitnessCodeChunk flags the event of adding one or more contract code chunks to the witness.
GasChangeWitnessCodeChunk GasChangeReason = 17
+ // GasChangeWitnessContractCollisionCheck flags the event of adding to the witness when checking for contract address collision.
+ GasChangeWitnessContractCollisionCheck GasChangeReason = 18
// GasChangeIgnored is a special value that can be used to indicate that the gas change should be ignored as
// it will be "manually" tracked by a direct emit of the gas change event.
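Tracers pick up the new reason through the regular gas-change hook; nothing else in the interface changes. A hedged sketch, assuming the OnGasChange hook signature from core/tracing:

```go
hooks := &tracing.Hooks{
	OnGasChange: func(old, new uint64, reason tracing.GasChangeReason) {
		if reason == tracing.GasChangeWitnessContractCollisionCheck {
			fmt.Printf("collision-check witness touch charged %d gas\n", old-new)
		}
	},
}
_ = hooks
```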
diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go
index d66a08aa17..685c017cbd 100644
--- a/core/txpool/blobpool/blobpool.go
+++ b/core/txpool/blobpool/blobpool.go
@@ -566,7 +566,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
ids []uint64
nonces []uint64
)
- for txs[0].nonce < next {
+ for len(txs) > 0 && txs[0].nonce < next {
ids = append(ids, txs[0].id)
nonces = append(nonces, txs[0].nonce)
@@ -940,7 +940,7 @@ func (p *BlobPool) reorg(oldHead, newHead *types.Header) (map[common.Address][]*
}
// Generate the set of transactions per address to pull back into the pool,
// also updating the rest along the way
- reinject := make(map[common.Address][]*types.Transaction)
+ reinject := make(map[common.Address][]*types.Transaction, len(transactors))
for addr := range transactors {
// Generate the set that was lost to reinject into the pool
lost := make([]*types.Transaction, 0, len(discarded[addr]))
@@ -949,7 +949,9 @@ func (p *BlobPool) reorg(oldHead, newHead *types.Header) (map[common.Address][]*
lost = append(lost, tx)
}
}
- reinject[addr] = lost
+ if len(lost) > 0 {
+ reinject[addr] = lost
+ }
// Update the set that was already reincluded to track the blocks in limbo
for _, tx := range types.TxDifference(included[addr], discarded[addr]) {
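The added length guard in recheck keeps the drain loop from indexing an empty slice once every tracked transaction of an account has been consumed. A schematic stand-in for that loop, with the pool's per-transaction metadata reduced to the two fields used here:

```go
type txMeta struct { // minimal stand-in for the pool's per-tx metadata
	id    uint64
	nonce uint64
}

// drainStale pops entries whose nonce is already below the next expected nonce;
// without the len(txs) > 0 guard it would panic once the slice is fully drained.
func drainStale(txs []*txMeta, next uint64) (kept []*txMeta, dropped []uint64) {
	for len(txs) > 0 && txs[0].nonce < next {
		dropped = append(dropped, txs[0].id)
		txs = txs[1:]
	}
	return txs, dropped
}
```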
diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go
index d658a6daf4..feccb17922 100644
--- a/core/txpool/blobpool/blobpool_test.go
+++ b/core/txpool/blobpool/blobpool_test.go
@@ -33,14 +33,12 @@ import (
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
- "github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
@@ -545,7 +543,7 @@ func TestOpenDrops(t *testing.T) {
store.Close()
// Create a blob pool out of the pre-seeded data
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
statedb.AddBalance(crypto.PubkeyToAddress(gapper.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(crypto.PubkeyToAddress(dangler.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(crypto.PubkeyToAddress(filler.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
@@ -676,7 +674,7 @@ func TestOpenIndex(t *testing.T) {
store.Close()
// Create a blob pool out of the pre-seeded data
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
statedb.AddBalance(addr, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.Commit(0, true)
@@ -776,7 +774,7 @@ func TestOpenHeap(t *testing.T) {
store.Close()
// Create a blob pool out of the pre-seeded data
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
@@ -856,7 +854,7 @@ func TestOpenCap(t *testing.T) {
// with a high cap to ensure everything was persisted previously
for _, datacap := range []uint64{2 * (txAvgSize + blobSize), 100 * (txAvgSize + blobSize)} {
// Create a blob pool out of the pre-seeded data, but cap it to 2 blob transaction
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
@@ -1266,7 +1264,7 @@ func TestAdd(t *testing.T) {
keys = make(map[string]*ecdsa.PrivateKey)
addrs = make(map[string]common.Address)
)
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
for acc, seed := range tt.seeds {
// Generate a new random key/address for the seed account
keys[acc], _ = crypto.GenerateKey()
@@ -1328,7 +1326,7 @@ func benchmarkPoolPending(b *testing.B, datacap uint64) {
basefee = uint64(1050)
blobfee = uint64(105)
signer = types.LatestSigner(testChainConfig)
- statedb, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ statedb, _ = state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
chain = &testBlockChain{
config: testChainConfig,
basefee: uint256.NewInt(basefee),
diff --git a/core/txpool/legacypool/legacypool2_test.go b/core/txpool/legacypool/legacypool2_test.go
index fd961d1d92..1377479da1 100644
--- a/core/txpool/legacypool/legacypool2_test.go
+++ b/core/txpool/legacypool/legacypool2_test.go
@@ -21,7 +21,6 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
@@ -80,7 +79,7 @@ func TestTransactionFutureAttack(t *testing.T) {
t.Parallel()
// Create the pool to test the limit enforcement with
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.GlobalQueue = 100
@@ -117,7 +116,7 @@ func TestTransactionFutureAttack(t *testing.T) {
func TestTransactionFuture1559(t *testing.T) {
t.Parallel()
// Create the pool to test the pricing enforcement with
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain)
pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
@@ -150,7 +149,7 @@ func TestTransactionFuture1559(t *testing.T) {
func TestTransactionZAttack(t *testing.T) {
t.Parallel()
// Create the pool to test the pricing enforcement with
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain)
pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
@@ -218,7 +217,7 @@ func TestTransactionZAttack(t *testing.T) {
func BenchmarkFutureAttack(b *testing.B) {
// Create the pool to test the limit enforcement with
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.GlobalQueue = 100
diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go
index c86991c942..39673d176d 100644
--- a/core/txpool/legacypool/legacypool_test.go
+++ b/core/txpool/legacypool/legacypool_test.go
@@ -31,7 +31,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/txpool"
@@ -160,7 +159,7 @@ func setupPool() (*LegacyPool, *ecdsa.PrivateKey) {
}
func setupPoolWithConfig(config *params.ChainConfig) (*LegacyPool, *ecdsa.PrivateKey) {
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(config, 10000000, statedb, new(event.Feed))
key, _ := crypto.GenerateKey()
@@ -251,7 +250,7 @@ func (c *testChain) State() (*state.StateDB, error) {
// a state change between those fetches.
stdb := c.statedb
if *c.trigger {
- c.statedb, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ c.statedb, _ = state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
// simulate that the new head block included tx0 and tx1
c.statedb.SetNonce(c.address, 2)
c.statedb.SetBalance(c.address, new(uint256.Int).SetUint64(params.Ether), tracing.BalanceChangeUnspecified)
@@ -269,7 +268,7 @@ func TestStateChangeDuringReset(t *testing.T) {
var (
key, _ = crypto.GenerateKey()
address = crypto.PubkeyToAddress(key.PublicKey)
- statedb, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ = state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
trigger = false
)
@@ -468,7 +467,7 @@ func TestChainFork(t *testing.T) {
addr := crypto.PubkeyToAddress(key.PublicKey)
resetState := func() {
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
statedb.AddBalance(addr, uint256.NewInt(100000000000000), tracing.BalanceChangeUnspecified)
pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed))
@@ -497,7 +496,7 @@ func TestDoubleNonce(t *testing.T) {
addr := crypto.PubkeyToAddress(key.PublicKey)
resetState := func() {
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
statedb.AddBalance(addr, uint256.NewInt(100000000000000), tracing.BalanceChangeUnspecified)
pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed))
@@ -697,7 +696,7 @@ func TestPostponing(t *testing.T) {
t.Parallel()
// Create the pool to test the postponing with
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain)
@@ -910,7 +909,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) {
t.Parallel()
// Create the pool to test the limit enforcement with
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
@@ -1003,7 +1002,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
evictionInterval = time.Millisecond * 100
// Create the pool to test the non-expiration enforcement
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
@@ -1189,7 +1188,7 @@ func TestPendingGlobalLimiting(t *testing.T) {
t.Parallel()
// Create the pool to test the limit enforcement with
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
@@ -1291,7 +1290,7 @@ func TestCapClearsFromAll(t *testing.T) {
t.Parallel()
// Create the pool to test the limit enforcement with
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
@@ -1326,7 +1325,7 @@ func TestPendingMinimumAllowance(t *testing.T) {
t.Parallel()
// Create the pool to test the limit enforcement with
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
@@ -1375,7 +1374,7 @@ func TestRepricing(t *testing.T) {
t.Parallel()
// Create the pool to test the pricing enforcement with
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain)
@@ -1495,7 +1494,7 @@ func TestMinGasPriceEnforced(t *testing.T) {
t.Parallel()
// Create the pool to test the pricing enforcement with
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(eip1559Config, 10000000, statedb, new(event.Feed))
txPoolConfig := DefaultConfig
@@ -1668,7 +1667,7 @@ func TestRepricingKeepsLocals(t *testing.T) {
t.Parallel()
// Create the pool to test the pricing enforcement with
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain)
@@ -1742,7 +1741,7 @@ func TestUnderpricing(t *testing.T) {
t.Parallel()
// Create the pool to test the pricing enforcement with
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
@@ -1857,7 +1856,7 @@ func TestStableUnderpricing(t *testing.T) {
t.Parallel()
// Create the pool to test the pricing enforcement with
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
@@ -2090,7 +2089,7 @@ func TestDeduplication(t *testing.T) {
t.Parallel()
// Create the pool to test the pricing enforcement with
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain)
@@ -2157,7 +2156,7 @@ func TestReplacement(t *testing.T) {
t.Parallel()
// Create the pool to test the pricing enforcement with
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain)
@@ -2363,7 +2362,7 @@ func testJournaling(t *testing.T, nolocals bool) {
os.Remove(journal)
// Create the original pool to inject transaction into the journal
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
@@ -2464,7 +2463,7 @@ func TestStatusCheck(t *testing.T) {
t.Parallel()
// Create the pool to test the status retrievals with
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain)
diff --git a/core/types.go b/core/types.go
index dc13de52ce..b1808c9ffb 100644
--- a/core/types.go
+++ b/core/types.go
@@ -33,9 +33,8 @@ type Validator interface {
// ValidateBody validates the given block's content.
ValidateBody(block *types.Block) error
- // ValidateState validates the given statedb and optionally the receipts and
- // gas used.
- ValidateState(block *types.Block, state *state.StateDB, receipts types.Receipts, usedGas uint64, stateless bool) error
+ // ValidateState validates the given statedb and optionally the process result.
+ ValidateState(block *types.Block, state *state.StateDB, res *ProcessResult, stateless bool) error
// ValidateWitness cross validates a block execution with stateless remote clients.
ValidateWitness(witness *stateless.Witness, receiptRoot common.Hash, stateRoot common.Hash) error
@@ -54,5 +53,13 @@ type Processor interface {
// Process processes the state changes according to the Ethereum rules by running
// the transaction messages using the statedb and applying any rewards to both
// the processor (coinbase) and any included uncles.
- Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error)
+ Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (*ProcessResult, error)
+}
+
+// ProcessResult contains the values computed by Process.
+type ProcessResult struct {
+ Receipts types.Receipts
+ Requests types.Requests
+ Logs []*types.Log
+ GasUsed uint64
}
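As a usage sketch only: the hypothetical helper below (processAndValidate, not part of this change) shows how a caller of the updated interfaces threads the bundled ProcessResult from Process into ValidateState, assuming the go-ethereum packages touched by this diff.

package example

import (
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
)

// processAndValidate executes a block and validates the outcome, passing the
// whole ProcessResult instead of separate receipts/logs/gas arguments.
func processAndValidate(p core.Processor, v core.Validator, block *types.Block, statedb *state.StateDB) error {
	res, err := p.Process(block, statedb, vm.Config{})
	if err != nil {
		return err
	}
	return v.ValidateState(block, statedb, res, false)
}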
diff --git a/core/types/block.go b/core/types/block.go
index e6ddf2012f..1c00658d5b 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -102,6 +102,9 @@ type Header struct {
// ParentBeaconRoot was added by EIP-4788 and is ignored in legacy headers.
ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
+
+ // RequestsHash was added by EIP-7685 and is ignored in legacy headers.
+ RequestsHash *common.Hash `json:"requestsRoot" rlp:"optional"`
}
// field type overrides for gencodec
@@ -163,10 +166,11 @@ func (h *Header) SanityCheck() error {
// EmptyBody returns true if there is no additional 'body' to complete the header
// that is: no transactions, no uncles and no withdrawals.
func (h *Header) EmptyBody() bool {
- if h.WithdrawalsHash != nil {
- return h.TxHash == EmptyTxsHash && *h.WithdrawalsHash == EmptyWithdrawalsHash
- }
- return h.TxHash == EmptyTxsHash && h.UncleHash == EmptyUncleHash
+ var (
+ emptyWithdrawals = h.WithdrawalsHash == nil || *h.WithdrawalsHash == EmptyWithdrawalsHash
+ emptyRequests = h.RequestsHash == nil || *h.RequestsHash == EmptyRequestsHash
+ )
+ return h.TxHash == EmptyTxsHash && h.UncleHash == EmptyUncleHash && emptyWithdrawals && emptyRequests
}
// EmptyReceipts returns true if there are no receipts for this header/block.
@@ -180,6 +184,7 @@ type Body struct {
Transactions []*Transaction
Uncles []*Header
Withdrawals []*Withdrawal `rlp:"optional"`
+ Requests []*Request `rlp:"optional"`
}
// Block represents an Ethereum block.
@@ -204,6 +209,7 @@ type Block struct {
uncles []*Header
transactions Transactions
withdrawals Withdrawals
+ requests Requests
// witness is not an encoded part of the block body.
// It is held in Block in order for easy relaying to the places
@@ -226,6 +232,7 @@ type extblock struct {
Txs []*Transaction
Uncles []*Header
Withdrawals []*Withdrawal `rlp:"optional"`
+ Requests []*Request `rlp:"optional"`
}
// NewBlock creates a new block. The input data is copied, changes to header and to the
@@ -242,6 +249,7 @@ func NewBlock(header *Header, body *Body, receipts []*Receipt, hasher TrieHasher
txs = body.Transactions
uncles = body.Uncles
withdrawals = body.Withdrawals
+ requests = body.Requests
)
if len(txs) == 0 {
@@ -280,6 +288,17 @@ func NewBlock(header *Header, body *Body, receipts []*Receipt, hasher TrieHasher
b.withdrawals = slices.Clone(withdrawals)
}
+ if requests == nil {
+ b.header.RequestsHash = nil
+ } else if len(requests) == 0 {
+ b.header.RequestsHash = &EmptyRequestsHash
+ b.requests = Requests{}
+ } else {
+ h := DeriveSha(Requests(requests), hasher)
+ b.header.RequestsHash = &h
+ b.requests = slices.Clone(requests)
+ }
+
return b
}
@@ -315,6 +334,10 @@ func CopyHeader(h *Header) *Header {
cpy.ParentBeaconRoot = new(common.Hash)
*cpy.ParentBeaconRoot = *h.ParentBeaconRoot
}
+ if h.RequestsHash != nil {
+ cpy.RequestsHash = new(common.Hash)
+ *cpy.RequestsHash = *h.RequestsHash
+ }
return &cpy
}
@@ -325,7 +348,7 @@ func (b *Block) DecodeRLP(s *rlp.Stream) error {
if err := s.Decode(&eb); err != nil {
return err
}
- b.header, b.uncles, b.transactions, b.withdrawals = eb.Header, eb.Uncles, eb.Txs, eb.Withdrawals
+ b.header, b.uncles, b.transactions, b.withdrawals, b.requests = eb.Header, eb.Uncles, eb.Txs, eb.Withdrawals, eb.Requests
b.size.Store(rlp.ListSize(size))
return nil
}
@@ -337,13 +360,14 @@ func (b *Block) EncodeRLP(w io.Writer) error {
Txs: b.transactions,
Uncles: b.uncles,
Withdrawals: b.withdrawals,
+ Requests: b.requests,
})
}
// Body returns the non-header content of the block.
// Note the returned data is not an independent copy.
func (b *Block) Body() *Body {
- return &Body{b.transactions, b.uncles, b.withdrawals}
+ return &Body{b.transactions, b.uncles, b.withdrawals, b.requests}
}
// Accessors for body data. These do not return a copy because the content
@@ -352,6 +376,7 @@ func (b *Block) Body() *Body {
func (b *Block) Uncles() []*Header { return b.uncles }
func (b *Block) Transactions() Transactions { return b.transactions }
func (b *Block) Withdrawals() Withdrawals { return b.withdrawals }
+func (b *Block) Requests() Requests { return b.requests }
func (b *Block) Transaction(hash common.Hash) *Transaction {
for _, transaction := range b.transactions {
@@ -476,6 +501,7 @@ func (b *Block) WithBody(body Body) *Block {
transactions: slices.Clone(body.Transactions),
uncles: make([]*Header, len(body.Uncles)),
withdrawals: slices.Clone(body.Withdrawals),
+ requests: slices.Clone(body.Requests),
witness: b.witness,
}
for i := range body.Uncles {
@@ -490,6 +516,7 @@ func (b *Block) WithWitness(witness *ExecutionWitness) *Block {
transactions: b.transactions,
uncles: b.uncles,
withdrawals: b.withdrawals,
+ requests: b.requests,
witness: witness,
}
}
diff --git a/core/types/deposit.go b/core/types/deposit.go
new file mode 100644
index 0000000000..172acc36ed
--- /dev/null
+++ b/core/types/deposit.go
@@ -0,0 +1,103 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package types
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+//go:generate go run github.com/fjl/gencodec -type Deposit -field-override depositMarshaling -out gen_deposit_json.go
+
+// Deposit contains EIP-6110 deposit data.
+type Deposit struct {
+ PublicKey [48]byte `json:"pubkey"` // public key of validator
+ WithdrawalCredentials common.Hash `json:"withdrawalCredentials"` // beneficiary of the validator funds
+ Amount uint64 `json:"amount"` // deposit size in Gwei
+ Signature [96]byte `json:"signature"` // signature over deposit msg
+ Index uint64 `json:"index"` // deposit count value
+}
+
+// field type overrides for gencodec
+type depositMarshaling struct {
+ PublicKey hexutil.Bytes
+ WithdrawalCredentials hexutil.Bytes
+ Amount hexutil.Uint64
+ Signature hexutil.Bytes
+ Index hexutil.Uint64
+}
+
+// Deposits implements DerivableList for requests.
+type Deposits []*Deposit
+
+// Len returns the length of s.
+func (s Deposits) Len() int { return len(s) }
+
+// EncodeIndex encodes the i'th deposit to w.
+func (s Deposits) EncodeIndex(i int, w *bytes.Buffer) {
+ rlp.Encode(w, s[i])
+}
+
+// UnpackIntoDeposit unpacks a serialized DepositEvent.
+func UnpackIntoDeposit(data []byte) (*Deposit, error) {
+ if len(data) != 576 {
+ return nil, fmt.Errorf("deposit wrong length: want 576, have %d", len(data))
+ }
+ var d Deposit
+ // The ABI encodes the position of dynamic elements first. Since there are 5
+ // elements, skip over the positional data. The first 32 bytes of dynamic
+ // elements also encode their actual length. Skip over that value too.
+ b := 32*5 + 32
+ // PublicKey is the first element. ABI encoding pads values to 32 bytes, so
+ // despite BLS public keys being length 48, the value length here is 64. Then
+ // skip over the next length value.
+ copy(d.PublicKey[:], data[b:b+48])
+ b += 48 + 16 + 32
+ // WithdrawalCredentials is 32 bytes. Read that value then skip over next
+ // length.
+ copy(d.WithdrawalCredentials[:], data[b:b+32])
+ b += 32 + 32
+ // Amount is 8 bytes, but it is padded to 32. Skip over it and the next
+ // length.
+ d.Amount = binary.LittleEndian.Uint64(data[b : b+8])
+ b += 8 + 24 + 32
+ // Signature is 96 bytes. Skip over it and the next length.
+ copy(d.Signature[:], data[b:b+96])
+ b += 96 + 32
+ // Index is 8 bytes.
+ d.Index = binary.LittleEndian.Uint64(data[b : b+8])
+
+ return &d, nil
+}
+
+func (d *Deposit) requestType() byte { return DepositRequestType }
+func (d *Deposit) encode(b *bytes.Buffer) error { return rlp.Encode(b, d) }
+func (d *Deposit) decode(input []byte) error { return rlp.DecodeBytes(input, d) }
+func (d *Deposit) copy() RequestData {
+ return &Deposit{
+ PublicKey: d.PublicKey,
+ WithdrawalCredentials: d.WithdrawalCredentials,
+ Amount: d.Amount,
+ Signature: d.Signature,
+ Index: d.Index,
+ }
+}
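A usage sketch (hypothetical decodeDepositLogs helper): given the raw data payloads of DepositEvent logs emitted by the deposit contract, each payload is unpacked into a types.Deposit via UnpackIntoDeposit, relying on the fixed 576-byte ABI layout handled above.

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

// decodeDepositLogs unpacks a slice of DepositEvent log data payloads into
// Deposit objects, stopping at the first malformed entry.
func decodeDepositLogs(logData [][]byte) (types.Deposits, error) {
	deposits := make(types.Deposits, 0, len(logData))
	for i, data := range logData {
		d, err := types.UnpackIntoDeposit(data)
		if err != nil {
			return nil, fmt.Errorf("log %d: %w", i, err)
		}
		deposits = append(deposits, d)
	}
	return deposits, nil
}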
diff --git a/core/types/deposit_test.go b/core/types/deposit_test.go
new file mode 100644
index 0000000000..ed2e18445d
--- /dev/null
+++ b/core/types/deposit_test.go
@@ -0,0 +1,93 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package types
+
+import (
+ "encoding/binary"
+ "reflect"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+var (
+ depositABI = abi.ABI{Methods: map[string]abi.Method{"DepositEvent": depositEvent}}
+ bytesT, _ = abi.NewType("bytes", "", nil)
+ depositEvent = abi.NewMethod("DepositEvent", "DepositEvent", abi.Function, "", false, false, []abi.Argument{
+ {Name: "pubkey", Type: bytesT, Indexed: false},
+ {Name: "withdrawal_credentials", Type: bytesT, Indexed: false},
+ {Name: "amount", Type: bytesT, Indexed: false},
+ {Name: "signature", Type: bytesT, Indexed: false},
+ {Name: "index", Type: bytesT, Indexed: false}}, nil,
+ )
+)
+
+// FuzzUnpackIntoDeposit tries roundtrip packing and unpacking of deposit events.
+func FuzzUnpackIntoDeposit(f *testing.F) {
+ for _, tt := range []struct {
+ pubkey string
+ wxCred string
+ amount string
+ sig string
+ index string
+ }{
+ {
+ pubkey: "111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111",
+ wxCred: "2222222222222222222222222222222222222222222222222222222222222222",
+ amount: "3333333333333333",
+ sig: "444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444444",
+ index: "5555555555555555",
+ },
+ } {
+ f.Add(common.FromHex(tt.pubkey), common.FromHex(tt.wxCred), common.FromHex(tt.amount), common.FromHex(tt.sig), common.FromHex(tt.index))
+ }
+
+ f.Fuzz(func(t *testing.T, p []byte, w []byte, a []byte, s []byte, i []byte) {
+ var (
+ pubkey [48]byte
+ wxCred [32]byte
+ amount [8]byte
+ sig [96]byte
+ index [8]byte
+ )
+ copy(pubkey[:], p)
+ copy(wxCred[:], w)
+ copy(amount[:], a)
+ copy(sig[:], s)
+ copy(index[:], i)
+
+ want := Deposit{
+ PublicKey: pubkey,
+ WithdrawalCredentials: wxCred,
+ Amount: binary.LittleEndian.Uint64(amount[:]),
+ Signature: sig,
+ Index: binary.LittleEndian.Uint64(index[:]),
+ }
+ out, err := depositABI.Pack("DepositEvent", want.PublicKey[:], want.WithdrawalCredentials[:], amount[:], want.Signature[:], index[:])
+ if err != nil {
+ t.Fatalf("error packing deposit: %v", err)
+ }
+ got, err := UnpackIntoDeposit(out[4:])
+ if err != nil {
+ t.Errorf("error unpacking deposit: %v", err)
+ }
+ if !reflect.DeepEqual(want, *got) {
+ t.Errorf("roundtrip failed: want %v, got %v", want, got)
+ }
+ })
+}
diff --git a/core/types/gen_deposit_json.go b/core/types/gen_deposit_json.go
new file mode 100644
index 0000000000..a65691188f
--- /dev/null
+++ b/core/types/gen_deposit_json.go
@@ -0,0 +1,70 @@
+// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+
+ "github.com/ethereum/go-ethereum/common/hexutil"
+)
+
+var _ = (*depositMarshaling)(nil)
+
+// MarshalJSON marshals as JSON.
+func (d Deposit) MarshalJSON() ([]byte, error) {
+ type Deposit struct {
+ PublicKey hexutil.Bytes `json:"pubkey"`
+ WithdrawalCredentials hexutil.Bytes `json:"withdrawalCredentials"`
+ Amount hexutil.Uint64 `json:"amount"`
+ Signature hexutil.Bytes `json:"signature"`
+ Index hexutil.Uint64 `json:"index"`
+ }
+ var enc Deposit
+ enc.PublicKey = d.PublicKey[:]
+ enc.WithdrawalCredentials = d.WithdrawalCredentials[:]
+ enc.Amount = hexutil.Uint64(d.Amount)
+ enc.Signature = d.Signature[:]
+ enc.Index = hexutil.Uint64(d.Index)
+ return json.Marshal(&enc)
+}
+
+// UnmarshalJSON unmarshals from JSON.
+func (d *Deposit) UnmarshalJSON(input []byte) error {
+ type Deposit struct {
+ PublicKey *hexutil.Bytes `json:"pubkey"`
+ WithdrawalCredentials *hexutil.Bytes `json:"withdrawalCredentials"`
+ Amount *hexutil.Uint64 `json:"amount"`
+ Signature *hexutil.Bytes `json:"signature"`
+ Index *hexutil.Uint64 `json:"index"`
+ }
+ var dec Deposit
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+ if dec.PublicKey != nil {
+ if len(*dec.PublicKey) != len(d.PublicKey) {
+ return errors.New("field 'pubkey' has wrong length, need 48 items")
+ }
+ copy(d.PublicKey[:], *dec.PublicKey)
+ }
+ if dec.WithdrawalCredentials != nil {
+ if len(*dec.WithdrawalCredentials) != len(d.WithdrawalCredentials) {
+ return errors.New("field 'withdrawalCredentials' has wrong length, need 32 items")
+ }
+ copy(d.WithdrawalCredentials[:], *dec.WithdrawalCredentials)
+ }
+ if dec.Amount != nil {
+ d.Amount = uint64(*dec.Amount)
+ }
+ if dec.Signature != nil {
+ if len(*dec.Signature) != len(d.Signature) {
+ return errors.New("field 'signature' has wrong length, need 96 items")
+ }
+ copy(d.Signature[:], *dec.Signature)
+ }
+ if dec.Index != nil {
+ d.Index = uint64(*dec.Index)
+ }
+ return nil
+}
diff --git a/core/types/gen_header_json.go b/core/types/gen_header_json.go
index fb1f915d01..322c5d5642 100644
--- a/core/types/gen_header_json.go
+++ b/core/types/gen_header_json.go
@@ -36,6 +36,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
+ RequestsHash *common.Hash `json:"requestsRoot" rlp:"optional"`
Hash common.Hash `json:"hash"`
}
var enc Header
@@ -59,6 +60,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
enc.BlobGasUsed = (*hexutil.Uint64)(h.BlobGasUsed)
enc.ExcessBlobGas = (*hexutil.Uint64)(h.ExcessBlobGas)
enc.ParentBeaconRoot = h.ParentBeaconRoot
+ enc.RequestsHash = h.RequestsHash
enc.Hash = h.Hash()
return json.Marshal(&enc)
}
@@ -86,6 +88,7 @@ func (h *Header) UnmarshalJSON(input []byte) error {
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"`
ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
+ RequestsHash *common.Hash `json:"requestsRoot" rlp:"optional"`
}
var dec Header
if err := json.Unmarshal(input, &dec); err != nil {
@@ -163,5 +166,8 @@ func (h *Header) UnmarshalJSON(input []byte) error {
if dec.ParentBeaconRoot != nil {
h.ParentBeaconRoot = dec.ParentBeaconRoot
}
+ if dec.RequestsHash != nil {
+ h.RequestsHash = dec.RequestsHash
+ }
return nil
}
diff --git a/core/types/gen_header_rlp.go b/core/types/gen_header_rlp.go
index ed6a1a002c..c79aa8a250 100644
--- a/core/types/gen_header_rlp.go
+++ b/core/types/gen_header_rlp.go
@@ -42,7 +42,8 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
_tmp3 := obj.BlobGasUsed != nil
_tmp4 := obj.ExcessBlobGas != nil
_tmp5 := obj.ParentBeaconRoot != nil
- if _tmp1 || _tmp2 || _tmp3 || _tmp4 || _tmp5 {
+ _tmp6 := obj.RequestsHash != nil
+ if _tmp1 || _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 {
if obj.BaseFee == nil {
w.Write(rlp.EmptyString)
} else {
@@ -52,34 +53,41 @@ func (obj *Header) EncodeRLP(_w io.Writer) error {
w.WriteBigInt(obj.BaseFee)
}
}
- if _tmp2 || _tmp3 || _tmp4 || _tmp5 {
+ if _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 {
if obj.WithdrawalsHash == nil {
w.Write([]byte{0x80})
} else {
w.WriteBytes(obj.WithdrawalsHash[:])
}
}
- if _tmp3 || _tmp4 || _tmp5 {
+ if _tmp3 || _tmp4 || _tmp5 || _tmp6 {
if obj.BlobGasUsed == nil {
w.Write([]byte{0x80})
} else {
w.WriteUint64((*obj.BlobGasUsed))
}
}
- if _tmp4 || _tmp5 {
+ if _tmp4 || _tmp5 || _tmp6 {
if obj.ExcessBlobGas == nil {
w.Write([]byte{0x80})
} else {
w.WriteUint64((*obj.ExcessBlobGas))
}
}
- if _tmp5 {
+ if _tmp5 || _tmp6 {
if obj.ParentBeaconRoot == nil {
w.Write([]byte{0x80})
} else {
w.WriteBytes(obj.ParentBeaconRoot[:])
}
}
+ if _tmp6 {
+ if obj.RequestsHash == nil {
+ w.Write([]byte{0x80})
+ } else {
+ w.WriteBytes(obj.RequestsHash[:])
+ }
+ }
w.ListEnd(_tmp0)
return w.Flush()
}
diff --git a/core/types/hashes.go b/core/types/hashes.go
index 43e9130fd1..cbd197072e 100644
--- a/core/types/hashes.go
+++ b/core/types/hashes.go
@@ -41,6 +41,9 @@ var (
// EmptyWithdrawalsHash is the known hash of the empty withdrawal set.
EmptyWithdrawalsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+ // EmptyRequestsHash is the known hash of the empty requests set.
+ EmptyRequestsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+
// EmptyVerkleHash is the known hash of an empty verkle trie.
EmptyVerkleHash = common.Hash{}
)
diff --git a/core/types/request.go b/core/types/request.go
new file mode 100644
index 0000000000..7b1cade26e
--- /dev/null
+++ b/core/types/request.go
@@ -0,0 +1,157 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package types
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+var (
+ ErrRequestTypeNotSupported = errors.New("request type not supported")
+ errShortTypedRequest = errors.New("typed request too short")
+)
+
+// Request types.
+const (
+ DepositRequestType = 0x00
+)
+
+// Request is an EIP-7685 request object. It represents execution layer
+// triggered messages bound for the consensus layer.
+type Request struct {
+ inner RequestData
+}
+
+// Type returns the EIP-7685 type of the request.
+func (r *Request) Type() byte {
+ return r.inner.requestType()
+}
+
+// Inner returns the inner request data.
+func (r *Request) Inner() RequestData {
+ return r.inner
+}
+
+// NewRequest creates a new request.
+func NewRequest(inner RequestData) *Request {
+ req := new(Request)
+ req.inner = inner.copy()
+ return req
+}
+
+// Requests implements DerivableList for requests.
+type Requests []*Request
+
+// Len returns the length of s.
+func (s Requests) Len() int { return len(s) }
+
+// EncodeIndex encodes the i'th request to w.
+func (s Requests) EncodeIndex(i int, w *bytes.Buffer) {
+ s[i].encode(w)
+}
+
+// RequestData is the underlying data of a request.
+type RequestData interface {
+ requestType() byte
+ encode(*bytes.Buffer) error
+ decode([]byte) error
+ copy() RequestData // creates a deep copy and initializes all fields
+}
+
+// EncodeRLP implements rlp.Encoder
+func (r *Request) EncodeRLP(w io.Writer) error {
+ buf := encodeBufferPool.Get().(*bytes.Buffer)
+ defer encodeBufferPool.Put(buf)
+ buf.Reset()
+ if err := r.encode(buf); err != nil {
+ return err
+ }
+ return rlp.Encode(w, buf.Bytes())
+}
+
+// encode writes the canonical encoding of a request to w.
+func (r *Request) encode(w *bytes.Buffer) error {
+ w.WriteByte(r.Type())
+ return r.inner.encode(w)
+}
+
+// MarshalBinary returns the canonical encoding of the request.
+func (r *Request) MarshalBinary() ([]byte, error) {
+ var buf bytes.Buffer
+ err := r.encode(&buf)
+ return buf.Bytes(), err
+}
+
+// DecodeRLP implements rlp.Decoder
+func (r *Request) DecodeRLP(s *rlp.Stream) error {
+ kind, size, err := s.Kind()
+ switch {
+ case err != nil:
+ return err
+ case kind == rlp.List:
+ return fmt.Errorf("untyped request")
+ case kind == rlp.Byte:
+ return errShortTypedRequest
+ default:
+ // First read the request payload bytes into a temporary buffer.
+ b, buf, err := getPooledBuffer(size)
+ if err != nil {
+ return err
+ }
+ defer encodeBufferPool.Put(buf)
+ if err := s.ReadBytes(b); err != nil {
+ return err
+ }
+ // Now decode the inner request.
+ inner, err := r.decode(b)
+ if err == nil {
+ r.inner = inner
+ }
+ return err
+ }
+}
+
+// UnmarshalBinary decodes the canonical encoding of requests.
+func (r *Request) UnmarshalBinary(b []byte) error {
+ inner, err := r.decode(b)
+ if err != nil {
+ return err
+ }
+ r.inner = inner
+ return nil
+}
+
+// decode decodes a request from the canonical format.
+func (r *Request) decode(b []byte) (RequestData, error) {
+ if len(b) <= 1 {
+ return nil, errShortTypedRequest
+ }
+ var inner RequestData
+ switch b[0] {
+ case DepositRequestType:
+ inner = new(Deposit)
+ default:
+ return nil, ErrRequestTypeNotSupported
+ }
+ err := inner.decode(b[1:])
+ return inner, err
+}
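A usage sketch (hypothetical requestsFromDeposits helper, mirroring the hashing performed in NewBlock above): deposits are wrapped in typed EIP-7685 Requests and reduced to a single root with DeriveSha over a stack trie.

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie"
)

// requestsFromDeposits wraps each deposit in a Request and computes the
// requests root over the resulting list.
func requestsFromDeposits(deposits types.Deposits) (types.Requests, common.Hash) {
	requests := make(types.Requests, 0, len(deposits))
	for _, d := range deposits {
		requests = append(requests, types.NewRequest(d))
	}
	return requests, types.DeriveSha(requests, trie.NewStackTrie(nil))
}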
diff --git a/core/types/transaction.go b/core/types/transaction.go
index 4ac9187bdb..6c8759ee69 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -560,7 +560,7 @@ func (s Transactions) EncodeIndex(i int, w *bytes.Buffer) {
func TxDifference(a, b Transactions) Transactions {
keep := make(Transactions, 0, len(a))
- remove := make(map[common.Hash]struct{})
+ remove := make(map[common.Hash]struct{}, b.Len())
for _, tx := range b {
remove[tx.Hash()] = struct{}{}
}
diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go
index dd25f081f7..73011e238b 100644
--- a/core/types/transaction_signing.go
+++ b/core/types/transaction_signing.go
@@ -64,21 +64,24 @@ func MakeSigner(config *params.ChainConfig, blockNumber *big.Int, blockTime uint
// Use this in transaction-handling code where the current block number is unknown. If you
// have the current block number available, use MakeSigner instead.
func LatestSigner(config *params.ChainConfig) Signer {
+ var signer Signer
if config.ChainID != nil {
- if config.CancunTime != nil {
- return NewCancunSigner(config.ChainID)
- }
- if config.LondonBlock != nil {
- return NewLondonSigner(config.ChainID)
- }
- if config.BerlinBlock != nil {
- return NewEIP2930Signer(config.ChainID)
- }
- if config.EIP155Block != nil {
- return NewEIP155Signer(config.ChainID)
+ switch {
+ case config.CancunTime != nil:
+ signer = NewCancunSigner(config.ChainID)
+ case config.LondonBlock != nil:
+ signer = NewLondonSigner(config.ChainID)
+ case config.BerlinBlock != nil:
+ signer = NewEIP2930Signer(config.ChainID)
+ case config.EIP155Block != nil:
+ signer = NewEIP155Signer(config.ChainID)
+ default:
+ signer = HomesteadSigner{}
}
+ } else {
+ signer = HomesteadSigner{}
}
- return HomesteadSigner{}
+ return signer
}
// LatestSignerForChainID returns the 'most permissive' Signer available. Specifically,
@@ -89,10 +92,13 @@ func LatestSigner(config *params.ChainConfig) Signer {
// configuration are unknown. If you have a ChainConfig, use LatestSigner instead.
// If you have a ChainConfig and know the current block number, use MakeSigner instead.
func LatestSignerForChainID(chainID *big.Int) Signer {
- if chainID == nil {
- return HomesteadSigner{}
+ var signer Signer
+ if chainID != nil {
+ signer = NewCancunSigner(chainID)
+ } else {
+ signer = HomesteadSigner{}
}
- return NewCancunSigner(chainID)
+ return signer
}
// SignTx signs the transaction using the given signer and private key.
diff --git a/core/vm/contracts.go b/core/vm/contracts.go
index dd71a9729f..104d2ba814 100644
--- a/core/vm/contracts.go
+++ b/core/vm/contracts.go
@@ -21,6 +21,7 @@ import (
"encoding/binary"
"errors"
"fmt"
+ "maps"
"math/big"
"github.com/consensys/gnark-crypto/ecc"
@@ -46,9 +47,12 @@ type PrecompiledContract interface {
Run(input []byte) ([]byte, error) // Run runs the precompiled contract
}
+// PrecompiledContracts contains the precompiled contracts supported at the given fork.
+type PrecompiledContracts map[common.Address]PrecompiledContract
+
// PrecompiledContractsHomestead contains the default set of pre-compiled Ethereum
// contracts used in the Frontier and Homestead releases.
-var PrecompiledContractsHomestead = map[common.Address]PrecompiledContract{
+var PrecompiledContractsHomestead = PrecompiledContracts{
common.BytesToAddress([]byte{0x1}): &ecrecover{},
common.BytesToAddress([]byte{0x2}): &sha256hash{},
common.BytesToAddress([]byte{0x3}): &ripemd160hash{},
@@ -57,7 +61,7 @@ var PrecompiledContractsHomestead = map[common.Address]PrecompiledContract{
// PrecompiledContractsByzantium contains the default set of pre-compiled Ethereum
// contracts used in the Byzantium release.
-var PrecompiledContractsByzantium = map[common.Address]PrecompiledContract{
+var PrecompiledContractsByzantium = PrecompiledContracts{
common.BytesToAddress([]byte{0x1}): &ecrecover{},
common.BytesToAddress([]byte{0x2}): &sha256hash{},
common.BytesToAddress([]byte{0x3}): &ripemd160hash{},
@@ -70,7 +74,7 @@ var PrecompiledContractsByzantium = map[common.Address]PrecompiledContract{
// PrecompiledContractsIstanbul contains the default set of pre-compiled Ethereum
// contracts used in the Istanbul release.
-var PrecompiledContractsIstanbul = map[common.Address]PrecompiledContract{
+var PrecompiledContractsIstanbul = PrecompiledContracts{
common.BytesToAddress([]byte{0x1}): &ecrecover{},
common.BytesToAddress([]byte{0x2}): &sha256hash{},
common.BytesToAddress([]byte{0x3}): &ripemd160hash{},
@@ -84,7 +88,7 @@ var PrecompiledContractsIstanbul = map[common.Address]PrecompiledContract{
// PrecompiledContractsBerlin contains the default set of pre-compiled Ethereum
// contracts used in the Berlin release.
-var PrecompiledContractsBerlin = map[common.Address]PrecompiledContract{
+var PrecompiledContractsBerlin = PrecompiledContracts{
common.BytesToAddress([]byte{0x1}): &ecrecover{},
common.BytesToAddress([]byte{0x2}): &sha256hash{},
common.BytesToAddress([]byte{0x3}): &ripemd160hash{},
@@ -98,7 +102,7 @@ var PrecompiledContractsBerlin = map[common.Address]PrecompiledContract{
// PrecompiledContractsCancun contains the default set of pre-compiled Ethereum
// contracts used in the Cancun release.
-var PrecompiledContractsCancun = map[common.Address]PrecompiledContract{
+var PrecompiledContractsCancun = PrecompiledContracts{
common.BytesToAddress([]byte{0x1}): &ecrecover{},
common.BytesToAddress([]byte{0x2}): &sha256hash{},
common.BytesToAddress([]byte{0x3}): &ripemd160hash{},
@@ -113,7 +117,7 @@ var PrecompiledContractsCancun = map[common.Address]PrecompiledContract{
// PrecompiledContractsPrague contains the set of pre-compiled Ethereum
// contracts used in the Prague release.
-var PrecompiledContractsPrague = map[common.Address]PrecompiledContract{
+var PrecompiledContractsPrague = PrecompiledContracts{
common.BytesToAddress([]byte{0x01}): &ecrecover{},
common.BytesToAddress([]byte{0x02}): &sha256hash{},
common.BytesToAddress([]byte{0x03}): &ripemd160hash{},
@@ -169,7 +173,31 @@ func init() {
}
}
-// ActivePrecompiles returns the precompiles enabled with the current configuration.
+func activePrecompiledContracts(rules params.Rules) PrecompiledContracts {
+ switch {
+ case rules.IsVerkle:
+ return PrecompiledContractsVerkle
+ case rules.IsPrague:
+ return PrecompiledContractsPrague
+ case rules.IsCancun:
+ return PrecompiledContractsCancun
+ case rules.IsBerlin:
+ return PrecompiledContractsBerlin
+ case rules.IsIstanbul:
+ return PrecompiledContractsIstanbul
+ case rules.IsByzantium:
+ return PrecompiledContractsByzantium
+ default:
+ return PrecompiledContractsHomestead
+ }
+}
+
+// ActivePrecompiledContracts returns a copy of precompiled contracts enabled with the current configuration.
+func ActivePrecompiledContracts(rules params.Rules) PrecompiledContracts {
+ return maps.Clone(activePrecompiledContracts(rules))
+}
+
+// ActivePrecompiles returns the precompile addresses enabled with the current configuration.
func ActivePrecompiles(rules params.Rules) []common.Address {
switch {
case rules.IsPrague:
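A usage sketch (hypothetical overridePrecompile helper): ActivePrecompiledContracts hands back a clone of the active set, so a caller can swap one entry and install the modified map on an EVM instance via the SetPrecompiles hook added in the evm.go change below.

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/params"
)

// overridePrecompile replaces the contract at addr in the active precompile
// set for the given rules and installs the modified set on the EVM.
func overridePrecompile(evm *vm.EVM, rules params.Rules, addr common.Address, p vm.PrecompiledContract) {
	contracts := vm.ActivePrecompiledContracts(rules) // returns a copy, safe to mutate
	contracts[addr] = p
	evm.SetPrecompiles(contracts)
}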
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 7617d843c7..616668d565 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -41,24 +41,7 @@ type (
)
func (evm *EVM) precompile(addr common.Address) (PrecompiledContract, bool) {
- var precompiles map[common.Address]PrecompiledContract
- switch {
- case evm.chainRules.IsVerkle:
- precompiles = PrecompiledContractsVerkle
- case evm.chainRules.IsPrague:
- precompiles = PrecompiledContractsPrague
- case evm.chainRules.IsCancun:
- precompiles = PrecompiledContractsCancun
- case evm.chainRules.IsBerlin:
- precompiles = PrecompiledContractsBerlin
- case evm.chainRules.IsIstanbul:
- precompiles = PrecompiledContractsIstanbul
- case evm.chainRules.IsByzantium:
- precompiles = PrecompiledContractsByzantium
- default:
- precompiles = PrecompiledContractsHomestead
- }
- p, ok := precompiles[addr]
+ p, ok := evm.precompiles[addr]
return p, ok
}
@@ -129,22 +112,13 @@ type EVM struct {
// available gas is calculated in gasCall* according to the 63/64 rule and later
// applied in opCall*.
callGasTemp uint64
+ // precompiles holds the precompiled contracts for the current epoch
+ precompiles map[common.Address]PrecompiledContract
}
// NewEVM returns a new EVM. The returned EVM is not thread safe and should
// only ever be used *once*.
func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig *params.ChainConfig, config Config) *EVM {
- // If basefee tracking is disabled (eth_call, eth_estimateGas, etc), and no
- // gas prices were specified, lower the basefee to 0 to avoid breaking EVM
- // invariants (basefee < feecap)
- if config.NoBaseFee {
- if txCtx.GasPrice.BitLen() == 0 {
- blockCtx.BaseFee = new(big.Int)
- }
- if txCtx.BlobFeeCap != nil && txCtx.BlobFeeCap.BitLen() == 0 {
- blockCtx.BlobBaseFee = new(big.Int)
- }
- }
evm := &EVM{
Context: blockCtx,
TxContext: txCtx,
@@ -153,10 +127,18 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig
chainConfig: chainConfig,
chainRules: chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil, blockCtx.Time),
}
+ evm.precompiles = activePrecompiledContracts(evm.chainRules)
evm.interpreter = NewEVMInterpreter(evm)
return evm
}
+// SetPrecompiles sets the precompiled contracts for the EVM.
+// This method is only used through RPC calls.
+// It is not thread-safe.
+func (evm *EVM) SetPrecompiles(precompiles PrecompiledContracts) {
+ evm.precompiles = precompiles
+}
+
// Reset resets the EVM with a new transaction context.Reset
// This is not threadsafe and should only be done very cautiously.
func (evm *EVM) Reset(txCtx TxContext, statedb StateDB) {
@@ -466,6 +448,18 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
}
evm.StateDB.SetNonce(caller.Address(), nonce+1)
+ // Charge the contract creation init gas in verkle mode
+ if evm.chainRules.IsEIP4762 {
+ statelessGas := evm.AccessEvents.ContractCreatePreCheckGas(address)
+ if statelessGas > gas {
+ return nil, common.Address{}, 0, ErrOutOfGas
+ }
+ if evm.Config.Tracer != nil && evm.Config.Tracer.OnGasChange != nil {
+ evm.Config.Tracer.OnGasChange(gas, gas-statelessGas, tracing.GasChangeWitnessContractCollisionCheck)
+ }
+ gas = gas - statelessGas
+ }
+
// We add this to the access list _before_ taking a snapshot. Even if the
// creation fails, the access-list change should not be rolled back.
if evm.chainRules.IsEIP2929 {
@@ -502,6 +496,17 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
if evm.chainRules.IsEIP158 {
evm.StateDB.SetNonce(address, 1)
}
+ // Charge the contract creation init gas in verkle mode
+ if evm.chainRules.IsEIP4762 {
+ statelessGas := evm.AccessEvents.ContractCreateInitGas(address)
+ if statelessGas > gas {
+ return nil, common.Address{}, 0, ErrOutOfGas
+ }
+ if evm.Config.Tracer != nil && evm.Config.Tracer.OnGasChange != nil {
+ evm.Config.Tracer.OnGasChange(gas, gas-statelessGas, tracing.GasChangeWitnessContractInit)
+ }
+ gas = gas - statelessGas
+ }
evm.Context.Transfer(evm.StateDB, caller.Address(), address, value)
// Initialise a new contract and set the code that is to be used by the EVM.
@@ -523,13 +528,6 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
// initNewContract runs a new contract's creation code, performs checks on the
// resulting code that is to be deployed, and consumes necessary gas.
func (evm *EVM) initNewContract(contract *Contract, address common.Address, value *uint256.Int) ([]byte, error) {
- // Charge the contract creation init gas in verkle mode
- if evm.chainRules.IsEIP4762 {
- if !contract.UseGas(evm.AccessEvents.ContractCreateInitGas(address, value.Sign() != 0), evm.Config.Tracer, tracing.GasChangeWitnessContractInit) {
- return nil, ErrOutOfGas
- }
- }
-
ret, err := evm.interpreter.Run(contract, nil, false)
if err != nil {
return ret, err
@@ -551,11 +549,6 @@ func (evm *EVM) initNewContract(contract *Contract, address common.Address, valu
return ret, ErrCodeStoreOutOfGas
}
} else {
- // Contract creation completed, touch the missing fields in the contract
- if !contract.UseGas(evm.AccessEvents.AddAccount(address, true), evm.Config.Tracer, tracing.GasChangeWitnessContractCreation) {
- return ret, ErrCodeStoreOutOfGas
- }
-
if len(ret) > 0 && !contract.UseGas(evm.AccessEvents.CodeChunksRangeGas(address, 0, uint64(len(ret)), uint64(len(ret)), true), evm.Config.Tracer, tracing.GasChangeWitnessCodeChunk) {
return ret, ErrCodeStoreOutOfGas
}
diff --git a/core/vm/gas_table_test.go b/core/vm/gas_table_test.go
index 02fc94840d..babe9a5b6a 100644
--- a/core/vm/gas_table_test.go
+++ b/core/vm/gas_table_test.go
@@ -26,7 +26,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
@@ -86,7 +85,7 @@ func TestEIP2200(t *testing.T) {
for i, tt := range eip2200Tests {
address := common.BytesToAddress([]byte("contract"))
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
statedb.CreateAccount(address)
statedb.SetCode(address, hexutil.MustDecode(tt.input))
statedb.SetState(address, common.Hash{}, common.BytesToHash([]byte{tt.original}))
@@ -138,7 +137,7 @@ func TestCreateGas(t *testing.T) {
var gasUsed = uint64(0)
doCheck := func(testGas int) bool {
address := common.BytesToAddress([]byte("contract"))
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
statedb.CreateAccount(address)
statedb.SetCode(address, hexutil.MustDecode(tt.code))
statedb.Finalise(true)
diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go
index e17e913aa3..a3f9ee81d1 100644
--- a/core/vm/instructions_test.go
+++ b/core/vm/instructions_test.go
@@ -27,7 +27,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
@@ -582,7 +581,7 @@ func BenchmarkOpMstore(bench *testing.B) {
func TestOpTstore(t *testing.T) {
var (
- statedb, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ = state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
env = NewEVM(BlockContext{}, TxContext{}, statedb, params.TestChainConfig, Config{})
stack = newstack()
mem = NewMemory()
diff --git a/core/vm/interpreter_test.go b/core/vm/interpreter_test.go
index ff4977d728..a1369648da 100644
--- a/core/vm/interpreter_test.go
+++ b/core/vm/interpreter_test.go
@@ -22,7 +22,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
@@ -43,7 +42,7 @@ func TestLoopInterrupt(t *testing.T) {
}
for i, tt := range loopInterruptTests {
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
statedb.CreateAccount(address)
statedb.SetCode(address, common.Hex2Bytes(tt))
statedb.Finalise(true)
diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go
index 289da44be3..1cdaaba516 100644
--- a/core/vm/operations_acl.go
+++ b/core/vm/operations_acl.go
@@ -39,16 +39,10 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc {
cost = uint64(0)
)
// Check slot presence in the access list
- if addrPresent, slotPresent := evm.StateDB.SlotInAccessList(contract.Address(), slot); !slotPresent {
+ if _, slotPresent := evm.StateDB.SlotInAccessList(contract.Address(), slot); !slotPresent {
cost = params.ColdSloadCostEIP2929
// If the caller cannot afford the cost, this change will be rolled back
evm.StateDB.AddSlotToAccessList(contract.Address(), slot)
- if !addrPresent {
- // Once we're done with YOLOv2 and schedule this for mainnet, might
- // be good to remove this panic here, which is just really a
- // canary to have during testing
- panic("impossible case: address was not present in access list during sstore op")
- }
}
value := common.Hash(y.Bytes32())
diff --git a/core/vm/operations_verkle.go b/core/vm/operations_verkle.go
index 73eb05974d..722d5ed2ce 100644
--- a/core/vm/operations_verkle.go
+++ b/core/vm/operations_verkle.go
@@ -40,7 +40,7 @@ func gasSLoad4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memor
func gasBalance4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
address := stack.peek().Bytes20()
- gas := evm.AccessEvents.BalanceGas(address, false)
+ gas := evm.AccessEvents.BasicDataGas(address, false)
if gas == 0 {
gas = params.WarmStorageReadCostEIP2929
}
@@ -52,8 +52,7 @@ func gasExtCodeSize4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory,
if _, isPrecompile := evm.precompile(address); isPrecompile {
return 0, nil
}
- gas := evm.AccessEvents.VersionGas(address, false)
- gas += evm.AccessEvents.CodeSizeGas(address, false)
+ gas := evm.AccessEvents.BasicDataGas(address, false)
if gas == 0 {
gas = params.WarmStorageReadCostEIP2929
}
@@ -102,17 +101,15 @@ func gasSelfdestructEIP4762(evm *EVM, contract *Contract, stack *Stack, mem *Mem
return 0, nil
}
contractAddr := contract.Address()
- statelessGas := evm.AccessEvents.VersionGas(contractAddr, false)
- statelessGas += evm.AccessEvents.CodeSizeGas(contractAddr, false)
- statelessGas += evm.AccessEvents.BalanceGas(contractAddr, false)
+ statelessGas := evm.AccessEvents.BasicDataGas(contractAddr, false)
if contractAddr != beneficiaryAddr {
- statelessGas += evm.AccessEvents.BalanceGas(beneficiaryAddr, false)
+ statelessGas += evm.AccessEvents.BasicDataGas(beneficiaryAddr, false)
}
// Charge write costs if it transfers value
if evm.StateDB.GetBalance(contractAddr).Sign() != 0 {
- statelessGas += evm.AccessEvents.BalanceGas(contractAddr, true)
+ statelessGas += evm.AccessEvents.BasicDataGas(contractAddr, true)
if contractAddr != beneficiaryAddr {
- statelessGas += evm.AccessEvents.BalanceGas(beneficiaryAddr, true)
+ statelessGas += evm.AccessEvents.BasicDataGas(beneficiaryAddr, true)
}
}
return statelessGas, nil
@@ -145,8 +142,7 @@ func gasExtCodeCopyEIP4762(evm *EVM, contract *Contract, stack *Stack, mem *Memo
return 0, err
}
addr := common.Address(stack.peek().Bytes20())
- wgas := evm.AccessEvents.VersionGas(addr, false)
- wgas += evm.AccessEvents.CodeSizeGas(addr, false)
+ wgas := evm.AccessEvents.BasicDataGas(addr, false)
if wgas == 0 {
wgas = params.WarmStorageReadCostEIP2929
}
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index 1181e5fccd..f83ed682cd 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -21,7 +21,6 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -128,7 +127,7 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) {
setDefaults(cfg)
if cfg.State == nil {
- cfg.State, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ cfg.State, _ = state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
}
var (
address = common.BytesToAddress([]byte("contract"))
@@ -165,7 +164,7 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) {
setDefaults(cfg)
if cfg.State == nil {
- cfg.State, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ cfg.State, _ = state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
}
var (
vmenv = NewEnv(cfg)
diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go
index f56d4a7452..9046dad5fe 100644
--- a/core/vm/runtime/runtime_test.go
+++ b/core/vm/runtime/runtime_test.go
@@ -30,7 +30,6 @@ import (
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/asm"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -105,7 +104,7 @@ func TestExecute(t *testing.T) {
}
func TestCall(t *testing.T) {
- state, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ state, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
address := common.HexToAddress("0xaa")
state.SetCode(address, []byte{
byte(vm.PUSH1), 10,
@@ -161,7 +160,7 @@ func BenchmarkCall(b *testing.B) {
}
func benchmarkEVM_Create(bench *testing.B, code string) {
var (
- statedb, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ = state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
sender = common.BytesToAddress([]byte("sender"))
receiver = common.BytesToAddress([]byte("receiver"))
)
@@ -227,7 +226,7 @@ func BenchmarkEVM_SWAP1(b *testing.B) {
return contract
}
- state, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ state, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
contractAddr := common.BytesToAddress([]byte("contract"))
b.Run("10k", func(b *testing.B) {
@@ -255,7 +254,7 @@ func BenchmarkEVM_RETURN(b *testing.B) {
return contract
}
- state, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ state, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
contractAddr := common.BytesToAddress([]byte("contract"))
for _, n := range []uint64{1_000, 10_000, 100_000, 1_000_000} {
@@ -393,7 +392,7 @@ func TestBlockhash(t *testing.T) {
func benchmarkNonModifyingCode(gas uint64, code []byte, name string, tracerCode string, b *testing.B) {
cfg := new(Config)
setDefaults(cfg)
- cfg.State, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ cfg.State, _ = state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
cfg.GasLimit = gas
if len(tracerCode) > 0 {
tracer, err := tracers.DefaultDirectory.New(tracerCode, new(tracers.Context), nil)
@@ -880,7 +879,7 @@ func TestRuntimeJSTracer(t *testing.T) {
main := common.HexToAddress("0xaa")
for i, jsTracer := range jsTracers {
for j, tc := range tests {
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
statedb.SetCode(main, tc.code)
statedb.SetCode(common.HexToAddress("0xbb"), calleeCode)
statedb.SetCode(common.HexToAddress("0xcc"), calleeCode)
@@ -922,7 +921,7 @@ func TestJSTracerCreateTx(t *testing.T) {
exit: function(res) { this.exits++ }}`
code := []byte{byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN)}
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
tracer, err := tracers.DefaultDirectory.New(jsTracer, new(tracers.Context), nil)
if err != nil {
t.Fatal(err)
diff --git a/eth/api_debug_test.go b/eth/api_debug_test.go
index 750cee5e44..cfb8829b5c 100644
--- a/eth/api_debug_test.go
+++ b/eth/api_debug_test.go
@@ -64,8 +64,9 @@ func TestAccountRange(t *testing.T) {
t.Parallel()
var (
- statedb = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &triedb.Config{Preimages: true})
- sdb, _ = state.New(types.EmptyRootHash, statedb, nil)
+ mdb = rawdb.NewMemoryDatabase()
+ statedb = state.NewDatabase(triedb.NewDatabase(mdb, &triedb.Config{Preimages: true}), nil)
+ sdb, _ = state.New(types.EmptyRootHash, statedb)
addrs = [AccountRangeMaxResults * 2]common.Address{}
m = map[common.Address]bool{}
)
@@ -82,7 +83,7 @@ func TestAccountRange(t *testing.T) {
}
}
root, _ := sdb.Commit(0, true)
- sdb, _ = state.New(root, statedb, nil)
+ sdb, _ = state.New(root, statedb)
trie, err := statedb.OpenTrie(root)
if err != nil {
@@ -135,12 +136,12 @@ func TestEmptyAccountRange(t *testing.T) {
t.Parallel()
var (
- statedb = state.NewDatabase(rawdb.NewMemoryDatabase())
- st, _ = state.New(types.EmptyRootHash, statedb, nil)
+ statedb = state.NewDatabaseForTesting()
+ st, _ = state.New(types.EmptyRootHash, statedb)
)
// Commit(although nothing to flush) and re-init the statedb
st.Commit(0, true)
- st, _ = state.New(types.EmptyRootHash, statedb, nil)
+ st, _ = state.New(types.EmptyRootHash, statedb)
results := st.RawDump(&state.DumpConfig{
SkipCode: true,
@@ -161,8 +162,10 @@ func TestStorageRangeAt(t *testing.T) {
// Create a state where account 0x010000... has a few storage entries.
var (
- db = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &triedb.Config{Preimages: true})
- sdb, _ = state.New(types.EmptyRootHash, db, nil)
+ mdb = rawdb.NewMemoryDatabase()
+ tdb = triedb.NewDatabase(mdb, &triedb.Config{Preimages: true})
+ db = state.NewDatabase(tdb, nil)
+ sdb, _ = state.New(types.EmptyRootHash, db)
addr = common.Address{0x01}
keys = []common.Hash{ // hashes of Keys of storage
common.HexToHash("340dd630ad21bf010b4e676dbfa9ba9a02175262d1fa356232cfde6cb5b47ef2"),
@@ -181,7 +184,7 @@ func TestStorageRangeAt(t *testing.T) {
sdb.SetState(addr, *entry.Key, entry.Value)
}
root, _ := sdb.Commit(0, false)
- sdb, _ = state.New(root, db, nil)
+ sdb, _ = state.New(root, db)
// Check a few combinations of limit and start/end.
tests := []struct {
diff --git a/eth/backend.go b/eth/backend.go
index 51011ed0b2..f7b67be4dc 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -216,11 +216,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
if config.OverrideVerkle != nil {
overrides.OverrideVerkle = config.OverrideVerkle
}
- // TODO (MariusVanDerWijden) get rid of shouldPreserve in a follow-up PR
- shouldPreserve := func(header *types.Header) bool {
- return false
- }
- eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, &overrides, eth.engine, vmConfig, shouldPreserve, &config.TransactionHistory)
+ eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, &overrides, eth.engine, vmConfig, &config.TransactionHistory)
if err != nil {
return nil, err
}
diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go
index badc9b5d0c..877c010596 100644
--- a/eth/catalyst/api.go
+++ b/eth/catalyst/api.go
@@ -86,11 +86,15 @@ var caps = []string{
"engine_getPayloadV1",
"engine_getPayloadV2",
"engine_getPayloadV3",
+ "engine_getPayloadV4",
"engine_newPayloadV1",
"engine_newPayloadV2",
"engine_newPayloadV3",
+ "engine_newPayloadV4",
"engine_getPayloadBodiesByHashV1",
+ "engine_getPayloadBodiesByHashV2",
"engine_getPayloadBodiesByRangeV1",
+ "engine_getPayloadBodiesByRangeV2",
"engine_getClientVersionV1",
}
@@ -220,7 +224,7 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV3(update engine.ForkchoiceStateV1, pa
if params.BeaconRoot == nil {
return engine.STATUS_INVALID, engine.InvalidPayloadAttributes.With(errors.New("missing beacon root"))
}
- if api.eth.BlockChain().Config().LatestPostLondonFork(params.Timestamp) != forks.Cancun {
+ if api.eth.BlockChain().Config().LatestPostLondonFork(params.Timestamp) != forks.Cancun && api.eth.BlockChain().Config().LatestPostLondonFork(params.Timestamp) != forks.Prague {
return engine.STATUS_INVALID, engine.UnsupportedFork.With(errors.New("forkchoiceUpdatedV3 must only be called for cancun payloads"))
}
}
@@ -443,6 +447,14 @@ func (api *ConsensusAPI) GetPayloadV3(payloadID engine.PayloadID) (*engine.Execu
return api.getPayload(payloadID, false)
}
+// GetPayloadV4 returns a cached payload by id.
+func (api *ConsensusAPI) GetPayloadV4(payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) {
+ if !payloadID.Is(engine.PayloadV3) {
+ return nil, engine.UnsupportedFork
+ }
+ return api.getPayload(payloadID, false)
+}
+
func (api *ConsensusAPI) getPayload(payloadID engine.PayloadID, full bool) (*engine.ExecutionPayloadEnvelope, error) {
log.Trace("Engine API request received", "method", "GetPayload", "id", payloadID)
data := api.localBlocks.get(payloadID, full)
@@ -508,6 +520,34 @@ func (api *ConsensusAPI) NewPayloadV3(params engine.ExecutableData, versionedHas
return api.newPayload(params, versionedHashes, beaconRoot)
}
+// NewPayloadV4 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
+func (api *ConsensusAPI) NewPayloadV4(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (engine.PayloadStatusV1, error) {
+ if params.Withdrawals == nil {
+ return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai"))
+ }
+ if params.ExcessBlobGas == nil {
+ return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil excessBlobGas post-cancun"))
+ }
+ if params.BlobGasUsed == nil {
+ return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil blobGasUsed post-cancun"))
+ }
+ if params.Deposits == nil {
+ return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil deposits post-prague"))
+ }
+
+ if versionedHashes == nil {
+ return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil versionedHashes post-cancun"))
+ }
+ if beaconRoot == nil {
+ return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil beaconRoot post-cancun"))
+ }
+
+ if api.eth.BlockChain().Config().LatestPostLondonFork(params.Timestamp) != forks.Prague {
+ return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadV4 must only be called for prague payloads"))
+ }
+ return api.newPayload(params, versionedHashes, beaconRoot)
+}
+
func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (engine.PayloadStatusV1, error) {
// The locking here is, strictly, not required. Without these locks, this can happen:
//
@@ -553,6 +593,7 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe
"params.ExcessBlobGas", ebg,
"len(params.Transactions)", len(params.Transactions),
"len(params.Withdrawals)", len(params.Withdrawals),
+ "len(params.Deposits)", len(params.Deposits),
"beaconRoot", beaconRoot,
"error", err)
return api.invalid(err, nil), nil
@@ -826,8 +867,25 @@ func (api *ConsensusAPI) GetClientVersionV1(info engine.ClientVersionV1) []engin
// GetPayloadBodiesByHashV1 implements engine_getPayloadBodiesByHashV1 which allows for retrieval of a list
// of block bodies by the engine api.
-func (api *ConsensusAPI) GetPayloadBodiesByHashV1(hashes []common.Hash) []*engine.ExecutionPayloadBodyV1 {
- bodies := make([]*engine.ExecutionPayloadBodyV1, len(hashes))
+func (api *ConsensusAPI) GetPayloadBodiesByHashV1(hashes []common.Hash) []*engine.ExecutionPayloadBody {
+ bodies := make([]*engine.ExecutionPayloadBody, len(hashes))
+ for i, hash := range hashes {
+ block := api.eth.BlockChain().GetBlockByHash(hash)
+ body := getBody(block)
+ if body != nil {
+ // Nil out the V2 values, clients should know to not request V1 objects
+ // after Prague.
+ body.Deposits = nil
+ }
+ bodies[i] = body
+ }
+ return bodies
+}
+
+// GetPayloadBodiesByHashV2 implements engine_getPayloadBodiesByHashV2 which allows for retrieval of a list
+// of block bodies by the engine api.
+func (api *ConsensusAPI) GetPayloadBodiesByHashV2(hashes []common.Hash) []*engine.ExecutionPayloadBody {
+ bodies := make([]*engine.ExecutionPayloadBody, len(hashes))
for i, hash := range hashes {
block := api.eth.BlockChain().GetBlockByHash(hash)
bodies[i] = getBody(block)
@@ -837,7 +895,28 @@ func (api *ConsensusAPI) GetPayloadBodiesByHashV1(hashes []common.Hash) []*engin
// GetPayloadBodiesByRangeV1 implements engine_getPayloadBodiesByRangeV1 which allows for retrieval of a range
// of block bodies by the engine api.
-func (api *ConsensusAPI) GetPayloadBodiesByRangeV1(start, count hexutil.Uint64) ([]*engine.ExecutionPayloadBodyV1, error) {
+func (api *ConsensusAPI) GetPayloadBodiesByRangeV1(start, count hexutil.Uint64) ([]*engine.ExecutionPayloadBody, error) {
+ bodies, err := api.getBodiesByRange(start, count)
+ if err != nil {
+ return nil, err
+ }
+ // Nil out the V2 values, clients should know to not request V1 objects
+ // after Prague.
+ for i := range bodies {
+ if bodies[i] != nil {
+ bodies[i].Deposits = nil
+ }
+ }
+ return bodies, nil
+}
+
+// GetPayloadBodiesByRangeV2 implements engine_getPayloadBodiesByRangeV2 which allows for retrieval of a range
+// of block bodies by the engine api.
+func (api *ConsensusAPI) GetPayloadBodiesByRangeV2(start, count hexutil.Uint64) ([]*engine.ExecutionPayloadBody, error) {
+ return api.getBodiesByRange(start, count)
+}
+
+func (api *ConsensusAPI) getBodiesByRange(start, count hexutil.Uint64) ([]*engine.ExecutionPayloadBody, error) {
if start == 0 || count == 0 {
return nil, engine.InvalidParams.With(fmt.Errorf("invalid start or count, start: %v count: %v", start, count))
}
@@ -850,7 +929,7 @@ func (api *ConsensusAPI) GetPayloadBodiesByRangeV1(start, count hexutil.Uint64)
if last > current {
last = current
}
- bodies := make([]*engine.ExecutionPayloadBodyV1, 0, uint64(count))
+ bodies := make([]*engine.ExecutionPayloadBody, 0, uint64(count))
for i := uint64(start); i <= last; i++ {
block := api.eth.BlockChain().GetBlockByNumber(i)
bodies = append(bodies, getBody(block))
@@ -858,15 +937,16 @@ func (api *ConsensusAPI) GetPayloadBodiesByRangeV1(start, count hexutil.Uint64)
return bodies, nil
}
-func getBody(block *types.Block) *engine.ExecutionPayloadBodyV1 {
+func getBody(block *types.Block) *engine.ExecutionPayloadBody {
if block == nil {
return nil
}
var (
- body = block.Body()
- txs = make([]hexutil.Bytes, len(body.Transactions))
- withdrawals = body.Withdrawals
+ body = block.Body()
+ txs = make([]hexutil.Bytes, len(body.Transactions))
+ withdrawals = body.Withdrawals
+ depositRequests types.Deposits
)
for j, tx := range body.Transactions {
@@ -878,8 +958,20 @@ func getBody(block *types.Block) *engine.ExecutionPayloadBodyV1 {
withdrawals = make([]*types.Withdrawal, 0)
}
- return &engine.ExecutionPayloadBodyV1{
+ if block.Header().RequestsHash != nil {
+ // TODO: this isn't future-proof, because from the block alone we can't tell
+ // whether a request type has activated yet or there are simply no requests
+ // of that type.
+ for _, req := range block.Requests() {
+ if d, ok := req.Inner().(*types.Deposit); ok {
+ depositRequests = append(depositRequests, d)
+ }
+ }
+ }
+
+ return &engine.ExecutionPayloadBody{
TransactionData: txs,
Withdrawals: withdrawals,
+ Deposits: depositRequests,
}
}
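For reference, the deposit extraction performed in getBody can be read as a small standalone helper. The following is only an illustrative sketch built from the types used in this diff (types.Requests, types.Deposits, req.Inner()); it is not part of the patch:

// Illustrative helper (assumption: same types as in the diff above): collect
// the deposit requests out of a block's generic request list.
func depositsFromRequests(reqs types.Requests) types.Deposits {
	deposits := make(types.Deposits, 0)
	for _, req := range reqs {
		if d, ok := req.Inner().(*types.Deposit); ok {
			deposits = append(deposits, d)
		}
	}
	return deposits
}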
diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go
index 64e6684be1..0a58e1eaee 100644
--- a/eth/catalyst/api_test.go
+++ b/eth/catalyst/api_test.go
@@ -74,6 +74,12 @@ func generateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block) {
Alloc: types.GenesisAlloc{
testAddr: {Balance: testBalance},
params.BeaconRootsAddress: {Balance: common.Big0, Code: common.Hex2Bytes("3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500")},
+ config.DepositContractAddress: {
+ // Simple deposit generator, source: https://gist.github.com/lightclient/54abb2af2465d6969fa6d1920b9ad9d7
+ Code: common.Hex2Bytes("6080604052366103aa575f603067ffffffffffffffff811115610025576100246103ae565b5b6040519080825280601f01601f1916602001820160405280156100575781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f8151811061007d5761007c6103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f602067ffffffffffffffff8111156100c7576100c66103ae565b5b6040519080825280601f01601f1916602001820160405280156100f95781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f8151811061011f5761011e6103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f600867ffffffffffffffff811115610169576101686103ae565b5b6040519080825280601f01601f19166020018201604052801561019b5781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f815181106101c1576101c06103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f606067ffffffffffffffff81111561020b5761020a6103ae565b5b6040519080825280601f01601f19166020018201604052801561023d5781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f81518110610263576102626103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f600867ffffffffffffffff8111156102ad576102ac6103ae565b5b6040519080825280601f01601f1916602001820160405280156102df5781602001600182028036833780820191505090505b5090505f8054906101000a900460ff1660f81b815f81518110610305576103046103db565b5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f8081819054906101000a900460ff168092919061035090610441565b91906101000a81548160ff021916908360ff160217905550507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c585858585856040516103a09594939291906104d9565b60405180910390a1005b5f80fd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52603260045260245ffd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f60ff82169050919050565b5f61044b82610435565b915060ff820361045e5761045d610408565b5b600182019050919050565b5f81519050919050565b5f82825260208201905092915050565b8281835e5f83830152505050565b5f601f19601f8301169050919050565b5f6104ab82610469565b6104b58185610473565b93506104c5818560208601610483565b6104ce81610491565b840191505092915050565b5f60a0820190508181035f8301526104f181886104a1565b9050818103602083015261050581876104a1565b9050818103604083015261051981866104a1565b9050818103606083015261052d81856104a1565b9050818103608083015261054181846104a1565b9050969550505050505056fea26469706673582212208569967e58690162d7d6fe3513d07b393b4c15e70f41505cbbfd08f53eba739364736f6c63430008190033"),
+ Nonce: 0,
+ Balance: big.NewInt(0),
+ },
},
ExtraData: []byte("test genesis"),
Timestamp: 9000,
@@ -483,10 +489,10 @@ func TestFullAPI(t *testing.T) {
ethservice.TxPool().Add([]*types.Transaction{tx}, true, false)
}
- setupBlocks(t, ethservice, 10, parent, callback, nil)
+ setupBlocks(t, ethservice, 10, parent, callback, nil, nil)
}
-func setupBlocks(t *testing.T, ethservice *eth.Ethereum, n int, parent *types.Header, callback func(parent *types.Header), withdrawals [][]*types.Withdrawal) []*types.Header {
+func setupBlocks(t *testing.T, ethservice *eth.Ethereum, n int, parent *types.Header, callback func(parent *types.Header), withdrawals [][]*types.Withdrawal, beaconRoots []common.Hash) []*types.Header {
api := NewConsensusAPI(ethservice)
var blocks []*types.Header
for i := 0; i < n; i++ {
@@ -495,14 +501,18 @@ func setupBlocks(t *testing.T, ethservice *eth.Ethereum, n int, parent *types.He
if withdrawals != nil {
w = withdrawals[i]
}
+ var h *common.Hash
+ if beaconRoots != nil {
+ h = &beaconRoots[i]
+ }
- payload := getNewPayload(t, api, parent, w)
- execResp, err := api.NewPayloadV2(*payload)
+ payload := getNewPayload(t, api, parent, w, h)
+ execResp, err := api.newPayload(*payload, []common.Hash{}, h)
if err != nil {
t.Fatalf("can't execute payload: %v", err)
}
if execResp.Status != engine.VALID {
- t.Fatalf("invalid status: %v", execResp.Status)
+ t.Fatalf("invalid status: %v %s", execResp.Status, *execResp.ValidationError)
}
fcState := engine.ForkchoiceStateV1{
HeadBlockHash: payload.BlockHash,
@@ -690,10 +700,10 @@ func TestEmptyBlocks(t *testing.T) {
api := NewConsensusAPI(ethservice)
// Setup 10 blocks on the canonical chain
- setupBlocks(t, ethservice, 10, commonAncestor, func(parent *types.Header) {}, nil)
+ setupBlocks(t, ethservice, 10, commonAncestor, func(parent *types.Header) {}, nil, nil)
// (1) check LatestValidHash by sending a normal payload (P1'')
- payload := getNewPayload(t, api, commonAncestor, nil)
+ payload := getNewPayload(t, api, commonAncestor, nil, nil)
status, err := api.NewPayloadV1(*payload)
if err != nil {
@@ -707,7 +717,7 @@ func TestEmptyBlocks(t *testing.T) {
}
// (2) Now send P1' which is invalid
- payload = getNewPayload(t, api, commonAncestor, nil)
+ payload = getNewPayload(t, api, commonAncestor, nil, nil)
payload.GasUsed += 1
payload = setBlockhash(payload)
// Now latestValidHash should be the common ancestor
@@ -725,7 +735,7 @@ func TestEmptyBlocks(t *testing.T) {
}
// (3) Now send a payload with unknown parent
- payload = getNewPayload(t, api, commonAncestor, nil)
+ payload = getNewPayload(t, api, commonAncestor, nil, nil)
payload.ParentHash = common.Hash{1}
payload = setBlockhash(payload)
// Now latestValidHash should be the common ancestor
@@ -741,12 +751,13 @@ func TestEmptyBlocks(t *testing.T) {
}
}
-func getNewPayload(t *testing.T, api *ConsensusAPI, parent *types.Header, withdrawals []*types.Withdrawal) *engine.ExecutableData {
+func getNewPayload(t *testing.T, api *ConsensusAPI, parent *types.Header, withdrawals []*types.Withdrawal, beaconRoot *common.Hash) *engine.ExecutableData {
params := engine.PayloadAttributes{
Timestamp: parent.Time + 1,
Random: crypto.Keccak256Hash([]byte{byte(1)}),
SuggestedFeeRecipient: parent.Coinbase,
Withdrawals: withdrawals,
+ BeaconRoot: beaconRoot,
}
payload, err := assembleBlock(api, parent.Hash(), &params)
@@ -814,7 +825,7 @@ func TestTrickRemoteBlockCache(t *testing.T) {
commonAncestor := ethserviceA.BlockChain().CurrentBlock()
// Setup 10 blocks on the canonical chain
- setupBlocks(t, ethserviceA, 10, commonAncestor, func(parent *types.Header) {}, nil)
+ setupBlocks(t, ethserviceA, 10, commonAncestor, func(parent *types.Header) {}, nil, nil)
commonAncestor = ethserviceA.BlockChain().CurrentBlock()
var invalidChain []*engine.ExecutableData
@@ -823,7 +834,7 @@ func TestTrickRemoteBlockCache(t *testing.T) {
//invalidChain = append(invalidChain, payload1)
// create an invalid payload2 (P2)
- payload2 := getNewPayload(t, apiA, commonAncestor, nil)
+ payload2 := getNewPayload(t, apiA, commonAncestor, nil, nil)
//payload2.ParentHash = payload1.BlockHash
payload2.GasUsed += 1
payload2 = setBlockhash(payload2)
@@ -832,7 +843,7 @@ func TestTrickRemoteBlockCache(t *testing.T) {
head := payload2
// create some valid payloads on top
for i := 0; i < 10; i++ {
- payload := getNewPayload(t, apiA, commonAncestor, nil)
+ payload := getNewPayload(t, apiA, commonAncestor, nil, nil)
payload.ParentHash = head.BlockHash
payload = setBlockhash(payload)
invalidChain = append(invalidChain, payload)
@@ -869,10 +880,10 @@ func TestInvalidBloom(t *testing.T) {
api := NewConsensusAPI(ethservice)
// Setup 10 blocks on the canonical chain
- setupBlocks(t, ethservice, 10, commonAncestor, func(parent *types.Header) {}, nil)
+ setupBlocks(t, ethservice, 10, commonAncestor, func(parent *types.Header) {}, nil, nil)
// (1) check LatestValidHash by sending a normal payload (P1'')
- payload := getNewPayload(t, api, commonAncestor, nil)
+ payload := getNewPayload(t, api, commonAncestor, nil, nil)
payload.LogsBloom = append(payload.LogsBloom, byte(1))
status, err := api.NewPayloadV1(*payload)
if err != nil {
@@ -1285,24 +1296,35 @@ func TestNilWithdrawals(t *testing.T) {
func setupBodies(t *testing.T) (*node.Node, *eth.Ethereum, []*types.Block) {
genesis, blocks := generateMergeChain(10, true)
- // enable shanghai on the last block
+
+ // Enable next forks on the last block.
time := blocks[len(blocks)-1].Header().Time + 1
genesis.Config.ShanghaiTime = &time
+ genesis.Config.CancunTime = &time
+ genesis.Config.PragueTime = &time
+
n, ethservice := startEthService(t, genesis, blocks)
var (
- parent = ethservice.BlockChain().CurrentBlock()
// This EVM code generates a log when the contract is created.
logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
+ parent = ethservice.BlockChain().CurrentBlock()
)
+ // For each block, this callback includes two txs that generate body values such as logs and requests.
callback := func(parent *types.Header) {
- statedb, _ := ethservice.BlockChain().StateAt(parent.Root)
- nonce := statedb.GetNonce(testAddr)
- tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey)
- ethservice.TxPool().Add([]*types.Transaction{tx}, false, false)
+ var (
+ statedb, _ = ethservice.BlockChain().StateAt(parent.Root)
+ // Create tx to trigger log generator.
+ tx1, _ = types.SignTx(types.NewContractCreation(statedb.GetNonce(testAddr), new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey)
+ // Create tx to trigger deposit generator.
+ tx2, _ = types.SignTx(types.NewTransaction(statedb.GetNonce(testAddr)+1, ethservice.APIBackend.ChainConfig().DepositContractAddress, new(big.Int), 500000, big.NewInt(2*params.InitialBaseFee), nil), types.LatestSigner(ethservice.BlockChain().Config()), testKey)
+ )
+ ethservice.TxPool().Add([]*types.Transaction{tx1}, false, false)
+ ethservice.TxPool().Add([]*types.Transaction{tx2}, false, false)
}
+ // Make some withdrawals to include.
withdrawals := make([][]*types.Withdrawal, 10)
withdrawals[0] = nil // should be filtered out by miner
withdrawals[1] = make([]*types.Withdrawal, 0)
@@ -1314,12 +1336,20 @@ func setupBodies(t *testing.T) (*node.Node, *eth.Ethereum, []*types.Block) {
}
}
- postShanghaiHeaders := setupBlocks(t, ethservice, 10, parent, callback, withdrawals)
- postShanghaiBlocks := make([]*types.Block, len(postShanghaiHeaders))
- for i, header := range postShanghaiHeaders {
- postShanghaiBlocks[i] = ethservice.BlockChain().GetBlock(header.Hash(), header.Number.Uint64())
+ // Assign a distinct beacon root to each block.
+ beaconRoots := make([]common.Hash, 10)
+ for i := 0; i < 10; i++ {
+ beaconRoots[i] = common.Hash{byte(i)}
+ }
+
+ // Create the blocks.
+ newHeaders := setupBlocks(t, ethservice, 10, parent, callback, withdrawals, beaconRoots)
+ newBlocks := make([]*types.Block, len(newHeaders))
+ for i, header := range newHeaders {
+ newBlocks[i] = ethservice.BlockChain().GetBlock(header.Hash(), header.Number.Uint64())
}
- return n, ethservice, append(blocks, postShanghaiBlocks...)
+
+ return n, ethservice, append(blocks, newBlocks...)
}
func allHashes(blocks []*types.Block) []common.Hash {
@@ -1384,7 +1414,7 @@ func TestGetBlockBodiesByHash(t *testing.T) {
}
for k, test := range tests {
- result := api.GetPayloadBodiesByHashV1(test.hashes)
+ result := api.GetPayloadBodiesByHashV2(test.hashes)
for i, r := range result {
if !equalBody(test.results[i], r) {
t.Fatalf("test %v: invalid response: expected %+v got %+v", k, test.results[i], r)
@@ -1458,7 +1488,7 @@ func TestGetBlockBodiesByRange(t *testing.T) {
}
for k, test := range tests {
- result, err := api.GetPayloadBodiesByRangeV1(test.start, test.count)
+ result, err := api.GetPayloadBodiesByRangeV2(test.start, test.count)
if err != nil {
t.Fatal(err)
}
@@ -1509,7 +1539,7 @@ func TestGetBlockBodiesByRangeInvalidParams(t *testing.T) {
},
}
for i, tc := range tests {
- result, err := api.GetPayloadBodiesByRangeV1(tc.start, tc.count)
+ result, err := api.GetPayloadBodiesByRangeV2(tc.start, tc.count)
if err == nil {
t.Fatalf("test %d: expected error, got %v", i, result)
}
@@ -1519,7 +1549,7 @@ func TestGetBlockBodiesByRangeInvalidParams(t *testing.T) {
}
}
-func equalBody(a *types.Body, b *engine.ExecutionPayloadBodyV1) bool {
+func equalBody(a *types.Body, b *engine.ExecutionPayloadBody) bool {
if a == nil && b == nil {
return true
} else if a == nil || b == nil {
@@ -1534,7 +1564,23 @@ func equalBody(a *types.Body, b *engine.ExecutionPayloadBodyV1) bool {
return false
}
}
- return reflect.DeepEqual(a.Withdrawals, b.Withdrawals)
+
+ if !reflect.DeepEqual(a.Withdrawals, b.Withdrawals) {
+ return false
+ }
+
+ var deposits types.Deposits
+ if a.Requests != nil {
+ // If requests is non-nil, deposits are enabled for the block, so we should
+ // compare against an empty slice instead of nil when there are no deposits.
+ deposits = make(types.Deposits, 0)
+ }
+ for _, r := range a.Requests {
+ if d, ok := r.Inner().(*types.Deposit); ok {
+ deposits = append(deposits, d)
+ }
+ }
+ return reflect.DeepEqual(deposits, b.Deposits)
}
func TestBlockToPayloadWithBlobs(t *testing.T) {
diff --git a/eth/catalyst/simulated_beacon.go b/eth/catalyst/simulated_beacon.go
index 86355a1533..cb27810aac 100644
--- a/eth/catalyst/simulated_beacon.go
+++ b/eth/catalyst/simulated_beacon.go
@@ -109,7 +109,7 @@ func NewSimulatedBeacon(period uint64, eth *eth.Ethereum) (*SimulatedBeacon, err
// if genesis block, send forkchoiceUpdated to trigger transition to PoS
if block.Number.Sign() == 0 {
- if _, err := engineAPI.ForkchoiceUpdatedV2(current, nil); err != nil {
+ if _, err := engineAPI.ForkchoiceUpdatedV3(current, nil); err != nil {
return nil, err
}
}
@@ -226,7 +226,7 @@ func (c *SimulatedBeacon) sealBlock(withdrawals []*types.Withdrawal, timestamp u
c.setCurrentState(payload.BlockHash, finalizedHash)
// Mark the block containing the payload as canonical
- if _, err = c.engineAPI.ForkchoiceUpdatedV2(c.curForkchoiceState, nil); err != nil {
+ if _, err = c.engineAPI.ForkchoiceUpdatedV3(c.curForkchoiceState, nil); err != nil {
return err
}
c.lastBlockTime = payload.Timestamp
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 0cbddee6bf..0f81e152ef 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -68,7 +68,7 @@ func newTesterWithNotification(t *testing.T, success func()) *downloadTester {
Alloc: types.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
- chain, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ chain, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
if err != nil {
panic(err)
}
@@ -230,6 +230,7 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et
txsHashes = make([]common.Hash, len(bodies))
uncleHashes = make([]common.Hash, len(bodies))
withdrawalHashes = make([]common.Hash, len(bodies))
+ requestsHashes = make([]common.Hash, len(bodies))
)
hasher := trie.NewStackTrie(nil)
for i, body := range bodies {
@@ -248,7 +249,7 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et
res := &eth.Response{
Req: req,
Res: (*eth.BlockBodiesResponse)(&bodies),
- Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes},
+ Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes, requestsHashes},
Time: 1,
Done: make(chan error, 1), // Ignore the returned status
}
diff --git a/eth/downloader/fetchers_concurrent_bodies.go b/eth/downloader/fetchers_concurrent_bodies.go
index 56359b33c9..709df77575 100644
--- a/eth/downloader/fetchers_concurrent_bodies.go
+++ b/eth/downloader/fetchers_concurrent_bodies.go
@@ -88,10 +88,10 @@ func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan
// deliver is responsible for taking a generic response packet from the concurrent
// fetcher, unpacking the body data and delivering it to the downloader's queue.
func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
- txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesResponse).Unpack()
- hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes}
+ txs, uncles, withdrawals, requests := packet.Res.(*eth.BlockBodiesResponse).Unpack()
+ hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes, requests hashes}
- accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2])
+ accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2], requests, hashsets[3])
switch {
case err == nil && len(txs) == 0:
peer.log.Trace("Requested bodies delivered")
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index 5441ad1187..adad450200 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -784,7 +784,8 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, hashes []comm
// also wakes any threads waiting for data delivery.
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListHashes []common.Hash,
uncleLists [][]*types.Header, uncleListHashes []common.Hash,
- withdrawalLists [][]*types.Withdrawal, withdrawalListHashes []common.Hash) (int, error) {
+ withdrawalLists [][]*types.Withdrawal, withdrawalListHashes []common.Hash,
+ requestsLists [][]*types.Request, requestsListHashes []common.Hash) (int, error) {
q.lock.Lock()
defer q.lock.Unlock()
@@ -808,6 +809,19 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListH
return errInvalidBody
}
}
+ if header.RequestsHash == nil {
+ // nil hash means that requests should not be present in body
+ if requestsLists[index] != nil {
+ return errInvalidBody
+ }
+ } else { // non-nil hash: body must have requests
+ if requestsLists[index] == nil {
+ return errInvalidBody
+ }
+ if requestsListHashes[index] != *header.RequestsHash {
+ return errInvalidBody
+ }
+ }
// Blocks must have a number of blobs corresponding to the header gas usage,
// and zero before the Cancun hardfork.
var blobs int
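The new requests check mirrors the existing withdrawals check: presence must match the header and, when present, the derived hash must match too. A hedged sketch of that rule as an isolated predicate (the helper name is invented for illustration):

// Sketch of the consistency rule enforced above: a delivered body is only
// acceptable when the presence and the derived hash of its requests list
// agree with the header's RequestsHash.
func requestsMatchHeader(header *types.Header, requests []*types.Request, derived common.Hash) bool {
	if header.RequestsHash == nil {
		return requests == nil // pre-Prague header: the body must carry no requests
	}
	return requests != nil && derived == *header.RequestsHash
}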
diff --git a/eth/downloader/queue_test.go b/eth/downloader/queue_test.go
index 857ac4813a..e29d23f80b 100644
--- a/eth/downloader/queue_test.go
+++ b/eth/downloader/queue_test.go
@@ -341,7 +341,7 @@ func XTestDelivery(t *testing.T) {
uncleHashes[i] = types.CalcUncleHash(uncles)
}
time.Sleep(100 * time.Millisecond)
- _, err := q.DeliverBodies(peer.id, txset, txsHashes, uncleset, uncleHashes, nil, nil)
+ _, err := q.DeliverBodies(peer.id, txset, txsHashes, uncleset, uncleHashes, nil, nil, nil, nil)
if err != nil {
fmt.Printf("delivered %d bodies %v\n", len(txset), err)
}
diff --git a/eth/downloader/testchain_test.go b/eth/downloader/testchain_test.go
index 6043f51372..8fa2e83413 100644
--- a/eth/downloader/testchain_test.go
+++ b/eth/downloader/testchain_test.go
@@ -217,7 +217,7 @@ func newTestBlockchain(blocks []*types.Block) *core.BlockChain {
if pregenerated {
panic("Requested chain generation outside of init")
}
- chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, testGspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, testGspec, nil, ethash.NewFaker(), vm.Config{}, nil)
if err != nil {
panic(err)
}
diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go
index a113155009..97d1e29862 100644
--- a/eth/fetcher/tx_fetcher.go
+++ b/eth/fetcher/tx_fetcher.go
@@ -17,7 +17,6 @@
package fetcher
import (
- "bytes"
"errors"
"fmt"
"math"
@@ -35,7 +34,7 @@ import (
)
const (
- // maxTxAnnounces is the maximum number of unique transaction a peer
+ // maxTxAnnounces is the maximum number of unique transactions a peer
// can announce in a short time.
maxTxAnnounces = 4096
@@ -114,16 +113,23 @@ var errTerminated = errors.New("terminated")
type txAnnounce struct {
origin string // Identifier of the peer originating the notification
hashes []common.Hash // Batch of transaction hashes being announced
- metas []*txMetadata // Batch of metadata associated with the hashes
+ metas []txMetadata // Batch of metadata associated with the hashes
}
-// txMetadata is a set of extra data transmitted along the announcement for better
-// fetch scheduling.
+// txMetadata provides the extra data transmitted along with the announcement
+// for better fetch scheduling.
type txMetadata struct {
kind byte // Transaction consensus type
size uint32 // Transaction size in bytes
}
+// txMetadataWithSeq is a wrapper of transaction metadata with an extra field
+// tracking the transaction sequence number.
+type txMetadataWithSeq struct {
+ txMetadata
+ seq uint64
+}
+
// txRequest represents an in-flight transaction retrieval request destined to
// a specific peers.
type txRequest struct {
@@ -159,7 +165,7 @@ type txDrop struct {
// The invariants of the fetcher are:
// - Each tracked transaction (hash) must only be present in one of the
// three stages. This ensures that the fetcher operates akin to a finite
-// state automata and there's do data leak.
+// state automata and there's no data leak.
// - Each peer that announced transactions may be scheduled retrievals, but
// only ever one concurrently. This ensures we can immediately know what is
// missing from a reply and reschedule it.
@@ -169,18 +175,19 @@ type TxFetcher struct {
drop chan *txDrop
quit chan struct{}
+ txSeq uint64 // Unique transaction sequence number
underpriced *lru.Cache[common.Hash, time.Time] // Transactions discarded as too cheap (don't re-fetch)
// Stage 1: Waiting lists for newly discovered transactions that might be
// broadcast without needing explicit request/reply round trips.
- waitlist map[common.Hash]map[string]struct{} // Transactions waiting for an potential broadcast
- waittime map[common.Hash]mclock.AbsTime // Timestamps when transactions were added to the waitlist
- waitslots map[string]map[common.Hash]*txMetadata // Waiting announcements grouped by peer (DoS protection)
+ waitlist map[common.Hash]map[string]struct{} // Transactions waiting for a potential broadcast
+ waittime map[common.Hash]mclock.AbsTime // Timestamps when transactions were added to the waitlist
+ waitslots map[string]map[common.Hash]*txMetadataWithSeq // Waiting announcements grouped by peer (DoS protection)
// Stage 2: Queue of transactions that waiting to be allocated to some peer
// to be retrieved directly.
- announces map[string]map[common.Hash]*txMetadata // Set of announced transactions, grouped by origin peer
- announced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash
+ announces map[string]map[common.Hash]*txMetadataWithSeq // Set of announced transactions, grouped by origin peer
+ announced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash
// Stage 3: Set of transactions currently being retrieved, some which may be
// fulfilled and some rescheduled. Note, this step shares 'announces' from the
@@ -218,8 +225,8 @@ func NewTxFetcherForTests(
quit: make(chan struct{}),
waitlist: make(map[common.Hash]map[string]struct{}),
waittime: make(map[common.Hash]mclock.AbsTime),
- waitslots: make(map[string]map[common.Hash]*txMetadata),
- announces: make(map[string]map[common.Hash]*txMetadata),
+ waitslots: make(map[string]map[common.Hash]*txMetadataWithSeq),
+ announces: make(map[string]map[common.Hash]*txMetadataWithSeq),
announced: make(map[common.Hash]map[string]struct{}),
fetching: make(map[common.Hash]string),
requests: make(map[string]*txRequest),
@@ -247,7 +254,7 @@ func (f *TxFetcher) Notify(peer string, types []byte, sizes []uint32, hashes []c
// loop, so anything caught here is time saved internally.
var (
unknownHashes = make([]common.Hash, 0, len(hashes))
- unknownMetas = make([]*txMetadata, 0, len(hashes))
+ unknownMetas = make([]txMetadata, 0, len(hashes))
duplicate int64
underpriced int64
@@ -264,7 +271,7 @@ func (f *TxFetcher) Notify(peer string, types []byte, sizes []uint32, hashes []c
// Transaction metadata has been available since eth68, and all
// legacy eth protocols (prior to eth68) have been deprecated.
// Therefore, metadata is always expected in the announcement.
- unknownMetas = append(unknownMetas, &txMetadata{kind: types[i], size: sizes[i]})
+ unknownMetas = append(unknownMetas, txMetadata{kind: types[i], size: sizes[i]})
}
}
txAnnounceKnownMeter.Mark(duplicate)
@@ -431,9 +438,19 @@ func (f *TxFetcher) loop() {
ann.metas = ann.metas[:want-maxTxAnnounces]
}
// All is well, schedule the remainder of the transactions
- idleWait := len(f.waittime) == 0
- _, oldPeer := f.announces[ann.origin]
-
+ var (
+ idleWait = len(f.waittime) == 0
+ _, oldPeer = f.announces[ann.origin]
+ hasBlob bool
+
+ // nextSeq returns the next available sequence number for tagging a
+ // transaction announcement and also bumps it internally.
+ nextSeq = func() uint64 {
+ seq := f.txSeq
+ f.txSeq++
+ return seq
+ }
+ )
for i, hash := range ann.hashes {
// If the transaction is already downloading, add it to the list
// of possible alternates (in case the current retrieval fails) and
@@ -443,9 +460,17 @@ func (f *TxFetcher) loop() {
// Stage 2 and 3 share the set of origins per tx
if announces := f.announces[ann.origin]; announces != nil {
- announces[hash] = ann.metas[i]
+ announces[hash] = &txMetadataWithSeq{
+ txMetadata: ann.metas[i],
+ seq: nextSeq(),
+ }
} else {
- f.announces[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]}
+ f.announces[ann.origin] = map[common.Hash]*txMetadataWithSeq{
+ hash: {
+ txMetadata: ann.metas[i],
+ seq: nextSeq(),
+ },
+ }
}
continue
}
@@ -456,9 +481,17 @@ func (f *TxFetcher) loop() {
// Stage 2 and 3 share the set of origins per tx
if announces := f.announces[ann.origin]; announces != nil {
- announces[hash] = ann.metas[i]
+ announces[hash] = &txMetadataWithSeq{
+ txMetadata: ann.metas[i],
+ seq: nextSeq(),
+ }
} else {
- f.announces[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]}
+ f.announces[ann.origin] = map[common.Hash]*txMetadataWithSeq{
+ hash: {
+ txMetadata: ann.metas[i],
+ seq: nextSeq(),
+ },
+ }
}
continue
}
@@ -475,24 +508,47 @@ func (f *TxFetcher) loop() {
f.waitlist[hash][ann.origin] = struct{}{}
if waitslots := f.waitslots[ann.origin]; waitslots != nil {
- waitslots[hash] = ann.metas[i]
+ waitslots[hash] = &txMetadataWithSeq{
+ txMetadata: ann.metas[i],
+ seq: nextSeq(),
+ }
} else {
- f.waitslots[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]}
+ f.waitslots[ann.origin] = map[common.Hash]*txMetadataWithSeq{
+ hash: {
+ txMetadata: ann.metas[i],
+ seq: nextSeq(),
+ },
+ }
}
continue
}
// Transaction unknown to the fetcher, insert it into the waiting list
f.waitlist[hash] = map[string]struct{}{ann.origin: {}}
- f.waittime[hash] = f.clock.Now()
+ // Assign the current timestamp as the wait time, but for blob transactions,
+ // skip the wait time since they are only announced.
+ if ann.metas[i].kind != types.BlobTxType {
+ f.waittime[hash] = f.clock.Now()
+ } else {
+ hasBlob = true
+ f.waittime[hash] = f.clock.Now() - mclock.AbsTime(txArriveTimeout)
+ }
if waitslots := f.waitslots[ann.origin]; waitslots != nil {
- waitslots[hash] = ann.metas[i]
+ waitslots[hash] = &txMetadataWithSeq{
+ txMetadata: ann.metas[i],
+ seq: nextSeq(),
+ }
} else {
- f.waitslots[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]}
+ f.waitslots[ann.origin] = map[common.Hash]*txMetadataWithSeq{
+ hash: {
+ txMetadata: ann.metas[i],
+ seq: nextSeq(),
+ },
+ }
}
}
// If a new item was added to the waitlist, schedule it into the fetcher
- if idleWait && len(f.waittime) > 0 {
+ if hasBlob || (idleWait && len(f.waittime) > 0) {
f.rescheduleWait(waitTimer, waitTrigger)
}
// If this peer is new and announced something already queued, maybe
@@ -516,7 +572,7 @@ func (f *TxFetcher) loop() {
if announces := f.announces[peer]; announces != nil {
announces[hash] = f.waitslots[peer][hash]
} else {
- f.announces[peer] = map[common.Hash]*txMetadata{hash: f.waitslots[peer][hash]}
+ f.announces[peer] = map[common.Hash]*txMetadataWithSeq{hash: f.waitslots[peer][hash]}
}
delete(f.waitslots[peer], hash)
if len(f.waitslots[peer]) == 0 {
@@ -873,7 +929,7 @@ func (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{},
hashes = make([]common.Hash, 0, maxTxRetrievals)
bytes uint64
)
- f.forEachAnnounce(f.announces[peer], func(hash common.Hash, meta *txMetadata) bool {
+ f.forEachAnnounce(f.announces[peer], func(hash common.Hash, meta txMetadata) bool {
// If the transaction is already fetching, skip to the next one
if _, ok := f.fetching[hash]; ok {
return true
@@ -938,28 +994,26 @@ func (f *TxFetcher) forEachPeer(peers map[string]struct{}, do func(peer string))
}
}
-// forEachAnnounce does a range loop over a map of announcements in production,
-// but during testing it does a deterministic sorted random to allow reproducing
-// issues.
-func (f *TxFetcher) forEachAnnounce(announces map[common.Hash]*txMetadata, do func(hash common.Hash, meta *txMetadata) bool) {
- // If we're running production, use whatever Go's map gives us
- if f.rand == nil {
- for hash, meta := range announces {
- if !do(hash, meta) {
- return
- }
- }
- return
+// forEachAnnounce loops over the given announcements in arrival order, invoking
+// the do function for each until it returns false. We enforce an arrival
+// ordering to minimize the chances of transaction nonce-gaps, which result in
+// transactions being rejected by the txpool.
+func (f *TxFetcher) forEachAnnounce(announces map[common.Hash]*txMetadataWithSeq, do func(hash common.Hash, meta txMetadata) bool) {
+ type announcement struct {
+ hash common.Hash
+ meta txMetadata
+ seq uint64
}
- // We're running the test suite, make iteration deterministic
- list := make([]common.Hash, 0, len(announces))
- for hash := range announces {
- list = append(list, hash)
+ // Process announcements by their arrival order
+ list := make([]announcement, 0, len(announces))
+ for hash, entry := range announces {
+ list = append(list, announcement{hash: hash, meta: entry.txMetadata, seq: entry.seq})
}
- sortHashes(list)
- rotateHashes(list, f.rand.Intn(len(list)))
- for _, hash := range list {
- if !do(hash, announces[hash]) {
+ sort.Slice(list, func(i, j int) bool {
+ return list[i].seq < list[j].seq
+ })
+ for i := range list {
+ if !do(list[i].hash, list[i].meta) {
return
}
}
@@ -975,26 +1029,3 @@ func rotateStrings(slice []string, n int) {
slice[i] = orig[(i+n)%len(orig)]
}
}
-
-// sortHashes sorts a slice of hashes. This method is only used in tests in order
-// to simulate random map iteration but keep it deterministic.
-func sortHashes(slice []common.Hash) {
- for i := 0; i < len(slice); i++ {
- for j := i + 1; j < len(slice); j++ {
- if bytes.Compare(slice[i][:], slice[j][:]) > 0 {
- slice[i], slice[j] = slice[j], slice[i]
- }
- }
- }
-}
-
-// rotateHashes rotates the contents of a slice by n steps. This method is only
-// used in tests to simulate random map iteration but keep it deterministic.
-func rotateHashes(slice []common.Hash, n int) {
- orig := make([]common.Hash, len(slice))
- copy(orig, slice)
-
- for i := 0; i < len(orig); i++ {
- slice[i] = orig[(i+n)%len(orig)]
- }
-}
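The deterministic-random iteration helpers are no longer needed because announcements now carry a sequence number and are replayed in arrival order. A self-contained sketch of that ordering (plain Go; the names are illustrative and not from the patch):

package main

import (
	"fmt"
	"sort"
)

// Each announcement is tagged with a monotonically increasing sequence number
// at arrival time; iterating in that order preserves a sender's nonce order
// and so avoids nonce-gap rejections in the txpool.
type announcement struct {
	hash byte   // stand-in for common.Hash
	seq  uint64 // assigned when the announcement arrived
}

func main() {
	list := []announcement{{0x02, 7}, {0x01, 3}, {0x03, 9}}
	sort.Slice(list, func(i, j int) bool { return list[i].seq < list[j].seq })
	for _, a := range list {
		fmt.Printf("fetch 0x%02x (announced #%d)\n", a.hash, a.seq)
	}
}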
diff --git a/eth/fetcher/tx_fetcher_test.go b/eth/fetcher/tx_fetcher_test.go
index 0b47646669..f80b1d6096 100644
--- a/eth/fetcher/tx_fetcher_test.go
+++ b/eth/fetcher/tx_fetcher_test.go
@@ -701,7 +701,7 @@ func TestTransactionFetcherMissingRescheduling(t *testing.T) {
},
// Deliver the middle transaction requested, the one before which
// should be dropped and the one after re-requested.
- doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true}, // This depends on the deterministic random
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[1]}, direct: true},
isScheduled{
tracking: map[string][]announce{
"A": {
@@ -1070,7 +1070,7 @@ func TestTransactionFetcherRateLimiting(t *testing.T) {
"A": announces,
},
fetching: map[string][]common.Hash{
- "A": hashes[1643 : 1643+maxTxRetrievals],
+ "A": hashes[:maxTxRetrievals],
},
},
},
@@ -1130,9 +1130,9 @@ func TestTransactionFetcherBandwidthLimiting(t *testing.T) {
},
},
fetching: map[string][]common.Hash{
- "A": {{0x02}, {0x03}, {0x04}},
- "B": {{0x06}},
- "C": {{0x08}},
+ "A": {{0x01}, {0x02}, {0x03}},
+ "B": {{0x05}},
+ "C": {{0x07}},
},
},
},
@@ -1209,8 +1209,8 @@ func TestTransactionFetcherDoSProtection(t *testing.T) {
"B": announceB[:maxTxAnnounces/2-1],
},
fetching: map[string][]common.Hash{
- "A": hashesA[1643 : 1643+maxTxRetrievals],
- "B": append(append([]common.Hash{}, hashesB[maxTxAnnounces/2-3:maxTxAnnounces/2-1]...), hashesB[:maxTxRetrievals-2]...),
+ "A": hashesA[:maxTxRetrievals],
+ "B": hashesB[:maxTxRetrievals],
},
},
// Ensure that adding even one more hash results in dropping the hash
@@ -1227,8 +1227,8 @@ func TestTransactionFetcherDoSProtection(t *testing.T) {
"B": announceB[:maxTxAnnounces/2-1],
},
fetching: map[string][]common.Hash{
- "A": hashesA[1643 : 1643+maxTxRetrievals],
- "B": append(append([]common.Hash{}, hashesB[maxTxAnnounces/2-3:maxTxAnnounces/2-1]...), hashesB[:maxTxRetrievals-2]...),
+ "A": hashesA[:maxTxRetrievals],
+ "B": hashesB[:maxTxRetrievals],
},
},
},
@@ -1759,6 +1759,76 @@ func TestTransactionFetcherFuzzCrash04(t *testing.T) {
})
}
+// This test ensures the blob transactions will be scheduled for fetching
+// once they are announced in the network.
+func TestBlobTransactionAnnounce(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ nil,
+ func(string, []common.Hash) error { return nil },
+ nil,
+ )
+ },
+ steps: []interface{}{
+ // Initial announcement to get something into the waitlist
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{111, 222}},
+ isWaiting(map[string][]announce{
+ "A": {
+ {common.Hash{0x01}, types.LegacyTxType, 111},
+ {common.Hash{0x02}, types.LegacyTxType, 222},
+ },
+ }),
+ // Announce a blob transaction
+ doTxNotify{peer: "B", hashes: []common.Hash{{0x03}}, types: []byte{types.BlobTxType}, sizes: []uint32{333}},
+ isWaiting(map[string][]announce{
+ "A": {
+ {common.Hash{0x01}, types.LegacyTxType, 111},
+ {common.Hash{0x02}, types.LegacyTxType, 222},
+ },
+ "B": {
+ {common.Hash{0x03}, types.BlobTxType, 333},
+ },
+ }),
+ doWait{time: 0, step: true}, // zero time, but the blob fetching should be scheduled
+ isWaiting(map[string][]announce{
+ "A": {
+ {common.Hash{0x01}, types.LegacyTxType, 111},
+ {common.Hash{0x02}, types.LegacyTxType, 222},
+ },
+ }),
+ isScheduled{
+ tracking: map[string][]announce{
+ "B": {
+ {common.Hash{0x03}, types.BlobTxType, 333},
+ },
+ },
+ fetching: map[string][]common.Hash{ // Order follows announcement arrival
+ "B": {{0x03}},
+ },
+ },
+ doWait{time: txArriveTimeout, step: true}, // wait out the arrival timeout so the legacy transactions get scheduled too
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]announce{
+ "A": {
+ {common.Hash{0x01}, types.LegacyTxType, 111},
+ {common.Hash{0x02}, types.LegacyTxType, 222},
+ },
+ "B": {
+ {common.Hash{0x03}, types.BlobTxType, 333},
+ },
+ },
+ fetching: map[string][]common.Hash{ // Order follows announcement arrival
+ "A": {{0x01}, {0x02}},
+ "B": {{0x03}},
+ },
+ },
+ },
+ })
+}
+
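The doWait{time: 0} step in this test works because the fetcher back-dates the wait timestamp of blob announcements, as introduced earlier in this diff. A hedged fragment restating that mechanism (identifiers taken from the diff; the timer comparison is assumed from the surrounding fetcher code):

// By recording the entry as if it arrived txArriveTimeout ago, the next wait
// timer tick already treats the blob announcement as expired and promotes it
// to fetching, while legacy announcements still wait out the full timeout.
if kind == types.BlobTxType {
	f.waittime[hash] = f.clock.Now() - mclock.AbsTime(txArriveTimeout)
} else {
	f.waittime[hash] = f.clock.Now()
}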
func testTransactionFetcherParallel(t *testing.T, tt txFetcherTest) {
t.Parallel()
testTransactionFetcher(t, tt)
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go
index 013b9f7bc2..1d52afb282 100644
--- a/eth/filters/filter_system_test.go
+++ b/eth/filters/filter_system_test.go
@@ -601,7 +601,9 @@ func TestPendingTxFilterDeadlock(t *testing.T) {
subs := make([]*Subscription, 20)
for i := 0; i < len(subs); i++ {
fid := api.NewPendingTransactionFilter(nil)
+ api.filtersMu.Lock()
f, ok := api.filters[fid]
+ api.filtersMu.Unlock()
if !ok {
t.Fatalf("Filter %s should exist", fid)
}
diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go
index 2b3efb51b1..d8b703fee4 100644
--- a/eth/filters/filter_test.go
+++ b/eth/filters/filter_test.go
@@ -250,7 +250,7 @@ func TestFilters(t *testing.T) {
}
})
var l uint64
- bc, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
+ bc, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, &l)
if err != nil {
t.Fatal(err)
}
diff --git a/eth/gasestimator/gasestimator.go b/eth/gasestimator/gasestimator.go
index ac3b59e97e..d43057dda2 100644
--- a/eth/gasestimator/gasestimator.go
+++ b/eth/gasestimator/gasestimator.go
@@ -221,8 +221,16 @@ func run(ctx context.Context, call *core.Message, opts *Options) (*core.Executio
evmContext = core.NewEVMBlockContext(opts.Header, opts.Chain, nil)
dirtyState = opts.State.Copy()
- evm = vm.NewEVM(evmContext, msgContext, dirtyState, opts.Config, vm.Config{NoBaseFee: true})
)
+ // Lower the basefee to 0 to avoid breaking EVM
+ // invariants (basefee < feecap).
+ if msgContext.GasPrice.Sign() == 0 {
+ evmContext.BaseFee = new(big.Int)
+ }
+ if msgContext.BlobFeeCap != nil && msgContext.BlobFeeCap.BitLen() == 0 {
+ evmContext.BlobBaseFee = new(big.Int)
+ }
+ evm := vm.NewEVM(evmContext, msgContext, dirtyState, opts.Config, vm.Config{NoBaseFee: true})
// Monitor the outer context and interrupt the EVM upon cancellation. To avoid
// a dangling goroutine until the outer estimation finishes, create an internal
// context for the lifetime of this method call.
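Constructing the EVM only after the context has been adjusted lets zero-priced estimation calls keep the basefee invariant. A hedged sketch of that adjustment in isolation (the helper name is invented; the field names match vm.BlockContext):

// For calls with a zero gas price (or zero blob fee cap), the block context
// fees are zeroed as well, so basefee <= feecap keeps holding inside the EVM
// even with NoBaseFee enabled.
func relaxFeesForEstimation(blockCtx *vm.BlockContext, gasPrice, blobFeeCap *big.Int) {
	if gasPrice.Sign() == 0 {
		blockCtx.BaseFee = new(big.Int)
	}
	if blobFeeCap != nil && blobFeeCap.BitLen() == 0 {
		blockCtx.BlobBaseFee = new(big.Int)
	}
}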
diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go
index 39f3c79b98..fdba2e584b 100644
--- a/eth/gasprice/gasprice_test.go
+++ b/eth/gasprice/gasprice_test.go
@@ -210,7 +210,7 @@ func newTestBackend(t *testing.T, londonBlock *big.Int, cancunBlock *big.Int, pe
})
// Construct testing chain
gspec.Config.TerminalTotalDifficulty = new(big.Int).SetUint64(td)
- chain, err := core.NewBlockChain(db, &core.CacheConfig{TrieCleanNoPrefetch: true}, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := core.NewBlockChain(db, &core.CacheConfig{TrieCleanNoPrefetch: true}, gspec, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("Failed to create local chain, %v", err)
}
diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go
index a38059ca95..c41c9abc26 100644
--- a/eth/handler_eth_test.go
+++ b/eth/handler_eth_test.go
@@ -98,8 +98,8 @@ func testForkIDSplit(t *testing.T, protocol uint) {
gspecNoFork = &core.Genesis{Config: configNoFork}
gspecProFork = &core.Genesis{Config: configProFork}
- chainNoFork, _ = core.NewBlockChain(dbNoFork, nil, gspecNoFork, nil, engine, vm.Config{}, nil, nil)
- chainProFork, _ = core.NewBlockChain(dbProFork, nil, gspecProFork, nil, engine, vm.Config{}, nil, nil)
+ chainNoFork, _ = core.NewBlockChain(dbNoFork, nil, gspecNoFork, nil, engine, vm.Config{}, nil)
+ chainProFork, _ = core.NewBlockChain(dbProFork, nil, gspecProFork, nil, engine, vm.Config{}, nil)
_, blocksNoFork, _ = core.GenerateChainWithGenesis(gspecNoFork, engine, 2, nil)
_, blocksProFork, _ = core.GenerateChainWithGenesis(gspecProFork, engine, 2, nil)
diff --git a/eth/handler_test.go b/eth/handler_test.go
index bcc8ea30e4..7b250df2e9 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -151,7 +151,7 @@ func newTestHandlerWithBlocks(blocks int) *testHandler {
Config: params.TestChainConfig,
Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}},
}
- chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
_, bs, _ := core.GenerateChainWithGenesis(gspec, ethash.NewFaker(), blocks, nil)
if _, err := chain.InsertChain(bs); err != nil {
diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go
index 934dadc9a5..fc82b42947 100644
--- a/eth/protocols/eth/handler_test.go
+++ b/eth/protocols/eth/handler_test.go
@@ -104,7 +104,7 @@ func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int,
Config: config,
Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(100_000_000_000_000_000)}},
}
- chain, _ := core.NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, _ := core.NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil)
_, bs, _ := core.GenerateChainWithGenesis(gspec, engine, blocks, generator)
if _, err := chain.InsertChain(bs); err != nil {
diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go
index b3886270f3..951352319f 100644
--- a/eth/protocols/eth/handlers.go
+++ b/eth/protocols/eth/handlers.go
@@ -316,6 +316,7 @@ func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
txsHashes = make([]common.Hash, len(res.BlockBodiesResponse))
uncleHashes = make([]common.Hash, len(res.BlockBodiesResponse))
withdrawalHashes = make([]common.Hash, len(res.BlockBodiesResponse))
+ requestsHashes = make([]common.Hash, len(res.BlockBodiesResponse))
)
hasher := trie.NewStackTrie(nil)
for i, body := range res.BlockBodiesResponse {
@@ -324,8 +325,11 @@ func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
if body.Withdrawals != nil {
withdrawalHashes[i] = types.DeriveSha(types.Withdrawals(body.Withdrawals), hasher)
}
+ if body.Requests != nil {
+ requestsHashes[i] = types.DeriveSha(types.Requests(body.Requests), hasher)
+ }
}
- return [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes}
+ return [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes, requestsHashes}
}
return peer.dispatchResponse(&Response{
id: res.RequestId,
diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go
index c5cb2dd1dc..cbc895eabb 100644
--- a/eth/protocols/eth/protocol.go
+++ b/eth/protocols/eth/protocol.go
@@ -224,21 +224,22 @@ type BlockBody struct {
Transactions []*types.Transaction // Transactions contained within a block
Uncles []*types.Header // Uncles contained within a block
Withdrawals []*types.Withdrawal `rlp:"optional"` // Withdrawals contained within a block
+ Requests []*types.Request `rlp:"optional"` // Requests contained within a block
}
// Unpack retrieves the transactions and uncles from the range packet and returns
// them in a split flat format that's more consistent with the internal data structures.
-func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal) {
- // TODO(matt): add support for withdrawals to fetchers
+func (p *BlockBodiesResponse) Unpack() ([][]*types.Transaction, [][]*types.Header, [][]*types.Withdrawal, [][]*types.Request) {
var (
txset = make([][]*types.Transaction, len(*p))
uncleset = make([][]*types.Header, len(*p))
withdrawalset = make([][]*types.Withdrawal, len(*p))
+ requestset = make([][]*types.Request, len(*p))
)
for i, body := range *p {
- txset[i], uncleset[i], withdrawalset[i] = body.Transactions, body.Uncles, body.Withdrawals
+ txset[i], uncleset[i], withdrawalset[i], requestset[i] = body.Transactions, body.Uncles, body.Withdrawals, body.Requests
}
- return txset, uncleset, withdrawalset
+ return txset, uncleset, withdrawalset, requestset
}
// GetReceiptsRequest represents a block receipts query.
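Because the new Requests field carries the rlp:"optional" tag, the change stays wire-compatible. A hedged sketch of the assumed encoding behavior (a trailing optional field equal to its zero value is omitted):

// With Requests nil, the encoded body is still the familiar three-element
// [transactions, uncles, withdrawals] list, so pre-Prague peers decode it
// unchanged; post-Prague bodies simply append a fourth list.
body := BlockBody{Transactions: txs, Uncles: nil, Withdrawals: wds, Requests: nil}
enc, err := rlp.EncodeToBytes(&body) // no fourth element emitted for nil Requests
_, _ = enc, err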
diff --git a/eth/protocols/snap/handler_fuzzing_test.go b/eth/protocols/snap/handler_fuzzing_test.go
index 4e234ad21b..777db6387c 100644
--- a/eth/protocols/snap/handler_fuzzing_test.go
+++ b/eth/protocols/snap/handler_fuzzing_test.go
@@ -126,7 +126,7 @@ func getChain() *core.BlockChain {
SnapshotWait: true,
}
trieRoot = blocks[len(blocks)-1].Root()
- bc, _ := core.NewBlockChain(rawdb.NewMemoryDatabase(), cacheConf, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ bc, _ := core.NewBlockChain(rawdb.NewMemoryDatabase(), cacheConf, gspec, nil, ethash.NewFaker(), vm.Config{}, nil)
if _, err := bc.InsertChain(blocks); err != nil {
panic(err)
}
diff --git a/eth/state_accessor.go b/eth/state_accessor.go
index 90f7c01391..cb5a233a83 100644
--- a/eth/state_accessor.go
+++ b/eth/state_accessor.go
@@ -68,8 +68,9 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
// the internal junks created by tracing will be persisted into the disk.
// TODO(rjl493456442), clean cache is disabled to prevent memory leak,
// please re-enable it for better performance.
- database = state.NewDatabaseWithConfig(eth.chainDb, triedb.HashDefaults)
- if statedb, err = state.New(block.Root(), database, nil); err == nil {
+ tdb := triedb.NewDatabase(eth.chainDb, triedb.HashDefaults)
+ database = state.NewDatabase(tdb, nil)
+ if statedb, err = state.New(block.Root(), database); err == nil {
log.Info("Found disk backend for state trie", "root", block.Root(), "number", block.Number())
return statedb, noopReleaser, nil
}
@@ -86,13 +87,13 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
// TODO(rjl493456442), clean cache is disabled to prevent memory leak,
// please re-enable it for better performance.
tdb = triedb.NewDatabase(eth.chainDb, triedb.HashDefaults)
- database = state.NewDatabaseWithNodeDB(eth.chainDb, tdb)
+ database = state.NewDatabase(tdb, nil)
// If we didn't check the live database, do check state over ephemeral database,
// otherwise we would rewind past a persisted block (specific corner case is
// chain tracing from the genesis).
if !readOnly {
- statedb, err = state.New(current.Root(), database, nil)
+ statedb, err = state.New(current.Root(), database)
if err == nil {
return statedb, noopReleaser, nil
}
@@ -111,7 +112,7 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
}
current = parent
- statedb, err = state.New(current.Root(), database, nil)
+ statedb, err = state.New(current.Root(), database)
if err == nil {
break
}
@@ -146,7 +147,7 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
if current = eth.blockchain.GetBlockByNumber(next); current == nil {
return nil, nil, fmt.Errorf("block #%d not found", next)
}
- _, _, _, err := eth.blockchain.Processor().Process(current, statedb, vm.Config{})
+ _, err := eth.blockchain.Processor().Process(current, statedb, vm.Config{})
if err != nil {
return nil, nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err)
}
@@ -156,7 +157,7 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
return nil, nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w",
current.NumberU64(), current.Root().Hex(), err)
}
- statedb, err = state.New(root, database, nil)
+ statedb, err = state.New(root, database)
if err != nil {
return nil, nil, fmt.Errorf("state reset after block %d failed: %v", current.NumberU64(), err)
}
diff --git a/eth/tracers/api.go b/eth/tracers/api.go
index 9ee108d0f1..a828951206 100644
--- a/eth/tracers/api.go
+++ b/eth/tracers/api.go
@@ -22,6 +22,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "math/big"
"os"
"runtime"
"sync"
@@ -955,20 +956,31 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc
vmctx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
// Apply the customization rules if required.
if config != nil {
- if err := config.StateOverrides.Apply(statedb); err != nil {
+ config.BlockOverrides.Apply(&vmctx)
+ rules := api.backend.ChainConfig().Rules(vmctx.BlockNumber, vmctx.Random != nil, vmctx.Time)
+
+ precompiles := vm.ActivePrecompiledContracts(rules)
+ if err := config.StateOverrides.Apply(statedb, precompiles); err != nil {
return nil, err
}
- config.BlockOverrides.Apply(&vmctx)
}
// Execute the trace
if err := args.CallDefaults(api.backend.RPCGasCap(), vmctx.BaseFee, api.backend.ChainConfig().ChainID); err != nil {
return nil, err
}
var (
- msg = args.ToMessage(vmctx.BaseFee)
- tx = args.ToTransaction()
+ msg = args.ToMessage(vmctx.BaseFee, true, true)
+ tx = args.ToTransaction(types.LegacyTxType)
traceConfig *TraceConfig
)
+ // Lower the basefee to 0 to avoid breaking EVM
+ // invariants (basefee < feecap).
+ if msg.GasPrice.Sign() == 0 {
+ vmctx.BaseFee = new(big.Int)
+ }
+ if msg.BlobGasFeeCap != nil && msg.BlobGasFeeCap.BitLen() == 0 {
+ vmctx.BlobBaseFee = new(big.Int)
+ }
if config != nil {
traceConfig = &config.TraceConfig
}
diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go
index e717f5352d..47e3693495 100644
--- a/eth/tracers/api_test.go
+++ b/eth/tracers/api_test.go
@@ -81,7 +81,7 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i i
SnapshotLimit: 0,
TrieDirtyDisabled: true, // Archive mode
}
- chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, gspec, nil, backend.engine, vm.Config{}, nil, nil)
+ chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, gspec, nil, backend.engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1014,7 +1014,7 @@ func newTestMergedBackend(t *testing.T, n int, gspec *core.Genesis, generator fu
SnapshotLimit: 0,
TrieDirtyDisabled: true, // Archive mode
}
- chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, gspec, nil, backend.engine, vm.Config{}, nil, nil)
+ chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, gspec, nil, backend.engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
diff --git a/eth/tracers/internal/tracetest/makeTest.js b/eth/tracers/internal/tracetest/makeTest.js
index 3ad7a5df73..7a50748930 100644
--- a/eth/tracers/internal/tracetest/makeTest.js
+++ b/eth/tracers/internal/tracetest/makeTest.js
@@ -49,5 +49,6 @@ var makeTest = function(tx, traceConfig) {
context: context,
input: eth.getRawTransaction(tx),
result: result,
+ tracerConfig: traceConfig.tracerConfig,
}, null, 2));
}
diff --git a/eth/tracers/live/tests/supply_filler.go b/eth/tracers/live/tests/supply_filler.go
index 496e23d01e..76ec9f909e 100644
--- a/eth/tracers/live/tests/supply_filler.go
+++ b/eth/tracers/live/tests/supply_filler.go
@@ -663,7 +663,7 @@ func testSupplyTracer(genesis *core.Genesis, gen func(*core.BlockGen)) ([]supply
}
db := rawdb.NewMemoryDatabase()
- chain, err := core.NewBlockChain(db, core.DefaultCacheConfigWithScheme(rawdb.PathScheme), genesis, nil, engine, vm.Config{Tracer: tracer}, nil, nil)
+ chain, err := core.NewBlockChain(db, core.DefaultCacheConfigWithScheme(rawdb.PathScheme), genesis, nil, engine, vm.Config{Tracer: tracer}, nil)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to create tester chain: %v", err)
}
diff --git a/eth/tracers/logger/logger.go b/eth/tracers/logger/logger.go
index 7638c3cd09..b952c82286 100644
--- a/eth/tracers/logger/logger.go
+++ b/eth/tracers/logger/logger.go
@@ -390,7 +390,7 @@ func (t *mdLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, r
// OnOpcode also tracks SLOAD/SSTORE ops to track storage change.
func (t *mdLogger) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) {
stack := scope.StackData()
- fmt.Fprintf(t.out, "| %4d | %10v | %3d |", pc, op, cost)
+ fmt.Fprintf(t.out, "| %4d | %10v | %3d |", pc, vm.OpCode(op).String(), cost)
if !t.cfg.DisableStack {
// format stack
diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go
index f769ee8475..a1148bcedb 100644
--- a/ethclient/ethclient.go
+++ b/ethclient/ethclient.go
@@ -123,6 +123,7 @@ type rpcBlock struct {
Transactions []rpcTransaction `json:"transactions"`
UncleHashes []common.Hash `json:"uncles"`
Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
+ Requests []*types.Request `json:"requests,omitempty"`
}
func (ec *Client) getBlock(ctx context.Context, method string, args ...interface{}) (*types.Block, error) {
@@ -196,6 +197,7 @@ func (ec *Client) getBlock(ctx context.Context, method string, args ...interface
Transactions: txs,
Uncles: uncles,
Withdrawals: body.Withdrawals,
+ Requests: body.Requests,
}), nil
}
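With requests threaded into the block returned by getBlock, a caller can inspect deposit requests on an RPC-fetched block. A hedged usage sketch (endpoint and error handling are illustrative; the accessors are the ones used elsewhere in this diff):

// Count deposit requests in the latest block fetched over RPC.
client, err := ethclient.Dial("http://localhost:8545") // illustrative endpoint
if err != nil {
	log.Fatal(err)
}
block, err := client.BlockByNumber(context.Background(), nil) // nil = latest
if err != nil {
	log.Fatal(err)
}
deposits := 0
for _, req := range block.Requests() {
	if _, ok := req.Inner().(*types.Deposit); ok {
		deposits++
	}
}
fmt.Println("deposit requests in block:", deposits)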
diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go
index 2f3229cedc..1b7e26fb74 100644
--- a/ethclient/ethclient_test.go
+++ b/ethclient/ethclient_test.go
@@ -219,7 +219,7 @@ func newTestBackend(t *testing.T) (*node.Node, []*types.Block) {
t.Fatalf("can't create new node: %v", err)
}
// Create Ethereum Service
- config := &ethconfig.Config{Genesis: genesis}
+ config := &ethconfig.Config{Genesis: genesis, RPCGasCap: 1000000}
ethservice, err := eth.New(n, config)
if err != nil {
t.Fatalf("can't create new ethereum service: %v", err)
diff --git a/ethclient/gethclient/gethclient.go b/ethclient/gethclient/gethclient.go
index b1678b6766..02b2598b37 100644
--- a/ethclient/gethclient/gethclient.go
+++ b/ethclient/gethclient/gethclient.go
@@ -328,9 +328,9 @@ func (o BlockOverrides) MarshalJSON() ([]byte, error) {
Difficulty *hexutil.Big `json:"difficulty,omitempty"`
Time hexutil.Uint64 `json:"time,omitempty"`
GasLimit hexutil.Uint64 `json:"gasLimit,omitempty"`
- Coinbase *common.Address `json:"coinbase,omitempty"`
- Random *common.Hash `json:"random,omitempty"`
- BaseFee *hexutil.Big `json:"baseFee,omitempty"`
+ Coinbase *common.Address `json:"feeRecipient,omitempty"`
+ Random *common.Hash `json:"prevRandao,omitempty"`
+ BaseFee *hexutil.Big `json:"baseFeePerGas,omitempty"`
}
output := override{
diff --git a/ethclient/gethclient/gethclient_test.go b/ethclient/gethclient/gethclient_test.go
index 59ad370146..36ea290a85 100644
--- a/ethclient/gethclient/gethclient_test.go
+++ b/ethclient/gethclient/gethclient_test.go
@@ -57,7 +57,7 @@ func newTestBackend(t *testing.T) (*node.Node, []*types.Block) {
t.Fatalf("can't create new node: %v", err)
}
// Create Ethereum Service
- config := &ethconfig.Config{Genesis: genesis}
+ config := &ethconfig.Config{Genesis: genesis, RPCGasCap: 1000000}
ethservice, err := eth.New(n, config)
if err != nil {
t.Fatalf("can't create new ethereum service: %v", err)
@@ -510,7 +510,7 @@ func TestBlockOverridesMarshal(t *testing.T) {
bo: BlockOverrides{
Coinbase: common.HexToAddress("0x1111111111111111111111111111111111111111"),
},
- want: `{"coinbase":"0x1111111111111111111111111111111111111111"}`,
+ want: `{"feeRecipient":"0x1111111111111111111111111111111111111111"}`,
},
{
bo: BlockOverrides{
@@ -520,7 +520,7 @@ func TestBlockOverridesMarshal(t *testing.T) {
GasLimit: 4,
BaseFee: big.NewInt(5),
},
- want: `{"number":"0x1","difficulty":"0x2","time":"0x3","gasLimit":"0x4","baseFee":"0x5"}`,
+ want: `{"number":"0x1","difficulty":"0x2","time":"0x3","gasLimit":"0x4","baseFeePerGas":"0x5"}`,
},
} {
marshalled, err := json.Marshal(&tt.bo)
diff --git a/graphql/graphql_test.go b/graphql/graphql_test.go
index f3f9d1778a..ae3a58a983 100644
--- a/graphql/graphql_test.go
+++ b/graphql/graphql_test.go
@@ -453,6 +453,7 @@ func newGQLService(t *testing.T, stack *node.Node, shanghai bool, gspec *core.Ge
TrieDirtyCache: 5,
TrieTimeout: 60 * time.Minute,
SnapshotCache: 5,
+ RPCGasCap: 1000000,
StateScheme: rawdb.HashScheme,
}
var engine consensus.Engine = ethash.NewFaker()
diff --git a/internal/era/e2store/e2store_test.go b/internal/era/e2store/e2store_test.go
index b0803493c7..353942f0bd 100644
--- a/internal/era/e2store/e2store_test.go
+++ b/internal/era/e2store/e2store_test.go
@@ -26,6 +26,8 @@ import (
)
func TestEncode(t *testing.T) {
+ t.Parallel()
+
for _, test := range []struct {
entries []Entry
want string
@@ -53,6 +55,7 @@ func TestEncode(t *testing.T) {
tt := test
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
+
var (
b = bytes.NewBuffer(nil)
w = NewWriter(b)
@@ -83,6 +86,8 @@ func TestEncode(t *testing.T) {
}
func TestDecode(t *testing.T) {
+ t.Parallel()
+
for i, tt := range []struct {
have string
err error
diff --git a/internal/era/era_test.go b/internal/era/era_test.go
index 883e30f7b6..d0f56b6f88 100644
--- a/internal/era/era_test.go
+++ b/internal/era/era_test.go
@@ -34,6 +34,8 @@ type testchain struct {
}
func TestEra1Builder(t *testing.T) {
+ t.Parallel()
+
// Get temp directory.
f, err := os.CreateTemp("", "era1-test")
if err != nil {
@@ -125,6 +127,8 @@ func TestEra1Builder(t *testing.T) {
}
func TestEraFilename(t *testing.T) {
+ t.Parallel()
+
for i, tt := range []struct {
network string
epoch int
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 1c3cb4adf9..79f94892c5 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -21,6 +21,7 @@ import (
"encoding/hex"
"errors"
"fmt"
+ "maps"
"math/big"
"strings"
"time"
@@ -474,7 +475,7 @@ func (api *PersonalAccountAPI) signTransaction(ctx context.Context, args *Transa
return nil, err
}
// Assemble the transaction and sign with the wallet
- tx := args.ToTransaction()
+ tx := args.ToTransaction(types.LegacyTxType)
return wallet.SignTxWithPassphrase(account, passwd, tx, api.b.ChainConfig().ChainID)
}
@@ -523,7 +524,7 @@ func (api *PersonalAccountAPI) SignTransaction(ctx context.Context, args Transac
return nil, errors.New("nonce not specified")
}
// Before actually signing the transaction, ensure the transaction fee is reasonable.
- tx := args.ToTransaction()
+ tx := args.ToTransaction(types.LegacyTxType)
if err := checkTxFee(tx.GasPrice(), tx.Gas(), api.b.RPCTxFeeCap()); err != nil {
return nil, err
}
@@ -968,22 +969,54 @@ func (api *BlockChainAPI) GetBlockReceipts(ctx context.Context, blockNrOrHash rp
// if stateDiff is set, all diffs will be applied first and then the call
// message will be executed.
type OverrideAccount struct {
- Nonce *hexutil.Uint64 `json:"nonce"`
- Code *hexutil.Bytes `json:"code"`
- Balance *hexutil.Big `json:"balance"`
- State map[common.Hash]common.Hash `json:"state"`
- StateDiff map[common.Hash]common.Hash `json:"stateDiff"`
+ Nonce *hexutil.Uint64 `json:"nonce"`
+ Code *hexutil.Bytes `json:"code"`
+ Balance *hexutil.Big `json:"balance"`
+ State map[common.Hash]common.Hash `json:"state"`
+ StateDiff map[common.Hash]common.Hash `json:"stateDiff"`
+ MovePrecompileTo *common.Address `json:"movePrecompileToAddress"`
}
// StateOverride is the collection of overridden accounts.
type StateOverride map[common.Address]OverrideAccount
+func (diff *StateOverride) has(address common.Address) bool {
+ _, ok := (*diff)[address]
+ return ok
+}
+
// Apply overrides the fields of specified accounts into the given state.
-func (diff *StateOverride) Apply(statedb *state.StateDB) error {
+func (diff *StateOverride) Apply(statedb *state.StateDB, precompiles vm.PrecompiledContracts) error {
if diff == nil {
return nil
}
+ // Tracks destinations of precompiles that were moved.
+ dirtyAddrs := make(map[common.Address]struct{})
for addr, account := range *diff {
+ // If a precompile was moved to this address already, it can't be overridden.
+ if _, ok := dirtyAddrs[addr]; ok {
+ return fmt.Errorf("account %s has already been overridden by a precompile", addr.Hex())
+ }
+ p, isPrecompile := precompiles[addr]
+ // The MovePrecompileTo feature makes it possible to move a precompile's
+ // code to another address. If the target address is itself a precompile,
+ // the code of the latter is lost for this session.
+ // Note that the destination account is not cleared upon move.
+ if account.MovePrecompileTo != nil {
+ if !isPrecompile {
+ return fmt.Errorf("account %s is not a precompile", addr.Hex())
+ }
+ // Refuse to move a precompile to an address that has been
+ // or will be overridden.
+ if diff.has(*account.MovePrecompileTo) {
+ return fmt.Errorf("account %s is already overridden", account.MovePrecompileTo.Hex())
+ }
+ precompiles[*account.MovePrecompileTo] = p
+ dirtyAddrs[*account.MovePrecompileTo] = struct{}{}
+ }
+ if isPrecompile {
+ delete(precompiles, addr)
+ }
// Override account nonce.
if account.Nonce != nil {
statedb.SetNonce(addr, uint64(*account.Nonce))
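For orientation, a hedged in-package sketch of how the new Apply signature is meant to be driven (illustrative addresses and bytecode; `active` should be a clone of the live precompile set, e.g. maps.Clone(vm.ActivePrecompiledContracts(rules)), so per-call overrides cannot leak into other requests):

// Sketch only; not part of this change.
func exampleMovePrecompile(statedb *state.StateDB, active vm.PrecompiledContracts) error {
	identity := common.BytesToAddress([]byte{0x04})
	target := common.HexToAddress("0x0000000000000000000000000000000000000f04")
	code := hexutil.Bytes(common.FromHex("0x600160005260206000f3")) // illustrative bytecode
	overrides := StateOverride{
		identity: OverrideAccount{
			Code:             &code,
			MovePrecompileTo: &target,
		},
	}
	// After Apply, the identity implementation is reachable at `target` and a
	// call to the original address runs the overridden code instead.
	return overrides.Apply(statedb, active)
}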
@@ -1020,47 +1053,80 @@ func (diff *StateOverride) Apply(statedb *state.StateDB) error {
// BlockOverrides is a set of header fields to override.
type BlockOverrides struct {
- Number *hexutil.Big
- Difficulty *hexutil.Big
- Time *hexutil.Uint64
- GasLimit *hexutil.Uint64
- Coinbase *common.Address
- Random *common.Hash
- BaseFee *hexutil.Big
- BlobBaseFee *hexutil.Big
+ Number *hexutil.Big
+ Difficulty *hexutil.Big // No-op if we're simulating post-merge calls.
+ Time *hexutil.Uint64
+ GasLimit *hexutil.Uint64
+ FeeRecipient *common.Address
+ PrevRandao *common.Hash
+ BaseFeePerGas *hexutil.Big
+ BlobBaseFee *hexutil.Big
}
// Apply overrides the given header fields into the given block context.
-func (diff *BlockOverrides) Apply(blockCtx *vm.BlockContext) {
- if diff == nil {
+func (o *BlockOverrides) Apply(blockCtx *vm.BlockContext) {
+ if o == nil {
return
}
- if diff.Number != nil {
- blockCtx.BlockNumber = diff.Number.ToInt()
+ if o.Number != nil {
+ blockCtx.BlockNumber = o.Number.ToInt()
}
- if diff.Difficulty != nil {
- blockCtx.Difficulty = diff.Difficulty.ToInt()
+ if o.Difficulty != nil {
+ blockCtx.Difficulty = o.Difficulty.ToInt()
}
- if diff.Time != nil {
- blockCtx.Time = uint64(*diff.Time)
+ if o.Time != nil {
+ blockCtx.Time = uint64(*o.Time)
}
- if diff.GasLimit != nil {
- blockCtx.GasLimit = uint64(*diff.GasLimit)
+ if o.GasLimit != nil {
+ blockCtx.GasLimit = uint64(*o.GasLimit)
}
- if diff.Coinbase != nil {
- blockCtx.Coinbase = *diff.Coinbase
+ if o.FeeRecipient != nil {
+ blockCtx.Coinbase = *o.FeeRecipient
}
- if diff.Random != nil {
- blockCtx.Random = diff.Random
+ if o.PrevRandao != nil {
+ blockCtx.Random = o.PrevRandao
}
- if diff.BaseFee != nil {
- blockCtx.BaseFee = diff.BaseFee.ToInt()
+ if o.BaseFeePerGas != nil {
+ blockCtx.BaseFee = o.BaseFeePerGas.ToInt()
}
- if diff.BlobBaseFee != nil {
- blockCtx.BlobBaseFee = diff.BlobBaseFee.ToInt()
+ if o.BlobBaseFee != nil {
+ blockCtx.BlobBaseFee = o.BlobBaseFee.ToInt()
}
}
+// MakeHeader returns a copy of the given header with the overridden
+// fields applied.
+// Note: MakeHeader ignores BlobBaseFee if set, because the header
+// has no such field.
+func (o *BlockOverrides) MakeHeader(header *types.Header) *types.Header {
+ if o == nil {
+ return header
+ }
+ h := types.CopyHeader(header)
+ if o.Number != nil {
+ h.Number = o.Number.ToInt()
+ }
+ if o.Difficulty != nil {
+ h.Difficulty = o.Difficulty.ToInt()
+ }
+ if o.Time != nil {
+ h.Time = uint64(*o.Time)
+ }
+ if o.GasLimit != nil {
+ h.GasLimit = uint64(*o.GasLimit)
+ }
+ if o.FeeRecipient != nil {
+ h.Coinbase = *o.FeeRecipient
+ }
+ if o.PrevRandao != nil {
+ h.MixDigest = *o.PrevRandao
+ }
+ if o.BaseFeePerGas != nil {
+ h.BaseFee = o.BaseFeePerGas.ToInt()
+ }
+ return h
+}
+
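A minimal sketch of MakeHeader in use (package ethapi, illustrative values): it derives the header a simulated block runs under while leaving the base header untouched.

// Sketch only; not part of this change.
func exampleOverriddenHeader(base *types.Header) *types.Header {
	ts := hexutil.Uint64(1_700_000_000)
	o := &BlockOverrides{
		Number: (*hexutil.Big)(big.NewInt(12)),
		Time:   &ts,
	}
	// Returns a copy of base with Number and Time replaced; BlobBaseFee, if
	// set, is ignored here since the header has no such field.
	return o.MakeHeader(base)
}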
// ChainContextBackend provides methods required to implement ChainContext.
type ChainContextBackend interface {
Engine() consensus.Engine
@@ -1094,9 +1160,16 @@ func (context *ChainContext) GetHeader(hash common.Hash, number uint64) *types.H
}
func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.StateDB, header *types.Header, overrides *StateOverride, blockOverrides *BlockOverrides, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) {
- if err := overrides.Apply(state); err != nil {
+ blockCtx := core.NewEVMBlockContext(header, NewChainContext(ctx, b), nil)
+ if blockOverrides != nil {
+ blockOverrides.Apply(&blockCtx)
+ }
+ rules := b.ChainConfig().Rules(blockCtx.BlockNumber, blockCtx.Random != nil, blockCtx.Time)
+ precompiles := maps.Clone(vm.ActivePrecompiledContracts(rules))
+ if err := overrides.Apply(state, precompiles); err != nil {
return nil, err
}
+
// Setup context so it may be cancelled when the call has completed
// or, in case of unmetered gas, setup a context with a timeout.
var cancel context.CancelFunc
@@ -1108,18 +1181,32 @@ func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.S
// Make sure the context is cancelled when the call has completed
// this makes sure resources are cleaned up.
defer cancel()
+ return applyMessage(ctx, b, args, state, header, timeout, new(core.GasPool).AddGas(globalGasCap), &blockCtx, &vm.Config{NoBaseFee: true}, precompiles, true)
+}
+func applyMessage(ctx context.Context, b Backend, args TransactionArgs, state *state.StateDB, header *types.Header, timeout time.Duration, gp *core.GasPool, blockContext *vm.BlockContext, vmConfig *vm.Config, precompiles vm.PrecompiledContracts, skipChecks bool) (*core.ExecutionResult, error) {
// Get a new instance of the EVM.
- blockCtx := core.NewEVMBlockContext(header, NewChainContext(ctx, b), nil)
- if blockOverrides != nil {
- blockOverrides.Apply(&blockCtx)
- }
- if err := args.CallDefaults(globalGasCap, blockCtx.BaseFee, b.ChainConfig().ChainID); err != nil {
+ if err := args.CallDefaults(gp.Gas(), blockContext.BaseFee, b.ChainConfig().ChainID); err != nil {
return nil, err
}
- msg := args.ToMessage(blockCtx.BaseFee)
- evm := b.GetEVM(ctx, msg, state, header, &vm.Config{NoBaseFee: true}, &blockCtx)
+ msg := args.ToMessage(header.BaseFee, skipChecks, skipChecks)
+ // Lower the basefee to 0 to avoid breaking EVM
+ // invariants (basefee < feecap).
+ if msg.GasPrice.Sign() == 0 {
+ blockContext.BaseFee = new(big.Int)
+ }
+ if msg.BlobGasFeeCap != nil && msg.BlobGasFeeCap.BitLen() == 0 {
+ blockContext.BlobBaseFee = new(big.Int)
+ }
+ evm := b.GetEVM(ctx, msg, state, header, vmConfig, blockContext)
+ if precompiles != nil {
+ evm.SetPrecompiles(precompiles)
+ }
+ return applyMessageWithEVM(ctx, evm, msg, state, timeout, gp)
+}
+
+func applyMessageWithEVM(ctx context.Context, evm *vm.EVM, msg *core.Message, state *state.StateDB, timeout time.Duration, gp *core.GasPool) (*core.ExecutionResult, error) {
// Wait for the context to be done and cancel the evm. Even if the
// EVM has finished, cancelling may be done (repeatedly)
go func() {
@@ -1128,7 +1215,6 @@ func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.S
}()
// Execute the message.
- gp := new(core.GasPool).AddGas(math.MaxUint64)
result, err := core.ApplyMessage(evm, msg, gp)
if err := state.Error(); err != nil {
return nil, err
@@ -1151,7 +1237,6 @@ func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash
if state == nil || err != nil {
return nil, err
}
-
return doCall(ctx, b, args, state, header, overrides, blockOverrides, timeout, globalGasCap)
}
@@ -1177,6 +1262,41 @@ func (api *BlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockN
return result.Return(), result.Err
}
+// SimulateV1 executes a series of transactions on top of a base state.
+// The transactions are packed into blocks. For each block, block header
+// fields can be overridden. The state can also be overridden prior to
+// execution of each block.
+//
+// Note, this function doesn't make any changes to the state/blockchain and is
+// useful for executing calls and retrieving values.
+func (api *BlockChainAPI) SimulateV1(ctx context.Context, opts simOpts, blockNrOrHash *rpc.BlockNumberOrHash) ([]map[string]interface{}, error) {
+ if len(opts.BlockStateCalls) == 0 {
+ return nil, &invalidParamsError{message: "empty input"}
+ } else if len(opts.BlockStateCalls) > maxSimulateBlocks {
+ return nil, &clientLimitExceededError{message: "too many blocks"}
+ }
+ if blockNrOrHash == nil {
+ n := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
+ blockNrOrHash = &n
+ }
+ state, base, err := api.b.StateAndHeaderByNumberOrHash(ctx, *blockNrOrHash)
+ if state == nil || err != nil {
+ return nil, err
+ }
+ sim := &simulator{
+ b: api.b,
+ state: state,
+ base: base,
+ chainConfig: api.b.ChainConfig(),
+ // Neither a single tx nor the whole series of txs should consume more gas than the cap.
+ gp: new(core.GasPool).AddGas(api.b.RPCGasCap()),
+ traceTransfers: opts.TraceTransfers,
+ validate: opts.Validation,
+ fullTx: opts.ReturnFullTransactions,
+ }
+ return sim.execute(ctx, opts.BlockStateCalls)
+}
+
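A hedged sketch of calling the new endpoint from within the package (illustrative accounts): one simulated block funds a fresh sender via a state override and then spends from it, with ether transfers traced as logs.

// Sketch only; not part of this change.
func exampleSimulate(ctx context.Context, api *BlockChainAPI, from, to common.Address) ([]map[string]interface{}, error) {
	opts := simOpts{
		BlockStateCalls: []simBlock{{
			StateOverrides: &StateOverride{
				from: OverrideAccount{Balance: (*hexutil.Big)(big.NewInt(1_000_000))},
			},
			Calls: []TransactionArgs{{
				From:  &from,
				To:    &to,
				Value: (*hexutil.Big)(big.NewInt(1000)),
			}},
		}},
		TraceTransfers: true,
	}
	latest := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
	return api.SimulateV1(ctx, opts, &latest)
}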
// DoEstimateGas returns the lowest possible gas limit that allows the transaction to run
// successfully at block `blockNrOrHash`. It returns an error if the transaction would revert, or if
// there are unexpected failures. The gas limit is capped by both `args.Gas` (if non-nil &
@@ -1187,7 +1307,7 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr
if state == nil || err != nil {
return 0, err
}
- if err = overrides.Apply(state); err != nil {
+ if err := overrides.Apply(state, nil); err != nil {
return 0, err
}
// Construct the gas estimator option from the user input
@@ -1206,7 +1326,7 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr
if err := args.CallDefaults(gasCap, header.BaseFee, b.ChainConfig().ChainID); err != nil {
return 0, err
}
- call := args.ToMessage(header.BaseFee)
+ call := args.ToMessage(header.BaseFee, true, true)
// Run the gas estimation and wrap any revertals into a custom return
estimate, revert, err := gasestimator.Estimate(ctx, call, opts, gasCap)
@@ -1268,6 +1388,9 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} {
if head.ParentBeaconRoot != nil {
result["parentBeaconBlockRoot"] = head.ParentBeaconRoot
}
+ if head.RequestsHash != nil {
+ result["requestsRoot"] = head.RequestsHash
+ }
return result
}
@@ -1303,6 +1426,9 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *param
if block.Header().WithdrawalsHash != nil {
fields["withdrawals"] = block.Withdrawals()
}
+ if block.Header().RequestsHash != nil {
+ fields["requests"] = block.Requests()
+ }
return fields
}
@@ -1540,15 +1666,23 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH
statedb := db.Copy()
// Set the accesslist to the last al
args.AccessList = &accessList
- msg := args.ToMessage(header.BaseFee)
+ msg := args.ToMessage(header.BaseFee, true, true)
// Apply the transaction with the access list tracer
tracer := logger.NewAccessListTracer(accessList, args.from(), to, precompiles)
config := vm.Config{Tracer: tracer.Hooks(), NoBaseFee: true}
vmenv := b.GetEVM(ctx, msg, statedb, header, &config, nil)
+ // Lower the basefee to 0 to avoid breaking EVM
+ // invariants (basefee < feecap).
+ if msg.GasPrice.Sign() == 0 {
+ vmenv.Context.BaseFee = new(big.Int)
+ }
+ if msg.BlobGasFeeCap != nil && msg.BlobGasFeeCap.BitLen() == 0 {
+ vmenv.Context.BlobBaseFee = new(big.Int)
+ }
res, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.GasLimit))
if err != nil {
- return nil, 0, nil, fmt.Errorf("failed to apply transaction: %v err: %v", args.ToTransaction().Hash(), err)
+ return nil, 0, nil, fmt.Errorf("failed to apply transaction: %v err: %v", args.ToTransaction(types.LegacyTxType).Hash(), err)
}
if tracer.Equal(prevTracer) {
return accessList, res.UsedGas, res.Err, nil
@@ -1817,7 +1951,7 @@ func (api *TransactionAPI) SendTransaction(ctx context.Context, args Transaction
return common.Hash{}, err
}
// Assemble the transaction and sign with the wallet
- tx := args.ToTransaction()
+ tx := args.ToTransaction(types.LegacyTxType)
signed, err := wallet.SignTx(account, tx, api.b.ChainConfig().ChainID)
if err != nil {
@@ -1837,7 +1971,7 @@ func (api *TransactionAPI) FillTransaction(ctx context.Context, args Transaction
return nil, err
}
// Assemble the transaction and obtain rlp
- tx := args.ToTransaction()
+ tx := args.ToTransaction(types.LegacyTxType)
data, err := tx.MarshalBinary()
if err != nil {
return nil, err
@@ -1905,7 +2039,7 @@ func (api *TransactionAPI) SignTransaction(ctx context.Context, args Transaction
return nil, err
}
// Before actually signing the transaction, ensure the transaction fee is reasonable.
- tx := args.ToTransaction()
+ tx := args.ToTransaction(types.LegacyTxType)
if err := checkTxFee(tx.GasPrice(), tx.Gas(), api.b.RPCTxFeeCap()); err != nil {
return nil, err
}
@@ -1963,7 +2097,7 @@ func (api *TransactionAPI) Resend(ctx context.Context, sendArgs TransactionArgs,
if err := sendArgs.setDefaults(ctx, api.b, false); err != nil {
return common.Hash{}, err
}
- matchTx := sendArgs.ToTransaction()
+ matchTx := sendArgs.ToTransaction(types.LegacyTxType)
// Before replacing the old transaction, ensure the _new_ transaction fee is reasonable.
var price = matchTx.GasPrice()
@@ -1993,7 +2127,7 @@ func (api *TransactionAPI) Resend(ctx context.Context, sendArgs TransactionArgs,
if gasLimit != nil && *gasLimit != 0 {
sendArgs.Gas = gasLimit
}
- signedTx, err := api.sign(sendArgs.from(), sendArgs.ToTransaction())
+ signedTx, err := api.sign(sendArgs.from(), sendArgs.ToTransaction(types.LegacyTxType))
if err != nil {
return common.Hash{}, err
}
diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go
index 7465fb5529..384ca9f1cc 100644
--- a/internal/ethapi/api_test.go
+++ b/internal/ethapi/api_test.go
@@ -24,17 +24,16 @@ import (
"encoding/json"
"errors"
"fmt"
+ "maps"
"math/big"
"os"
"path/filepath"
"reflect"
"slices"
+ "strings"
"testing"
"time"
- "github.com/holiman/uint256"
- "github.com/stretchr/testify/require"
-
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
@@ -56,10 +55,12 @@ import (
"github.com/ethereum/go-ethereum/internal/blocktest"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
+ "github.com/ethereum/go-ethereum/triedb"
+ "github.com/holiman/uint256"
+ "github.com/stretchr/testify/require"
)
func testTransactionMarshal(t *testing.T, tests []txData, config *params.ChainConfig) {
- t.Parallel()
var (
signer = types.LatestSigner(config)
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
@@ -96,6 +97,8 @@ func testTransactionMarshal(t *testing.T, tests []txData, config *params.ChainCo
}
func TestTransaction_RoundTripRpcJSON(t *testing.T) {
+ t.Parallel()
+
var (
config = params.AllEthashProtocolChanges
tests = allTransactionTypes(common.Address{0xde, 0xad}, config)
@@ -104,6 +107,8 @@ func TestTransaction_RoundTripRpcJSON(t *testing.T) {
}
func TestTransactionBlobTx(t *testing.T) {
+ t.Parallel()
+
config := *params.TestChainConfig
config.ShanghaiTime = new(uint64)
config.CancunTime = new(uint64)
@@ -449,7 +454,7 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, engine consensus.E
// Generate blocks for testing
db, blocks, _ := core.GenerateChainWithGenesis(gspec, engine, n, generator)
txlookupLimit := uint64(0)
- chain, err := core.NewBlockChain(db, cacheConfig, gspec, nil, engine, vm.Config{}, nil, &txlookupLimit)
+ chain, err := core.NewBlockChain(db, cacheConfig, gspec, nil, engine, vm.Config{}, &txlookupLimit)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -502,7 +507,8 @@ func (b testBackend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc
}
panic("unknown type rpc.BlockNumberOrHash")
}
-func (b testBackend) CurrentHeader() *types.Header { return b.chain.CurrentBlock() }
+
+func (b testBackend) CurrentHeader() *types.Header { return b.chain.CurrentHeader() }
func (b testBackend) CurrentBlock() *types.Header { return b.chain.CurrentBlock() }
func (b testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) {
if number == rpc.LatestBlockNumber {
@@ -814,6 +820,7 @@ func TestCall(t *testing.T) {
}))
randomAccounts := newAccounts(3)
var testSuite = []struct {
+ name string
blockNumber rpc.BlockNumber
overrides StateOverride
call TransactionArgs
@@ -823,6 +830,7 @@ func TestCall(t *testing.T) {
}{
// transfer on genesis
{
+ name: "transfer-on-genesis",
blockNumber: rpc.BlockNumber(0),
call: TransactionArgs{
From: &accounts[0].addr,
@@ -834,6 +842,7 @@ func TestCall(t *testing.T) {
},
// transfer on the head
{
+ name: "transfer-on-the-head",
blockNumber: rpc.BlockNumber(genBlocks),
call: TransactionArgs{
From: &accounts[0].addr,
@@ -845,6 +854,7 @@ func TestCall(t *testing.T) {
},
// transfer on a non-existent block, error expected
{
+ name: "transfer-non-existent-block",
blockNumber: rpc.BlockNumber(genBlocks + 1),
call: TransactionArgs{
From: &accounts[0].addr,
@@ -855,6 +865,7 @@ func TestCall(t *testing.T) {
},
// transfer on the latest block
{
+ name: "transfer-latest-block",
blockNumber: rpc.LatestBlockNumber,
call: TransactionArgs{
From: &accounts[0].addr,
@@ -866,6 +877,7 @@ func TestCall(t *testing.T) {
},
// Call which can only succeed if the state is overridden
{
+ name: "state-override-success",
blockNumber: rpc.LatestBlockNumber,
call: TransactionArgs{
From: &randomAccounts[0].addr,
@@ -879,6 +891,7 @@ func TestCall(t *testing.T) {
},
// Invalid call without state overriding
{
+ name: "insufficient-funds-simple",
blockNumber: rpc.LatestBlockNumber,
call: TransactionArgs{
From: &randomAccounts[0].addr,
@@ -904,6 +917,7 @@ func TestCall(t *testing.T) {
// }
// }
{
+ name: "simple-contract-call",
blockNumber: rpc.LatestBlockNumber,
call: TransactionArgs{
From: &randomAccounts[0].addr,
@@ -920,6 +934,7 @@ func TestCall(t *testing.T) {
},
// Block overrides should work
{
+ name: "block-override",
blockNumber: rpc.LatestBlockNumber,
call: TransactionArgs{
From: &accounts[1].addr,
@@ -932,8 +947,36 @@ func TestCall(t *testing.T) {
blockOverrides: BlockOverrides{Number: (*hexutil.Big)(big.NewInt(11))},
want: "0x000000000000000000000000000000000000000000000000000000000000000b",
},
+ // Clear storage trie
+ {
+ name: "clear-storage-trie",
+ blockNumber: rpc.LatestBlockNumber,
+ call: TransactionArgs{
+ From: &accounts[1].addr,
+ // Yul:
+ // object "Test" {
+ // code {
+ // let dad := 0x0000000000000000000000000000000000000dad
+ // if eq(balance(dad), 0) {
+ // revert(0, 0)
+ // }
+ // let slot := sload(0)
+ // mstore(0, slot)
+ // return(0, 32)
+ // }
+ // }
+ Input: hex2Bytes("610dad6000813103600f57600080fd5b6000548060005260206000f3"),
+ },
+ overrides: StateOverride{
+ dad: OverrideAccount{
+ State: map[common.Hash]common.Hash{},
+ },
+ },
+ want: "0x0000000000000000000000000000000000000000000000000000000000000000",
+ },
// Invalid blob tx
{
+ name: "invalid-blob-tx",
blockNumber: rpc.LatestBlockNumber,
call: TransactionArgs{
From: &accounts[1].addr,
@@ -944,6 +987,7 @@ func TestCall(t *testing.T) {
},
// BLOBHASH opcode
{
+ name: "blobhash-opcode",
blockNumber: rpc.LatestBlockNumber,
call: TransactionArgs{
From: &accounts[1].addr,
@@ -985,31 +1029,1178 @@ func TestCall(t *testing.T) {
want: "0x0000000000000000000000000000000000000000000000000000000000000000",
},
}
- for i, tc := range testSuite {
+ for _, tc := range testSuite {
result, err := api.Call(context.Background(), tc.call, &rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, &tc.overrides, &tc.blockOverrides)
if tc.expectErr != nil {
if err == nil {
- t.Errorf("test %d: want error %v, have nothing", i, tc.expectErr)
+ t.Errorf("test %s: want error %v, have nothing", tc.name, tc.expectErr)
continue
}
if !errors.Is(err, tc.expectErr) {
// Second try
if !reflect.DeepEqual(err, tc.expectErr) {
- t.Errorf("test %d: error mismatch, want %v, have %v", i, tc.expectErr, err)
+ t.Errorf("test %s: error mismatch, want %v, have %v", tc.name, tc.expectErr, err)
}
}
continue
}
if err != nil {
- t.Errorf("test %d: want no error, have %v", i, err)
+ t.Errorf("test %s: want no error, have %v", tc.name, err)
continue
}
if !reflect.DeepEqual(result.String(), tc.want) {
- t.Errorf("test %d, result mismatch, have\n%v\n, want\n%v\n", i, result.String(), tc.want)
+ t.Errorf("test %s, result mismatch, have\n%v\n, want\n%v\n", tc.name, result.String(), tc.want)
}
}
}
+func TestSimulateV1(t *testing.T) {
+ t.Parallel()
+ // Initialize test accounts
+ var (
+ accounts = newAccounts(3)
+ fixedAccount = newTestAccount()
+ genBlocks = 10
+ signer = types.HomesteadSigner{}
+ cac = common.HexToAddress("0x0000000000000000000000000000000000000cac")
+ bab = common.HexToAddress("0x0000000000000000000000000000000000000bab")
+ coinbase = "0x000000000000000000000000000000000000ffff"
+ genesis = &core.Genesis{
+ Config: params.TestChainConfig,
+ Alloc: types.GenesisAlloc{
+ accounts[0].addr: {Balance: big.NewInt(params.Ether)},
+ accounts[1].addr: {Balance: big.NewInt(params.Ether)},
+ accounts[2].addr: {Balance: big.NewInt(params.Ether)},
+ // Yul:
+ // object "Test" {
+ // code {
+ // let dad := 0x0000000000000000000000000000000000000dad
+ // selfdestruct(dad)
+ // }
+ // }
+ cac: {Balance: big.NewInt(params.Ether), Code: common.Hex2Bytes("610dad80ff")},
+ bab: {
+ Balance: big.NewInt(1),
+ // object "Test" {
+ // code {
+ // let value1 := sload(1)
+ // let value2 := sload(2)
+ //
+ // // Shift value1 by 128 bits to the left by multiplying it with 2^128
+ // value1 := mul(value1, 0x100000000000000000000000000000000)
+ //
+ // // Concatenate value1 and value2
+ // let concatenatedValue := add(value1, value2)
+ //
+ // // Store the result in memory and return it
+ // mstore(0, concatenatedValue)
+ // return(0, 0x20)
+ // }
+ // }
+ Code: common.FromHex("0x600154600254700100000000000000000000000000000000820291508082018060005260206000f3"),
+ Storage: map[common.Hash]common.Hash{
+ common.BigToHash(big.NewInt(1)): common.BigToHash(big.NewInt(10)),
+ common.BigToHash(big.NewInt(2)): common.BigToHash(big.NewInt(12)),
+ },
+ },
+ },
+ }
+ sha256Address = common.BytesToAddress([]byte{0x02})
+ )
+ api := NewBlockChainAPI(newTestBackend(t, genBlocks, genesis, ethash.NewFaker(), func(i int, b *core.BlockGen) {
+ b.SetCoinbase(common.HexToAddress(coinbase))
+ // Transfer from account[0] to account[1]
+ // value: 1000 wei
+ // fee: 0 wei
+ tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
+ Nonce: uint64(i),
+ To: &accounts[1].addr,
+ Value: big.NewInt(1000),
+ Gas: params.TxGas,
+ GasPrice: b.BaseFee(),
+ Data: nil,
+ }), signer, accounts[0].key)
+ b.AddTx(tx)
+ }))
+ var (
+ randomAccounts = newAccounts(4)
+ latest = rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
+ includeTransfers = true
+ validation = true
+ )
+ type log struct {
+ Address common.Address `json:"address"`
+ Topics []common.Hash `json:"topics"`
+ Data hexutil.Bytes `json:"data"`
+ BlockNumber hexutil.Uint64 `json:"blockNumber"`
+ // Skip txHash
+ //TxHash common.Hash `json:"transactionHash" gencodec:"required"`
+ TxIndex hexutil.Uint `json:"transactionIndex"`
+ //BlockHash common.Hash `json:"blockHash"`
+ Index hexutil.Uint `json:"logIndex"`
+ }
+ type callErr struct {
+ Message string
+ Code int
+ }
+ type callRes struct {
+ ReturnValue string `json:"returnData"`
+ Error callErr
+ Logs []log
+ GasUsed string
+ Status string
+ }
+ type blockRes struct {
+ Number string
+ //Hash string
+ // Ignore timestamp
+ GasLimit string
+ GasUsed string
+ Miner string
+ BaseFeePerGas string
+ Calls []callRes
+ }
+ var testSuite = []struct {
+ name string
+ blocks []simBlock
+ tag rpc.BlockNumberOrHash
+ includeTransfers *bool
+ validation *bool
+ expectErr error
+ want []blockRes
+ }{
+ // State build-up over calls:
+ // First value transfer OK after state override.
+ // Second one should succeed because of first transfer.
+ {
+ name: "simple",
+ tag: latest,
+ blocks: []simBlock{{
+ StateOverrides: &StateOverride{
+ randomAccounts[0].addr: OverrideAccount{Balance: newRPCBalance(big.NewInt(1000))},
+ },
+ Calls: []TransactionArgs{{
+ From: &randomAccounts[0].addr,
+ To: &randomAccounts[1].addr,
+ Value: (*hexutil.Big)(big.NewInt(1000)),
+ }, {
+ From: &randomAccounts[1].addr,
+ To: &randomAccounts[2].addr,
+ Value: (*hexutil.Big)(big.NewInt(1000)),
+ }, {
+ To: &randomAccounts[3].addr,
+ }},
+ }},
+ want: []blockRes{{
+ Number: "0xb",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0xf618",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{{
+ ReturnValue: "0x",
+ GasUsed: "0x5208",
+ Logs: []log{},
+ Status: "0x1",
+ }, {
+ ReturnValue: "0x",
+ GasUsed: "0x5208",
+ Logs: []log{},
+ Status: "0x1",
+ }, {
+ ReturnValue: "0x",
+ GasUsed: "0x5208",
+ Logs: []log{},
+ Status: "0x1",
+ }},
+ }},
+ }, {
+ // State build-up over blocks.
+ name: "simple-multi-block",
+ tag: latest,
+ blocks: []simBlock{{
+ StateOverrides: &StateOverride{
+ randomAccounts[0].addr: OverrideAccount{Balance: newRPCBalance(big.NewInt(2000))},
+ },
+ Calls: []TransactionArgs{
+ {
+ From: &randomAccounts[0].addr,
+ To: &randomAccounts[1].addr,
+ Value: (*hexutil.Big)(big.NewInt(1000)),
+ }, {
+ From: &randomAccounts[0].addr,
+ To: &randomAccounts[3].addr,
+ Value: (*hexutil.Big)(big.NewInt(1000)),
+ },
+ },
+ }, {
+ StateOverrides: &StateOverride{
+ randomAccounts[3].addr: OverrideAccount{Balance: newRPCBalance(big.NewInt(0))},
+ },
+ Calls: []TransactionArgs{
+ {
+ From: &randomAccounts[1].addr,
+ To: &randomAccounts[2].addr,
+ Value: (*hexutil.Big)(big.NewInt(1000)),
+ },
+ },
+ }},
+ want: []blockRes{{
+ Number: "0xb",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0xa410",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{{
+ ReturnValue: "0x",
+ GasUsed: "0x5208",
+ Logs: []log{},
+ Status: "0x1",
+ }, {
+ ReturnValue: "0x",
+ GasUsed: "0x5208",
+ Logs: []log{},
+ Status: "0x1",
+ }},
+ }, {
+ Number: "0xc",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0x5208",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{{
+ ReturnValue: "0x",
+ GasUsed: "0x5208",
+ Logs: []log{},
+ Status: "0x1",
+ }},
+ }},
+ }, {
+ // insufficient funds
+ name: "insufficient-funds",
+ tag: latest,
+ blocks: []simBlock{{
+ Calls: []TransactionArgs{{
+ From: &randomAccounts[0].addr,
+ To: &randomAccounts[1].addr,
+ Value: (*hexutil.Big)(big.NewInt(1000)),
+ }},
+ }},
+ want: nil,
+ expectErr: &invalidTxError{Message: fmt.Sprintf("err: insufficient funds for gas * price + value: address %s have 0 want 1000 (supplied gas 4712388)", randomAccounts[0].addr.String()), Code: errCodeInsufficientFunds},
+ }, {
+ // EVM error
+ name: "evm-error",
+ tag: latest,
+ blocks: []simBlock{{
+ StateOverrides: &StateOverride{
+ randomAccounts[2].addr: OverrideAccount{Code: hex2Bytes("f3")},
+ },
+ Calls: []TransactionArgs{{
+ From: &randomAccounts[0].addr,
+ To: &randomAccounts[2].addr,
+ }},
+ }},
+ want: []blockRes{{
+ Number: "0xb",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0x47e7c4",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{{
+ ReturnValue: "0x",
+ Error: callErr{Message: "stack underflow (0 <=> 2)", Code: errCodeVMError},
+ GasUsed: "0x47e7c4",
+ Logs: []log{},
+ Status: "0x0",
+ }},
+ }},
+ }, {
+ // Block overrides should work; each call is simulated at a different block number.
+ name: "block-overrides",
+ tag: latest,
+ blocks: []simBlock{{
+ BlockOverrides: &BlockOverrides{
+ Number: (*hexutil.Big)(big.NewInt(11)),
+ FeeRecipient: &cac,
+ },
+ Calls: []TransactionArgs{
+ {
+ From: &accounts[0].addr,
+ Input: &hexutil.Bytes{
+ 0x43, // NUMBER
+ 0x60, 0x00, 0x52, // MSTORE offset 0
+ 0x60, 0x20, 0x60, 0x00, 0xf3, // RETURN
+ },
+ },
+ },
+ }, {
+ BlockOverrides: &BlockOverrides{
+ Number: (*hexutil.Big)(big.NewInt(12)),
+ },
+ Calls: []TransactionArgs{{
+ From: &accounts[1].addr,
+ Input: &hexutil.Bytes{
+ 0x43, // NUMBER
+ 0x60, 0x00, 0x52, // MSTORE offset 0
+ 0x60, 0x20, 0x60, 0x00, 0xf3,
+ },
+ }},
+ }},
+ want: []blockRes{{
+ Number: "0xb",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0xe891",
+ Miner: strings.ToLower(cac.String()),
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{{
+ ReturnValue: "0x000000000000000000000000000000000000000000000000000000000000000b",
+ GasUsed: "0xe891",
+ Logs: []log{},
+ Status: "0x1",
+ }},
+ }, {
+ Number: "0xc",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0xe891",
+ Miner: strings.ToLower(cac.String()),
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{{
+ ReturnValue: "0x000000000000000000000000000000000000000000000000000000000000000c",
+ GasUsed: "0xe891",
+ Logs: []log{},
+ Status: "0x1",
+ }},
+ }},
+ },
+ // Block numbers must be in order.
+ {
+ name: "block-number-order",
+ tag: latest,
+ blocks: []simBlock{{
+ BlockOverrides: &BlockOverrides{
+ Number: (*hexutil.Big)(big.NewInt(12)),
+ },
+ Calls: []TransactionArgs{{
+ From: &accounts[1].addr,
+ Input: &hexutil.Bytes{
+ 0x43, // NUMBER
+ 0x60, 0x00, 0x52, // MSTORE offset 0
+ 0x60, 0x20, 0x60, 0x00, 0xf3, // RETURN
+ },
+ }},
+ }, {
+ BlockOverrides: &BlockOverrides{
+ Number: (*hexutil.Big)(big.NewInt(11)),
+ },
+ Calls: []TransactionArgs{{
+ From: &accounts[0].addr,
+ Input: &hexutil.Bytes{
+ 0x43, // NUMBER
+ 0x60, 0x00, 0x52, // MSTORE offset 0
+ 0x60, 0x20, 0x60, 0x00, 0xf3, // RETURN
+ },
+ }},
+ }},
+ want: []blockRes{},
+ expectErr: &invalidBlockNumberError{message: "block numbers must be in order: 11 <= 12"},
+ },
+ // Test on the Solidity storage example. Set a value in one call, read it in the next.
+ {
+ name: "storage-contract",
+ tag: latest,
+ blocks: []simBlock{{
+ StateOverrides: &StateOverride{
+ randomAccounts[2].addr: OverrideAccount{
+ Code: hex2Bytes("608060405234801561001057600080fd5b50600436106100365760003560e01c80632e64cec11461003b5780636057361d14610059575b600080fd5b610043610075565b60405161005091906100d9565b60405180910390f35b610073600480360381019061006e919061009d565b61007e565b005b60008054905090565b8060008190555050565b60008135905061009781610103565b92915050565b6000602082840312156100b3576100b26100fe565b5b60006100c184828501610088565b91505092915050565b6100d3816100f4565b82525050565b60006020820190506100ee60008301846100ca565b92915050565b6000819050919050565b600080fd5b61010c816100f4565b811461011757600080fd5b5056fea2646970667358221220404e37f487a89a932dca5e77faaf6ca2de3b991f93d230604b1b8daaef64766264736f6c63430008070033"),
+ },
+ },
+ Calls: []TransactionArgs{{
+ // Set value to 5
+ From: &randomAccounts[0].addr,
+ To: &randomAccounts[2].addr,
+ Input: hex2Bytes("6057361d0000000000000000000000000000000000000000000000000000000000000005"),
+ }, {
+ // Read value
+ From: &randomAccounts[0].addr,
+ To: &randomAccounts[2].addr,
+ Input: hex2Bytes("2e64cec1"),
+ },
+ },
+ }},
+ want: []blockRes{{
+ Number: "0xb",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0x10683",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{{
+ ReturnValue: "0x",
+ GasUsed: "0xaacc",
+ Logs: []log{},
+ Status: "0x1",
+ }, {
+ ReturnValue: "0x0000000000000000000000000000000000000000000000000000000000000005",
+ GasUsed: "0x5bb7",
+ Logs: []log{},
+ Status: "0x1",
+ }},
+ }},
+ },
+ // Test logs output.
+ {
+ name: "logs",
+ tag: latest,
+ blocks: []simBlock{{
+ StateOverrides: &StateOverride{
+ randomAccounts[2].addr: OverrideAccount{
+ // Yul code:
+ // object "Test" {
+ // code {
+ // let hash:u256 := 0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ // log1(0, 0, hash)
+ // return (0, 0)
+ // }
+ // }
+ Code: hex2Bytes("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80600080a1600080f3"),
+ },
+ },
+ Calls: []TransactionArgs{{
+ From: &randomAccounts[0].addr,
+ To: &randomAccounts[2].addr,
+ }},
+ }},
+ want: []blockRes{{
+ Number: "0xb",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0x5508",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{{
+ ReturnValue: "0x",
+ Logs: []log{{
+ Address: randomAccounts[2].addr,
+ Topics: []common.Hash{common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")},
+ BlockNumber: hexutil.Uint64(11),
+ Data: hexutil.Bytes{},
+ }},
+ GasUsed: "0x5508",
+ Status: "0x1",
+ }},
+ }},
+ },
+ // Test ecrecover override
+ {
+ name: "ecrecover-override",
+ tag: latest,
+ blocks: []simBlock{{
+ StateOverrides: &StateOverride{
+ randomAccounts[2].addr: OverrideAccount{
+ // Yul code that returns ecrecover(0, 0, 0, 0).
+ // object "Test" {
+ // code {
+ // // Free memory pointer
+ // let free_ptr := mload(0x40)
+ //
+ // // Initialize inputs with zeros
+ // mstore(free_ptr, 0) // Hash
+ // mstore(add(free_ptr, 0x20), 0) // v
+ // mstore(add(free_ptr, 0x40), 0) // r
+ // mstore(add(free_ptr, 0x60), 0) // s
+ //
+ // // Call ecrecover precompile (at address 1) with all 0 inputs
+ // let success := staticcall(gas(), 1, free_ptr, 0x80, free_ptr, 0x20)
+ //
+ // // Check if the call was successful
+ // if eq(success, 0) {
+ // revert(0, 0)
+ // }
+ //
+ // // Return the recovered address
+ // return(free_ptr, 0x14)
+ // }
+ // }
+ Code: hex2Bytes("6040516000815260006020820152600060408201526000606082015260208160808360015afa60008103603157600080fd5b601482f3"),
+ },
+ common.BytesToAddress([]byte{0x01}): OverrideAccount{
+ // Yul code that returns the address of the caller.
+ // object "Test" {
+ // code {
+ // let c := caller()
+ // mstore(0, c)
+ // return(0xc, 0x14)
+ // }
+ // }
+ Code: hex2Bytes("33806000526014600cf3"),
+ },
+ },
+ Calls: []TransactionArgs{{
+ From: &randomAccounts[0].addr,
+ To: &randomAccounts[2].addr,
+ }},
+ }},
+ want: []blockRes{{
+ Number: "0xb",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0x52f6",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{{
+ // Caller is in this case the contract that invokes ecrecover.
+ ReturnValue: strings.ToLower(randomAccounts[2].addr.String()),
+ GasUsed: "0x52f6",
+ Logs: []log{},
+ Status: "0x1",
+ }},
+ }},
+ },
+ // Test moving the sha256 precompile.
+ {
+ name: "precompile-move",
+ tag: latest,
+ blocks: []simBlock{{
+ StateOverrides: &StateOverride{
+ sha256Address: OverrideAccount{
+ // Yul code that returns the calldata.
+ // object "Test" {
+ // code {
+ // let size := calldatasize() // Get the size of the calldata
+ //
+ // // Allocate memory to store the calldata
+ // let memPtr := msize()
+ //
+ // // Copy calldata to memory
+ // calldatacopy(memPtr, 0, size)
+ //
+ // // Return the calldata from memory
+ // return(memPtr, size)
+ // }
+ // }
+ Code: hex2Bytes("365981600082378181f3"),
+ MovePrecompileTo: &randomAccounts[2].addr,
+ },
+ },
+ Calls: []TransactionArgs{{
+ From: &randomAccounts[0].addr,
+ To: &randomAccounts[2].addr,
+ Input: hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
+ }, {
+ From: &randomAccounts[0].addr,
+ To: &sha256Address,
+ Input: hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
+ }},
+ }},
+ want: []blockRes{{
+ Number: "0xb",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0xa58c",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{{
+ ReturnValue: "0xec4916dd28fc4c10d78e287ca5d9cc51ee1ae73cbfde08c6b37324cbfaac8bc5",
+ GasUsed: "0x52dc",
+ Logs: []log{},
+ Status: "0x1",
+ }, {
+ ReturnValue: "0x0000000000000000000000000000000000000000000000000000000000000001",
+ GasUsed: "0x52b0",
+ Logs: []log{},
+ Status: "0x1",
+ }},
+ }},
+ },
+ // Test ether transfers.
+ {
+ name: "transfer-logs",
+ tag: latest,
+ blocks: []simBlock{{
+ StateOverrides: &StateOverride{
+ randomAccounts[0].addr: OverrideAccount{
+ Balance: newRPCBalance(big.NewInt(100)),
+ // Yul code that transfers 100 wei to address passed in calldata:
+ // object "Test" {
+ // code {
+ // let recipient := shr(96, calldataload(0))
+ // let value := 100
+ // let success := call(gas(), recipient, value, 0, 0, 0, 0)
+ // if eq(success, 0) {
+ // revert(0, 0)
+ // }
+ // }
+ // }
+ Code: hex2Bytes("60003560601c606460008060008084865af160008103601d57600080fd5b505050"),
+ },
+ },
+ Calls: []TransactionArgs{{
+ From: &accounts[0].addr,
+ To: &randomAccounts[0].addr,
+ Value: (*hexutil.Big)(big.NewInt(50)),
+ Input: hex2Bytes(strings.TrimPrefix(fixedAccount.addr.String(), "0x")),
+ }},
+ }},
+ includeTransfers: &includeTransfers,
+ want: []blockRes{{
+ Number: "0xb",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0x77dc",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{{
+ ReturnValue: "0x",
+ GasUsed: "0x77dc",
+ Logs: []log{{
+ Address: transferAddress,
+ Topics: []common.Hash{
+ transferTopic,
+ addressToHash(accounts[0].addr),
+ addressToHash(randomAccounts[0].addr),
+ },
+ Data: hexutil.Bytes(common.BigToHash(big.NewInt(50)).Bytes()),
+ BlockNumber: hexutil.Uint64(11),
+ }, {
+ Address: transferAddress,
+ Topics: []common.Hash{
+ transferTopic,
+ addressToHash(randomAccounts[0].addr),
+ addressToHash(fixedAccount.addr),
+ },
+ Data: hexutil.Bytes(common.BigToHash(big.NewInt(100)).Bytes()),
+ BlockNumber: hexutil.Uint64(11),
+ Index: hexutil.Uint(1),
+ }},
+ Status: "0x1",
+ }},
+ }},
+ },
+ // Tests selfdestructed contract.
+ {
+ name: "selfdestruct",
+ tag: latest,
+ blocks: []simBlock{{
+ Calls: []TransactionArgs{{
+ From: &accounts[0].addr,
+ To: &cac,
+ }, {
+ From: &accounts[0].addr,
+ // Check that cac is selfdestructed and balance transferred to dad.
+ // object "Test" {
+ // code {
+ // let cac := 0x0000000000000000000000000000000000000cac
+ // let dad := 0x0000000000000000000000000000000000000dad
+ // if gt(balance(cac), 0) {
+ // revert(0, 0)
+ // }
+ // if gt(extcodesize(cac), 0) {
+ // revert(0, 0)
+ // }
+ // if eq(balance(dad), 0) {
+ // revert(0, 0)
+ // }
+ // }
+ // }
+ Input: hex2Bytes("610cac610dad600082311115601357600080fd5b6000823b1115602157600080fd5b6000813103602e57600080fd5b5050"),
+ }},
+ }, {
+ Calls: []TransactionArgs{{
+ From: &accounts[0].addr,
+ Input: hex2Bytes("610cac610dad600082311115601357600080fd5b6000823b1115602157600080fd5b6000813103602e57600080fd5b5050"),
+ }},
+ }},
+ want: []blockRes{{
+ Number: "0xb",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0x1b83f",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{{
+ ReturnValue: "0x",
+ GasUsed: "0xd166",
+ Logs: []log{},
+ Status: "0x1",
+ }, {
+ ReturnValue: "0x",
+ GasUsed: "0xe6d9",
+ Logs: []log{},
+ Status: "0x1",
+ }},
+ }, {
+ Number: "0xc",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0xe6d9",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{{
+ ReturnValue: "0x",
+ GasUsed: "0xe6d9",
+ Logs: []log{},
+ Status: "0x1",
+ }},
+ }},
+ },
+ // Enable validation checks.
+ {
+ name: "validation-checks",
+ tag: latest,
+ blocks: []simBlock{{
+ Calls: []TransactionArgs{{
+ From: &accounts[2].addr,
+ To: &cac,
+ Nonce: newUint64(2),
+ }},
+ }},
+ validation: &validation,
+ want: nil,
+ expectErr: &invalidTxError{Message: fmt.Sprintf("err: nonce too high: address %s, tx: 2 state: 0 (supplied gas 4712388)", accounts[2].addr), Code: errCodeNonceTooHigh},
+ },
+ // Contract sends tx in validation mode.
+ {
+ name: "validation-checks-from-contract",
+ tag: latest,
+ blocks: []simBlock{{
+ StateOverrides: &StateOverride{
+ randomAccounts[2].addr: OverrideAccount{
+ Balance: newRPCBalance(big.NewInt(2098640803896784)),
+ Code: hex2Bytes("00"),
+ Nonce: newUint64(1),
+ },
+ },
+ Calls: []TransactionArgs{{
+ From: &randomAccounts[2].addr,
+ To: &cac,
+ Nonce: newUint64(1),
+ MaxFeePerGas: newInt(233138868),
+ MaxPriorityFeePerGas: newInt(1),
+ }},
+ }},
+ validation: &validation,
+ want: []blockRes{{
+ Number: "0xb",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0xd166",
+ Miner: coinbase,
+ BaseFeePerGas: "0xde56ab3",
+ Calls: []callRes{{
+ ReturnValue: "0x",
+ GasUsed: "0xd166",
+ Logs: []log{},
+ Status: "0x1",
+ }},
+ }},
+ },
+ // Successful validation
+ {
+ name: "validation-checks-success",
+ tag: latest,
+ blocks: []simBlock{{
+ BlockOverrides: &BlockOverrides{
+ BaseFeePerGas: (*hexutil.Big)(big.NewInt(1)),
+ },
+ StateOverrides: &StateOverride{
+ randomAccounts[0].addr: OverrideAccount{Balance: newRPCBalance(big.NewInt(10000000))},
+ },
+ Calls: []TransactionArgs{{
+ From: &randomAccounts[0].addr,
+ To: &randomAccounts[1].addr,
+ Value: (*hexutil.Big)(big.NewInt(1000)),
+ MaxFeePerGas: (*hexutil.Big)(big.NewInt(2)),
+ }},
+ }},
+ validation: &validation,
+ want: []blockRes{{
+ Number: "0xb",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0x5208",
+ Miner: coinbase,
+ BaseFeePerGas: "0x1",
+ Calls: []callRes{{
+ ReturnValue: "0x",
+ GasUsed: "0x5208",
+ Logs: []log{},
+ Status: "0x1",
+ }},
+ }},
+ },
+ // Clear storage.
+ {
+ name: "clear-storage",
+ tag: latest,
+ blocks: []simBlock{{
+ StateOverrides: &StateOverride{
+ randomAccounts[2].addr: {
+ Code: newBytes(genesis.Alloc[bab].Code),
+ StateDiff: map[common.Hash]common.Hash{
+ common.BigToHash(big.NewInt(1)): common.BigToHash(big.NewInt(2)),
+ common.BigToHash(big.NewInt(2)): common.BigToHash(big.NewInt(3)),
+ },
+ },
+ bab: {
+ State: map[common.Hash]common.Hash{
+ common.BigToHash(big.NewInt(1)): common.BigToHash(big.NewInt(1)),
+ },
+ },
+ },
+ Calls: []TransactionArgs{{
+ From: &accounts[0].addr,
+ To: &randomAccounts[2].addr,
+ }, {
+ From: &accounts[0].addr,
+ To: &bab,
+ }},
+ }, {
+ StateOverrides: &StateOverride{
+ randomAccounts[2].addr: {
+ State: map[common.Hash]common.Hash{
+ common.BigToHash(big.NewInt(1)): common.BigToHash(big.NewInt(5)),
+ },
+ },
+ },
+ Calls: []TransactionArgs{{
+ From: &accounts[0].addr,
+ To: &randomAccounts[2].addr,
+ }},
+ }},
+ want: []blockRes{{
+ Number: "0xb",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0xc542",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{{
+ ReturnValue: "0x0000000000000000000000000000000200000000000000000000000000000003",
+ GasUsed: "0x62a1",
+ Logs: []log{},
+ Status: "0x1",
+ }, {
+ ReturnValue: "0x0000000000000000000000000000000100000000000000000000000000000000",
+ GasUsed: "0x62a1",
+ Logs: []log{},
+ Status: "0x1",
+ }},
+ }, {
+ Number: "0xc",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0x62a1",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{{
+ ReturnValue: "0x0000000000000000000000000000000500000000000000000000000000000000",
+ GasUsed: "0x62a1",
+ Logs: []log{},
+ Status: "0x1",
+ }},
+ }},
+ },
+ {
+ name: "blockhash-opcode",
+ tag: latest,
+ blocks: []simBlock{{
+ BlockOverrides: &BlockOverrides{
+ Number: (*hexutil.Big)(big.NewInt(12)),
+ },
+ StateOverrides: &StateOverride{
+ randomAccounts[2].addr: {
+ Code: hex2Bytes("600035804060008103601057600080fd5b5050"),
+ },
+ },
+ Calls: []TransactionArgs{{
+ From: &accounts[0].addr,
+ To: &randomAccounts[2].addr,
+ // Phantom block after base.
+ Input: uint256ToBytes(uint256.NewInt(11)),
+ }, {
+ From: &accounts[0].addr,
+ To: &randomAccounts[2].addr,
+ // Canonical block.
+ Input: uint256ToBytes(uint256.NewInt(8)),
+ }, {
+ From: &accounts[0].addr,
+ To: &randomAccounts[2].addr,
+ // base block.
+ Input: uint256ToBytes(uint256.NewInt(10)),
+ }},
+ }, {
+ BlockOverrides: &BlockOverrides{
+ Number: (*hexutil.Big)(big.NewInt(16)),
+ },
+ Calls: []TransactionArgs{{
+ From: &accounts[0].addr,
+ To: &randomAccounts[2].addr,
+ // blocks[0]
+ Input: uint256ToBytes(uint256.NewInt(12)),
+ }, {
+ From: &accounts[0].addr,
+ To: &randomAccounts[2].addr,
+ // Phantom after blocks[0]
+ Input: uint256ToBytes(uint256.NewInt(13)),
+ }},
+ }},
+ want: []blockRes{{
+ Number: "0xb",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0x0",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{},
+ }, {
+ Number: "0xc",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0xf864",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{{
+ ReturnValue: "0x",
+ GasUsed: "0x52cc",
+ Logs: []log{},
+ Status: "0x1",
+ }, {
+ ReturnValue: "0x",
+ GasUsed: "0x52cc",
+ Logs: []log{},
+ Status: "0x1",
+ }, {
+ ReturnValue: "0x",
+ GasUsed: "0x52cc",
+ Logs: []log{},
+ Status: "0x1",
+ }},
+ }, {
+ Number: "0xd",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0x0",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{},
+ }, {
+ Number: "0xe",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0x0",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{},
+ }, {
+ Number: "0xf",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0x0",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{},
+ }, {
+ Number: "0x10",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0xa598",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{{
+ ReturnValue: "0x",
+ GasUsed: "0x52cc",
+ Logs: []log{},
+ Status: "0x1",
+ }, {
+ ReturnValue: "0x",
+ GasUsed: "0x52cc",
+ Logs: []log{},
+ Status: "0x1",
+ }},
+ }},
+ },
+ {
+ name: "basefee-non-validation",
+ tag: latest,
+ blocks: []simBlock{{
+ StateOverrides: &StateOverride{
+ randomAccounts[2].addr: {
+ // Yul code:
+ // object "Test" {
+ // code {
+ // // Get the gas price from the transaction
+ // let gasPrice := gasprice()
+ //
+ // // Get the base fee from the block
+ // let baseFee := basefee()
+ //
+ // // Store gasPrice and baseFee in memory
+ // mstore(0x0, gasPrice)
+ // mstore(0x20, baseFee)
+ //
+ // // Return the data
+ // return(0x0, 0x40)
+ // }
+ // }
+ Code: hex2Bytes("3a489060005260205260406000f3"),
+ },
+ },
+ Calls: []TransactionArgs{{
+ From: &accounts[0].addr,
+ To: &randomAccounts[2].addr,
+ // 0 gas price
+ }, {
+ From: &accounts[0].addr,
+ To: &randomAccounts[2].addr,
+ // non-zero gas price
+ MaxPriorityFeePerGas: newInt(1),
+ MaxFeePerGas: newInt(2),
+ },
+ },
+ }, {
+ BlockOverrides: &BlockOverrides{
+ BaseFeePerGas: (*hexutil.Big)(big.NewInt(1)),
+ },
+ Calls: []TransactionArgs{{
+ From: &accounts[0].addr,
+ To: &randomAccounts[2].addr,
+ // 0 gas price
+ }, {
+ From: &accounts[0].addr,
+ To: &randomAccounts[2].addr,
+ // non-zero gas price
+ MaxPriorityFeePerGas: newInt(1),
+ MaxFeePerGas: newInt(2),
+ },
+ },
+ }, {
+ // Base fee should be reset to zero even if it was set in the previous block.
+ Calls: []TransactionArgs{{
+ From: &accounts[0].addr,
+ To: &randomAccounts[2].addr,
+ }},
+ }},
+ want: []blockRes{{
+ Number: "0xb",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0xa44e",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{{
+ ReturnValue: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ GasUsed: "0x5227",
+ Logs: []log{},
+ Status: "0x1",
+ }, {
+ ReturnValue: "0x00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000",
+ GasUsed: "0x5227",
+ Logs: []log{},
+ Status: "0x1",
+ }},
+ }, {
+ Number: "0xc",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0xa44e",
+ Miner: coinbase,
+ BaseFeePerGas: "0x1",
+ Calls: []callRes{{
+ ReturnValue: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001",
+ GasUsed: "0x5227",
+ Logs: []log{},
+ Status: "0x1",
+ }, {
+ ReturnValue: "0x00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001",
+ GasUsed: "0x5227",
+ Logs: []log{},
+ Status: "0x1",
+ }},
+ }, {
+ Number: "0xd",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0x5227",
+ Miner: coinbase,
+ BaseFeePerGas: "0x0",
+ Calls: []callRes{{
+ ReturnValue: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ GasUsed: "0x5227",
+ Logs: []log{},
+ Status: "0x1",
+ }},
+ }},
+ }, {
+ name: "basefee-validation-mode",
+ tag: latest,
+ blocks: []simBlock{{
+ StateOverrides: &StateOverride{
+ randomAccounts[2].addr: {
+ // Yul code:
+ // object "Test" {
+ // code {
+ // // Get the gas price from the transaction
+ // let gasPrice := gasprice()
+ //
+ // // Get the base fee from the block
+ // let baseFee := basefee()
+ //
+ // // Store gasPrice and baseFee in memory
+ // mstore(0x0, gasPrice)
+ // mstore(0x20, baseFee)
+ //
+ // // Return the data
+ // return(0x0, 0x40)
+ // }
+ // }
+ Code: hex2Bytes("3a489060005260205260406000f3"),
+ },
+ },
+ Calls: []TransactionArgs{{
+ From: &accounts[0].addr,
+ To: &randomAccounts[2].addr,
+ MaxFeePerGas: newInt(233138868),
+ MaxPriorityFeePerGas: newInt(1),
+ }},
+ }},
+ validation: &validation,
+ want: []blockRes{{
+ Number: "0xb",
+ GasLimit: "0x47e7c4",
+ GasUsed: "0x5227",
+ Miner: coinbase,
+ BaseFeePerGas: "0xde56ab3",
+ Calls: []callRes{{
+ ReturnValue: "0x000000000000000000000000000000000000000000000000000000000de56ab4000000000000000000000000000000000000000000000000000000000de56ab3",
+ GasUsed: "0x5227",
+ Logs: []log{},
+ Status: "0x1",
+ }},
+ }},
+ },
+ }
+
+ for _, tc := range testSuite {
+ t.Run(tc.name, func(t *testing.T) {
+ opts := simOpts{BlockStateCalls: tc.blocks}
+ if tc.includeTransfers != nil && *tc.includeTransfers {
+ opts.TraceTransfers = true
+ }
+ if tc.validation != nil && *tc.validation {
+ opts.Validation = true
+ }
+ result, err := api.SimulateV1(context.Background(), opts, &tc.tag)
+ if tc.expectErr != nil {
+ if err == nil {
+ t.Fatalf("test %s: want error %v, have nothing", tc.name, tc.expectErr)
+ }
+ if !errors.Is(err, tc.expectErr) {
+ // Second try
+ if !reflect.DeepEqual(err, tc.expectErr) {
+ t.Errorf("test %s: error mismatch, want %v, have %v", tc.name, tc.expectErr, err)
+ }
+ }
+ return
+ }
+ if err != nil {
+ t.Fatalf("test %s: want no error, have %v", tc.name, err)
+ }
+ // Turn result into res-struct
+ var have []blockRes
+ resBytes, _ := json.Marshal(result)
+ if err := json.Unmarshal(resBytes, &have); err != nil {
+ t.Fatalf("failed to unmarshal result: %v", err)
+ }
+ if !reflect.DeepEqual(have, tc.want) {
+ t.Errorf("test %s, result mismatch, have\n%v\n, want\n%v\n", tc.name, have, tc.want)
+ }
+ })
+ }
+}
+
func TestSignTransaction(t *testing.T) {
t.Parallel()
// Initialize test accounts
@@ -1272,6 +2463,8 @@ func TestFillBlobTransaction(t *testing.T) {
}
for _, tc := range suite {
t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+
res, err := api.FillTransaction(context.Background(), tc.args)
if len(tc.err) > 0 {
if err == nil {
@@ -1343,16 +2536,40 @@ func newAccounts(n int) (accounts []account) {
return accounts
}
+func newTestAccount() account {
+ // testKey is a private key to use for funding a tester account.
+ key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ // testAddr is the Ethereum address of the tester account.
+ addr := crypto.PubkeyToAddress(key.PublicKey)
+ return account{key: key, addr: addr}
+}
+
func newRPCBalance(balance *big.Int) *hexutil.Big {
rpcBalance := (*hexutil.Big)(balance)
return rpcBalance
}
func hex2Bytes(str string) *hexutil.Bytes {
- rpcBytes := hexutil.Bytes(common.Hex2Bytes(str))
+ rpcBytes := hexutil.Bytes(common.FromHex(str))
return &rpcBytes
}
+func newUint64(v uint64) *hexutil.Uint64 {
+ rpcUint64 := hexutil.Uint64(v)
+ return &rpcUint64
+}
+
+func newBytes(b []byte) *hexutil.Bytes {
+ rpcBytes := hexutil.Bytes(b)
+ return &rpcBytes
+}
+
+func uint256ToBytes(v *uint256.Int) *hexutil.Bytes {
+ b := v.Bytes32()
+ r := hexutil.Bytes(b[:])
+ return &r
+}
+
func TestRPCMarshalBlock(t *testing.T) {
t.Parallel()
var (
@@ -2072,6 +3289,97 @@ func TestRPCGetBlockReceipts(t *testing.T) {
}
}
+type precompileContract struct{}
+
+func (p *precompileContract) RequiredGas(input []byte) uint64 { return 0 }
+
+func (p *precompileContract) Run(input []byte) ([]byte, error) { return nil, nil }
+
+func TestStateOverrideMovePrecompile(t *testing.T) {
+ db := state.NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil)
+ statedb, err := state.New(common.Hash{}, db)
+ if err != nil {
+ t.Fatalf("failed to create statedb: %v", err)
+ }
+ precompiles := map[common.Address]vm.PrecompiledContract{
+ common.BytesToAddress([]byte{0x1}): &precompileContract{},
+ common.BytesToAddress([]byte{0x2}): &precompileContract{},
+ }
+ bytes2Addr := func(b []byte) *common.Address {
+ a := common.BytesToAddress(b)
+ return &a
+ }
+ var testSuite = []struct {
+ overrides StateOverride
+ expectedPrecompiles map[common.Address]struct{}
+ fail bool
+ }{
+ {
+ overrides: StateOverride{
+ common.BytesToAddress([]byte{0x1}): {
+ Code: hex2Bytes("0xff"),
+ MovePrecompileTo: bytes2Addr([]byte{0x2}),
+ },
+ common.BytesToAddress([]byte{0x2}): {
+ Code: hex2Bytes("0x00"),
+ },
+ },
+ // 0x2 has already been touched by the moveTo.
+ fail: true,
+ }, {
+ overrides: StateOverride{
+ common.BytesToAddress([]byte{0x1}): {
+ Code: hex2Bytes("0xff"),
+ MovePrecompileTo: bytes2Addr([]byte{0xff}),
+ },
+ common.BytesToAddress([]byte{0x3}): {
+ Code: hex2Bytes("0x00"),
+ MovePrecompileTo: bytes2Addr([]byte{0xfe}),
+ },
+ },
+ // 0x3 is not a precompile.
+ fail: true,
+ }, {
+ overrides: StateOverride{
+ common.BytesToAddress([]byte{0x1}): {
+ Code: hex2Bytes("0xff"),
+ MovePrecompileTo: bytes2Addr([]byte{0xff}),
+ },
+ common.BytesToAddress([]byte{0x2}): {
+ Code: hex2Bytes("0x00"),
+ MovePrecompileTo: bytes2Addr([]byte{0xfe}),
+ },
+ },
+ expectedPrecompiles: map[common.Address]struct{}{common.BytesToAddress([]byte{0xfe}): {}, common.BytesToAddress([]byte{0xff}): {}},
+ },
+ }
+
+ for i, tt := range testSuite {
+ cpy := maps.Clone(precompiles)
+ // Apply overrides
+ err := tt.overrides.Apply(statedb, cpy)
+ if tt.fail {
+ if err == nil {
+ t.Errorf("test %d: want error, have nothing", i)
+ }
+ continue
+ }
+ if err != nil {
+ t.Errorf("test %d: want no error, have %v", i, err)
+ continue
+ }
+ // Precompile keys
+ if len(cpy) != len(tt.expectedPrecompiles) {
+ t.Errorf("test %d: precompile mismatch, want %d, have %d", i, len(tt.expectedPrecompiles), len(cpy))
+ }
+ for k := range tt.expectedPrecompiles {
+ if _, ok := cpy[k]; !ok {
+ t.Errorf("test %d: precompile not found: %s", i, k.String())
+ }
+ }
+ }
+}
+
func testRPCResponseWithFile(t *testing.T, testid int, result interface{}, rpc string, file string) {
data, err := json.MarshalIndent(result, "", " ")
if err != nil {
@@ -2088,3 +3396,7 @@ func testRPCResponseWithFile(t *testing.T, testid int, result interface{}, rpc s
}
require.JSONEqf(t, string(want), string(data), "test %d: json not match, want: %s, have: %s", testid, string(want), string(data))
}
+
+func addressToHash(a common.Address) common.Hash {
+ return common.BytesToHash(a.Bytes())
+}
diff --git a/internal/ethapi/errors.go b/internal/ethapi/errors.go
index b5e668a805..ae38061234 100644
--- a/internal/ethapi/errors.go
+++ b/internal/ethapi/errors.go
@@ -17,10 +17,12 @@
package ethapi
import (
+ "errors"
"fmt"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/vm"
)
@@ -76,3 +78,93 @@ func (e *TxIndexingError) ErrorCode() int {
// ErrorData returns the hex encoded revert reason.
func (e *TxIndexingError) ErrorData() interface{} { return "transaction indexing is in progress" }
+
+type callError struct {
+ Message string `json:"message"`
+ Code int `json:"code"`
+ Data string `json:"data,omitempty"`
+}
+
+type invalidTxError struct {
+ Message string `json:"message"`
+ Code int `json:"code"`
+}
+
+func (e *invalidTxError) Error() string { return e.Message }
+func (e *invalidTxError) ErrorCode() int { return e.Code }
+
+const (
+ errCodeNonceTooHigh = -38011
+ errCodeNonceTooLow = -38010
+ errCodeIntrinsicGas = -38013
+ errCodeInsufficientFunds = -38014
+ errCodeBlockGasLimitReached = -38015
+ errCodeBlockNumberInvalid = -38020
+ errCodeBlockTimestampInvalid = -38021
+ errCodeSenderIsNotEOA = -38024
+ errCodeMaxInitCodeSizeExceeded = -38025
+ errCodeClientLimitExceeded = -38026
+ errCodeInternalError = -32603
+ errCodeInvalidParams = -32602
+ errCodeReverted = -32000
+ errCodeVMError = -32015
+)
+
+func txValidationError(err error) *invalidTxError {
+ if err == nil {
+ return nil
+ }
+ switch {
+ case errors.Is(err, core.ErrNonceTooHigh):
+ return &invalidTxError{Message: err.Error(), Code: errCodeNonceTooHigh}
+ case errors.Is(err, core.ErrNonceTooLow):
+ return &invalidTxError{Message: err.Error(), Code: errCodeNonceTooLow}
+ case errors.Is(err, core.ErrSenderNoEOA):
+ return &invalidTxError{Message: err.Error(), Code: errCodeSenderIsNotEOA}
+ case errors.Is(err, core.ErrFeeCapVeryHigh):
+ return &invalidTxError{Message: err.Error(), Code: errCodeInvalidParams}
+ case errors.Is(err, core.ErrTipVeryHigh):
+ return &invalidTxError{Message: err.Error(), Code: errCodeInvalidParams}
+ case errors.Is(err, core.ErrTipAboveFeeCap):
+ return &invalidTxError{Message: err.Error(), Code: errCodeInvalidParams}
+ case errors.Is(err, core.ErrFeeCapTooLow):
+ return &invalidTxError{Message: err.Error(), Code: errCodeInvalidParams}
+ case errors.Is(err, core.ErrInsufficientFunds):
+ return &invalidTxError{Message: err.Error(), Code: errCodeInsufficientFunds}
+ case errors.Is(err, core.ErrIntrinsicGas):
+ return &invalidTxError{Message: err.Error(), Code: errCodeIntrinsicGas}
+ case errors.Is(err, core.ErrInsufficientFundsForTransfer):
+ return &invalidTxError{Message: err.Error(), Code: errCodeInsufficientFunds}
+ case errors.Is(err, core.ErrMaxInitCodeSizeExceeded):
+ return &invalidTxError{Message: err.Error(), Code: errCodeMaxInitCodeSizeExceeded}
+ }
+ return &invalidTxError{
+ Message: err.Error(),
+ Code: errCodeInternalError,
+ }
+}
+
+type invalidParamsError struct{ message string }
+
+func (e *invalidParamsError) Error() string { return e.message }
+func (e *invalidParamsError) ErrorCode() int { return errCodeInvalidParams }
+
+type clientLimitExceededError struct{ message string }
+
+func (e *clientLimitExceededError) Error() string { return e.message }
+func (e *clientLimitExceededError) ErrorCode() int { return errCodeClientLimitExceeded }
+
+type invalidBlockNumberError struct{ message string }
+
+func (e *invalidBlockNumberError) Error() string { return e.message }
+func (e *invalidBlockNumberError) ErrorCode() int { return errCodeBlockNumberInvalid }
+
+type invalidBlockTimestampError struct{ message string }
+
+func (e *invalidBlockTimestampError) Error() string { return e.message }
+func (e *invalidBlockTimestampError) ErrorCode() int { return errCodeBlockTimestampInvalid }
+
+type blockGasLimitReachedError struct{ message string }
+
+func (e *blockGasLimitReachedError) Error() string { return e.message }
+func (e *blockGasLimitReachedError) ErrorCode() int { return errCodeBlockGasLimitReached }
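For orientation, a minimal hedged sketch (written as it might appear in a test inside the ethapi package, not part of the change) of how txValidationError maps a wrapped core sentinel onto the codes defined above; only identifiers from this hunk plus core.ErrNonceTooLow are used:

package ethapi

import (
	"fmt"
	"testing"

	"github.com/ethereum/go-ethereum/core"
)

// Wrapped sentinels still match via errors.Is, so the documented
// JSON-RPC code (-38010 for a low nonce) is returned.
func TestTxValidationErrorCodeSketch(t *testing.T) {
	err := fmt.Errorf("tx rejected: %w", core.ErrNonceTooLow)
	if got := txValidationError(err).ErrorCode(); got != errCodeNonceTooLow {
		t.Fatalf("want %d, have %d", errCodeNonceTooLow, got)
	}
}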
diff --git a/internal/ethapi/logtracer.go b/internal/ethapi/logtracer.go
new file mode 100644
index 0000000000..456aa93736
--- /dev/null
+++ b/internal/ethapi/logtracer.go
@@ -0,0 +1,151 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethapi
+
+import (
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/tracing"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/vm"
+)
+
+var (
+ // keccak256("Transfer(address,address,uint256)")
+ transferTopic = common.HexToHash("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef")
+ // ERC-7528
+ transferAddress = common.HexToAddress("0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE")
+)
+
+// tracer is a simple tracer that records all logs and
+// ether transfers. Transfers are recorded as if they
+// were logs. Transfer events include:
+// - tx value
+// - call value
+// - self destructs
+//
+// The log format for a transfer is:
+// - address: 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE (the ERC-7528 ETH pseudo-address)
+// - data: Value
+// - topics:
+// - Transfer(address,address,uint256)
+// - Sender address
+// - Recipient address
+type tracer struct {
+ // logs keeps logs for all open call frames.
+ // This lets us clear logs for failed calls.
+ logs [][]*types.Log
+ count int
+ traceTransfers bool
+ blockNumber uint64
+ blockHash common.Hash
+ txHash common.Hash
+ txIdx uint
+}
+
+func newTracer(traceTransfers bool, blockNumber uint64, blockHash, txHash common.Hash, txIndex uint) *tracer {
+ return &tracer{
+ traceTransfers: traceTransfers,
+ blockNumber: blockNumber,
+ blockHash: blockHash,
+ txHash: txHash,
+ txIdx: txIndex,
+ }
+}
+
+func (t *tracer) Hooks() *tracing.Hooks {
+ return &tracing.Hooks{
+ OnEnter: t.onEnter,
+ OnExit: t.onExit,
+ OnLog: t.onLog,
+ }
+}
+
+func (t *tracer) onEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+ t.logs = append(t.logs, make([]*types.Log, 0))
+ if vm.OpCode(typ) != vm.DELEGATECALL && value != nil && value.Cmp(common.Big0) > 0 {
+ t.captureTransfer(from, to, value)
+ }
+}
+
+func (t *tracer) onExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
+ if depth == 0 {
+ t.onEnd(reverted)
+ return
+ }
+ size := len(t.logs)
+ if size <= 1 {
+ return
+ }
+ // pop call
+ call := t.logs[size-1]
+ t.logs = t.logs[:size-1]
+ size--
+
+ // Clear logs if call failed.
+ if !reverted {
+ t.logs[size-1] = append(t.logs[size-1], call...)
+ }
+}
+
+func (t *tracer) onEnd(reverted bool) {
+ if reverted {
+ t.logs[0] = nil
+ }
+}
+
+func (t *tracer) onLog(log *types.Log) {
+ t.captureLog(log.Address, log.Topics, log.Data)
+}
+
+func (t *tracer) captureLog(address common.Address, topics []common.Hash, data []byte) {
+ t.logs[len(t.logs)-1] = append(t.logs[len(t.logs)-1], &types.Log{
+ Address: address,
+ Topics: topics,
+ Data: data,
+ BlockNumber: t.blockNumber,
+ BlockHash: t.blockHash,
+ TxHash: t.txHash,
+ TxIndex: t.txIdx,
+ Index: uint(t.count),
+ })
+ t.count++
+}
+
+func (t *tracer) captureTransfer(from, to common.Address, value *big.Int) {
+ if !t.traceTransfers {
+ return
+ }
+ topics := []common.Hash{
+ transferTopic,
+ common.BytesToHash(from.Bytes()),
+ common.BytesToHash(to.Bytes()),
+ }
+ t.captureLog(transferAddress, topics, common.BigToHash(value).Bytes())
+}
+
+// reset prepares the tracer for the next transaction.
+func (t *tracer) reset(txHash common.Hash, txIdx uint) {
+ t.logs = nil
+ t.txHash = txHash
+ t.txIdx = txIdx
+}
+
+func (t *tracer) Logs() []*types.Log {
+ return t.logs[0]
+}
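To make the frame handling above concrete, a hedged sketch (not part of the change) that drives the tracer through a reverted inner call; it uses only the hooks defined in this file, and the addresses are made up:

package ethapi

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
)

// sketchRevertedFrame walks the tracer through an outer call that emits one
// log and an inner call that emits one log and then reverts; only the outer
// log survives.
func sketchRevertedFrame() []*types.Log {
	tr := newTracer(false, 1, common.Hash{}, common.Hash{}, 0)
	tr.onEnter(0, byte(vm.CALL), common.Address{0x01}, common.Address{0x02}, nil, 0, big.NewInt(0))
	tr.onLog(&types.Log{Address: common.Address{0x02}}) // outer frame log
	tr.onEnter(1, byte(vm.CALL), common.Address{0x02}, common.Address{0x03}, nil, 0, big.NewInt(0))
	tr.onLog(&types.Log{Address: common.Address{0x03}}) // inner frame log
	tr.onExit(1, nil, 0, nil, true)  // inner call reverted: its log is dropped
	tr.onExit(0, nil, 0, nil, false) // outer call succeeded
	return tr.Logs() // one log, from the outer frame
}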
diff --git a/internal/ethapi/simulate.go b/internal/ethapi/simulate.go
new file mode 100644
index 0000000000..bccf87f8ef
--- /dev/null
+++ b/internal/ethapi/simulate.go
@@ -0,0 +1,418 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethapi
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "maps"
+ "math/big"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/consensus/misc/eip1559"
+ "github.com/ethereum/go-ethereum/consensus/misc/eip4844"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/ethereum/go-ethereum/trie"
+)
+
+const (
+ // maxSimulateBlocks is the maximum number of blocks that can be simulated
+ // in a single request.
+ maxSimulateBlocks = 256
+
+ // timestampIncrement is the default increment between block timestamps.
+ timestampIncrement = 1
+)
+
+// simBlock is a batch of calls to be simulated sequentially.
+type simBlock struct {
+ BlockOverrides *BlockOverrides
+ StateOverrides *StateOverride
+ Calls []TransactionArgs
+}
+
+// simCallResult is the result of a simulated call.
+type simCallResult struct {
+ ReturnValue hexutil.Bytes `json:"returnData"`
+ Logs []*types.Log `json:"logs"`
+ GasUsed hexutil.Uint64 `json:"gasUsed"`
+ Status hexutil.Uint64 `json:"status"`
+ Error *callError `json:"error,omitempty"`
+}
+
+func (r *simCallResult) MarshalJSON() ([]byte, error) {
+ type callResultAlias simCallResult
+ // Marshal logs to be an empty array instead of nil when empty
+ if r.Logs == nil {
+ r.Logs = []*types.Log{}
+ }
+ return json.Marshal((*callResultAlias)(r))
+}
+
+// simOpts are the inputs to eth_simulateV1.
+type simOpts struct {
+ BlockStateCalls []simBlock
+ TraceTransfers bool
+ Validation bool
+ ReturnFullTransactions bool
+}
+
+// simulator is a stateful object that simulates a series of blocks.
+// It is not safe for concurrent use.
+type simulator struct {
+ b Backend
+ state *state.StateDB
+ base *types.Header
+ chainConfig *params.ChainConfig
+ gp *core.GasPool
+ traceTransfers bool
+ validate bool
+ fullTx bool
+}
+
+// execute runs the simulation of a series of blocks.
+func (sim *simulator) execute(ctx context.Context, blocks []simBlock) ([]map[string]interface{}, error) {
+ if err := ctx.Err(); err != nil {
+ return nil, err
+ }
+ var (
+ cancel context.CancelFunc
+ timeout = sim.b.RPCEVMTimeout()
+ )
+ if timeout > 0 {
+ ctx, cancel = context.WithTimeout(ctx, timeout)
+ } else {
+ ctx, cancel = context.WithCancel(ctx)
+ }
+ // Make sure the context is cancelled when the call has completed
+ // this makes sure resources are cleaned up.
+ defer cancel()
+
+ var err error
+ blocks, err = sim.sanitizeChain(blocks)
+ if err != nil {
+ return nil, err
+ }
+ // Prepare block headers with preliminary fields for the response.
+ headers, err := sim.makeHeaders(blocks)
+ if err != nil {
+ return nil, err
+ }
+ var (
+ results = make([]map[string]interface{}, len(blocks))
+ parent = sim.base
+ // Assume same total difficulty for all simulated blocks.
+ td = sim.b.GetTd(ctx, sim.base.Hash())
+ )
+ for bi, block := range blocks {
+ result, callResults, err := sim.processBlock(ctx, &block, headers[bi], parent, headers[:bi], timeout)
+ if err != nil {
+ return nil, err
+ }
+ enc := RPCMarshalBlock(result, true, sim.fullTx, sim.chainConfig)
+ enc["totalDifficulty"] = (*hexutil.Big)(td)
+ enc["calls"] = callResults
+ results[bi] = enc
+
+ parent = headers[bi]
+ }
+ return results, nil
+}
+
+func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header, parent *types.Header, headers []*types.Header, timeout time.Duration) (*types.Block, []simCallResult, error) {
+ // Set header fields that depend only on parent block.
+ // Parent hash is needed for evm.GetHashFn to work.
+ header.ParentHash = parent.Hash()
+ if sim.chainConfig.IsLondon(header.Number) {
+ // In non-validation mode the base fee is set to 0 if it was not overridden,
+ // because otherwise it creates an edge case in the EVM where gasPrice < baseFee.
+ // The base fee may also have been overridden explicitly.
+ if header.BaseFee == nil {
+ if sim.validate {
+ header.BaseFee = eip1559.CalcBaseFee(sim.chainConfig, parent)
+ } else {
+ header.BaseFee = big.NewInt(0)
+ }
+ }
+ }
+ if sim.chainConfig.IsCancun(header.Number, header.Time) {
+ var excess uint64
+ if sim.chainConfig.IsCancun(parent.Number, parent.Time) {
+ excess = eip4844.CalcExcessBlobGas(*parent.ExcessBlobGas, *parent.BlobGasUsed)
+ } else {
+ excess = eip4844.CalcExcessBlobGas(0, 0)
+ }
+ header.ExcessBlobGas = &excess
+ }
+ blockContext := core.NewEVMBlockContext(header, sim.newSimulatedChainContext(ctx, headers), nil)
+ if block.BlockOverrides.BlobBaseFee != nil {
+ blockContext.BlobBaseFee = block.BlockOverrides.BlobBaseFee.ToInt()
+ }
+ precompiles := sim.activePrecompiles(sim.base)
+ // State overrides are applied prior to execution of a block
+ if err := block.StateOverrides.Apply(sim.state, precompiles); err != nil {
+ return nil, nil, err
+ }
+ var (
+ gasUsed, blobGasUsed uint64
+ txes = make([]*types.Transaction, len(block.Calls))
+ callResults = make([]simCallResult, len(block.Calls))
+ receipts = make([]*types.Receipt, len(block.Calls))
+ // Block hash will be repaired after execution.
+ tracer = newTracer(sim.traceTransfers, blockContext.BlockNumber.Uint64(), common.Hash{}, common.Hash{}, 0)
+ vmConfig = &vm.Config{
+ NoBaseFee: !sim.validate,
+ Tracer: tracer.Hooks(),
+ }
+ evm = vm.NewEVM(blockContext, vm.TxContext{GasPrice: new(big.Int)}, sim.state, sim.chainConfig, *vmConfig)
+ )
+ sim.state.SetLogger(tracer.Hooks())
+ // It is possible to override precompiles with EVM bytecode, or
+ // move them to another address.
+ if precompiles != nil {
+ evm.SetPrecompiles(precompiles)
+ }
+ for i, call := range block.Calls {
+ if err := ctx.Err(); err != nil {
+ return nil, nil, err
+ }
+ if err := sim.sanitizeCall(&call, sim.state, header, blockContext, &gasUsed); err != nil {
+ return nil, nil, err
+ }
+ tx := call.ToTransaction(types.DynamicFeeTxType)
+ txes[i] = tx
+ tracer.reset(tx.Hash(), uint(i))
+ // EOA check is always skipped, even in validation mode.
+ msg := call.ToMessage(header.BaseFee, !sim.validate, true)
+ evm.Reset(core.NewEVMTxContext(msg), sim.state)
+ result, err := applyMessageWithEVM(ctx, evm, msg, sim.state, timeout, sim.gp)
+ if err != nil {
+ txErr := txValidationError(err)
+ return nil, nil, txErr
+ }
+ // Update the state with pending changes.
+ var root []byte
+ if sim.chainConfig.IsByzantium(blockContext.BlockNumber) {
+ sim.state.Finalise(true)
+ } else {
+ root = sim.state.IntermediateRoot(sim.chainConfig.IsEIP158(blockContext.BlockNumber)).Bytes()
+ }
+ gasUsed += result.UsedGas
+ receipts[i] = core.MakeReceipt(evm, result, sim.state, blockContext.BlockNumber, common.Hash{}, tx, gasUsed, root)
+ blobGasUsed += receipts[i].BlobGasUsed
+ logs := tracer.Logs()
+ callRes := simCallResult{ReturnValue: result.Return(), Logs: logs, GasUsed: hexutil.Uint64(result.UsedGas)}
+ if result.Failed() {
+ callRes.Status = hexutil.Uint64(types.ReceiptStatusFailed)
+ if errors.Is(result.Err, vm.ErrExecutionReverted) {
+ // If the result contains a revert reason, try to unpack it.
+ revertErr := newRevertError(result.Revert())
+ callRes.Error = &callError{Message: revertErr.Error(), Code: errCodeReverted, Data: revertErr.ErrorData().(string)}
+ } else {
+ callRes.Error = &callError{Message: result.Err.Error(), Code: errCodeVMError}
+ }
+ } else {
+ callRes.Status = hexutil.Uint64(types.ReceiptStatusSuccessful)
+ }
+ callResults[i] = callRes
+ }
+ header.Root = sim.state.IntermediateRoot(true)
+ header.GasUsed = gasUsed
+ if sim.chainConfig.IsCancun(header.Number, header.Time) {
+ header.BlobGasUsed = &blobGasUsed
+ }
+ var withdrawals types.Withdrawals
+ if sim.chainConfig.IsShanghai(header.Number, header.Time) {
+ withdrawals = make([]*types.Withdrawal, 0)
+ }
+ b := types.NewBlock(header, &types.Body{Transactions: txes, Withdrawals: withdrawals}, receipts, trie.NewStackTrie(nil))
+ repairLogs(callResults, b.Hash())
+ return b, callResults, nil
+}
+
+// repairLogs updates the block hash in the logs present in the result of
+// a simulated block. This is needed because the block hash is not yet known
+// when the logs are collected during execution.
+func repairLogs(calls []simCallResult, hash common.Hash) {
+ for i := range calls {
+ for j := range calls[i].Logs {
+ calls[i].Logs[j].BlockHash = hash
+ }
+ }
+}
+
+func (sim *simulator) sanitizeCall(call *TransactionArgs, state *state.StateDB, header *types.Header, blockContext vm.BlockContext, gasUsed *uint64) error {
+ if call.Nonce == nil {
+ nonce := state.GetNonce(call.from())
+ call.Nonce = (*hexutil.Uint64)(&nonce)
+ }
+ // Let the call run wild unless explicitly specified.
+ if call.Gas == nil {
+ remaining := blockContext.GasLimit - *gasUsed
+ call.Gas = (*hexutil.Uint64)(&remaining)
+ }
+ if *gasUsed+uint64(*call.Gas) > blockContext.GasLimit {
+ return &blockGasLimitReachedError{fmt.Sprintf("block gas limit reached: %d >= %d", *gasUsed+uint64(*call.Gas), blockContext.GasLimit)}
+ }
+ if err := call.CallDefaults(sim.gp.Gas(), header.BaseFee, sim.chainConfig.ChainID); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (sim *simulator) activePrecompiles(base *types.Header) vm.PrecompiledContracts {
+ var (
+ isMerge = (base.Difficulty.Sign() == 0)
+ rules = sim.chainConfig.Rules(base.Number, isMerge, base.Time)
+ )
+ return maps.Clone(vm.ActivePrecompiledContracts(rules))
+}
+
+// sanitizeChain checks the chain integrity. Specifically it checks that
+// block numbers and timestamps are strictly increasing, setting default values
+// when necessary. Gaps in block numbers are filled with empty blocks.
+// Note: It modifies the block's override object.
+func (sim *simulator) sanitizeChain(blocks []simBlock) ([]simBlock, error) {
+ var (
+ res = make([]simBlock, 0, len(blocks))
+ base = sim.base
+ prevNumber = base.Number
+ prevTimestamp = base.Time
+ )
+ for _, block := range blocks {
+ if block.BlockOverrides == nil {
+ block.BlockOverrides = new(BlockOverrides)
+ }
+ if block.BlockOverrides.Number == nil {
+ n := new(big.Int).Add(prevNumber, big.NewInt(1))
+ block.BlockOverrides.Number = (*hexutil.Big)(n)
+ }
+ diff := new(big.Int).Sub(block.BlockOverrides.Number.ToInt(), prevNumber)
+ if diff.Cmp(common.Big0) <= 0 {
+ return nil, &invalidBlockNumberError{fmt.Sprintf("block numbers must be in order: %d <= %d", block.BlockOverrides.Number.ToInt().Uint64(), prevNumber)}
+ }
+ if total := new(big.Int).Sub(block.BlockOverrides.Number.ToInt(), base.Number); total.Cmp(big.NewInt(maxSimulateBlocks)) > 0 {
+ return nil, &clientLimitExceededError{message: "too many blocks"}
+ }
+ if diff.Cmp(big.NewInt(1)) > 0 {
+ // Fill the gap with empty blocks.
+ gap := new(big.Int).Sub(diff, big.NewInt(1))
+ // Assign block number to the empty blocks.
+ for i := uint64(0); i < gap.Uint64(); i++ {
+ n := new(big.Int).Add(prevNumber, big.NewInt(int64(i+1)))
+ t := prevTimestamp + timestampIncrement
+ b := simBlock{BlockOverrides: &BlockOverrides{Number: (*hexutil.Big)(n), Time: (*hexutil.Uint64)(&t)}}
+ prevTimestamp = t
+ res = append(res, b)
+ }
+ }
+ // Only append block after filling a potential gap.
+ prevNumber = block.BlockOverrides.Number.ToInt()
+ var t uint64
+ if block.BlockOverrides.Time == nil {
+ t = prevTimestamp + timestampIncrement
+ block.BlockOverrides.Time = (*hexutil.Uint64)(&t)
+ } else {
+ t = uint64(*block.BlockOverrides.Time)
+ if t <= prevTimestamp {
+ return nil, &invalidBlockTimestampError{fmt.Sprintf("block timestamps must be in order: %d <= %d", t, prevTimestamp)}
+ }
+ }
+ prevTimestamp = t
+ res = append(res, block)
+ }
+ return res, nil
+}
+
+// makeHeaders creates a header object with preliminary fields for each simulated block.
+// Some fields have to be filled post-execution.
+// It assumes blocks are in order and numbers have been validated.
+func (sim *simulator) makeHeaders(blocks []simBlock) ([]*types.Header, error) {
+ var (
+ res = make([]*types.Header, len(blocks))
+ base = sim.base
+ header = base
+ )
+ for bi, block := range blocks {
+ if block.BlockOverrides == nil || block.BlockOverrides.Number == nil {
+ return nil, errors.New("empty block number")
+ }
+ overrides := block.BlockOverrides
+
+ var withdrawalsHash *common.Hash
+ if sim.chainConfig.IsShanghai(overrides.Number.ToInt(), (uint64)(*overrides.Time)) {
+ withdrawalsHash = &types.EmptyWithdrawalsHash
+ }
+ var parentBeaconRoot *common.Hash
+ if sim.chainConfig.IsCancun(overrides.Number.ToInt(), (uint64)(*overrides.Time)) {
+ parentBeaconRoot = &common.Hash{}
+ }
+ header = overrides.MakeHeader(&types.Header{
+ UncleHash: types.EmptyUncleHash,
+ ReceiptHash: types.EmptyReceiptsHash,
+ TxHash: types.EmptyTxsHash,
+ Coinbase: header.Coinbase,
+ Difficulty: header.Difficulty,
+ GasLimit: header.GasLimit,
+ WithdrawalsHash: withdrawalsHash,
+ ParentBeaconRoot: parentBeaconRoot,
+ })
+ res[bi] = header
+ }
+ return res, nil
+}
+
+func (sim *simulator) newSimulatedChainContext(ctx context.Context, headers []*types.Header) *ChainContext {
+ return NewChainContext(ctx, &simBackend{base: sim.base, b: sim.b, headers: headers})
+}
+
+type simBackend struct {
+ b ChainContextBackend
+ base *types.Header
+ headers []*types.Header
+}
+
+func (b *simBackend) Engine() consensus.Engine {
+ return b.b.Engine()
+}
+
+func (b *simBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) {
+ if uint64(number) == b.base.Number.Uint64() {
+ return b.base, nil
+ }
+ if uint64(number) < b.base.Number.Uint64() {
+ // Resolve canonical header.
+ return b.b.HeaderByNumber(ctx, number)
+ }
+ // Simulated block.
+ for _, header := range b.headers {
+ if header.Number.Uint64() == uint64(number) {
+ return header, nil
+ }
+ }
+ return nil, errors.New("header not found")
+}
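As a hedged illustration of the input shape consumed by execute (the addresses and values below are made up, and the TransactionArgs field names are assumed from the surrounding package):

package ethapi

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
)

// sketchSimOpts assembles a two-block simulation: the first block pins its
// number and carries one value transfer, the second is left empty and gets
// number+1 / time+1 filled in by sanitizeChain.
func sketchSimOpts() simOpts {
	var (
		sender    = common.HexToAddress("0xc0ffee0000000000000000000000000000000000") // hypothetical
		recipient = common.HexToAddress("0xdeadbeef00000000000000000000000000000000") // hypothetical
	)
	return simOpts{
		BlockStateCalls: []simBlock{
			{
				BlockOverrides: &BlockOverrides{Number: (*hexutil.Big)(big.NewInt(100))},
				Calls: []TransactionArgs{{
					From:  &sender,
					To:    &recipient,
					Value: (*hexutil.Big)(big.NewInt(1)),
				}},
			},
			{}, // defaults filled in by sanitizeChain
		},
		TraceTransfers: true,
	}
}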
diff --git a/internal/ethapi/simulate_test.go b/internal/ethapi/simulate_test.go
new file mode 100644
index 0000000000..37883924ac
--- /dev/null
+++ b/internal/ethapi/simulate_test.go
@@ -0,0 +1,120 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethapi
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+func TestSimulateSanitizeBlockOrder(t *testing.T) {
+ type result struct {
+ number uint64
+ timestamp uint64
+ }
+ for i, tc := range []struct {
+ baseNumber int
+ baseTimestamp uint64
+ blocks []simBlock
+ expected []result
+ err string
+ }{
+ {
+ baseNumber: 10,
+ baseTimestamp: 50,
+ blocks: []simBlock{{}, {}, {}},
+ expected: []result{{number: 11, timestamp: 51}, {number: 12, timestamp: 52}, {number: 13, timestamp: 53}},
+ },
+ {
+ baseNumber: 10,
+ baseTimestamp: 50,
+ blocks: []simBlock{{BlockOverrides: &BlockOverrides{Number: newInt(13), Time: newUint64(70)}}, {}},
+ expected: []result{{number: 11, timestamp: 51}, {number: 12, timestamp: 52}, {number: 13, timestamp: 70}, {number: 14, timestamp: 71}},
+ },
+ {
+ baseNumber: 10,
+ baseTimestamp: 50,
+ blocks: []simBlock{{BlockOverrides: &BlockOverrides{Number: newInt(11)}}, {BlockOverrides: &BlockOverrides{Number: newInt(14)}}, {}},
+ expected: []result{{number: 11, timestamp: 51}, {number: 12, timestamp: 52}, {number: 13, timestamp: 53}, {number: 14, timestamp: 54}, {number: 15, timestamp: 55}},
+ },
+ {
+ baseNumber: 10,
+ baseTimestamp: 50,
+ blocks: []simBlock{{BlockOverrides: &BlockOverrides{Number: newInt(13)}}, {BlockOverrides: &BlockOverrides{Number: newInt(12)}}},
+ err: "block numbers must be in order: 12 <= 13",
+ },
+ {
+ baseNumber: 10,
+ baseTimestamp: 50,
+ blocks: []simBlock{{BlockOverrides: &BlockOverrides{Number: newInt(13), Time: newUint64(52)}}},
+ err: "block timestamps must be in order: 52 <= 52",
+ },
+ {
+ baseNumber: 10,
+ baseTimestamp: 50,
+ blocks: []simBlock{{BlockOverrides: &BlockOverrides{Number: newInt(11), Time: newUint64(60)}}, {BlockOverrides: &BlockOverrides{Number: newInt(12), Time: newUint64(55)}}},
+ err: "block timestamps must be in order: 55 <= 60",
+ },
+ {
+ baseNumber: 10,
+ baseTimestamp: 50,
+ blocks: []simBlock{{BlockOverrides: &BlockOverrides{Number: newInt(11), Time: newUint64(60)}}, {BlockOverrides: &BlockOverrides{Number: newInt(13), Time: newUint64(61)}}},
+ err: "block timestamps must be in order: 61 <= 61",
+ },
+ } {
+ sim := &simulator{base: &types.Header{Number: big.NewInt(int64(tc.baseNumber)), Time: tc.baseTimestamp}}
+ res, err := sim.sanitizeChain(tc.blocks)
+ if err != nil {
+ if err.Error() == tc.err {
+ continue
+ } else {
+ t.Fatalf("testcase %d: error mismatch. Want '%s', have '%s'", i, tc.err, err.Error())
+ }
+ }
+ if err == nil && tc.err != "" {
+ t.Fatalf("testcase %d: expected err", i)
+ }
+ if len(res) != len(tc.expected) {
+ t.Errorf("testcase %d: mismatch number of blocks. Want %d, have %d", i, len(tc.expected), len(res))
+ }
+ for bi, b := range res {
+ if b.BlockOverrides == nil {
+ t.Fatalf("testcase %d: block overrides nil", i)
+ }
+ if b.BlockOverrides.Number == nil {
+ t.Fatalf("testcase %d: block number not set", i)
+ }
+ if b.BlockOverrides.Time == nil {
+ t.Fatalf("testcase %d: block time not set", i)
+ }
+ if uint64(*b.BlockOverrides.Time) != tc.expected[bi].timestamp {
+ t.Errorf("testcase %d: block timestamp mismatch. Want %d, have %d", i, tc.expected[bi].timestamp, uint64(*b.BlockOverrides.Time))
+ }
+ have := b.BlockOverrides.Number.ToInt().Uint64()
+ if have != tc.expected[bi].number {
+ t.Errorf("testcase %d: block number mismatch. Want %d, have %d", i, tc.expected[bi].number, have)
+ }
+ }
+ }
+}
+
+func newInt(n int64) *hexutil.Big {
+ return (*hexutil.Big)(big.NewInt(n))
+}
diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go
index f199f9d912..f9835a96da 100644
--- a/internal/ethapi/transaction_args.go
+++ b/internal/ethapi/transaction_args.go
@@ -421,7 +421,7 @@ func (args *TransactionArgs) CallDefaults(globalGasCap uint64, baseFee *big.Int,
// core evm. This method is used in calls and traces that do not require a real
// live transaction.
// Assumes that fields are not nil, i.e. setDefaults or CallDefaults has been called.
-func (args *TransactionArgs) ToMessage(baseFee *big.Int) *core.Message {
+func (args *TransactionArgs) ToMessage(baseFee *big.Int, skipNonceCheck, skipEoACheck bool) *core.Message {
var (
gasPrice *big.Int
gasFeeCap *big.Int
@@ -452,27 +452,42 @@ func (args *TransactionArgs) ToMessage(baseFee *big.Int) *core.Message {
accessList = *args.AccessList
}
return &core.Message{
- From: args.from(),
- To: args.To,
- Value: (*big.Int)(args.Value),
- GasLimit: uint64(*args.Gas),
- GasPrice: gasPrice,
- GasFeeCap: gasFeeCap,
- GasTipCap: gasTipCap,
- Data: args.data(),
- AccessList: accessList,
- BlobGasFeeCap: (*big.Int)(args.BlobFeeCap),
- BlobHashes: args.BlobHashes,
- SkipAccountChecks: true,
+ From: args.from(),
+ To: args.To,
+ Value: (*big.Int)(args.Value),
+ Nonce: uint64(*args.Nonce),
+ GasLimit: uint64(*args.Gas),
+ GasPrice: gasPrice,
+ GasFeeCap: gasFeeCap,
+ GasTipCap: gasTipCap,
+ Data: args.data(),
+ AccessList: accessList,
+ BlobGasFeeCap: (*big.Int)(args.BlobFeeCap),
+ BlobHashes: args.BlobHashes,
+ SkipNonceChecks: skipNonceCheck,
+ SkipFromEOACheck: skipEoACheck,
}
}
// ToTransaction converts the arguments to a transaction.
// This assumes that setDefaults has been called.
-func (args *TransactionArgs) ToTransaction() *types.Transaction {
- var data types.TxData
+func (args *TransactionArgs) ToTransaction(defaultType int) *types.Transaction {
+ usedType := types.LegacyTxType
switch {
- case args.BlobHashes != nil:
+ case args.BlobHashes != nil || defaultType == types.BlobTxType:
+ usedType = types.BlobTxType
+ case args.MaxFeePerGas != nil || defaultType == types.DynamicFeeTxType:
+ usedType = types.DynamicFeeTxType
+ case args.AccessList != nil || defaultType == types.AccessListTxType:
+ usedType = types.AccessListTxType
+ }
+ // Make it possible to default to a newer tx type, but use legacy if a gas price is provided.
+ if args.GasPrice != nil {
+ usedType = types.LegacyTxType
+ }
+ var data types.TxData
+ switch usedType {
+ case types.BlobTxType:
al := types.AccessList{}
if args.AccessList != nil {
al = *args.AccessList
@@ -498,7 +513,7 @@ func (args *TransactionArgs) ToTransaction() *types.Transaction {
}
}
- case args.MaxFeePerGas != nil:
+ case types.DynamicFeeTxType:
al := types.AccessList{}
if args.AccessList != nil {
al = *args.AccessList
@@ -515,7 +530,7 @@ func (args *TransactionArgs) ToTransaction() *types.Transaction {
AccessList: al,
}
- case args.AccessList != nil:
+ case types.AccessListTxType:
data = &types.AccessListTx{
To: args.To,
ChainID: (*big.Int)(args.ChainID),
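A hedged sketch of the selection rule above (values are arbitrary; the usual TransactionArgs field names are assumed): requesting a dynamic-fee default yields a dynamic-fee tx, while an explicit gas price forces legacy.

package ethapi

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
)

// sketchTxTypeSelection returns the tx types produced with and without an
// explicit gas price when DynamicFeeTxType is requested as the default.
func sketchTxTypeSelection() (byte, byte) {
	gas := hexutil.Uint64(21000)
	nonce := hexutil.Uint64(0)

	plain := TransactionArgs{Gas: &gas, Nonce: &nonce}
	legacy := TransactionArgs{Gas: &gas, Nonce: &nonce, GasPrice: (*hexutil.Big)(big.NewInt(1))}

	a := plain.ToTransaction(types.DynamicFeeTxType).Type()  // types.DynamicFeeTxType
	b := legacy.ToTransaction(types.DynamicFeeTxType).Type() // types.LegacyTxType
	return a, b
}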
diff --git a/internal/ethapi/transaction_args_test.go b/internal/ethapi/transaction_args_test.go
index 6750fc07a9..5317828173 100644
--- a/internal/ethapi/transaction_args_test.go
+++ b/internal/ethapi/transaction_args_test.go
@@ -42,6 +42,8 @@ import (
// TestSetFeeDefaults tests the logic for filling in default fee values works as expected.
func TestSetFeeDefaults(t *testing.T) {
+ t.Parallel()
+
type test struct {
name string
fork string // options: legacy, london, cancun
diff --git a/internal/flags/flags_test.go b/internal/flags/flags_test.go
index 681586b46c..cfe16b340e 100644
--- a/internal/flags/flags_test.go
+++ b/internal/flags/flags_test.go
@@ -24,6 +24,8 @@ import (
)
func TestPathExpansion(t *testing.T) {
+ t.Parallel()
+
user, _ := user.Current()
var tests map[string]string
@@ -53,9 +55,13 @@ func TestPathExpansion(t *testing.T) {
os.Setenv(`DDDXXX`, `/tmp`)
for test, expected := range tests {
- got := expandPath(test)
- if got != expected {
- t.Errorf(`test %s, got %s, expected %s\n`, test, got, expected)
- }
+ t.Run(test, func(t *testing.T) {
+ t.Parallel()
+
+ got := expandPath(test)
+ if got != expected {
+ t.Errorf(`test %s, got %s, expected %s\n`, test, got, expected)
+ }
+ })
}
}
diff --git a/internal/guide/guide_test.go b/internal/guide/guide_test.go
index f682daac91..573898d7d0 100644
--- a/internal/guide/guide_test.go
+++ b/internal/guide/guide_test.go
@@ -35,6 +35,8 @@ import (
// Tests that the account management snippets work correctly.
func TestAccountManagement(t *testing.T) {
+ t.Parallel()
+
// Create a temporary folder to work with
workdir := t.TempDir()
diff --git a/internal/jsre/completion_test.go b/internal/jsre/completion_test.go
index 953bc5026d..8fbddbc299 100644
--- a/internal/jsre/completion_test.go
+++ b/internal/jsre/completion_test.go
@@ -23,6 +23,8 @@ import (
)
func TestCompleteKeywords(t *testing.T) {
+ t.Parallel()
+
re := New("", os.Stdout)
re.Run(`
function theClass() {
@@ -85,9 +87,13 @@ func TestCompleteKeywords(t *testing.T) {
},
}
for _, test := range tests {
- cs := re.CompleteKeywords(test.input)
- if !reflect.DeepEqual(cs, test.want) {
- t.Errorf("wrong completions for %q\ngot %v\nwant %v", test.input, cs, test.want)
- }
+ t.Run(test.input, func(t *testing.T) {
+ t.Parallel()
+
+ cs := re.CompleteKeywords(test.input)
+ if !reflect.DeepEqual(cs, test.want) {
+ t.Errorf("wrong completions for %q\ngot %v\nwant %v", test.input, cs, test.want)
+ }
+ })
}
}
diff --git a/internal/jsre/jsre_test.go b/internal/jsre/jsre_test.go
index 18ef39e2f4..a812d6116d 100644
--- a/internal/jsre/jsre_test.go
+++ b/internal/jsre/jsre_test.go
@@ -51,6 +51,8 @@ func newWithTestJS(t *testing.T, testjs string) *JSRE {
}
func TestExec(t *testing.T) {
+ t.Parallel()
+
jsre := newWithTestJS(t, `msg = "testMsg"`)
err := jsre.Exec("test.js")
@@ -73,6 +75,8 @@ func TestExec(t *testing.T) {
}
func TestNatto(t *testing.T) {
+ t.Parallel()
+
jsre := newWithTestJS(t, `setTimeout(function(){msg = "testMsg"}, 1);`)
err := jsre.Exec("test.js")
@@ -96,6 +100,8 @@ func TestNatto(t *testing.T) {
}
func TestBind(t *testing.T) {
+ t.Parallel()
+
jsre := New("", os.Stdout)
defer jsre.Stop(false)
@@ -108,6 +114,8 @@ func TestBind(t *testing.T) {
}
func TestLoadScript(t *testing.T) {
+ t.Parallel()
+
jsre := newWithTestJS(t, `msg = "testMsg"`)
_, err := jsre.Run(`loadScript("test.js")`)
diff --git a/internal/utesting/utesting_test.go b/internal/utesting/utesting_test.go
index 31c7911c52..526d36bab1 100644
--- a/internal/utesting/utesting_test.go
+++ b/internal/utesting/utesting_test.go
@@ -24,6 +24,8 @@ import (
)
func TestTest(t *testing.T) {
+ t.Parallel()
+
tests := []Test{
{
Name: "successful test",
@@ -90,6 +92,8 @@ var outputTests = []Test{
}
func TestOutput(t *testing.T) {
+ t.Parallel()
+
var buf bytes.Buffer
RunTests(outputTests, &buf)
@@ -116,6 +120,8 @@ $`[1:])
}
func TestOutputTAP(t *testing.T) {
+ t.Parallel()
+
var buf bytes.Buffer
RunTAP(outputTests, &buf)
diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go
index 4a1a37d722..4a53e2c829 100644
--- a/internal/web3ext/web3ext.go
+++ b/internal/web3ext/web3ext.go
@@ -616,6 +616,12 @@ web3._extend({
params: 4,
inputFormatter: [web3._extend.formatters.inputCallFormatter, web3._extend.formatters.inputDefaultBlockNumberFormatter, null, null],
}),
+ new web3._extend.Method({
+ name: 'simulateV1',
+ call: 'eth_simulateV1',
+ params: 2,
+ inputFormatter: [null, web3._extend.formatters.inputDefaultBlockNumberFormatter],
+ }),
new web3._extend.Method({
name: 'getBlockReceipts',
call: 'eth_getBlockReceipts',
diff --git a/miner/miner_test.go b/miner/miner_test.go
index da133ad8d0..b92febdd12 100644
--- a/miner/miner_test.go
+++ b/miner/miner_test.go
@@ -152,11 +152,11 @@ func createMiner(t *testing.T) *Miner {
// Create consensus engine
engine := clique.New(chainConfig.Clique, chainDB)
// Create Ethereum backend
- bc, err := core.NewBlockChain(chainDB, nil, genesis, nil, engine, vm.Config{}, nil, nil)
+ bc, err := core.NewBlockChain(chainDB, nil, genesis, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("can't create new chain %v", err)
}
- statedb, _ := state.New(bc.Genesis().Root(), bc.StateCache(), nil)
+ statedb, _ := state.New(bc.Genesis().Root(), bc.StateCache())
blockchain := &testBlockChain{bc.Genesis().Root(), chainConfig, statedb, 10000000, new(event.Feed)}
pool := legacypool.New(testTxPoolConfig, blockchain)
diff --git a/miner/payload_building_test.go b/miner/payload_building_test.go
index ac9b2ab704..7a235d88e1 100644
--- a/miner/payload_building_test.go
+++ b/miner/payload_building_test.go
@@ -121,7 +121,7 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
default:
t.Fatalf("unexpected consensus engine type: %T", engine)
}
- chain, err := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec, nil, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("core.NewBlockChain failed: %v", err)
}
diff --git a/miner/worker.go b/miner/worker.go
index 4e566f1489..1f49118c47 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -105,7 +105,20 @@ func (miner *Miner) generateWork(params *generateParams) *newPayloadResult {
log.Warn("Block building is interrupted", "allowance", common.PrettyDuration(miner.config.Recommit))
}
}
+
body := types.Body{Transactions: work.txs, Withdrawals: params.withdrawals}
+ allLogs := make([]*types.Log, 0)
+ for _, r := range work.receipts {
+ allLogs = append(allLogs, r.Logs...)
+ }
+ // Read requests if Prague is enabled.
+ if miner.chainConfig.IsPrague(work.header.Number, work.header.Time) {
+ requests, err := core.ParseDepositLogs(allLogs, miner.chainConfig)
+ if err != nil {
+ return &newPayloadResult{err: err}
+ }
+ body.Requests = requests
+ }
block, err := miner.engine.FinalizeAndAssemble(miner.chain, work.header, work.state, &body, work.receipts)
if err != nil {
return &newPayloadResult{err: err}
diff --git a/node/rpcstack_test.go b/node/rpcstack_test.go
index e41cc51ad3..c6f598b774 100644
--- a/node/rpcstack_test.go
+++ b/node/rpcstack_test.go
@@ -383,7 +383,7 @@ func TestJWT(t *testing.T) {
expFail := []func() string{
// future
func() string {
- return fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix() + int64(jwtExpiryTimeout.Seconds()) + 1}))
+ return fmt.Sprintf("Bearer %v", issueToken(secret, nil, testClaim{"iat": time.Now().Unix() + int64(jwtExpiryTimeout.Seconds()) + 60}))
},
// stale
func() string {
diff --git a/p2p/enode/node.go b/p2p/enode/node.go
index cb4ac8d172..4d93d3f6be 100644
--- a/p2p/enode/node.go
+++ b/p2p/enode/node.go
@@ -200,6 +200,20 @@ func (n *Node) TCPEndpoint() (netip.AddrPort, bool) {
return netip.AddrPortFrom(n.ip, n.tcp), true
}
+// QUICEndpoint returns the announced QUIC endpoint.
+func (n *Node) QUICEndpoint() (netip.AddrPort, bool) {
+ var quic uint16
+ if n.ip.Is4() || n.ip.Is4In6() {
+ n.Load((*enr.QUIC)(&quic))
+ } else if n.ip.Is6() {
+ n.Load((*enr.QUIC6)(&quic))
+ }
+ if !n.ip.IsValid() || n.ip.IsUnspecified() || quic == 0 {
+ return netip.AddrPort{}, false
+ }
+ return netip.AddrPortFrom(n.ip, quic), true
+}
+
// Pubkey returns the secp256k1 public key of the node, if present.
func (n *Node) Pubkey() *ecdsa.PublicKey {
var key ecdsa.PublicKey
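A small hedged usage sketch of the new accessor (mirroring TCPEndpoint); the boolean reports whether a usable IP/port pair is announced:

package enode

import "fmt"

// printQUIC logs the announced QUIC endpoint, if any.
func printQUIC(n *Node) {
	if ep, ok := n.QUICEndpoint(); ok {
		fmt.Println("quic endpoint:", ep.String()) // e.g. 99.22.33.1:9001
	} else {
		fmt.Println("no QUIC endpoint announced")
	}
}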
diff --git a/p2p/enode/node_test.go b/p2p/enode/node_test.go
index 56e196e82e..f38c77415e 100644
--- a/p2p/enode/node_test.go
+++ b/p2p/enode/node_test.go
@@ -68,11 +68,12 @@ func TestPythonInterop(t *testing.T) {
func TestNodeEndpoints(t *testing.T) {
id := HexID("00000000000000806ad9b61fa5ae014307ebdc964253adcd9f2c0a392aa11abc")
type endpointTest struct {
- name string
- node *Node
- wantIP netip.Addr
- wantUDP int
- wantTCP int
+ name string
+ node *Node
+ wantIP netip.Addr
+ wantUDP int
+ wantTCP int
+ wantQUIC int
}
tests := []endpointTest{
{
@@ -98,6 +99,22 @@ func TestNodeEndpoints(t *testing.T) {
return SignNull(&r, id)
}(),
},
+ {
+ name: "quic-only",
+ node: func() *Node {
+ var r enr.Record
+ r.Set(enr.QUIC(9000))
+ return SignNull(&r, id)
+ }(),
+ },
+ {
+ name: "quic6-only",
+ node: func() *Node {
+ var r enr.Record
+ r.Set(enr.QUIC6(9000))
+ return SignNull(&r, id)
+ }(),
+ },
{
name: "ipv4-only-loopback",
node: func() *Node {
@@ -209,6 +226,48 @@ func TestNodeEndpoints(t *testing.T) {
wantIP: netip.MustParseAddr("192.168.2.2"),
wantUDP: 30304,
},
+ {
+ name: "ipv4-quic",
+ node: func() *Node {
+ var r enr.Record
+ r.Set(enr.IPv4Addr(netip.MustParseAddr("99.22.33.1")))
+ r.Set(enr.QUIC(9001))
+ return SignNull(&r, id)
+ }(),
+ wantIP: netip.MustParseAddr("99.22.33.1"),
+ wantQUIC: 9001,
+ },
+ { // Because the node is IPv4, the quic6 entry won't be loaded.
+ name: "ipv4-quic6",
+ node: func() *Node {
+ var r enr.Record
+ r.Set(enr.IPv4Addr(netip.MustParseAddr("99.22.33.1")))
+ r.Set(enr.QUIC6(9001))
+ return SignNull(&r, id)
+ }(),
+ wantIP: netip.MustParseAddr("99.22.33.1"),
+ },
+ {
+ name: "ipv6-quic",
+ node: func() *Node {
+ var r enr.Record
+ r.Set(enr.IPv6Addr(netip.MustParseAddr("2001::ff00:0042:8329")))
+ r.Set(enr.QUIC(9001))
+ return SignNull(&r, id)
+ }(),
+ wantIP: netip.MustParseAddr("2001::ff00:0042:8329"),
+ },
+ {
+ name: "ipv6-quic6",
+ node: func() *Node {
+ var r enr.Record
+ r.Set(enr.IPv6Addr(netip.MustParseAddr("2001::ff00:0042:8329")))
+ r.Set(enr.QUIC6(9001))
+ return SignNull(&r, id)
+ }(),
+ wantIP: netip.MustParseAddr("2001::ff00:0042:8329"),
+ wantQUIC: 9001,
+ },
}
for _, test := range tests {
@@ -222,6 +281,9 @@ func TestNodeEndpoints(t *testing.T) {
if test.wantTCP != test.node.TCP() {
t.Errorf("node has wrong TCP port %d, want %d", test.node.TCP(), test.wantTCP)
}
+ if quic, _ := test.node.QUICEndpoint(); test.wantQUIC != int(quic.Port()) {
+ t.Errorf("node has wrong QUIC port %d, want %d", quic.Port(), test.wantQUIC)
+ }
})
}
}
diff --git a/p2p/enr/entries.go b/p2p/enr/entries.go
index 155ec4c023..58e660c154 100644
--- a/p2p/enr/entries.go
+++ b/p2p/enr/entries.go
@@ -77,6 +77,16 @@ type UDP6 uint16
func (v UDP6) ENRKey() string { return "udp6" }
+// QUIC is the "quic" key, which holds the QUIC port of the node.
+type QUIC uint16
+
+func (v QUIC) ENRKey() string { return "quic" }
+
+// QUIC6 is the "quic6" key, which holds the IPv6-specific quic6 port of the node.
+type QUIC6 uint16
+
+func (v QUIC6) ENRKey() string { return "quic6" }
+
// ID is the "id" key, which holds the name of the identity scheme.
type ID string
diff --git a/params/config.go b/params/config.go
index 17a57a1ccd..c366fb052f 100644
--- a/params/config.go
+++ b/params/config.go
@@ -58,6 +58,7 @@ var (
TerminalTotalDifficultyPassed: true,
ShanghaiTime: newUint64(1681338455),
CancunTime: newUint64(1710338135),
+ DepositContractAddress: common.HexToAddress("0x00000000219ab540356cbb839cbe05303d7705fa"),
Ethash: new(EthashConfig),
}
// HoleskyChainConfig contains the chain parameters to run a node on the Holesky test network.
@@ -337,6 +338,8 @@ type ChainConfig struct {
// TODO(karalabe): Drop this field eventually (always assuming PoS mode)
TerminalTotalDifficultyPassed bool `json:"terminalTotalDifficultyPassed,omitempty"`
+ DepositContractAddress common.Address `json:"depositContractAddress,omitempty"`
+
// Various consensus engines
Ethash *EthashConfig `json:"ethash,omitempty"`
Clique *CliqueConfig `json:"clique,omitempty"`
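A hedged sketch (not part of the change) of how the new field is typically paired with the Prague fork time, mirroring the test configs further below; PragueTime is assumed to be the usual *uint64 fork-time field:

package params

// sketchPragueConfig copies the mainnet config, enables Prague from genesis
// and keeps the deposit contract address so deposit requests can be derived
// from its logs.
func sketchPragueConfig() *ChainConfig {
	cfg := *MainnetChainConfig // shallow copy
	pragueTime := uint64(0)
	cfg.PragueTime = &pragueTime
	return &cfg
}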
diff --git a/tests/block_test.go b/tests/block_test.go
index 52184eb274..4bbd2571d7 100644
--- a/tests/block_test.go
+++ b/tests/block_test.go
@@ -49,6 +49,17 @@ func TestBlockchain(t *testing.T) {
// using 4.6 TGas
bt.skipLoad(`.*randomStatetest94.json.*`)
+ // After the merge we would accept side chains as canonical even if they have lower td
+ bt.skipLoad(`.*bcMultiChainTest/ChainAtoChainB_difficultyB.json`)
+ bt.skipLoad(`.*bcMultiChainTest/CallContractFromNotBestBlock.json`)
+ bt.skipLoad(`.*bcTotalDifficultyTest/uncleBlockAtBlock3afterBlock4.json`)
+ bt.skipLoad(`.*bcTotalDifficultyTest/lotsOfBranchesOverrideAtTheMiddle.json`)
+ bt.skipLoad(`.*bcTotalDifficultyTest/sideChainWithMoreTransactions.json`)
+ bt.skipLoad(`.*bcForkStressTest/ForkStressTest.json`)
+ bt.skipLoad(`.*bcMultiChainTest/lotsOfLeafs.json`)
+ bt.skipLoad(`.*bcFrontierToHomestead/blockChainFrontierWithLargerTDvsHomesteadBlockchain.json`)
+ bt.skipLoad(`.*bcFrontierToHomestead/blockChainFrontierWithLargerTDvsHomesteadBlockchain2.json`)
+
bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) {
execBlockTest(t, bt, test)
})
diff --git a/tests/block_test_util.go b/tests/block_test_util.go
index 5b8ba4610e..bbda7aa461 100644
--- a/tests/block_test_util.go
+++ b/tests/block_test_util.go
@@ -234,7 +234,7 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t
chain, err := core.NewBlockChain(db, cache, gspec, nil, engine, vm.Config{
Tracer: tracer,
EnableWitnessCollection: witness,
- }, nil, nil)
+ }, nil)
if err != nil {
return err
}
diff --git a/tests/init.go b/tests/init.go
index c85e714c00..4bb83f9300 100644
--- a/tests/init.go
+++ b/tests/init.go
@@ -373,6 +373,7 @@ var Forks = map[string]*params.ChainConfig{
ShanghaiTime: u64(0),
CancunTime: u64(0),
PragueTime: u64(0),
+ DepositContractAddress: params.MainnetChainConfig.DepositContractAddress,
},
"CancunToPragueAtTime15k": {
ChainID: big.NewInt(1),
@@ -393,6 +394,7 @@ var Forks = map[string]*params.ChainConfig{
ShanghaiTime: u64(0),
CancunTime: u64(0),
PragueTime: u64(15_000),
+ DepositContractAddress: params.MainnetChainConfig.DepositContractAddress,
},
}
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index 2bf0056cbc..cf0ce9777f 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -222,7 +222,7 @@ func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bo
if logs := rlpHash(st.StateDB.Logs()); logs != common.Hash(post.Logs) {
return fmt.Errorf("post state logs hash mismatch: got %x, want %x", logs, post.Logs)
}
- st.StateDB, _ = state.New(root, st.StateDB.Database(), st.Snapshots)
+ st.StateDB, _ = state.New(root, st.StateDB.Database())
return nil
}
@@ -462,8 +462,8 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc, snapshotter bo
tconf.PathDB = pathdb.Defaults
}
triedb := triedb.NewDatabase(db, tconf)
- sdb := state.NewDatabaseWithNodeDB(db, triedb)
- statedb, _ := state.New(types.EmptyRootHash, sdb, nil)
+ sdb := state.NewDatabase(triedb, nil)
+ statedb, _ := state.New(types.EmptyRootHash, sdb)
for addr, a := range accounts {
statedb.SetCode(addr, a.Code)
statedb.SetNonce(addr, a.Nonce)
@@ -486,7 +486,8 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc, snapshotter bo
}
snaps, _ = snapshot.New(snapconfig, db, triedb, root)
}
- statedb, _ = state.New(root, sdb, snaps)
+ sdb = state.NewDatabase(triedb, snaps)
+ statedb, _ = state.New(root, sdb)
return StateTestState{statedb, triedb, snaps}
}
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index 9d19ee58a4..91fd38269f 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -182,7 +182,7 @@ func (t *StateTrie) UpdateStorage(_ common.Address, key, value []byte) error {
}
// UpdateAccount will abstract the write of an account to the secure trie.
-func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccount) error {
+func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccount, _ int) error {
hk := t.hashKey(address.Bytes())
data, err := rlp.EncodeToBytes(acc)
if err != nil {
diff --git a/trie/trie.go b/trie/trie.go
index f44e10b918..885b6b7962 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -666,7 +666,7 @@ func (t *Trie) Witness() map[string]struct{} {
if len(t.tracer.accessList) == 0 {
return nil
}
- witness := make(map[string]struct{})
+ witness := make(map[string]struct{}, len(t.tracer.accessList))
for _, node := range t.tracer.accessList {
witness[string(node)] = struct{}{}
}
diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go
index 2a4a632d49..12e02de9a4 100644
--- a/trie/utils/verkle.go
+++ b/trie/utils/verkle.go
@@ -28,13 +28,13 @@ import (
)
const (
- // The spec of verkle key encoding can be found here.
- // https://notes.ethereum.org/@vbuterin/verkle_tree_eip#Tree-embedding
- VersionLeafKey = 0
- BalanceLeafKey = 1
- NonceLeafKey = 2
- CodeKeccakLeafKey = 3
- CodeSizeLeafKey = 4
+ BasicDataLeafKey = 0
+ CodeHashLeafKey = 1
+
+ BasicDataVersionOffset = 0
+ BasicDataCodeSizeOffset = 5
+ BasicDataNonceOffset = 8
+ BasicDataBalanceOffset = 16
)
var (
@@ -177,31 +177,16 @@ func GetTreeKeyWithEvaluatedAddress(evaluated *verkle.Point, treeIndex *uint256.
return pointToHash(ret, subIndex)
}
-// VersionKey returns the verkle tree key of the version field for the specified account.
-func VersionKey(address []byte) []byte {
- return GetTreeKey(address, zero, VersionLeafKey)
-}
-
-// BalanceKey returns the verkle tree key of the balance field for the specified account.
-func BalanceKey(address []byte) []byte {
- return GetTreeKey(address, zero, BalanceLeafKey)
-}
-
-// NonceKey returns the verkle tree key of the nonce field for the specified account.
-func NonceKey(address []byte) []byte {
- return GetTreeKey(address, zero, NonceLeafKey)
-}
-
-// CodeKeccakKey returns the verkle tree key of the code keccak field for
+// BasicDataKey returns the verkle tree key of the basic data field for
// the specified account.
-func CodeKeccakKey(address []byte) []byte {
- return GetTreeKey(address, zero, CodeKeccakLeafKey)
+func BasicDataKey(address []byte) []byte {
+ return GetTreeKey(address, zero, BasicDataLeafKey)
}
-// CodeSizeKey returns the verkle tree key of the code size field for the
-// specified account.
-func CodeSizeKey(address []byte) []byte {
- return GetTreeKey(address, zero, CodeSizeLeafKey)
+// CodeHashKey returns the verkle tree key of the code hash field for
+// the specified account.
+func CodeHashKey(address []byte) []byte {
+ return GetTreeKey(address, zero, CodeHashLeafKey)
}
func codeChunkIndex(chunk *uint256.Int) (*uint256.Int, byte) {
@@ -249,39 +234,18 @@ func StorageSlotKey(address []byte, storageKey []byte) []byte {
return GetTreeKey(address, treeIndex, subIndex)
}
-// VersionKeyWithEvaluatedAddress returns the verkle tree key of the version
-// field for the specified account. The difference between VersionKey is the
-// address evaluation is already computed to minimize the computational overhead.
-func VersionKeyWithEvaluatedAddress(evaluated *verkle.Point) []byte {
- return GetTreeKeyWithEvaluatedAddress(evaluated, zero, VersionLeafKey)
-}
-
-// BalanceKeyWithEvaluatedAddress returns the verkle tree key of the balance
-// field for the specified account. The difference between BalanceKey is the
-// address evaluation is already computed to minimize the computational overhead.
-func BalanceKeyWithEvaluatedAddress(evaluated *verkle.Point) []byte {
- return GetTreeKeyWithEvaluatedAddress(evaluated, zero, BalanceLeafKey)
-}
-
-// NonceKeyWithEvaluatedAddress returns the verkle tree key of the nonce
-// field for the specified account. The difference between NonceKey is the
-// address evaluation is already computed to minimize the computational overhead.
-func NonceKeyWithEvaluatedAddress(evaluated *verkle.Point) []byte {
- return GetTreeKeyWithEvaluatedAddress(evaluated, zero, NonceLeafKey)
-}
-
-// CodeKeccakKeyWithEvaluatedAddress returns the verkle tree key of the code
-// keccak for the specified account. The difference between CodeKeccakKey is the
+// BasicDataKeyWithEvaluatedAddress returns the verkle tree key of the basic data
+// field for the specified account. The difference between BasicDataKey is the
// address evaluation is already computed to minimize the computational overhead.
-func CodeKeccakKeyWithEvaluatedAddress(evaluated *verkle.Point) []byte {
- return GetTreeKeyWithEvaluatedAddress(evaluated, zero, CodeKeccakLeafKey)
+func BasicDataKeyWithEvaluatedAddress(evaluated *verkle.Point) []byte {
+ return GetTreeKeyWithEvaluatedAddress(evaluated, zero, BasicDataLeafKey)
}
-// CodeSizeKeyWithEvaluatedAddress returns the verkle tree key of the code
-// size for the specified account. The difference between CodeSizeKey is the
+// CodeHashKeyWithEvaluatedAddress returns the verkle tree key of the code
+// hash for the specified account. The difference between CodeHashKey is the
// address evaluation is already computed to minimize the computational overhead.
-func CodeSizeKeyWithEvaluatedAddress(evaluated *verkle.Point) []byte {
- return GetTreeKeyWithEvaluatedAddress(evaluated, zero, CodeSizeLeafKey)
+func CodeHashKeyWithEvaluatedAddress(evaluated *verkle.Point) []byte {
+ return GetTreeKeyWithEvaluatedAddress(evaluated, zero, CodeHashLeafKey)
}
// CodeChunkKeyWithEvaluatedAddress returns the verkle tree key of the code
diff --git a/trie/utils/verkle_test.go b/trie/utils/verkle_test.go
index c29504a6d0..dbec29685b 100644
--- a/trie/utils/verkle_test.go
+++ b/trie/utils/verkle_test.go
@@ -33,20 +33,11 @@ func TestTreeKey(t *testing.T) {
smallStorage = []byte{0x1}
largeStorage = bytes.Repeat([]byte{0xff}, 16)
)
- if !bytes.Equal(VersionKey(address), VersionKeyWithEvaluatedAddress(addressEval)) {
- t.Fatal("Unmatched version key")
+ if !bytes.Equal(BasicDataKey(address), BasicDataKeyWithEvaluatedAddress(addressEval)) {
+ t.Fatal("Unmatched basic data key")
}
- if !bytes.Equal(BalanceKey(address), BalanceKeyWithEvaluatedAddress(addressEval)) {
- t.Fatal("Unmatched balance key")
- }
- if !bytes.Equal(NonceKey(address), NonceKeyWithEvaluatedAddress(addressEval)) {
- t.Fatal("Unmatched nonce key")
- }
- if !bytes.Equal(CodeKeccakKey(address), CodeKeccakKeyWithEvaluatedAddress(addressEval)) {
- t.Fatal("Unmatched code keccak key")
- }
- if !bytes.Equal(CodeSizeKey(address), CodeSizeKeyWithEvaluatedAddress(addressEval)) {
- t.Fatal("Unmatched code size key")
+ if !bytes.Equal(CodeHashKey(address), CodeHashKeyWithEvaluatedAddress(addressEval)) {
+ t.Fatal("Unmatched code hash key")
}
if !bytes.Equal(CodeChunkKey(address, smallIndex), CodeChunkKeyWithEvaluatedAddress(addressEval, smallIndex)) {
t.Fatal("Unmatched code chunk key")
@@ -76,7 +67,7 @@ func BenchmarkTreeKey(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
- BalanceKey([]byte{0x01})
+ BasicDataKey([]byte{0x01})
}
}
@@ -96,7 +87,7 @@ func BenchmarkTreeKeyWithEvaluation(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
- BalanceKeyWithEvaluatedAddress(eval)
+ BasicDataKeyWithEvaluatedAddress(eval)
}
}
diff --git a/trie/verkle.go b/trie/verkle.go
index 60293e8998..6bd9d3d1af 100644
--- a/trie/verkle.go
+++ b/trie/verkle.go
@@ -100,20 +100,10 @@ func (t *VerkleTrie) GetAccount(addr common.Address) (*types.StateAccount, error
if values == nil {
return nil, nil
}
- // Decode nonce in little-endian
- if len(values[utils.NonceLeafKey]) > 0 {
- acc.Nonce = binary.LittleEndian.Uint64(values[utils.NonceLeafKey])
- }
- // Decode balance in little-endian
- var balance [32]byte
- copy(balance[:], values[utils.BalanceLeafKey])
- for i := 0; i < len(balance)/2; i++ {
- balance[len(balance)-i-1], balance[i] = balance[i], balance[len(balance)-i-1]
- }
- acc.Balance = new(uint256.Int).SetBytes32(balance[:])
-
- // Decode codehash
- acc.CodeHash = values[utils.CodeKeccakLeafKey]
+ basicData := values[utils.BasicDataLeafKey]
+ acc.Nonce = binary.BigEndian.Uint64(basicData[utils.BasicDataNonceOffset:])
+ acc.Balance = new(uint256.Int).SetBytes(basicData[utils.BasicDataBalanceOffset : utils.BasicDataBalanceOffset+16])
+ acc.CodeHash = values[utils.CodeHashLeafKey]
 // TODO account.Root is left empty. How should we handle the legacy account?
return acc, nil
@@ -133,36 +123,36 @@ func (t *VerkleTrie) GetStorage(addr common.Address, key []byte) ([]byte, error)
// UpdateAccount implements state.Trie, writing the provided account into the tree.
// If the tree is corrupted, an error will be returned.
-func (t *VerkleTrie) UpdateAccount(addr common.Address, acc *types.StateAccount) error {
+func (t *VerkleTrie) UpdateAccount(addr common.Address, acc *types.StateAccount, codeLen int) error {
var (
- err error
- nonce, balance [32]byte
- values = make([][]byte, verkle.NodeWidth)
+ err error
+ basicData [32]byte
+ values = make([][]byte, verkle.NodeWidth)
+ stem = t.cache.GetStem(addr[:])
)
- values[utils.VersionLeafKey] = zero[:]
- values[utils.CodeKeccakLeafKey] = acc.CodeHash[:]
- // Encode nonce in little-endian
- binary.LittleEndian.PutUint64(nonce[:], acc.Nonce)
- values[utils.NonceLeafKey] = nonce[:]
-
- // Encode balance in little-endian
- bytes := acc.Balance.Bytes()
- for i, b := range bytes {
- balance[len(bytes)-i-1] = b
+ // Code size is encoded in BasicData as a 3-byte big-endian integer. Spare bytes are present
+ // before the code size to support bigger integers in the future. PutUint32(...) requires
+ // 4 bytes, so we need to shift the offset 1 byte to the left.
+ binary.BigEndian.PutUint32(basicData[utils.BasicDataCodeSizeOffset-1:], uint32(codeLen))
+ binary.BigEndian.PutUint64(basicData[utils.BasicDataNonceOffset:], acc.Nonce)
+ if acc.Balance.ByteLen() > 16 {
+ panic("balance too large")
}
- values[utils.BalanceLeafKey] = balance[:]
+ acc.Balance.WriteToSlice(basicData[utils.BasicDataBalanceOffset : utils.BasicDataBalanceOffset+16])
+ values[utils.BasicDataLeafKey] = basicData[:]
+ values[utils.CodeHashLeafKey] = acc.CodeHash[:]
- switch n := t.root.(type) {
+ switch root := t.root.(type) {
case *verkle.InternalNode:
- err = n.InsertValuesAtStem(t.cache.GetStem(addr[:]), values, t.nodeResolver)
- if err != nil {
- return fmt.Errorf("UpdateAccount (%x) error: %v", addr, err)
- }
+ err = root.InsertValuesAtStem(stem, values, t.nodeResolver)
default:
return errInvalidRootType
}
- // TODO figure out if the code size needs to be updated, too
+ if err != nil {
+ return fmt.Errorf("UpdateAccount (%x) error: %v", addr, err)
+ }
+
return nil
}
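The write side in UpdateAccount mirrors this. A hand-rolled sketch under the same layout assumption, using Bytes() plus copy in place of WriteToSlice so the right-alignment of the 16-byte balance field is explicit:

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/holiman/uint256"
)

// encodeBasicData packs code size, nonce and balance into one 32-byte leaf,
// big-endian, at the same assumed offsets as the decode sketch above.
func encodeBasicData(nonce uint64, balance *uint256.Int, codeLen int) [32]byte {
	var leaf [32]byte
	// PutUint32 needs 4 bytes; starting one byte before the 3-byte code-size
	// field works because byte 4 is reserved and stays zero for sizes < 2^24.
	binary.BigEndian.PutUint32(leaf[4:8], uint32(codeLen))
	binary.BigEndian.PutUint64(leaf[8:16], nonce)
	if balance.ByteLen() > 16 {
		panic("balance does not fit in 16 bytes")
	}
	// Right-align the big-endian balance within bytes 16..31.
	b := balance.Bytes()
	copy(leaf[32-len(b):], b)
	return leaf
}

func main() {
	leaf := encodeBasicData(7, uint256.NewInt(42), 1234)
	fmt.Printf("%x\n", leaf)
}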
@@ -208,31 +198,33 @@ func (t *VerkleTrie) DeleteAccount(addr common.Address) error {
func (t *VerkleTrie) RollBackAccount(addr common.Address) error {
var (
evaluatedAddr = t.cache.Get(addr.Bytes())
- codeSizeKey = utils.CodeSizeKeyWithEvaluatedAddress(evaluatedAddr)
+ basicDataKey = utils.BasicDataKeyWithEvaluatedAddress(evaluatedAddr)
)
- codeSizeBytes, err := t.root.Get(codeSizeKey, t.nodeResolver)
+ basicDataBytes, err := t.root.Get(basicDataKey, t.nodeResolver)
if err != nil {
return fmt.Errorf("rollback: error finding code size: %w", err)
}
- if len(codeSizeBytes) == 0 {
- return errors.New("rollback: code size is not existent")
+ if len(basicDataBytes) == 0 {
+ return errors.New("rollback: basic data is not existent")
}
- codeSize := binary.LittleEndian.Uint64(codeSizeBytes)
+ // The code size is encoded in BasicData as a 3-byte big-endian integer. Spare bytes are present
+ // before the code size to support bigger integers in the future.
+ // BigEndian.Uint32(...) expects 4 bytes, so we need to shift the offset 1 byte to the left.
+ codeSize := binary.BigEndian.Uint32(basicDataBytes[utils.BasicDataCodeSizeOffset-1:])
// Delete the account header + first 64 slots + first 128 code chunks
- _, err = t.root.(*verkle.InternalNode).DeleteAtStem(codeSizeKey[:31], t.nodeResolver)
+ _, err = t.root.(*verkle.InternalNode).DeleteAtStem(basicDataKey[:31], t.nodeResolver)
if err != nil {
return fmt.Errorf("error rolling back account header: %w", err)
}
// Delete all further code
- for i, chunknr := uint64(31*128), uint64(128); i < codeSize; i, chunknr = i+31*256, chunknr+256 {
+ for i, chunknr := uint64(31*128), uint64(128); i < uint64(codeSize); i, chunknr = i+31*256, chunknr+256 {
// evaluate group key at the start of a new group
offset := uint256.NewInt(chunknr)
key := utils.CodeChunkKeyWithEvaluatedAddress(evaluatedAddr, offset)
- _, err = t.root.(*verkle.InternalNode).DeleteAtStem(key[:], t.nodeResolver)
- if err != nil {
+ if _, err = t.root.(*verkle.InternalNode).DeleteAtStem(key[:], t.nodeResolver); err != nil {
return fmt.Errorf("error deleting code chunk stem (addr=%x, offset=%d) error: %w", addr[:], offset, err)
}
}
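As a sanity check on the deletion arithmetic: the account-header stem already covers the first 128 code chunks (31 code bytes each, i.e. 3968 bytes), and every further stem holds 256 chunks. A small sketch reproducing the loop bounds:

package main

import "fmt"

// extraCodeStems returns the first chunk number of each stem beyond the
// account header that holds code, mirroring the loop in RollBackAccount.
func extraCodeStems(codeSize uint64) []uint64 {
	var stems []uint64
	for i, chunknr := uint64(31*128), uint64(128); i < codeSize; i, chunknr = i+31*256, chunknr+256 {
		stems = append(stems, chunknr)
	}
	return stems
}

func main() {
	// The rollback test below writes 129*32 = 4128 bytes of code, which
	// chunks into 134 groups of 31 bytes: chunks 0..127 live in the header
	// stem, chunks 128..133 in exactly one extra stem.
	fmt.Println(extraCodeStems(129 * 32)) // [128]
}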
@@ -385,6 +377,7 @@ func ChunkifyCode(code []byte) ChunkedCode {
// UpdateContractCode implements state.Trie, writing the provided contract code
// into the trie.
+// Note that the code size *must* already have been saved by a previous UpdateAccount call.
func (t *VerkleTrie) UpdateContractCode(addr common.Address, codeHash common.Hash, code []byte) error {
var (
chunks = ChunkifyCode(code)
@@ -400,12 +393,6 @@ func (t *VerkleTrie) UpdateContractCode(addr common.Address, codeHash common.Has
}
values[groupOffset] = chunks[i : i+32]
- // Reuse the calculated key to also update the code size.
- if i == 0 {
- cs := make([]byte, 32)
- binary.LittleEndian.PutUint64(cs, uint64(len(code)))
- values[utils.CodeSizeLeafKey] = cs
- }
if groupOffset == 255 || len(chunks)-i <= 32 {
switch root := t.root.(type) {
case *verkle.InternalNode:
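With the code-size write removed from this function, the size now reaches the tree only through UpdateAccount, so callers must order the two writes accordingly. A minimal sketch of that contract, using the names from this diff; writeContract is a hypothetical helper and error handling is trimmed:

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/trie"
)

// writeContract illustrates the required order: the account (and hence the
// code size in BasicData) must be written before, or alongside, the code
// chunks themselves.
func writeContract(tr *trie.VerkleTrie, addr common.Address, acc *types.StateAccount, code []byte) error {
	if err := tr.UpdateAccount(addr, acc, len(code)); err != nil {
		return err
	}
	return tr.UpdateContractCode(addr, crypto.Keccak256Hash(code), code)
}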
diff --git a/trie/verkle_test.go b/trie/verkle_test.go
index 55438d45e1..4cd1717c0e 100644
--- a/trie/verkle_test.go
+++ b/trie/verkle_test.go
@@ -61,7 +61,7 @@ func TestVerkleTreeReadWrite(t *testing.T) {
tr, _ := NewVerkleTrie(types.EmptyVerkleHash, db, utils.NewPointCache(100))
for addr, acct := range accounts {
- if err := tr.UpdateAccount(addr, acct); err != nil {
+ if err := tr.UpdateAccount(addr, acct, 0); err != nil {
t.Fatalf("Failed to update account, %v", err)
}
for key, val := range storages[addr] {
@@ -96,7 +96,13 @@ func TestVerkleRollBack(t *testing.T) {
tr, _ := NewVerkleTrie(types.EmptyVerkleHash, db, utils.NewPointCache(100))
for addr, acct := range accounts {
- if err := tr.UpdateAccount(addr, acct); err != nil {
+ // create more than 128 chunks of code
+ code := make([]byte, 129*32)
+ for i := 0; i < len(code); i += 2 {
+ code[i] = 0x60
+ code[i+1] = byte(i % 256)
+ }
+ if err := tr.UpdateAccount(addr, acct, len(code)); err != nil {
t.Fatalf("Failed to update account, %v", err)
}
for key, val := range storages[addr] {
@@ -104,12 +110,6 @@ func TestVerkleRollBack(t *testing.T) {
t.Fatalf("Failed to update account, %v", err)
}
}
- // create more than 128 chunks of code
- code := make([]byte, 129*32)
- for i := 0; i < len(code); i += 2 {
- code[i] = 0x60
- code[i+1] = byte(i % 256)
- }
hash := crypto.Keccak256Hash(code)
if err := tr.UpdateContractCode(addr, hash, code); err != nil {
t.Fatalf("Failed to update contract, %v", err)
diff --git a/triedb/database.go b/triedb/database.go
index aecb900f31..c1e6f9af4e 100644
--- a/triedb/database.go
+++ b/triedb/database.go
@@ -92,6 +92,7 @@ type backend interface {
// types of node backend as an entrypoint. It's responsible for all interactions
// relevant with trie nodes and node preimages.
type Database struct {
+ disk ethdb.Database
config *Config // Configuration for trie database
preimages *preimageStore // The store for caching preimages
backend backend // The backend for managing trie nodes
@@ -109,6 +110,7 @@ func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
preimages = newPreimageStore(diskdb)
}
db := &Database{
+ disk: diskdb,
config: config,
preimages: preimages,
}
@@ -327,3 +329,8 @@ func (db *Database) SetBufferSize(size int) error {
func (db *Database) IsVerkle() bool {
return db.config.IsVerkle
}
+
+// Disk returns the underlying disk database.
+func (db *Database) Disk() ethdb.Database {
+ return db.disk
+}
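The accessor itself is a plain getter. As one hedged example of what exposing the disk handle enables, a caller holding only a *triedb.Database could read contract code straight from the key-value store; readCodeFromDisk is illustrative, while rawdb.ReadCode is an existing accessor:

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/triedb"
)

// readCodeFromDisk fetches contract code directly from the backing key-value
// store that the trie database now exposes via Disk().
func readCodeFromDisk(db *triedb.Database, codeHash common.Hash) []byte {
	return rawdb.ReadCode(db.Disk(), codeHash)
}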