From a7f9523ae1182aeb5fecd54042f264fc196ba03b Mon Sep 17 00:00:00 2001
From: rjl493456442 <garyrong0905@gmail.com>
Date: Fri, 17 Jan 2025 09:59:02 +0800
Subject: [PATCH] all: implement state history v2 (#30107)

This pull request delivers the new version of the state history (v2),
where the raw storage key is used instead of the key hash.

Before the Cancun fork, the protocol allowed a specific account to be
destructed, in which case all storage slots owned by that account had to
be wiped within the same state transition.

Technically, storage wiping is performed through storage iteration, and
if the state snapshot is not available, only the storage key hash can be
obtained during traversal. Therefore, the storage key hash was chosen as
the identifier in the old version of the state history.
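
For reference, the per-slot identifier recorded in the old (v0) format is
simply the keccak256 hash of the raw slot key, so the raw key cannot be
recovered from the history without a preimage store. A minimal
illustration of that relation (not geth code, just the hashing involved):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	rawKey := common.HexToHash("0x01") // a raw storage slot key
	// A storage-trie iterator only exposes this hash; the old history
	// format therefore indexed slots by it.
	fmt.Printf("slot key hash: %x\n", crypto.Keccak256Hash(rawKey.Bytes()))
}
```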

Fortunately, account self-destruction has been deprecated by the
protocol since the Cancun fork, and there are no empty accounts left
that would be eligible for deletion under EIP-158. We can therefore
conclude that no storage wiping should occur after the Cancun fork, and
it makes no sense to keep using the key hash.
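
Concretely, callers now forward the Cancun activation check as the new
third argument to StateDB.Commit; this is the call-site pattern applied
throughout this diff. A minimal sketch, using a hypothetical helper name:

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

// commitBlockState is a hypothetical helper mirroring the call sites
// changed in this diff: after Cancun no storage wiping may occur, so the
// Cancun check is passed as Commit's noStorageWiping argument.
func commitBlockState(statedb *state.StateDB, cfg *params.ChainConfig, block *types.Block) (common.Hash, error) {
	return statedb.Commit(
		block.NumberU64(),
		cfg.IsEIP158(block.Number()),               // deleteEmptyObjects
		cfg.IsCancun(block.Number(), block.Time()), // noStorageWiping
	)
}
```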

Besides, another big reason for making this change is that the current
state history format is unusable once Verkle is activated. The Verkle
tree has a different key derivation scheme (Merkle uses keccak256), so
the preimage of the key hash must be available to make Verkle rollback
functional. This pull request is a prerequisite for landing Verkle.

Additionally, the raw storage key is more human-friendly for those who
want to inspect the history manually, even though Solidity already
performs some hashing to derive the storage location.
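
(The hashing referred to above is, for example, the standard Solidity
mapping layout, where the entry m[key] of a mapping occupying slot p
lives at keccak256(pad32(key) ++ pad32(p)). A small sketch of that rule,
with a hypothetical helper name; this is not part of the diff:)

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// mappingSlot shows the Solidity layout rule mentioned above: the slot
// of m[key] for a mapping stored at slot p. The result is the "raw
// storage key" that the v2 history now records directly.
func mappingSlot(key, p common.Hash) common.Hash {
	return crypto.Keccak256Hash(key.Bytes(), p.Bytes())
}
```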

---

This pull request doesn't bump the database version, as I believe the
database remains compatible if users downgrade from the new geth version
to an old one; the only side effect is that state history persisted in
the new format will be unusable.
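
The compatibility boundary is enforced by the history version byte:
histories written with raw storage keys are tagged with the new version,
which an older geth (accepting only version 0) will refuse to decode
rather than misinterpret. A minimal sketch of the selection, using the
constants introduced in triedb/pathdb/history.go:

```go
package example

const (
	stateHistoryV0 = uint8(0) // slot identifier is the keccak256 hash of the raw key
	stateHistoryV1 = uint8(1) // slot identifier is the raw storage key itself
)

// historyVersion mirrors the version selection added in newHistory:
// histories built from raw storage keys are tagged as V1, everything
// else stays on the legacy V0 encoding.
func historyVersion(rawStorageKey bool) uint8 {
	if rawStorageKey {
		return stateHistoryV1
	}
	return stateHistoryV0
}
```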

---------

Co-authored-by: Zsolt Felfoldi <zsfelfoldi@gmail.com>
---
 cmd/evm/internal/t8ntool/execution.go |  4 +-
 cmd/evm/runner.go                     |  2 +-
 cmd/geth/dbcmd.go                     |  3 +-
 core/blockchain.go                    |  2 +-
 core/blockchain_test.go               |  2 +-
 core/chain_makers.go                  |  4 +-
 core/genesis.go                       |  4 +-
 core/state/state_object.go            | 12 +++-
 core/state/state_test.go              |  6 +-
 core/state/statedb.go                 | 25 ++++---
 core/state/statedb_fuzz_test.go       |  2 +-
 core/state/statedb_hooked_test.go     |  2 +-
 core/state/statedb_test.go            | 29 ++++----
 core/state/stateupdate.go             | 86 +++++++++++++++--------
 core/state/sync_test.go               |  2 +-
 core/state/trie_prefetcher_test.go    |  2 +-
 core/txpool/blobpool/blobpool_test.go | 12 ++--
 eth/api_debug_test.go                 |  6 +-
 eth/state_accessor.go                 |  2 +-
 tests/state_test_util.go              |  4 +-
 triedb/pathdb/buffer.go               |  2 +-
 triedb/pathdb/database_test.go        | 79 +++++++++++++++------
 triedb/pathdb/difflayer_test.go       |  6 +-
 triedb/pathdb/disklayer.go            |  2 +-
 triedb/pathdb/execute.go              | 40 +++++++----
 triedb/pathdb/history.go              | 44 +++++++-----
 triedb/pathdb/history_inspect.go      |  8 ++-
 triedb/pathdb/history_test.go         | 13 ++--
 triedb/pathdb/iterator_test.go        | 98 +++++++++++++--------------
 triedb/pathdb/journal.go              |  3 +-
 triedb/pathdb/states.go               | 21 ++++--
 triedb/pathdb/states_test.go          | 30 +++++++-
 triedb/states.go                      |  3 +-
 33 files changed, 356 insertions(+), 204 deletions(-)

diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go
index aef497885e..7c17a251f0 100644
--- a/cmd/evm/internal/t8ntool/execution.go
+++ b/cmd/evm/internal/t8ntool/execution.go
@@ -379,7 +379,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 	}
 
 	// Commit block
-	root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber))
+	root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber), chainConfig.IsCancun(vmContext.BlockNumber, vmContext.Time))
 	if err != nil {
 		return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not commit state: %v", err))
 	}
@@ -437,7 +437,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB
 		}
 	}
 	// Commit and re-open to start with a clean state.
-	root, _ := statedb.Commit(0, false)
+	root, _ := statedb.Commit(0, false, false)
 	statedb, _ = state.New(root, sdb)
 	return statedb
 }
diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go
index c67d3657e2..b2cf28353b 100644
--- a/cmd/evm/runner.go
+++ b/cmd/evm/runner.go
@@ -336,7 +336,7 @@ func runCmd(ctx *cli.Context) error {
 	output, stats, err := timedExec(bench, execFunc)
 
 	if ctx.Bool(DumpFlag.Name) {
-		root, err := runtimeConfig.State.Commit(genesisConfig.Number, true)
+		root, err := runtimeConfig.State.Commit(genesisConfig.Number, true, false)
 		if err != nil {
 			fmt.Printf("Failed to commit changes %v\n", err)
 			return err
diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go
index 7622246050..cd41c57c75 100644
--- a/cmd/geth/dbcmd.go
+++ b/cmd/geth/dbcmd.go
@@ -829,8 +829,7 @@ func inspectAccount(db *triedb.Database, start uint64, end uint64, address commo
 func inspectStorage(db *triedb.Database, start uint64, end uint64, address common.Address, slot common.Hash, raw bool) error {
 	// The hash of storage slot key is utilized in the history
 	// rather than the raw slot key, make the conversion.
-	slotHash := crypto.Keccak256Hash(slot.Bytes())
-	stats, err := db.StorageHistory(address, slotHash, start, end)
+	stats, err := db.StorageHistory(address, slot, start, end)
 	if err != nil {
 		return err
 	}
diff --git a/core/blockchain.go b/core/blockchain.go
index 4ced94bb68..6aac541ba0 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -1471,7 +1471,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
 		log.Crit("Failed to write block into disk", "err", err)
 	}
 	// Commit all cached state changes into underlying memory database.
-	root, err := statedb.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()))
+	root, err := statedb.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.chainConfig.IsCancun(block.Number(), block.Time()))
 	if err != nil {
 		return err
 	}
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 7805a7c6e8..84f1b9740c 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -181,7 +181,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
 		blockchain.chainmu.MustLock()
 		rawdb.WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTd(block.ParentHash(), block.NumberU64()-1)))
 		rawdb.WriteBlock(blockchain.db, block)
-		statedb.Commit(block.NumberU64(), false)
+		statedb.Commit(block.NumberU64(), false, false)
 		blockchain.chainmu.Unlock()
 	}
 	return nil
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 26714845eb..5298874a40 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -405,7 +405,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
 		}
 
 		// Write state changes to db
-		root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number))
+		root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number), config.IsCancun(b.header.Number, b.header.Time))
 		if err != nil {
 			panic(fmt.Sprintf("state write error: %v", err))
 		}
@@ -510,7 +510,7 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine
 		}
 
 		// Write state changes to DB.
-		root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number))
+		root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number), config.IsCancun(b.header.Number, b.header.Time))
 		if err != nil {
 			panic(fmt.Sprintf("state write error: %v", err))
 		}
diff --git a/core/genesis.go b/core/genesis.go
index 02cdc74d86..68d945e37e 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -146,7 +146,7 @@ func hashAlloc(ga *types.GenesisAlloc, isVerkle bool) (common.Hash, error) {
 			statedb.SetState(addr, key, value)
 		}
 	}
-	return statedb.Commit(0, false)
+	return statedb.Commit(0, false, false)
 }
 
 // flushAlloc is very similar with hash, but the main difference is all the
@@ -172,7 +172,7 @@ func flushAlloc(ga *types.GenesisAlloc, triedb *triedb.Database) (common.Hash, e
 			statedb.SetState(addr, key, value)
 		}
 	}
-	root, err := statedb.Commit(0, false)
+	root, err := statedb.Commit(0, false, false)
 	if err != nil {
 		return common.Hash{}, err
 	}
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 76a3aba92c..a6979bd361 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -399,10 +399,16 @@ func (s *stateObject) commitStorage(op *accountUpdate) {
 			op.storages = make(map[common.Hash][]byte)
 		}
 		op.storages[hash] = encode(val)
-		if op.storagesOrigin == nil {
-			op.storagesOrigin = make(map[common.Hash][]byte)
+
+		if op.storagesOriginByKey == nil {
+			op.storagesOriginByKey = make(map[common.Hash][]byte)
+		}
+		if op.storagesOriginByHash == nil {
+			op.storagesOriginByHash = make(map[common.Hash][]byte)
 		}
-		op.storagesOrigin[hash] = encode(s.originStorage[key])
+		origin := encode(s.originStorage[key])
+		op.storagesOriginByKey[key] = origin
+		op.storagesOriginByHash[hash] = origin
 
 		// Overwrite the clean value of storage slots
 		s.originStorage[key] = val
diff --git a/core/state/state_test.go b/core/state/state_test.go
index 6f54300c37..b443411f1b 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -56,7 +56,7 @@ func TestDump(t *testing.T) {
 	// write some of them to the trie
 	s.state.updateStateObject(obj1)
 	s.state.updateStateObject(obj2)
-	root, _ := s.state.Commit(0, false)
+	root, _ := s.state.Commit(0, false, false)
 
 	// check that DumpToCollector contains the state objects that are in trie
 	s.state, _ = New(root, tdb)
@@ -116,7 +116,7 @@ func TestIterativeDump(t *testing.T) {
 	// write some of them to the trie
 	s.state.updateStateObject(obj1)
 	s.state.updateStateObject(obj2)
-	root, _ := s.state.Commit(0, false)
+	root, _ := s.state.Commit(0, false, false)
 	s.state, _ = New(root, tdb)
 
 	b := &bytes.Buffer{}
@@ -142,7 +142,7 @@ func TestNull(t *testing.T) {
 	var value common.Hash
 
 	s.state.SetState(address, common.Hash{}, value)
-	s.state.Commit(0, false)
+	s.state.Commit(0, false, false)
 
 	if value := s.state.GetState(address, common.Hash{}); value != (common.Hash{}) {
 		t.Errorf("expected empty current value, got %x", value)
diff --git a/core/state/statedb.go b/core/state/statedb.go
index d279ccfdfe..0310ee6973 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -1051,7 +1051,7 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
 // with their values be tracked as original value.
 // In case (d), **original** account along with its storages should be deleted,
 // with their values be tracked as original value.
-func (s *StateDB) handleDestruction() (map[common.Hash]*accountDelete, []*trienode.NodeSet, error) {
+func (s *StateDB) handleDestruction(noStorageWiping bool) (map[common.Hash]*accountDelete, []*trienode.NodeSet, error) {
 	var (
 		nodes   []*trienode.NodeSet
 		buf     = crypto.NewKeccakState()
@@ -1080,6 +1080,9 @@ func (s *StateDB) handleDestruction() (map[common.Hash]*accountDelete, []*trieno
 		if prev.Root == types.EmptyRootHash || s.db.TrieDB().IsVerkle() {
 			continue
 		}
+		if noStorageWiping {
+			return nil, nil, fmt.Errorf("unexpected storage wiping, %x", addr)
+		}
 		// Remove storage slots belonging to the account.
 		storages, storagesOrigin, set, err := s.deleteStorage(addr, addrHash, prev.Root)
 		if err != nil {
@@ -1101,7 +1104,7 @@ func (s *StateDB) GetTrie() Trie {
 
 // commit gathers the state mutations accumulated along with the associated
 // trie changes, resetting all internal flags with the new state as the base.
-func (s *StateDB) commit(deleteEmptyObjects bool) (*stateUpdate, error) {
+func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool) (*stateUpdate, error) {
 	// Short circuit in case any database failure occurred earlier.
 	if s.dbErr != nil {
 		return nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
@@ -1155,7 +1158,7 @@ func (s *StateDB) commit(deleteEmptyObjects bool) (*stateUpdate, error) {
 	// the same block, account deletions must be processed first. This ensures
 	// that the storage trie nodes deleted during destruction and recreated
 	// during subsequent resurrection can be combined correctly.
-	deletes, delNodes, err := s.handleDestruction()
+	deletes, delNodes, err := s.handleDestruction(noStorageWiping)
 	if err != nil {
 		return nil, err
 	}
@@ -1252,13 +1255,14 @@ func (s *StateDB) commit(deleteEmptyObjects bool) (*stateUpdate, error) {
 
 	origin := s.originalRoot
 	s.originalRoot = root
-	return newStateUpdate(origin, root, deletes, updates, nodes), nil
+
+	return newStateUpdate(noStorageWiping, origin, root, deletes, updates, nodes), nil
 }
 
 // commitAndFlush is a wrapper of commit which also commits the state mutations
 // to the configured data stores.
-func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool) (*stateUpdate, error) {
-	ret, err := s.commit(deleteEmptyObjects)
+func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (*stateUpdate, error) {
+	ret, err := s.commit(deleteEmptyObjects, noStorageWiping)
 	if err != nil {
 		return nil, err
 	}
@@ -1310,8 +1314,13 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool) (*stateU
 //
 // The associated block number of the state transition is also provided
 // for more chain context.
-func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, error) {
-	ret, err := s.commitAndFlush(block, deleteEmptyObjects)
+//
+// noStorageWiping is a flag indicating whether storage wiping is permitted.
+// Since self-destruction was deprecated with the Cancun fork and there are
+// no empty accounts left that could be deleted by EIP-158, storage wiping
+// should not occur.
+func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, error) {
+	ret, err := s.commitAndFlush(block, deleteEmptyObjects, noStorageWiping)
 	if err != nil {
 		return common.Hash{}, err
 	}
diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go
index 7cbfd9b9d7..ed99cf687c 100644
--- a/core/state/statedb_fuzz_test.go
+++ b/core/state/statedb_fuzz_test.go
@@ -228,7 +228,7 @@ func (test *stateTest) run() bool {
 		} else {
 			state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary
 		}
-		ret, err := state.commitAndFlush(0, true) // call commit at the block boundary
+		ret, err := state.commitAndFlush(0, true, false) // call commit at the block boundary
 		if err != nil {
 			panic(err)
 		}
diff --git a/core/state/statedb_hooked_test.go b/core/state/statedb_hooked_test.go
index 5f82ed06d0..874a275993 100644
--- a/core/state/statedb_hooked_test.go
+++ b/core/state/statedb_hooked_test.go
@@ -71,7 +71,7 @@ func TestBurn(t *testing.T) {
 	hooked.AddBalance(addC, uint256.NewInt(200), tracing.BalanceChangeUnspecified)
 	hooked.Finalise(true)
 
-	s.Commit(0, false)
+	s.Commit(0, false, false)
 	if have, want := burned, uint256.NewInt(600); !have.Eq(want) {
 		t.Fatalf("burn-count wrong, have %v want %v", have, want)
 	}
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index 37141e90b0..67eb9cbdc6 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -119,7 +119,7 @@ func TestIntermediateLeaks(t *testing.T) {
 	}
 
 	// Commit and cross check the databases.
-	transRoot, err := transState.Commit(0, false)
+	transRoot, err := transState.Commit(0, false, false)
 	if err != nil {
 		t.Fatalf("failed to commit transition state: %v", err)
 	}
@@ -127,7 +127,7 @@ func TestIntermediateLeaks(t *testing.T) {
 		t.Errorf("can not commit trie %v to persistent database", transRoot.Hex())
 	}
 
-	finalRoot, err := finalState.Commit(0, false)
+	finalRoot, err := finalState.Commit(0, false, false)
 	if err != nil {
 		t.Fatalf("failed to commit final state: %v", err)
 	}
@@ -240,7 +240,7 @@ func TestCopyWithDirtyJournal(t *testing.T) {
 		obj.data.Root = common.HexToHash("0xdeadbeef")
 		orig.updateStateObject(obj)
 	}
-	root, _ := orig.Commit(0, true)
+	root, _ := orig.Commit(0, true, false)
 	orig, _ = New(root, db)
 
 	// modify all in memory without finalizing
@@ -293,7 +293,7 @@ func TestCopyObjectState(t *testing.T) {
 			t.Fatalf("Error in test itself, the 'done' flag should not be set before Commit, have %v want %v", have, want)
 		}
 	}
-	orig.Commit(0, true)
+	orig.Commit(0, true, false)
 	for _, op := range cpy.mutations {
 		if have, want := op.applied, false; have != want {
 			t.Fatalf("Error: original state affected copy, have %v want %v", have, want)
@@ -696,7 +696,7 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
 func TestTouchDelete(t *testing.T) {
 	s := newStateEnv()
 	s.state.getOrNewStateObject(common.Address{})
-	root, _ := s.state.Commit(0, false)
+	root, _ := s.state.Commit(0, false, false)
 	s.state, _ = New(root, s.state.db)
 
 	snapshot := s.state.Snapshot()
@@ -784,7 +784,7 @@ func TestCopyCommitCopy(t *testing.T) {
 		t.Fatalf("second copy committed storage slot mismatch: have %x, want %x", val, sval)
 	}
 	// Commit state, ensure states can be loaded from disk
-	root, _ := state.Commit(0, false)
+	root, _ := state.Commit(0, false, false)
 	state, _ = New(root, tdb)
 	if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
 		t.Fatalf("state post-commit balance mismatch: have %v, want %v", balance, 42)
@@ -898,11 +898,11 @@ func TestCommitCopy(t *testing.T) {
 	if val := state.GetCommittedState(addr, skey1); val != (common.Hash{}) {
 		t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{})
 	}
-	root, _ := state.Commit(0, true)
+	root, _ := state.Commit(0, true, false)
 
 	state, _ = New(root, db)
 	state.SetState(addr, skey2, sval2)
-	state.Commit(1, true)
+	state.Commit(1, true, false)
 
 	// Copy the committed state database, the copied one is not fully functional.
 	copied := state.Copy()
@@ -943,7 +943,7 @@ func TestDeleteCreateRevert(t *testing.T) {
 	addr := common.BytesToAddress([]byte("so"))
 	state.SetBalance(addr, uint256.NewInt(1), tracing.BalanceChangeUnspecified)
 
-	root, _ := state.Commit(0, false)
+	root, _ := state.Commit(0, false, false)
 	state, _ = New(root, state.db)
 
 	// Simulate self-destructing in one transaction, then create-reverting in another
@@ -955,7 +955,7 @@ func TestDeleteCreateRevert(t *testing.T) {
 	state.RevertToSnapshot(id)
 
 	// Commit the entire state and make sure we don't crash and have the correct state
-	root, _ = state.Commit(0, true)
+	root, _ = state.Commit(0, true, false)
 	state, _ = New(root, state.db)
 
 	if state.getStateObject(addr) != nil {
@@ -998,7 +998,7 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
 		a2 := common.BytesToAddress([]byte("another"))
 		state.SetBalance(a2, uint256.NewInt(100), tracing.BalanceChangeUnspecified)
 		state.SetCode(a2, []byte{1, 2, 4})
-		root, _ = state.Commit(0, false)
+		root, _ = state.Commit(0, false, false)
 		t.Logf("root: %x", root)
 		// force-flush
 		tdb.Commit(root, false)
@@ -1022,7 +1022,7 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
 	}
 	// Modify the state
 	state.SetBalance(addr, uint256.NewInt(2), tracing.BalanceChangeUnspecified)
-	root, err := state.Commit(0, false)
+	root, err := state.Commit(0, false, false)
 	if err == nil {
 		t.Fatalf("expected error, got root :%x", root)
 	}
@@ -1213,7 +1213,7 @@ func TestFlushOrderDataLoss(t *testing.T) {
 			state.SetState(common.Address{a}, common.Hash{a, s}, common.Hash{a, s})
 		}
 	}
-	root, err := state.Commit(0, false)
+	root, err := state.Commit(0, false, false)
 	if err != nil {
 		t.Fatalf("failed to commit state trie: %v", err)
 	}
@@ -1288,8 +1288,7 @@ func TestDeleteStorage(t *testing.T) {
 		value := common.Hash(uint256.NewInt(uint64(10 * i)).Bytes32())
 		state.SetState(addr, slot, value)
 	}
-	root, _ := state.Commit(0, true)
-
+	root, _ := state.Commit(0, true, false)
 	// Init phase done, create two states, one with snap and one without
 	fastState, _ := New(root, NewDatabase(tdb, snaps))
 	slowState, _ := New(root, NewDatabase(tdb, nil))
diff --git a/core/state/stateupdate.go b/core/state/stateupdate.go
index 45de660ca5..75c4ca028c 100644
--- a/core/state/stateupdate.go
+++ b/core/state/stateupdate.go
@@ -32,34 +32,56 @@ type contractCode struct {
 
 // accountDelete represents an operation for deleting an Ethereum account.
 type accountDelete struct {
-	address        common.Address         // address is the unique account identifier
-	origin         []byte                 // origin is the original value of account data in slim-RLP encoding.
-	storages       map[common.Hash][]byte // storages stores mutated slots, the value should be nil.
-	storagesOrigin map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in prefix-zero-trimmed RLP format.
+	address common.Address // address is the unique account identifier
+	origin  []byte         // origin is the original value of account data in slim-RLP encoding.
+
+	// storages stores mutated slots, the value should be nil.
+	storages map[common.Hash][]byte
+
+	// storagesOrigin stores the original values of mutated slots in
+	// prefix-zero-trimmed RLP format. The map key refers to the **HASH**
+	// of the raw storage slot key.
+	storagesOrigin map[common.Hash][]byte
 }
 
 // accountUpdate represents an operation for updating an Ethereum account.
 type accountUpdate struct {
-	address        common.Address         // address is the unique account identifier
-	data           []byte                 // data is the slim-RLP encoded account data.
-	origin         []byte                 // origin is the original value of account data in slim-RLP encoding.
-	code           *contractCode          // code represents mutated contract code; nil means it's not modified.
-	storages       map[common.Hash][]byte // storages stores mutated slots in prefix-zero-trimmed RLP format.
-	storagesOrigin map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in prefix-zero-trimmed RLP format.
+	address  common.Address         // address is the unique account identifier
+	data     []byte                 // data is the slim-RLP encoded account data.
+	origin   []byte                 // origin is the original value of account data in slim-RLP encoding.
+	code     *contractCode          // code represents mutated contract code; nil means it's not modified.
+	storages map[common.Hash][]byte // storages stores mutated slots in prefix-zero-trimmed RLP format.
+
+	// storagesOriginByKey and storagesOriginByHash both store the original values
+	// of mutated slots in prefix-zero-trimmed RLP format. The difference is that
+	// storagesOriginByKey uses the **raw** storage slot key as the map ID, while
+	// storagesOriginByHash uses the **hash** of the storage slot key instead.
+	storagesOriginByKey  map[common.Hash][]byte
+	storagesOriginByHash map[common.Hash][]byte
 }
 
 // stateUpdate represents the difference between two states resulting from state
 // execution. It contains information about mutated contract codes, accounts,
 // and storage slots, along with their original values.
 type stateUpdate struct {
-	originRoot     common.Hash                               // hash of the state before applying mutation
-	root           common.Hash                               // hash of the state after applying mutation
-	accounts       map[common.Hash][]byte                    // accounts stores mutated accounts in 'slim RLP' encoding
-	accountsOrigin map[common.Address][]byte                 // accountsOrigin stores the original values of mutated accounts in 'slim RLP' encoding
-	storages       map[common.Hash]map[common.Hash][]byte    // storages stores mutated slots in 'prefix-zero-trimmed' RLP format
-	storagesOrigin map[common.Address]map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in 'prefix-zero-trimmed' RLP format
-	codes          map[common.Address]contractCode           // codes contains the set of dirty codes
-	nodes          *trienode.MergedNodeSet                   // Aggregated dirty nodes caused by state changes
+	originRoot     common.Hash               // hash of the state before applying mutation
+	root           common.Hash               // hash of the state after applying mutation
+	accounts       map[common.Hash][]byte    // accounts stores mutated accounts in 'slim RLP' encoding
+	accountsOrigin map[common.Address][]byte // accountsOrigin stores the original values of mutated accounts in 'slim RLP' encoding
+
+	// storages stores mutated slots in 'prefix-zero-trimmed' RLP format.
+	// The value is keyed by account hash and **storage slot key hash**.
+	storages map[common.Hash]map[common.Hash][]byte
+
+	// storagesOrigin stores the original values of mutated slots in
+	// 'prefix-zero-trimmed' RLP format.
+	// (a) the value is keyed by account hash and **storage slot key** if rawStorageKey is true;
+	// (b) the value is keyed by account hash and **storage slot key hash** if rawStorageKey is false;
+	storagesOrigin map[common.Address]map[common.Hash][]byte
+	rawStorageKey  bool
+
+	codes map[common.Address]contractCode // codes contains the set of dirty codes
+	nodes *trienode.MergedNodeSet         // Aggregated dirty nodes caused by state changes
 }
 
 // empty returns a flag indicating the state transition is empty or not.
@@ -67,10 +89,13 @@ func (sc *stateUpdate) empty() bool {
 	return sc.originRoot == sc.root
 }
 
-// newStateUpdate constructs a state update object, representing the differences
-// between two states by performing state execution. It aggregates the given
-// account deletions and account updates to form a comprehensive state update.
-func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet) *stateUpdate {
+// newStateUpdate constructs a state update object by identifying the differences
+// between two states through state execution. It combines the specified account
+// deletions and account updates to create a complete state update.
+//
+// rawStorageKey is a flag indicating whether to use the raw storage slot key or
+// the hash of the slot key for constructing state update object.
+func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet) *stateUpdate {
 	var (
 		accounts       = make(map[common.Hash][]byte)
 		accountsOrigin = make(map[common.Address][]byte)
@@ -78,13 +103,14 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
 		storagesOrigin = make(map[common.Address]map[common.Hash][]byte)
 		codes          = make(map[common.Address]contractCode)
 	)
-	// Due to the fact that some accounts could be destructed and resurrected
-	// within the same block, the deletions must be aggregated first.
+	// Since some accounts might be destroyed and recreated within the same
+	// block, deletions must be aggregated first.
 	for addrHash, op := range deletes {
 		addr := op.address
 		accounts[addrHash] = nil
 		accountsOrigin[addr] = op.origin
 
+		// If storage wiping exists, the hash of the storage slot key must be used
 		if len(op.storages) > 0 {
 			storages[addrHash] = op.storages
 		}
@@ -118,12 +144,16 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
 		}
 		// Aggregate the storage original values. If the slot is already present
 		// in aggregated storagesOrigin set, skip it.
-		if len(op.storagesOrigin) > 0 {
+		storageOriginSet := op.storagesOriginByHash
+		if rawStorageKey {
+			storageOriginSet = op.storagesOriginByKey
+		}
+		if len(storageOriginSet) > 0 {
 			origin, exist := storagesOrigin[addr]
 			if !exist {
-				storagesOrigin[addr] = op.storagesOrigin
+				storagesOrigin[addr] = storageOriginSet
 			} else {
-				for key, slot := range op.storagesOrigin {
+				for key, slot := range storageOriginSet {
 					if _, found := origin[key]; !found {
 						origin[key] = slot
 					}
@@ -138,6 +168,7 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
 		accountsOrigin: accountsOrigin,
 		storages:       storages,
 		storagesOrigin: storagesOrigin,
+		rawStorageKey:  rawStorageKey,
 		codes:          codes,
 		nodes:          nodes,
 	}
@@ -153,5 +184,6 @@ func (sc *stateUpdate) stateSet() *triedb.StateSet {
 		AccountsOrigin: sc.accountsOrigin,
 		Storages:       sc.storages,
 		StoragesOrigin: sc.storagesOrigin,
+		RawStorageKey:  sc.rawStorageKey,
 	}
 }
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index efa56f8860..5c8b5a90f7 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -79,7 +79,7 @@ func makeTestState(scheme string) (ethdb.Database, Database, *triedb.Database, c
 		}
 		accounts = append(accounts, acc)
 	}
-	root, _ := state.Commit(0, false)
+	root, _ := state.Commit(0, false, false)
 
 	// Return the generated state
 	return db, sdb, nodeDb, root, accounts
diff --git a/core/state/trie_prefetcher_test.go b/core/state/trie_prefetcher_test.go
index d96727704c..4d1b627c4d 100644
--- a/core/state/trie_prefetcher_test.go
+++ b/core/state/trie_prefetcher_test.go
@@ -83,7 +83,7 @@ func TestVerklePrefetcher(t *testing.T) {
 	state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie
 	state.SetCode(addr, []byte("hello"))                                         // Change an external metadata
 	state.SetState(addr, skey, sval)                                             // Change the storage trie
-	root, _ := state.Commit(0, true)
+	root, _ := state.Commit(0, true, false)
 
 	state, _ = New(root, sdb)
 	sRoot := state.GetStorageRoot(addr)
diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go
index e4441bec5d..3d90ec4412 100644
--- a/core/txpool/blobpool/blobpool_test.go
+++ b/core/txpool/blobpool/blobpool_test.go
@@ -650,7 +650,7 @@ func TestOpenDrops(t *testing.T) {
 	statedb.AddBalance(crypto.PubkeyToAddress(overcapper.PublicKey), uint256.NewInt(10000000), tracing.BalanceChangeUnspecified)
 	statedb.AddBalance(crypto.PubkeyToAddress(duplicater.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
 	statedb.AddBalance(crypto.PubkeyToAddress(repeater.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
-	statedb.Commit(0, true)
+	statedb.Commit(0, true, false)
 
 	chain := &testBlockChain{
 		config:  params.MainnetChainConfig,
@@ -769,7 +769,7 @@ func TestOpenIndex(t *testing.T) {
 	// Create a blob pool out of the pre-seeded data
 	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
 	statedb.AddBalance(addr, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
-	statedb.Commit(0, true)
+	statedb.Commit(0, true, false)
 
 	chain := &testBlockChain{
 		config:  params.MainnetChainConfig,
@@ -871,7 +871,7 @@ func TestOpenHeap(t *testing.T) {
 	statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
 	statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
 	statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
-	statedb.Commit(0, true)
+	statedb.Commit(0, true, false)
 
 	chain := &testBlockChain{
 		config:  params.MainnetChainConfig,
@@ -951,7 +951,7 @@ func TestOpenCap(t *testing.T) {
 		statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
 		statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
 		statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
-		statedb.Commit(0, true)
+		statedb.Commit(0, true, false)
 
 		chain := &testBlockChain{
 			config:  params.MainnetChainConfig,
@@ -1393,7 +1393,7 @@ func TestAdd(t *testing.T) {
 				store.Put(blob)
 			}
 		}
-		statedb.Commit(0, true)
+		statedb.Commit(0, true, false)
 		store.Close()
 
 		// Create a blob pool out of the pre-seeded dats
@@ -1519,7 +1519,7 @@ func benchmarkPoolPending(b *testing.B, datacap uint64) {
 		statedb.AddBalance(addr, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
 		pool.add(tx)
 	}
-	statedb.Commit(0, true)
+	statedb.Commit(0, true, false)
 	defer pool.Close()
 
 	// Benchmark assembling the pending
diff --git a/eth/api_debug_test.go b/eth/api_debug_test.go
index cfb8829b5c..02b85f69fd 100644
--- a/eth/api_debug_test.go
+++ b/eth/api_debug_test.go
@@ -82,7 +82,7 @@ func TestAccountRange(t *testing.T) {
 			m[addr] = true
 		}
 	}
-	root, _ := sdb.Commit(0, true)
+	root, _ := sdb.Commit(0, true, false)
 	sdb, _ = state.New(root, statedb)
 
 	trie, err := statedb.OpenTrie(root)
@@ -140,7 +140,7 @@ func TestEmptyAccountRange(t *testing.T) {
 		st, _   = state.New(types.EmptyRootHash, statedb)
 	)
 	// Commit(although nothing to flush) and re-init the statedb
-	st.Commit(0, true)
+	st.Commit(0, true, false)
 	st, _ = state.New(types.EmptyRootHash, statedb)
 
 	results := st.RawDump(&state.DumpConfig{
@@ -183,7 +183,7 @@ func TestStorageRangeAt(t *testing.T) {
 	for _, entry := range storage {
 		sdb.SetState(addr, *entry.Key, entry.Value)
 	}
-	root, _ := sdb.Commit(0, false)
+	root, _ := sdb.Commit(0, false, false)
 	sdb, _ = state.New(root, db)
 
 	// Check a few combinations of limit and start/end.
diff --git a/eth/state_accessor.go b/eth/state_accessor.go
index 0749d73791..99ed28d96a 100644
--- a/eth/state_accessor.go
+++ b/eth/state_accessor.go
@@ -152,7 +152,7 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
 			return nil, nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err)
 		}
 		// Finalize the state so any modifications are written to the trie
-		root, err := statedb.Commit(current.NumberU64(), eth.blockchain.Config().IsEIP158(current.Number()))
+		root, err := statedb.Commit(current.NumberU64(), eth.blockchain.Config().IsEIP158(current.Number()), eth.blockchain.Config().IsCancun(current.Number(), current.Time()))
 		if err != nil {
 			return nil, nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w",
 				current.NumberU64(), current.Root().Hex(), err)
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index 740ebd4afd..e658b62ebf 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -339,7 +339,7 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
 	st.StateDB.AddBalance(block.Coinbase(), new(uint256.Int), tracing.BalanceChangeUnspecified)
 
 	// Commit state mutations into database.
-	root, _ = st.StateDB.Commit(block.NumberU64(), config.IsEIP158(block.Number()))
+	root, _ = st.StateDB.Commit(block.NumberU64(), config.IsEIP158(block.Number()), config.IsCancun(block.Number(), block.Time()))
 	if tracer := evm.Config.Tracer; tracer != nil && tracer.OnTxEnd != nil {
 		receipt := &types.Receipt{GasUsed: vmRet.UsedGas}
 		tracer.OnTxEnd(receipt, nil)
@@ -512,7 +512,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc, snapshotter bo
 		}
 	}
 	// Commit and re-open to start with a clean state.
-	root, _ := statedb.Commit(0, false)
+	root, _ := statedb.Commit(0, false, false)
 
 	// If snapshot is requested, initialize the snapshotter and use it in state.
 	var snaps *snapshot.Tree
diff --git a/triedb/pathdb/buffer.go b/triedb/pathdb/buffer.go
index 68e136f193..dea8875bda 100644
--- a/triedb/pathdb/buffer.go
+++ b/triedb/pathdb/buffer.go
@@ -46,7 +46,7 @@ func newBuffer(limit int, nodes *nodeSet, states *stateSet, layers uint64) *buff
 		nodes = newNodeSet(nil)
 	}
 	if states == nil {
-		states = newStates(nil, nil)
+		states = newStates(nil, nil, false)
 	}
 	return &buffer{
 		layers: layers,
diff --git a/triedb/pathdb/database_test.go b/triedb/pathdb/database_test.go
index a6b1d3c045..f4b3fcec23 100644
--- a/triedb/pathdb/database_test.go
+++ b/triedb/pathdb/database_test.go
@@ -91,29 +91,47 @@ func newCtx(stateRoot common.Hash) *genctx {
 	}
 }
 
+func (ctx *genctx) storageOriginSet(rawStorageKey bool, t *tester) map[common.Address]map[common.Hash][]byte {
+	if !rawStorageKey {
+		return ctx.storageOrigin
+	}
+	set := make(map[common.Address]map[common.Hash][]byte)
+	for addr, storage := range ctx.storageOrigin {
+		subset := make(map[common.Hash][]byte)
+		for hash, val := range storage {
+			key := t.hashPreimage(hash)
+			subset[key] = val
+		}
+		set[addr] = subset
+	}
+	return set
+}
+
 type tester struct {
 	db        *Database
 	roots     []common.Hash
-	preimages map[common.Hash]common.Address
-	accounts  map[common.Hash][]byte
-	storages  map[common.Hash]map[common.Hash][]byte
+	preimages map[common.Hash][]byte
+
+	// current state set
+	accounts map[common.Hash][]byte
+	storages map[common.Hash]map[common.Hash][]byte
 
 	// state snapshots
 	snapAccounts map[common.Hash]map[common.Hash][]byte
 	snapStorages map[common.Hash]map[common.Hash]map[common.Hash][]byte
 }
 
-func newTester(t *testing.T, historyLimit uint64) *tester {
+func newTester(t *testing.T, historyLimit uint64, isVerkle bool) *tester {
 	var (
 		disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
 		db      = New(disk, &Config{
 			StateHistory:    historyLimit,
 			CleanCacheSize:  16 * 1024,
 			WriteBufferSize: 16 * 1024,
-		}, false)
+		}, isVerkle)
 		obj = &tester{
 			db:           db,
-			preimages:    make(map[common.Hash]common.Address),
+			preimages:    make(map[common.Hash][]byte),
 			accounts:     make(map[common.Hash][]byte),
 			storages:     make(map[common.Hash]map[common.Hash][]byte),
 			snapAccounts: make(map[common.Hash]map[common.Hash][]byte),
@@ -125,7 +143,8 @@ func newTester(t *testing.T, historyLimit uint64) *tester {
 		if len(obj.roots) != 0 {
 			parent = obj.roots[len(obj.roots)-1]
 		}
-		root, nodes, states := obj.generate(parent)
+		root, nodes, states := obj.generate(parent, i > 6)
+
 		if err := db.Update(root, parent, uint64(i), nodes, states); err != nil {
 			panic(fmt.Errorf("failed to update state changes, err: %w", err))
 		}
@@ -134,6 +153,14 @@ func newTester(t *testing.T, historyLimit uint64) *tester {
 	return obj
 }
 
+func (t *tester) accountPreimage(hash common.Hash) common.Address {
+	return common.BytesToAddress(t.preimages[hash])
+}
+
+func (t *tester) hashPreimage(hash common.Hash) common.Hash {
+	return common.BytesToHash(t.preimages[hash])
+}
+
 func (t *tester) release() {
 	t.db.Close()
 	t.db.diskdb.Close()
@@ -141,7 +168,7 @@ func (t *tester) release() {
 
 func (t *tester) randAccount() (common.Address, []byte) {
 	for addrHash, account := range t.accounts {
-		return t.preimages[addrHash], account
+		return t.accountPreimage(addrHash), account
 	}
 	return common.Address{}, nil
 }
@@ -154,7 +181,9 @@ func (t *tester) generateStorage(ctx *genctx, addr common.Address) common.Hash {
 	)
 	for i := 0; i < 10; i++ {
 		v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32)))
-		hash := testrand.Hash()
+		key := testrand.Bytes(32)
+		hash := crypto.Keccak256Hash(key)
+		t.preimages[hash] = key
 
 		storage[hash] = v
 		origin[hash] = nil
@@ -183,7 +212,9 @@ func (t *tester) mutateStorage(ctx *genctx, addr common.Address, root common.Has
 	}
 	for i := 0; i < 3; i++ {
 		v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32)))
-		hash := testrand.Hash()
+		key := testrand.Bytes(32)
+		hash := crypto.Keccak256Hash(key)
+		t.preimages[hash] = key
 
 		storage[hash] = v
 		origin[hash] = nil
@@ -216,7 +247,7 @@ func (t *tester) clearStorage(ctx *genctx, addr common.Address, root common.Hash
 	return root
 }
 
-func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNodeSet, *StateSetWithOrigin) {
+func (t *tester) generate(parent common.Hash, rawStorageKey bool) (common.Hash, *trienode.MergedNodeSet, *StateSetWithOrigin) {
 	var (
 		ctx     = newCtx(parent)
 		dirties = make(map[common.Hash]struct{})
@@ -232,9 +263,12 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
 			// account creation
 			addr := testrand.Address()
 			addrHash := crypto.Keccak256Hash(addr.Bytes())
+
+			// short circuit if the account was already existent
 			if _, ok := t.accounts[addrHash]; ok {
 				continue
 			}
+			// short circuit if the account has been modified within the same transition
 			if _, ok := dirties[addrHash]; ok {
 				continue
 			}
@@ -243,7 +277,7 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
 			root := t.generateStorage(ctx, addr)
 			ctx.accounts[addrHash] = types.SlimAccountRLP(generateAccount(root))
 			ctx.accountOrigin[addr] = nil
-			t.preimages[addrHash] = addr
+			t.preimages[addrHash] = addr.Bytes()
 
 		case modifyAccountOp:
 			// account mutation
@@ -252,6 +286,8 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
 				continue
 			}
 			addrHash := crypto.Keccak256Hash(addr.Bytes())
+
+			// short circuit if the account has been modified within the same transition
 			if _, ok := dirties[addrHash]; ok {
 				continue
 			}
@@ -271,6 +307,8 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
 				continue
 			}
 			addrHash := crypto.Keccak256Hash(addr.Bytes())
+
+			// short circuit if the account has been modified within the same transition
 			if _, ok := dirties[addrHash]; ok {
 				continue
 			}
@@ -314,7 +352,8 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
 			delete(t.storages, addrHash)
 		}
 	}
-	return root, ctx.nodes, NewStateSetWithOrigin(ctx.accounts, ctx.storages, ctx.accountOrigin, ctx.storageOrigin)
+	storageOrigin := ctx.storageOriginSet(rawStorageKey, t)
+	return root, ctx.nodes, NewStateSetWithOrigin(ctx.accounts, ctx.storages, ctx.accountOrigin, storageOrigin, rawStorageKey)
 }
 
 // lastHash returns the latest root hash, or empty if nothing is cached.
@@ -409,7 +448,7 @@ func TestDatabaseRollback(t *testing.T) {
 	}()
 
 	// Verify state histories
-	tester := newTester(t, 0)
+	tester := newTester(t, 0, false)
 	defer tester.release()
 
 	if err := tester.verifyHistory(); err != nil {
@@ -443,7 +482,7 @@ func TestDatabaseRecoverable(t *testing.T) {
 	}()
 
 	var (
-		tester = newTester(t, 0)
+		tester = newTester(t, 0, false)
 		index  = tester.bottomIndex()
 	)
 	defer tester.release()
@@ -487,7 +526,7 @@ func TestDisable(t *testing.T) {
 		maxDiffLayers = 128
 	}()
 
-	tester := newTester(t, 0)
+	tester := newTester(t, 0, false)
 	defer tester.release()
 
 	stored := crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(tester.db.diskdb, nil))
@@ -529,7 +568,7 @@ func TestCommit(t *testing.T) {
 		maxDiffLayers = 128
 	}()
 
-	tester := newTester(t, 0)
+	tester := newTester(t, 0, false)
 	defer tester.release()
 
 	if err := tester.db.Commit(tester.lastHash(), false); err != nil {
@@ -559,7 +598,7 @@ func TestJournal(t *testing.T) {
 		maxDiffLayers = 128
 	}()
 
-	tester := newTester(t, 0)
+	tester := newTester(t, 0, false)
 	defer tester.release()
 
 	if err := tester.db.Journal(tester.lastHash()); err != nil {
@@ -589,7 +628,7 @@ func TestCorruptedJournal(t *testing.T) {
 		maxDiffLayers = 128
 	}()
 
-	tester := newTester(t, 0)
+	tester := newTester(t, 0, false)
 	defer tester.release()
 
 	if err := tester.db.Journal(tester.lastHash()); err != nil {
@@ -637,7 +676,7 @@ func TestTailTruncateHistory(t *testing.T) {
 		maxDiffLayers = 128
 	}()
 
-	tester := newTester(t, 10)
+	tester := newTester(t, 10, false)
 	defer tester.release()
 
 	tester.db.Close()
diff --git a/triedb/pathdb/difflayer_test.go b/triedb/pathdb/difflayer_test.go
index 7176d9964d..83ed833486 100644
--- a/triedb/pathdb/difflayer_test.go
+++ b/triedb/pathdb/difflayer_test.go
@@ -76,7 +76,7 @@ func benchmarkSearch(b *testing.B, depth int, total int) {
 				nblob = common.CopyBytes(blob)
 			}
 		}
-		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
+		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, false))
 	}
 	var layer layer
 	layer = emptyLayer()
@@ -118,7 +118,7 @@ func BenchmarkPersist(b *testing.B) {
 			)
 			nodes[common.Hash{}][string(path)] = node
 		}
-		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
+		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, false))
 	}
 	for i := 0; i < b.N; i++ {
 		b.StopTimer()
@@ -156,7 +156,7 @@ func BenchmarkJournal(b *testing.B) {
 			)
 			nodes[common.Hash{}][string(path)] = node
 		}
-		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
+		return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, false))
 	}
 	var layer layer
 	layer = emptyLayer()
diff --git a/triedb/pathdb/disklayer.go b/triedb/pathdb/disklayer.go
index 003431b19b..5e678dbdee 100644
--- a/triedb/pathdb/disklayer.go
+++ b/triedb/pathdb/disklayer.go
@@ -316,7 +316,7 @@ func (dl *diskLayer) revert(h *history) (*diskLayer, error) {
 	// Apply the reverse state changes upon the current state. This must
 	// be done before holding the lock in order to access state in "this"
 	// layer.
-	nodes, err := apply(dl.db, h.meta.parent, h.meta.root, h.accounts, h.storages)
+	nodes, err := apply(dl.db, h.meta.parent, h.meta.root, h.meta.version != stateHistoryV0, h.accounts, h.storages)
 	if err != nil {
 		return nil, err
 	}
diff --git a/triedb/pathdb/execute.go b/triedb/pathdb/execute.go
index e24d0710f3..80cecb82e7 100644
--- a/triedb/pathdb/execute.go
+++ b/triedb/pathdb/execute.go
@@ -30,11 +30,12 @@ import (
 
 // context wraps all fields for executing state diffs.
 type context struct {
-	prevRoot common.Hash
-	postRoot common.Hash
-	accounts map[common.Address][]byte
-	storages map[common.Address]map[common.Hash][]byte
-	nodes    *trienode.MergedNodeSet
+	prevRoot      common.Hash
+	postRoot      common.Hash
+	accounts      map[common.Address][]byte
+	storages      map[common.Address]map[common.Hash][]byte
+	nodes         *trienode.MergedNodeSet
+	rawStorageKey bool
 
 	// TODO (rjl493456442) abstract out the state hasher
 	// for supporting verkle tree.
@@ -43,18 +44,19 @@ type context struct {
 
 // apply processes the given state diffs, updates the corresponding post-state
 // and returns the trie nodes that have been modified.
-func apply(db database.NodeDatabase, prevRoot common.Hash, postRoot common.Hash, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) (map[common.Hash]map[string]*trienode.Node, error) {
+func apply(db database.NodeDatabase, prevRoot common.Hash, postRoot common.Hash, rawStorageKey bool, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) (map[common.Hash]map[string]*trienode.Node, error) {
 	tr, err := trie.New(trie.TrieID(postRoot), db)
 	if err != nil {
 		return nil, err
 	}
 	ctx := &context{
-		prevRoot:    prevRoot,
-		postRoot:    postRoot,
-		accounts:    accounts,
-		storages:    storages,
-		accountTrie: tr,
-		nodes:       trienode.NewMergedNodeSet(),
+		prevRoot:      prevRoot,
+		postRoot:      postRoot,
+		accounts:      accounts,
+		storages:      storages,
+		accountTrie:   tr,
+		rawStorageKey: rawStorageKey,
+		nodes:         trienode.NewMergedNodeSet(),
 	}
 	for addr, account := range accounts {
 		var err error
@@ -109,11 +111,15 @@ func updateAccount(ctx *context, db database.NodeDatabase, addr common.Address)
 		return err
 	}
 	for key, val := range ctx.storages[addr] {
+		tkey := key
+		if ctx.rawStorageKey {
+			tkey = h.hash(key.Bytes())
+		}
 		var err error
 		if len(val) == 0 {
-			err = st.Delete(key.Bytes())
+			err = st.Delete(tkey.Bytes())
 		} else {
-			err = st.Update(key.Bytes(), val)
+			err = st.Update(tkey.Bytes(), val)
 		}
 		if err != nil {
 			return err
@@ -166,7 +172,11 @@ func deleteAccount(ctx *context, db database.NodeDatabase, addr common.Address)
 		if len(val) != 0 {
 			return errors.New("expect storage deletion")
 		}
-		if err := st.Delete(key.Bytes()); err != nil {
+		tkey := key
+		if ctx.rawStorageKey {
+			tkey = h.hash(key.Bytes())
+		}
+		if err := st.Delete(tkey.Bytes()); err != nil {
 			return err
 		}
 	}
diff --git a/triedb/pathdb/history.go b/triedb/pathdb/history.go
index e1cd981153..9fb7d9e153 100644
--- a/triedb/pathdb/history.go
+++ b/triedb/pathdb/history.go
@@ -68,7 +68,8 @@ const (
 	slotIndexSize    = common.HashLength + 5     // The length of encoded slot index
 	historyMetaSize  = 9 + 2*common.HashLength   // The length of encoded history meta
 
-	stateHistoryVersion = uint8(0) // initial version of state history structure.
+	stateHistoryV0 = uint8(0) // initial version of state history structure
+	stateHistoryV1 = uint8(1) // use the storage slot raw key as the identifier instead of the key hash
 )
 
 // Each state history entry is consisted of five elements:
@@ -169,15 +170,18 @@ func (i *accountIndex) decode(blob []byte) {
 
 // slotIndex describes the metadata belonging to a storage slot.
 type slotIndex struct {
-	hash   common.Hash // The hash of slot key
-	length uint8       // The length of storage slot, up to 32 bytes defined in protocol
-	offset uint32      // The offset of item in storage slot data table
+	// the identifier of the storage slot. Specifically
+	// in v0, it's the hash of the raw storage slot key (32 bytes);
+	// in v1, it's the raw storage slot key (32 bytes);
+	id     common.Hash
+	length uint8  // The length of storage slot, up to 32 bytes defined in protocol
+	offset uint32 // The offset of item in storage slot data table
 }
 
 // encode packs slot index into byte stream.
 func (i *slotIndex) encode() []byte {
 	var buf [slotIndexSize]byte
-	copy(buf[:common.HashLength], i.hash.Bytes())
+	copy(buf[:common.HashLength], i.id.Bytes())
 	buf[common.HashLength] = i.length
 	binary.BigEndian.PutUint32(buf[common.HashLength+1:], i.offset)
 	return buf[:]
@@ -185,7 +189,7 @@ func (i *slotIndex) encode() []byte {
 
 // decode unpack slot index from the byte stream.
 func (i *slotIndex) decode(blob []byte) {
-	i.hash = common.BytesToHash(blob[:common.HashLength])
+	i.id = common.BytesToHash(blob[:common.HashLength])
 	i.length = blob[common.HashLength]
 	i.offset = binary.BigEndian.Uint32(blob[common.HashLength+1:])
 }
@@ -214,7 +218,7 @@ func (m *meta) decode(blob []byte) error {
 		return errors.New("no version tag")
 	}
 	switch blob[0] {
-	case stateHistoryVersion:
+	case stateHistoryV0, stateHistoryV1:
 		if len(blob) != historyMetaSize {
 			return fmt.Errorf("invalid state history meta, len: %d", len(blob))
 		}
@@ -242,7 +246,7 @@ type history struct {
 }
 
 // newHistory constructs the state history object with provided state change set.
-func newHistory(root common.Hash, parent common.Hash, block uint64, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) *history {
+func newHistory(root common.Hash, parent common.Hash, block uint64, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, rawStorageKey bool) *history {
 	var (
 		accountList = maps.Keys(accounts)
 		storageList = make(map[common.Address][]common.Hash)
@@ -254,9 +258,13 @@ func newHistory(root common.Hash, parent common.Hash, block uint64, accounts map
 		slices.SortFunc(slist, common.Hash.Cmp)
 		storageList[addr] = slist
 	}
+	version := stateHistoryV0
+	if rawStorageKey {
+		version = stateHistoryV1
+	}
 	return &history{
 		meta: &meta{
-			version: stateHistoryVersion,
+			version: version,
 			parent:  parent,
 			root:    root,
 			block:   block,
@@ -289,7 +297,7 @@ func (h *history) encode() ([]byte, []byte, []byte, []byte) {
 			// Encode storage slots in order
 			for _, slotHash := range h.storageList[addr] {
 				sIndex := slotIndex{
-					hash:   slotHash,
+					id:     slotHash,
 					length: uint8(len(slots[slotHash])),
 					offset: uint32(len(storageData)),
 				}
@@ -377,7 +385,7 @@ func (r *decoder) readAccount(pos int) (accountIndex, []byte, error) {
 // readStorage parses the storage slots from the byte stream with specified account.
 func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.Hash][]byte, error) {
 	var (
-		last    common.Hash
+		last    *common.Hash
 		count   = int(accIndex.storageSlots)
 		list    = make([]common.Hash, 0, count)
 		storage = make(map[common.Hash][]byte, count)
@@ -402,8 +410,10 @@ func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.
 		}
 		index.decode(r.storageIndexes[start:end])
 
-		if bytes.Compare(last.Bytes(), index.hash.Bytes()) >= 0 {
-			return nil, nil, errors.New("storage slot is not in order")
+		if last != nil {
+			if bytes.Compare(last.Bytes(), index.id.Bytes()) >= 0 {
+				return nil, nil, fmt.Errorf("storage slot is not in order, last: %x, current: %x", *last, index.id)
+			}
 		}
 		if index.offset != r.lastSlotDataRead {
 			return nil, nil, errors.New("storage data buffer is gapped")
@@ -412,10 +422,10 @@ func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.
 		if uint32(len(r.storageData)) < sEnd {
 			return nil, nil, errors.New("storage data buffer is corrupted")
 		}
-		storage[index.hash] = r.storageData[r.lastSlotDataRead:sEnd]
-		list = append(list, index.hash)
+		storage[index.id] = r.storageData[r.lastSlotDataRead:sEnd]
+		list = append(list, index.id)
 
-		last = index.hash
+		last = &index.id
 		r.lastSlotIndexRead = end
 		r.lastSlotDataRead = sEnd
 	}
@@ -498,7 +508,7 @@ func writeHistory(writer ethdb.AncientWriter, dl *diffLayer) error {
 	}
 	var (
 		start   = time.Now()
-		history = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states.accountOrigin, dl.states.storageOrigin)
+		history = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states.accountOrigin, dl.states.storageOrigin, dl.states.rawStorageKey)
 	)
 	accountData, storageData, accountIndex, storageIndex := history.encode()
 	dataSize := common.StorageSize(len(accountData) + len(storageData))
diff --git a/triedb/pathdb/history_inspect.go b/triedb/pathdb/history_inspect.go
index 240474da37..7dbe5959dc 100644
--- a/triedb/pathdb/history_inspect.go
+++ b/triedb/pathdb/history_inspect.go
@@ -21,6 +21,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
 )
@@ -109,12 +110,17 @@ func accountHistory(freezer ethdb.AncientReader, address common.Address, start,
 
 // storageHistory inspects the storage history within the range.
 func storageHistory(freezer ethdb.AncientReader, address common.Address, slot common.Hash, start uint64, end uint64) (*HistoryStats, error) {
+	slotHash := crypto.Keccak256Hash(slot.Bytes())
 	return inspectHistory(freezer, start, end, func(h *history, stats *HistoryStats) {
 		slots, exists := h.storages[address]
 		if !exists {
 			return
 		}
-		blob, exists := slots[slot]
+		key := slotHash
+		if h.meta.version != stateHistoryV0 {
+			key = slot
+		}
+		blob, exists := slots[key]
 		if !exists {
 			return
 		}
diff --git a/triedb/pathdb/history_test.go b/triedb/pathdb/history_test.go
index d430706dee..953f023530 100644
--- a/triedb/pathdb/history_test.go
+++ b/triedb/pathdb/history_test.go
@@ -49,9 +49,9 @@ func randomStateSet(n int) (map[common.Address][]byte, map[common.Address]map[co
 	return accounts, storages
 }
 
-func makeHistory() *history {
+func makeHistory(rawStorageKey bool) *history {
 	accounts, storages := randomStateSet(3)
-	return newHistory(testrand.Hash(), types.EmptyRootHash, 0, accounts, storages)
+	return newHistory(testrand.Hash(), types.EmptyRootHash, 0, accounts, storages, rawStorageKey)
 }
 
 func makeHistories(n int) []*history {
@@ -62,7 +62,7 @@ func makeHistories(n int) []*history {
 	for i := 0; i < n; i++ {
 		root := testrand.Hash()
 		accounts, storages := randomStateSet(3)
-		h := newHistory(root, parent, uint64(i), accounts, storages)
+		h := newHistory(root, parent, uint64(i), accounts, storages, false)
 		parent = root
 		result = append(result, h)
 	}
@@ -70,10 +70,15 @@ func makeHistories(n int) []*history {
 }
 
 func TestEncodeDecodeHistory(t *testing.T) {
+	testEncodeDecodeHistory(t, false)
+	testEncodeDecodeHistory(t, true)
+}
+
+func testEncodeDecodeHistory(t *testing.T, rawStorageKey bool) {
 	var (
 		m   meta
 		dec history
-		obj = makeHistory()
+		obj = makeHistory(rawStorageKey)
 	)
 	// check if metadata can be correctly encoded/decoded
 	blob := obj.meta.encode()
diff --git a/triedb/pathdb/iterator_test.go b/triedb/pathdb/iterator_test.go
index 48b5870b5b..05a166d1b6 100644
--- a/triedb/pathdb/iterator_test.go
+++ b/triedb/pathdb/iterator_test.go
@@ -131,7 +131,7 @@ func TestAccountIteratorBasics(t *testing.T) {
 			storage[hash] = accStorage
 		}
 	}
-	states := newStates(accounts, storage)
+	states := newStates(accounts, storage, false)
 	it := newDiffAccountIterator(common.Hash{}, states, nil)
 	verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
 
@@ -171,7 +171,7 @@ func TestStorageIteratorBasics(t *testing.T) {
 		storage[hash] = accStorage
 		nilStorage[hash] = nilstorage
 	}
-	states := newStates(accounts, storage)
+	states := newStates(accounts, storage, false)
 	for account := range accounts {
 		it := newDiffStorageIterator(account, common.Hash{}, states, nil)
 		verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
@@ -267,13 +267,13 @@ func TestAccountIteratorTraversal(t *testing.T) {
 
 	// Stack three diff layers on top with various overlaps
 	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 0, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil, false))
 
 	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 0, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil, false))
 
 	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 0, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil, false))
 
 	// Verify the single and multi-layer iterators
 	head := db.tree.get(common.HexToHash("0x04"))
@@ -314,13 +314,13 @@ func TestStorageIteratorTraversal(t *testing.T) {
 
 	// Stack three diff layers on top with various overlaps
 	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 0, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil, nil, false))
 
 	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 0, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil), nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil), nil, nil, false))
 
 	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 0, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil, nil, false))
 
 	// Verify the single and multi-layer iterators
 	head := db.tree.get(common.HexToHash("0x04"))
@@ -395,14 +395,14 @@ func TestAccountIteratorTraversalValues(t *testing.T) {
 		}
 	}
 	// Assemble a stack of snapshots from the account layers
-	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(a, nil, nil, nil))
-	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(b, nil, nil, nil))
-	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 4, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(c, nil, nil, nil))
-	db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 5, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(d, nil, nil, nil))
-	db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 6, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(e, nil, nil, nil))
-	db.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), 7, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(f, nil, nil, nil))
-	db.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), 8, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(g, nil, nil, nil))
-	db.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), 9, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(h, nil, nil, nil))
+	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(a, nil, nil, nil, false))
+	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(b, nil, nil, nil, false))
+	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 4, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(c, nil, nil, nil, false))
+	db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 5, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(d, nil, nil, nil, false))
+	db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 6, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(e, nil, nil, nil, false))
+	db.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), 7, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(f, nil, nil, nil, false))
+	db.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), 8, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(g, nil, nil, nil, false))
+	db.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), 9, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(h, nil, nil, nil, false))
 
 	// binaryIterator
 	r, _ := db.StateReader(common.HexToHash("0x09"))
@@ -504,14 +504,14 @@ func TestStorageIteratorTraversalValues(t *testing.T) {
 		}
 	}
 	// Assemble a stack of snapshots from the account layers
-	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(a), nil, nil))
-	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(b), nil, nil))
-	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 4, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(c), nil, nil))
-	db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 5, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(d), nil, nil))
-	db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 6, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(e), nil, nil))
-	db.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), 7, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(f), nil, nil))
-	db.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), 8, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(g), nil, nil))
-	db.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), 9, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(h), nil, nil))
+	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(a), nil, nil, false))
+	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(b), nil, nil, false))
+	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 4, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(c), nil, nil, false))
+	db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 5, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(d), nil, nil, false))
+	db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 6, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(e), nil, nil, false))
+	db.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), 7, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(f), nil, nil, false))
+	db.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), 8, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(g), nil, nil, false))
+	db.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), 9, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(h), nil, nil, false))
 
 	// binaryIterator
 	r, _ := db.StateReader(common.HexToHash("0x09"))
@@ -588,7 +588,7 @@ func TestAccountIteratorLargeTraversal(t *testing.T) {
 			parent = common.HexToHash(fmt.Sprintf("0x%02x", i))
 		}
 		db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), parent, uint64(i), trienode.NewMergedNodeSet(),
-			NewStateSetWithOrigin(makeAccounts(200), nil, nil, nil))
+			NewStateSetWithOrigin(makeAccounts(200), nil, nil, nil, false))
 	}
 	// Iterate the entire stack and ensure everything is hit only once
 	head := db.tree.get(common.HexToHash("0x80"))
@@ -626,13 +626,13 @@ func TestAccountIteratorFlattening(t *testing.T) {
 
 	// Create a stack of diffs on top
 	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil, false))
 
 	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil, false))
 
 	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil, false))
 
 	// Create a binary iterator and flatten the data from underneath it
 	head := db.tree.get(common.HexToHash("0x04"))
@@ -658,13 +658,13 @@ func TestAccountIteratorSeek(t *testing.T) {
 	// db.WaitGeneration()
 
 	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil, false))
 
 	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil, false))
 
 	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil, false))
 
 	// Account set is now
 	// 02: aa, ee, f0, ff
@@ -731,13 +731,13 @@ func testStorageIteratorSeek(t *testing.T, newIterator func(db *Database, root,
 
 	// Stack three diff layers on top with various overlaps
 	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil, nil, false))
 
 	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil), nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil), nil, nil, false))
 
 	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil), nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil), nil, nil, false))
 
 	// Storage slot set is now
 	// 02: 01, 03, 05
@@ -803,16 +803,16 @@ func testAccountIteratorDeletions(t *testing.T, newIterator func(db *Database, r
 
 	// Stack three diff layers on top with various overlaps
 	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0x11", "0x22", "0x33"), nil, nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0x11", "0x22", "0x33"), nil, nil, nil, false))
 
 	deleted := common.HexToHash("0x22")
 	accounts := randomAccountSet("0x11", "0x33")
 	accounts[deleted] = nil
 	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(accounts, nil, nil, nil))
+		NewStateSetWithOrigin(accounts, nil, nil, nil, false))
 
 	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0x33", "0x44", "0x55"), nil, nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0x33", "0x44", "0x55"), nil, nil, nil, false))
 
 	// The output should be 11,33,44,55
 	it := newIterator(db, common.HexToHash("0x04"), common.Hash{})
@@ -843,10 +843,10 @@ func TestStorageIteratorDeletions(t *testing.T) {
 
 	// Stack three diff layers on top with various overlaps
 	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil, nil, false))
 
 	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}), nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}), nil, nil, false))
 
 	// The output should be 02,04,05,06
 	it, _ := db.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.Hash{})
@@ -863,7 +863,7 @@ func TestStorageIteratorDeletions(t *testing.T) {
 		common.HexToHash("0xaa"): nil,
 	}
 	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(accounts, randomStorageSet([]string{"0xaa"}, nil, [][]string{{"0x02", "0x04", "0x05", "0x06"}}), nil, nil))
+		NewStateSetWithOrigin(accounts, randomStorageSet([]string{"0xaa"}, nil, [][]string{{"0x02", "0x04", "0x05", "0x06"}}), nil, nil, false))
 
 	it, _ = db.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
 	verifyIterator(t, 0, it, verifyStorage)
@@ -871,7 +871,7 @@ func TestStorageIteratorDeletions(t *testing.T) {
 
 	// Re-insert the slots of the same account
 	db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 4, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil), nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil), nil, nil, false))
 
 	// The output should be 07,08,09
 	it, _ = db.StorageIterator(common.HexToHash("0x05"), common.HexToHash("0xaa"), common.Hash{})
@@ -880,7 +880,7 @@ func TestStorageIteratorDeletions(t *testing.T) {
 
 	// Destruct the whole storage but re-create the account in the same layer
 	db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 5, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, [][]string{{"0x07", "0x08", "0x09"}}), nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, [][]string{{"0x07", "0x08", "0x09"}}), nil, nil, false))
 
 	it, _ = db.StorageIterator(common.HexToHash("0x06"), common.HexToHash("0xaa"), common.Hash{})
 	verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12
@@ -911,19 +911,19 @@ func testStaleIterator(t *testing.T, newIter func(db *Database, hash common.Hash
 
 	// [02 (disk), 03]
 	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01"}}, nil), nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01"}}, nil), nil, nil, false))
 	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02"}}, nil), nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02"}}, nil), nil, nil, false))
 	db.tree.cap(common.HexToHash("0x03"), 1)
 
 	// [02 (disk), 03, 04]
 	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x03"}}, nil), nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x03"}}, nil), nil, nil, false))
 	iter := newIter(db, common.HexToHash("0x04"))
 
 	// [04 (disk), 05]
 	db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 3, trienode.NewMergedNodeSet(),
-		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04"}}, nil), nil, nil))
+		NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04"}}, nil), nil, nil, false))
 	db.tree.cap(common.HexToHash("0x05"), 1)
 
 	// Iterator can't finish the traversal as layer 02 has become stale.
@@ -969,7 +969,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) {
 		if i == 1 {
 			parent = common.HexToHash(fmt.Sprintf("0x%02x", i))
 		}
-		db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), parent, uint64(i), trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(200), nil, nil, nil))
+		db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), parent, uint64(i), trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(200), nil, nil, nil, false))
 	}
 	// We call this once before the benchmark, so the creation of
 	// sorted account lists is not included in the results.
@@ -1059,9 +1059,9 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
 	db := New(rawdb.NewMemoryDatabase(), config, false)
 	// db.WaitGeneration()
 
-	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(2000), nil, nil, nil))
+	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(2000), nil, nil, nil, false))
 	for i := 2; i <= 100; i++ {
-		db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), uint64(i), trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(20), nil, nil, nil))
+		db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), uint64(i), trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(20), nil, nil, nil, false))
 	}
 	// We call this once before the benchmark, so the creation of
 	// sorted account lists is not included in the results.
diff --git a/triedb/pathdb/journal.go b/triedb/pathdb/journal.go
index 267d675bc2..79a7a22e0b 100644
--- a/triedb/pathdb/journal.go
+++ b/triedb/pathdb/journal.go
@@ -45,7 +45,8 @@ var (
 // - Version 0: initial version
 // - Version 1: storage.Incomplete field is removed
 // - Version 2: add post-modification state values
-const journalVersion uint64 = 2
+// - Version 3: a flag has been added to indicate whether the storage slot key is the raw key or a hash
+const journalVersion uint64 = 3
 
 // loadJournal tries to parse the layer journal from the disk.
 func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) {
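
As with earlier bumps, a journal written under a different version is not migrated: loading is expected to fail and pathdb rebuilds the layer tree from the persistent state instead. A trivial sketch of that gate (constant inlined for illustration):

    package sketch

    import "fmt"

    const journalVersion uint64 = 3 // mirrors the constant above

    // checkJournalVersion is an illustrative gate: any journal carrying a
    // different version is rejected, prompting a rebuild from the disk layer.
    func checkJournalVersion(got uint64) error {
    	if got != journalVersion {
    		return fmt.Errorf("journal version mismatch: have %d, want %d", got, journalVersion)
    	}
    	return nil
    }
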
diff --git a/triedb/pathdb/states.go b/triedb/pathdb/states.go
index 81d34da5df..969782e3c4 100644
--- a/triedb/pathdb/states.go
+++ b/triedb/pathdb/states.go
@@ -65,6 +65,8 @@ type stateSet struct {
 	accountListSorted []common.Hash                 // List of accounts for iteration. If it exists, it's sorted, otherwise it's nil
 	storageListSorted map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
 
+	rawStorageKey bool // indicates whether the storage set uses the raw slot key or the hash
+
 	// Lock for guarding the two lists above. These lists might be accessed
 	// concurrently and lock protection is essential to avoid concurrent
 	// slice or map read/write.
@@ -72,7 +74,7 @@ type stateSet struct {
 }
 
 // newStates constructs the state set with the provided account and storage data.
-func newStates(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte) *stateSet {
+func newStates(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte, rawStorageKey bool) *stateSet {
 	// Don't panic for the lazy callers, initialize the nil maps instead.
 	if accounts == nil {
 		accounts = make(map[common.Hash][]byte)
@@ -83,6 +85,7 @@ func newStates(accounts map[common.Hash][]byte, storages map[common.Hash]map[com
 	s := &stateSet{
 		accountData:       accounts,
 		storageData:       storages,
+		rawStorageKey:     rawStorageKey,
 		storageListSorted: make(map[common.Hash][]common.Hash),
 	}
 	s.size = s.check()
@@ -330,6 +333,9 @@ func (s *stateSet) updateSize(delta int) {
 // encode serializes the content of state set into the provided writer.
 func (s *stateSet) encode(w io.Writer) error {
 	// Encode accounts
+	if err := rlp.Encode(w, s.rawStorageKey); err != nil {
+		return err
+	}
 	type accounts struct {
 		AddrHashes []common.Hash
 		Accounts   [][]byte
@@ -367,6 +373,9 @@ func (s *stateSet) encode(w io.Writer) error {
 
 // decode deserializes the content from the rlp stream into the state set.
 func (s *stateSet) decode(r *rlp.Stream) error {
+	if err := r.Decode(&s.rawStorageKey); err != nil {
+		return fmt.Errorf("load diff raw storage key flag: %v", err)
+	}
 	type accounts struct {
 		AddrHashes []common.Hash
 		Accounts   [][]byte
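
The flag is written as a single RLP value ahead of the account and storage payloads, keeping the encoder and decoder symmetric. A runnable round-trip sketch using go-ethereum's rlp package (payload omitted):

    package main

    import (
    	"bytes"
    	"fmt"

    	"github.com/ethereum/go-ethereum/rlp"
    )

    func main() {
    	buf := new(bytes.Buffer)
    	// Encode the flag first, as stateSet.encode does above.
    	if err := rlp.Encode(buf, true); err != nil {
    		panic(err)
    	}
    	// Decode it back from a stream, as stateSet.decode does above.
    	var rawStorageKey bool
    	if err := rlp.NewStream(buf, 0).Decode(&rawStorageKey); err != nil {
    		panic(err)
    	}
    	fmt.Println("rawStorageKey:", rawStorageKey) // true
    }
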
@@ -435,23 +444,23 @@ func (s *stateSet) dbsize() int {
 type StateSetWithOrigin struct {
 	*stateSet
 
-	// AccountOrigin represents the account data before the state transition,
+	// accountOrigin represents the account data before the state transition,
 	// corresponding to both the accountData and destructSet. It's keyed by the
 	// account address. The nil value means the account was not present before.
 	accountOrigin map[common.Address][]byte
 
-	// StorageOrigin represents the storage data before the state transition,
+	// storageOrigin represents the storage data before the state transition,
 	// corresponding to storageData and deleted slots of destructSet. It's keyed
 	// by the account address and slot key hash. The nil value means the slot was
 	// not present.
 	storageOrigin map[common.Address]map[common.Hash][]byte
 
-	// Memory size of the state data (accountOrigin and storageOrigin)
+	// memory size of the state data (accountOrigin and storageOrigin)
 	size uint64
 }
 
 // NewStateSetWithOrigin constructs the state set with the provided data.
-func NewStateSetWithOrigin(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte, accountOrigin map[common.Address][]byte, storageOrigin map[common.Address]map[common.Hash][]byte) *StateSetWithOrigin {
+func NewStateSetWithOrigin(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte, accountOrigin map[common.Address][]byte, storageOrigin map[common.Address]map[common.Hash][]byte, rawStorageKey bool) *StateSetWithOrigin {
 	// Don't panic for the lazy callers, initialize the nil maps instead.
 	if accountOrigin == nil {
 		accountOrigin = make(map[common.Address][]byte)
@@ -471,7 +480,7 @@ func NewStateSetWithOrigin(accounts map[common.Hash][]byte, storages map[common.
 			size += 2*common.HashLength + len(data)
 		}
 	}
-	set := newStates(accounts, storages)
+	set := newStates(accounts, storages, rawStorageKey)
 	return &StateSetWithOrigin{
 		stateSet:      set,
 		accountOrigin: accountOrigin,
diff --git a/triedb/pathdb/states_test.go b/triedb/pathdb/states_test.go
index f097e90e81..30eb6ad6c8 100644
--- a/triedb/pathdb/states_test.go
+++ b/triedb/pathdb/states_test.go
@@ -44,6 +44,7 @@ func TestStatesMerge(t *testing.T) {
 				common.Hash{0x1}: {0x10},
 			},
 		},
+		false,
 	)
 	b := newStates(
 		map[common.Hash][]byte{
@@ -64,6 +65,7 @@ func TestStatesMerge(t *testing.T) {
 				common.Hash{0x1}: nil, // delete slot
 			},
 		},
+		false,
 	)
 	a.merge(b)
 
@@ -132,6 +134,7 @@ func TestStatesRevert(t *testing.T) {
 				common.Hash{0x1}: {0x10},
 			},
 		},
+		false,
 	)
 	b := newStates(
 		map[common.Hash][]byte{
@@ -152,6 +155,7 @@ func TestStatesRevert(t *testing.T) {
 				common.Hash{0x1}: nil,
 			},
 		},
+		false,
 	)
 	a.merge(b)
 	a.revertTo(
@@ -224,12 +228,13 @@ func TestStatesRevert(t *testing.T) {
 // before and was created during transition w, reverting w will retain an x=nil
 // entry in the set.
 func TestStateRevertAccountNullMarker(t *testing.T) {
-	a := newStates(nil, nil) // empty initial state
+	a := newStates(nil, nil, false) // empty initial state
 	b := newStates(
 		map[common.Hash][]byte{
 			{0xa}: {0xa},
 		},
 		nil,
+		false,
 	)
 	a.merge(b) // create account 0xa
 	a.revertTo(
@@ -254,7 +259,7 @@ func TestStateRevertAccountNullMarker(t *testing.T) {
 func TestStateRevertStorageNullMarker(t *testing.T) {
 	a := newStates(map[common.Hash][]byte{
 		{0xa}: {0xa},
-	}, nil) // initial state with account 0xa
+	}, nil, false) // initial state with account 0xa
 
 	b := newStates(
 		nil,
@@ -263,6 +268,7 @@ func TestStateRevertStorageNullMarker(t *testing.T) {
 				common.Hash{0x1}: {0x1},
 			},
 		},
+		false,
 	)
 	a.merge(b) // create slot 0x1
 	a.revertTo(
@@ -284,6 +290,11 @@ func TestStateRevertStorageNullMarker(t *testing.T) {
 }
 
 func TestStatesEncode(t *testing.T) {
+	testStatesEncode(t, false)
+	testStatesEncode(t, true)
+}
+
+func testStatesEncode(t *testing.T, rawStorageKey bool) {
 	s := newStates(
 		map[common.Hash][]byte{
 			{0x1}: {0x1},
@@ -293,6 +304,7 @@ func TestStatesEncode(t *testing.T) {
 				common.Hash{0x1}: {0x1},
 			},
 		},
+		rawStorageKey,
 	)
 	buf := bytes.NewBuffer(nil)
 	if err := s.encode(buf); err != nil {
@@ -308,9 +320,17 @@ func TestStatesEncode(t *testing.T) {
 	if !reflect.DeepEqual(s.storageData, dec.storageData) {
 		t.Fatal("Unexpected storage data")
 	}
+	if s.rawStorageKey != dec.rawStorageKey {
+		t.Fatal("Unexpected rawStorageKey flag")
+	}
 }
 
 func TestStateWithOriginEncode(t *testing.T) {
+	testStateWithOriginEncode(t, false)
+	testStateWithOriginEncode(t, true)
+}
+
+func testStateWithOriginEncode(t *testing.T, rawStorageKey bool) {
 	s := NewStateSetWithOrigin(
 		map[common.Hash][]byte{
 			{0x1}: {0x1},
@@ -328,6 +348,7 @@ func TestStateWithOriginEncode(t *testing.T) {
 				common.Hash{0x1}: {0x1},
 			},
 		},
+		rawStorageKey,
 	)
 	buf := bytes.NewBuffer(nil)
 	if err := s.encode(buf); err != nil {
@@ -349,6 +370,9 @@ func TestStateWithOriginEncode(t *testing.T) {
 	if !reflect.DeepEqual(s.storageOrigin, dec.storageOrigin) {
 		t.Fatal("Unexpected storage origin data")
 	}
+	if s.rawStorageKey != dec.rawStorageKey {
+		t.Fatal("Unexpected rawStorageKey flag")
+	}
 }
 
 func TestStateSizeTracking(t *testing.T) {
@@ -375,6 +399,7 @@ func TestStateSizeTracking(t *testing.T) {
 				common.Hash{0x1}: {0x10}, // 2*common.HashLength+1
 			},
 		},
+		false,
 	)
 	if a.size != uint64(expSizeA) {
 		t.Fatalf("Unexpected size, want: %d, got: %d", expSizeA, a.size)
@@ -406,6 +431,7 @@ func TestStateSizeTracking(t *testing.T) {
 				common.Hash{0x3}: nil, // 2*common.HashLength, slot deletion
 			},
 		},
+		false,
 	)
 	if b.size != uint64(expSizeB) {
 		t.Fatalf("Unexpected size, want: %d, got: %d", expSizeB, b.size)
diff --git a/triedb/states.go b/triedb/states.go
index fa432e0704..9fabdb088d 100644
--- a/triedb/states.go
+++ b/triedb/states.go
@@ -27,6 +27,7 @@ type StateSet struct {
 	AccountsOrigin map[common.Address][]byte                 // Original values of mutated accounts in 'slim RLP' encoding
 	Storages       map[common.Hash]map[common.Hash][]byte    // Mutated storage slots in 'prefix-zero-trimmed' RLP format
 	StoragesOrigin map[common.Address]map[common.Hash][]byte // Original values of mutated storage slots in 'prefix-zero-trimmed' RLP format
+	RawStorageKey  bool                                      // Flag whether the storage set uses the raw slot key or the hash
 }
 
 // NewStateSet initializes an empty state set.
@@ -45,5 +46,5 @@ func (set *StateSet) internal() *pathdb.StateSetWithOrigin {
 	if set == nil {
 		return nil
 	}
-	return pathdb.NewStateSetWithOrigin(set.Accounts, set.Storages, set.AccountsOrigin, set.StoragesOrigin)
+	return pathdb.NewStateSetWithOrigin(set.Accounts, set.Storages, set.AccountsOrigin, set.StoragesOrigin, set.RawStorageKey)
 }
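
Callers outside this diff assemble the StateSet and decide the flag; a hedged sketch of constructing one with raw storage keys (field values are placeholders):

    package main

    import (
    	"github.com/ethereum/go-ethereum/common"
    	"github.com/ethereum/go-ethereum/triedb"
    )

    func main() {
    	// Illustrative only: the flag rides along with the mutated state and is
    	// handed down to pathdb.NewStateSetWithOrigin via StateSet.internal().
    	set := &triedb.StateSet{
    		Accounts:       make(map[common.Hash][]byte),
    		AccountsOrigin: make(map[common.Address][]byte),
    		Storages:       make(map[common.Hash]map[common.Hash][]byte),
    		StoragesOrigin: make(map[common.Address]map[common.Hash][]byte),
    		RawStorageKey:  true, // storage maps are keyed by raw slot keys, not their hashes
    	}
    	_ = set
    }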