From 4a19c0e7b81e553a9e2e4ec4d50e3c2cdb957e3a Mon Sep 17 00:00:00 2001
From: gary rong
Date: Wed, 10 Jun 2020 17:46:13 +0800
Subject: [PATCH] core, eth, internal: include read storage entries in structlog output (#21204)

* core, eth, internal: extend structLog tracer

* core/vm, internal: add storage view

* core, internal: add slots to storage directly

* core: remove useless

* core: address martin's comment

* core/vm: fix tests
---
 core/vm/logger.go         | 61 ++++++++++++++++++++-------------------
 core/vm/logger_test.go    |  8 ++---
 eth/fetcher/tx_fetcher.go | 14 ++++-----
 3 files changed, 43 insertions(+), 40 deletions(-)

diff --git a/core/vm/logger.go b/core/vm/logger.go
index f786d1e9e3..2c90399aca 100644
--- a/core/vm/logger.go
+++ b/core/vm/logger.go
@@ -42,7 +42,6 @@ func (s Storage) Copy() Storage {
     for key, value := range s {
         cpy[key] = value
     }
-
     return cpy
 }
 
@@ -118,16 +117,16 @@ type Tracer interface {
 type StructLogger struct {
     cfg LogConfig
 
-    logs          []StructLog
-    changedValues map[common.Address]Storage
-    output        []byte
-    err           error
+    storage map[common.Address]Storage
+    logs    []StructLog
+    output  []byte
+    err     error
 }
 // NewStructLogger returns a new logger
 func NewStructLogger(cfg *LogConfig) *StructLogger {
     logger := &StructLogger{
-        changedValues: make(map[common.Address]Storage),
+        storage: make(map[common.Address]Storage),
     }
     if cfg != nil {
         logger.cfg = *cfg
     }
@@ -142,28 +141,12 @@ func (l *StructLogger) CaptureStart(from common.Address, to common.Address, crea
 
 // CaptureState logs a new structured log message and pushes it out to the environment
 //
-// CaptureState also tracks SSTORE ops to track dirty values.
+// CaptureState also tracks SLOAD/SSTORE ops to track storage change.
 func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, rStack *ReturnStack, contract *Contract, depth int, err error) error {
     // check if already accumulated the specified number of logs
     if l.cfg.Limit != 0 && l.cfg.Limit <= len(l.logs) {
         return errTraceLimitReached
     }
-
-    // initialise new changed values storage container for this contract
-    // if not present.
-    if l.changedValues[contract.Address()] == nil {
-        l.changedValues[contract.Address()] = make(Storage)
-    }
-
-    // capture SSTORE opcodes and determine the changed value and store
-    // it in the local storage container.
-    if op == SSTORE && stack.len() >= 2 {
-        var (
-            value   = common.Hash(stack.data[stack.len()-2].Bytes32())
-            address = common.Hash(stack.data[stack.len()-1].Bytes32())
-        )
-        l.changedValues[contract.Address()][address] = value
-    }
     // Copy a snapshot of the current memory state to a new buffer
     var mem []byte
     if !l.cfg.DisableMemory {
@@ -178,19 +161,39 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui
             stck[i] = new(big.Int).Set(item.ToBig())
         }
     }
-    // Copy a snapshot of the current storage to a new container
-    var storage Storage
-    if !l.cfg.DisableStorage {
-        storage = l.changedValues[contract.Address()].Copy()
-    }
     var rstack []uint64
     if !l.cfg.DisableStack && rStack != nil {
         rstck := make([]uint64, len(rStack.data))
         copy(rstck, rStack.data)
     }
+    // Copy a snapshot of the current storage to a new container
+    var storage Storage
+    if !l.cfg.DisableStorage {
+        // initialise new changed values storage container for this contract
+        // if not present.
+        if l.storage[contract.Address()] == nil {
+            l.storage[contract.Address()] = make(Storage)
+        }
+        // capture SLOAD opcodes and record the read entry in the local storage
+        if op == SLOAD && stack.len() >= 1 {
+            var (
+                address = common.Hash(stack.data[stack.len()-1].Bytes32())
+                value   = env.StateDB.GetState(contract.Address(), address)
+            )
+            l.storage[contract.Address()][address] = value
+        }
+        // capture SSTORE opcodes and record the written entry in the local storage.
+        if op == SSTORE && stack.len() >= 2 {
+            var (
+                value   = common.Hash(stack.data[stack.len()-2].Bytes32())
+                address = common.Hash(stack.data[stack.len()-1].Bytes32())
+            )
+            l.storage[contract.Address()][address] = value
+        }
+        storage = l.storage[contract.Address()].Copy()
+    }
     // create a new snapshot of the EVM.
     log := StructLog{pc, op, gas, cost, mem, memory.Len(), stck, rstack, storage, depth, env.StateDB.GetRefund(), err}
-
     l.logs = append(l.logs, log)
     return nil
 }
diff --git a/core/vm/logger_test.go b/core/vm/logger_test.go
index 9ee7237901..572edf9807 100644
--- a/core/vm/logger_test.go
+++ b/core/vm/logger_test.go
@@ -62,11 +62,11 @@ func TestStoreCapture(t *testing.T) {
     stack.push(uint256.NewInt())
     var index common.Hash
     logger.CaptureState(env, 0, SSTORE, 0, 0, mem, stack, rstack, contract, 0, nil)
-    if len(logger.changedValues[contract.Address()]) == 0 {
-        t.Fatalf("expected exactly 1 changed value on address %x, got %d", contract.Address(), len(logger.changedValues[contract.Address()]))
+    if len(logger.storage[contract.Address()]) == 0 {
+        t.Fatalf("expected exactly 1 changed value on address %x, got %d", contract.Address(), len(logger.storage[contract.Address()]))
     }
     exp := common.BigToHash(big.NewInt(1))
-    if logger.changedValues[contract.Address()][index] != exp {
-        t.Errorf("expected %x, got %x", exp, logger.changedValues[contract.Address()][index])
+    if logger.storage[contract.Address()][index] != exp {
+        t.Errorf("expected %x, got %x", exp, logger.storage[contract.Address()][index])
     }
 }
diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go
index e46c5c5764..3ba7753916 100644
--- a/eth/fetcher/tx_fetcher.go
+++ b/eth/fetcher/tx_fetcher.go
@@ -387,7 +387,7 @@ func (f *TxFetcher) loop() {
                     if announces := f.announces[ann.origin]; announces != nil {
                         announces[hash] = struct{}{}
                     } else {
-                        f.announces[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
+                        f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}}
                     }
                     continue
                 }
@@ -400,7 +400,7 @@
                     if announces := f.announces[ann.origin]; announces != nil {
                         announces[hash] = struct{}{}
                     } else {
-                        f.announces[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
+                        f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}}
                     }
                     continue
                 }
@@ -413,18 +413,18 @@
                     if waitslots := f.waitslots[ann.origin]; waitslots != nil {
                         waitslots[hash] = struct{}{}
                     } else {
-                        f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
+                        f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}}
                     }
                     continue
                 }
                 // Transaction unknown to the fetcher, insert it into the waiting list
-                f.waitlist[hash] = map[string]struct{}{ann.origin: struct{}{}}
+                f.waitlist[hash] = map[string]struct{}{ann.origin: {}}
                 f.waittime[hash] = f.clock.Now()
 
                 if waitslots := f.waitslots[ann.origin]; waitslots != nil {
                     waitslots[hash] = struct{}{}
                 } else {
-                    f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
+                    f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}}
                 }
             }
             // If a new item was added to the waitlist, schedule it into the fetcher
@@ -434,7 +434,7 @@
             // If this peer is new and announced something already queued, maybe
             // request transactions from them
             if !oldPeer && len(f.announces[ann.origin]) > 0 {
-                f.scheduleFetches(timeoutTimer, timeoutTrigger, map[string]struct{}{ann.origin: struct{}{}})
+                f.scheduleFetches(timeoutTimer, timeoutTrigger, map[string]struct{}{ann.origin: {}})
             }
 
         case <-waitTrigger:
@@ -452,7 +452,7 @@
                 if announces := f.announces[peer]; announces != nil {
                     announces[hash] = struct{}{}
                 } else {
-                    f.announces[peer] = map[common.Hash]struct{}{hash: struct{}{}}
+                    f.announces[peer] = map[common.Hash]struct{}{hash: {}}
                 }
                 delete(f.waitslots[peer], hash)
                 if len(f.waitslots[peer]) == 0 {
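
Note (editor addition, not part of the patch): a minimal sketch of how the new behaviour can be observed, assuming the core/vm/runtime harness from the same tree at roughly this revision. After this change a step's storage snapshot also contains slots that were only read via SLOAD, whereas previously only SSTORE-written slots ever appeared. The bytecode below is illustrative only.

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core/vm"
    "github.com/ethereum/go-ethereum/core/vm/runtime"
)

func main() {
    // PUSH1 0x01, PUSH1 0x00, SSTORE  -> write 1 to slot 0
    // PUSH1 0x01, SLOAD, STOP         -> read slot 1, which is never written
    code := []byte{0x60, 0x01, 0x60, 0x00, 0x55, 0x60, 0x01, 0x54, 0x00}

    logger := vm.NewStructLogger(nil)
    if _, _, err := runtime.Execute(code, nil, &runtime.Config{
        EVMConfig: vm.Config{Debug: true, Tracer: logger},
    }); err != nil {
        panic(err)
    }
    for _, entry := range logger.StructLogs() {
        // With this patch the SLOAD step reports slot 1 in its storage map;
        // before it, only the SSTORE-written slot 0 would show up.
        fmt.Printf("%-8v storage=%v\n", entry.Op, entry.Storage)
    }
}

The same storage map is what ends up in the structLogs field of debug_traceTransaction responses, which is the user-visible effect of the change.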