|
|
@ -1,6 +1,11 @@ |
|
|
|
package trie |
|
|
|
package trie |
|
|
|
|
|
|
|
|
|
|
|
import (
	"github.com/ethereum/go-ethereum/compression/rle"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/syndtr/goleveldb/leveldb"
)
|
|
|
|
|
|
|
|
|
|
|
type Backend interface { |
|
|
|
type Backend interface { |
|
|
|
Get([]byte) ([]byte, error) |
|
|
|
Get([]byte) ([]byte, error) |
|
|
@ -8,12 +13,13 @@ type Backend interface { |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
type Cache struct { |
|
|
|
type Cache struct { |
|
|
|
|
|
|
|
batch *leveldb.Batch |
|
|
|
store map[string][]byte |
|
|
|
store map[string][]byte |
|
|
|
backend Backend |
|
|
|
backend Backend |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
func NewCache(backend Backend) *Cache { |
|
|
|
func NewCache(backend Backend) *Cache { |
|
|
|
return &Cache{make(map[string][]byte), backend} |
|
|
|
return &Cache{new(leveldb.Batch), make(map[string][]byte), backend} |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
func (self *Cache) Get(key []byte) []byte { |
|
|
|
func (self *Cache) Get(key []byte) []byte { |
|
|
@ -26,19 +32,23 @@ func (self *Cache) Get(key []byte) []byte { |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
func (self *Cache) Put(key []byte, data []byte) { |
|
|
|
func (self *Cache) Put(key []byte, data []byte) { |
|
|
|
|
|
|
|
// write the data to the ldb batch
|
|
|
|
|
|
|
|
self.batch.Put(key, rle.Compress(data)) |
|
|
|
self.store[string(key)] = data |
|
|
|
self.store[string(key)] = data |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Flush flushes the trie to the backing layer. If this is a leveldb instance
|
|
|
|
|
|
|
|
// we'll use a batched write, otherwise we'll use regular put.
|
|
|
|
func (self *Cache) Flush() { |
|
|
|
func (self *Cache) Flush() { |
|
|
|
for k, v := range self.store { |
|
|
|
if db, ok := self.backend.(*ethdb.LDBDatabase); ok { |
|
|
|
if err := self.backend.Put([]byte(k), v); err != nil { |
|
|
|
if err := db.LDB().Write(self.batch, nil); err != nil { |
|
|
|
glog.Fatal("db write err:", err) |
|
|
|
glog.Fatal("db write err:", err) |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
} else { |
|
|
|
|
|
|
|
for k, v := range self.store { |
|
|
|
|
|
|
|
self.backend.Put([]byte(k), v) |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
// This will eventually grow too large. We'd could
|
|
|
|
|
|
|
|
// do a make limit on storage and push out not-so-popular nodes.
|
|
|
|
|
|
|
|
//self.Reset()
|
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
func (self *Cache) Copy() *Cache { |
|
|
|
func (self *Cache) Copy() *Cache { |
|
|
|