cmd, eth, les, node, pow: disk caching and progress reports

release/1.6
Péter Szilágyi 8 years ago committed by Felix Lange
parent 567d41d936
commit 023670f6ba
  1. 5
      cmd/geth/main.go
  2. 10
      cmd/geth/usage.go
  3. 61
      cmd/utils/flags.go
  4. 24
      eth/backend.go
  5. 7
      les/backend.go
  6. 7
      node/service.go
  7. 121
      pow/ethash.go
  8. 36
      pow/ethash_algo.go

@ -91,6 +91,11 @@ func init() {
utils.BootnodesFlag, utils.BootnodesFlag,
utils.DataDirFlag, utils.DataDirFlag,
utils.KeyStoreDirFlag, utils.KeyStoreDirFlag,
utils.EthashCacheDirFlag,
utils.EthashCachesInMemoryFlag,
utils.EthashCachesOnDiskFlag,
utils.EthashDatasetDirFlag,
utils.EthashDatasetsOnDiskFlag,
utils.FastSyncFlag, utils.FastSyncFlag,
utils.LightModeFlag, utils.LightModeFlag,
utils.LightServFlag, utils.LightServFlag,

@ -77,6 +77,16 @@ var AppHelpFlagGroups = []flagGroup{
utils.LightKDFFlag, utils.LightKDFFlag,
}, },
}, },
{
Name: "ETHASH",
Flags: []cli.Flag{
utils.EthashCacheDirFlag,
utils.EthashCachesInMemoryFlag,
utils.EthashCachesOnDiskFlag,
utils.EthashDatasetDirFlag,
utils.EthashDatasetsOnDiskFlag,
},
},
{ {
Name: "PERFORMANCE TUNING", Name: "PERFORMANCE TUNING",
Flags: []cli.Flag{ Flags: []cli.Flag{

@ -23,6 +23,7 @@ import (
"io/ioutil" "io/ioutil"
"math/big" "math/big"
"os" "os"
"os/user"
"path/filepath" "path/filepath"
"runtime" "runtime"
"strconv" "strconv"
@ -113,6 +114,29 @@ var (
Name: "keystore", Name: "keystore",
Usage: "Directory for the keystore (default = inside the datadir)", Usage: "Directory for the keystore (default = inside the datadir)",
} }
// EthashCacheDirFlag sets the folder for the ethash verification cache dumps
// (default = inside the datadir).
EthashCacheDirFlag = DirectoryFlag{
Name: "ethash.cachedir",
Usage: "Directory to store the ethash verification caches (default = inside the datadir)",
}
// EthashCachesInMemoryFlag limits how many recent verification caches (16MB
// each) are retained in memory.
EthashCachesInMemoryFlag = cli.IntFlag{
Name: "ethash.cachesinmem",
Usage: "Number of recent ethash caches to keep in memory (16MB each)",
Value: 2,
}
// EthashCachesOnDiskFlag limits how many recent verification caches (16MB
// each) are retained on disk.
EthashCachesOnDiskFlag = cli.IntFlag{
Name: "ethash.cachesondisk",
Usage: "Number of recent ethash caches to keep on disk (16MB each)",
Value: 3,
}
// EthashDatasetDirFlag sets the folder for the full ethash mining DAG dumps
// (default = inside the home folder).
EthashDatasetDirFlag = DirectoryFlag{
Name: "ethash.dagdir",
Usage: "Directory to store the ethash mining DAGs (default = inside home folder)",
}
// EthashDatasetsOnDiskFlag limits how many mining DAGs (1+GB each) are
// retained on disk.
EthashDatasetsOnDiskFlag = cli.IntFlag{
Name: "ethash.dagsondisk",
Usage: "Number of ethash mining DAGs to keep on disk (1+GB each)",
Value: 2,
}
NetworkIdFlag = cli.IntFlag{ NetworkIdFlag = cli.IntFlag{
Name: "networkid", Name: "networkid",
Usage: "Network identifier (integer, 1=Frontier, 2=Morden (disused), 3=Ropsten)", Usage: "Network identifier (integer, 1=Frontier, 2=Morden (disused), 3=Ropsten)",
@ -431,6 +455,36 @@ func MakeDataDir(ctx *cli.Context) string {
return "" return ""
} }
// MakeEthashCacheDir returns the directory to use for storing the ethash cache
// dumps. When the flag is unset, the relative folder name "ethash" is returned
// (to be resolved inside the node's data directory by the caller); an explicitly
// set value is returned verbatim, including the empty string which disables
// disk caching altogether.
func MakeEthashCacheDir(ctx *cli.Context) string {
	if !ctx.GlobalIsSet(EthashCacheDirFlag.Name) {
		return "ethash"
	}
	// Flag was explicitly provided: honor it as-is. An empty value falls out
	// of GlobalString naturally, matching the original explicit "" branch.
	return ctx.GlobalString(EthashCacheDirFlag.Name)
}
// MakeEthashDatasetDir returns the directory to use for storing the full ethash
// dataset dumps. An explicitly set flag is returned verbatim; otherwise a
// default inside the user's home folder is used ($HOME/.ethash on Unix-like
// systems, AppData/Ethash on Windows).
func MakeEthashDatasetDir(ctx *cli.Context) string {
	if ctx.GlobalIsSet(EthashDatasetDirFlag.Name) {
		return ctx.GlobalString(EthashDatasetDirFlag.Name)
	}
	// Resolve the home folder, preferring $HOME and falling back to the OS
	// account database if the environment variable is unset. The fallback
	// variable is named u to avoid shadowing the os/user package.
	home := os.Getenv("HOME")
	if home == "" {
		if u, err := user.Current(); err == nil {
			home = u.HomeDir
		}
	}
	// NOTE(review): if both lookups fail, home stays "" and a relative path is
	// returned — preserved from the original; confirm callers tolerate it.
	if runtime.GOOS == "windows" {
		return filepath.Join(home, "AppData", "Ethash")
	}
	return filepath.Join(home, ".ethash")
}
// MakeIPCPath creates an IPC path configuration from the set command line flags, // MakeIPCPath creates an IPC path configuration from the set command line flags,
// returning an empty string if IPC was explicitly disabled, or the set path. // returning an empty string if IPC was explicitly disabled, or the set path.
func MakeIPCPath(ctx *cli.Context) string { func MakeIPCPath(ctx *cli.Context) string {
@ -751,6 +805,11 @@ func RegisterEthService(ctx *cli.Context, stack *node.Node, extra []byte) {
GpobaseStepUp: ctx.GlobalInt(GpobaseStepUpFlag.Name), GpobaseStepUp: ctx.GlobalInt(GpobaseStepUpFlag.Name),
GpobaseCorrectionFactor: ctx.GlobalInt(GpobaseCorrectionFactorFlag.Name), GpobaseCorrectionFactor: ctx.GlobalInt(GpobaseCorrectionFactorFlag.Name),
SolcPath: ctx.GlobalString(SolcPathFlag.Name), SolcPath: ctx.GlobalString(SolcPathFlag.Name),
EthashCacheDir: MakeEthashCacheDir(ctx),
EthashCachesInMem: ctx.GlobalInt(EthashCachesInMemoryFlag.Name),
EthashCachesOnDisk: ctx.GlobalInt(EthashCachesOnDiskFlag.Name),
EthashDatasetDir: MakeEthashDatasetDir(ctx),
EthashDatasetsOnDisk: ctx.GlobalInt(EthashDatasetsOnDiskFlag.Name),
AutoDAG: ctx.GlobalBool(AutoDAGFlag.Name) || ctx.GlobalBool(MiningEnabledFlag.Name), AutoDAG: ctx.GlobalBool(AutoDAGFlag.Name) || ctx.GlobalBool(MiningEnabledFlag.Name),
EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name), EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name),
} }
@ -923,7 +982,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
seal := pow.PoW(pow.FakePow{}) seal := pow.PoW(pow.FakePow{})
if !ctx.GlobalBool(FakePoWFlag.Name) { if !ctx.GlobalBool(FakePoWFlag.Name) {
seal = pow.NewFullEthash("", "") seal = pow.NewFullEthash("", 1, 0, "", 0)
} }
chain, err = core.NewBlockChain(chainDb, chainConfig, seal, new(event.TypeMux), vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}) chain, err = core.NewBlockChain(chainDb, chainConfig, seal, new(event.TypeMux), vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)})
if err != nil { if err != nil {

@ -84,6 +84,12 @@ type Config struct {
PowShared bool PowShared bool
ExtraData []byte ExtraData []byte
EthashCacheDir string
EthashCachesInMem int
EthashCachesOnDisk int
EthashDatasetDir string
EthashDatasetsOnDisk int
Etherbase common.Address Etherbase common.Address
GasPrice *big.Int GasPrice *big.Int
MinerThreads int MinerThreads int
@ -157,16 +163,11 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
if err := SetupGenesisBlock(&chainDb, config); err != nil { if err := SetupGenesisBlock(&chainDb, config); err != nil {
return nil, err return nil, err
} }
pow, err := CreatePoW(config)
if err != nil {
return nil, err
}
eth := &Ethereum{ eth := &Ethereum{
chainDb: chainDb, chainDb: chainDb,
eventMux: ctx.EventMux, eventMux: ctx.EventMux,
accountManager: ctx.AccountManager, accountManager: ctx.AccountManager,
pow: pow, pow: CreatePoW(ctx, config),
shutdownChan: make(chan bool), shutdownChan: make(chan bool),
stopDbUpgrade: stopDbUpgrade, stopDbUpgrade: stopDbUpgrade,
netVersionId: config.NetworkId, netVersionId: config.NetworkId,
@ -284,19 +285,20 @@ func SetupGenesisBlock(chainDb *ethdb.Database, config *Config) error {
} }
// CreatePoW creates the required type of PoW instance for an Ethereum service // CreatePoW creates the required type of PoW instance for an Ethereum service
func CreatePoW(config *Config) (pow.PoW, error) { func CreatePoW(ctx *node.ServiceContext, config *Config) pow.PoW {
switch { switch {
case config.PowFake: case config.PowFake:
log.Warn("Ethash used in fake mode") log.Warn("Ethash used in fake mode")
return pow.FakePow{}, nil return pow.FakePow{}
case config.PowTest: case config.PowTest:
log.Warn("Ethash used in test mode") log.Warn("Ethash used in test mode")
return pow.NewTestEthash(), nil return pow.NewTestEthash()
case config.PowShared: case config.PowShared:
log.Warn("Ethash used in shared mode") log.Warn("Ethash used in shared mode")
return pow.NewSharedEthash(), nil return pow.NewSharedEthash()
default: default:
return pow.NewFullEthash("", ""), nil return pow.NewFullEthash(ctx.ResolvePath(config.EthashCacheDir), config.EthashCachesInMem, config.EthashCachesOnDisk,
config.EthashDatasetDir, config.EthashDatasetsOnDisk)
} }
} }

@ -77,11 +77,6 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
if err := eth.SetupGenesisBlock(&chainDb, config); err != nil { if err := eth.SetupGenesisBlock(&chainDb, config); err != nil {
return nil, err return nil, err
} }
pow, err := eth.CreatePoW(config)
if err != nil {
return nil, err
}
odr := NewLesOdr(chainDb) odr := NewLesOdr(chainDb)
relay := NewLesTxRelay() relay := NewLesTxRelay()
eth := &LightEthereum{ eth := &LightEthereum{
@ -90,7 +85,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
chainDb: chainDb, chainDb: chainDb,
eventMux: ctx.EventMux, eventMux: ctx.EventMux,
accountManager: ctx.AccountManager, accountManager: ctx.AccountManager,
pow: pow, pow: eth.CreatePoW(ctx, config),
shutdownChan: make(chan bool), shutdownChan: make(chan bool),
netVersionId: config.NetworkId, netVersionId: config.NetworkId,
solcPath: config.SolcPath, solcPath: config.SolcPath,

@ -46,6 +46,13 @@ func (ctx *ServiceContext) OpenDatabase(name string, cache int, handles int) (et
return ethdb.NewLDBDatabase(ctx.config.resolvePath(name), cache, handles) return ethdb.NewLDBDatabase(ctx.config.resolvePath(name), cache, handles)
} }
// ResolvePath resolves a user path into the data directory if that was relative
// and if the user actually uses persistent storage. It will return an empty string
// for ephemeral storage and the user's own input for absolute paths.
func (ctx *ServiceContext) ResolvePath(path string) string {
return ctx.config.resolvePath(path)
}
// Service retrieves a currently running service registered of a specific type. // Service retrieves a currently running service registered of a specific type.
func (ctx *ServiceContext) Service(service interface{}) error { func (ctx *ServiceContext) Service(service interface{}) error {
element := reflect.ValueOf(service).Elem() element := reflect.ValueOf(service).Elem()

@ -17,12 +17,19 @@
package pow package pow
import ( import (
"bufio"
"bytes" "bytes"
"errors" "errors"
"fmt"
"io/ioutil"
"math/big" "math/big"
"os"
"path/filepath"
"sync" "sync"
"time" "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
metrics "github.com/rcrowley/go-metrics" metrics "github.com/rcrowley/go-metrics"
) )
@ -36,6 +43,15 @@ var (
var ( var (
// maxUint256 is a big integer representing 2^256-1 // maxUint256 is a big integer representing 2^256-1
maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
// sharedEthash is a full instance that can be shared between multiple users.
sharedEthash = NewFullEthash("", 3, 0, "", 0)
// algorithmRevision is the data structure version used for file naming.
algorithmRevision = 23
// dumpMagic is a dataset dump header to sanity check a data dump.
dumpMagic = hexutil.MustDecode("0xfee1deadbaddcafe")
) )
// cache wraps an ethash cache with some metadata to allow easier concurrent use. // cache wraps an ethash cache with some metadata to allow easier concurrent use.
@ -48,21 +64,65 @@ type cache struct {
} }
// generate ensures that the cache content is generates. // generate ensures that the cache content is generates.
func (c *cache) generate(test bool) { func (c *cache) generate(dir string, limit int, test bool) {
c.once.Do(func() { c.once.Do(func() {
cacheSize := cacheSize(c.epoch*epochLength + 1) // If we have a testing cache, generate and return
if test { if test {
cacheSize = 1024 rawCache := generateCache(1024, seedHash(c.epoch*epochLength+1))
c.cache = prepare(uint64(len(rawCache)), bytes.NewReader(rawCache))
return
}
// Full cache generation is needed, check cache dir for existing data
size := cacheSize(c.epoch*epochLength + 1)
seed := seedHash(c.epoch*epochLength + 1)
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x", algorithmRevision, seed))
logger := log.New("seed", hexutil.Bytes(seed))
if dir != "" {
dump, err := os.Open(path)
if err == nil {
logger.Info("Loading ethash cache from disk")
start := time.Now()
c.cache = prepare(size, bufio.NewReader(dump))
logger.Info("Loaded ethash cache from disk", "elapsed", common.PrettyDuration(time.Since(start)))
dump.Close()
return
}
}
// No previous disk cache was available, generate on the fly
rawCache := generateCache(size, seed)
c.cache = prepare(size, bytes.NewReader(rawCache))
// If a cache directory is given, attempt to serialize for next time
if dir != "" {
// Store the ethash cache to disk
start := time.Now()
if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
logger.Error("Failed to create ethash cache dir", "err", err)
} else if err := ioutil.WriteFile(path, rawCache, os.ModePerm); err != nil {
logger.Error("Failed to write ethash cache to disk", "err", err)
} else {
logger.Info("Stored ethash cache to disk", "elapsed", common.PrettyDuration(time.Since(start)))
}
// Iterate over all previous instances and delete old ones
for ep := int(c.epoch) - limit; ep >= 0; ep-- {
seed := seedHash(uint64(ep)*epochLength + 1)
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x", algorithmRevision, seed))
os.Remove(path)
}
} }
rawCache := generateCache(cacheSize, seedHash(c.epoch*epochLength+1))
c.cache = prepare(uint64(len(rawCache)), bytes.NewReader(rawCache))
}) })
} }
// Ethash is a PoW data struture implementing the ethash algorithm. // Ethash is a PoW data struture implementing the ethash algorithm.
type Ethash struct { type Ethash struct {
cachedir string // Data directory to store the verification caches cachedir string // Data directory to store the verification caches
dagdir string // Data directory to store full mining datasets cachesinmem int // Number of caches to keep in memory
cachesondisk int // Number of caches to keep on disk
dagdir string // Data directory to store full mining datasets
dagsondisk int // Number of mining datasets to keep on disk
caches map[uint64]*cache // In memory caches to avoid regenerating too often caches map[uint64]*cache // In memory caches to avoid regenerating too often
future *cache // Pre-generated cache for the estimated future epoch future *cache // Pre-generated cache for the estimated future epoch
@ -71,15 +131,27 @@ type Ethash struct {
hashrate *metrics.StandardMeter // Meter tracking the average hashrate hashrate *metrics.StandardMeter // Meter tracking the average hashrate
tester bool // Flag whether to use a smaller test dataset tester bool // Flag whether to use a smaller test dataset
shared bool // Flag whether to use a global chared dataset
} }
// NewFullEthash creates a full sized ethash PoW scheme. // NewFullEthash creates a full sized ethash PoW scheme.
func NewFullEthash(cachedir, dagdir string) PoW { func NewFullEthash(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsondisk int) PoW {
if cachesinmem <= 0 {
log.Warn("One ethash cache must alwast be in memory", "requested", cachesinmem)
cachesinmem = 1
}
if cachedir != "" && cachesondisk > 0 {
log.Info("Disk storage enabled for ethash caches", "dir", cachedir, "count", cachesondisk)
}
if dagdir != "" && dagsondisk > 0 {
log.Info("Disk storage enabled for ethash DAGs", "dir", dagdir, "count", dagsondisk)
}
return &Ethash{ return &Ethash{
cachedir: cachedir, cachedir: cachedir,
dagdir: dagdir, cachesinmem: cachesinmem,
caches: make(map[uint64]*cache), cachesondisk: cachesondisk,
dagdir: dagdir,
dagsondisk: dagsondisk,
caches: make(map[uint64]*cache),
} }
} }
@ -87,18 +159,16 @@ func NewFullEthash(cachedir, dagdir string) PoW {
// purposes. // purposes.
func NewTestEthash() PoW { func NewTestEthash() PoW {
return &Ethash{ return &Ethash{
caches: make(map[uint64]*cache), cachesinmem: 1,
tester: true, caches: make(map[uint64]*cache),
tester: true,
} }
} }
// NewSharedEthash creates a full sized ethash PoW shared between all requesters // NewSharedEthash creates a full sized ethash PoW shared between all requesters
// running in the same process. // running in the same process.
func NewSharedEthash() PoW { func NewSharedEthash() PoW {
return &Ethash{ return sharedEthash
caches: make(map[uint64]*cache),
shared: true,
}
} }
// Verify implements PoW, checking whether the given block satisfies the PoW // Verify implements PoW, checking whether the given block satisfies the PoW
@ -140,7 +210,7 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
current, future := ethash.caches[epoch], (*cache)(nil) current, future := ethash.caches[epoch], (*cache)(nil)
if current == nil { if current == nil {
// No in-memory cache, evict the oldest if the cache limit was reached // No in-memory cache, evict the oldest if the cache limit was reached
for len(ethash.caches) >= 3 { for len(ethash.caches) >= ethash.cachesinmem {
var evict *cache var evict *cache
for _, cache := range ethash.caches { for _, cache := range ethash.caches {
if evict == nil || evict.used.After(cache.used) { if evict == nil || evict.used.After(cache.used) {
@ -149,21 +219,21 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
} }
delete(ethash.caches, evict.epoch) delete(ethash.caches, evict.epoch)
log.Debug("Evictinged ethash cache", "old", evict.epoch, "used", evict.used) log.Debug("Evicted ethash cache", "epoch", evict.epoch, "used", evict.used)
} }
// If we have the new cache pre-generated, use that, otherwise create a new one // If we have the new cache pre-generated, use that, otherwise create a new one
if ethash.future != nil && ethash.future.epoch == epoch { if ethash.future != nil && ethash.future.epoch == epoch {
log.Debug("Using pre-generated cache", "epoch", epoch) log.Debug("Using pre-generated cache", "epoch", epoch)
current, ethash.future = ethash.future, nil current, ethash.future = ethash.future, nil
} else { } else {
log.Debug("Generating new ethash cache", "epoch", epoch) log.Debug("Requiring new ethash cache", "epoch", epoch)
current = &cache{epoch: epoch} current = &cache{epoch: epoch}
} }
ethash.caches[epoch] = current ethash.caches[epoch] = current
// If we just used up the future cache, or need a refresh, regenerate // If we just used up the future cache, or need a refresh, regenerate
if ethash.future == nil || ethash.future.epoch <= epoch { if ethash.future == nil || ethash.future.epoch <= epoch {
log.Debug("Pre-generating cache for the future", "epoch", epoch+1) log.Debug("Requiring new future ethash cache", "epoch", epoch+1)
future = &cache{epoch: epoch + 1} future = &cache{epoch: epoch + 1}
ethash.future = future ethash.future = future
} }
@ -172,16 +242,15 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
ethash.lock.Unlock() ethash.lock.Unlock()
// Wait for generation finish, bump the timestamp and finalize the cache // Wait for generation finish, bump the timestamp and finalize the cache
current.once.Do(func() { current.generate(ethash.cachedir, ethash.cachesondisk, ethash.tester)
current.generate(ethash.tester)
})
current.lock.Lock() current.lock.Lock()
current.used = time.Now() current.used = time.Now()
current.lock.Unlock() current.lock.Unlock()
// If we exhusted the future cache, now's a goot time to regenerate it // If we exhusted the future cache, now's a goot time to regenerate it
if future != nil { if future != nil {
go future.generate(ethash.tester) go future.generate(ethash.cachedir, ethash.cachesondisk, ethash.tester)
} }
return current.cache return current.cache
} }

@ -45,12 +45,6 @@ const (
loopAccesses = 64 // Number of accesses in hashimoto loop loopAccesses = 64 // Number of accesses in hashimoto loop
) )
var (
// Metadata fields to be compatible with the C++ ethash
ethashRevision = 23 // Data structure version
ethashMagic = hexutil.MustDecode("0xfee1deadbaddcafe") // Dataset dump magic number
)
// cacheSize calculates and returns the size of the ethash verification cache that // cacheSize calculates and returns the size of the ethash verification cache that
// belongs to a certain block number. The cache size grows linearly, however, we // belongs to a certain block number. The cache size grows linearly, however, we
// always take the highest prime below the linearly growing threshold in order to // always take the highest prime below the linearly growing threshold in order to
@ -108,16 +102,33 @@ func seedHash(block uint64) []byte {
// set of 524288 64-byte values. // set of 524288 64-byte values.
func generateCache(size uint64, seed []byte) []byte { func generateCache(size uint64, seed []byte) []byte {
// Print some debug logs to allow analysis on low end devices // Print some debug logs to allow analysis on low end devices
logger := log.New("size", size, "seed", hexutil.Bytes(seed)) logger := log.New("seed", hexutil.Bytes(seed))
logger.Debug("Generating ethash cache") logger.Debug("Generating ethash verification cache")
defer func(start time.Time) { start := time.Now()
logger.Debug("Generated ethash cache", "elapsed", common.PrettyDuration(time.Since(start))) defer func() {
}(time.Now()) logger.Info("Generated ethash verification cache", "elapsed", common.PrettyDuration(time.Since(start)))
}()
// Calculate the number of thoretical rows (we'll store in one buffer nonetheless) // Calculate the number of thoretical rows (we'll store in one buffer nonetheless)
rows := int(size) / hashBytes rows := int(size) / hashBytes
// Start a monitoring goroutine to report progress on low end devices
var progress uint32
done := make(chan struct{})
defer close(done)
go func() {
for {
select {
case <-done:
return
case <-time.After(3 * time.Second):
logger.Info("Generating ethash verification cache", "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/4, "elapsed", common.PrettyDuration(time.Since(start)))
}
}
}()
// Create a hasher to reuse between invocations // Create a hasher to reuse between invocations
keccak512 := crypto.Keccak512Hasher() keccak512 := crypto.Keccak512Hasher()
@ -126,6 +137,7 @@ func generateCache(size uint64, seed []byte) []byte {
copy(cache, keccak512(seed)) copy(cache, keccak512(seed))
for offset := uint64(hashBytes); offset < size; offset += hashBytes { for offset := uint64(hashBytes); offset < size; offset += hashBytes {
copy(cache[offset:], keccak512(cache[offset-hashBytes:offset])) copy(cache[offset:], keccak512(cache[offset-hashBytes:offset]))
atomic.AddUint32(&progress, 1)
} }
// Use a low-round version of randmemohash // Use a low-round version of randmemohash
temp := make([]byte, hashBytes) temp := make([]byte, hashBytes)
@ -139,6 +151,8 @@ func generateCache(size uint64, seed []byte) []byte {
) )
xorBytes(temp, cache[srcOff:srcOff+hashBytes], cache[xorOff:xorOff+hashBytes]) xorBytes(temp, cache[srcOff:srcOff+hashBytes], cache[xorOff:xorOff+hashBytes])
copy(cache[dstOff:], keccak512(temp)) copy(cache[dstOff:], keccak512(temp))
atomic.AddUint32(&progress, 1)
} }
} }
return cache return cache

Loading…
Cancel
Save