@@ -29,6 +29,7 @@ import (
 	"runtime"
 	"strconv"
 	"sync"
+	"sync/atomic"
 	"time"
 	"unsafe"
 
@@ -281,6 +282,7 @@ type dataset struct {
 	mmap    mmap.MMap  // Memory map itself to unmap before releasing
 	dataset []uint32   // The actual cache data content
 	once    sync.Once  // Ensures the cache is generated only once
+	done    uint32     // Atomic flag to determine generation status
 }
 
 // newDataset creates a new ethash mining dataset and returns it as a plain Go
@@ -292,6 +294,9 @@ func newDataset(epoch uint64) interface{} {
 // generate ensures that the dataset content is generated before use.
 func (d *dataset) generate(dir string, limit int, test bool) {
 	d.once.Do(func() {
+		// Mark the dataset generated after we're done. This is needed for remote
+		defer atomic.StoreUint32(&d.done, 1)
+
 		csize := cacheSize(d.epoch*epochLength + 1)
 		dsize := datasetSize(d.epoch*epochLength + 1)
 		seed := seedHash(d.epoch*epochLength + 1)
@@ -306,6 +311,8 @@ func (d *dataset) generate(dir string, limit int, test bool) {
 
 			d.dataset = make([]uint32, dsize/4)
 			generateDataset(d.dataset, d.epoch, cache)
+
+			return
 		}
 		// Disk storage is needed, this will get fancy
 		var endian string
@@ -348,6 +355,13 @@ func (d *dataset) generate(dir string, limit int, test bool) {
 	})
 }
 
+// generated returns whether this particular dataset finished generating already
+// or not (it may not have been started at all). This is useful for remote miners
+// to default to verification caches instead of blocking on DAG generations.
+func (d *dataset) generated() bool {
+	return atomic.LoadUint32(&d.done) == 1
+}
+
 // finalizer closes any file handlers and memory maps open.
 func (d *dataset) finalizer() {
 	if d.mmap != nil {
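
Note on the hunks above: generate now flips the new done flag via a defer inside the sync.Once body, so generated() can be polled from other goroutines without ever blocking on the Once, and the flag is set even if the build bails out partway. A minimal standalone sketch of that pattern (the resource type and its fields are illustrative names, not go-ethereum code):

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
        "time"
    )

    // resource mimics the once-plus-atomic-flag layout of the dataset struct.
    type resource struct {
        once sync.Once
        done uint32 // atomic flag, 1 once generation has finished
        data []byte
    }

    // generate builds the payload exactly once; the deferred store marks it
    // finished even if the build returns early, mirroring dataset.generate.
    func (r *resource) generate() {
        r.once.Do(func() {
            defer atomic.StoreUint32(&r.done, 1)
            time.Sleep(100 * time.Millisecond) // stand-in for the expensive DAG build
            r.data = []byte("ready")
        })
    }

    // generated reports completion without blocking on the sync.Once.
    func (r *resource) generated() bool {
        return atomic.LoadUint32(&r.done) == 1
    }

    func main() {
        r := new(resource)
        go r.generate()
        fmt.Println("done right away?", r.generated()) // almost certainly false
        time.Sleep(200 * time.Millisecond)
        fmt.Println("done after waiting?", r.generated()) // true
    }
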
@@ -589,20 +603,34 @@ func (ethash *Ethash) cache(block uint64) *cache {
 // dataset tries to retrieve a mining dataset for the specified block number
 // by first checking against a list of in-memory datasets, then against DAGs
 // stored on disk, and finally generating one if none can be found.
-func (ethash *Ethash) dataset(block uint64) *dataset {
+//
+// If async is specified, not only the future but the current DAG is also
+// generates on a background thread.
+func (ethash *Ethash) dataset(block uint64, async bool) *dataset {
+	// Retrieve the requested ethash dataset
 	epoch := block / epochLength
 	currentI, futureI := ethash.datasets.get(epoch)
 	current := currentI.(*dataset)
 
-	// Wait for generation finish.
-	current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
-
-	// If we need a new future dataset, now's a good time to regenerate it.
-	if futureI != nil {
-		future := futureI.(*dataset)
-		go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
-	}
-
+	// If async is specified, generate everything in a background thread
+	if async && !current.generated() {
+		go func() {
+			current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
+
+			if futureI != nil {
+				future := futureI.(*dataset)
+				future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
+			}
+		}()
+	} else {
+		// Either blocking generation was requested, or already done
+		current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
+
+		if futureI != nil {
+			future := futureI.(*dataset)
+			go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
+		}
+	}
 	return current
 }
 
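
Note on the hunk above: dataset now takes an async flag, so a caller can have the current DAG (and, if queued, the next one) built on a background goroutine and keep working until generated() reports completion; per the new doc comment, remote miners can fall back to the verification caches in the meantime. A self-contained sketch of that control flow under the same once-plus-atomic-flag pattern (item and get are illustrative names, not go-ethereum code):

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
        "time"
    )

    // item stands in for a dataset: built exactly once, completion tracked atomically.
    type item struct {
        name string
        once sync.Once
        done uint32
    }

    func (it *item) generate() {
        it.once.Do(func() {
            defer atomic.StoreUint32(&it.done, 1)
            time.Sleep(50 * time.Millisecond) // stand-in for DAG generation
            fmt.Println("generated", it.name)
        })
    }

    func (it *item) generated() bool { return atomic.LoadUint32(&it.done) == 1 }

    // get mirrors the shape of the new dataset(): async callers never block, while
    // blocking callers build the current item inline and the future one in the background.
    func get(current, future *item, async bool) *item {
        if async && !current.generated() {
            go func() {
                current.generate()
                if future != nil {
                    future.generate()
                }
            }()
        } else {
            current.generate()
            if future != nil {
                go future.generate()
            }
        }
        return current
    }

    func main() {
        cur, fut := &item{name: "epoch N"}, &item{name: "epoch N+1"}
        d := get(cur, fut, true)             // returns immediately
        fmt.Println("ready?", d.generated()) // very likely still false
        time.Sleep(200 * time.Millisecond)   // let the background generation finish
        fmt.Println("ready?", d.generated()) // true
    }

Calling get(cur, fut, false) instead exercises the blocking branch: the current item is built inline and only the future one is pushed to a goroutine, matching the else path in the diff.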