From 3768b0074791169441ca5eb3a7c73c7b9e74c24a Mon Sep 17 00:00:00 2001
From: s7v7nislands
Date: Tue, 18 Apr 2023 20:54:06 +0800
Subject: [PATCH] consensus/ethash: use atomic type (#27068)

---
 consensus/ethash/algorithm.go | 12 ++++++------
 consensus/ethash/ethash.go    | 16 ++++++++--------
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go
index d539183822..a2a6081f55 100644
--- a/consensus/ethash/algorithm.go
+++ b/consensus/ethash/algorithm.go
@@ -163,7 +163,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
 	rows := int(size) / hashBytes
 
 	// Start a monitoring goroutine to report progress on low end devices
-	var progress uint32
+	var progress atomic.Uint32
 
 	done := make(chan struct{})
 	defer close(done)
@@ -174,7 +174,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
 			case <-done:
 				return
 			case <-time.After(3 * time.Second):
-				logger.Info("Generating ethash verification cache", "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/(cacheRounds+1), "elapsed", common.PrettyDuration(time.Since(start)))
+				logger.Info("Generating ethash verification cache", "percentage", progress.Load()*100/uint32(rows)/(cacheRounds+1), "elapsed", common.PrettyDuration(time.Since(start)))
 			}
 		}
 	}()
@@ -185,7 +185,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
 	keccak512(cache, seed)
 	for offset := uint64(hashBytes); offset < size; offset += hashBytes {
 		keccak512(cache[offset:], cache[offset-hashBytes:offset])
-		atomic.AddUint32(&progress, 1)
+		progress.Add(1)
 	}
 	// Use a low-round version of randmemohash
 	temp := make([]byte, hashBytes)
@@ -200,7 +200,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
 			bitutil.XORBytes(temp, cache[srcOff:srcOff+hashBytes], cache[xorOff:xorOff+hashBytes])
 			keccak512(cache[dstOff:], temp)
 
-			atomic.AddUint32(&progress, 1)
+			progress.Add(1)
 		}
 	}
 	// Swap the byte order on big endian systems and return
@@ -299,7 +299,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
 	var pend sync.WaitGroup
 	pend.Add(threads)
 
-	var progress uint64
+	var progress atomic.Uint64
 	for i := 0; i < threads; i++ {
 		go func(id int) {
 			defer pend.Done()
@@ -323,7 +323,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
 				}
 				copy(dataset[index*hashBytes:], item)
 
-				if status := atomic.AddUint64(&progress, 1); status%percent == 0 {
+				if status := progress.Add(1); status%percent == 0 {
 					logger.Info("Generating DAG in progress", "percentage", (status*100)/(size/hashBytes), "elapsed", common.PrettyDuration(time.Since(start)))
 				}
 			}
diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go
index 6cb3124827..462f109564 100644
--- a/consensus/ethash/ethash.go
+++ b/consensus/ethash/ethash.go
@@ -308,12 +308,12 @@ func (c *cache) finalizer() {
 
 // dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
 type dataset struct {
-	epoch   uint64    // Epoch for which this cache is relevant
-	dump    *os.File  // File descriptor of the memory mapped cache
-	mmap    mmap.MMap // Memory map itself to unmap before releasing
-	dataset []uint32  // The actual cache data content
-	once    sync.Once // Ensures the cache is generated only once
-	done    uint32    // Atomic flag to determine generation status
+	epoch   uint64      // Epoch for which this cache is relevant
+	dump    *os.File    // File descriptor of the memory mapped cache
+	mmap    mmap.MMap   // Memory map itself to unmap before releasing
+	dataset []uint32    // The actual cache data content
+	once    sync.Once   // Ensures the cache is generated only once
+	done    atomic.Bool // Atomic flag to determine generation status
 }
 
 // newDataset creates a new ethash mining dataset and returns it as a plain Go
@@ -326,7 +326,7 @@ func newDataset(epoch uint64) *dataset {
 func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
 	d.once.Do(func() {
 		// Mark the dataset generated after we're done. This is needed for remote
-		defer atomic.StoreUint32(&d.done, 1)
+		defer d.done.Store(true)
 
 		csize := cacheSize(d.epoch*epochLength + 1)
 		dsize := datasetSize(d.epoch*epochLength + 1)
@@ -390,7 +390,7 @@ func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
 // or not (it may not have been started at all). This is useful for remote miners
 // to default to verification caches instead of blocking on DAG generations.
 func (d *dataset) generated() bool {
-	return atomic.LoadUint32(&d.done) == 1
+	return d.done.Load()
 }
 
 // finalizer closes any file handlers and memory maps open.
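
For reference, below is a small self-contained Go sketch (not part of the patch; names are illustrative only) of the Go 1.19+ sync/atomic type API that this change migrates to: atomic.Uint32/Uint64 replace plain integers accessed via atomic.AddUint32/LoadUint32/AddUint64, and atomic.Bool replaces a uint32 used as a done flag.

// Minimal illustrative sketch, not from the patch.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var progress atomic.Uint64 // was: var progress uint64
	var done atomic.Bool       // was: done uint32

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				progress.Add(1) // was: atomic.AddUint64(&progress, 1)
			}
		}()
	}
	wg.Wait()
	done.Store(true) // was: atomic.StoreUint32(&done, 1)

	// was: atomic.LoadUint64(&progress), atomic.LoadUint32(&done) == 1
	fmt.Println(progress.Load(), done.Load())
}

The typed values keep all access atomic by construction (there is no way to read or write them non-atomically by accident) and drop the &variable plumbing at each call site, which is the point of the conversions above.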