Merge pull request #859 from Gustav-Simonsson/ethash_reloaded

Ethash reloaded
Jeffrey Wilcke committed 10 years ago in commit 1f26a1b863
  1. Godeps/Godeps.json (4)
  2. Godeps/_workspace/src/github.com/ethereum/ethash/.travis.yml (19)
  3. Godeps/_workspace/src/github.com/ethereum/ethash/README.md (17)
  4. Godeps/_workspace/src/github.com/ethereum/ethash/appveyor.yml (43)
  5. Godeps/_workspace/src/github.com/ethereum/ethash/ethash.go (576)
  6. Godeps/_workspace/src/github.com/ethereum/ethash/ethash_test.go (176)
  7. Godeps/_workspace/src/github.com/ethereum/ethash/ethashc.go (25)
  8. Godeps/_workspace/src/github.com/ethereum/ethash/setup.py (47)
  9. Godeps/_workspace/src/github.com/ethereum/ethash/src/benchmark/CMakeLists.txt (46)
  10. Godeps/_workspace/src/github.com/ethereum/ethash/src/benchmark/benchmark.cpp (63)
  11. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash-cl/CMakeLists.txt (2)
  12. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash-cl/ethash_cl_miner.cpp (143)
  13. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash-cl/ethash_cl_miner.h (26)
  14. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash-cl/ethash_cl_miner_kernel.cl (5)
  15. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/CMakeLists.txt (5)
  16. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/data_sizes.h (1476)
  17. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/endian.h (74)
  18. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/ethash.h (215)
  19. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/fnv.h (7)
  20. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/internal.c (630)
  21. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/internal.h (149)
  22. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/io.c (129)
  23. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/io.h (197)
  24. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/io_posix.c (102)
  25. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/io_win32.c (103)
  26. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/mmap.h (47)
  27. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/mmap_win32.c (84)
  28. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/sha3.c (194)
  29. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/sha3.h (12)
  30. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/sha3_cryptopp.cpp (14)
  31. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/sha3_cryptopp.h (7)
  32. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/util.h (10)
  33. Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/util_win32.c (9)
  34. Godeps/_workspace/src/github.com/ethereum/ethash/src/python/core.c (79)
  35. Godeps/_workspace/src/github.com/ethereum/ethash/test/c/CMakeLists.txt (13)
  36. Godeps/_workspace/src/github.com/ethereum/ethash/test/c/test.cpp (879)
  37. Godeps/_workspace/src/github.com/ethereum/ethash/test/c/test.sh (13)
  38. Godeps/_workspace/src/github.com/ethereum/ethash/test/go/ethash_test.go (82)
  39. Godeps/_workspace/src/github.com/ethereum/ethash/test/test.sh (9)
  40. cmd/geth/main.go (34)
  41. cmd/utils/flags.go (2)
  42. core/chain_makers.go (4)
  43. eth/backend.go (3)
  44. miner/agent.go (2)
  45. miner/miner.go (7)
  46. pow/dagger/dagger.go (6)
  47. pow/ezp/pow.go (8)
  48. pow/pow.go (2)
  49. tests/block_test.go (14)

Godeps/Godeps.json (4, generated, vendored)

@@ -17,8 +17,8 @@
 },
 {
 "ImportPath": "github.com/ethereum/ethash",
-"Comment": "v23.1-82-g908aad3",
-"Rev": "908aad345c9fbf3ab9bbb94031dc02d0d90df1b8"
+"Comment": "v23.1-195-g4d50db9",
+"Rev": "4d50db90d8bb5f2fae357570366cb8c657a4ddfc"
 },
 {
 "ImportPath": "github.com/howeyc/fsnotify",

@@ -1,14 +1,23 @@
-# making our travis.yml play well with C++11 by obtaining g++4.8
-# Taken from this file:
-# https://github.com/beark/ftl/blob/master/.travis.yml
+language: go
+go:
+  - 1.4.2
 before_install:
+  # for g++4.8 and C++11
   - sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
+  # Set up go-ethereum
   - sudo apt-get update -y -qq
+  - sudo apt-get install -yqq libgmp3-dev
+  - git clone --depth=10 https://github.com/ethereum/go-ethereum ${GOPATH}/src/github.com/ethereum/go-ethereum
+  # use canned dependencies from the go-ethereum repository
+  - export GOPATH=$GOPATH:$GOPATH/src/github.com/ethereum/go-ethereum/Godeps/_workspace/
+  - echo $GOPATH
 install:
+  # need to explicitly request version 1.48 since by default we get 1.46 which does not work with C++11
   - sudo apt-get install -qq --yes --force-yes g++-4.8
   - sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.8 50
-  # need to explicitly request version 1.48 since by default we get 1.46 which does not work with C++11
-  - sudo apt-get install -qq wget cmake bash libboost-test1.48-dev libboost-system1.48-dev libboost-filesystem1.48-dev nodejs python-pip python-dev
+  - sudo apt-get install -qq wget cmake bash libboost-test1.48-dev libboost-system1.48-dev libboost-filesystem1.48-dev nodejs python-pip python-dev valgrind
   - sudo pip install virtualenv -q
 script: "./test/test.sh"

@@ -1,7 +1,22 @@
 [![Build Status](https://travis-ci.org/ethereum/ethash.svg?branch=master)](https://travis-ci.org/ethereum/ethash)
+[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/debris/ethash?branch=master&svg=true)](https://ci.appveyor.com/project/debris/ethash-nr37r/branch/master)
 
 # Ethash
 
 For details on this project, please see the Ethereum wiki:
 https://github.com/ethereum/wiki/wiki/Ethash
+
+### Coding Style for C++ code:
+
+Follow the same exact style as in [cpp-ethereum](https://github.com/ethereum/cpp-ethereum/blob/develop/CodingStandards.txt)
+
+### Coding Style for C code:
+
+The main thing above all is code consistency.
+
+- Tabs for indentation. A tab is 4 spaces
+- Try to stick to the [K&R](http://en.wikipedia.org/wiki/Indent_style#K.26R_style),
+  especially for the C code.
+- Keep the line lengths reasonable. No hard limit on 80 characters but don't go further
+  than 110. Some people work with multiple buffers next to each other.
+  Make them like you :)

@ -0,0 +1,43 @@
version: 1.0.0.{build}
environment:
BOOST_ROOT: "c:/projects/ethash/deps/boost"
branches:
only:
- master
- develop
os: Windows Server 2012 R2
clone_folder: c:\projects\ethash
#platform: Any CPU
#configuration: Debug
install:
# by default, all script lines are interpreted as batch
# scripts to run before build
before_build:
- echo "Downloading boost..."
- mkdir c:\projects\ethash\deps
- cd c:\projects\ethash\deps
- curl -O https://build.ethdev.com/builds/windows-precompiled/boost.tar.gz
- echo "Unzipping boost..."
- 7z x boost.tar.gz > nul
- 7z x boost.tar > nul
- ls
- echo "Running cmake..."
- cd c:\projects\ethash
- cmake .
build:
project: ALL_BUILD.vcxproj # path to Visual Studio solution or project
after_build:
- echo "Running tests..."
- cd c:\projects\ethash\test\c\Debug
- Test.exe
- echo "Finished!"

@ -1,32 +1,22 @@
/*
###################################################################################
###################################################################################
#################### ####################
#################### EDIT AND YOU SHALL FEEL MY WRATH - jeff ####################
#################### ####################
###################################################################################
###################################################################################
*/
package ethash package ethash
/* /*
#cgo CFLAGS: -std=gnu99 -Wall #include "src/libethash/internal.h"
#include "src/libethash/util.c"
#include "src/libethash/internal.c" int ethashGoCallback_cgo(unsigned);
#include "src/libethash/sha3.c"
*/ */
import "C" import "C"
import ( import (
"bytes" "errors"
"encoding/binary"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"math/big" "math/big"
"math/rand" "math/rand"
"os" "os"
"path" "os/user"
"path/filepath"
"runtime"
"sync" "sync"
"time" "time"
"unsafe" "unsafe"
@ -38,318 +28,267 @@ import (
"github.com/ethereum/go-ethereum/pow" "github.com/ethereum/go-ethereum/pow"
) )
var minDifficulty = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) var (
minDifficulty = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
type ParamsAndCache struct { sharedLight = new(Light)
params *C.ethash_params )
cache *C.ethash_cache
Epoch uint64
}
type DAG struct {
dag unsafe.Pointer // full GB of memory for dag
file bool
paramsAndCache *ParamsAndCache
}
type Ethash struct {
turbo bool
HashRate int64
chainManager pow.ChainManager
dag *DAG
paramsAndCache *ParamsAndCache
ret *C.ethash_return_value
dagMutex *sync.RWMutex
cacheMutex *sync.RWMutex
}
func parseNonce(nonce []byte) (uint64, error) { const (
nonceBuf := bytes.NewBuffer(nonce) epochLength uint64 = 30000
nonceInt, err := binary.ReadUvarint(nonceBuf) cacheSizeForTesting C.uint64_t = 1024
if err != nil { dagSizeForTesting C.uint64_t = 1024 * 32
return 0, err )
}
return nonceInt, nil
}
const epochLength uint64 = 30000 var DefaultDir = defaultDir()
func makeParamsAndCache(chainManager pow.ChainManager, blockNum uint64) (*ParamsAndCache, error) { func defaultDir() string {
if blockNum >= epochLength*2048 { home := os.Getenv("HOME")
return nil, fmt.Errorf("block number is out of bounds (value %v, limit is %v)", blockNum, epochLength*2048) if user, err := user.Current(); err == nil {
home = user.HomeDir
} }
paramsAndCache := &ParamsAndCache{ if runtime.GOOS == "windows" {
params: new(C.ethash_params), return filepath.Join(home, "AppData", "Ethash")
cache: new(C.ethash_cache),
Epoch: blockNum / epochLength,
} }
C.ethash_params_init(paramsAndCache.params, C.uint32_t(uint32(blockNum))) return filepath.Join(home, ".ethash")
paramsAndCache.cache.mem = C.malloc(C.size_t(paramsAndCache.params.cache_size)) }
seedHash, err := GetSeedHash(blockNum)
if err != nil {
return nil, err
}
glog.V(logger.Info).Infof("Making cache for epoch: %d (%v) (%x)\n", paramsAndCache.Epoch, blockNum, seedHash)
start := time.Now()
C.ethash_mkcache(paramsAndCache.cache, paramsAndCache.params, (*C.ethash_blockhash_t)(unsafe.Pointer(&seedHash[0])))
if glog.V(logger.Info) { // cache wraps an ethash_light_t with some metadata
glog.Infoln("Took:", time.Since(start)) // and automatic memory management.
} type cache struct {
epoch uint64
test bool
return paramsAndCache, nil gen sync.Once // ensures cache is only generated once.
ptr *C.struct_ethash_light
} }
func (pow *Ethash) UpdateCache(blockNum uint64, force bool) error { // generate creates the actual cache. it can be called from multiple
pow.cacheMutex.Lock() // goroutines. the first call will generate the cache, subsequent
defer pow.cacheMutex.Unlock() // calls wait until it is generated.
func (cache *cache) generate() {
thisEpoch := blockNum / epochLength cache.gen.Do(func() {
if force || pow.paramsAndCache.Epoch != thisEpoch { started := time.Now()
var err error seedHash := makeSeedHash(cache.epoch)
pow.paramsAndCache, err = makeParamsAndCache(pow.chainManager, blockNum) glog.V(logger.Debug).Infof("Generating cache for epoch %d (%x)", cache.epoch, seedHash)
if err != nil { size := C.ethash_get_cachesize(C.uint64_t(cache.epoch * epochLength))
panic(err) if cache.test {
size = cacheSizeForTesting
} }
} cache.ptr = C.ethash_light_new_internal(size, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
runtime.SetFinalizer(cache, freeCache)
return nil glog.V(logger.Debug).Infof("Done generating cache for epoch %d, it took %v", cache.epoch, time.Since(started))
})
} }
func makeDAG(p *ParamsAndCache) *DAG { func freeCache(cache *cache) {
d := &DAG{ C.ethash_light_delete(cache.ptr)
dag: C.malloc(C.size_t(p.params.full_size)), cache.ptr = nil
file: false, }
paramsAndCache: p,
}
donech := make(chan string) // Light implements the Verify half of the proof of work.
go func() { // It uses a small in-memory cache to verify the nonces
t := time.NewTicker(5 * time.Second) // found by Full.
tstart := time.Now() type Light struct {
done: test bool // if set use a smaller cache size
for { mu sync.Mutex // protects current
select { current *cache // last cache which was generated.
case <-t.C: // TODO: keep multiple caches.
glog.V(logger.Info).Infof("... still generating DAG (%v) ...\n", time.Since(tstart).Seconds())
case str := <-donech:
glog.V(logger.Info).Infof("... %s ...\n", str)
break done
}
}
}()
C.ethash_compute_full_data(d.dag, p.params, p.cache)
donech <- "DAG generation completed"
return d
} }
func (pow *Ethash) writeDagToDisk(dag *DAG, epoch uint64) *os.File { // Verify checks whether the block's nonce is valid.
if epoch > 2048 { func (l *Light) Verify(block pow.Block) bool {
panic(fmt.Errorf("Epoch must be less than 2048 (is %v)", epoch)) // TODO: do ethash_quick_verify before getCache in order
// to prevent DOS attacks.
var (
blockNum = block.NumberU64()
difficulty = block.Difficulty()
cache = l.getCache(blockNum)
dagSize = C.ethash_get_datasize(C.uint64_t(blockNum))
)
if l.test {
dagSize = dagSizeForTesting
} }
data := C.GoBytes(unsafe.Pointer(dag.dag), C.int(dag.paramsAndCache.params.full_size)) if blockNum >= epochLength*2048 {
file, err := os.Create("/tmp/dag") glog.V(logger.Debug).Infof("block number %d too high, limit is %d", epochLength*2048)
if err != nil { return false
panic(err)
} }
// Recompute the hash using the cache.
hash := hashToH256(block.HashNoNonce())
ret := C.ethash_light_compute_internal(cache.ptr, dagSize, hash, C.uint64_t(block.Nonce()))
if !ret.success {
return false
}
// Make sure cache is live until after the C call.
// This is important because a GC might happen and execute
// the finalizer before the call completes.
_ = cache
// The actual check.
target := new(big.Int).Div(minDifficulty, difficulty)
return h256ToHash(ret.result).Big().Cmp(target) <= 0
}
dataEpoch := make([]byte, 8) func h256ToHash(in C.ethash_h256_t) common.Hash {
binary.BigEndian.PutUint64(dataEpoch, epoch) return *(*common.Hash)(unsafe.Pointer(&in.b))
}
file.Write(dataEpoch)
file.Write(data)
return file func hashToH256(in common.Hash) C.ethash_h256_t {
return C.ethash_h256_t{b: *(*[32]C.uint8_t)(unsafe.Pointer(&in[0]))}
} }
func (pow *Ethash) UpdateDAG() { func (l *Light) getCache(blockNum uint64) *cache {
blockNum := pow.chainManager.CurrentBlock().NumberU64() var c *cache
if blockNum >= epochLength*2048 { epoch := blockNum / epochLength
// This will crash in the 2030s or 2040s // Update or reuse the last cache.
panic(fmt.Errorf("Current block number is out of bounds (value %v, limit is %v)", blockNum, epochLength*2048)) l.mu.Lock()
if l.current != nil && l.current.epoch == epoch {
c = l.current
} else {
c = &cache{epoch: epoch, test: l.test}
l.current = c
} }
l.mu.Unlock()
// Wait for the cache to finish generating.
c.generate()
return c
}
pow.dagMutex.Lock() // dag wraps an ethash_full_t with some metadata
defer pow.dagMutex.Unlock() // and automatic memory management.
thisEpoch := blockNum / epochLength type dag struct {
if pow.dag == nil || pow.dag.paramsAndCache.Epoch != thisEpoch { epoch uint64
if pow.dag != nil && pow.dag.dag != nil { test bool
C.free(pow.dag.dag) dir string
pow.dag.dag = nil
}
if pow.dag != nil && pow.dag.paramsAndCache.cache.mem != nil { gen sync.Once // ensures DAG is only generated once.
C.free(pow.dag.paramsAndCache.cache.mem) ptr *C.struct_ethash_full
pow.dag.paramsAndCache.cache.mem = nil }
}
// Make the params and cache for the DAG // generate creates the actual DAG. it can be called from multiple
paramsAndCache, err := makeParamsAndCache(pow.chainManager, blockNum) // goroutines. the first call will generate the DAG, subsequent
if err != nil { // calls wait until it is generated.
panic(err) func (d *dag) generate() {
d.gen.Do(func() {
var (
started = time.Now()
seedHash = makeSeedHash(d.epoch)
blockNum = C.uint64_t(d.epoch * epochLength)
cacheSize = C.ethash_get_cachesize(blockNum)
dagSize = C.ethash_get_datasize(blockNum)
)
if d.test {
cacheSize = cacheSizeForTesting
dagSize = dagSizeForTesting
} }
if d.dir == "" {
// TODO: On non-SSD disks, loading the DAG from disk takes longer than generating it in memory d.dir = DefaultDir
pow.paramsAndCache = paramsAndCache
path := path.Join("/", "tmp", "dag")
pow.dag = nil
glog.V(logger.Info).Infoln("Retrieving DAG")
start := time.Now()
file, err := os.Open(path)
if err != nil {
glog.V(logger.Info).Infof("No DAG found. Generating new DAG in '%s' (this takes a while)...\n", path)
pow.dag = makeDAG(paramsAndCache)
file = pow.writeDagToDisk(pow.dag, thisEpoch)
pow.dag.file = true
} else {
data, err := ioutil.ReadAll(file)
if err != nil {
glog.V(logger.Info).Infof("DAG load err: %v\n", err)
}
if len(data) < 8 {
glog.V(logger.Info).Infof("DAG in '%s' is less than 8 bytes, it must be corrupted. Generating new DAG (this takes a while)...\n", path)
pow.dag = makeDAG(paramsAndCache)
file = pow.writeDagToDisk(pow.dag, thisEpoch)
pow.dag.file = true
} else {
dataEpoch := binary.BigEndian.Uint64(data[0:8])
if dataEpoch < thisEpoch {
glog.V(logger.Info).Infof("DAG in '%s' is stale. Generating new DAG (this takes a while)...\n", path)
pow.dag = makeDAG(paramsAndCache)
file = pow.writeDagToDisk(pow.dag, thisEpoch)
pow.dag.file = true
} else if dataEpoch > thisEpoch {
// FIXME
panic(fmt.Errorf("Saved DAG in '%s' reports to be from future epoch %v (current epoch is %v)\n", path, dataEpoch, thisEpoch))
} else if len(data) != (int(paramsAndCache.params.full_size) + 8) {
glog.V(logger.Info).Infof("DAG in '%s' is corrupted. Generating new DAG (this takes a while)...\n", path)
pow.dag = makeDAG(paramsAndCache)
file = pow.writeDagToDisk(pow.dag, thisEpoch)
pow.dag.file = true
} else {
data = data[8:]
pow.dag = &DAG{
dag: unsafe.Pointer(&data[0]),
file: true,
paramsAndCache: paramsAndCache,
}
}
}
} }
glog.V(logger.Info).Infoln("Took:", time.Since(start)) glog.V(logger.Info).Infof("Generating DAG for epoch %d (%x)", d.epoch, seedHash)
// Generate a temporary cache.
file.Close() // TODO: this could share the cache with Light
} cache := C.ethash_light_new_internal(cacheSize, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
} defer C.ethash_light_delete(cache)
// Generate the actual DAG.
func New(chainManager pow.ChainManager) *Ethash { d.ptr = C.ethash_full_new_internal(
paramsAndCache, err := makeParamsAndCache(chainManager, chainManager.CurrentBlock().NumberU64()) C.CString(d.dir),
if err != nil { hashToH256(seedHash),
panic(err) dagSize,
} cache,
(C.ethash_callback_t)(unsafe.Pointer(C.ethashGoCallback_cgo)),
return &Ethash{ )
turbo: true, if d.ptr == nil {
paramsAndCache: paramsAndCache, panic("ethash_full_new IO or memory error")
chainManager: chainManager, }
dag: nil, runtime.SetFinalizer(d, freeDAG)
cacheMutex: new(sync.RWMutex), glog.V(logger.Info).Infof("Done generating DAG for epoch %d, it took %v", d.epoch, time.Since(started))
dagMutex: new(sync.RWMutex), })
}
} }
func (pow *Ethash) DAGSize() uint64 { func freeDAG(h *dag) {
return uint64(pow.dag.paramsAndCache.params.full_size) C.ethash_full_delete(h.ptr)
h.ptr = nil
} }
func (pow *Ethash) CacheSize() uint64 { //export ethashGoCallback
return uint64(pow.paramsAndCache.params.cache_size) func ethashGoCallback(percent C.unsigned) C.int {
glog.V(logger.Info).Infof("Still generating DAG: %d%%", percent)
return 0
} }
func GetSeedHash(blockNum uint64) ([]byte, error) { // MakeDAG pre-generates a DAG file for the given block number in the
// given directory. If dir is the empty string, the default directory
// is used.
func MakeDAG(blockNum uint64, dir string) error {
d := &dag{epoch: blockNum / epochLength, dir: dir}
if blockNum >= epochLength*2048 { if blockNum >= epochLength*2048 {
return nil, fmt.Errorf("block number is out of bounds (value %v, limit is %v)", blockNum, epochLength*2048) return fmt.Errorf("block number too high, limit is %d", epochLength*2048)
}
epoch := blockNum / epochLength
seedHash := make([]byte, 32)
var i uint64
for i = 0; i < 32; i++ {
seedHash[i] = 0
} }
for i = 0; i < epoch; i++ { d.generate()
seedHash = crypto.Sha3(seedHash) if d.ptr == nil {
return errors.New("failed")
} }
return seedHash, nil return nil
} }
func (pow *Ethash) Stop() { // Full implements the Search half of the proof of work.
pow.cacheMutex.Lock() type Full struct {
pow.dagMutex.Lock() Dir string // use this to specify a non-default DAG directory
defer pow.dagMutex.Unlock()
defer pow.cacheMutex.Unlock()
if pow.paramsAndCache.cache != nil { test bool // if set use a smaller DAG size
C.free(pow.paramsAndCache.cache.mem) turbo bool
} hashRate int64
if pow.dag.dag != nil && !pow.dag.file {
C.free(pow.dag.dag) mu sync.Mutex // protects dag
} current *dag // current full DAG
if pow.dag != nil && pow.dag.paramsAndCache != nil && pow.dag.paramsAndCache.cache.mem != nil {
C.free(pow.dag.paramsAndCache.cache.mem)
pow.dag.paramsAndCache.cache.mem = nil
}
pow.dag.dag = nil
} }
func (pow *Ethash) Search(block pow.Block, stop <-chan struct{}) (uint64, []byte, []byte) { func (pow *Full) getDAG(blockNum uint64) (d *dag) {
pow.UpdateDAG() epoch := blockNum / epochLength
pow.mu.Lock()
if pow.current != nil && pow.current.epoch == epoch {
d = pow.current
} else {
d = &dag{epoch: epoch, test: pow.test, dir: pow.Dir}
pow.current = d
}
pow.mu.Unlock()
// wait for it to finish generating.
d.generate()
return d
}
pow.dagMutex.RLock() func (pow *Full) Search(block pow.Block, stop <-chan struct{}) (nonce uint64, mixDigest []byte) {
defer pow.dagMutex.RUnlock() dag := pow.getDAG(block.NumberU64())
r := rand.New(rand.NewSource(time.Now().UnixNano())) r := rand.New(rand.NewSource(time.Now().UnixNano()))
miningHash := block.HashNoNonce()
diff := block.Difficulty() diff := block.Difficulty()
i := int64(0) i := int64(0)
starti := i starti := i
start := time.Now().UnixNano() start := time.Now().UnixNano()
nonce := uint64(r.Int63()) nonce = uint64(r.Int63())
cMiningHash := (*C.ethash_blockhash_t)(unsafe.Pointer(&miningHash[0])) hash := hashToH256(block.HashNoNonce())
target := new(big.Int).Div(minDifficulty, diff) target := new(big.Int).Div(minDifficulty, diff)
var ret C.ethash_return_value
for { for {
select { select {
case <-stop: case <-stop:
pow.HashRate = 0 pow.hashRate = 0
return 0, nil, nil return 0, nil
default: default:
i++ i++
elapsed := time.Now().UnixNano() - start elapsed := time.Now().UnixNano() - start
hashes := ((float64(1e9) / float64(elapsed)) * float64(i-starti)) / 1000 hashes := ((float64(1e9) / float64(elapsed)) * float64(i-starti)) / 1000
pow.HashRate = int64(hashes) pow.hashRate = int64(hashes)
C.ethash_full(&ret, pow.dag.dag, pow.dag.paramsAndCache.params, cMiningHash, C.uint64_t(nonce)) ret := C.ethash_full_compute(dag.ptr, hash, C.uint64_t(nonce))
result := common.Bytes2Big(C.GoBytes(unsafe.Pointer(&ret.result), C.int(32))) result := h256ToHash(ret.result).Big()
// TODO: disagrees with the spec https://github.com/ethereum/wiki/wiki/Ethash#mining // TODO: disagrees with the spec https://github.com/ethereum/wiki/wiki/Ethash#mining
if result.Cmp(target) <= 0 { if ret.success && result.Cmp(target) <= 0 {
mixDigest := C.GoBytes(unsafe.Pointer(&ret.mix_hash), C.int(32)) mixDigest = C.GoBytes(unsafe.Pointer(&ret.mix_hash), C.int(32))
seedHash, err := GetSeedHash(block.NumberU64()) // This seedhash is useless return nonce, mixDigest
if err != nil {
panic(err)
}
return nonce, mixDigest, seedHash
} }
nonce += 1 nonce += 1
} }
@ -357,82 +296,57 @@ func (pow *Ethash) Search(block pow.Block, stop <-chan struct{}) (uint64, []byte
time.Sleep(20 * time.Microsecond) time.Sleep(20 * time.Microsecond)
} }
} }
} }
func (pow *Ethash) Verify(block pow.Block) bool { func (pow *Full) GetHashrate() int64 {
// TODO: this needs to use an atomic operation.
return pow.verify(block.HashNoNonce(), block.MixDigest(), block.Difficulty(), block.NumberU64(), block.Nonce()) return pow.hashRate
} }
func (pow *Ethash) verify(hash common.Hash, mixDigest common.Hash, difficulty *big.Int, blockNum uint64, nonce uint64) bool { func (pow *Full) Turbo(on bool) {
// Make sure the block num is valid // TODO: this needs to use an atomic operation.
if blockNum >= epochLength*2048 { pow.turbo = on
glog.V(logger.Info).Infoln(fmt.Sprintf("Block number exceeds limit, invalid (value is %v, limit is %v)", }
blockNum, epochLength*2048))
return false
}
// First check: make sure header, mixDigest, nonce are correct without hitting the cache
// This is to prevent DOS attacks
chash := (*C.ethash_blockhash_t)(unsafe.Pointer(&hash[0]))
cnonce := C.uint64_t(nonce)
target := new(big.Int).Div(minDifficulty, difficulty)
var pAc *ParamsAndCache
// If its an old block (doesn't use the current cache)
// get the cache for it but don't update (so we don't need the mutex)
// Otherwise, it's the current block or a future block.
// If current, updateCache will do nothing.
if blockNum/epochLength < pow.paramsAndCache.Epoch {
var err error
// If we can't make the params for some reason, this block is invalid
pAc, err = makeParamsAndCache(pow.chainManager, blockNum)
if err != nil {
glog.V(logger.Info).Infoln("big fucking eror", err)
return false
}
} else {
pow.UpdateCache(blockNum, false)
pow.cacheMutex.RLock()
defer pow.cacheMutex.RUnlock()
pAc = pow.paramsAndCache
}
ret := new(C.ethash_return_value)
C.ethash_light(ret, pAc.cache, pAc.params, chash, cnonce)
result := common.Bytes2Big(C.GoBytes(unsafe.Pointer(&ret.result), C.int(32))) // Ethash combines block verification with Light and
return result.Cmp(target) <= 0 // nonce searching with Full into a single proof of work.
type Ethash struct {
*Light
*Full
} }
func (pow *Ethash) GetHashrate() int64 { // New creates an instance of the proof of work.
return pow.HashRate // A single instance of Light is shared across all instances
// created with New.
func New() *Ethash {
return &Ethash{sharedLight, &Full{turbo: true}}
} }
func (pow *Ethash) Turbo(on bool) { // NewForTesting creates a proof of work for use in unit tests.
pow.turbo = on // It uses a smaller DAG and cache size to keep test times low.
// DAG files are stored in a temporary directory.
//
// Nonces found by a testing instance are not verifiable with a
// regular-size cache.
func NewForTesting() (*Ethash, error) {
dir, err := ioutil.TempDir("", "ethash-test")
if err != nil {
return nil, err
}
return &Ethash{&Light{test: true}, &Full{Dir: dir, test: true}}, nil
} }
func (pow *Ethash) FullHash(nonce uint64, miningHash []byte) []byte { func GetSeedHash(blockNum uint64) ([]byte, error) {
pow.UpdateDAG() if blockNum >= epochLength*2048 {
pow.dagMutex.Lock() return nil, fmt.Errorf("block number too high, limit is %d", epochLength*2048)
defer pow.dagMutex.Unlock() }
cMiningHash := (*C.ethash_blockhash_t)(unsafe.Pointer(&miningHash[0])) sh := makeSeedHash(blockNum / epochLength)
cnonce := C.uint64_t(nonce) return sh[:], nil
ret := new(C.ethash_return_value)
// pow.hash is the output/return of ethash_full
C.ethash_full(ret, pow.dag.dag, pow.paramsAndCache.params, cMiningHash, cnonce)
ghash_full := C.GoBytes(unsafe.Pointer(&ret.result), 32)
return ghash_full
} }
func (pow *Ethash) LightHash(nonce uint64, miningHash []byte) []byte { func makeSeedHash(epoch uint64) (sh common.Hash) {
cMiningHash := (*C.ethash_blockhash_t)(unsafe.Pointer(&miningHash[0])) for ; epoch > 0; epoch-- {
cnonce := C.uint64_t(nonce) sh = crypto.Sha3Hash(sh[:])
ret := new(C.ethash_return_value) }
C.ethash_light(ret, pow.paramsAndCache.cache, pow.paramsAndCache.params, cMiningHash, cnonce) return sh
ghash_light := C.GoBytes(unsafe.Pointer(&ret.result), 32)
return ghash_light
} }
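
The rewritten ethash.go above splits the old monolithic Ethash struct into two halves: Light verifies nonces from a small in-memory cache, Full owns the on-disk DAG and searches for nonces, and Ethash simply embeds both. The following is a minimal sketch, not code from this PR, of how a caller might drive the new API; it assumes the vendored import path github.com/ethereum/ethash from Godeps.json above, and myBlock is a hypothetical stand-in for the pow.Block methods exercised by the test file below (ethash_test.go).

    // usage sketch (hypothetical, for illustration only)
    package main

    import (
        "fmt"
        "math/big"

        "github.com/ethereum/ethash" // assumed vendored import path
        "github.com/ethereum/go-ethereum/common"
    )

    type myBlock struct {
        number     uint64
        difficulty *big.Int
        headerHash common.Hash
        nonce      uint64
        mixDigest  common.Hash
    }

    func (b *myBlock) NumberU64() uint64        { return b.number }
    func (b *myBlock) Difficulty() *big.Int     { return b.difficulty }
    func (b *myBlock) HashNoNonce() common.Hash { return b.headerHash }
    func (b *myBlock) Nonce() uint64            { return b.nonce }
    func (b *myBlock) MixDigest() common.Hash   { return b.mixDigest }

    // target reproduces the rule used by both Light.Verify and Full.Search:
    // a hash qualifies when, read as a 256-bit integer, it is at most
    // 2^256 / difficulty (minDifficulty / difficulty in the diff above).
    func target(difficulty *big.Int) *big.Int {
        max := new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)
        return new(big.Int).Div(max, difficulty)
    }

    func main() {
        e := ethash.New() // all instances created with New share one Light

        // Low difficulty so the sketch terminates quickly; the first Search
        // for an epoch still generates (or loads) the full DAG.
        b := &myBlock{number: 22, difficulty: big.NewInt(1024)}

        stop := make(chan struct{})
        nonce, mix := e.Search(b, stop) // Full: grind nonces against the DAG
        b.nonce = nonce
        copy(b.mixDigest[:], mix)

        fmt.Println("target:", target(b.difficulty))
        fmt.Println("valid: ", e.Verify(b)) // Light: recheck from the cache only
    }

NewForTesting, used by the tests below, swaps in the small cacheSizeForTesting and dagSizeForTesting values and a temporary DAG directory so a search finishes quickly; nonces found that way are not verifiable with a regular-size cache.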

@ -0,0 +1,176 @@
package ethash
import (
"bytes"
"crypto/rand"
"encoding/hex"
"log"
"math/big"
"os"
"sync"
"testing"
"github.com/ethereum/go-ethereum/common"
)
func init() {
// glog.SetV(6)
// glog.SetToStderr(true)
}
type testBlock struct {
difficulty *big.Int
hashNoNonce common.Hash
nonce uint64
mixDigest common.Hash
number uint64
}
func (b *testBlock) Difficulty() *big.Int { return b.difficulty }
func (b *testBlock) HashNoNonce() common.Hash { return b.hashNoNonce }
func (b *testBlock) Nonce() uint64 { return b.nonce }
func (b *testBlock) MixDigest() common.Hash { return b.mixDigest }
func (b *testBlock) NumberU64() uint64 { return b.number }
var validBlocks = []*testBlock{
// from proof of concept nine testnet, epoch 0
{
number: 22,
hashNoNonce: common.HexToHash("372eca2454ead349c3df0ab5d00b0b706b23e49d469387db91811cee0358fc6d"),
difficulty: big.NewInt(132416),
nonce: 0x495732e0ed7a801c,
},
// from proof of concept nine testnet, epoch 1
{
number: 30001,
hashNoNonce: common.HexToHash("7e44356ee3441623bc72a683fd3708fdf75e971bbe294f33e539eedad4b92b34"),
difficulty: big.NewInt(1532671),
nonce: 0x318df1c8adef7e5e,
},
// from proof of concept nine testnet, epoch 2
{
number: 60000,
hashNoNonce: common.HexToHash("5fc898f16035bf5ac9c6d9077ae1e3d5fc1ecc3c9fd5bee8bb00e810fdacbaa0"),
difficulty: big.NewInt(2467358),
nonce: 0x50377003e5d830ca,
},
}
func TestEthashVerifyValid(t *testing.T) {
eth := New()
for i, block := range validBlocks {
if !eth.Verify(block) {
t.Errorf("block %d (%x) did not validate.", i, block.hashNoNonce[:6])
}
}
}
func TestEthashConcurrentVerify(t *testing.T) {
eth, err := NewForTesting()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(eth.Full.Dir)
block := &testBlock{difficulty: big.NewInt(10)}
nonce, _ := eth.Search(block, nil)
block.nonce = nonce
// Verify the block concurrently to check for data races.
var wg sync.WaitGroup
wg.Add(100)
for i := 0; i < 100; i++ {
go func() {
if !eth.Verify(block) {
t.Error("Block could not be verified")
}
wg.Done()
}()
}
wg.Wait()
}
func TestEthashConcurrentSearch(t *testing.T) {
eth, err := NewForTesting()
if err != nil {
t.Fatal(err)
}
eth.Turbo(true)
defer os.RemoveAll(eth.Full.Dir)
// launch n searches concurrently.
var (
block = &testBlock{difficulty: big.NewInt(35000)}
nsearch = 10
wg = new(sync.WaitGroup)
found = make(chan uint64)
stop = make(chan struct{})
)
rand.Read(block.hashNoNonce[:])
wg.Add(nsearch)
for i := 0; i < nsearch; i++ {
go func() {
nonce, _ := eth.Search(block, stop)
select {
case found <- nonce:
case <-stop:
}
wg.Done()
}()
}
// wait for one of them to find the nonce
nonce := <-found
// stop the others
close(stop)
wg.Wait()
if block.nonce = nonce; !eth.Verify(block) {
t.Error("Block could not be verified")
}
}
func TestEthashSearchAcrossEpoch(t *testing.T) {
eth, err := NewForTesting()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(eth.Full.Dir)
for i := epochLength - 40; i < epochLength+40; i++ {
block := &testBlock{number: i, difficulty: big.NewInt(90)}
rand.Read(block.hashNoNonce[:])
nonce, _ := eth.Search(block, nil)
block.nonce = nonce
if !eth.Verify(block) {
t.Fatalf("Block could not be verified")
}
}
}
func TestGetSeedHash(t *testing.T) {
seed0, err := GetSeedHash(0)
if err != nil {
t.Errorf("Failed to get seedHash for block 0: %v", err)
}
if bytes.Compare(seed0, make([]byte, 32)) != 0 {
log.Printf("seedHash for block 0 should be 0s, was: %v\n", seed0)
}
seed1, err := GetSeedHash(30000)
if err != nil {
t.Error(err)
}
// From python:
// > from pyethash import get_seedhash
// > get_seedhash(30000)
expectedSeed1, err := hex.DecodeString("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
if err != nil {
t.Error(err)
}
if bytes.Compare(seed1, expectedSeed1) != 0 {
log.Printf("seedHash for block 1 should be: %v,\nactual value: %v\n", expectedSeed1, seed1)
}
}
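
TestGetSeedHash above pins the epoch-1 seed to 290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563, which is Keccak-256 applied once to 32 zero bytes, exactly the makeSeedHash loop in ethash.go. Below is a standalone sketch of that loop, illustrative only; it assumes golang.org/x/crypto/sha3 is available (NewLegacyKeccak256 is Ethereum's Keccak-256, not NIST SHA3-256).

    // seed hash sketch (hypothetical, for illustration only)
    package main

    import (
        "fmt"

        "golang.org/x/crypto/sha3"
    )

    const epochLength = 30000

    // seedHash mirrors makeSeedHash above: epoch 0 is 32 zero bytes, and
    // every later epoch hashes the previous seed once more.
    func seedHash(blockNum uint64) [32]byte {
        var seed [32]byte // epoch 0: all zeroes
        for i := uint64(0); i < blockNum/epochLength; i++ {
            h := sha3.NewLegacyKeccak256() // same primitive as crypto.Sha3 in the diff
            h.Write(seed[:])
            copy(seed[:], h.Sum(nil))
        }
        return seed
    }

    func main() {
        want := "290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"
        got := fmt.Sprintf("%x", seedHash(30000)) // first block of epoch 1
        fmt.Println(got == want, got)
    }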

@ -0,0 +1,25 @@
package ethash
/*
#cgo CFLAGS: -std=gnu99 -Wall
#cgo windows CFLAGS: -mno-stack-arg-probe
#cgo LDFLAGS: -lm
#include "src/libethash/internal.c"
#include "src/libethash/sha3.c"
#include "src/libethash/io.c"
#ifdef _WIN32
# include "src/libethash/util_win32.c"
# include "src/libethash/io_win32.c"
# include "src/libethash/mmap_win32.c"
#else
# include "src/libethash/io_posix.c"
#endif
// 'gateway function' for calling back into go.
extern int ethashGoCallback(unsigned);
int ethashGoCallback_cgo(unsigned percent) { return ethashGoCallback(percent); }
*/
import "C"

@@ -1,23 +1,36 @@
 #!/usr/bin/env python
+import os
 from distutils.core import setup, Extension
+
+sources = [
+    'src/python/core.c',
+    'src/libethash/io.c',
+    'src/libethash/internal.c',
+    'src/libethash/sha3.c']
+
+if os.name == 'nt':
+    sources += [
+        'src/libethash/util_win32.c',
+        'src/libethash/io_win32.c',
+        'src/libethash/mmap_win32.c',
+    ]
+else:
+    sources += [
+        'src/libethash/io_posix.c'
+    ]
+
+depends = [
+    'src/libethash/ethash.h',
+    'src/libethash/compiler.h',
+    'src/libethash/data_sizes.h',
+    'src/libethash/endian.h',
+    'src/libethash/ethash.h',
+    'src/libethash/io.h',
+    'src/libethash/fnv.h',
+    'src/libethash/internal.h',
+    'src/libethash/sha3.h',
+    'src/libethash/util.h',
+]
+
 pyethash = Extension('pyethash',
-                     sources=[
-                         'src/python/core.c',
-                         'src/libethash/util.c',
-                         'src/libethash/internal.c',
-                         'src/libethash/sha3.c'],
-                     depends=[
-                         'src/libethash/ethash.h',
-                         'src/libethash/compiler.h',
-                         'src/libethash/data_sizes.h',
-                         'src/libethash/endian.h',
-                         'src/libethash/ethash.h',
-                         'src/libethash/fnv.h',
-                         'src/libethash/internal.h',
-                         'src/libethash/sha3.h',
-                         'src/libethash/util.h'
-                     ],
+                     sources=sources,
+                     depends=depends,
                      extra_compile_args=["-Isrc/", "-std=gnu99", "-Wall"])

 setup(

@@ -3,56 +3,56 @@ include_directories(..)
 set(CMAKE_BUILD_TYPE Release)
 if (MSVC)
 add_definitions("/openmp")
 endif()
 # enable C++11, should probably be a bit more specific about compiler
 if (NOT MSVC)
 SET(CMAKE_CXX_FLAGS "-std=c++11")
 endif()
 if (NOT MPI_FOUND)
 find_package(MPI)
 endif()
 if (NOT CRYPTOPP_FOUND)
 find_package(CryptoPP 5.6.2)
 endif()
 if (CRYPTOPP_FOUND)
 add_definitions(-DWITH_CRYPTOPP)
+find_package (Threads REQUIRED)
 endif()
 if (NOT OpenCL_FOUND)
 find_package(OpenCL)
 endif()
 if (OpenCL_FOUND)
 add_definitions(-DWITH_OPENCL)
 include_directories(${OpenCL_INCLUDE_DIRS})
 list(APPEND FILES ethash_cl_miner.cpp ethash_cl_miner.h)
 endif()
 if (MPI_FOUND)
 include_directories(${MPI_INCLUDE_PATH})
 add_executable (Benchmark_MPI_FULL benchmark.cpp)
-target_link_libraries (Benchmark_MPI_FULL ${ETHHASH_LIBS} ${MPI_LIBRARIES})
+target_link_libraries (Benchmark_MPI_FULL ${ETHHASH_LIBS} ${MPI_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
 SET_TARGET_PROPERTIES(Benchmark_MPI_FULL PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} ${MPI_COMPILE_FLAGS} -DFULL -DMPI")
 add_executable (Benchmark_MPI_LIGHT benchmark.cpp)
-target_link_libraries (Benchmark_MPI_LIGHT ${ETHHASH_LIBS} ${MPI_LIBRARIES})
+target_link_libraries (Benchmark_MPI_LIGHT ${ETHHASH_LIBS} ${MPI_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
 SET_TARGET_PROPERTIES(Benchmark_MPI_LIGHT PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} ${MPI_COMPILE_FLAGS} -DMPI")
 endif()
 add_executable (Benchmark_FULL benchmark.cpp)
-target_link_libraries (Benchmark_FULL ${ETHHASH_LIBS})
+target_link_libraries (Benchmark_FULL ${ETHHASH_LIBS} ${CMAKE_THREAD_LIBS_INIT})
 SET_TARGET_PROPERTIES(Benchmark_FULL PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} -DFULL")
 add_executable (Benchmark_LIGHT benchmark.cpp)
-target_link_libraries (Benchmark_LIGHT ${ETHHASH_LIBS})
+target_link_libraries (Benchmark_LIGHT ${ETHHASH_LIBS} ${CMAKE_THREAD_LIBS_INIT})
 if (OpenCL_FOUND)
 add_executable (Benchmark_CL benchmark.cpp)
-target_link_libraries (Benchmark_CL ${ETHHASH_LIBS} ethash-cl)
+target_link_libraries (Benchmark_CL ${ETHHASH_LIBS} ethash-cl ${CMAKE_THREAD_LIBS_INIT})
 SET_TARGET_PROPERTIES(Benchmark_CL PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} -DOPENCL")
 endif()

@ -96,6 +96,11 @@ static std::string bytesToHexString(uint8_t const* bytes, unsigned size)
return str; return str;
} }
static std::string bytesToHexString(ethash_h256_t const *hash, unsigned size)
{
return bytesToHexString((uint8_t*)hash, size);
}
extern "C" int main(void) extern "C" int main(void)
{ {
// params for ethash // params for ethash
@ -106,12 +111,12 @@ extern "C" int main(void)
//params.full_size = 8209 * 4096; // 8MBish; //params.full_size = 8209 * 4096; // 8MBish;
//params.cache_size = 8209*4096; //params.cache_size = 8209*4096;
//params.cache_size = 2053*4096; //params.cache_size = 2053*4096;
ethash_blockhash_t seed; ethash_h256_t seed;
ethash_blockhash_t previous_hash; ethash_h256_t previous_hash;
memcpy(&seed, hexStringToBytes("9410b944535a83d9adf6bbdcc80e051f30676173c16ca0d32d6f1263fc246466").data(), 32); memcpy(&seed, hexStringToBytes("9410b944535a83d9adf6bbdcc80e051f30676173c16ca0d32d6f1263fc246466").data(), 32);
memcpy(&previous_hash, hexStringToBytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").data(), 32); memcpy(&previous_hash, hexStringToBytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").data(), 32);
// allocate page aligned buffer for dataset // allocate page aligned buffer for dataset
#ifdef FULL #ifdef FULL
void* full_mem_buf = malloc(params.full_size + 4095); void* full_mem_buf = malloc(params.full_size + 4095);
@ -122,24 +127,24 @@ extern "C" int main(void)
ethash_cache cache; ethash_cache cache;
cache.mem = cache_mem; cache.mem = cache_mem;
// compute cache or full data // compute cache or full data
{ {
auto startTime = high_resolution_clock::now(); auto startTime = high_resolution_clock::now();
ethash_mkcache(&cache, &params, seed); ethash_mkcache(&cache, &params, &seed);
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count(); auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count();
ethash_blockhash_t cache_hash; ethash_h256_t cache_hash;
SHA3_256(&cache_hash, (uint8_t const*)cache_mem, params.cache_size); SHA3_256(&cache_hash, (uint8_t const*)cache_mem, params.cache_size);
debugf("ethash_mkcache: %ums, sha3: %s\n", (unsigned)((time*1000)/CLOCKS_PER_SEC), bytesToHexString(cache_hash,sizeof(cache_hash)).data()); debugf("ethash_mkcache: %ums, sha3: %s\n", (unsigned)((time*1000)/CLOCKS_PER_SEC), bytesToHexString(&cache_hash, sizeof(cache_hash)).data());
// print a couple of test hashes // print a couple of test hashes
{ {
auto startTime = high_resolution_clock::now(); auto startTime = high_resolution_clock::now();
ethash_return_value hash; ethash_return_value hash;
ethash_light(&hash, &cache, &params, previous_hash, 0); ethash_light(&hash, &cache, &params, &previous_hash, 0);
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count(); auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count();
debugf("ethash_light test: %ums, %s\n", (unsigned)time, bytesToHexString(hash.result, 32).data()); debugf("ethash_light test: %ums, %s\n", (unsigned)time, bytesToHexString(&hash.result, 32).data());
} }
#ifdef FULL #ifdef FULL
@ -154,34 +159,34 @@ extern "C" int main(void)
ethash_cl_miner miner; ethash_cl_miner miner;
{ {
auto startTime = high_resolution_clock::now(); auto startTime = high_resolution_clock::now();
if (!miner.init(params, seed)) if (!miner.init(params, &seed))
exit(-1); exit(-1);
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count(); auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count();
debugf("ethash_cl_miner init: %ums\n", (unsigned)time); debugf("ethash_cl_miner init: %ums\n", (unsigned)time);
} }
#endif #endif
#ifdef FULL #ifdef FULL
{ {
auto startTime = high_resolution_clock::now(); auto startTime = high_resolution_clock::now();
ethash_return_value hash; ethash_return_value hash;
ethash_full(&hash, full_mem, &params, previous_hash, 0); ethash_full(&hash, full_mem, &params, &previous_hash, 0);
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count(); auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count();
debugf("ethash_full test: %uns, %s\n", (unsigned)time); debugf("ethash_full test: %uns\n", (unsigned)time);
} }
#endif #endif
#ifdef OPENCL #ifdef OPENCL
// validate 1024 hashes against CPU // validate 1024 hashes against CPU
miner.hash(g_hashes, previous_hash, 0, 1024); miner.hash(g_hashes, (uint8_t*)&previous_hash, 0, 1024);
for (unsigned i = 0; i != 1024; ++i) for (unsigned i = 0; i != 1024; ++i)
{ {
ethash_return_value hash; ethash_return_value hash;
ethash_light(&hash, &cache, &params, previous_hash, i); ethash_light(&hash, &cache, &params, &previous_hash, i);
if (memcmp(hash.result, g_hashes + 32*i, 32) != 0) if (memcmp(&hash.result, g_hashes + 32*i, 32) != 0)
{ {
debugf("nonce %u failed: %s %s\n", i, bytesToHexString(g_hashes + 32*i, 32).c_str(), bytesToHexString(hash.result, 32).c_str()); debugf("nonce %u failed: %s %s\n", i, bytesToHexString(g_hashes + 32*i, 32).c_str(), bytesToHexString(&hash.result, 32).c_str());
static unsigned c = 0; static unsigned c = 0;
if (++c == 16) if (++c == 16)
{ {
@ -189,14 +194,14 @@ extern "C" int main(void)
} }
} }
} }
// ensure nothing else is going on // ensure nothing else is going on
miner.finish(); miner.finish();
#endif #endif
auto startTime = high_resolution_clock::now(); auto startTime = high_resolution_clock::now();
unsigned hash_count = trials; unsigned hash_count = trials;
#ifdef OPENCL #ifdef OPENCL
{ {
struct search_hook : ethash_cl_miner::search_hook struct search_hook : ethash_cl_miner::search_hook
@ -220,14 +225,14 @@ extern "C" int main(void)
search_hook hook; search_hook hook;
hook.hash_count = 0; hook.hash_count = 0;
miner.search(previous_hash, 0x000000ffffffffff, hook); miner.search((uint8_t*)&previous_hash, 0x000000ffffffffff, hook);
for (unsigned i = 0; i != hook.nonce_vec.size(); ++i) for (unsigned i = 0; i != hook.nonce_vec.size(); ++i)
{ {
uint64_t nonce = hook.nonce_vec[i]; uint64_t nonce = hook.nonce_vec[i];
ethash_return_value hash; ethash_return_value hash;
ethash_light(&hash, &cache, &params, previous_hash, nonce); ethash_light(&hash, &cache, &params, &previous_hash, nonce);
debugf("found: %.8x%.8x -> %s\n", unsigned(nonce>>32), unsigned(nonce), bytesToHexString(hash.result, 32).c_str()); debugf("found: %.8x%.8x -> %s\n", unsigned(nonce>>32), unsigned(nonce), bytesToHexString(&hash.result, 32).c_str());
} }
hash_count = hook.hash_count; hash_count = hook.hash_count;
@ -239,9 +244,9 @@ extern "C" int main(void)
{ {
ethash_return_value hash; ethash_return_value hash;
#ifdef FULL #ifdef FULL
ethash_full(&hash, full_mem, &params, previous_hash, nonce); ethash_full(&hash, full_mem, &params, &previous_hash, nonce);
#else #else
ethash_light(&hash, &cache, &params, previous_hash, nonce); ethash_light(&hash, &cache, &params, &previous_hash, nonce);
#endif // FULL #endif // FULL
} }
} }
@ -249,7 +254,7 @@ extern "C" int main(void)
auto time = std::chrono::duration_cast<std::chrono::microseconds>(high_resolution_clock::now() - startTime).count(); auto time = std::chrono::duration_cast<std::chrono::microseconds>(high_resolution_clock::now() - startTime).count();
debugf("Search took: %ums\n", (unsigned)time/1000); debugf("Search took: %ums\n", (unsigned)time/1000);
unsigned read_size = ACCESSES * MIX_BYTES; unsigned read_size = ETHASH_ACCESSES * ETHASH_MIX_BYTES;
#if defined(OPENCL) || defined(FULL) #if defined(OPENCL) || defined(FULL)
debugf( debugf(
"hashrate: %8.2f Mh/s, bw: %8.2f GB/s\n", "hashrate: %8.2f Mh/s, bw: %8.2f GB/s\n",

@@ -13,7 +13,7 @@ if (NOT MSVC)
 set(CMAKE_CXX_FLAGS "-Wall -std=c++11")
 set(CMAKE_CXX_FLAGS_DEBUG "-O0 -g")
 set(CMAKE_CXX_FLAGS_MINSIZEREL "-Os -DNDEBUG")
-set(CMAKE_CXX_FLAGS_RELEASE "-O4 -DNDEBUG")
+set(CMAKE_CXX_FLAGS_RELEASE "-O3 -DNDEBUG")
 set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O2 -g")
 # Compiler-specific C++11 activation.

@ -24,12 +24,15 @@
#include <cstdio> #include <cstdio>
#include <cstdlib> #include <cstdlib>
#include <iostream>
#include <assert.h> #include <assert.h>
#include <queue> #include <queue>
#include <vector> #include <vector>
#include <libethash/util.h>
#include <libethash/ethash.h>
#include <libethash/internal.h>
#include "ethash_cl_miner.h" #include "ethash_cl_miner.h"
#include "ethash_cl_miner_kernel.h" #include "ethash_cl_miner_kernel.h"
#include <libethash/util.h>
#define ETHASH_BYTES 32 #define ETHASH_BYTES 32
@ -42,6 +45,8 @@
#undef min #undef min
#undef max #undef max
using namespace std;
static void add_definition(std::string& source, char const* id, unsigned value) static void add_definition(std::string& source, char const* id, unsigned value)
{ {
char buf[256]; char buf[256];
@ -49,52 +54,108 @@ static void add_definition(std::string& source, char const* id, unsigned value)
source.insert(source.begin(), buf, buf + strlen(buf)); source.insert(source.begin(), buf, buf + strlen(buf));
} }
ethash_cl_miner::search_hook::~search_hook() {}
ethash_cl_miner::ethash_cl_miner() ethash_cl_miner::ethash_cl_miner()
: m_opencl_1_1() : m_opencl_1_1()
{ {
} }
bool ethash_cl_miner::init(ethash_params const& params, ethash_blockhash_t const *seed, unsigned workgroup_size) std::string ethash_cl_miner::platform_info(unsigned _platformId, unsigned _deviceId)
{
std::vector<cl::Platform> platforms;
cl::Platform::get(&platforms);
if (platforms.empty())
{
cout << "No OpenCL platforms found." << endl;
return std::string();
}
// get GPU device of the selected platform
std::vector<cl::Device> devices;
unsigned platform_num = std::min<unsigned>(_platformId, platforms.size() - 1);
platforms[platform_num].getDevices(CL_DEVICE_TYPE_ALL, &devices);
if (devices.empty())
{
cout << "No OpenCL devices found." << endl;
return std::string();
}
// use selected default device
unsigned device_num = std::min<unsigned>(_deviceId, devices.size() - 1);
cl::Device& device = devices[device_num];
std::string device_version = device.getInfo<CL_DEVICE_VERSION>();
return "{ \"platform\": \"" + platforms[platform_num].getInfo<CL_PLATFORM_NAME>() + "\", \"device\": \"" + device.getInfo<CL_DEVICE_NAME>() + "\", \"version\": \"" + device_version + "\" }";
}
unsigned ethash_cl_miner::get_num_devices(unsigned _platformId)
{
std::vector<cl::Platform> platforms;
cl::Platform::get(&platforms);
if (platforms.empty())
{
cout << "No OpenCL platforms found." << endl;
return 0;
}
std::vector<cl::Device> devices;
unsigned platform_num = std::min<unsigned>(_platformId, platforms.size() - 1);
platforms[platform_num].getDevices(CL_DEVICE_TYPE_ALL, &devices);
if (devices.empty())
{
cout << "No OpenCL devices found." << endl;
return 0;
}
return devices.size();
}
void ethash_cl_miner::finish()
{
if (m_queue())
m_queue.finish();
}
bool ethash_cl_miner::init(uint64_t block_number, std::function<void(void*)> _fillDAG, unsigned workgroup_size, unsigned _platformId, unsigned _deviceId)
{ {
// store params // store params
m_params = params; m_fullSize = ethash_get_datasize(block_number);
// get all platforms // get all platforms
std::vector<cl::Platform> platforms; std::vector<cl::Platform> platforms;
cl::Platform::get(&platforms); cl::Platform::get(&platforms);
if (platforms.empty()) if (platforms.empty())
{ {
debugf("No OpenCL platforms found.\n"); cout << "No OpenCL platforms found." << endl;
return false; return false;
} }
// use default platform // use selected platform
debugf("Using platform: %s\n", platforms[0].getInfo<CL_PLATFORM_NAME>().c_str()); _platformId = std::min<unsigned>(_platformId, platforms.size() - 1);
// get GPU device of the default platform cout << "Using platform: " << platforms[_platformId].getInfo<CL_PLATFORM_NAME>().c_str() << endl;
std::vector<cl::Device> devices;
platforms[0].getDevices(CL_DEVICE_TYPE_ALL, &devices); // get GPU device of the default platform
if (devices.empty()) std::vector<cl::Device> devices;
platforms[_platformId].getDevices(CL_DEVICE_TYPE_ALL, &devices);
if (devices.empty())
{ {
debugf("No OpenCL devices found.\n"); cout << "No OpenCL devices found." << endl;
return false; return false;
} }
// use default device // use selected device
unsigned device_num = 0; cl::Device& device = devices[std::min<unsigned>(_deviceId, devices.size() - 1)];
cl::Device& device = devices[device_num];
std::string device_version = device.getInfo<CL_DEVICE_VERSION>(); std::string device_version = device.getInfo<CL_DEVICE_VERSION>();
debugf("Using device: %s (%s)\n", device.getInfo<CL_DEVICE_NAME>().c_str(),device_version.c_str()); cout << "Using device: " << device.getInfo<CL_DEVICE_NAME>().c_str() << "(" << device_version.c_str() << ")" << endl;
if (strncmp("OpenCL 1.0", device_version.c_str(), 10) == 0) if (strncmp("OpenCL 1.0", device_version.c_str(), 10) == 0)
{ {
debugf("OpenCL 1.0 is not supported.\n"); cout << "OpenCL 1.0 is not supported." << endl;
return false; return false;
} }
if (strncmp("OpenCL 1.1", device_version.c_str(), 10) == 0) if (strncmp("OpenCL 1.1", device_version.c_str(), 10) == 0)
{
m_opencl_1_1 = true; m_opencl_1_1 = true;
}
// create context // create context
m_context = cl::Context(std::vector<cl::Device>(&device, &device + 1)); m_context = cl::Context(std::vector<cl::Device>(&device, &device + 1));
@ -106,8 +167,8 @@ bool ethash_cl_miner::init(ethash_params const& params, ethash_blockhash_t const
// patch source code // patch source code
std::string code(ETHASH_CL_MINER_KERNEL, ETHASH_CL_MINER_KERNEL + ETHASH_CL_MINER_KERNEL_SIZE); std::string code(ETHASH_CL_MINER_KERNEL, ETHASH_CL_MINER_KERNEL + ETHASH_CL_MINER_KERNEL_SIZE);
add_definition(code, "GROUP_SIZE", m_workgroup_size); add_definition(code, "GROUP_SIZE", m_workgroup_size);
add_definition(code, "DAG_SIZE", (unsigned)(params.full_size / MIX_BYTES)); add_definition(code, "DAG_SIZE", (unsigned)(m_fullSize / ETHASH_MIX_BYTES));
add_definition(code, "ACCESSES", ACCESSES); add_definition(code, "ACCESSES", ETHASH_ACCESSES);
add_definition(code, "MAX_OUTPUTS", c_max_search_results); add_definition(code, "MAX_OUTPUTS", c_max_search_results);
//debugf("%s", code.c_str()); //debugf("%s", code.c_str());
@ -122,31 +183,25 @@ bool ethash_cl_miner::init(ethash_params const& params, ethash_blockhash_t const
} }
catch (cl::Error err) catch (cl::Error err)
{ {
debugf("%s\n", program.getBuildInfo<CL_PROGRAM_BUILD_LOG>(device).c_str()); cout << program.getBuildInfo<CL_PROGRAM_BUILD_LOG>(device).c_str();
return false; return false;
} }
m_hash_kernel = cl::Kernel(program, "ethash_hash"); m_hash_kernel = cl::Kernel(program, "ethash_hash");
m_search_kernel = cl::Kernel(program, "ethash_search"); m_search_kernel = cl::Kernel(program, "ethash_search");
// create buffer for dag // create buffer for dag
m_dag = cl::Buffer(m_context, CL_MEM_READ_ONLY, params.full_size); m_dag = cl::Buffer(m_context, CL_MEM_READ_ONLY, m_fullSize);
// create buffer for header // create buffer for header
m_header = cl::Buffer(m_context, CL_MEM_READ_ONLY, 32); m_header = cl::Buffer(m_context, CL_MEM_READ_ONLY, 32);
// compute dag on CPU // compute dag on CPU
{ {
void* cache_mem = malloc(params.cache_size + 63);
ethash_cache cache;
cache.mem = (void*)(((uintptr_t)cache_mem + 63) & ~63);
ethash_mkcache(&cache, &params, seed);
// if this throws then it's because we probably need to subdivide the dag uploads for compatibility // if this throws then it's because we probably need to subdivide the dag uploads for compatibility
void* dag_ptr = m_queue.enqueueMapBuffer(m_dag, true, m_opencl_1_1 ? CL_MAP_WRITE : CL_MAP_WRITE_INVALIDATE_REGION, 0, params.full_size); void* dag_ptr = m_queue.enqueueMapBuffer(m_dag, true, m_opencl_1_1 ? CL_MAP_WRITE : CL_MAP_WRITE_INVALIDATE_REGION, 0, m_fullSize);
ethash_compute_full_data(dag_ptr, &params, &cache); // memcpying 1GB: horrible... really. horrible. but necessary since we can't mmap *and* gpumap.
_fillDAG(dag_ptr);
m_queue.enqueueUnmapMemObject(m_dag, dag_ptr); m_queue.enqueueUnmapMemObject(m_dag, dag_ptr);
free(cache_mem);
} }
// create mining buffers // create mining buffers
@ -167,7 +222,7 @@ void ethash_cl_miner::hash(uint8_t* ret, uint8_t const* header, uint64_t nonce,
unsigned buf; unsigned buf;
}; };
std::queue<pending_batch> pending; std::queue<pending_batch> pending;
// update header constant buffer // update header constant buffer
m_queue.enqueueWriteBuffer(m_header, true, 0, 32, header); m_queue.enqueueWriteBuffer(m_header, true, 0, 32, header);
@ -191,8 +246,8 @@ void ethash_cl_miner::hash(uint8_t* ret, uint8_t const* header, uint64_t nonce,
// how many this batch // how many this batch
if (i < count) if (i < count)
{ {
unsigned const this_count = std::min(count - i, c_hash_batch_size); unsigned const this_count = std::min<unsigned>(count - i, c_hash_batch_size);
unsigned const batch_count = std::max(this_count, m_workgroup_size); unsigned const batch_count = std::max<unsigned>(this_count, m_workgroup_size);
// supply output hash buffer to kernel // supply output hash buffer to kernel
m_hash_kernel.setArg(0, m_hash_buf[buf]); m_hash_kernel.setArg(0, m_hash_buf[buf]);
@ -205,7 +260,7 @@ void ethash_cl_miner::hash(uint8_t* ret, uint8_t const* header, uint64_t nonce,
cl::NDRange(m_workgroup_size) cl::NDRange(m_workgroup_size)
); );
m_queue.flush(); m_queue.flush();
pending.push({i, this_count, buf}); pending.push({i, this_count, buf});
i += this_count; i += this_count;
buf = (buf + 1) % c_num_buffers; buf = (buf + 1) % c_num_buffers;
@ -245,7 +300,7 @@ void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook
m_queue.enqueueWriteBuffer(m_search_buf[i], false, 0, 4, &c_zero); m_queue.enqueueWriteBuffer(m_search_buf[i], false, 0, 4, &c_zero);
} }
#if CL_VERSION_1_2 #if CL_VERSION_1_2 && 0
cl::Event pre_return_event; cl::Event pre_return_event;
if (!m_opencl_1_1) if (!m_opencl_1_1)
{ {
@ -284,7 +339,7 @@ void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook
// execute it! // execute it!
m_queue.enqueueNDRangeKernel(m_search_kernel, cl::NullRange, c_search_batch_size, m_workgroup_size); m_queue.enqueueNDRangeKernel(m_search_kernel, cl::NullRange, c_search_batch_size, m_workgroup_size);
pending.push({start_nonce, buf}); pending.push({start_nonce, buf});
buf = (buf + 1) % c_num_buffers; buf = (buf + 1) % c_num_buffers;
@ -295,16 +350,16 @@ void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook
// could use pinned host pointer instead // could use pinned host pointer instead
uint32_t* results = (uint32_t*)m_queue.enqueueMapBuffer(m_search_buf[batch.buf], true, CL_MAP_READ, 0, (1+c_max_search_results) * sizeof(uint32_t)); uint32_t* results = (uint32_t*)m_queue.enqueueMapBuffer(m_search_buf[batch.buf], true, CL_MAP_READ, 0, (1+c_max_search_results) * sizeof(uint32_t));
unsigned num_found = std::min(results[0], c_max_search_results); unsigned num_found = std::min<unsigned>(results[0], c_max_search_results);
uint64_t nonces[c_max_search_results]; uint64_t nonces[c_max_search_results];
for (unsigned i = 0; i != num_found; ++i) for (unsigned i = 0; i != num_found; ++i)
{ {
nonces[i] = batch.start_nonce + results[i+1]; nonces[i] = batch.start_nonce + results[i+1];
} }
m_queue.enqueueUnmapMemObject(m_search_buf[batch.buf], results); m_queue.enqueueUnmapMemObject(m_search_buf[batch.buf], results);
bool exit = num_found && hook.found(nonces, num_found); bool exit = num_found && hook.found(nonces, num_found);
exit |= hook.searched(batch.start_nonce, c_search_batch_size); // always report searched before exit exit |= hook.searched(batch.start_nonce, c_search_batch_size); // always report searched before exit
if (exit) if (exit)
@ -319,7 +374,7 @@ void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook
} }
// not safe to return until this is ready // not safe to return until this is ready
#if CL_VERSION_1_2 #if CL_VERSION_1_2 && 0
if (!m_opencl_1_1) if (!m_opencl_1_1)
{ {
pre_return_event.wait(); pre_return_event.wait();

@ -1,9 +1,19 @@
#pragma once #pragma once
#define __CL_ENABLE_EXCEPTIONS #define __CL_ENABLE_EXCEPTIONS
#define CL_USE_DEPRECATED_OPENCL_2_0_APIS #define CL_USE_DEPRECATED_OPENCL_2_0_APIS
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
#include "cl.hpp"
#pragma clang diagnostic pop
#else
#include "cl.hpp" #include "cl.hpp"
#endif
#include <time.h> #include <time.h>
#include <functional>
#include <libethash/ethash.h> #include <libethash/ethash.h>
class ethash_cl_miner class ethash_cl_miner
@ -11,6 +21,8 @@ class ethash_cl_miner
public: public:
struct search_hook struct search_hook
{ {
virtual ~search_hook(); // always a virtual destructor for a class with virtuals.
// reports progress, return true to abort // reports progress, return true to abort
virtual bool found(uint64_t const* nonces, uint32_t count) = 0; virtual bool found(uint64_t const* nonces, uint32_t count) = 0;
virtual bool searched(uint64_t start_nonce, uint32_t count) = 0; virtual bool searched(uint64_t start_nonce, uint32_t count) = 0;
@ -19,19 +31,19 @@ public:
public: public:
ethash_cl_miner(); ethash_cl_miner();
bool init(ethash_params const& params, ethash_blockhash_t const *seed, unsigned workgroup_size = 64); bool init(uint64_t block_number, std::function<void(void*)> _fillDAG, unsigned workgroup_size = 64, unsigned _platformId = 0, unsigned _deviceId = 0);
static std::string platform_info(unsigned _platformId = 0, unsigned _deviceId = 0);
static unsigned get_num_devices(unsigned _platformId = 0);
void finish(); void finish();
void hash(uint8_t* ret, uint8_t const* header, uint64_t nonce, unsigned count); void hash(uint8_t* ret, uint8_t const* header, uint64_t nonce, unsigned count);
void search(uint8_t const* header, uint64_t target, search_hook& hook); void search(uint8_t const* header, uint64_t target, search_hook& hook);
private: private:
static unsigned const c_max_search_results = 63; enum { c_max_search_results = 63, c_num_buffers = 2, c_hash_batch_size = 1024, c_search_batch_size = 1024*256 };
static unsigned const c_num_buffers = 2;
static unsigned const c_hash_batch_size = 1024;
static unsigned const c_search_batch_size = 1024*256;
ethash_params m_params; uint64_t m_fullSize;
cl::Context m_context; cl::Context m_context;
cl::CommandQueue m_queue; cl::CommandQueue m_queue;
cl::Kernel m_hash_kernel; cl::Kernel m_hash_kernel;

@ -415,8 +415,7 @@ __kernel void ethash_search_simple(
{ {
uint const gid = get_global_id(0); uint const gid = get_global_id(0);
hash32_t hash = compute_hash_simple(g_header, g_dag, start_nonce + gid, isolate); hash32_t hash = compute_hash_simple(g_header, g_dag, start_nonce + gid, isolate);
if (as_ulong(as_uchar8(hash.ulongs[0]).s76543210) < target)
if (hash.ulongs[countof(hash.ulongs)-1] < target)
{ {
uint slot = min(MAX_OUTPUTS, atomic_inc(&g_output[0]) + 1); uint slot = min(MAX_OUTPUTS, atomic_inc(&g_output[0]) + 1);
g_output[slot] = gid; g_output[slot] = gid;
@ -453,7 +452,7 @@ __kernel void ethash_search(
uint const gid = get_global_id(0); uint const gid = get_global_id(0);
hash32_t hash = compute_hash(share, g_header, g_dag, start_nonce + gid, isolate); hash32_t hash = compute_hash(share, g_header, g_dag, start_nonce + gid, isolate);
if (hash.ulongs[countof(hash.ulongs)-1] < target) if (as_ulong(as_uchar8(hash.ulongs[0]).s76543210) < target)
{ {
uint slot = min(MAX_OUTPUTS, atomic_inc(&g_output[0]) + 1); uint slot = min(MAX_OUTPUTS, atomic_inc(&g_output[0]) + 1);
g_output[slot] = gid; g_output[slot] = gid;
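The kernel change above replaces the comparison against the hash's last 64-bit word with a byte-reversed comparison on its first 64-bit word, i.e. the first 8 bytes of the hash read as a big-endian number. A minimal host-side sketch of the equivalent check, assuming a plain 32-byte hash buffer and a 64-bit target (illustration only, not part of this commit):

```c
#include <stdbool.h>
#include <stdint.h>

// Equivalent of as_ulong(as_uchar8(hash.ulongs[0]).s76543210) < target:
// read the first 8 bytes of the hash most-significant-byte first and
// compare against the 64-bit target.
static bool hash_meets_target(uint8_t const hash[32], uint64_t target)
{
	uint64_t prefix = 0;
	for (int i = 0; i < 8; ++i) {
		prefix = (prefix << 8) | hash[i];
	}
	return prefix < target;
}
```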

@ -10,8 +10,7 @@ if (NOT MSVC)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99")
endif() endif()
set(FILES util.c set(FILES util.h
util.h
io.c io.c
internal.c internal.c
ethash.h ethash.h
@ -21,7 +20,7 @@ set(FILES util.c
data_sizes.h) data_sizes.h)
if (MSVC) if (MSVC)
list(APPEND FILES io_win32.c) list(APPEND FILES util_win32.c io_win32.c mmap_win32.c)
else() else()
list(APPEND FILES io_posix.c) list(APPEND FILES io_posix.c)
endif() endif()

File diff suppressed because it is too large

@ -3,38 +3,6 @@
#include <stdint.h> #include <stdint.h>
#include "compiler.h" #include "compiler.h"
static const uint8_t BitReverseTable256[] =
{
0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0,
0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8,
0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC,
0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2,
0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6,
0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE,
0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9,
0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5,
0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3,
0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB,
0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF
};
static inline uint32_t bitfn_swap32(uint32_t a) {
return (BitReverseTable256[a & 0xff] << 24) |
(BitReverseTable256[(a >> 8) & 0xff] << 16) |
(BitReverseTable256[(a >> 16) & 0xff] << 8) |
(BitReverseTable256[(a >> 24) & 0xff]);
}
static inline uint64_t bitfn_swap64(uint64_t a) {
return ((uint64_t) bitfn_swap32((uint32_t) (a >> 32))) |
(((uint64_t) bitfn_swap32((uint32_t) a)) << 32);
}
#if defined(__MINGW32__) || defined(_WIN32) #if defined(__MINGW32__) || defined(_WIN32)
# define LITTLE_ENDIAN 1234 # define LITTLE_ENDIAN 1234
# define BYTE_ORDER LITTLE_ENDIAN # define BYTE_ORDER LITTLE_ENDIAN
@ -53,22 +21,52 @@ static inline uint64_t bitfn_swap64(uint64_t a) {
# define BIG_ENDIAN 1234 # define BIG_ENDIAN 1234
# define BYTE_ORDER BIG_ENDIAN # define BYTE_ORDER BIG_ENDIAN
#else #else
# include <endian.h> # include <endian.h>
#endif
#if defined(_WIN32)
#include <stdlib.h>
#define ethash_swap_u32(input_) _byteswap_ulong(input_)
#define ethash_swap_u64(input_) _byteswap_uint64(input_)
#elif defined(__APPLE__)
#include <libkern/OSByteOrder.h>
#define ethash_swap_u32(input_) OSSwapInt32(input_)
#define ethash_swap_u64(input_) OSSwapInt64(input_)
#else // posix
#include <byteswap.h>
#define ethash_swap_u32(input_) __bswap_32(input_)
#define ethash_swap_u64(input_) __bswap_64(input_)
#endif #endif
#if LITTLE_ENDIAN == BYTE_ORDER #if LITTLE_ENDIAN == BYTE_ORDER
#define fix_endian32(x) (x) #define fix_endian32(dst_ ,src_) dst_ = src_
#define fix_endian64(x) (x) #define fix_endian32_same(val_)
#define fix_endian64(dst_, src_) dst_ = src_
#define fix_endian64_same(val_)
#define fix_endian_arr32(arr_, size_)
#define fix_endian_arr64(arr_, size_)
#elif BIG_ENDIAN == BYTE_ORDER #elif BIG_ENDIAN == BYTE_ORDER
#define fix_endian32(x) bitfn_swap32(x) #define fix_endian32(dst_, src_) dst_ = ethash_swap_u32(src_)
#define fix_endian64(x) bitfn_swap64(x) #define fix_endian32_same(val_) val_ = ethash_swap_u32(val_)
#define fix_endian64(dst_, src_) dst_ = ethash_swap_u64(src_)
#define fix_endian64_same(val_) val_ = ethash_swap_u64(val_)
#define fix_endian_arr32(arr_, size_) \
do { \
for (unsigned i_ = 0; i_ < (size_); ++i_) { \
arr_[i_] = ethash_swap_u32(arr_[i_]); \
} \
} while (0)
#define fix_endian_arr64(arr_, size_) \
do { \
for (unsigned i_ = 0; i_ < (size_); ++i_) { \
arr_[i_] = ethash_swap_u64(arr_[i_]); \
} \
} while (0)
#else #else
# error "endian not supported" # error "endian not supported"
#endif // BYTE_ORDER #endif // BYTE_ORDER
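The swap macros above are a no-op on little-endian hosts and a byte reversal on big-endian ones. As a sketch of the behaviour expected of ethash_swap_u64 on a platform without a builtin swap (illustration only, not part of the commit):

```c
#include <stdint.h>
#include <stdio.h>

// Portable byte reversal with the same behaviour expected of ethash_swap_u64.
static uint64_t swap_u64_portable(uint64_t v)
{
	v = ((v & 0x00000000FFFFFFFFULL) << 32) | ((v & 0xFFFFFFFF00000000ULL) >> 32);
	v = ((v & 0x0000FFFF0000FFFFULL) << 16) | ((v & 0xFFFF0000FFFF0000ULL) >> 16);
	v = ((v & 0x00FF00FF00FF00FFULL) << 8)  | ((v & 0xFF00FF00FF00FF00ULL) >> 8);
	return v;
}

int main(void)
{
	uint64_t nonce = 0x0102030405060708ULL;
	// prints 0102030405060708 -> 0807060504030201
	printf("%016llx -> %016llx\n", (unsigned long long)nonce,
	       (unsigned long long)swap_u64_portable(nonce));
	return 0;
}
```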

@ -26,124 +26,121 @@
#include <stddef.h> #include <stddef.h>
#include "compiler.h" #include "compiler.h"
#define REVISION 23 #define ETHASH_REVISION 23
#define DATASET_BYTES_INIT 1073741824U // 2**30 #define ETHASH_DATASET_BYTES_INIT 1073741824U // 2**30
#define DATASET_BYTES_GROWTH 8388608U // 2**23 #define ETHASH_DATASET_BYTES_GROWTH 8388608U // 2**23
#define CACHE_BYTES_INIT 1073741824U // 2**24 #define ETHASH_CACHE_BYTES_INIT 1073741824U // 2**24
#define CACHE_BYTES_GROWTH 131072U // 2**17 #define ETHASH_CACHE_BYTES_GROWTH 131072U // 2**17
#define EPOCH_LENGTH 30000U #define ETHASH_EPOCH_LENGTH 30000U
#define MIX_BYTES 128 #define ETHASH_MIX_BYTES 128
#define HASH_BYTES 64 #define ETHASH_HASH_BYTES 64
#define DATASET_PARENTS 256 #define ETHASH_DATASET_PARENTS 256
#define CACHE_ROUNDS 3 #define ETHASH_CACHE_ROUNDS 3
#define ACCESSES 64 #define ETHASH_ACCESSES 64
#define ETHASH_DAG_MAGIC_NUM_SIZE 8
#define ETHASH_DAG_MAGIC_NUM 0xFEE1DEADBADDCAFE
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
typedef struct ethash_params { /// Type of a seedhash/blockhash etc.
uint64_t full_size; // Size of full data set (in bytes, multiple of mix size (128)). typedef struct ethash_h256 { uint8_t b[32]; } ethash_h256_t;
uint64_t cache_size; // Size of compute cache (in bytes, multiple of node size (64)).
} ethash_params;
/// Type of a blockhash // convenience macro to statically initialize an h256_t
typedef struct ethash_blockhash { uint8_t b[32]; } ethash_blockhash_t; // usage:
static inline uint8_t ethash_blockhash_get(ethash_blockhash_t const* hash, unsigned int i) // ethash_h256_t a = ethash_h256_static_init(1, 2, 3, ... )
{ // have to provide all 32 values. If you don't provide all the rest
return hash->b[i]; // will simply be uninitialized (not guaranteed to be 0)
} #define ethash_h256_static_init(...) \
{ {__VA_ARGS__} }
static inline void ethash_blockhash_set(ethash_blockhash_t *hash, unsigned int i, uint8_t v) struct ethash_light;
{ typedef struct ethash_light* ethash_light_t;
hash->b[i] = v; struct ethash_full;
} typedef struct ethash_full* ethash_full_t;
typedef int(*ethash_callback_t)(unsigned);
static inline void ethash_blockhash_reset(ethash_blockhash_t *hash)
{
memset(hash, 0, 32);
}
typedef struct ethash_return_value { typedef struct ethash_return_value {
ethash_blockhash_t result; ethash_h256_t result;
ethash_blockhash_t mix_hash; ethash_h256_t mix_hash;
} ethash_return_value; bool success;
} ethash_return_value_t;
uint64_t ethash_get_datasize(const uint32_t block_number);
uint64_t ethash_get_cachesize(const uint32_t block_number); /**
* Allocate and initialize a new ethash_light handler
// initialize the parameters *
static inline void ethash_params_init(ethash_params *params, const uint32_t block_number) { * @param block_number The block number for which to create the handler
params->full_size = ethash_get_datasize(block_number); * @return Newly allocated ethash_light handler or NULL in case of
params->cache_size = ethash_get_cachesize(block_number); * ERRNOMEM or invalid parameters used for @ref ethash_compute_cache_nodes()
} */
ethash_light_t ethash_light_new(uint64_t block_number);
typedef struct ethash_cache { /**
void *mem; * Frees a previously allocated ethash_light handler
} ethash_cache; * @param light The light handler to free
*/
void ethash_mkcache(ethash_cache *cache, ethash_params const *params, ethash_blockhash_t const *seed); void ethash_light_delete(ethash_light_t light);
void ethash_compute_full_data(void *mem, ethash_params const *params, ethash_cache const *cache); /**
void ethash_full(ethash_return_value *ret, * Calculate the light client data
void const *full_mem, *
ethash_params const *params, * @param light The light client handler
ethash_blockhash_t const *header_hash, * @param header_hash The header hash to pack into the mix
const uint64_t nonce); * @param nonce The nonce to pack into the mix
void ethash_light(ethash_return_value *ret, * @return an object of ethash_return_value_t holding the return values
ethash_cache const *cache, */
ethash_params const *params, ethash_return_value_t ethash_light_compute(
ethash_blockhash_t const *header_hash, ethash_light_t light,
const uint64_t nonce); ethash_h256_t const header_hash,
void ethash_get_seedhash(ethash_blockhash_t *seedhash, const uint32_t block_number); uint64_t nonce
);
static inline void ethash_prep_light(void *cache, ethash_params const *params, ethash_blockhash_t const* seed)
{ /**
ethash_cache c; * Allocate and initialize a new ethash_full handler
c.mem = cache; *
ethash_mkcache(&c, params, seed); * @param light The light handler containing the cache.
} * @param callback A callback function with signature of @ref ethash_callback_t
* It accepts an unsigned with which a progress of DAG calculation
static inline void ethash_compute_light(ethash_return_value *ret, void const *cache, ethash_params const *params, ethash_blockhash_t const *header_hash, const uint64_t nonce) * can be displayed. If all goes well the callback should return 0.
{ * If a non-zero value is returned then DAG generation will stop.
ethash_cache c; * Be advised. A progress value of 100 means that DAG creation is
c.mem = (void *) cache; * almost complete and that this function will soon return successfully.
ethash_light(ret, &c, params, header_hash, nonce); * It does not mean that the function has already had a successful return.
} * @return Newly allocated ethash_full handler or NULL in case of
* ERRNOMEM or invalid parameters used for @ref ethash_compute_full_data()
static inline void ethash_prep_full(void *full, ethash_params const *params, void const *cache) */
{ ethash_full_t ethash_full_new(ethash_light_t light, ethash_callback_t callback);
ethash_cache c;
c.mem = (void *) cache; /**
ethash_compute_full_data(full, params, &c); * Frees a previously allocated ethash_full handler
} * @param full The light handler to free
*/
static inline void ethash_compute_full(ethash_return_value *ret, void ethash_full_delete(ethash_full_t full);
void const *full, /**
ethash_params const *params, * Calculate the full client data
ethash_blockhash_t const *header_hash, *
const uint64_t nonce) * @param full The full client handler
{ * @param header_hash The header hash to pack into the mix
ethash_full(ret, full, params, header_hash, nonce); * @param nonce The nonce to pack into the mix
} * @return An object of ethash_return_value to hold the return value
*/
// Returns if hash is less than or equal to difficulty ethash_return_value_t ethash_full_compute(
static inline int ethash_check_difficulty(ethash_blockhash_t const *hash, ethash_full_t full,
ethash_blockhash_t const *difficulty) ethash_h256_t const header_hash,
{ uint64_t nonce
// Difficulty is big endian );
for (int i = 0; i < 32; i++) { /**
if (ethash_blockhash_get(hash, i) == ethash_blockhash_get(difficulty, i)) { * Get a pointer to the full DAG data
continue; */
} void const* ethash_full_dag(ethash_full_t full);
return ethash_blockhash_get(hash, i) < ethash_blockhash_get(difficulty, i); /**
} * Get the size of the DAG data
return 1; */
} uint64_t ethash_full_dag_size(ethash_full_t full);
int ethash_quick_check_difficulty(ethash_blockhash_t const *header_hash, /**
const uint64_t nonce, * Calculate the seedhash for a given block number
ethash_blockhash_t const *mix_hash, */
ethash_blockhash_t const *difficulty); ethash_h256_t ethash_get_seedhash(uint64_t block_number);
#ifdef __cplusplus #ifdef __cplusplus
} }
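The declarations above replace the old ethash_params/ethash_cache flow with opaque light and full handlers. A minimal usage sketch built only from these declarations; the header path, block number, header hash and nonce are illustrative assumptions, not part of the commit:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <libethash/ethash.h>

// Progress callback for DAG generation; returning non-zero aborts it.
static int dag_progress(unsigned percent)
{
	printf("DAG generation: %u%%\n", percent);
	return 0;
}

int main(void)
{
	uint64_t const block_number = 1;      // placeholder block
	uint64_t const nonce = 0x7c7c597c;    // placeholder nonce
	ethash_h256_t header_hash;
	memset(&header_hash, 0, sizeof(header_hash)); // placeholder header hash

	// Light path: only the cache is built; DAG items are computed on demand.
	ethash_light_t light = ethash_light_new(block_number);
	if (!light) {
		return 1;
	}
	ethash_return_value_t r = ethash_light_compute(light, header_hash, nonce);
	printf("light compute ok: %d\n", r.success ? 1 : 0);

	// Full path: generates (or memory-maps) the on-disk DAG, reporting progress.
	ethash_full_t full = ethash_full_new(light, dag_progress);
	if (full) {
		ethash_return_value_t rf = ethash_full_compute(full, header_hash, nonce);
		printf("full compute ok: %d, DAG bytes: %llu\n", rf.success ? 1 : 0,
		       (unsigned long long)ethash_full_dag_size(full));
		ethash_full_delete(full);
	}
	ethash_light_delete(light);
	return 0;
}
```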

@ -29,10 +29,11 @@ extern "C" {
#define FNV_PRIME 0x01000193 #define FNV_PRIME 0x01000193
static inline uint32_t fnv_hash(const uint32_t x, const uint32_t y) { static inline uint32_t fnv_hash(uint32_t const x, uint32_t const y)
return x*FNV_PRIME ^ y; {
return x * FNV_PRIME ^ y;
} }
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
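This is the FNV-like mixing primitive used throughout the commit; for instance, ethash_calculate_dag_item below selects each parent with fnv_hash(node_index ^ i, mix_word) % num_parent_nodes. A tiny self-contained illustration of that pattern (the concrete numbers are made up):

```c
#include <stdint.h>
#include <stdio.h>

#define FNV_PRIME 0x01000193

static inline uint32_t fnv_hash(uint32_t const x, uint32_t const y)
{
	return x * FNV_PRIME ^ y;
}

int main(void)
{
	uint32_t const num_parent_nodes = 262144; // example: cache size / sizeof(node)
	uint32_t const node_index = 42;           // DAG item being computed
	uint32_t const mix_word = 0xdeadbeef;     // example word from the running mix
	unsigned const i = 7;                     // parent round

	uint32_t parent_index = fnv_hash(node_index ^ i, mix_word) % num_parent_nodes;
	printf("parent index for round %u: %u\n", i, parent_index);
	return 0;
}
```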

@ -8,11 +8,11 @@
ethash is distributed in the hope that it will be useful, ethash is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details. GNU General Public License for more details.
You should have received a copy of the GNU General Public License You should have received a copy of the GNU General Public License
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>. along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
*/ */
/** @file internal.c /** @file internal.c
* @author Tim Hughes <tim@twistedfury.com> * @author Tim Hughes <tim@twistedfury.com>
@ -23,11 +23,15 @@
#include <assert.h> #include <assert.h>
#include <inttypes.h> #include <inttypes.h>
#include <stddef.h> #include <stddef.h>
#include <errno.h>
#include <math.h>
#include "mmap.h"
#include "ethash.h" #include "ethash.h"
#include "fnv.h" #include "fnv.h"
#include "endian.h" #include "endian.h"
#include "internal.h" #include "internal.h"
#include "data_sizes.h" #include "data_sizes.h"
#include "io.h"
#ifdef WITH_CRYPTOPP #ifdef WITH_CRYPTOPP
@ -37,274 +41,456 @@
#include "sha3.h" #include "sha3.h"
#endif // WITH_CRYPTOPP #endif // WITH_CRYPTOPP
uint64_t ethash_get_datasize(const uint32_t block_number) { uint64_t ethash_get_datasize(uint64_t const block_number)
assert(block_number / EPOCH_LENGTH < 2048); {
return dag_sizes[block_number / EPOCH_LENGTH]; assert(block_number / ETHASH_EPOCH_LENGTH < 2048);
return dag_sizes[block_number / ETHASH_EPOCH_LENGTH];
} }
uint64_t ethash_get_cachesize(const uint32_t block_number) { uint64_t ethash_get_cachesize(uint64_t const block_number)
assert(block_number / EPOCH_LENGTH < 2048); {
return cache_sizes[block_number / EPOCH_LENGTH]; assert(block_number / ETHASH_EPOCH_LENGTH < 2048);
return cache_sizes[block_number / ETHASH_EPOCH_LENGTH];
} }
// Follows Sergio's "STRICT MEMORY HARD HASHING FUNCTIONS" (2014) // Follows Sergio's "STRICT MEMORY HARD HASHING FUNCTIONS" (2014)
// https://bitslog.files.wordpress.com/2013/12/memohash-v0-3.pdf // https://bitslog.files.wordpress.com/2013/12/memohash-v0-3.pdf
// SeqMemoHash(s, R, N) // SeqMemoHash(s, R, N)
void static ethash_compute_cache_nodes(node *const nodes, bool static ethash_compute_cache_nodes(
ethash_params const *params, node* const nodes,
ethash_blockhash_t const* seed) uint64_t cache_size,
{ ethash_h256_t const* seed
assert((params->cache_size % sizeof(node)) == 0); )
uint32_t const num_nodes = (uint32_t) (params->cache_size / sizeof(node));
SHA3_512(nodes[0].bytes, (uint8_t*)seed, 32);
for (unsigned i = 1; i != num_nodes; ++i) {
SHA3_512(nodes[i].bytes, nodes[i - 1].bytes, 64);
}
for (unsigned j = 0; j != CACHE_ROUNDS; j++) {
for (unsigned i = 0; i != num_nodes; i++) {
uint32_t const idx = nodes[i].words[0] % num_nodes;
node data;
data = nodes[(num_nodes - 1 + i) % num_nodes];
for (unsigned w = 0; w != NODE_WORDS; ++w) {
data.words[w] ^= nodes[idx].words[w];
}
SHA3_512(nodes[i].bytes, data.bytes, sizeof(data));
}
}
// now perform endian conversion
#if BYTE_ORDER != LITTLE_ENDIAN
for (unsigned w = 0; w != (num_nodes*NODE_WORDS); ++w)
{
nodes->words[w] = fix_endian32(nodes->words[w]);
}
#endif
}
void ethash_mkcache(ethash_cache *cache,
ethash_params const *params,
ethash_blockhash_t const* seed)
{ {
node *nodes = (node *) cache->mem; if (cache_size % sizeof(node) != 0) {
ethash_compute_cache_nodes(nodes, params, seed); return false;
}
uint32_t const num_nodes = (uint32_t) (cache_size / sizeof(node));
SHA3_512(nodes[0].bytes, (uint8_t*)seed, 32);
for (uint32_t i = 1; i != num_nodes; ++i) {
SHA3_512(nodes[i].bytes, nodes[i - 1].bytes, 64);
}
for (uint32_t j = 0; j != ETHASH_CACHE_ROUNDS; j++) {
for (uint32_t i = 0; i != num_nodes; i++) {
uint32_t const idx = nodes[i].words[0] % num_nodes;
node data;
data = nodes[(num_nodes - 1 + i) % num_nodes];
for (uint32_t w = 0; w != NODE_WORDS; ++w) {
data.words[w] ^= nodes[idx].words[w];
}
SHA3_512(nodes[i].bytes, data.bytes, sizeof(data));
}
}
// now perform endian conversion
fix_endian_arr32(nodes->words, num_nodes * NODE_WORDS);
return true;
} }
void ethash_calculate_dag_item(node *const ret, void ethash_calculate_dag_item(
const unsigned node_index, node* const ret,
const struct ethash_params *params, uint32_t node_index,
const struct ethash_cache *cache) ethash_light_t const light
)
{ {
uint32_t num_parent_nodes = (uint32_t) (params->cache_size / sizeof(node)); uint32_t num_parent_nodes = (uint32_t) (light->cache_size / sizeof(node));
node const *cache_nodes = (node const *) cache->mem; node const* cache_nodes = (node const *) light->cache;
node const *init = &cache_nodes[node_index % num_parent_nodes]; node const* init = &cache_nodes[node_index % num_parent_nodes];
memcpy(ret, init, sizeof(node));
memcpy(ret, init, sizeof(node)); ret->words[0] ^= node_index;
ret->words[0] ^= node_index; SHA3_512(ret->bytes, ret->bytes, sizeof(node));
SHA3_512(ret->bytes, ret->bytes, sizeof(node));
#if defined(_M_X64) && ENABLE_SSE #if defined(_M_X64) && ENABLE_SSE
__m128i const fnv_prime = _mm_set1_epi32(FNV_PRIME); __m128i const fnv_prime = _mm_set1_epi32(FNV_PRIME);
__m128i xmm0 = ret->xmm[0]; __m128i xmm0 = ret->xmm[0];
__m128i xmm1 = ret->xmm[1]; __m128i xmm1 = ret->xmm[1];
__m128i xmm2 = ret->xmm[2]; __m128i xmm2 = ret->xmm[2];
__m128i xmm3 = ret->xmm[3]; __m128i xmm3 = ret->xmm[3];
#endif #endif
for (unsigned i = 0; i != DATASET_PARENTS; ++i) { for (uint32_t i = 0; i != ETHASH_DATASET_PARENTS; ++i) {
uint32_t parent_index = ((node_index ^ i) * FNV_PRIME ^ ret->words[i % NODE_WORDS]) % num_parent_nodes; uint32_t parent_index = fnv_hash(node_index ^ i, ret->words[i % NODE_WORDS]) % num_parent_nodes;
node const *parent = &cache_nodes[parent_index]; node const *parent = &cache_nodes[parent_index];
#if defined(_M_X64) && ENABLE_SSE #if defined(_M_X64) && ENABLE_SSE
{ {
xmm0 = _mm_mullo_epi32(xmm0, fnv_prime); xmm0 = _mm_mullo_epi32(xmm0, fnv_prime);
xmm1 = _mm_mullo_epi32(xmm1, fnv_prime); xmm1 = _mm_mullo_epi32(xmm1, fnv_prime);
xmm2 = _mm_mullo_epi32(xmm2, fnv_prime); xmm2 = _mm_mullo_epi32(xmm2, fnv_prime);
xmm3 = _mm_mullo_epi32(xmm3, fnv_prime); xmm3 = _mm_mullo_epi32(xmm3, fnv_prime);
xmm0 = _mm_xor_si128(xmm0, parent->xmm[0]); xmm0 = _mm_xor_si128(xmm0, parent->xmm[0]);
xmm1 = _mm_xor_si128(xmm1, parent->xmm[1]); xmm1 = _mm_xor_si128(xmm1, parent->xmm[1]);
xmm2 = _mm_xor_si128(xmm2, parent->xmm[2]); xmm2 = _mm_xor_si128(xmm2, parent->xmm[2]);
xmm3 = _mm_xor_si128(xmm3, parent->xmm[3]); xmm3 = _mm_xor_si128(xmm3, parent->xmm[3]);
// have to write to ret as values are used to compute index // have to write to ret as values are used to compute index
ret->xmm[0] = xmm0; ret->xmm[0] = xmm0;
ret->xmm[1] = xmm1; ret->xmm[1] = xmm1;
ret->xmm[2] = xmm2; ret->xmm[2] = xmm2;
ret->xmm[3] = xmm3; ret->xmm[3] = xmm3;
} }
#else #else
{ {
for (unsigned w = 0; w != NODE_WORDS; ++w) { for (unsigned w = 0; w != NODE_WORDS; ++w) {
ret->words[w] = fnv_hash(ret->words[w], parent->words[w]); ret->words[w] = fnv_hash(ret->words[w], parent->words[w]);
} }
} }
#endif #endif
} }
SHA3_512(ret->bytes, ret->bytes, sizeof(node));
SHA3_512(ret->bytes, ret->bytes, sizeof(node));
} }
void ethash_compute_full_data( bool ethash_compute_full_data(
void *mem, void* mem,
ethash_params const *params, uint64_t full_size,
ethash_cache const *cache) { ethash_light_t const light,
assert((params->full_size % (sizeof(uint32_t) * MIX_WORDS)) == 0); ethash_callback_t callback
assert((params->full_size % sizeof(node)) == 0); )
node *full_nodes = mem; {
if (full_size % (sizeof(uint32_t) * MIX_WORDS) != 0 ||
// now compute full nodes (full_size % sizeof(node)) != 0) {
for (unsigned n = 0; n != (params->full_size / sizeof(node)); ++n) { return false;
ethash_calculate_dag_item(&(full_nodes[n]), n, params, cache); }
} uint32_t const max_n = (uint32_t)(full_size / sizeof(node));
node* full_nodes = mem;
double const progress_change = 1.0f / max_n;
double progress = 0.0f;
// now compute full nodes
for (uint32_t n = 0; n != max_n; ++n) {
if (callback &&
n % (max_n / 100) == 0 &&
callback((unsigned int)(ceil(progress * 100.0f))) != 0) {
return false;
}
progress += progress_change;
ethash_calculate_dag_item(&(full_nodes[n]), n, light);
}
return true;
} }
static void ethash_hash(ethash_return_value *ret, static bool ethash_hash(
node const *full_nodes, ethash_return_value_t* ret,
ethash_cache const *cache, node const* full_nodes,
ethash_params const *params, ethash_light_t const light,
ethash_blockhash_t const *header_hash, uint64_t full_size,
const uint64_t nonce) ethash_h256_t const header_hash,
uint64_t const nonce
)
{ {
if (full_size % MIX_WORDS != 0) {
return false;
}
// pack hash and nonce together into first 40 bytes of s_mix
assert(sizeof(node) * 8 == 512);
node s_mix[MIX_NODES + 1];
memcpy(s_mix[0].bytes, &header_hash, 32);
fix_endian64(s_mix[0].double_words[4], nonce);
// compute sha3-512 hash and replicate across mix
SHA3_512(s_mix->bytes, s_mix->bytes, 40);
fix_endian_arr32(s_mix[0].words, 16);
node* const mix = s_mix + 1;
for (uint32_t w = 0; w != MIX_WORDS; ++w) {
mix->words[w] = s_mix[0].words[w % NODE_WORDS];
}
unsigned const page_size = sizeof(uint32_t) * MIX_WORDS;
unsigned const num_full_pages = (unsigned) (full_size / page_size);
for (unsigned i = 0; i != ETHASH_ACCESSES; ++i) {
uint32_t const index = fnv_hash(s_mix->words[0] ^ i, mix->words[i % MIX_WORDS]) % num_full_pages;
for (unsigned n = 0; n != MIX_NODES; ++n) {
node const* dag_node;
if (full_nodes) {
dag_node = &full_nodes[MIX_NODES * index + n];
} else {
node tmp_node;
ethash_calculate_dag_item(&tmp_node, index * MIX_NODES + n, light);
dag_node = &tmp_node;
}
assert((params->full_size % MIX_WORDS) == 0); #if defined(_M_X64) && ENABLE_SSE
{
// pack hash and nonce together into first 40 bytes of s_mix __m128i fnv_prime = _mm_set1_epi32(FNV_PRIME);
assert(sizeof(node) * 8 == 512); __m128i xmm0 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[0]);
node s_mix[MIX_NODES + 1]; __m128i xmm1 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[1]);
memcpy(s_mix[0].bytes, header_hash, 32); __m128i xmm2 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[2]);
__m128i xmm3 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[3]);
#if BYTE_ORDER != LITTLE_ENDIAN mix[n].xmm[0] = _mm_xor_si128(xmm0, dag_node->xmm[0]);
s_mix[0].double_words[4] = fix_endian64(nonce); mix[n].xmm[1] = _mm_xor_si128(xmm1, dag_node->xmm[1]);
#else mix[n].xmm[2] = _mm_xor_si128(xmm2, dag_node->xmm[2]);
s_mix[0].double_words[4] = nonce; mix[n].xmm[3] = _mm_xor_si128(xmm3, dag_node->xmm[3]);
}
#else
{
for (unsigned w = 0; w != NODE_WORDS; ++w) {
mix[n].words[w] = fnv_hash(mix[n].words[w], dag_node->words[w]);
}
}
#endif #endif
}
}
// compress mix
for (uint32_t w = 0; w != MIX_WORDS; w += 4) {
uint32_t reduction = mix->words[w + 0];
reduction = reduction * FNV_PRIME ^ mix->words[w + 1];
reduction = reduction * FNV_PRIME ^ mix->words[w + 2];
reduction = reduction * FNV_PRIME ^ mix->words[w + 3];
mix->words[w / 4] = reduction;
}
fix_endian_arr32(mix->words, MIX_WORDS / 4);
memcpy(&ret->mix_hash, mix->bytes, 32);
// final Keccak hash
SHA3_256(&ret->result, s_mix->bytes, 64 + 32); // Keccak-256(s + compressed_mix)
return true;
}
// compute sha3-512 hash and replicate across mix void ethash_quick_hash(
SHA3_512(s_mix->bytes, s_mix->bytes, 40); ethash_h256_t* return_hash,
ethash_h256_t const* header_hash,
#if BYTE_ORDER != LITTLE_ENDIAN uint64_t const nonce,
for (unsigned w = 0; w != 16; ++w) { ethash_h256_t const* mix_hash
s_mix[0].words[w] = fix_endian32(s_mix[0].words[w]); )
} {
#endif uint8_t buf[64 + 32];
memcpy(buf, header_hash, 32);
fix_endian64_same(nonce);
memcpy(&(buf[32]), &nonce, 8);
SHA3_512(buf, buf, 40);
memcpy(&(buf[64]), mix_hash, 32);
SHA3_256(return_hash, buf, 64 + 32);
}
node *const mix = s_mix + 1; ethash_h256_t ethash_get_seedhash(uint64_t block_number)
for (unsigned w = 0; w != MIX_WORDS; ++w) { {
mix->words[w] = s_mix[0].words[w % NODE_WORDS]; ethash_h256_t ret;
} ethash_h256_reset(&ret);
uint64_t const epochs = block_number / ETHASH_EPOCH_LENGTH;
for (uint32_t i = 0; i < epochs; ++i)
SHA3_256(&ret, (uint8_t*)&ret, 32);
return ret;
}
unsigned const int ethash_quick_check_difficulty(
page_size = sizeof(uint32_t) * MIX_WORDS, ethash_h256_t const* header_hash,
num_full_pages = (unsigned) (params->full_size / page_size); uint64_t const nonce,
ethash_h256_t const* mix_hash,
ethash_h256_t const* difficulty
)
{
ethash_h256_t return_hash;
ethash_quick_hash(&return_hash, header_hash, nonce, mix_hash);
return ethash_check_difficulty(&return_hash, difficulty);
}
for (unsigned i = 0; i != ACCESSES; ++i) { ethash_light_t ethash_light_new_internal(uint64_t cache_size, ethash_h256_t const* seed)
uint32_t const index = ((s_mix->words[0] ^ i) * FNV_PRIME ^ mix->words[i % MIX_WORDS]) % num_full_pages; {
struct ethash_light *ret;
ret = calloc(sizeof(*ret), 1);
if (!ret) {
return NULL;
}
ret->cache = malloc((size_t)cache_size);
if (!ret->cache) {
goto fail_free_light;
}
node* nodes = (node*)ret->cache;
if (!ethash_compute_cache_nodes(nodes, cache_size, seed)) {
goto fail_free_cache_mem;
}
ret->cache_size = cache_size;
return ret;
fail_free_cache_mem:
free(ret->cache);
fail_free_light:
free(ret);
return NULL;
}
for (unsigned n = 0; n != MIX_NODES; ++n) { ethash_light_t ethash_light_new(uint64_t block_number)
const node *dag_node = &full_nodes[MIX_NODES * index + n]; {
ethash_h256_t seedhash = ethash_get_seedhash(block_number);
ethash_light_t ret;
ret = ethash_light_new_internal(ethash_get_cachesize(block_number), &seedhash);
ret->block_number = block_number;
return ret;
}
if (!full_nodes) { void ethash_light_delete(ethash_light_t light)
node tmp_node; {
ethash_calculate_dag_item(&tmp_node, index * MIX_NODES + n, params, cache); if (light->cache) {
dag_node = &tmp_node; free(light->cache);
} }
free(light);
}
#if defined(_M_X64) && ENABLE_SSE ethash_return_value_t ethash_light_compute_internal(
{ ethash_light_t light,
__m128i fnv_prime = _mm_set1_epi32(FNV_PRIME); uint64_t full_size,
__m128i xmm0 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[0]); ethash_h256_t const header_hash,
__m128i xmm1 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[1]); uint64_t nonce
__m128i xmm2 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[2]); )
__m128i xmm3 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[3]); {
mix[n].xmm[0] = _mm_xor_si128(xmm0, dag_node->xmm[0]); ethash_return_value_t ret;
mix[n].xmm[1] = _mm_xor_si128(xmm1, dag_node->xmm[1]); ret.success = true;
mix[n].xmm[2] = _mm_xor_si128(xmm2, dag_node->xmm[2]); if (!ethash_hash(&ret, NULL, light, full_size, header_hash, nonce)) {
mix[n].xmm[3] = _mm_xor_si128(xmm3, dag_node->xmm[3]); ret.success = false;
} }
#else return ret;
{ }
for (unsigned w = 0; w != NODE_WORDS; ++w) {
mix[n].words[w] = fnv_hash(mix[n].words[w], dag_node->words[w]);
}
}
#endif
}
}
// compress mix
for (unsigned w = 0; w != MIX_WORDS; w += 4) {
uint32_t reduction = mix->words[w + 0];
reduction = reduction * FNV_PRIME ^ mix->words[w + 1];
reduction = reduction * FNV_PRIME ^ mix->words[w + 2];
reduction = reduction * FNV_PRIME ^ mix->words[w + 3];
mix->words[w / 4] = reduction;
}
#if BYTE_ORDER != LITTLE_ENDIAN
for (unsigned w = 0; w != MIX_WORDS/4; ++w) {
mix->words[w] = fix_endian32(mix->words[w]);
}
#endif
memcpy(&ret->mix_hash, mix->bytes, 32); ethash_return_value_t ethash_light_compute(
// final Keccak hash ethash_light_t light,
SHA3_256(&ret->result, s_mix->bytes, 64 + 32); // Keccak-256(s + compressed_mix) ethash_h256_t const header_hash,
uint64_t nonce
)
{
uint64_t full_size = ethash_get_datasize(light->block_number);
return ethash_light_compute_internal(light, full_size, header_hash, nonce);
} }
void ethash_quick_hash(ethash_blockhash_t *return_hash, static bool ethash_mmap(struct ethash_full* ret, FILE* f)
ethash_blockhash_t const *header_hash,
const uint64_t nonce,
ethash_blockhash_t const *mix_hash)
{ {
int fd;
char* mmapped_data;
ret->file = f;
if ((fd = ethash_fileno(ret->file)) == -1) {
return false;
}
mmapped_data= mmap(
NULL,
(size_t)ret->file_size + ETHASH_DAG_MAGIC_NUM_SIZE,
PROT_READ | PROT_WRITE,
MAP_SHARED,
fd,
0
);
if (mmapped_data == MAP_FAILED) {
return false;
}
ret->data = (node*)(mmapped_data + ETHASH_DAG_MAGIC_NUM_SIZE);
return true;
}
uint8_t buf[64 + 32]; ethash_full_t ethash_full_new_internal(
memcpy(buf, header_hash, 32); char const* dirname,
#if BYTE_ORDER != LITTLE_ENDIAN ethash_h256_t const seed_hash,
nonce = fix_endian64(nonce); uint64_t full_size,
#endif ethash_light_t const light,
memcpy(&(buf[32]), &nonce, 8); ethash_callback_t callback
SHA3_512(buf, buf, 40); )
memcpy(&(buf[64]), mix_hash, 32); {
SHA3_256(return_hash, buf, 64 + 32); struct ethash_full* ret;
FILE *f = NULL;
ret = calloc(sizeof(*ret), 1);
if (!ret) {
return NULL;
}
ret->file_size = (size_t)full_size;
switch (ethash_io_prepare(dirname, seed_hash, &f, (size_t)full_size, false)) {
case ETHASH_IO_FAIL:
goto fail_free_full;
case ETHASH_IO_MEMO_MATCH:
if (!ethash_mmap(ret, f)) {
goto fail_close_file;
}
return ret;
case ETHASH_IO_MEMO_SIZE_MISMATCH:
// if a DAG of same filename but unexpected size is found, silently force new file creation
if (ethash_io_prepare(dirname, seed_hash, &f, (size_t)full_size, true) != ETHASH_IO_MEMO_MISMATCH) {
goto fail_free_full;
}
// fallthrough to the mismatch case here, DO NOT go through match
case ETHASH_IO_MEMO_MISMATCH:
if (!ethash_mmap(ret, f)) {
goto fail_close_file;
}
break;
}
if (!ethash_compute_full_data(ret->data, full_size, light, callback)) {
goto fail_free_full_data;
}
// after the DAG has been filled we finalize it by writing the magic number at the beginning
if (fseek(f, 0, SEEK_SET) != 0) {
goto fail_free_full_data;
}
uint64_t const magic_num = ETHASH_DAG_MAGIC_NUM;
if (fwrite(&magic_num, ETHASH_DAG_MAGIC_NUM_SIZE, 1, f) != 1) {
goto fail_free_full_data;
}
fflush(f); // make sure the magic number IS there
return ret;
fail_free_full_data:
// could check that munmap(..) == 0 but even if it did not can't really do anything here
munmap(ret->data, (size_t)full_size);
fail_close_file:
fclose(ret->file);
fail_free_full:
free(ret);
return NULL;
} }
void ethash_get_seedhash(ethash_blockhash_t *seedhash, const uint32_t block_number) ethash_full_t ethash_full_new(ethash_light_t light, ethash_callback_t callback)
{ {
ethash_blockhash_reset(seedhash); char strbuf[256];
const uint32_t epochs = block_number / EPOCH_LENGTH; if (!ethash_get_default_dirname(strbuf, 256)) {
for (uint32_t i = 0; i < epochs; ++i) return NULL;
SHA3_256(seedhash, (uint8_t*)seedhash, 32); }
uint64_t full_size = ethash_get_datasize(light->block_number);
ethash_h256_t seedhash = ethash_get_seedhash(light->block_number);
return ethash_full_new_internal(strbuf, seedhash, full_size, light, callback);
} }
int ethash_quick_check_difficulty(ethash_blockhash_t const *header_hash, void ethash_full_delete(ethash_full_t full)
const uint64_t nonce,
ethash_blockhash_t const *mix_hash,
ethash_blockhash_t const *difficulty)
{ {
// could check that munmap(..) == 0 but even if it did not can't really do anything here
munmap(full->data, (size_t)full->file_size);
if (full->file) {
fclose(full->file);
}
free(full);
}
ethash_blockhash_t return_hash; ethash_return_value_t ethash_full_compute(
ethash_quick_hash(&return_hash, header_hash, nonce, mix_hash); ethash_full_t full,
return ethash_check_difficulty(&return_hash, difficulty); ethash_h256_t const header_hash,
uint64_t nonce
)
{
ethash_return_value_t ret;
ret.success = true;
if (!ethash_hash(
&ret,
(node const*)full->data,
NULL,
full->file_size,
header_hash,
nonce)) {
ret.success = false;
}
return ret;
} }
void ethash_full(ethash_return_value *ret, void const* ethash_full_dag(ethash_full_t full)
void const *full_mem,
ethash_params const *params,
ethash_blockhash_t const *header_hash,
const uint64_t nonce)
{ {
ethash_hash(ret, (node const *) full_mem, NULL, params, header_hash, nonce); return full->data;
} }
void ethash_light(ethash_return_value *ret, uint64_t ethash_full_dag_size(ethash_full_t full)
ethash_cache const *cache,
ethash_params const *params,
ethash_blockhash_t const *header_hash,
const uint64_t nonce)
{ {
ethash_hash(ret, NULL, cache, params, header_hash, nonce); return full->file_size;
} }

@ -2,6 +2,7 @@
#include "compiler.h" #include "compiler.h"
#include "endian.h" #include "endian.h"
#include "ethash.h" #include "ethash.h"
#include <stdio.h>
#define ENABLE_SSE 0 #define ENABLE_SSE 0
@ -15,14 +16,14 @@ extern "C" {
// compile time settings // compile time settings
#define NODE_WORDS (64/4) #define NODE_WORDS (64/4)
#define MIX_WORDS (MIX_BYTES/4) #define MIX_WORDS (ETHASH_MIX_BYTES/4)
#define MIX_NODES (MIX_WORDS / NODE_WORDS) #define MIX_NODES (MIX_WORDS / NODE_WORDS)
#include <stdint.h> #include <stdint.h>
typedef union node { typedef union node {
uint8_t bytes[NODE_WORDS * 4]; uint8_t bytes[NODE_WORDS * 4];
uint32_t words[NODE_WORDS]; uint32_t words[NODE_WORDS];
uint64_t double_words[NODE_WORDS / 2]; uint64_t double_words[NODE_WORDS / 2];
#if defined(_M_X64) && ENABLE_SSE #if defined(_M_X64) && ENABLE_SSE
__m128i xmm[NODE_WORDS/4]; __m128i xmm[NODE_WORDS/4];
@ -30,15 +31,139 @@ typedef union node {
} node; } node;
void ethash_calculate_dag_item(node *const ret, static inline uint8_t ethash_h256_get(ethash_h256_t const* hash, unsigned int i)
const unsigned node_index, {
ethash_params const *params, return hash->b[i];
ethash_cache const *cache); }
static inline void ethash_h256_set(ethash_h256_t* hash, unsigned int i, uint8_t v)
{
hash->b[i] = v;
}
static inline void ethash_h256_reset(ethash_h256_t* hash)
{
memset(hash, 0, 32);
}
// Returns if hash is less than or equal to difficulty
static inline int ethash_check_difficulty(
ethash_h256_t const* hash,
ethash_h256_t const* difficulty
)
{
// Difficulty is big endian
for (int i = 0; i < 32; i++) {
if (ethash_h256_get(hash, i) == ethash_h256_get(difficulty, i)) {
continue;
}
return ethash_h256_get(hash, i) < ethash_h256_get(difficulty, i);
}
return 1;
}
int ethash_quick_check_difficulty(
ethash_h256_t const* header_hash,
uint64_t const nonce,
ethash_h256_t const* mix_hash,
ethash_h256_t const* difficulty
);
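The two helpers above implement the big-endian boundary comparison and the cheap header/nonce/mix recheck. A hedged usage sketch assuming this internal.h is on the include path (all input values are placeholders):

```c
#include <stdint.h>
#include <string.h>
#include "internal.h"

// Returns non-zero if the (header, nonce, mix) triple satisfies the boundary.
int verify_sketch(void)
{
	ethash_h256_t header_hash, mix_hash, boundary;
	memset(&header_hash, 0, sizeof(header_hash)); // placeholder header hash
	memset(&mix_hash, 0, sizeof(mix_hash));       // placeholder mix digest
	memset(&boundary, 0xff, sizeof(boundary));    // easiest possible target
	uint64_t const nonce = 0x0102030405060708ULL;

	// Recomputes Keccak-256(Keccak-512(header ++ nonce) ++ mix) and compares
	// it byte-wise, big-endian, against the boundary.
	return ethash_quick_check_difficulty(&header_hash, nonce, &mix_hash, &boundary);
}
```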
struct ethash_light {
void* cache;
uint64_t cache_size;
uint64_t block_number;
};
/**
* Allocate and initialize a new ethash_light handler. Internal version
*
* @param cache_size The size of the cache in bytes
* @param seed Block seedhash to be used during the computation of the
* cache nodes
* @return Newly allocated ethash_light handler or NULL in case of
* ERRNOMEM or invalid parameters used for @ref ethash_compute_cache_nodes()
*/
ethash_light_t ethash_light_new_internal(uint64_t cache_size, ethash_h256_t const* seed);
/**
* Calculate the light client data. Internal version.
*
* @param light The light client handler
* @param full_size The size of the full data in bytes.
* @param header_hash The header hash to pack into the mix
* @param nonce The nonce to pack into the mix
* @return The resulting hash.
*/
ethash_return_value_t ethash_light_compute_internal(
ethash_light_t light,
uint64_t full_size,
ethash_h256_t const header_hash,
uint64_t nonce
);
struct ethash_full {
FILE* file;
uint64_t file_size;
node* data;
};
/**
* Allocate and initialize a new ethash_full handler. Internal version.
*
* @param dirname The directory in which to put the DAG file.
* @param seedhash The seed hash of the block. Used in the DAG file naming.
* @param full_size The size of the full data in bytes.
* @param light The light handler whose cache is used, allocated with @ref ethash_light_new().
* Iff this function succeeds the ethash_full_t will take
* memory ownership of the cache and free it at deletion. If
* not then the user still has to handle freeing of the cache himself.
* @param callback A callback function with signature of @ref ethash_callback_t
* It accepts an unsigned with which a progress of DAG calculation
* can be displayed. If all goes well the callback should return 0.
* If a non-zero value is returned then DAG generation will stop.
* @return Newly allocated ethash_full handler or NULL in case of
* ERRNOMEM or invalid parameters used for @ref ethash_compute_full_data()
*/
ethash_full_t ethash_full_new_internal(
char const* dirname,
ethash_h256_t const seed_hash,
uint64_t full_size,
ethash_light_t const light,
ethash_callback_t callback
);
void ethash_calculate_dag_item(
node* const ret,
uint32_t node_index,
ethash_light_t const cache
);
void ethash_quick_hash(
ethash_h256_t* return_hash,
ethash_h256_t const* header_hash,
const uint64_t nonce,
ethash_h256_t const* mix_hash
);
uint64_t ethash_get_datasize(uint64_t const block_number);
uint64_t ethash_get_cachesize(uint64_t const block_number);
void ethash_quick_hash(ethash_blockhash_t *return_hash, /**
ethash_blockhash_t const *header_hash, * Compute the memory data for a full node's memory
const uint64_t nonce, *
ethash_blockhash_t const *mix_hash); * @param mem A pointer to an ethash full's memory
* @param full_size The size of the full data in bytes
* @param cache A cache object to use in the calculation
* @param callback The callback function. Check @ref ethash_full_new() for details.
* @return true if all went fine and false for invalid parameters
*/
bool ethash_compute_full_data(
void* mem,
uint64_t full_size,
ethash_light_t const light,
ethash_callback_t callback
);
#ifdef __cplusplus #ifdef __cplusplus
} }

@ -22,68 +22,81 @@
#include <string.h> #include <string.h>
#include <stdio.h> #include <stdio.h>
// silly macro to save some typing enum ethash_io_rc ethash_io_prepare(
#define PASS_ARR(c_) (c_), sizeof(c_) char const* dirname,
ethash_h256_t const seedhash,
static bool ethash_io_write_file(char const *dirname, FILE** output_file,
char const* filename, uint64_t file_size,
size_t filename_length, bool force_create
void const* data, )
size_t data_size)
{
bool ret = false;
char *fullname = ethash_io_create_filename(dirname, filename, filename_length);
if (!fullname) {
return false;
}
FILE *f = fopen(fullname, "wb");
if (!f) {
goto free_name;
}
if (data_size != fwrite(data, 1, data_size, f)) {
goto close;
}
ret = true;
close:
fclose(f);
free_name:
free(fullname);
return ret;
}
bool ethash_io_write(char const *dirname,
ethash_params const* params,
ethash_blockhash_t seedhash,
void const* cache,
uint8_t **data,
uint64_t *data_size)
{ {
char info_buffer[DAG_MEMO_BYTESIZE]; char mutable_name[DAG_MUTABLE_NAME_MAX_SIZE];
// allocate the bytes enum ethash_io_rc ret = ETHASH_IO_FAIL;
uint8_t *temp_data_ptr = malloc((size_t)params->full_size);
if (!temp_data_ptr) {
goto end;
}
ethash_compute_full_data(temp_data_ptr, params, cache);
if (!ethash_io_write_file(dirname, PASS_ARR(DAG_FILE_NAME), temp_data_ptr, (size_t)params->full_size)) { // assert directory exists
goto fail_free; if (!ethash_mkdir(dirname)) {
} goto end;
}
ethash_io_serialize_info(REVISION, seedhash, info_buffer); ethash_io_mutable_name(ETHASH_REVISION, &seedhash, mutable_name);
if (!ethash_io_write_file(dirname, PASS_ARR(DAG_MEMO_NAME), info_buffer, DAG_MEMO_BYTESIZE)) { char* tmpfile = ethash_io_create_filename(dirname, mutable_name, strlen(mutable_name));
goto fail_free; if (!tmpfile) {
} goto end;
}
*data = temp_data_ptr; FILE *f;
*data_size = params->full_size; if (!force_create) {
return true; // try to open the file
f = ethash_fopen(tmpfile, "rb+");
if (f) {
size_t found_size;
if (!ethash_file_size(f, &found_size)) {
fclose(f);
goto free_memo;
}
if (file_size != found_size - ETHASH_DAG_MAGIC_NUM_SIZE) {
fclose(f);
ret = ETHASH_IO_MEMO_SIZE_MISMATCH;
goto free_memo;
}
// compare the magic number, no need to care about endianness since it's local
uint64_t magic_num;
if (fread(&magic_num, ETHASH_DAG_MAGIC_NUM_SIZE, 1, f) != 1) {
// I/O error
fclose(f);
ret = ETHASH_IO_MEMO_SIZE_MISMATCH;
goto free_memo;
}
if (magic_num != ETHASH_DAG_MAGIC_NUM) {
fclose(f);
ret = ETHASH_IO_MEMO_SIZE_MISMATCH;
goto free_memo;
}
ret = ETHASH_IO_MEMO_MATCH;
goto set_file;
}
}
// file does not exist, will need to be created
f = ethash_fopen(tmpfile, "wb+");
if (!f) {
goto free_memo;
}
// make sure it's of the proper size
if (fseek(f, (long int)(file_size + ETHASH_DAG_MAGIC_NUM_SIZE - 1), SEEK_SET) != 0) {
fclose(f);
goto free_memo;
}
fputc('\n', f);
fflush(f);
ret = ETHASH_IO_MEMO_MISMATCH;
goto set_file;
fail_free: ret = ETHASH_IO_MEMO_MATCH;
free(temp_data_ptr); set_file:
*output_file = f;
free_memo:
free(tmpfile);
end: end:
return false; return ret;
} }
#undef PASS_ARR
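The rewritten ethash_io_prepare above establishes the on-disk layout this commit introduces: an 8-byte magic number followed by the DAG data, with the file pre-sized before generation. A stand-alone sketch that checks an existing file against that layout (not part of the commit; error handling is simplified and large-file ftell limits are ignored):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETHASH_DAG_MAGIC_NUM_SIZE 8
#define ETHASH_DAG_MAGIC_NUM 0xFEE1DEADBADDCAFE

static bool dag_file_looks_valid(char const* path, uint64_t expected_data_size)
{
	FILE* f = fopen(path, "rb");
	if (!f) {
		return false;
	}
	uint64_t magic = 0;
	bool ok = fread(&magic, ETHASH_DAG_MAGIC_NUM_SIZE, 1, f) == 1 &&
	          magic == ETHASH_DAG_MAGIC_NUM;
	if (ok) {
		// the DAG data starts right after the magic number
		fseek(f, 0, SEEK_END);
		ok = (uint64_t)ftell(f) == expected_data_size + ETHASH_DAG_MAGIC_NUM_SIZE;
	}
	fclose(f);
	return ok;
}
```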

@ -22,93 +22,162 @@
#include <stdlib.h> #include <stdlib.h>
#include <stdint.h> #include <stdint.h>
#include <stdbool.h> #include <stdbool.h>
#include <stdio.h>
#ifdef __cplusplus
#define __STDC_FORMAT_MACROS 1
#endif
#include <inttypes.h>
#include "endian.h"
#include "ethash.h" #include "ethash.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
// Maximum size for mutable part of DAG file name
static const char DAG_FILE_NAME[] = "full"; // 10 is for maximum number of digits of a uint32_t (for REVISION)
static const char DAG_MEMO_NAME[] = "full.info"; // 1 is for _ and 16 is for the first 16 hex digits for first 8 bytes of
// MSVC thinks that "static const unsigned int" is not a compile time variable. Sorry for the #define :( // the seedhash and last 1 is for the null terminating character
#define DAG_MEMO_BYTESIZE 36 // Reference: https://github.com/ethereum/wiki/wiki/Ethash-DAG
#define DAG_MUTABLE_NAME_MAX_SIZE (10 + 1 + 16 + 1)
/// Possible return values of @see ethash_io_prepare /// Possible return values of @see ethash_io_prepare
enum ethash_io_rc { enum ethash_io_rc {
ETHASH_IO_FAIL = 0, ///< There has been an IO failure ETHASH_IO_FAIL = 0, ///< There has been an IO failure
ETHASH_IO_MEMO_MISMATCH, ///< Memo file either did not exist or there was content mismatch ETHASH_IO_MEMO_SIZE_MISMATCH, ///< DAG with revision/hash match, but file size was wrong.
ETHASH_IO_MEMO_MATCH, ///< Memo file existed and contents matched. No need to do anything ETHASH_IO_MEMO_MISMATCH, ///< The DAG file did not exist or there was revision/hash mismatch
ETHASH_IO_MEMO_MATCH, ///< DAG file existed and revision/hash matched. No need to do anything
}; };
// small hack for windows. I don't feel I should use va_args and forward just
// to have this one function properly cross-platform abstracted
#if defined(_WIN32) && !defined(__GNUC__)
#define snprintf(...) sprintf_s(__VA_ARGS__)
#endif
/** /**
* Prepares io for ethash * Prepares io for ethash
* *
* Create the DAG directory if it does not exist, and check if the memo file matches. * Create the DAG directory and the DAG file if they don't exist.
* If it does not match then it's deleted to pave the way for @ref ethash_io_write()
* *
* @param dirname A null terminated c-string of the path of the ethash * @param[in] dirname A null terminated c-string of the path of the ethash
* data directory. If it does not exist it's created. * data directory. If it does not exist it's created.
* @param seedhash The seedhash of the current block number * @param[in] seedhash The seedhash of the current block number, used in the
* @return For possible return values @see enum ethash_io_rc * naming of the file as can be seen from the spec at:
* https://github.com/ethereum/wiki/wiki/Ethash-DAG
* @param[out] output_file If there was no failure then this will point to an open
* file descriptor. User is responsible for closing it.
* In the case of memo match then the file is open on read
* mode, while on the case of mismatch a new file is created
* on write mode
* @param[in] file_size The size that the DAG file should have on disk
* @param[in] force_create If true then there is no check to see if the file
* already exists
* @return For possible return values @see enum ethash_io_rc
*/ */
enum ethash_io_rc ethash_io_prepare(char const *dirname, ethash_blockhash_t seedhash); enum ethash_io_rc ethash_io_prepare(
char const* dirname,
ethash_h256_t const seedhash,
FILE** output_file,
uint64_t file_size,
bool force_create
);
/** /**
* Fully computes data and writes it to the file on disk. * An fopen wrapper for no-warnings crossplatform fopen.
* *
* This function should be called after @see ethash_io_prepare() and only if * Msvc compiler considers fopen to be insecure and suggests to use their
* its return value is @c ETHASH_IO_MEMO_MISMATCH. Will write both the full data * alternative. This is a wrapper for this alternative. Another way is to
* and the memo file. * #define _CRT_SECURE_NO_WARNINGS, but disabling all security warnings does
* not sound like a good idea.
* *
* @param[in] dirname A null terminated c-string of the path of the ethash * @param file_name The path to the file to open
* data directory. Has to exist. * @param mode Opening mode. Check fopen()
* @param[in] params An ethash_params object containing the full size * @return The FILE* or NULL in failure
* and the cache size
* @param[in] seedhash The seedhash of the current block number
* @param[in] cache The cache data. Would have usually been calulated by
* @see ethash_prep_light().
* @param[out] data Pass a pointer to uint8_t by reference here. If the
* function is succesfull then this point to the allocated
* data calculated by @see ethash_prep_full(). Memory
* ownership is transfered to the callee. Remember that
* you eventually need to free this with a call to free().
* @param[out] data_size Pass a uint64_t by value. If the function is succesfull
* then this will contain the number of bytes allocated
* for @a data.
* @return True for success and false in case of failure.
*/ */
bool ethash_io_write(char const *dirname, FILE* ethash_fopen(char const* file_name, char const* mode);
ethash_params const* params,
ethash_blockhash_t seedhash,
void const* cache,
uint8_t **data,
uint64_t *data_size);
static inline void ethash_io_serialize_info(uint32_t revision, /**
ethash_blockhash_t seed_hash, * An strncat wrapper for no-warnings crossplatform strncat.
char *output) *
{ * Msvc compiler considers strncat to be insecure and suggests to use their
// if .info is only consumed locally we don't really care about endianess * alternative. This is a wrapper for this alternative. Another way is to
memcpy(output, &revision, 4); * #define _CRT_SECURE_NO_WARNINGS, but disabling all security warnings does
memcpy(output + 4, &seed_hash, 32); * not sound like a good idea.
} *
* @param des Destination buffer
* @param dest_size Maximum size of the destination buffer. This is the
* extra argument for the MSVC secure strncat
* @param src Source buffer
* @param count Number of bytes to copy from source
* @return If all is well returns the dest buffer. If there is an
* error returns NULL
*/
char* ethash_strncat(char* dest, size_t dest_size, char const* src, size_t count);
static inline char *ethash_io_create_filename(char const *dirname, /**
char const* filename, * A cross-platform mkdir wrapper to create a directory or assert it's there
size_t filename_length) *
{ * @param dirname The full path of the directory to create
// in C the cast is not needed, but a C++ compiler will complain for invalid conversion * @return true if the directory was created or if it already
char *name = (char*)malloc(strlen(dirname) + filename_length); * existed
if (!name) { */
return NULL; bool ethash_mkdir(char const* dirname);
}
name[0] = '\0'; /**
strcat(name, dirname); * Get a file's size
strcat(name, filename); *
return name; * @param[in] f The open file stream whose size to get
} * @param[out] size Pass a size_t by reference to contain the file size
* @return true in success and false if there was a failure
*/
bool ethash_file_size(FILE* f, size_t* ret_size);
/**
* Get a file descriptor number from a FILE stream
*
* @param f The file stream whose fd to get
* @return Platform specific fd handler
*/
int ethash_fileno(FILE* f);
/**
* Create the filename for the DAG.
*
* @param dirname The directory name in which the DAG file should reside
* If it does not end with a directory separator it is appended.
* @param filename The actual name of the file
* @param filename_length The length of the filename in bytes
* @return A char* containing the full name. User must deallocate.
*/
char* ethash_io_create_filename(
char const* dirname,
char const* filename,
size_t filename_length
);
/**
* Gets the default directory name for the DAG depending on the system
*
* The spec defining this directory is here: https://github.com/ethereum/wiki/wiki/Ethash-DAG
*
* @param[out] strbuf A string buffer of sufficient size to keep the
* null terminated string of the directory name
* @param[in] buffsize Size of @a strbuf in bytes
* @return true for success and false otherwise
*/
bool ethash_get_default_dirname(char* strbuf, size_t buffsize);
static inline bool ethash_io_mutable_name(
uint32_t revision,
ethash_h256_t const* seed_hash,
char* output
)
{
uint64_t hash = *((uint64_t*)seed_hash);
#if LITTLE_ENDIAN == BYTE_ORDER
hash = ethash_swap_u64(hash);
#endif
return snprintf(output, DAG_MUTABLE_NAME_MAX_SIZE, "%u_%016" PRIx64, revision, hash) >= 0;
}
#ifdef __cplusplus #ifdef __cplusplus
} }
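ethash_io_mutable_name above yields names of the form "<REVISION>_<first 8 seedhash bytes in hex, big-endian>", e.g. "23_290decd9548b62a8". A small stand-alone sketch of that naming scheme (illustrative only; the seedhash bytes here are just an example):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	// Example seedhash: only the first 8 bytes matter for the name.
	uint8_t seedhash[32] = { 0x29, 0x0d, 0xec, 0xd9, 0x54, 0x8b, 0x62, 0xa8 };
	unsigned const revision = 23;

	// Mirror the io.h logic: take the first 8 bytes in big-endian order so
	// the name is the same regardless of host byte order.
	uint64_t prefix = 0;
	for (int i = 0; i < 8; ++i) {
		prefix = (prefix << 8) | seedhash[i];
	}
	char name[10 + 1 + 16 + 1]; // matches DAG_MUTABLE_NAME_MAX_SIZE
	snprintf(name, sizeof(name), "%u_%016" PRIx64, revision, prefix);
	printf("DAG file mutable name: %s\n", name); // 23_290decd9548b62a8
	return 0;
}
```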

@ -27,50 +27,76 @@
#include <stdio.h>
#include <unistd.h>

FILE* ethash_fopen(char const* file_name, char const* mode)
{
    return fopen(file_name, mode);
}

char* ethash_strncat(char* dest, size_t dest_size, char const* src, size_t count)
{
    return strlen(dest) + count + 1 <= dest_size ? strncat(dest, src, count) : NULL;
}

bool ethash_mkdir(char const* dirname)
{
    int rc = mkdir(dirname, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
    return rc != -1 || errno == EEXIST;
}

int ethash_fileno(FILE *f)
{
    return fileno(f);
}

char* ethash_io_create_filename(
    char const* dirname,
    char const* filename,
    size_t filename_length
)
{
    size_t dirlen = strlen(dirname);
    size_t dest_size = dirlen + filename_length + 1;
    if (dirname[dirlen] != '/') {
        dest_size += 1;
    }
    char* name = malloc(dest_size);
    if (!name) {
        return NULL;
    }

    name[0] = '\0';
    ethash_strncat(name, dest_size, dirname, dirlen);
    if (dirname[dirlen] != '/') {
        ethash_strncat(name, dest_size, "/", 1);
    }
    ethash_strncat(name, dest_size, filename, filename_length);
    return name;
}

bool ethash_file_size(FILE* f, size_t* ret_size)
{
    struct stat st;
    int fd;
    if ((fd = fileno(f)) == -1 || fstat(fd, &st) != 0) {
        return false;
    }
    *ret_size = st.st_size;
    return true;
}

bool ethash_get_default_dirname(char* strbuf, size_t buffsize)
{
    static const char dir_suffix[] = ".ethash/";
    strbuf[0] = '\0';
    char* home_dir = getenv("HOME");
    size_t len = strlen(home_dir);
    if (!ethash_strncat(strbuf, buffsize, home_dir, len)) {
        return false;
    }
    if (home_dir[len] != '/') {
        if (!ethash_strncat(strbuf, buffsize, "/", 1)) {
            return false;
        }
    }
    return ethash_strncat(strbuf, buffsize, dir_suffix, sizeof(dir_suffix));
}
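As a usage sketch under stated assumptions (a hypothetical home directory and DAG file name, and a local copy of the bounded-concat helper so the snippet compiles on its own), this shows how the pieces above compose into a full DAG path:

// Standalone sketch of assembling "<home>/.ethash/<mutable name>".
// bounded_cat mirrors the ethash_strncat contract: it only appends
// when the result, including the terminating NUL, fits.
#include <stdio.h>
#include <string.h>

static char* bounded_cat(char* dest, size_t dest_size, char const* src, size_t count)
{
    return strlen(dest) + count + 1 <= dest_size ? strncat(dest, src, count) : NULL;
}

int main(void)
{
    char path[256] = "";                  // strbuf[0] = '\0' as in the code above
    char const* home = "/home/alice";     // hypothetical; normally from getenv("HOME")
    char const* name = "1_000a41ff22371608"; // hypothetical mutable DAG name

    if (!bounded_cat(path, sizeof(path), home, strlen(home)) ||
        !bounded_cat(path, sizeof(path), "/.ethash/", strlen("/.ethash/")) ||
        !bounded_cat(path, sizeof(path), name, strlen(name))) {
        return 1; // would not fit in the buffer
    }
    printf("%s\n", path); // /home/alice/.ethash/1_000a41ff22371608
    return 0;
}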

@ -23,51 +23,78 @@
#include <direct.h>
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <Shlobj.h>

FILE* ethash_fopen(char const* file_name, char const* mode)
{
    FILE* f;
    return fopen_s(&f, file_name, mode) == 0 ? f : NULL;
}

char* ethash_strncat(char* dest, size_t dest_size, char const* src, size_t count)
{
    return strncat_s(dest, dest_size, src, count) == 0 ? dest : NULL;
}

bool ethash_mkdir(char const* dirname)
{
    int rc = _mkdir(dirname);
    return rc != -1 || errno == EEXIST;
}

int ethash_fileno(FILE* f)
{
    return _fileno(f);
}

char* ethash_io_create_filename(
    char const* dirname,
    char const* filename,
    size_t filename_length
)
{
    size_t dirlen = strlen(dirname);
    size_t dest_size = dirlen + filename_length + 1;
    if (dirname[dirlen] != '\\' || dirname[dirlen] != '/') {
        dest_size += 1;
    }
    char* name = malloc(dest_size);
    if (!name) {
        return NULL;
    }

    name[0] = '\0';
    ethash_strncat(name, dest_size, dirname, dirlen);
    if (dirname[dirlen] != '\\' || dirname[dirlen] != '/') {
        ethash_strncat(name, dest_size, "\\", 1);
    }
    ethash_strncat(name, dest_size, filename, filename_length);
    return name;
}

bool ethash_file_size(FILE* f, size_t* ret_size)
{
    struct _stat st;
    int fd;
    if ((fd = _fileno(f)) == -1 || _fstat(fd, &st) != 0) {
        return false;
    }
    *ret_size = st.st_size;
    return true;
}

bool ethash_get_default_dirname(char* strbuf, size_t buffsize)
{
    static const char dir_suffix[] = "Appdata\\Ethash\\";
    strbuf[0] = '\0';
    if (!SUCCEEDED(SHGetFolderPathW(NULL, CSIDL_PROFILE, NULL, 0, (WCHAR*)strbuf))) {
        return false;
    }
    if (!ethash_strncat(strbuf, buffsize, "\\", 1)) {
        return false;
    }
    return ethash_strncat(strbuf, buffsize, dir_suffix, sizeof(dir_suffix));
}

@ -0,0 +1,47 @@
/*
This file is part of ethash.
ethash is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ethash is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ethash. If not, see <http://www.gnu.org/licenses/>.
*/
/** @file mmap.h
* @author Lefteris Karapetsas <lefteris@ethdev.com>
* @date 2015
*/
#pragma once
#if defined(__MINGW32__) || defined(_WIN32)
#include <sys/types.h>
#define PROT_READ 0x1
#define PROT_WRITE 0x2
/* This flag is only available in WinXP+ */
#ifdef FILE_MAP_EXECUTE
#define PROT_EXEC 0x4
#else
#define PROT_EXEC 0x0
#define FILE_MAP_EXECUTE 0
#endif
#define MAP_SHARED 0x01
#define MAP_PRIVATE 0x02
#define MAP_ANONYMOUS 0x20
#define MAP_ANON MAP_ANONYMOUS
#define MAP_FAILED ((void *) -1)
void* mmap(void* start, size_t length, int prot, int flags, int fd, off_t offset);
void munmap(void* addr, size_t length);
#else // posix, yay! ^_^
#include <sys/mman.h>
#endif
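A minimal sketch (assumptions: a POSIX host and a hypothetical file path) of how a caller might map an existing DAG file read-only through the interface declared above; on Windows the shim that follows provides the same mmap/munmap signatures.

// Sketch: map an existing file read-only and read its first byte.
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
    const char* path = "/tmp/example.dag"; // hypothetical path
    int fd = open(path, O_RDONLY);
    if (fd < 0) {
        return 1;
    }
    struct stat st;
    if (fstat(fd, &st) != 0 || st.st_size == 0) {
        close(fd);
        return 1;
    }
    void* data = mmap(NULL, (size_t)st.st_size, PROT_READ, MAP_SHARED, fd, 0);
    if (data == MAP_FAILED) {
        close(fd);
        return 1;
    }
    printf("first byte: %u\n", ((unsigned char*)data)[0]);
    munmap(data, (size_t)st.st_size);
    close(fd);
    return 0;
}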

@ -0,0 +1,84 @@
/* mmap() replacement for Windows
*
* Author: Mike Frysinger <vapier@gentoo.org>
* Placed into the public domain
*/
/* References:
* CreateFileMapping: http://msdn.microsoft.com/en-us/library/aa366537(VS.85).aspx
* CloseHandle: http://msdn.microsoft.com/en-us/library/ms724211(VS.85).aspx
* MapViewOfFile: http://msdn.microsoft.com/en-us/library/aa366761(VS.85).aspx
* UnmapViewOfFile: http://msdn.microsoft.com/en-us/library/aa366882(VS.85).aspx
*/
#include <io.h>
#include <windows.h>
#include "mmap.h"
#ifdef __USE_FILE_OFFSET64
# define DWORD_HI(x) (x >> 32)
# define DWORD_LO(x) ((x) & 0xffffffff)
#else
# define DWORD_HI(x) (0)
# define DWORD_LO(x) (x)
#endif
void* mmap(void* start, size_t length, int prot, int flags, int fd, off_t offset)
{
if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
return MAP_FAILED;
if (fd == -1) {
if (!(flags & MAP_ANON) || offset)
return MAP_FAILED;
} else if (flags & MAP_ANON)
return MAP_FAILED;
DWORD flProtect;
if (prot & PROT_WRITE) {
if (prot & PROT_EXEC)
flProtect = PAGE_EXECUTE_READWRITE;
else
flProtect = PAGE_READWRITE;
} else if (prot & PROT_EXEC) {
if (prot & PROT_READ)
flProtect = PAGE_EXECUTE_READ;
else if (prot & PROT_EXEC)
flProtect = PAGE_EXECUTE;
} else
flProtect = PAGE_READONLY;
off_t end = length + offset;
HANDLE mmap_fd, h;
if (fd == -1)
mmap_fd = INVALID_HANDLE_VALUE;
else
mmap_fd = (HANDLE)_get_osfhandle(fd);
h = CreateFileMapping(mmap_fd, NULL, flProtect, DWORD_HI(end), DWORD_LO(end), NULL);
if (h == NULL)
return MAP_FAILED;
DWORD dwDesiredAccess;
if (prot & PROT_WRITE)
dwDesiredAccess = FILE_MAP_WRITE;
else
dwDesiredAccess = FILE_MAP_READ;
if (prot & PROT_EXEC)
dwDesiredAccess |= FILE_MAP_EXECUTE;
if (flags & MAP_PRIVATE)
dwDesiredAccess |= FILE_MAP_COPY;
void *ret = MapViewOfFile(h, dwDesiredAccess, DWORD_HI(offset), DWORD_LO(offset), length);
if (ret == NULL) {
ret = MAP_FAILED;
}
// since we are handling the file ourselves with fd, close the Windows Handle here
CloseHandle(h);
return ret;
}
void munmap(void* addr, size_t length)
{
UnmapViewOfFile(addr);
}
#undef DWORD_HI
#undef DWORD_LO
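A small sketch of the split that DWORD_HI and DWORD_LO perform when 64-bit file offsets are enabled: CreateFileMapping and MapViewOfFile take sizes and offsets as two 32-bit halves, which recombine losslessly.

// Sketch: splitting a 64-bit offset into two 32-bit halves and back.
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t offset = 0x123456789abcdef0ULL;
    uint32_t hi = (uint32_t)(offset >> 32);
    uint32_t lo = (uint32_t)(offset & 0xffffffffu);
    uint64_t rejoined = ((uint64_t)hi << 32) | lo;
    assert(rejoined == offset);
    printf("hi=0x%08x lo=0x%08x\n", hi, lo);
    return 0;
}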

@ -17,65 +17,65 @@
/*** Constants. ***/
static const uint8_t rho[24] = \
  { 1,  3,  6, 10, 15, 21,
   28, 36, 45, 55,  2, 14,
   27, 41, 56,  8, 25, 43,
   62, 18, 39, 61, 20, 44};
static const uint8_t pi[24] = \
  {10,  7, 11, 17, 18, 3,
    5, 16,  8, 21, 24, 4,
   15, 23, 19, 13, 12, 2,
   20, 14, 22,  9,  6, 1};
static const uint64_t RC[24] = \
  {1ULL, 0x8082ULL, 0x800000000000808aULL, 0x8000000080008000ULL,
   0x808bULL, 0x80000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL,
   0x8aULL, 0x88ULL, 0x80008009ULL, 0x8000000aULL,
   0x8000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, 0x8000000000008003ULL,
   0x8000000000008002ULL, 0x8000000000000080ULL, 0x800aULL, 0x800000008000000aULL,
   0x8000000080008081ULL, 0x8000000000008080ULL, 0x80000001ULL, 0x8000000080008008ULL};

/*** Helper macros to unroll the permutation. ***/
#define rol(x, s) (((x) << s) | ((x) >> (64 - s)))
#define REPEAT6(e) e e e e e e
#define REPEAT24(e) REPEAT6(e e e e)
#define REPEAT5(e) e e e e e
#define FOR5(v, s, e) \
  v = 0; \
  REPEAT5(e; v += s;)

/*** Keccak-f[1600] ***/
static inline void keccakf(void* state) {
  uint64_t* a = (uint64_t*)state;
  uint64_t b[5] = {0};
  uint64_t t = 0;
  uint8_t x, y;

  for (int i = 0; i < 24; i++) {
    // Theta
    FOR5(x, 1,
         b[x] = 0;
         FOR5(y, 5,
              b[x] ^= a[x + y]; ))
    FOR5(x, 1,
         FOR5(y, 5,
              a[y + x] ^= b[(x + 4) % 5] ^ rol(b[(x + 1) % 5], 1); ))
    // Rho and pi
    t = a[1];
    x = 0;
    REPEAT24(b[0] = a[pi[x]];
             a[pi[x]] = rol(t, rho[x]);
             t = b[0];
             x++; )
    // Chi
    FOR5(y,
         5,
         FOR5(x, 1,
              b[x] = a[y + x];)
         FOR5(x, 1,
              a[y + x] = b[x] ^ ((~b[(x + 1) % 5]) & b[(x + 2) % 5]); ))
    // Iota
    a[0] ^= RC[i];
  }
}
/******** The FIPS202-defined functions. ********/ /******** The FIPS202-defined functions. ********/
@ -83,20 +83,20 @@ static inline void keccakf(void* state) {
/*** Some helper macros. ***/

#define _(S) do { S } while (0)
#define FOR(i, ST, L, S) \
  _(for (size_t i = 0; i < L; i += ST) { S; })
#define mkapply_ds(NAME, S) \
  static inline void NAME(uint8_t* dst, \
                          const uint8_t* src, \
                          size_t len) { \
    FOR(i, 1, len, S); \
  }
#define mkapply_sd(NAME, S) \
  static inline void NAME(const uint8_t* src, \
                          uint8_t* dst, \
                          size_t len) { \
    FOR(i, 1, len, S); \
  }

mkapply_ds(xorin, dst[i] ^= src[i])  // xorin
mkapply_sd(setout, dst[i] = src[i])  // setout
@ -105,47 +105,47 @@ mkapply_sd(setout, dst[i] = src[i]) // setout
#define Plen 200

// Fold P*F over the full blocks of an input.
#define foldP(I, L, F) \
  while (L >= rate) {  \
    F(a, I, rate);     \
    P(a);              \
    I += rate;         \
    L -= rate;         \
  }

/** The sponge-based hash construction. **/
static inline int hash(uint8_t* out, size_t outlen,
                       const uint8_t* in, size_t inlen,
                       size_t rate, uint8_t delim) {
  if ((out == NULL) || ((in == NULL) && inlen != 0) || (rate >= Plen)) {
    return -1;
  }
  uint8_t a[Plen] = {0};
  // Absorb input.
  foldP(in, inlen, xorin);
  // Xor in the DS and pad frame.
  a[inlen] ^= delim;
  a[rate - 1] ^= 0x80;
  // Xor in the last block.
  xorin(a, in, inlen);
  // Apply P
  P(a);
  // Squeeze output.
  foldP(out, outlen, setout);
  setout(a, out, outlen);
  memset(a, 0, 200);
  return 0;
}

#define defsha3(bits) \
  int sha3_##bits(uint8_t* out, size_t outlen, \
                  const uint8_t* in, size_t inlen) { \
    if (outlen > (bits/8)) { \
      return -1; \
    } \
    return hash(out, outlen, in, inlen, 200 - (bits / 4), 0x01); \
  }

/*** FIPS202 SHA3 FOFs ***/
defsha3(256)
defsha3(512)
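Note that the 0x01 domain-separation byte above selects the original Keccak padding (the pre-FIPS "Keccak-256"/"Keccak-512" variants used by Ethereum), not the 0x06 byte of final SHA-3. A small standalone sketch of the rate/capacity arithmetic behind `200 - (bits / 4)`:

// Sketch: sponge rate (block size) and capacity for the two widths
// instantiated via defsha3 above; 200 bytes is the 1600-bit Keccak state.
#include <stdio.h>

int main(void)
{
    const int plen = 200; // 1600-bit state in bytes
    int widths[2] = {256, 512};
    for (int i = 0; i < 2; i++) {
        int bits = widths[i];
        int rate = plen - bits / 4;  // 136 bytes for 256-bit, 72 for 512-bit
        int capacity = plen - rate;  // equals 2 * (bits / 8)
        printf("sha3_%d: rate=%d bytes, capacity=%d bytes\n", bits, rate, capacity);
    }
    return 0;
}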

@ -8,22 +8,22 @@ extern "C" {
#include <stdint.h>
#include <stdlib.h>

struct ethash_h256;

#define decsha3(bits) \
	int sha3_##bits(uint8_t*, size_t, uint8_t const*, size_t);

decsha3(256)
decsha3(512)

static inline void SHA3_256(struct ethash_h256 const* ret, uint8_t const* data, size_t const size)
{
	sha3_256((uint8_t*)ret, 32, data, size);
}

static inline void SHA3_512(uint8_t* ret, uint8_t const* data, size_t const size)
{
	sha3_512(ret, 64, data, size);
}

#ifdef __cplusplus

@ -23,13 +23,15 @@
#include <cryptopp/sha3.h>

extern "C" {
struct ethash_h256;
typedef struct ethash_h256 ethash_h256_t;

void SHA3_256(ethash_h256_t const* ret, uint8_t const* data, size_t size)
{
	CryptoPP::SHA3_256().CalculateDigest((uint8_t*)ret, data, size);
}

void SHA3_512(uint8_t* const ret, uint8_t const* data, size_t size)
{
	CryptoPP::SHA3_512().CalculateDigest(ret, data, size);
}
}

@ -8,11 +8,10 @@
extern "C" { extern "C" {
#endif #endif
struct ethash_blockhash; struct ethash_h256;
typedef struct ethash_blockhash ethash_blockhash_t;
void SHA3_256(ethash_blockhash_t *const ret, const uint8_t *data, size_t size); void SHA3_256(struct ethash_h256 const* ret, uint8_t const* data, size_t size);
void SHA3_512(uint8_t *const ret, const uint8_t *data, size_t size); void SHA3_512(uint8_t* const ret, uint8_t const* data, size_t size);
#ifdef __cplusplus #ifdef __cplusplus
} }

@ -26,11 +26,11 @@
extern "C" { extern "C" {
#endif #endif
#ifdef _MSC_VER //#ifdef _MSC_VER
void debugf(const char *str, ...); void debugf(char const* str, ...);
#else //#else
#define debugf printf //#define debugf printf
#endif //#endif
static inline uint32_t min_u32(uint32_t a, uint32_t b) static inline uint32_t min_u32(uint32_t a, uint32_t b)
{ {

@ -22,20 +22,17 @@
#include <stdio.h>
#include "util.h"

// forward declare without all of Windows.h
__declspec(dllimport) void __stdcall OutputDebugStringA(char const* lpOutputString);

void debugf(char const* str, ...)
{
	va_list args;
	va_start(args, str);

	char buf[1<<16];
	_vsnprintf_s(buf, sizeof(buf), sizeof(buf), str, args);
	buf[sizeof(buf)-1] = '\0';
	OutputDebugStringA(buf);
}

@ -13,16 +13,16 @@
#define PY_CONST_STRING_FORMAT "s"
#endif

#define MIX_WORDS (ETHASH_MIX_BYTES/4)

static PyObject *
get_cache_size(PyObject *self, PyObject *args) {
    unsigned long block_number;
    if (!PyArg_ParseTuple(args, "k", &block_number))
        return 0;
    if (block_number >= ETHASH_EPOCH_LENGTH * 2048) {
        char error_message[1024];
        sprintf(error_message, "Block number must be less than %i (was %lu)", ETHASH_EPOCH_LENGTH * 2048, block_number);
        PyErr_SetString(PyExc_ValueError, error_message);
        return 0;
@ -36,9 +36,9 @@ get_full_size(PyObject *self, PyObject *args) {
    unsigned long block_number;
    if (!PyArg_ParseTuple(args, "k", &block_number))
        return 0;
    if (block_number >= ETHASH_EPOCH_LENGTH * 2048) {
        char error_message[1024];
        sprintf(error_message, "Block number must be less than %i (was %lu)", ETHASH_EPOCH_LENGTH * 2048, block_number);
        PyErr_SetString(PyExc_ValueError, error_message);
        return 0;
@ -69,7 +69,7 @@ mkcache_bytes(PyObject *self, PyObject *args) {
    params.cache_size = (size_t) cache_size;
    ethash_cache cache;
    cache.mem = malloc(cache_size);
    ethash_mkcache(&cache, &params, (ethash_h256_t *) seed);
    PyObject * val = Py_BuildValue(PY_STRING_FORMAT, cache.mem, cache_size);
    free(cache.mem);
    return val;
@ -92,9 +92,9 @@ calc_dataset_bytes(PyObject *self, PyObject *args) {
        return 0;
    }
    if (cache_size % ETHASH_HASH_BYTES != 0) {
        char error_message[1024];
        sprintf(error_message, "The size of the cache must be a multiple of %i bytes (was %i)", ETHASH_HASH_BYTES, cache_size);
        PyErr_SetString(PyExc_ValueError, error_message);
        return 0;
    }
@ -127,9 +127,9 @@ hashimoto_light(PyObject *self, PyObject *args) {
        PyErr_SetString(PyExc_ValueError, error_message);
        return 0;
    }
    if (cache_size % ETHASH_HASH_BYTES != 0) {
        char error_message[1024];
        sprintf(error_message, "The size of the cache must be a multiple of %i bytes (was %i)", ETHASH_HASH_BYTES, cache_size);
        PyErr_SetString(PyExc_ValueError, error_message);
        return 0;
    }
@ -146,7 +146,7 @@ hashimoto_light(PyObject *self, PyObject *args) {
    params.full_size = (size_t) full_size;
    ethash_cache cache;
    cache.mem = (void *) cache_bytes;
    ethash_light(&out, &cache, &params, (ethash_h256_t *) header, nonce);
    return Py_BuildValue("{" PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT "," PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT "}",
                         "mix digest", &out.mix_hash, 32,
                         "result", &out.result, 32);
@ -181,7 +181,7 @@ hashimoto_full(PyObject *self, PyObject *args) {
    ethash_return_value out;
    ethash_params params;
    params.full_size = (size_t) full_size;
    ethash_full(&out, (void *) full_bytes, &params, (ethash_h256_t *) header, nonce);
    return Py_BuildValue("{" PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT ", " PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT "}",
                         "mix digest", &out.mix_hash, 32,
                         "result", &out.result, 32);
@ -227,9 +227,9 @@ mine(PyObject *self, PyObject *args) {
    // TODO: Multi threading?
    do {
        ethash_full(&out, (void *) full_bytes, &params, (const ethash_h256_t *) header, nonce++);
        // TODO: disagrees with the spec https://github.com/ethereum/wiki/wiki/Ethash#mining
    } while (!ethash_check_difficulty(&out.result, (const ethash_h256_t *) difficulty));

    return Py_BuildValue("{" PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT ", " PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT ", " PY_CONST_STRING_FORMAT ":K}",
                         "mix digest", &out.mix_hash, 32,
@ -243,15 +243,14 @@ get_seedhash(PyObject *self, PyObject *args) {
    unsigned long block_number;
    if (!PyArg_ParseTuple(args, "k", &block_number))
        return 0;
    if (block_number >= ETHASH_EPOCH_LENGTH * 2048) {
        char error_message[1024];
        sprintf(error_message, "Block number must be less than %i (was %lu)", ETHASH_EPOCH_LENGTH * 2048, block_number);
        PyErr_SetString(PyExc_ValueError, error_message);
        return 0;
    }
    ethash_h256_t seedhash = ethash_get_seedhash(block_number);
    return Py_BuildValue(PY_STRING_FORMAT, (char *) &seedhash, 32);
}
@ -306,17 +305,17 @@ static struct PyModuleDef PyethashModule = {
PyMODINIT_FUNC PyInit_pyethash(void) {
    PyObject *module = PyModule_Create(&PyethashModule);
    // Following Spec: https://github.com/ethereum/wiki/wiki/Ethash#definitions
    PyModule_AddIntConstant(module, "REVISION", (long) ETHASH_REVISION);
    PyModule_AddIntConstant(module, "DATASET_BYTES_INIT", (long) ETHASH_DATASET_BYTES_INIT);
    PyModule_AddIntConstant(module, "DATASET_BYTES_GROWTH", (long) ETHASH_DATASET_BYTES_GROWTH);
    PyModule_AddIntConstant(module, "CACHE_BYTES_INIT", (long) ETHASH_CACHE_BYTES_INIT);
    PyModule_AddIntConstant(module, "CACHE_BYTES_GROWTH", (long) ETHASH_CACHE_BYTES_GROWTH);
    PyModule_AddIntConstant(module, "EPOCH_LENGTH", (long) ETHASH_EPOCH_LENGTH);
    PyModule_AddIntConstant(module, "MIX_BYTES", (long) ETHASH_MIX_BYTES);
    PyModule_AddIntConstant(module, "HASH_BYTES", (long) ETHASH_HASH_BYTES);
    PyModule_AddIntConstant(module, "DATASET_PARENTS", (long) ETHASH_DATASET_PARENTS);
    PyModule_AddIntConstant(module, "CACHE_ROUNDS", (long) ETHASH_CACHE_ROUNDS);
    PyModule_AddIntConstant(module, "ACCESSES", (long) ETHASH_ACCESSES);
    return module;
}
#else
@ -324,16 +323,16 @@ PyMODINIT_FUNC
initpyethash(void) {
    PyObject *module = Py_InitModule("pyethash", PyethashMethods);
    // Following Spec: https://github.com/ethereum/wiki/wiki/Ethash#definitions
    PyModule_AddIntConstant(module, "REVISION", (long) ETHASH_REVISION);
    PyModule_AddIntConstant(module, "DATASET_BYTES_INIT", (long) ETHASH_DATASET_BYTES_INIT);
    PyModule_AddIntConstant(module, "DATASET_BYTES_GROWTH", (long) ETHASH_DATASET_BYTES_GROWTH);
    PyModule_AddIntConstant(module, "CACHE_BYTES_INIT", (long) ETHASH_CACHE_BYTES_INIT);
    PyModule_AddIntConstant(module, "CACHE_BYTES_GROWTH", (long) ETHASH_CACHE_BYTES_GROWTH);
    PyModule_AddIntConstant(module, "EPOCH_LENGTH", (long) ETHASH_EPOCH_LENGTH);
    PyModule_AddIntConstant(module, "MIX_BYTES", (long) ETHASH_MIX_BYTES);
    PyModule_AddIntConstant(module, "HASH_BYTES", (long) ETHASH_HASH_BYTES);
    PyModule_AddIntConstant(module, "DATASET_PARENTS", (long) ETHASH_DATASET_PARENTS);
    PyModule_AddIntConstant(module, "CACHE_ROUNDS", (long) ETHASH_CACHE_ROUNDS);
    PyModule_AddIntConstant(module, "ACCESSES", (long) ETHASH_ACCESSES);
}
#endif
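The bound checked throughout the bindings above caps block numbers at ETHASH_EPOCH_LENGTH * 2048 epochs. A standalone sketch of that check, assuming the standard ethash epoch length of 30000 blocks:

// Sketch of the block-number bound used by the Python bindings,
// assuming EPOCH_LENGTH == 30000 as in the ethash spec.
#include <stdbool.h>
#include <stdio.h>

#define EPOCH_LENGTH 30000UL

static bool block_number_in_range(unsigned long block_number)
{
    return block_number < EPOCH_LENGTH * 2048;
}

int main(void)
{
    printf("limit: %lu\n", EPOCH_LENGTH * 2048);                              // 61440000
    printf("block 0 ok: %d\n", block_number_in_range(0));                     // 1
    printf("limit ok:   %d\n", block_number_in_range(EPOCH_LENGTH * 2048));   // 0
    return 0;
}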

@ -27,13 +27,16 @@ IF( NOT Boost_FOUND )
	find_package(Boost 1.48.0 COMPONENTS unit_test_framework system filesystem)
ENDIF()

IF (Boost_FOUND)
	message(STATUS "boost header: ${Boost_INCLUDE_DIRS}")
	message(STATUS "boost libs : ${Boost_LIBRARIES}")
	include_directories( ${Boost_INCLUDE_DIR} )
	include_directories(../../src)
	link_directories(${Boost_LIBRARY_DIRS})
	file(GLOB HEADERS "*.h")
	if ((NOT MSVC) AND (NOT APPLE))
		ADD_DEFINITIONS(-DBOOST_TEST_DYN_LINK)
	endif()
	if (NOT CRYPTOPP_FOUND)
@ -48,11 +51,11 @@ IF( Boost_FOUND )
		set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 ")
	endif()

	add_executable (Test "./test.cpp" ${HEADERS})
	target_link_libraries(Test ${ETHHASH_LIBS})
	target_link_libraries(Test ${Boost_FILESYSTEM_LIBRARIES})
	target_link_libraries(Test ${Boost_SYSTEM_LIBRARIES})
	target_link_libraries(Test ${Boost_UNIT_TEST_FRAMEWORK_LIBRARIES})
	if (CRYPTOPP_FOUND)
		TARGET_LINK_LIBRARIES(Test ${CRYPTOPP_LIBRARIES})

@ -12,6 +12,11 @@
#include <libethash/sha3.h>
#endif // WITH_CRYPTOPP

#ifdef _WIN32
#include <windows.h>
#include <Shlobj.h>
#endif

#define BOOST_TEST_MODULE Daggerhashimoto
#define BOOST_TEST_MAIN
@ -22,367 +27,609 @@
#include <boost/test/unit_test.hpp>

using namespace std;
using byte = uint8_t;
using bytes = std::vector<byte>;
namespace fs = boost::filesystem;

// Just an alloca "wrapper" to silence uint64_t to size_t conversion warnings in windows
// consider replacing alloca calls with something better though!
#define our_alloca(param__) alloca((size_t)(param__))
// some functions taken from eth::dev for convenience.
std::string bytesToHexString(const uint8_t *str, const uint64_t s)
{
	std::ostringstream ret;

	for (size_t i = 0; i < s; ++i)
		ret << std::hex << std::setfill('0') << std::setw(2) << std::nouppercase << (int) str[i];

	return ret.str();
}

std::string blockhashToHexString(ethash_h256_t* _hash)
{
	return bytesToHexString((uint8_t*)_hash, 32);
}

int fromHex(char _i)
{
	if (_i >= '0' && _i <= '9')
		return _i - '0';
	if (_i >= 'a' && _i <= 'f')
		return _i - 'a' + 10;
	if (_i >= 'A' && _i <= 'F')
		return _i - 'A' + 10;

	BOOST_REQUIRE_MESSAGE(false, "should never get here");
	return -1;
}

bytes hexStringToBytes(std::string const& _s)
{
	unsigned s = (_s[0] == '0' && _s[1] == 'x') ? 2 : 0;
	std::vector<uint8_t> ret;
	ret.reserve((_s.size() - s + 1) / 2);

	if (_s.size() % 2)
		try
		{
			ret.push_back(fromHex(_s[s++]));
		}
		catch (...)
		{
			ret.push_back(0);
		}
	for (unsigned i = s; i < _s.size(); i += 2)
		try
		{
			ret.push_back((byte)(fromHex(_s[i]) * 16 + fromHex(_s[i + 1])));
		}
		catch (...)
		{
			ret.push_back(0);
		}
	return ret;
}

ethash_h256_t stringToBlockhash(std::string const& _s)
{
	ethash_h256_t ret;
	bytes b = hexStringToBytes(_s);
	memcpy(&ret, b.data(), b.size());
	return ret;
}
BOOST_AUTO_TEST_CASE(fnv_hash_check) {
	uint32_t x = 1235U;
	const uint32_t
			y = 9999999U,
			expected = (FNV_PRIME * x) ^y;

	x = fnv_hash(x, y);

	BOOST_REQUIRE_MESSAGE(x == expected,
			"\nexpected: " << expected << "\n"
					<< "actual: " << x << "\n");
}
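The test above pins fnv_hash to (FNV_PRIME * x) ^ y. A standalone sketch of that mixing step, assuming the usual 32-bit FNV prime 0x01000193 for FNV_PRIME:

// Sketch of the FNV-style mix checked by the test above,
// assuming FNV_PRIME == 0x01000193 (the 32-bit FNV prime).
#include <stdint.h>
#include <stdio.h>

#define FNV_PRIME 0x01000193u

static uint32_t fnv_mix(uint32_t x, uint32_t y)
{
    return (x * FNV_PRIME) ^ y;
}

int main(void)
{
    uint32_t x = 1235u;
    uint32_t y = 9999999u;
    printf("0x%08x\n", fnv_mix(x, y));
    return 0;
}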
BOOST_AUTO_TEST_CASE(SHA256_check) {
	ethash_h256_t input;
	ethash_h256_t out;
	memcpy(&input, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
	SHA3_256(&out, (uint8_t*)&input, 32);
	const std::string
			expected = "2b5ddf6f4d21c23de216f44d5e4bdc68e044b71897837ea74c83908be7037cd7",
			actual = bytesToHexString((uint8_t*)&out, 32);
	BOOST_REQUIRE_MESSAGE(expected == actual,
			"\nexpected: " << expected.c_str() << "\n"
					<< "actual: " << actual.c_str() << "\n");
}

BOOST_AUTO_TEST_CASE(SHA512_check) {
	uint8_t input[64], out[64];
	memcpy(input, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 64);
	SHA3_512(out, input, 64);
	const std::string
			expected = "0be8a1d334b4655fe58c6b38789f984bb13225684e86b20517a55ab2386c7b61c306f25e0627c60064cecd6d80cd67a82b3890bd1289b7ceb473aad56a359405",
			actual = bytesToHexString(out, 64);
	BOOST_REQUIRE_MESSAGE(expected == actual,
			"\nexpected: " << expected.c_str() << "\n"
					<< "actual: " << actual.c_str() << "\n");
}
BOOST_AUTO_TEST_CASE(test_swap_endian32) {
uint32_t v32 = (uint32_t)0xBAADF00D;
v32 = ethash_swap_u32(v32);
BOOST_REQUIRE_EQUAL(v32, (uint32_t)0x0DF0ADBA);
}
BOOST_AUTO_TEST_CASE(test_swap_endian64) {
uint64_t v64 = (uint64_t)0xFEE1DEADDEADBEEF;
v64 = ethash_swap_u64(v64);
BOOST_REQUIRE_EQUAL(v64, (uint64_t)0xEFBEADDEADDEE1FE);
} }
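A portable sketch of the byte swap these two tests exercise; this is an assumed example implementation, not necessarily how the library implements ethash_swap_u32/ethash_swap_u64 (which may use compiler intrinsics).

// Sketch: portable 32/64-bit byte swaps matching the test expectations above.
#include <assert.h>
#include <stdint.h>

static uint32_t swap_u32(uint32_t v)
{
    return ((v & 0x000000ffu) << 24) |
           ((v & 0x0000ff00u) << 8)  |
           ((v & 0x00ff0000u) >> 8)  |
           ((v & 0xff000000u) >> 24);
}

static uint64_t swap_u64(uint64_t v)
{
    v = ((v & 0x00000000ffffffffULL) << 32) | (v >> 32);
    v = ((v & 0x0000ffff0000ffffULL) << 16) | ((v >> 16) & 0x0000ffff0000ffffULL);
    v = ((v & 0x00ff00ff00ff00ffULL) << 8)  | ((v >> 8)  & 0x00ff00ff00ff00ffULL);
    return v;
}

int main(void)
{
    assert(swap_u32(0xBAADF00Du) == 0x0DF0ADBAu);
    assert(swap_u64(0xFEE1DEADDEADBEEFULL) == 0xEFBEADDEADDEE1FEULL);
    return 0;
}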
BOOST_AUTO_TEST_CASE(ethash_params_init_genesis_check) {
	uint64_t full_size = ethash_get_datasize(0);
	uint64_t cache_size = ethash_get_cachesize(0);
	BOOST_REQUIRE_MESSAGE(full_size < ETHASH_DATASET_BYTES_INIT,
			"\nfull size: " << full_size << "\n"
					<< "should be less than or equal to: " << ETHASH_DATASET_BYTES_INIT << "\n");
	BOOST_REQUIRE_MESSAGE(full_size + 20 * ETHASH_MIX_BYTES >= ETHASH_DATASET_BYTES_INIT,
			"\nfull size + 20*MIX_BYTES: " << full_size + 20 * ETHASH_MIX_BYTES << "\n"
					<< "should be greater than or equal to: " << ETHASH_DATASET_BYTES_INIT << "\n");
	BOOST_REQUIRE_MESSAGE(cache_size < ETHASH_DATASET_BYTES_INIT / 32,
			"\ncache size: " << cache_size << "\n"
					<< "should be less than or equal to: " << ETHASH_DATASET_BYTES_INIT / 32 << "\n");
}

BOOST_AUTO_TEST_CASE(ethash_params_init_genesis_calcifide_check) {
	uint64_t full_size = ethash_get_datasize(0);
	uint64_t cache_size = ethash_get_cachesize(0);
	const uint32_t expected_full_size = 1073739904;
	const uint32_t expected_cache_size = 16776896;
	BOOST_REQUIRE_MESSAGE(full_size == expected_full_size,
			"\nexpected: " << expected_cache_size << "\n"
					<< "actual: " << full_size << "\n");
	BOOST_REQUIRE_MESSAGE(cache_size == expected_cache_size,
			"\nexpected: " << expected_cache_size << "\n"
					<< "actual: " << cache_size << "\n");
}
BOOST_AUTO_TEST_CASE(ethash_check_difficulty_check) {
	ethash_h256_t hash;
	ethash_h256_t target;
	memcpy(&hash, "11111111111111111111111111111111", 32);
	memcpy(&target, "22222222222222222222222222222222", 32);
	BOOST_REQUIRE_MESSAGE(
			ethash_check_difficulty(&hash, &target),
			"\nexpected \"" << std::string((char *) &hash, 32).c_str() << "\" to have the same or less difficulty than \"" << std::string((char *) &target, 32).c_str() << "\"\n");
	BOOST_REQUIRE_MESSAGE(
			ethash_check_difficulty(&hash, &hash), "");
			// "\nexpected \"" << hash << "\" to have the same or less difficulty than \"" << hash << "\"\n");
	memcpy(&target, "11111111111111111111111111111112", 32);
	BOOST_REQUIRE_MESSAGE(
			ethash_check_difficulty(&hash, &target), "");
			// "\nexpected \"" << hash << "\" to have the same or less difficulty than \"" << target << "\"\n");
	memcpy(&target, "11111111111111111111111111111110", 32);
	BOOST_REQUIRE_MESSAGE(
			!ethash_check_difficulty(&hash, &target), "");
			// "\nexpected \"" << hash << "\" to have more difficulty than \"" << target << "\"\n");
}
BOOST_AUTO_TEST_CASE(test_ethash_io_mutable_name) {
	char mutable_name[DAG_MUTABLE_NAME_MAX_SIZE];
	// should have at least 8 bytes provided since this is what we test :)
	ethash_h256_t seed1 = ethash_h256_static_init(0, 10, 65, 255, 34, 55, 22, 8);
	ethash_io_mutable_name(1, &seed1, mutable_name);
	BOOST_REQUIRE_EQUAL(0, strcmp(mutable_name, "1_000a41ff22371608"));
	ethash_h256_t seed2 = ethash_h256_static_init(0, 0, 0, 0, 0, 0, 0, 0);
	ethash_io_mutable_name(44, &seed2, mutable_name);
	BOOST_REQUIRE_EQUAL(0, strcmp(mutable_name, "44_0000000000000000"));
}

BOOST_AUTO_TEST_CASE(test_ethash_dir_creation) {
	ethash_h256_t seedhash;
	FILE *f = NULL;
	memset(&seedhash, 0, 32);
	BOOST_REQUIRE_EQUAL(
			ETHASH_IO_MEMO_MISMATCH,
			ethash_io_prepare("./test_ethash_directory/", seedhash, &f, 64, false)
	);
	BOOST_REQUIRE(f);

	// let's make sure that the directory was created
	BOOST_REQUIRE(fs::is_directory(fs::path("./test_ethash_directory/")));

	// cleanup
	fclose(f);
	fs::remove_all("./test_ethash_directory/");
}
BOOST_AUTO_TEST_CASE(test_ethash_io_memo_file_match) {
uint64_t full_size;
uint64_t cache_size;
ethash_h256_t seed;
ethash_h256_t hash;
FILE* f;
memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
cache_size = 1024;
full_size = 1024 * 32;
ethash_light_t light = ethash_light_new_internal(cache_size, &seed);
ethash_full_t full = ethash_full_new_internal(
"./test_ethash_directory/",
seed,
full_size,
light,
NULL
);
BOOST_ASSERT(full);
// let's make sure that the directory was created
BOOST_REQUIRE(fs::is_directory(fs::path("./test_ethash_directory/")));
// delete the full here so that memory is properly unmapped and FILE handler freed
ethash_full_delete(full);
// and check that we have a match when checking again
BOOST_REQUIRE_EQUAL(
ETHASH_IO_MEMO_MATCH,
ethash_io_prepare("./test_ethash_directory/", seed, &f, full_size, false)
);
BOOST_REQUIRE(f);
// cleanup
fclose(f);
ethash_light_delete(light);
fs::remove_all("./test_ethash_directory/");
} }
BOOST_AUTO_TEST_CASE(test_ethash_io_memo_file_size_mismatch) {
	static const int blockn = 0;
	ethash_h256_t seedhash = ethash_get_seedhash(blockn);
	FILE *f = NULL;
	BOOST_REQUIRE_EQUAL(
			ETHASH_IO_MEMO_MISMATCH,
			ethash_io_prepare("./test_ethash_directory/", seedhash, &f, 64, false)
	);
	BOOST_REQUIRE(f);
	fclose(f);

	// let's make sure that the directory was created
	BOOST_REQUIRE(fs::is_directory(fs::path("./test_ethash_directory/")));
	// and check that we get the size mismatch detected if we request different size
	BOOST_REQUIRE_EQUAL(
			ETHASH_IO_MEMO_SIZE_MISMATCH,
			ethash_io_prepare("./test_ethash_directory/", seedhash, &f, 65, false)
	);

	// cleanup
	fs::remove_all("./test_ethash_directory/");
}
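A sketch of how a caller might branch on the memo-file return codes these tests exercise. The enum values here are local stand-ins for illustration, and regenerate_dag()/map_existing_dag() are hypothetical helpers, not library functions.

// Sketch of a caller-side decision on an ethash_io_prepare-style outcome.
#include <stdbool.h>
#include <stdio.h>

typedef enum {
    IO_FAIL = 0,           // filesystem error, nothing usable
    IO_MEMO_SIZE_MISMATCH, // file exists but with a different size
    IO_MEMO_MISMATCH,      // no matching file, a fresh one was created
    IO_MEMO_MATCH          // existing file matches revision/seed/size
} io_rc;

static bool regenerate_dag(void)   { puts("regenerating DAG"); return true; }
static bool map_existing_dag(void) { puts("mapping existing DAG"); return true; }

static bool handle_prepare(io_rc rc)
{
    switch (rc) {
    case IO_MEMO_MATCH:
        return map_existing_dag();
    case IO_MEMO_MISMATCH:
    case IO_MEMO_SIZE_MISMATCH:
        return regenerate_dag();
    case IO_FAIL:
    default:
        return false;
    }
}

int main(void)
{
    return handle_prepare(IO_MEMO_MATCH) ? 0 : 1;
}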
BOOST_AUTO_TEST_CASE(test_ethash_get_default_dirname) {
	char result[256];
	// this is really not an easy thing to test for in a unit test, so yeah it does look ugly
#ifdef _WIN32
	char homedir[256];
	BOOST_REQUIRE(SUCCEEDED(SHGetFolderPathW(NULL, CSIDL_PROFILE, NULL, 0, (WCHAR*)homedir)));
	BOOST_REQUIRE(ethash_get_default_dirname(result, 256));
	std::string res = std::string(homedir) + std::string("\\Appdata\\Ethash\\");
#else
	char* homedir = getenv("HOME");
	BOOST_REQUIRE(ethash_get_default_dirname(result, 256));
	std::string res = std::string(homedir) + std::string("/.ethash/");
#endif
	BOOST_CHECK_MESSAGE(strcmp(res.c_str(), result) == 0,
			"Expected \"" + res + "\" but got \"" + std::string(result) + "\""
	);
}

BOOST_AUTO_TEST_CASE(light_and_full_client_checks) {
	uint64_t full_size;
	uint64_t cache_size;
	ethash_h256_t seed;
	ethash_h256_t hash;
	ethash_h256_t difficulty;
	ethash_return_value_t light_out;
	ethash_return_value_t full_out;
	memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
	memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);

	// Set the difficulty
	ethash_h256_set(&difficulty, 0, 197);
	ethash_h256_set(&difficulty, 1, 90);
	for (int i = 2; i < 32; i++)
		ethash_h256_set(&difficulty, i, 255);

	cache_size = 1024;
	full_size = 1024 * 32;
	ethash_light_t light = ethash_light_new_internal(cache_size, &seed);
	ethash_full_t full = ethash_full_new_internal(
		"./test_ethash_directory/",
		seed,
		full_size,
		light,
		NULL
	);
	BOOST_ASSERT(full);
{
const std::string
expected = "2da2b506f21070e1143d908e867962486d6b0a02e31d468fd5e3a7143aafa76a14201f63374314e2a6aaf84ad2eb57105dea3378378965a1b3873453bb2b78f9a8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995ca8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995ca8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995c259440b89fa3481c2c33171477c305c8e1e421f8d8f6d59585449d0034f3e421808d8da6bbd0b6378f567647cc6c4ba6c434592b198ad444e7284905b7c6adaf70bf43ec2daa7bd5e8951aa609ab472c124cf9eba3d38cff5091dc3f58409edcc386c743c3bd66f92408796ee1e82dd149eaefbf52b00ce33014a6eb3e50625413b072a58bc01da28262f42cbe4f87d4abc2bf287d15618405a1fe4e386fcdafbb171064bd99901d8f81dd6789396ce5e364ac944bbbd75a7827291c70b42d26385910cd53ca535ab29433dd5c5714d26e0dce95514c5ef866329c12e958097e84462197c2b32087849dab33e88b11da61d52f9dbc0b92cc61f742c07dbbf751c49d7678624ee60dfbe62e5e8c47a03d8247643f3d16ad8c8e663953bcda1f59d7e2d4a9bf0768e789432212621967a8f41121ad1df6ae1fa78782530695414c6213942865b2730375019105cae91a4c17a558d4b63059661d9f108362143107babe0b848de412e4da59168cce82bfbff3c99e022dd6ac1e559db991f2e3f7bb910cefd173e65ed00a8d5d416534e2c8416ff23977dbf3eb7180b75c71580d08ce95efeb9b0afe904ea12285a392aff0c8561ff79fca67f694a62b9e52377485c57cc3598d84cac0a9d27960de0cc31ff9bbfe455acaa62c8aa5d2cce96f345da9afe843d258a99c4eaf3650fc62efd81c7b81cd0d534d2d71eeda7a6e315d540b4473c80f8730037dc2ae3e47b986240cfc65ccc565f0d8cde0bc68a57e39a271dda57440b3598bee19f799611d25731a96b5dbbbefdff6f4f656161462633030d62560ea4e9c161cf78fc96a2ca5aaa32453a6c5dea206f766244e8c9d9a8dc61185ce37f1fc804459c5f07434f8ecb34141b8dcae7eae704c950b55556c5f40140c3714b45eddb02637513268778cbf937a33e4e33183685f9deb31ef54e90161e76d969587dd782eaa94e289420e7c2ee908517f5893a26fdb5873d68f92d118d4bcf98d7a4916794d6ab290045e30f9ea00ca547c584b8482b0331ba1539a0f2714fddc3a0b06b0cfbb6a607b8339c39bcfd6640b1f653e9d70ef6c985b",
actual = bytesToHexString((uint8_t const *) light->cache, cache_size);
BOOST_REQUIRE_MESSAGE(expected == actual,
"\nexpected: " << expected.c_str() << "\n"
<< "actual: " << actual.c_str() << "\n");
}
{
node node;
ethash_calculate_dag_item(&node, 0, light);
const std::string
actual = bytesToHexString((uint8_t const *) &node, sizeof(node)),
expected = "b1698f829f90b35455804e5185d78f549fcb1bdce2bee006d4d7e68eb154b596be1427769eb1c3c3e93180c760af75f81d1023da6a0ffbe321c153a7c0103597";
BOOST_REQUIRE_MESSAGE(actual == expected,
"\n" << "expected: " << expected.c_str() << "\n"
<< "actual: " << actual.c_str() << "\n");
}
{
for (int i = 0; i < full_size / sizeof(node); ++i) {
for (uint32_t j = 0; j < 32; ++j) {
node expected_node;
ethash_calculate_dag_item(&expected_node, j, light);
const std::string
actual = bytesToHexString((uint8_t const *) &(full->data[j]), sizeof(node)),
expected = bytesToHexString((uint8_t const *) &expected_node, sizeof(node));
BOOST_REQUIRE_MESSAGE(actual == expected,
"\ni: " << j << "\n"
<< "expected: " << expected.c_str() << "\n"
<< "actual: " << actual.c_str() << "\n");
}
}
}
{
uint64_t nonce = 0x7c7c597c;
full_out = ethash_full_compute(full, hash, nonce);
BOOST_REQUIRE(full_out.success);
light_out = ethash_light_compute_internal(light, full_size, hash, nonce);
BOOST_REQUIRE(light_out.success);
const std::string
light_result_string = blockhashToHexString(&light_out.result),
full_result_string = blockhashToHexString(&full_out.result);
BOOST_REQUIRE_MESSAGE(light_result_string == full_result_string,
"\nlight result: " << light_result_string.c_str() << "\n"
<< "full result: " << full_result_string.c_str() << "\n");
const std::string
light_mix_hash_string = blockhashToHexString(&light_out.mix_hash),
full_mix_hash_string = blockhashToHexString(&full_out.mix_hash);
BOOST_REQUIRE_MESSAGE(full_mix_hash_string == light_mix_hash_string,
"\nlight mix hash: " << light_mix_hash_string.c_str() << "\n"
<< "full mix hash: " << full_mix_hash_string.c_str() << "\n");
ethash_h256_t check_hash;
ethash_quick_hash(&check_hash, &hash, nonce, &full_out.mix_hash);
const std::string check_hash_string = blockhashToHexString(&check_hash);
BOOST_REQUIRE_MESSAGE(check_hash_string == full_result_string,
"\ncheck hash string: " << check_hash_string.c_str() << "\n"
<< "full result: " << full_result_string.c_str() << "\n");
}
{
full_out = ethash_full_compute(full, hash, 5);
BOOST_REQUIRE(full_out.success);
std::string
light_result_string = blockhashToHexString(&light_out.result),
full_result_string = blockhashToHexString(&full_out.result);
BOOST_REQUIRE_MESSAGE(light_result_string != full_result_string,
"\nlight result and full result should differ: " << light_result_string.c_str() << "\n");
light_out = ethash_light_compute_internal(light, full_size, hash, 5);
BOOST_REQUIRE(light_out.success);
light_result_string = blockhashToHexString(&light_out.result);
BOOST_REQUIRE_MESSAGE(light_result_string == full_result_string,
"\nlight result and full result should be the same\n"
<< "light result: " << light_result_string.c_str() << "\n"
<< "full result: " << full_result_string.c_str() << "\n");
std::string
light_mix_hash_string = blockhashToHexString(&light_out.mix_hash),
full_mix_hash_string = blockhashToHexString(&full_out.mix_hash);
BOOST_REQUIRE_MESSAGE(full_mix_hash_string == light_mix_hash_string,
"\nlight mix hash: " << light_mix_hash_string.c_str() << "\n"
<< "full mix hash: " << full_mix_hash_string.c_str() << "\n");
BOOST_REQUIRE_MESSAGE(ethash_check_difficulty(&full_out.result, &difficulty),
"ethash_check_difficulty failed"
);
BOOST_REQUIRE_MESSAGE(ethash_quick_check_difficulty(&hash, 5U, &full_out.mix_hash, &difficulty),
"ethash_quick_check_difficulty failed"
);
}
ethash_light_delete(light);
ethash_full_delete(full);
fs::remove_all("./test_ethash_directory/");
}
BOOST_AUTO_TEST_CASE(ethash_full_new_when_dag_exists_with_wrong_size) {
uint64_t full_size;
uint64_t cache_size;
ethash_h256_t seed;
ethash_h256_t hash;
ethash_return_value_t full_out;
ethash_return_value_t light_out;
memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
cache_size = 1024;
full_size = 1024 * 32;
// first make a DAG file of "wrong size"
FILE *f;
BOOST_REQUIRE_EQUAL(
ETHASH_IO_MEMO_MISMATCH,
ethash_io_prepare("./test_ethash_directory/", seed, &f, 64, false)
);
fclose(f);
// then create new DAG, which should detect the wrong size and force create a new file
ethash_light_t light = ethash_light_new_internal(cache_size, &seed);
BOOST_ASSERT(light);
ethash_full_t full = ethash_full_new_internal(
"./test_ethash_directory/",
seed,
full_size,
light,
NULL
);
BOOST_ASSERT(full);
{
uint64_t nonce = 0x7c7c597c;
full_out = ethash_full_compute(full, hash, nonce);
BOOST_REQUIRE(full_out.success);
light_out = ethash_light_compute_internal(light, full_size, hash, nonce);
BOOST_REQUIRE(light_out.success);
const std::string
light_result_string = blockhashToHexString(&light_out.result),
full_result_string = blockhashToHexString(&full_out.result);
BOOST_REQUIRE_MESSAGE(light_result_string == full_result_string,
"\nlight result: " << light_result_string.c_str() << "\n"
<< "full result: " << full_result_string.c_str() << "\n");
const std::string
light_mix_hash_string = blockhashToHexString(&light_out.mix_hash),
full_mix_hash_string = blockhashToHexString(&full_out.mix_hash);
BOOST_REQUIRE_MESSAGE(full_mix_hash_string == light_mix_hash_string,
"\nlight mix hash: " << light_mix_hash_string.c_str() << "\n"
<< "full mix hash: " << full_mix_hash_string.c_str() << "\n");
ethash_h256_t check_hash;
ethash_quick_hash(&check_hash, &hash, nonce, &full_out.mix_hash);
const std::string check_hash_string = blockhashToHexString(&check_hash);
BOOST_REQUIRE_MESSAGE(check_hash_string == full_result_string,
"\ncheck hash string: " << check_hash_string.c_str() << "\n"
<< "full result: " << full_result_string.c_str() << "\n");
}
ethash_light_delete(light);
ethash_full_delete(full);
fs::remove_all("./test_ethash_directory/");
}
// could have used dev::contentsNew but don't wanna try to import
// libdevcore just for one function
static std::vector<char> readFileIntoVector(char const* filename)
{
ifstream ifs(filename, ios::binary|ios::ate);
ifstream::pos_type pos = ifs.tellg();
static bool g_executed = false;
static unsigned g_prev_progress = 0;
static int test_full_callback(unsigned _progress)
{
g_executed = true;
BOOST_CHECK(_progress >= g_prev_progress);
g_prev_progress = _progress;
return 0;
}
std::vector<char> result((unsigned int)pos);
static int test_full_callback_that_fails(unsigned _progress)
{
return 1;
}
ifs.seekg(0, ios::beg);
ifs.read(&result[0], pos);
static int test_full_callback_create_incomplete_dag(unsigned _progress)
{
if (_progress >= 30) {
return 1;
}
return 0;
}
BOOST_AUTO_TEST_CASE(full_client_callback) {
uint64_t full_size;
uint64_t cache_size;
ethash_h256_t seed;
ethash_h256_t hash;
memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
cache_size = 1024;
full_size = 1024 * 32;
ethash_light_t light = ethash_light_new_internal(cache_size, &seed);
ethash_full_t full = ethash_full_new_internal(
"./test_ethash_directory/",
seed,
full_size,
light,
test_full_callback
);
BOOST_ASSERT(full);
BOOST_CHECK(g_executed);
BOOST_REQUIRE_EQUAL(g_prev_progress, 100);
ethash_full_delete(full);
ethash_light_delete(light);
fs::remove_all("./test_ethash_directory/");
}
BOOST_AUTO_TEST_CASE(failing_full_client_callback) {
uint64_t full_size;
uint64_t cache_size;
ethash_h256_t seed;
ethash_h256_t hash;
memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
cache_size = 1024;
full_size = 1024 * 32;
ethash_light_t light = ethash_light_new_internal(cache_size, &seed);
ethash_full_t full = ethash_full_new_internal(
"./test_ethash_directory/",
seed,
full_size,
light,
test_full_callback_that_fails
);
BOOST_ASSERT(!full);
ethash_light_delete(light);
fs::remove_all("./test_ethash_directory/");
}
return result;
BOOST_AUTO_TEST_CASE(test_incomplete_dag_file) {
uint64_t full_size;
uint64_t cache_size;
ethash_h256_t seed;
ethash_h256_t hash;
memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
cache_size = 1024;
full_size = 1024 * 32;
ethash_light_t light = ethash_light_new_internal(cache_size, &seed);
// create a full but stop at 30%, so no magic number is written
ethash_full_t full = ethash_full_new_internal(
"./test_ethash_directory/",
seed,
full_size,
light,
test_full_callback_create_incomplete_dag
);
BOOST_ASSERT(!full);
FILE *f = NULL;
// confirm that we get a size_mismatch because the magic number is missing
BOOST_REQUIRE_EQUAL(
ETHASH_IO_MEMO_SIZE_MISMATCH,
ethash_io_prepare("./test_ethash_directory/", seed, &f, full_size, false)
);
ethash_light_delete(light);
fs::remove_all("./test_ethash_directory/");
}
BOOST_AUTO_TEST_CASE(test_block_verification) {
ethash_light_t light = ethash_light_new(22);
ethash_h256_t seedhash = stringToBlockhash("372eca2454ead349c3df0ab5d00b0b706b23e49d469387db91811cee0358fc6d");
BOOST_ASSERT(light);
ethash_return_value_t ret = ethash_light_compute(
light,
seedhash,
0x495732e0ed7a801c
);
BOOST_REQUIRE_EQUAL(blockhashToHexString(&ret.result), "00000b184f1fdd88bfd94c86c39e65db0c36144d5e43f745f722196e730cb614");
ethash_light_delete(light);
}
// Test of Full DAG creation with the minimal ethash.h API.
// Commented out since travis tests would take too much time.
// Uncomment and run on your own machine if you want to confirm
// it works fine.
#if 0
static int lef_cb(unsigned _progress)
{
printf("CREATING DAG. PROGRESS: %u\n", _progress);
fflush(stdout);
return 0;
}
BOOST_AUTO_TEST_CASE(full_dag_test) {
ethash_light_t light = ethash_light_new(55);
BOOST_ASSERT(light);
ethash_full_t full = ethash_full_new(light, lef_cb);
BOOST_ASSERT(full);
ethash_light_delete(light);
ethash_full_delete(full);
}
}
BOOST_AUTO_TEST_CASE(test_ethash_io_memo_file_contents) {
ethash_blockhash_t seedhash;
static const int blockn = 0;
ethash_get_seedhash(&seedhash, blockn);
BOOST_REQUIRE_EQUAL(
ETHASH_IO_MEMO_MISMATCH,
ethash_io_prepare("./test_ethash_directory/", seedhash)
);
// let's make sure that the directory was created
BOOST_REQUIRE(fs::is_directory(fs::path("./test_ethash_directory/")));
ethash_cache cache;
ethash_params params;
uint8_t *data;
uint64_t size;
ethash_params_init(&params, blockn);
params.cache_size = 1024;
params.full_size = 1024 * 32;
cache.mem = our_alloca(params.cache_size);
ethash_mkcache(&cache, &params, &seedhash);
BOOST_REQUIRE(
ethash_io_write("./test_ethash_directory/", &params, seedhash, &cache, &data, &size)
);
BOOST_REQUIRE(fs::exists(fs::path("./test_ethash_directory/full")));
BOOST_REQUIRE(fs::exists(fs::path("./test_ethash_directory/full.info")));
char expect_buffer[DAG_MEMO_BYTESIZE];
ethash_io_serialize_info(REVISION, seedhash, expect_buffer);
auto vec = readFileIntoVector("./test_ethash_directory/full.info");
BOOST_REQUIRE_EQUAL(vec.size(), DAG_MEMO_BYTESIZE);
BOOST_REQUIRE(memcmp(expect_buffer, &vec[0], DAG_MEMO_BYTESIZE) == 0);
// cleanup
fs::remove_all("./test_ethash_directory/");
free(data);
}
#endif

@@ -3,6 +3,13 @@
# Strict mode
set -e
VALGRIND_ARGS="--tool=memcheck"
VALGRIND_ARGS+=" --leak-check=yes"
VALGRIND_ARGS+=" --track-origins=yes"
VALGRIND_ARGS+=" --show-reachable=yes"
VALGRIND_ARGS+=" --num-callers=20"
VALGRIND_ARGS+=" --track-fds=yes"
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
@@ -17,3 +24,9 @@ cd $TEST_DIR/build ;
cmake ../../.. > /dev/null
make Test
./test/c/Test
# If we have valgrind also run memory check tests
if hash valgrind 2>/dev/null; then
echo "======== Running tests under valgrind ========";
cd $TEST_DIR/build/ && valgrind $VALGRIND_ARGS ./test/c/Test
fi

@@ -1,82 +0,0 @@
package ethashTest
import (
"bytes"
"crypto/rand"
"encoding/hex"
"log"
"math/big"
"testing"
"github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/ethdb"
)
func TestEthash(t *testing.T) {
seedHash := make([]byte, 32)
_, err := rand.Read(seedHash)
if err != nil {
panic(err)
}
db, err := ethdb.NewMemDatabase()
if err != nil {
panic(err)
}
blockProcessor, err := core.NewCanonical(5, db)
if err != nil {
panic(err)
}
log.Println("Block Number: ", blockProcessor.ChainManager().CurrentBlock().Number())
e := ethash.New(blockProcessor.ChainManager())
miningHash := make([]byte, 32)
if _, err := rand.Read(miningHash); err != nil {
panic(err)
}
diff := big.NewInt(10000)
log.Println("difficulty", diff)
nonce := uint64(0)
ghash_full := e.FullHash(nonce, miningHash)
log.Printf("ethash full (on nonce): %x %x\n", ghash_full, nonce)
ghash_light := e.LightHash(nonce, miningHash)
log.Printf("ethash light (on nonce): %x %x\n", ghash_light, nonce)
if bytes.Compare(ghash_full, ghash_light) != 0 {
t.Errorf("full: %x, light: %x", ghash_full, ghash_light)
}
}
func TestGetSeedHash(t *testing.T) {
seed0, err := ethash.GetSeedHash(0)
if err != nil {
t.Errorf("Failed to get seedHash for block 0: %v", err)
}
if bytes.Compare(seed0, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}) != 0 {
log.Printf("seedHash for block 0 should be 0s, was: %v\n", seed0)
}
seed1, err := ethash.GetSeedHash(30000)
if err != nil {
t.Error(err)
}
// From python:
// > from pyethash import get_seedhash
// > get_seedhash(30000)
expectedSeed1, err := hex.DecodeString("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
if err != nil {
t.Error(err)
}
if bytes.Compare(seed1, expectedSeed1) != 0 {
log.Printf("seedHash for block 1 should be: %v,\nactual value: %v\n", expectedSeed1, seed1)
}
}

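The expected value for block 30000 above was derived from pyethash's get_seedhash. A standalone re-check of that constant against the Go wrapper could look like the sketch below, assuming ethash.GetSeedHash keeps the signature used in the test above and that the file sits alongside the wrapper package; the test name is hypothetical.

package ethash_test

import (
	"bytes"
	"encoding/hex"
	"testing"

	"github.com/ethereum/ethash"
)

// Sketch only: re-checks the pyethash-derived seed hash for block 30000,
// assuming GetSeedHash keeps the signature used in the test above.
func TestSeedHashEpochOne(t *testing.T) {
	seed, err := ethash.GetSeedHash(30000)
	if err != nil {
		t.Fatal(err)
	}
	want, err := hex.DecodeString("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(seed, want) {
		t.Errorf("seed hash mismatch: got %x, want %x", seed, want)
	}
}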
@@ -24,8 +24,9 @@ fi
echo -e "\n################# Testing C ##################"
$TEST_DIR/c/test.sh
echo -e "\n################# Testing Python ##################"
$TEST_DIR/python/test.sh
# Temporarily commenting out python tests until they conform to the API
#echo -e "\n################# Testing Python ##################"
#$TEST_DIR/python/test.sh
#echo "################# Testing Go ##################"
#$TEST_DIR/go/test.sh
echo "################# Testing Go ##################"
cd $TEST_DIR/.. && go test -timeout 9999s

@@ -27,8 +27,10 @@ import (
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/codegangsta/cli"
@@ -601,12 +603,32 @@ func dump(ctx *cli.Context) {
}
func makedag(ctx *cli.Context) {
chain, _, _ := utils.GetChain(ctx)
pow := ethash.New(chain)
fmt.Println("making cache")
pow.UpdateCache(0, true)
fmt.Println("making DAG")
pow.UpdateDAG()
args := ctx.Args()
wrongArgs := func() {
utils.Fatalf(`Usage: geth makedag <block number> <outputdir>`)
}
switch {
case len(args) == 2:
blockNum, err := strconv.ParseUint(args[0], 0, 64)
dir := args[1]
if err != nil {
wrongArgs()
} else {
dir = filepath.Clean(dir)
// seems to require a trailing slash
if !strings.HasSuffix(dir, "/") {
dir = dir + "/"
}
_, err = ioutil.ReadDir(dir)
if err != nil {
utils.Fatalf("Can't find dir")
}
fmt.Println("making DAG, this could take awhile...")
ethash.MakeDAG(blockNum, dir)
}
default:
wrongArgs()
}
}
func version(c *cli.Context) {

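For reference, the call the new subcommand ultimately makes is ethash.MakeDAG(blockNum, dir), invoked from the command line as "geth makedag <block number> <outputdir>". A minimal programmatic sketch follows; the block number and output path are hypothetical examples, the directory is assumed to already exist with a trailing slash as the handler above requires, and any error reporting from the wrapper is omitted.

// Hypothetical standalone equivalent of what the makedag handler above wires
// up; the block number and path below are examples only.
package main

import "github.com/ethereum/ethash"

func main() {
	// The output directory must already exist; cmd/geth above also
	// normalises it to carry a trailing slash before handing it over.
	ethash.MakeDAG(360000, "/tmp/dag/")
}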
@@ -316,7 +316,7 @@ func GetChain(ctx *cli.Context) (*core.ChainManager, common.Database, common.Dat
eventMux := new(event.TypeMux)
chainManager := core.NewChainManager(blockDb, stateDb, eventMux)
pow := ethash.New(chainManager)
pow := ethash.New()
txPool := core.NewTxPool(eventMux, chainManager.State, chainManager.GasLimit)
blockProcessor := core.NewBlockProcessor(stateDb, extraDb, pow, txPool, chainManager, eventMux)
chainManager.SetProcessor(blockProcessor)

@@ -14,8 +14,8 @@ import (
// So we can generate blocks easily
type FakePow struct{}
func (f FakePow) Search(block pow.Block, stop <-chan struct{}) (uint64, []byte, []byte) {
return 0, nil, nil
func (f FakePow) Search(block pow.Block, stop <-chan struct{}) (uint64, []byte) {
return 0, nil
}
func (f FakePow) Verify(block pow.Block) bool { return true }
func (f FakePow) GetHashrate() int64 { return 0 }

@@ -220,7 +220,7 @@ func New(config *Config) (*Ethereum, error) {
eth.chainManager = core.NewChainManager(blockDb, stateDb, eth.EventMux())
eth.downloader = downloader.New(eth.chainManager.HasBlock, eth.chainManager.GetBlock)
eth.pow = ethash.New(eth.chainManager)
eth.pow = ethash.New()
eth.txPool = core.NewTxPool(eth.EventMux(), eth.chainManager.State, eth.chainManager.GasLimit)
eth.blockProcessor = core.NewBlockProcessor(stateDb, extraDb, eth.pow, eth.txPool, eth.chainManager, eth.EventMux())
eth.chainManager.SetProcessor(eth.blockProcessor)
@@ -318,7 +318,6 @@ func (s *Ethereum) PeersInfo() (peersinfo []*PeerInfo) {
func (s *Ethereum) ResetWithGenesisBlock(gb *types.Block) {
s.chainManager.ResetWithGenesisBlock(gb)
s.pow.UpdateCache(0, true)
}
func (s *Ethereum) StartMining() error {

@@ -85,7 +85,7 @@ func (self *CpuMiner) mine(block *types.Block) {
self.chMu.Unlock()
// Mine
nonce, mixDigest, _ := self.pow.Search(block, self.quitCurrentOp)
nonce, mixDigest := self.pow.Search(block, self.quitCurrentOp)
if nonce != 0 {
block.SetNonce(nonce)
block.Header().MixDigest = common.BytesToHash(mixDigest)

@@ -3,7 +3,6 @@ package miner
import (
"math/big"
"github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
@@ -41,13 +40,7 @@ func (self *Miner) Mining() bool {
func (self *Miner) Start(coinbase common.Address) {
self.mining = true
self.worker.coinbase = coinbase
if self.threads > 0 {
self.pow.(*ethash.Ethash).UpdateDAG()
}
self.worker.start()
self.worker.commitNewWork()
}

@@ -6,8 +6,8 @@ import (
"math/rand"
"time"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/logger"
)
@@ -44,7 +44,7 @@ func (dag *Dagger) Find(obj *big.Int, resChan chan int64) {
resChan <- 0
}
func (dag *Dagger) Search(hash, diff *big.Int) ([]byte, []byte, []byte) {
func (dag *Dagger) Search(hash, diff *big.Int) (uint64, []byte) {
// TODO fix multi threading. Somehow it results in the wrong nonce
amountOfRoutines := 1
@@ -69,7 +69,7 @@ func (dag *Dagger) Search(hash, diff *big.Int) ([]byte, []byte, []byte) {
}
}
return big.NewInt(res).Bytes(), nil, nil
return uint64(res), nil
}
func (dag *Dagger) Verify(hash, diff, nonce *big.Int) bool {

@@ -32,7 +32,7 @@ func (pow *EasyPow) Turbo(on bool) {
pow.turbo = on
}
func (pow *EasyPow) Search(block pow.Block, stop <-chan struct{}) (uint64, []byte, []byte) {
func (pow *EasyPow) Search(block pow.Block, stop <-chan struct{}) (uint64, []byte) {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
hash := block.HashNoNonce()
diff := block.Difficulty()
@@ -57,7 +57,7 @@ empty:
for {
select {
case <-stop:
return 0, nil, nil
return 0, nil
default:
i++
@@ -67,7 +67,7 @@ empty:
sha := uint64(r.Int63())
if verify(hash, diff, sha) {
return sha, nil, nil
return sha, nil
}
}
@@ -76,7 +76,7 @@ empty:
}
}
return 0, nil, nil
return 0, nil
}
func (pow *EasyPow) Verify(block pow.Block) bool {

@@ -1,7 +1,7 @@
package pow
type PoW interface {
Search(block Block, stop <-chan struct{}) (uint64, []byte, []byte)
Search(block Block, stop <-chan struct{}) (uint64, []byte)
Verify(block Block) bool
GetHashrate() int64
Turbo(bool)

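A proof-of-work backend now returns only a nonce and a mix digest from Search. A minimal no-op type satisfying the updated interface, modelled on the FakePow helper above, is sketched below; the package and type names are hypothetical, and the compile-time check assumes the interface contains only the methods shown.

// A no-op backend satisfying the updated pow.PoW interface, modelled on the
// FakePow helper above; the package and type names here are hypothetical.
package nullpow

import "github.com/ethereum/go-ethereum/pow"

type NullPow struct{ turbo bool }

// Search reports only a nonce and a mix digest; a real backend would loop
// until it finds a valid nonce or the stop channel fires.
func (n *NullPow) Search(block pow.Block, stop <-chan struct{}) (uint64, []byte) {
	select {
	case <-stop:
		return 0, nil
	default:
		return 0, nil
	}
}

func (n *NullPow) Verify(block pow.Block) bool { return true }

func (n *NullPow) GetHashrate() int64 { return 0 }

func (n *NullPow) Turbo(on bool) { n.turbo = on }

// Compile-time check that NullPow implements pow.PoW.
var _ pow.PoW = (*NullPow)(nil)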
@@ -13,42 +13,37 @@ import (
// TODO: refactor test setup & execution to better align with vm and tx tests
func TestBcValidBlockTests(t *testing.T) {
t.Skip("Skipped in lieu of performance fixes.")
runBlockTestsInFile("files/BlockTests/bcValidBlockTest.json", []string{}, t)
}
func TestBcUncleTests(t *testing.T) {
t.Skip("Skipped in lieu of performance fixes.")
t.Skip("Skipped until https://github.com/ethereum/go-ethereum/pull/857 is merged.")
runBlockTestsInFile("files/BlockTests/bcUncleTest.json", []string{}, t)
}
func TestBcUncleHeaderValidityTests(t *testing.T) {
t.Skip("Skipped in lieu of performance fixes.")
t.Skip("Skipped until https://github.com/ethereum/go-ethereum/pull/857 is merged.")
runBlockTestsInFile("files/BlockTests/bcUncleHeaderValiditiy.json", []string{}, t)
}
func TestBcInvalidHeaderTests(t *testing.T) {
t.Skip("Skipped in lieu of performance fixes.")
runBlockTestsInFile("files/BlockTests/bcInvalidHeaderTest.json", []string{}, t)
}
func TestBcInvalidRLPTests(t *testing.T) {
t.Skip("Skipped in lieu of performance fixes.")
runBlockTestsInFile("files/BlockTests/bcInvalidRLPTest.json", []string{}, t)
}
func TestBcJSAPITests(t *testing.T) {
t.Skip("Skipped in lieu of performance fixes.")
runBlockTestsInFile("files/BlockTests/bcJS_API_Test.json", []string{}, t)
}
func TestBcRPCAPITests(t *testing.T) {
t.Skip("Skipped in lieu of performance fixes.")
t.Skip("Skipped until https://github.com/ethereum/go-ethereum/pull/857 is merged.")
runBlockTestsInFile("files/BlockTests/bcRPC_API_Test.json", []string{}, t)
}
func TestBcForkBlockTests(t *testing.T) {
t.Skip("Skipped in lieu of performance fixes.")
runBlockTestsInFile("files/BlockTests/bcForkBlockTest.json", []string{}, t)
}
@@ -71,7 +66,6 @@ func runBlockTestsInFile(filepath string, snafus []string, t *testing.T) {
}
func runBlockTest(name string, test *BlockTest, t *testing.T) {
t.Log("Running test: ", name)
cfg := testEthConfig()
ethereum, err := eth.New(cfg)
if err != nil {
@@ -100,7 +94,7 @@ func runBlockTest(name string, test *BlockTest, t *testing.T) {
if err = test.ValidatePostState(statedb); err != nil {
t.Fatal("post state validation failed: %v", err)
}
t.Log("Test passed: ", name)
}
func testEthConfig() *eth.Config {
