bmt: golint updates for this or self warning (#16628)

* bmt/*: golint updates for this or self warning

* Update bmt.go
Branch: release/1.8
Authored by kiel barry 7 years ago, committed by Péter Szilágyi
parent fcc18f4c80
commit 784aa83942

1 changed file: bmt/bmt.go (178 lines changed)
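
For context, the golint warning this commit addresses is the one that flags generic method receiver names such as `self` or `this`; the change is a purely mechanical rename of receivers to short, type-derived names. A minimal sketch of the pattern follows (the Counter type is hypothetical and only illustrates the rename; the quoted lint message is paraphrased):

package main

import "fmt"

// Counter is a hypothetical type used only to illustrate the rename.
type Counter struct{ n int }

// Before the fix, golint reports something like:
//   "receiver name should be a reflection of its identity;
//    don't use generic names such as 'this' or 'self'"
// for a method written as:
//   func (self *Counter) Inc() { self.n++ }

// After the fix the receiver is a short name derived from the type,
// exactly as self -> p, t, h, n, e throughout the diff below.
func (c *Counter) Inc() { c.n++ }

// Value returns the current count.
func (c *Counter) Value() int { return c.n }

func main() {
	c := &Counter{}
	c.Inc()
	fmt.Println(c.Value()) // 1
}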

@@ -150,29 +150,29 @@ func NewTreePool(hasher BaseHasher, segmentCount, capacity int) *TreePool {
 }

 // Drain drains the pool until it has no more than n resources
-func (self *TreePool) Drain(n int) {
-	self.lock.Lock()
-	defer self.lock.Unlock()
-	for len(self.c) > n {
-		<-self.c
-		self.count--
+func (p *TreePool) Drain(n int) {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+	for len(p.c) > n {
+		<-p.c
+		p.count--
 	}
 }

 // Reserve is blocking until it returns an available Tree
 // it reuses free Trees or creates a new one if size is not reached
-func (self *TreePool) Reserve() *Tree {
-	self.lock.Lock()
-	defer self.lock.Unlock()
+func (p *TreePool) Reserve() *Tree {
+	p.lock.Lock()
+	defer p.lock.Unlock()
 	var t *Tree
-	if self.count == self.Capacity {
-		return <-self.c
+	if p.count == p.Capacity {
+		return <-p.c
 	}
 	select {
-	case t = <-self.c:
+	case t = <-p.c:
 	default:
-		t = NewTree(self.hasher, self.SegmentSize, self.SegmentCount)
-		self.count++
+		t = NewTree(p.hasher, p.SegmentSize, p.SegmentCount)
+		p.count++
 	}
 	return t
 }
@@ -180,8 +180,8 @@ func (self *TreePool) Reserve() *Tree {
 // Release gives back a Tree to the pool.
 // This Tree is guaranteed to be in reusable state
 // does not need locking
-func (self *TreePool) Release(t *Tree) {
-	self.c <- t // can never fail but...
+func (p *TreePool) Release(t *Tree) {
+	p.c <- t // can never fail but...
 }

 // Tree is a reusable control structure representing a BMT
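
The TreePool methods touched above form a simple channel-backed resource pool: Reserve blocks once count reaches Capacity, otherwise reuses a pooled Tree or allocates a new one; Release hands a Tree back; Drain shrinks the pool. A self-contained sketch of the same pattern, with illustrative names that are not part of the bmt API:

package main

import (
	"fmt"
	"sync"
)

// resource stands in for *Tree.
type resource struct{ id int }

// pool mirrors TreePool's Reserve/Release logic in miniature.
type pool struct {
	lock     sync.Mutex
	c        chan *resource
	count    int // resources created so far
	capacity int // hard limit on created resources
}

func newPool(capacity int) *pool {
	return &pool{c: make(chan *resource, capacity), capacity: capacity}
}

// reserve blocks when the capacity is exhausted; otherwise it reuses a
// pooled resource or creates a new one.
func (p *pool) reserve() *resource {
	p.lock.Lock()
	defer p.lock.Unlock()
	if p.count == p.capacity {
		return <-p.c
	}
	select {
	case r := <-p.c:
		return r
	default:
		p.count++
		return &resource{id: p.count}
	}
}

// release puts a resource back; the buffered channel can always take it.
func (p *pool) release(r *resource) { p.c <- r }

func main() {
	p := newPool(2)
	r1, r2 := p.reserve(), p.reserve()
	p.release(r1)
	p.release(r2)
	// the released resources are reused rather than re-created
	fmt.Println(p.reserve().id, p.reserve().id) // 1 2
}
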
@@ -193,17 +193,17 @@ type Tree struct {
 }

 // Draw draws the BMT (badly)
-func (self *Tree) Draw(hash []byte, d int) string {
+func (t *Tree) Draw(hash []byte, d int) string {
 	var left, right []string
 	var anc []*Node
-	for i, n := range self.leaves {
+	for i, n := range t.leaves {
 		left = append(left, fmt.Sprintf("%v", hashstr(n.left)))
 		if i%2 == 0 {
 			anc = append(anc, n.parent)
 		}
 		right = append(right, fmt.Sprintf("%v", hashstr(n.right)))
 	}
-	anc = self.leaves
+	anc = t.leaves
 	var hashes [][]string
 	for l := 0; len(anc) > 0; l++ {
 		var nodes []*Node
@@ -277,42 +277,42 @@ func NewTree(hasher BaseHasher, segmentSize, segmentCount int) *Tree {
 // methods needed by hash.Hash

 // Size returns the size
-func (self *Hasher) Size() int {
-	return self.size
+func (h *Hasher) Size() int {
+	return h.size
 }

 // BlockSize returns the block size
-func (self *Hasher) BlockSize() int {
-	return self.blocksize
+func (h *Hasher) BlockSize() int {
+	return h.blocksize
 }

 // Sum returns the hash of the buffer
 // hash.Hash interface Sum method appends the byte slice to the underlying
 // data before it calculates and returns the hash of the chunk
-func (self *Hasher) Sum(b []byte) (r []byte) {
-	t := self.bmt
-	i := self.cur
+func (h *Hasher) Sum(b []byte) (r []byte) {
+	t := h.bmt
+	i := h.cur
 	n := t.leaves[i]
 	j := i
 	// must run strictly before all nodes calculate
 	// datanodes are guaranteed to have a parent
-	if len(self.segment) > self.size && i > 0 && n.parent != nil {
+	if len(h.segment) > h.size && i > 0 && n.parent != nil {
 		n = n.parent
 	} else {
 		i *= 2
 	}
-	d := self.finalise(n, i)
-	self.writeSegment(j, self.segment, d)
-	c := <-self.result
-	self.releaseTree()
+	d := h.finalise(n, i)
+	h.writeSegment(j, h.segment, d)
+	c := <-h.result
+	h.releaseTree()
 	// sha3(length + BMT(pure_chunk))
-	if self.blockLength == nil {
+	if h.blockLength == nil {
 		return c
 	}
-	res := self.pool.hasher()
+	res := h.pool.hasher()
 	res.Reset()
-	res.Write(self.blockLength)
+	res.Write(h.blockLength)
 	res.Write(c)
 	return res.Sum(nil)
 }
@@ -321,8 +321,8 @@ func (self *Hasher) Sum(b []byte) (r []byte) {

 // Hash waits for the hasher result and returns it
 // caller must call this on a BMT Hasher being written to
-func (self *Hasher) Hash() []byte {
-	return <-self.result
+func (h *Hasher) Hash() []byte {
+	return <-h.result
 }

 // Hasher implements the io.Writer interface
@@ -330,16 +330,16 @@ func (self *Hasher) Hash() []byte {
 // Write fills the buffer to hash
 // with every full segment complete launches a hasher go routine
 // that shoots up the BMT
-func (self *Hasher) Write(b []byte) (int, error) {
+func (h *Hasher) Write(b []byte) (int, error) {
 	l := len(b)
 	if l <= 0 {
 		return 0, nil
 	}
-	s := self.segment
-	i := self.cur
-	count := (self.count + 1) / 2
-	need := self.count*self.size - self.cur*2*self.size
-	size := self.size
+	s := h.segment
+	i := h.cur
+	count := (h.count + 1) / 2
+	need := h.count*h.size - h.cur*2*h.size
+	size := h.size
 	if need > size {
 		size *= 2
 	}
@@ -356,7 +356,7 @@ func (self *Hasher) Write(b []byte) (int, error) {
 	// read full segments and the last possibly partial segment
 	for need > 0 && i < count-1 {
 		// push all finished chunks we read
-		self.writeSegment(i, s, self.depth)
+		h.writeSegment(i, s, h.depth)
 		need -= size
 		if need < 0 {
 			size += need
@@ -365,8 +365,8 @@ func (self *Hasher) Write(b []byte) (int, error) {
 		rest += size
 		i++
 	}
-	self.segment = s
-	self.cur = i
+	h.segment = s
+	h.cur = i
 	// otherwise, we can assume len(s) == 0, so all buffer is read and chunk is not yet full
 	return l, nil
 }
@@ -376,8 +376,8 @@ func (self *Hasher) Write(b []byte) (int, error) {
 // ReadFrom reads from io.Reader and appends to the data to hash using Write
 // it reads so that chunk to hash is maximum length or reader reaches EOF
 // caller must Reset the hasher prior to call
-func (self *Hasher) ReadFrom(r io.Reader) (m int64, err error) {
-	bufsize := self.size*self.count - self.size*self.cur - len(self.segment)
+func (h *Hasher) ReadFrom(r io.Reader) (m int64, err error) {
+	bufsize := h.size*h.count - h.size*h.cur - len(h.segment)
 	buf := make([]byte, bufsize)
 	var read int
 	for {
@@ -385,7 +385,7 @@ func (self *Hasher) ReadFrom(r io.Reader) (m int64, err error) {
 		n, err = r.Read(buf)
 		read += n
 		if err == io.EOF || read == len(buf) {
-			hash := self.Sum(buf[:n])
+			hash := h.Sum(buf[:n])
 			if read == len(buf) {
 				err = NewEOC(hash)
 			}
@@ -394,7 +394,7 @@ func (self *Hasher) ReadFrom(r io.Reader) (m int64, err error) {
 		if err != nil {
 			break
 		}
-		n, err = self.Write(buf[:n])
+		n, err = h.Write(buf[:n])
 		if err != nil {
 			break
 		}
@@ -403,9 +403,9 @@ func (self *Hasher) ReadFrom(r io.Reader) (m int64, err error) {
 }

 // Reset needs to be called before writing to the hasher
-func (self *Hasher) Reset() {
-	self.getTree()
-	self.blockLength = nil
+func (h *Hasher) Reset() {
+	h.getTree()
+	h.blockLength = nil
 }

 // Hasher implements the SwarmHash interface
@@ -413,52 +413,52 @@ func (self *Hasher) Reset() {
 // ResetWithLength needs to be called before writing to the hasher
 // the argument is supposed to be the byte slice binary representation of
 // the length of the data subsumed under the hash
-func (self *Hasher) ResetWithLength(l []byte) {
-	self.Reset()
-	self.blockLength = l
+func (h *Hasher) ResetWithLength(l []byte) {
+	h.Reset()
+	h.blockLength = l
 }

 // Release gives back the Tree to the pool whereby it unlocks
 // it resets tree, segment and index
-func (self *Hasher) releaseTree() {
-	if self.bmt != nil {
-		n := self.bmt.leaves[self.cur]
+func (h *Hasher) releaseTree() {
+	if h.bmt != nil {
+		n := h.bmt.leaves[h.cur]
 		for ; n != nil; n = n.parent {
 			n.unbalanced = false
 			if n.parent != nil {
 				n.root = false
 			}
 		}
-		self.pool.Release(self.bmt)
-		self.bmt = nil
+		h.pool.Release(h.bmt)
+		h.bmt = nil
 	}
-	self.cur = 0
-	self.segment = nil
+	h.cur = 0
+	h.segment = nil
 }

-func (self *Hasher) writeSegment(i int, s []byte, d int) {
-	h := self.pool.hasher()
-	n := self.bmt.leaves[i]
-	if len(s) > self.size && n.parent != nil {
+func (h *Hasher) writeSegment(i int, s []byte, d int) {
+	hash := h.pool.hasher()
+	n := h.bmt.leaves[i]
+	if len(s) > h.size && n.parent != nil {
 		go func() {
-			h.Reset()
-			h.Write(s)
-			s = h.Sum(nil)
+			hash.Reset()
+			hash.Write(s)
+			s = hash.Sum(nil)
 			if n.root {
-				self.result <- s
+				h.result <- s
 				return
 			}
-			self.run(n.parent, h, d, n.index, s)
+			h.run(n.parent, hash, d, n.index, s)
 		}()
 		return
 	}
-	go self.run(n, h, d, i*2, s)
+	go h.run(n, hash, d, i*2, s)
 }

-func (self *Hasher) run(n *Node, h hash.Hash, d int, i int, s []byte) {
+func (h *Hasher) run(n *Node, hash hash.Hash, d int, i int, s []byte) {
 	isLeft := i%2 == 0
 	for {
 		if isLeft {
@@ -470,18 +470,18 @@ func (self *Hasher) run(n *Node, h hash.Hash, d int, i int, s []byte) {
 			return
 		}
 		if !n.unbalanced || !isLeft || i == 0 && d == 0 {
-			h.Reset()
-			h.Write(n.left)
-			h.Write(n.right)
-			s = h.Sum(nil)
+			hash.Reset()
+			hash.Write(n.left)
+			hash.Write(n.right)
+			s = hash.Sum(nil)
 		} else {
 			s = append(n.left, n.right...)
 		}
-		self.hash = s
+		h.hash = s
 		if n.root {
-			self.result <- s
+			h.result <- s
 			return
 		}
@@ -492,20 +492,20 @@ func (self *Hasher) run(n *Node, h hash.Hash, d int, i int, s []byte) {
 }

 // getTree obtains a BMT resource by reserving one from the pool
-func (self *Hasher) getTree() *Tree {
-	if self.bmt != nil {
-		return self.bmt
+func (h *Hasher) getTree() *Tree {
+	if h.bmt != nil {
+		return h.bmt
 	}
-	t := self.pool.Reserve()
-	self.bmt = t
+	t := h.pool.Reserve()
+	h.bmt = t
 	return t
 }

 // atomic bool toggle implementing a concurrent reusable 2-state object
 // atomic addint with %2 implements atomic bool toggle
 // it returns true if the toggler just put it in the active/waiting state
-func (self *Node) toggle() bool {
-	return atomic.AddInt32(&self.state, 1)%2 == 1
+func (n *Node) toggle() bool {
+	return atomic.AddInt32(&n.state, 1)%2 == 1
 }

 func hashstr(b []byte) string {
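
The Node.toggle comment above describes the trick: atomically incrementing an int32 and taking its parity gives a lock-free, reusable two-state toggle, so of two paired callers exactly one sees true, which is how the tree code can decide which of two sibling writers proceeds. A tiny standalone demonstration of that parity toggle (names here are illustrative):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// node mirrors just the state field used by Node.toggle.
type node struct{ state int32 }

// toggle flips the two-state object; every odd increment returns true,
// so of two paired callers exactly one observes true.
func (n *node) toggle() bool {
	return atomic.AddInt32(&n.state, 1)%2 == 1
}

func main() {
	var n node
	var wg sync.WaitGroup
	var trues int32
	// two concurrent callers race to toggle the same node
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if n.toggle() {
				atomic.AddInt32(&trues, 1)
			}
		}()
	}
	wg.Wait()
	fmt.Println(trues) // 1: exactly one caller saw true
}
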
@@ -525,7 +525,7 @@ func depth(n int) (d int) {

 // finalise is following the zigzags on the tree belonging
 // to the final datasegment
-func (self *Hasher) finalise(n *Node, i int) (d int) {
+func (h *Hasher) finalise(n *Node, i int) (d int) {
 	isLeft := i%2 == 0
 	for {
 		// when the final segment's path is going via left segments
@@ -550,8 +550,8 @@ type EOC struct {
 }

 // Error returns the error string
-func (self *EOC) Error() string {
-	return fmt.Sprintf("hasher limit reached, chunk hash: %x", self.Hash)
+func (e *EOC) Error() string {
+	return fmt.Sprintf("hasher limit reached, chunk hash: %x", e.Hash)
 }

 // NewEOC creates new end of chunk error with the hash
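
The renamed methods are exactly the hash.Hash / SwarmHash surface of the package (Size, BlockSize, Write, Sum, Reset, ResetWithLength), so a usage sketch may help tie the hunks together. NewTreePool's signature is visible in the first hunk header; the bmt.New constructor and the 8-byte little-endian length encoding are assumptions for illustration and are not part of this diff:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/bmt"
)

func main() {
	// BaseHasher is a func() hash.Hash; sha256.New fits. 128 segments and
	// a pool capacity of 32 are illustrative values, not requirements.
	pool := bmt.NewTreePool(sha256.New, 128, 32)

	// bmt.New is assumed here to be the pool-backed Hasher constructor;
	// it lies outside the hunks shown above.
	h := bmt.New(pool)

	data := []byte("hello bmt")
	length := make([]byte, 8)
	binary.LittleEndian.PutUint64(length, uint64(len(data)))

	// SwarmHash flavour: prefix the result with the encoded data length,
	// per the sha3(length + BMT(pure_chunk)) comment in Sum above.
	h.ResetWithLength(length)
	h.Write(data)
	fmt.Printf("%x\n", h.Sum(nil))
}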
