mirror of https://github.com/ethereum/go-ethereum
commit 6eaa404187 (parent 0045ce4cde)
@@ -0,0 +1,171 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sha3

// This file implements the core Keccak permutation function necessary for computing SHA3.
// This is implemented in a separate file to allow for replacement by an optimized implementation.
// Nothing in this package is exported.
// For the detailed specification, refer to the Keccak web site (http://keccak.noekeon.org/).

// rc stores the round constants for use in the ι step.
var rc = [...]uint64{
	0x0000000000000001,
	0x0000000000008082,
	0x800000000000808A,
	0x8000000080008000,
	0x000000000000808B,
	0x0000000080000001,
	0x8000000080008081,
	0x8000000000008009,
	0x000000000000008A,
	0x0000000000000088,
	0x0000000080008009,
	0x000000008000000A,
	0x000000008000808B,
	0x800000000000008B,
	0x8000000000008089,
	0x8000000000008003,
	0x8000000000008002,
	0x8000000000000080,
	0x000000000000800A,
	0x800000008000000A,
	0x8000000080008081,
	0x8000000000008080,
	0x0000000080000001,
	0x8000000080008008,
}

// ro_xx represent the rotation offsets for use in the ρ step.
// Defining them as const instead of in an array allows the compiler to insert constant shifts.
const (
	ro_00 = 0
	ro_01 = 36
	ro_02 = 3
	ro_03 = 41
	ro_04 = 18
	ro_05 = 1
	ro_06 = 44
	ro_07 = 10
	ro_08 = 45
	ro_09 = 2
	ro_10 = 62
	ro_11 = 6
	ro_12 = 43
	ro_13 = 15
	ro_14 = 61
	ro_15 = 28
	ro_16 = 55
	ro_17 = 25
	ro_18 = 21
	ro_19 = 56
	ro_20 = 27
	ro_21 = 20
	ro_22 = 39
	ro_23 = 8
	ro_24 = 14
)
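Each rotation in keccakF below is written as x<<ro ^ x>>(64-ro), which a compiler can lower to a single rotate instruction when ro is a constant, hence the const block above. A minimal standalone sketch (not part of this commit, and it assumes a Go toolchain new enough to ship math/bits) showing that the shift/XOR form is just a 64-bit left rotation:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	const ro = 44 // same value as ro_06
	x := uint64(0x0123456789abcdef)

	manual := x<<ro ^ x>>(64-ro)        // shift/XOR form used in keccakF
	library := bits.RotateLeft64(x, ro) // standard-library rotate
	fmt.Println(manual == library)      // true
}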

// keccakF computes the complete Keccak-f function consisting of 24 rounds with a different
// constant (rc) in each round. This implementation fully unrolls the round function to avoid
// inner loops, as well as pre-calculating shift offsets.
func (d *digest) keccakF() {
	for _, roundConstant := range rc {
		// θ step
		d.c[0] = d.a[0] ^ d.a[5] ^ d.a[10] ^ d.a[15] ^ d.a[20]
		d.c[1] = d.a[1] ^ d.a[6] ^ d.a[11] ^ d.a[16] ^ d.a[21]
		d.c[2] = d.a[2] ^ d.a[7] ^ d.a[12] ^ d.a[17] ^ d.a[22]
		d.c[3] = d.a[3] ^ d.a[8] ^ d.a[13] ^ d.a[18] ^ d.a[23]
		d.c[4] = d.a[4] ^ d.a[9] ^ d.a[14] ^ d.a[19] ^ d.a[24]

		d.d[0] = d.c[4] ^ (d.c[1]<<1 ^ d.c[1]>>63)
		d.d[1] = d.c[0] ^ (d.c[2]<<1 ^ d.c[2]>>63)
		d.d[2] = d.c[1] ^ (d.c[3]<<1 ^ d.c[3]>>63)
		d.d[3] = d.c[2] ^ (d.c[4]<<1 ^ d.c[4]>>63)
		d.d[4] = d.c[3] ^ (d.c[0]<<1 ^ d.c[0]>>63)

		d.a[0] ^= d.d[0]
		d.a[1] ^= d.d[1]
		d.a[2] ^= d.d[2]
		d.a[3] ^= d.d[3]
		d.a[4] ^= d.d[4]
		d.a[5] ^= d.d[0]
		d.a[6] ^= d.d[1]
		d.a[7] ^= d.d[2]
		d.a[8] ^= d.d[3]
		d.a[9] ^= d.d[4]
		d.a[10] ^= d.d[0]
		d.a[11] ^= d.d[1]
		d.a[12] ^= d.d[2]
		d.a[13] ^= d.d[3]
		d.a[14] ^= d.d[4]
		d.a[15] ^= d.d[0]
		d.a[16] ^= d.d[1]
		d.a[17] ^= d.d[2]
		d.a[18] ^= d.d[3]
		d.a[19] ^= d.d[4]
		d.a[20] ^= d.d[0]
		d.a[21] ^= d.d[1]
		d.a[22] ^= d.d[2]
		d.a[23] ^= d.d[3]
		d.a[24] ^= d.d[4]

		// ρ and π steps
		d.b[0] = d.a[0]
		d.b[1] = d.a[6]<<ro_06 ^ d.a[6]>>(64-ro_06)
		d.b[2] = d.a[12]<<ro_12 ^ d.a[12]>>(64-ro_12)
		d.b[3] = d.a[18]<<ro_18 ^ d.a[18]>>(64-ro_18)
		d.b[4] = d.a[24]<<ro_24 ^ d.a[24]>>(64-ro_24)
		d.b[5] = d.a[3]<<ro_15 ^ d.a[3]>>(64-ro_15)
		d.b[6] = d.a[9]<<ro_21 ^ d.a[9]>>(64-ro_21)
		d.b[7] = d.a[10]<<ro_02 ^ d.a[10]>>(64-ro_02)
		d.b[8] = d.a[16]<<ro_08 ^ d.a[16]>>(64-ro_08)
		d.b[9] = d.a[22]<<ro_14 ^ d.a[22]>>(64-ro_14)
		d.b[10] = d.a[1]<<ro_05 ^ d.a[1]>>(64-ro_05)
		d.b[11] = d.a[7]<<ro_11 ^ d.a[7]>>(64-ro_11)
		d.b[12] = d.a[13]<<ro_17 ^ d.a[13]>>(64-ro_17)
		d.b[13] = d.a[19]<<ro_23 ^ d.a[19]>>(64-ro_23)
		d.b[14] = d.a[20]<<ro_04 ^ d.a[20]>>(64-ro_04)
		d.b[15] = d.a[4]<<ro_20 ^ d.a[4]>>(64-ro_20)
		d.b[16] = d.a[5]<<ro_01 ^ d.a[5]>>(64-ro_01)
		d.b[17] = d.a[11]<<ro_07 ^ d.a[11]>>(64-ro_07)
		d.b[18] = d.a[17]<<ro_13 ^ d.a[17]>>(64-ro_13)
		d.b[19] = d.a[23]<<ro_19 ^ d.a[23]>>(64-ro_19)
		d.b[20] = d.a[2]<<ro_10 ^ d.a[2]>>(64-ro_10)
		d.b[21] = d.a[8]<<ro_16 ^ d.a[8]>>(64-ro_16)
		d.b[22] = d.a[14]<<ro_22 ^ d.a[14]>>(64-ro_22)
		d.b[23] = d.a[15]<<ro_03 ^ d.a[15]>>(64-ro_03)
		d.b[24] = d.a[21]<<ro_09 ^ d.a[21]>>(64-ro_09)

		// χ step
		d.a[0] = d.b[0] ^ (^d.b[1] & d.b[2])
		d.a[1] = d.b[1] ^ (^d.b[2] & d.b[3])
		d.a[2] = d.b[2] ^ (^d.b[3] & d.b[4])
		d.a[3] = d.b[3] ^ (^d.b[4] & d.b[0])
		d.a[4] = d.b[4] ^ (^d.b[0] & d.b[1])
		d.a[5] = d.b[5] ^ (^d.b[6] & d.b[7])
		d.a[6] = d.b[6] ^ (^d.b[7] & d.b[8])
		d.a[7] = d.b[7] ^ (^d.b[8] & d.b[9])
		d.a[8] = d.b[8] ^ (^d.b[9] & d.b[5])
		d.a[9] = d.b[9] ^ (^d.b[5] & d.b[6])
		d.a[10] = d.b[10] ^ (^d.b[11] & d.b[12])
		d.a[11] = d.b[11] ^ (^d.b[12] & d.b[13])
		d.a[12] = d.b[12] ^ (^d.b[13] & d.b[14])
		d.a[13] = d.b[13] ^ (^d.b[14] & d.b[10])
		d.a[14] = d.b[14] ^ (^d.b[10] & d.b[11])
		d.a[15] = d.b[15] ^ (^d.b[16] & d.b[17])
		d.a[16] = d.b[16] ^ (^d.b[17] & d.b[18])
		d.a[17] = d.b[17] ^ (^d.b[18] & d.b[19])
		d.a[18] = d.b[18] ^ (^d.b[19] & d.b[15])
		d.a[19] = d.b[19] ^ (^d.b[15] & d.b[16])
		d.a[20] = d.b[20] ^ (^d.b[21] & d.b[22])
		d.a[21] = d.b[21] ^ (^d.b[22] & d.b[23])
		d.a[22] = d.b[22] ^ (^d.b[23] & d.b[24])
		d.a[23] = d.b[23] ^ (^d.b[24] & d.b[20])
		d.a[24] = d.b[24] ^ (^d.b[20] & d.b[21])

		// ι step
		d.a[0] ^= roundConstant
	}
}
@@ -0,0 +1,216 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package sha3 implements the SHA3 hash algorithm (formerly called Keccak) chosen by NIST in 2012.
// This file provides a SHA3 implementation which implements the standard hash.Hash interface.
// Writing input data, including padding, and reading output data are computed in this file.
// Note that the current implementation can compute the hash of an integral number of bytes only.
// This is a consequence of the hash interface in which a buffer of bytes is passed in.
// The internals of the Keccak-f function are computed in keccakf.go.
// For the detailed specification, refer to the Keccak web site (http://keccak.noekeon.org/).
package sha3

import (
	"encoding/binary"
	"hash"
)

// laneSize is the size in bytes of each "lane" of the internal state of SHA3.
// Note that changing this size would require using a type other than uint64 to store each lane.
const laneSize = 8

// sliceSize represents the dimensions of the internal state, a square matrix of
// sliceSize ** 2 lanes. This is the size of both the "rows" and "columns" dimensions in the
// terminology of the SHA3 specification.
const sliceSize = 5

// numLanes represents the total number of lanes in the state.
const numLanes = sliceSize * sliceSize

// stateSize is the size in bytes of the internal state of SHA3 (5 * 5 * laneSize = 200 bytes).
const stateSize = laneSize * numLanes

// digest represents the partial evaluation of a checksum.
// Note that capacity, and not outputSize, is the critical security parameter, as SHA3 can output
// an arbitrary number of bytes for any given capacity. The Keccak proposal recommends that
// capacity = 2*outputSize to ensure that finding a collision of size outputSize requires
// O(2^{outputSize/2}) computations (the birthday lower bound). Future standards may modify the
// capacity/outputSize ratio to allow for more output with lower cryptographic security.
type digest struct {
	a          [numLanes]uint64  // main state of the hash
	b          [numLanes]uint64  // intermediate states
	c          [sliceSize]uint64 // intermediate states
	d          [sliceSize]uint64 // intermediate states
	outputSize int               // desired output size in bytes
	capacity   int               // number of bytes to leave untouched during squeeze/absorb
	absorbed   int               // number of bytes absorbed thus far
}

// minInt returns the lesser of two integer arguments, to simplify the absorption routine.
func minInt(v1, v2 int) int {
	if v1 <= v2 {
		return v1
	}
	return v2
}

// rate returns the number of bytes of the internal state which can be absorbed or squeezed
// in between calls to the permutation function.
func (d *digest) rate() int {
	return stateSize - d.capacity
}

// Reset clears the internal state by zeroing bytes in the state buffer.
// This can be skipped for a newly-created hash state; the default zero-allocated state is correct.
func (d *digest) Reset() {
	d.absorbed = 0
	for i := range d.a {
		d.a[i] = 0
	}
}

// BlockSize, required by the hash.Hash interface, does not have a standard interpretation
// for a sponge-based construction like SHA3. We return the data rate: the number of bytes which
// can be absorbed per invocation of the permutation function. For Merkle-Damgård based hashes
// (i.e. SHA1, SHA2, MD5) the output size of the internal compression function is returned.
// We consider this to be roughly equivalent because it represents the number of bytes of output
// produced per cryptographic operation.
func (d *digest) BlockSize() int { return d.rate() }
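To make the sponge arithmetic concrete for the Keccak-256 parameters constructed at the bottom of this file: capacity is 2*256/8 = 64 bytes, so rate() and therefore BlockSize() is 200 - 64 = 136 bytes, while Size() reports 32 bytes. A self-contained check of just that arithmetic, a sketch rather than part of the package:

package main

import "fmt"

func main() {
	const laneSize = 8
	const stateSize = 5 * 5 * laneSize // 200 bytes of state
	const capacity = 2 * 256 / 8       // 64 bytes for the Keccak-256 constructor

	fmt.Println("rate / BlockSize:", stateSize-capacity, "bytes") // 136
	fmt.Println("Size:", 256/8, "bytes")                          // 32
}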

// Size returns the output size of the hash function in bytes.
func (d *digest) Size() int {
	return d.outputSize
}

// unalignedAbsorb is a helper function for Write, which absorbs data that isn't aligned with an
// 8-byte lane. This requires shifting the individual bytes into position in a uint64.
func (d *digest) unalignedAbsorb(p []byte) {
	var t uint64
	for i := len(p) - 1; i >= 0; i-- {
		t <<= 8
		t |= uint64(p[i])
	}
	offset := (d.absorbed) % d.rate()
	t <<= 8 * uint(offset%laneSize)
	d.a[offset/laneSize] ^= t
	d.absorbed += len(p)
}
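The loop above packs bytes little-endian: p[0] becomes the least significant byte of t, and the result is then shifted left so it lines up with the byte positions already occupied in the target lane. A standalone sketch of just the packing step (packLE is a hypothetical helper, not part of this file):

package main

import "fmt"

// packLE mirrors the byte-packing loop in unalignedAbsorb: it folds up to
// eight bytes into a uint64 with p[0] as the least significant byte.
func packLE(p []byte) uint64 {
	var t uint64
	for i := len(p) - 1; i >= 0; i-- {
		t <<= 8
		t |= uint64(p[i])
	}
	return t
}

func main() {
	fmt.Printf("%#x\n", packLE([]byte{0x01, 0x02, 0x03})) // prints 0x30201
}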

// Write "absorbs" bytes into the state of the SHA3 hash, updating as needed when the sponge
// "fills up" with rate() bytes. Since lanes are stored internally as type uint64, this requires
// converting the incoming bytes into uint64s using a little endian interpretation. This
// implementation is optimized for large, aligned writes of multiples of 8 bytes (laneSize).
// Non-aligned or uneven numbers of bytes require shifting and are slower.
func (d *digest) Write(p []byte) (int, error) {
	// An initial offset is needed if we aren't absorbing to the first lane initially.
	offset := d.absorbed % d.rate()
	toWrite := len(p)

	// The first lane may need to absorb unaligned and/or incomplete data.
	if (offset%laneSize != 0 || len(p) < 8) && len(p) > 0 {
		toAbsorb := minInt(laneSize-(offset%laneSize), len(p))
		d.unalignedAbsorb(p[:toAbsorb])
		p = p[toAbsorb:]
		offset = (d.absorbed) % d.rate()

		// For every rate() bytes absorbed, the state must be permuted via the F Function.
		if (d.absorbed)%d.rate() == 0 {
			d.keccakF()
		}
	}

	// This loop should absorb the bulk of the data into full, aligned lanes.
	// It will call the update function as necessary.
	for len(p) > 7 {
		firstLane := offset / laneSize
		lastLane := minInt(d.rate()/laneSize, firstLane+len(p)/laneSize)

		// This inner loop absorbs input bytes into the state in groups of 8, converted to uint64s.
		for lane := firstLane; lane < lastLane; lane++ {
			d.a[lane] ^= binary.LittleEndian.Uint64(p[:laneSize])
			p = p[laneSize:]
		}
		d.absorbed += (lastLane - firstLane) * laneSize
		// For every rate() bytes absorbed, the state must be permuted via the F Function.
		if (d.absorbed)%d.rate() == 0 {
			d.keccakF()
		}

		offset = 0
	}

	// If there are insufficient bytes to fill the final lane, an unaligned absorption is performed.
	// This should always start at a correct lane boundary, or else it would be caught
	// by the uneven opening-lane case above.
	if len(p) > 0 {
		d.unalignedAbsorb(p)
	}

	return toWrite, nil
}

// pad computes the SHA3 padding scheme based on the number of bytes absorbed.
// The padding is a 1 bit, followed by an arbitrary number of 0s and then a final 1 bit, such that
// the input bits plus padding bits are a multiple of rate(). Adding the padding simply requires
// xoring an opening and closing bit into the appropriate lanes.
func (d *digest) pad() {
	offset := d.absorbed % d.rate()
	// The opening pad bit must be shifted into position based on the number of bytes absorbed.
	padOpenLane := offset / laneSize
	d.a[padOpenLane] ^= 0x0000000000000001 << uint(8*(offset%laneSize))
	// The closing padding bit is always in the last position.
	padCloseLane := (d.rate() / laneSize) - 1
	d.a[padCloseLane] ^= 0x8000000000000000
}
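A worked instance of the index arithmetic, assuming the Keccak-256 parameters (rate 136 bytes) and, hypothetically, 5 bytes absorbed: the opening 1 bit lands in lane 5/8 = 0 at bit 8*(5%8) = 40, and the closing bit always lands in lane 136/8 - 1 = 16 at bit 63. A small sketch of just that calculation:

package main

import "fmt"

func main() {
	const laneSize = 8
	const rate = 136 // stateSize - capacity for the Keccak-256 parameters
	absorbed := 5    // hypothetical number of message bytes absorbed

	offset := absorbed % rate
	padOpenLane := offset / laneSize
	openMask := uint64(1) << uint(8*(offset%laneSize))
	padCloseLane := rate/laneSize - 1

	fmt.Printf("opening bit: lane %d, mask %#x\n", padOpenLane, openMask)       // lane 0, mask 0x10000000000
	fmt.Printf("closing bit: lane %d, mask %#x\n", padCloseLane, uint64(1)<<63) // lane 16, mask 0x8000000000000000
}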

// finalize prepares the hash to output data by padding and one final permutation of the state.
func (d *digest) finalize() {
	d.pad()
	d.keccakF()
}

// squeeze outputs an arbitrary number of bytes from the hash state.
// Squeezing can require multiple calls to the F function (one per rate() bytes squeezed),
// although this is not the case for standard SHA3 parameters. This implementation only supports
// squeezing a single time; subsequent squeezes may lose alignment. Future implementations
// may wish to support multiple squeeze calls, for example to support use as a PRNG.
func (d *digest) squeeze(in []byte, toSqueeze int) []byte {
	// Because we read in blocks of laneSize, we need enough room to read
	// an integral number of lanes.
	needed := toSqueeze + (laneSize-toSqueeze%laneSize)%laneSize
	if cap(in)-len(in) < needed {
		newIn := make([]byte, len(in), len(in)+needed)
		copy(newIn, in)
		in = newIn
	}
	out := in[len(in) : len(in)+needed]

	for len(out) > 0 {
		for i := 0; i < d.rate() && len(out) > 0; i += laneSize {
			binary.LittleEndian.PutUint64(out[:], d.a[i/laneSize])
			out = out[laneSize:]
		}
		if len(out) > 0 {
			d.keccakF()
		}
	}
	return in[:len(in)+toSqueeze] // Re-slice in case we wrote extra data.
}
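The needed value above rounds the requested output up to a whole number of lanes so the PutUint64 loop never writes past the scratch slice: 32 bytes stays 32, while 28 bytes becomes 32. A quick standalone check of that rounding:

package main

import "fmt"

func main() {
	const laneSize = 8
	for _, toSqueeze := range []int{1, 28, 32} {
		needed := toSqueeze + (laneSize-toSqueeze%laneSize)%laneSize
		fmt.Printf("toSqueeze=%d -> needed=%d\n", toSqueeze, needed) // 1->8, 28->32, 32->32
	}
}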

// Sum applies padding to the hash state and then squeezes out the desired number of output bytes.
func (d *digest) Sum(in []byte) []byte {
	// Make a copy of the original hash so that the caller can keep writing and summing.
	dup := *d
	dup.finalize()
	return dup.squeeze(in, dup.outputSize)
}

// The NewKeccakX constructors enable initializing a hash in any of the four recommended sizes
// from the Keccak specification, all of which set capacity = 2*outputSize. Note that the final
// NIST standard for SHA3 may specify different input/output lengths.
// The output size is indicated in bits but converted into bytes internally.
func NewKeccak224() hash.Hash { return &digest{outputSize: 224 / 8, capacity: 2 * 224 / 8} }
func NewKeccak256() hash.Hash { return &digest{outputSize: 256 / 8, capacity: 2 * 256 / 8} }
func NewKeccak384() hash.Hash { return &digest{outputSize: 384 / 8, capacity: 2 * 384 / 8} }
func NewKeccak512() hash.Hash { return &digest{outputSize: 512 / 8, capacity: 2 * 512 / 8} }
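Since the constructors return a standard hash.Hash, usage follows the usual Write/Sum pattern, and because Sum finalizes a copy of the state the caller can keep writing afterwards. A minimal sketch; the import path is an assumption about where this package lives in the repository:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto/sha3" // assumed import path for this package
)

func main() {
	h := sha3.NewKeccak256()
	h.Write([]byte("hello"))
	sum1 := h.Sum(nil) // 32-byte Keccak-256 digest of "hello"

	// Sum worked on a copy, so the original state is still writable.
	h.Write([]byte(" world"))
	sum2 := h.Sum(nil) // digest of "hello world"

	fmt.Printf("%x\n%x\n", sum1, sum2)
}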