Mirror of https://github.com/XTLS/REALITY.git (synced 2025-08-24 15:38:36 +00:00)

Commit 32d3673ce6 "Simplify with public modules"
Parent: 02afebcf30
@@ -5,8 +5,6 @@
package aes

import (
-    // "crypto/internal/fips140"
-    // "crypto/internal/fips140/alias"
    "strconv"

    "github.com/xtls/reality/alias"
@@ -36,7 +36,7 @@

package aes

-import "github.com/xtls/reality/byteorder"
+import "encoding/binary"

// Encrypt one block from src into dst, using the expanded key xk.
func encryptBlockGeneric(c *blockExpanded, dst, src []byte) {
@@ -44,10 +44,10 @@ func encryptBlockGeneric(c *blockExpanded, dst, src []byte) {
    xk := c.enc[:]

    _ = src[15] // early bounds check
-    s0 := byteorder.BEUint32(src[0:4])
-    s1 := byteorder.BEUint32(src[4:8])
-    s2 := byteorder.BEUint32(src[8:12])
-    s3 := byteorder.BEUint32(src[12:16])
+    s0 := binary.BigEndian.Uint32(src[0:4])
+    s1 := binary.BigEndian.Uint32(src[4:8])
+    s2 := binary.BigEndian.Uint32(src[8:12])
+    s3 := binary.BigEndian.Uint32(src[12:16])

    // First round just XORs input with key.
    s0 ^= xk[0]
@@ -79,10 +79,10 @@ func encryptBlockGeneric(c *blockExpanded, dst, src []byte) {
    s3 ^= xk[k+3]

    _ = dst[15] // early bounds check
-    byteorder.BEPutUint32(dst[0:4], s0)
-    byteorder.BEPutUint32(dst[4:8], s1)
-    byteorder.BEPutUint32(dst[8:12], s2)
-    byteorder.BEPutUint32(dst[12:16], s3)
+    binary.BigEndian.PutUint32(dst[0:4], s0)
+    binary.BigEndian.PutUint32(dst[4:8], s1)
+    binary.BigEndian.PutUint32(dst[8:12], s2)
+    binary.BigEndian.PutUint32(dst[12:16], s3)
}

// Decrypt one block from src into dst, using the expanded key xk.
@@ -91,10 +91,10 @@ func decryptBlockGeneric(c *blockExpanded, dst, src []byte) {
    xk := c.dec[:]

    _ = src[15] // early bounds check
-    s0 := byteorder.BEUint32(src[0:4])
-    s1 := byteorder.BEUint32(src[4:8])
-    s2 := byteorder.BEUint32(src[8:12])
-    s3 := byteorder.BEUint32(src[12:16])
+    s0 := binary.BigEndian.Uint32(src[0:4])
+    s1 := binary.BigEndian.Uint32(src[4:8])
+    s2 := binary.BigEndian.Uint32(src[8:12])
+    s3 := binary.BigEndian.Uint32(src[12:16])

    // First round just XORs input with key.
    s0 ^= xk[0]
@@ -126,10 +126,10 @@ func decryptBlockGeneric(c *blockExpanded, dst, src []byte) {
    s3 ^= xk[k+3]

    _ = dst[15] // early bounds check
-    byteorder.BEPutUint32(dst[0:4], s0)
-    byteorder.BEPutUint32(dst[4:8], s1)
-    byteorder.BEPutUint32(dst[8:12], s2)
-    byteorder.BEPutUint32(dst[12:16], s3)
+    binary.BigEndian.PutUint32(dst[0:4], s0)
+    binary.BigEndian.PutUint32(dst[4:8], s1)
+    binary.BigEndian.PutUint32(dst[8:12], s2)
+    binary.BigEndian.PutUint32(dst[12:16], s3)
}

// Apply sbox0 to each byte in w.
@@ -152,7 +152,7 @@ func expandKeyGeneric(c *blockExpanded, key []byte) {
    var i int
    nk := len(key) / 4
    for i = 0; i < nk; i++ {
-        c.enc[i] = byteorder.BEUint32(key[4*i:])
+        c.enc[i] = binary.BigEndian.Uint32(key[4*i:])
    }
    for ; i < c.roundKeysSize(); i++ {
        t := c.enc[i-1]
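For illustration only (not part of this commit): the hunks above replace the module-internal byteorder helpers with their encoding/binary equivalents, which is behaviour-preserving. A minimal standalone sketch of the round trip on one 16-byte block, using only the standard library:

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    block := []byte("0123456789abcdef") // any 16-byte block

    // Load four big-endian 32-bit words, as encryptBlockGeneric now does.
    s0 := binary.BigEndian.Uint32(block[0:4])
    s1 := binary.BigEndian.Uint32(block[4:8])
    s2 := binary.BigEndian.Uint32(block[8:12])
    s3 := binary.BigEndian.Uint32(block[12:16])

    // Store them back; the round trip reproduces the original block,
    // exactly as the removed byteorder.BEUint32/BEPutUint32 pair did.
    out := make([]byte, 16)
    binary.BigEndian.PutUint32(out[0:4], s0)
    binary.BigEndian.PutUint32(out[4:8], s1)
    binary.BigEndian.PutUint32(out[8:12], s2)
    binary.BigEndian.PutUint32(out[12:16], s3)

    fmt.Println(string(out) == string(block)) // true
}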
aes/ctr.go (148 lines)
@@ -1,148 +0,0 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package aes

import (
    //"crypto/internal/fips140"
    "github.com/xtls/reality/alias"
    "github.com/xtls/reality/subtle"
    "github.com/xtls/reality/byteorder"
    "math/bits"
)

type CTR struct {
    b          Block
    ivlo, ivhi uint64 // start counter as 64-bit limbs
    offset     uint64 // for XORKeyStream only
}

func NewCTR(b *Block, iv []byte) *CTR {
    // Allocate the CTR here, in an easily inlineable function, so
    // the allocation can be done in the caller's stack frame
    // instead of the heap. See issue 70499.
    c := newCTR(b, iv)
    return &c
}

func newCTR(b *Block, iv []byte) CTR {
    if len(iv) != BlockSize {
        panic("bad IV length")
    }

    return CTR{
        b:      *b,
        ivlo:   byteorder.BEUint64(iv[8:16]),
        ivhi:   byteorder.BEUint64(iv[0:8]),
        offset: 0,
    }
}

func (c *CTR) XORKeyStream(dst, src []byte) {
    c.XORKeyStreamAt(dst, src, c.offset)

    var carry uint64
    c.offset, carry = bits.Add64(c.offset, uint64(len(src)), 0)
    if carry != 0 {
        panic("crypto/aes: counter overflow")
    }
}

// RoundToBlock is used by CTR_DRBG, which discards the rightmost unused bits at
// each request. It rounds the offset up to the next block boundary.
func RoundToBlock(c *CTR) {
    if remainder := c.offset % BlockSize; remainder != 0 {
        var carry uint64
        c.offset, carry = bits.Add64(c.offset, BlockSize-remainder, 0)
        if carry != 0 {
            panic("crypto/aes: counter overflow")
        }
    }
}

// XORKeyStreamAt behaves like XORKeyStream but keeps no state, and instead
// seeks into the keystream by the given bytes offset from the start (ignoring
// any XORKeyStream calls). This allows for random access into the keystream, up
// to 16 EiB from the start.
func (c *CTR) XORKeyStreamAt(dst, src []byte, offset uint64) {
    if len(dst) < len(src) {
        panic("crypto/aes: len(dst) < len(src)")
    }
    dst = dst[:len(src)]
    if alias.InexactOverlap(dst, src) {
        panic("crypto/aes: invalid buffer overlap")
    }
    //fips140.RecordApproved()

    ivlo, ivhi := add128(c.ivlo, c.ivhi, offset/BlockSize)

    if blockOffset := offset % BlockSize; blockOffset != 0 {
        // We have a partial block at the beginning.
        var in, out [BlockSize]byte
        copy(in[blockOffset:], src)
        ctrBlocks1(&c.b, &out, &in, ivlo, ivhi)
        n := copy(dst, out[blockOffset:])
        src = src[n:]
        dst = dst[n:]
        ivlo, ivhi = add128(ivlo, ivhi, 1)
    }

    for len(src) >= 8*BlockSize {
        ctrBlocks8(&c.b, (*[8 * BlockSize]byte)(dst), (*[8 * BlockSize]byte)(src), ivlo, ivhi)
        src = src[8*BlockSize:]
        dst = dst[8*BlockSize:]
        ivlo, ivhi = add128(ivlo, ivhi, 8)
    }

    // The tail can have at most 7 = 4 + 2 + 1 blocks.
    if len(src) >= 4*BlockSize {
        ctrBlocks4(&c.b, (*[4 * BlockSize]byte)(dst), (*[4 * BlockSize]byte)(src), ivlo, ivhi)
        src = src[4*BlockSize:]
        dst = dst[4*BlockSize:]
        ivlo, ivhi = add128(ivlo, ivhi, 4)
    }
    if len(src) >= 2*BlockSize {
        ctrBlocks2(&c.b, (*[2 * BlockSize]byte)(dst), (*[2 * BlockSize]byte)(src), ivlo, ivhi)
        src = src[2*BlockSize:]
        dst = dst[2*BlockSize:]
        ivlo, ivhi = add128(ivlo, ivhi, 2)
    }
    if len(src) >= 1*BlockSize {
        ctrBlocks1(&c.b, (*[1 * BlockSize]byte)(dst), (*[1 * BlockSize]byte)(src), ivlo, ivhi)
        src = src[1*BlockSize:]
        dst = dst[1*BlockSize:]
        ivlo, ivhi = add128(ivlo, ivhi, 1)
    }

    if len(src) != 0 {
        // We have a partial block at the end.
        var in, out [BlockSize]byte
        copy(in[:], src)
        ctrBlocks1(&c.b, &out, &in, ivlo, ivhi)
        copy(dst, out[:])
    }
}

// Each ctrBlocksN function XORs src with N blocks of counter keystream, and
// stores it in dst. src is loaded in full before storing dst, so they can
// overlap even inexactly. The starting counter value is passed in as a pair of
// little-endian 64-bit integers.

func ctrBlocks(b *Block, dst, src []byte, ivlo, ivhi uint64) {
    buf := make([]byte, len(src), 8*BlockSize)
    for i := 0; i < len(buf); i += BlockSize {
        byteorder.BEPutUint64(buf[i:], ivhi)
        byteorder.BEPutUint64(buf[i+8:], ivlo)
        ivlo, ivhi = add128(ivlo, ivhi, 1)
        encryptBlock(b, buf[i:], buf[i:])
    }
    // XOR into buf first, in case src and dst overlap (see above).
    subtle.XORBytes(buf, src, buf)
    copy(dst, buf)
}

func add128(lo, hi uint64, x uint64) (uint64, uint64) {
    lo, c := bits.Add64(lo, x, 0)
    hi, _ = bits.Add64(hi, 0, c)
    return lo, hi
}
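For illustration only (not part of this commit): the deleted add128 helper treats the counter as a 128-bit value split into two uint64 limbs and relies on bits.Add64 to propagate the carry. A standalone sketch of that arithmetic:

package main

import (
    "fmt"
    "math/bits"
)

// add128 mirrors the deleted helper: it adds x to the 128-bit value hi:lo.
func add128(lo, hi uint64, x uint64) (uint64, uint64) {
    lo, c := bits.Add64(lo, x, 0) // low limb plus x, carry out in c
    hi, _ = bits.Add64(hi, 0, c)  // fold the carry into the high limb
    return lo, hi
}

func main() {
    // Adding 1 to lo = 2^64-1 overflows the low limb and increments the high limb.
    lo, hi := add128(^uint64(0), 7, 1)
    fmt.Printf("lo=%d hi=%d\n", lo, hi) // lo=0 hi=8
}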
@@ -1,21 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package aes

func ctrBlocks1(b *Block, dst, src *[BlockSize]byte, ivlo, ivhi uint64) {
    ctrBlocks(b, dst[:], src[:], ivlo, ivhi)
}

func ctrBlocks2(b *Block, dst, src *[2 * BlockSize]byte, ivlo, ivhi uint64) {
    ctrBlocks(b, dst[:], src[:], ivlo, ivhi)
}

func ctrBlocks4(b *Block, dst, src *[4 * BlockSize]byte, ivlo, ivhi uint64) {
    ctrBlocks(b, dst[:], src[:], ivlo, ivhi)
}

func ctrBlocks8(b *Block, dst, src *[8 * BlockSize]byte, ivlo, ivhi uint64) {
    ctrBlocks(b, dst[:], src[:], ivlo, ivhi)
}
@@ -1,149 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package byteorder provides functions for decoding and encoding
// little and big endian integer types from/to byte slices.
package byteorder

func LEUint16(b []byte) uint16 {
    _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
    return uint16(b[0]) | uint16(b[1])<<8
}

func LEPutUint16(b []byte, v uint16) {
    _ = b[1] // early bounds check to guarantee safety of writes below
    b[0] = byte(v)
    b[1] = byte(v >> 8)
}

func LEAppendUint16(b []byte, v uint16) []byte {
    return append(b,
        byte(v),
        byte(v>>8),
    )
}

func LEUint32(b []byte) uint32 {
    _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
    return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}

func LEPutUint32(b []byte, v uint32) {
    _ = b[3] // early bounds check to guarantee safety of writes below
    b[0] = byte(v)
    b[1] = byte(v >> 8)
    b[2] = byte(v >> 16)
    b[3] = byte(v >> 24)
}

func LEAppendUint32(b []byte, v uint32) []byte {
    return append(b,
        byte(v),
        byte(v>>8),
        byte(v>>16),
        byte(v>>24),
    )
}

func LEUint64(b []byte) uint64 {
    _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
    return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
        uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}

func LEPutUint64(b []byte, v uint64) {
    _ = b[7] // early bounds check to guarantee safety of writes below
    b[0] = byte(v)
    b[1] = byte(v >> 8)
    b[2] = byte(v >> 16)
    b[3] = byte(v >> 24)
    b[4] = byte(v >> 32)
    b[5] = byte(v >> 40)
    b[6] = byte(v >> 48)
    b[7] = byte(v >> 56)
}

func LEAppendUint64(b []byte, v uint64) []byte {
    return append(b,
        byte(v),
        byte(v>>8),
        byte(v>>16),
        byte(v>>24),
        byte(v>>32),
        byte(v>>40),
        byte(v>>48),
        byte(v>>56),
    )
}

func BEUint16(b []byte) uint16 {
    _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
    return uint16(b[1]) | uint16(b[0])<<8
}

func BEPutUint16(b []byte, v uint16) {
    _ = b[1] // early bounds check to guarantee safety of writes below
    b[0] = byte(v >> 8)
    b[1] = byte(v)
}

func BEAppendUint16(b []byte, v uint16) []byte {
    return append(b,
        byte(v>>8),
        byte(v),
    )
}

func BEUint32(b []byte) uint32 {
    _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
    return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
}

func BEPutUint32(b []byte, v uint32) {
    _ = b[3] // early bounds check to guarantee safety of writes below
    b[0] = byte(v >> 24)
    b[1] = byte(v >> 16)
    b[2] = byte(v >> 8)
    b[3] = byte(v)
}

func BEAppendUint32(b []byte, v uint32) []byte {
    return append(b,
        byte(v>>24),
        byte(v>>16),
        byte(v>>8),
        byte(v),
    )
}

func BEUint64(b []byte) uint64 {
    _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
    return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
        uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
}

func BEPutUint64(b []byte, v uint64) {
    _ = b[7] // early bounds check to guarantee safety of writes below
    b[0] = byte(v >> 56)
    b[1] = byte(v >> 48)
    b[2] = byte(v >> 40)
    b[3] = byte(v >> 32)
    b[4] = byte(v >> 24)
    b[5] = byte(v >> 16)
    b[6] = byte(v >> 8)
    b[7] = byte(v)
}

func BEAppendUint64(b []byte, v uint64) []byte {
    return append(b,
        byte(v>>56),
        byte(v>>48),
        byte(v>>40),
        byte(v>>32),
        byte(v>>24),
        byte(v>>16),
        byte(v>>8),
        byte(v),
    )
}
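For illustration only (not part of this commit): every helper in the deleted byteorder package has a one-to-one counterpart in encoding/binary, which is why the file can be dropped. A short sketch checking two of those mappings (the sample values are arbitrary):

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

func main() {
    // BEAppendUint16(b, v) corresponds to binary.BigEndian.AppendUint16(b, v).
    be := binary.BigEndian.AppendUint16(nil, 0x1234)
    fmt.Println(bytes.Equal(be, []byte{0x12, 0x34})) // true

    // LEUint32(b) corresponds to binary.LittleEndian.Uint32(b).
    le := []byte{0x78, 0x56, 0x34, 0x12}
    fmt.Printf("%#x\n", binary.LittleEndian.Uint32(le)) // 0x12345678
}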
drbg/ctrdrbg.go (143 lines)
@@ -1,143 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package drbg

import (
    //"crypto/internal/fips140"
    "github.com/xtls/reality/aes"
    "github.com/xtls/reality/subtle"
    "github.com/xtls/reality/byteorder"
    "math/bits"
)

// Counter is an SP 800-90A Rev. 1 CTR_DRBG instantiated with AES-256.
//
// Per Table 3, it has a security strength of 256 bits, a seed size of 384 bits,
// a counter length of 128 bits, a reseed interval of 2^48 requests, and a
// maximum request size of 2^19 bits (2^16 bytes, 64 KiB).
//
// We support a narrow range of parameters that fit the needs of our RNG:
// AES-256, no derivation function, no personalization string, no prediction
// resistance, and 384-bit additional input.
//
// WARNING: this type provides tightly scoped support for the DRBG
// functionality we need for FIPS 140-3 _only_. This type _should not_ be used
// outside of the FIPS 140-3 module for any other use.
//
// In particular, as documented, Counter does not support the derivation
// function, or personalization strings which are necessary for safely using
// this DRBG for generic purposes without leaking sensitive values.
type Counter struct {
    // c is instantiated with K as the key and V as the counter.
    c aes.CTR

    reseedCounter uint64
}

const (
    keySize        = 256 / 8
    SeedSize       = keySize + aes.BlockSize
    reseedInterval = 1 << 48
    maxRequestSize = (1 << 19) / 8
)

func NewCounter(entropy *[SeedSize]byte) *Counter {
    // CTR_DRBG_Instantiate_algorithm, per Section 10.2.1.3.1.
    //fips140.RecordApproved()

    K := make([]byte, keySize)
    V := make([]byte, aes.BlockSize)

    // V starts at 0, but is incremented in CTR_DRBG_Update before each use,
    // unlike AES-CTR where it is incremented after each use.
    V[len(V)-1] = 1

    cipher, err := aes.New(K)
    if err != nil {
        panic(err)
    }

    c := &Counter{}
    c.c = *aes.NewCTR(cipher, V)
    c.update(entropy)
    c.reseedCounter = 1
    return c
}

func (c *Counter) update(seed *[SeedSize]byte) {
    // CTR_DRBG_Update, per Section 10.2.1.2.

    temp := make([]byte, SeedSize)
    c.c.XORKeyStream(temp, seed[:])
    K := temp[:keySize]
    V := temp[keySize:]

    // Again, we pre-increment V, like in NewCounter.
    increment((*[aes.BlockSize]byte)(V))

    cipher, err := aes.New(K)
    if err != nil {
        panic(err)
    }
    c.c = *aes.NewCTR(cipher, V)
}

func increment(v *[aes.BlockSize]byte) {
    hi := byteorder.BEUint64(v[:8])
    lo := byteorder.BEUint64(v[8:])
    lo, c := bits.Add64(lo, 1, 0)
    hi, _ = bits.Add64(hi, 0, c)
    byteorder.BEPutUint64(v[:8], hi)
    byteorder.BEPutUint64(v[8:], lo)
}

func (c *Counter) Reseed(entropy, additionalInput *[SeedSize]byte) {
    // CTR_DRBG_Reseed_algorithm, per Section 10.2.1.4.1.
    //fips140.RecordApproved()

    var seed [SeedSize]byte
    subtle.XORBytes(seed[:], entropy[:], additionalInput[:])
    c.update(&seed)
    c.reseedCounter = 1
}

// Generate produces at most maxRequestSize bytes of random data in out.
func (c *Counter) Generate(out []byte, additionalInput *[SeedSize]byte) (reseedRequired bool) {
    // CTR_DRBG_Generate_algorithm, per Section 10.2.1.5.1.
    //fips140.RecordApproved()

    if len(out) > maxRequestSize {
        panic("crypto/drbg: internal error: request size exceeds maximum")
    }

    // Step 1.
    if c.reseedCounter > reseedInterval {
        return true
    }

    // Step 2.
    if additionalInput != nil {
        c.update(additionalInput)
    } else {
        // If the additional input is null, the first CTR_DRBG_Update is
        // skipped, but the additional input is replaced with an all-zero string
        // for the second CTR_DRBG_Update.
        additionalInput = new([SeedSize]byte)
    }

    // Steps 3-5.
    clear(out)
    c.c.XORKeyStream(out, out)
    aes.RoundToBlock(&c.c)

    // Step 6.
    c.update(additionalInput)

    // Step 7.
    c.reseedCounter++

    // Step 8.
    return false
}
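For illustration only (not part of this commit): the increment helper above treats the 16-byte V block as a single big-endian 128-bit integer. A standalone sketch of the carry crossing the 8-byte boundary (it uses a plain byte slice instead of the *[aes.BlockSize]byte parameter of the deleted function):

package main

import (
    "encoding/binary"
    "fmt"
    "math/bits"
)

// increment adds 1 to a 16-byte big-endian counter, as the deleted helper does.
func increment(v []byte) {
    hi := binary.BigEndian.Uint64(v[:8])
    lo := binary.BigEndian.Uint64(v[8:])
    lo, c := bits.Add64(lo, 1, 0)
    hi, _ = bits.Add64(hi, 0, c)
    binary.BigEndian.PutUint64(v[:8], hi)
    binary.BigEndian.PutUint64(v[8:], lo)
}

func main() {
    // Low half is all 0xff, so one increment carries into the high half.
    v := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
    increment(v)
    fmt.Printf("% x\n", v) // 00 00 00 00 00 00 00 01 00 00 00 00 00 00 00 00
}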
drbg/rand.go (103 lines)
@@ -1,103 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package drbg provides cryptographically secure random bytes
// usable by FIPS code. In FIPS mode it uses an SP 800-90A Rev. 1
// Deterministic Random Bit Generator (DRBG). Otherwise,
// it uses the operating system's random number generator.
package drbg

import (
    "github.com/xtls/reality/entropy"
    // "crypto/internal/fips140"
    "github.com/xtls/reality/randutil"
    // "github.com/xtls/reality/sysrand"

    "crypto/fips140"
    "crypto/rand"
    "io"
    "sync"
)

var drbgs = sync.Pool{
    New: func() any {
        var c *Counter
        entropy.Depleted(func(seed *[48]byte) {
            c = NewCounter(seed)
        })
        return c
    },
}

// Read fills b with cryptographically secure random bytes. In FIPS mode, it
// uses an SP 800-90A Rev. 1 Deterministic Random Bit Generator (DRBG).
// Otherwise, it uses the operating system's random number generator.
func Read(b []byte) {
    if !fips140.Enabled() {
        rand.Read(b)
        return
    }

    // At every read, 128 random bits from the operating system are mixed as
    // additional input, to make the output as strong as non-FIPS randomness.
    // This is not credited as entropy for FIPS purposes, as allowed by Section
    // 8.7.2: "Note that a DRBG does not rely on additional input to provide
    // entropy, even though entropy could be provided in the additional input".
    additionalInput := new([SeedSize]byte)
    rand.Read(additionalInput[:16])

    drbg := drbgs.Get().(*Counter)
    defer drbgs.Put(drbg)

    for len(b) > 0 {
        size := min(len(b), maxRequestSize)
        if reseedRequired := drbg.Generate(b[:size], additionalInput); reseedRequired {
            // See SP 800-90A Rev. 1, Section 9.3.1, Steps 6-8, as explained in
            // Section 9.3.2: if Generate reports a reseed is required, the
            // additional input is passed to Reseed along with the entropy and
            // then nulled before the next Generate call.
            entropy.Depleted(func(seed *[48]byte) {
                drbg.Reseed(seed, additionalInput)
            })
            additionalInput = nil
            continue
        }
        b = b[size:]
    }
}

// DefaultReader is a sentinel type, embedded in the default
// [crypto/rand.Reader], used to recognize it when passed to
// APIs that accept a rand io.Reader.
type DefaultReader interface{ defaultReader() }

// ReadWithReader uses Reader to fill b with cryptographically secure random
// bytes. It is intended for use in APIs that expose a rand io.Reader.
//
// If Reader is not the default Reader from crypto/rand,
// [randutil.MaybeReadByte] and [fips140.RecordNonApproved] are called.
func ReadWithReader(r io.Reader, b []byte) error {
    if _, ok := r.(DefaultReader); ok {
        Read(b)
        return nil
    }

    //fips140.RecordNonApproved()
    randutil.MaybeReadByte(r)
    _, err := io.ReadFull(r, b)
    return err
}

// ReadWithReaderDeterministic is like ReadWithReader, but it doesn't call
// [randutil.MaybeReadByte] on non-default Readers.
func ReadWithReaderDeterministic(r io.Reader, b []byte) error {
    if _, ok := r.(DefaultReader); ok {
        Read(b)
        return nil
    }

    //fips140.RecordNonApproved()
    _, err := io.ReadFull(r, b)
    return err
}
@@ -1,29 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package entropy provides the passive entropy source for the FIPS 140-3
// module. It is only used in FIPS mode by [crypto/internal/fips140/drbg.Read].
//
// This complies with IG 9.3.A, Additional Comment 12, which until January 1,
// 2026 allows new modules to meet an [earlier version] of Resolution 2(b):
// "A software module that contains an approved DRBG that receives a LOAD
// command (or its logical equivalent) with entropy obtained from [...] inside
// the physical perimeter of the operational environment of the module [...]."
//
// Distributions that have their own SP 800-90B entropy source should replace
// this package with their own implementation.
//
// [earlier version]: https://csrc.nist.gov/CSRC/media/Projects/cryptographic-module-validation-program/documents/IG%209.3.A%20Resolution%202b%5BMarch%2026%202024%5D.pdf
package entropy

// "github.com/xtls/reality/sysrand"
import "crypto/rand"

// Depleted notifies the entropy source that the entropy in the module is
// "depleted" and provides the callback for the LOAD command.
func Depleted(LOAD func(*[48]byte)) {
    var entropy [48]byte
    rand.Read(entropy[:])
    LOAD(&entropy)
}
@@ -1,32 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fips140

import "io"

// Hash is the common interface implemented by all hash functions. It is a copy
// of [hash.Hash] from the standard library, to avoid depending on security
// definitions from outside of the module.
type Hash interface {
    // Write (via the embedded io.Writer interface) adds more data to the
    // running hash. It never returns an error.
    io.Writer

    // Sum appends the current hash to b and returns the resulting slice.
    // It does not change the underlying hash state.
    Sum(b []byte) []byte

    // Reset resets the Hash to its initial state.
    Reset()

    // Size returns the number of bytes Sum will return.
    Size() int

    // BlockSize returns the hash's underlying block size.
    // The Write method must be able to accept any amount
    // of data, but it may operate more efficiently if all writes
    // are a multiple of the block size.
    BlockSize() int
}
@@ -5,8 +5,6 @@
package gcm

import (
-    // "crypto/internal/fips140"
-
    "errors"

    "github.com/xtls/reality/aes"
@@ -5,9 +5,10 @@
package gcm

import (
+    "crypto/subtle"
+    "encoding/binary"

    "github.com/xtls/reality/aes"
-    "github.com/xtls/reality/byteorder"
-    "github.com/xtls/reality/subtle"
)

func sealGeneric(out []byte, g *GCM, nonce, plaintext, additionalData []byte) {
@@ -58,7 +59,7 @@ func deriveCounterGeneric(H, counter *[gcmBlockSize]byte, nonce []byte) {
        counter[gcmBlockSize-1] = 1
    } else {
        lenBlock := make([]byte, 16)
-        byteorder.BEPutUint64(lenBlock[8:], uint64(len(nonce))*8)
+        binary.BigEndian.PutUint64(lenBlock[8:], uint64(len(nonce))*8)
        ghash(counter, H, nonce, lenBlock)
    }
}
@@ -89,7 +90,7 @@ func gcmCounterCryptGeneric(b *aes.Block, out, src []byte, counter *[gcmBlockSiz
// and increments it.
func gcmInc32(counterBlock *[gcmBlockSize]byte) {
    ctr := counterBlock[len(counterBlock)-4:]
-    byteorder.BEPutUint32(ctr, byteorder.BEUint32(ctr)+1)
+    binary.BigEndian.PutUint32(ctr, binary.BigEndian.Uint32(ctr)+1)
}

// gcmAuthGeneric calculates GHASH(additionalData, ciphertext), masks the result
@@ -97,8 +98,8 @@ func gcmInc32(counterBlock *[gcmBlockSize]byte) {
func gcmAuthGeneric(out []byte, H, tagMask *[gcmBlockSize]byte, ciphertext, additionalData []byte) {
    checkGenericIsExpected()
    lenBlock := make([]byte, 16)
-    byteorder.BEPutUint64(lenBlock[:8], uint64(len(additionalData))*8)
-    byteorder.BEPutUint64(lenBlock[8:], uint64(len(ciphertext))*8)
+    binary.BigEndian.PutUint64(lenBlock[:8], uint64(len(additionalData))*8)
+    binary.BigEndian.PutUint64(lenBlock[8:], uint64(len(ciphertext))*8)
    var S [gcmBlockSize]byte
    ghash(&S, H, additionalData, ciphertext, lenBlock)
    subtle.XORBytes(out, S[:], tagMask[:])
@@ -5,14 +5,10 @@
package gcm

import (
-    // "crypto/internal/fips140"
-    // "crypto/internal/fips140/drbg"
-
+    "encoding/binary"
    "math"

    "github.com/xtls/reality/aes"
    // "github.com/xtls/reality/alias"
-    "github.com/xtls/reality/byteorder"
)

// SealWithRandomNonce encrypts plaintext to out, and writes a random nonce to
@@ -75,14 +71,14 @@ func (g *GCMWithCounterNonce) Seal(dst, nonce, plaintext, data []byte) []byte {
        panic("crypto/cipher: incorrect nonce length given to GCM")
    }

-    counter := byteorder.BEUint64(nonce[len(nonce)-8:])
+    counter := binary.BigEndian.Uint64(nonce[len(nonce)-8:])
    if !g.ready {
        // The first invocation sets the fixed name encoding and start counter.
        g.ready = true
        g.start = counter
-        g.fixedName = byteorder.BEUint32(nonce[:4])
+        g.fixedName = binary.BigEndian.Uint32(nonce[:4])
    }
-    if g.fixedName != byteorder.BEUint32(nonce[:4]) {
+    if g.fixedName != binary.BigEndian.Uint32(nonce[:4]) {
        panic("crypto/cipher: incorrect module name given to GCMWithCounterNonce")
    }
    counter -= g.start
@@ -132,7 +128,7 @@ func (g *GCMForTLS12) Seal(dst, nonce, plaintext, data []byte) []byte {
        panic("crypto/cipher: incorrect nonce length given to GCM")
    }

-    counter := byteorder.BEUint64(nonce[len(nonce)-8:])
+    counter := binary.BigEndian.Uint64(nonce[len(nonce)-8:])

    // Ensure the counter is monotonically increasing.
    if counter == math.MaxUint64 {
@@ -178,7 +174,7 @@ func (g *GCMForTLS13) Seal(dst, nonce, plaintext, data []byte) []byte {
        panic("crypto/cipher: incorrect nonce length given to GCM")
    }

-    counter := byteorder.BEUint64(nonce[len(nonce)-8:])
+    counter := binary.BigEndian.Uint64(nonce[len(nonce)-8:])
    if !g.ready {
        // In the first call, the counter is zero, so we learn the XOR mask.
        g.ready = true
@@ -232,7 +228,7 @@ func (g *GCMForSSH) Seal(dst, nonce, plaintext, data []byte) []byte {
        panic("crypto/cipher: incorrect nonce length given to GCM")
    }

-    counter := byteorder.BEUint64(nonce[len(nonce)-8:])
+    counter := binary.BigEndian.Uint64(nonce[len(nonce)-8:])
    if !g.ready {
        // In the first call we learn the start value.
        g.ready = true

gcm/ghash.go (15 lines)
@@ -5,8 +5,7 @@
package gcm

import (
-    // "crypto/internal/fips140"
-    "github.com/xtls/reality/byteorder"
+    "encoding/binary"
)

// gcmFieldElement represents a value in GF(2¹²⁸). In order to reflect the GCM
@@ -46,8 +45,8 @@ func ghash(out, H *[gcmBlockSize]byte, inputs ...[]byte) {
    // would expect, say, 4*H to be in index 4 of the table but due to
    // this bit ordering it will actually be in index 0010 (base 2) = 2.
    x := gcmFieldElement{
-        byteorder.BEUint64(H[:8]),
-        byteorder.BEUint64(H[8:]),
+        binary.BigEndian.Uint64(H[:8]),
+        binary.BigEndian.Uint64(H[8:]),
    }
    productTable[reverseBits(1)] = x

@@ -61,8 +60,8 @@ func ghash(out, H *[gcmBlockSize]byte, inputs ...[]byte) {
        ghashUpdate(&productTable, &y, input)
    }

-    byteorder.BEPutUint64(out[:], y.low)
-    byteorder.BEPutUint64(out[8:], y.high)
+    binary.BigEndian.PutUint64(out[:], y.low)
+    binary.BigEndian.PutUint64(out[8:], y.high)
}

// reverseBits reverses the order of the bits of 4-bit number in i.
@@ -142,8 +141,8 @@ func ghashMul(productTable *[16]gcmFieldElement, y *gcmFieldElement) {
// Horner's rule. There must be a multiple of gcmBlockSize bytes in blocks.
func updateBlocks(productTable *[16]gcmFieldElement, y *gcmFieldElement, blocks []byte) {
    for len(blocks) > 0 {
-        y.low ^= byteorder.BEUint64(blocks)
-        y.high ^= byteorder.BEUint64(blocks[8:])
+        y.low ^= binary.BigEndian.Uint64(blocks)
+        y.high ^= binary.BigEndian.Uint64(blocks[8:])
        ghashMul(productTable, y)
        blocks = blocks[gcmBlockSize:]
    }
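For illustration only (not part of this commit): gcmInc32 above touches only the rightmost four bytes of the counter block and wraps modulo 2^32. A minimal standalone sketch of that behaviour with encoding/binary (the inc32 name and fixed [16]byte type are illustrative; the real code uses gcmBlockSize):

package main

import (
    "encoding/binary"
    "fmt"
)

// inc32 increments the rightmost 32 bits of a 16-byte counter block,
// wrapping around to zero, mirroring the gcmInc32 shown in the diff.
func inc32(counterBlock *[16]byte) {
    ctr := counterBlock[len(counterBlock)-4:]
    binary.BigEndian.PutUint32(ctr, binary.BigEndian.Uint32(ctr)+1)
}

func main() {
    var block [16]byte
    copy(block[12:], []byte{0xff, 0xff, 0xff, 0xff}) // counter at 2^32-1
    inc32(&block)
    fmt.Printf("% x\n", block[12:]) // 00 00 00 00; the leftmost 12 bytes are untouched
}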
@@ -10,9 +10,11 @@ import (
    "crypto"
    "crypto/ecdsa"
    "crypto/ed25519"
+    "crypto/mlkem"
    "crypto/rsa"
    "crypto/subtle"
    "crypto/x509"
+    "encoding/binary"
    "errors"
    "fmt"
    "hash"
@@ -21,10 +23,8 @@ import (
    "strings"
    "time"

-    "github.com/xtls/reality/byteorder"
    "github.com/xtls/reality/fips140tls"
    "github.com/xtls/reality/hpke"
-    "github.com/xtls/reality/mlkem"
    "github.com/xtls/reality/tls13"
)

@@ -708,7 +708,7 @@ func (hs *clientHandshakeState) doFullHandshake() error {
        return err
    }
    if len(skx.key) >= 3 && skx.key[0] == 3 /* named curve */ {
-        c.curveID = CurveID(byteorder.BEUint16(skx.key[1:]))
+        c.curveID = CurveID(binary.BigEndian.Uint16(skx.key[1:]))
    }

    msg, err = c.readHandshake(&hs.finishedHash)

@@ -9,6 +9,7 @@ import (
    "context"
    "crypto"
    "crypto/hmac"
+    "crypto/mlkem"
    "crypto/rsa"
    "crypto/subtle"
    "errors"
@@ -16,7 +17,6 @@ import (
    "slices"
    "time"

-    "github.com/xtls/reality/mlkem"
    "github.com/xtls/reality/tls13"
    "golang.org/x/crypto/hkdf"
)

@@ -12,13 +12,12 @@ import (
    "crypto/rsa"
    "crypto/subtle"
    "crypto/x509"
+    "encoding/binary"
    "errors"
    "fmt"
    "hash"
    "io"
    "time"
-
-    "github.com/xtls/reality/byteorder"
)

// serverHandshakeState contains details of a server handshake in progress.
@@ -580,7 +579,7 @@ func (hs *serverHandshakeState) doFullHandshake() error {
    }
    if skx != nil {
        if len(skx.key) >= 3 && skx.key[0] == 3 /* named curve */ {
-            c.curveID = CurveID(byteorder.BEUint16(skx.key[1:]))
+            c.curveID = CurveID(binary.BigEndian.Uint16(skx.key[1:]))
        }
        if _, err := hs.c.writeHandshakeRecord(skx, &hs.finishedHash); err != nil {
            return err

@@ -10,10 +10,12 @@ import (
    "crypto"
    "crypto/ed25519"
    "crypto/hmac"
+    "crypto/mlkem"
    "crypto/rand"
    "crypto/rsa"
    "crypto/sha512"
    "crypto/x509"
+    "encoding/binary"
    "errors"
    "hash"
    "io"
@@ -21,9 +23,7 @@ import (
    "slices"
    "time"

-    "github.com/xtls/reality/byteorder"
    "github.com/xtls/reality/fips140tls"
-    "github.com/xtls/reality/mlkem"
    "github.com/xtls/reality/tls13"
)

@@ -954,7 +954,7 @@ func (c *Conn) sendSessionTicket(earlyData bool, extra [][]byte) error {
    if _, err := c.config.rand().Read(ageAdd); err != nil {
        return err
    }
-    m.ageAdd = byteorder.LEUint32(ageAdd)
+    m.ageAdd = binary.LittleEndian.Uint32(ageAdd)

    if earlyData {
        // RFC 9001, Section 4.6.1
hkdf/hkdf.go (56 lines)
@@ -1,56 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package hkdf

import (
    "github.com/xtls/reality/fips140"
    "github.com/xtls/reality/hmac"
)

func Extract[H fips140.Hash](h func() H, secret, salt []byte) []byte {
    if len(secret) < 112/8 {
        // fips140.RecordNonApproved()
    }
    if salt == nil {
        salt = make([]byte, h().Size())
    }
    extractor := hmac.New(h, salt)
    hmac.MarkAsUsedInKDF(extractor)
    extractor.Write(secret)

    return extractor.Sum(nil)
}

func Expand[H fips140.Hash](h func() H, pseudorandomKey []byte, info string, keyLen int) []byte {
    out := make([]byte, 0, keyLen)
    expander := hmac.New(h, pseudorandomKey)
    hmac.MarkAsUsedInKDF(expander)
    var counter uint8
    var buf []byte

    for len(out) < keyLen {
        counter++
        if counter == 0 {
            panic("hkdf: counter overflow")
        }
        if counter > 1 {
            expander.Reset()
        }
        expander.Write(buf)
        expander.Write([]byte(info))
        expander.Write([]byte{counter})
        buf = expander.Sum(buf[:0])
        remain := keyLen - len(out)
        remain = min(remain, len(buf))
        out = append(out, buf[:remain]...)
    }

    return out
}

func Key[H fips140.Hash](h func() H, secret, salt []byte, info string, keyLen int) []byte {
    prk := Extract(h, secret, salt)
    return Expand(h, prk, info, keyLen)
}
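For illustration only (not part of this commit): the deleted package implements the RFC 5869 Extract-then-Expand pattern, and the handshake code imports golang.org/x/crypto/hkdf instead (see the import hunks above). A hedged usage sketch with that public package; the hash choice and sample inputs are illustrative only:

package main

import (
    "crypto/sha256"
    "fmt"
    "io"

    "golang.org/x/crypto/hkdf"
)

func main() {
    secret := []byte("input keying material")
    salt := []byte("optional salt")
    info := []byte("context label")

    // Extract-then-Expand, analogous to the deleted Key helper.
    prk := hkdf.Extract(sha256.New, secret, salt)
    okm := make([]byte, 42)
    if _, err := io.ReadFull(hkdf.Expand(sha256.New, prk, info), okm); err != nil {
        panic(err)
    }
    fmt.Printf("%d-byte output key\n", len(okm))
}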
hmac/hmac.go (172 lines)
@@ -1,172 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package hmac implements HMAC according to [FIPS 198-1].
//
// [FIPS 198-1]: https://doi.org/10.6028/NIST.FIPS.198-1
package hmac

import (
    "github.com/xtls/reality/fips140"
    "github.com/xtls/reality/sha256"
    "github.com/xtls/reality/sha3"
    "github.com/xtls/reality/sha512"
)

// key is zero padded to the block size of the hash function
// ipad = 0x36 byte repeated for key length
// opad = 0x5c byte repeated for key length
// hmac = H([key ^ opad] H([key ^ ipad] text))

// marshalable is the combination of encoding.BinaryMarshaler and
// encoding.BinaryUnmarshaler. Their method definitions are repeated here to
// avoid a dependency on the encoding package.
type marshalable interface {
    MarshalBinary() ([]byte, error)
    UnmarshalBinary([]byte) error
}

type HMAC struct {
    opad, ipad   []byte
    outer, inner fips140.Hash

    // If marshaled is true, then opad and ipad do not contain a padded
    // copy of the key, but rather the marshaled state of outer/inner after
    // opad/ipad has been fed into it.
    marshaled bool

    // forHKDF and keyLen are stored to inform the service indicator decision.
    forHKDF bool
    keyLen  int
}

func (h *HMAC) Sum(in []byte) []byte {
    // Per FIPS 140-3 IG C.M, key lengths below 112 bits are only allowed for
    // legacy use (i.e. verification only) and we don't support that. However,
    // HKDF uses the HMAC key for the salt, which is allowed to be shorter.
    if h.keyLen < 112/8 && !h.forHKDF {
        // fips140.RecordNonApproved()
    }
    switch h.inner.(type) {
    case *sha256.Digest, *sha512.Digest, *sha3.Digest:
    default:
        // fips140.RecordNonApproved()
    }

    origLen := len(in)
    in = h.inner.Sum(in)

    if h.marshaled {
        if err := h.outer.(marshalable).UnmarshalBinary(h.opad); err != nil {
            panic(err)
        }
    } else {
        h.outer.Reset()
        h.outer.Write(h.opad)
    }
    h.outer.Write(in[origLen:])
    return h.outer.Sum(in[:origLen])
}

func (h *HMAC) Write(p []byte) (n int, err error) {
    return h.inner.Write(p)
}

func (h *HMAC) Size() int      { return h.outer.Size() }
func (h *HMAC) BlockSize() int { return h.inner.BlockSize() }

func (h *HMAC) Reset() {
    if h.marshaled {
        if err := h.inner.(marshalable).UnmarshalBinary(h.ipad); err != nil {
            panic(err)
        }
        return
    }

    h.inner.Reset()
    h.inner.Write(h.ipad)

    // If the underlying hash is marshalable, we can save some time by saving a
    // copy of the hash state now, and restoring it on future calls to Reset and
    // Sum instead of writing ipad/opad every time.
    //
    // We do this on Reset to avoid slowing down the common single-use case.
    //
    // This is allowed by FIPS 198-1, Section 6: "Conceptually, the intermediate
    // results of the compression function on the B-byte blocks (K0 ⊕ ipad) and
    // (K0 ⊕ opad) can be precomputed once, at the time of generation of the key
    // K, or before its first use. These intermediate results can be stored and
    // then used to initialize H each time that a message needs to be
    // authenticated using the same key. [...] These stored intermediate values
    // shall be treated and protected in the same manner as secret keys."
    marshalableInner, innerOK := h.inner.(marshalable)
    if !innerOK {
        return
    }
    marshalableOuter, outerOK := h.outer.(marshalable)
    if !outerOK {
        return
    }

    imarshal, err := marshalableInner.MarshalBinary()
    if err != nil {
        return
    }

    h.outer.Reset()
    h.outer.Write(h.opad)
    omarshal, err := marshalableOuter.MarshalBinary()
    if err != nil {
        return
    }

    // Marshaling succeeded; save the marshaled state for later
    h.ipad = imarshal
    h.opad = omarshal
    h.marshaled = true
}

// New returns a new HMAC hash using the given [fips140.Hash] type and key.
func New[H fips140.Hash](h func() H, key []byte) *HMAC {
    hm := &HMAC{keyLen: len(key)}
    hm.outer = h()
    hm.inner = h()
    unique := true
    func() {
        defer func() {
            // The comparison might panic if the underlying types are not comparable.
            _ = recover()
        }()
        if hm.outer == hm.inner {
            unique = false
        }
    }()
    if !unique {
        panic("crypto/hmac: hash generation function does not produce unique values")
    }
    blocksize := hm.inner.BlockSize()
    hm.ipad = make([]byte, blocksize)
    hm.opad = make([]byte, blocksize)
    if len(key) > blocksize {
        // If key is too big, hash it.
        hm.outer.Write(key)
        key = hm.outer.Sum(nil)
    }
    copy(hm.ipad, key)
    copy(hm.opad, key)
    for i := range hm.ipad {
        hm.ipad[i] ^= 0x36
    }
    for i := range hm.opad {
        hm.opad[i] ^= 0x5c
    }
    hm.inner.Write(hm.ipad)

    return hm
}

// MarkAsUsedInKDF records that this HMAC instance is used as part of a KDF.
func MarkAsUsedInKDF(h *HMAC) {
    h.forHKDF = true
}
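For illustration only (not part of this commit): the deleted type implements the standard FIPS 198-1 construction H((K xor opad) || H((K xor ipad) || text)); outside the FIPS-scoped module the stock crypto/hmac package computes the same MAC. A brief sketch with an illustrative key and message:

package main

import (
    "crypto/hmac"
    "crypto/sha256"
    "fmt"
)

func main() {
    key := []byte("a key of at least 112 bits....")
    msg := []byte("message to authenticate")

    mac := hmac.New(sha256.New, key) // same ipad/opad construction as the deleted type
    mac.Write(msg)
    tag := mac.Sum(nil)
    fmt.Printf("HMAC-SHA256 tag: %x\n", tag)
}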
hpke/hpye.go (17 lines)
@@ -11,12 +11,11 @@ import (
    "crypto/ecdh"
    "crypto/hkdf"
    "crypto/rand"
+    "encoding/binary"
    "errors"
    "math/bits"

    "golang.org/x/crypto/chacha20poly1305"
-
-    "github.com/xtls/reality/byteorder"
)

// testingOnlyGenerateKey is only used during testing, to provide
@@ -38,7 +37,7 @@ func (kdf *hkdfKDF) LabeledExtract(sid []byte, salt []byte, label string, inputK

func (kdf *hkdfKDF) LabeledExpand(suiteID []byte, randomKey []byte, label string, info []byte, length uint16) ([]byte, error) {
    labeledInfo := make([]byte, 0, 2+7+len(suiteID)+len(label)+len(info))
-    labeledInfo = byteorder.BEAppendUint16(labeledInfo, length)
+    labeledInfo = binary.BigEndian.AppendUint16(labeledInfo, length)
    labeledInfo = append(labeledInfo, []byte("HPKE-v1")...)
    labeledInfo = append(labeledInfo, suiteID...)
    labeledInfo = append(labeledInfo, label...)
@@ -76,7 +75,7 @@ func newDHKem(kemID uint16) (*dhKEM, error) {
    return &dhKEM{
        dh:      suite.curve,
        kdf:     hkdfKDF{suite.hash},
-        suiteID: byteorder.BEAppendUint16([]byte("KEM"), kemID),
+        suiteID: binary.BigEndian.AppendUint16([]byte("KEM"), kemID),
        nSecret: suite.nSecret,
    }, nil
}
@@ -313,9 +312,9 @@ func (r *Recipient) Open(aad, ciphertext []byte) ([]byte, error) {
func suiteID(kemID, kdfID, aeadID uint16) []byte {
    suiteID := make([]byte, 0, 4+2+2+2)
    suiteID = append(suiteID, []byte("HPKE")...)
-    suiteID = byteorder.BEAppendUint16(suiteID, kemID)
-    suiteID = byteorder.BEAppendUint16(suiteID, kdfID)
-    suiteID = byteorder.BEAppendUint16(suiteID, aeadID)
+    suiteID = binary.BigEndian.AppendUint16(suiteID, kemID)
+    suiteID = binary.BigEndian.AppendUint16(suiteID, kdfID)
+    suiteID = binary.BigEndian.AppendUint16(suiteID, aeadID)
    return suiteID
}

@@ -350,7 +349,7 @@ func (u uint128) bitLen() int {

func (u uint128) bytes() []byte {
    b := make([]byte, 16)
-    byteorder.BEPutUint64(b[0:], u.hi)
-    byteorder.BEPutUint64(b[8:], u.lo)
+    binary.BigEndian.PutUint64(b[0:], u.hi)
+    binary.BigEndian.PutUint64(b[8:], u.lo)
    return b
}
@@ -7,12 +7,12 @@ package reality
import (
    "crypto/ecdh"
    "crypto/hmac"
+    "crypto/mlkem"
+    "crypto/sha3"
    "errors"
    "hash"
    "io"

-    "github.com/xtls/reality/mlkem"
-    "github.com/xtls/reality/sha3"
    "github.com/xtls/reality/tls13"
)

@@ -80,7 +80,7 @@ func kyberSharedSecret(c, K []byte) []byte {
    // Package mlkem implements ML-KEM, which compared to Kyber removed a
    // final hashing step. Compute SHAKE-256(K || SHA3-256(c), 32) to match Kyber.
    // See https://words.filippo.io/mlkem768/#bonus-track-using-a-ml-kem-implementation-as-kyber-v3.
-    h := sha3.NewShake256()
+    h := sha3.NewSHAKE256()
    h.Write(K)
    ch := sha3.New256()
    ch.Write(c)
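For illustration only (not part of this commit): the hunk above switches from the vendored sha3 package to the standard library's crypto/sha3, whose XOF constructor is spelled NewSHAKE256. A hedged sketch of the whole SHAKE-256(K || SHA3-256(c), 32) computation, assuming Go 1.24+ for crypto/sha3; the tail of the function is not shown in the hunk, so the last few lines here are an assumption rather than the repository's exact code:

package main

import (
    "crypto/sha3"
    "fmt"
)

// kyberSharedSecret derives SHAKE-256(K || SHA3-256(c), 32), following the
// pattern visible in the truncated hunk above (tail reconstructed as an assumption).
func kyberSharedSecret(c, K []byte) []byte {
    h := sha3.NewSHAKE256()
    h.Write(K)
    ch := sha3.New256()
    ch.Write(c)
    h.Write(ch.Sum(nil))
    out := make([]byte, 32)
    h.Read(out)
    return out
}

func main() {
    ss := kyberSharedSecret([]byte("ciphertext"), []byte("ml-kem shared secret"))
    fmt.Printf("%x\n", ss)
}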
550
mlkem/field.go
550
mlkem/field.go
@ -1,550 +0,0 @@
|
||||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mlkem
|
||||
|
||||
import (
|
||||
"github.com/xtls/reality/sha3"
|
||||
"github.com/xtls/reality/byteorder"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// fieldElement is an integer modulo q, an element of ℤ_q. It is always reduced.
|
||||
type fieldElement uint16
|
||||
|
||||
// fieldCheckReduced checks that a value a is < q.
|
||||
func fieldCheckReduced(a uint16) (fieldElement, error) {
|
||||
if a >= q {
|
||||
return 0, errors.New("unreduced field element")
|
||||
}
|
||||
return fieldElement(a), nil
|
||||
}
|
||||
|
||||
// fieldReduceOnce reduces a value a < 2q.
|
||||
func fieldReduceOnce(a uint16) fieldElement {
|
||||
x := a - q
|
||||
// If x underflowed, then x >= 2¹⁶ - q > 2¹⁵, so the top bit is set.
|
||||
x += (x >> 15) * q
|
||||
return fieldElement(x)
|
||||
}
|
||||
|
||||
func fieldAdd(a, b fieldElement) fieldElement {
|
||||
x := uint16(a + b)
|
||||
return fieldReduceOnce(x)
|
||||
}
|
||||
|
||||
func fieldSub(a, b fieldElement) fieldElement {
|
||||
x := uint16(a - b + q)
|
||||
return fieldReduceOnce(x)
|
||||
}
|
||||
|
||||
const (
|
||||
barrettMultiplier = 5039 // 2¹² * 2¹² / q
|
||||
barrettShift = 24 // log₂(2¹² * 2¹²)
|
||||
)
|
||||
|
||||
// fieldReduce reduces a value a < 2q² using Barrett reduction, to avoid
|
||||
// potentially variable-time division.
|
||||
func fieldReduce(a uint32) fieldElement {
|
||||
quotient := uint32((uint64(a) * barrettMultiplier) >> barrettShift)
|
||||
return fieldReduceOnce(uint16(a - quotient*q))
|
||||
}
|
||||
|
||||
func fieldMul(a, b fieldElement) fieldElement {
|
||||
x := uint32(a) * uint32(b)
|
||||
return fieldReduce(x)
|
||||
}
|
||||
|
||||
// fieldMulSub returns a * (b - c). This operation is fused to save a
|
||||
// fieldReduceOnce after the subtraction.
|
||||
func fieldMulSub(a, b, c fieldElement) fieldElement {
|
||||
x := uint32(a) * uint32(b-c+q)
|
||||
return fieldReduce(x)
|
||||
}
|
||||
|
||||
// fieldAddMul returns a * b + c * d. This operation is fused to save a
|
||||
// fieldReduceOnce and a fieldReduce.
|
||||
func fieldAddMul(a, b, c, d fieldElement) fieldElement {
|
||||
x := uint32(a) * uint32(b)
|
||||
x += uint32(c) * uint32(d)
|
||||
return fieldReduce(x)
|
||||
}
|
||||
|
||||
// compress maps a field element uniformly to the range 0 to 2ᵈ-1, according to
|
||||
// FIPS 203, Definition 4.7.
|
||||
func compress(x fieldElement, d uint8) uint16 {
|
||||
// We want to compute (x * 2ᵈ) / q, rounded to nearest integer, with 1/2
|
||||
// rounding up (see FIPS 203, Section 2.3).
|
||||
|
||||
// Barrett reduction produces a quotient and a remainder in the range [0, 2q),
|
||||
// such that dividend = quotient * q + remainder.
|
||||
dividend := uint32(x) << d // x * 2ᵈ
|
||||
quotient := uint32(uint64(dividend) * barrettMultiplier >> barrettShift)
|
||||
remainder := dividend - quotient*q
|
||||
|
||||
// Since the remainder is in the range [0, 2q), not [0, q), we need to
|
||||
// portion it into three spans for rounding.
|
||||
//
|
||||
// [ 0, q/2 ) -> round to 0
|
||||
// [ q/2, q + q/2 ) -> round to 1
|
||||
// [ q + q/2, 2q ) -> round to 2
|
||||
//
|
||||
// We can convert that to the following logic: add 1 if remainder > q/2,
|
||||
// then add 1 again if remainder > q + q/2.
|
||||
//
|
||||
// Note that if remainder > x, then ⌊x⌋ - remainder underflows, and the top
|
||||
// bit of the difference will be set.
|
||||
quotient += (q/2 - remainder) >> 31 & 1
|
||||
quotient += (q + q/2 - remainder) >> 31 & 1
|
||||
|
||||
// quotient might have overflowed at this point, so reduce it by masking.
|
||||
var mask uint32 = (1 << d) - 1
|
||||
return uint16(quotient & mask)
|
||||
}
|
||||
|
||||
// decompress maps a number x between 0 and 2ᵈ-1 uniformly to the full range of
|
||||
// field elements, according to FIPS 203, Definition 4.8.
|
||||
func decompress(y uint16, d uint8) fieldElement {
|
||||
// We want to compute (y * q) / 2ᵈ, rounded to nearest integer, with 1/2
|
||||
// rounding up (see FIPS 203, Section 2.3).
|
||||
|
||||
dividend := uint32(y) * q
|
||||
quotient := dividend >> d // (y * q) / 2ᵈ
|
||||
|
||||
// The d'th least-significant bit of the dividend (the most significant bit
|
||||
// of the remainder) is 1 for the top half of the values that divide to the
|
||||
// same quotient, which are the ones that round up.
|
||||
quotient += dividend >> (d - 1) & 1
|
||||
|
||||
// quotient is at most (2¹¹-1) * q / 2¹¹ + 1 = 3328, so it didn't overflow.
|
||||
return fieldElement(quotient)
|
||||
}
|
||||
|
||||
// ringElement is a polynomial, an element of R_q, represented as an array
|
||||
// according to FIPS 203, Section 2.4.4.
|
||||
type ringElement [n]fieldElement
|
||||
|
||||
// polyAdd adds two ringElements or nttElements.
|
||||
func polyAdd[T ~[n]fieldElement](a, b T) (s T) {
|
||||
for i := range s {
|
||||
s[i] = fieldAdd(a[i], b[i])
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// polySub subtracts two ringElements or nttElements.
|
||||
func polySub[T ~[n]fieldElement](a, b T) (s T) {
|
||||
for i := range s {
|
||||
s[i] = fieldSub(a[i], b[i])
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// polyByteEncode appends the 384-byte encoding of f to b.
|
||||
//
|
||||
// It implements ByteEncode₁₂, according to FIPS 203, Algorithm 5.
|
||||
func polyByteEncode[T ~[n]fieldElement](b []byte, f T) []byte {
|
||||
out, B := sliceForAppend(b, encodingSize12)
|
||||
for i := 0; i < n; i += 2 {
|
||||
x := uint32(f[i]) | uint32(f[i+1])<<12
|
||||
B[0] = uint8(x)
|
||||
B[1] = uint8(x >> 8)
|
||||
B[2] = uint8(x >> 16)
|
||||
B = B[3:]
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// polyByteDecode decodes the 384-byte encoding of a polynomial, checking that
|
||||
// all the coefficients are properly reduced. This fulfills the "Modulus check"
|
||||
// step of ML-KEM Encapsulation.
|
||||
//
|
||||
// It implements ByteDecode₁₂, according to FIPS 203, Algorithm 6.
|
||||
func polyByteDecode[T ~[n]fieldElement](b []byte) (T, error) {
|
||||
if len(b) != encodingSize12 {
|
||||
return T{}, errors.New("mlkem: invalid encoding length")
|
||||
}
|
||||
var f T
|
||||
for i := 0; i < n; i += 2 {
|
||||
d := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16
|
||||
const mask12 = 0b1111_1111_1111
|
||||
var err error
|
||||
if f[i], err = fieldCheckReduced(uint16(d & mask12)); err != nil {
|
||||
return T{}, errors.New("mlkem: invalid polynomial encoding")
|
||||
}
|
||||
if f[i+1], err = fieldCheckReduced(uint16(d >> 12)); err != nil {
|
||||
return T{}, errors.New("mlkem: invalid polynomial encoding")
|
||||
}
|
||||
b = b[3:]
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// sliceForAppend takes a slice and a requested number of bytes. It returns a
|
||||
// slice with the contents of the given slice followed by that many bytes and a
|
||||
// second slice that aliases into it and contains only the extra bytes. If the
|
||||
// original slice has sufficient capacity then no allocation is performed.
|
||||
func sliceForAppend(in []byte, n int) (head, tail []byte) {
|
||||
if total := len(in) + n; cap(in) >= total {
|
||||
head = in[:total]
|
||||
} else {
|
||||
head = make([]byte, total)
|
||||
copy(head, in)
|
||||
}
|
||||
tail = head[len(in):]
|
||||
return
|
||||
}
|
||||
|
||||
// ringCompressAndEncode1 appends a 32-byte encoding of a ring element to s,
|
||||
// compressing one coefficients per bit.
|
||||
//
|
||||
// It implements Compress₁, according to FIPS 203, Definition 4.7,
|
||||
// followed by ByteEncode₁, according to FIPS 203, Algorithm 5.
|
||||
func ringCompressAndEncode1(s []byte, f ringElement) []byte {
|
||||
s, b := sliceForAppend(s, encodingSize1)
|
||||
for i := range b {
|
||||
b[i] = 0
|
||||
}
|
||||
for i := range f {
|
||||
b[i/8] |= uint8(compress(f[i], 1) << (i % 8))
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// ringDecodeAndDecompress1 decodes a 32-byte slice to a ring element where each
|
||||
// bit is mapped to 0 or ⌈q/2⌋.
|
||||
//
|
||||
// It implements ByteDecode₁, according to FIPS 203, Algorithm 6,
|
||||
// followed by Decompress₁, according to FIPS 203, Definition 4.8.
|
||||
func ringDecodeAndDecompress1(b *[encodingSize1]byte) ringElement {
|
||||
var f ringElement
|
||||
for i := range f {
|
||||
b_i := b[i/8] >> (i % 8) & 1
|
||||
const halfQ = (q + 1) / 2 // ⌈q/2⌋, rounded up per FIPS 203, Section 2.3
|
||||
f[i] = fieldElement(b_i) * halfQ // 0 decompresses to 0, and 1 to ⌈q/2⌋
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// ringCompressAndEncode4 appends a 128-byte encoding of a ring element to s,
|
||||
// compressing two coefficients per byte.
|
||||
//
|
||||
// It implements Compress₄, according to FIPS 203, Definition 4.7,
|
||||
// followed by ByteEncode₄, according to FIPS 203, Algorithm 5.
|
||||
func ringCompressAndEncode4(s []byte, f ringElement) []byte {
|
||||
s, b := sliceForAppend(s, encodingSize4)
|
||||
for i := 0; i < n; i += 2 {
|
||||
b[i/2] = uint8(compress(f[i], 4) | compress(f[i+1], 4)<<4)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// ringDecodeAndDecompress4 decodes a 128-byte encoding of a ring element where
|
||||
// each four bits are mapped to an equidistant distribution.
|
||||
//
|
||||
// It implements ByteDecode₄, according to FIPS 203, Algorithm 6,
|
||||
// followed by Decompress₄, according to FIPS 203, Definition 4.8.
|
||||
func ringDecodeAndDecompress4(b *[encodingSize4]byte) ringElement {
|
||||
var f ringElement
|
||||
for i := 0; i < n; i += 2 {
|
||||
f[i] = fieldElement(decompress(uint16(b[i/2]&0b1111), 4))
|
||||
f[i+1] = fieldElement(decompress(uint16(b[i/2]>>4), 4))
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// ringCompressAndEncode10 appends a 320-byte encoding of a ring element to s,
|
||||
// compressing four coefficients per five bytes.
|
||||
//
|
||||
// It implements Compress₁₀, according to FIPS 203, Definition 4.7,
|
||||
// followed by ByteEncode₁₀, according to FIPS 203, Algorithm 5.
|
||||
func ringCompressAndEncode10(s []byte, f ringElement) []byte {
|
||||
s, b := sliceForAppend(s, encodingSize10)
|
||||
for i := 0; i < n; i += 4 {
|
||||
var x uint64
|
||||
x |= uint64(compress(f[i], 10))
|
||||
x |= uint64(compress(f[i+1], 10)) << 10
|
||||
x |= uint64(compress(f[i+2], 10)) << 20
|
||||
x |= uint64(compress(f[i+3], 10)) << 30
|
||||
b[0] = uint8(x)
|
||||
b[1] = uint8(x >> 8)
|
||||
b[2] = uint8(x >> 16)
|
||||
b[3] = uint8(x >> 24)
|
||||
b[4] = uint8(x >> 32)
|
||||
b = b[5:]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// ringDecodeAndDecompress10 decodes a 320-byte encoding of a ring element where
|
||||
// each ten bits are mapped to an equidistant distribution.
|
||||
//
|
||||
// It implements ByteDecode₁₀, according to FIPS 203, Algorithm 6,
|
||||
// followed by Decompress₁₀, according to FIPS 203, Definition 4.8.
|
||||
func ringDecodeAndDecompress10(bb *[encodingSize10]byte) ringElement {
|
||||
b := bb[:]
|
||||
var f ringElement
|
||||
for i := 0; i < n; i += 4 {
|
||||
x := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32
|
||||
b = b[5:]
|
||||
f[i] = fieldElement(decompress(uint16(x>>0&0b11_1111_1111), 10))
|
||||
f[i+1] = fieldElement(decompress(uint16(x>>10&0b11_1111_1111), 10))
|
||||
f[i+2] = fieldElement(decompress(uint16(x>>20&0b11_1111_1111), 10))
|
||||
f[i+3] = fieldElement(decompress(uint16(x>>30&0b11_1111_1111), 10))
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// ringCompressAndEncode appends an encoding of a ring element to s,
|
||||
// compressing each coefficient to d bits.
|
||||
//
|
||||
// It implements Compress, according to FIPS 203, Definition 4.7,
|
||||
// followed by ByteEncode, according to FIPS 203, Algorithm 5.
|
||||
func ringCompressAndEncode(s []byte, f ringElement, d uint8) []byte {
|
||||
var b byte
|
||||
var bIdx uint8
|
||||
for i := 0; i < n; i++ {
|
||||
c := compress(f[i], d)
|
||||
var cIdx uint8
|
||||
for cIdx < d {
|
||||
b |= byte(c>>cIdx) << bIdx
|
||||
bits := min(8-bIdx, d-cIdx)
|
||||
bIdx += bits
|
||||
cIdx += bits
|
||||
if bIdx == 8 {
|
||||
s = append(s, b)
|
||||
b = 0
|
||||
bIdx = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
if bIdx != 0 {
|
||||
panic("mlkem: internal error: bitsFilled != 0")
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// ringDecodeAndDecompress decodes an encoding of a ring element where
|
||||
// each d bits are mapped to an equidistant distribution.
|
||||
//
|
||||
// It implements ByteDecode, according to FIPS 203, Algorithm 6,
|
||||
// followed by Decompress, according to FIPS 203, Definition 4.8.
|
||||
func ringDecodeAndDecompress(b []byte, d uint8) ringElement {
|
||||
var f ringElement
|
||||
var bIdx uint8
|
||||
for i := 0; i < n; i++ {
|
||||
var c uint16
|
||||
var cIdx uint8
|
||||
for cIdx < d {
|
||||
c |= uint16(b[0]>>bIdx) << cIdx
|
||||
c &= (1 << d) - 1
|
||||
bits := min(8-bIdx, d-cIdx)
|
||||
bIdx += bits
|
||||
cIdx += bits
|
||||
if bIdx == 8 {
|
||||
b = b[1:]
|
||||
bIdx = 0
|
||||
}
|
||||
}
|
||||
f[i] = fieldElement(decompress(c, d))
|
||||
}
|
||||
if len(b) != 0 {
|
||||
panic("mlkem: internal error: leftover bytes")
|
||||
}
|
||||
return f
|
||||
}
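// compressIdempotenceSketch is a reviewer aid for the generic pair above: it
// checks that decoding a d-bit encoding and re-encoding it reproduces the
// same bytes, i.e. Compress_d after Decompress_d is the identity for d < 12.
// It assumes a "bytes" import and is not part of the package API.
func compressIdempotenceSketch(f ringElement, d uint8) bool {
	enc := ringCompressAndEncode(nil, f, d)
	enc2 := ringCompressAndEncode(nil, ringDecodeAndDecompress(enc, d), d)
	return bytes.Equal(enc, enc2)
}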
|
||||
|
||||
// ringCompressAndEncode5 appends a 160-byte encoding of a ring element to s,
|
||||
// compressing eight coefficients per five bytes.
|
||||
//
|
||||
// It implements Compress₅, according to FIPS 203, Definition 4.7,
|
||||
// followed by ByteEncode₅, according to FIPS 203, Algorithm 5.
|
||||
func ringCompressAndEncode5(s []byte, f ringElement) []byte {
|
||||
return ringCompressAndEncode(s, f, 5)
|
||||
}
|
||||
|
||||
// ringDecodeAndDecompress5 decodes a 160-byte encoding of a ring element where
|
||||
// each five bits are mapped to an equidistant distribution.
|
||||
//
|
||||
// It implements ByteDecode₅, according to FIPS 203, Algorithm 6,
|
||||
// followed by Decompress₅, according to FIPS 203, Definition 4.8.
|
||||
func ringDecodeAndDecompress5(bb *[encodingSize5]byte) ringElement {
|
||||
return ringDecodeAndDecompress(bb[:], 5)
|
||||
}
|
||||
|
||||
// ringCompressAndEncode11 appends a 352-byte encoding of a ring element to s,
|
||||
// compressing eight coefficients per eleven bytes.
|
||||
//
|
||||
// It implements Compress₁₁, according to FIPS 203, Definition 4.7,
|
||||
// followed by ByteEncode₁₁, according to FIPS 203, Algorithm 5.
|
||||
func ringCompressAndEncode11(s []byte, f ringElement) []byte {
|
||||
return ringCompressAndEncode(s, f, 11)
|
||||
}
|
||||
|
||||
// ringDecodeAndDecompress11 decodes a 352-byte encoding of a ring element where
|
||||
// each eleven bits are mapped to an equidistant distribution.
|
||||
//
|
||||
// It implements ByteDecode₁₁, according to FIPS 203, Algorithm 6,
|
||||
// followed by Decompress₁₁, according to FIPS 203, Definition 4.8.
|
||||
func ringDecodeAndDecompress11(bb *[encodingSize11]byte) ringElement {
|
||||
return ringDecodeAndDecompress(bb[:], 11)
|
||||
}
|
||||
|
||||
// samplePolyCBD draws a ringElement from the special Dη distribution given a
|
||||
// stream of random bytes generated by the PRF function, according to FIPS 203,
|
||||
// Algorithm 8 and Definition 4.3.
|
||||
func samplePolyCBD(s []byte, b byte) ringElement {
|
||||
prf := sha3.NewShake256()
|
||||
prf.Write(s)
|
||||
prf.Write([]byte{b})
|
||||
B := make([]byte, 64*2) // η = 2
|
||||
prf.Read(B)
|
||||
|
||||
// SamplePolyCBD simply draws four (2η) bits for each coefficient, and adds
|
||||
// the first two and subtracts the last two.
|
||||
|
||||
var f ringElement
|
||||
for i := 0; i < n; i += 2 {
|
||||
b := B[i/2]
|
||||
b_7, b_6, b_5, b_4 := b>>7, b>>6&1, b>>5&1, b>>4&1
|
||||
b_3, b_2, b_1, b_0 := b>>3&1, b>>2&1, b>>1&1, b&1
|
||||
f[i] = fieldSub(fieldElement(b_0+b_1), fieldElement(b_2+b_3))
|
||||
f[i+1] = fieldSub(fieldElement(b_4+b_5), fieldElement(b_6+b_7))
|
||||
}
|
||||
return f
|
||||
}
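// cbd2DistributionSketch enumerates the sixteen four-bit patterns consumed
// per coefficient above, confirming that samplePolyCBD draws from the
// centered binomial distribution D₂: the values -2..2 with weights
// 1, 4, 6, 4, 1 out of 16 (negative values are reduced mod q by fieldSub).
func cbd2DistributionSketch() map[int]int {
	counts := make(map[int]int)
	for bits := 0; bits < 16; bits++ {
		b0, b1 := bits&1, bits>>1&1
		b2, b3 := bits>>2&1, bits>>3&1
		counts[(b0+b1)-(b2+b3)]++
	}
	return counts // {-2: 1, -1: 4, 0: 6, 1: 4, 2: 1}
}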
|
||||
|
||||
// nttElement is an NTT representation, an element of T_q, represented as an
|
||||
// array according to FIPS 203, Section 2.4.4.
|
||||
type nttElement [n]fieldElement
|
||||
|
||||
// gammas are the values ζ^2BitRev7(i)+1 mod q for each index i, according to
|
||||
// FIPS 203, Appendix A (with negative values reduced to positive).
|
||||
var gammas = [128]fieldElement{17, 3312, 2761, 568, 583, 2746, 2649, 680, 1637, 1692, 723, 2606, 2288, 1041, 1100, 2229, 1409, 1920, 2662, 667, 3281, 48, 233, 3096, 756, 2573, 2156, 1173, 3015, 314, 3050, 279, 1703, 1626, 1651, 1678, 2789, 540, 1789, 1540, 1847, 1482, 952, 2377, 1461, 1868, 2687, 642, 939, 2390, 2308, 1021, 2437, 892, 2388, 941, 733, 2596, 2337, 992, 268, 3061, 641, 2688, 1584, 1745, 2298, 1031, 2037, 1292, 3220, 109, 375, 2954, 2549, 780, 2090, 1239, 1645, 1684, 1063, 2266, 319, 3010, 2773, 556, 757, 2572, 2099, 1230, 561, 2768, 2466, 863, 2594, 735, 2804, 525, 1092, 2237, 403, 2926, 1026, 2303, 1143, 2186, 2150, 1179, 2775, 554, 886, 2443, 1722, 1607, 1212, 2117, 1874, 1455, 1029, 2300, 2110, 1219, 2935, 394, 885, 2444, 2154, 1175}
|
||||
|
||||
// nttMul multiplies two nttElements.
|
||||
//
|
||||
// It implements MultiplyNTTs, according to FIPS 203, Algorithm 11.
|
||||
func nttMul(f, g nttElement) nttElement {
|
||||
var h nttElement
|
||||
// We use i += 2 for bounds check elimination. See https://go.dev/issue/66826.
|
||||
for i := 0; i < 256; i += 2 {
|
||||
a0, a1 := f[i], f[i+1]
|
||||
b0, b1 := g[i], g[i+1]
|
||||
h[i] = fieldAddMul(a0, b0, fieldMul(a1, b1), gammas[i/2])
|
||||
h[i+1] = fieldAddMul(a0, b1, a1, b0)
|
||||
}
|
||||
return h
|
||||
}
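// nttMulPairSketch spells out the degree-one product nttMul computes for each
// coefficient pair: in Z_q[X]/(X² − γ),
//
//	(a0 + a1·X)(b0 + b1·X) = (a0·b0 + a1·b1·γ) + (a0·b1 + a1·b0)·X
//
// It uses only fieldAdd and fieldMul from this package; fieldAddMul above is
// assumed to be the fused form of the same arithmetic.
func nttMulPairSketch(a0, a1, b0, b1, gamma fieldElement) (c0, c1 fieldElement) {
	c0 = fieldAdd(fieldMul(a0, b0), fieldMul(fieldMul(a1, b1), gamma))
	c1 = fieldAdd(fieldMul(a0, b1), fieldMul(a1, b0))
	return c0, c1
}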
|
||||
|
||||
// zetas are the values ζ^BitRev7(k) mod q for each index k, according to FIPS
|
||||
// 203, Appendix A.
|
||||
var zetas = [128]fieldElement{1, 1729, 2580, 3289, 2642, 630, 1897, 848, 1062, 1919, 193, 797, 2786, 3260, 569, 1746, 296, 2447, 1339, 1476, 3046, 56, 2240, 1333, 1426, 2094, 535, 2882, 2393, 2879, 1974, 821, 289, 331, 3253, 1756, 1197, 2304, 2277, 2055, 650, 1977, 2513, 632, 2865, 33, 1320, 1915, 2319, 1435, 807, 452, 1438, 2868, 1534, 2402, 2647, 2617, 1481, 648, 2474, 3110, 1227, 910, 17, 2761, 583, 2649, 1637, 723, 2288, 1100, 1409, 2662, 3281, 233, 756, 2156, 3015, 3050, 1703, 1651, 2789, 1789, 1847, 952, 1461, 2687, 939, 2308, 2437, 2388, 733, 2337, 268, 641, 1584, 2298, 2037, 3220, 375, 2549, 2090, 1645, 1063, 319, 2773, 757, 2099, 561, 2466, 2594, 2804, 1092, 403, 1026, 1143, 2150, 2775, 886, 1722, 1212, 1874, 1029, 2110, 2935, 885, 2154}
|
||||
|
||||
// ntt maps a ringElement to its nttElement representation.
|
||||
//
|
||||
// It implements NTT, according to FIPS 203, Algorithm 9.
|
||||
func ntt(f ringElement) nttElement {
|
||||
k := 1
|
||||
for len := 128; len >= 2; len /= 2 {
|
||||
for start := 0; start < 256; start += 2 * len {
|
||||
zeta := zetas[k]
|
||||
k++
|
||||
// Bounds check elimination hint.
|
||||
f, flen := f[start:start+len], f[start+len:start+len+len]
|
||||
for j := 0; j < len; j++ {
|
||||
t := fieldMul(zeta, flen[j])
|
||||
flen[j] = fieldSub(f[j], t)
|
||||
f[j] = fieldAdd(f[j], t)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nttElement(f)
|
||||
}
|
||||
|
||||
// inverseNTT maps a nttElement back to the ringElement it represents.
|
||||
//
|
||||
// It implements NTT⁻¹, according to FIPS 203, Algorithm 10.
|
||||
func inverseNTT(f nttElement) ringElement {
|
||||
k := 127
|
||||
for len := 2; len <= 128; len *= 2 {
|
||||
for start := 0; start < 256; start += 2 * len {
|
||||
zeta := zetas[k]
|
||||
k--
|
||||
// Bounds check elimination hint.
|
||||
f, flen := f[start:start+len], f[start+len:start+len+len]
|
||||
for j := 0; j < len; j++ {
|
||||
t := f[j]
|
||||
f[j] = fieldAdd(t, flen[j])
|
||||
flen[j] = fieldMulSub(zeta, flen[j], t)
|
||||
}
|
||||
}
|
||||
}
|
||||
for i := range f {
|
||||
f[i] = fieldMul(f[i], 3303) // 3303 = 128⁻¹ mod q
|
||||
}
|
||||
return ringElement(f)
|
||||
}
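// inverseNTTScaleSketch checks the constant used just above: each of the
// seven butterfly layers leaves a factor of two, so the result is scaled by
// 128⁻¹ mod q, and indeed 128·3303 = 422784 = 127·3329 + 1 ≡ 1 (mod q).
func inverseNTTScaleSketch() bool {
	return 128*3303%q == 1
}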
|
||||
|
||||
// sampleNTT draws a uniformly random nttElement from a stream of uniformly
|
||||
// random bytes generated by the XOF function, according to FIPS 203,
|
||||
// Algorithm 7.
|
||||
func sampleNTT(rho []byte, ii, jj byte) nttElement {
|
||||
B := sha3.NewShake128()
|
||||
B.Write(rho)
|
||||
B.Write([]byte{ii, jj})
|
||||
|
||||
// SampleNTT essentially draws 12 bits at a time from r, interprets them in
|
||||
// little-endian, and rejects values greater than or equal to q, until it has drawn 256
|
||||
// values. (The rejection rate is approximately 19%.)
|
||||
//
|
||||
// To do this from a bytes stream, it draws three bytes at a time, and
|
||||
// splits them into two uint16 appropriately masked.
|
||||
//
|
||||
// r₀ r₁ r₂
|
||||
// |- - - - - - - -|- - - - - - - -|- - - - - - - -|
|
||||
//
|
||||
// Uint16(r₀ || r₁)
|
||||
// |- - - - - - - - - - - - - - - -|
|
||||
// |- - - - - - - - - - - -|
|
||||
// d₁
|
||||
//
|
||||
// Uint16(r₁ || r₂)
|
||||
// |- - - - - - - - - - - - - - - -|
|
||||
// |- - - - - - - - - - - -|
|
||||
// d₂
|
||||
//
|
||||
// Note that in little-endian, the rightmost bits are the most significant
|
||||
// bits (dropped with a mask) and the leftmost bits are the least
|
||||
// significant bits (dropped with a right shift).
|
||||
|
||||
var a nttElement
|
||||
var j int // index into a
|
||||
var buf [24]byte // buffered reads from B
|
||||
off := len(buf) // index into buf, starts in a "buffer fully consumed" state
|
||||
for {
|
||||
if off >= len(buf) {
|
||||
B.Read(buf[:])
|
||||
off = 0
|
||||
}
|
||||
d1 := byteorder.LEUint16(buf[off:]) & 0b1111_1111_1111
|
||||
d2 := byteorder.LEUint16(buf[off+1:]) >> 4
|
||||
off += 3
|
||||
if d1 < q {
|
||||
a[j] = fieldElement(d1)
|
||||
j++
|
||||
}
|
||||
if j >= len(a) {
|
||||
break
|
||||
}
|
||||
if d2 < q {
|
||||
a[j] = fieldElement(d2)
|
||||
j++
|
||||
}
|
||||
if j >= len(a) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return a
|
||||
}
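// splitSketch shows the byte-to-candidate mapping used in the loop above with
// a concrete example: the stream bytes 0xAB, 0xCD, 0xEF give the
// little-endian words 0xCDAB and 0xEFCD, hence the 12-bit candidates
// d1 = 0xCDAB & 0xFFF = 0xDAB and d2 = 0xEFCD >> 4 = 0xEFC.
func splitSketch(r0, r1, r2 byte) (d1, d2 uint16) {
	d1 = (uint16(r0) | uint16(r1)<<8) & 0b1111_1111_1111
	d2 = (uint16(r1) | uint16(r2)<<8) >> 4
	return d1, d2
}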
|
@ -1,517 +0,0 @@
|
||||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package mlkem implements the quantum-resistant key encapsulation method
|
||||
// ML-KEM (formerly known as Kyber), as specified in [NIST FIPS 203].
|
||||
//
|
||||
// [NIST FIPS 203]: https://doi.org/10.6028/NIST.FIPS.203
|
||||
package mlkem
|
||||
|
||||
// This package targets security, correctness, simplicity, readability, and
|
||||
// reviewability as its primary goals. All critical operations are performed in
|
||||
// constant time.
|
||||
//
|
||||
// Variable and function names, as well as code layout, are selected to
|
||||
// facilitate reviewing the implementation against the NIST FIPS 203 document.
|
||||
//
|
||||
// Reviewers unfamiliar with polynomials or linear algebra might find the
|
||||
// background at https://words.filippo.io/kyber-math/ useful.
|
||||
//
|
||||
// This file implements the recommended parameter set ML-KEM-768. The ML-KEM-1024
|
||||
// parameter set implementation is auto-generated from this file.
|
||||
//
|
||||
//go:generate go run generate1024.go -input mlkem768.go -output mlkem1024.go
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
//"github.com/xtls/reality/fips140"
|
||||
"github.com/xtls/reality/drbg"
|
||||
"github.com/xtls/reality/sha3"
|
||||
"github.com/xtls/reality/subtle"
|
||||
"errors"
|
||||
)
|
||||
|
||||
const (
|
||||
// ML-KEM global constants.
|
||||
n = 256
|
||||
q = 3329
|
||||
|
||||
// encodingSizeX is the byte size of a ringElement or nttElement encoded
|
||||
// by ByteEncode_X (FIPS 203, Algorithm 5).
|
||||
encodingSize12 = n * 12 / 8
|
||||
encodingSize11 = n * 11 / 8
|
||||
encodingSize10 = n * 10 / 8
|
||||
encodingSize5 = n * 5 / 8
|
||||
encodingSize4 = n * 4 / 8
|
||||
encodingSize1 = n * 1 / 8
|
||||
|
||||
messageSize = encodingSize1
|
||||
|
||||
SharedKeySize = 32
|
||||
SeedSize = 32 + 32
|
||||
)
|
||||
|
||||
// ML-KEM-768 parameters.
|
||||
const (
|
||||
k = 3
|
||||
|
||||
CiphertextSize768 = k*encodingSize10 + encodingSize4
|
||||
EncapsulationKeySize768 = k*encodingSize12 + 32
|
||||
decapsulationKeySize768 = k*encodingSize12 + EncapsulationKeySize768 + 32 + 32
|
||||
)
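// For reference, the constants above work out to the byte sizes given in
// FIPS 203 for ML-KEM-768: encodingSize12 = 384, encodingSize10 = 320, and
// encodingSize4 = 128, so EncapsulationKeySize768 = 3·384 + 32 = 1184,
// CiphertextSize768 = 3·320 + 128 = 1088, and
// decapsulationKeySize768 = 1152 + 1184 + 32 + 32 = 2400.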
|
||||
|
||||
// ML-KEM-1024 parameters.
|
||||
const (
|
||||
k1024 = 4
|
||||
|
||||
CiphertextSize1024 = k1024*encodingSize11 + encodingSize5
|
||||
EncapsulationKeySize1024 = k1024*encodingSize12 + 32
|
||||
decapsulationKeySize1024 = k1024*encodingSize12 + EncapsulationKeySize1024 + 32 + 32
|
||||
)
|
||||
|
||||
// A DecapsulationKey768 is the secret key used to decapsulate a shared key from a
|
||||
// ciphertext. It includes various precomputed values.
|
||||
type DecapsulationKey768 struct {
|
||||
d [32]byte // decapsulation key seed
|
||||
z [32]byte // implicit rejection seed
|
||||
|
||||
ρ [32]byte // sampleNTT seed for A, stored for the encapsulation key
|
||||
h [32]byte // H(ek), stored for ML-KEM.Decaps_internal
|
||||
|
||||
encryptionKey
|
||||
decryptionKey
|
||||
}
|
||||
|
||||
// Bytes returns the decapsulation key as a 64-byte seed in the "d || z" form.
|
||||
//
|
||||
// The decapsulation key must be kept secret.
|
||||
func (dk *DecapsulationKey768) Bytes() []byte {
|
||||
var b [SeedSize]byte
|
||||
copy(b[:], dk.d[:])
|
||||
copy(b[32:], dk.z[:])
|
||||
return b[:]
|
||||
}
|
||||
|
||||
// TestingOnlyExpandedBytes768 returns the decapsulation key as a byte slice
|
||||
// using the full expanded NIST encoding.
|
||||
//
|
||||
// This should only be used for ACVP testing. For all other purposes prefer
|
||||
// the Bytes method that returns the (much smaller) seed.
|
||||
func TestingOnlyExpandedBytes768(dk *DecapsulationKey768) []byte {
|
||||
b := make([]byte, 0, decapsulationKeySize768)
|
||||
|
||||
// ByteEncode₁₂(s)
|
||||
for i := range dk.s {
|
||||
b = polyByteEncode(b, dk.s[i])
|
||||
}
|
||||
|
||||
// ByteEncode₁₂(t) || ρ
|
||||
for i := range dk.t {
|
||||
b = polyByteEncode(b, dk.t[i])
|
||||
}
|
||||
b = append(b, dk.ρ[:]...)
|
||||
|
||||
// H(ek) || z
|
||||
b = append(b, dk.h[:]...)
|
||||
b = append(b, dk.z[:]...)
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// EncapsulationKey returns the public encapsulation key necessary to produce
|
||||
// ciphertexts.
|
||||
func (dk *DecapsulationKey768) EncapsulationKey() *EncapsulationKey768 {
|
||||
return &EncapsulationKey768{
|
||||
ρ: dk.ρ,
|
||||
h: dk.h,
|
||||
encryptionKey: dk.encryptionKey,
|
||||
}
|
||||
}
|
||||
|
||||
// An EncapsulationKey768 is the public key used to produce ciphertexts to be
|
||||
// decapsulated by the corresponding [DecapsulationKey768].
|
||||
type EncapsulationKey768 struct {
|
||||
ρ [32]byte // sampleNTT seed for A
|
||||
h [32]byte // H(ek)
|
||||
encryptionKey
|
||||
}
|
||||
|
||||
// Bytes returns the encapsulation key as a byte slice.
|
||||
func (ek *EncapsulationKey768) Bytes() []byte {
|
||||
// The actual logic is in a separate function to outline this allocation.
|
||||
b := make([]byte, 0, EncapsulationKeySize768)
|
||||
return ek.bytes(b)
|
||||
}
|
||||
|
||||
func (ek *EncapsulationKey768) bytes(b []byte) []byte {
|
||||
for i := range ek.t {
|
||||
b = polyByteEncode(b, ek.t[i])
|
||||
}
|
||||
b = append(b, ek.ρ[:]...)
|
||||
return b
|
||||
}
|
||||
|
||||
// encryptionKey is the parsed and expanded form of a PKE encryption key.
|
||||
type encryptionKey struct {
|
||||
t [k]nttElement // ByteDecode₁₂(ek[:384k])
|
||||
a [k * k]nttElement // A[i*k+j] = sampleNTT(ρ, j, i)
|
||||
}
|
||||
|
||||
// decryptionKey is the parsed and expanded form of a PKE decryption key.
|
||||
type decryptionKey struct {
|
||||
s [k]nttElement // ByteDecode₁₂(dk[:decryptionKeySize])
|
||||
}
|
||||
|
||||
// GenerateKey768 generates a new decapsulation key, drawing random bytes from
|
||||
// a DRBG. The decapsulation key must be kept secret.
|
||||
func GenerateKey768() (*DecapsulationKey768, error) {
|
||||
// The actual logic is in a separate function to outline this allocation.
|
||||
dk := &DecapsulationKey768{}
|
||||
return generateKey(dk)
|
||||
}
|
||||
|
||||
func generateKey(dk *DecapsulationKey768) (*DecapsulationKey768, error) {
|
||||
var d [32]byte
|
||||
drbg.Read(d[:])
|
||||
var z [32]byte
|
||||
drbg.Read(z[:])
|
||||
kemKeyGen(dk, &d, &z)
|
||||
// if err := fips140.PCT("ML-KEM PCT", func() error { return kemPCT(dk) }); err != nil {
|
||||
// // This clearly can't happen, but FIPS 140-3 requires us to check.
|
||||
// panic(err)
|
||||
// }
|
||||
//fips140.RecordApproved()
|
||||
return dk, nil
|
||||
}
|
||||
|
||||
// GenerateKeyInternal768 is a derandomized version of GenerateKey768,
|
||||
// exclusively for use in tests.
|
||||
func GenerateKeyInternal768(d, z *[32]byte) *DecapsulationKey768 {
|
||||
dk := &DecapsulationKey768{}
|
||||
kemKeyGen(dk, d, z)
|
||||
return dk
|
||||
}
|
||||
|
||||
// NewDecapsulationKey768 parses a decapsulation key from a 64-byte
|
||||
// seed in the "d || z" form. The seed must be uniformly random.
|
||||
func NewDecapsulationKey768(seed []byte) (*DecapsulationKey768, error) {
|
||||
// The actual logic is in a separate function to outline this allocation.
|
||||
dk := &DecapsulationKey768{}
|
||||
return newKeyFromSeed(dk, seed)
|
||||
}
|
||||
|
||||
func newKeyFromSeed(dk *DecapsulationKey768, seed []byte) (*DecapsulationKey768, error) {
|
||||
if len(seed) != SeedSize {
|
||||
return nil, errors.New("mlkem: invalid seed length")
|
||||
}
|
||||
d := (*[32]byte)(seed[:32])
|
||||
z := (*[32]byte)(seed[32:])
|
||||
kemKeyGen(dk, d, z)
|
||||
// if err := fips140.PCT("ML-KEM PCT", func() error { return kemPCT(dk) }); err != nil {
|
||||
// // This clearly can't happen, but FIPS 140-3 requires us to check.
|
||||
// panic(err)
|
||||
// }
|
||||
//fips140.RecordApproved()
|
||||
return dk, nil
|
||||
}
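// seedRoundTripSketch illustrates the intended workflow for the seed format:
// a key is stored as its 64-byte "d || z" seed and re-expanded later with
// NewDecapsulationKey768. It is a usage sketch, not a test from the package,
// and relies only on the exported API above plus the bytes and errors imports.
func seedRoundTripSketch() error {
	dk, err := GenerateKey768()
	if err != nil {
		return err
	}
	dk2, err := NewDecapsulationKey768(dk.Bytes())
	if err != nil {
		return err
	}
	if !bytes.Equal(dk.Bytes(), dk2.Bytes()) {
		return errors.New("mlkem: seed round-trip mismatch")
	}
	return nil
}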
|
||||
|
||||
// TestingOnlyNewDecapsulationKey768 parses a decapsulation key from its expanded NIST format.
|
||||
//
|
||||
// Bytes() must not be called on the returned key, as it will not produce the
|
||||
// original seed.
|
||||
//
|
||||
// This function should only be used for ACVP testing. Prefer NewDecapsulationKey768 for all
|
||||
// other purposes.
|
||||
func TestingOnlyNewDecapsulationKey768(b []byte) (*DecapsulationKey768, error) {
|
||||
if len(b) != decapsulationKeySize768 {
|
||||
return nil, errors.New("mlkem: invalid NIST decapsulation key length")
|
||||
}
|
||||
|
||||
dk := &DecapsulationKey768{}
|
||||
for i := range dk.s {
|
||||
var err error
|
||||
dk.s[i], err = polyByteDecode[nttElement](b[:encodingSize12])
|
||||
if err != nil {
|
||||
return nil, errors.New("mlkem: invalid secret key encoding")
|
||||
}
|
||||
b = b[encodingSize12:]
|
||||
}
|
||||
|
||||
ek, err := NewEncapsulationKey768(b[:EncapsulationKeySize768])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dk.ρ = ek.ρ
|
||||
dk.h = ek.h
|
||||
dk.encryptionKey = ek.encryptionKey
|
||||
b = b[EncapsulationKeySize768:]
|
||||
|
||||
if !bytes.Equal(dk.h[:], b[:32]) {
|
||||
return nil, errors.New("mlkem: inconsistent H(ek) in encoded bytes")
|
||||
}
|
||||
b = b[32:]
|
||||
|
||||
copy(dk.z[:], b)
|
||||
|
||||
// Generate a random d value for use in Bytes(). This is a safety mechanism
|
||||
// so that, if Bytes is called despite the documentation above advising
|
||||
// against it, it returns the seed of a random key rather than a predictable
|
||||
// one with an all-zero d.
|
||||
drbg.Read(dk.d[:])
|
||||
|
||||
return dk, nil
|
||||
}
|
||||
|
||||
// kemKeyGen generates a decapsulation key.
|
||||
//
|
||||
// It implements ML-KEM.KeyGen_internal according to FIPS 203, Algorithm 16, and
|
||||
// K-PKE.KeyGen according to FIPS 203, Algorithm 13. The two are merged to save
|
||||
// copies and allocations.
|
||||
func kemKeyGen(dk *DecapsulationKey768, d, z *[32]byte) {
|
||||
dk.d = *d
|
||||
dk.z = *z
|
||||
|
||||
g := sha3.New512()
|
||||
g.Write(d[:])
|
||||
g.Write([]byte{k}) // Module dimension as a domain separator.
|
||||
G := g.Sum(make([]byte, 0, 64))
|
||||
ρ, σ := G[:32], G[32:]
|
||||
dk.ρ = [32]byte(ρ)
|
||||
|
||||
A := &dk.a
|
||||
for i := byte(0); i < k; i++ {
|
||||
for j := byte(0); j < k; j++ {
|
||||
A[i*k+j] = sampleNTT(ρ, j, i)
|
||||
}
|
||||
}
|
||||
|
||||
var N byte
|
||||
s := &dk.s
|
||||
for i := range s {
|
||||
s[i] = ntt(samplePolyCBD(σ, N))
|
||||
N++
|
||||
}
|
||||
e := make([]nttElement, k)
|
||||
for i := range e {
|
||||
e[i] = ntt(samplePolyCBD(σ, N))
|
||||
N++
|
||||
}
|
||||
|
||||
t := &dk.t
|
||||
for i := range t { // t = A ◦ s + e
|
||||
t[i] = e[i]
|
||||
for j := range s {
|
||||
t[i] = polyAdd(t[i], nttMul(A[i*k+j], s[j]))
|
||||
}
|
||||
}
|
||||
|
||||
H := sha3.New256()
|
||||
ek := dk.EncapsulationKey().Bytes()
|
||||
H.Write(ek)
|
||||
H.Sum(dk.h[:0])
|
||||
}
|
||||
|
||||
// kemPCT performs a Pairwise Consistency Test per FIPS 140-3 IG 10.3.A
|
||||
// Additional Comment 1: "For key pairs generated for use with approved KEMs in
|
||||
// FIPS 203, the PCT shall consist of applying the encapsulation key ek to
|
||||
// encapsulate a shared secret K leading to ciphertext c, and then applying
|
||||
// decapsulation key dk to retrieve the same shared secret K. The PCT passes if
|
||||
// the two shared secret K values are equal. The PCT shall be performed either
|
||||
// when keys are generated/imported, prior to the first exportation, or prior to
|
||||
// the first operational use (if not exported before the first use)."
|
||||
func kemPCT(dk *DecapsulationKey768) error {
|
||||
ek := dk.EncapsulationKey()
|
||||
K, c := ek.Encapsulate()
|
||||
K1, err := dk.Decapsulate(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if subtle.ConstantTimeCompare(K, K1) != 1 {
|
||||
return errors.New("mlkem: PCT failed")
|
||||
}
|
||||
return nil
|
||||
}
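// encapsulateDecapsulateSketch mirrors the round trip described in the PCT
// comment above using only the exported API: the recipient publishes its
// encapsulation key bytes, the sender encapsulates against them, and the
// recipient decapsulates the ciphertext to the same shared key. A usage
// sketch, not part of the package.
func encapsulateDecapsulateSketch() error {
	dk, err := GenerateKey768()
	if err != nil {
		return err
	}
	ek, err := NewEncapsulationKey768(dk.EncapsulationKey().Bytes())
	if err != nil {
		return err
	}
	sharedKey, ciphertext := ek.Encapsulate()
	sharedKey2, err := dk.Decapsulate(ciphertext)
	if err != nil {
		return err
	}
	if subtle.ConstantTimeCompare(sharedKey, sharedKey2) != 1 {
		return errors.New("mlkem: shared keys differ")
	}
	return nil
}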
|
||||
|
||||
// Encapsulate generates a shared key and an associated ciphertext from an
|
||||
// encapsulation key, drawing random bytes from a DRBG.
|
||||
//
|
||||
// The shared key must be kept secret.
|
||||
func (ek *EncapsulationKey768) Encapsulate() (sharedKey, ciphertext []byte) {
|
||||
// The actual logic is in a separate function to outline this allocation.
|
||||
var cc [CiphertextSize768]byte
|
||||
return ek.encapsulate(&cc)
|
||||
}
|
||||
|
||||
func (ek *EncapsulationKey768) encapsulate(cc *[CiphertextSize768]byte) (sharedKey, ciphertext []byte) {
|
||||
var m [messageSize]byte
|
||||
drbg.Read(m[:])
|
||||
// Note that the modulus check (step 2 of the encapsulation key check from
|
||||
// FIPS 203, Section 7.2) is performed by polyByteDecode in parseEK.
|
||||
//fips140.RecordApproved()
|
||||
return kemEncaps(cc, ek, &m)
|
||||
}
|
||||
|
||||
// EncapsulateInternal is a derandomized version of Encapsulate, exclusively for
|
||||
// use in tests.
|
||||
func (ek *EncapsulationKey768) EncapsulateInternal(m *[32]byte) (sharedKey, ciphertext []byte) {
|
||||
cc := &[CiphertextSize768]byte{}
|
||||
return kemEncaps(cc, ek, m)
|
||||
}
|
||||
|
||||
// kemEncaps generates a shared key and an associated ciphertext.
|
||||
//
|
||||
// It implements ML-KEM.Encaps_internal according to FIPS 203, Algorithm 17.
|
||||
func kemEncaps(cc *[CiphertextSize768]byte, ek *EncapsulationKey768, m *[messageSize]byte) (K, c []byte) {
|
||||
g := sha3.New512()
|
||||
g.Write(m[:])
|
||||
g.Write(ek.h[:])
|
||||
G := g.Sum(nil)
|
||||
K, r := G[:SharedKeySize], G[SharedKeySize:]
|
||||
c = pkeEncrypt(cc, &ek.encryptionKey, m, r)
|
||||
return K, c
|
||||
}
|
||||
|
||||
// NewEncapsulationKey768 parses an encapsulation key from its encoded form.
|
||||
// If the encapsulation key is not valid, NewEncapsulationKey768 returns an error.
|
||||
func NewEncapsulationKey768(encapsulationKey []byte) (*EncapsulationKey768, error) {
|
||||
// The actual logic is in a separate function to outline this allocation.
|
||||
ek := &EncapsulationKey768{}
|
||||
return parseEK(ek, encapsulationKey)
|
||||
}
|
||||
|
||||
// parseEK parses an encryption key from its encoded form.
|
||||
//
|
||||
// It implements the initial stages of K-PKE.Encrypt according to FIPS 203,
|
||||
// Algorithm 14.
|
||||
func parseEK(ek *EncapsulationKey768, ekPKE []byte) (*EncapsulationKey768, error) {
|
||||
if len(ekPKE) != EncapsulationKeySize768 {
|
||||
return nil, errors.New("mlkem: invalid encapsulation key length")
|
||||
}
|
||||
|
||||
h := sha3.New256()
|
||||
h.Write(ekPKE)
|
||||
h.Sum(ek.h[:0])
|
||||
|
||||
for i := range ek.t {
|
||||
var err error
|
||||
ek.t[i], err = polyByteDecode[nttElement](ekPKE[:encodingSize12])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ekPKE = ekPKE[encodingSize12:]
|
||||
}
|
||||
copy(ek.ρ[:], ekPKE)
|
||||
|
||||
for i := byte(0); i < k; i++ {
|
||||
for j := byte(0); j < k; j++ {
|
||||
ek.a[i*k+j] = sampleNTT(ek.ρ[:], j, i)
|
||||
}
|
||||
}
|
||||
|
||||
return ek, nil
|
||||
}
|
||||
|
||||
// pkeEncrypt encrypts a plaintext message.
|
||||
//
|
||||
// It implements K-PKE.Encrypt according to FIPS 203, Algorithm 14, although the
|
||||
// computation of t and AT is done in parseEK.
|
||||
func pkeEncrypt(cc *[CiphertextSize768]byte, ex *encryptionKey, m *[messageSize]byte, rnd []byte) []byte {
|
||||
var N byte
|
||||
r, e1 := make([]nttElement, k), make([]ringElement, k)
|
||||
for i := range r {
|
||||
r[i] = ntt(samplePolyCBD(rnd, N))
|
||||
N++
|
||||
}
|
||||
for i := range e1 {
|
||||
e1[i] = samplePolyCBD(rnd, N)
|
||||
N++
|
||||
}
|
||||
e2 := samplePolyCBD(rnd, N)
|
||||
|
||||
u := make([]ringElement, k) // NTT⁻¹(AT ◦ r) + e1
|
||||
for i := range u {
|
||||
u[i] = e1[i]
|
||||
for j := range r {
|
||||
// Note that i and j are inverted, as we need the transpose of A.
|
||||
u[i] = polyAdd(u[i], inverseNTT(nttMul(ex.a[j*k+i], r[j])))
|
||||
}
|
||||
}
|
||||
|
||||
μ := ringDecodeAndDecompress1(m)
|
||||
|
||||
var vNTT nttElement // t⊺ ◦ r
|
||||
for i := range ex.t {
|
||||
vNTT = polyAdd(vNTT, nttMul(ex.t[i], r[i]))
|
||||
}
|
||||
v := polyAdd(polyAdd(inverseNTT(vNTT), e2), μ)
|
||||
|
||||
c := cc[:0]
|
||||
for _, f := range u {
|
||||
c = ringCompressAndEncode10(c, f)
|
||||
}
|
||||
c = ringCompressAndEncode4(c, v)
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// Decapsulate generates a shared key from a ciphertext and a decapsulation key.
|
||||
// If the ciphertext is not valid, Decapsulate returns an error.
|
||||
//
|
||||
// The shared key must be kept secret.
|
||||
func (dk *DecapsulationKey768) Decapsulate(ciphertext []byte) (sharedKey []byte, err error) {
|
||||
if len(ciphertext) != CiphertextSize768 {
|
||||
return nil, errors.New("mlkem: invalid ciphertext length")
|
||||
}
|
||||
c := (*[CiphertextSize768]byte)(ciphertext)
|
||||
// Note that the hash check (step 3 of the decapsulation input check from
|
||||
// FIPS 203, Section 7.3) is forgone as a DecapsulationKey is always
|
||||
// validly generated by ML-KEM.KeyGen_internal.
|
||||
return kemDecaps(dk, c), nil
|
||||
}
|
||||
|
||||
// kemDecaps produces a shared key from a ciphertext.
|
||||
//
|
||||
// It implements ML-KEM.Decaps_internal according to FIPS 203, Algorithm 18.
|
||||
func kemDecaps(dk *DecapsulationKey768, c *[CiphertextSize768]byte) (K []byte) {
|
||||
//fips140.RecordApproved()
|
||||
m := pkeDecrypt(&dk.decryptionKey, c)
|
||||
g := sha3.New512()
|
||||
g.Write(m[:])
|
||||
g.Write(dk.h[:])
|
||||
G := g.Sum(make([]byte, 0, 64))
|
||||
Kprime, r := G[:SharedKeySize], G[SharedKeySize:]
|
||||
J := sha3.NewShake256()
|
||||
J.Write(dk.z[:])
|
||||
J.Write(c[:])
|
||||
Kout := make([]byte, SharedKeySize)
|
||||
J.Read(Kout)
|
||||
var cc [CiphertextSize768]byte
|
||||
c1 := pkeEncrypt(&cc, &dk.encryptionKey, (*[32]byte)(m), r)
|
||||
|
||||
subtle.ConstantTimeCopy(subtle.ConstantTimeCompare(c[:], c1), Kout, Kprime)
|
||||
return Kout
|
||||
}
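// The constant-time copy above is the "implicit rejection" step of FIPS 203:
// when the re-encrypted ciphertext c1 matches c, the caller receives K′
// derived from the decrypted message; otherwise the output remains the
// pseudorandom key J(z || c), so invalid ciphertexts reveal nothing about why
// decapsulation failed.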
|
||||
|
||||
// pkeDecrypt decrypts a ciphertext.
|
||||
//
|
||||
// It implements K-PKE.Decrypt according to FIPS 203, Algorithm 15,
|
||||
// although s is retained from kemKeyGen.
|
||||
func pkeDecrypt(dx *decryptionKey, c *[CiphertextSize768]byte) []byte {
|
||||
u := make([]ringElement, k)
|
||||
for i := range u {
|
||||
b := (*[encodingSize10]byte)(c[encodingSize10*i : encodingSize10*(i+1)])
|
||||
u[i] = ringDecodeAndDecompress10(b)
|
||||
}
|
||||
|
||||
b := (*[encodingSize4]byte)(c[encodingSize10*k:])
|
||||
v := ringDecodeAndDecompress4(b)
|
||||
|
||||
var mask nttElement // s⊺ ◦ NTT(u)
|
||||
for i := range dx.s {
|
||||
mask = polyAdd(mask, nttMul(dx.s[i], ntt(u[i])))
|
||||
}
|
||||
w := polySub(v, inverseNTT(mask))
|
||||
|
||||
return ringCompressAndEncode1(nil, w)
|
||||
}
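// Decryption recovers the message because w = v − NTT⁻¹(s⊺ ◦ NTT(u)) equals
// μ plus a small noise term: provided the accumulated noise stays below q/4,
// each coefficient of w lies closer to the value Decompress₁ produced during
// encryption (0 or ⌈q/2⌋) than to the other, so the final Compress₁ rounds it
// back to the original message bit.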
|
@ -1,26 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package randutil contains internal randomness utilities for various
|
||||
// crypto packages.
|
||||
package randutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
"math/rand/v2"
|
||||
)
|
||||
|
||||
// MaybeReadByte reads a single byte from r with 50% probability. This is used
|
||||
// to ensure that callers do not depend on non-guaranteed behaviour, e.g.
|
||||
// assuming that rsa.GenerateKey is deterministic w.r.t. a given random stream.
|
||||
//
|
||||
// This does not affect tests that pass a stream of fixed bytes as the random
|
||||
// source (e.g. a zeroReader).
|
||||
func MaybeReadByte(r io.Reader) {
|
||||
if rand.Uint64()&1 == 1 {
|
||||
return
|
||||
}
|
||||
var buf [1]byte
|
||||
r.Read(buf[:])
|
||||
}
|
231
sha256/sha256.go
@ -1,231 +0,0 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package sha256 implements the SHA-224 and SHA-256 hash algorithms as defined
|
||||
// in FIPS 180-4.
|
||||
package sha256
|
||||
|
||||
import (
|
||||
//"github.com/xtls/reality"
|
||||
"github.com/xtls/reality/byteorder"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// The size of a SHA-256 checksum in bytes.
|
||||
const size = 32
|
||||
|
||||
// The size of a SHA-224 checksum in bytes.
|
||||
const size224 = 28
|
||||
|
||||
// The block size of SHA-256 and SHA-224 in bytes.
|
||||
const blockSize = 64
|
||||
|
||||
const (
|
||||
chunk = 64
|
||||
init0 = 0x6A09E667
|
||||
init1 = 0xBB67AE85
|
||||
init2 = 0x3C6EF372
|
||||
init3 = 0xA54FF53A
|
||||
init4 = 0x510E527F
|
||||
init5 = 0x9B05688C
|
||||
init6 = 0x1F83D9AB
|
||||
init7 = 0x5BE0CD19
|
||||
init0_224 = 0xC1059ED8
|
||||
init1_224 = 0x367CD507
|
||||
init2_224 = 0x3070DD17
|
||||
init3_224 = 0xF70E5939
|
||||
init4_224 = 0xFFC00B31
|
||||
init5_224 = 0x68581511
|
||||
init6_224 = 0x64F98FA7
|
||||
init7_224 = 0xBEFA4FA4
|
||||
)
|
||||
|
||||
// Digest is a SHA-224 or SHA-256 [hash.Hash] implementation.
|
||||
type Digest struct {
|
||||
h [8]uint32
|
||||
x [chunk]byte
|
||||
nx int
|
||||
len uint64
|
||||
is224 bool // mark if this digest is SHA-224
|
||||
}
|
||||
|
||||
const (
|
||||
magic224 = "sha\x02"
|
||||
magic256 = "sha\x03"
|
||||
marshaledSize = len(magic256) + 8*4 + chunk + 8
|
||||
)
|
||||
|
||||
func (d *Digest) MarshalBinary() ([]byte, error) {
|
||||
return d.AppendBinary(make([]byte, 0, marshaledSize))
|
||||
}
|
||||
|
||||
func (d *Digest) AppendBinary(b []byte) ([]byte, error) {
|
||||
if d.is224 {
|
||||
b = append(b, magic224...)
|
||||
} else {
|
||||
b = append(b, magic256...)
|
||||
}
|
||||
b = byteorder.BEAppendUint32(b, d.h[0])
|
||||
b = byteorder.BEAppendUint32(b, d.h[1])
|
||||
b = byteorder.BEAppendUint32(b, d.h[2])
|
||||
b = byteorder.BEAppendUint32(b, d.h[3])
|
||||
b = byteorder.BEAppendUint32(b, d.h[4])
|
||||
b = byteorder.BEAppendUint32(b, d.h[5])
|
||||
b = byteorder.BEAppendUint32(b, d.h[6])
|
||||
b = byteorder.BEAppendUint32(b, d.h[7])
|
||||
b = append(b, d.x[:d.nx]...)
|
||||
b = append(b, make([]byte, len(d.x)-d.nx)...)
|
||||
b = byteorder.BEAppendUint64(b, d.len)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (d *Digest) UnmarshalBinary(b []byte) error {
|
||||
if len(b) < len(magic224) || (d.is224 && string(b[:len(magic224)]) != magic224) || (!d.is224 && string(b[:len(magic256)]) != magic256) {
|
||||
return errors.New("crypto/sha256: invalid hash state identifier")
|
||||
}
|
||||
if len(b) != marshaledSize {
|
||||
return errors.New("crypto/sha256: invalid hash state size")
|
||||
}
|
||||
b = b[len(magic224):]
|
||||
b, d.h[0] = consumeUint32(b)
|
||||
b, d.h[1] = consumeUint32(b)
|
||||
b, d.h[2] = consumeUint32(b)
|
||||
b, d.h[3] = consumeUint32(b)
|
||||
b, d.h[4] = consumeUint32(b)
|
||||
b, d.h[5] = consumeUint32(b)
|
||||
b, d.h[6] = consumeUint32(b)
|
||||
b, d.h[7] = consumeUint32(b)
|
||||
b = b[copy(d.x[:], b):]
|
||||
b, d.len = consumeUint64(b)
|
||||
d.nx = int(d.len % chunk)
|
||||
return nil
|
||||
}
|
||||
|
||||
func consumeUint64(b []byte) ([]byte, uint64) {
|
||||
return b[8:], byteorder.BEUint64(b)
|
||||
}
|
||||
|
||||
func consumeUint32(b []byte) ([]byte, uint32) {
|
||||
return b[4:], byteorder.BEUint32(b)
|
||||
}
|
||||
|
||||
func (d *Digest) Reset() {
|
||||
if !d.is224 {
|
||||
d.h[0] = init0
|
||||
d.h[1] = init1
|
||||
d.h[2] = init2
|
||||
d.h[3] = init3
|
||||
d.h[4] = init4
|
||||
d.h[5] = init5
|
||||
d.h[6] = init6
|
||||
d.h[7] = init7
|
||||
} else {
|
||||
d.h[0] = init0_224
|
||||
d.h[1] = init1_224
|
||||
d.h[2] = init2_224
|
||||
d.h[3] = init3_224
|
||||
d.h[4] = init4_224
|
||||
d.h[5] = init5_224
|
||||
d.h[6] = init6_224
|
||||
d.h[7] = init7_224
|
||||
}
|
||||
d.nx = 0
|
||||
d.len = 0
|
||||
}
|
||||
|
||||
// New returns a new Digest computing the SHA-256 hash.
|
||||
func New() *Digest {
|
||||
d := new(Digest)
|
||||
d.Reset()
|
||||
return d
|
||||
}
|
||||
|
||||
// New224 returns a new Digest computing the SHA-224 hash.
|
||||
func New224() *Digest {
|
||||
d := new(Digest)
|
||||
d.is224 = true
|
||||
d.Reset()
|
||||
return d
|
||||
}
|
||||
|
||||
func (d *Digest) Size() int {
|
||||
if !d.is224 {
|
||||
return size
|
||||
}
|
||||
return size224
|
||||
}
|
||||
|
||||
func (d *Digest) BlockSize() int { return blockSize }
|
||||
|
||||
func (d *Digest) Write(p []byte) (nn int, err error) {
|
||||
nn = len(p)
|
||||
d.len += uint64(nn)
|
||||
if d.nx > 0 {
|
||||
n := copy(d.x[d.nx:], p)
|
||||
d.nx += n
|
||||
if d.nx == chunk {
|
||||
block(d, d.x[:])
|
||||
d.nx = 0
|
||||
}
|
||||
p = p[n:]
|
||||
}
|
||||
if len(p) >= chunk {
|
||||
n := len(p) &^ (chunk - 1)
|
||||
block(d, p[:n])
|
||||
p = p[n:]
|
||||
}
|
||||
if len(p) > 0 {
|
||||
d.nx = copy(d.x[:], p)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *Digest) Sum(in []byte) []byte {
|
||||
//fips140.RecordApproved()
|
||||
// Make a copy of d so that caller can keep writing and summing.
|
||||
d0 := *d
|
||||
hash := d0.checkSum()
|
||||
if d0.is224 {
|
||||
return append(in, hash[:size224]...)
|
||||
}
|
||||
return append(in, hash[:]...)
|
||||
}
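// sumSketch shows the intended hash.Hash-style use of this package, relying
// only on the constructor and methods defined above.
func sumSketch(data []byte) [size]byte {
	d := New()
	d.Write(data)
	var out [size]byte
	copy(out[:], d.Sum(nil))
	return out
}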
|
||||
|
||||
func (d *Digest) checkSum() [size]byte {
|
||||
len := d.len
|
||||
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
|
||||
var tmp [64 + 8]byte // padding + length buffer
|
||||
tmp[0] = 0x80
|
||||
var t uint64
|
||||
if len%64 < 56 {
|
||||
t = 56 - len%64
|
||||
} else {
|
||||
t = 64 + 56 - len%64
|
||||
}
|
||||
|
||||
// Length in bits.
|
||||
len <<= 3
|
||||
padlen := tmp[:t+8]
|
||||
byteorder.BEPutUint64(padlen[t+0:], len)
|
||||
d.Write(padlen)
|
||||
|
||||
if d.nx != 0 {
|
||||
panic("d.nx != 0")
|
||||
}
|
||||
|
||||
var digest [size]byte
|
||||
|
||||
byteorder.BEPutUint32(digest[0:], d.h[0])
|
||||
byteorder.BEPutUint32(digest[4:], d.h[1])
|
||||
byteorder.BEPutUint32(digest[8:], d.h[2])
|
||||
byteorder.BEPutUint32(digest[12:], d.h[3])
|
||||
byteorder.BEPutUint32(digest[16:], d.h[4])
|
||||
byteorder.BEPutUint32(digest[20:], d.h[5])
|
||||
byteorder.BEPutUint32(digest[24:], d.h[6])
|
||||
if !d.is224 {
|
||||
byteorder.BEPutUint32(digest[28:], d.h[7])
|
||||
}
|
||||
|
||||
return digest
|
||||
}
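// Padding example: for a 3-byte message, len%64 = 3 < 56, so t = 53 and
// checkSum appends 0x80, 52 zero bytes, and the 8-byte big-endian bit length
// (24), bringing the total to a single 64-byte block.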
|
@ -1,128 +0,0 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// SHA256 block step.
|
||||
// In its own file so that a faster assembly or C version
|
||||
// can be substituted easily.
|
||||
|
||||
package sha256
|
||||
|
||||
import "math/bits"
|
||||
|
||||
var _K = [...]uint32{
|
||||
0x428a2f98,
|
||||
0x71374491,
|
||||
0xb5c0fbcf,
|
||||
0xe9b5dba5,
|
||||
0x3956c25b,
|
||||
0x59f111f1,
|
||||
0x923f82a4,
|
||||
0xab1c5ed5,
|
||||
0xd807aa98,
|
||||
0x12835b01,
|
||||
0x243185be,
|
||||
0x550c7dc3,
|
||||
0x72be5d74,
|
||||
0x80deb1fe,
|
||||
0x9bdc06a7,
|
||||
0xc19bf174,
|
||||
0xe49b69c1,
|
||||
0xefbe4786,
|
||||
0x0fc19dc6,
|
||||
0x240ca1cc,
|
||||
0x2de92c6f,
|
||||
0x4a7484aa,
|
||||
0x5cb0a9dc,
|
||||
0x76f988da,
|
||||
0x983e5152,
|
||||
0xa831c66d,
|
||||
0xb00327c8,
|
||||
0xbf597fc7,
|
||||
0xc6e00bf3,
|
||||
0xd5a79147,
|
||||
0x06ca6351,
|
||||
0x14292967,
|
||||
0x27b70a85,
|
||||
0x2e1b2138,
|
||||
0x4d2c6dfc,
|
||||
0x53380d13,
|
||||
0x650a7354,
|
||||
0x766a0abb,
|
||||
0x81c2c92e,
|
||||
0x92722c85,
|
||||
0xa2bfe8a1,
|
||||
0xa81a664b,
|
||||
0xc24b8b70,
|
||||
0xc76c51a3,
|
||||
0xd192e819,
|
||||
0xd6990624,
|
||||
0xf40e3585,
|
||||
0x106aa070,
|
||||
0x19a4c116,
|
||||
0x1e376c08,
|
||||
0x2748774c,
|
||||
0x34b0bcb5,
|
||||
0x391c0cb3,
|
||||
0x4ed8aa4a,
|
||||
0x5b9cca4f,
|
||||
0x682e6ff3,
|
||||
0x748f82ee,
|
||||
0x78a5636f,
|
||||
0x84c87814,
|
||||
0x8cc70208,
|
||||
0x90befffa,
|
||||
0xa4506ceb,
|
||||
0xbef9a3f7,
|
||||
0xc67178f2,
|
||||
}
|
||||
|
||||
func blockGeneric(dig *Digest, p []byte) {
|
||||
var w [64]uint32
|
||||
h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]
|
||||
for len(p) >= chunk {
|
||||
// Can interlace the computation of w with the
|
||||
// rounds below if needed for speed.
|
||||
for i := 0; i < 16; i++ {
|
||||
j := i * 4
|
||||
w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3])
|
||||
}
|
||||
for i := 16; i < 64; i++ {
|
||||
v1 := w[i-2]
|
||||
t1 := (bits.RotateLeft32(v1, -17)) ^ (bits.RotateLeft32(v1, -19)) ^ (v1 >> 10)
|
||||
v2 := w[i-15]
|
||||
t2 := (bits.RotateLeft32(v2, -7)) ^ (bits.RotateLeft32(v2, -18)) ^ (v2 >> 3)
|
||||
w[i] = t1 + w[i-7] + t2 + w[i-16]
|
||||
}
|
||||
|
||||
a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7
|
||||
|
||||
for i := 0; i < 64; i++ {
|
||||
t1 := h + ((bits.RotateLeft32(e, -6)) ^ (bits.RotateLeft32(e, -11)) ^ (bits.RotateLeft32(e, -25))) + ((e & f) ^ (^e & g)) + _K[i] + w[i]
|
||||
|
||||
t2 := ((bits.RotateLeft32(a, -2)) ^ (bits.RotateLeft32(a, -13)) ^ (bits.RotateLeft32(a, -22))) + ((a & b) ^ (a & c) ^ (b & c))
|
||||
|
||||
h = g
|
||||
g = f
|
||||
f = e
|
||||
e = d + t1
|
||||
d = c
|
||||
c = b
|
||||
b = a
|
||||
a = t1 + t2
|
||||
}
|
||||
|
||||
h0 += a
|
||||
h1 += b
|
||||
h2 += c
|
||||
h3 += d
|
||||
h4 += e
|
||||
h5 += f
|
||||
h6 += g
|
||||
h7 += h
|
||||
|
||||
p = p[chunk:]
|
||||
}
|
||||
|
||||
dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7
|
||||
}
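// The rotations above are the FIPS 180-4 functions, with
// bits.RotateLeft32(x, -n) standing in for ROTRⁿ(x): the message schedule
// uses σ₁(x) = ROTR¹⁷(x) ⊕ ROTR¹⁹(x) ⊕ SHR¹⁰(x) and
// σ₀(x) = ROTR⁷(x) ⊕ ROTR¹⁸(x) ⊕ SHR³(x), and the round loop uses Σ₁(e),
// Ch(e, f, g), Σ₀(a), and Maj(a, b, c).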
|
@ -1,9 +0,0 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sha256
|
||||
|
||||
func block(dig *Digest, p []byte) {
|
||||
blockGeneric(dig, p)
|
||||
}
|
@ -1,59 +0,0 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sha3
|
||||
|
||||
// New224 returns a new Digest computing the SHA3-224 hash.
|
||||
func New224() *Digest {
|
||||
return &Digest{rate: rateK448, outputLen: 28, dsbyte: dsbyteSHA3}
|
||||
}
|
||||
|
||||
// New256 returns a new Digest computing the SHA3-256 hash.
|
||||
func New256() *Digest {
|
||||
return &Digest{rate: rateK512, outputLen: 32, dsbyte: dsbyteSHA3}
|
||||
}
|
||||
|
||||
// New384 returns a new Digest computing the SHA3-384 hash.
|
||||
func New384() *Digest {
|
||||
return &Digest{rate: rateK768, outputLen: 48, dsbyte: dsbyteSHA3}
|
||||
}
|
||||
|
||||
// New512 returns a new Digest computing the SHA3-512 hash.
|
||||
func New512() *Digest {
|
||||
return &Digest{rate: rateK1024, outputLen: 64, dsbyte: dsbyteSHA3}
|
||||
}
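// sha3SumSketch is a minimal usage sketch for the constructors above; it
// relies only on the Write and Sum methods that other packages in this
// repository already call on *Digest.
func sha3SumSketch(msg []byte) []byte {
	d := New256()
	d.Write(msg)
	return d.Sum(nil)
}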
|
||||
|
||||
// TODO(fips): do this in the stdlib crypto/sha3 package.
|
||||
//
|
||||
// crypto.RegisterHash(crypto.SHA3_224, New224)
|
||||
// crypto.RegisterHash(crypto.SHA3_256, New256)
|
||||
// crypto.RegisterHash(crypto.SHA3_384, New384)
|
||||
// crypto.RegisterHash(crypto.SHA3_512, New512)
|
||||
|
||||
const (
|
||||
dsbyteSHA3 = 0b00000110
|
||||
dsbyteKeccak = 0b00000001
|
||||
dsbyteShake = 0b00011111
|
||||
dsbyteCShake = 0b00000100
|
||||
|
||||
// rateK[c] is the rate in bytes for Keccak[c] where c is the capacity in
|
||||
// bits. Given the sponge size is 1600 bits, the rate is 1600 - c bits.
|
||||
rateK256 = (1600 - 256) / 8
|
||||
rateK448 = (1600 - 448) / 8
|
||||
rateK512 = (1600 - 512) / 8
|
||||
rateK768 = (1600 - 768) / 8
|
||||
rateK1024 = (1600 - 1024) / 8
|
||||
)
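// With the 1600-bit Keccak state these work out to 168, 144, 136, 104, and 72
// bytes respectively, i.e. the standard rates of SHAKE128, SHA3-224,
// SHA3-256/SHAKE256, SHA3-384, and SHA3-512.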
|
||||
|
||||
// NewLegacyKeccak256 returns a new Digest computing the legacy, non-standard
|
||||
// Keccak-256 hash.
|
||||
func NewLegacyKeccak256() *Digest {
|
||||
return &Digest{rate: rateK512, outputLen: 32, dsbyte: dsbyteKeccak}
|
||||
}
|
||||
|
||||
// NewLegacyKeccak512 returns a new Digest computing the legacy, non-standard
|
||||
// Keccak-512 hash.
|
||||
func NewLegacyKeccak512() *Digest {
|
||||
return &Digest{rate: rateK1024, outputLen: 64, dsbyte: dsbyteKeccak}
|
||||
}
|
434
sha3/keccakf.go
@ -1,434 +0,0 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sha3
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/xtls/reality/byteorder"
|
||||
//"crypto/internal/fips140deps/cpu"
|
||||
"math/bits"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// rc stores the round constants for use in the ι step.
|
||||
var rc = [24]uint64{
|
||||
0x0000000000000001,
|
||||
0x0000000000008082,
|
||||
0x800000000000808A,
|
||||
0x8000000080008000,
|
||||
0x000000000000808B,
|
||||
0x0000000080000001,
|
||||
0x8000000080008081,
|
||||
0x8000000000008009,
|
||||
0x000000000000008A,
|
||||
0x0000000000000088,
|
||||
0x0000000080008009,
|
||||
0x000000008000000A,
|
||||
0x000000008000808B,
|
||||
0x800000000000008B,
|
||||
0x8000000000008089,
|
||||
0x8000000000008003,
|
||||
0x8000000000008002,
|
||||
0x8000000000000080,
|
||||
0x000000000000800A,
|
||||
0x800000008000000A,
|
||||
0x8000000080008081,
|
||||
0x8000000000008080,
|
||||
0x0000000080000001,
|
||||
0x8000000080008008,
|
||||
}
|
||||
|
||||
// keccakF1600Generic applies the Keccak permutation.
|
||||
func keccakF1600Generic(da *[200]byte) {
|
||||
var a *[25]uint64
|
||||
//if cpu.BigEndian {
|
||||
if binary.NativeEndian.Uint16([]byte{0x12, 0x34}) != uint16(0x3412) {
|
||||
a = new([25]uint64)
|
||||
for i := range a {
|
||||
a[i] = byteorder.LEUint64(da[i*8:])
|
||||
}
|
||||
defer func() {
|
||||
for i := range a {
|
||||
byteorder.LEPutUint64(da[i*8:], a[i])
|
||||
}
|
||||
}()
|
||||
} else {
|
||||
a = (*[25]uint64)(unsafe.Pointer(da))
|
||||
}
|
||||
|
||||
// Implementation translated from Keccak-inplace.c
|
||||
// in the keccak reference code.
|
||||
var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64
|
||||
|
||||
for i := 0; i < 24; i += 4 {
|
||||
// Combines the 5 steps in each round into 2 steps.
|
||||
// Unrolls 4 rounds per loop and spreads some steps across rounds.
|
||||
|
||||
// Round 1
|
||||
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
|
||||
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
|
||||
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
|
||||
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
|
||||
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
|
||||
d0 = bc4 ^ (bc1<<1 | bc1>>63)
|
||||
d1 = bc0 ^ (bc2<<1 | bc2>>63)
|
||||
d2 = bc1 ^ (bc3<<1 | bc3>>63)
|
||||
d3 = bc2 ^ (bc4<<1 | bc4>>63)
|
||||
d4 = bc3 ^ (bc0<<1 | bc0>>63)
|
||||
|
||||
bc0 = a[0] ^ d0
|
||||
t = a[6] ^ d1
|
||||
bc1 = bits.RotateLeft64(t, 44)
|
||||
t = a[12] ^ d2
|
||||
bc2 = bits.RotateLeft64(t, 43)
|
||||
t = a[18] ^ d3
|
||||
bc3 = bits.RotateLeft64(t, 21)
|
||||
t = a[24] ^ d4
|
||||
bc4 = bits.RotateLeft64(t, 14)
|
||||
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i]
|
||||
a[6] = bc1 ^ (bc3 &^ bc2)
|
||||
a[12] = bc2 ^ (bc4 &^ bc3)
|
||||
a[18] = bc3 ^ (bc0 &^ bc4)
|
||||
a[24] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[10] ^ d0
|
||||
bc2 = bits.RotateLeft64(t, 3)
|
||||
t = a[16] ^ d1
|
||||
bc3 = bits.RotateLeft64(t, 45)
|
||||
t = a[22] ^ d2
|
||||
bc4 = bits.RotateLeft64(t, 61)
|
||||
t = a[3] ^ d3
|
||||
bc0 = bits.RotateLeft64(t, 28)
|
||||
t = a[9] ^ d4
|
||||
bc1 = bits.RotateLeft64(t, 20)
|
||||
a[10] = bc0 ^ (bc2 &^ bc1)
|
||||
a[16] = bc1 ^ (bc3 &^ bc2)
|
||||
a[22] = bc2 ^ (bc4 &^ bc3)
|
||||
a[3] = bc3 ^ (bc0 &^ bc4)
|
||||
a[9] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[20] ^ d0
|
||||
bc4 = bits.RotateLeft64(t, 18)
|
||||
t = a[1] ^ d1
|
||||
bc0 = bits.RotateLeft64(t, 1)
|
||||
t = a[7] ^ d2
|
||||
bc1 = bits.RotateLeft64(t, 6)
|
||||
t = a[13] ^ d3
|
||||
bc2 = bits.RotateLeft64(t, 25)
|
||||
t = a[19] ^ d4
|
||||
bc3 = bits.RotateLeft64(t, 8)
|
||||
a[20] = bc0 ^ (bc2 &^ bc1)
|
||||
a[1] = bc1 ^ (bc3 &^ bc2)
|
||||
a[7] = bc2 ^ (bc4 &^ bc3)
|
||||
a[13] = bc3 ^ (bc0 &^ bc4)
|
||||
a[19] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[5] ^ d0
|
||||
bc1 = bits.RotateLeft64(t, 36)
|
||||
t = a[11] ^ d1
|
||||
bc2 = bits.RotateLeft64(t, 10)
|
||||
t = a[17] ^ d2
|
||||
bc3 = bits.RotateLeft64(t, 15)
|
||||
t = a[23] ^ d3
|
||||
bc4 = bits.RotateLeft64(t, 56)
|
||||
t = a[4] ^ d4
|
||||
bc0 = bits.RotateLeft64(t, 27)
|
||||
a[5] = bc0 ^ (bc2 &^ bc1)
|
||||
a[11] = bc1 ^ (bc3 &^ bc2)
|
||||
a[17] = bc2 ^ (bc4 &^ bc3)
|
||||
a[23] = bc3 ^ (bc0 &^ bc4)
|
||||
a[4] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[15] ^ d0
|
||||
bc3 = bits.RotateLeft64(t, 41)
|
||||
t = a[21] ^ d1
|
||||
bc4 = bits.RotateLeft64(t, 2)
|
||||
t = a[2] ^ d2
|
||||
bc0 = bits.RotateLeft64(t, 62)
|
||||
t = a[8] ^ d3
|
||||
bc1 = bits.RotateLeft64(t, 55)
|
||||
t = a[14] ^ d4
|
||||
bc2 = bits.RotateLeft64(t, 39)
|
||||
a[15] = bc0 ^ (bc2 &^ bc1)
|
||||
a[21] = bc1 ^ (bc3 &^ bc2)
|
||||
a[2] = bc2 ^ (bc4 &^ bc3)
|
||||
a[8] = bc3 ^ (bc0 &^ bc4)
|
||||
a[14] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
// Round 2
|
||||
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
|
||||
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
|
||||
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
|
||||
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
|
||||
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
|
||||
d0 = bc4 ^ (bc1<<1 | bc1>>63)
|
||||
d1 = bc0 ^ (bc2<<1 | bc2>>63)
|
||||
d2 = bc1 ^ (bc3<<1 | bc3>>63)
|
||||
d3 = bc2 ^ (bc4<<1 | bc4>>63)
|
||||
d4 = bc3 ^ (bc0<<1 | bc0>>63)
|
||||
|
||||
bc0 = a[0] ^ d0
|
||||
t = a[16] ^ d1
|
||||
bc1 = bits.RotateLeft64(t, 44)
|
||||
t = a[7] ^ d2
|
||||
bc2 = bits.RotateLeft64(t, 43)
|
||||
t = a[23] ^ d3
|
||||
bc3 = bits.RotateLeft64(t, 21)
|
||||
t = a[14] ^ d4
|
||||
bc4 = bits.RotateLeft64(t, 14)
|
||||
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1]
|
||||
a[16] = bc1 ^ (bc3 &^ bc2)
|
||||
a[7] = bc2 ^ (bc4 &^ bc3)
|
||||
a[23] = bc3 ^ (bc0 &^ bc4)
|
||||
a[14] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[20] ^ d0
|
||||
bc2 = bits.RotateLeft64(t, 3)
|
||||
t = a[11] ^ d1
|
||||
bc3 = bits.RotateLeft64(t, 45)
|
||||
t = a[2] ^ d2
|
||||
bc4 = bits.RotateLeft64(t, 61)
|
||||
t = a[18] ^ d3
|
||||
bc0 = bits.RotateLeft64(t, 28)
|
||||
t = a[9] ^ d4
|
||||
bc1 = bits.RotateLeft64(t, 20)
|
||||
a[20] = bc0 ^ (bc2 &^ bc1)
|
||||
a[11] = bc1 ^ (bc3 &^ bc2)
|
||||
a[2] = bc2 ^ (bc4 &^ bc3)
|
||||
a[18] = bc3 ^ (bc0 &^ bc4)
|
||||
a[9] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[15] ^ d0
|
||||
bc4 = bits.RotateLeft64(t, 18)
|
||||
t = a[6] ^ d1
|
||||
bc0 = bits.RotateLeft64(t, 1)
|
||||
t = a[22] ^ d2
|
||||
bc1 = bits.RotateLeft64(t, 6)
|
||||
t = a[13] ^ d3
|
||||
bc2 = bits.RotateLeft64(t, 25)
|
||||
t = a[4] ^ d4
|
||||
bc3 = bits.RotateLeft64(t, 8)
|
||||
a[15] = bc0 ^ (bc2 &^ bc1)
|
||||
a[6] = bc1 ^ (bc3 &^ bc2)
|
||||
a[22] = bc2 ^ (bc4 &^ bc3)
|
||||
a[13] = bc3 ^ (bc0 &^ bc4)
|
||||
a[4] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[10] ^ d0
|
||||
bc1 = bits.RotateLeft64(t, 36)
|
||||
t = a[1] ^ d1
|
||||
bc2 = bits.RotateLeft64(t, 10)
|
||||
t = a[17] ^ d2
|
||||
bc3 = bits.RotateLeft64(t, 15)
|
||||
t = a[8] ^ d3
|
||||
bc4 = bits.RotateLeft64(t, 56)
|
||||
t = a[24] ^ d4
|
||||
bc0 = bits.RotateLeft64(t, 27)
|
||||
a[10] = bc0 ^ (bc2 &^ bc1)
|
||||
a[1] = bc1 ^ (bc3 &^ bc2)
|
||||
a[17] = bc2 ^ (bc4 &^ bc3)
|
||||
a[8] = bc3 ^ (bc0 &^ bc4)
|
||||
a[24] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[5] ^ d0
|
||||
bc3 = bits.RotateLeft64(t, 41)
|
||||
t = a[21] ^ d1
|
||||
bc4 = bits.RotateLeft64(t, 2)
|
||||
t = a[12] ^ d2
|
||||
bc0 = bits.RotateLeft64(t, 62)
|
||||
t = a[3] ^ d3
|
||||
bc1 = bits.RotateLeft64(t, 55)
|
||||
t = a[19] ^ d4
|
||||
bc2 = bits.RotateLeft64(t, 39)
|
||||
a[5] = bc0 ^ (bc2 &^ bc1)
|
||||
a[21] = bc1 ^ (bc3 &^ bc2)
|
||||
a[12] = bc2 ^ (bc4 &^ bc3)
|
||||
a[3] = bc3 ^ (bc0 &^ bc4)
|
||||
a[19] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
// Round 3
|
||||
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
|
||||
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
|
||||
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
|
||||
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
|
||||
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
|
||||
d0 = bc4 ^ (bc1<<1 | bc1>>63)
|
||||
d1 = bc0 ^ (bc2<<1 | bc2>>63)
|
||||
d2 = bc1 ^ (bc3<<1 | bc3>>63)
|
||||
d3 = bc2 ^ (bc4<<1 | bc4>>63)
|
||||
d4 = bc3 ^ (bc0<<1 | bc0>>63)
|
||||
|
||||
bc0 = a[0] ^ d0
|
||||
t = a[11] ^ d1
|
||||
bc1 = bits.RotateLeft64(t, 44)
|
||||
t = a[22] ^ d2
|
||||
bc2 = bits.RotateLeft64(t, 43)
|
||||
t = a[8] ^ d3
|
||||
bc3 = bits.RotateLeft64(t, 21)
|
||||
t = a[19] ^ d4
|
||||
bc4 = bits.RotateLeft64(t, 14)
|
||||
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2]
|
||||
a[11] = bc1 ^ (bc3 &^ bc2)
|
||||
a[22] = bc2 ^ (bc4 &^ bc3)
|
||||
a[8] = bc3 ^ (bc0 &^ bc4)
|
||||
a[19] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[15] ^ d0
|
||||
bc2 = bits.RotateLeft64(t, 3)
|
||||
t = a[1] ^ d1
|
||||
bc3 = bits.RotateLeft64(t, 45)
|
||||
t = a[12] ^ d2
|
||||
bc4 = bits.RotateLeft64(t, 61)
|
||||
t = a[23] ^ d3
|
||||
bc0 = bits.RotateLeft64(t, 28)
|
||||
t = a[9] ^ d4
|
||||
bc1 = bits.RotateLeft64(t, 20)
|
||||
a[15] = bc0 ^ (bc2 &^ bc1)
|
||||
a[1] = bc1 ^ (bc3 &^ bc2)
|
||||
a[12] = bc2 ^ (bc4 &^ bc3)
|
||||
a[23] = bc3 ^ (bc0 &^ bc4)
|
||||
a[9] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[5] ^ d0
|
||||
bc4 = bits.RotateLeft64(t, 18)
|
||||
t = a[16] ^ d1
|
||||
bc0 = bits.RotateLeft64(t, 1)
|
||||
t = a[2] ^ d2
|
||||
bc1 = bits.RotateLeft64(t, 6)
|
||||
t = a[13] ^ d3
|
||||
bc2 = bits.RotateLeft64(t, 25)
|
||||
t = a[24] ^ d4
|
||||
bc3 = bits.RotateLeft64(t, 8)
|
||||
a[5] = bc0 ^ (bc2 &^ bc1)
|
||||
a[16] = bc1 ^ (bc3 &^ bc2)
|
||||
a[2] = bc2 ^ (bc4 &^ bc3)
|
||||
a[13] = bc3 ^ (bc0 &^ bc4)
|
||||
a[24] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[20] ^ d0
|
||||
bc1 = bits.RotateLeft64(t, 36)
|
||||
t = a[6] ^ d1
|
||||
bc2 = bits.RotateLeft64(t, 10)
|
||||
t = a[17] ^ d2
|
||||
bc3 = bits.RotateLeft64(t, 15)
|
||||
t = a[3] ^ d3
|
||||
bc4 = bits.RotateLeft64(t, 56)
|
||||
t = a[14] ^ d4
|
||||
bc0 = bits.RotateLeft64(t, 27)
|
||||
a[20] = bc0 ^ (bc2 &^ bc1)
|
||||
a[6] = bc1 ^ (bc3 &^ bc2)
|
||||
a[17] = bc2 ^ (bc4 &^ bc3)
|
||||
a[3] = bc3 ^ (bc0 &^ bc4)
|
||||
a[14] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[10] ^ d0
|
||||
bc3 = bits.RotateLeft64(t, 41)
|
||||
t = a[21] ^ d1
|
||||
bc4 = bits.RotateLeft64(t, 2)
|
||||
t = a[7] ^ d2
|
||||
bc0 = bits.RotateLeft64(t, 62)
|
||||
t = a[18] ^ d3
|
||||
bc1 = bits.RotateLeft64(t, 55)
|
||||
t = a[4] ^ d4
|
||||
bc2 = bits.RotateLeft64(t, 39)
|
||||
a[10] = bc0 ^ (bc2 &^ bc1)
|
||||
a[21] = bc1 ^ (bc3 &^ bc2)
|
||||
a[7] = bc2 ^ (bc4 &^ bc3)
|
||||
a[18] = bc3 ^ (bc0 &^ bc4)
|
||||
a[4] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
// Round 4
|
||||
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
|
||||
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
|
||||
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
|
||||
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
|
||||
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
|
||||
d0 = bc4 ^ (bc1<<1 | bc1>>63)
|
||||
d1 = bc0 ^ (bc2<<1 | bc2>>63)
|
||||
d2 = bc1 ^ (bc3<<1 | bc3>>63)
|
||||
d3 = bc2 ^ (bc4<<1 | bc4>>63)
|
||||
d4 = bc3 ^ (bc0<<1 | bc0>>63)
|
||||
|
||||
bc0 = a[0] ^ d0
|
||||
t = a[1] ^ d1
|
||||
bc1 = bits.RotateLeft64(t, 44)
|
||||
t = a[2] ^ d2
|
||||
bc2 = bits.RotateLeft64(t, 43)
|
||||
t = a[3] ^ d3
|
||||
bc3 = bits.RotateLeft64(t, 21)
|
||||
t = a[4] ^ d4
|
||||
bc4 = bits.RotateLeft64(t, 14)
|
||||
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3]
|
||||
a[1] = bc1 ^ (bc3 &^ bc2)
|
||||
a[2] = bc2 ^ (bc4 &^ bc3)
|
||||
a[3] = bc3 ^ (bc0 &^ bc4)
|
||||
a[4] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[5] ^ d0
|
||||
bc2 = bits.RotateLeft64(t, 3)
|
||||
t = a[6] ^ d1
|
||||
bc3 = bits.RotateLeft64(t, 45)
|
||||
t = a[7] ^ d2
|
||||
bc4 = bits.RotateLeft64(t, 61)
|
||||
t = a[8] ^ d3
|
||||
bc0 = bits.RotateLeft64(t, 28)
|
||||
t = a[9] ^ d4
|
||||
bc1 = bits.RotateLeft64(t, 20)
|
||||
a[5] = bc0 ^ (bc2 &^ bc1)
|
||||
a[6] = bc1 ^ (bc3 &^ bc2)
|
||||
a[7] = bc2 ^ (bc4 &^ bc3)
|
||||
a[8] = bc3 ^ (bc0 &^ bc4)
|
||||
a[9] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[10] ^ d0
|
||||
bc4 = bits.RotateLeft64(t, 18)
|
||||
t = a[11] ^ d1
|
||||
bc0 = bits.RotateLeft64(t, 1)
|
||||
t = a[12] ^ d2
|
||||
bc1 = bits.RotateLeft64(t, 6)
|
||||
t = a[13] ^ d3
|
||||
bc2 = bits.RotateLeft64(t, 25)
|
||||
t = a[14] ^ d4
|
||||
bc3 = bits.RotateLeft64(t, 8)
|
||||
a[10] = bc0 ^ (bc2 &^ bc1)
|
||||
a[11] = bc1 ^ (bc3 &^ bc2)
|
||||
a[12] = bc2 ^ (bc4 &^ bc3)
|
||||
a[13] = bc3 ^ (bc0 &^ bc4)
|
||||
a[14] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[15] ^ d0
|
||||
bc1 = bits.RotateLeft64(t, 36)
|
||||
t = a[16] ^ d1
|
||||
bc2 = bits.RotateLeft64(t, 10)
|
||||
t = a[17] ^ d2
|
||||
bc3 = bits.RotateLeft64(t, 15)
|
||||
t = a[18] ^ d3
|
||||
bc4 = bits.RotateLeft64(t, 56)
|
||||
t = a[19] ^ d4
|
||||
bc0 = bits.RotateLeft64(t, 27)
|
||||
a[15] = bc0 ^ (bc2 &^ bc1)
|
||||
a[16] = bc1 ^ (bc3 &^ bc2)
|
||||
a[17] = bc2 ^ (bc4 &^ bc3)
|
||||
a[18] = bc3 ^ (bc0 &^ bc4)
|
||||
a[19] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[20] ^ d0
|
||||
bc3 = bits.RotateLeft64(t, 41)
|
||||
t = a[21] ^ d1
|
||||
bc4 = bits.RotateLeft64(t, 2)
|
||||
t = a[22] ^ d2
|
||||
bc0 = bits.RotateLeft64(t, 62)
|
||||
t = a[23] ^ d3
|
||||
bc1 = bits.RotateLeft64(t, 55)
|
||||
t = a[24] ^ d4
|
||||
bc2 = bits.RotateLeft64(t, 39)
|
||||
a[20] = bc0 ^ (bc2 &^ bc1)
|
||||
a[21] = bc1 ^ (bc3 &^ bc2)
|
||||
a[22] = bc2 ^ (bc4 &^ bc3)
|
||||
a[23] = bc3 ^ (bc0 &^ bc4)
|
||||
a[24] = bc4 ^ (bc1 &^ bc0)
|
||||
}
|
||||
}
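// The unrolled loop above runs for i = 0, 4, 8, 12, 16, and 20, applying four
// rounds per iteration, so all 24 rounds of Keccak-f[1600] are performed and
// each round constant rc[0] through rc[23] is used exactly once.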
|
235
sha3/sha3.go
@ -1,235 +0,0 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package sha3 implements the SHA-3 fixed-output-length hash functions and
|
||||
// the SHAKE variable-output-length functions defined by [FIPS 202], as well as
|
||||
// the cSHAKE extendable-output-length functions defined by [SP 800-185].
|
||||
//
|
||||
// [FIPS 202]: https://doi.org/10.6028/NIST.FIPS.202
|
||||
// [SP 800-185]: https://doi.org/10.6028/NIST.SP.800-185
|
||||
package sha3
|
||||
|
||||
import (
|
||||
// "github.com/xtls/reality/fips140"
|
||||
"github.com/xtls/reality/subtle"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// spongeDirection indicates the direction bytes are flowing through the sponge.
|
||||
type spongeDirection int
|
||||
|
||||
const (
|
||||
// spongeAbsorbing indicates that the sponge is absorbing input.
|
||||
spongeAbsorbing spongeDirection = iota
|
||||
// spongeSqueezing indicates that the sponge is being squeezed.
|
||||
spongeSqueezing
|
||||
)
|
||||
|
||||
type Digest struct {
|
||||
a [1600 / 8]byte // main state of the hash
|
||||
|
||||
// a[n:rate] is the buffer. If absorbing, it's the remaining space to XOR
|
||||
// into before running the permutation. If squeezing, it's the remaining
|
||||
// output to produce before running the permutation.
|
||||
n, rate int
|
||||
|
||||
// dsbyte contains the "domain separation" bits and the first bit of
|
||||
// the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
|
||||
// SHA-3 and SHAKE functions by appending bitstrings to the message.
|
||||
// Using a little-endian bit-ordering convention, these are "01" for SHA-3
|
||||
// and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
|
||||
// padding rule from section 5.1 is applied to pad the message to a multiple
|
||||
// of the rate, which involves adding a "1" bit, zero or more "0" bits, and
|
||||
// a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
|
||||
// giving 00000110b (0x06) and 00011111b (0x1f).
|
||||
// [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
|
||||
// "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
|
||||
// Extendable-Output Functions (May 2014)"
|
||||
dsbyte byte
|
||||
|
||||
outputLen int // the default output size in bytes
|
||||
state spongeDirection // whether the sponge is absorbing or squeezing
|
||||
}
|
||||
|
||||
// BlockSize returns the rate of sponge underlying this hash function.
|
||||
func (d *Digest) BlockSize() int { return d.rate }
|
||||
|
||||
// Size returns the output size of the hash function in bytes.
|
||||
func (d *Digest) Size() int { return d.outputLen }
|
||||
|
||||
// Reset resets the Digest to its initial state.
|
||||
func (d *Digest) Reset() {
|
||||
// Zero the permutation's state.
|
||||
for i := range d.a {
|
||||
d.a[i] = 0
|
||||
}
|
||||
d.state = spongeAbsorbing
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
func (d *Digest) Clone() *Digest {
|
||||
ret := *d
|
||||
return &ret
|
||||
}
|
||||
|
||||
// permute applies the KeccakF-1600 permutation.
|
||||
func (d *Digest) permute() {
|
||||
keccakF1600(&d.a)
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
// padAndPermute appends the domain separation bits in dsbyte, applies
|
||||
// the multi-bitrate 10..1 padding rule, and permutes the state.
|
||||
func (d *Digest) padAndPermute() {
|
||||
// Pad with this instance's domain-separator bits. We know that there's
|
||||
// at least one byte of space in the sponge because, if it were full,
|
||||
// permute would have been called to empty it. dsbyte also contains the
|
||||
// first one bit for the padding. See the comment in the state struct.
|
||||
d.a[d.n] ^= d.dsbyte
|
||||
// This adds the final one bit for the padding. Because of the way that
|
||||
// bits are numbered from the LSB upwards, the final bit is the MSB of
|
||||
// the last byte.
|
||||
d.a[d.rate-1] ^= 0x80
|
||||
// Apply the permutation
|
||||
d.permute()
|
||||
d.state = spongeSqueezing
|
||||
}
|
||||
|
||||
// Write absorbs more data into the hash's state.
|
||||
func (d *Digest) Write(p []byte) (n int, err error) { return d.write(p) }
|
||||
func (d *Digest) writeGeneric(p []byte) (n int, err error) {
|
||||
if d.state != spongeAbsorbing {
|
||||
panic("sha3: Write after Read")
|
||||
}
|
||||
|
||||
n = len(p)
|
||||
|
||||
for len(p) > 0 {
|
||||
x := subtle.XORBytes(d.a[d.n:d.rate], d.a[d.n:d.rate], p)
|
||||
d.n += x
|
||||
p = p[x:]
|
||||
|
||||
// If the sponge is full, apply the permutation.
|
||||
if d.n == d.rate {
|
||||
d.permute()
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// read squeezes an arbitrary number of bytes from the sponge.
|
||||
func (d *Digest) readGeneric(out []byte) (n int, err error) {
|
||||
// If we're still absorbing, pad and apply the permutation.
|
||||
if d.state == spongeAbsorbing {
|
||||
d.padAndPermute()
|
||||
}
|
||||
|
||||
n = len(out)
|
||||
|
||||
// Now, do the squeezing.
|
||||
for len(out) > 0 {
|
||||
// Apply the permutation if we've squeezed the sponge dry.
|
||||
if d.n == d.rate {
|
||||
d.permute()
|
||||
}
|
||||
|
||||
x := copy(out, d.a[d.n:d.rate])
|
||||
d.n += x
|
||||
out = out[x:]
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Sum appends the current hash to b and returns the resulting slice.
|
||||
// It does not change the underlying hash state.
|
||||
func (d *Digest) Sum(b []byte) []byte {
|
||||
// fips140.RecordApproved()
|
||||
return d.sum(b)
|
||||
}
|
||||
|
||||
func (d *Digest) sumGeneric(b []byte) []byte {
|
||||
if d.state != spongeAbsorbing {
|
||||
panic("sha3: Sum after Read")
|
||||
}
|
||||
|
||||
// Make a copy of the original hash so that caller can keep writing
|
||||
// and summing.
|
||||
dup := d.Clone()
|
||||
hash := make([]byte, dup.outputLen, 64) // explicit cap to allow stack allocation
|
||||
dup.read(hash)
|
||||
return append(b, hash...)
|
||||
}
|
||||
|
||||
const (
|
||||
magicSHA3 = "sha\x08"
|
||||
magicShake = "sha\x09"
|
||||
magicCShake = "sha\x0a"
|
||||
magicKeccak = "sha\x0b"
|
||||
// magic || rate || main state || n || sponge direction
|
||||
marshaledSize = len(magicSHA3) + 1 + 200 + 1 + 1
|
||||
)
|
||||
|
||||
func (d *Digest) MarshalBinary() ([]byte, error) {
|
||||
return d.AppendBinary(make([]byte, 0, marshaledSize))
|
||||
}
|
||||
|
||||
func (d *Digest) AppendBinary(b []byte) ([]byte, error) {
|
||||
switch d.dsbyte {
|
||||
case dsbyteSHA3:
|
||||
b = append(b, magicSHA3...)
|
||||
case dsbyteShake:
|
||||
b = append(b, magicShake...)
|
||||
case dsbyteCShake:
|
||||
b = append(b, magicCShake...)
|
||||
case dsbyteKeccak:
|
||||
b = append(b, magicKeccak...)
|
||||
default:
|
||||
panic("unknown dsbyte")
|
||||
}
|
||||
// rate is at most 168, and n is at most rate.
|
||||
b = append(b, byte(d.rate))
|
||||
b = append(b, d.a[:]...)
|
||||
b = append(b, byte(d.n), byte(d.state))
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (d *Digest) UnmarshalBinary(b []byte) error {
|
||||
if len(b) != marshaledSize {
|
||||
return errors.New("sha3: invalid hash state")
|
||||
}
|
||||
|
||||
magic := string(b[:len(magicSHA3)])
|
||||
b = b[len(magicSHA3):]
|
||||
switch {
|
||||
case magic == magicSHA3 && d.dsbyte == dsbyteSHA3:
|
||||
case magic == magicShake && d.dsbyte == dsbyteShake:
|
||||
case magic == magicCShake && d.dsbyte == dsbyteCShake:
|
||||
case magic == magicKeccak && d.dsbyte == dsbyteKeccak:
|
||||
default:
|
||||
return errors.New("sha3: invalid hash state identifier")
|
||||
}
|
||||
|
||||
rate := int(b[0])
|
||||
b = b[1:]
|
||||
if rate != d.rate {
|
||||
return errors.New("sha3: invalid hash state function")
|
||||
}
|
||||
|
||||
copy(d.a[:], b)
|
||||
b = b[len(d.a):]
|
||||
|
||||
n, state := int(b[0]), spongeDirection(b[1])
|
||||
if n > d.rate {
|
||||
return errors.New("sha3: invalid hash state")
|
||||
}
|
||||
d.n = n
|
||||
if state != spongeAbsorbing && state != spongeSqueezing {
|
||||
return errors.New("sha3: invalid hash state")
|
||||
}
|
||||
d.state = state
|
||||
|
||||
return nil
|
||||
}
|
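
Aside (not part of the diff): the dsbyte values quoted in the struct comment above can be checked with a couple of lines of bit arithmetic. A minimal, self-contained sketch; the constant names are mine:

package main

import "fmt"

func main() {
	// "01" for SHA-3 and "1111" for SHAKE, read LSB-first as described above.
	const sha3Bits, sha3Width = 0b10, 2
	const shakeBits, shakeWidth = 0b1111, 4
	// Merging the first "1" bit of the pad10*1 rule places it just above the
	// domain-separation bits, which is where 0x06 and 0x1f come from.
	fmt.Printf("0x%02x 0x%02x\n", sha3Bits|1<<sha3Width, shakeBits|1<<shakeWidth)
}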
@ -1,19 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sha3

func keccakF1600(a *[200]byte) {
keccakF1600Generic(a)
}

func (d *Digest) write(p []byte) (n int, err error) {
return d.writeGeneric(p)
}
func (d *Digest) read(out []byte) (n int, err error) {
return d.readGeneric(out)
}
func (d *Digest) sum(b []byte) []byte {
return d.sumGeneric(b)
}
151 sha3/shake.go
@ -1,151 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sha3

import (
"bytes"
//"crypto/internal/fips140"
"github.com/xtls/reality/byteorder"
"errors"
"math/bits"
)

type SHAKE struct {
d Digest // SHA-3 state context and Read/Write operations

// initBlock is the cSHAKE specific initialization set of bytes. It is initialized
// by newCShake function and stores concatenation of N followed by S, encoded
// by the method specified in 3.3 of [1].
// It is stored here in order for Reset() to be able to put context into
// initial state.
initBlock []byte
}

func bytepad(data []byte, rate int) []byte {
out := make([]byte, 0, 9+len(data)+rate-1)
out = append(out, leftEncode(uint64(rate))...)
out = append(out, data...)
if padlen := rate - len(out)%rate; padlen < rate {
out = append(out, make([]byte, padlen)...)
}
return out
}

func leftEncode(x uint64) []byte {
// Let n be the smallest positive integer for which 2^(8n) > x.
n := (bits.Len64(x) + 7) / 8
if n == 0 {
n = 1
}
// Return n || x with n as a byte and x an n bytes in big-endian order.
b := make([]byte, 9)
byteorder.BEPutUint64(b[1:], x)
b = b[9-n-1:]
b[0] = byte(n)
return b
}

func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) *SHAKE {
c := &SHAKE{d: Digest{rate: rate, outputLen: outputLen, dsbyte: dsbyte}}
c.initBlock = make([]byte, 0, 9+len(N)+9+len(S)) // leftEncode returns max 9 bytes
c.initBlock = append(c.initBlock, leftEncode(uint64(len(N))*8)...)
c.initBlock = append(c.initBlock, N...)
c.initBlock = append(c.initBlock, leftEncode(uint64(len(S))*8)...)
c.initBlock = append(c.initBlock, S...)
c.Write(bytepad(c.initBlock, c.d.rate))
return c
}

func (s *SHAKE) BlockSize() int { return s.d.BlockSize() }
func (s *SHAKE) Size() int { return s.d.Size() }

// Sum appends a portion of output to b and returns the resulting slice. The
// output length is selected to provide full-strength generic security: 32 bytes
// for SHAKE128 and 64 bytes for SHAKE256. It does not change the underlying
// state. It panics if any output has already been read.
func (s *SHAKE) Sum(in []byte) []byte { return s.d.Sum(in) }

// Write absorbs more data into the hash's state.
// It panics if any output has already been read.
func (s *SHAKE) Write(p []byte) (n int, err error) { return s.d.Write(p) }

func (s *SHAKE) Read(out []byte) (n int, err error) {
//fips140.RecordApproved()
// Note that read is not exposed on Digest since SHA-3 does not offer
// variable output length. It is only used internally by Sum.
return s.d.read(out)
}

// Reset resets the hash to initial state.
func (s *SHAKE) Reset() {
s.d.Reset()
if len(s.initBlock) != 0 {
s.Write(bytepad(s.initBlock, s.d.rate))
}
}

// Clone returns a copy of the SHAKE context in its current state.
func (s *SHAKE) Clone() *SHAKE {
ret := *s
return &ret
}

func (s *SHAKE) MarshalBinary() ([]byte, error) {
return s.AppendBinary(make([]byte, 0, marshaledSize+len(s.initBlock)))
}

func (s *SHAKE) AppendBinary(b []byte) ([]byte, error) {
b, err := s.d.AppendBinary(b)
if err != nil {
return nil, err
}
b = append(b, s.initBlock...)
return b, nil
}

func (s *SHAKE) UnmarshalBinary(b []byte) error {
if len(b) < marshaledSize {
return errors.New("sha3: invalid hash state")
}
if err := s.d.UnmarshalBinary(b[:marshaledSize]); err != nil {
return err
}
s.initBlock = bytes.Clone(b[marshaledSize:])
return nil
}

// NewShake128 creates a new SHAKE128 XOF.
func NewShake128() *SHAKE {
return &SHAKE{d: Digest{rate: rateK256, outputLen: 32, dsbyte: dsbyteShake}}
}

// NewShake256 creates a new SHAKE256 XOF.
func NewShake256() *SHAKE {
return &SHAKE{d: Digest{rate: rateK512, outputLen: 64, dsbyte: dsbyteShake}}
}

// NewCShake128 creates a new cSHAKE128 XOF.
//
// N is used to define functions based on cSHAKE, it can be empty when plain
// cSHAKE is desired. S is a customization byte string used for domain
// separation. When N and S are both empty, this is equivalent to NewShake128.
func NewCShake128(N, S []byte) *SHAKE {
if len(N) == 0 && len(S) == 0 {
return NewShake128()
}
return newCShake(N, S, rateK256, 32, dsbyteCShake)
}

// NewCShake256 creates a new cSHAKE256 XOF.
//
// N is used to define functions based on cSHAKE, it can be empty when plain
// cSHAKE is desired. S is a customization byte string used for domain
// separation. When N and S are both empty, this is equivalent to NewShake256.
func NewCShake256(N, S []byte) *SHAKE {
if len(N) == 0 && len(S) == 0 {
return NewShake256()
}
return newCShake(N, S, rateK512, 64, dsbyteCShake)
}
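
Aside (not part of the diff): a minimal usage sketch of the SHAKE/cSHAKE API deleted above, assuming the pre-commit import path github.com/xtls/reality/sha3; inputs and the customization string are placeholders of mine:

package main

import (
	"fmt"

	"github.com/xtls/reality/sha3"
)

func main() {
	// SHAKE128: absorb input, then squeeze an arbitrary amount of output.
	s := sha3.NewShake128()
	s.Write([]byte("hello"))
	out := make([]byte, 32)
	s.Read(out)
	fmt.Printf("%x\n", out)

	// cSHAKE128 with a customization string S for domain separation.
	c := sha3.NewCShake128(nil, []byte("my-protocol v1"))
	c.Write([]byte("hello"))
	c.Read(out)
	fmt.Printf("%x\n", out)
}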
301 sha512/sha512.go
@ -1,301 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package sha512 implements the SHA-384, SHA-512, SHA-512/224, and SHA-512/256
// hash algorithms as defined in FIPS 180-4.
package sha512

import (
// "github.com/xtls/reality/fips140"
"github.com/xtls/reality/byteorder"
"errors"
)

const (
// size512 is the size, in bytes, of a SHA-512 checksum.
size512 = 64

// size224 is the size, in bytes, of a SHA-512/224 checksum.
size224 = 28

// size256 is the size, in bytes, of a SHA-512/256 checksum.
size256 = 32

// size384 is the size, in bytes, of a SHA-384 checksum.
size384 = 48

// blockSize is the block size, in bytes, of the SHA-512/224,
// SHA-512/256, SHA-384 and SHA-512 hash functions.
blockSize = 128
)

const (
chunk = 128
init0 = 0x6a09e667f3bcc908
init1 = 0xbb67ae8584caa73b
init2 = 0x3c6ef372fe94f82b
init3 = 0xa54ff53a5f1d36f1
init4 = 0x510e527fade682d1
init5 = 0x9b05688c2b3e6c1f
init6 = 0x1f83d9abfb41bd6b
init7 = 0x5be0cd19137e2179
init0_224 = 0x8c3d37c819544da2
init1_224 = 0x73e1996689dcd4d6
init2_224 = 0x1dfab7ae32ff9c82
init3_224 = 0x679dd514582f9fcf
init4_224 = 0x0f6d2b697bd44da8
init5_224 = 0x77e36f7304c48942
init6_224 = 0x3f9d85a86a1d36c8
init7_224 = 0x1112e6ad91d692a1
init0_256 = 0x22312194fc2bf72c
init1_256 = 0x9f555fa3c84c64c2
init2_256 = 0x2393b86b6f53b151
init3_256 = 0x963877195940eabd
init4_256 = 0x96283ee2a88effe3
init5_256 = 0xbe5e1e2553863992
init6_256 = 0x2b0199fc2c85b8aa
init7_256 = 0x0eb72ddc81c52ca2
init0_384 = 0xcbbb9d5dc1059ed8
init1_384 = 0x629a292a367cd507
init2_384 = 0x9159015a3070dd17
init3_384 = 0x152fecd8f70e5939
init4_384 = 0x67332667ffc00b31
init5_384 = 0x8eb44a8768581511
init6_384 = 0xdb0c2e0d64f98fa7
init7_384 = 0x47b5481dbefa4fa4
)

// Digest is a SHA-384, SHA-512, SHA-512/224, or SHA-512/256 [hash.Hash]
// implementation.
type Digest struct {
h [8]uint64
x [chunk]byte
nx int
len uint64
size int // size224, size256, size384, or size512
}

func (d *Digest) Reset() {
switch d.size {
case size384:
d.h[0] = init0_384
d.h[1] = init1_384
d.h[2] = init2_384
d.h[3] = init3_384
d.h[4] = init4_384
d.h[5] = init5_384
d.h[6] = init6_384
d.h[7] = init7_384
case size224:
d.h[0] = init0_224
d.h[1] = init1_224
d.h[2] = init2_224
d.h[3] = init3_224
d.h[4] = init4_224
d.h[5] = init5_224
d.h[6] = init6_224
d.h[7] = init7_224
case size256:
d.h[0] = init0_256
d.h[1] = init1_256
d.h[2] = init2_256
d.h[3] = init3_256
d.h[4] = init4_256
d.h[5] = init5_256
d.h[6] = init6_256
d.h[7] = init7_256
case size512:
d.h[0] = init0
d.h[1] = init1
d.h[2] = init2
d.h[3] = init3
d.h[4] = init4
d.h[5] = init5
d.h[6] = init6
d.h[7] = init7
default:
panic("unknown size")
}
d.nx = 0
d.len = 0
}

const (
magic384 = "sha\x04"
magic512_224 = "sha\x05"
magic512_256 = "sha\x06"
magic512 = "sha\x07"
marshaledSize = len(magic512) + 8*8 + chunk + 8
)

func (d *Digest) MarshalBinary() ([]byte, error) {
return d.AppendBinary(make([]byte, 0, marshaledSize))
}

func (d *Digest) AppendBinary(b []byte) ([]byte, error) {
switch d.size {
case size384:
b = append(b, magic384...)
case size224:
b = append(b, magic512_224...)
case size256:
b = append(b, magic512_256...)
case size512:
b = append(b, magic512...)
default:
panic("unknown size")
}
b = byteorder.BEAppendUint64(b, d.h[0])
b = byteorder.BEAppendUint64(b, d.h[1])
b = byteorder.BEAppendUint64(b, d.h[2])
b = byteorder.BEAppendUint64(b, d.h[3])
b = byteorder.BEAppendUint64(b, d.h[4])
b = byteorder.BEAppendUint64(b, d.h[5])
b = byteorder.BEAppendUint64(b, d.h[6])
b = byteorder.BEAppendUint64(b, d.h[7])
b = append(b, d.x[:d.nx]...)
b = append(b, make([]byte, len(d.x)-d.nx)...)
b = byteorder.BEAppendUint64(b, d.len)
return b, nil
}

func (d *Digest) UnmarshalBinary(b []byte) error {
if len(b) < len(magic512) {
return errors.New("crypto/sha512: invalid hash state identifier")
}
switch {
case d.size == size384 && string(b[:len(magic384)]) == magic384:
case d.size == size224 && string(b[:len(magic512_224)]) == magic512_224:
case d.size == size256 && string(b[:len(magic512_256)]) == magic512_256:
case d.size == size512 && string(b[:len(magic512)]) == magic512:
default:
return errors.New("crypto/sha512: invalid hash state identifier")
}
if len(b) != marshaledSize {
return errors.New("crypto/sha512: invalid hash state size")
}
b = b[len(magic512):]
b, d.h[0] = consumeUint64(b)
b, d.h[1] = consumeUint64(b)
b, d.h[2] = consumeUint64(b)
b, d.h[3] = consumeUint64(b)
b, d.h[4] = consumeUint64(b)
b, d.h[5] = consumeUint64(b)
b, d.h[6] = consumeUint64(b)
b, d.h[7] = consumeUint64(b)
b = b[copy(d.x[:], b):]
b, d.len = consumeUint64(b)
d.nx = int(d.len % chunk)
return nil
}

func consumeUint64(b []byte) ([]byte, uint64) {
return b[8:], byteorder.BEUint64(b)
}

// New returns a new Digest computing the SHA-512 hash.
func New() *Digest {
d := &Digest{size: size512}
d.Reset()
return d
}

// New512_224 returns a new Digest computing the SHA-512/224 hash.
func New512_224() *Digest {
d := &Digest{size: size224}
d.Reset()
return d
}

// New512_256 returns a new Digest computing the SHA-512/256 hash.
func New512_256() *Digest {
d := &Digest{size: size256}
d.Reset()
return d
}

// New384 returns a new Digest computing the SHA-384 hash.
func New384() *Digest {
d := &Digest{size: size384}
d.Reset()
return d
}

func (d *Digest) Size() int {
return d.size
}

func (d *Digest) BlockSize() int { return blockSize }

func (d *Digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.len += uint64(nn)
if d.nx > 0 {
n := copy(d.x[d.nx:], p)
d.nx += n
if d.nx == chunk {
block(d, d.x[:])
d.nx = 0
}
p = p[n:]
}
if len(p) >= chunk {
n := len(p) &^ (chunk - 1)
block(d, p[:n])
p = p[n:]
}
if len(p) > 0 {
d.nx = copy(d.x[:], p)
}
return
}

func (d *Digest) Sum(in []byte) []byte {
// fips140.RecordApproved()
// Make a copy of d so that caller can keep writing and summing.
d0 := new(Digest)
*d0 = *d
hash := d0.checkSum()
return append(in, hash[:d.size]...)
}

func (d *Digest) checkSum() [size512]byte {
// Padding. Add a 1 bit and 0 bits until 112 bytes mod 128.
len := d.len
var tmp [128 + 16]byte // padding + length buffer
tmp[0] = 0x80
var t uint64
if len%128 < 112 {
t = 112 - len%128
} else {
t = 128 + 112 - len%128
}

// Length in bits.
len <<= 3
padlen := tmp[:t+16]
// Upper 64 bits are always zero, because len variable has type uint64,
// and tmp is already zeroed at that index, so we can skip updating it.
// byteorder.BEPutUint64(padlen[t+0:], 0)
byteorder.BEPutUint64(padlen[t+8:], len)
d.Write(padlen)

if d.nx != 0 {
panic("d.nx != 0")
}

var digest [size512]byte
byteorder.BEPutUint64(digest[0:], d.h[0])
byteorder.BEPutUint64(digest[8:], d.h[1])
byteorder.BEPutUint64(digest[16:], d.h[2])
byteorder.BEPutUint64(digest[24:], d.h[3])
byteorder.BEPutUint64(digest[32:], d.h[4])
byteorder.BEPutUint64(digest[40:], d.h[5])
if d.size != size384 {
byteorder.BEPutUint64(digest[48:], d.h[6])
byteorder.BEPutUint64(digest[56:], d.h[7])
}

return digest
}
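
Aside (not part of the diff): the deleted Digest behaves like a plain hash.Hash. A minimal usage sketch, assuming the pre-commit import path github.com/xtls/reality/sha512:

package main

import (
	"fmt"

	"github.com/xtls/reality/sha512"
)

func main() {
	d := sha512.New384() // SHA-384: same block function, output truncated to 48 bytes
	d.Write([]byte("abc"))
	fmt.Printf("%x\n", d.Sum(nil))
}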
@ -1,144 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// SHA512 block step.
// In its own file so that a faster assembly or C version
// can be substituted easily.

package sha512

import "math/bits"

var _K = [...]uint64{
0x428a2f98d728ae22,
0x7137449123ef65cd,
0xb5c0fbcfec4d3b2f,
0xe9b5dba58189dbbc,
0x3956c25bf348b538,
0x59f111f1b605d019,
0x923f82a4af194f9b,
0xab1c5ed5da6d8118,
0xd807aa98a3030242,
0x12835b0145706fbe,
0x243185be4ee4b28c,
0x550c7dc3d5ffb4e2,
0x72be5d74f27b896f,
0x80deb1fe3b1696b1,
0x9bdc06a725c71235,
0xc19bf174cf692694,
0xe49b69c19ef14ad2,
0xefbe4786384f25e3,
0x0fc19dc68b8cd5b5,
0x240ca1cc77ac9c65,
0x2de92c6f592b0275,
0x4a7484aa6ea6e483,
0x5cb0a9dcbd41fbd4,
0x76f988da831153b5,
0x983e5152ee66dfab,
0xa831c66d2db43210,
0xb00327c898fb213f,
0xbf597fc7beef0ee4,
0xc6e00bf33da88fc2,
0xd5a79147930aa725,
0x06ca6351e003826f,
0x142929670a0e6e70,
0x27b70a8546d22ffc,
0x2e1b21385c26c926,
0x4d2c6dfc5ac42aed,
0x53380d139d95b3df,
0x650a73548baf63de,
0x766a0abb3c77b2a8,
0x81c2c92e47edaee6,
0x92722c851482353b,
0xa2bfe8a14cf10364,
0xa81a664bbc423001,
0xc24b8b70d0f89791,
0xc76c51a30654be30,
0xd192e819d6ef5218,
0xd69906245565a910,
0xf40e35855771202a,
0x106aa07032bbd1b8,
0x19a4c116b8d2d0c8,
0x1e376c085141ab53,
0x2748774cdf8eeb99,
0x34b0bcb5e19b48a8,
0x391c0cb3c5c95a63,
0x4ed8aa4ae3418acb,
0x5b9cca4f7763e373,
0x682e6ff3d6b2b8a3,
0x748f82ee5defb2fc,
0x78a5636f43172f60,
0x84c87814a1f0ab72,
0x8cc702081a6439ec,
0x90befffa23631e28,
0xa4506cebde82bde9,
0xbef9a3f7b2c67915,
0xc67178f2e372532b,
0xca273eceea26619c,
0xd186b8c721c0c207,
0xeada7dd6cde0eb1e,
0xf57d4f7fee6ed178,
0x06f067aa72176fba,
0x0a637dc5a2c898a6,
0x113f9804bef90dae,
0x1b710b35131c471b,
0x28db77f523047d84,
0x32caab7b40c72493,
0x3c9ebe0a15c9bebc,
0x431d67c49c100d4c,
0x4cc5d4becb3e42b6,
0x597f299cfc657e2a,
0x5fcb6fab3ad6faec,
0x6c44198c4a475817,
}

func blockGeneric(dig *Digest, p []byte) {
var w [80]uint64
h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]
for len(p) >= chunk {
for i := 0; i < 16; i++ {
j := i * 8
w[i] = uint64(p[j])<<56 | uint64(p[j+1])<<48 | uint64(p[j+2])<<40 | uint64(p[j+3])<<32 |
uint64(p[j+4])<<24 | uint64(p[j+5])<<16 | uint64(p[j+6])<<8 | uint64(p[j+7])
}
for i := 16; i < 80; i++ {
v1 := w[i-2]
t1 := bits.RotateLeft64(v1, -19) ^ bits.RotateLeft64(v1, -61) ^ (v1 >> 6)
v2 := w[i-15]
t2 := bits.RotateLeft64(v2, -1) ^ bits.RotateLeft64(v2, -8) ^ (v2 >> 7)

w[i] = t1 + w[i-7] + t2 + w[i-16]
}

a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7

for i := 0; i < 80; i++ {
t1 := h + (bits.RotateLeft64(e, -14) ^ bits.RotateLeft64(e, -18) ^ bits.RotateLeft64(e, -41)) + ((e & f) ^ (^e & g)) + _K[i] + w[i]

t2 := (bits.RotateLeft64(a, -28) ^ bits.RotateLeft64(a, -34) ^ bits.RotateLeft64(a, -39)) + ((a & b) ^ (a & c) ^ (b & c))

h = g
g = f
f = e
e = d + t1
d = c
c = b
b = a
a = t1 + t2
}

h0 += a
h1 += b
h2 += c
h3 += d
h4 += e
h5 += f
h6 += g
h7 += h

p = p[chunk:]
}

dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7
}
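
Aside (not part of the diff): the rotate/shift combinations inlined in blockGeneric are the FIPS 180-4 SHA-512 sigma functions. A sketch that names them for readability; the helper names are mine:

package main

import (
	"fmt"
	"math/bits"
)

// Message-schedule functions (applied to w[i-15] and w[i-2] above).
func sigma0(x uint64) uint64 { return bits.RotateLeft64(x, -1) ^ bits.RotateLeft64(x, -8) ^ (x >> 7) }
func sigma1(x uint64) uint64 { return bits.RotateLeft64(x, -19) ^ bits.RotateLeft64(x, -61) ^ (x >> 6) }

// Round functions (applied to the working variables a and e above).
func bigSigma0(a uint64) uint64 {
	return bits.RotateLeft64(a, -28) ^ bits.RotateLeft64(a, -34) ^ bits.RotateLeft64(a, -39)
}
func bigSigma1(e uint64) uint64 {
	return bits.RotateLeft64(e, -14) ^ bits.RotateLeft64(e, -18) ^ bits.RotateLeft64(e, -41)
}

func main() {
	fmt.Printf("%#x %#x %#x %#x\n", sigma0(1), sigma1(1), bigSigma0(1), bigSigma1(1))
}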
@ -1,9 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sha512

func block(dig *Digest, p []byte) {
blockGeneric(dig, p)
}
@ -1,97 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package subtle

import (
"math/bits"

"github.com/xtls/reality/byteorder"
)

// ConstantTimeCompare returns 1 if the two slices, x and y, have equal contents
// and 0 otherwise. The time taken is a function of the length of the slices and
// is independent of the contents. If the lengths of x and y do not match it
// returns 0 immediately.
func ConstantTimeCompare(x, y []byte) int {
if len(x) != len(y) {
return 0
}

var v byte

for i := 0; i < len(x); i++ {
v |= x[i] ^ y[i]
}

return ConstantTimeByteEq(v, 0)
}

// ConstantTimeLessOrEqBytes returns 1 if x <= y and 0 otherwise. The comparison
// is lexicographical, or big-endian. The time taken is a function of the length of
// the slices and is independent of the contents. If the lengths of x and y do not
// match it returns 0 immediately.
func ConstantTimeLessOrEqBytes(x, y []byte) int {
if len(x) != len(y) {
return 0
}

// Do a constant time subtraction chain y - x.
// If there is no borrow at the end, then x <= y.
var b uint64
for len(x) > 8 {
x0 := byteorder.BEUint64(x[len(x)-8:])
y0 := byteorder.BEUint64(y[len(y)-8:])
_, b = bits.Sub64(y0, x0, b)
x = x[:len(x)-8]
y = y[:len(y)-8]
}
if len(x) > 0 {
xb := make([]byte, 8)
yb := make([]byte, 8)
copy(xb[8-len(x):], x)
copy(yb[8-len(y):], y)
x0 := byteorder.BEUint64(xb)
y0 := byteorder.BEUint64(yb)
_, b = bits.Sub64(y0, x0, b)
}
return int(b ^ 1)
}

// ConstantTimeSelect returns x if v == 1 and y if v == 0.
// Its behavior is undefined if v takes any other value.
func ConstantTimeSelect(v, x, y int) int { return ^(v-1)&x | (v-1)&y }

// ConstantTimeByteEq returns 1 if x == y and 0 otherwise.
func ConstantTimeByteEq(x, y uint8) int {
return int((uint32(x^y) - 1) >> 31)
}

// ConstantTimeEq returns 1 if x == y and 0 otherwise.
func ConstantTimeEq(x, y int32) int {
return int((uint64(uint32(x^y)) - 1) >> 63)
}

// ConstantTimeCopy copies the contents of y into x (a slice of equal length)
// if v == 1. If v == 0, x is left unchanged. Its behavior is undefined if v
// takes any other value.
func ConstantTimeCopy(v int, x, y []byte) {
if len(x) != len(y) {
panic("subtle: slices have different lengths")
}

xmask := byte(v - 1)
ymask := byte(^(v - 1))
for i := 0; i < len(x); i++ {
x[i] = x[i]&xmask | y[i]&ymask
}
}

// ConstantTimeLessOrEq returns 1 if x <= y and 0 otherwise.
// Its behavior is undefined if x or y are negative or > 2**31 - 1.
func ConstantTimeLessOrEq(x, y int) int {
x32 := int32(x)
y32 := int32(y)
return int(((x32 - y32 - 1) >> 31) & 1)
}
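
Aside (not part of the diff): most of these helpers also exist in the standard library's crypto/subtle, and the typical use is a timing-safe comparison of secrets such as MACs:

package main

import (
	"crypto/subtle"
	"fmt"
)

func main() {
	got := []byte("computed-mac")
	want := []byte("computed-mac")
	// 1 when equal, 0 otherwise; every byte is inspected regardless of where a
	// mismatch occurs, so timing does not reveal the mismatch position.
	fmt.Println(subtle.ConstantTimeCompare(got, want))
}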
@ -1,30 +0,0 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package subtle

import "github.com/xtls/reality/alias"

// XORBytes sets dst[i] = x[i] ^ y[i] for all i < n = min(len(x), len(y)),
// returning n, the number of bytes written to dst.
//
// If dst does not have length at least n,
// XORBytes panics without writing anything to dst.
//
// dst and x or y may overlap exactly or not at all,
// otherwise XORBytes may panic.
func XORBytes(dst, x, y []byte) int {
n := min(len(x), len(y))
if n == 0 {
return 0
}
if n > len(dst) {
panic("subtle.XORBytes: dst too short")
}
if alias.InexactOverlap(dst[:n], x[:n]) || alias.InexactOverlap(dst[:n], y[:n]) {
panic("subtle.XORBytes: invalid overlap")
}
xorBytes(&dst[0], &x[0], &y[0], n) // arch-specific
return n
}
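
Aside (not part of the diff): XORBytes has the same contract as the standard library's crypto/subtle.XORBytes; a common use is applying a keystream in place, which is allowed because dst may alias x exactly:

package main

import (
	"crypto/subtle"
	"fmt"
)

func main() {
	buf := []byte{0xde, 0xad, 0xbe, 0xef}
	keystream := []byte{0x01, 0x02, 0x03, 0x04}
	// XOR in place: dst and x are the same slice, which the doc comment permits.
	n := subtle.XORBytes(buf, buf, keystream)
	fmt.Println(n, buf) // 4 [223 175 189 235]
}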
@ -1,62 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package subtle

import (
"runtime"
"unsafe"
)

const wordSize = unsafe.Sizeof(uintptr(0))

const supportsUnaligned = runtime.GOARCH == "386" ||
runtime.GOARCH == "amd64" ||
runtime.GOARCH == "ppc64" ||
runtime.GOARCH == "ppc64le" ||
runtime.GOARCH == "s390x"

func xorBytes(dstb, xb, yb *byte, n int) {
// xorBytes assembly is written using pointers and n. Back to slices.
dst := unsafe.Slice(dstb, n)
x := unsafe.Slice(xb, n)
y := unsafe.Slice(yb, n)

if supportsUnaligned || aligned(dstb, xb, yb) {
xorLoop(words(dst), words(x), words(y))
if uintptr(n)%wordSize == 0 {
return
}
done := n &^ int(wordSize-1)
dst = dst[done:]
x = x[done:]
y = y[done:]
}
xorLoop(dst, x, y)
}

// aligned reports whether dst, x, and y are all word-aligned pointers.
func aligned(dst, x, y *byte) bool {
return (uintptr(unsafe.Pointer(dst))|uintptr(unsafe.Pointer(x))|uintptr(unsafe.Pointer(y)))&(wordSize-1) == 0
}

// words returns a []uintptr pointing at the same data as x,
// with any trailing partial word removed.
func words(x []byte) []uintptr {
n := uintptr(len(x)) / wordSize
if n == 0 {
// Avoid creating a *uintptr that refers to data smaller than a uintptr;
// see issue 59334.
return nil
}
return unsafe.Slice((*uintptr)(unsafe.Pointer(&x[0])), n)
}

func xorLoop[T byte | uintptr](dst, x, y []T) {
x = x[:len(dst)] // remove bounds check in loop
y = y[:len(dst)] // remove bounds check in loop
for i := range dst {
dst[i] = x[i] ^ y[i]
}
}
@ -5,15 +5,13 @@
package tls12

import (
"github.com/xtls/reality/fips140"
"github.com/xtls/reality/hmac"
// "github.com/xtls/reality/sha256"
// "github.com/xtls/reality/sha512"
"crypto/hmac"
"hash"
)

// PRF implements the TLS 1.2 pseudo-random function, as defined in RFC 5246,
// Section 5 and allowed by SP 800-135, Revision 1, Section 4.2.2.
func PRF[H fips140.Hash](hash func() H, secret []byte, label string, seed []byte, keyLen int) []byte {
func PRF[H hash.Hash](hash func() H, secret []byte, label string, seed []byte, keyLen int) []byte {
labelAndSeed := make([]byte, len(label)+len(seed))
copy(labelAndSeed, label)
copy(labelAndSeed[len(label):], seed)
@ -24,8 +22,8 @@ func PRF[H fips140.Hash](hash func() H, secret []byte, label string, seed []byte
}

// pHash implements the P_hash function, as defined in RFC 5246, Section 5.
func pHash[H fips140.Hash](hash func() H, result, secret, seed []byte) {
h := hmac.New(hash, secret)
func pHash[H hash.Hash](hash1 func() H, result, secret, seed []byte) {
h := hmac.New(any(hash1).(func() hash.Hash), secret)
h.Write(seed)
a := h.Sum(nil)

@ -48,7 +46,7 @@ const extendedMasterSecretLabel = "extended master secret"

// MasterSecret implements the TLS 1.2 extended master secret derivation, as
// defined in RFC 7627 and allowed by SP 800-135, Revision 1, Section 4.2.2.
func MasterSecret[H fips140.Hash](hash func() H, preMasterSecret, transcript []byte) []byte {
func MasterSecret[H hash.Hash](hash func() H, preMasterSecret, transcript []byte) []byte {
// "The TLS 1.2 KDF is an approved KDF when the following conditions are
// satisfied: [...] (3) P_HASH uses either SHA-256, SHA-384 or SHA-512."
//h := hash()
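
Aside (not part of the diff): with the reworked signature, PRF can be driven directly by a standard-library hash constructor. A minimal sketch, assuming the package lives at github.com/xtls/reality/tls12; the secret and random values are placeholders of mine:

package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/xtls/reality/tls12"
)

func main() {
	preMaster := []byte("pre-master-secret")
	seed := append([]byte("client random..."), []byte("server random...")...)
	// RFC 5246: master_secret = PRF(pre_master_secret, "master secret",
	// ClientHello.random + ServerHello.random)[0..47]
	master := tls12.PRF(sha256.New, preMaster, "master secret", seed, 48)
	fmt.Printf("%x\n", master)
}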
@ -7,9 +7,9 @@
package tls13

import (
"github.com/xtls/reality/byteorder"
"github.com/xtls/reality/fips140"
"github.com/xtls/reality/hkdf"
"crypto/hkdf"
"encoding/binary"
"hash"
)

// We don't set the service indicator in this package but we delegate that to
@ -17,7 +17,7 @@ import (
// its own.

// ExpandLabel implements HKDF-Expand-Label from RFC 8446, Section 7.1.
func ExpandLabel[H fips140.Hash](hash func() H, secret []byte, label string, context []byte, length int) []byte {
func ExpandLabel[H hash.Hash](hash func() H, secret []byte, label string, context []byte, length int) []byte {
if len("tls13 ")+len(label) > 255 || len(context) > 255 {
// It should be impossible for this to panic: labels are fixed strings,
// and context is either a fixed-length computed hash, or parsed from a
@ -30,23 +30,25 @@ func ExpandLabel[H fips140.Hash](hash func() H, secret []byte, label string, con
panic("tls13: label or context too long")
}
hkdfLabel := make([]byte, 0, 2+1+len("tls13 ")+len(label)+1+len(context))
hkdfLabel = byteorder.BEAppendUint16(hkdfLabel, uint16(length))
hkdfLabel = binary.BigEndian.AppendUint16(hkdfLabel, uint16(length))
hkdfLabel = append(hkdfLabel, byte(len("tls13 ")+len(label)))
hkdfLabel = append(hkdfLabel, "tls13 "...)
hkdfLabel = append(hkdfLabel, label...)
hkdfLabel = append(hkdfLabel, byte(len(context)))
hkdfLabel = append(hkdfLabel, context...)
return hkdf.Expand(hash, secret, string(hkdfLabel), length)
b, _ := hkdf.Expand(hash, secret, string(hkdfLabel), length)
return b
}

func extract[H fips140.Hash](hash func() H, newSecret, currentSecret []byte) []byte {
func extract[H hash.Hash](hash func() H, newSecret, currentSecret []byte) []byte {
if newSecret == nil {
newSecret = make([]byte, hash().Size())
}
return hkdf.Extract(hash, newSecret, currentSecret)
b, _ := hkdf.Extract(hash, newSecret, currentSecret)
return b
}

func deriveSecret[H fips140.Hash](hash func() H, secret []byte, label string, transcript fips140.Hash) []byte {
func deriveSecret[H hash.Hash](hash func() H, secret []byte, label string, transcript hash.Hash) []byte {
if transcript == nil {
transcript = hash()
}
@ -67,13 +69,13 @@ const (

type EarlySecret struct {
secret []byte
hash func() fips140.Hash
hash func() hash.Hash
}

func NewEarlySecret[H fips140.Hash](hash func() H, psk []byte) *EarlySecret {
func NewEarlySecret[H hash.Hash](h func() H, psk []byte) *EarlySecret {
return &EarlySecret{
secret: extract(hash, psk, nil),
hash: func() fips140.Hash { return hash() },
secret: extract(h, psk, nil),
hash: func() hash.Hash { return h() },
}
}

@ -83,13 +85,13 @@ func (s *EarlySecret) ResumptionBinderKey() []byte {

// ClientEarlyTrafficSecret derives the client_early_traffic_secret from the
// early secret and the transcript up to the ClientHello.
func (s *EarlySecret) ClientEarlyTrafficSecret(transcript fips140.Hash) []byte {
func (s *EarlySecret) ClientEarlyTrafficSecret(transcript hash.Hash) []byte {
return deriveSecret(s.hash, s.secret, clientEarlyTrafficLabel, transcript)
}

type HandshakeSecret struct {
secret []byte
hash func() fips140.Hash
hash func() hash.Hash
}

func (s *EarlySecret) HandshakeSecret(sharedSecret []byte) *HandshakeSecret {
@ -102,19 +104,19 @@ func (s *EarlySecret) HandshakeSecret(sharedSecret []byte) *HandshakeSecret {

// ClientHandshakeTrafficSecret derives the client_handshake_traffic_secret from
// the handshake secret and the transcript up to the ServerHello.
func (s *HandshakeSecret) ClientHandshakeTrafficSecret(transcript fips140.Hash) []byte {
func (s *HandshakeSecret) ClientHandshakeTrafficSecret(transcript hash.Hash) []byte {
return deriveSecret(s.hash, s.secret, clientHandshakeTrafficLabel, transcript)
}

// ServerHandshakeTrafficSecret derives the server_handshake_traffic_secret from
// the handshake secret and the transcript up to the ServerHello.
func (s *HandshakeSecret) ServerHandshakeTrafficSecret(transcript fips140.Hash) []byte {
func (s *HandshakeSecret) ServerHandshakeTrafficSecret(transcript hash.Hash) []byte {
return deriveSecret(s.hash, s.secret, serverHandshakeTrafficLabel, transcript)
}

type MasterSecret struct {
secret []byte
hash func() fips140.Hash
hash func() hash.Hash
}

func (s *HandshakeSecret) MasterSecret() *MasterSecret {
@ -127,30 +129,30 @@ func (s *HandshakeSecret) MasterSecret() *MasterSecret {

// ClientApplicationTrafficSecret derives the client_application_traffic_secret_0
// from the master secret and the transcript up to the server Finished.
func (s *MasterSecret) ClientApplicationTrafficSecret(transcript fips140.Hash) []byte {
func (s *MasterSecret) ClientApplicationTrafficSecret(transcript hash.Hash) []byte {
return deriveSecret(s.hash, s.secret, clientApplicationTrafficLabel, transcript)
}

// ServerApplicationTrafficSecret derives the server_application_traffic_secret_0
// from the master secret and the transcript up to the server Finished.
func (s *MasterSecret) ServerApplicationTrafficSecret(transcript fips140.Hash) []byte {
func (s *MasterSecret) ServerApplicationTrafficSecret(transcript hash.Hash) []byte {
return deriveSecret(s.hash, s.secret, serverApplicationTrafficLabel, transcript)
}

// ResumptionMasterSecret derives the resumption_master_secret from the master secret
// and the transcript up to the client Finished.
func (s *MasterSecret) ResumptionMasterSecret(transcript fips140.Hash) []byte {
func (s *MasterSecret) ResumptionMasterSecret(transcript hash.Hash) []byte {
return deriveSecret(s.hash, s.secret, resumptionLabel, transcript)
}

type ExporterMasterSecret struct {
secret []byte
hash func() fips140.Hash
hash func() hash.Hash
}

// ExporterMasterSecret derives the exporter_master_secret from the master secret
// and the transcript up to the server Finished.
func (s *MasterSecret) ExporterMasterSecret(transcript fips140.Hash) *ExporterMasterSecret {
func (s *MasterSecret) ExporterMasterSecret(transcript hash.Hash) *ExporterMasterSecret {
return &ExporterMasterSecret{
secret: deriveSecret(s.hash, s.secret, exporterLabel, transcript),
hash: s.hash,
@ -159,7 +161,7 @@ func (s *MasterSecret) ExporterMasterSecret(transcript fips140.Hash) *ExporterMa

// EarlyExporterMasterSecret derives the exporter_master_secret from the early secret
// and the transcript up to the ClientHello.
func (s *EarlySecret) EarlyExporterMasterSecret(transcript fips140.Hash) *ExporterMasterSecret {
func (s *EarlySecret) EarlyExporterMasterSecret(transcript hash.Hash) *ExporterMasterSecret {
return &ExporterMasterSecret{
secret: deriveSecret(s.hash, s.secret, earlyExporterLabel, transcript),
hash: s.hash,
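
Aside (not part of the diff): a minimal sketch of driving the TLS 1.3 key-schedule helpers shown above, assuming the package path github.com/xtls/reality/tls13; the shared secret and transcript below are placeholders of mine:

package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/xtls/reality/tls13"
)

func main() {
	early := tls13.NewEarlySecret(sha256.New, nil) // nil PSK -> all-zero early secret
	hs := early.HandshakeSecret(make([]byte, 32))  // placeholder (EC)DHE shared secret

	transcript := sha256.New()
	transcript.Write([]byte("ClientHello || ServerHello")) // placeholder transcript

	fmt.Printf("%x\n", hs.ClientHandshakeTrafficSecret(transcript))
	fmt.Printf("%x\n", tls13.ExpandLabel(sha256.New, make([]byte, 32), "key", nil, 16))
}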