diff --git a/src/runtime/algorithm.go b/src/runtime/algorithm.go
index 0f4b7786..15487176 100644
--- a/src/runtime/algorithm.go
+++ b/src/runtime/algorithm.go
@@ -3,7 +3,9 @@ package runtime
 // This file implements various core algorithms used in the runtime package and
 // standard library.
 
-import "unsafe"
+import (
+	"unsafe"
+)
 
 // This function is used by hash/maphash.
 func fastrand() uint32 {
@@ -30,27 +32,3 @@ func memhash(p unsafe.Pointer, seed, s uintptr) uintptr {
 	}
 	return uintptr(hash32(p, s, seed))
 }
-
-// Get FNV-1a hash of the given memory buffer.
-//
-// https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function#FNV-1a_hash
-func hash32(ptr unsafe.Pointer, n uintptr, seed uintptr) uint32 {
-	var result uint32 = 2166136261 ^ uint32(seed) // FNV offset basis
-	for i := uintptr(0); i < n; i++ {
-		c := *(*uint8)(unsafe.Pointer(uintptr(ptr) + i))
-		result ^= uint32(c) // XOR with byte
-		result *= 16777619  // FNV prime
-	}
-	return result
-}
-
-// Also a FNV-1a hash.
-func hash64(ptr unsafe.Pointer, n uintptr, seed uintptr) uint64 {
-	var result uint64 = 14695981039346656037 ^ uint64(seed) // FNV offset basis
-	for i := uintptr(0); i < n; i++ {
-		c := *(*uint8)(unsafe.Pointer(uintptr(ptr) + i))
-		result ^= uint64(c)     // XOR with byte
-		result *= 1099511628211 // FNV prime
-	}
-	return result
-}
diff --git a/src/runtime/memhash_fnv.go b/src/runtime/memhash_fnv.go
new file mode 100644
index 00000000..d3bf370e
--- /dev/null
+++ b/src/runtime/memhash_fnv.go
@@ -0,0 +1,32 @@
+//go:build !runtime_memhash_tsip
+// +build !runtime_memhash_tsip
+
+package runtime
+
+import "unsafe"
+
+// Get FNV-1a hash of the given memory buffer.
+//
+// https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function#FNV-1a_hash
+func hash32(ptr unsafe.Pointer, n, seed uintptr) uint32 {
+	var result uint32 = 2166136261 // FNV offset basis
+	result *= uint32(seed)
+	for i := uintptr(0); i < n; i++ {
+		c := *(*uint8)(unsafe.Pointer(uintptr(ptr) + i))
+		result ^= uint32(c) // XOR with byte
+		result *= 16777619  // FNV prime
+	}
+	return result
+}
+
+// Also a FNV-1a hash.
+func hash64(ptr unsafe.Pointer, n, seed uintptr) uint64 {
+	var result uint64 = 14695981039346656037 // FNV offset basis
+	result *= uint64(seed)
+	for i := uintptr(0); i < n; i++ {
+		c := *(*uint8)(unsafe.Pointer(uintptr(ptr) + i))
+		result ^= uint64(c)     // XOR with byte
+		result *= 1099511628211 // FNV prime
+	}
+	return result
+}
diff --git a/src/runtime/memhash_tsip.go b/src/runtime/memhash_tsip.go
new file mode 100644
index 00000000..f470c08a
--- /dev/null
+++ b/src/runtime/memhash_tsip.go
@@ -0,0 +1,161 @@
+//go:build runtime_memhash_tsip
+// +build runtime_memhash_tsip
+
+package runtime
+
+import (
+	"encoding/binary"
+	"math/bits"
+	"unsafe"
+)
+
+func ptrToSlice(ptr unsafe.Pointer, n uintptr) []byte {
+	var p []byte
+
+	type _bslice struct {
+		ptr *byte
+		len uintptr
+		cap uintptr
+	}
+
+	pslice := (*_bslice)(unsafe.Pointer(&p))
+	pslice.ptr = (*byte)(ptr)
+	pslice.cap = n
+	pslice.len = n
+
+	return p
+}
+
+// tsip hash -- github.com/dgryski/tsip
+
+type sip struct {
+	v0, v1 uint64
+}
+
+func (s *sip) round() {
+	s.v0 += s.v1
+	s.v1 = bits.RotateLeft64(s.v1, 13) ^ s.v0
+	s.v0 = bits.RotateLeft64(s.v0, 35) + s.v1
+	s.v1 = bits.RotateLeft64(s.v1, 17) ^ s.v0
+	s.v0 = bits.RotateLeft64(s.v0, 21)
+}
+
+func hash64(ptr unsafe.Pointer, n uintptr, seed uintptr) uint64 {
+
+	p := ptrToSlice(ptr, n)
+
+	k0 := uint64(seed)
+	k1 := uint64(0)
+
+	s := sip{
+		v0: k0 ^ 0x736f6d6570736575,
+		v1: k1 ^ 0x646f72616e646f6d,
+	}
+
+	b := uint64(len(p)) << 56
+
+	for len(p) >= 8 {
+		m := binary.LittleEndian.Uint64(p[:8])
+		s.v1 ^= m
+		s.round()
+		s.v0 ^= m
+		p = p[8:]
+	}
+
+	switch len(p) {
+	case 7:
+		b |= uint64(p[6]) << 48
+		fallthrough
+	case 6:
+		b |= uint64(p[5]) << 40
+		fallthrough
+	case 5:
+		b |= uint64(p[4]) << 32
+		fallthrough
+	case 4:
+		b |= uint64(p[3]) << 24
+		fallthrough
+	case 3:
+		b |= uint64(p[2]) << 16
+		fallthrough
+	case 2:
+		b |= uint64(p[1]) << 8
+		fallthrough
+	case 1:
+		b |= uint64(p[0])
+	}
+
+	// last block
+	s.v1 ^= b
+	s.round()
+	s.v0 ^= b
+
+	// finalization
+	s.v1 ^= 0xff
+	s.round()
+	s.v1 = bits.RotateLeft64(s.v1, 32)
+	s.round()
+	s.v1 = bits.RotateLeft64(s.v1, 32)
+
+	return s.v0 ^ s.v1
+}
+
+type sip32 struct {
+	v0, v1 uint32
+}
+
+func (s *sip32) round() {
+	s.v0 += s.v1
+	s.v1 = bits.RotateLeft32(s.v1, 5) ^ s.v0
+	s.v0 = bits.RotateLeft32(s.v0, 8) + s.v1
+	s.v1 = bits.RotateLeft32(s.v1, 13) ^ s.v0
+	s.v0 = bits.RotateLeft32(s.v0, 7)
+}
+
+func hash32(ptr unsafe.Pointer, n uintptr, seed uintptr) uint32 {
+	// TODO(dgryski): replace this messiness with unsafe.Slice when we can use 1.17 features
+
+	p := ptrToSlice(ptr, n)
+
+	k0 := uint32(seed)
+	k1 := uint32(seed >> 32)
+
+	s := sip32{
+		v0: k0 ^ 0x74656462,
+		v1: k1 ^ 0x6c796765,
+	}
+	b := uint32(len(p)) << 24
+
+	for len(p) >= 4 {
+		m := binary.LittleEndian.Uint32(p[:4])
+		s.v1 ^= m
+		s.round()
+		s.v0 ^= m
+		p = p[4:]
+	}
+
+	switch len(p) {
+	case 3:
+		b |= uint32(p[2]) << 16
+		fallthrough
+	case 2:
+		b |= uint32(p[1]) << 8
+		fallthrough
+	case 1:
+		b |= uint32(p[0])
+	}
+
+	// last block
+	s.v1 ^= b
+	s.round()
+	s.v0 ^= b
+
+	// finalization
+	s.v1 ^= 0xff
+	s.round()
+	s.v1 = bits.RotateLeft32(s.v1, 16)
+	s.round()
+	s.v1 = bits.RotateLeft32(s.v1, 16)
+
+	return s.v0 ^ s.v1
+}
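
Note on the TODO in hash32 above: ptrToSlice exists only because unsafe.Slice is not yet available to this code. As a minimal sketch (assuming the runtime can eventually rely on Go 1.17 language features), the helper reduces to a single call:

func ptrToSlice(ptr unsafe.Pointer, n uintptr) []byte {
	// unsafe.Slice accepts any integer type for the length,
	// so n can be passed as a uintptr directly.
	return unsafe.Slice((*byte)(ptr), n)
}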
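
The backend is chosen at build time: memhash_fnv.go is the default, and memhash_tsip.go is compiled in only when the runtime_memhash_tsip build tag is set (for example via a -tags=runtime_memhash_tsip build flag, assuming the usual -tags mechanism). The sketch below is a hypothetical consistency check, not part of this patch; the file name, test name, and buffer contents are made up, and it assumes a testing.T-based test can be run against package runtime.

// memhash_sketch_test.go (hypothetical)
package runtime

import (
	"testing"
	"unsafe"
)

func TestHash64Sketch(t *testing.T) {
	buf := []byte("hello, hash")
	dup := []byte("hello, hash")

	// Equal contents with the same seed must hash identically,
	// regardless of which backend was compiled in.
	h1 := hash64(unsafe.Pointer(&buf[0]), uintptr(len(buf)), 1)
	h2 := hash64(unsafe.Pointer(&dup[0]), uintptr(len(dup)), 1)
	if h1 != h2 {
		t.Errorf("equal inputs hashed differently: %#x != %#x", h1, h2)
	}

	// A different seed should (with overwhelming probability) change the result.
	h3 := hash64(unsafe.Pointer(&buf[0]), uintptr(len(buf)), 2)
	if h1 == h3 {
		t.Errorf("seed change did not affect hash: %#x", h1)
	}
}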