all: use unsafe.Add instead of unsafe.Pointer(uintptr(...) + ...)
We have an optimization for this specific pattern, but it's really just a hack. With the addition of unsafe.Add in Go 1.17, we can express the intent directly and eventually remove this special case. The resulting code is also easier to read.
This commit is contained in:
parent d98c0afbab
commit 4ec1e58aa6
29 changed files with 84 additions and 95 deletions
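
The change is mechanical: every occurrence of the unsafe.Pointer(uintptr(p) + offset) round-trip is rewritten as unsafe.Add(p, offset), which advances a pointer by a byte offset and is available since Go 1.17. A minimal standalone sketch of the two forms (the buffer and index below are illustrative and not taken from any file in this diff):

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	buf := [4]uint32{10, 20, 30, 40}
	base := unsafe.Pointer(&buf[0])
	i := uintptr(2)

	// Old pattern: convert to uintptr, add a byte offset, convert back.
	// This is the form the TinyGo compiler special-cased as an optimization.
	oldPtr := (*uint32)(unsafe.Pointer(uintptr(base) + i*unsafe.Sizeof(buf[0])))

	// New pattern (Go 1.17+): unsafe.Add advances the pointer by a byte count
	// and states the intent directly.
	newPtr := (*uint32)(unsafe.Add(base, i*unsafe.Sizeof(buf[0])))

	fmt.Println(*oldPtr, *newPtr) // both print 30
}
```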
@@ -143,13 +143,13 @@ func (p Pin) configure(config PinConfig, signal uint32) {
 // outFunc returns the FUNCx_OUT_SEL_CFG register used for configuring the
 // output function selection.
 func (p Pin) outFunc() *volatile.Register32 {
-	return (*volatile.Register32)(unsafe.Pointer((uintptr(unsafe.Pointer(&esp.GPIO.FUNC0_OUT_SEL_CFG)) + uintptr(p)*4)))
+	return (*volatile.Register32)(unsafe.Add(unsafe.Pointer(&esp.GPIO.FUNC0_OUT_SEL_CFG), uintptr(p)*4))
 }
 
 // inFunc returns the FUNCy_IN_SEL_CFG register used for configuring the input
 // function selection.
 func inFunc(signal uint32) *volatile.Register32 {
-	return (*volatile.Register32)(unsafe.Pointer((uintptr(unsafe.Pointer(&esp.GPIO.FUNC0_IN_SEL_CFG)) + uintptr(signal)*4)))
+	return (*volatile.Register32)(unsafe.Add(unsafe.Pointer(&esp.GPIO.FUNC0_IN_SEL_CFG), uintptr(signal)*4))
 }
 
 // Set the pin to high or low.

@@ -108,24 +108,24 @@ func (p Pin) Configure(config PinConfig) {
 // outFunc returns the FUNCx_OUT_SEL_CFG register used for configuring the
 // output function selection.
 func (p Pin) outFunc() *volatile.Register32 {
-	return (*volatile.Register32)(unsafe.Pointer((uintptr(unsafe.Pointer(&esp.GPIO.FUNC0_OUT_SEL_CFG)) + uintptr(p)*4)))
+	return (*volatile.Register32)(unsafe.Add(unsafe.Pointer(&esp.GPIO.FUNC0_OUT_SEL_CFG), uintptr(p)*4))
 }
 
 // inFunc returns the FUNCy_IN_SEL_CFG register used for configuring the input
 // function selection.
 func inFunc(signal uint32) *volatile.Register32 {
-	return (*volatile.Register32)(unsafe.Pointer((uintptr(unsafe.Pointer(&esp.GPIO.FUNC0_IN_SEL_CFG)) + uintptr(signal)*4)))
+	return (*volatile.Register32)(unsafe.Add(unsafe.Pointer(&esp.GPIO.FUNC0_IN_SEL_CFG), uintptr(signal)*4))
 }
 
 // mux returns the I/O mux configuration register corresponding to the given
 // GPIO pin.
 func (p Pin) mux() *volatile.Register32 {
-	return (*volatile.Register32)(unsafe.Pointer((uintptr(unsafe.Pointer(&esp.IO_MUX.GPIO0)) + uintptr(p)*4)))
+	return (*volatile.Register32)(unsafe.Add(unsafe.Pointer(&esp.IO_MUX.GPIO0), uintptr(p)*4))
 }
 
 // pin returns the PIN register corresponding to the given GPIO pin.
 func (p Pin) pin() *volatile.Register32 {
-	return (*volatile.Register32)(unsafe.Pointer((uintptr(unsafe.Pointer(&esp.GPIO.PIN0)) + uintptr(p)*4)))
+	return (*volatile.Register32)(unsafe.Add(unsafe.Pointer(&esp.GPIO.PIN0), uintptr(p)*4))
 }
 
 // Set the pin to high or low.

@@ -50,7 +50,7 @@ type pwmGroup struct {
 //
 // 0x14 is the size of a pwmGroup.
 func getPWMGroup(index uintptr) *pwmGroup {
-	return (*pwmGroup)(unsafe.Pointer(uintptr(unsafe.Pointer(rp.PWM)) + 0x14*index))
+	return (*pwmGroup)(unsafe.Add(unsafe.Pointer(rp.PWM), 0x14*index))
 }
 
 // Hardware Pulse Width Modulation (PWM) API

@@ -31,8 +31,8 @@ func Swapper(slice interface{}) func(i, j int) {
 		if uint(i) >= uint(header.len) || uint(j) >= uint(header.len) {
 			panic("reflect: slice index out of range")
 		}
-		val1 := unsafe.Pointer(uintptr(header.data) + uintptr(i)*size)
-		val2 := unsafe.Pointer(uintptr(header.data) + uintptr(j)*size)
+		val1 := unsafe.Add(header.data, uintptr(i)*size)
+		val2 := unsafe.Add(header.data, uintptr(j)*size)
 		memcpy(tmp, val1, size)
 		memcpy(val1, val2, size)
 		memcpy(val2, tmp, size)

@@ -81,7 +81,7 @@ func valueInterfaceUnsafe(v Value) interface{} {
 		// value.
 		var value uintptr
 		for j := v.typecode.Size(); j != 0; j-- {
-			value = (value << 8) | uintptr(*(*uint8)(unsafe.Pointer(uintptr(v.value) + j - 1)))
+			value = (value << 8) | uintptr(*(*uint8)(unsafe.Add(v.value, j-1)))
 		}
 		v.value = unsafe.Pointer(value)
 	}
@@ -573,7 +573,7 @@ func (v Value) Field(i int) Value {
 		return Value{
 			flags: flags,
 			typecode: fieldType,
-			value: unsafe.Pointer(uintptr(v.value) + structField.Offset),
+			value: unsafe.Add(v.value, structField.Offset),
 		}
 	}
 
@@ -588,7 +588,7 @@ func (v Value) Field(i int) Value {
 		return Value{
 			flags: flags,
 			typecode: fieldType,
-			value: unsafe.Pointer(uintptr(0)),
+			value: unsafe.Pointer(nil),
 		}
 	}
 
@@ -596,7 +596,7 @@ func (v Value) Field(i int) Value {
 	// The value was not stored in the interface before but will be
 	// afterwards, so load the value (from the correct offset) and return
 	// it.
-	ptr := unsafe.Pointer(uintptr(v.value) + structField.Offset)
+	ptr := unsafe.Add(v.value, structField.Offset)
 	value := unsafe.Pointer(loadValue(ptr, fieldSize))
 	return Value{
 		flags: flags &^ valueFlagIndirect,
@@ -629,8 +629,7 @@ func (v Value) Index(i int) Value {
 			typecode: v.typecode.elem(),
 			flags: v.flags | valueFlagIndirect,
 		}
-		addr := uintptr(slice.data) + elem.typecode.Size()*uintptr(i) // pointer to new value
-		elem.value = unsafe.Pointer(addr)
+		elem.value = unsafe.Add(slice.data, elem.typecode.Size()*uintptr(i)) // pointer to new value
 		return elem
 	case String:
 		// Extract a character from a string.
@@ -645,7 +644,7 @@ func (v Value) Index(i int) Value {
 		}
 		return Value{
 			typecode: uint8Type,
-			value: unsafe.Pointer(uintptr(*(*uint8)(unsafe.Pointer(uintptr(s.data) + uintptr(i))))),
+			value: unsafe.Pointer(uintptr(*(*uint8)(unsafe.Add(s.data, i)))),
 			flags: v.flags & valueFlagExported,
 		}
 	case Array:
@@ -665,18 +664,18 @@ func (v Value) Index(i int) Value {
 			// indirect. Also, because size != 0 this implies that the array
 			// length must be != 0, and thus that the total size is at least
 			// elemSize.
-			addr := uintptr(v.value) + elemSize*uintptr(i) // pointer to new value
+			addr := unsafe.Add(v.value, elemSize*uintptr(i)) // pointer to new value
 			return Value{
 				typecode: v.typecode.elem(),
 				flags: v.flags,
-				value: unsafe.Pointer(addr),
+				value: addr,
 			}
 		}
 
 		if size > unsafe.Sizeof(uintptr(0)) || v.isIndirect() {
 			// The element fits in a pointer, but the array is not stored in the pointer directly.
 			// Load the value from the pointer.
-			addr := unsafe.Pointer(uintptr(v.value) + elemSize*uintptr(i)) // pointer to new value
+			addr := unsafe.Add(v.value, elemSize*uintptr(i)) // pointer to new value
 			value := addr
 			if !v.isIndirect() {
 				// Use a pointer to the value (don't load the value) if the
@@ -713,7 +712,7 @@ func loadValue(ptr unsafe.Pointer, size uintptr) uintptr {
 	for i := uintptr(0); i < size; i++ {
 		loadedValue |= uintptr(*(*byte)(ptr)) << shift
 		shift += 8
-		ptr = unsafe.Pointer(uintptr(ptr) + 1)
+		ptr = unsafe.Add(ptr, 1)
 	}
 	return loadedValue
 }

@@ -243,13 +243,8 @@ func (ch *channel) push(value unsafe.Pointer) bool {
 
 	// copy value to buffer
 	memcpy(
-		unsafe.Pointer( // pointer to the base of the buffer + offset = pointer to destination element
-			uintptr(ch.buf)+
-				uintptr( // element size * equivalent slice index = offset
-					ch.elementSize* // element size (bytes)
-						ch.bufHead, // index of first available buffer entry
-				),
-		),
+		unsafe.Add(ch.buf, // pointer to the base of the buffer + offset = pointer to destination element
+			ch.elementSize*ch.bufHead), // element size * equivalent slice index = offset
 		value,
 		ch.elementSize,
 	)
@@ -274,7 +269,7 @@ func (ch *channel) pop(value unsafe.Pointer) bool {
 	}
 
 	// compute address of source
-	addr := unsafe.Pointer(uintptr(ch.buf) + (ch.elementSize * ch.bufTail))
+	addr := unsafe.Add(ch.buf, (ch.elementSize * ch.bufTail))
 
 	// copy value from buffer
 	memcpy(

@@ -43,9 +43,9 @@ func dynamicLoader(base uintptr, dyn *dyn64) {
 			relasz = uint64(dyn.Val) / uint64(unsafe.Sizeof(rela64{}))
 		}
 
-		ptr := uintptr(unsafe.Pointer(dyn))
-		ptr += unsafe.Sizeof(dyn64{})
-		dyn = (*dyn64)(unsafe.Pointer(ptr))
+		ptr := unsafe.Pointer(dyn)
+		ptr = unsafe.Add(ptr, unsafe.Sizeof(dyn64{}))
+		dyn = (*dyn64)(ptr)
 	}
 
 	if rela == nil {
@@ -70,9 +70,9 @@ func dynamicLoader(base uintptr, dyn *dyn64) {
 			}
 		}
 
-		rptr := uintptr(unsafe.Pointer(rela))
-		rptr += unsafe.Sizeof(rela64{})
-		rela = (*rela64)(unsafe.Pointer(rptr))
+		rptr := unsafe.Pointer(rela)
+		rptr = unsafe.Add(rptr, unsafe.Sizeof(rela64{}))
+		rela = (*rela64)(rptr)
 		relasz--
 	}
 }

@@ -146,7 +146,7 @@ func (b gcBlock) findNext() gcBlock {
 
 // State returns the current block state.
 func (b gcBlock) state() blockState {
-	stateBytePtr := (*uint8)(unsafe.Pointer(uintptr(metadataStart) + uintptr(b/blocksPerStateByte)))
+	stateBytePtr := (*uint8)(unsafe.Add(metadataStart, b/blocksPerStateByte))
 	return blockState(*stateBytePtr>>((b%blocksPerStateByte)*stateBits)) & blockStateMask
 }
 
@@ -154,7 +154,7 @@ func (b gcBlock) state() blockState {
 // bits than the current state. Allowed transitions: from free to any state and
 // from head to mark.
 func (b gcBlock) setState(newState blockState) {
-	stateBytePtr := (*uint8)(unsafe.Pointer(uintptr(metadataStart) + uintptr(b/blocksPerStateByte)))
+	stateBytePtr := (*uint8)(unsafe.Add(metadataStart, b/blocksPerStateByte))
 	*stateBytePtr |= uint8(newState << ((b % blocksPerStateByte) * stateBits))
 	if gcAsserts && b.state() != newState {
 		runtimePanic("gc: setState() was not successful")
@@ -163,7 +163,7 @@ func (b gcBlock) setState(newState blockState) {
 
 // markFree sets the block state to free, no matter what state it was in before.
 func (b gcBlock) markFree() {
-	stateBytePtr := (*uint8)(unsafe.Pointer(uintptr(metadataStart) + uintptr(b/blocksPerStateByte)))
+	stateBytePtr := (*uint8)(unsafe.Add(metadataStart, b/blocksPerStateByte))
 	*stateBytePtr &^= uint8(blockStateMask << ((b % blocksPerStateByte) * stateBits))
 	if gcAsserts && b.state() != blockStateFree {
 		runtimePanic("gc: markFree() was not successful")
@@ -180,7 +180,7 @@ func (b gcBlock) unmark() {
 		runtimePanic("gc: unmark() on a block that is not marked")
 	}
 	clearMask := blockStateMask ^ blockStateHead // the bits to clear from the state
-	stateBytePtr := (*uint8)(unsafe.Pointer(uintptr(metadataStart) + uintptr(b/blocksPerStateByte)))
+	stateBytePtr := (*uint8)(unsafe.Add(metadataStart, b/blocksPerStateByte))
 	*stateBytePtr &^= uint8(clearMask << ((b % blocksPerStateByte) * stateBits))
 	if gcAsserts && b.state() != blockStateHead {
 		runtimePanic("gc: unmark() was not successful")

@@ -169,8 +169,7 @@ func hashmapSet(m *hashmap, key unsafe.Pointer, value unsafe.Pointer, hash uint3
 	numBuckets := uintptr(1) << m.bucketBits
 	bucketNumber := (uintptr(hash) & (numBuckets - 1))
 	bucketSize := unsafe.Sizeof(hashmapBucket{}) + m.keySize*8 + m.valueSize*8
-	bucketAddr := uintptr(m.buckets) + bucketSize*bucketNumber
-	bucket := (*hashmapBucket)(unsafe.Pointer(bucketAddr))
+	bucket := (*hashmapBucket)(unsafe.Add(m.buckets, bucketSize*bucketNumber))
 	var lastBucket *hashmapBucket
 
 	// See whether the key already exists somewhere.
@@ -180,9 +179,9 @@ func hashmapSet(m *hashmap, key unsafe.Pointer, value unsafe.Pointer, hash uint3
 	for bucket != nil {
 		for i := uintptr(0); i < 8; i++ {
 			slotKeyOffset := unsafe.Sizeof(hashmapBucket{}) + m.keySize*uintptr(i)
-			slotKey := unsafe.Pointer(uintptr(unsafe.Pointer(bucket)) + slotKeyOffset)
+			slotKey := unsafe.Add(unsafe.Pointer(bucket), slotKeyOffset)
 			slotValueOffset := unsafe.Sizeof(hashmapBucket{}) + m.keySize*8 + m.valueSize*uintptr(i)
-			slotValue := unsafe.Pointer(uintptr(unsafe.Pointer(bucket)) + slotValueOffset)
+			slotValue := unsafe.Add(unsafe.Pointer(bucket), slotValueOffset)
 			if bucket.tophash[i] == 0 && emptySlotKey == nil {
 				// Found an empty slot, store it for if we couldn't find an
 				// existing slot.
@@ -225,9 +224,9 @@ func hashmapInsertIntoNewBucket(m *hashmap, key, value unsafe.Pointer, tophash u
 	bucketBuf := alloc(bucketBufSize, nil)
 	// Insert into the first slot, which is empty as it has just been allocated.
 	slotKeyOffset := unsafe.Sizeof(hashmapBucket{})
-	slotKey := unsafe.Pointer(uintptr(bucketBuf) + slotKeyOffset)
+	slotKey := unsafe.Add(bucketBuf, slotKeyOffset)
 	slotValueOffset := unsafe.Sizeof(hashmapBucket{}) + m.keySize*8
-	slotValue := unsafe.Pointer(uintptr(bucketBuf) + slotValueOffset)
+	slotValue := unsafe.Add(bucketBuf, slotValueOffset)
 	m.count++
 	memcpy(slotKey, key, m.keySize)
 	memcpy(slotValue, value, m.valueSize)
@@ -276,8 +275,7 @@ func hashmapGet(m *hashmap, key, value unsafe.Pointer, valueSize uintptr, hash u
 	numBuckets := uintptr(1) << m.bucketBits
 	bucketNumber := (uintptr(hash) & (numBuckets - 1))
 	bucketSize := unsafe.Sizeof(hashmapBucket{}) + m.keySize*8 + m.valueSize*8
-	bucketAddr := uintptr(m.buckets) + bucketSize*bucketNumber
-	bucket := (*hashmapBucket)(unsafe.Pointer(bucketAddr))
+	bucket := (*hashmapBucket)(unsafe.Add(m.buckets, bucketSize*bucketNumber))
 
 	tophash := uint8(hash >> 24)
 	if tophash < 1 {
@@ -289,9 +287,9 @@ func hashmapGet(m *hashmap, key, value unsafe.Pointer, valueSize uintptr, hash u
 	for bucket != nil {
 		for i := uintptr(0); i < 8; i++ {
 			slotKeyOffset := unsafe.Sizeof(hashmapBucket{}) + m.keySize*uintptr(i)
-			slotKey := unsafe.Pointer(uintptr(unsafe.Pointer(bucket)) + slotKeyOffset)
+			slotKey := unsafe.Add(unsafe.Pointer(bucket), slotKeyOffset)
 			slotValueOffset := unsafe.Sizeof(hashmapBucket{}) + m.keySize*8 + m.valueSize*uintptr(i)
-			slotValue := unsafe.Pointer(uintptr(unsafe.Pointer(bucket)) + slotValueOffset)
+			slotValue := unsafe.Add(unsafe.Pointer(bucket), slotValueOffset)
 			if bucket.tophash[i] == tophash {
 				// This could be the key we're looking for.
 				if m.keyEqual(key, slotKey, m.keySize) {
@@ -327,8 +325,7 @@ func hashmapDelete(m *hashmap, key unsafe.Pointer, hash uint32) {
 	numBuckets := uintptr(1) << m.bucketBits
 	bucketNumber := (uintptr(hash) & (numBuckets - 1))
 	bucketSize := unsafe.Sizeof(hashmapBucket{}) + m.keySize*8 + m.valueSize*8
-	bucketAddr := uintptr(m.buckets) + bucketSize*bucketNumber
-	bucket := (*hashmapBucket)(unsafe.Pointer(bucketAddr))
+	bucket := (*hashmapBucket)(unsafe.Add(m.buckets, bucketSize*bucketNumber))
 
 	tophash := uint8(hash >> 24)
 	if tophash < 1 {
@@ -340,7 +337,7 @@ func hashmapDelete(m *hashmap, key unsafe.Pointer, hash uint32) {
 	for bucket != nil {
 		for i := uintptr(0); i < 8; i++ {
 			slotKeyOffset := unsafe.Sizeof(hashmapBucket{}) + m.keySize*uintptr(i)
-			slotKey := unsafe.Pointer(uintptr(unsafe.Pointer(bucket)) + slotKeyOffset)
+			slotKey := unsafe.Add(unsafe.Pointer(bucket), slotKeyOffset)
 			if bucket.tophash[i] == tophash {
 				// This could be the key we're looking for.
 				if m.keyEqual(key, slotKey, m.keySize) {
@@ -382,8 +379,7 @@ func hashmapNext(m *hashmap, it *hashmapIterator, key, value unsafe.Pointer) boo
 				return false
 			}
 			bucketSize := unsafe.Sizeof(hashmapBucket{}) + m.keySize*8 + m.valueSize*8
-			bucketAddr := uintptr(it.buckets) + bucketSize*it.bucketNumber
-			it.bucket = (*hashmapBucket)(unsafe.Pointer(bucketAddr))
+			it.bucket = (*hashmapBucket)(unsafe.Add(it.buckets, bucketSize*it.bucketNumber))
 			it.bucketNumber++ // next bucket
 		}
 		if it.bucket.tophash[it.bucketIndex] == 0 {
@@ -392,16 +388,15 @@ func hashmapNext(m *hashmap, it *hashmapIterator, key, value unsafe.Pointer) boo
 			continue
 		}
 
-		bucketAddr := uintptr(unsafe.Pointer(it.bucket))
 		slotKeyOffset := unsafe.Sizeof(hashmapBucket{}) + m.keySize*uintptr(it.bucketIndex)
-		slotKey := unsafe.Pointer(bucketAddr + slotKeyOffset)
+		slotKey := unsafe.Add(unsafe.Pointer(it.bucket), slotKeyOffset)
 		memcpy(key, slotKey, m.keySize)
 
 		if it.buckets == m.buckets {
 			// Our view of the buckets is the same as the parent map.
 			// Just copy the value we have
 			slotValueOffset := unsafe.Sizeof(hashmapBucket{}) + m.keySize*8 + m.valueSize*uintptr(it.bucketIndex)
-			slotValue := unsafe.Pointer(bucketAddr + slotValueOffset)
+			slotValue := unsafe.Add(unsafe.Pointer(it.bucket), slotValueOffset)
 			memcpy(value, slotValue, m.valueSize)
 			it.bucketIndex++
 		} else {
@@ -574,10 +569,10 @@ func hashmapInterfaceHash(itf interface{}, seed uintptr) uint32 {
 	case reflect.Float64:
 		return hashmapFloat64Hash(ptr, seed)
 	case reflect.Complex64:
-		rptr, iptr := ptr, unsafe.Pointer(uintptr(ptr)+4)
+		rptr, iptr := ptr, unsafe.Add(ptr, 4)
 		return hashmapFloat32Hash(rptr, seed) ^ hashmapFloat32Hash(iptr, seed)
 	case reflect.Complex128:
-		rptr, iptr := ptr, unsafe.Pointer(uintptr(ptr)+8)
+		rptr, iptr := ptr, unsafe.Add(ptr, 8)
 		return hashmapFloat64Hash(rptr, seed) ^ hashmapFloat64Hash(iptr, seed)
 	case reflect.String:
 		return hashmapStringHash(x.String(), seed)

@@ -34,7 +34,7 @@ func (i Interrupt) Enable() error {
 	esp.INTERRUPT_CORE0.CPU_INT_TYPE.SetBits(1 << i.num)
 
 	// Set default threshold to defaultThreshold
-	reg := (*volatile.Register32)(unsafe.Pointer((uintptr(unsafe.Pointer(&esp.INTERRUPT_CORE0.CPU_INT_PRI_0)) + uintptr(i.num)*4)))
+	reg := (*volatile.Register32)(unsafe.Add(unsafe.Pointer(&esp.INTERRUPT_CORE0.CPU_INT_PRI_0), i.num*4))
 	reg.Set(defaultThreshold)
 
 	// Reset interrupt before reenabling
@@ -171,7 +171,7 @@ func handleInterrupt() {
 		mepc := riscv.MEPC.Get()
 		// Useing threshold to temporary disable this interrupts.
 		// FYI: using CPU interrupt enable bit make runtime to loose interrupts.
-		reg := (*volatile.Register32)(unsafe.Pointer((uintptr(unsafe.Pointer(&esp.INTERRUPT_CORE0.CPU_INT_PRI_0)) + uintptr(interruptNumber)*4)))
+		reg := (*volatile.Register32)(unsafe.Add(unsafe.Pointer(&esp.INTERRUPT_CORE0.CPU_INT_PRI_0), interruptNumber*4))
 		thresholdSave := reg.Get()
 		reg.Set(disableThreshold)
 		riscv.Asm("fence")

@@ -11,7 +11,7 @@ func hash32(ptr unsafe.Pointer, n, seed uintptr) uint32 {
 	var result uint32 = 2166136261 // FNV offset basis
 	result *= uint32(seed)
 	for i := uintptr(0); i < n; i++ {
-		c := *(*uint8)(unsafe.Pointer(uintptr(ptr) + i))
+		c := *(*uint8)(unsafe.Add(ptr, i))
 		result ^= uint32(c) // XOR with byte
 		result *= 16777619 // FNV prime
 	}
@@ -23,7 +23,7 @@ func hash64(ptr unsafe.Pointer, n, seed uintptr) uint64 {
 	var result uint64 = 14695981039346656037 // FNV offset basis
 	result *= uint64(seed)
 	for i := uintptr(0); i < n; i++ {
-		c := *(*uint8)(unsafe.Pointer(uintptr(ptr) + i))
+		c := *(*uint8)(unsafe.Add(ptr, i))
 		result ^= uint64(c) // XOR with byte
 		result *= 1099511628211 // FNV prime
 	}

@@ -109,6 +109,6 @@ func markGlobals() {
 
 		// Move on to the next load command (wich may or may not be a
 		// LC_SEGMENT_64).
-		cmd = (*segmentLoadCommand)(unsafe.Pointer(uintptr(unsafe.Pointer(cmd)) + uintptr(cmd.cmdsize)))
+		cmd = (*segmentLoadCommand)(unsafe.Add(unsafe.Pointer(cmd), cmd.cmdsize))
 	}
 }

@@ -109,7 +109,7 @@ func markGlobals() {
 				markRoots(start, end)
 			}
 		}
-		headerPtr = unsafe.Pointer(uintptr(headerPtr) + uintptr(ehdr_start.phentsize))
+		headerPtr = unsafe.Add(headerPtr, ehdr_start.phentsize)
 	}
 }
 

@@ -73,7 +73,7 @@ func markGlobals() {
 	}
 
 	// Find the PE header at offset 0x3C.
-	pe := (*peHeader)(unsafe.Pointer(uintptr(unsafe.Pointer(module)) + uintptr(module.peHeader)))
+	pe := (*peHeader)(unsafe.Add(unsafe.Pointer(module), module.peHeader))
 	if gcAsserts && pe.magic != 0x00004550 { // 0x4550 is "PE"
 		runtimePanic("cannot find PE header")
 	}
@@ -87,7 +87,7 @@ func markGlobals() {
 			end := uintptr(unsafe.Pointer(module)) + uintptr(section.virtualAddress) + uintptr(section.virtualSize)
 			markRoots(start, end)
 		}
-		section = (*peSection)(unsafe.Pointer(uintptr(unsafe.Pointer(section)) + unsafe.Sizeof(peSection{})))
+		section = (*peSection)(unsafe.Add(unsafe.Pointer(section), unsafe.Sizeof(peSection{})))
 	}
 }
 

@@ -57,8 +57,8 @@ func malloc(size uintptr) unsafe.Pointer
 // Compare two same-size buffers for equality.
 func memequal(x, y unsafe.Pointer, n uintptr) bool {
 	for i := uintptr(0); i < n; i++ {
-		cx := *(*uint8)(unsafe.Pointer(uintptr(x) + i))
-		cy := *(*uint8)(unsafe.Pointer(uintptr(y) + i))
+		cx := *(*uint8)(unsafe.Add(x, i))
+		cy := *(*uint8)(unsafe.Add(y, i))
 		if cx != cy {
 			return false
 		}

@@ -54,7 +54,7 @@ func preinit() {
 	ptr := unsafe.Pointer(&_sbss)
 	for ptr != unsafe.Pointer(&_ebss) {
 		*(*uint32)(ptr) = 0
-		ptr = unsafe.Pointer(uintptr(ptr) + 4)
+		ptr = unsafe.Add(ptr, 4)
 	}
 
 	// Initialize .data: global variables initialized from flash.
@@ -62,8 +62,8 @@ func preinit() {
 	dst := unsafe.Pointer(&_sdata)
 	for dst != unsafe.Pointer(&_edata) {
 		*(*uint32)(dst) = *(*uint32)(src)
-		dst = unsafe.Pointer(uintptr(dst) + 4)
-		src = unsafe.Pointer(uintptr(src) + 4)
+		dst = unsafe.Add(dst, 4)
+		src = unsafe.Add(src, 4)
 	}
 }
 

@@ -52,7 +52,7 @@ func preinit() {
 	ptr := unsafe.Pointer(&_sbss)
 	for ptr != unsafe.Pointer(&_ebss) {
 		*(*uint8)(ptr) = 0
-		ptr = unsafe.Pointer(uintptr(ptr) + 1)
+		ptr = unsafe.Add(ptr, 1)
 	}
 }
 

@@ -26,7 +26,7 @@ func preinit() {
 	ptr := unsafe.Pointer(&_sbss)
 	for ptr != unsafe.Pointer(&_ebss) {
 		*(*uint32)(ptr) = 0
-		ptr = unsafe.Pointer(uintptr(ptr) + 4)
+		ptr = unsafe.Add(ptr, 4)
 	}
 
 	// Initialize .data: global variables initialized from flash.
@@ -34,8 +34,8 @@ func preinit() {
 	dst := unsafe.Pointer(&_sdata)
 	for dst != unsafe.Pointer(&_edata) {
 		*(*uint32)(dst) = *(*uint32)(src)
-		dst = unsafe.Pointer(uintptr(dst) + 4)
-		src = unsafe.Pointer(uintptr(src) + 4)
+		dst = unsafe.Add(dst, 4)
+		src = unsafe.Add(src, 4)
 	}
 }
 

@@ -78,7 +78,7 @@ func interruptInit() {
 	priReg := &esp.INTERRUPT_CORE0.CPU_INT_PRI_1
 	for i := 0; i < 31; i++ {
 		priReg.Set(0)
-		priReg = (*volatile.Register32)(unsafe.Pointer(uintptr(unsafe.Pointer(priReg)) + uintptr(4)))
+		priReg = (*volatile.Register32)(unsafe.Add(unsafe.Pointer(priReg), 4))
 	}
 
 	// default threshold for interrupts is 5

@@ -32,7 +32,7 @@ func clearbss() {
 	ptr := unsafe.Pointer(&_sbss)
 	for ptr != unsafe.Pointer(&_ebss) {
 		*(*uint32)(ptr) = 0
-		ptr = unsafe.Pointer(uintptr(ptr) + 4)
+		ptr = unsafe.Add(ptr, 4)
 	}
 }
 

@@ -76,7 +76,7 @@ func preinit() {
 	ptr := unsafe.Pointer(&_sbss)
 	for ptr != unsafe.Pointer(&_ebss) {
 		*(*uint32)(ptr) = 0
-		ptr = unsafe.Pointer(uintptr(ptr) + 4)
+		ptr = unsafe.Add(ptr, 4)
 	}
 }
 

@@ -109,7 +109,7 @@ func write(fd int32, buf *byte, count int) int {
 	// TODO: Proper handling write
 	for i := 0; i < count; i++ {
 		putchar(*buf)
-		buf = (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(buf)) + 1))
+		buf = (*byte)(unsafe.Add(unsafe.Pointer(buf), 1))
 	}
 	return count
 }

@@ -24,7 +24,7 @@ func preinit() {
 	ptr := unsafe.Pointer(&_sbss)
 	for ptr != unsafe.Pointer(&_ebss) {
 		*(*uint32)(ptr) = 0
-		ptr = unsafe.Pointer(uintptr(ptr) + 4)
+		ptr = unsafe.Add(ptr, 4)
 	}
 
 	// Initialize .data: global variables initialized from flash.
@@ -32,7 +32,7 @@ func preinit() {
 	dst := unsafe.Pointer(&_sdata)
 	for dst != unsafe.Pointer(&_edata) {
 		*(*uint32)(dst) = *(*uint32)(src)
-		dst = unsafe.Pointer(uintptr(dst) + 4)
-		src = unsafe.Pointer(uintptr(src) + 4)
+		dst = unsafe.Add(dst, 4)
+		src = unsafe.Add(src, 4)
 	}
 }
 

@@ -24,7 +24,7 @@ func preinit() {
 	ptr := unsafe.Pointer(&_sbss)
 	for ptr != unsafe.Pointer(&_ebss) {
 		*(*uint64)(ptr) = 0
-		ptr = unsafe.Pointer(uintptr(ptr) + 8)
+		ptr = unsafe.Add(ptr, 8)
 	}
 
 	// Initialize .data: global variables initialized from flash.
@@ -32,7 +32,7 @@ func preinit() {
 	dst := unsafe.Pointer(&_sdata)
 	for dst != unsafe.Pointer(&_edata) {
 		*(*uint64)(dst) = *(*uint64)(src)
-		dst = unsafe.Pointer(uintptr(dst) + 8)
-		src = unsafe.Pointer(uintptr(src) + 8)
+		dst = unsafe.Add(dst, 8)
+		src = unsafe.Add(src, 8)
	}
}
 

@@ -106,7 +106,7 @@ func os_runtime_args() []string {
 			arg.length = length
 			arg.ptr = (*byte)(*argv)
 			// This is the Go equivalent of "argv++" in C.
-			argv = (*unsafe.Pointer)(unsafe.Pointer(uintptr(unsafe.Pointer(argv)) + unsafe.Sizeof(argv)))
+			argv = (*unsafe.Pointer)(unsafe.Add(unsafe.Pointer(argv), unsafe.Sizeof(argv)))
 		}
 	}
 	return args
@@ -129,7 +129,7 @@ func syscall_runtime_envs() []string {
 	numEnvs := 0
 	for *env != nil {
 		numEnvs++
-		env = (*unsafe.Pointer)(unsafe.Pointer(uintptr(unsafe.Pointer(env)) + unsafe.Sizeof(environ)))
+		env = (*unsafe.Pointer)(unsafe.Add(unsafe.Pointer(env), unsafe.Sizeof(environ)))
 	}
 
 	// Create a string slice of all environment variables.
@@ -144,7 +144,7 @@ func syscall_runtime_envs() []string {
 			length: length,
 		}
 		envs = append(envs, *(*string)(unsafe.Pointer(&s)))
-		env = (*unsafe.Pointer)(unsafe.Pointer(uintptr(unsafe.Pointer(env)) + unsafe.Sizeof(environ)))
+		env = (*unsafe.Pointer)(unsafe.Add(unsafe.Pointer(env), unsafe.Sizeof(environ)))
 	}
 
 	return envs

@@ -85,7 +85,7 @@ func os_runtime_args() []string {
 			arg.length = length
 			arg.ptr = (*byte)(*argv)
 			// This is the Go equivalent of "argv++" in C.
-			argv = (*unsafe.Pointer)(unsafe.Pointer(uintptr(unsafe.Pointer(argv)) + unsafe.Sizeof(argv)))
+			argv = (*unsafe.Pointer)(unsafe.Add(unsafe.Pointer(argv), unsafe.Sizeof(argv)))
 		}
 	}
 	return args

@@ -37,7 +37,7 @@ func sliceAppend(srcBuf, elemsBuf unsafe.Pointer, srcLen, srcCap, elemsLen uintp
 	}
 
 	// The slice fits (after possibly allocating a new one), append it in-place.
-	memmove(unsafe.Pointer(uintptr(srcBuf)+srcLen*elemSize), elemsBuf, elemsLen*elemSize)
+	memmove(unsafe.Add(srcBuf, srcLen*elemSize), elemsBuf, elemsLen*elemSize)
 	return srcBuf, srcLen + elemsLen, srcCap
 }
 

@@ -61,7 +61,7 @@ func stringConcat(x, y _string) _string {
 		length := x.length + y.length
 		buf := alloc(length, nil)
 		memcpy(buf, unsafe.Pointer(x.ptr), x.length)
-		memcpy(unsafe.Pointer(uintptr(buf)+x.length), unsafe.Pointer(y.ptr), y.length)
+		memcpy(unsafe.Add(buf, x.length), unsafe.Pointer(y.ptr), y.length)
 		return _string{ptr: (*byte)(buf), length: length}
 	}
 }
@@ -107,7 +107,7 @@ func stringFromRunes(runeSlice []rune) (s _string) {
 	for _, r := range runeSlice {
 		array, numBytes := encodeUTF8(r)
 		for _, c := range array[:numBytes] {
-			*(*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(s.ptr)) + index)) = c
+			*(*byte)(unsafe.Add(unsafe.Pointer(s.ptr), index)) = c
 			index++
 		}
 	}
@@ -243,7 +243,7 @@ func isContinuation(b byte) bool {
 func cgo_CString(s _string) unsafe.Pointer {
 	buf := malloc(s.length + 1)
 	memcpy(buf, unsafe.Pointer(s.ptr), s.length)
-	*(*byte)(unsafe.Pointer(uintptr(buf) + s.length)) = 0 // trailing 0 byte
+	*(*byte)(unsafe.Add(buf, s.length)) = 0 // trailing 0 byte
 	return buf
 }
 

@@ -258,7 +258,7 @@ func Environ() []string {
 	for environ := libc_environ; *environ != nil; {
 		length += libc_strlen(*environ)
 		vars++
-		environ = (*unsafe.Pointer)(unsafe.Pointer(uintptr(unsafe.Pointer(environ)) + unsafe.Sizeof(environ)))
+		environ = (*unsafe.Pointer)(unsafe.Add(unsafe.Pointer(environ), unsafe.Sizeof(environ)))
 	}
 
 	// allocate our backing slice for the strings
@@ -287,7 +287,7 @@ func Environ() []string {
 		// add s to our list of environment variables
 		envs = append(envs, s)
 		// environ++
-		environ = (*unsafe.Pointer)(unsafe.Pointer(uintptr(unsafe.Pointer(environ)) + unsafe.Sizeof(environ)))
+		environ = (*unsafe.Pointer)(unsafe.Add(unsafe.Pointer(environ), unsafe.Sizeof(environ)))
 	}
 	return envs
 }