compiler: add support for atomic operations

This also implements DisableInterrupts/EnableInterrupts for RISC-V, as
those operations were needed to implement a few libcalls.
This commit is contained in:
Ayke van Laethem 2020-05-15 23:24:41 +02:00 committed by Ron Evans
parent 734613c20e
commit fed433c046
10 changed files with 389 additions and 24 deletions
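
In practice, this commit lets code like the following sketch compile: the sync/atomic call below is lowered to a single LLVM atomic instruction where the target supports it, or to one of the libcalls added in this commit otherwise (the counter variable is illustrative, not part of the diff).

package main

import "sync/atomic"

var counter uint32 // illustrative shared counter, not part of this diff

func main() {
	// Lowered by createAtomicOp to an LLVM atomicrmw add instruction,
	// or to a __sync/__atomic libcall on cores without native atomics.
	atomic.AddUint32(&counter, 1)
	println(atomic.LoadUint32(&counter))
}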

compiler/atomic.go (new file)

@@ -0,0 +1,57 @@
package compiler

import (
	"golang.org/x/tools/go/ssa"
	"tinygo.org/x/go-llvm"
)

// createAtomicOp lowers an atomic library call to a direct LLVM atomic
// operation. It returns the result of the operation and true if the call could
// be lowered inline, and false otherwise.
func (b *builder) createAtomicOp(call *ssa.CallCommon) (llvm.Value, bool) {
	name := call.Value.(*ssa.Function).Name()
	switch name {
	case "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr":
		ptr := b.getValue(call.Args[0])
		val := b.getValue(call.Args[1])
		oldVal := b.CreateAtomicRMW(llvm.AtomicRMWBinOpAdd, ptr, val, llvm.AtomicOrderingSequentiallyConsistent, true)
		// Return the new value, not the original value returned by atomicrmw.
		return b.CreateAdd(oldVal, val, ""), true
	case "SwapInt32", "SwapInt64", "SwapUint32", "SwapUint64", "SwapUintptr", "SwapPointer":
		ptr := b.getValue(call.Args[0])
		val := b.getValue(call.Args[1])
		isPointer := val.Type().TypeKind() == llvm.PointerTypeKind
		if isPointer {
			// atomicrmw only supports integers, so cast to an integer.
			val = b.CreatePtrToInt(val, b.uintptrType, "")
			ptr = b.CreateBitCast(ptr, llvm.PointerType(val.Type(), 0), "")
		}
		oldVal := b.CreateAtomicRMW(llvm.AtomicRMWBinOpXchg, ptr, val, llvm.AtomicOrderingSequentiallyConsistent, true)
		if isPointer {
			oldVal = b.CreateIntToPtr(oldVal, b.i8ptrType, "")
		}
		return oldVal, true
	case "CompareAndSwapInt32", "CompareAndSwapInt64", "CompareAndSwapUint32", "CompareAndSwapUint64", "CompareAndSwapUintptr", "CompareAndSwapPointer":
		ptr := b.getValue(call.Args[0])
		old := b.getValue(call.Args[1])
		newVal := b.getValue(call.Args[2])
		tuple := b.CreateAtomicCmpXchg(ptr, old, newVal, llvm.AtomicOrderingSequentiallyConsistent, llvm.AtomicOrderingSequentiallyConsistent, true)
		swapped := b.CreateExtractValue(tuple, 1, "")
		return swapped, true
	case "LoadInt32", "LoadInt64", "LoadUint32", "LoadUint64", "LoadUintptr", "LoadPointer":
		ptr := b.getValue(call.Args[0])
		val := b.CreateLoad(ptr, "")
		val.SetOrdering(llvm.AtomicOrderingSequentiallyConsistent)
		val.SetAlignment(b.targetData.PrefTypeAlignment(val.Type())) // required
		return val, true
	case "StoreInt32", "StoreInt64", "StoreUint32", "StoreUint64", "StoreUintptr", "StorePointer":
		ptr := b.getValue(call.Args[0])
		val := b.getValue(call.Args[1])
		store := b.CreateStore(val, ptr)
		store.SetOrdering(llvm.AtomicOrderingSequentiallyConsistent)
		store.SetAlignment(b.targetData.PrefTypeAlignment(val.Type())) // required
		return store, true
	default:
		return llvm.Value{}, false
	}
}
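
One subtlety in the Add case above: LLVM's atomicrmw instruction returns the value memory held before the operation, while Go's atomic.AddInt32 and friends return the new value, hence the extra CreateAdd. A sketch of the Go-level semantics being implemented (illustrative code, not part of the compiler):

func addInt32Semantics(addr *int32, delta int32) int32 {
	old := *addr        // what LLVM's atomicrmw add returns
	*addr = old + delta // performed atomically by the single atomicrmw instruction
	return old + delta  // Go's atomic.AddInt32 returns the new value
}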


@@ -1323,6 +1323,14 @@ func (b *builder) createFunctionCall(instr *ssa.CallCommon) (llvm.Value, error)
	return b.createVolatileLoad(instr)
case strings.HasPrefix(name, "runtime/volatile.Store"):
	return b.createVolatileStore(instr)
case strings.HasPrefix(name, "sync/atomic."):
	val, ok := b.createAtomicOp(instr)
	if ok {
		// This call could be lowered as an atomic operation.
		return val, nil
	}
	// This call couldn't be lowered as an atomic operation; it's
	// probably something else. Continue as usual.
case name == "runtime/interrupt.New":
	return b.createInterruptGlobal(instr)
}
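
Calls that fall through here are sync/atomic functions with no direct LLVM lowering; for example, atomic.Value is implemented in ordinary Go code on top of the runtime_procPin/runtime_procUnpin hooks provided later in this commit. A minimal usage sketch:

package main

import "sync/atomic"

func main() {
	var v atomic.Value
	v.Store("hello") // plain Go code, not lowered to an LLVM atomic
	println(v.Load().(string))
}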


@@ -19,3 +19,19 @@ func Asm(asm string)
// You can use {} in the asm string (which expands to a register) to set the
// return value.
func AsmFull(asm string, regs map[string]interface{}) uintptr

// DisableInterrupts disables all interrupts, and returns the old interrupt
// state.
func DisableInterrupts() uintptr {
	// Note: this can be optimized with a CSRRW instruction, which atomically
	// swaps the value and returns the old value.
	mask := MIE.Get()
	MIE.Set(0)
	return mask
}

// EnableInterrupts enables all interrupts again. The value passed in must be
// the mask returned by DisableInterrupts.
func EnableInterrupts(mask uintptr) {
	MIE.Set(mask)
}
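
The intended usage is a short critical section that saves and restores the interrupt state, as in this sketch (sharedCounter and the import path are illustrative assumptions, not part of the diff):

package main

import "device/riscv"

var sharedCounter int // hypothetical variable shared with an interrupt handler

func incrementShared() {
	mask := riscv.DisableInterrupts() // save the old MIE mask and disable interrupts
	sharedCounter++
	riscv.EnableInterrupts(mask) // restore the saved mask
}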


@@ -19,3 +19,86 @@ func align(ptr uintptr) uintptr {
func getCurrentStackPointer() uintptr {
	return arm.AsmFull("mov {}, sp", nil)
}

// Documentation:
// * https://llvm.org/docs/Atomics.html
// * https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html
//
// In the case of Cortex-M, some atomic operations are emitted inline while
// others are emitted as libcalls. How many are emitted as libcalls depends on
// the MCU core variant (M3 and higher support some 32-bit atomic operations
// while M0 and M0+ do not).

//export __sync_fetch_and_add_4
func __sync_fetch_and_add_4(ptr *uint32, value uint32) uint32 {
	mask := arm.DisableInterrupts()
	oldValue := *ptr
	*ptr = oldValue + value
	arm.EnableInterrupts(mask)
	return oldValue
}

//export __sync_fetch_and_add_8
func __sync_fetch_and_add_8(ptr *uint64, value uint64) uint64 {
	mask := arm.DisableInterrupts()
	oldValue := *ptr
	*ptr = oldValue + value
	arm.EnableInterrupts(mask)
	return oldValue
}

//export __sync_lock_test_and_set_4
func __sync_lock_test_and_set_4(ptr *uint32, value uint32) uint32 {
	mask := arm.DisableInterrupts()
	oldValue := *ptr
	*ptr = value
	arm.EnableInterrupts(mask)
	return oldValue
}

//export __sync_lock_test_and_set_8
func __sync_lock_test_and_set_8(ptr *uint64, value uint64) uint64 {
	mask := arm.DisableInterrupts()
	oldValue := *ptr
	*ptr = value
	arm.EnableInterrupts(mask)
	return oldValue
}

//export __sync_val_compare_and_swap_4
func __sync_val_compare_and_swap_4(ptr *uint32, expected, desired uint32) uint32 {
	mask := arm.DisableInterrupts()
	oldValue := *ptr
	if oldValue == expected {
		*ptr = desired
	}
	arm.EnableInterrupts(mask)
	return oldValue
}

//export __sync_val_compare_and_swap_8
func __sync_val_compare_and_swap_8(ptr *uint64, expected, desired uint64) uint64 {
	mask := arm.DisableInterrupts()
	oldValue := *ptr
	if oldValue == expected {
		*ptr = desired
	}
	arm.EnableInterrupts(mask)
	return oldValue
}

// The safest thing to do here would just be to disable interrupts for
// procPin/procUnpin. Note that a global variable is safe in this case, as any
// access to procPinnedMask will happen with interrupts disabled.

var procPinnedMask uintptr

//go:linkname procPin sync/atomic.runtime_procPin
func procPin() {
	procPinnedMask = arm.DisableInterrupts()
}

//go:linkname procUnpin sync/atomic.runtime_procUnpin
func procUnpin() {
	arm.EnableInterrupts(procPinnedMask)
}
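
As an example of when these libcalls fire (a sketch with illustrative names): no Cortex-M core has a native 64-bit atomic, so a 64-bit atomic add compiles to a call to the __sync_fetch_and_add_8 defined above.

package main

import "sync/atomic"

var ticks uint64 // illustrative counter shared with an interrupt handler

func incrementTicks() uint64 {
	// On Cortex-M, LLVM lowers this to a call to __sync_fetch_and_add_8,
	// which emulates the 64-bit atomic by briefly disabling interrupts.
	return atomic.AddUint64(&ticks, 1)
}

func main() {
	println(uint32(incrementTicks()))
}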


@@ -17,3 +17,76 @@ func align(ptr uintptr) uintptr {
func getCurrentStackPointer() uintptr {
	return riscv.AsmFull("mv {}, sp", nil)
}

// Documentation:
// * https://llvm.org/docs/Atomics.html
// * https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html
//
// In the case of RISC-V, some operations may be implemented with libcalls if
// the operation is too big to be handled by assembly. Officially, these calls
// should be implemented with a lock-free algorithm, but since (as of this
// time) all supported RISC-V chips have a single hart, we can simply disable
// interrupts to get the same behavior.

//export __atomic_load_8
func __atomic_load_8(ptr *uint64, ordering int32) uint64 {
	mask := riscv.DisableInterrupts()
	value := *ptr
	riscv.EnableInterrupts(mask)
	return value
}

//export __atomic_store_8
func __atomic_store_8(ptr *uint64, value uint64, ordering int32) {
	mask := riscv.DisableInterrupts()
	*ptr = value
	riscv.EnableInterrupts(mask)
}

//export __atomic_exchange_8
func __atomic_exchange_8(ptr *uint64, value uint64, ordering int32) uint64 {
	mask := riscv.DisableInterrupts()
	oldValue := *ptr
	*ptr = value
	riscv.EnableInterrupts(mask)
	return oldValue
}

//export __atomic_compare_exchange_8
func __atomic_compare_exchange_8(ptr, expected *uint64, desired uint64, success_ordering, failure_ordering int32) bool {
	mask := riscv.DisableInterrupts()
	oldValue := *ptr
	success := oldValue == *expected
	if success {
		*ptr = desired
	} else {
		*expected = oldValue
	}
	riscv.EnableInterrupts(mask)
	return success
}

//export __atomic_fetch_add_8
func __atomic_fetch_add_8(ptr *uint64, value uint64, ordering int32) uint64 {
	mask := riscv.DisableInterrupts()
	oldValue := *ptr
	*ptr = oldValue + value
	riscv.EnableInterrupts(mask)
	return oldValue
}

// The safest thing to do here would just be to disable interrupts for
// procPin/procUnpin. Note that a global variable is safe in this case, as any
// access to procPinnedMask will happen with interrupts disabled.

var procPinnedMask uintptr

//go:linkname procPin sync/atomic.runtime_procPin
func procPin() {
	procPinnedMask = riscv.DisableInterrupts()
}

//go:linkname procUnpin sync/atomic.runtime_procUnpin
func procUnpin() {
	riscv.EnableInterrupts(procPinnedMask)
}
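
A note on the compare-exchange contract implemented above: per the GCC/LLVM __atomic builtins documentation, on failure the observed value is written back to *expected, so callers can retry without reloading. A conceptual retry loop using this contract (illustrative code, not part of this commit; the orderings 0, 0 stand in for relaxed):

func atomicIncrement64(ptr *uint64) {
	expected := *ptr
	// On failure, expected has been updated to the observed value, so each
	// retry uses fresh data instead of spinning on a stale snapshot.
	for !__atomic_compare_exchange_8(ptr, &expected, expected+1, 0, 0) {
	}
}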


@@ -1,24 +0,0 @@
package runtime

// This file contains implementations for the sync/atomic package.
// All implementations assume there are no goroutines, threads or interrupts.

//go:linkname loadUint64 sync/atomic.LoadUint64
func loadUint64(addr *uint64) uint64 {
	return *addr
}

//go:linkname storeUint32 sync/atomic.StoreUint32
func storeUint32(addr *uint32, val uint32) {
	*addr = val
}

//go:linkname compareAndSwapUint64 sync/atomic.CompareAndSwapUint64
func compareAndSwapUint64(addr *uint64, old, new uint64) bool {
	if *addr == old {
		*addr = new
		return true
	}
	return false
}


@@ -94,3 +94,14 @@ func extalloc(size uintptr) unsafe.Pointer {
//export free
func extfree(ptr unsafe.Pointer)

// TinyGo does not yet support any form of parallelism on an OS, so these can
// be left empty.

//go:linkname procPin sync/atomic.runtime_procPin
func procPin() {
}

//go:linkname procUnpin sync/atomic.runtime_procUnpin
func procUnpin() {
}


@@ -91,3 +91,14 @@ func ticks() timeUnit
func abort() {
	trap()
}

// TinyGo does not yet support any form of parallelism on WebAssembly, so these
// can be left empty.

//go:linkname procPin sync/atomic.runtime_procPin
func procPin() {
}

//go:linkname procUnpin sync/atomic.runtime_procUnpin
func procUnpin() {
}

testdata/atomic.go (new file)

@@ -0,0 +1,95 @@
package main

import (
	"sync/atomic"
	"unsafe"
)

func main() {
	i32 := int32(-5)
	println("AddInt32:", atomic.AddInt32(&i32, 8), i32)

	i64 := int64(-5)
	println("AddInt64:", atomic.AddInt64(&i64, 8), i64)

	u32 := uint32(5)
	println("AddUint32:", atomic.AddUint32(&u32, 8), u32)

	u64 := uint64(5)
	println("AddUint64:", atomic.AddUint64(&u64, 8), u64)

	uptr := uintptr(5)
	println("AddUintptr:", uint64(atomic.AddUintptr(&uptr, 8)), uint64(uptr))

	println("SwapInt32:", atomic.SwapInt32(&i32, 33), i32)
	println("SwapInt64:", atomic.SwapInt64(&i64, 33), i64)
	println("SwapUint32:", atomic.SwapUint32(&u32, 33), u32)
	println("SwapUint64:", atomic.SwapUint64(&u64, 33), u64)
	println("SwapUintptr:", uint64(atomic.SwapUintptr(&uptr, 33)), uint64(uptr))

	ptr := unsafe.Pointer(&i32)
	println("SwapPointer:", atomic.SwapPointer(&ptr, unsafe.Pointer(&u32)) == unsafe.Pointer(&i32), ptr == unsafe.Pointer(&u32))

	i32 = int32(-5)
	println("CompareAndSwapInt32:", atomic.CompareAndSwapInt32(&i32, 5, 3), i32)
	println("CompareAndSwapInt32:", atomic.CompareAndSwapInt32(&i32, -5, 3), i32)

	i64 = int64(-5)
	println("CompareAndSwapInt64:", atomic.CompareAndSwapInt64(&i64, 5, 3), i64)
	println("CompareAndSwapInt64:", atomic.CompareAndSwapInt64(&i64, -5, 3), i64)

	u32 = uint32(5)
	println("CompareAndSwapUint32:", atomic.CompareAndSwapUint32(&u32, 4, 3), u32)
	println("CompareAndSwapUint32:", atomic.CompareAndSwapUint32(&u32, 5, 3), u32)

	u64 = uint64(5)
	println("CompareAndSwapUint64:", atomic.CompareAndSwapUint64(&u64, 4, 3), u64)
	println("CompareAndSwapUint64:", atomic.CompareAndSwapUint64(&u64, 5, 3), u64)

	uptr = uintptr(5)
	println("CompareAndSwapUintptr:", atomic.CompareAndSwapUintptr(&uptr, 4, 3), uint64(uptr))
	println("CompareAndSwapUintptr:", atomic.CompareAndSwapUintptr(&uptr, 5, 3), uint64(uptr))

	ptr = unsafe.Pointer(&i32)
	println("CompareAndSwapPointer:", atomic.CompareAndSwapPointer(&ptr, unsafe.Pointer(&u32), unsafe.Pointer(&i64)), ptr == unsafe.Pointer(&i32))
	println("CompareAndSwapPointer:", atomic.CompareAndSwapPointer(&ptr, unsafe.Pointer(&i32), unsafe.Pointer(&i64)), ptr == unsafe.Pointer(&i64))

	println("LoadInt32:", atomic.LoadInt32(&i32))
	println("LoadInt64:", atomic.LoadInt64(&i64))
	println("LoadUint32:", atomic.LoadUint32(&u32))
	println("LoadUint64:", atomic.LoadUint64(&u64))
	println("LoadUintptr:", uint64(atomic.LoadUintptr(&uptr)))
	println("LoadPointer:", atomic.LoadPointer(&ptr) == unsafe.Pointer(&i64))

	atomic.StoreInt32(&i32, -20)
	println("StoreInt32:", i32)

	atomic.StoreInt64(&i64, -20)
	println("StoreInt64:", i64)

	atomic.StoreUint32(&u32, 20)
	println("StoreUint32:", u32)

	atomic.StoreUint64(&u64, 20)
	println("StoreUint64:", u64)

	atomic.StoreUintptr(&uptr, 20)
	println("StoreUintptr:", uint64(uptr))

	atomic.StorePointer(&ptr, unsafe.Pointer(&uptr))
	println("StorePointer:", ptr == unsafe.Pointer(&uptr))

	// test atomic.Value load/store operations
	testValue(int(3), int(-2))
	testValue("", "foobar", "baz")
}

func testValue(values ...interface{}) {
	var av atomic.Value
	for _, val := range values {
		av.Store(val)
		loadedVal := av.Load()
		if loadedVal != val {
			println("val store/load didn't work, expected", val, "but got", loadedVal)
		}
	}
}

testdata/atomic.txt (new file)

@@ -0,0 +1,35 @@
AddInt32: 3 3
AddInt64: 3 3
AddUint32: 13 13
AddUint64: 13 13
AddUintptr: 13 13
SwapInt32: 3 33
SwapInt64: 3 33
SwapUint32: 13 33
SwapUint64: 13 33
SwapUintptr: 13 33
SwapPointer: true true
CompareAndSwapInt32: false -5
CompareAndSwapInt32: true 3
CompareAndSwapInt64: false -5
CompareAndSwapInt64: true 3
CompareAndSwapUint32: false 5
CompareAndSwapUint32: true 3
CompareAndSwapUint64: false 5
CompareAndSwapUint64: true 3
CompareAndSwapUintptr: false 5
CompareAndSwapUintptr: true 3
CompareAndSwapPointer: false true
CompareAndSwapPointer: true true
LoadInt32: 3
LoadInt64: 3
LoadUint32: 3
LoadUint64: 3
LoadUintptr: 3
LoadPointer: true
StoreInt32: -20
StoreInt64: -20
StoreUint32: 20
StoreUint64: 20
StoreUintptr: 20
StorePointer: true