From e2178ce682ef8cf72203b2ff26c1b3c828806a8b Mon Sep 17 00:00:00 2001
From: Nia Waldvogel
Date: Thu, 13 Jan 2022 14:02:34 -0500
Subject: [PATCH] compiler: work around AVR atomics bugs

The AVR backend has several critical atomics bugs.
This change invokes libcalls for all atomic operations on AVR.
Now `testdata/atomic.go` compiles and runs correctly.
---
 compiler/atomic.go      | 32 ++++++++++++++++++++++++++++++++
 main_test.go            |  3 ++-
 src/runtime/arch_avr.go | 19 +++++++++++++++++++
 3 files changed, 53 insertions(+), 1 deletion(-)

diff --git a/compiler/atomic.go b/compiler/atomic.go
index f8fb51bf..7a931b7f 100644
--- a/compiler/atomic.go
+++ b/compiler/atomic.go
@@ -1,6 +1,7 @@
 package compiler
 
 import (
+	"fmt"
 	"strings"
 
 	"golang.org/x/tools/go/ssa"
@@ -16,6 +17,20 @@ func (b *builder) createAtomicOp(call *ssa.CallCommon) (llvm.Value, bool) {
 	case "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr":
 		ptr := b.getValue(call.Args[0])
 		val := b.getValue(call.Args[1])
+		if strings.HasPrefix(b.Triple, "avr") {
+			// AtomicRMW does not work on AVR as intended:
+			// - There are some register allocation issues (fixed by https://reviews.llvm.org/D97127 which is not yet in a usable LLVM release)
+			// - The result is the new value instead of the old value
+			vType := val.Type()
+			name := fmt.Sprintf("__sync_fetch_and_add_%d", vType.IntTypeWidth()/8)
+			fn := b.mod.NamedFunction(name)
+			if fn.IsNil() {
+				fn = llvm.AddFunction(b.mod, name, llvm.FunctionType(vType, []llvm.Type{ptr.Type(), vType}, false))
+			}
+			oldVal := b.createCall(fn, []llvm.Value{ptr, val}, "")
+			// Return the new value, not the original value returned.
+			return b.CreateAdd(oldVal, val, ""), true
+		}
 		oldVal := b.CreateAtomicRMW(llvm.AtomicRMWBinOpAdd, ptr, val, llvm.AtomicOrderingSequentiallyConsistent, true)
 		// Return the new value, not the original value returned by atomicrmw.
 		return b.CreateAdd(oldVal, val, ""), true
@@ -73,6 +88,23 @@
 	case "StoreInt32", "StoreInt64", "StoreUint32", "StoreUint64", "StoreUintptr", "StorePointer":
 		ptr := b.getValue(call.Args[0])
 		val := b.getValue(call.Args[1])
+		if strings.HasPrefix(b.Triple, "avr") {
+			// SelectionDAGBuilder is currently missing the "are unaligned atomics allowed" check for stores.
+			vType := val.Type()
+			isPointer := vType.TypeKind() == llvm.PointerTypeKind
+			if isPointer {
+				// libcalls only supports integers, so cast to an integer.
+				vType = b.uintptrType
+				val = b.CreatePtrToInt(val, vType, "")
+				ptr = b.CreateBitCast(ptr, llvm.PointerType(vType, 0), "")
+			}
+			name := fmt.Sprintf("__atomic_store_%d", vType.IntTypeWidth()/8)
+			fn := b.mod.NamedFunction(name)
+			if fn.IsNil() {
+				fn = llvm.AddFunction(b.mod, name, llvm.FunctionType(vType, []llvm.Type{ptr.Type(), vType, b.uintptrType}, false))
+			}
+			return b.createCall(fn, []llvm.Value{ptr, val, llvm.ConstInt(b.uintptrType, 5, false)}, ""), true
+		}
 		store := b.CreateStore(val, ptr)
 		store.SetOrdering(llvm.AtomicOrderingSequentiallyConsistent)
 		store.SetAlignment(b.targetData.PrefTypeAlignment(val.Type())) // required
diff --git a/main_test.go b/main_test.go
index 0f6db9c7..9660d33e 100644
--- a/main_test.go
+++ b/main_test.go
@@ -140,7 +140,8 @@ func TestBuild(t *testing.T) {
 		for _, t := range tests {
 			switch t {
 			case "atomic.go":
-				// Not supported due to unaligned atomic accesses.
+				// Requires GCC 11.2.0 or above for interface comparison.
+				// https://github.com/gcc-mirror/gcc/commit/f30dd607669212de135dec1f1d8a93b8954c327c
 			case "reflect.go":
 				// Reflect tests do not work due to type code issues.
diff --git a/src/runtime/arch_avr.go b/src/runtime/arch_avr.go
index db8392bd..499db796 100644
--- a/src/runtime/arch_avr.go
+++ b/src/runtime/arch_avr.go
@@ -1,7 +1,10 @@
+//go:build avr
 // +build avr
 
 package runtime
 
+import "runtime/interrupt"
+
 const GOARCH = "arm" // avr pretends to be arm
 
 // The bitness of the CPU (e.g. 8, 32, 64).
@@ -16,3 +19,19 @@ func align(ptr uintptr) uintptr {
 func getCurrentStackPointer() uintptr {
 	return uintptr(stacksave())
 }
+
+// The safest thing to do here would just be to disable interrupts for
+// procPin/procUnpin. Note that a global variable is safe in this case, as any
+// access to procPinnedMask will happen with interrupts disabled.
+
+var procPinnedMask interrupt.State
+
+//go:linkname procPin sync/atomic.runtime_procPin
+func procPin() {
+	procPinnedMask = interrupt.Disable()
+}
+
+//go:linkname procUnpin sync/atomic.runtime_procUnpin
+func procUnpin() {
+	interrupt.Restore(procPinnedMask)
+}
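For context, a minimal sketch of the kind of Go code this change affects. It is not part of the patch and not the actual contents of testdata/atomic.go; the package, variable names, and values below are hypothetical.

// Minimal sketch: per the patch, when built for an AVR target the atomic add
// below should be lowered to a __sync_fetch_and_add_4 libcall (which returns
// the old value; the compiler adds the operand on top to produce the new
// value), and the atomic store should become an __atomic_store_4 libcall with
// sequentially consistent ordering (the constant 5 passed in the patch).
package main

import "sync/atomic"

// Hypothetical globals, used only to illustrate 32-bit atomic operations.
var counter uint32
var flag uint32

func main() {
	// AddUint32 returns the new value, matching the CreateAdd applied to the
	// libcall result in createAtomicOp.
	n := atomic.AddUint32(&counter, 1)

	// StoreUint32 is emitted as a libcall instead of a native atomic store,
	// avoiding the missing unaligned-atomics check in SelectionDAGBuilder.
	atomic.StoreUint32(&flag, n)

	println(counter, flag)
}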