compiler: implement recover() built-in function
This commit is contained in:
parent 79ba6a50c3
commit 8d6b210c09
31 changed files with 868 additions and 191 deletions
Makefile (5 changes)
@@ -307,11 +307,14 @@ TEST_PACKAGES_LINUX := \
	archive/zip \
	compress/flate \
	compress/lzw \
	crypto/hmac \
	debug/dwarf \
	debug/plan9obj \
	io/fs \
	io/ioutil \
	testing/fstest
	strconv \
	testing/fstest \
	text/template/parse

TEST_PACKAGES_DARWIN := $(TEST_PACKAGES_LINUX)

@@ -302,7 +302,7 @@ func defaultTarget(goos, goarch, triple string) (*TargetSpec, error) {
			// systems so we need separate assembly files.
			suffix = "_windows"
		}
		spec.ExtraFiles = append(spec.ExtraFiles, "src/runtime/gc_"+goarch+suffix+".S")
		spec.ExtraFiles = append(spec.ExtraFiles, "src/runtime/asm_"+goarch+suffix+".S")
		spec.ExtraFiles = append(spec.ExtraFiles, "src/internal/task/task_stack_"+goarch+suffix+".S")
	}
	if goarch != runtime.GOARCH {

@@ -33,17 +33,36 @@ const (
	paramIsDeferenceableOrNull = 1 << iota
)

// createCall creates a new call to runtime.<fnName> with the given arguments.
func (b *builder) createRuntimeCall(fnName string, args []llvm.Value, name string) llvm.Value {
// createRuntimeCallCommon creates a runtime call. Use createRuntimeCall or
// createRuntimeInvoke instead.
func (b *builder) createRuntimeCallCommon(fnName string, args []llvm.Value, name string, isInvoke bool) llvm.Value {
	fn := b.program.ImportedPackage("runtime").Members[fnName].(*ssa.Function)
	llvmFn := b.getFunction(fn)
	if llvmFn.IsNil() {
		panic("trying to call non-existent function: " + fn.RelString(nil))
	}
	args = append(args, llvm.Undef(b.i8ptrType)) // unused context parameter
	if isInvoke {
		return b.createInvoke(llvmFn, args, name)
	}
	return b.createCall(llvmFn, args, name)
}

// createRuntimeCall creates a new call to runtime.<fnName> with the given
// arguments.
func (b *builder) createRuntimeCall(fnName string, args []llvm.Value, name string) llvm.Value {
	return b.createRuntimeCallCommon(fnName, args, name, false)
}

// createRuntimeInvoke creates a new call to runtime.<fnName> with the given
// arguments. If the runtime call panics, control flow is diverted to the
// landing pad block.
// Note that "invoke" here is meant in the LLVM sense (a call that can
// panic/throw), not in the Go sense (an interface method call).
func (b *builder) createRuntimeInvoke(fnName string, args []llvm.Value, name string) llvm.Value {
	return b.createRuntimeCallCommon(fnName, args, name, true)
}

// createCall creates a call to the given function with the arguments possibly
// expanded.
func (b *builder) createCall(fn llvm.Value, args []llvm.Value, name string) llvm.Value {
@@ -55,6 +74,15 @@ func (b *builder) createCall(fn llvm.Value, args []llvm.Value, name string) llvm
	return b.CreateCall(fn, expanded, name)
}

// createInvoke is like createCall but continues execution at the landing pad if
// the call resulted in a panic.
func (b *builder) createInvoke(fn llvm.Value, args []llvm.Value, name string) llvm.Value {
	if b.hasDeferFrame() {
		b.createInvokeCheckpoint()
	}
	return b.createCall(fn, args, name)
}

// Expand an argument type to a list that can be used in a function call
// parameter list.
func (c *compilerContext) expandFormalParamType(t llvm.Type, name string, goType types.Type) []paramInfo {

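The call/invoke split matters at the call sites changed elsewhere in this commit: a runtime call that can panic has to go through the invoke path so an enclosing defer frame can observe it, while calls that never panic stay on the cheaper plain path. A minimal sketch of that usage (both calls are taken verbatim from the hunks below; the surrounding lowering code is omitted):

	// Sketch only: inside a lowering method on *builder.
	value := b.getValue(instr.X)
	b.createRuntimeInvoke("_panic", []llvm.Value{value}, "")                 // may divert to the landing pad
	b.createRuntimeCall("destroyDeferFrame", []llvm.Value{b.deferFrame}, "") // plain call, never panics
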
@@ -143,6 +143,8 @@ type builder struct {
	currentBlock  *ssa.BasicBlock
	phis          []phiNode
	deferPtr      llvm.Value
	deferFrame    llvm.Value
	landingpad    llvm.BasicBlock
	difunc        llvm.Metadata
	dilocals      map[*types.Var]llvm.Metadata
	initInlinedAt llvm.Metadata // fake inlinedAt position
@@ -1202,6 +1204,12 @@ func (b *builder) createFunction() {
		}
	}

	if b.hasDeferFrame() {
		// Create the landing pad block, where execution continues after a
		// panic.
		b.createLandingPad()
	}

	// Resolve phi nodes
	for _, phi := range b.phis {
		block := phi.ssa.Block()
@@ -1329,9 +1337,12 @@ func (b *builder) createInstruction(instr ssa.Instruction) {
		b.createMapUpdate(mapType.Key(), m, key, value, instr.Pos())
	case *ssa.Panic:
		value := b.getValue(instr.X)
		b.createRuntimeCall("_panic", []llvm.Value{value}, "")
		b.createRuntimeInvoke("_panic", []llvm.Value{value}, "")
		b.CreateUnreachable()
	case *ssa.Return:
		if b.hasDeferFrame() {
			b.createRuntimeCall("destroyDeferFrame", []llvm.Value{b.deferFrame}, "")
		}
		if len(instr.Results) == 0 {
			b.CreateRetVoid()
		} else if len(instr.Results) == 1 {
@@ -1520,7 +1531,13 @@ func (b *builder) createBuiltin(argTypes []types.Type, argValues []llvm.Value, c
		cplx := argValues[0]
		return b.CreateExtractValue(cplx, 0, "real"), nil
	case "recover":
		return b.createRuntimeCall("_recover", nil, ""), nil
		useParentFrame := uint64(0)
		if b.hasDeferFrame() {
			// recover() should return the panic value of the parent function,
			// not of the current function.
			useParentFrame = 1
		}
		return b.createRuntimeCall("_recover", []llvm.Value{llvm.ConstInt(b.ctx.Int1Type(), useParentFrame, false)}, ""), nil
	case "ssa:wrapnilchk":
		// TODO: do an actual nil check?
		return argValues[0], nil

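For reference, this is the Go-level behaviour the lowering above provides (standard Go semantics, shown only to illustrate what now compiles and runs on the supported targets): recover() returns the value passed to panic when called from a deferred function while the goroutine is panicking, and nil otherwise.

	package main

	import "fmt"

	func safeCall() (err error) {
		defer func() {
			// recover() returns the value passed to panic while panicking,
			// and nil when there is nothing to recover.
			if r := recover(); r != nil {
				err = fmt.Errorf("recovered: %v", r)
			}
		}()
		panic("boom")
	}

	func main() {
		fmt.Println(safeCall()) // recovered: boom
	}
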
@@ -1607,6 +1624,12 @@ func (b *builder) createFunctionCall(instr *ssa.CallCommon) (llvm.Value, error)
		return b.createVolatileLoad(instr)
	case strings.HasPrefix(name, "runtime/volatile.Store"):
		return b.createVolatileStore(instr)
	case name == "runtime.supportsRecover":
		supportsRecover := uint64(0)
		if b.supportsRecover() {
			supportsRecover = 1
		}
		return llvm.ConstInt(b.ctx.Int1Type(), supportsRecover, false), nil
	case strings.HasPrefix(name, "sync/atomic."):
		val, ok := b.createAtomicOp(instr)
		if ok {

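Because the call to runtime.supportsRecover is folded to a constant i1 here, runtime code can branch on it and have the unsupported path removed at compile time. The runtime-side declaration is not part of this excerpt, so the stub below is purely illustrative; only the constant-folding behaviour comes from the case above.

	package main

	// supportsRecover stands in for the runtime function whose call sites are
	// replaced by the compiler with a per-target constant. The stub body exists
	// only so that this sketch compiles on its own.
	func supportsRecover() bool { return true }

	func main() {
		if supportsRecover() {
			println("recover() is fully supported on this target")
		} else {
			println("recover() is compiled out on this target")
		}
	}
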
@@ -1677,7 +1700,7 @@ func (b *builder) createFunctionCall(instr *ssa.CallCommon) (llvm.Value, error)
		params = append(params, context)
	}

	return b.createCall(callee, params, ""), nil
	return b.createInvoke(callee, params, ""), nil
}

// getValue returns the LLVM value of a constant, function value, global, or

@@ -94,6 +94,7 @@ func TestCompiler(t *testing.T) {
	}
	compilerConfig := &Config{
		Triple:    config.Triple(),
		Features:  config.Features(),
		GOOS:      config.GOOS(),
		GOARCH:    config.GOARCH(),
		CodeModel: config.CodeModel(),

@@ -22,6 +22,32 @@ import (
	"tinygo.org/x/go-llvm"
)

// supportsRecover returns whether the compiler supports the recover() builtin
// for the current architecture.
func (b *builder) supportsRecover() bool {
	switch b.archFamily() {
	case "wasm32":
		// Probably needs to be implemented using the exception handling
		// proposal of WebAssembly:
		// https://github.com/WebAssembly/exception-handling
		return false
	case "avr", "riscv64", "xtensa":
		// TODO: add support for these architectures
		return false
	default:
		return true
	}
}

// hasDeferFrame returns whether the current function needs to catch panics and
// run defers.
func (b *builder) hasDeferFrame() bool {
	if b.fn.Recover == nil {
		return false
	}
	return b.supportsRecover()
}

// deferInitFunc sets up this function for future deferred calls. It must be
// called from within the entry block when this function contains deferred
// calls.
@@ -37,6 +63,125 @@ func (b *builder) deferInitFunc() {
	deferType := llvm.PointerType(b.getLLVMRuntimeType("_defer"), 0)
	b.deferPtr = b.CreateAlloca(deferType, "deferPtr")
	b.CreateStore(llvm.ConstPointerNull(deferType), b.deferPtr)

	if b.hasDeferFrame() {
		// Set up the defer frame with the current stack pointer.
		// This assumes that the stack pointer doesn't move outside of the
		// function prologue/epilogue (an invariant maintained by TinyGo but
		// possibly broken by the C alloca function).
		// The frame pointer is _not_ saved, because it is marked as clobbered
		// in the setjmp-like inline assembly.
		deferFrameType := b.getLLVMType(b.program.ImportedPackage("internal/task").Members["DeferFrame"].Type())
		b.deferFrame = b.CreateAlloca(deferFrameType, "deferframe.buf")
		stackPointer := b.readStackPointer()
		b.createRuntimeCall("setupDeferFrame", []llvm.Value{b.deferFrame, stackPointer}, "")

		// Create the landing pad block, which is where control transfers after
		// a panic.
		b.landingpad = b.ctx.AddBasicBlock(b.llvmFn, "lpad")
	}
}

// createLandingPad fills in the landing pad block. This block runs the deferred
// functions and returns (by jumping to the recover block). If the function is
// still panicking after the defers are run, the panic will be re-raised in
// destroyDeferFrame.
func (b *builder) createLandingPad() {
	b.SetInsertPointAtEnd(b.landingpad)

	// Add debug info, if needed.
	// The location used is the closing bracket of the function.
	if b.Debug {
		pos := b.program.Fset.Position(b.fn.Syntax().End())
		b.SetCurrentDebugLocation(uint(pos.Line), uint(pos.Column), b.difunc, llvm.Metadata{})
	}

	b.createRunDefers()

	// Continue at the 'recover' block, which returns to the parent in an
	// appropriate way.
	b.CreateBr(b.blockEntries[b.fn.Recover])
}

// createInvokeCheckpoint saves the function state at the given point, to
// continue at the landing pad if a panic happened. This is implemented using a
// setjmp-like construct.
func (b *builder) createInvokeCheckpoint() {
	// Construct inline assembly equivalents of setjmp.
	// The assembly works as follows:
	//   * All registers (both callee-saved and caller saved) are clobbered
	//     after the inline assembly returns.
	//   * The assembly stores the address just past the end of the assembly
	//     into the jump buffer.
	//   * The return value (eax, rax, r0, etc) is set to zero in the inline
	//     assembly but set to an unspecified non-zero value when jumping using
	//     a longjmp.
	asmType := llvm.FunctionType(b.uintptrType, []llvm.Type{b.deferFrame.Type()}, false)
	var asmString, constraints string
	switch b.archFamily() {
	case "i386":
		asmString = `
xorl %eax, %eax
movl $$1f, 4(%ebx)
1:`
constraints = "={eax},{ebx},~{ebx},~{ecx},~{edx},~{esi},~{edi},~{ebp},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{fpsr},~{fpcr},~{flags},~{dirflag},~{memory}"
		// This doesn't include the floating point stack because TinyGo uses
		// newer floating point instructions.
	case "x86_64":
		asmString = `
leaq 1f(%rip), %rax
movq %rax, 8(%rbx)
xorq %rax, %rax
1:`
constraints = "={rax},{rbx},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{fpsr},~{fpcr},~{flags},~{dirflag},~{memory}"
		// This list doesn't include AVX/AVX512 registers because TinyGo
		// doesn't currently enable support for AVX instructions.
	case "arm":
		// Note: the following assembly takes into account that the PC is
		// always 4 bytes ahead on ARM. The PC that is stored always points
		// to the instruction just after the assembly fragment so that
		// tinygo_longjmp lands at the correct instruction.
		if b.isThumb() {
			// Instructions are 2 bytes in size.
			asmString = `
movs r0, #0
mov r2, pc
str r2, [r1, #4]`
		} else {
			// Instructions are 4 bytes in size.
			asmString = `
str pc, [r1, #4]
movs r0, #0`
		}
constraints = "={r0},{r1},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{lr},~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15},~{cpsr},~{memory}"
	case "aarch64":
		asmString = `
adr x2, 1f
str x2, [x1, #8]
mov x0, #0
1:
`
constraints = "={x0},{x1},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{fp},~{lr},~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15},~{q16},~{q17},~{q18},~{q19},~{q20},~{q21},~{q22},~{q23},~{q24},~{q25},~{q26},~{q27},~{q28},~{q29},~{q30},~{nzcv},~{ffr},~{vg},~{memory}"
		// TODO: SVE registers, which we don't use in TinyGo at the moment.
	case "riscv32":
		asmString = `
la a2, 1f
sw a2, 4(a1)
li a0, 0
1:`
constraints = "={a0},{a1},~{a1},~{a2},~{a3},~{a4},~{a5},~{a6},~{a7},~{s0},~{s1},~{s2},~{s3},~{s4},~{s5},~{s6},~{s7},~{s8},~{s9},~{s10},~{s11},~{t0},~{t1},~{t2},~{t3},~{t4},~{t5},~{t6},~{ra},~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31},~{memory}"
	default:
		// This case should have been handled by b.supportsRecover().
		b.addError(b.fn.Pos(), "unknown architecture for defer: "+b.archFamily())
	}
	asm := llvm.InlineAsm(asmType, asmString, constraints, false, false, 0, false)
	result := b.CreateCall(asm, []llvm.Value{b.deferFrame}, "setjmp")
	result.AddCallSiteAttribute(-1, b.ctx.CreateEnumAttribute(llvm.AttributeKindID("returns_twice"), 0))
	isZero := b.CreateICmp(llvm.IntEQ, result, llvm.ConstInt(b.uintptrType, 0, false), "setjmp.result")
	continueBB := b.insertBasicBlock("")
	b.CreateCondBr(isZero, continueBB, b.landingpad)
	b.SetInsertPointAtEnd(continueBB)
	b.blockExits[b.currentBlock] = continueBB
}

// isInLoop checks if there is a path from a basic block to itself.
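The net effect of the checkpoint, the landing pad, and destroyDeferFrame is ordinary Go propagation: deferred calls run even when nothing recovers, and the panic is then re-raised into the caller. A small illustration in plain Go (nothing here is specific to this diff):

	package main

	func work() {
		defer println("cleanup still runs") // executed from the landing-pad path
		panic("boom")                       // re-raised after work's defers finish
	}

	func main() {
		defer func() {
			if r := recover(); r != nil {
				println("caught in main:", r.(string))
			}
		}()
		work()
	}
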
@@ -202,30 +347,31 @@ func (b *builder) createDefer(instr *ssa.Defer) {
		}
	}

	// Make a struct out of the collected values to put in the defer frame.
	deferFrameType := b.ctx.StructType(valueTypes, false)
	deferFrame := llvm.ConstNull(deferFrameType)
	// Make a struct out of the collected values to put in the deferred call
	// struct.
	deferredCallType := b.ctx.StructType(valueTypes, false)
	deferredCall := llvm.ConstNull(deferredCallType)
	for i, value := range values {
		deferFrame = b.CreateInsertValue(deferFrame, value, i, "")
		deferredCall = b.CreateInsertValue(deferredCall, value, i, "")
	}

	// Put this struct in an allocation.
	var alloca llvm.Value
	if !isInLoop(instr.Block()) {
		// This can safely use a stack allocation.
		alloca = llvmutil.CreateEntryBlockAlloca(b.Builder, deferFrameType, "defer.alloca")
		alloca = llvmutil.CreateEntryBlockAlloca(b.Builder, deferredCallType, "defer.alloca")
	} else {
		// This may be hit a variable number of times, so use a heap allocation.
		size := b.targetData.TypeAllocSize(deferFrameType)
		size := b.targetData.TypeAllocSize(deferredCallType)
		sizeValue := llvm.ConstInt(b.uintptrType, size, false)
		nilPtr := llvm.ConstNull(b.i8ptrType)
		allocCall := b.createRuntimeCall("alloc", []llvm.Value{sizeValue, nilPtr}, "defer.alloc.call")
		alloca = b.CreateBitCast(allocCall, llvm.PointerType(deferFrameType, 0), "defer.alloc")
		alloca = b.CreateBitCast(allocCall, llvm.PointerType(deferredCallType, 0), "defer.alloc")
	}
	if b.NeedsStackObjects {
		b.trackPointer(alloca)
	}
	b.CreateStore(deferFrame, alloca)
	b.CreateStore(deferredCall, alloca)

	// Push it on top of the linked list by replacing deferPtr.
	allocaCast := b.CreateBitCast(alloca, next.Type(), "defer.alloca.cast")
@@ -296,7 +442,7 @@ func (b *builder) createRunDefers() {
		valueTypes := []llvm.Type{b.uintptrType, llvm.PointerType(b.getLLVMRuntimeType("_defer"), 0)}

		if !callback.IsInvoke() {
			//Expect funcValue to be passed through the defer frame.
			//Expect funcValue to be passed through the deferred call.
			valueTypes = append(valueTypes, b.getFuncType(callback.Signature()))
		} else {
			//Expect typecode
@@ -307,14 +453,14 @@ func (b *builder) createRunDefers() {
			valueTypes = append(valueTypes, b.getLLVMType(arg.Type()))
		}

		deferFrameType := b.ctx.StructType(valueTypes, false)
		deferFramePtr := b.CreateBitCast(deferData, llvm.PointerType(deferFrameType, 0), "deferFrame")
		deferredCallType := b.ctx.StructType(valueTypes, false)
		deferredCallPtr := b.CreateBitCast(deferData, llvm.PointerType(deferredCallType, 0), "defercall")

		// Extract the params from the struct (including receiver).
		forwardParams := []llvm.Value{}
		zero := llvm.ConstInt(b.ctx.Int32Type(), 0, false)
		for i := 2; i < len(valueTypes); i++ {
			gep := b.CreateInBoundsGEP(deferFramePtr, []llvm.Value{zero, llvm.ConstInt(b.ctx.Int32Type(), uint64(i), false)}, "gep")
			gep := b.CreateInBoundsGEP(deferredCallPtr, []llvm.Value{zero, llvm.ConstInt(b.ctx.Int32Type(), uint64(i), false)}, "gep")
			forwardParam := b.CreateLoad(gep, "param")
			forwardParams = append(forwardParams, forwardParam)
		}
@@ -354,14 +500,14 @@ func (b *builder) createRunDefers() {
		for _, param := range getParams(callback.Signature) {
			valueTypes = append(valueTypes, b.getLLVMType(param.Type()))
		}
		deferFrameType := b.ctx.StructType(valueTypes, false)
		deferFramePtr := b.CreateBitCast(deferData, llvm.PointerType(deferFrameType, 0), "deferFrame")
		deferredCallType := b.ctx.StructType(valueTypes, false)
		deferredCallPtr := b.CreateBitCast(deferData, llvm.PointerType(deferredCallType, 0), "defercall")

		// Extract the params from the struct.
		forwardParams := []llvm.Value{}
		zero := llvm.ConstInt(b.ctx.Int32Type(), 0, false)
		for i := range getParams(callback.Signature) {
			gep := b.CreateInBoundsGEP(deferFramePtr, []llvm.Value{zero, llvm.ConstInt(b.ctx.Int32Type(), uint64(i+2), false)}, "gep")
			gep := b.CreateInBoundsGEP(deferredCallPtr, []llvm.Value{zero, llvm.ConstInt(b.ctx.Int32Type(), uint64(i+2), false)}, "gep")
			forwardParam := b.CreateLoad(gep, "param")
			forwardParams = append(forwardParams, forwardParam)
		}
@@ -375,7 +521,7 @@ func (b *builder) createRunDefers() {
		}

		// Call real function.
		b.createCall(b.getFunction(callback), forwardParams, "")
		b.createInvoke(b.getFunction(callback), forwardParams, "")

	case *ssa.MakeClosure:
		// Get the real defer struct type and cast to it.
@@ -386,14 +532,14 @@ func (b *builder) createRunDefers() {
			valueTypes = append(valueTypes, b.getLLVMType(params.At(i).Type()))
		}
		valueTypes = append(valueTypes, b.i8ptrType) // closure
		deferFrameType := b.ctx.StructType(valueTypes, false)
		deferFramePtr := b.CreateBitCast(deferData, llvm.PointerType(deferFrameType, 0), "deferFrame")
		deferredCallType := b.ctx.StructType(valueTypes, false)
		deferredCallPtr := b.CreateBitCast(deferData, llvm.PointerType(deferredCallType, 0), "defercall")

		// Extract the params from the struct.
		forwardParams := []llvm.Value{}
		zero := llvm.ConstInt(b.ctx.Int32Type(), 0, false)
		for i := 2; i < len(valueTypes); i++ {
			gep := b.CreateInBoundsGEP(deferFramePtr, []llvm.Value{zero, llvm.ConstInt(b.ctx.Int32Type(), uint64(i), false)}, "")
			gep := b.CreateInBoundsGEP(deferredCallPtr, []llvm.Value{zero, llvm.ConstInt(b.ctx.Int32Type(), uint64(i), false)}, "")
			forwardParam := b.CreateLoad(gep, "param")
			forwardParams = append(forwardParams, forwardParam)
		}
@@ -412,14 +558,14 @@ func (b *builder) createRunDefers() {
			valueTypes = append(valueTypes, b.getLLVMType(params.At(i).Type()))
		}

		deferFrameType := b.ctx.StructType(valueTypes, false)
		deferFramePtr := b.CreateBitCast(deferData, llvm.PointerType(deferFrameType, 0), "deferFrame")
		deferredCallType := b.ctx.StructType(valueTypes, false)
		deferredCallPtr := b.CreateBitCast(deferData, llvm.PointerType(deferredCallType, 0), "defercall")

		// Extract the params from the struct.
		var argValues []llvm.Value
		zero := llvm.ConstInt(b.ctx.Int32Type(), 0, false)
		for i := 0; i < params.Len(); i++ {
			gep := b.CreateInBoundsGEP(deferFramePtr, []llvm.Value{zero, llvm.ConstInt(b.ctx.Int32Type(), uint64(i+2), false)}, "gep")
			gep := b.CreateInBoundsGEP(deferredCallPtr, []llvm.Value{zero, llvm.ConstInt(b.ctx.Int32Type(), uint64(i+2), false)}, "gep")
			forwardParam := b.CreateLoad(gep, "param")
			argValues = append(argValues, forwardParam)
		}

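Taken together, createDefer and createRunDefers pass each deferred call through a small per-call-site struct whose first two fields are fixed. The sketch below spells out that layout; the field names are made up for illustration, and the concrete struct type is built from valueTypes as shown above.

	package main

	import "unsafe"

	// deferredCallRecord is an illustrative Go rendering of one deferred-call
	// struct, matching the { i32, %runtime._defer* } allocas visible in the
	// testdata further down (the callback index is i32 on 32-bit targets).
	type deferredCallRecord struct {
		callback uintptr        // index into the switch emitted by createRunDefers
		next     unsafe.Pointer // *runtime._defer: link to the previously pushed defer
		// captured arguments, receiver, func value or closure pointer follow,
		// one field per value appended to valueTypes above
	}

	func main() { _ = deferredCallRecord{} }
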
@@ -5,6 +5,7 @@ import (
	"go/token"
	"go/types"
	"math/big"
	"strings"

	"github.com/tinygo-org/tinygo/compiler/llvmutil"
	"tinygo.org/x/go-llvm"
@@ -270,3 +271,43 @@ func (c *compilerContext) getPointerBitmap(typ llvm.Type, pos token.Pos) *big.In
		panic("unknown LLVM type")
	}
}

// archFamily returns the archtecture from the LLVM triple but with some
// architecture names ("armv6", "thumbv7m", etc) merged into a single
// architecture name ("arm").
func (c *compilerContext) archFamily() string {
	arch := strings.Split(c.Triple, "-")[0]
	if strings.HasPrefix(arch, "arm") || strings.HasPrefix(arch, "thumb") {
		return "arm"
	}
	return arch
}

// isThumb returns whether we're in ARM or in Thumb mode. It panics if the
// features string is not one for an ARM architecture.
func (c *compilerContext) isThumb() bool {
	var isThumb, isNotThumb bool
	for _, feature := range strings.Split(c.Features, ",") {
		if feature == "+thumb-mode" {
			isThumb = true
		}
		if feature == "-thumb-mode" {
			isNotThumb = true
		}
	}
	if isThumb == isNotThumb {
		panic("unexpected feature flags")
	}
	return isThumb
}

// readStackPointer emits a LLVM intrinsic call that returns the current stack
// pointer as an *i8.
func (b *builder) readStackPointer() llvm.Value {
	stacksave := b.mod.NamedFunction("llvm.stacksave")
	if stacksave.IsNil() {
		fnType := llvm.FunctionType(b.i8ptrType, nil, false)
		stacksave = llvm.AddFunction(b.mod, "llvm.stacksave", fnType)
	}
	return b.CreateCall(stacksave, nil, "")
}

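A quick worked example of these helpers: for the triple "thumbv7m-unknown-unknown-eabi" used by the Cortex-M testdata below, archFamily() returns "arm", and a features string containing "+thumb-mode" makes isThumb() return true, so createInvokeCheckpoint picks the 2-byte Thumb checkpoint sequence. The standalone snippet below copies the archFamily logic for illustration; the second triple is just a common example, not taken from this diff.

	package main

	import "strings"

	func archFamily(triple string) string {
		arch := strings.Split(triple, "-")[0]
		if strings.HasPrefix(arch, "arm") || strings.HasPrefix(arch, "thumb") {
			return "arm"
		}
		return arch
	}

	func main() {
		println(archFamily("thumbv7m-unknown-unknown-eabi")) // prints: arm
		println(archFamily("x86_64-unknown-linux-gnu"))      // prints: x86_64
	}
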
compiler/testdata/channel.ll (4 changes, vendored)
@@ -5,10 +5,12 @@ target triple = "wasm32-unknown-wasi"

%runtime.channel = type { i32, i32, i8, %runtime.channelBlockedList*, i32, i32, i32, i8* }
%runtime.channelBlockedList = type { %runtime.channelBlockedList*, %"internal/task.Task"*, %runtime.chanSelectState*, { %runtime.channelBlockedList*, i32, i32 } }
%"internal/task.Task" = type { %"internal/task.Task"*, i8*, i64, %"internal/task.gcData", %"internal/task.state" }
%"internal/task.Task" = type { %"internal/task.Task"*, i8*, i64, %"internal/task.gcData", %"internal/task.state", %"internal/task.DeferFrame"* }
%"internal/task.gcData" = type { i8* }
%"internal/task.state" = type { i32, i8*, %"internal/task.stackState", i1 }
%"internal/task.stackState" = type { i32, i32 }
%"internal/task.DeferFrame" = type { i8*, i8*, %"internal/task.DeferFrame"*, i1, %runtime._interface }
%runtime._interface = type { i32, i8* }
%runtime.chanSelectState = type { %runtime.channel*, i8* }

declare noalias nonnull i8* @runtime.alloc(i32, i8*, i8*)

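The %"internal/task.DeferFrame" type above is what setupDeferFrame and the setjmp-style checkpoint write into. The Go sketch below renders that IR layout for readability; the field names are guesses for illustration only (the real declaration lives in internal/task and is not part of this excerpt), while the types follow the IR.

	package main

	import "unsafe"

	type DeferFrame struct {
		JumpSP     unsafe.Pointer // i8*: stack pointer saved by runtime.setupDeferFrame
		JumpPC     unsafe.Pointer // i8*: resume address written by the checkpoint asm
		Previous   *DeferFrame    // link to the caller's frame, if any
		Panicking  bool           // i1: whether a panic is currently in flight
		PanicValue interface{}    // %runtime._interface: the value passed to panic
	}

	func main() { _ = DeferFrame{} }
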
compiler/testdata/defer-cortex-m-qemu.ll (286 changes, vendored)
@ -4,94 +4,43 @@ target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
|
|||
target triple = "thumbv7m-unknown-unknown-eabi"
|
||||
|
||||
%runtime._defer = type { i32, %runtime._defer* }
|
||||
%"internal/task.DeferFrame" = type { i8*, i8*, %"internal/task.DeferFrame"*, i1, %runtime._interface }
|
||||
%runtime._interface = type { i32, i8* }
|
||||
|
||||
declare noalias nonnull i8* @runtime.alloc(i32, i8*, i8*)
|
||||
declare noalias nonnull i8* @runtime.alloc(i32, i8*, i8*) #0
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @main.init(i8* %context) unnamed_addr #0 {
|
||||
define hidden void @main.init(i8* %context) unnamed_addr #1 {
|
||||
entry:
|
||||
ret void
|
||||
}
|
||||
|
||||
declare void @main.external(i8*)
|
||||
declare void @main.external(i8*) #0
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @main.deferSimple(i8* %context) unnamed_addr #0 {
|
||||
define hidden void @main.deferSimple(i8* %context) unnamed_addr #1 {
|
||||
entry:
|
||||
%defer.alloca = alloca { i32, %runtime._defer* }, align 4
|
||||
%deferPtr = alloca %runtime._defer*, align 4
|
||||
store %runtime._defer* null, %runtime._defer** %deferPtr, align 4
|
||||
%deferframe.buf = alloca %"internal/task.DeferFrame", align 4
|
||||
%0 = call i8* @llvm.stacksave()
|
||||
call void @runtime.setupDeferFrame(%"internal/task.DeferFrame"* nonnull %deferframe.buf, i8* %0, i8* undef) #3
|
||||
%defer.alloca.repack = getelementptr inbounds { i32, %runtime._defer* }, { i32, %runtime._defer* }* %defer.alloca, i32 0, i32 0
|
||||
store i32 0, i32* %defer.alloca.repack, align 4
|
||||
%defer.alloca.repack1 = getelementptr inbounds { i32, %runtime._defer* }, { i32, %runtime._defer* }* %defer.alloca, i32 0, i32 1
|
||||
store %runtime._defer* null, %runtime._defer** %defer.alloca.repack1, align 4
|
||||
%0 = bitcast %runtime._defer** %deferPtr to { i32, %runtime._defer* }**
|
||||
store { i32, %runtime._defer* }* %defer.alloca, { i32, %runtime._defer* }** %0, align 4
|
||||
call void @main.external(i8* undef) #0
|
||||
br label %rundefers.loophead
|
||||
|
||||
rundefers.loophead: ; preds = %rundefers.callback0, %entry
|
||||
%1 = load %runtime._defer*, %runtime._defer** %deferPtr, align 4
|
||||
%stackIsNil = icmp eq %runtime._defer* %1, null
|
||||
br i1 %stackIsNil, label %rundefers.end, label %rundefers.loop
|
||||
|
||||
rundefers.loop: ; preds = %rundefers.loophead
|
||||
%stack.next.gep = getelementptr inbounds %runtime._defer, %runtime._defer* %1, i32 0, i32 1
|
||||
%stack.next = load %runtime._defer*, %runtime._defer** %stack.next.gep, align 4
|
||||
store %runtime._defer* %stack.next, %runtime._defer** %deferPtr, align 4
|
||||
%callback.gep = getelementptr inbounds %runtime._defer, %runtime._defer* %1, i32 0, i32 0
|
||||
%callback = load i32, i32* %callback.gep, align 4
|
||||
switch i32 %callback, label %rundefers.default [
|
||||
i32 0, label %rundefers.callback0
|
||||
]
|
||||
|
||||
rundefers.callback0: ; preds = %rundefers.loop
|
||||
call void @"main.deferSimple$1"(i8* undef)
|
||||
br label %rundefers.loophead
|
||||
|
||||
rundefers.default: ; preds = %rundefers.loop
|
||||
unreachable
|
||||
|
||||
rundefers.end: ; preds = %rundefers.loophead
|
||||
ret void
|
||||
|
||||
recover: ; No predecessors!
|
||||
ret void
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @"main.deferSimple$1"(i8* %context) unnamed_addr #0 {
|
||||
entry:
|
||||
call void @runtime.printint32(i32 3, i8* undef) #0
|
||||
ret void
|
||||
}
|
||||
|
||||
declare void @runtime.printint32(i32, i8*)
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @main.deferMultiple(i8* %context) unnamed_addr #0 {
|
||||
entry:
|
||||
%defer.alloca2 = alloca { i32, %runtime._defer* }, align 4
|
||||
%defer.alloca = alloca { i32, %runtime._defer* }, align 4
|
||||
%deferPtr = alloca %runtime._defer*, align 4
|
||||
store %runtime._defer* null, %runtime._defer** %deferPtr, align 4
|
||||
%defer.alloca.repack = getelementptr inbounds { i32, %runtime._defer* }, { i32, %runtime._defer* }* %defer.alloca, i32 0, i32 0
|
||||
store i32 0, i32* %defer.alloca.repack, align 4
|
||||
%defer.alloca.repack5 = getelementptr inbounds { i32, %runtime._defer* }, { i32, %runtime._defer* }* %defer.alloca, i32 0, i32 1
|
||||
store %runtime._defer* null, %runtime._defer** %defer.alloca.repack5, align 4
|
||||
%0 = bitcast %runtime._defer** %deferPtr to { i32, %runtime._defer* }**
|
||||
store { i32, %runtime._defer* }* %defer.alloca, { i32, %runtime._defer* }** %0, align 4
|
||||
%defer.alloca2.repack = getelementptr inbounds { i32, %runtime._defer* }, { i32, %runtime._defer* }* %defer.alloca2, i32 0, i32 0
|
||||
store i32 1, i32* %defer.alloca2.repack, align 4
|
||||
%defer.alloca2.repack6 = getelementptr inbounds { i32, %runtime._defer* }, { i32, %runtime._defer* }* %defer.alloca2, i32 0, i32 1
|
||||
%1 = bitcast %runtime._defer** %defer.alloca2.repack6 to { i32, %runtime._defer* }**
|
||||
%defer.alloca.repack16 = getelementptr inbounds { i32, %runtime._defer* }, { i32, %runtime._defer* }* %defer.alloca, i32 0, i32 1
|
||||
store %runtime._defer* null, %runtime._defer** %defer.alloca.repack16, align 4
|
||||
%1 = bitcast %runtime._defer** %deferPtr to { i32, %runtime._defer* }**
|
||||
store { i32, %runtime._defer* }* %defer.alloca, { i32, %runtime._defer* }** %1, align 4
|
||||
%2 = bitcast %runtime._defer** %deferPtr to { i32, %runtime._defer* }**
|
||||
store { i32, %runtime._defer* }* %defer.alloca2, { i32, %runtime._defer* }** %2, align 4
|
||||
call void @main.external(i8* undef) #0
|
||||
%setjmp = call i32 asm "\0Amovs r0, #0\0Amov r2, pc\0Astr r2, [r1, #4]", "={r0},{r1},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{lr},~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15},~{cpsr},~{memory}"(%"internal/task.DeferFrame"* nonnull %deferframe.buf) #4
|
||||
%setjmp.result = icmp eq i32 %setjmp, 0
|
||||
br i1 %setjmp.result, label %2, label %lpad
|
||||
|
||||
2: ; preds = %entry
|
||||
call void @main.external(i8* undef) #3
|
||||
br label %rundefers.loophead
|
||||
|
||||
rundefers.loophead: ; preds = %rundefers.callback1, %rundefers.callback0, %entry
|
||||
rundefers.loophead: ; preds = %4, %2
|
||||
%3 = load %runtime._defer*, %runtime._defer** %deferPtr, align 4
|
||||
%stackIsNil = icmp eq %runtime._defer* %3, null
|
||||
br i1 %stackIsNil, label %rundefers.end, label %rundefers.loop
|
||||
|
@ -102,16 +51,142 @@ rundefers.loop: ; preds = %rundefers.loophead
|
|||
store %runtime._defer* %stack.next, %runtime._defer** %deferPtr, align 4
|
||||
%callback.gep = getelementptr inbounds %runtime._defer, %runtime._defer* %3, i32 0, i32 0
|
||||
%callback = load i32, i32* %callback.gep, align 4
|
||||
switch i32 %callback, label %rundefers.default [
|
||||
i32 0, label %rundefers.callback0
|
||||
]
|
||||
|
||||
rundefers.callback0: ; preds = %rundefers.loop
|
||||
%setjmp1 = call i32 asm "\0Amovs r0, #0\0Amov r2, pc\0Astr r2, [r1, #4]", "={r0},{r1},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{lr},~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15},~{cpsr},~{memory}"(%"internal/task.DeferFrame"* nonnull %deferframe.buf) #4
|
||||
%setjmp.result2 = icmp eq i32 %setjmp1, 0
|
||||
br i1 %setjmp.result2, label %4, label %lpad
|
||||
|
||||
4: ; preds = %rundefers.callback0
|
||||
call void @"main.deferSimple$1"(i8* undef)
|
||||
br label %rundefers.loophead
|
||||
|
||||
rundefers.default: ; preds = %rundefers.loop
|
||||
unreachable
|
||||
|
||||
rundefers.end: ; preds = %rundefers.loophead
|
||||
call void @runtime.destroyDeferFrame(%"internal/task.DeferFrame"* nonnull %deferframe.buf, i8* undef) #3
|
||||
ret void
|
||||
|
||||
recover: ; preds = %rundefers.end3
|
||||
call void @runtime.destroyDeferFrame(%"internal/task.DeferFrame"* nonnull %deferframe.buf, i8* undef) #3
|
||||
ret void
|
||||
|
||||
lpad: ; preds = %rundefers.callback012, %rundefers.callback0, %entry
|
||||
br label %rundefers.loophead6
|
||||
|
||||
rundefers.loophead6: ; preds = %6, %lpad
|
||||
%5 = load %runtime._defer*, %runtime._defer** %deferPtr, align 4
|
||||
%stackIsNil7 = icmp eq %runtime._defer* %5, null
|
||||
br i1 %stackIsNil7, label %rundefers.end3, label %rundefers.loop5
|
||||
|
||||
rundefers.loop5: ; preds = %rundefers.loophead6
|
||||
%stack.next.gep8 = getelementptr inbounds %runtime._defer, %runtime._defer* %5, i32 0, i32 1
|
||||
%stack.next9 = load %runtime._defer*, %runtime._defer** %stack.next.gep8, align 4
|
||||
store %runtime._defer* %stack.next9, %runtime._defer** %deferPtr, align 4
|
||||
%callback.gep10 = getelementptr inbounds %runtime._defer, %runtime._defer* %5, i32 0, i32 0
|
||||
%callback11 = load i32, i32* %callback.gep10, align 4
|
||||
switch i32 %callback11, label %rundefers.default4 [
|
||||
i32 0, label %rundefers.callback012
|
||||
]
|
||||
|
||||
rundefers.callback012: ; preds = %rundefers.loop5
|
||||
%setjmp14 = call i32 asm "\0Amovs r0, #0\0Amov r2, pc\0Astr r2, [r1, #4]", "={r0},{r1},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{lr},~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15},~{cpsr},~{memory}"(%"internal/task.DeferFrame"* nonnull %deferframe.buf) #4
|
||||
%setjmp.result15 = icmp eq i32 %setjmp14, 0
|
||||
br i1 %setjmp.result15, label %6, label %lpad
|
||||
|
||||
6: ; preds = %rundefers.callback012
|
||||
call void @"main.deferSimple$1"(i8* undef)
|
||||
br label %rundefers.loophead6
|
||||
|
||||
rundefers.default4: ; preds = %rundefers.loop5
|
||||
unreachable
|
||||
|
||||
rundefers.end3: ; preds = %rundefers.loophead6
|
||||
br label %recover
|
||||
}
|
||||
|
||||
; Function Attrs: nofree nosync nounwind willreturn
|
||||
declare i8* @llvm.stacksave() #2
|
||||
|
||||
declare void @runtime.setupDeferFrame(%"internal/task.DeferFrame"* dereferenceable_or_null(24), i8*, i8*) #0
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @"main.deferSimple$1"(i8* %context) unnamed_addr #1 {
|
||||
entry:
|
||||
call void @runtime.printint32(i32 3, i8* undef) #3
|
||||
ret void
|
||||
}
|
||||
|
||||
declare void @runtime.destroyDeferFrame(%"internal/task.DeferFrame"* dereferenceable_or_null(24), i8*) #0
|
||||
|
||||
declare void @runtime.printint32(i32, i8*) #0
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @main.deferMultiple(i8* %context) unnamed_addr #1 {
|
||||
entry:
|
||||
%defer.alloca2 = alloca { i32, %runtime._defer* }, align 4
|
||||
%defer.alloca = alloca { i32, %runtime._defer* }, align 4
|
||||
%deferPtr = alloca %runtime._defer*, align 4
|
||||
store %runtime._defer* null, %runtime._defer** %deferPtr, align 4
|
||||
%deferframe.buf = alloca %"internal/task.DeferFrame", align 4
|
||||
%0 = call i8* @llvm.stacksave()
|
||||
call void @runtime.setupDeferFrame(%"internal/task.DeferFrame"* nonnull %deferframe.buf, i8* %0, i8* undef) #3
|
||||
%defer.alloca.repack = getelementptr inbounds { i32, %runtime._defer* }, { i32, %runtime._defer* }* %defer.alloca, i32 0, i32 0
|
||||
store i32 0, i32* %defer.alloca.repack, align 4
|
||||
%defer.alloca.repack26 = getelementptr inbounds { i32, %runtime._defer* }, { i32, %runtime._defer* }* %defer.alloca, i32 0, i32 1
|
||||
store %runtime._defer* null, %runtime._defer** %defer.alloca.repack26, align 4
|
||||
%1 = bitcast %runtime._defer** %deferPtr to { i32, %runtime._defer* }**
|
||||
store { i32, %runtime._defer* }* %defer.alloca, { i32, %runtime._defer* }** %1, align 4
|
||||
%defer.alloca2.repack = getelementptr inbounds { i32, %runtime._defer* }, { i32, %runtime._defer* }* %defer.alloca2, i32 0, i32 0
|
||||
store i32 1, i32* %defer.alloca2.repack, align 4
|
||||
%defer.alloca2.repack27 = getelementptr inbounds { i32, %runtime._defer* }, { i32, %runtime._defer* }* %defer.alloca2, i32 0, i32 1
|
||||
%2 = bitcast %runtime._defer** %defer.alloca2.repack27 to { i32, %runtime._defer* }**
|
||||
store { i32, %runtime._defer* }* %defer.alloca, { i32, %runtime._defer* }** %2, align 4
|
||||
%3 = bitcast %runtime._defer** %deferPtr to { i32, %runtime._defer* }**
|
||||
store { i32, %runtime._defer* }* %defer.alloca2, { i32, %runtime._defer* }** %3, align 4
|
||||
%setjmp = call i32 asm "\0Amovs r0, #0\0Amov r2, pc\0Astr r2, [r1, #4]", "={r0},{r1},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{lr},~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15},~{cpsr},~{memory}"(%"internal/task.DeferFrame"* nonnull %deferframe.buf) #4
|
||||
%setjmp.result = icmp eq i32 %setjmp, 0
|
||||
br i1 %setjmp.result, label %4, label %lpad
|
||||
|
||||
4: ; preds = %entry
|
||||
call void @main.external(i8* undef) #3
|
||||
br label %rundefers.loophead
|
||||
|
||||
rundefers.loophead: ; preds = %7, %6, %4
|
||||
%5 = load %runtime._defer*, %runtime._defer** %deferPtr, align 4
|
||||
%stackIsNil = icmp eq %runtime._defer* %5, null
|
||||
br i1 %stackIsNil, label %rundefers.end, label %rundefers.loop
|
||||
|
||||
rundefers.loop: ; preds = %rundefers.loophead
|
||||
%stack.next.gep = getelementptr inbounds %runtime._defer, %runtime._defer* %5, i32 0, i32 1
|
||||
%stack.next = load %runtime._defer*, %runtime._defer** %stack.next.gep, align 4
|
||||
store %runtime._defer* %stack.next, %runtime._defer** %deferPtr, align 4
|
||||
%callback.gep = getelementptr inbounds %runtime._defer, %runtime._defer* %5, i32 0, i32 0
|
||||
%callback = load i32, i32* %callback.gep, align 4
|
||||
switch i32 %callback, label %rundefers.default [
|
||||
i32 0, label %rundefers.callback0
|
||||
i32 1, label %rundefers.callback1
|
||||
]
|
||||
|
||||
rundefers.callback0: ; preds = %rundefers.loop
|
||||
%setjmp4 = call i32 asm "\0Amovs r0, #0\0Amov r2, pc\0Astr r2, [r1, #4]", "={r0},{r1},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{lr},~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15},~{cpsr},~{memory}"(%"internal/task.DeferFrame"* nonnull %deferframe.buf) #4
|
||||
%setjmp.result5 = icmp eq i32 %setjmp4, 0
|
||||
br i1 %setjmp.result5, label %6, label %lpad
|
||||
|
||||
6: ; preds = %rundefers.callback0
|
||||
call void @"main.deferMultiple$1"(i8* undef)
|
||||
br label %rundefers.loophead
|
||||
|
||||
rundefers.callback1: ; preds = %rundefers.loop
|
||||
%setjmp7 = call i32 asm "\0Amovs r0, #0\0Amov r2, pc\0Astr r2, [r1, #4]", "={r0},{r1},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{lr},~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15},~{cpsr},~{memory}"(%"internal/task.DeferFrame"* nonnull %deferframe.buf) #4
|
||||
%setjmp.result8 = icmp eq i32 %setjmp7, 0
|
||||
br i1 %setjmp.result8, label %7, label %lpad
|
||||
|
||||
7: ; preds = %rundefers.callback1
|
||||
call void @"main.deferMultiple$2"(i8* undef)
|
||||
br label %rundefers.loophead
|
||||
|
||||
|
@ -119,24 +194,73 @@ rundefers.default: ; preds = %rundefers.loop
|
|||
unreachable
|
||||
|
||||
rundefers.end: ; preds = %rundefers.loophead
|
||||
call void @runtime.destroyDeferFrame(%"internal/task.DeferFrame"* nonnull %deferframe.buf, i8* undef) #3
|
||||
ret void
|
||||
|
||||
recover: ; No predecessors!
|
||||
recover: ; preds = %rundefers.end9
|
||||
call void @runtime.destroyDeferFrame(%"internal/task.DeferFrame"* nonnull %deferframe.buf, i8* undef) #3
|
||||
ret void
|
||||
|
||||
lpad: ; preds = %rundefers.callback122, %rundefers.callback018, %rundefers.callback1, %rundefers.callback0, %entry
|
||||
br label %rundefers.loophead12
|
||||
|
||||
rundefers.loophead12: ; preds = %10, %9, %lpad
|
||||
%8 = load %runtime._defer*, %runtime._defer** %deferPtr, align 4
|
||||
%stackIsNil13 = icmp eq %runtime._defer* %8, null
|
||||
br i1 %stackIsNil13, label %rundefers.end9, label %rundefers.loop11
|
||||
|
||||
rundefers.loop11: ; preds = %rundefers.loophead12
|
||||
%stack.next.gep14 = getelementptr inbounds %runtime._defer, %runtime._defer* %8, i32 0, i32 1
|
||||
%stack.next15 = load %runtime._defer*, %runtime._defer** %stack.next.gep14, align 4
|
||||
store %runtime._defer* %stack.next15, %runtime._defer** %deferPtr, align 4
|
||||
%callback.gep16 = getelementptr inbounds %runtime._defer, %runtime._defer* %8, i32 0, i32 0
|
||||
%callback17 = load i32, i32* %callback.gep16, align 4
|
||||
switch i32 %callback17, label %rundefers.default10 [
|
||||
i32 0, label %rundefers.callback018
|
||||
i32 1, label %rundefers.callback122
|
||||
]
|
||||
|
||||
rundefers.callback018: ; preds = %rundefers.loop11
|
||||
%setjmp20 = call i32 asm "\0Amovs r0, #0\0Amov r2, pc\0Astr r2, [r1, #4]", "={r0},{r1},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{lr},~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15},~{cpsr},~{memory}"(%"internal/task.DeferFrame"* nonnull %deferframe.buf) #4
|
||||
%setjmp.result21 = icmp eq i32 %setjmp20, 0
|
||||
br i1 %setjmp.result21, label %9, label %lpad
|
||||
|
||||
9: ; preds = %rundefers.callback018
|
||||
call void @"main.deferMultiple$1"(i8* undef)
|
||||
br label %rundefers.loophead12
|
||||
|
||||
rundefers.callback122: ; preds = %rundefers.loop11
|
||||
%setjmp24 = call i32 asm "\0Amovs r0, #0\0Amov r2, pc\0Astr r2, [r1, #4]", "={r0},{r1},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{lr},~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15},~{cpsr},~{memory}"(%"internal/task.DeferFrame"* nonnull %deferframe.buf) #4
|
||||
%setjmp.result25 = icmp eq i32 %setjmp24, 0
|
||||
br i1 %setjmp.result25, label %10, label %lpad
|
||||
|
||||
10: ; preds = %rundefers.callback122
|
||||
call void @"main.deferMultiple$2"(i8* undef)
|
||||
br label %rundefers.loophead12
|
||||
|
||||
rundefers.default10: ; preds = %rundefers.loop11
|
||||
unreachable
|
||||
|
||||
rundefers.end9: ; preds = %rundefers.loophead12
|
||||
br label %recover
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @"main.deferMultiple$1"(i8* %context) unnamed_addr #1 {
|
||||
entry:
|
||||
call void @runtime.printint32(i32 3, i8* undef) #3
|
||||
ret void
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @"main.deferMultiple$1"(i8* %context) unnamed_addr #0 {
|
||||
define hidden void @"main.deferMultiple$2"(i8* %context) unnamed_addr #1 {
|
||||
entry:
|
||||
call void @runtime.printint32(i32 3, i8* undef) #0
|
||||
call void @runtime.printint32(i32 5, i8* undef) #3
|
||||
ret void
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @"main.deferMultiple$2"(i8* %context) unnamed_addr #0 {
|
||||
entry:
|
||||
call void @runtime.printint32(i32 5, i8* undef) #0
|
||||
ret void
|
||||
}
|
||||
|
||||
attributes #0 = { nounwind }
|
||||
attributes #0 = { "target-features"="+armv7-m,+hwdiv,+soft-float,+strict-align,+thumb-mode,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-dsp,-fp-armv8,-fp-armv8d16,-fp-armv8d16sp,-fp-armv8sp,-fp16,-fp16fml,-fp64,-fpregs,-fullfp16,-hwdiv-arm,-i8mm,-lob,-mve,-mve.fp,-neon,-pacbti,-ras,-sb,-sha2,-vfp2,-vfp2sp,-vfp3,-vfp3d16,-vfp3d16sp,-vfp3sp,-vfp4,-vfp4d16,-vfp4d16sp,-vfp4sp" }
|
||||
attributes #1 = { nounwind "target-features"="+armv7-m,+hwdiv,+soft-float,+strict-align,+thumb-mode,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-dsp,-fp-armv8,-fp-armv8d16,-fp-armv8d16sp,-fp-armv8sp,-fp16,-fp16fml,-fp64,-fpregs,-fullfp16,-hwdiv-arm,-i8mm,-lob,-mve,-mve.fp,-neon,-pacbti,-ras,-sb,-sha2,-vfp2,-vfp2sp,-vfp3,-vfp3d16,-vfp3d16sp,-vfp3sp,-vfp4,-vfp4d16,-vfp4d16sp,-vfp4sp" }
|
||||
attributes #2 = { nofree nosync nounwind willreturn }
|
||||
attributes #3 = { nounwind }
|
||||
attributes #4 = { nounwind returns_twice }
|
||||
|
|
compiler/testdata/goroutine-cortex-m-qemu-tasks.ll (108 changes, vendored)
@ -5,59 +5,61 @@ target triple = "thumbv7m-unknown-unknown-eabi"
|
|||
|
||||
%runtime.channel = type { i32, i32, i8, %runtime.channelBlockedList*, i32, i32, i32, i8* }
|
||||
%runtime.channelBlockedList = type { %runtime.channelBlockedList*, %"internal/task.Task"*, %runtime.chanSelectState*, { %runtime.channelBlockedList*, i32, i32 } }
|
||||
%"internal/task.Task" = type { %"internal/task.Task"*, i8*, i64, %"internal/task.gcData", %"internal/task.state" }
|
||||
%"internal/task.Task" = type { %"internal/task.Task"*, i8*, i64, %"internal/task.gcData", %"internal/task.state", %"internal/task.DeferFrame"* }
|
||||
%"internal/task.gcData" = type {}
|
||||
%"internal/task.state" = type { i32, i32* }
|
||||
%"internal/task.DeferFrame" = type { i8*, i8*, %"internal/task.DeferFrame"*, i1, %runtime._interface }
|
||||
%runtime._interface = type { i32, i8* }
|
||||
%runtime.chanSelectState = type { %runtime.channel*, i8* }
|
||||
|
||||
@"main$string" = internal unnamed_addr constant [4 x i8] c"test", align 1
|
||||
|
||||
declare noalias nonnull i8* @runtime.alloc(i32, i8*, i8*)
|
||||
declare noalias nonnull i8* @runtime.alloc(i32, i8*, i8*) #0
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @main.init(i8* %context) unnamed_addr #0 {
|
||||
define hidden void @main.init(i8* %context) unnamed_addr #1 {
|
||||
entry:
|
||||
ret void
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @main.regularFunctionGoroutine(i8* %context) unnamed_addr #0 {
|
||||
define hidden void @main.regularFunctionGoroutine(i8* %context) unnamed_addr #1 {
|
||||
entry:
|
||||
%stacksize = call i32 @"internal/task.getGoroutineStackSize"(i32 ptrtoint (void (i8*)* @"main.regularFunction$gowrapper" to i32), i8* undef) #0
|
||||
call void @"internal/task.start"(i32 ptrtoint (void (i8*)* @"main.regularFunction$gowrapper" to i32), i8* nonnull inttoptr (i32 5 to i8*), i32 %stacksize, i8* undef) #0
|
||||
%stacksize = call i32 @"internal/task.getGoroutineStackSize"(i32 ptrtoint (void (i8*)* @"main.regularFunction$gowrapper" to i32), i8* undef) #8
|
||||
call void @"internal/task.start"(i32 ptrtoint (void (i8*)* @"main.regularFunction$gowrapper" to i32), i8* nonnull inttoptr (i32 5 to i8*), i32 %stacksize, i8* undef) #8
|
||||
ret void
|
||||
}
|
||||
|
||||
declare void @main.regularFunction(i32, i8*)
|
||||
declare void @main.regularFunction(i32, i8*) #0
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define linkonce_odr void @"main.regularFunction$gowrapper"(i8* %0) unnamed_addr #1 {
|
||||
define linkonce_odr void @"main.regularFunction$gowrapper"(i8* %0) unnamed_addr #2 {
|
||||
entry:
|
||||
%unpack.int = ptrtoint i8* %0 to i32
|
||||
call void @main.regularFunction(i32 %unpack.int, i8* undef) #0
|
||||
call void @main.regularFunction(i32 %unpack.int, i8* undef) #8
|
||||
ret void
|
||||
}
|
||||
|
||||
declare i32 @"internal/task.getGoroutineStackSize"(i32, i8*)
|
||||
declare i32 @"internal/task.getGoroutineStackSize"(i32, i8*) #0
|
||||
|
||||
declare void @"internal/task.start"(i32, i8*, i32, i8*)
|
||||
declare void @"internal/task.start"(i32, i8*, i32, i8*) #0
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @main.inlineFunctionGoroutine(i8* %context) unnamed_addr #0 {
|
||||
define hidden void @main.inlineFunctionGoroutine(i8* %context) unnamed_addr #1 {
|
||||
entry:
|
||||
%stacksize = call i32 @"internal/task.getGoroutineStackSize"(i32 ptrtoint (void (i8*)* @"main.inlineFunctionGoroutine$1$gowrapper" to i32), i8* undef) #0
|
||||
call void @"internal/task.start"(i32 ptrtoint (void (i8*)* @"main.inlineFunctionGoroutine$1$gowrapper" to i32), i8* nonnull inttoptr (i32 5 to i8*), i32 %stacksize, i8* undef) #0
|
||||
%stacksize = call i32 @"internal/task.getGoroutineStackSize"(i32 ptrtoint (void (i8*)* @"main.inlineFunctionGoroutine$1$gowrapper" to i32), i8* undef) #8
|
||||
call void @"internal/task.start"(i32 ptrtoint (void (i8*)* @"main.inlineFunctionGoroutine$1$gowrapper" to i32), i8* nonnull inttoptr (i32 5 to i8*), i32 %stacksize, i8* undef) #8
|
||||
ret void
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @"main.inlineFunctionGoroutine$1"(i32 %x, i8* %context) unnamed_addr #0 {
|
||||
define hidden void @"main.inlineFunctionGoroutine$1"(i32 %x, i8* %context) unnamed_addr #1 {
|
||||
entry:
|
||||
ret void
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define linkonce_odr void @"main.inlineFunctionGoroutine$1$gowrapper"(i8* %0) unnamed_addr #2 {
|
||||
define linkonce_odr void @"main.inlineFunctionGoroutine$1$gowrapper"(i8* %0) unnamed_addr #3 {
|
||||
entry:
|
||||
%unpack.int = ptrtoint i8* %0 to i32
|
||||
call void @"main.inlineFunctionGoroutine$1"(i32 %unpack.int, i8* undef)
|
||||
|
@ -65,26 +67,26 @@ entry:
|
|||
}
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @main.closureFunctionGoroutine(i8* %context) unnamed_addr #0 {
|
||||
define hidden void @main.closureFunctionGoroutine(i8* %context) unnamed_addr #1 {
|
||||
entry:
|
||||
%n = call i8* @runtime.alloc(i32 4, i8* nonnull inttoptr (i32 3 to i8*), i8* undef) #0
|
||||
%n = call i8* @runtime.alloc(i32 4, i8* nonnull inttoptr (i32 3 to i8*), i8* undef) #8
|
||||
%0 = bitcast i8* %n to i32*
|
||||
store i32 3, i32* %0, align 4
|
||||
%1 = call i8* @runtime.alloc(i32 8, i8* null, i8* undef) #0
|
||||
%1 = call i8* @runtime.alloc(i32 8, i8* null, i8* undef) #8
|
||||
%2 = bitcast i8* %1 to i32*
|
||||
store i32 5, i32* %2, align 4
|
||||
%3 = getelementptr inbounds i8, i8* %1, i32 4
|
||||
%4 = bitcast i8* %3 to i8**
|
||||
store i8* %n, i8** %4, align 4
|
||||
%stacksize = call i32 @"internal/task.getGoroutineStackSize"(i32 ptrtoint (void (i8*)* @"main.closureFunctionGoroutine$1$gowrapper" to i32), i8* undef) #0
|
||||
call void @"internal/task.start"(i32 ptrtoint (void (i8*)* @"main.closureFunctionGoroutine$1$gowrapper" to i32), i8* nonnull %1, i32 %stacksize, i8* undef) #0
|
||||
%stacksize = call i32 @"internal/task.getGoroutineStackSize"(i32 ptrtoint (void (i8*)* @"main.closureFunctionGoroutine$1$gowrapper" to i32), i8* undef) #8
|
||||
call void @"internal/task.start"(i32 ptrtoint (void (i8*)* @"main.closureFunctionGoroutine$1$gowrapper" to i32), i8* nonnull %1, i32 %stacksize, i8* undef) #8
|
||||
%5 = load i32, i32* %0, align 4
|
||||
call void @runtime.printint32(i32 %5, i8* undef) #0
|
||||
call void @runtime.printint32(i32 %5, i8* undef) #8
|
||||
ret void
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @"main.closureFunctionGoroutine$1"(i32 %x, i8* %context) unnamed_addr #0 {
|
||||
define hidden void @"main.closureFunctionGoroutine$1"(i32 %x, i8* %context) unnamed_addr #1 {
|
||||
entry:
|
||||
%unpack.ptr = bitcast i8* %context to i32*
|
||||
store i32 7, i32* %unpack.ptr, align 4
|
||||
|
@ -92,7 +94,7 @@ entry:
|
|||
}
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define linkonce_odr void @"main.closureFunctionGoroutine$1$gowrapper"(i8* %0) unnamed_addr #3 {
|
||||
define linkonce_odr void @"main.closureFunctionGoroutine$1$gowrapper"(i8* %0) unnamed_addr #4 {
|
||||
entry:
|
||||
%1 = bitcast i8* %0 to i32*
|
||||
%2 = load i32, i32* %1, align 4
|
||||
|
@ -103,12 +105,12 @@ entry:
|
|||
ret void
|
||||
}
|
||||
|
||||
declare void @runtime.printint32(i32, i8*)
|
||||
declare void @runtime.printint32(i32, i8*) #0
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @main.funcGoroutine(i8* %fn.context, void ()* %fn.funcptr, i8* %context) unnamed_addr #0 {
|
||||
define hidden void @main.funcGoroutine(i8* %fn.context, void ()* %fn.funcptr, i8* %context) unnamed_addr #1 {
|
||||
entry:
|
||||
%0 = call i8* @runtime.alloc(i32 12, i8* null, i8* undef) #0
|
||||
%0 = call i8* @runtime.alloc(i32 12, i8* null, i8* undef) #8
|
||||
%1 = bitcast i8* %0 to i32*
|
||||
store i32 5, i32* %1, align 4
|
||||
%2 = getelementptr inbounds i8, i8* %0, i32 4
|
||||
|
@ -117,13 +119,13 @@ entry:
|
|||
%4 = getelementptr inbounds i8, i8* %0, i32 8
|
||||
%5 = bitcast i8* %4 to void ()**
|
||||
store void ()* %fn.funcptr, void ()** %5, align 4
|
||||
%stacksize = call i32 @"internal/task.getGoroutineStackSize"(i32 ptrtoint (void (i8*)* @main.funcGoroutine.gowrapper to i32), i8* undef) #0
|
||||
call void @"internal/task.start"(i32 ptrtoint (void (i8*)* @main.funcGoroutine.gowrapper to i32), i8* nonnull %0, i32 %stacksize, i8* undef) #0
|
||||
%stacksize = call i32 @"internal/task.getGoroutineStackSize"(i32 ptrtoint (void (i8*)* @main.funcGoroutine.gowrapper to i32), i8* undef) #8
|
||||
call void @"internal/task.start"(i32 ptrtoint (void (i8*)* @main.funcGoroutine.gowrapper to i32), i8* nonnull %0, i32 %stacksize, i8* undef) #8
|
||||
ret void
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define linkonce_odr void @main.funcGoroutine.gowrapper(i8* %0) unnamed_addr #4 {
|
||||
define linkonce_odr void @main.funcGoroutine.gowrapper(i8* %0) unnamed_addr #5 {
|
||||
entry:
|
||||
%1 = bitcast i8* %0 to i32*
|
||||
%2 = load i32, i32* %1, align 4
|
||||
|
@ -133,38 +135,38 @@ entry:
|
|||
%6 = getelementptr inbounds i8, i8* %0, i32 8
|
||||
%7 = bitcast i8* %6 to void (i32, i8*)**
|
||||
%8 = load void (i32, i8*)*, void (i32, i8*)** %7, align 4
|
||||
call void %8(i32 %2, i8* %5) #0
|
||||
call void %8(i32 %2, i8* %5) #8
|
||||
ret void
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @main.recoverBuiltinGoroutine(i8* %context) unnamed_addr #0 {
|
||||
define hidden void @main.recoverBuiltinGoroutine(i8* %context) unnamed_addr #1 {
|
||||
entry:
|
||||
ret void
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @main.copyBuiltinGoroutine(i8* %dst.data, i32 %dst.len, i32 %dst.cap, i8* %src.data, i32 %src.len, i32 %src.cap, i8* %context) unnamed_addr #0 {
|
||||
define hidden void @main.copyBuiltinGoroutine(i8* %dst.data, i32 %dst.len, i32 %dst.cap, i8* %src.data, i32 %src.len, i32 %src.cap, i8* %context) unnamed_addr #1 {
|
||||
entry:
|
||||
%copy.n = call i32 @runtime.sliceCopy(i8* %dst.data, i8* %src.data, i32 %dst.len, i32 %src.len, i32 1, i8* undef) #0
|
||||
%copy.n = call i32 @runtime.sliceCopy(i8* %dst.data, i8* %src.data, i32 %dst.len, i32 %src.len, i32 1, i8* undef) #8
|
||||
ret void
|
||||
}
|
||||
|
||||
declare i32 @runtime.sliceCopy(i8* nocapture writeonly, i8* nocapture readonly, i32, i32, i32, i8*)
|
||||
declare i32 @runtime.sliceCopy(i8* nocapture writeonly, i8* nocapture readonly, i32, i32, i32, i8*) #0
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @main.closeBuiltinGoroutine(%runtime.channel* dereferenceable_or_null(32) %ch, i8* %context) unnamed_addr #0 {
|
||||
define hidden void @main.closeBuiltinGoroutine(%runtime.channel* dereferenceable_or_null(32) %ch, i8* %context) unnamed_addr #1 {
|
||||
entry:
|
||||
call void @runtime.chanClose(%runtime.channel* %ch, i8* undef) #0
|
||||
call void @runtime.chanClose(%runtime.channel* %ch, i8* undef) #8
|
||||
ret void
|
||||
}
|
||||
|
||||
declare void @runtime.chanClose(%runtime.channel* dereferenceable_or_null(32), i8*)
|
||||
declare void @runtime.chanClose(%runtime.channel* dereferenceable_or_null(32), i8*) #0
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @main.startInterfaceMethod(i32 %itf.typecode, i8* %itf.value, i8* %context) unnamed_addr #0 {
|
||||
define hidden void @main.startInterfaceMethod(i32 %itf.typecode, i8* %itf.value, i8* %context) unnamed_addr #1 {
|
||||
entry:
|
||||
%0 = call i8* @runtime.alloc(i32 16, i8* null, i8* undef) #0
|
||||
%0 = call i8* @runtime.alloc(i32 16, i8* null, i8* undef) #8
|
||||
%1 = bitcast i8* %0 to i8**
|
||||
store i8* %itf.value, i8** %1, align 4
|
||||
%2 = getelementptr inbounds i8, i8* %0, i32 4
|
||||
|
@@ -176,15 +178,15 @@ entry:
|
|||
%4 = getelementptr inbounds i8, i8* %0, i32 12
|
||||
%5 = bitcast i8* %4 to i32*
|
||||
store i32 %itf.typecode, i32* %5, align 4
|
||||
%stacksize = call i32 @"internal/task.getGoroutineStackSize"(i32 ptrtoint (void (i8*)* @"interface:{Print:func:{basic:string}{}}.Print$invoke$gowrapper" to i32), i8* undef) #0
|
||||
call void @"internal/task.start"(i32 ptrtoint (void (i8*)* @"interface:{Print:func:{basic:string}{}}.Print$invoke$gowrapper" to i32), i8* nonnull %0, i32 %stacksize, i8* undef) #0
|
||||
%stacksize = call i32 @"internal/task.getGoroutineStackSize"(i32 ptrtoint (void (i8*)* @"interface:{Print:func:{basic:string}{}}.Print$invoke$gowrapper" to i32), i8* undef) #8
|
||||
call void @"internal/task.start"(i32 ptrtoint (void (i8*)* @"interface:{Print:func:{basic:string}{}}.Print$invoke$gowrapper" to i32), i8* nonnull %0, i32 %stacksize, i8* undef) #8
|
||||
ret void
|
||||
}
|
||||
|
||||
declare void @"interface:{Print:func:{basic:string}{}}.Print$invoke"(i8*, i8*, i32, i32, i8*) #5
|
||||
declare void @"interface:{Print:func:{basic:string}{}}.Print$invoke"(i8*, i8*, i32, i32, i8*) #6
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define linkonce_odr void @"interface:{Print:func:{basic:string}{}}.Print$invoke$gowrapper"(i8* %0) unnamed_addr #6 {
|
||||
define linkonce_odr void @"interface:{Print:func:{basic:string}{}}.Print$invoke$gowrapper"(i8* %0) unnamed_addr #7 {
|
||||
entry:
|
||||
%1 = bitcast i8* %0 to i8**
|
||||
%2 = load i8*, i8** %1, align 4
|
||||
|
@@ -197,14 +199,16 @@ entry:
|
|||
%9 = getelementptr inbounds i8, i8* %0, i32 12
|
||||
%10 = bitcast i8* %9 to i32*
|
||||
%11 = load i32, i32* %10, align 4
|
||||
call void @"interface:{Print:func:{basic:string}{}}.Print$invoke"(i8* %2, i8* %5, i32 %8, i32 %11, i8* undef) #0
|
||||
call void @"interface:{Print:func:{basic:string}{}}.Print$invoke"(i8* %2, i8* %5, i32 %8, i32 %11, i8* undef) #8
|
||||
ret void
|
||||
}
|
||||
|
||||
attributes #0 = { nounwind }
|
||||
attributes #1 = { nounwind "tinygo-gowrapper"="main.regularFunction" }
|
||||
attributes #2 = { nounwind "tinygo-gowrapper"="main.inlineFunctionGoroutine$1" }
|
||||
attributes #3 = { nounwind "tinygo-gowrapper"="main.closureFunctionGoroutine$1" }
|
||||
attributes #4 = { nounwind "tinygo-gowrapper" }
|
||||
attributes #5 = { "tinygo-invoke"="reflect/methods.Print(string)" "tinygo-methods"="reflect/methods.Print(string)" }
|
||||
attributes #6 = { nounwind "tinygo-gowrapper"="interface:{Print:func:{basic:string}{}}.Print$invoke" }
|
||||
attributes #0 = { "target-features"="+armv7-m,+hwdiv,+soft-float,+strict-align,+thumb-mode,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-dsp,-fp-armv8,-fp-armv8d16,-fp-armv8d16sp,-fp-armv8sp,-fp16,-fp16fml,-fp64,-fpregs,-fullfp16,-hwdiv-arm,-i8mm,-lob,-mve,-mve.fp,-neon,-pacbti,-ras,-sb,-sha2,-vfp2,-vfp2sp,-vfp3,-vfp3d16,-vfp3d16sp,-vfp3sp,-vfp4,-vfp4d16,-vfp4d16sp,-vfp4sp" }
|
||||
attributes #1 = { nounwind "target-features"="+armv7-m,+hwdiv,+soft-float,+strict-align,+thumb-mode,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-dsp,-fp-armv8,-fp-armv8d16,-fp-armv8d16sp,-fp-armv8sp,-fp16,-fp16fml,-fp64,-fpregs,-fullfp16,-hwdiv-arm,-i8mm,-lob,-mve,-mve.fp,-neon,-pacbti,-ras,-sb,-sha2,-vfp2,-vfp2sp,-vfp3,-vfp3d16,-vfp3d16sp,-vfp3sp,-vfp4,-vfp4d16,-vfp4d16sp,-vfp4sp" }
|
||||
attributes #2 = { nounwind "target-features"="+armv7-m,+hwdiv,+soft-float,+strict-align,+thumb-mode,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-dsp,-fp-armv8,-fp-armv8d16,-fp-armv8d16sp,-fp-armv8sp,-fp16,-fp16fml,-fp64,-fpregs,-fullfp16,-hwdiv-arm,-i8mm,-lob,-mve,-mve.fp,-neon,-pacbti,-ras,-sb,-sha2,-vfp2,-vfp2sp,-vfp3,-vfp3d16,-vfp3d16sp,-vfp3sp,-vfp4,-vfp4d16,-vfp4d16sp,-vfp4sp" "tinygo-gowrapper"="main.regularFunction" }
|
||||
attributes #3 = { nounwind "target-features"="+armv7-m,+hwdiv,+soft-float,+strict-align,+thumb-mode,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-dsp,-fp-armv8,-fp-armv8d16,-fp-armv8d16sp,-fp-armv8sp,-fp16,-fp16fml,-fp64,-fpregs,-fullfp16,-hwdiv-arm,-i8mm,-lob,-mve,-mve.fp,-neon,-pacbti,-ras,-sb,-sha2,-vfp2,-vfp2sp,-vfp3,-vfp3d16,-vfp3d16sp,-vfp3sp,-vfp4,-vfp4d16,-vfp4d16sp,-vfp4sp" "tinygo-gowrapper"="main.inlineFunctionGoroutine$1" }
|
||||
attributes #4 = { nounwind "target-features"="+armv7-m,+hwdiv,+soft-float,+strict-align,+thumb-mode,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-dsp,-fp-armv8,-fp-armv8d16,-fp-armv8d16sp,-fp-armv8sp,-fp16,-fp16fml,-fp64,-fpregs,-fullfp16,-hwdiv-arm,-i8mm,-lob,-mve,-mve.fp,-neon,-pacbti,-ras,-sb,-sha2,-vfp2,-vfp2sp,-vfp3,-vfp3d16,-vfp3d16sp,-vfp3sp,-vfp4,-vfp4d16,-vfp4d16sp,-vfp4sp" "tinygo-gowrapper"="main.closureFunctionGoroutine$1" }
|
||||
attributes #5 = { nounwind "target-features"="+armv7-m,+hwdiv,+soft-float,+strict-align,+thumb-mode,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-dsp,-fp-armv8,-fp-armv8d16,-fp-armv8d16sp,-fp-armv8sp,-fp16,-fp16fml,-fp64,-fpregs,-fullfp16,-hwdiv-arm,-i8mm,-lob,-mve,-mve.fp,-neon,-pacbti,-ras,-sb,-sha2,-vfp2,-vfp2sp,-vfp3,-vfp3d16,-vfp3d16sp,-vfp3sp,-vfp4,-vfp4d16,-vfp4d16sp,-vfp4sp" "tinygo-gowrapper" }
|
||||
attributes #6 = { "target-features"="+armv7-m,+hwdiv,+soft-float,+strict-align,+thumb-mode,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-dsp,-fp-armv8,-fp-armv8d16,-fp-armv8d16sp,-fp-armv8sp,-fp16,-fp16fml,-fp64,-fpregs,-fullfp16,-hwdiv-arm,-i8mm,-lob,-mve,-mve.fp,-neon,-pacbti,-ras,-sb,-sha2,-vfp2,-vfp2sp,-vfp3,-vfp3d16,-vfp3d16sp,-vfp3sp,-vfp4,-vfp4d16,-vfp4d16sp,-vfp4sp" "tinygo-invoke"="reflect/methods.Print(string)" "tinygo-methods"="reflect/methods.Print(string)" }
|
||||
attributes #7 = { nounwind "target-features"="+armv7-m,+hwdiv,+soft-float,+strict-align,+thumb-mode,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-dsp,-fp-armv8,-fp-armv8d16,-fp-armv8d16sp,-fp-armv8sp,-fp16,-fp16fml,-fp64,-fpregs,-fullfp16,-hwdiv-arm,-i8mm,-lob,-mve,-mve.fp,-neon,-pacbti,-ras,-sb,-sha2,-vfp2,-vfp2sp,-vfp3,-vfp3d16,-vfp3d16sp,-vfp3sp,-vfp4,-vfp4d16,-vfp4d16sp,-vfp4sp" "tinygo-gowrapper"="interface:{Print:func:{basic:string}{}}.Print$invoke" }
|
||||
attributes #8 = { nounwind }
|
||||
|
|
4
compiler/testdata/goroutine-wasm-asyncify.ll
Vendored
|
@@ -5,10 +5,12 @@ target triple = "wasm32-unknown-wasi"
|
|||
|
||||
%runtime.channel = type { i32, i32, i8, %runtime.channelBlockedList*, i32, i32, i32, i8* }
|
||||
%runtime.channelBlockedList = type { %runtime.channelBlockedList*, %"internal/task.Task"*, %runtime.chanSelectState*, { %runtime.channelBlockedList*, i32, i32 } }
|
||||
%"internal/task.Task" = type { %"internal/task.Task"*, i8*, i64, %"internal/task.gcData", %"internal/task.state" }
|
||||
%"internal/task.Task" = type { %"internal/task.Task"*, i8*, i64, %"internal/task.gcData", %"internal/task.state", %"internal/task.DeferFrame"* }
|
||||
%"internal/task.gcData" = type { i8* }
|
||||
%"internal/task.state" = type { i32, i8*, %"internal/task.stackState", i1 }
|
||||
%"internal/task.stackState" = type { i32, i32 }
|
||||
%"internal/task.DeferFrame" = type { i8*, i8*, %"internal/task.DeferFrame"*, i1, %runtime._interface }
|
||||
%runtime._interface = type { i32, i8* }
|
||||
%runtime.chanSelectState = type { %runtime.channel*, i8* }
|
||||
|
||||
@"main$string" = internal unnamed_addr constant [4 x i8] c"test", align 1
|
||||
|
|
20
compiler/testdata/intrinsics-cortex-m-qemu.ll
Vendored
|
@@ -3,30 +3,32 @@ source_filename = "intrinsics.go"
|
|||
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
|
||||
target triple = "thumbv7m-unknown-unknown-eabi"
|
||||
|
||||
declare noalias nonnull i8* @runtime.alloc(i32, i8*, i8*)
|
||||
declare noalias nonnull i8* @runtime.alloc(i32, i8*, i8*) #0
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden void @main.init(i8* %context) unnamed_addr #0 {
|
||||
define hidden void @main.init(i8* %context) unnamed_addr #1 {
|
||||
entry:
|
||||
ret void
|
||||
}
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden double @main.mySqrt(double %x, i8* %context) unnamed_addr #0 {
|
||||
define hidden double @main.mySqrt(double %x, i8* %context) unnamed_addr #1 {
|
||||
entry:
|
||||
%0 = call double @math.Sqrt(double %x, i8* undef) #0
|
||||
%0 = call double @math.Sqrt(double %x, i8* undef) #2
|
||||
ret double %0
|
||||
}
|
||||
|
||||
declare double @math.Sqrt(double, i8*)
|
||||
declare double @math.Sqrt(double, i8*) #0
|
||||
|
||||
; Function Attrs: nounwind
|
||||
define hidden double @main.myTrunc(double %x, i8* %context) unnamed_addr #0 {
|
||||
define hidden double @main.myTrunc(double %x, i8* %context) unnamed_addr #1 {
|
||||
entry:
|
||||
%0 = call double @math.Trunc(double %x, i8* undef) #0
|
||||
%0 = call double @math.Trunc(double %x, i8* undef) #2
|
||||
ret double %0
|
||||
}
|
||||
|
||||
declare double @math.Trunc(double, i8*)
|
||||
declare double @math.Trunc(double, i8*) #0
|
||||
|
||||
attributes #0 = { nounwind }
|
||||
attributes #0 = { "target-features"="+armv7-m,+hwdiv,+soft-float,+strict-align,+thumb-mode,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-dsp,-fp-armv8,-fp-armv8d16,-fp-armv8d16sp,-fp-armv8sp,-fp16,-fp16fml,-fp64,-fpregs,-fullfp16,-hwdiv-arm,-i8mm,-lob,-mve,-mve.fp,-neon,-pacbti,-ras,-sb,-sha2,-vfp2,-vfp2sp,-vfp3,-vfp3d16,-vfp3d16sp,-vfp3sp,-vfp4,-vfp4d16,-vfp4d16sp,-vfp4sp" }
|
||||
attributes #1 = { nounwind "target-features"="+armv7-m,+hwdiv,+soft-float,+strict-align,+thumb-mode,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-dsp,-fp-armv8,-fp-armv8d16,-fp-armv8d16sp,-fp-armv8sp,-fp16,-fp16fml,-fp64,-fpregs,-fullfp16,-hwdiv-arm,-i8mm,-lob,-mve,-mve.fp,-neon,-pacbti,-ras,-sb,-sha2,-vfp2,-vfp2sp,-vfp3,-vfp3d16,-vfp3d16sp,-vfp3sp,-vfp4,-vfp4d16,-vfp4d16sp,-vfp4sp" }
|
||||
attributes #2 = { nounwind }
|
||||
|
|
|
@@ -244,6 +244,13 @@ func runPlatTests(options compileopts.Options, tests []string, t *testing.T) {
|
|||
runTest("rand.go", options, t, nil, nil)
|
||||
})
|
||||
}
|
||||
if options.Target != "wasi" && options.Target != "wasm" {
|
||||
// The recover() builtin isn't supported yet on WebAssembly and Windows.
|
||||
t.Run("recover.go", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
runTest("recover.go", options, t, nil, nil)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func emuCheck(t *testing.T, options compileopts.Options) {
|
||||
|
|
|
@@ -20,6 +20,22 @@ type Task struct {
|
|||
|
||||
// state is the underlying running state of the task.
|
||||
state state
|
||||
|
||||
// DeferFrame stores a pointer to the (stack allocated) defer frame of the
|
||||
// goroutine that is used for the recover builtin.
|
||||
DeferFrame *DeferFrame
|
||||
}
|
||||
|
||||
// DeferFrame is a stack allocated object that stores information for the
|
||||
// current "defer frame", which is used in functions that use the `defer`
|
||||
// keyword.
|
||||
// The compiler knows the JumpPC struct offset.
|
||||
type DeferFrame struct {
|
||||
JumpSP unsafe.Pointer // stack pointer to return to
|
||||
JumpPC unsafe.Pointer // pc to return to
|
||||
Previous *DeferFrame // previous recover buffer pointer
|
||||
Panicking bool // true iff this defer frame is panicking
|
||||
PanicValue interface{} // panic value, might be nil for panic(nil) for example
|
||||
}
|
||||
|
||||
// getGoroutineStackSize is a compiler intrinsic that returns the stack size for
|
||||
|
|
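The DeferFrame layout above is load-bearing: as the struct comment notes, the compiler hard-codes the offsets of JumpSP and JumpPC when it fills in a stack-allocated defer frame. As a sketch only (not part of this commit; the test name and messages are illustrative), a layout guard in package task could document that assumption:

package task

import (
	"testing"
	"unsafe"
)

// TestDeferFrameLayout is a hypothetical guard against reordering the fields
// whose offsets the compiler relies on (JumpSP first, JumpPC right after it).
func TestDeferFrameLayout(t *testing.T) {
	var f DeferFrame
	if unsafe.Offsetof(f.JumpSP) != 0 {
		t.Error("JumpSP must remain the first field of DeferFrame")
	}
	if unsafe.Offsetof(f.JumpPC) != unsafe.Sizeof(unsafe.Pointer(nil)) {
		t.Error("JumpPC must directly follow JumpSP")
	}
}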
|
@@ -4,6 +4,9 @@ package task
|
|||
|
||||
import "unsafe"
|
||||
|
||||
// There is only one goroutine so the task struct can be a global.
|
||||
var mainTask Task
|
||||
|
||||
//go:linkname runtimePanic runtime.runtimePanic
|
||||
func runtimePanic(str string)
|
||||
|
||||
|
@@ -12,8 +15,8 @@ func Pause() {
|
|||
}
|
||||
|
||||
func Current() *Task {
|
||||
runtimePanic("scheduler is disabled")
|
||||
return nil
|
||||
// Return a task struct, which is used for the recover builtin for example.
|
||||
return &mainTask
|
||||
}
|
||||
|
||||
//go:noinline
|
||||
|
|
|
@@ -21,3 +21,13 @@ tinygo_scanCurrentStack:
|
|||
// were only pushed to be discoverable by the GC.
|
||||
addl $28, %esp
|
||||
retl
|
||||
|
||||
|
||||
.section .text.tinygo_longjmp
|
||||
.global tinygo_longjmp
|
||||
tinygo_longjmp:
|
||||
// Note: the code we jump to assumes eax is set to a non-zero value if we
|
||||
// jump from here.
|
||||
movl 8(%esp), %eax // jumpPC (stash in volatile register)
|
||||
movl 4(%esp), %esp // jumpSP
|
||||
jmpl *%eax
|
|
@@ -28,6 +28,22 @@ _tinygo_scanCurrentStack:
|
|||
addq $56, %rsp
|
||||
retq
|
||||
|
||||
|
||||
#ifdef __ELF__
|
||||
.section .text.tinygo_longjmp
|
||||
.global tinygo_longjmp
|
||||
tinygo_longjmp:
|
||||
#else // Darwin
|
||||
.global _tinygo_longjmp
|
||||
_tinygo_longjmp:
|
||||
#endif
|
||||
// Note: the code we jump to assumes rax is non-zero so we have to load it
|
||||
// with some value here.
|
||||
movq $1, %rax
|
||||
movq %rdi, %rsp // jumpSP
|
||||
jmpq *%rsi // jumpPC
|
||||
|
||||
|
||||
#ifdef __MACH__ // Darwin
|
||||
// allow these symbols to stripped as dead code
|
||||
.subsections_via_symbols
|
|
@@ -20,3 +20,12 @@ tinygo_scanCurrentStack:
|
|||
// were only pushed to be discoverable by the GC.
|
||||
addq $72, %rsp
|
||||
retq
|
||||
|
||||
.section .text.tinygo_longjmp,"ax"
|
||||
.global tinygo_longjmp
|
||||
tinygo_longjmp:
|
||||
// Note: the code we jump to assumes rax is non-zero so we have to load it
|
||||
// with some value here.
|
||||
movq $1, %rax
|
||||
movq %rcx, %rsp // jumpSP
|
||||
jmpq *%rdx // jumpPC
|
|
@@ -31,3 +31,16 @@ tinygo_scanCurrentStack:
|
|||
pop {pc}
|
||||
.cfi_endproc
|
||||
.size tinygo_scanCurrentStack, .-tinygo_scanCurrentStack
|
||||
|
||||
|
||||
.section .text.tinygo_longjmp
|
||||
.global tinygo_longjmp
|
||||
.type tinygo_longjmp, %function
|
||||
tinygo_longjmp:
|
||||
.cfi_startproc
|
||||
// Note: the code we jump to assumes r0 is set to a non-zero value if we
|
||||
// jump from here (which is conveniently already the case).
|
||||
mov sp, r0 // jumpSP
|
||||
mov pc, r1 // jumpPC
|
||||
.cfi_endproc
|
||||
.size tinygo_longjmp, .-tinygo_longjmp
|
|
@@ -30,3 +30,18 @@ tinygo_scanCurrentStack:
|
|||
// Restore stack state and return.
|
||||
ldp x29, x30, [sp], #96
|
||||
ret
|
||||
|
||||
|
||||
#ifdef __MACH__
|
||||
.global _tinygo_longjmp
|
||||
_tinygo_longjmp:
|
||||
#else
|
||||
.section .text.tinygo_longjmp
|
||||
.global tinygo_longjmp
|
||||
.type tinygo_longjmp, %function
|
||||
tinygo_longjmp:
|
||||
#endif
|
||||
// Note: the code we jump to assumes x0 is set to a non-zero value if we
|
||||
// jump from here (which is conveniently already the case).
|
||||
mov sp, x0 // jumpSP
|
||||
br x1 // jumpPC
|
|
@@ -40,3 +40,12 @@ tinygo_scanCurrentStack:
|
|||
|
||||
// Return to the caller.
|
||||
ret
|
||||
|
||||
|
||||
.section .text.tinygo_longjmp
|
||||
.global tinygo_longjmp
|
||||
tinygo_longjmp:
|
||||
// Note: the code we jump to assumes a0 is non-zero, which is already the
|
||||
// case because that's jumpSP (the stack pointer).
|
||||
mv sp, a0 // jumpSP
|
||||
jr a1 // jumpPC
|
|
@@ -1,12 +1,35 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"internal/task"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// trap is a compiler hint that this function cannot be executed. It is
|
||||
// translated into either a trap instruction or a call to abort().
|
||||
//export llvm.trap
|
||||
func trap()
|
||||
|
||||
// Inline assembly stub. It is essentially C longjmp but modified a bit for the
|
||||
// purposes of TinyGo. It restores the stack pointer and jumps to the given pc.
|
||||
//export tinygo_longjmp
|
||||
func tinygo_longjmp(sp, pc unsafe.Pointer)
|
||||
|
||||
// Compiler intrinsic.
|
||||
// Returns whether recover is supported on the current architecture.
|
||||
func supportsRecover() bool
|
||||
|
||||
// Builtin function panic(msg), used as a compiler intrinsic.
|
||||
func _panic(message interface{}) {
|
||||
if supportsRecover() {
|
||||
frame := task.Current().DeferFrame
|
||||
if frame != nil {
|
||||
frame.PanicValue = message
|
||||
frame.Panicking = true
|
||||
tinygo_longjmp(frame.JumpSP, frame.JumpPC)
|
||||
// unreachable
|
||||
}
|
||||
}
|
||||
printstring("panic: ")
|
||||
printitf(message)
|
||||
printnl()
|
||||
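In short: when a defer frame is live, _panic stores the panic value in the frame, marks it as panicking, and longjmps straight into the deferred-call path; only when no frame exists does it fall through to the print-and-abort code below. At the source level this is what makes the usual recover pattern work. The snippet is plain Go for illustration (it also runs with the standard toolchain) and is not part of the commit:

package main

// mayPanic demonstrates the behaviour enabled by the new panic path: the
// panic value is stashed in the defer frame and handed back by recover().
func mayPanic(fail bool) (err interface{}) {
	defer func() {
		err = recover() // nil if mayPanic returned normally
	}()
	if fail {
		panic("boom")
	}
	return nil
}

func main() {
	println("recovered:", mayPanic(true).(string))
	println("no panic:", mayPanic(false) == nil)
}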
|
@@ -20,10 +43,59 @@ func runtimePanic(msg string) {
|
|||
abort()
|
||||
}
|
||||
|
||||
// Try to recover a panicking goroutine.
|
||||
func _recover() interface{} {
|
||||
// Deferred functions are currently not executed during panic, so there is
|
||||
// no way this can return anything besides nil.
|
||||
// Called at the start of a function that includes a deferred call.
|
||||
// It gets passed in the stack-allocated defer frame and configures it.
|
||||
// Note that the frame is not zeroed yet, so we need to initialize all values
|
||||
// that will be used.
|
||||
//go:inline
|
||||
//go:nobounds
|
||||
func setupDeferFrame(frame *task.DeferFrame, jumpSP unsafe.Pointer) {
|
||||
currentTask := task.Current()
|
||||
frame.Previous = currentTask.DeferFrame
|
||||
frame.JumpSP = jumpSP
|
||||
frame.Panicking = false
|
||||
currentTask.DeferFrame = frame
|
||||
}
|
||||
|
||||
// Called right before the return instruction. It pops the defer frame from the
|
||||
// linked list of defer frames. It also re-raises a panic if the goroutine is
|
||||
// still panicking.
|
||||
//go:inline
|
||||
//go:nobounds
|
||||
func destroyDeferFrame(frame *task.DeferFrame) {
|
||||
task.Current().DeferFrame = frame.Previous
|
||||
if frame.Panicking {
|
||||
// We're still panicking!
|
||||
// Re-raise the panic now.
|
||||
_panic(frame.PanicValue)
|
||||
}
|
||||
}
|
||||
|
||||
// _recover is the built-in recover() function. It tries to recover a currently
|
||||
// panicking goroutine.
|
||||
// useParentFrame is set when the caller of runtime._recover has a defer frame
|
||||
// itself. In that case, recover() shouldn't check that frame but one frame up.
|
||||
func _recover(useParentFrame bool) interface{} {
|
||||
if !supportsRecover() {
|
||||
// Compiling without stack unwinding support, so make this a no-op.
|
||||
return nil
|
||||
}
|
||||
// TODO: somehow check that recover() is called directly by a deferred
|
||||
// function in a panicking goroutine. Maybe this can be done by comparing
|
||||
// the frame pointer?
|
||||
frame := task.Current().DeferFrame
|
||||
if useParentFrame {
|
||||
// Don't recover panic from the current frame (which can't be panicking
|
||||
// already), but instead from the previous frame.
|
||||
frame = frame.Previous
|
||||
}
|
||||
if frame != nil && frame.Panicking {
|
||||
// Only the first call to recover returns the panic value. It also stops
|
||||
// the panicking sequence, hence setting panicking to false.
|
||||
frame.Panicking = false
|
||||
return frame.PanicValue
|
||||
}
|
||||
// Not panicking, so return a nil interface.
|
||||
return nil
|
||||
}
|
||||
|
||||
|
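Taken together, setupDeferFrame, _panic, _recover and destroyDeferFrame implement a small per-goroutine linked-list state machine. The model below (illustration only, not TinyGo code; all names are made up) reproduces just that bookkeeping, with an ordinary function call standing in for the longjmp back into the deferred-call path:

package main

type deferFrame struct {
	previous   *deferFrame
	panicking  bool
	panicValue interface{}
}

// current models task.Current().DeferFrame for a single goroutine.
var current *deferFrame

// push models setupDeferFrame: link a new frame at the head of the list.
func push() *deferFrame {
	f := &deferFrame{previous: current}
	current = f
	return f
}

// doPanic models _panic: record the value in the innermost frame; the real
// runtime then longjmps to the deferred-call path instead of returning.
func doPanic(v interface{}) {
	if current != nil {
		current.panicValue = v
		current.panicking = true
		return
	}
	panic(v) // no frame: fatal, like printing "panic:" and aborting
}

// doRecover models _recover: only a panicking frame yields its value, and
// doing so stops the panicking sequence.
func doRecover(f *deferFrame) interface{} {
	if f != nil && f.panicking {
		f.panicking = false
		return f.panicValue
	}
	return nil
}

// pop models destroyDeferFrame: unlink the frame and re-raise if the panic
// was never recovered.
func pop(f *deferFrame) {
	current = f.previous
	if f.panicking {
		doPanic(f.panicValue)
	}
}

func main() {
	f := push()
	doPanic("boom")
	println("recovered:", doRecover(f).(string))
	pop(f) // no re-raise: doRecover cleared the panicking flag
}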
|
|
@@ -17,7 +17,7 @@
|
|||
],
|
||||
"extra-files": [
|
||||
"src/internal/task/task_stack_avr.S",
|
||||
"src/runtime/gc_avr.S"
|
||||
"src/runtime/asm_avr.S"
|
||||
],
|
||||
"gdb": ["avr-gdb"]
|
||||
}
|
||||
|
|
|
@@ -24,7 +24,7 @@
|
|||
"extra-files": [
|
||||
"src/device/arm/cortexm.s",
|
||||
"src/internal/task/task_stack_cortexm.S",
|
||||
"src/runtime/gc_arm.S"
|
||||
"src/runtime/asm_arm.S"
|
||||
],
|
||||
"gdb": ["gdb-multiarch", "arm-none-eabi-gdb"]
|
||||
}
|
||||
|
|
|
@@ -21,7 +21,7 @@
|
|||
"linkerscript": "targets/gameboy-advance.ld",
|
||||
"extra-files": [
|
||||
"targets/gameboy-advance.s",
|
||||
"src/runtime/gc_arm.S"
|
||||
"src/runtime/asm_arm.S"
|
||||
],
|
||||
"gdb": ["gdb-multiarch"],
|
||||
"emulator": "mgba -3 {}"
|
||||
|
|
|
@@ -29,7 +29,7 @@
|
|||
"extra-files": [
|
||||
"targets/nintendoswitch.s",
|
||||
"src/internal/task/task_stack_arm64.S",
|
||||
"src/runtime/gc_arm64.S",
|
||||
"src/runtime/asm_arm64.S",
|
||||
"src/runtime/runtime_nintendoswitch.s"
|
||||
]
|
||||
}
|
||||
|
|
|
@@ -18,7 +18,7 @@
|
|||
"extra-files": [
|
||||
"src/device/riscv/start.S",
|
||||
"src/internal/task/task_stack_tinygoriscv.S",
|
||||
"src/runtime/gc_riscv.S",
|
||||
"src/runtime/asm_riscv.S",
|
||||
"src/device/riscv/handleinterrupt.S"
|
||||
],
|
||||
"gdb": ["riscv64-unknown-elf-gdb"]
|
||||
|
|
21
testdata/corpus.yaml
Vendored
|
@@ -107,6 +107,8 @@
|
|||
- pkg: blowfish
|
||||
- pkg: bn256
|
||||
- pkg: cast5
|
||||
- pkg: chacha20
|
||||
skipwasi: true # needs recover
|
||||
- pkg: chacha20poly1305
|
||||
- pkg: curve25519
|
||||
- pkg: ed25519
|
||||
|
@@ -129,7 +131,6 @@
|
|||
- pkg: tea
|
||||
- pkg: twofish
|
||||
- pkg: xtea
|
||||
#- pkg: chacha20 # panic: chacha20: SetCounter attempted to rollback counter
|
||||
#- pkg: cryptobyte # panic: unimplemented: reflect.OverflowInt()
|
||||
#- pkg: salsa20/salsa # panic: runtime error: index out of range
|
||||
#- pkg: sha3 # panic: unimplemented: (reflect.Type).NumMethod()
|
||||
|
@@ -209,15 +210,27 @@
|
|||
tags: noasm appengine
|
||||
subdirs:
|
||||
- pkg: blas/blas32
|
||||
- pkg: blas/blas64
|
||||
skipwasi: true # needs recover
|
||||
- pkg: blas/cblas64
|
||||
- pkg: blas/cblas128
|
||||
- pkg: blas/gonum
|
||||
skipwasi: true # needs recover
|
||||
- pkg: cmplxs
|
||||
skipwasi: true # needs recover
|
||||
- pkg: cmplxs/cscalar
|
||||
- pkg: diff/fd
|
||||
skipwasi: true # needs recover
|
||||
- pkg: dsp/window
|
||||
- pkg: floats
|
||||
skipwasi: true # needs recover
|
||||
- pkg: floats/scalar
|
||||
- pkg: integrate
|
||||
- pkg: integrate/quad
|
||||
- pkg: internal/cmplx64
|
||||
- pkg: internal/testrand
|
||||
- pkg: interp
|
||||
skipwasi: true # needs recover
|
||||
- pkg: lapack/gonum
|
||||
skipwasi: true # takes too long
|
||||
slow: true
|
||||
|
@@ -236,15 +249,9 @@
|
|||
slow: true
|
||||
- pkg: stat/samplemv
|
||||
skipwasi: true # takes too long
|
||||
#- pkg: blas/blas64 # -- TestDasum panic: blas: n < 0
|
||||
#- pkg: blas/gonum # -- panic: blas: n < 0
|
||||
#- pkg: cmplxs # -- TestAdd panic: cmplxs: slice lengths do not match
|
||||
#- pkg: diff/fd # -- panic: fd: slice length mismatch
|
||||
#- pkg: floats # -- panic: floats: destination slice length does not match input
|
||||
#- pkg: graph # ld.lld-11: -- error: undefined symbol: reflect.mapiterkey (among other reflect errors)
|
||||
#- pkg: graph/topo # -- Reflect: Same as above
|
||||
#- pkg: internal/math32 # -- /usr/local/go/src/testing/quick/quick.go:273:11: fType.NumOut undefined (type reflect.Type has no field or method NumOut)
|
||||
#- pkg: interp # -- panic: interp: input slices have different lengths
|
||||
#- pkg: mat # -- panic: mat: row index out of range
|
||||
#- pkg: num/dual # TestFormat unexpected result for fmt.Sprintf("%#v", T{Real:1.1, Emag:2.1}): got:"T{Real:1.1, Emag:2.1}", want:"dual.Number{Real:1.1, Emag:2.1}" unexpected result for fmt.Sprintf("%#v", T{Real:-1.1, Emag:-2.1}): got:"T{Real:-1.1, Emag:-2.1}", want:"dual.Number{Real:-1.1, Emag:-2.1}"
|
||||
#- pkg: num/dualcmplx # TestFormat (similar to above)
|
||||
|
|
99
testdata/recover.go
Vendored
Normal file
|
@@ -0,0 +1,99 @@
|
|||
package main
|
||||
|
||||
func main() {
|
||||
println("# simple recover")
|
||||
recoverSimple()
|
||||
|
||||
println("\n# recover with result")
|
||||
result := recoverWithResult()
|
||||
println("result:", result)
|
||||
|
||||
println("\n# nested defer frame")
|
||||
nestedDefer()
|
||||
|
||||
println("\n# nested panic: panic inside recover")
|
||||
nestedPanic()
|
||||
|
||||
println("\n# panic inside defer")
|
||||
panicInsideDefer()
|
||||
|
||||
println("\n# panic replace")
|
||||
panicReplace()
|
||||
}
|
||||
|
||||
func recoverSimple() {
|
||||
defer func() {
|
||||
println("recovering...")
|
||||
printitf("recovered:", recover())
|
||||
}()
|
||||
println("running panic...")
|
||||
panic("panic")
|
||||
}
|
||||
|
||||
func recoverWithResult() (result int) {
|
||||
defer func() {
|
||||
printitf("recovered:", recover())
|
||||
}()
|
||||
result = 3
|
||||
println("running panic...")
|
||||
panic("panic")
|
||||
}
|
||||
|
||||
func nestedDefer() {
|
||||
defer func() {
|
||||
printitf("recovered:", recover())
|
||||
}()
|
||||
|
||||
func() {
|
||||
// The defer here doesn't catch the panic using recover(), so the outer
|
||||
// deferred function (and its recover) should do that.
|
||||
defer func() {
|
||||
println("deferred nested function")
|
||||
}()
|
||||
panic("panic")
|
||||
}()
|
||||
println("unreachable")
|
||||
}
|
||||
|
||||
func nestedPanic() {
|
||||
defer func() {
|
||||
printitf("recovered 1:", recover())
|
||||
|
||||
defer func() {
|
||||
printitf("recovered 2:", recover())
|
||||
}()
|
||||
|
||||
panic("foo")
|
||||
}()
|
||||
panic("panic")
|
||||
}
|
||||
|
||||
func panicInsideDefer() {
|
||||
defer func() {
|
||||
printitf("recovered:", recover())
|
||||
}()
|
||||
defer func() {
|
||||
panic("panic")
|
||||
}()
|
||||
}
|
||||
|
||||
func panicReplace() {
|
||||
defer func() {
|
||||
printitf("recovered:", recover())
|
||||
}()
|
||||
defer func() {
|
||||
println("panic 2")
|
||||
panic("panic 2")
|
||||
}()
|
||||
println("panic 1")
|
||||
panic("panic 1")
|
||||
}
|
||||
|
||||
func printitf(msg string, itf interface{}) {
|
||||
switch itf := itf.(type) {
|
||||
case string:
|
||||
println(msg, itf)
|
||||
default:
|
||||
println(msg, itf)
|
||||
}
|
||||
}
|
25
testdata/recover.txt
Vendored
Normal file
|
@@ -0,0 +1,25 @@
|
|||
# simple recover
|
||||
running panic...
|
||||
recovering...
|
||||
recovered: panic
|
||||
|
||||
# recover with result
|
||||
running panic...
|
||||
recovered: panic
|
||||
result: 3
|
||||
|
||||
# nested defer frame
|
||||
deferred nested function
|
||||
recovered: panic
|
||||
|
||||
# nested panic: panic inside recover
|
||||
recovered 1: panic
|
||||
recovered 2: foo
|
||||
|
||||
# panic inside defer
|
||||
recovered: panic
|
||||
|
||||
# panic replace
|
||||
panic 1
|
||||
panic 2
|
||||
recovered: panic 2
|