runtime (gc): add garbage collector that uses an external allocator
This commit is contained in:
parent
57320c0922
commit
62e78c0a26
11 changed files with 683 additions and 25 deletions
|
@ -96,7 +96,7 @@ func (c *Config) CgoEnabled() bool {
|
|||
}
|
||||
|
||||
// GC returns the garbage collection strategy in use on this platform. Valid
|
||||
// values are "none", "leaking", and "conservative".
|
||||
// values are "none", "leaking", "extalloc", and "conservative".
|
||||
func (c *Config) GC() string {
|
||||
if c.Options.GC != "" {
|
||||
return c.Options.GC
|
||||
|
@ -104,22 +104,29 @@ func (c *Config) GC() string {
|
|||
if c.Target.GC != "" {
|
||||
return c.Target.GC
|
||||
}
|
||||
return "conservative"
|
||||
for _, tag := range c.Target.BuildTags {
|
||||
if tag == "baremetal" || tag == "wasm" {
|
||||
return "conservative"
|
||||
}
|
||||
}
|
||||
return "extalloc"
|
||||
}
|
||||
|
||||
// NeedsStackObjects returns true if the compiler should insert stack objects
|
||||
// that can be traced by the garbage collector.
|
||||
func (c *Config) NeedsStackObjects() bool {
|
||||
if c.GC() != "conservative" {
|
||||
switch c.GC() {
|
||||
case "conservative", "extalloc":
|
||||
for _, tag := range c.BuildTags() {
|
||||
if tag == "baremetal" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
for _, tag := range c.BuildTags() {
|
||||
if tag == "baremetal" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Scheduler returns the scheduler implementation. Valid values are "coroutines"
|
||||
|
|
2
main.go
2
main.go
|
@ -701,7 +701,7 @@ func handleCompilerError(err error) {
|
|||
func main() {
|
||||
outpath := flag.String("o", "", "output filename")
|
||||
opt := flag.String("opt", "z", "optimization level: 0, 1, 2, s, z")
|
||||
gc := flag.String("gc", "", "garbage collector to use (none, leaking, conservative)")
|
||||
gc := flag.String("gc", "", "garbage collector to use (none, leaking, extalloc, conservative)")
|
||||
panicStrategy := flag.String("panic", "print", "panic strategy (print, trap)")
|
||||
scheduler := flag.String("scheduler", "", "which scheduler to use (coroutines, tasks)")
|
||||
printIR := flag.Bool("printir", false, "print LLVM IR")
|
||||
|
|
604
src/runtime/gc_extalloc.go
Обычный файл
604
src/runtime/gc_extalloc.go
Обычный файл
|
@ -0,0 +1,604 @@
|
|||
// +build gc.extalloc
|
||||
|
||||
package runtime
|
||||
|
||||
import "unsafe"
|
||||
|
||||
// This garbage collector implementation allows TinyGo to use an external memory allocator.
// It prepends a header (a memTreapNode) to every allocation, which the garbage collector
// uses for tracking purposes: alloc places the node at the start of the raw allocation
// and hands the caller a pointer just past it (&node.base).
// This is also a conservative collector: any word that looks like a pointer into the
// heap keeps the corresponding allocation alive.

const (
	// gcDebug enables verbose tracing of allocations and collections.
	gcDebug = false
	// gcAsserts enables internal consistency checks that panic on corruption.
	gcAsserts = false
)

// initHeap is a no-op: the external allocator owns the heap, so there is
// nothing for the runtime to set up in advance.
func initHeap() {}

// memTreap is a treap which is used to track allocations for the garbage collector.
// Nodes are keyed by their own memory address, with LARGER addresses stored in the
// LEFT subtree (see lookupAddr and parentSlot), and heap-ordered by priority().
type memTreap struct {
	root *memTreapNode // nil when the treap is empty
}
|
||||
|
||||
// printNode recursively prints a subtree at a given indentation depth.
// Debug helper. Note that the node is printed before the nil check, so nil
// children appear in the output too (priority() of a nil node is 0).
func (t *memTreap) printNode(n *memTreapNode, depth int) {
	for i := 0; i < depth; i++ {
		print(" ")
	}
	println(n, n.priority())
	if n == nil {
		return
	}
	// A non-root node must have a parent link; anything else is corruption.
	if gcAsserts && n.parent == nil && t.root != n {
		runtimePanic("parent missing")
	}
	t.printNode(n.left, depth+1)
	t.printNode(n.right, depth+1)
}

// print the treap. Debug helper.
func (t *memTreap) print() {
	println("treap:")
	t.printNode(t.root, 1)
}
|
||||
|
||||
// empty returns whether the treap contains any nodes.
// An empty treap is represented by a nil root.
func (t *memTreap) empty() bool {
	return t.root == nil
}
|
||||
|
||||
// minAddr returns the lowest address contained in an allocation in the treap.
// Because the treap stores larger addresses in LEFT subtrees, the lowest
// address belongs to the rightmost node.
// NOTE: nil-dereferences on an empty treap; GC() only calls this after
// checking allocations.empty().
func (t *memTreap) minAddr() uintptr {
	// Find the rightmost node.
	n := t.root
	for n.right != nil {
		n = n.right
	}

	// The lowest address is the base of the rightmost node.
	// (The header fields before base are deliberately excluded.)
	return uintptr(unsafe.Pointer(&n.base))
}

// maxAddr returns the highest address contained in an allocation in the treap.
// By the same reversed ordering, the highest address belongs to the leftmost node.
// NOTE: nil-dereferences on an empty treap (see minAddr).
func (t *memTreap) maxAddr() uintptr {
	// Find the leftmost node.
	n := t.root
	for n.left != nil {
		n = n.left
	}

	// The highest address is the end (one past the last byte) of the leftmost node.
	return uintptr(unsafe.Pointer(&n.base)) + n.size
}
|
||||
|
||||
// rotateRight does a right rotation of p and q, where p is q's left child
// (the node at the higher address) and becomes q's parent afterwards.
// https://en.wikipedia.org/wiki/Tree_rotation#/media/File:Tree_rotation.png
func (t *memTreap) rotateRight(p, q *memTreapNode) {
	// Point whatever referenced q at p instead.
	if t.root == q {
		t.root = p
	} else {
		*q.parentSlot() = p
	}

	//a := p.left  (stays attached to p)
	b := p.right
	//c := q.right (stays attached to q)

	p.parent = q.parent
	p.right = q

	// The middle subtree b moves from p to q.
	q.parent = p
	q.left = b

	if b != nil {
		b.parent = q
	}
}

// rotateLeft does a left rotation of p and q, where q is p's right child
// and becomes p's parent afterwards.
// https://en.wikipedia.org/wiki/Tree_rotation#/media/File:Tree_rotation.png
func (t *memTreap) rotateLeft(p, q *memTreapNode) {
	// Point whatever referenced p at q instead.
	if t.root == p {
		t.root = q
	} else {
		*p.parentSlot() = q
	}

	//a := p.left  (stays attached to p)
	b := q.left
	//c := q.right (stays attached to q)

	q.parent = p.parent
	q.left = p

	// The middle subtree b moves from q to p.
	p.parent = q
	p.right = b

	if b != nil {
		b.parent = p
	}
}

// rotate rotates a lower node up to its parent.
// The node n must be a child of m, and will be the parent of m after the rotation.
func (t *memTreap) rotate(n, m *memTreapNode) {
	// The treap is keyed on node addresses with larger addresses on the left,
	// so n's address relative to m tells us which child it is:
	// larger address -> left child -> right rotation, and vice versa.
	// https://en.wikipedia.org/wiki/Tree_rotation#/media/File:Tree_rotation.png
	if uintptr(unsafe.Pointer(n)) > uintptr(unsafe.Pointer(m)) {
		t.rotateRight(n, m)
	} else {
		t.rotateLeft(m, n)
	}
}
|
||||
|
||||
// insert a node into the treap.
// The node's link fields (parent/left/right) must all be nil on entry.
func (t *memTreap) insert(n *memTreapNode) {
	if gcAsserts && (n.parent != nil || n.left != nil || n.right != nil) {
		runtimePanic("tried to insert unzeroed treap node")
	}

	if t.root == nil {
		// This is the first node, and can be inserted directly into the root.
		t.root = n
		return
	}

	// Insert like a regular binary search tree: walk down from the root using
	// n.parent as the cursor and parentSlot() to pick the child slot on the
	// correct side (by address), until an empty slot is found.
	for n.parent = t.root; *n.parentSlot() != nil; n.parent = *n.parentSlot() {
	}
	*n.parentSlot() = n

	// Rotate the tree to restore the heap invariant
	// (every parent has a priority >= its children's).
	priority := n.priority()
	for n.parent != nil && priority > n.parent.priority() {
		t.rotate(n, n.parent)
	}
}
|
||||
|
||||
// lookupAddr finds the treap node with the allocation containing the specified address.
// If the address is not contained in any allocations in this treap, nil is returned.
// NOTE: fields of memTreapNodes (the headers) are not considered part of the
// allocations, so a pointer into a header does not match anything.
func (t *memTreap) lookupAddr(addr uintptr) *memTreapNode {
	// Standard BST descent; larger addresses live in the left subtree.
	n := t.root
	for n != nil && !n.contains(addr) {
		if addr > uintptr(unsafe.Pointer(n)) {
			n = n.left
		} else {
			n = n.right
		}
	}

	return n
}
|
||||
|
||||
// replace a node with another node on the treap.
// Only the parent linkage is rewired: old's child links are NOT transferred,
// so the caller must ensure new already owns any children it needs
// (remove only uses this to splice a node's single child into its place).
func (t *memTreap) replace(old, new *memTreapNode) {
	if gcAsserts && (old == nil || new == nil) {
		if gcDebug {
			println("tried to replace:", old, "->", new)
		}
		runtimePanic("invalid replacement")
	}
	if gcAsserts && old.parent == nil && old != t.root {
		if gcDebug {
			println("tried to replace:", old, "->", new)
			t.print()
		}
		runtimePanic("corrupted tree")
	}
	new.parent = old.parent
	if old == t.root {
		t.root = new
	} else {
		// new came from old's subtree, so by the BST (address) invariant it
		// compares the same way against the parent and lands in the same slot.
		*new.parentSlot() = new
	}
}
|
||||
|
||||
// remove a node from the treap.
// The node is rotated downward — always lifting its higher-priority child, to
// preserve the heap invariant — until it has at most one child, then unlinked.
// This does not free the allocation.
func (t *memTreap) remove(n *memTreapNode) {
scan:
	for {
		switch {
		case n.left == nil && n.right == nil && n.parent == nil:
			// This is the only node - uproot it.
			t.root = nil
			break scan
		case n.left == nil && n.right == nil:
			// There are no nodes beneath here, so just remove this node from the parent.
			*n.parentSlot() = nil
			break scan
		case n.left != nil && n.right == nil:
			// A single child: splice it into n's place.
			t.replace(n, n.left)
			break scan
		case n.right != nil && n.left == nil:
			t.replace(n, n.right)
			break scan
		default:
			// Two children: rotate this node downward below the
			// higher-priority child, then re-examine it.
			if n.left.priority() > n.right.priority() {
				t.rotate(n.left, n)
			} else {
				t.rotate(n.right, n)
			}
		}
	}

	// Zero the link fields so the node can be reused
	// (push and insert assert that they are nil).
	n.left = nil
	n.right = nil
	n.parent = nil
}
|
||||
|
||||
// memTreapNode is a treap node used to track allocations for the garbage collector.
// This struct is prepended to every allocation: the user-visible memory starts
// at the zero-sized base field, immediately after the link and size fields.
type memTreapNode struct {
	parent, left, right *memTreapNode // treap links; reused as queue links by memScanQueue
	size                uintptr       // size of the user allocation in bytes (header excluded)
	base                struct{}      // zero-sized marker for the start of the user allocation
}
|
||||
|
||||
// priority computes a pseudo-random priority value for this treap node.
|
||||
// This value is a fibonacci hash (https://en.wikipedia.org/wiki/Hash_function#Fibonacci_hashing) of the node's memory address.
|
||||
func (n *memTreapNode) priority() uintptr {
|
||||
// Select fibonacci multiplier for this bit-width.
|
||||
var fibonacciMultiplier uint64
|
||||
switch 8 * unsafe.Sizeof(uintptr(0)) {
|
||||
case 16:
|
||||
fibonacciMultiplier = 40503
|
||||
case 32:
|
||||
fibonacciMultiplier = 2654435769
|
||||
case 64:
|
||||
fibonacciMultiplier = 11400714819323198485
|
||||
default:
|
||||
runtimePanic("invalid size of uintptr")
|
||||
}
|
||||
|
||||
// Hash the pointer.
|
||||
return uintptr(fibonacciMultiplier) * uintptr(unsafe.Pointer(n))
|
||||
}
|
||||
|
||||
// contains returns whether this allocation contains a given address.
|
||||
func (n *memTreapNode) contains(addr uintptr) bool {
|
||||
return addr >= uintptr(unsafe.Pointer(&n.base)) && addr < uintptr(unsafe.Pointer(&n.base))+n.size
|
||||
}
|
||||
|
||||
// parentSlot returns a pointer to the parent's child-link that should reference
// this node. The side is derived from the address ordering: larger addresses
// are stored in the left subtree.
// NOTE: n.parent must be non-nil; callers guarantee this (insert seeds
// n.parent before calling, and the rotations only use it on non-root nodes).
func (n *memTreapNode) parentSlot() **memTreapNode {
	if uintptr(unsafe.Pointer(n)) > uintptr(unsafe.Pointer(n.parent)) {
		return &n.parent.left
	} else {
		return &n.parent.right
	}
}
|
||||
|
||||
// memScanQueue is a FIFO queue of memTreapNodes.
// It reuses the nodes' treap link fields (left/right) as queue links — a node
// is never in the treap and the queue at the same time. Nodes are pushed at
// the head and popped at the tail.
type memScanQueue struct {
	head, tail *memTreapNode
}

// push adds an allocation onto the queue.
// The node must have all link fields zeroed (i.e. not be in the treap).
func (q *memScanQueue) push(n *memTreapNode) {
	if gcAsserts && (n.left != nil || n.right != nil || n.parent != nil) {
		runtimePanic("tried to push a treap node that is in use")
	}

	if q.head == nil {
		// Queue was empty: the new node is also the tail.
		q.tail = n
	} else {
		q.head.left = n
	}
	n.right = q.head
	q.head = n
}

// pop removes the next (oldest) allocation from the queue.
// NOTE: must not be called on an empty queue; callers check empty() first.
func (q *memScanQueue) pop() *memTreapNode {
	n := q.tail
	q.tail = n.left
	if q.tail == nil {
		q.head = nil
	}
	// Zero the links so the node can go straight back into the treap.
	n.left = nil
	n.right = nil
	return n
}

// empty returns whether the queue contains any allocations.
func (q *memScanQueue) empty() bool {
	return q.tail == nil
}
|
||||
|
||||
// allocations is a treap containing all allocations — during a collection
// cycle, only those not yet proven reachable.
var allocations memTreap

// usedMem is the total amount of allocated memory (including the space taken up by memory treap nodes).
var usedMem uintptr

// firstPtr and lastPtr are the bounds of memory used by the heap.
// They are computed before the collector starts marking, and are used to quickly eliminate false positives.
var firstPtr, lastPtr uintptr

// scanQueue is a queue of marked allocations that still need to be scanned.
var scanQueue memScanQueue
|
||||
|
||||
// mark searches for an allocation containing the given address and marks it if found.
// Marking moves the node out of the allocations treap and onto the scan queue,
// which guarantees each allocation is queued at most once.
// It returns whether a matching allocation was found.
func mark(addr uintptr) bool {
	if addr < firstPtr || addr > lastPtr {
		// Pointer is outside of allocated bounds — cheap false-positive filter.
		return false
	}

	node := allocations.lookupAddr(addr)
	if node != nil {
		if gcDebug {
			println("mark:", addr)
		}
		allocations.remove(node)
		scanQueue.push(node)
	}

	return node != nil
}
|
||||
|
||||
// markRoot marks the allocation (if any) that the root value points to.
// addr is the location the root was loaded from and is only used for debug output.
func markRoot(addr uintptr, root uintptr) {
	marked := mark(root)
	if gcDebug {
		if marked {
			println("marked root:", root, "at", addr)
		} else if addr != 0 {
			println("did not mark root:", root, "at", addr)
		}
	}
}

// markRoots conservatively scans the given memory region for roots.
func markRoots(start uintptr, end uintptr) {
	scan(start, end)
}
|
||||
|
||||
// scan loads all pointer-aligned words in [start, end) and marks any pointers that it finds.
func scan(start uintptr, end uintptr) {
	// Round start up and end down to pointer alignment so only properly
	// aligned words are dereferenced.
	start = (start + unsafe.Alignof(unsafe.Pointer(nil)) - 1) &^ (unsafe.Alignof(unsafe.Pointer(nil)) - 1)
	end &^= unsafe.Alignof(unsafe.Pointer(nil)) - 1

	// Mark all pointers.
	for ptr := start; ptr < end; ptr += unsafe.Alignof(unsafe.Pointer(nil)) {
		mark(*(*uintptr)(unsafe.Pointer(ptr)))
	}
}
|
||||
|
||||
// scan marks all allocations referenced by this allocation.
|
||||
// This should only be invoked by the garbage collector.
|
||||
func (n *memTreapNode) scan() {
|
||||
start := uintptr(unsafe.Pointer(&n.base))
|
||||
end := start + n.size
|
||||
scan(start, end)
|
||||
}
|
||||
|
||||
// destroy removes and frees all allocations in the treap.
// The traversal is iterative: descend to a leaf, unlink and free it, then
// jump back to its parent (whose child link has just been cleared) and repeat
// until the tree is empty.
func (t *memTreap) destroy() {
	n := t.root
	for n != nil {
		switch {
		case n.left != nil:
			// Destroy the left subtree.
			n = n.left
		case n.right != nil:
			// Destroy the right subtree.
			n = n.right
		default:
			// This is a leaf node, so delete it and jump back to the parent.

			// Save the parent to jump back to.
			parent := n.parent

			if parent != nil {
				*n.parentSlot() = nil
			} else {
				t.root = nil
			}

			// Update used memory.
			usedMem -= unsafe.Sizeof(memTreapNode{}) + n.size
			if gcDebug {
				println("collecting:", &n.base, "size:", n.size)
				println("used memory:", usedMem)
			}

			// Free the node through the external allocator.
			extfree(unsafe.Pointer(n))

			// Jump back to the parent node.
			n = parent
		}
	}
}
|
||||
|
||||
// gcrunning is used by gcAsserts to determine whether the garbage collector is running.
// This is used to detect if the collector is invoking itself or trying to allocate memory.
var gcrunning bool

// activeMem is a treap used to store marked allocations which have already been scanned.
// This is only used while the garbage collector is running; at the end of a
// cycle it becomes the new allocations treap.
var activeMem memTreap
|
||||
|
||||
// GC performs a full conservative mark/sweep collection cycle: mark from the
// globals and the stack, scan everything transitively reachable, then free
// every allocation that was never marked.
func GC() {
	if gcDebug {
		println("running GC")
	}
	if allocations.empty() {
		// Skip collection because the heap is empty.
		// (This also keeps the minAddr/maxAddr calls below safe.)
		if gcDebug {
			println("nothing to collect")
		}
		return
	}

	if gcAsserts {
		if gcrunning {
			runtimePanic("GC called itself")
		}
		gcrunning = true
	}

	if gcDebug {
		println("pre-GC allocations:")
		allocations.print()
	}

	// Before scanning, find the lowest and highest allocated pointers.
	// These can be quickly compared against to eliminate most false positives.
	firstPtr, lastPtr = allocations.minAddr(), allocations.maxAddr()

	// Start by scanning all of the global variables and the stack.
	markGlobals()
	markStack()

	// Scan all referenced allocations, building a new treap with marked allocations.
	// The marking process deletes the allocations from the old allocations treap, so they are only queued once.
	for !scanQueue.empty() {
		// Pop a marked node off of the scan queue.
		n := scanQueue.pop()

		// Scan and mark all nodes that this references.
		n.scan()

		// Insert this node into the new treap.
		activeMem.insert(n)
	}

	// The allocations treap now only contains unreferenced nodes. Destroy them all.
	allocations.destroy()
	if gcAsserts && !allocations.empty() {
		runtimePanic("failed to fully destroy allocations")
	}

	// Replace the allocations treap with the new treap.
	allocations = activeMem
	activeMem = memTreap{}

	if gcDebug {
		println("GC finished")
	}

	if gcAsserts {
		gcrunning = false
	}
}
|
||||
|
||||
// heapBound is used to control the growth of the heap.
// When the heap exceeds this size, the garbage collector is run.
// If the garbage collector cannot free up enough memory, the bound is doubled until the allocation fits.
// The initial value leaves room for a handful of small allocations before the
// first collection.
var heapBound uintptr = 4 * unsafe.Sizeof(memTreapNode{})

// zeroSizedAlloc is just a sentinel that gets returned when allocating 0 bytes.
// All zero-byte allocations share this one address.
var zeroSizedAlloc uint8
|
||||
|
||||
// alloc tries to find some free space on the heap, possibly doing a garbage
// collection cycle if needed. If no space is free, it panics.
// The returned memory is zeroed. The allocation is tracked by prepending a
// memTreapNode header to the raw memory obtained from the external allocator
// and inserting it into the allocations treap.
//go:noinline
func alloc(size uintptr) unsafe.Pointer {
	if size == 0 {
		// All zero-sized allocations share one sentinel address.
		return unsafe.Pointer(&zeroSizedAlloc)
	}

	if gcAsserts && gcrunning {
		runtimePanic("allocated inside the garbage collector")
	}

	// Calculate size of allocation including treap node.
	allocSize := unsafe.Sizeof(memTreapNode{}) + size

	var gcRan bool
	for {
		// Try to bound heap growth.
		if usedMem+allocSize < usedMem {
			// uintptr overflow: the request cannot fit in the address space.
			if gcDebug {
				println("current mem:", usedMem, "alloc size:", allocSize)
			}
			runtimePanic("target heap size exceeds address space size")
		}
		if usedMem+allocSize > heapBound {
			if !gcRan {
				// Run the garbage collector before growing the heap.
				if gcDebug {
					println("heap reached size limit")
				}
				GC()
				gcRan = true
				continue
			} else {
				// Grow the heap bound to fit the allocation.
				for heapBound != 0 && usedMem+allocSize > heapBound {
					heapBound <<= 1
				}
				if heapBound == 0 {
					// The doubling overflowed to zero.
					// This is only possible on hosted 32-bit systems.
					// Allow the heap bound to encompass everything.
					heapBound = ^uintptr(0)
				}
				if gcDebug {
					println("raising heap size limit to", heapBound)
				}
			}
		}

		// Allocate the memory.
		ptr := extalloc(allocSize)
		if ptr == nil {
			if gcDebug {
				println("extalloc failed")
			}
			if gcRan {
				// Garbage collector was not able to free up enough memory.
				runtimePanic("out of memory")
			} else {
				// Run the garbage collector and try again.
				GC()
				gcRan = true
				continue
			}
		}

		// Initialize the memory treap node at the start of the raw allocation.
		node := (*memTreapNode)(ptr)
		*node = memTreapNode{
			size: size,
		}

		// Insert allocation into the allocations treap.
		allocations.insert(node)

		// Extract the user's section of the allocation (just past the header).
		ptr = unsafe.Pointer(&node.base)
		if gcAsserts && !node.contains(uintptr(ptr)) {
			runtimePanic("node is not self-contained")
		}
		if gcAsserts {
			// Verify the freshly inserted node can be found again.
			check := allocations.lookupAddr(uintptr(ptr))
			if check == nil {
				if gcDebug {
					println("failed to find:", ptr)
					allocations.print()
				}
				runtimePanic("bad insert")
			}
		}

		// Zero the allocation.
		memzero(ptr, size)

		// Update used memory.
		usedMem += allocSize

		if gcDebug {
			println("allocated:", uintptr(ptr), "size:", size)
			println("used memory:", usedMem)
		}

		return ptr
	}
}
|
||||
|
||||
// free would eagerly release an allocation back to the external allocator.
// It is a no-op for now; memory is instead reclaimed by the next GC() cycle.
func free(ptr unsafe.Pointer) {
	// Currently unimplemented due to bugs in coroutine lowering.
}
|
|
@ -1,4 +1,4 @@
|
|||
// +build gc.conservative
|
||||
// +build gc.conservative gc.extalloc
|
||||
// +build baremetal
|
||||
|
||||
package runtime
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// +build gc.conservative
|
||||
// +build gc.conservative gc.extalloc
|
||||
// +build !baremetal
|
||||
|
||||
package runtime
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// +build gc.conservative
|
||||
// +build gc.conservative gc.extalloc
|
||||
// +build !baremetal
|
||||
|
||||
package runtime
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// +build gc.conservative
|
||||
// +build gc.conservative gc.extalloc
|
||||
// +build baremetal
|
||||
|
||||
package runtime
|
||||
|
|
|
@ -24,10 +24,6 @@ func exit(code int)
|
|||
//go:export clock_gettime
|
||||
func clock_gettime(clk_id int32, ts *timespec)
|
||||
|
||||
const heapSize = 1 * 1024 * 1024 // 1MB to start
|
||||
|
||||
var heapStart, heapEnd uintptr
|
||||
|
||||
type timeUnit int64
|
||||
|
||||
const tickMicros = 1
|
||||
|
@ -47,8 +43,7 @@ func postinit() {}
|
|||
// Entry point for Go. Initialize all packages and call main.main().
|
||||
//go:export main
|
||||
func main() int {
|
||||
heapStart = uintptr(malloc(heapSize))
|
||||
heapEnd = heapStart + heapSize
|
||||
preinit()
|
||||
|
||||
run()
|
||||
|
||||
|
@ -83,3 +78,10 @@ func ticks() timeUnit {
|
|||
func syscall_Exit(code int) {
|
||||
exit(code)
|
||||
}
|
||||
|
||||
// extalloc obtains raw memory for the extalloc collector from the external
// (libc) allocator.
func extalloc(size uintptr) unsafe.Pointer {
	return malloc(size)
}

// extfree releases memory previously obtained from extalloc.
// Declared without a body: it is bound to the external "free" symbol.
//go:export free
func extfree(ptr unsafe.Pointer)
|
||||
|
|
14
src/runtime/runtime_unix_heap.go
Обычный файл
14
src/runtime/runtime_unix_heap.go
Обычный файл
|
@ -0,0 +1,14 @@
|
|||
// +build darwin linux,!baremetal freebsd,!baremetal
|
||||
|
||||
// +build gc.conservative gc.leaking
|
||||
|
||||
package runtime
|
||||
|
||||
// heapSize is the initial heap size requested from the system allocator.
const heapSize = 1 * 1024 * 1024 // 1MB to start

// heapStart and heapEnd delimit the memory region handed to the collector.
var heapStart, heapEnd uintptr

// preinit reserves the initial heap region from the system allocator before
// the runtime starts.
func preinit() {
	heapStart = uintptr(malloc(heapSize))
	heapEnd = heapStart + heapSize
}
|
7
src/runtime/runtime_unix_noheap.go
Обычный файл
7
src/runtime/runtime_unix_noheap.go
Обычный файл
|
@ -0,0 +1,7 @@
|
|||
// +build darwin linux,!baremetal freebsd,!baremetal
|
||||
|
||||
// +build gc.none gc.extalloc
|
||||
|
||||
package runtime
|
||||
|
||||
// preinit is a no-op: these GC configurations (gc.none and gc.extalloc, per
// the build tags above) do not need a preallocated heap region.
func preinit() {}
|
|
@ -60,10 +60,11 @@ func LowerCoroutines(mod llvm.Module, needStackSlots bool) error {
|
|||
defer target.Dispose()
|
||||
|
||||
pass := &coroutineLoweringPass{
|
||||
mod: mod,
|
||||
ctx: ctx,
|
||||
builder: builder,
|
||||
target: target,
|
||||
mod: mod,
|
||||
ctx: ctx,
|
||||
builder: builder,
|
||||
target: target,
|
||||
needStackSlots: needStackSlots,
|
||||
}
|
||||
|
||||
err := pass.load()
|
||||
|
@ -149,6 +150,9 @@ type coroutineLoweringPass struct {
|
|||
|
||||
// llvm.coro intrinsics
|
||||
coroId, coroSize, coroBegin, coroSuspend, coroEnd, coroFree, coroSave llvm.Value
|
||||
|
||||
trackPointer llvm.Value
|
||||
needStackSlots bool
|
||||
}
|
||||
|
||||
// findAsyncFuncs finds all asynchronous functions.
|
||||
|
@ -265,6 +269,13 @@ func (c *coroutineLoweringPass) load() error {
|
|||
return ErrMissingIntrinsic{"internal/task.createTask"}
|
||||
}
|
||||
|
||||
if c.needStackSlots {
|
||||
c.trackPointer = c.mod.NamedFunction("runtime.trackPointer")
|
||||
if c.trackPointer.IsNil() {
|
||||
return ErrMissingIntrinsic{"runtime.trackPointer"}
|
||||
}
|
||||
}
|
||||
|
||||
// Find async functions.
|
||||
c.findAsyncFuncs()
|
||||
|
||||
|
@ -297,6 +308,15 @@ func (c *coroutineLoweringPass) load() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (c *coroutineLoweringPass) track(ptr llvm.Value) {
|
||||
if c.needStackSlots {
|
||||
if ptr.Type() != c.i8ptr {
|
||||
ptr = c.builder.CreateBitCast(ptr, c.i8ptr, "track.bitcast")
|
||||
}
|
||||
c.builder.CreateCall(c.trackPointer, []llvm.Value{ptr, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "")
|
||||
}
|
||||
}
|
||||
|
||||
// lowerStartSync lowers a goroutine start of a synchronous function to a synchronous call.
|
||||
func (c *coroutineLoweringPass) lowerStartSync(start llvm.Value) {
|
||||
c.builder.SetInsertPointBefore(start)
|
||||
|
@ -662,6 +682,7 @@ func (c *coroutineLoweringPass) lowerFuncCoro(fn *asyncFunc) {
|
|||
coroAlloc := c.builder.CreateCall(c.alloc, []llvm.Value{coroSize, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "coro.alloc")
|
||||
// %coro.state = call noalias i8* @llvm.coro.begin(token %coro.id, i8* %coro.alloc)
|
||||
coroState := c.builder.CreateCall(c.coroBegin, []llvm.Value{coroId, coroAlloc}, "coro.state")
|
||||
c.track(coroState)
|
||||
// Store state into task.
|
||||
task := c.builder.CreateCall(c.current, []llvm.Value{llvm.Undef(c.i8ptr), fn.rawTask}, "task")
|
||||
parentState := c.builder.CreateCall(c.setState, []llvm.Value{task, coroState, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "task.state.parent")
|
||||
|
@ -795,6 +816,9 @@ func (c *coroutineLoweringPass) lowerFuncCoro(fn *asyncFunc) {
|
|||
if call.CalledValue() == c.pause {
|
||||
call.EraseFromParentAsInstruction()
|
||||
}
|
||||
|
||||
c.builder.SetInsertPointBefore(wakeup.FirstInstruction())
|
||||
c.track(coroState)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
Loading…
Creating table
Reference in a new issue