diff --git a/src/pkg/runtime/export_test.go b/src/pkg/runtime/export_test.go
index 51921135be..d50040adcf 100644
--- a/src/pkg/runtime/export_test.go
+++ b/src/pkg/runtime/export_test.go
@@ -25,3 +25,14 @@ var Entersyscall = entersyscall
 var Exitsyscall = exitsyscall
 var LockedOSThread = golockedOSThread
 var Stackguard = stackguard
+
+type LFNode struct {
+	Next    *LFNode
+	Pushcnt uintptr
+}
+
+func lfstackpush(head *uint64, node *LFNode)
+func lfstackpop2(head *uint64) *LFNode
+
+var LFStackPush = lfstackpush
+var LFStackPop = lfstackpop2
diff --git a/src/pkg/runtime/lfstack.c b/src/pkg/runtime/lfstack.c
new file mode 100644
index 0000000000..e4ea6e83da
--- /dev/null
+++ b/src/pkg/runtime/lfstack.c
@@ -0,0 +1,64 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lock-free stack.
+
+#include "runtime.h"
+#include "arch_GOARCH.h"
+
+#ifdef _64BIT
+// Amd64 uses 48-bit virtual addresses, 47-th bit is used as kernel/user flag.
+// So we use 17msb of pointers as ABA counter.
+# define PTR_BITS 47
+#else
+# define PTR_BITS 32
+#endif
+#define PTR_MASK ((1ull<<PTR_BITS)-1)
+
+void
+runtime·lfstackpush(uint64 *head, LFNode *node)
+{
+	uint64 old, new;
+
+	if((uint64)node != ((uint64)node&PTR_MASK)) {
+		runtime·printf("p=%p\n", node);
+		runtime·throw("lfstackpush: invalid pointer");
+	}
+
+	node->pushcnt++;
+	new = (uint64)node|(((uint64)node->pushcnt)<<PTR_BITS);
+	old = runtime·atomicload64(head);
+	for(;;) {
+		node->next = (LFNode*)(old&PTR_MASK);
+		if(runtime·cas64(head, &old, new))
+			break;
+	}
+}
+
+LFNode*
+runtime·lfstackpop(uint64 *head)
+{
+	LFNode *node, *node2;
+	uint64 old, new;
+
+	old = runtime·atomicload64(head);
+	for(;;) {
+		if(old == 0)
+			return nil;
+		node = (LFNode*)(old&PTR_MASK);
+		node2 = runtime·atomicloadp(&node->next);
+		new = 0;
+		if(node2 != nil)
+			new = (uint64)node2|(((uint64)node2->pushcnt)<<PTR_BITS);
+		if(runtime·cas64(head, &old, new))
+			return node;
+	}
+}
+
+LFNode*
+runtime·lfstackpop2(uint64 *head)
+{
+	return runtime·lfstackpop(head);
+}