From 50cabf1d9505e31754cf1af2ff3ca6d137f4a3a3 Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Fri, 15 Jun 2018 22:59:02 +0100 Subject: [PATCH] sync: implement spinlock primitive --- src/gopheros/kernel/sync/spinlock.go | 38 +++++++++++++++++++++ src/gopheros/kernel/sync/spinlock_amd64.s | 41 +++++++++++++++++++++++ src/gopheros/kernel/sync/spinlock_test.go | 39 +++++++++++++++++++++ 3 files changed, 118 insertions(+) create mode 100644 src/gopheros/kernel/sync/spinlock.go create mode 100644 src/gopheros/kernel/sync/spinlock_amd64.s create mode 100644 src/gopheros/kernel/sync/spinlock_test.go diff --git a/src/gopheros/kernel/sync/spinlock.go b/src/gopheros/kernel/sync/spinlock.go new file mode 100644 index 0000000..2759e2f --- /dev/null +++ b/src/gopheros/kernel/sync/spinlock.go @@ -0,0 +1,38 @@ +// Package sync provides synchronization primitive implementations for spinlocks +// and semaphores. +package sync + +import "sync/atomic" + +var ( + // TODO: replace with real yield function when context-switching is implemented. + yieldFn func() +) + +// Spinlock implements a lock where each task trying to acquire it busy-waits +// till the lock becomes available. +type Spinlock struct { + state uint32 +} + +// Acquire blocks until the lock can be acquired by the currently active task. +// Any attempt to re-acquire a lock already held by the current task will cause +// a deadlock. +func (l *Spinlock) Acquire() { + archAcquireSpinlock(&l.state, 1) +} + +// TryToAcquire attempts to acquire the lock and returns true if the lock could +// be acquired or false otherwise. +func (l *Spinlock) TryToAcquire() bool { + return atomic.SwapUint32(&l.state, 1) == 0 +} + +// Release relinquishes a held lock allowing other tasks to acquire it. Calling +// Release while the lock is free has no effect. +func (l *Spinlock) Release() { + atomic.StoreUint32(&l.state, 0) +} + +// archAcquireSpinlock is an arch-specific implementation for acquiring the lock. 
+func archAcquireSpinlock(state *uint32, attemptsBeforeYielding uint32) diff --git a/src/gopheros/kernel/sync/spinlock_amd64.s b/src/gopheros/kernel/sync/spinlock_amd64.s new file mode 100644 index 0000000..634066e --- /dev/null +++ b/src/gopheros/kernel/sync/spinlock_amd64.s @@ -0,0 +1,41 @@ +#include "textflag.h" + +TEXT ·archAcquireSpinlock(SB),NOSPLIT,$0-12 + MOVQ state+0(FP), AX + MOVL attemptsBeforeYielding+8(FP), CX + +try_acquire: + MOVL $1, BX + XCHGL 0(AX), BX + TESTL BX, BX + JNZ spin + + // Lock successfully acquired + RET + +spin: + // Send hint to the CPU that we are in a spinlock loop + PAUSE + + // Do a dirty read to check the state and try to acquire the lock + // once we detect it is free + MOVL 0(AX), BX + TESTL BX, BX + JZ try_acquire + + // Keep retrying till we exceed attemptsBeforeYielding; this allows us + // to grab the lock if a task on another CPU releases the lock while we + // spin. + DECL CX + JNZ spin + + // Yield (if yieldFn is set) and spin again + MOVQ ·yieldFn+0(SB), AX + TESTQ AX, AX + JZ replenish_attempt_counter + CALL 0(AX) + +replenish_attempt_counter: + MOVQ state+0(FP), AX + MOVL attemptsBeforeYielding+8(FP), CX + JMP spin diff --git a/src/gopheros/kernel/sync/spinlock_test.go b/src/gopheros/kernel/sync/spinlock_test.go new file mode 100644 index 0000000..39286e4 --- /dev/null +++ b/src/gopheros/kernel/sync/spinlock_test.go @@ -0,0 +1,39 @@ +package sync + +import ( + "runtime" + "sync" + "testing" + "time" +) + +func TestSpinlock(t *testing.T) { + // Substitute the yieldFn with runtime.Gosched to avoid deadlocks while testing + defer func(origYieldFn func()) { yieldFn = origYieldFn }(yieldFn) + yieldFn = runtime.Gosched + + var ( + sl Spinlock + wg sync.WaitGroup + numWorkers = 10 + ) + + sl.Acquire() + + if sl.TryToAcquire() != false { + t.Error("expected TryToAcquire to return false when lock is held") + } + + wg.Add(numWorkers) + for i := 0; i < numWorkers; i++ { + go func(worker int) { + sl.Acquire() + sl.Release() + 
wg.Done() + }(i) + } + + <-time.After(100 * time.Millisecond) + sl.Release() + wg.Wait() +}