From 50cabf1d9505e31754cf1af2ff3ca6d137f4a3a3 Mon Sep 17 00:00:00 2001
From: Achilleas Anagnostopoulos
Date: Fri, 15 Jun 2018 22:59:02 +0100
Subject: [PATCH 1/3] sync: implement spinlock primitive

---
 src/gopheros/kernel/sync/spinlock.go      | 38 +++++++++++++++++++++
 src/gopheros/kernel/sync/spinlock_amd64.s | 41 +++++++++++++++++++++++
 src/gopheros/kernel/sync/spinlock_test.go | 39 +++++++++++++++++++++
 3 files changed, 118 insertions(+)
 create mode 100644 src/gopheros/kernel/sync/spinlock.go
 create mode 100644 src/gopheros/kernel/sync/spinlock_amd64.s
 create mode 100644 src/gopheros/kernel/sync/spinlock_test.go

diff --git a/src/gopheros/kernel/sync/spinlock.go b/src/gopheros/kernel/sync/spinlock.go
new file mode 100644
index 0000000..2759e2f
--- /dev/null
+++ b/src/gopheros/kernel/sync/spinlock.go
@@ -0,0 +1,38 @@
+// Package sync provides synchronization primitive implementations for spinlocks
+// and semaphores.
+package sync
+
+import "sync/atomic"
+
+var (
+	// TODO: replace with real yield function when context-switching is implemented.
+	yieldFn func()
+)
+
+// Spinlock implements a lock where each task trying to acquire it busy-waits
+// until the lock becomes available.
+type Spinlock struct {
+	state uint32
+}
+
+// Acquire blocks until the lock can be acquired by the currently active task.
+// Any attempt to re-acquire a lock already held by the current task will cause
+// a deadlock.
+func (l *Spinlock) Acquire() {
+	archAcquireSpinlock(&l.state, 1)
+}
+
+// TryToAcquire attempts to acquire the lock and returns true if the lock could
+// be acquired or false otherwise.
+func (l *Spinlock) TryToAcquire() bool {
+	return atomic.SwapUint32(&l.state, 1) == 0
+}
+
+// Release relinquishes a held lock, allowing other tasks to acquire it.
+// Calling Release while the lock is free has no effect.
+func (l *Spinlock) Release() {
+	atomic.StoreUint32(&l.state, 0)
+}
+
+// archAcquireSpinlock is an arch-specific implementation for acquiring the lock.
+func archAcquireSpinlock(state *uint32, attemptsBeforeYielding uint32)
diff --git a/src/gopheros/kernel/sync/spinlock_amd64.s b/src/gopheros/kernel/sync/spinlock_amd64.s
new file mode 100644
index 0000000..634066e
--- /dev/null
+++ b/src/gopheros/kernel/sync/spinlock_amd64.s
@@ -0,0 +1,41 @@
+#include "textflag.h"
+
+TEXT ·archAcquireSpinlock(SB),NOSPLIT,$0-12
+	MOVQ state+0(FP), AX
+	MOVL attemptsBeforeYielding+8(FP), CX
+
+try_acquire:
+	MOVL $1, BX
+	XCHGL 0(AX), BX
+	TESTL BX, BX
+	JNZ spin
+
+	// Lock successfully acquired
+	RET
+
+spin:
+	// Send a hint to the CPU that we are in a spin-wait loop
+	PAUSE
+
+	// Do a dirty read to check the state and try to acquire the lock
+	// once we detect it is free
+	MOVL 0(AX), BX
+	TESTL BX, BX
+	JZ try_acquire
+
+	// Keep retrying until we exceed attemptsBeforeYielding; this allows us
+	// to grab the lock if a task on another CPU releases it while we
+	// spin.
+	DECL CX
+	JNZ spin
+
+	// Yield (if yieldFn is set) and spin again
+	MOVQ ·yieldFn+0(SB), AX
+	TESTQ AX, AX
+	JZ replenish_attempt_counter
+	CALL 0(AX)
+
+replenish_attempt_counter:
+	MOVQ state+0(FP), AX
+	MOVL attemptsBeforeYielding+8(FP), CX
+	JMP spin
diff --git a/src/gopheros/kernel/sync/spinlock_test.go b/src/gopheros/kernel/sync/spinlock_test.go
new file mode 100644
index 0000000..39286e4
--- /dev/null
+++ b/src/gopheros/kernel/sync/spinlock_test.go
@@ -0,0 +1,39 @@
+package sync
+
+import (
+	"runtime"
+	"sync"
+	"testing"
+	"time"
+)
+
+func TestSpinlock(t *testing.T) {
+	// Substitute yieldFn with runtime.Gosched to avoid deadlocks while testing
+	defer func(origYieldFn func()) { yieldFn = origYieldFn }(yieldFn)
+	yieldFn = runtime.Gosched
+
+	var (
+		sl         Spinlock
+		wg         sync.WaitGroup
+		numWorkers = 10
+	)
+
+	sl.Acquire()
+
+	if sl.TryToAcquire() {
+		t.Error("expected TryToAcquire to return false when lock is held")
+	}
+
+	wg.Add(numWorkers)
+	for i := 0; i < numWorkers; i++ {
+		go func() {
+			sl.Acquire()
+			sl.Release()
+			wg.Done()
+		}()
+	}
+
+	time.Sleep(100 * time.Millisecond)
+	sl.Release()
+	wg.Wait()
+}

From 3662f649077498884419168e0807c15970377b61 Mon Sep 17 00:00:00 2001
From: Achilleas Anagnostopoulos
Date: Fri, 15 Jun 2018 23:02:57 +0100
Subject: [PATCH 2/3] pmm: guard AllocFrame/FreeFrame access with a spinlock

---
 src/gopheros/kernel/mm/pmm/bitmap_allocator.go | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/src/gopheros/kernel/mm/pmm/bitmap_allocator.go b/src/gopheros/kernel/mm/pmm/bitmap_allocator.go
index 62a4618..d919776 100644
--- a/src/gopheros/kernel/mm/pmm/bitmap_allocator.go
+++ b/src/gopheros/kernel/mm/pmm/bitmap_allocator.go
@@ -5,6 +5,7 @@ import (
 	"gopheros/kernel/kfmt"
 	"gopheros/kernel/mm"
 	"gopheros/kernel/mm/vmm"
+	"gopheros/kernel/sync"
 	"gopheros/multiboot"
 	"math"
 	"reflect"
@@ -51,6 +52,8 @@ type framePool struct {
 // BitmapAllocator implements a physical frame allocator that tracks frame
 // reservations across the available memory pools using bitmaps.
 type BitmapAllocator struct {
+	mutex sync.Spinlock
+
 	// totalPages tracks the total number of pages across all pools.
 	totalPages uint32
 
@@ -240,6 +243,8 @@ func (alloc *BitmapAllocator) printStats() {
 // AllocFrame reserves and returns a physical memory frame. An error will be
 // returned if no more memory can be allocated.
 func (alloc *BitmapAllocator) AllocFrame() (mm.Frame, *kernel.Error) {
+	alloc.mutex.Acquire()
+
 	for poolIndex := 0; poolIndex < len(alloc.pools); poolIndex++ {
 		if alloc.pools[poolIndex].freeCount == 0 {
 			continue
@@ -260,11 +265,13 @@ func (alloc *BitmapAllocator) AllocFrame() (mm.Frame, *kernel.Error) {
 				alloc.pools[poolIndex].freeCount--
 				alloc.pools[poolIndex].freeBitmap[blockIndex] |= mask
 				alloc.reservedPages++
+				alloc.mutex.Release()
 
 				return alloc.pools[poolIndex].startFrame + mm.Frame((blockIndex<<6)+blockOffset), nil
 			}
 		}
 	}
 
+	alloc.mutex.Release()
 	return mm.InvalidFrame, errBitmapAllocOutOfMemory
 }
 
@@ -272,8 +279,11 @@ func (alloc *BitmapAllocator) AllocFrame() (mm.Frame, *kernel.Error) {
 // Trying to release a frame not part of the allocator pools or a frame that
 // is already marked as free will cause an error to be returned.
 func (alloc *BitmapAllocator) FreeFrame(frame mm.Frame) *kernel.Error {
+	alloc.mutex.Acquire()
+
 	poolIndex := alloc.poolForFrame(frame)
 	if poolIndex < 0 {
+		alloc.mutex.Release()
 		return errBitmapAllocFrameNotManaged
 	}
 
@@ -282,11 +292,13 @@
 	mask := uint64(1 << (63 - (relFrame - block<<6)))
 
 	if alloc.pools[poolIndex].freeBitmap[block]&mask == 0 {
+		alloc.mutex.Release()
 		return errBitmapAllocDoubleFree
 	}
 
 	alloc.pools[poolIndex].freeBitmap[block] &^= mask
 	alloc.pools[poolIndex].freeCount++
 	alloc.reservedPages--
+	alloc.mutex.Release()
 	return nil
 }

From 66d1750e4ebf230fc0864428a785c1f85a197b41 Mon Sep 17 00:00:00 2001
From: Achilleas Anagnostopoulos
Date: Sat, 16 Jun 2018 07:34:13 +0100
Subject: [PATCH 3/3] makefile: move GOOS envvar to correct location and add linter exception

---
 Makefile | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 730df2a..2a8a905 100644
--- a/Makefile
+++ b/Makefile
@@ -9,7 +9,6 @@ QEMU ?= qemu-system-x86_64
 
 # If your go is called something else set it on the commandline, like this: make run GO=go1.8
 GO ?= go
-GOOS := linux
 GOARCH := amd64
 GOROOT := $(shell $(GO) env GOROOT)
 
@@ -34,6 +33,8 @@ FUZZ_PKG_LIST := src/gopheros/device/acpi/aml
 # FUZZ_PKG_LIST += path-to-pkg
 
 ifeq ($(OS), Linux)
+GOOS := linux
+
 MIN_OBJCOPY_VERSION := 2.26.0
 HAVE_VALID_OBJCOPY := $(shell objcopy -V | head -1 | awk -F ' ' '{print "$(MIN_OBJCOPY_VERSION)\n" $$NF}' | sort -ct. -k1,1n -k2,2n && echo "y")
 
@@ -190,6 +191,7 @@ lint: lint-check-deps
 		--exclude 'x \^ 0 always equals x' \
 		--exclude 'dispatchInterrupt is unused' \
 		--exclude 'interruptGateEntries is unused' \
+		--exclude 'yieldFn is unused' \
 		src/...
 
 lint-check-deps:
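
Reviewer note (not part of the series): a minimal usage sketch of the Spinlock API introduced in patch 1, assuming a client package that imports gopheros/kernel/sync. The counter and the two functions below are hypothetical; only Acquire, TryToAcquire, and Release come from this patch set.

	var lock sync.Spinlock

	// counter is shared state guarded by lock.
	var counter uint64

	// incrementCounter busy-waits until the lock is held, mutates the
	// shared state, and releases the lock so other tasks may enter.
	func incrementCounter() {
		lock.Acquire()
		counter++
		lock.Release()
	}

	// tryIncrementCounter is a non-blocking variant (hypothetical): it
	// gives up immediately if another task currently holds the lock.
	func tryIncrementCounter() bool {
		if !lock.TryToAcquire() {
			return false
		}
		counter++
		lock.Release()
		return true
	}

Since Acquire spins (optionally calling yieldFn) rather than sleeping, critical sections guarded this way should stay short, as in the AllocFrame/FreeFrame changes in patch 2.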