
Merge pull request #71 from achilleasa/implement-and-use-spinlocks

Implement and use spinlocks
Achilleas Anagnostopoulos 2018-06-16 08:01:40 +01:00 committed by GitHub
commit 1a7aca464e
5 changed files with 133 additions and 1 deletion

View File

@@ -9,7 +9,6 @@ QEMU ?= qemu-system-x86_64
# If your go is called something else, set it on the command line, like this: make run GO=go1.8
GO ?= go
GOOS := linux
GOARCH := amd64
GOROOT := $(shell $(GO) env GOROOT)
@@ -34,6 +33,8 @@ FUZZ_PKG_LIST := src/gopheros/device/acpi/aml
# FUZZ_PKG_LIST += path-to-pkg
ifeq ($(OS), Linux)
GOOS := linux
MIN_OBJCOPY_VERSION := 2.26.0
HAVE_VALID_OBJCOPY := $(shell objcopy -V | head -1 | awk -F ' ' '{print "$(MIN_OBJCOPY_VERSION)\n" $$NF}' | sort -ct. -k1,1n -k2,2n && echo "y")
@@ -190,6 +191,7 @@ lint: lint-check-deps
--exclude 'x \^ 0 always equals x' \
--exclude 'dispatchInterrupt is unused' \
--exclude 'interruptGateEntries is unused' \
--exclude 'yieldFn is unused' \
src/...
lint-check-deps:

View File

@@ -5,6 +5,7 @@ import (
"gopheros/kernel/kfmt"
"gopheros/kernel/mm"
"gopheros/kernel/mm/vmm"
"gopheros/kernel/sync"
"gopheros/multiboot"
"math"
"reflect"
@@ -51,6 +52,8 @@ type framePool struct {
// BitmapAllocator implements a physical frame allocator that tracks frame
// reservations across the available memory pools using bitmaps.
type BitmapAllocator struct {
mutex sync.Spinlock
// totalPages tracks the total number of pages across all pools.
totalPages uint32
@@ -240,6 +243,8 @@ func (alloc *BitmapAllocator) printStats() {
// AllocFrame reserves and returns a physical memory frame. An error will be
// returned if no more memory can be allocated.
func (alloc *BitmapAllocator) AllocFrame() (mm.Frame, *kernel.Error) {
alloc.mutex.Acquire()
for poolIndex := 0; poolIndex < len(alloc.pools); poolIndex++ {
if alloc.pools[poolIndex].freeCount == 0 {
continue
@@ -260,11 +265,13 @@ func (alloc *BitmapAllocator) AllocFrame() (mm.Frame, *kernel.Error) {
alloc.pools[poolIndex].freeCount--
alloc.pools[poolIndex].freeBitmap[blockIndex] |= mask
alloc.reservedPages++
alloc.mutex.Release()
return alloc.pools[poolIndex].startFrame + mm.Frame((blockIndex<<6)+blockOffset), nil
}
}
}
alloc.mutex.Release()
return mm.InvalidFrame, errBitmapAllocOutOfMemory
}
@@ -272,8 +279,11 @@ func (alloc *BitmapAllocator) AllocFrame() (mm.Frame, *kernel.Error) {
// Trying to release a frame not part of the allocator pools or a frame that
// is already marked as free will cause an error to be returned.
func (alloc *BitmapAllocator) FreeFrame(frame mm.Frame) *kernel.Error {
alloc.mutex.Acquire()
poolIndex := alloc.poolForFrame(frame)
if poolIndex < 0 {
alloc.mutex.Release()
return errBitmapAllocFrameNotManaged
}
@@ -282,11 +292,13 @@ func (alloc *BitmapAllocator) FreeFrame(frame mm.Frame) *kernel.Error {
mask := uint64(1 << (63 - (relFrame - block<<6)))
if alloc.pools[poolIndex].freeBitmap[block]&mask == 0 {
alloc.mutex.Release()
return errBitmapAllocDoubleFree
}
alloc.pools[poolIndex].freeBitmap[block] &^= mask
alloc.pools[poolIndex].freeCount++
alloc.reservedPages--
alloc.mutex.Release()
return nil
}
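
The pattern in these hunks, taking the lock on entry and releasing it on every exit path, including the early error returns, is easy to get wrong. Below is a minimal, runnable sketch of the same discipline (not gopher-os code: the spinlock is rebuilt on sync/atomic so the example runs as a plain Go program, and the pool type is a hypothetical stand-in for BitmapAllocator):

package main

import (
	"fmt"
	"sync/atomic"
)

// spinlock is a stand-in for gopheros/kernel/sync.Spinlock.
type spinlock struct{ state uint32 }

func (l *spinlock) Acquire() {
	for !atomic.CompareAndSwapUint32(&l.state, 0, 1) {
	}
}

func (l *spinlock) Release() { atomic.StoreUint32(&l.state, 0) }

// pool mirrors the shape of the patched allocator: the lock is
// taken on entry and released on every exit path, including the
// early error return, just like AllocFrame and FreeFrame above.
type pool struct {
	mutex     spinlock
	freeCount int
}

func (p *pool) alloc() (int, bool) {
	p.mutex.Acquire()
	if p.freeCount == 0 {
		p.mutex.Release() // error path must release too
		return 0, false
	}
	p.freeCount--
	frame := p.freeCount
	p.mutex.Release()
	return frame, true
}

func main() {
	p := pool{freeCount: 2}
	for {
		frame, ok := p.alloc()
		if !ok {
			fmt.Println("out of frames")
			return
		}
		fmt.Println("allocated frame", frame)
	}
}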

View File

@@ -0,0 +1,38 @@
// Package sync provides synchronization primitive implementations for
// spinlocks and semaphores.
package sync

import "sync/atomic"

var (
	// TODO: replace with real yield function when context-switching is implemented.
	yieldFn func()
)

// Spinlock implements a lock where each task trying to acquire it busy-waits
// until the lock becomes available.
type Spinlock struct {
	state uint32
}

// Acquire blocks until the lock can be acquired by the currently active task.
// Any attempt to re-acquire a lock already held by the current task will cause
// a deadlock.
func (l *Spinlock) Acquire() {
	archAcquireSpinlock(&l.state, 1)
}

// TryToAcquire attempts to acquire the lock and returns true if the lock could
// be acquired or false otherwise.
func (l *Spinlock) TryToAcquire() bool {
	return atomic.SwapUint32(&l.state, 1) == 0
}

// Release relinquishes a held lock, allowing other tasks to acquire it. Calling
// Release while the lock is free has no effect.
func (l *Spinlock) Release() {
	atomic.StoreUint32(&l.state, 0)
}

// archAcquireSpinlock is an arch-specific implementation for acquiring the lock.
func archAcquireSpinlock(state *uint32, attemptsBeforeYielding uint32)
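
A short usage sketch of the API this file adds (not part of the commit; the import path matches the allocator hunk above, while listLock and sharedList are hypothetical):

package example

import "gopheros/kernel/sync"

var (
	listLock   sync.Spinlock
	sharedList []uint64
)

// appendValue serializes writers with the blocking Acquire.
func appendValue(v uint64) {
	listLock.Acquire()
	sharedList = append(sharedList, v)
	listLock.Release()
}

// tryAppendValue backs off instead of spinning when the lock is
// contended, using the non-blocking TryToAcquire.
func tryAppendValue(v uint64) bool {
	if !listLock.TryToAcquire() {
		return false
	}
	sharedList = append(sharedList, v)
	listLock.Release()
	return true
}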

View File

@@ -0,0 +1,41 @@
#include "textflag.h"
TEXT ·archAcquireSpinlock(SB),NOSPLIT,$0-12
MOVQ state+0(FP), AX
MOVL attemptsBeforeYielding+8(FP), CX
try_acquire:
MOVL $1, BX
XCHGL 0(AX), BX
TESTL BX, BX
JNZ spin
// Lock successfully acquired
RET
spin:
// Send hint to the CPU that we are in a spinlock loop
PAUSE
// Do a dirty read to check the state and try to acquire the lock
// once we detect it is free
MOVL 0(AX), BX
TESTL BX, BX
JZ try_acquire
// Keep retrying until we exceed attemptsBeforeYielding; this allows us
// to grab the lock quickly if a task on another CPU releases it while
// we spin.
DECL CX
JNZ spin
// Yield (if yieldFn is set) and spin again
MOVQ ·yieldFn+0(SB), AX
TESTQ AX, AX
JZ replenish_attempt_counter
CALL 0(AX)
replenish_attempt_counter:
MOVQ state+0(FP), AX
MOVL attemptsBeforeYielding+8(FP), CX
JMP spin
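
For readability, here is the same control flow re-expressed as Go (a sketch, not part of the commit; the assembly's plain MOVL dirty read is approximated with atomic.LoadUint32, and PAUSE has no direct Go equivalent):

package sync

import "sync/atomic"

// archAcquireSpinlockGo mirrors the assembly above: an XCHG-based
// test-and-set, a dirty-read spin loop, and a call to yieldFn
// after attemptsBeforeYielding failed reads.
func archAcquireSpinlockGo(state *uint32, attemptsBeforeYielding uint32) {
	for {
		// try_acquire: swap 1 in; getting 0 back means we now own the lock.
		if atomic.SwapUint32(state, 1) == 0 {
			return
		}
		// spin: read until the lock looks free, then retry the swap.
		for attempts := attemptsBeforeYielding; atomic.LoadUint32(state) != 0; {
			attempts--
			if attempts == 0 {
				// Yield (if yieldFn is set) and replenish the counter.
				if yieldFn != nil {
					yieldFn()
				}
				attempts = attemptsBeforeYielding
			}
		}
	}
}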

View File

@@ -0,0 +1,39 @@
package sync

import (
	"runtime"
	"sync"
	"testing"
	"time"
)

func TestSpinlock(t *testing.T) {
	// Substitute runtime.Gosched for yieldFn to avoid deadlocks while testing.
	defer func(origYieldFn func()) { yieldFn = origYieldFn }(yieldFn)
	yieldFn = runtime.Gosched

	var (
		sl         Spinlock
		wg         sync.WaitGroup
		numWorkers = 10
	)

	sl.Acquire()

	if sl.TryToAcquire() {
		t.Error("expected TryToAcquire to return false when lock is held")
	}

	wg.Add(numWorkers)
	for i := 0; i < numWorkers; i++ {
		go func(worker int) {
			sl.Acquire()
			sl.Release()
			wg.Done()
		}(i)
	}

	<-time.After(100 * time.Millisecond)
	sl.Release()
	wg.Wait()
}
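
The first two statements of TestSpinlock use a compact idiom for stubbing a package-level hook: the original value is captured as the argument of a deferred closure, so it is restored even if the test fails midway. The same idiom in isolation (a sketch, not part of the commit; TestYieldFnStubbing and its counting stub are hypothetical):

package sync

import (
	"runtime"
	"testing"
)

func TestYieldFnStubbing(t *testing.T) {
	// Capture the current hook as the deferred call's argument and
	// restore it when the test returns, pass or fail.
	defer func(orig func()) { yieldFn = orig }(yieldFn)

	calls := 0
	yieldFn = func() {
		calls++
		runtime.Gosched()
	}

	var sl Spinlock
	sl.Acquire()
	done := make(chan struct{})
	go func() {
		sl.Release() // let the spinning Acquire below proceed
		close(done)
	}()
	sl.Acquire() // may spin through the stubbed yieldFn until released
	sl.Release()
	<-done

	// calls may legitimately be zero if the release won the race.
	t.Logf("yield stub invoked %d times", calls)
}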