mirror of https://github.com/taigrr/gopher-os

pmm: guard AllocFrame/FreeFrame access with a spinlock

Author: Achilleas Anagnostopoulos
Date:   2018-06-15 23:02:57 +01:00
Parent: 50cabf1d95
Commit: 3662f64907
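The Spinlock type imported from gopheros/kernel/sync is used only through its Acquire/Release methods in the diff below; its implementation is not part of this commit. Purely as an illustration, a minimal compare-and-swap based spinlock exposing that interface could look like the following sketch (everything except the Acquire/Release method names is an assumption, not the kernel's actual code):

package sync

import "sync/atomic"

// Spinlock is a sketch of a busy-waiting mutual-exclusion lock: a single
// word that is 0 when free and 1 when held.
type Spinlock struct {
	state uint32
}

// Acquire spins until it wins the compare-and-swap from 0 (free) to 1 (held).
func (l *Spinlock) Acquire() {
	for !atomic.CompareAndSwapUint32(&l.state, 0, 1) {
		// busy-wait; a real kernel spinlock would typically also emit a
		// CPU pause hint here to be friendlier to the sibling hardware thread
	}
}

// Release marks the lock as free again.
func (l *Spinlock) Release() {
	atomic.StoreUint32(&l.state, 0)
}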


@@ -5,6 +5,7 @@ import (
 	"gopheros/kernel/kfmt"
 	"gopheros/kernel/mm"
 	"gopheros/kernel/mm/vmm"
+	"gopheros/kernel/sync"
 	"gopheros/multiboot"
 	"math"
 	"reflect"
@@ -51,6 +52,8 @@ type framePool struct {
 // BitmapAllocator implements a physical frame allocator that tracks frame
 // reservations across the available memory pools using bitmaps.
 type BitmapAllocator struct {
+	mutex sync.Spinlock
+
 	// totalPages tracks the total number of pages across all pools.
 	totalPages uint32
@@ -240,6 +243,8 @@ func (alloc *BitmapAllocator) printStats() {
 // AllocFrame reserves and returns a physical memory frame. An error will be
 // returned if no more memory can be allocated.
 func (alloc *BitmapAllocator) AllocFrame() (mm.Frame, *kernel.Error) {
+	alloc.mutex.Acquire()
+
 	for poolIndex := 0; poolIndex < len(alloc.pools); poolIndex++ {
 		if alloc.pools[poolIndex].freeCount == 0 {
 			continue
@@ -260,11 +265,13 @@ func (alloc *BitmapAllocator) AllocFrame() (mm.Frame, *kernel.Error) {
 				alloc.pools[poolIndex].freeCount--
 				alloc.pools[poolIndex].freeBitmap[blockIndex] |= mask
 				alloc.reservedPages++
+				alloc.mutex.Release()
 				return alloc.pools[poolIndex].startFrame + mm.Frame((blockIndex<<6)+blockOffset), nil
 			}
 		}
 	}
 
+	alloc.mutex.Release()
 	return mm.InvalidFrame, errBitmapAllocOutOfMemory
 }
@@ -272,8 +279,11 @@ func (alloc *BitmapAllocator) AllocFrame() (mm.Frame, *kernel.Error) {
 // Trying to release a frame not part of the allocator pools or a frame that
 // is already marked as free will cause an error to be returned.
 func (alloc *BitmapAllocator) FreeFrame(frame mm.Frame) *kernel.Error {
+	alloc.mutex.Acquire()
+
 	poolIndex := alloc.poolForFrame(frame)
 	if poolIndex < 0 {
+		alloc.mutex.Release()
 		return errBitmapAllocFrameNotManaged
 	}
@@ -282,11 +292,13 @@ func (alloc *BitmapAllocator) FreeFrame(frame mm.Frame) *kernel.Error {
 	mask := uint64(1 << (63 - (relFrame - block<<6)))
 
 	if alloc.pools[poolIndex].freeBitmap[block]&mask == 0 {
+		alloc.mutex.Release()
 		return errBitmapAllocDoubleFree
 	}
 
 	alloc.pools[poolIndex].freeBitmap[block] &^= mask
 	alloc.pools[poolIndex].freeCount++
 	alloc.reservedPages--
+	alloc.mutex.Release()
 	return nil
 }
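Note that the spinlock is released explicitly on every return path, including the early error returns in FreeFrame (unmanaged frame, double free), rather than with a single defer alloc.mutex.Release() right after Acquire. A defer would be harder to get wrong, but adds a small per-call cost, which presumably matters on the allocator's hot path; the trade-off is that any new return added to these functions must remember to release the lock first.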