
Reserve and initialize the BitmapAllocator frame pools

Achilleas Anagnostopoulos 2017-06-15 16:39:17 +01:00
parent 1c3bfcd58d
commit bc44151c93
4 changed files with 347 additions and 28 deletions

@ -0,0 +1,164 @@
package allocator
import (
"reflect"
"unsafe"
"github.com/achilleasa/gopher-os/kernel"
"github.com/achilleasa/gopher-os/kernel/hal/multiboot"
"github.com/achilleasa/gopher-os/kernel/mem"
"github.com/achilleasa/gopher-os/kernel/mem/pmm"
"github.com/achilleasa/gopher-os/kernel/mem/vmm"
)
var (
// FrameAllocator is a BitmapAllocator instance that serves as the
// primary allocator for reserving pages.
FrameAllocator BitmapAllocator
// The following functions are used by tests to mock calls to the vmm package
// and are automatically inlined by the compiler.
reserveRegionFn = vmm.EarlyReserveRegion
mapFn = vmm.Map
)
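// As an illustration (not part of this commit), a test can temporarily swap
// these hooks out and restore them afterwards, which is exactly the pattern
// used by the tests that accompany this file:
//
//	mapFn = func(_ vmm.Page, _ pmm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error { return nil }
//	defer func() { mapFn = vmm.Map }()
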
type framePool struct {
// startFrame is the frame number for the first page in this pool.
// each free bitmap entry i corresponds to frame (startFrame + i).
startFrame pmm.Frame
// endFrame tracks the last frame in the pool. The total number of
// frames is given by: (endFrame - startFrame) + 1
endFrame pmm.Frame
// freeCount tracks the available pages in this pool. The allocator
// can use this field to skip fully allocated pools without the need
// to scan the free bitmap.
freeCount uint32
// freeBitmap tracks used/free pages in the pool.
freeBitmap []uint64
freeBitmapHdr reflect.SliceHeader
}
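// bitForFrame is an illustrative sketch and not part of this commit: it shows
// how a frame inside a pool could map to a bitmap block and bit under the
// layout described above (entry i tracks frame startFrame + i). The MSB-first
// bit ordering is an assumption; this commit does not yet define how the
// allocator consumes the bitmap.
func (p *framePool) bitForFrame(frame pmm.Frame) (block int, mask uint64) {
	offset := uint32(frame - p.startFrame)
	return int(offset >> 6), uint64(1) << (63 - (offset & 63))
}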
// BitmapAllocator implements a physical frame allocator that tracks frame
// reservations across the available memory pools using bitmaps.
type BitmapAllocator struct {
// totalPages tracks the total number of pages across all pools.
totalPages uint32
// reservedPages tracks the number of reserved pages across all pools.
reservedPages uint32
pools []framePool
poolsHdr reflect.SliceHeader
}
// init allocates space for the allocator structures using the early bootmem
// allocator and flags any allocated pages as reserved.
func (alloc *BitmapAllocator) init() *kernel.Error {
return alloc.setupPoolBitmaps()
}
// setupPoolBitmaps uses the early allocator and vmm region reservation helper
// to initialize the list of available pools and their free bitmap slices.
func (alloc *BitmapAllocator) setupPoolBitmaps() *kernel.Error {
var (
err *kernel.Error
sizeofPool = unsafe.Sizeof(framePool{})
pageSizeMinus1 = uint64(mem.PageSize - 1)
requiredBitmapBytes mem.Size
)
// Detect available memory regions and calculate their pool bitmap
// requirements.
multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool {
if region.Type != multiboot.MemAvailable {
return true
}
alloc.poolsHdr.Len++
alloc.poolsHdr.Cap++
// Reported addresses may not be page-aligned; round up to get the
// start frame and round down to get the end frame.
regionStartFrame := pmm.Frame(((region.PhysAddress + pageSizeMinus1) & ^pageSizeMinus1) >> mem.PageShift)
regionEndFrame := pmm.Frame(((region.PhysAddress+region.Length) & ^pageSizeMinus1)>>mem.PageShift) - 1
pageCount := uint32(regionEndFrame - regionStartFrame + 1)
alloc.totalPages += pageCount
// To represent the free page bitmap we need pageCount bits. Since the
// bitmap slice stores uint64 values, the required bit count is rounded
// up to the next multiple of 64.
requiredBitmapBytes += mem.Size(((pageCount + 63) &^ 63) >> 3)
return true
})
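// As a worked example, the qemu memory map captured for the tests below
// advertises a 654336-byte available region at 0x0; it spans frames 0-158
// (159 frames), so its bitmap rounds up to 192 bits, i.e. three uint64
// blocks or 24 bytes.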
// Reserve enough pages to hold the allocator state
requiredBytes := mem.Size(((uint64(uintptr(alloc.poolsHdr.Len)*sizeofPool) + uint64(requiredBitmapBytes)) + pageSizeMinus1) & ^pageSizeMinus1)
requiredPages := requiredBytes >> mem.PageShift
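// For the qemu map used by the tests this amounts to two framePool
// descriptors plus 24 + 4064 bytes of bitmaps, which rounds up to the two
// pages that TestSetupPoolBitmaps expects to be reserved.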
alloc.poolsHdr.Data, err = reserveRegionFn(requiredBytes)
if err != nil {
return err
}
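// Back each page of the reserved region with a frame from the early
// allocator, map it read/write and non-executable, and zero it so the pool
// descriptors and free bitmaps start out cleared.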
for page, index := vmm.PageFromAddress(alloc.poolsHdr.Data), mem.Size(0); index < requiredPages; page, index = page+1, index+1 {
nextFrame, err := earlyAllocFrame()
if err != nil {
return err
}
if err = mapFn(page, nextFrame, vmm.FlagPresent|vmm.FlagRW|vmm.FlagNoExecute); err != nil {
return err
}
mem.Memset(page.Address(), 0, mem.PageSize)
}
alloc.pools = *(*[]framePool)(unsafe.Pointer(&alloc.poolsHdr))
// Run a second pass to initialize the free bitmap slices for all pools
bitmapStartAddr := alloc.poolsHdr.Data + uintptr(alloc.poolsHdr.Len)*sizeofPool
poolIndex := 0
multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool {
if region.Type != multiboot.MemAvailable {
return true
}
regionStartFrame := pmm.Frame(((region.PhysAddress + pageSizeMinus1) & ^pageSizeMinus1) >> mem.PageShift)
regionEndFrame := pmm.Frame(((region.PhysAddress+region.Length) & ^pageSizeMinus1)>>mem.PageShift) - 1
bitmapBytes := uintptr((((regionEndFrame - regionStartFrame + 1) + 63) &^ 63) >> 3)
alloc.pools[poolIndex].startFrame = regionStartFrame
alloc.pools[poolIndex].endFrame = regionEndFrame
alloc.pools[poolIndex].freeCount = uint32(regionEndFrame - regionStartFrame + 1)
alloc.pools[poolIndex].freeBitmapHdr.Len = int(bitmapBytes >> 3)
alloc.pools[poolIndex].freeBitmapHdr.Cap = alloc.pools[poolIndex].freeBitmapHdr.Len
alloc.pools[poolIndex].freeBitmapHdr.Data = bitmapStartAddr
alloc.pools[poolIndex].freeBitmap = *(*[]uint64)(unsafe.Pointer(&alloc.pools[poolIndex].freeBitmapHdr))
bitmapStartAddr += bitmapBytes
poolIndex++
return true
})
return nil
}
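// findFreePool is an illustrative sketch and not part of this commit: it
// shows how a future allocation path could use freeCount to skip fully
// allocated pools without scanning their bitmaps, as the framePool comments
// describe. The method name and return convention are assumptions made for
// this example only.
func (alloc *BitmapAllocator) findFreePool() *framePool {
	for i := range alloc.pools {
		if alloc.pools[i].freeCount > 0 {
			return &alloc.pools[i]
		}
	}
	return nil
}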
// earlyAllocFrame is a helper that delegates a frame allocation request to the
// early allocator instance. This function is passed as an argument to
// vmm.SetFrameAllocator instead of earlyAllocator.AllocFrame. The latter
// confuses the compiler's escape analysis into thinking that
// earlyAllocator.Frame escapes to heap.
func earlyAllocFrame() (pmm.Frame, *kernel.Error) {
return earlyAllocator.AllocFrame()
}
// Init sets up the kernel physical memory allocation sub-system.
func Init(kernelStart, kernelEnd uintptr) *kernel.Error {
earlyAllocator.init(kernelStart, kernelEnd)
earlyAllocator.printMemoryMap()
vmm.SetFrameAllocator(earlyAllocFrame)
return FrameAllocator.init()
}

@ -0,0 +1,183 @@
package allocator
import (
"bytes"
"math"
"testing"
"unsafe"
"github.com/achilleasa/gopher-os/kernel"
"github.com/achilleasa/gopher-os/kernel/hal/multiboot"
"github.com/achilleasa/gopher-os/kernel/mem"
"github.com/achilleasa/gopher-os/kernel/mem/pmm"
"github.com/achilleasa/gopher-os/kernel/mem/vmm"
)
func TestSetupPoolBitmaps(t *testing.T) {
defer func() {
mapFn = vmm.Map
reserveRegionFn = vmm.EarlyReserveRegion
}()
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))
// The captured multiboot data corresponds to qemu running with 128M RAM.
// The allocator will need to reserve 2 pages to store the bitmap data.
var (
alloc BitmapAllocator
physMem = make([]byte, 2*mem.PageSize)
)
// Fill physMem with junk so the test can verify that setupPoolBitmaps zeroes the reserved pages
for i := 0; i < len(physMem); i++ {
physMem[i] = 0xf0
}
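// Stub out the vmm hooks so the test never touches real page tables; the
// reserved region is redirected into the physMem buffer above.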
mapCallCount := 0
mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
mapCallCount++
return nil
}
reserveCallCount := 0
reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
reserveCallCount++
return uintptr(unsafe.Pointer(&physMem[0])), nil
}
if err := alloc.setupPoolBitmaps(); err != nil {
t.Fatal(err)
}
if exp := 2; mapCallCount != exp {
t.Fatalf("expected allocator to call vmm.Map %d times; called %d", exp, mapCallCount)
}
if exp := 1; reserveCallCount != exp {
t.Fatalf("expected allocator to call vmm.EarlyReserveRegion %d times; called %d", exp, reserveCallCount)
}
if exp, got := 2, len(alloc.pools); got != exp {
t.Fatalf("expected allocator to initialize %d pools; got %d", exp, got)
}
for poolIndex, pool := range alloc.pools {
if expFreeCount := uint32(pool.endFrame - pool.startFrame + 1); pool.freeCount != expFreeCount {
t.Errorf("[pool %d] expected free count to be %d; got %d", poolIndex, expFreeCount, pool.freeCount)
}
if exp, got := int(math.Ceil(float64(pool.freeCount)/64.0)), len(pool.freeBitmap); got != exp {
t.Errorf("[pool %d] expected bitmap len to be %d; got %d", poolIndex, exp, got)
}
for blockIndex, block := range pool.freeBitmap {
if block != 0 {
t.Errorf("[pool %d] expected bitmap block %d to be cleared; got %d", poolIndex, blockIndex, block)
}
}
}
}
func TestSetupPoolBitmapsErrors(t *testing.T) {
defer func() {
mapFn = vmm.Map
reserveRegionFn = vmm.EarlyReserveRegion
}()
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))
var alloc BitmapAllocator
t.Run("vmm.EarlyReserveRegion returns an error", func(t *testing.T) {
expErr := &kernel.Error{Module: "test", Message: "something went wrong"}
reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
return 0, expErr
}
if err := alloc.setupPoolBitmaps(); err != expErr {
t.Fatalf("expected to get error: %v; got %v", expErr, err)
}
})
t.Run("vmm.Map returns an error", func(t *testing.T) {
expErr := &kernel.Error{Module: "test", Message: "something went wrong"}
reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
return 0, nil
}
mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
return expErr
}
if err := alloc.setupPoolBitmaps(); err != expErr {
t.Fatalf("expected to get error: %v; got %v", expErr, err)
}
})
t.Run("earlyAllocator returns an error", func(t *testing.T) {
emptyInfoData := []byte{
0, 0, 0, 0, // size
0, 0, 0, 0, // reserved
0, 0, 0, 0, // tag with type zero and length zero
0, 0, 0, 0,
}
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&emptyInfoData[0])))
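// The empty multiboot info advertises no available memory regions, so the
// early boot allocator has no frames to hand out and setupPoolBitmaps is
// expected to surface errBootAllocOutOfMemory.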
if err := alloc.setupPoolBitmaps(); err != errBootAllocOutOfMemory {
t.Fatalf("expected to get error: %v; got %v", errBootAllocOutOfMemory, err)
}
})
}
func TestAllocatorPackageInit(t *testing.T) {
defer func() {
mapFn = vmm.Map
reserveRegionFn = vmm.EarlyReserveRegion
}()
var (
physMem = make([]byte, 2*mem.PageSize)
fb = mockTTY()
buf bytes.Buffer
)
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))
t.Run("success", func(t *testing.T) {
mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
return nil
}
reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
return uintptr(unsafe.Pointer(&physMem[0])), nil
}
if err := Init(0x100000, 0x1fa7c8); err != nil {
t.Fatal(err)
}
for i := 0; i < len(fb); i += 2 {
if fb[i] == 0x0 {
continue
}
buf.WriteByte(fb[i])
}
exp := "[boot_mem_alloc] system memory map: [0x0000000000 - 0x000009fc00], size: 654336, type: available [0x000009fc00 - 0x00000a0000], size: 1024, type: reserved [0x00000f0000 - 0x0000100000], size: 65536, type: reserved [0x0000100000 - 0x0007fe0000], size: 133038080, type: available [0x0007fe0000 - 0x0008000000], size: 131072, type: reserved [0x00fffc0000 - 0x0100000000], size: 262144, type: reserved[boot_mem_alloc] available memory: 130559Kb[boot_mem_alloc] kernel loaded at 0x100000 - 0x1fa7c8[boot_mem_alloc] size: 1025992 bytes, reserved pages: 251"
if got := buf.String(); got != exp {
t.Fatalf("expected printMemoryMap to generate the following output:\n%q\ngot:\n%q", exp, got)
}
})
t.Run("error", func(t *testing.T) {
expErr := &kernel.Error{Module: "test", Message: "something went wrong"}
mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
return expErr
}
if err := Init(0x100000, 0x1fa7c8); err != expErr {
t.Fatalf("expected to get error: %v; got %v", expErr, err)
}
})
}

@ -134,10 +134,3 @@ func (alloc *bootMemAllocator) printMemoryMap() {
uint64(alloc.kernelEndFrame-alloc.kernelStartFrame+1),
)
}
// Init sets up the kernel physical memory allocation sub-system.
func Init(kernelStart, kernelEnd uintptr) *kernel.Error {
earlyAllocator.init(kernelStart, kernelEnd)
earlyAllocator.printMemoryMap()
return nil
}

@ -1,7 +1,6 @@
package allocator
import (
"bytes"
"testing"
"unsafe"
@ -94,26 +93,6 @@ func TestBootMemoryAllocator(t *testing.T) {
}
}
func TestAllocatorPackageInit(t *testing.T) {
fb := mockTTY()
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))
Init(0x100000, 0x1fa7c8)
var buf bytes.Buffer
for i := 0; i < len(fb); i += 2 {
if fb[i] == 0x0 {
continue
}
buf.WriteByte(fb[i])
}
exp := "[boot_mem_alloc] system memory map: [0x0000000000 - 0x000009fc00], size: 654336, type: available [0x000009fc00 - 0x00000a0000], size: 1024, type: reserved [0x00000f0000 - 0x0000100000], size: 65536, type: reserved [0x0000100000 - 0x0007fe0000], size: 133038080, type: available [0x0007fe0000 - 0x0008000000], size: 131072, type: reserved [0x00fffc0000 - 0x0100000000], size: 262144, type: reserved[boot_mem_alloc] available memory: 130559Kb[boot_mem_alloc] kernel loaded at 0x100000 - 0x1fa7c8[boot_mem_alloc] size: 1025992 bytes, reserved pages: 251"
if got := buf.String(); got != exp {
t.Fatalf("expected printMemoryMap to generate the following output:\n%q\ngot:\n%q", exp, got)
}
}
var (
// A dump of multiboot data when running under qemu containing only the
// memory region tag. The dump encodes the following available memory