
Merge pull request #27 from achilleasa/implement-bitmap-allocator

Implement bitmap-based physical page allocator
Achilleas Anagnostopoulos 2017-06-19 06:18:18 +01:00 committed by GitHub
commit 3923e09aac
12 changed files with 872 additions and 60 deletions
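For orientation, a minimal sketch of how the new allocator is expected to be bootstrapped and used (not part of the diff; setupPhysicalMemory is a hypothetical caller and the allocator import path is assumed from the repository layout):

import (
	"github.com/achilleasa/gopher-os/kernel"
	"github.com/achilleasa/gopher-os/kernel/mem/pmm/allocator"
)

// setupPhysicalMemory wires up physical frame allocation: allocator.Init runs
// the early boot allocator first, then switches vmm frame allocation over to
// the bitmap allocator once its pool bitmaps are in place.
func setupPhysicalMemory(kernelStart, kernelEnd uintptr) *kernel.Error {
	if err := allocator.Init(kernelStart, kernelEnd); err != nil {
		return err
	}

	// From here on, vmm.Map obtains page-table frames from the bitmap
	// allocator via the callback registered with vmm.SetFrameAllocator.
	frame, err := allocator.FrameAllocator.AllocFrame()
	if err != nil {
		return err
	}
	return allocator.FrameAllocator.FreeFrame(frame)
}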


@ -297,10 +297,11 @@ _rt0_enter_long_mode:
or eax, 1 << 5
mov cr4, eax
; Now enable long mode by modifying the EFER MSR
; Now enable long mode (bit 8) and the no-execute support (bit 11) by
; modifying the EFER MSR
mov ecx, 0xc0000080
rdmsr ; read msr value to eax
or eax, 1 << 8
or eax, (1 << 8) | (1<<11)
wrmsr
; Finally enable paging


@ -0,0 +1,327 @@
package allocator
import (
"math"
"reflect"
"unsafe"
"github.com/achilleasa/gopher-os/kernel"
"github.com/achilleasa/gopher-os/kernel/hal/multiboot"
"github.com/achilleasa/gopher-os/kernel/kfmt/early"
"github.com/achilleasa/gopher-os/kernel/mem"
"github.com/achilleasa/gopher-os/kernel/mem/pmm"
"github.com/achilleasa/gopher-os/kernel/mem/vmm"
)
var (
// FrameAllocator is a BitmapAllocator instance that serves as the
// primary allocator for reserving pages.
FrameAllocator BitmapAllocator
errBitmapAllocOutOfMemory = &kernel.Error{Module: "bitmap_alloc", Message: "out of memory"}
errBitmapAllocFrameNotManaged = &kernel.Error{Module: "bitmap_alloc", Message: "frame not managed by this allocator"}
errBitmapAllocDoubleFree = &kernel.Error{Module: "bitmap_alloc", Message: "frame is already free"}
// The following functions are used by tests to mock calls to the vmm package
// and are automatically inlined by the compiler.
reserveRegionFn = vmm.EarlyReserveRegion
mapFn = vmm.Map
)
type markAs bool
const (
markReserved markAs = false
markFree = true
)
type framePool struct {
// startFrame is the frame number for the first page in this pool.
// Each free bitmap entry i corresponds to frame (startFrame + i).
startFrame pmm.Frame
// endFrame tracks the last frame in the pool. The total number of
// frames in the pool is given by: (endFrame - startFrame) + 1
endFrame pmm.Frame
// freeCount tracks the available pages in this pool. The allocator
// can use this field to skip fully allocated pools without the need
// to scan the free bitmap.
freeCount uint32
// freeBitmap tracks used/free pages in the pool.
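// A set bit marks a reserved frame; a cleared bit marks a free frame.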
freeBitmap []uint64
freeBitmapHdr reflect.SliceHeader
}
// BitmapAllocator implements a physical frame allocator that tracks frame
// reservations across the available memory pools using bitmaps.
type BitmapAllocator struct {
// totalPages tracks the total number of pages across all pools.
totalPages uint32
// reservedPages tracks the number of reserved pages across all pools.
reservedPages uint32
pools []framePool
poolsHdr reflect.SliceHeader
}
// init allocates space for the allocator structures using the early bootmem
// allocator and flags any allocated pages as reserved.
func (alloc *BitmapAllocator) init() *kernel.Error {
if err := alloc.setupPoolBitmaps(); err != nil {
return err
}
alloc.reserveKernelFrames()
alloc.reserveEarlyAllocatorFrames()
alloc.printStats()
return nil
}
// setupPoolBitmaps uses the early allocator and vmm region reservation helper
// to initialize the list of available pools and their free bitmap slices.
func (alloc *BitmapAllocator) setupPoolBitmaps() *kernel.Error {
var (
err *kernel.Error
sizeofPool = unsafe.Sizeof(framePool{})
pageSizeMinus1 = uint64(mem.PageSize - 1)
requiredBitmapBytes mem.Size
)
// Detect available memory regions and calculate their pool bitmap
// requirements.
multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool {
if region.Type != multiboot.MemAvailable {
return true
}
alloc.poolsHdr.Len++
alloc.poolsHdr.Cap++
// Reported addresses may not be page-aligned; round up to get
// the start frame and round down to get the end frame
regionStartFrame := pmm.Frame(((region.PhysAddress + pageSizeMinus1) & ^pageSizeMinus1) >> mem.PageShift)
regionEndFrame := pmm.Frame(((region.PhysAddress+region.Length) & ^pageSizeMinus1)>>mem.PageShift) - 1
pageCount := uint32(regionEndFrame - regionStartFrame)
alloc.totalPages += pageCount
// To represent the free page bitmap we need pageCount bits. Since our
// slice uses uint64 for storing the bitmap we need to round up the
// required bits so they are a multiple of 64 bits
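// (e.g. a pool with 1000 usable pages rounds up to 1024 bits, i.e. 16
// uint64 blocks or 128 bytes of bitmap storage)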
requiredBitmapBytes += mem.Size(((pageCount + 63) &^ 63) >> 3)
return true
})
// Reserve enough pages to hold the allocator state
requiredBytes := mem.Size(((uint64(uintptr(alloc.poolsHdr.Len)*sizeofPool) + uint64(requiredBitmapBytes)) + pageSizeMinus1) & ^pageSizeMinus1)
requiredPages := requiredBytes >> mem.PageShift
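// For the 128M qemu memory map captured by the tests below this works out
// to 2 pages.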
alloc.poolsHdr.Data, err = reserveRegionFn(requiredBytes)
if err != nil {
return err
}
for page, index := vmm.PageFromAddress(alloc.poolsHdr.Data), mem.Size(0); index < requiredPages; page, index = page+1, index+1 {
nextFrame, err := earlyAllocFrame()
if err != nil {
return err
}
if err = mapFn(page, nextFrame, vmm.FlagPresent|vmm.FlagRW|vmm.FlagNoExecute); err != nil {
return err
}
mem.Memset(page.Address(), 0, mem.PageSize)
}
alloc.pools = *(*[]framePool)(unsafe.Pointer(&alloc.poolsHdr))
// Run a second pass to initialize the free bitmap slices for all pools
bitmapStartAddr := alloc.poolsHdr.Data + uintptr(alloc.poolsHdr.Len)*sizeofPool
poolIndex := 0
multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool {
if region.Type != multiboot.MemAvailable {
return true
}
regionStartFrame := pmm.Frame(((region.PhysAddress + pageSizeMinus1) & ^pageSizeMinus1) >> mem.PageShift)
regionEndFrame := pmm.Frame(((region.PhysAddress+region.Length) & ^pageSizeMinus1)>>mem.PageShift) - 1
bitmapBytes := uintptr((((regionEndFrame - regionStartFrame) + 63) &^ 63) >> 3)
alloc.pools[poolIndex].startFrame = regionStartFrame
alloc.pools[poolIndex].endFrame = regionEndFrame
alloc.pools[poolIndex].freeCount = uint32(regionEndFrame - regionStartFrame + 1)
alloc.pools[poolIndex].freeBitmapHdr.Len = int(bitmapBytes >> 3)
alloc.pools[poolIndex].freeBitmapHdr.Cap = alloc.pools[poolIndex].freeBitmapHdr.Len
alloc.pools[poolIndex].freeBitmapHdr.Data = bitmapStartAddr
alloc.pools[poolIndex].freeBitmap = *(*[]uint64)(unsafe.Pointer(&alloc.pools[poolIndex].freeBitmapHdr))
bitmapStartAddr += bitmapBytes
poolIndex++
return true
})
return nil
}
// markFrame updates the reservation flag for the bitmap entry that corresponds
// to the supplied frame.
func (alloc *BitmapAllocator) markFrame(poolIndex int, frame pmm.Frame, flag markAs) {
if poolIndex < 0 || frame > alloc.pools[poolIndex].endFrame {
return
}
// The offset within the block is given by: relFrame % 64. As the bitmap uses
// a big-endian bit order we need to set the bit at index: 63 - offset
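// (worked example: relFrame 70 maps to block 70>>6 = 1 at offset 6, so the
// allocator toggles bit 63-6 = 57, i.e. mask 1<<57)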
relFrame := frame - alloc.pools[poolIndex].startFrame
block := relFrame >> 6
mask := uint64(1 << (63 - (relFrame - block<<6)))
switch flag {
case markFree:
alloc.pools[poolIndex].freeBitmap[block] &^= mask
alloc.pools[poolIndex].freeCount++
alloc.reservedPages--
case markReserved:
alloc.pools[poolIndex].freeBitmap[block] |= mask
alloc.pools[poolIndex].freeCount--
alloc.reservedPages++
}
}
// poolForFrame returns the index of the pool that contains frame or -1 if
// the frame is not contained in any of the available memory pools (e.g. it
// points to a reserved memory region).
func (alloc *BitmapAllocator) poolForFrame(frame pmm.Frame) int {
for poolIndex, pool := range alloc.pools {
if frame >= pool.startFrame && frame <= pool.endFrame {
return poolIndex
}
}
return -1
}
// reserveKernelFrames marks as reserved the bitmap entries for the frames
// occupied by the kernel image.
func (alloc *BitmapAllocator) reserveKernelFrames() {
// Flag frames used by kernel image as reserved. Since the kernel must
// occupy a contiguous memory block we assume that all its frames will
// fall into one of the available memory pools
poolIndex := alloc.poolForFrame(earlyAllocator.kernelStartFrame)
for frame := earlyAllocator.kernelStartFrame; frame <= earlyAllocator.kernelEndFrame; frame++ {
alloc.markFrame(poolIndex, frame, markReserved)
}
}
// reserveEarlyAllocatorFrames marks as reserved the bitmap entries for the frames
// already allocated by the early allocator.
func (alloc *BitmapAllocator) reserveEarlyAllocatorFrames() {
// We now need to decommission the early allocator by flagging all frames
// allocated by it as reserved. The allocator itself does not track
// individual frames but only a counter of allocated frames. To get
// the list of frames we reset its internal state and "replay" the
// allocation requests to get the correct frames.
allocCount := earlyAllocator.allocCount
earlyAllocator.allocCount, earlyAllocator.lastAllocFrame = 0, 0
for i := uint64(0); i < allocCount; i++ {
frame, _ := earlyAllocator.AllocFrame()
alloc.markFrame(
alloc.poolForFrame(frame),
frame,
markReserved,
)
}
}
func (alloc *BitmapAllocator) printStats() {
early.Printf(
"[bitmap_alloc] page stats: free: %d/%d (%d reserved)\n",
alloc.totalPages-alloc.reservedPages,
alloc.totalPages,
alloc.reservedPages,
)
}
// AllocFrame reserves and returns a physical memory frame. An error will be
// returned if no more memory can be allocated.
func (alloc *BitmapAllocator) AllocFrame() (pmm.Frame, *kernel.Error) {
for poolIndex := 0; poolIndex < len(alloc.pools); poolIndex++ {
if alloc.pools[poolIndex].freeCount == 0 {
continue
}
fullBlock := uint64(math.MaxUint64)
for blockIndex, block := range alloc.pools[poolIndex].freeBitmap {
if block == fullBlock {
continue
}
// Block has at least one free slot; we need to scan its bits
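// starting from the most significant bit (bit 63), which corresponds to the
// lowest-numbered frame in this block.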
for blockOffset, mask := 0, uint64(1<<63); mask > 0; blockOffset, mask = blockOffset+1, mask>>1 {
if block&mask != 0 {
continue
}
alloc.pools[poolIndex].freeCount--
alloc.pools[poolIndex].freeBitmap[blockIndex] |= mask
alloc.reservedPages++
return alloc.pools[poolIndex].startFrame + pmm.Frame((blockIndex<<6)+blockOffset), nil
}
}
}
return pmm.InvalidFrame, errBitmapAllocOutOfMemory
}
// FreeFrame releases a frame previously allocated via a call to AllocFrame.
// Trying to release a frame not part of the allocator pools or a frame that
// is already marked as free will cause an error to be returned.
func (alloc *BitmapAllocator) FreeFrame(frame pmm.Frame) *kernel.Error {
poolIndex := alloc.poolForFrame(frame)
if poolIndex < 0 {
return errBitmapAllocFrameNotManaged
}
relFrame := frame - alloc.pools[poolIndex].startFrame
block := relFrame >> 6
mask := uint64(1 << (63 - (relFrame - block<<6)))
if alloc.pools[poolIndex].freeBitmap[block]&mask == 0 {
return errBitmapAllocDoubleFree
}
alloc.pools[poolIndex].freeBitmap[block] &^= mask
alloc.pools[poolIndex].freeCount++
alloc.reservedPages--
return nil
}
// earlyAllocFrame is a helper that delegates a frame allocation request to the
// early allocator instance. This function is passed as an argument to
// vmm.SetFrameAllocator instead of earlyAllocator.AllocFrame. The latter
// confuses the compiler's escape analysis into thinking that
// earlyAllocator escapes to the heap.
func earlyAllocFrame() (pmm.Frame, *kernel.Error) {
return earlyAllocator.AllocFrame()
}
// sysAllocFrame is a helper that delegates a frame allocation request to the
// bitmap allocator instance.
func sysAllocFrame() (pmm.Frame, *kernel.Error) {
return FrameAllocator.AllocFrame()
}
// Init sets up the kernel physical memory allocation sub-system.
func Init(kernelStart, kernelEnd uintptr) *kernel.Error {
earlyAllocator.init(kernelStart, kernelEnd)
earlyAllocator.printMemoryMap()
vmm.SetFrameAllocator(earlyAllocFrame)
if err := FrameAllocator.init(); err != nil {
return err
}
vmm.SetFrameAllocator(sysAllocFrame)
return nil
}


@ -0,0 +1,432 @@
package allocator
import (
"math"
"strconv"
"testing"
"unsafe"
"github.com/achilleasa/gopher-os/kernel"
"github.com/achilleasa/gopher-os/kernel/hal/multiboot"
"github.com/achilleasa/gopher-os/kernel/mem"
"github.com/achilleasa/gopher-os/kernel/mem/pmm"
"github.com/achilleasa/gopher-os/kernel/mem/vmm"
)
func TestSetupPoolBitmaps(t *testing.T) {
defer func() {
mapFn = vmm.Map
reserveRegionFn = vmm.EarlyReserveRegion
}()
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))
// The captured multiboot data corresponds to qemu running with 128M RAM.
// The allocator will need to reserve 2 pages to store the bitmap data.
var (
alloc BitmapAllocator
physMem = make([]byte, 2*mem.PageSize)
)
// Init phys mem with junk
for i := 0; i < len(physMem); i++ {
physMem[i] = 0xf0
}
mapCallCount := 0
mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
mapCallCount++
return nil
}
reserveCallCount := 0
reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
reserveCallCount++
return uintptr(unsafe.Pointer(&physMem[0])), nil
}
if err := alloc.setupPoolBitmaps(); err != nil {
t.Fatal(err)
}
if exp := 2; mapCallCount != exp {
t.Fatalf("expected allocator to call vmm.Map %d times; called %d", exp, mapCallCount)
}
if exp := 1; reserveCallCount != exp {
t.Fatalf("expected allocator to call vmm.EarlyReserveRegion %d times; called %d", exp, reserveCallCount)
}
if exp, got := 2, len(alloc.pools); got != exp {
t.Fatalf("expected allocator to initialize %d pools; got %d", exp, got)
}
for poolIndex, pool := range alloc.pools {
if expFreeCount := uint32(pool.endFrame - pool.startFrame + 1); pool.freeCount != expFreeCount {
t.Errorf("[pool %d] expected free count to be %d; got %d", poolIndex, expFreeCount, pool.freeCount)
}
if exp, got := int(math.Ceil(float64(pool.freeCount)/64.0)), len(pool.freeBitmap); got != exp {
t.Errorf("[pool %d] expected bitmap len to be %d; got %d", poolIndex, exp, got)
}
for blockIndex, block := range pool.freeBitmap {
if block != 0 {
t.Errorf("[pool %d] expected bitmap block %d to be cleared; got %d", poolIndex, blockIndex, block)
}
}
}
}
func TestSetupPoolBitmapsErrors(t *testing.T) {
defer func() {
mapFn = vmm.Map
reserveRegionFn = vmm.EarlyReserveRegion
}()
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))
var alloc BitmapAllocator
t.Run("vmm.EarlyReserveRegion returns an error", func(t *testing.T) {
expErr := &kernel.Error{Module: "test", Message: "something went wrong"}
reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
return 0, expErr
}
if err := alloc.setupPoolBitmaps(); err != expErr {
t.Fatalf("expected to get error: %v; got %v", expErr, err)
}
})
t.Run("vmm.Map returns an error", func(t *testing.T) {
expErr := &kernel.Error{Module: "test", Message: "something went wrong"}
reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
return 0, nil
}
mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
return expErr
}
if err := alloc.setupPoolBitmaps(); err != expErr {
t.Fatalf("expected to get error: %v; got %v", expErr, err)
}
})
t.Run("earlyAllocator returns an error", func(t *testing.T) {
emptyInfoData := []byte{
0, 0, 0, 0, // size
0, 0, 0, 0, // reserved
0, 0, 0, 0, // tag with type zero and length zero
0, 0, 0, 0,
}
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&emptyInfoData[0])))
if err := alloc.setupPoolBitmaps(); err != errBootAllocOutOfMemory {
t.Fatalf("expected to get error: %v; got %v", errBootAllocOutOfMemory, err)
}
})
}
func TestBitmapAllocatorMarkFrame(t *testing.T) {
var alloc = BitmapAllocator{
pools: []framePool{
{
startFrame: pmm.Frame(0),
endFrame: pmm.Frame(127),
freeCount: 128,
freeBitmap: make([]uint64, 2),
},
},
totalPages: 128,
}
lastFrame := pmm.Frame(alloc.totalPages)
for frame := pmm.Frame(0); frame < lastFrame; frame++ {
alloc.markFrame(0, frame, markReserved)
block := uint64(frame / 64)
blockOffset := uint64(frame % 64)
bitIndex := (63 - blockOffset)
bitMask := uint64(1 << bitIndex)
if alloc.pools[0].freeBitmap[block]&bitMask != bitMask {
t.Errorf("[frame %d] expected block[%d], bit %d to be set", frame, block, bitIndex)
}
alloc.markFrame(0, frame, markFree)
if alloc.pools[0].freeBitmap[block]&bitMask != 0 {
t.Errorf("[frame %d] expected block[%d], bit %d to be unset", frame, block, bitIndex)
}
}
// Calling markFrame with a frame not part of the pool should be a no-op
alloc.markFrame(0, pmm.Frame(0xbadf00d), markReserved)
for blockIndex, block := range alloc.pools[0].freeBitmap {
if block != 0 {
t.Errorf("expected all blocks to be set to 0; block %d is set to %d", blockIndex, block)
}
}
// Calling markFrame with a negative pool index should be a no-op
alloc.markFrame(-1, pmm.Frame(0), markReserved)
for blockIndex, block := range alloc.pools[0].freeBitmap {
if block != 0 {
t.Errorf("expected all blocks to be set to 0; block %d is set to %d", blockIndex, block)
}
}
}
func TestBitmapAllocatorPoolForFrame(t *testing.T) {
var alloc = BitmapAllocator{
pools: []framePool{
{
startFrame: pmm.Frame(0),
endFrame: pmm.Frame(63),
freeCount: 64,
freeBitmap: make([]uint64, 1),
},
{
startFrame: pmm.Frame(128),
endFrame: pmm.Frame(191),
freeCount: 64,
freeBitmap: make([]uint64, 1),
},
},
totalPages: 128,
}
specs := []struct {
frame pmm.Frame
expIndex int
}{
{pmm.Frame(0), 0},
{pmm.Frame(63), 0},
{pmm.Frame(64), -1},
{pmm.Frame(128), 1},
{pmm.Frame(192), -1},
}
for specIndex, spec := range specs {
if got := alloc.poolForFrame(spec.frame); got != spec.expIndex {
t.Errorf("[spec %d] expected to get pool index %d; got %d", specIndex, spec.expIndex, got)
}
}
}
func TestBitmapAllocatorReserveKernelFrames(t *testing.T) {
var alloc = BitmapAllocator{
pools: []framePool{
{
startFrame: pmm.Frame(0),
endFrame: pmm.Frame(7),
freeCount: 8,
freeBitmap: make([]uint64, 1),
},
{
startFrame: pmm.Frame(64),
endFrame: pmm.Frame(191),
freeCount: 128,
freeBitmap: make([]uint64, 2),
},
},
totalPages: 136,
}
// kernel occupies 16 frames and starts at the beginning of pool 1
earlyAllocator.kernelStartFrame = pmm.Frame(64)
earlyAllocator.kernelEndFrame = pmm.Frame(79)
kernelSizePages := uint32(earlyAllocator.kernelEndFrame - earlyAllocator.kernelStartFrame + 1)
alloc.reserveKernelFrames()
if exp, got := kernelSizePages, alloc.reservedPages; got != exp {
t.Fatalf("expected reserved page counter to be %d; got %d", exp, got)
}
if exp, got := uint32(8), alloc.pools[0].freeCount; got != exp {
t.Fatalf("expected free count for pool 0 to be %d; got %d", exp, got)
}
if exp, got := 128-kernelSizePages, alloc.pools[1].freeCount; got != exp {
t.Fatalf("expected free count for pool 1 to be %d; got %d", exp, got)
}
// The first 16 bits of block 0 in pool 1 should all be set to 1
if exp, got := uint64(((1<<16)-1)<<48), alloc.pools[1].freeBitmap[0]; got != exp {
t.Fatalf("expected block 0 in pool 1 to be:\n%064s\ngot:\n%064s",
strconv.FormatUint(exp, 2),
strconv.FormatUint(got, 2),
)
}
}
func TestBitmapAllocatorReserveEarlyAllocatorFrames(t *testing.T) {
var alloc = BitmapAllocator{
pools: []framePool{
{
startFrame: pmm.Frame(0),
endFrame: pmm.Frame(63),
freeCount: 64,
freeBitmap: make([]uint64, 1),
},
{
startFrame: pmm.Frame(64),
endFrame: pmm.Frame(191),
freeCount: 128,
freeBitmap: make([]uint64, 2),
},
},
totalPages: 64,
}
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))
// Simulate 16 allocations made using the early allocator in region 0
// as reported by the multiboot data and move the kernel to pool 1
allocCount := uint32(16)
earlyAllocator.allocCount = uint64(allocCount)
earlyAllocator.kernelStartFrame = pmm.Frame(256)
earlyAllocator.kernelEndFrame = pmm.Frame(256)
alloc.reserveEarlyAllocatorFrames()
if exp, got := allocCount, alloc.reservedPages; got != exp {
t.Fatalf("expected reserved page counter to be %d; got %d", exp, got)
}
if exp, got := 64-allocCount, alloc.pools[0].freeCount; got != exp {
t.Fatalf("expected free count for pool 0 to be %d; got %d", exp, got)
}
if exp, got := uint32(128), alloc.pools[1].freeCount; got != exp {
t.Fatalf("expected free count for pool 1 to be %d; got %d", exp, got)
}
// The first 16 bits of block 0 in pool 0 should all be set to 1
if exp, got := uint64(((1<<16)-1)<<48), alloc.pools[0].freeBitmap[0]; got != exp {
t.Fatalf("expected block 0 in pool 0 to be:\n%064s\ngot:\n%064s",
strconv.FormatUint(exp, 2),
strconv.FormatUint(got, 2),
)
}
}
func TestBitmapAllocatorAllocAndFreeFrame(t *testing.T) {
var alloc = BitmapAllocator{
pools: []framePool{
{
startFrame: pmm.Frame(0),
endFrame: pmm.Frame(7),
freeCount: 8,
// only the first 8 bits of block 0 are used
freeBitmap: make([]uint64, 1),
},
{
startFrame: pmm.Frame(64),
endFrame: pmm.Frame(191),
freeCount: 128,
freeBitmap: make([]uint64, 2),
},
},
totalPages: 136,
}
// Test Alloc
for poolIndex, pool := range alloc.pools {
for expFrame := pool.startFrame; expFrame <= pool.endFrame; expFrame++ {
got, err := alloc.AllocFrame()
if err != nil {
t.Fatalf("[pool %d] unexpected error: %v", poolIndex, err)
}
if got != expFrame {
t.Errorf("[pool %d] expected allocated frame to be %d; got %d", poolIndex, expFrame, got)
}
}
if alloc.pools[poolIndex].freeCount != 0 {
t.Errorf("[pool %d] expected free count to be 0; got %d", poolIndex, alloc.pools[poolIndex].freeCount)
}
}
if alloc.reservedPages != alloc.totalPages {
t.Errorf("expected reservedPages to match totalPages(%d); got %d", alloc.totalPages, alloc.reservedPages)
}
if _, err := alloc.AllocFrame(); err != errBitmapAllocOutOfMemory {
t.Fatalf("expected error errBitmapAllocOutOfMemory; got %v", err)
}
// Test Free
expFreeCount := []uint32{8, 128}
for poolIndex, pool := range alloc.pools {
for frame := pool.startFrame; frame <= pool.endFrame; frame++ {
if err := alloc.FreeFrame(frame); err != nil {
t.Fatalf("[pool %d] unexpected error: %v", poolIndex, err)
}
}
if alloc.pools[poolIndex].freeCount != expFreeCount[poolIndex] {
t.Errorf("[pool %d] expected free count to be %d; got %d", poolIndex, expFreeCount[poolIndex], alloc.pools[poolIndex].freeCount)
}
}
if alloc.reservedPages != 0 {
t.Errorf("expected reservedPages to be 0; got %d", alloc.reservedPages)
}
// Test Free errors
if err := alloc.FreeFrame(pmm.Frame(0)); err != errBitmapAllocDoubleFree {
t.Fatalf("expected error errBitmapAllocDoubleFree; got %v", err)
}
if err := alloc.FreeFrame(pmm.Frame(0xbadf00d)); err != errBitmapAllocFrameNotManaged {
t.Fatalf("expected error errBitmapFrameNotManaged; got %v", err)
}
}
func TestAllocatorPackageInit(t *testing.T) {
defer func() {
mapFn = vmm.Map
reserveRegionFn = vmm.EarlyReserveRegion
}()
var (
physMem = make([]byte, 2*mem.PageSize)
)
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))
t.Run("success", func(t *testing.T) {
mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
return nil
}
reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
return uintptr(unsafe.Pointer(&physMem[0])), nil
}
mockTTY()
if err := Init(0x100000, 0x1fa7c8); err != nil {
t.Fatal(err)
}
// At this point sysAllocFrame should work
if _, err := sysAllocFrame(); err != nil {
t.Fatal(err)
}
})
t.Run("error", func(t *testing.T) {
expErr := &kernel.Error{Module: "test", Message: "something went wrong"}
mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
return expErr
}
if err := Init(0x100000, 0x1fa7c8); err != expErr {
t.Fatalf("expected to get error: %v; got %v", expErr, err)
}
})
}


@ -134,10 +134,3 @@ func (alloc *bootMemAllocator) printMemoryMap() {
uint64(alloc.kernelEndFrame-alloc.kernelStartFrame+1),
)
}
// Init sets up the kernel physical memory allocation sub-system.
func Init(kernelStart, kernelEnd uintptr) *kernel.Error {
earlyAllocator.init(kernelStart, kernelEnd)
earlyAllocator.printMemoryMap()
return nil
}


@ -1,7 +1,6 @@
package allocator
import (
"bytes"
"testing"
"unsafe"
@ -94,26 +93,6 @@ func TestBootMemoryAllocator(t *testing.T) {
}
}
func TestAllocatorPackageInit(t *testing.T) {
fb := mockTTY()
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))
Init(0x100000, 0x1fa7c8)
var buf bytes.Buffer
for i := 0; i < len(fb); i += 2 {
if fb[i] == 0x0 {
continue
}
buf.WriteByte(fb[i])
}
exp := "[boot_mem_alloc] system memory map: [0x0000000000 - 0x000009fc00], size: 654336, type: available [0x000009fc00 - 0x00000a0000], size: 1024, type: reserved [0x00000f0000 - 0x0000100000], size: 65536, type: reserved [0x0000100000 - 0x0007fe0000], size: 133038080, type: available [0x0007fe0000 - 0x0008000000], size: 131072, type: reserved [0x00fffc0000 - 0x0100000000], size: 262144, type: reserved[boot_mem_alloc] available memory: 130559Kb[boot_mem_alloc] kernel loaded at 0x100000 - 0x1fa7c8[boot_mem_alloc] size: 1025992 bytes, reserved pages: 251"
if got := buf.String(); got != exp {
t.Fatalf("expected printMemoryMap to generate the following output:\n%q\ngot:\n%q", exp, got)
}
}
var (
// A dump of multiboot data when running under qemu containing only the
// memory region tag. The dump encodes the following available memory


@ -0,0 +1,35 @@
package vmm
import (
"github.com/achilleasa/gopher-os/kernel"
"github.com/achilleasa/gopher-os/kernel/mem"
)
var (
// earlyReserveLastUsed tracks the last reserved page address and is
// decreased after each allocation request. Initially, it points to
// tempMappingAddr which coincides with the end of the kernel address
// space.
earlyReserveLastUsed = tempMappingAddr
errEarlyReserveNoSpace = &kernel.Error{Module: "early_reserve", Message: "remaining virtual address space not large enough to satisfy reservation request"}
)
// EarlyReserveRegion reserves a page-aligned contiguous virtual memory region
// with the requested size in the kernel address space and returns its virtual
// address. If size is not a multiple of mem.PageSize it will be automatically
// rounded up.
//
// This function allocates regions starting at the end of the kernel address
// space. It should only be used during the early stages of kernel initialization.
func EarlyReserveRegion(size mem.Size) (uintptr, *kernel.Error) {
size = (size + (mem.PageSize - 1)) & ^(mem.PageSize - 1)
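// (e.g. with 4KiB pages a 42-byte request is rounded up to a full 4096-byte page)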
// Bail out if reserving a region of the requested size would cause an underflow
if uintptr(size) > earlyReserveLastUsed {
return 0, errEarlyReserveNoSpace
}
earlyReserveLastUsed -= uintptr(size)
return earlyReserveLastUsed, nil
}


@ -0,0 +1,29 @@
package vmm
import (
"runtime"
"testing"
)
func TestEarlyReserveAmd64(t *testing.T) {
if runtime.GOARCH != "amd64" {
t.Skip("test requires amd64 runtime; skipping")
}
defer func(origLastUsed uintptr) {
earlyReserveLastUsed = origLastUsed
}(earlyReserveLastUsed)
earlyReserveLastUsed = 4096
next, err := EarlyReserveRegion(42)
if err != nil {
t.Fatal(err)
}
if exp := uintptr(0); next != exp {
t.Fatal("expected reservation request to be rounded to nearest page")
}
if _, err = EarlyReserveRegion(1); err != errEarlyReserveNoSpace {
t.Fatalf("expected to get errEarlyReserveNoSpace; got %v", err)
}
}


@ -23,14 +23,11 @@ var (
errNoHugePageSupport = &kernel.Error{Module: "vmm", Message: "huge pages are not supported"}
)
// FrameAllocatorFn is a function that can allocate physical frames.
type FrameAllocatorFn func() (pmm.Frame, *kernel.Error)
// Map establishes a mapping between a virtual page and a physical memory frame
// using the currently active page directory table. Calls to Map will use the
// supplied physical frame allocator to initialize missing page tables at each
// paging level supported by the MMU.
func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllocatorFn) *kernel.Error {
func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
var err *kernel.Error
walk(page.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
@ -53,7 +50,7 @@ func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllo
// physical frame for it map it and clear its contents.
if !pte.HasFlags(FlagPresent) {
var newTableFrame pmm.Frame
newTableFrame, err = allocFn()
newTableFrame, err = frameAllocator()
if err != nil {
return false
}
@ -78,8 +75,8 @@ func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllo
// to a fixed virtual address overwriting any previous mapping. The temporary
// mapping mechanism is primarily used by the kernel to access and initialize
// inactive page tables.
func MapTemporary(frame pmm.Frame, allocFn FrameAllocatorFn) (Page, *kernel.Error) {
if err := Map(PageFromAddress(tempMappingAddr), frame, FlagRW, allocFn); err != nil {
func MapTemporary(frame pmm.Frame) (Page, *kernel.Error) {
if err := Map(PageFromAddress(tempMappingAddr), frame, FlagRW); err != nil {
return 0, err
}


@ -26,17 +26,18 @@ func TestMapTemporaryAmd64(t *testing.T) {
ptePtrFn = origPtePtr
nextAddrFn = origNextAddrFn
flushTLBEntryFn = origFlushTLBEntryFn
frameAllocator = nil
}(ptePtrFn, nextAddrFn, flushTLBEntryFn)
var physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry
nextPhysPage := 0
// allocFn returns pages from index 1; we keep index 0 for the P4 entry
allocFn := func() (pmm.Frame, *kernel.Error) {
SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
nextPhysPage++
pageAddr := unsafe.Pointer(&physPages[nextPhysPage][0])
return pmm.Frame(uintptr(pageAddr) >> mem.PageShift), nil
}
})
pteCallCount := 0
ptePtrFn = func(entry uintptr) unsafe.Pointer {
@ -64,7 +65,7 @@ func TestMapTemporaryAmd64(t *testing.T) {
frame := pmm.Frame(123)
levelIndices := []uint{510, 511, 511, 511}
page, err := MapTemporary(frame, allocFn)
page, err := MapTemporary(frame)
if err != nil {
t.Fatal(err)
}
@ -124,21 +125,22 @@ func TestMapTemporaryErrorsAmd64(t *testing.T) {
return unsafe.Pointer(&physPages[0][pteIndex])
}
if _, err := MapTemporary(frame, nil); err != errNoHugePageSupport {
if _, err := MapTemporary(frame); err != errNoHugePageSupport {
t.Fatalf("expected to get errNoHugePageSupport; got %v", err)
}
})
t.Run("allocFn returns an error", func(t *testing.T) {
defer func() { frameAllocator = nil }()
physPages[0][p4Index] = 0
expErr := &kernel.Error{Module: "test", Message: "out of memory"}
allocFn := func() (pmm.Frame, *kernel.Error) {
SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
return 0, expErr
}
})
if _, err := MapTemporary(frame, allocFn); err != expErr {
if _, err := MapTemporary(frame); err != expErr {
t.Fatalf("got unexpected error %v", err)
}
})


@ -39,7 +39,7 @@ type PageDirectoryTable struct {
// Init can:
// - call mem.Memset to clear the frame contents
// - setup a recursive mapping for the last table entry to the page itself.
func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame, allocFn FrameAllocatorFn) *kernel.Error {
func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame) *kernel.Error {
pdt.pdtFrame = pdtFrame
// Check active PDT physical address. If it matches the input pdt then
@ -50,7 +50,7 @@ func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame, allocFn FrameAllocatorFn
}
// Create a temporary mapping for the pdt frame so we can work on it
pdtPage, err := mapTemporaryFn(pdtFrame, allocFn)
pdtPage, err := mapTemporaryFn(pdtFrame)
if err != nil {
return err
}
@ -73,7 +73,7 @@ func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame, allocFn FrameAllocatorFn
// function with the difference that it also supports inactive page PDTs by
// establishing a temporary mapping so that Map() can access the inactive PDT
// entries.
func (pdt PageDirectoryTable) Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllocatorFn) *kernel.Error {
func (pdt PageDirectoryTable) Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
var (
activePdtFrame = pmm.Frame(activePDTFn() >> mem.PageShift)
lastPdtEntryAddr uintptr
@ -89,7 +89,7 @@ func (pdt PageDirectoryTable) Map(page Page, frame pmm.Frame, flags PageTableEnt
flushTLBEntryFn(lastPdtEntryAddr)
}
err := mapFn(page, frame, flags, allocFn)
err := mapFn(page, frame, flags)
if activePdtFrame != pdt.pdtFrame {
lastPdtEntry.SetFrame(activePdtFrame)


@ -15,7 +15,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
t.Skip("test requires amd64 runtime; skipping")
}
defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMapTemporary func(pmm.Frame, FrameAllocatorFn) (Page, *kernel.Error), origUnmap func(Page) *kernel.Error) {
defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMapTemporary func(pmm.Frame) (Page, *kernel.Error), origUnmap func(Page) *kernel.Error) {
flushTLBEntryFn = origFlushTLBEntry
activePDTFn = origActivePDT
mapTemporaryFn = origMapTemporary
@ -32,7 +32,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
return pdtFrame.Address()
}
mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocatorFn) (Page, *kernel.Error) {
mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) {
t.Fatal("unexpected call to MapTemporary")
return 0, nil
}
@ -42,7 +42,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
return nil
}
if err := pdt.Init(pdtFrame, nil); err != nil {
if err := pdt.Init(pdtFrame); err != nil {
t.Fatal(err)
}
})
@ -61,7 +61,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
return 0
}
mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocatorFn) (Page, *kernel.Error) {
mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) {
return PageFromAddress(uintptr(unsafe.Pointer(&physPage[0]))), nil
}
@ -73,7 +73,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
return nil
}
if err := pdt.Init(pdtFrame, nil); err != nil {
if err := pdt.Init(pdtFrame); err != nil {
t.Fatal(err)
}
@ -110,7 +110,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
expErr := &kernel.Error{Module: "test", Message: "error mapping page"}
mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocatorFn) (Page, *kernel.Error) {
mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) {
return 0, expErr
}
@ -119,7 +119,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
return nil
}
if err := pdt.Init(pdtFrame, nil); err != expErr {
if err := pdt.Init(pdtFrame); err != expErr {
t.Fatalf("expected to get error: %v; got %v", *expErr, err)
}
})
@ -130,7 +130,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) {
t.Skip("test requires amd64 runtime; skipping")
}
defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMap func(Page, pmm.Frame, PageTableEntryFlag, FrameAllocatorFn) *kernel.Error) {
defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMap func(Page, pmm.Frame, PageTableEntryFlag) *kernel.Error) {
flushTLBEntryFn = origFlushTLBEntry
activePDTFn = origActivePDT
mapFn = origMap
@ -147,7 +147,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) {
return pdtFrame.Address()
}
mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag, _ FrameAllocatorFn) *kernel.Error {
mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag) *kernel.Error {
return nil
}
@ -156,7 +156,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) {
flushCallCount++
}
if err := pdt.Map(page, pmm.Frame(321), FlagRW, nil); err != nil {
if err := pdt.Map(page, pmm.Frame(321), FlagRW); err != nil {
t.Fatal(err)
}
@ -182,7 +182,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) {
return activePdtFrame.Address()
}
mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag, _ FrameAllocatorFn) *kernel.Error {
mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag) *kernel.Error {
return nil
}
@ -205,7 +205,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) {
flushCallCount++
}
if err := pdt.Map(page, pmm.Frame(321), FlagRW, nil); err != nil {
if err := pdt.Map(page, pmm.Frame(321), FlagRW); err != nil {
t.Fatal(err)
}

kernel/mem/vmm/vmm.go (new file, 17 lines)

@ -0,0 +1,17 @@
package vmm
import (
"github.com/achilleasa/gopher-os/kernel"
"github.com/achilleasa/gopher-os/kernel/mem/pmm"
)
var frameAllocator FrameAllocatorFn
// FrameAllocatorFn is a function that can allocate physical frames.
type FrameAllocatorFn func() (pmm.Frame, *kernel.Error)
// SetFrameAllocator registers a frame allocator function that will be used by
// the vmm code when new physical frames need to be allocated.
func SetFrameAllocator(allocFn FrameAllocatorFn) {
frameAllocator = allocFn
}