
Reserve kernel image pages and decommission early allocator

This commit is contained in:
Achilleas Anagnostopoulos 2017-06-18 08:06:41 +01:00
parent 8b22862784
commit 6ca86e55f8
2 changed files with 148 additions and 16 deletions

View File

@@ -6,6 +6,7 @@ import (
"github.com/achilleasa/gopher-os/kernel"
"github.com/achilleasa/gopher-os/kernel/hal/multiboot"
"github.com/achilleasa/gopher-os/kernel/kfmt/early"
"github.com/achilleasa/gopher-os/kernel/mem"
"github.com/achilleasa/gopher-os/kernel/mem/pmm"
"github.com/achilleasa/gopher-os/kernel/mem/vmm"
@@ -64,7 +65,14 @@ type BitmapAllocator struct {
// init allocates space for the allocator structures using the early bootmem
// allocator and flags any allocated pages as reserved.
func (alloc *BitmapAllocator) init() *kernel.Error {
return alloc.setupPoolBitmaps()
if err := alloc.setupPoolBitmaps(); err != nil {
return err
}
alloc.reserveKernelFrames()
alloc.reserveEarlyAllocatorFrames()
alloc.printStats()
return nil
}
// setupPoolBitmaps uses the early allocator and vmm region reservation helper
@@ -189,6 +197,47 @@ func (alloc *BitmapAllocator) poolForFrame(frame pmm.Frame) int {
return -1
}
// reserveKernelFrames marks as reserved the bitmap entries for the frames
// occupied by the kernel image.
func (alloc *BitmapAllocator) reserveKernelFrames() {
// Flag the frames used by the kernel image as reserved. Since the kernel
// occupies a contiguous block of physical memory, we assume that all of its
// frames fall within the same memory pool.
poolIndex := alloc.poolForFrame(earlyAllocator.kernelStartFrame)
for frame := earlyAllocator.kernelStartFrame; frame <= earlyAllocator.kernelEndFrame; frame++ {
alloc.markFrame(poolIndex, frame, markReserved)
}
}
// reserveEarlyAllocatorFrames marks as reserved the bitmap entries for the
// frames already allocated by the early allocator.
func (alloc *BitmapAllocator) reserveEarlyAllocatorFrames() {
// We now need to decommission the early allocator by flagging all frames
// allocated by it as reserved. The allocator does not track individual
// frames, only a counter of allocated frames. To recover the list of frames
// we reset its internal state and "replay" the allocation requests; since
// the allocator hands out frames deterministically, the replay yields the
// same frames that were originally allocated.
allocCount := earlyAllocator.allocCount
earlyAllocator.allocCount, earlyAllocator.lastAllocFrame = 0, 0
for i := uint64(0); i < allocCount; i++ {
frame, _ := earlyAllocator.AllocFrame()
alloc.markFrame(
alloc.poolForFrame(frame),
frame,
markReserved,
)
}
}
// printStats prints a summary of the total, free and reserved page counts.
func (alloc *BitmapAllocator) printStats() {
early.Printf(
"[bitmap_alloc] page stats: free: %d/%d (%d reserved)\n",
alloc.totalPages-alloc.reservedPages,
alloc.totalPages,
alloc.reservedPages,
)
}
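As a concrete illustration, using the fixture from TestBitmapAllocatorReserveKernelFrames further down (136 total pages, 16 of them reserved for the kernel image), the line emitted by printStats would read something like:

[bitmap_alloc] page stats: free: 120/136 (16 reserved)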
// earlyAllocFrame is a helper that delegates a frame allocation request to the
// early allocator instance. This function is passed as an argument to
// vmm.SetFrameAllocator instead of earlyAllocator.AllocFrame. The latter

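The context above is cut off by the hunk boundary, but the delegation it describes is a thin wrapper. A minimal sketch of what such a helper presumably looks like, assuming vmm.SetFrameAllocator accepts a plain function with AllocFrame's signature (the exact signature is not shown in this diff):

// Hypothetical sketch, not part of this commit: a package-level function that
// forwards frame allocation requests to the early allocator. It is this
// function, rather than the earlyAllocator.AllocFrame method value, that gets
// registered with the vmm package.
func earlyAllocFrame() (pmm.Frame, *kernel.Error) {
	return earlyAllocator.AllocFrame()
}

// e.g. during package initialization:
//   vmm.SetFrameAllocator(earlyAllocFrame)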
View File

@@ -1,8 +1,8 @@
package allocator
import (
"bytes"
"math"
"strconv"
"testing"
"unsafe"
@@ -217,6 +217,102 @@ func TestBitmapAllocatorPoolForFrame(t *testing.T) {
}
}
func TestBitmapAllocatorReserveKernelFrames(t *testing.T) {
var alloc = BitmapAllocator{
pools: []framePool{
{
startFrame: pmm.Frame(0),
endFrame: pmm.Frame(7),
freeCount: 8,
freeBitmap: make([]uint64, 1),
},
{
startFrame: pmm.Frame(64),
endFrame: pmm.Frame(191),
freeCount: 128,
freeBitmap: make([]uint64, 2),
},
},
totalPages: 136,
}
// The kernel occupies 16 frames starting at the beginning of pool 1
earlyAllocator.kernelStartFrame = pmm.Frame(64)
earlyAllocator.kernelEndFrame = pmm.Frame(79)
kernelSizePages := uint32(earlyAllocator.kernelEndFrame - earlyAllocator.kernelStartFrame + 1)
alloc.reserveKernelFrames()
if exp, got := kernelSizePages, alloc.reservedPages; got != exp {
t.Fatalf("expected reserved page counter to be %d; got %d", exp, got)
}
if exp, got := uint32(8), alloc.pools[0].freeCount; got != exp {
t.Fatalf("expected free count for pool 0 to be %d; got %d", exp, got)
}
if exp, got := 128-kernelSizePages, alloc.pools[1].freeCount; got != exp {
t.Fatalf("expected free count for pool 1 to be %d; got %d", exp, got)
}
// The bits for the first 16 frames of pool 1 (the 16 most significant bits
// of block 0) should all be set to 1
if exp, got := uint64(((1<<16)-1)<<48), alloc.pools[1].freeBitmap[0]; got != exp {
t.Fatalf("expected block 0 in pool 1 to be:\n%064s\ngot:\n%064s",
strconv.FormatUint(exp, 2),
strconv.FormatUint(got, 2),
)
}
}
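Both bitmap assertions in these tests compare against ((1<<16)-1)<<48, i.e. the 16 most significant bits of a block, which suggests that frame i of a 64-frame block maps to bit 63-i. A small self-contained sketch of that arithmetic (hypothetical, not part of the commit):

package main

import "fmt"

func main() {
	// Assuming MSB-first ordering: frame i within a 64-frame block maps to
	// bit (63 - i), so reserving the first 16 frames sets bits 63..48.
	var mask uint64
	for i := uint(0); i < 16; i++ {
		mask |= uint64(1) << (63 - i)
	}
	fmt.Printf("%064b\n", mask)                  // 16 ones followed by 48 zeros
	fmt.Println(mask == uint64(((1<<16)-1)<<48)) // true
}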
func TestBitmapAllocatorReserveEarlyAllocatorFrames(t *testing.T) {
var alloc = BitmapAllocator{
pools: []framePool{
{
startFrame: pmm.Frame(0),
endFrame: pmm.Frame(63),
freeCount: 64,
freeBitmap: make([]uint64, 1),
},
{
startFrame: pmm.Frame(64),
endFrame: pmm.Frame(191),
freeCount: 128,
freeBitmap: make([]uint64, 2),
},
},
totalPages: 64,
}
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))
// Simulate 16 allocations made by the early allocator in region 0 of the
// multiboot memory map; the kernel is placed outside that region so it does
// not interfere with the replayed allocations
allocCount := uint32(16)
earlyAllocator.allocCount = uint64(allocCount)
earlyAllocator.kernelStartFrame = pmm.Frame(256)
earlyAllocator.kernelEndFrame = pmm.Frame(256)
alloc.reserveEarlyAllocatorFrames()
if exp, got := allocCount, alloc.reservedPages; got != exp {
t.Fatalf("expected reserved page counter to be %d; got %d", exp, got)
}
if exp, got := 64-allocCount, alloc.pools[0].freeCount; got != exp {
t.Fatalf("expected free count for pool 0 to be %d; got %d", exp, got)
}
if exp, got := uint32(128), alloc.pools[1].freeCount; got != exp {
t.Fatalf("expected free count for pool 1 to be %d; got %d", exp, got)
}
// The bits for the first 16 frames of pool 0 (the 16 most significant bits
// of block 0) should all be set to 1
if exp, got := uint64(((1<<16)-1)<<48), alloc.pools[0].freeBitmap[0]; got != exp {
t.Fatalf("expected block 0 in pool 0 to be:\n%064s\ngot:\n%064s",
strconv.FormatUint(exp, 2),
strconv.FormatUint(got, 2),
)
}
}
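A quick arithmetic check of why the replayed allocations end up in frames 0-15: region 0 of the fixture memory map (visible in the expected output string removed further down) spans 0x0-0x9fc00, i.e. 654336 bytes or 159 full 4 KiB frames, so the 16 allocations all fit in it and, per the assertions above, occupy frames 0-15 of pool 0. A hypothetical sketch of that arithmetic, not part of the commit:

package main

import "fmt"

func main() {
	const pageSize = 0x1000     // 4 KiB frames
	const region0Size = 0x9fc00 // size of region 0 in the fixture memory map
	fmt.Println(region0Size)            // 654336 bytes
	fmt.Println(region0Size / pageSize) // 159 full frames, easily enough for 16 allocations
}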
func TestAllocatorPackageInit(t *testing.T) {
defer func() {
mapFn = vmm.Map
@@ -225,8 +321,6 @@ func TestAllocatorPackageInit(t *testing.T) {
var (
physMem = make([]byte, 2*mem.PageSize)
fb = mockTTY()
buf bytes.Buffer
)
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))
@@ -239,21 +333,10 @@ func TestAllocatorPackageInit(t *testing.T) {
return uintptr(unsafe.Pointer(&physMem[0])), nil
}
mockTTY()
if err := Init(0x100000, 0x1fa7c8); err != nil {
t.Fatal(err)
}
for i := 0; i < len(fb); i += 2 {
if fb[i] == 0x0 {
continue
}
buf.WriteByte(fb[i])
}
exp := "[boot_mem_alloc] system memory map: [0x0000000000 - 0x000009fc00], size: 654336, type: available [0x000009fc00 - 0x00000a0000], size: 1024, type: reserved [0x00000f0000 - 0x0000100000], size: 65536, type: reserved [0x0000100000 - 0x0007fe0000], size: 133038080, type: available [0x0007fe0000 - 0x0008000000], size: 131072, type: reserved [0x00fffc0000 - 0x0100000000], size: 262144, type: reserved[boot_mem_alloc] available memory: 130559Kb[boot_mem_alloc] kernel loaded at 0x100000 - 0x1fa7c8[boot_mem_alloc] size: 1025992 bytes, reserved pages: 251"
if got := buf.String(); got != exp {
t.Fatalf("expected printMemoryMap to generate the following output:\n%q\ngot:\n%q", exp, got)
}
})
t.Run("error", func(t *testing.T) {