From 1b88764676695c6934c201e1874e1452312d0805 Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Wed, 14 Jun 2017 17:22:31 +0100 Subject: [PATCH 1/8] Enable support for the no-execute (NX) bit --- arch/x86_64/asm/rt0_32.s | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86_64/asm/rt0_32.s b/arch/x86_64/asm/rt0_32.s index 4e6fc3f..59ffdc2 100644 --- a/arch/x86_64/asm/rt0_32.s +++ b/arch/x86_64/asm/rt0_32.s @@ -297,10 +297,11 @@ _rt0_enter_long_mode: or eax, 1 << 5 mov cr4, eax - ; Now enable long mode by modifying the EFER MSR + ; Now enable long mode (bit 8) and no-execute support (bit 11) by + ; modifying the EFER MSR mov ecx, 0xc0000080 rdmsr ; read msr value to eax - or eax, 1 << 8 + or eax, (1 << 8) | (1 << 11) wrmsr ; Finally enable paging From dbdf686d2771dc9a839528bfbb041ef79f07c9fc Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Thu, 15 Jun 2017 09:44:29 +0100 Subject: [PATCH 2/8] Provide helper for setting the active frame allocator This allows us to remove the allocFn argument from the vmm functions; that argument sometimes causes the compiler's escape analysis to incorrectly flag it as escaping to the heap. --- kernel/mem/vmm/map.go | 11 ++++------- kernel/mem/vmm/map_test.go | 16 +++++++++------- kernel/mem/vmm/pdt.go | 8 ++++---- kernel/mem/vmm/pdt_test.go | 24 ++++++++++++------------ kernel/mem/vmm/vmm.go | 17 +++++++++++++++++ 5 files changed, 46 insertions(+), 30 deletions(-) create mode 100644 kernel/mem/vmm/vmm.go diff --git a/kernel/mem/vmm/map.go b/kernel/mem/vmm/map.go index 68aadf9..4dfe64d 100644 --- a/kernel/mem/vmm/map.go +++ b/kernel/mem/vmm/map.go @@ -23,14 +23,11 @@ var ( errNoHugePageSupport = &kernel.Error{Module: "vmm", Message: "huge pages are not supported"} ) -// FrameAllocatorFn is a function that can allocate physical frames. -type FrameAllocatorFn func() (pmm.Frame, *kernel.Error) - // Map establishes a mapping between a virtual page and a physical memory frame // using the currently active page directory table. Calls to Map will use the // registered physical frame allocator to initialize missing page tables at each // paging level supported by the MMU. -func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllocatorFn) *kernel.Error { +func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error { var err *kernel.Error walk(page.Address(), func(pteLevel uint8, pte *pageTableEntry) bool { @@ -53,7 +50,7 @@ func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllo // physical frame for it, map it and clear its contents. if !pte.HasFlags(FlagPresent) { var newTableFrame pmm.Frame - newTableFrame, err = allocFn() + newTableFrame, err = frameAllocator() if err != nil { return false } @@ -78,8 +75,8 @@ func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllo // to a fixed virtual address overwriting any previous mapping. The temporary // mapping mechanism is primarily used by the kernel to access and initialize // inactive page tables.
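+// +// A minimal usage sketch (illustrative frame value; assumes a frame allocator +// has been registered via the SetFrameAllocator helper added by this patch): +// +// page, err := MapTemporary(pmm.Frame(123)) +// if err != nil { /* the page-table walk could not allocate a table frame */ } +// defer Unmap(page)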
-func MapTemporary(frame pmm.Frame, allocFn FrameAllocatorFn) (Page, *kernel.Error) { - if err := Map(PageFromAddress(tempMappingAddr), frame, FlagRW, allocFn); err != nil { +func MapTemporary(frame pmm.Frame) (Page, *kernel.Error) { + if err := Map(PageFromAddress(tempMappingAddr), frame, FlagRW); err != nil { return 0, err } diff --git a/kernel/mem/vmm/map_test.go b/kernel/mem/vmm/map_test.go index 9c961ee..cee8379 100644 --- a/kernel/mem/vmm/map_test.go +++ b/kernel/mem/vmm/map_test.go @@ -26,17 +26,18 @@ func TestMapTemporaryAmd64(t *testing.T) { ptePtrFn = origPtePtr nextAddrFn = origNextAddrFn flushTLBEntryFn = origFlushTLBEntryFn + frameAllocator = nil }(ptePtrFn, nextAddrFn, flushTLBEntryFn) var physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry nextPhysPage := 0 // the test allocator returns pages from index 1; we keep index 0 for the P4 entry - allocFn := func() (pmm.Frame, *kernel.Error) { + SetFrameAllocator(func() (pmm.Frame, *kernel.Error) { nextPhysPage++ pageAddr := unsafe.Pointer(&physPages[nextPhysPage][0]) return pmm.Frame(uintptr(pageAddr) >> mem.PageShift), nil - } + }) pteCallCount := 0 ptePtrFn = func(entry uintptr) unsafe.Pointer { @@ -64,7 +65,7 @@ func TestMapTemporaryAmd64(t *testing.T) { frame := pmm.Frame(123) levelIndices := []uint{510, 511, 511, 511} - page, err := MapTemporary(frame, allocFn) + page, err := MapTemporary(frame) if err != nil { t.Fatal(err) } @@ -124,21 +125,22 @@ func TestMapTemporaryErrorsAmd64(t *testing.T) { return unsafe.Pointer(&physPages[0][pteIndex]) } - if _, err := MapTemporary(frame, nil); err != errNoHugePageSupport { + if _, err := MapTemporary(frame); err != errNoHugePageSupport { t.Fatalf("expected to get errNoHugePageSupport; got %v", err) } }) t.Run("allocFn returns an error", func(t *testing.T) { + defer func() { frameAllocator = nil }() physPages[0][p4Index] = 0 expErr := &kernel.Error{Module: "test", Message: "out of memory"} - allocFn := func() (pmm.Frame, *kernel.Error) { + SetFrameAllocator(func() (pmm.Frame, *kernel.Error) { return 0, expErr - } + }) - if _, err := MapTemporary(frame, allocFn); err != expErr { + if _, err := MapTemporary(frame); err != expErr { t.Fatalf("got unexpected error %v", err) } }) diff --git a/kernel/mem/vmm/pdt.go b/kernel/mem/vmm/pdt.go index 8011d31..e60b336 100644 --- a/kernel/mem/vmm/pdt.go +++ b/kernel/mem/vmm/pdt.go @@ -39,7 +39,7 @@ type PageDirectoryTable struct { // Init can: // - call mem.Memset to clear the frame contents // - set up a recursive mapping for the last table entry to the page itself. -func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame, allocFn FrameAllocatorFn) *kernel.Error { +func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame) *kernel.Error { pdt.pdtFrame = pdtFrame // Check active PDT physical address. If it matches the input pdt then @@ -50,7 +50,7 @@ func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame, allocFn FrameAllocatorFn } // Create a temporary mapping for the pdt frame so we can work on it - pdtPage, err := mapTemporaryFn(pdtFrame, allocFn) + pdtPage, err := mapTemporaryFn(pdtFrame) if err != nil { return err } @@ -73,7 +73,7 @@ func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame, allocFn FrameAllocatorFn // function with the difference that it also supports inactive PDTs by // establishing a temporary mapping so that Map() can access the inactive PDT // entries.
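+// +// A short usage sketch (pdtFrame, page and frame are illustrative values; the +// PDT must first be set up via Init): +// +// var pdt PageDirectoryTable +// if err := pdt.Init(pdtFrame); err != nil { /* handle error */ } +// if err := pdt.Map(page, frame, FlagRW); err != nil { /* handle error */ }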
-func (pdt PageDirectoryTable) Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllocatorFn) *kernel.Error { +func (pdt PageDirectoryTable) Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error { var ( activePdtFrame = pmm.Frame(activePDTFn() >> mem.PageShift) lastPdtEntryAddr uintptr @@ -89,7 +89,7 @@ func (pdt PageDirectoryTable) Map(page Page, frame pmm.Frame, flags PageTableEnt flushTLBEntryFn(lastPdtEntryAddr) } - err := mapFn(page, frame, flags, allocFn) + err := mapFn(page, frame, flags) if activePdtFrame != pdt.pdtFrame { lastPdtEntry.SetFrame(activePdtFrame) diff --git a/kernel/mem/vmm/pdt_test.go b/kernel/mem/vmm/pdt_test.go index 8bc9442..c971030 100644 --- a/kernel/mem/vmm/pdt_test.go +++ b/kernel/mem/vmm/pdt_test.go @@ -15,7 +15,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) { t.Skip("test requires amd64 runtime; skipping") } - defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMapTemporary func(pmm.Frame, FrameAllocatorFn) (Page, *kernel.Error), origUnmap func(Page) *kernel.Error) { + defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMapTemporary func(pmm.Frame) (Page, *kernel.Error), origUnmap func(Page) *kernel.Error) { flushTLBEntryFn = origFlushTLBEntry activePDTFn = origActivePDT mapTemporaryFn = origMapTemporary @@ -32,7 +32,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) { return pdtFrame.Address() } - mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocatorFn) (Page, *kernel.Error) { + mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) { t.Fatal("unexpected call to MapTemporary") return 0, nil } @@ -42,7 +42,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) { return nil } - if err := pdt.Init(pdtFrame, nil); err != nil { + if err := pdt.Init(pdtFrame); err != nil { t.Fatal(err) } }) @@ -61,7 +61,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) { return 0 } - mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocatorFn) (Page, *kernel.Error) { + mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) { return PageFromAddress(uintptr(unsafe.Pointer(&physPage[0]))), nil } @@ -73,7 +73,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) { return nil } - if err := pdt.Init(pdtFrame, nil); err != nil { + if err := pdt.Init(pdtFrame); err != nil { t.Fatal(err) } @@ -110,7 +110,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) { expErr := &kernel.Error{Module: "test", Message: "error mapping page"} - mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocatorFn) (Page, *kernel.Error) { + mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) { return 0, expErr } @@ -119,7 +119,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) { return nil } - if err := pdt.Init(pdtFrame, nil); err != expErr { + if err := pdt.Init(pdtFrame); err != expErr { t.Fatalf("expected to get error: %v; got %v", *expErr, err) } }) @@ -130,7 +130,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) { t.Skip("test requires amd64 runtime; skipping") } - defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMap func(Page, pmm.Frame, PageTableEntryFlag, FrameAllocatorFn) *kernel.Error) { + defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMap func(Page, pmm.Frame, PageTableEntryFlag) *kernel.Error) { flushTLBEntryFn = origFlushTLBEntry activePDTFn = origActivePDT mapFn = origMap @@ -147,7 +147,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) { return pdtFrame.Address() } - mapFn = func(_ Page, _ pmm.Frame, _ 
PageTableEntryFlag, _ FrameAllocatorFn) *kernel.Error { + mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag) *kernel.Error { return nil } @@ -156,7 +156,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) { flushCallCount++ } - if err := pdt.Map(page, pmm.Frame(321), FlagRW, nil); err != nil { + if err := pdt.Map(page, pmm.Frame(321), FlagRW); err != nil { t.Fatal(err) } @@ -182,7 +182,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) { return activePdtFrame.Address() } - mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag, _ FrameAllocatorFn) *kernel.Error { + mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag) *kernel.Error { return nil } @@ -205,7 +205,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) { flushCallCount++ } - if err := pdt.Map(page, pmm.Frame(321), FlagRW, nil); err != nil { + if err := pdt.Map(page, pmm.Frame(321), FlagRW); err != nil { t.Fatal(err) } diff --git a/kernel/mem/vmm/vmm.go b/kernel/mem/vmm/vmm.go new file mode 100644 index 0000000..2ee2971 --- /dev/null +++ b/kernel/mem/vmm/vmm.go @@ -0,0 +1,17 @@ +package vmm + +import ( + "github.com/achilleasa/gopher-os/kernel" + "github.com/achilleasa/gopher-os/kernel/mem/pmm" +) + +var frameAllocator FrameAllocatorFn + +// FrameAllocatorFn is a function that can allocate physical frames. +type FrameAllocatorFn func() (pmm.Frame, *kernel.Error) + +// SetFrameAllocator registers a frame allocator function that will be used by +// the vmm code when new physical frames need to be allocated. +func SetFrameAllocator(allocFn FrameAllocatorFn) { + frameAllocator = allocFn +} From 1c3bfcd58d5e82588351a9766c2b0af574f9ab1e Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Wed, 14 Jun 2017 17:09:05 +0100 Subject: [PATCH 3/8] Implement early virtual address space reservation helper Function EarlyReserveRegion reserves contiguous virtual address space regions beginning at the end of the available kernel space and moving towards lower virtual addresses. The only state that is tracked by this function is the last allocated virtual page address which is adjusted after each reservation request. Starting at the end of the kernel address space ensures that we will not step on the virtual addresses used by the kernel code and data sections. --- kernel/mem/vmm/addr_space.go | 35 +++++++++++++++++++++++++++++++ kernel/mem/vmm/addr_space_test.go | 29 +++++++++++++++++++++++++ 2 files changed, 64 insertions(+) create mode 100644 kernel/mem/vmm/addr_space.go create mode 100644 kernel/mem/vmm/addr_space_test.go diff --git a/kernel/mem/vmm/addr_space.go b/kernel/mem/vmm/addr_space.go new file mode 100644 index 0000000..695168c --- /dev/null +++ b/kernel/mem/vmm/addr_space.go @@ -0,0 +1,35 @@ +package vmm + +import ( + "github.com/achilleasa/gopher-os/kernel" + "github.com/achilleasa/gopher-os/kernel/mem" +) + +var ( + // earlyReserveLastUsed tracks the last reserved page address and is + // decreased after each allocation request. Initially, it points to + // tempMappingAddr which coincides with the end of the kernel address + // space. + earlyReserveLastUsed = tempMappingAddr + + errEarlyReserveNoSpace = &kernel.Error{Module: "early_reserve", Message: "remaining virtual address space not large enough to satisfy reservation request"} +) + +// EarlyReserveRegion reserves a page-aligned contiguous virtual memory region +// with the requested size in the kernel address space and returns its virtual +// address. If size is not a multiple of mem.PageSize it will be automatically +// rounded up. 
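+// +// For example (illustrative values; mem.PageSize is 4096 on amd64): a request +// for 42 bytes is rounded up to a single page and the returned address lies +// one page below the previous earlyReserveLastUsed value: +// +// addr, err := EarlyReserveRegion(42) // addr == old earlyReserveLastUsed - 4096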
+// +// This function allocates regions starting at the end of the kernel address space. It should only be used during the early stages of kernel initialization. +func EarlyReserveRegion(size mem.Size) (uintptr, *kernel.Error) { + size = (size + (mem.PageSize - 1)) & ^(mem.PageSize - 1) + + // Reserving a region of the requested size would cause an underflow + if uintptr(size) > earlyReserveLastUsed { + return 0, errEarlyReserveNoSpace + } + + earlyReserveLastUsed -= uintptr(size) + return earlyReserveLastUsed, nil +} diff --git a/kernel/mem/vmm/addr_space_test.go b/kernel/mem/vmm/addr_space_test.go new file mode 100644 index 0000000..e457381 --- /dev/null +++ b/kernel/mem/vmm/addr_space_test.go @@ -0,0 +1,29 @@ +package vmm + +import ( + "runtime" + "testing" +) + +func TestEarlyReserveAmd64(t *testing.T) { + if runtime.GOARCH != "amd64" { + t.Skip("test requires amd64 runtime; skipping") + } + + defer func(origLastUsed uintptr) { + earlyReserveLastUsed = origLastUsed + }(earlyReserveLastUsed) + + earlyReserveLastUsed = 4096 + next, err := EarlyReserveRegion(42) + if err != nil { + t.Fatal(err) + } + if exp := uintptr(0); next != exp { + t.Fatal("expected reservation request to be rounded up to the nearest page") + } + + if _, err = EarlyReserveRegion(1); err != errEarlyReserveNoSpace { + t.Fatalf("expected to get errEarlyReserveNoSpace; got %v", err) + } +} From bc44151c938cca3461f3e9c7268edbc91716e1d0 Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Thu, 15 Jun 2017 16:39:17 +0100 Subject: [PATCH 4/8] Reserve and initialize the BitmapAllocator frame pools --- kernel/mem/pmm/allocator/bitmap_allocator.go | 164 ++++++++++++++++ .../pmm/allocator/bitmap_allocator_test.go | 183 ++++++++++++++++++ kernel/mem/pmm/allocator/bootmem.go | 7 - kernel/mem/pmm/allocator/bootmem_test.go | 21 -- 4 files changed, 347 insertions(+), 28 deletions(-) create mode 100644 kernel/mem/pmm/allocator/bitmap_allocator.go create mode 100644 kernel/mem/pmm/allocator/bitmap_allocator_test.go diff --git a/kernel/mem/pmm/allocator/bitmap_allocator.go b/kernel/mem/pmm/allocator/bitmap_allocator.go new file mode 100644 index 0000000..339847f --- /dev/null +++ b/kernel/mem/pmm/allocator/bitmap_allocator.go @@ -0,0 +1,164 @@ +package allocator + +import ( + "reflect" + "unsafe" + + "github.com/achilleasa/gopher-os/kernel" + "github.com/achilleasa/gopher-os/kernel/hal/multiboot" + "github.com/achilleasa/gopher-os/kernel/mem" + "github.com/achilleasa/gopher-os/kernel/mem/pmm" + "github.com/achilleasa/gopher-os/kernel/mem/vmm" +) + +var ( + // FrameAllocator is a BitmapAllocator instance that serves as the + // primary allocator for reserving pages. + FrameAllocator BitmapAllocator + + // The following functions are used by tests to mock calls to the vmm package + // and are automatically inlined by the compiler. + reserveRegionFn = vmm.EarlyReserveRegion + mapFn = vmm.Map +) + +type framePool struct { + // startFrame is the frame number for the first page in this pool. + // Each free bitmap entry i corresponds to frame (startFrame + i). + startFrame pmm.Frame + + // endFrame tracks the last frame in the pool. The total number of + // frames is given by: (endFrame - startFrame) + 1 + endFrame pmm.Frame + + // freeCount tracks the available pages in this pool. The allocator + // can use this field to skip fully allocated pools without the need + // to scan the free bitmap. + freeCount uint32 + + // freeBitmap tracks used/free pages in the pool.
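+ // A set bit marks the corresponding frame as reserved; a cleared bit + // means the frame is free (the bitmap pages are zero-filled during + // setup, so all frames start out free).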
+ freeBitmap []uint64 + freeBitmapHdr reflect.SliceHeader +} + +// BitmapAllocator implements a physical frame allocator that tracks frame +// reservations across the available memory pools using bitmaps. +type BitmapAllocator struct { + // totalPages tracks the total number of pages across all pools. + totalPages uint32 + + // reservedPages tracks the number of reserved pages across all pools. + reservedPages uint32 + + pools []framePool + poolsHdr reflect.SliceHeader +} + +// init allocates space for the allocator structures using the early bootmem +// allocator and flags any allocated pages as reserved. +func (alloc *BitmapAllocator) init() *kernel.Error { + return alloc.setupPoolBitmaps() +} + +// setupPoolBitmaps uses the early allocator and vmm region reservation helper +// to initialize the list of available pools and their free bitmap slices. +func (alloc *BitmapAllocator) setupPoolBitmaps() *kernel.Error { + var ( + err *kernel.Error + sizeofPool = unsafe.Sizeof(framePool{}) + pageSizeMinus1 = uint64(mem.PageSize - 1) + requiredBitmapBytes mem.Size + ) + + // Detect available memory regions and calculate their pool bitmap + // requirements. + multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool { + if region.Type != multiboot.MemAvailable { + return true + } + + alloc.poolsHdr.Len++ + alloc.poolsHdr.Cap++ + + // Reported addresses may not be page-aligned; round up to get + // the start frame and round down to get the end frame + regionStartFrame := pmm.Frame(((region.PhysAddress + pageSizeMinus1) & ^pageSizeMinus1) >> mem.PageShift) + regionEndFrame := pmm.Frame(((region.PhysAddress+region.Length) & ^pageSizeMinus1)>>mem.PageShift) - 1 + pageCount := uint32(regionEndFrame - regionStartFrame) + alloc.totalPages += pageCount + + // To represent the free page bitmap we need pageCount bits. 
Since our + // slice uses uint64 for storing the bitmap we need to round up the + // required bits so they are a multiple of 64 bits. + requiredBitmapBytes += mem.Size(((pageCount + 63) &^ 63) >> 3) + return true + }) + + // Reserve enough pages to hold the allocator state + requiredBytes := mem.Size(((uint64(uintptr(alloc.poolsHdr.Len)*sizeofPool) + uint64(requiredBitmapBytes)) + pageSizeMinus1) & ^pageSizeMinus1) + requiredPages := requiredBytes >> mem.PageShift + alloc.poolsHdr.Data, err = reserveRegionFn(requiredBytes) + if err != nil { + return err + } + + for page, index := vmm.PageFromAddress(alloc.poolsHdr.Data), mem.Size(0); index < requiredPages; page, index = page+1, index+1 { + nextFrame, err := earlyAllocFrame() + if err != nil { + return err + } + + if err = mapFn(page, nextFrame, vmm.FlagPresent|vmm.FlagRW|vmm.FlagNoExecute); err != nil { + return err + } + + mem.Memset(page.Address(), 0, mem.PageSize) + } + + alloc.pools = *(*[]framePool)(unsafe.Pointer(&alloc.poolsHdr)) + + // Run a second pass to initialize the free bitmap slices for all pools + bitmapStartAddr := alloc.poolsHdr.Data + uintptr(alloc.poolsHdr.Len)*sizeofPool + poolIndex := 0 + multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool { + if region.Type != multiboot.MemAvailable { + return true + } + + regionStartFrame := pmm.Frame(((region.PhysAddress + pageSizeMinus1) & ^pageSizeMinus1) >> mem.PageShift) + regionEndFrame := pmm.Frame(((region.PhysAddress+region.Length) & ^pageSizeMinus1)>>mem.PageShift) - 1 + bitmapBytes := uintptr((((regionEndFrame - regionStartFrame) + 63) &^ 63) >> 3) + + alloc.pools[poolIndex].startFrame = regionStartFrame + alloc.pools[poolIndex].endFrame = regionEndFrame + alloc.pools[poolIndex].freeCount = uint32(regionEndFrame - regionStartFrame + 1) + alloc.pools[poolIndex].freeBitmapHdr.Len = int(bitmapBytes >> 3) + alloc.pools[poolIndex].freeBitmapHdr.Cap = alloc.pools[poolIndex].freeBitmapHdr.Len + alloc.pools[poolIndex].freeBitmapHdr.Data = bitmapStartAddr + alloc.pools[poolIndex].freeBitmap = *(*[]uint64)(unsafe.Pointer(&alloc.pools[poolIndex].freeBitmapHdr)) + + bitmapStartAddr += bitmapBytes + poolIndex++ + return true + }) + + return nil +} + +// earlyAllocFrame is a helper that delegates a frame allocation request to the +// early allocator instance. This function is passed as an argument to +// vmm.SetFrameAllocator instead of earlyAllocator.AllocFrame. The latter +// confuses the compiler's escape analysis into thinking that +// earlyAllocator escapes to the heap. +func earlyAllocFrame() (pmm.Frame, *kernel.Error) { + return earlyAllocator.AllocFrame() +} + +// Init sets up the kernel physical memory allocation sub-system.
+func Init(kernelStart, kernelEnd uintptr) *kernel.Error { + earlyAllocator.init(kernelStart, kernelEnd) + earlyAllocator.printMemoryMap() + + vmm.SetFrameAllocator(earlyAllocFrame) + return FrameAllocator.init() +} diff --git a/kernel/mem/pmm/allocator/bitmap_allocator_test.go b/kernel/mem/pmm/allocator/bitmap_allocator_test.go new file mode 100644 index 0000000..48ba488 --- /dev/null +++ b/kernel/mem/pmm/allocator/bitmap_allocator_test.go @@ -0,0 +1,183 @@ +package allocator + +import ( + "bytes" + "math" + "testing" + "unsafe" + + "github.com/achilleasa/gopher-os/kernel" + "github.com/achilleasa/gopher-os/kernel/hal/multiboot" + "github.com/achilleasa/gopher-os/kernel/mem" + "github.com/achilleasa/gopher-os/kernel/mem/pmm" + "github.com/achilleasa/gopher-os/kernel/mem/vmm" +) + +func TestSetupPoolBitmaps(t *testing.T) { + defer func() { + mapFn = vmm.Map + reserveRegionFn = vmm.EarlyReserveRegion + }() + + multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0]))) + + // The captured multiboot data corresponds to qemu running with 128M RAM. + // The allocator will need to reserve 2 pages to store the bitmap data. + var ( + alloc BitmapAllocator + physMem = make([]byte, 2*mem.PageSize) + ) + + // Init phys mem with junk + for i := 0; i < len(physMem); i++ { + physMem[i] = 0xf0 + } + + mapCallCount := 0 + mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error { + mapCallCount++ + return nil + } + + reserveCallCount := 0 + reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) { + reserveCallCount++ + return uintptr(unsafe.Pointer(&physMem[0])), nil + } + + if err := alloc.setupPoolBitmaps(); err != nil { + t.Fatal(err) + } + + if exp := 2; mapCallCount != exp { + t.Fatalf("expected allocator to call vmm.Map %d times; called %d", exp, mapCallCount) + } + + if exp := 1; reserveCallCount != exp { + t.Fatalf("expected allocator to call vmm.EarlyReserveRegion %d times; called %d", exp, reserveCallCount) + } + + if exp, got := 2, len(alloc.pools); got != exp { + t.Fatalf("expected allocator to initialize %d pools; got %d", exp, got) + } + + for poolIndex, pool := range alloc.pools { + if expFreeCount := uint32(pool.endFrame - pool.startFrame + 1); pool.freeCount != expFreeCount { + t.Errorf("[pool %d] expected free count to be %d; got %d", poolIndex, expFreeCount, pool.freeCount) + } + + if exp, got := int(math.Ceil(float64(pool.freeCount)/64.0)), len(pool.freeBitmap); got != exp { + t.Errorf("[pool %d] expected bitmap len to be %d; got %d", poolIndex, exp, got) + } + + for blockIndex, block := range pool.freeBitmap { + if block != 0 { + t.Errorf("[pool %d] expected bitmap block %d to be cleared; got %d", poolIndex, blockIndex, block) + } + } + } +} + +func TestSetupPoolBitmapsErrors(t *testing.T) { + defer func() { + mapFn = vmm.Map + reserveRegionFn = vmm.EarlyReserveRegion + }() + + multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0]))) + var alloc BitmapAllocator + + t.Run("vmm.EarlyReserveRegion returns an error", func(t *testing.T) { + expErr := &kernel.Error{Module: "test", Message: "something went wrong"} + + reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) { + return 0, expErr + } + + if err := alloc.setupPoolBitmaps(); err != expErr { + t.Fatalf("expected to get error: %v; got %v", expErr, err) + } + }) + t.Run("vmm.Map returns an error", func(t *testing.T) { + expErr := &kernel.Error{Module: "test", Message: "something went wrong"} + + reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) { + return 0, 
nil + } + + mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error { + return expErr + } + + if err := alloc.setupPoolBitmaps(); err != expErr { + t.Fatalf("expected to get error: %v; got %v", expErr, err) + } + }) + + t.Run("earlyAllocator returns an error", func(t *testing.T) { + emptyInfoData := []byte{ + 0, 0, 0, 0, // size + 0, 0, 0, 0, // reserved + 0, 0, 0, 0, // tag with type zero and length zero + 0, 0, 0, 0, + } + + multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&emptyInfoData[0]))) + + if err := alloc.setupPoolBitmaps(); err != errBootAllocOutOfMemory { + t.Fatalf("expected to get error: %v; got %v", errBootAllocOutOfMemory, err) + } + }) +} + +func TestAllocatorPackageInit(t *testing.T) { + defer func() { + mapFn = vmm.Map + reserveRegionFn = vmm.EarlyReserveRegion + }() + + var ( + physMem = make([]byte, 2*mem.PageSize) + fb = mockTTY() + buf bytes.Buffer + ) + multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0]))) + + t.Run("success", func(t *testing.T) { + mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error { + return nil + } + + reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) { + return uintptr(unsafe.Pointer(&physMem[0])), nil + } + + if err := Init(0x100000, 0x1fa7c8); err != nil { + t.Fatal(err) + } + + for i := 0; i < len(fb); i += 2 { + if fb[i] == 0x0 { + continue + } + buf.WriteByte(fb[i]) + } + + exp := "[boot_mem_alloc] system memory map: [0x0000000000 - 0x000009fc00], size: 654336, type: available [0x000009fc00 - 0x00000a0000], size: 1024, type: reserved [0x00000f0000 - 0x0000100000], size: 65536, type: reserved [0x0000100000 - 0x0007fe0000], size: 133038080, type: available [0x0007fe0000 - 0x0008000000], size: 131072, type: reserved [0x00fffc0000 - 0x0100000000], size: 262144, type: reserved[boot_mem_alloc] available memory: 130559Kb[boot_mem_alloc] kernel loaded at 0x100000 - 0x1fa7c8[boot_mem_alloc] size: 1025992 bytes, reserved pages: 251" + if got := buf.String(); got != exp { + t.Fatalf("expected printMemoryMap to generate the following output:\n%q\ngot:\n%q", exp, got) + } + }) + + t.Run("error", func(t *testing.T) { + expErr := &kernel.Error{Module: "test", Message: "something went wrong"} + + mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error { + return expErr + } + + if err := Init(0x100000, 0x1fa7c8); err != expErr { + t.Fatalf("expected to get error: %v; got %v", expErr, err) + } + }) +} diff --git a/kernel/mem/pmm/allocator/bootmem.go b/kernel/mem/pmm/allocator/bootmem.go index 4f6910c..86473a9 100644 --- a/kernel/mem/pmm/allocator/bootmem.go +++ b/kernel/mem/pmm/allocator/bootmem.go @@ -134,10 +134,3 @@ func (alloc *bootMemAllocator) printMemoryMap() { uint64(alloc.kernelEndFrame-alloc.kernelStartFrame+1), ) } - -// Init sets up the kernel physical memory allocation sub-system. 
-func Init(kernelStart, kernelEnd uintptr) *kernel.Error { - earlyAllocator.init(kernelStart, kernelEnd) - earlyAllocator.printMemoryMap() - return nil -} diff --git a/kernel/mem/pmm/allocator/bootmem_test.go b/kernel/mem/pmm/allocator/bootmem_test.go index f6964c5..ebc4dd7 100644 --- a/kernel/mem/pmm/allocator/bootmem_test.go +++ b/kernel/mem/pmm/allocator/bootmem_test.go @@ -1,7 +1,6 @@ package allocator import ( - "bytes" "testing" "unsafe" @@ -94,26 +93,6 @@ func TestBootMemoryAllocator(t *testing.T) { } } -func TestAllocatorPackageInit(t *testing.T) { - fb := mockTTY() - multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0]))) - - Init(0x100000, 0x1fa7c8) - - var buf bytes.Buffer - for i := 0; i < len(fb); i += 2 { - if fb[i] == 0x0 { - continue - } - buf.WriteByte(fb[i]) - } - - exp := "[boot_mem_alloc] system memory map: [0x0000000000 - 0x000009fc00], size: 654336, type: available [0x000009fc00 - 0x00000a0000], size: 1024, type: reserved [0x00000f0000 - 0x0000100000], size: 65536, type: reserved [0x0000100000 - 0x0007fe0000], size: 133038080, type: available [0x0007fe0000 - 0x0008000000], size: 131072, type: reserved [0x00fffc0000 - 0x0100000000], size: 262144, type: reserved[boot_mem_alloc] available memory: 130559Kb[boot_mem_alloc] kernel loaded at 0x100000 - 0x1fa7c8[boot_mem_alloc] size: 1025992 bytes, reserved pages: 251" - if got := buf.String(); got != exp { - t.Fatalf("expected printMemoryMap to generate the following output:\n%q\ngot:\n%q", exp, got) - } -} - var ( // A dump of multiboot data when running under qemu containing only the // memory region tag. The dump encodes the following available memory From 8b2286278447d89374789843e445bc7caec23913 Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Sun, 18 Jun 2017 07:34:49 +0100 Subject: [PATCH 5/8] Support looking up pools by frame and flagging bitmap entries as reserved/free --- kernel/mem/pmm/allocator/bitmap_allocator.go | 44 ++++++++++ .../pmm/allocator/bitmap_allocator_test.go | 87 +++++++++++++++++++ 2 files changed, 131 insertions(+) diff --git a/kernel/mem/pmm/allocator/bitmap_allocator.go b/kernel/mem/pmm/allocator/bitmap_allocator.go index 339847f..4698531 100644 --- a/kernel/mem/pmm/allocator/bitmap_allocator.go +++ b/kernel/mem/pmm/allocator/bitmap_allocator.go @@ -22,6 +22,13 @@ var ( mapFn = vmm.Map ) +type markAs bool + +const ( + markReserved markAs = false + markFree = true +) + type framePool struct { // startFrame is the frame number for the first page in this pool. // Each free bitmap entry i corresponds to frame (startFrame + i). startFrame pmm.Frame @@ -145,6 +152,43 @@ func (alloc *BitmapAllocator) setupPoolBitmaps() *kernel.Error { return nil } +// markFrame updates the reservation flag for the bitmap entry that corresponds +// to the supplied frame. +func (alloc *BitmapAllocator) markFrame(poolIndex int, frame pmm.Frame, flag markAs) { + if poolIndex < 0 || frame > alloc.pools[poolIndex].endFrame { + return + } + + // The offset in the block is given by: frame % 64. As the bitmap uses a + // big-endian representation we need to set the bit at index: 63 - offset
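+ // For example (illustrative): relFrame 65 falls in block 1 (65 >> 6) with + // offset 1, so the mask becomes 1 << (63 - 1) and selects bit 62 of that block.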
+ relFrame := frame - alloc.pools[poolIndex].startFrame + block := relFrame >> 6 + mask := uint64(1 << (63 - (relFrame - block<<6))) + switch flag { + case markFree: + alloc.pools[poolIndex].freeBitmap[block] &^= mask + alloc.pools[poolIndex].freeCount++ + alloc.reservedPages-- + case markReserved: + alloc.pools[poolIndex].freeBitmap[block] |= mask + alloc.pools[poolIndex].freeCount-- + alloc.reservedPages++ + } +} + +// poolForFrame returns the index of the pool that contains frame or -1 if +// the frame is not contained in any of the available memory pools (e.g. it +// points to a reserved memory region). +func (alloc *BitmapAllocator) poolForFrame(frame pmm.Frame) int { + for poolIndex, pool := range alloc.pools { + if frame >= pool.startFrame && frame <= pool.endFrame { + return poolIndex + } + } + + return -1 +} + // earlyAllocFrame is a helper that delegates a frame allocation request to the // early allocator instance. This function is passed as an argument to // vmm.SetFrameAllocator instead of earlyAllocator.AllocFrame. The latter diff --git a/kernel/mem/pmm/allocator/bitmap_allocator_test.go b/kernel/mem/pmm/allocator/bitmap_allocator_test.go index 48ba488..befaa74 100644 --- a/kernel/mem/pmm/allocator/bitmap_allocator_test.go +++ b/kernel/mem/pmm/allocator/bitmap_allocator_test.go @@ -130,6 +130,93 @@ func TestSetupPoolBitmapsErrors(t *testing.T) { }) } +func TestBitmapAllocatorMarkFrame(t *testing.T) { + var alloc = BitmapAllocator{ + pools: []framePool{ + { + startFrame: pmm.Frame(0), + endFrame: pmm.Frame(127), + freeCount: 128, + freeBitmap: make([]uint64, 2), + }, + }, + totalPages: 128, + } + + lastFrame := pmm.Frame(alloc.totalPages) + for frame := pmm.Frame(0); frame < lastFrame; frame++ { + alloc.markFrame(0, frame, markReserved) + + block := uint64(frame / 64) + blockOffset := uint64(frame % 64) + bitIndex := (63 - blockOffset) + bitMask := uint64(1 << bitIndex) + + if alloc.pools[0].freeBitmap[block]&bitMask != bitMask { + t.Errorf("[frame %d] expected block[%d], bit %d to be set", frame, block, bitIndex) + } + + alloc.markFrame(0, frame, markFree) + + if alloc.pools[0].freeBitmap[block]&bitMask != 0 { + t.Errorf("[frame %d] expected block[%d], bit %d to be unset", frame, block, bitIndex) + } + } + + // Calling markFrame with a frame not part of the pool should be a no-op + alloc.markFrame(0, pmm.Frame(0xbadf00d), markReserved) + for blockIndex, block := range alloc.pools[0].freeBitmap { + if block != 0 { + t.Errorf("expected all blocks to be set to 0; block %d is set to %d", blockIndex, block) + } + } + + // Calling markFrame with a negative pool index should be a no-op + alloc.markFrame(-1, pmm.Frame(0), markReserved) + for blockIndex, block := range alloc.pools[0].freeBitmap { + if block != 0 { + t.Errorf("expected all blocks to be set to 0; block %d is set to %d", blockIndex, block) + } + } +} + +func TestBitmapAllocatorPoolForFrame(t *testing.T) { + var alloc = BitmapAllocator{ + pools: []framePool{ + { + startFrame: pmm.Frame(0), + endFrame: pmm.Frame(63), + freeCount: 64, + freeBitmap: make([]uint64, 1), + }, + { + startFrame: pmm.Frame(128), + endFrame: pmm.Frame(191), + freeCount: 64, + freeBitmap: make([]uint64, 1), + }, + }, + totalPages: 128, + } + + specs := []struct { + frame pmm.Frame + expIndex int + }{ + {pmm.Frame(0), 0}, + {pmm.Frame(63), 0}, + {pmm.Frame(64), -1}, + {pmm.Frame(128), 1}, + {pmm.Frame(192), -1}, + } + + for specIndex, spec := range specs {
+ if got := alloc.poolForFrame(spec.frame); got != spec.expIndex { + t.Errorf("[spec %d] expected to get pool index %d; got %d", specIndex, spec.expIndex, got) + } + } +} + func TestAllocatorPackageInit(t *testing.T) { defer func() { mapFn = vmm.Map From 6ca86e55f89e37a56222d36260d11edd9f10a26d Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Sun, 18 Jun 2017 08:06:41 +0100 Subject: [PATCH 6/8] Reserve kernel image pages and decommission early allocator --- kernel/mem/pmm/allocator/bitmap_allocator.go | 51 +++++++- .../pmm/allocator/bitmap_allocator_test.go | 113 +++++++++++++++--- 2 files changed, 148 insertions(+), 16 deletions(-) diff --git a/kernel/mem/pmm/allocator/bitmap_allocator.go b/kernel/mem/pmm/allocator/bitmap_allocator.go index 4698531..a4cb686 100644 --- a/kernel/mem/pmm/allocator/bitmap_allocator.go +++ b/kernel/mem/pmm/allocator/bitmap_allocator.go @@ -6,6 +6,7 @@ import ( "github.com/achilleasa/gopher-os/kernel" "github.com/achilleasa/gopher-os/kernel/hal/multiboot" + "github.com/achilleasa/gopher-os/kernel/kfmt/early" "github.com/achilleasa/gopher-os/kernel/mem" "github.com/achilleasa/gopher-os/kernel/mem/pmm" "github.com/achilleasa/gopher-os/kernel/mem/vmm" ) @@ -64,7 +65,14 @@ type BitmapAllocator struct { // init allocates space for the allocator structures using the early bootmem // allocator and flags any allocated pages as reserved. func (alloc *BitmapAllocator) init() *kernel.Error { - return alloc.setupPoolBitmaps() + if err := alloc.setupPoolBitmaps(); err != nil { + return err + } + + alloc.reserveKernelFrames() + alloc.reserveEarlyAllocatorFrames() + alloc.printStats() + return nil } // setupPoolBitmaps uses the early allocator and vmm region reservation helper @@ -189,6 +197,47 @@ func (alloc *BitmapAllocator) poolForFrame(frame pmm.Frame) int { return -1 } +// reserveKernelFrames marks as reserved the bitmap entries for the frames +// occupied by the kernel image. +func (alloc *BitmapAllocator) reserveKernelFrames() { + // Flag frames used by the kernel image as reserved. Since the kernel must + // occupy a contiguous memory block we assume that all its frames will + // fall into one of the available memory pools. + poolIndex := alloc.poolForFrame(earlyAllocator.kernelStartFrame) + for frame := earlyAllocator.kernelStartFrame; frame <= earlyAllocator.kernelEndFrame; frame++ { + alloc.markFrame(poolIndex, frame, markReserved) + } +} + +// reserveEarlyAllocatorFrames marks as reserved the bitmap entries for the frames +// already allocated by the early allocator. +func (alloc *BitmapAllocator) reserveEarlyAllocatorFrames() { + // We now need to decommission the early allocator by flagging all frames + // allocated by it as reserved. The allocator itself does not track + // individual frames but only a counter of allocated frames. To get + // the list of frames we reset its internal state and "replay" the + // allocation requests to get the correct frames. + allocCount := earlyAllocator.allocCount + earlyAllocator.allocCount, earlyAllocator.lastAllocFrame = 0, 0 + for i := uint64(0); i < allocCount; i++ { + frame, _ := earlyAllocator.AllocFrame() + alloc.markFrame( + alloc.poolForFrame(frame), + frame, + markReserved, + ) + } +} + +func (alloc *BitmapAllocator) printStats() { + early.Printf( + "[bitmap_alloc] page stats: free: %d/%d (%d reserved)\n", + alloc.totalPages-alloc.reservedPages, + alloc.totalPages, + alloc.reservedPages, + ) +} + // earlyAllocFrame is a helper that delegates a frame allocation request to the // early allocator instance.
This function is passed as an argument to // vmm.SetFrameAllocator instead of earlyAllocator.AllocFrame. The latter diff --git a/kernel/mem/pmm/allocator/bitmap_allocator_test.go b/kernel/mem/pmm/allocator/bitmap_allocator_test.go index befaa74..4af2d36 100644 --- a/kernel/mem/pmm/allocator/bitmap_allocator_test.go +++ b/kernel/mem/pmm/allocator/bitmap_allocator_test.go @@ -1,8 +1,8 @@ package allocator import ( - "bytes" "math" + "strconv" "testing" "unsafe" @@ -217,6 +217,102 @@ func TestBitmapAllocatorPoolForFrame(t *testing.T) { } } +func TestBitmapAllocatorReserveKernelFrames(t *testing.T) { + var alloc = BitmapAllocator{ + pools: []framePool{ + { + startFrame: pmm.Frame(0), + endFrame: pmm.Frame(7), + freeCount: 8, + freeBitmap: make([]uint64, 1), + }, + { + startFrame: pmm.Frame(64), + endFrame: pmm.Frame(191), + freeCount: 128, + freeBitmap: make([]uint64, 2), + }, + }, + totalPages: 136, + } + + // kernel occupies 16 frames and starts at the beginning of pool 1 + earlyAllocator.kernelStartFrame = pmm.Frame(64) + earlyAllocator.kernelEndFrame = pmm.Frame(79) + kernelSizePages := uint32(earlyAllocator.kernelEndFrame - earlyAllocator.kernelStartFrame + 1) + alloc.reserveKernelFrames() + + if exp, got := kernelSizePages, alloc.reservedPages; got != exp { + t.Fatalf("expected reserved page counter to be %d; got %d", exp, got) + } + + if exp, got := uint32(8), alloc.pools[0].freeCount; got != exp { + t.Fatalf("expected free count for pool 0 to be %d; got %d", exp, got) + } + + if exp, got := 128-kernelSizePages, alloc.pools[1].freeCount; got != exp { + t.Fatalf("expected free count for pool 1 to be %d; got %d", exp, got) + } + + // The first 16 bits of block 0 in pool 1 should all be set to 1 + if exp, got := uint64(((1<<16)-1)<<48), alloc.pools[1].freeBitmap[0]; got != exp { + t.Fatalf("expected block 0 in pool 1 to be:\n%064s\ngot:\n%064s", + strconv.FormatUint(exp, 2), + strconv.FormatUint(got, 2), + ) + } +} + +func TestBitmapAllocatorReserveEarlyAllocatorFrames(t *testing.T) { + var alloc = BitmapAllocator{ + pools: []framePool{ + { + startFrame: pmm.Frame(0), + endFrame: pmm.Frame(63), + freeCount: 64, + freeBitmap: make([]uint64, 1), + }, + { + startFrame: pmm.Frame(64), + endFrame: pmm.Frame(191), + freeCount: 128, + freeBitmap: make([]uint64, 2), + }, + }, + totalPages: 64, + } + + multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0]))) + + // Simulate 16 allocations made using the early allocator in region 0 + // as reported by the multiboot data and move the kernel to pool 1 + allocCount := uint32(16) + earlyAllocator.allocCount = uint64(allocCount) + earlyAllocator.kernelStartFrame = pmm.Frame(256) + earlyAllocator.kernelEndFrame = pmm.Frame(256) + alloc.reserveEarlyAllocatorFrames() + + if exp, got := allocCount, alloc.reservedPages; got != exp { + t.Fatalf("expected reserved page counter to be %d; got %d", exp, got) + } + + if exp, got := 64-allocCount, alloc.pools[0].freeCount; got != exp { + t.Fatalf("expected free count for pool 0 to be %d; got %d", exp, got) + } + + if exp, got := uint32(128), alloc.pools[1].freeCount; got != exp { + t.Fatalf("expected free count for pool 1 to be %d; got %d", exp, got) + } + + // The first 16 bits of block 0 in pool 0 should all be set to 1 + if exp, got := uint64(((1<<16)-1)<<48), alloc.pools[0].freeBitmap[0]; got != exp { + t.Fatalf("expected block 0 in pool 0 to be:\n%064s\ngot:\n%064s", + strconv.FormatUint(exp, 2), + strconv.FormatUint(got, 2), + ) + } +} + func TestAllocatorPackageInit(t *testing.T) { defer func() { 
mapFn = vmm.Map @@ -225,8 +321,6 @@ func TestAllocatorPackageInit(t *testing.T) { var ( physMem = make([]byte, 2*mem.PageSize) - fb = mockTTY() - buf bytes.Buffer ) multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0]))) @@ -239,21 +333,10 @@ func TestAllocatorPackageInit(t *testing.T) { return uintptr(unsafe.Pointer(&physMem[0])), nil } + mockTTY() if err := Init(0x100000, 0x1fa7c8); err != nil { t.Fatal(err) } - - for i := 0; i < len(fb); i += 2 { - if fb[i] == 0x0 { - continue - } - buf.WriteByte(fb[i]) - } - - exp := "[boot_mem_alloc] system memory map: [0x0000000000 - 0x000009fc00], size: 654336, type: available [0x000009fc00 - 0x00000a0000], size: 1024, type: reserved [0x00000f0000 - 0x0000100000], size: 65536, type: reserved [0x0000100000 - 0x0007fe0000], size: 133038080, type: available [0x0007fe0000 - 0x0008000000], size: 131072, type: reserved [0x00fffc0000 - 0x0100000000], size: 262144, type: reserved[boot_mem_alloc] available memory: 130559Kb[boot_mem_alloc] kernel loaded at 0x100000 - 0x1fa7c8[boot_mem_alloc] size: 1025992 bytes, reserved pages: 251" - if got := buf.String(); got != exp { - t.Fatalf("expected printMemoryMap to generate the following output:\n%q\ngot:\n%q", exp, got) - } }) t.Run("error", func(t *testing.T) { From 4de2d54ed40c2a99b3f71f1e3312801cf4a05b48 Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Sun, 18 Jun 2017 08:53:55 +0100 Subject: [PATCH 7/8] Implement AllocFrame/FreeFrame --- kernel/mem/pmm/allocator/bitmap_allocator.go | 59 +++++++++++++++ .../pmm/allocator/bitmap_allocator_test.go | 74 +++++++++++++++++++ 2 files changed, 133 insertions(+) diff --git a/kernel/mem/pmm/allocator/bitmap_allocator.go b/kernel/mem/pmm/allocator/bitmap_allocator.go index a4cb686..7fcd817 100644 --- a/kernel/mem/pmm/allocator/bitmap_allocator.go +++ b/kernel/mem/pmm/allocator/bitmap_allocator.go @@ -1,6 +1,7 @@ package allocator import ( + "math" "reflect" "unsafe" @@ -17,6 +18,10 @@ var ( // primary allocator for reserving pages. FrameAllocator BitmapAllocator + errBitmapAllocOutOfMemory = &kernel.Error{Module: "bitmap_alloc", Message: "out of memory"} + errBitmapAllocFrameNotManaged = &kernel.Error{Module: "bitmap_alloc", Message: "frame not managed by this allocator"} + errBitmapAllocDoubleFree = &kernel.Error{Module: "bitmap_alloc", Message: "frame is already free"} + // The followning functions are used by tests to mock calls to the vmm package // and are automatically inlined by the compiler. reserveRegionFn = vmm.EarlyReserveRegion @@ -238,6 +243,60 @@ func (alloc *BitmapAllocator) printStats() { ) } +// AllocFrame reserves and returns a physical memory frame. An error will be +// returned if no more memory can be allocated. 
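+// +// A minimal usage sketch (hypothetical caller; FreeFrame is added further down +// in this patch): +// +// frame, err := FrameAllocator.AllocFrame() +// if err != nil { /* all pools are exhausted */ } +// defer FrameAllocator.FreeFrame(frame)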
+func (alloc *BitmapAllocator) AllocFrame() (pmm.Frame, *kernel.Error) { + for poolIndex := 0; poolIndex < len(alloc.pools); poolIndex++ { + if alloc.pools[poolIndex].freeCount == 0 { + continue + } + + fullBlock := uint64(math.MaxUint64) + for blockIndex, block := range alloc.pools[poolIndex].freeBitmap { + if block == fullBlock { + continue + } + + // Block has at least one free slot; we need to scan its bits + for blockOffset, mask := 0, uint64(1<<63); mask > 0; blockOffset, mask = blockOffset+1, mask>>1 { + if block&mask != 0 { + continue + } + + alloc.pools[poolIndex].freeCount-- + alloc.pools[poolIndex].freeBitmap[blockIndex] |= mask + alloc.reservedPages++ + return alloc.pools[poolIndex].startFrame + pmm.Frame((blockIndex<<6)+blockOffset), nil + } + } + } + + return pmm.InvalidFrame, errBitmapAllocOutOfMemory +} + +// FreeFrame releases a frame previously allocated via a call to AllocFrame. +// Trying to release a frame not part of the allocator pools or a frame that +// is already marked as free will cause an error to be returned. +func (alloc *BitmapAllocator) FreeFrame(frame pmm.Frame) *kernel.Error { + poolIndex := alloc.poolForFrame(frame) + if poolIndex < 0 { + return errBitmapAllocFrameNotManaged + } + + relFrame := frame - alloc.pools[poolIndex].startFrame + block := relFrame >> 6 + mask := uint64(1 << (63 - (relFrame - block<<6))) + + if alloc.pools[poolIndex].freeBitmap[block]&mask == 0 { + return errBitmapAllocDoubleFree + } + + alloc.pools[poolIndex].freeBitmap[block] &^= mask + alloc.pools[poolIndex].freeCount++ + alloc.reservedPages-- + return nil +} + // earlyAllocFrame is a helper that delegates a frame allocation request to the // early allocator instance. This function is passed as an argument to // vmm.SetFrameAllocator instead of earlyAllocator.AllocFrame. 
The latter diff --git a/kernel/mem/pmm/allocator/bitmap_allocator_test.go b/kernel/mem/pmm/allocator/bitmap_allocator_test.go index 4af2d36..702e329 100644 --- a/kernel/mem/pmm/allocator/bitmap_allocator_test.go +++ b/kernel/mem/pmm/allocator/bitmap_allocator_test.go @@ -313,6 +313,80 @@ func TestBitmapAllocatorReserveEarlyAllocatorFrames(t *testing.T) { } } +func TestBitmapAllocatorAllocAndFreeFrame(t *testing.T) { + var alloc = BitmapAllocator{ + pools: []framePool{ + { + startFrame: pmm.Frame(0), + endFrame: pmm.Frame(7), + freeCount: 8, + // only the first 8 bits of block 0 are used + freeBitmap: make([]uint64, 1), + }, + { + startFrame: pmm.Frame(64), + endFrame: pmm.Frame(191), + freeCount: 128, + freeBitmap: make([]uint64, 2), + }, + }, + totalPages: 136, + } + + // Test Alloc + for poolIndex, pool := range alloc.pools { + for expFrame := pool.startFrame; expFrame <= pool.endFrame; expFrame++ { + got, err := alloc.AllocFrame() + if err != nil { + t.Fatalf("[pool %d] unexpected error: %v", err) + } + + if got != expFrame { + t.Errorf("[pool %d] expected allocated frame to be %d; got %d", poolIndex, expFrame, got) + } + } + + if alloc.pools[poolIndex].freeCount != 0 { + t.Errorf("[pool %d] expected free count to be 0; got %d", poolIndex, alloc.pools[poolIndex].freeCount) + } + } + + if alloc.reservedPages != alloc.totalPages { + t.Errorf("expected reservedPages to match totalPages(%d); got %d", alloc.totalPages, alloc.reservedPages) + } + + if _, err := alloc.AllocFrame(); err != errBitmapAllocOutOfMemory { + t.Fatalf("expected error errBitmapAllocOutOfMemory; got %v", err) + } + + // Test Free + expFreeCount := []uint32{8, 128} + for poolIndex, pool := range alloc.pools { + for frame := pool.startFrame; frame <= pool.endFrame; frame++ { + if err := alloc.FreeFrame(frame); err != nil { + t.Fatalf("[pool %d] unexpected error: %v", err) + } + } + + if alloc.pools[poolIndex].freeCount != expFreeCount[poolIndex] { + t.Errorf("[pool %d] expected free count to be %d; got %d", poolIndex, expFreeCount[poolIndex], alloc.pools[poolIndex].freeCount) + } + } + + if alloc.reservedPages != 0 { + t.Errorf("expected reservedPages to be 0; got %d", alloc.reservedPages) + } + + // Test Free errors + if err := alloc.FreeFrame(pmm.Frame(0)); err != errBitmapAllocDoubleFree { + t.Fatalf("expected error errBitmapAllocDoubleFree; got %v", err) + } + + if err := alloc.FreeFrame(pmm.Frame(0xbadf00d)); err != errBitmapAllocFrameNotManaged { + t.Fatalf("expected error errBitmapAllocFrameNotManaged; got %v", err) + } +} + func TestAllocatorPackageInit(t *testing.T) { defer func() { mapFn = vmm.Map From 6d195d82f54f9f445eb9438f957a0f9eb00a4ae6 Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Sun, 18 Jun 2017 08:58:05 +0100 Subject: [PATCH 8/8] Make vmm use the bitmap allocator after it is initialized --- kernel/mem/pmm/allocator/bitmap_allocator.go | 13 ++++++++++++- kernel/mem/pmm/allocator/bitmap_allocator_test.go | 9 +++++++-- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/kernel/mem/pmm/allocator/bitmap_allocator.go b/kernel/mem/pmm/allocator/bitmap_allocator.go index 7fcd817..392f237 100644 --- a/kernel/mem/pmm/allocator/bitmap_allocator.go +++ b/kernel/mem/pmm/allocator/bitmap_allocator.go @@ -306,11 +306,22 @@ func earlyAllocFrame() (pmm.Frame, *kernel.Error) { return earlyAllocator.AllocFrame() } +// sysAllocFrame is a helper that delegates a frame allocation request to the +// bitmap allocator instance.
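+// Like earlyAllocFrame, it is deliberately a plain function rather than the +// method value FrameAllocator.AllocFrame, so that the compiler's escape +// analysis does not incorrectly flag the allocator as escaping to the heap.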
+func sysAllocFrame() (pmm.Frame, *kernel.Error) { + return FrameAllocator.AllocFrame() +} + // Init sets up the kernel physical memory allocation sub-system. func Init(kernelStart, kernelEnd uintptr) *kernel.Error { earlyAllocator.init(kernelStart, kernelEnd) earlyAllocator.printMemoryMap() vmm.SetFrameAllocator(earlyAllocFrame) - return FrameAllocator.init() + if err := FrameAllocator.init(); err != nil { + return err + } + vmm.SetFrameAllocator(sysAllocFrame) + + return nil } diff --git a/kernel/mem/pmm/allocator/bitmap_allocator_test.go b/kernel/mem/pmm/allocator/bitmap_allocator_test.go index 702e329..0bb97e6 100644 --- a/kernel/mem/pmm/allocator/bitmap_allocator_test.go +++ b/kernel/mem/pmm/allocator/bitmap_allocator_test.go @@ -338,7 +338,7 @@ func TestBitmapAllocatorAllocAndFreeFrame(t *testing.T) { for expFrame := pool.startFrame; expFrame <= pool.endFrame; expFrame++ { got, err := alloc.AllocFrame() if err != nil { - t.Fatalf("[pool %d] unexpected error: %v", err) + t.Fatalf("[pool %d] unexpected error: %v", poolIndex, err) } if got != expFrame { @@ -364,7 +364,7 @@ func TestBitmapAllocatorAllocAndFreeFrame(t *testing.T) { for poolIndex, pool := range alloc.pools { for frame := pool.startFrame; frame <= pool.endFrame; frame++ { if err := alloc.FreeFrame(frame); err != nil { - t.Fatalf("[pool %d] unexpected error: %v", err) + t.Fatalf("[pool %d] unexpected error: %v", poolIndex, err) } } @@ -411,6 +411,11 @@ func TestAllocatorPackageInit(t *testing.T) { if err := Init(0x100000, 0x1fa7c8); err != nil { t.Fatal(err) } + + // At this point sysAllocFrame should work + if _, err := sysAllocFrame(); err != nil { + t.Fatal(err) + } }) t.Run("error", func(t *testing.T) {
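A rough end-to-end sketch of the flow this series builds up (kernelStart, kernelEnd and virtAddr below are placeholder values; error handling abbreviated):

	// allocator.Init bootstraps the early bootmem allocator, registers it
	// with vmm, initializes the bitmap allocator from the multiboot memory
	// map and finally swaps in the bitmap allocator as the system-wide
	// frame source.
	if err := allocator.Init(kernelStart, kernelEnd); err != nil {
		// physical memory setup failed
	}

	// From this point on, vmm.Map transparently allocates any missing
	// page-table frames through allocator.FrameAllocator.
	frame, _ := allocator.FrameAllocator.AllocFrame()
	_ = vmm.Map(vmm.PageFromAddress(virtAddr), frame, vmm.FlagPresent|vmm.FlagRW)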