diff --git a/arch/x86_64/asm/rt0_64.s b/arch/x86_64/asm/rt0_64.s index e39a2f9..19fd1b1 100644 --- a/arch/x86_64/asm/rt0_64.s +++ b/arch/x86_64/asm/rt0_64.s @@ -1,4 +1,5 @@ ; vim: set ft=nasm : +%include "constants.inc" section .bss align 8 @@ -55,8 +56,14 @@ _rt0_64_entry: ; Call the kernel entry point passing a pointer to the multiboot data ; copied by the 32-bit entry code extern multiboot_data + extern _kernel_start + extern _kernel_end extern kernel.Kmain + mov rax, _kernel_end - PAGE_OFFSET + push rax + mov rax, _kernel_start - PAGE_OFFSET + push rax mov rax, multiboot_data push rax call kernel.Kmain diff --git a/arch/x86_64/script/linker.ld.in b/arch/x86_64/script/linker.ld.in index 7d3cf53..31a8ae0 100644 --- a/arch/x86_64/script/linker.ld.in +++ b/arch/x86_64/script/linker.ld.in @@ -7,6 +7,8 @@ SECTIONS { * but load it at physical address 1M */ . = VMA; + _kernel_start = .; + .text BLOCK(4K) : AT(ADDR(.text) - PAGE_OFFSET) { /* The multiboot header must be present in the first 4K of the kernel @@ -36,4 +38,6 @@ SECTIONS { *(COMMON) *(.bss) } + + _kernel_end = ALIGN(4K); } diff --git a/kernel/kmain/kmain.go b/kernel/kmain/kmain.go index 9ee1f8f..8d84538 100644 --- a/kernel/kmain/kmain.go +++ b/kernel/kmain/kmain.go @@ -3,7 +3,8 @@ package kmain import ( "github.com/achilleasa/gopher-os/kernel/hal" "github.com/achilleasa/gopher-os/kernel/hal/multiboot" - "github.com/achilleasa/gopher-os/kernel/mem/pmm" + "github.com/achilleasa/gopher-os/kernel/kfmt/early" + "github.com/achilleasa/gopher-os/kernel/mem/pmm/allocator" ) // Kmain is the only Go symbol that is visible (exported) from the rt0 initialization @@ -12,16 +13,18 @@ import ( // allocated by the assembly code. // // The rt0 code passes the address of the multiboot info payload provided by the -// bootloader. +// bootloader as well as the physical addresses for the kernel start/end. // // Kmain is not expected to return. If it does, the rt0 code will halt the CPU. // //go:noinline -func Kmain(multibootInfoPtr uintptr) { +func Kmain(multibootInfoPtr, kernelStart, kernelEnd uintptr) { multiboot.SetInfoPtr(multibootInfoPtr) hal.InitTerminal() hal.ActiveTerminal.Clear() - pmm.EarlyAllocator.Init() + if err := allocator.Init(kernelStart, kernelEnd); err != nil { + early.Printf("[%s] error: %s\n", err.Module, err.Message) + } } diff --git a/kernel/mem/pmm/allocator/bootmem.go b/kernel/mem/pmm/allocator/bootmem.go new file mode 100644 index 0000000..4f6910c --- /dev/null +++ b/kernel/mem/pmm/allocator/bootmem.go @@ -0,0 +1,143 @@ +package allocator + +import ( + "github.com/achilleasa/gopher-os/kernel" + "github.com/achilleasa/gopher-os/kernel/hal/multiboot" + "github.com/achilleasa/gopher-os/kernel/kfmt/early" + "github.com/achilleasa/gopher-os/kernel/mem" + "github.com/achilleasa/gopher-os/kernel/mem/pmm" +) + +var ( + // earlyAllocator is a boot mem allocator instance used for page + // allocations before switching to a more advanced allocator. + earlyAllocator bootMemAllocator + + errBootAllocOutOfMemory = &kernel.Error{Module: "boot_mem_alloc", Message: "out of memory"} +) + +// bootMemAllocator implements a rudimentary physical memory allocator which is +// used to bootstrap the kernel. +// +// The allocator implementation uses the memory region information provided by +// the bootloader to detect free memory blocks and return the next available +// free frame. Allocations are tracked via an internal counter that contains +// the last allocated frame. 
+// +// Due to the way that the allocator works, it is not possible to free +// allocated pages. Once the kernel is properly initialized, the allocated +// blocks will be handed over to a more advanced memory allocator that does +// support freeing. +type bootMemAllocator struct { + // allocCount tracks the total number of allocated frames. + allocCount uint64 + + // lastAllocFrame tracks the last allocated frame number. + lastAllocFrame pmm.Frame + + // Keep track of kernel location so we exclude this region. + kernelStartAddr, kernelEndAddr uintptr + kernelStartFrame, kernelEndFrame pmm.Frame +} + +// init sets up the boot memory allocator internal state. +func (alloc *bootMemAllocator) init(kernelStart, kernelEnd uintptr) { + // round down kernel start to the nearest page and round up kernel end + // to the nearest page. + pageSizeMinus1 := uintptr(mem.PageSize - 1) + alloc.kernelStartAddr = kernelStart + alloc.kernelEndAddr = kernelEnd + alloc.kernelStartFrame = pmm.Frame((kernelStart & ^pageSizeMinus1) >> mem.PageShift) + alloc.kernelEndFrame = pmm.Frame(((kernelEnd+pageSizeMinus1) & ^pageSizeMinus1)>>mem.PageShift) - 1 + +} + +// AllocFrame scans the system memory regions reported by the bootloader and +// reserves the next available free frame. +// +// AllocFrame returns an error if no more memory can be allocated. +func (alloc *bootMemAllocator) AllocFrame() (pmm.Frame, *kernel.Error) { + var err = errBootAllocOutOfMemory + + multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool { + // Ignore reserved regions and regions smaller than a single page + if region.Type != multiboot.MemAvailable || region.Length < uint64(mem.PageSize) { + return true + } + + // Reported addresses may not be page-aligned; round up to get + // the start frame and round down to get the end frame + pageSizeMinus1 := uint64(mem.PageSize - 1) + regionStartFrame := pmm.Frame(((region.PhysAddress + pageSizeMinus1) & ^pageSizeMinus1) >> mem.PageShift) + regionEndFrame := pmm.Frame(((region.PhysAddress+region.Length) & ^pageSizeMinus1)>>mem.PageShift) - 1 + + // Skip over already allocated regions + if alloc.lastAllocFrame >= regionEndFrame { + return true + } + + // If last frame used a different region and the kernel image + // is located at the beginning of this region OR we are in + // current region but lastAllocFrame + 1 points to the kernel + // start we need to jump to the page following the kernel end + // frame + if (alloc.lastAllocFrame <= regionStartFrame && alloc.kernelStartFrame == regionStartFrame) || + (alloc.lastAllocFrame <= regionEndFrame && alloc.lastAllocFrame+1 == alloc.kernelStartFrame) { + //fmt.Printf("last: %d, case: 1, set last: %d\n", alloc.lastAllocFrame, alloc.kernelEndFrame+1) + alloc.lastAllocFrame = alloc.kernelEndFrame + 1 + } else if alloc.lastAllocFrame < regionStartFrame || alloc.allocCount == 0 { + // we are in the previous region and need to jump to this one OR + // this is the first allocation and the region begins at frame 0 + //fmt.Printf("last: %d, case: 2, set last: %d\n", alloc.lastAllocFrame, regionStartFrame) + alloc.lastAllocFrame = regionStartFrame + } else { + // we are in the region and we can select the next frame + //fmt.Printf("last: %d, case: 3, set last: %d\n", alloc.lastAllocFrame, alloc.lastAllocFrame+1) + alloc.lastAllocFrame++ + } + + // The above adjustment might push lastAllocFrame outside of the + // region end (e.g kernel ends at last page in the region) + if alloc.lastAllocFrame > regionEndFrame { + return true + } + + err = nil + 
return false + }) + + if err != nil { + return pmm.InvalidFrame, errBootAllocOutOfMemory + } + + alloc.allocCount++ + return alloc.lastAllocFrame, nil +} + +// printMemoryMap scans the memory region information provided by the +// bootloader and prints out the system's memory map. +func (alloc *bootMemAllocator) printMemoryMap() { + early.Printf("[boot_mem_alloc] system memory map:\n") + var totalFree mem.Size + multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool { + early.Printf("\t[0x%10x - 0x%10x], size: %10d, type: %s\n", region.PhysAddress, region.PhysAddress+region.Length, region.Length, region.Type.String()) + + if region.Type == multiboot.MemAvailable { + totalFree += mem.Size(region.Length) + } + return true + }) + early.Printf("[boot_mem_alloc] available memory: %dKb\n", uint64(totalFree/mem.Kb)) + early.Printf("[boot_mem_alloc] kernel loaded at 0x%x - 0x%x\n", alloc.kernelStartAddr, alloc.kernelEndAddr) + early.Printf("[boot_mem_alloc] size: %d bytes, reserved pages: %d\n", + uint64(alloc.kernelEndAddr-alloc.kernelStartAddr), + uint64(alloc.kernelEndFrame-alloc.kernelStartFrame+1), + ) +} + +// Init sets up the kernel physical memory allocation sub-system. +func Init(kernelStart, kernelEnd uintptr) *kernel.Error { + earlyAllocator.init(kernelStart, kernelEnd) + earlyAllocator.printMemoryMap() + return nil +} diff --git a/kernel/mem/pmm/allocator/bootmem_test.go b/kernel/mem/pmm/allocator/bootmem_test.go new file mode 100644 index 0000000..f6964c5 --- /dev/null +++ b/kernel/mem/pmm/allocator/bootmem_test.go @@ -0,0 +1,152 @@ +package allocator + +import ( + "bytes" + "testing" + "unsafe" + + "github.com/achilleasa/gopher-os/kernel/driver/video/console" + "github.com/achilleasa/gopher-os/kernel/hal" + "github.com/achilleasa/gopher-os/kernel/hal/multiboot" +) + +func TestBootMemoryAllocator(t *testing.T) { + multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0]))) + + specs := []struct { + kernelStart, kernelEnd uintptr + expAllocCount uint64 + }{ + { + // the kernel is loaded in a reserved memory region + 0xa0000, + 0xa0000, + // region 1 extents get rounded to [0, 9f000] and provides 159 frames [0 to 158] + // region 1 uses the original extents [100000 - 7fe0000] and provides 32480 frames [256-32735] + 159 + 32480, + }, + { + // the kernel is loaded at the beginning of region 1 taking 2.5 pages + 0x0, + 0x2800, + // region 1 extents get rounded to [0, 9f000] and provides 159 frames [0 to 158]; out of these + // frames 0,1 and 2 (round up kernel end) are used by the kernel + // region 1 uses the original extents [100000 - 7fe0000] and provides 32480 frames [256-32735] + 159 - 3 + 32480, + }, + { + // the kernel is loaded at the end of region 1 taking 2.5 pages + 0x9c800, + 0x9f000, + // region 1 extents get rounded to [0, 9f000] and provides 159 frames [0 to 158]; out of these + // frames 156,157 and 158 (round down kernel start) are used by the kernel + // region 1 uses the original extents [100000 - 7fe0000] and provides 32480 frames [256-32735] + 159 - 3 + 32480, + }, + { + // the kernel (after rounding) uses the entire region 1 + 0x123, + 0x9fc00, + // region 1 extents get rounded to [0, 9f000] and provides 159 frames [0 to 158]; all are used + // by the kernel + // region 1 uses the original extents [100000 - 7fe0000] and provides 32480 frames [256-32735] + 32480, + }, + { + // the kernel is loaded at region 2 start + 2K taking 1.5 pages + 0x100800, + 0x102000, + // region 1 extents get rounded to [0, 9f000] and provides 159 frames [0 to 158] + 
// region 1 uses the original extents [100000 - 7fe0000] and provides 32480 frames [256-32735]; + // out of these frames 256 (kernel start rounded down) and 257 is used by the kernel + 159 + 32480 - 2, + }, + } + + var alloc bootMemAllocator + for specIndex, spec := range specs { + alloc.allocCount = 0 + alloc.lastAllocFrame = 0 + alloc.init(spec.kernelStart, spec.kernelEnd) + + for { + frame, err := alloc.AllocFrame() + if err != nil { + if err == errBootAllocOutOfMemory { + break + } + t.Errorf("[spec %d] [frame %d] unexpected allocator error: %v", specIndex, alloc.allocCount, err) + break + } + + if frame != alloc.lastAllocFrame { + t.Errorf("[spec %d] [frame %d] expected allocated frame to be %d; got %d", specIndex, alloc.allocCount, alloc.lastAllocFrame, frame) + } + + if !frame.Valid() { + t.Errorf("[spec %d] [frame %d] expected IsValid() to return true", specIndex, alloc.allocCount) + } + } + + if alloc.allocCount != spec.expAllocCount { + t.Errorf("[spec %d] expected allocator to allocate %d frames; allocated %d", specIndex, spec.expAllocCount, alloc.allocCount) + } + } +} + +func TestAllocatorPackageInit(t *testing.T) { + fb := mockTTY() + multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0]))) + + Init(0x100000, 0x1fa7c8) + + var buf bytes.Buffer + for i := 0; i < len(fb); i += 2 { + if fb[i] == 0x0 { + continue + } + buf.WriteByte(fb[i]) + } + + exp := "[boot_mem_alloc] system memory map: [0x0000000000 - 0x000009fc00], size: 654336, type: available [0x000009fc00 - 0x00000a0000], size: 1024, type: reserved [0x00000f0000 - 0x0000100000], size: 65536, type: reserved [0x0000100000 - 0x0007fe0000], size: 133038080, type: available [0x0007fe0000 - 0x0008000000], size: 131072, type: reserved [0x00fffc0000 - 0x0100000000], size: 262144, type: reserved[boot_mem_alloc] available memory: 130559Kb[boot_mem_alloc] kernel loaded at 0x100000 - 0x1fa7c8[boot_mem_alloc] size: 1025992 bytes, reserved pages: 251" + if got := buf.String(); got != exp { + t.Fatalf("expected printMemoryMap to generate the following output:\n%q\ngot:\n%q", exp, got) + } +} + +var ( + // A dump of multiboot data when running under qemu containing only the + // memory region tag. 
The dump encodes the following available memory + // regions: + // [ 0 - 9fc00] length: 654336 + // [100000 - 7fe0000] length: 133038080 + multibootMemoryMap = []byte{ + 72, 5, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, 160, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 252, 9, 0, 0, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 0, 0, 252, 9, 0, 0, 0, 0, 0, + 0, 4, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, + 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, + 0, 0, 238, 7, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 254, 7, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 252, 255, 0, 0, 0, 0, + 0, 0, 4, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, + 9, 0, 0, 0, 212, 3, 0, 0, 24, 0, 0, 0, 40, 0, 0, 0, + 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 27, 0, 0, 0, + 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 16, 0, 0, 16, 0, 0, + 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + } +) + +func mockTTY() []byte { + // Mock a tty to handle early.Printf output + mockConsoleFb := make([]byte, 160*25) + mockConsole := &console.Ega{} + mockConsole.Init(80, 25, uintptr(unsafe.Pointer(&mockConsoleFb[0]))) + hal.ActiveTerminal.AttachTo(mockConsole) + + return mockConsoleFb +} diff --git a/kernel/mem/pmm/bootmem_allocator.go b/kernel/mem/pmm/bootmem_allocator.go deleted file mode 100644 index fe3af20..0000000 --- a/kernel/mem/pmm/bootmem_allocator.go +++ /dev/null @@ -1,112 +0,0 @@ -package pmm - -import ( - "github.com/achilleasa/gopher-os/kernel" - "github.com/achilleasa/gopher-os/kernel/hal/multiboot" - "github.com/achilleasa/gopher-os/kernel/kfmt/early" - "github.com/achilleasa/gopher-os/kernel/mem" -) - -var ( - // EarlyAllocator points to a static instance of the boot memory allocator - // which is used to bootstrap the kernel before initializing a more - // advanced memory allocator. - EarlyAllocator BootMemAllocator - - errBootAllocUnsupportedPageSize = &kernel.Error{Module: "pmm.BootMemAllocator", Message: "allocator only support allocation requests of order(0)"} - errBootAllocOutOfMemory = &kernel.Error{Module: "pmm.BootMemAllocator", Message: "out of memory"} -) - -// BootMemAllocator implements a rudimentary physical memory allocator which is used -// to bootstrap the kernel. -// -// The allocator implementation uses the memory region information provided by -// the bootloader to detect free memory blocks and return the next available -// free frame. -// -// Allocations are tracked via an internal counter that contains the last -// allocated frame index. The system memory regions are mapped into a linear -// page index by aligning the region start address to the system's page size -// and then dividing by the page size. -// -// Due to the way that the allocator works, it is not possible to free -// allocated pages. Once the kernel is properly initialized, the allocated -// blocks will be handed over to a more advanced memory allocator that does -// support freeing. -type BootMemAllocator struct { - // allocCount tracks the total number of allocated frames. - allocCount uint64 - - // lastAllocIndex tracks the last allocated frame index. - lastAllocIndex int64 -} - -// Init sets up the boot memory allocator internal state and prints out the -// system memory map. 
-func (alloc *BootMemAllocator) Init() { - alloc.lastAllocIndex = -1 - - early.Printf("[boot_mem_alloc] system memory map:\n") - var totalFree mem.Size - multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool { - early.Printf("\t[0x%10x - 0x%10x], size: %10d, type: %s\n", region.PhysAddress, region.PhysAddress+region.Length, region.Length, region.Type.String()) - - if region.Type == multiboot.MemAvailable { - totalFree += mem.Size(region.Length) - } - return true - }) - early.Printf("[boot_mem_alloc] free memory: %dKb\n", uint64(totalFree/mem.Kb)) -} - -// AllocFrame scans the system memory regions reported by the bootloader and -// reserves the next available free frame. -// -// AllocFrame returns an error if no more memory can be allocated or when the -// requested page order is > 0. -func (alloc *BootMemAllocator) AllocFrame(order mem.PageOrder) (Frame, *kernel.Error) { - if order > 0 { - return InvalidFrame, errBootAllocUnsupportedPageSize - } - - var ( - foundPageIndex int64 = -1 - regionStartPageIndex, regionEndPageIndex int64 - ) - multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool { - if region.Type != multiboot.MemAvailable { - return true - } - - // Align region start address to a page boundary and find the start - // and end page indices for the region - regionStartPageIndex = int64(((mem.Size(region.PhysAddress) + (mem.PageSize - 1)) & ^(mem.PageSize - 1)) >> mem.PageShift) - regionEndPageIndex = int64(((mem.Size(region.PhysAddress+region.Length) - (mem.PageSize - 1)) & ^(mem.PageSize - 1)) >> mem.PageShift) - - // Ignore already allocated regions - if alloc.lastAllocIndex >= regionEndPageIndex { - return true - } - - // We found a block that can be allocated. The last allocated - // index will be either pointing to a previous region or will - // point inside this region. In the first case we just need to - // select the regionStartPageIndex. In the latter case we can - // simply select the next available page in the current region. 
- if alloc.lastAllocIndex < regionStartPageIndex { - foundPageIndex = regionStartPageIndex - } else { - foundPageIndex = alloc.lastAllocIndex + 1 - } - return false - }) - - if foundPageIndex == -1 { - return InvalidFrame, errBootAllocOutOfMemory - } - - alloc.allocCount++ - alloc.lastAllocIndex = foundPageIndex - - return Frame(foundPageIndex), nil -} diff --git a/kernel/mem/pmm/bootmem_allocator_test.go b/kernel/mem/pmm/bootmem_allocator_test.go deleted file mode 100644 index a7319d7..0000000 --- a/kernel/mem/pmm/bootmem_allocator_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package pmm - -import ( - "testing" - "unsafe" - - "github.com/achilleasa/gopher-os/kernel/driver/video/console" - "github.com/achilleasa/gopher-os/kernel/hal" - "github.com/achilleasa/gopher-os/kernel/hal/multiboot" - "github.com/achilleasa/gopher-os/kernel/mem" -) - -func TestBootMemoryAllocator(t *testing.T) { - // Mock a tty to handle early.Printf output - mockConsoleFb := make([]byte, 160*25) - mockConsole := &console.Ega{} - mockConsole.Init(80, 25, uintptr(unsafe.Pointer(&mockConsoleFb[0]))) - hal.ActiveTerminal.AttachTo(mockConsole) - - multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0]))) - - var totalFreeFrames uint64 - multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool { - if region.Type == multiboot.MemAvailable { - regionStartFrameIndex := uint64(((mem.Size(region.PhysAddress) + (mem.PageSize - 1)) & ^(mem.PageSize - 1)) >> mem.PageShift) - regionEndFrameIndex := uint64(((mem.Size(region.PhysAddress+region.Length) - (mem.PageSize - 1)) & ^(mem.PageSize - 1)) >> mem.PageShift) - - totalFreeFrames += regionEndFrameIndex - regionStartFrameIndex + 1 - } - - return true - }) - - var ( - alloc BootMemAllocator - allocFrameCount uint64 - ) - for alloc.Init(); ; allocFrameCount++ { - frame, err := alloc.AllocFrame(mem.PageOrder(0)) - if err != nil { - if err == errBootAllocOutOfMemory { - break - } - t.Fatalf("[frame %d] unexpected allocator error: %v", allocFrameCount, err) - } - - expAddress := uintptr(uint64(alloc.lastAllocIndex) * uint64(mem.PageSize)) - if got := frame.Address(); got != expAddress { - t.Errorf("[frame %d] expected frame address to be 0x%x; got 0x%x", allocFrameCount, expAddress, got) - } - - if !frame.Valid() { - t.Errorf("[frame %d] expected IsValid() to return true", allocFrameCount) - } - } - - if allocFrameCount != totalFreeFrames { - t.Fatalf("expected allocator to allocate %d frames; allocated %d", totalFreeFrames, allocFrameCount) - } - - // This allocator only works with order(0) blocks - if frame, err := alloc.AllocFrame(mem.PageOrder(1)); err != errBootAllocUnsupportedPageSize || frame.Valid() { - t.Fatalf("expected allocator to return errBootAllocUnsupportedPageSize and an invalid frame when requested to allocate a block with order > 0; got %v, %v", err, frame) - } -} - -var ( - // A dump of multiboot data when running under qemu containing only the memory region tag. 
- multibootMemoryMap = []byte{ - 72, 5, 0, 0, 0, 0, 0, 0, - 6, 0, 0, 0, 160, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 252, 9, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 0, 0, 0, 0, 0, 252, 9, 0, 0, 0, 0, 0, - 0, 4, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, - 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, - 0, 0, 238, 7, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 254, 7, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, - 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 252, 255, 0, 0, 0, 0, - 0, 0, 4, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, - 9, 0, 0, 0, 212, 3, 0, 0, 24, 0, 0, 0, 40, 0, 0, 0, - 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 27, 0, 0, 0, - 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 16, 0, 0, 16, 0, 0, - 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - } -) diff --git a/kernel/mem/size.go b/kernel/mem/size.go index 92ed323..626d4d5 100644 --- a/kernel/mem/size.go +++ b/kernel/mem/size.go @@ -10,11 +10,3 @@ const ( Mb = 1024 * Kb Gb = 1024 * Mb ) - -// PageOrder represents a power-of-two multiple of the base page size and is -// used as an argument to page-based memory allocators. -// -// PageOrder(0) refers to a page with size PageSize << 0 -// PageOrder(1) refers to a page with size PageSize << 1 -// ... -type PageOrder uint8 diff --git a/kernel/mem/vmm/map.go b/kernel/mem/vmm/map.go index bd8781c..68aadf9 100644 --- a/kernel/mem/vmm/map.go +++ b/kernel/mem/vmm/map.go @@ -23,14 +23,14 @@ var ( errNoHugePageSupport = &kernel.Error{Module: "vmm", Message: "huge pages are not supported"} ) -// FrameAllocator is a function that can allocate physical frames of the specified order. -type FrameAllocator func(mem.PageOrder) (pmm.Frame, *kernel.Error) +// FrameAllocatorFn is a function that can allocate physical frames. +type FrameAllocatorFn func() (pmm.Frame, *kernel.Error) // Map establishes a mapping between a virtual page and a physical memory frame // using the currently active page directory table. Calls to Map will use the // supplied physical frame allocator to initialize missing page tables at each // paging level supported by the MMU. -func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllocator) *kernel.Error { +func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllocatorFn) *kernel.Error { var err *kernel.Error walk(page.Address(), func(pteLevel uint8, pte *pageTableEntry) bool { @@ -53,7 +53,7 @@ func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllo // physical frame for it map it and clear its contents. if !pte.HasFlags(FlagPresent) { var newTableFrame pmm.Frame - newTableFrame, err = allocFn(mem.PageOrder(0)) + newTableFrame, err = allocFn() if err != nil { return false } @@ -78,7 +78,7 @@ func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllo // to a fixed virtual address overwriting any previous mapping. The temporary // mapping mechanism is primarily used by the kernel to access and initialize // inactive page tables. 
-func MapTemporary(frame pmm.Frame, allocFn FrameAllocator) (Page, *kernel.Error) { +func MapTemporary(frame pmm.Frame, allocFn FrameAllocatorFn) (Page, *kernel.Error) { if err := Map(PageFromAddress(tempMappingAddr), frame, FlagRW, allocFn); err != nil { return 0, err } diff --git a/kernel/mem/vmm/map_test.go b/kernel/mem/vmm/map_test.go index ee03a3e..9c961ee 100644 --- a/kernel/mem/vmm/map_test.go +++ b/kernel/mem/vmm/map_test.go @@ -32,7 +32,7 @@ func TestMapTemporaryAmd64(t *testing.T) { nextPhysPage := 0 // allocFn returns pages from index 1; we keep index 0 for the P4 entry - allocFn := func(_ mem.PageOrder) (pmm.Frame, *kernel.Error) { + allocFn := func() (pmm.Frame, *kernel.Error) { nextPhysPage++ pageAddr := unsafe.Pointer(&physPages[nextPhysPage][0]) return pmm.Frame(uintptr(pageAddr) >> mem.PageShift), nil @@ -134,7 +134,7 @@ func TestMapTemporaryErrorsAmd64(t *testing.T) { expErr := &kernel.Error{Module: "test", Message: "out of memory"} - allocFn := func(_ mem.PageOrder) (pmm.Frame, *kernel.Error) { + allocFn := func() (pmm.Frame, *kernel.Error) { return 0, expErr } diff --git a/kernel/mem/vmm/pdt.go b/kernel/mem/vmm/pdt.go index 9762573..8011d31 100644 --- a/kernel/mem/vmm/pdt.go +++ b/kernel/mem/vmm/pdt.go @@ -39,7 +39,7 @@ type PageDirectoryTable struct { // Init can: // - call mem.Memset to clear the frame contents // - setup a recursive mapping for the last table entry to the page itself. -func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame, allocFn FrameAllocator) *kernel.Error { +func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame, allocFn FrameAllocatorFn) *kernel.Error { pdt.pdtFrame = pdtFrame // Check active PDT physical address. If it matches the input pdt then @@ -73,7 +73,7 @@ func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame, allocFn FrameAllocator) // function with the difference that it also supports inactive page PDTs by // establishing a temporary mapping so that Map() can access the inactive PDT // entries. 
-func (pdt PageDirectoryTable) Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllocator) *kernel.Error { +func (pdt PageDirectoryTable) Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllocatorFn) *kernel.Error { var ( activePdtFrame = pmm.Frame(activePDTFn() >> mem.PageShift) lastPdtEntryAddr uintptr diff --git a/kernel/mem/vmm/pdt_test.go b/kernel/mem/vmm/pdt_test.go index 64172b7..8bc9442 100644 --- a/kernel/mem/vmm/pdt_test.go +++ b/kernel/mem/vmm/pdt_test.go @@ -15,7 +15,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) { t.Skip("test requires amd64 runtime; skipping") } - defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMapTemporary func(pmm.Frame, FrameAllocator) (Page, *kernel.Error), origUnmap func(Page) *kernel.Error) { + defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMapTemporary func(pmm.Frame, FrameAllocatorFn) (Page, *kernel.Error), origUnmap func(Page) *kernel.Error) { flushTLBEntryFn = origFlushTLBEntry activePDTFn = origActivePDT mapTemporaryFn = origMapTemporary @@ -32,7 +32,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) { return pdtFrame.Address() } - mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocator) (Page, *kernel.Error) { + mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocatorFn) (Page, *kernel.Error) { t.Fatal("unexpected call to MapTemporary") return 0, nil } @@ -61,7 +61,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) { return 0 } - mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocator) (Page, *kernel.Error) { + mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocatorFn) (Page, *kernel.Error) { return PageFromAddress(uintptr(unsafe.Pointer(&physPage[0]))), nil } @@ -110,7 +110,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) { expErr := &kernel.Error{Module: "test", Message: "error mapping page"} - mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocator) (Page, *kernel.Error) { + mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocatorFn) (Page, *kernel.Error) { return 0, expErr } @@ -130,7 +130,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) { t.Skip("test requires amd64 runtime; skipping") } - defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMap func(Page, pmm.Frame, PageTableEntryFlag, FrameAllocator) *kernel.Error) { + defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMap func(Page, pmm.Frame, PageTableEntryFlag, FrameAllocatorFn) *kernel.Error) { flushTLBEntryFn = origFlushTLBEntry activePDTFn = origActivePDT mapFn = origMap @@ -147,7 +147,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) { return pdtFrame.Address() } - mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag, _ FrameAllocator) *kernel.Error { + mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag, _ FrameAllocatorFn) *kernel.Error { return nil } @@ -182,7 +182,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) { return activePdtFrame.Address() } - mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag, _ FrameAllocator) *kernel.Error { + mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag, _ FrameAllocatorFn) *kernel.Error { return nil } diff --git a/stub.go b/stub.go index 5263408..0e62711 100644 --- a/stub.go +++ b/stub.go @@ -11,5 +11,5 @@ var multibootInfoPtr uintptr // A global variable is passed as an argument to Kmain to prevent the compiler // from inlining the actual call and removing Kmain from the generated .o file. 
func main() { - kmain.Kmain(multibootInfoPtr) + kmain.Kmain(multibootInfoPtr, 0, 0) }
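
The patch above leans on two rounding rules: bootMemAllocator.init rounds the kernel start down and the kernel end up to page boundaries (so every page touched by the image is reserved), while AllocFrame rounds an available region's start up and its end down (so only fully usable pages are handed out). The standalone Go sketch below is not part of the patch; it only illustrates that arithmetic, assuming 4 KiB pages, and reproduces the figures quoted in the test comments (a kernel at 0x100000 - 0x1fa7c8 reserves 251 pages; the first available region yields frames 0 - 158).

package main

import "fmt"

const (
	pageSize  = 4096 // assumes mem.PageSize == 4 KiB
	pageShift = 12   // assumes mem.PageShift == 12
)

// kernelFrameSpan mirrors bootMemAllocator.init: round the kernel start down
// and the kernel end up so every page touched by the image is reserved.
func kernelFrameSpan(kernelStart, kernelEnd uint64) (first, last uint64) {
	first = (kernelStart &^ (pageSize - 1)) >> pageShift
	last = (((kernelEnd + pageSize - 1) &^ (pageSize - 1)) >> pageShift) - 1
	return first, last
}

// regionFrameSpan mirrors AllocFrame: round a region start up and its end
// down so only fully contained pages are treated as allocatable.
func regionFrameSpan(physAddr, length uint64) (first, last uint64) {
	first = ((physAddr + pageSize - 1) &^ (pageSize - 1)) >> pageShift
	last = (((physAddr + length) &^ (pageSize - 1)) >> pageShift) - 1
	return first, last
}

func main() {
	// Kernel extents used by TestAllocatorPackageInit.
	kFirst, kLast := kernelFrameSpan(0x100000, 0x1fa7c8)
	fmt.Printf("kernel frames %d-%d, reserved pages: %d\n", kFirst, kLast, kLast-kFirst+1)

	// First available region from the mocked multiboot memory map: [0, 0x9fc00).
	rFirst, rLast := regionFrameSpan(0x0, 0x9fc00)
	fmt.Printf("region frames %d-%d, usable pages: %d\n", rFirst, rLast, rLast-rFirst+1)
}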
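
The vmm changes in this diff replace FrameAllocator (which took a mem.PageOrder argument) with the simpler FrameAllocatorFn, so any single-frame allocator can be handed to vmm.Map as a plain closure. The sketch below shows how the boot allocator could be wired in under that new signature; the exported EarlyAllocFn wrapper is hypothetical and not part of this patch, which keeps earlyAllocator unexported.

package allocator

import (
	"github.com/achilleasa/gopher-os/kernel"
	"github.com/achilleasa/gopher-os/kernel/mem/pmm"
	"github.com/achilleasa/gopher-os/kernel/mem/vmm"
)

// EarlyAllocFn is a hypothetical adapter (not in this patch) that exposes the
// package-level boot allocator through the new vmm.FrameAllocatorFn signature.
// Each call to the returned function reserves the next free physical frame.
func EarlyAllocFn() vmm.FrameAllocatorFn {
	return func() (pmm.Frame, *kernel.Error) {
		return earlyAllocator.AllocFrame()
	}
}

// Illustrative use: vmm.Map(page, frame, vmm.FlagRW, EarlyAllocFn())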