From 9d2e53bac477d4c9a6b87ae2a3b189e63edee311 Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Wed, 12 Jul 2017 23:24:51 +0100 Subject: [PATCH 1/6] Redirect runtime.init to an empty stub --- src/gopheros/kernel/goruntime/bootstrap.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/gopheros/kernel/goruntime/bootstrap.go b/src/gopheros/kernel/goruntime/bootstrap.go index 06d06b4..1010a18 100644 --- a/src/gopheros/kernel/goruntime/bootstrap.go +++ b/src/gopheros/kernel/goruntime/bootstrap.go @@ -51,6 +51,15 @@ func mSysStatInc(*uint64, uintptr) //go:linkname initGoPackages main.init func initGoPackages() +// Some of the package init functions (e.g. reflect.init) call runtime.init +// which attempts to create a new process and eventually crashes the kernel. +// Since the kernel does its own initialization, we can safely redirect +// runtime.init to this empty stub. +//go:redirect-from runtime.init +//go:noinline +func runtimeInit() { +} + // sysReserve reserves address space without allocating any memory or // establishing any page mappings. // @@ -188,6 +197,7 @@ func init() { zeroPtr = unsafe.Pointer(uintptr(0)) ) + runtimeInit() sysReserve(zeroPtr, 0, &reserved) sysMap(zeroPtr, 0, reserved, &stat) sysAlloc(0, &stat) From 4e0ad81770c993c62553faa01d31ebc2db303526 Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Wed, 12 Jul 2017 23:31:54 +0100 Subject: [PATCH 2/6] Reload GDT with the descriptor VMA once the CPU switches to 64-bit mode The GDT is initially loaded in the 32-bit rt0 code where we cannot use the 48-bit VMA for the GDT table and instead we use its physical address. This approach works as the rt0 code establishes an identity mapping for the region 0-8M. However, when the kernel creates a more granular PDT it only includes the VMA addresses for the kernel ELF image sections, making the 0-8M region invalid. Unless the GDT is reloaded with the VMA of the table, the CPU will trigger a non-recoverable page fault when it tries to restore the segment registers while returning from a recoverable page fault. --- src/arch/x86_64/asm/rt0_32.s | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/arch/x86_64/asm/rt0_32.s b/src/arch/x86_64/asm/rt0_32.s index ae4b124..e7a227f 100644 --- a/src/arch/x86_64/asm/rt0_32.s +++ b/src/arch/x86_64/asm/rt0_32.s @@ -355,6 +355,20 @@ write_string: ;------------------------------------------------------------------------------ bits 64 _rt0_64_entry_trampoline: + ; The currently loaded GDT points to the physical address of gdt0. This + ; works for now since we identity map the first 8M of the kernel. When + ; we set up a proper PDT for the VMA address of the kernel, the 0-8M + ; mapping will be invalid, causing a page fault when the CPU tries to + ; restore the segment registers while returning from the page fault + ; handler. + ; + ; To fix this, we need to update the GDT so it uses the 48-bit virtual + ; address of gdt0. + mov rax, gdt0_desc + mov rbx, gdt0 + mov qword [rax+2], rbx + lgdt [rax] + mov rsp, stack_top ; now that paging is enabled we can load the stack ; with the virtual address of the allocated stack.
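Note on PATCH 2/6: the write to [rax+2] works because of the layout of the operand that lgdt expects in long mode: a 16-bit table limit followed by a 64-bit linear base address, so the base field starts at byte offset 2. A minimal sketch of that layout in Go (illustrative only; the function and values below are not part of the patch):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // buildGDTDescriptor packs the 10-byte operand read by lgdt in 64-bit
    // mode: a 16-bit limit (size of the GDT in bytes, minus 1) followed by
    // a 64-bit linear base address. Overwriting bytes 2-9 with the virtual
    // address of gdt0 is exactly what `mov qword [rax+2], rbx` does above.
    func buildGDTDescriptor(limit uint16, base uint64) [10]byte {
        var desc [10]byte
        binary.LittleEndian.PutUint16(desc[0:2], limit)
        binary.LittleEndian.PutUint64(desc[2:10], base)
        return desc
    }

    func main() {
        // Hypothetical limit/base values, shown only to illustrate the layout.
        fmt.Printf("% x\n", buildGDTDescriptor(23, 0xffffffff80100000))
    }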
From fdd56112206433bdfbd7ed944bbdebdbff751bec Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Wed, 12 Jul 2017 23:34:08 +0100 Subject: [PATCH 3/6] Fix bug in the ASM code used to load the PDT into the CR3 register The previous implementation in Go assembly did not translate to the correct assembly instructions for loading CR3. --- src/gopheros/kernel/cpu/cpu_amd64.s | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/gopheros/kernel/cpu/cpu_amd64.s b/src/gopheros/kernel/cpu/cpu_amd64.s index df3bcf3..1304ca4 100644 --- a/src/gopheros/kernel/cpu/cpu_amd64.s +++ b/src/gopheros/kernel/cpu/cpu_amd64.s @@ -19,7 +19,8 @@ TEXT ·FlushTLBEntry(SB),NOSPLIT,$0 TEXT ·SwitchPDT(SB),NOSPLIT,$0 // loading CR3 also triggers a TLB flush - MOVQ pdtPhysAddr+0(FP), CR3 + MOVQ pdtPhysAddr+0(FP), AX + MOVQ AX, CR3 RET TEXT ·ActivePDT(SB),NOSPLIT,$0
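Note on PATCH 3/6: MOV to a control register is only encodable with a general-purpose register as its source operand, so a memory-to-CR3 move such as `MOVQ pdtPhysAddr+0(FP), CR3` (the FP-relative argument is a memory operand) has no valid encoding; the value must be staged through a scratch register like AX first. For reference, the Go-side declaration for this routine would look roughly as follows (a sketch; the exact declaration is assumed to live in the cpu package):

    package cpu

    // SwitchPDT loads CR3 with the physical address of a page directory
    // table, implicitly flushing the TLB. The body is implemented in
    // cpu_amd64.s (see the patch above).
    func SwitchPDT(pdtPhysAddr uintptr)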
From a2d58f894944b40c8448c6c591aa20196c1bd025 Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Wed, 12 Jul 2017 23:38:01 +0100 Subject: [PATCH 4/6] Switch to a visitor-based no-alloc implementation for fetching ELF sections This change is required as the code to create a new PDT should execute before the Go allocator is bootstrapped. --- .../kernel/hal/multiboot/multiboot.go | 70 +++++++------------ .../kernel/hal/multiboot/multiboot_test.go | 70 ++++++++++--------- 2 files changed, 61 insertions(+), 79 deletions(-) diff --git a/src/gopheros/kernel/hal/multiboot/multiboot.go b/src/gopheros/kernel/hal/multiboot/multiboot.go index 8886907..3f34653 100644 --- a/src/gopheros/kernel/hal/multiboot/multiboot.go +++ b/src/gopheros/kernel/hal/multiboot/multiboot.go @@ -7,9 +7,8 @@ import ( ) var ( - infoData uintptr - cmdLineKV map[string]string - elfSectionList []*ElfSection + infoData uintptr + cmdLineKV map[string]string ) type tagType uint32 @@ -149,7 +148,7 @@ const ( // MemRegionVisitor defies a visitor function that gets invoked by VisitMemRegions // for each memory region provided by the boot loader. The visitor must return true // to continue or false to abort the scan. -type MemRegionVisitor func(entry *MemoryMapEntry) bool +type MemRegionVisitor func(*MemoryMapEntry) bool // MemoryMapEntry describes a memory region entry, namely its physical address, // its length and its type. @@ -230,63 +229,44 @@ const ( ElfSectionExecutable ) -// ElfSection deefines the name, flags and virtual address of an ELF section -// which is part of the kernel image. -type ElfSection struct { - // The section name. - Name string - - // The list of flags associated with this section - Flags ElfSectionFlag - - // The virtual address of this section. - Address uintptr -} - -// GetElfSections returns a slice of ElfSections for the loaded kernel image. -func GetElfSections() []*ElfSection { - if elfSectionList != nil { - return elfSectionList - } +// ElfSectionVisitor defines a visitor function that gets invoked by VisitElfSections +// for each ELF section that belongs to the loaded kernel image. +type ElfSectionVisitor func(name string, flags ElfSectionFlag, address uintptr, size uint64) +// VisitElfSections invokes visitor for each ELF entry that belongs to the +// loaded kernel image. +func VisitElfSections(visitor ElfSectionVisitor) { curPtr, size := findTagByType(tagElfSymbols) if size == 0 { - return nil + return } - ptrElfSections := (*elfSections)(unsafe.Pointer(curPtr)) - sectionData := *(*[]elfSection64)(unsafe.Pointer(&reflect.SliceHeader{ - Len: int(ptrElfSections.numSections), - Cap: int(ptrElfSections.numSections), - Data: uintptr(unsafe.Pointer(&ptrElfSections.sectionData)), - })) var ( - strTable = *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ - Len: int(sectionData[ptrElfSections.strtabSectionIndex].size), - Cap: int(sectionData[ptrElfSections.strtabSectionIndex].size), - Data: uintptr(sectionData[ptrElfSections.strtabSectionIndex].address), - })) + sectionPayload elfSection64 + ptrElfSections = (*elfSections)(unsafe.Pointer(curPtr)) + secPtr = uintptr(unsafe.Pointer(&ptrElfSections.sectionData)) + sizeofSection = unsafe.Sizeof(sectionPayload) + strTableSection = (*elfSection64)(unsafe.Pointer(secPtr + uintptr(ptrElfSections.strtabSectionIndex)*sizeofSection)) + secName string + secNameHeader = (*reflect.StringHeader)(unsafe.Pointer(&secName)) ) - for _, secData := range sectionData { + for secIndex := uint16(0); secIndex < ptrElfSections.numSections; secIndex, secPtr = secIndex+1, secPtr+sizeofSection { + secData := (*elfSection64)(unsafe.Pointer(secPtr)) if secData.size == 0 { continue } // String table entries are C-style NULL-terminated strings - end := secData.nameIndex - for ; strTable[end] != 0; end++ { + end := uintptr(secData.nameIndex) + for ; *(*byte)(unsafe.Pointer(uintptr(strTableSection.address) + end)) != 0; end++ { } - elfSectionList = append(elfSectionList, &ElfSection{ - Name: string(strTable[secData.nameIndex:end]), - Flags: ElfSectionFlag(secData.flags), - Address: uintptr(secData.address), - }) - } + secNameHeader.Len = int(end - uintptr(secData.nameIndex)) + secNameHeader.Data = uintptr(unsafe.Pointer(uintptr(strTableSection.address) + uintptr(secData.nameIndex))) - return elfSectionList + visitor(secName, ElfSectionFlag(secData.flags), uintptr(secData.address), secData.size) + } } // SetInfoPtr updates the internal multiboot information pointer to the given diff --git a/src/gopheros/kernel/hal/multiboot/multiboot_test.go b/src/gopheros/kernel/hal/multiboot/multiboot_test.go index 99217b2..a806573 100644 --- a/src/gopheros/kernel/hal/multiboot/multiboot_test.go +++ b/src/gopheros/kernel/hal/multiboot/multiboot_test.go @@ -196,9 +196,9 @@ func TestGetBootCmdLine(t *testing.T) { func TestGetElfSections(t *testing.T) { SetInfoPtr(uintptr(unsafe.Pointer(&emptyInfoData[0]))) - if GetElfSections() != nil { - t.Fatalf("expected GetElfSections() to return nil when no elf sections tag is present") - } + VisitElfSections(func(_ string, _ ElfSectionFlag, _ uintptr, _ uint64) { + t.Fatalf("expected the visitor not to be invoked when no elf sections tag is present") + }) SetInfoPtr(uintptr(unsafe.Pointer(&multibootInfoTestData[0]))) @@ -209,43 +209,45 @@ func TestGetElfSections(t *testing.T) { multibootInfoTestData[1660+i] = b } - sections := GetElfSections() + // There are more sections in the test data but we only focus on these for the test + var ( + expSections = []struct { + secName string + expFlags ElfSectionFlag + }{ + {".text", ElfSectionAllocated | ElfSectionExecutable}, + {".bss", ElfSectionAllocated | ElfSectionWritable}, + {".noptrbss", ElfSectionAllocated | ElfSectionWritable}, + {".data", ElfSectionAllocated | ElfSectionWritable}, + {".rodata", ElfSectionAllocated}, + {".strtab", 0}, + } + matchedSections int + ) - specs := []struct { - secName string - expFlags ElfSectionFlag - }{ - {".text", ElfSectionAllocated | ElfSectionExecutable}, - {".bss", ElfSectionAllocated | ElfSectionWritable}, - {".noptrbss", ElfSectionAllocated | ElfSectionWritable}, - {".data", ElfSectionAllocated | ElfSectionWritable}, - {".rodata", ElfSectionAllocated}, - {".strtab", 0}, - } - - for specIndex, spec := range specs { - var found *ElfSection - for _, sec := range sections { - if sec.Name == spec.secName { - found = sec - break + VisitElfSections(func(secName string, secFlags ElfSectionFlag, _ uintptr, secSize uint64) { + for secIndex, sec := range expSections { + if secName != sec.secName { + continue } - } - if found == nil { - t.Errorf("[spec %d] missing section %q", specIndex, spec.secName) - continue - } + if secFlags != sec.expFlags { + t.Errorf("[section %d] expected section flags to be: 0x%x; got 0x%x", secIndex, sec.expFlags, secFlags) + return + } - if found.Flags != spec.expFlags { - t.Errorf("[spec %d] expected section flags to be: 0x%x; got 0x%x", specIndex, spec.expFlags, found.Flags) - } - } + if secSize == 0 { + t.Errorf("[section %d] expected section size to be > 0", secIndex) + return + } - // Second call should return the memoized data - sections[0].Name = "foo" - if sections2 := GetElfSections(); !reflect.DeepEqual(sections2, sections) { - t.Error("expected second call to GetElfSections() to return the memoized section list") + matchedSections++ + return + } + }) + + if exp := len(expSections); matchedSections != exp { + t.Fatalf("expected to match %d sections; matched %d", exp, matchedSections) } }
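Note on PATCH 4/6: with the visitor-based API, callers can walk the kernel's ELF sections before the Go allocator is bootstrapped because nothing is heap-allocated; the section name string aliases the multiboot string table in place via a reflect.StringHeader. A hypothetical caller (countLoadedSections and its package are illustrative, not part of the patch) would look like this:

    package boot

    import "gopheros/kernel/hal/multiboot"

    // countLoadedSections tallies the kernel image sections without
    // allocating: the visitor closure only writes to a captured local.
    func countLoadedSections() int {
        var count int
        multiboot.VisitElfSections(func(name string, flags multiboot.ElfSectionFlag, addr uintptr, size uint64) {
            // name must not be retained after the callback returns: it
            // aliases the multiboot string table rather than a copy.
            count++
        })
        return count
    }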
// //go:noinline -func Kmain(multibootInfoPtr, kernelStart, kernelEnd uintptr) { +func Kmain(multibootInfoPtr, kernelStart, kernelEnd, kernelPageOffset uintptr) { multiboot.SetInfoPtr(multibootInfoPtr) var err *kernel.Error if err = allocator.Init(kernelStart, kernelEnd); err != nil { panic(err) - } else if err = vmm.Init(); err != nil { panic(err) } else if err = goruntime.Init(); err != nil { panic(err) diff --git a/src/gopheros/stub.go b/src/gopheros/stub.go index 73c6d49..3446069 100644 --- a/src/gopheros/stub.go +++ b/src/gopheros/stub.go @@ -11,5 +11,5 @@ var multibootInfoPtr uintptr // A global variable is passed as an argument to Kmain to prevent the compiler // from inlining the actual call and removing Kmain from the generated .o file. func main() { - kmain.Kmain(multibootInfoPtr, 0, 0) + kmain.Kmain(multibootInfoPtr, 0, 0, 0) }
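Note on PATCH 5/6: the page offset is what converts between the kernel's virtual addresses and physical frame addresses: for a higher-half kernel, phys = virt - PAGE_OFFSET. A sketch of the conversion (frameForKernelAddr is a hypothetical helper, not part of the patch; it assumes the kernel image is mapped contiguously at kernelPageOffset):

    package vmm

    import (
        "gopheros/kernel/mem"
        "gopheros/kernel/mem/pmm"
    )

    // frameForKernelAddr returns the physical frame backing a kernel
    // virtual address: subtract the page offset to undo the higher-half
    // relocation, then shift by the page size to get the frame number.
    func frameForKernelAddr(virt, kernelPageOffset uintptr) pmm.Frame {
        return pmm.Frame((virt - kernelPageOffset) >> mem.PageShift)
    }

This mirrors the `curFrame := pmm.Frame((secAddress - kernelPageOffset) >> mem.PageShift)` expression in PATCH 6/6 below.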
From 6195f3fc3b2a7ea8fe60ff55ed2355a9da59bdb2 Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Thu, 13 Jul 2017 06:35:22 +0100 Subject: [PATCH 6/6] Set up a more granular PDT based on the ELF image section addresses --- src/gopheros/kernel/kmain/kmain.go | 1 + src/gopheros/kernel/mem/vmm/vmm.go | 103 +++++++++- src/gopheros/kernel/mem/vmm/vmm_test.go | 245 +++++++++++++++++++++++- 3 files changed, 341 insertions(+), 8 deletions(-) diff --git a/src/gopheros/kernel/kmain/kmain.go b/src/gopheros/kernel/kmain/kmain.go index ff41b03..2e3f2df 100644 --- a/src/gopheros/kernel/kmain/kmain.go +++ b/src/gopheros/kernel/kmain/kmain.go @@ -33,6 +33,7 @@ func Kmain(multibootInfoPtr, kernelStart, kernelEnd, kernelPageOffset uintptr) { var err *kernel.Error if err = allocator.Init(kernelStart, kernelEnd); err != nil { panic(err) + } else if err = vmm.Init(kernelPageOffset); err != nil { panic(err) } else if err = goruntime.Init(); err != nil { panic(err) diff --git a/src/gopheros/kernel/mem/vmm/vmm.go b/src/gopheros/kernel/mem/vmm/vmm.go index b63b1f9..1e8fa98 100644 --- a/src/gopheros/kernel/mem/vmm/vmm.go +++ b/src/gopheros/kernel/mem/vmm/vmm.go @@ -3,10 +3,12 @@ package vmm import ( "gopheros/kernel" "gopheros/kernel/cpu" + "gopheros/kernel/hal/multiboot" "gopheros/kernel/irq" "gopheros/kernel/kfmt" "gopheros/kernel/mem" "gopheros/kernel/mem/pmm" + "unsafe" ) var ( @@ -18,6 +20,8 @@ var ( // inlined by the compiler. handleExceptionWithCodeFn = irq.HandleExceptionWithCode readCR2Fn = cpu.ReadCR2 + translateFn = Translate + visitElfSectionsFn = multiboot.VisitElfSections errUnrecoverableFault = &kernel.Error{Module: "vmm", Message: "page/gpf fault"} ) @@ -142,9 +146,13 @@ func reserveZeroedFrame() *kernel.Error { return nil } -// Init initializes the vmm system and installs paging-related exception -// handlers. -func Init() *kernel.Error { +// Init initializes the vmm system, creates a granular PDT for the kernel and +// installs paging-related exception handlers. +func Init(kernelPageOffset uintptr) *kernel.Error { + if err := setupPDTForKernel(kernelPageOffset); err != nil { + return err + } + if err := reserveZeroedFrame(); err != nil { return err } @@ -153,3 +161,92 @@ handleExceptionWithCodeFn(irq.GPFException, generalProtectionFaultHandler) return nil } + +// setupPDTForKernel queries the multiboot package for the ELF sections that +// correspond to the loaded kernel image and establishes a new granular PDT for +// the kernel's VMA using the appropriate flags (e.g. NX for data sections, RW +// for writable sections etc.). +func setupPDTForKernel(kernelPageOffset uintptr) *kernel.Error { + var pdt PageDirectoryTable + + // Allocate frame for the page directory and initialize it + pdtFrame, err := frameAllocator() + if err != nil { + return err + } + + if err = pdt.Init(pdtFrame); err != nil { + return err + } + + // Query the ELF sections of the kernel image and establish mappings + // for each one using the appropriate flags + pageSizeMinus1 := uint64(mem.PageSize - 1) + var visitor = func(_ string, secFlags multiboot.ElfSectionFlag, secAddress uintptr, secSize uint64) { + // Bail out if we have encountered an error; also ignore sections + // not using the kernel's VMA + if err != nil || secAddress < kernelPageOffset { + return + } + + flags := FlagPresent + + if (secFlags & multiboot.ElfSectionExecutable) == 0 { + flags |= FlagNoExecute + } + + if (secFlags & multiboot.ElfSectionWritable) != 0 { + flags |= FlagRW + } + + // We assume that all sections are page-aligned by the linker script + curPage := PageFromAddress(secAddress) + curFrame := pmm.Frame((secAddress - kernelPageOffset) >> mem.PageShift) + endFrame := curFrame + pmm.Frame(((secSize+pageSizeMinus1) & ^pageSizeMinus1)>>mem.PageShift) + for ; curFrame < endFrame; curFrame, curPage = curFrame+1, curPage+1 { + if err = pdt.Map(curPage, curFrame, flags); err != nil { + return + } + } + } + + // Use the noescape hack to prevent the compiler from leaking the visitor + // function literal to the heap. + visitElfSectionsFn( + *(*multiboot.ElfSectionVisitor)(noEscape(unsafe.Pointer(&visitor))), + ) + + // If an error occurred while mapping the ELF sections, bail out + if err != nil { + return err + } + + // Ensure that any pages mapped by the memory allocator using + // EarlyReserveRegion are copied to the new page directory. + for rsvAddr := earlyReserveLastUsed; rsvAddr < tempMappingAddr; rsvAddr += uintptr(mem.PageSize) { + page := PageFromAddress(rsvAddr) + + frameAddr, err := translateFn(rsvAddr) + if err != nil { + return err + } + + if err = pdt.Map(page, pmm.Frame(frameAddr>>mem.PageShift), FlagPresent|FlagRW); err != nil { + return err + } + } + + // Activate the new PDT. After this point, the identity mapping for the + // physical memory addresses where the kernel is loaded becomes invalid. + pdt.Activate() + + return nil +} + +// noEscape hides a pointer from escape analysis.
This function is copied over +// from runtime/stubs.go. +//go:nosplit +func noEscape(p unsafe.Pointer) unsafe.Pointer { + x := uintptr(p) + return unsafe.Pointer(x ^ 0) +} diff --git a/src/gopheros/kernel/mem/vmm/vmm_test.go b/src/gopheros/kernel/mem/vmm/vmm_test.go index c41bd79..c9b8fa3 100644 --- a/src/gopheros/kernel/mem/vmm/vmm_test.go +++ b/src/gopheros/kernel/mem/vmm/vmm_test.go @@ -5,6 +5,7 @@ import ( "fmt" "gopheros/kernel" "gopheros/kernel/cpu" + "gopheros/kernel/hal/multiboot" "gopheros/kernel/irq" "gopheros/kernel/kfmt" "gopheros/kernel/mem" @@ -165,7 +166,7 @@ func TestNonRecoverablePageFault(t *testing.T) { } } -func TestGPtHandler(t *testing.T) { +func TestGPFHandler(t *testing.T) { defer func() { readCR2Fn = cpu.ReadCR2 }() @@ -191,6 +192,9 @@ func TestGPFHandler(t *testing.T) { func TestInit(t *testing.T) { defer func() { frameAllocator = nil + activePDTFn = cpu.ActivePDT + switchPDTFn = cpu.SwitchPDT + translateFn = Translate mapTemporaryFn = MapTemporary unmapFn = Unmap handleExceptionWithCodeFn = irq.HandleExceptionWithCode @@ -199,6 +203,8 @@ func TestInit(t *testing.T) { // reserve space for an allocated page reservedPage := make([]byte, mem.PageSize) + multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&emptyInfoData[0]))) + t.Run("success", func(t *testing.T) { // fill page with junk for i := 0; i < len(reservedPage); i++ { @@ -209,11 +215,15 @@ func TestInit(t *testing.T) { addr := uintptr(unsafe.Pointer(&reservedPage[0])) return pmm.Frame(addr >> mem.PageShift), nil }) + activePDTFn = func() uintptr { + return uintptr(unsafe.Pointer(&reservedPage[0])) + } + switchPDTFn = func(_ uintptr) {} unmapFn = func(p Page) *kernel.Error { return nil } mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil } handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {} - if err := Init(); err != nil { + if err := Init(0); err != nil { t.Fatal(err) } @@ -225,15 +235,45 @@ func TestInit(t *testing.T) { } }) + t.Run("setupPDT fails", func(t *testing.T) { + expErr := &kernel.Error{Module: "test", Message: "out of memory"} + + // Fail the very first frame allocation, which is the one + // requested for the new PDT + SetFrameAllocator(func() (pmm.Frame, *kernel.Error) { + return pmm.InvalidFrame, expErr + }) + + if err := Init(0); err != expErr { + t.Fatalf("expected error: %v; got %v", expErr, err) + } + }) + t.Run("blank page allocation error", func(t *testing.T) { expErr := &kernel.Error{Module: "test", Message: "out of memory"} - SetFrameAllocator(func() (pmm.Frame, *kernel.Error) { return pmm.InvalidFrame, expErr }) + // Allow the PDT allocation to succeed and then return an error when + // trying to allocate the blank frame + var allocCount int + SetFrameAllocator(func() (pmm.Frame, *kernel.Error) { + defer func() { allocCount++ }() + + if allocCount == 0 { + addr := uintptr(unsafe.Pointer(&reservedPage[0])) + return pmm.Frame(addr >> mem.PageShift), nil + } + + return pmm.InvalidFrame, expErr + }) + activePDTFn = func() uintptr { + return uintptr(unsafe.Pointer(&reservedPage[0])) + } + switchPDTFn = func(_ uintptr) {} unmapFn = func(p Page) *kernel.Error { return nil } mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil } handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {} - if err := Init(); err != expErr { + if err := Init(0); err != expErr { t.Fatalf("expected error: %v; got %v", expErr, err) } }) @@ -245,12 +285,207 @@
addr := uintptr(unsafe.Pointer(&reservedPage[0])) return pmm.Frame(addr >> mem.PageShift), nil }) + activePDTFn = func() uintptr { + return uintptr(unsafe.Pointer(&reservedPage[0])) + } + switchPDTFn = func(_ uintptr) {} unmapFn = func(p Page) *kernel.Error { return nil } mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), expErr } handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {} - if err := Init(); err != expErr { + if err := Init(0); err != expErr { t.Fatalf("expected error: %v; got %v", expErr, err) } }) } + +func TestSetupPDTForKernel(t *testing.T) { + defer func() { + frameAllocator = nil + activePDTFn = cpu.ActivePDT + switchPDTFn = cpu.SwitchPDT + translateFn = Translate + mapFn = Map + mapTemporaryFn = MapTemporary + unmapFn = Unmap + earlyReserveLastUsed = tempMappingAddr + }() + + // reserve space for an allocated page + reservedPage := make([]byte, mem.PageSize) + + multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&emptyInfoData[0]))) + + t.Run("map kernel sections", func(t *testing.T) { + defer func() { visitElfSectionsFn = multiboot.VisitElfSections }() + + SetFrameAllocator(func() (pmm.Frame, *kernel.Error) { + addr := uintptr(unsafe.Pointer(&reservedPage[0])) + return pmm.Frame(addr >> mem.PageShift), nil + }) + activePDTFn = func() uintptr { + return uintptr(unsafe.Pointer(&reservedPage[0])) + } + switchPDTFn = func(_ uintptr) {} + translateFn = func(_ uintptr) (uintptr, *kernel.Error) { return 0xbadf00d000, nil } + mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil } + visitElfSectionsFn = func(v multiboot.ElfSectionVisitor) { + v(".debug", 0, 0, uint64(mem.PageSize>>1)) // address < VMA; should be ignored + v(".text", multiboot.ElfSectionExecutable, 0xbadc0ffee, uint64(mem.PageSize>>1)) + v(".data", multiboot.ElfSectionWritable, 0xbadc0ffee, uint64(mem.PageSize)) + v(".rodata", 0, 0xbadc0ffee, uint64(mem.PageSize<<1)) + } + mapCount := 0 + mapFn = func(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error { + defer func() { mapCount++ }() + + var expFlags PageTableEntryFlag + + switch mapCount { + case 0: + expFlags = FlagPresent + case 1: + expFlags = FlagPresent | FlagNoExecute | FlagRW + case 2, 3: + expFlags = FlagPresent | FlagNoExecute + } + + if (flags & expFlags) != expFlags { + t.Errorf("[map call %d] expected flags to be %d; got %d", mapCount, expFlags, flags) + } + + return nil + } + + if err := setupPDTForKernel(0x123); err != nil { + t.Fatal(err) + } + + if exp := 4; mapCount != exp { + t.Errorf("expected Map to be called %d times; got %d", exp, mapCount) + } + }) + + t.Run("map of kernel sections fails", func(t *testing.T) { + defer func() { visitElfSectionsFn = multiboot.VisitElfSections }() + expErr := &kernel.Error{Module: "test", Message: "map failed"} + + SetFrameAllocator(func() (pmm.Frame, *kernel.Error) { + addr := uintptr(unsafe.Pointer(&reservedPage[0])) + return pmm.Frame(addr >> mem.PageShift), nil + }) + activePDTFn = func() uintptr { + return uintptr(unsafe.Pointer(&reservedPage[0])) + } + switchPDTFn = func(_ uintptr) {} + translateFn = func(_ uintptr) (uintptr, *kernel.Error) { return 0xbadf00d000, nil } + mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil } + visitElfSectionsFn = func(v multiboot.ElfSectionVisitor) { + v(".text", multiboot.ElfSectionExecutable, 0xbadc0ffee, uint64(mem.PageSize>>1)) + } + mapFn = func(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error { + return expErr + } + + if err
:= setupPDTForKernel(0); err != expErr { t.Fatalf("expected error: %v; got %v", expErr, err) } }) + + t.Run("copy allocator reservations to PDT", func(t *testing.T) { + earlyReserveLastUsed = tempMappingAddr - uintptr(mem.PageSize) + SetFrameAllocator(func() (pmm.Frame, *kernel.Error) { + addr := uintptr(unsafe.Pointer(&reservedPage[0])) + return pmm.Frame(addr >> mem.PageShift), nil + }) + activePDTFn = func() uintptr { + return uintptr(unsafe.Pointer(&reservedPage[0])) + } + switchPDTFn = func(_ uintptr) {} + translateFn = func(_ uintptr) (uintptr, *kernel.Error) { return 0xbadf00d000, nil } + unmapFn = func(p Page) *kernel.Error { return nil } + mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil } + mapFn = func(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error { + if exp := PageFromAddress(earlyReserveLastUsed); page != exp { + t.Errorf("expected Map to be called with page %d; got %d", exp, page) + } + + if exp := pmm.Frame(0xbadf00d000 >> mem.PageShift); frame != exp { + t.Errorf("expected Map to be called with frame %d; got %d", exp, frame) + } + + if flags&(FlagPresent|FlagRW) != (FlagPresent | FlagRW) { + t.Error("expected Map to be called with FlagPresent | FlagRW") + } + return nil + } + + if err := setupPDTForKernel(0); err != nil { + t.Fatal(err) + } + }) + + t.Run("pdt init fails", func(t *testing.T) { + expErr := &kernel.Error{Module: "test", Message: "translate failed"} + + SetFrameAllocator(func() (pmm.Frame, *kernel.Error) { + addr := uintptr(unsafe.Pointer(&reservedPage[0])) + return pmm.Frame(addr >> mem.PageShift), nil + }) + activePDTFn = func() uintptr { return 0 } + mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return 0, expErr } + + if err := setupPDTForKernel(0); err != expErr { + t.Fatalf("expected error: %v; got %v", expErr, err) + } + }) + + t.Run("translation fails for page in reserved address space", func(t *testing.T) { + expErr := &kernel.Error{Module: "test", Message: "translate failed"} + + earlyReserveLastUsed = tempMappingAddr - uintptr(mem.PageSize) + SetFrameAllocator(func() (pmm.Frame, *kernel.Error) { + addr := uintptr(unsafe.Pointer(&reservedPage[0])) + return pmm.Frame(addr >> mem.PageShift), nil + }) + activePDTFn = func() uintptr { + return uintptr(unsafe.Pointer(&reservedPage[0])) + } + translateFn = func(_ uintptr) (uintptr, *kernel.Error) { + return 0, expErr + } + + if err := setupPDTForKernel(0); err != expErr { + t.Fatalf("expected error: %v; got %v", expErr, err) + } + }) + + t.Run("map fails for page in reserved address space", func(t *testing.T) { + expErr := &kernel.Error{Module: "test", Message: "map failed"} + + earlyReserveLastUsed = tempMappingAddr - uintptr(mem.PageSize) + SetFrameAllocator(func() (pmm.Frame, *kernel.Error) { + addr := uintptr(unsafe.Pointer(&reservedPage[0])) + return pmm.Frame(addr >> mem.PageShift), nil + }) + activePDTFn = func() uintptr { + return uintptr(unsafe.Pointer(&reservedPage[0])) + } + translateFn = func(_ uintptr) (uintptr, *kernel.Error) { return 0xbadf00d000, nil } + mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil } + mapFn = func(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error { return expErr } + + if err := setupPDTForKernel(0); err != expErr { + t.Fatalf("expected error: %v; got %v", expErr, err) + } + }) +} + +var ( + emptyInfoData = []byte{ + 0, 0, 0, 0, // size + 0, 0, 0, 0, // reserved + 0, 0, 0, 0, // tag with type zero and length zero + 0, 0, 0, 0, + } +)
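Note on PATCH 6/6: the frame-range computation in setupPDTForKernel rounds each section size up to a whole number of pages before mapping; `(secSize+pageSizeMinus1) & ^pageSizeMinus1` is the usual round-up-to-a-power-of-two trick (Go's `x & ^y` is equivalent to `x &^ y`). A self-contained sketch with a worked example, assuming 4 KiB pages (the helper below is illustrative, not part of the patch):

    package main

    import "fmt"

    const pageSize = 4096

    // pagesNeeded rounds size up to the next page boundary and returns
    // the number of 4 KiB pages required to cover it, matching the
    // endFrame computation in setupPDTForKernel.
    func pagesNeeded(size uint64) uint64 {
        return ((size + pageSize - 1) &^ (pageSize - 1)) >> 12
    }

    func main() {
        fmt.Println(pagesNeeded(1))    // 1 page
        fmt.Println(pagesNeeded(4096)) // 1 page
        fmt.Println(pagesNeeded(4097)) // 2 pages
    }

This rounding is what makes the "map kernel sections" test above expect four Map calls: one page each for .text and .data, and two pages for the 2*PageSize .rodata section.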