1
0
mirror of https://github.com/taigrr/gopher-os synced 2025-01-18 04:43:13 -08:00

Merge pull request #43 from achilleasa/setup-granular-pdt-for-kernel

Setup granular PDT for kernel
This commit is contained in:
Achilleas Anagnostopoulos 2017-07-13 06:57:01 +01:00 committed by GitHub
commit 4e5cee30ba
10 changed files with 436 additions and 92 deletions

View File

@ -355,6 +355,20 @@ write_string:
;------------------------------------------------------------------------------
bits 64
_rt0_64_entry_trampoline:
; The currently loaded GDT points to the physical address of gdt0. This
; works for now since we identity map the first 8M of the kernel. When
; we set up a proper PDT for the VMA address of the kernel, the 0-8M
; mapping will be invalid causing a page fault when the CPU tries to
; restore the segment registers while returning from the page fault
; handler.
;
; To fix this, we need to update the GDT so it uses the 48-bit virtual
; address of gdt0.
mov rax, gdt0_desc
mov rbx, gdt0
mov qword [rax+2], rbx
lgdt [rax]
mov rsp, stack_top ; now that paging is enabled we can load the stack
; with the virtual address of the allocated stack.

View File

@ -51,6 +51,8 @@ _rt0_64_entry:
extern _kernel_end
extern kernel.Kmain
mov rax, PAGE_OFFSET
push rax
mov rax, _kernel_end - PAGE_OFFSET
push rax
mov rax, _kernel_start - PAGE_OFFSET

View File

@ -19,7 +19,8 @@ TEXT ·FlushTLBEntry(SB),NOSPLIT,$0
TEXT ·SwitchPDT(SB),NOSPLIT,$0
// loading CR3 also triggers a TLB flush
MOVQ pdtPhysAddr+0(FP), CR3
MOVQ pdtPhysAddr+0(FP), AX
MOVQ AX, CR3
RET
TEXT ·ActivePDT(SB),NOSPLIT,$0

View File

@ -51,6 +51,16 @@ func mSysStatInc(*uint64, uintptr)
//go:linkname initGoPackages main.init
func initGoPackages()
// Some of the package init functions (e.g reflect.init) call runtime.init
// which attempts to create a new process and eventually crashes the kernel.
// Since the kernel does its own initialization, we can safely redirect
// runtime.init
// to this empty stub.
//go:redirect-from runtime.init
//go:noinline
func runtimeInit() {
}
// sysReserve reserves address space without allocating any memory or
// establishing any page mappings.
//
@ -188,6 +198,7 @@ func init() {
zeroPtr = unsafe.Pointer(uintptr(0))
)
runtimeInit()
sysReserve(zeroPtr, 0, &reserved)
sysMap(zeroPtr, 0, reserved, &stat)
sysAlloc(0, &stat)

View File

@ -7,9 +7,8 @@ import (
)
var (
infoData uintptr
cmdLineKV map[string]string
elfSectionList []*ElfSection
infoData uintptr
cmdLineKV map[string]string
)
type tagType uint32
@ -149,7 +148,7 @@ const (
// MemRegionVisitor defines a visitor function that gets invoked by VisitMemRegions
// for each memory region provided by the boot loader. The visitor must return true
// to continue or false to abort the scan.
type MemRegionVisitor func(entry *MemoryMapEntry) bool
type MemRegionVisitor func(*MemoryMapEntry) bool
// MemoryMapEntry describes a memory region entry, namely its physical address,
// its length and its type.
@ -230,63 +229,44 @@ const (
ElfSectionExecutable
)
// ElfSection defines the name, flags and virtual address of an ELF section
// which is part of the kernel image.
type ElfSection struct {
// The section name.
Name string
// The list of flags associated with this section
Flags ElfSectionFlag
// The virtual address of this section.
Address uintptr
}
// GetElfSections returns a slice of ElfSections for the loaded kernel image.
func GetElfSections() []*ElfSection {
if elfSectionList != nil {
return elfSectionList
}
// ElfSectionVisitor defines a visitor function that gets invoked by VisitElfSections
// for each ELF section that belongs to the loaded kernel image.
type ElfSectionVisitor func(name string, flags ElfSectionFlag, address uintptr, size uint64)
// VisitElfSections invokes visitor for each ELF entry that belongs to the
// loaded kernel image.
func VisitElfSections(visitor ElfSectionVisitor) {
curPtr, size := findTagByType(tagElfSymbols)
if size == 0 {
return nil
return
}
ptrElfSections := (*elfSections)(unsafe.Pointer(curPtr))
sectionData := *(*[]elfSection64)(unsafe.Pointer(&reflect.SliceHeader{
Len: int(ptrElfSections.numSections),
Cap: int(ptrElfSections.numSections),
Data: uintptr(unsafe.Pointer(&ptrElfSections.sectionData)),
}))
var (
strTable = *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
Len: int(sectionData[ptrElfSections.strtabSectionIndex].size),
Cap: int(sectionData[ptrElfSections.strtabSectionIndex].size),
Data: uintptr(sectionData[ptrElfSections.strtabSectionIndex].address),
}))
sectionPayload elfSection64
ptrElfSections = (*elfSections)(unsafe.Pointer(curPtr))
secPtr = uintptr(unsafe.Pointer(&ptrElfSections.sectionData))
sizeofSection = unsafe.Sizeof(sectionPayload)
strTableSection = (*elfSection64)(unsafe.Pointer(secPtr + uintptr(ptrElfSections.strtabSectionIndex)*sizeofSection))
secName string
secNameHeader = (*reflect.StringHeader)(unsafe.Pointer(&secName))
)
for _, secData := range sectionData {
for secIndex := uint16(0); secIndex < ptrElfSections.numSections; secIndex, secPtr = secIndex+1, secPtr+sizeofSection {
secData := (*elfSection64)(unsafe.Pointer(secPtr))
if secData.size == 0 {
continue
}
// String table entries are C-style NULL-terminated strings
end := secData.nameIndex
for ; strTable[end] != 0; end++ {
end := uintptr(secData.nameIndex)
for ; *(*byte)(unsafe.Pointer(uintptr(strTableSection.address) + end)) != 0; end++ {
}
elfSectionList = append(elfSectionList, &ElfSection{
Name: string(strTable[secData.nameIndex:end]),
Flags: ElfSectionFlag(secData.flags),
Address: uintptr(secData.address),
})
}
secNameHeader.Len = int(end - uintptr(secData.nameIndex))
secNameHeader.Data = uintptr(unsafe.Pointer(uintptr(strTableSection.address) + uintptr(secData.nameIndex)))
return elfSectionList
visitor(secName, ElfSectionFlag(secData.flags), uintptr(secData.address), secData.size)
}
}
// SetInfoPtr updates the internal multiboot information pointer to the given

View File

@ -196,9 +196,9 @@ func TestGetBootCmdLine(t *testing.T) {
func TestGetElfSections(t *testing.T) {
SetInfoPtr(uintptr(unsafe.Pointer(&emptyInfoData[0])))
if GetElfSections() != nil {
VisitElfSections(func(_ string, _ ElfSectionFlag, _ uintptr, _ uint64) {
t.Fatalf("expected GetElfSections() to return nil when no elf sections tag is present")
}
})
SetInfoPtr(uintptr(unsafe.Pointer(&multibootInfoTestData[0])))
@ -209,43 +209,45 @@ func TestGetElfSections(t *testing.T) {
multibootInfoTestData[1660+i] = b
}
sections := GetElfSections()
// There are more sections in the test data but we will only focus on these ones for the test
var (
expSections = []struct {
secName string
expFlags ElfSectionFlag
}{
{".text", ElfSectionAllocated | ElfSectionExecutable},
{".bss", ElfSectionAllocated | ElfSectionWritable},
{".noptrbss", ElfSectionAllocated | ElfSectionWritable},
{".data", ElfSectionAllocated | ElfSectionWritable},
{".rodata", ElfSectionAllocated},
{".strtab", 0},
}
matchedSections int
)
specs := []struct {
secName string
expFlags ElfSectionFlag
}{
{".text", ElfSectionAllocated | ElfSectionExecutable},
{".bss", ElfSectionAllocated | ElfSectionWritable},
{".noptrbss", ElfSectionAllocated | ElfSectionWritable},
{".data", ElfSectionAllocated | ElfSectionWritable},
{".rodata", ElfSectionAllocated},
{".strtab", 0},
}
for specIndex, spec := range specs {
var found *ElfSection
for _, sec := range sections {
if sec.Name == spec.secName {
found = sec
break
VisitElfSections(func(secName string, secFlags ElfSectionFlag, _ uintptr, secSize uint64) {
for secIndex, sec := range expSections {
if secName != sec.secName {
continue
}
}
if found == nil {
t.Errorf("[spec %d] missing section %q", specIndex, spec.secName)
continue
}
if secFlags != sec.expFlags {
t.Errorf("[section %d] expected section flags to be: 0x%x; got 0x%x", secIndex, sec.expFlags, secFlags)
return
}
if found.Flags != spec.expFlags {
t.Errorf("[spec %d] expected section flags to be: 0x%x; got 0x%x", specIndex, spec.expFlags, found.Flags)
}
}
if secSize == 0 {
t.Errorf("[section %d] expected section size to be > 0", secIndex)
return
}
// Second call should return the memoized data
sections[0].Name = "foo"
if sections2 := GetElfSections(); !reflect.DeepEqual(sections2, sections) {
t.Error("expected second call to GetElfSections() to return the memoized section list")
matchedSections++
return
}
})
if exp := len(expSections); matchedSections != exp {
t.Fatalf("expected to match %d sections; matched %d", exp, matchedSections)
}
}

View File

@ -20,18 +20,20 @@ var (
// allocated by the assembly code.
//
// The rt0 code passes the address of the multiboot info payload provided by the
// bootloader as well as the physical addresses for the kernel start/end.
// bootloader as well as the physical addresses for the kernel start/end. In
// addition, the start of the kernel virtual address space is passed to the
// kernelPageOffset argument.
//
// Kmain is not expected to return. If it does, the rt0 code will halt the CPU.
//
//go:noinline
func Kmain(multibootInfoPtr, kernelStart, kernelEnd uintptr) {
func Kmain(multibootInfoPtr, kernelStart, kernelEnd, kernelPageOffset uintptr) {
multiboot.SetInfoPtr(multibootInfoPtr)
var err *kernel.Error
if err = allocator.Init(kernelStart, kernelEnd); err != nil {
panic(err)
} else if err = vmm.Init(); err != nil {
} else if err = vmm.Init(kernelPageOffset); err != nil {
panic(err)
} else if err = goruntime.Init(); err != nil {
panic(err)

View File

@ -3,10 +3,12 @@ package vmm
import (
"gopheros/kernel"
"gopheros/kernel/cpu"
"gopheros/kernel/hal/multiboot"
"gopheros/kernel/irq"
"gopheros/kernel/kfmt"
"gopheros/kernel/mem"
"gopheros/kernel/mem/pmm"
"unsafe"
)
var (
@ -18,6 +20,8 @@ var (
// inlined by the compiler.
handleExceptionWithCodeFn = irq.HandleExceptionWithCode
readCR2Fn = cpu.ReadCR2
translateFn = Translate
visitElfSectionsFn = multiboot.VisitElfSections
errUnrecoverableFault = &kernel.Error{Module: "vmm", Message: "page/gpf fault"}
)
@ -142,9 +146,13 @@ func reserveZeroedFrame() *kernel.Error {
return nil
}
// Init initializes the vmm system and installs paging-related exception
// handlers.
func Init() *kernel.Error {
// Init initializes the vmm system, creates a granular PDT for the kernel and
// installs paging-related exception handlers.
func Init(kernelPageOffset uintptr) *kernel.Error {
if err := setupPDTForKernel(kernelPageOffset); err != nil {
return err
}
if err := reserveZeroedFrame(); err != nil {
return err
}
@ -153,3 +161,92 @@ func Init() *kernel.Error {
handleExceptionWithCodeFn(irq.GPFException, generalProtectionFaultHandler)
return nil
}
// setupPDTForKernel queries the multiboot package for the ELF sections that
// correspond to the loaded kernel image and establishes a new granular PDT for
// the kernel's VMA using the appropriate flags (e.g. NX for data sections, RW
// for writable sections, etc.).
func setupPDTForKernel(kernelPageOffset uintptr) *kernel.Error {
var pdt PageDirectoryTable
// Allocate a frame for the page directory and initialize it
pdtFrame, err := frameAllocator()
if err != nil {
return err
}
if err = pdt.Init(pdtFrame); err != nil {
return err
}
// Query the ELF sections of the kernel image and establish mappings
// for each one using the appropriate flags. The visitor closure captures
// err so a mapping failure short-circuits all subsequent section visits.
pageSizeMinus1 := uint64(mem.PageSize - 1)
var visitor = func(_ string, secFlags multiboot.ElfSectionFlag, secAddress uintptr, secSize uint64) {
// Bail out if we have encountered an error; also ignore sections
// not using the kernel's VMA
if err != nil || secAddress < kernelPageOffset {
return
}
// Non-executable sections get NX; writable sections get RW.
flags := FlagPresent
if (secFlags & multiboot.ElfSectionExecutable) == 0 {
flags |= FlagNoExecute
}
if (secFlags & multiboot.ElfSectionWritable) != 0 {
flags |= FlagRW
}
// We assume that all sections are page-aligned by the linker script.
// The frame is derived by stripping the VMA offset; the frame count
// rounds secSize up to the next page boundary.
curPage := PageFromAddress(secAddress)
curFrame := pmm.Frame((secAddress - kernelPageOffset) >> mem.PageShift)
endFrame := curFrame + pmm.Frame(((secSize+pageSizeMinus1) & ^pageSizeMinus1)>>mem.PageShift)
for ; curFrame < endFrame; curFrame, curPage = curFrame+1, curPage+1 {
if err = pdt.Map(curPage, curFrame, flags); err != nil {
return
}
}
}
// Use the noescape hack to prevent the compiler from leaking the visitor
// function literal to the heap.
visitElfSectionsFn(
*(*multiboot.ElfSectionVisitor)(noEscape(unsafe.Pointer(&visitor))),
)
// If an error occurred while mapping the ELF sections, bail out
if err != nil {
return err
}
// Ensure that any pages mapped by the memory allocator using
// EarlyReserveRegion are copied to the new page directory.
for rsvAddr := earlyReserveLastUsed; rsvAddr < tempMappingAddr; rsvAddr += uintptr(mem.PageSize) {
page := PageFromAddress(rsvAddr)
frameAddr, err := translateFn(rsvAddr)
if err != nil {
return err
}
if err = pdt.Map(page, pmm.Frame(frameAddr>>mem.PageShift), FlagPresent|FlagRW); err != nil {
return err
}
}
// Activate the new PDT. After this point, the identity mapping for the
// physical memory addresses where the kernel is loaded becomes invalid.
pdt.Activate()
return nil
}
// noEscape hides a pointer from escape analysis. This function is copied over
// from runtime/stubs.go.
//
// The XOR with zero is a no-op at runtime, but it launders the pointer
// through a uintptr so the compiler can no longer prove that p escapes,
// keeping the pointed-to value off the heap.
//go:nosplit
func noEscape(p unsafe.Pointer) unsafe.Pointer {
x := uintptr(p)
return unsafe.Pointer(x ^ 0)
}

View File

@ -5,6 +5,7 @@ import (
"fmt"
"gopheros/kernel"
"gopheros/kernel/cpu"
"gopheros/kernel/hal/multiboot"
"gopheros/kernel/irq"
"gopheros/kernel/kfmt"
"gopheros/kernel/mem"
@ -165,7 +166,7 @@ func TestNonRecoverablePageFault(t *testing.T) {
}
}
func TestGPtHandler(t *testing.T) {
func TestGPFHandler(t *testing.T) {
defer func() {
readCR2Fn = cpu.ReadCR2
}()
@ -191,6 +192,9 @@ func TestGPtHandler(t *testing.T) {
func TestInit(t *testing.T) {
defer func() {
frameAllocator = nil
activePDTFn = cpu.ActivePDT
switchPDTFn = cpu.SwitchPDT
translateFn = Translate
mapTemporaryFn = MapTemporary
unmapFn = Unmap
handleExceptionWithCodeFn = irq.HandleExceptionWithCode
@ -199,6 +203,8 @@ func TestInit(t *testing.T) {
// reserve space for an allocated page
reservedPage := make([]byte, mem.PageSize)
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&emptyInfoData[0])))
t.Run("success", func(t *testing.T) {
// fill page with junk
for i := 0; i < len(reservedPage); i++ {
@ -209,11 +215,15 @@ func TestInit(t *testing.T) {
addr := uintptr(unsafe.Pointer(&reservedPage[0]))
return pmm.Frame(addr >> mem.PageShift), nil
})
activePDTFn = func() uintptr {
return uintptr(unsafe.Pointer(&reservedPage[0]))
}
switchPDTFn = func(_ uintptr) {}
unmapFn = func(p Page) *kernel.Error { return nil }
mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil }
handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {}
if err := Init(); err != nil {
if err := Init(0); err != nil {
t.Fatal(err)
}
@ -225,15 +235,45 @@ func TestInit(t *testing.T) {
}
})
t.Run("setupPDT fails", func(t *testing.T) {
expErr := &kernel.Error{Module: "test", Message: "out of memory"}
// Allow the PDT allocation to succeed and then return an error when
// trying to allocate the blank frame
SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
return pmm.InvalidFrame, expErr
})
if err := Init(0); err != expErr {
t.Fatalf("expected error: %v; got %v", expErr, err)
}
})
t.Run("blank page allocation error", func(t *testing.T) {
expErr := &kernel.Error{Module: "test", Message: "out of memory"}
SetFrameAllocator(func() (pmm.Frame, *kernel.Error) { return pmm.InvalidFrame, expErr })
// Allow the PDT allocation to succeed and then return an error when
// trying to allocate the blank frame
var allocCount int
SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
defer func() { allocCount++ }()
if allocCount == 0 {
addr := uintptr(unsafe.Pointer(&reservedPage[0]))
return pmm.Frame(addr >> mem.PageShift), nil
}
return pmm.InvalidFrame, expErr
})
activePDTFn = func() uintptr {
return uintptr(unsafe.Pointer(&reservedPage[0]))
}
switchPDTFn = func(_ uintptr) {}
unmapFn = func(p Page) *kernel.Error { return nil }
mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil }
handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {}
if err := Init(); err != expErr {
if err := Init(0); err != expErr {
t.Fatalf("expected error: %v; got %v", expErr, err)
}
})
@ -245,12 +285,207 @@ func TestInit(t *testing.T) {
addr := uintptr(unsafe.Pointer(&reservedPage[0]))
return pmm.Frame(addr >> mem.PageShift), nil
})
activePDTFn = func() uintptr {
return uintptr(unsafe.Pointer(&reservedPage[0]))
}
switchPDTFn = func(_ uintptr) {}
unmapFn = func(p Page) *kernel.Error { return nil }
mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), expErr }
handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {}
if err := Init(); err != expErr {
if err := Init(0); err != expErr {
t.Fatalf("expected error: %v; got %v", expErr, err)
}
})
}
// TestSetupPDTForKernel exercises setupPDTForKernel with stubbed-out
// allocator, CPU and multiboot hooks, covering the happy path as well as the
// error paths: section mapping failures, PDT init failures, and failures
// while copying early allocator reservations into the new PDT.
func TestSetupPDTForKernel(t *testing.T) {
	defer func() {
		// Restore the package-level hooks that the sub-tests stub out.
		frameAllocator = nil
		activePDTFn = cpu.ActivePDT
		switchPDTFn = cpu.SwitchPDT
		translateFn = Translate
		mapFn = Map
		mapTemporaryFn = MapTemporary
		unmapFn = Unmap
		earlyReserveLastUsed = tempMappingAddr
	}()

	// reserve space for an allocated page
	reservedPage := make([]byte, mem.PageSize)

	multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&emptyInfoData[0])))

	t.Run("map kernel sections", func(t *testing.T) {
		defer func() { visitElfSectionsFn = multiboot.VisitElfSections }()

		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return pmm.Frame(addr >> mem.PageShift), nil
		})
		activePDTFn = func() uintptr {
			return uintptr(unsafe.Pointer(&reservedPage[0]))
		}
		switchPDTFn = func(_ uintptr) {}
		translateFn = func(_ uintptr) (uintptr, *kernel.Error) { return 0xbadf00d000, nil }
		mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil }
		visitElfSectionsFn = func(v multiboot.ElfSectionVisitor) {
			v(".debug", 0, 0, uint64(mem.PageSize>>1)) // address < VMA; should be ignored
			v(".text", multiboot.ElfSectionExecutable, 0xbadc0ffee, uint64(mem.PageSize>>1))
			v(".data", multiboot.ElfSectionWritable, 0xbadc0ffee, uint64(mem.PageSize))
			v(".rodata", 0, 0xbadc0ffee, uint64(mem.PageSize<<1))
		}

		// Verify that each visited section is mapped with the expected flags.
		mapCount := 0
		mapFn = func(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
			defer func() { mapCount++ }()

			var expFlags PageTableEntryFlag

			switch mapCount {
			case 0:
				expFlags = FlagPresent
			case 1:
				expFlags = FlagPresent | FlagNoExecute | FlagRW
			case 2, 3:
				expFlags = FlagPresent | FlagNoExecute
			}

			if (flags & expFlags) != expFlags {
				t.Errorf("[map call %d] expected flags to be %d; got %d", mapCount, expFlags, flags)
			}

			return nil
		}

		if err := setupPDTForKernel(0x123); err != nil {
			t.Fatal(err)
		}

		// .text (1 page) + .data (1 page) + .rodata (2 pages); .debug ignored.
		if exp := 4; mapCount != exp {
			t.Errorf("expected Map to be called %d times; got %d", exp, mapCount)
		}
	})

	t.Run("map of kernel sections fails", func(t *testing.T) {
		defer func() { visitElfSectionsFn = multiboot.VisitElfSections }()
		expErr := &kernel.Error{Module: "test", Message: "map failed"}

		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return pmm.Frame(addr >> mem.PageShift), nil
		})
		activePDTFn = func() uintptr {
			return uintptr(unsafe.Pointer(&reservedPage[0]))
		}
		switchPDTFn = func(_ uintptr) {}
		translateFn = func(_ uintptr) (uintptr, *kernel.Error) { return 0xbadf00d000, nil }
		mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil }
		visitElfSectionsFn = func(v multiboot.ElfSectionVisitor) {
			v(".text", multiboot.ElfSectionExecutable, 0xbadc0ffee, uint64(mem.PageSize>>1))
		}
		mapFn = func(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
			return expErr
		}

		if err := setupPDTForKernel(0); err != expErr {
			t.Fatalf("expected error: %v; got %v", expErr, err)
		}
	})

	t.Run("copy allocator reservations to PDT", func(t *testing.T) {
		earlyReserveLastUsed = tempMappingAddr - uintptr(mem.PageSize)
		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return pmm.Frame(addr >> mem.PageShift), nil
		})
		activePDTFn = func() uintptr {
			return uintptr(unsafe.Pointer(&reservedPage[0]))
		}
		switchPDTFn = func(_ uintptr) {}
		translateFn = func(_ uintptr) (uintptr, *kernel.Error) { return 0xbadf00d000, nil }
		unmapFn = func(p Page) *kernel.Error { return nil }
		mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil }
		mapFn = func(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
			if exp := PageFromAddress(earlyReserveLastUsed); page != exp {
				t.Errorf("expected Map to be called with page %d; got %d", exp, page)
			}

			if exp := pmm.Frame(0xbadf00d000 >> mem.PageShift); frame != exp {
				t.Errorf("expected Map to be called with frame %d; got %d", exp, frame)
			}

			if flags&(FlagPresent|FlagRW) != (FlagPresent | FlagRW) {
				t.Error("expected Map to be called FlagPresent | FlagRW")
			}
			return nil
		}

		if err := setupPDTForKernel(0); err != nil {
			t.Fatal(err)
		}
	})

	t.Run("pdt init fails", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "translate failed"}

		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return pmm.Frame(addr >> mem.PageShift), nil
		})
		activePDTFn = func() uintptr { return 0 }
		mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return 0, expErr }

		if err := setupPDTForKernel(0); err != expErr {
			t.Fatalf("expected error: %v; got %v", expErr, err)
		}
	})

	t.Run("translation fails for page in reserved address space", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "translate failed"}

		earlyReserveLastUsed = tempMappingAddr - uintptr(mem.PageSize)
		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return pmm.Frame(addr >> mem.PageShift), nil
		})
		activePDTFn = func() uintptr {
			return uintptr(unsafe.Pointer(&reservedPage[0]))
		}
		translateFn = func(_ uintptr) (uintptr, *kernel.Error) {
			return 0, expErr
		}

		if err := setupPDTForKernel(0); err != expErr {
			t.Fatalf("expected error: %v; got %v", expErr, err)
		}
	})

	t.Run("map fails for page in reserved address space", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "map failed"}

		earlyReserveLastUsed = tempMappingAddr - uintptr(mem.PageSize)
		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return pmm.Frame(addr >> mem.PageShift), nil
		})
		activePDTFn = func() uintptr {
			return uintptr(unsafe.Pointer(&reservedPage[0]))
		}
		translateFn = func(_ uintptr) (uintptr, *kernel.Error) { return 0xbadf00d000, nil }
		mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil }
		mapFn = func(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error { return expErr }

		if err := setupPDTForKernel(0); err != expErr {
			t.Fatalf("expected error: %v; got %v", expErr, err)
		}
	})
}
var (
// emptyInfoData is a minimal multiboot info payload: just the size/reserved
// header followed by a single tag with type zero and length zero. It
// simulates a boot loader that supplies no tags (e.g. no ELF sections).
emptyInfoData = []byte{
0, 0, 0, 0, // size
0, 0, 0, 0, // reserved
0, 0, 0, 0, // tag with type zero and length zero
0, 0, 0, 0,
}
)

View File

@ -11,5 +11,5 @@ var multibootInfoPtr uintptr
// A global variable is passed as an argument to Kmain to prevent the compiler
// from inlining the actual call and removing Kmain from the generated .o file.
func main() {
// The three zero arguments stand in for the kernel start/end addresses and
// the kernel page offset that the real rt0 assembly code supplies to Kmain.
kmain.Kmain(multibootInfoPtr, 0, 0, 0)
}