1
0
mirror of https://github.com/taigrr/gopher-os synced 2025-01-18 04:43:13 -08:00

Use pwd as a workspace; move sources to src/gopheros and rewrite imports

By setting up pwd as a Go workspace, we can trim import paths from
something like "github.com/achilleasa/gopher-os/kernel" to just
"kernel".

These changes make forking easier and also allow us to move the code to
a different git hosting provider without having to rewrite the imports.
This commit is contained in:
Achilleas Anagnostopoulos
2017-07-01 20:37:09 +01:00
parent 7b93d01c6e
commit 8dfc5d4e92
61 changed files with 93 additions and 114 deletions

View File

@@ -0,0 +1,35 @@
package vmm
import (
"gopheros/kernel"
"gopheros/kernel/mem"
)
var (
	// earlyReserveLastUsed tracks the last reserved page address and is
	// decreased after each allocation request. Initially, it points to
	// tempMappingAddr which coincides with the end of the kernel address
	// space.
	earlyReserveLastUsed = tempMappingAddr
	// errEarlyReserveNoSpace is returned by EarlyReserveRegion when the
	// remaining address space below earlyReserveLastUsed cannot fit the
	// requested reservation.
	errEarlyReserveNoSpace = &kernel.Error{Module: "early_reserve", Message: "remaining virtual address space not large enough to satisfy reservation request"}
)
// EarlyReserveRegion reserves a page-aligned contiguous virtual memory region
// with the requested size in the kernel address space and returns its virtual
// address. If size is not a multiple of mem.PageSize it will be automatically
// rounded up.
//
// This function allocates regions starting at the end of the kernel address
// space. It should only be used during the early stages of kernel initialization.
func EarlyReserveRegion(size mem.Size) (uintptr, *kernel.Error) {
	// Round the requested size up to the next page boundary.
	if rem := size % mem.PageSize; rem != 0 {
		size += mem.PageSize - rem
	}
	// Growing the reserved area downwards past zero would underflow.
	if earlyReserveLastUsed < uintptr(size) {
		return 0, errEarlyReserveNoSpace
	}
	earlyReserveLastUsed -= uintptr(size)
	return earlyReserveLastUsed, nil
}

View File

@@ -0,0 +1,29 @@
package vmm
import (
"runtime"
"testing"
)
// TestEarlyReserveAmd64 checks that reservation sizes are rounded up to a
// full page and that exhausting the address space yields
// errEarlyReserveNoSpace.
func TestEarlyReserveAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}
	// Restore the allocator watermark once the test completes.
	saved := earlyReserveLastUsed
	defer func() { earlyReserveLastUsed = saved }()

	// With exactly one page available, a sub-page request must consume it all.
	earlyReserveLastUsed = 4096
	addr, err := EarlyReserveRegion(42)
	if err != nil {
		t.Fatal(err)
	}
	if addr != uintptr(0) {
		t.Fatal("expected reservation request to be rounded to nearest page")
	}
	// No space remains, so any further reservation must fail.
	if _, err = EarlyReserveRegion(1); err != errEarlyReserveNoSpace {
		t.Fatalf("expected to get errEarlyReserveNoSpace; got %v", err)
	}
}

View File

@@ -0,0 +1,89 @@
// +build amd64
package vmm
import "math"
const (
	// pageLevels indicates the number of page levels supported by the amd64 architecture.
	pageLevels = 4

	// ptePhysPageMask is a mask that allows us to extract the physical memory
	// address pointed to by a page table entry. For this particular architecture,
	// bits 12-51 contain the physical memory address.
	ptePhysPageMask = uintptr(0x000ffffffffff000)

	// tempMappingAddr is a reserved virtual page address used for
	// temporary physical page mappings (e.g. when mapping inactive PDT
	// pages). For amd64 this address uses the following table indices:
	// 510, 511, 511, 511.
	//
	// NOTE: the hex prefix is now lowercase (0x) for consistency with
	// ptePhysPageMask above; the value is unchanged.
	tempMappingAddr = uintptr(0xffffff7ffffff000)
)
var (
	// pdtVirtualAddr is a special virtual address that exploits the
	// recursive mapping used in the last PDT entry for each page directory
	// to allow accessing the PDT (P4) table using the system's MMU address
	// translation mechanism. By setting all page level bits to 1 the MMU
	// keeps following the last P4 entry for all page levels landing on the
	// P4. The value is math.MaxUint64 with the low 12 (page offset) bits
	// cleared, i.e. 0xfffffffffffff000.
	pdtVirtualAddr = uintptr(math.MaxUint64 &^ ((1 << 12) - 1))

	// pageLevelBits defines the number of virtual address bits that correspond to each
	// page level. For the amd64 architecture each PageLevel uses 9 bits which amounts to
	// 512 entries for each page level.
	pageLevelBits = [pageLevels]uint8{
		9,
		9,
		9,
		9,
	}

	// pageLevelShifts defines the shift required to access each page table component
	// of a virtual address.
	pageLevelShifts = [pageLevels]uint8{
		39, // P4 index
		30, // P3 index
		21, // P2 index
		12, // P1 index
	}
)
const (
	// FlagPresent is set when the page is available in memory and not swapped out.
	FlagPresent PageTableEntryFlag = 1 << iota
	// FlagRW is set if the page can be written to.
	FlagRW
	// FlagUserAccessible is set if user-mode processes can access this page. If
	// not set only kernel code can access this page.
	FlagUserAccessible
	// FlagWriteThroughCaching implies write-through caching when set and write-back
	// caching if cleared.
	FlagWriteThroughCaching
	// FlagDoNotCache prevents this page from being cached if set.
	FlagDoNotCache
	// FlagAccessed is set by the CPU when this page is accessed.
	FlagAccessed
	// FlagDirty is set by the CPU when this page is modified.
	FlagDirty
	// FlagHugePage is set when using 2Mb pages instead of 4K pages.
	FlagHugePage
	// FlagGlobal if set, prevents the TLB from flushing the cached memory address
	// for this page when swapping page tables by updating the CR3 register.
	FlagGlobal
	// FlagCopyOnWrite is used to implement copy-on-write functionality. This
	// flag and FlagRW are mutually exclusive.
	// NOTE: occupies bit 9, one of the bits the MMU leaves available to the OS.
	FlagCopyOnWrite = 1 << 9
	// FlagNoExecute if set, indicates that a page contains non-executable code.
	FlagNoExecute = 1 << 63
)

View File

@@ -0,0 +1,154 @@
package vmm
import (
"gopheros/kernel"
"gopheros/kernel/cpu"
"gopheros/kernel/mem"
"gopheros/kernel/mem/pmm"
"unsafe"
)
// ReservedZeroedFrame is a special zero-cleared frame allocated by the
// vmm package's Init function. The purpose of this frame is to assist
// in implementing on-demand memory allocation when mapping it in
// conjunction with the CopyOnWrite flag. Here is an example of how it
// can be used:
//
//	func ReserveOnDemand(start vmm.Page, pageCount int) *kernel.Error {
//		var err *kernel.Error
//		mapFlags := vmm.FlagPresent|vmm.FlagCopyOnWrite
//		for page := start; pageCount > 0; pageCount, page = pageCount-1, page+1 {
//			if err = vmm.Map(page, vmm.ReservedZeroedFrame, mapFlags); err != nil {
//				return err
//			}
//		}
//		return nil
//	}
//
// In the above example, page mappings are set up for the requested number of
// pages but no physical memory is reserved for their contents. A write to any
// of the above pages will trigger a page-fault causing a new frame to be
// allocated, cleared (the blank frame is copied to the new frame) and
// installed in-place with RW permissions.
var ReservedZeroedFrame pmm.Frame

var (
	// protectReservedZeroedPage, when true, makes Map reject RW mappings of
	// ReservedZeroedFrame and makes MapTemporary reject it entirely (the
	// frame must remain zero-filled for the copy-on-write scheme to work).
	protectReservedZeroedPage bool

	// nextAddrFn is used by tests to override the nextTableAddr
	// calculations used by Map. When compiling the kernel this function
	// will be automatically inlined.
	nextAddrFn = func(entryAddr uintptr) uintptr {
		return entryAddr
	}

	// flushTLBEntryFn is used by tests to override calls to flushTLBEntry
	// which will cause a fault if called in user-mode.
	flushTLBEntryFn = cpu.FlushTLBEntry

	// errNoHugePageSupport is returned when a page walk encounters a huge-page entry.
	errNoHugePageSupport = &kernel.Error{Module: "vmm", Message: "huge pages are not supported"}

	// errAttemptToRWMapReservedFrame is returned by Map/MapTemporary for
	// forbidden mappings of ReservedZeroedFrame.
	errAttemptToRWMapReservedFrame = &kernel.Error{Module: "vmm", Message: "reserved blank frame cannot be mapped with a RW flag"}
)
// Map establishes a mapping between a virtual page and a physical memory frame
// using the currently active page directory table. Calls to Map will use the
// supplied physical frame allocator to initialize missing page tables at each
// paging level supported by the MMU.
//
// Attempts to map ReservedZeroedFrame with a RW flag will result in an error.
func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
	if protectReservedZeroedPage && frame == ReservedZeroedFrame && (flags&FlagRW) != 0 {
		return errAttemptToRWMapReservedFrame
	}
	var err *kernel.Error
	walk(page.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
		// If we reached the last level all we need to do is to map the
		// frame in place and flag it as present and flush its TLB entry
		if pteLevel == pageLevels-1 {
			*pte = 0
			pte.SetFrame(frame)
			pte.SetFlags(flags)
			flushTLBEntryFn(page.Address())
			return true
		}
		// An intermediate entry marked as a huge page cannot be walked
		// any further; bail out with an error.
		if pte.HasFlags(FlagHugePage) {
			err = errNoHugePageSupport
			return false
		}
		// Next table does not yet exist; we need to allocate a
		// physical frame for it map it and clear its contents.
		if !pte.HasFlags(FlagPresent) {
			var newTableFrame pmm.Frame
			newTableFrame, err = frameAllocator()
			if err != nil {
				// Abort the walk; err is returned to the caller below.
				return false
			}
			*pte = 0
			pte.SetFrame(newTableFrame)
			pte.SetFlags(FlagPresent | FlagRW)
			// The next pte entry becomes available but we need to
			// make sure that the new page is properly cleared.
			// NOTE(review): shifting the pte's virtual address left by the
			// next level's bit count presumably yields the next table's
			// address under the recursive PDT mapping — confirm against
			// the walk implementation.
			nextTableAddr := (uintptr(unsafe.Pointer(pte)) << pageLevelBits[pteLevel+1])
			mem.Memset(nextAddrFn(nextTableAddr), 0, mem.PageSize)
		}
		return true
	})
	return err
}
// MapTemporary establishes a temporary RW mapping of a physical memory frame
// to a fixed virtual address overwriting any previous mapping. The temporary
// mapping mechanism is primarily used by the kernel to access and initialize
// inactive page tables.
//
// Attempts to map ReservedZeroedFrame will result in an error.
func MapTemporary(frame pmm.Frame) (Page, *kernel.Error) {
	// The reserved zeroed frame may never be mapped through this path
	// while protection is enabled.
	if protectReservedZeroedPage && frame == ReservedZeroedFrame {
		return 0, errAttemptToRWMapReservedFrame
	}
	tempPage := PageFromAddress(tempMappingAddr)
	if err := Map(tempPage, frame, FlagPresent|FlagRW); err != nil {
		return 0, err
	}
	return tempPage, nil
}
// Unmap removes a mapping previously installed via a call to Map or MapTemporary.
//
// Note that only the final-level entry is marked non-present; intermediate
// page tables are left allocated and present.
func Unmap(page Page) *kernel.Error {
	var err *kernel.Error
	walk(page.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
		// If we reached the last level all we need to do is to set the
		// page as non-present and flush its TLB entry
		if pteLevel == pageLevels-1 {
			pte.ClearFlags(FlagPresent)
			flushTLBEntryFn(page.Address())
			return true
		}
		// Next table is not present; this is an invalid mapping
		if !pte.HasFlags(FlagPresent) {
			err = ErrInvalidMapping
			return false
		}
		// Huge-page entries cannot be walked any further.
		if pte.HasFlags(FlagHugePage) {
			err = errNoHugePageSupport
			return false
		}
		return true
	})
	return err
}

View File

@@ -0,0 +1,270 @@
package vmm
import (
"gopheros/kernel"
"gopheros/kernel/mem"
"gopheros/kernel/mem/pmm"
"runtime"
"testing"
"unsafe"
)
// TestNextAddrFn pins the default nextAddrFn as the identity function.
func TestNextAddrFn(t *testing.T) {
	in := uintptr(123)
	got := nextAddrFn(in)
	if got != in {
		t.Fatalf("expected nextAddrFn to return %v; got %v", in, got)
	}
}
// TestMapTemporaryAmd64 verifies that MapTemporary installs a RW mapping for
// the supplied frame at tempMappingAddr, allocating intermediate page tables
// along the way. ptePtrFn, nextAddrFn and flushTLBEntryFn are stubbed so the
// walk operates on in-memory fake page tables.
func TestMapTemporaryAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}
	// Restore the overridden hooks once the test completes.
	defer func(origPtePtr func(uintptr) unsafe.Pointer, origNextAddrFn func(uintptr) uintptr, origFlushTLBEntryFn func(uintptr)) {
		ptePtrFn = origPtePtr
		nextAddrFn = origNextAddrFn
		flushTLBEntryFn = origFlushTLBEntryFn
		frameAllocator = nil
	}(ptePtrFn, nextAddrFn, flushTLBEntryFn)
	// One fake physical page per paging level.
	var physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry
	nextPhysPage := 0
	// allocFn returns pages from index 1; we keep index 0 for the P4 entry
	SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
		nextPhysPage++
		pageAddr := unsafe.Pointer(&physPages[nextPhysPage][0])
		return pmm.Frame(uintptr(pageAddr) >> mem.PageShift), nil
	})
	pteCallCount := 0
	ptePtrFn = func(entry uintptr) unsafe.Pointer {
		pteCallCount++
		// The last 12 bits encode the page table offset in bytes
		// which we need to convert to a uint64 entry
		pteIndex := (entry & uintptr(mem.PageSize-1)) >> mem.PointerShift
		return unsafe.Pointer(&physPages[pteCallCount-1][pteIndex])
	}
	nextAddrFn = func(entry uintptr) uintptr {
		return uintptr(unsafe.Pointer(&physPages[nextPhysPage][0]))
	}
	flushTLBEntryCallCount := 0
	flushTLBEntryFn = func(uintptr) {
		flushTLBEntryCallCount++
	}
	// The temporary mapping address breaks down to:
	// p4 index: 510
	// p3 index: 511
	// p2 index: 511
	// p1 index: 511
	frame := pmm.Frame(123)
	levelIndices := []uint{510, 511, 511, 511}
	page, err := MapTemporary(frame)
	if err != nil {
		t.Fatal(err)
	}
	if got := page.Address(); got != tempMappingAddr {
		t.Fatalf("expected temp mapping virtual address to be %x; got %x", tempMappingAddr, got)
	}
	for level, physPage := range physPages {
		pte := physPage[levelIndices[level]]
		if !pte.HasFlags(FlagPresent | FlagRW) {
			t.Errorf("[pte at level %d] expected entry to have FlagPresent and FlagRW set", level)
		}
		switch {
		case level < pageLevels-1:
			// Intermediate entries must point to the next level's fake page.
			if exp, got := pmm.Frame(uintptr(unsafe.Pointer(&physPages[level+1][0]))>>mem.PageShift), pte.Frame(); got != exp {
				t.Errorf("[pte at level %d] expected entry frame to be %d; got %d", level, exp, got)
			}
		default:
			// The last pte entry should point to frame
			if got := pte.Frame(); got != frame {
				t.Errorf("[pte at level %d] expected entry frame to be %d; got %d", level, frame, got)
			}
		}
	}
	if exp := 1; flushTLBEntryCallCount != exp {
		t.Errorf("expected flushTLBEntry to be called %d times; got %d", exp, flushTLBEntryCallCount)
	}
}
// TestMapTemporaryErrorsAmd64 exercises the failure paths of Map and
// MapTemporary: huge-page entries, allocator failures, and attempts to map
// the reserved zeroed frame.
func TestMapTemporaryErrorsAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}
	// Restore the overridden hooks once the test completes.
	defer func(origPtePtr func(uintptr) unsafe.Pointer, origNextAddrFn func(uintptr) uintptr, origFlushTLBEntryFn func(uintptr)) {
		ptePtrFn = origPtePtr
		nextAddrFn = origNextAddrFn
		flushTLBEntryFn = origFlushTLBEntryFn
	}(ptePtrFn, nextAddrFn, flushTLBEntryFn)
	var physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry
	// The reserved virt address uses the following page level indices: 510, 511, 511, 511
	p4Index := 510
	frame := pmm.Frame(123)
	t.Run("encounter huge page", func(t *testing.T) {
		physPages[0][p4Index].SetFlags(FlagPresent | FlagHugePage)
		ptePtrFn = func(entry uintptr) unsafe.Pointer {
			// The last 12 bits encode the page table offset in bytes
			// which we need to convert to a uint64 entry
			pteIndex := (entry & uintptr(mem.PageSize-1)) >> mem.PointerShift
			return unsafe.Pointer(&physPages[0][pteIndex])
		}
		if _, err := MapTemporary(frame); err != errNoHugePageSupport {
			t.Fatalf("expected to get errNoHugePageSupport; got %v", err)
		}
	})
	t.Run("allocFn returns an error", func(t *testing.T) {
		defer func() { frameAllocator = nil }()
		// Clear the huge-page entry installed by the previous sub-test.
		physPages[0][p4Index] = 0
		expErr := &kernel.Error{Module: "test", Message: "out of memory"}
		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
			return 0, expErr
		})
		if _, err := MapTemporary(frame); err != expErr {
			t.Fatalf("got unexpected error %v", err)
		}
	})
	t.Run("map BlankReservedFrame RW", func(t *testing.T) {
		defer func() { protectReservedZeroedPage = false }()
		protectReservedZeroedPage = true
		if err := Map(Page(0), ReservedZeroedFrame, FlagRW); err != errAttemptToRWMapReservedFrame {
			t.Fatalf("expected errAttemptToRWMapReservedFrame; got: %v", err)
		}
	})
	t.Run("temp-map BlankReservedFrame RW", func(t *testing.T) {
		defer func() { protectReservedZeroedPage = false }()
		protectReservedZeroedPage = true
		if _, err := MapTemporary(ReservedZeroedFrame); err != errAttemptToRWMapReservedFrame {
			t.Fatalf("expected errAttemptToRWMapReservedFrame; got: %v", err)
		}
	})
}
// TestUnmapAmd64 verifies that Unmap clears the present flag only on the
// final-level entry (leaving intermediate tables intact) and flushes the TLB
// exactly once.
func TestUnmapAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}
	// Restore the overridden hooks once the test completes.
	defer func(origPtePtr func(uintptr) unsafe.Pointer, origFlushTLBEntryFn func(uintptr)) {
		ptePtrFn = origPtePtr
		flushTLBEntryFn = origFlushTLBEntryFn
	}(ptePtrFn, flushTLBEntryFn)
	var (
		physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry
		frame     = pmm.Frame(123)
	)
	// Emulate a page mapped to virtAddr 0 across all page levels
	for level := 0; level < pageLevels; level++ {
		physPages[level][0].SetFlags(FlagPresent | FlagRW)
		if level < pageLevels-1 {
			physPages[level][0].SetFrame(pmm.Frame(uintptr(unsafe.Pointer(&physPages[level+1][0])) >> mem.PageShift))
		} else {
			physPages[level][0].SetFrame(frame)
		}
	}
	pteCallCount := 0
	ptePtrFn = func(entry uintptr) unsafe.Pointer {
		pteCallCount++
		return unsafe.Pointer(&physPages[pteCallCount-1][0])
	}
	flushTLBEntryCallCount := 0
	flushTLBEntryFn = func(uintptr) {
		flushTLBEntryCallCount++
	}
	if err := Unmap(PageFromAddress(0)); err != nil {
		t.Fatal(err)
	}
	for level, physPage := range physPages {
		pte := physPage[0]
		switch {
		case level < pageLevels-1:
			// Intermediate levels must keep their present flag and frame.
			if !pte.HasFlags(FlagPresent) {
				t.Errorf("[pte at level %d] expected entry to retain have FlagPresent set", level)
			}
			if exp, got := pmm.Frame(uintptr(unsafe.Pointer(&physPages[level+1][0]))>>mem.PageShift), pte.Frame(); got != exp {
				t.Errorf("[pte at level %d] expected entry frame to still be %d; got %d", level, exp, got)
			}
		default:
			// Only the final level entry loses its present flag.
			if pte.HasFlags(FlagPresent) {
				t.Errorf("[pte at level %d] expected entry not to have FlagPresent set", level)
			}
			// The last pte entry should still point to frame
			if got := pte.Frame(); got != frame {
				t.Errorf("[pte at level %d] expected entry frame to be %d; got %d", level, frame, got)
			}
		}
	}
	if exp := 1; flushTLBEntryCallCount != exp {
		t.Errorf("expected flushTLBEntry to be called %d times; got %d", exp, flushTLBEntryCallCount)
	}
}
// TestUnmapErrorsAmd64 exercises the Unmap failure paths: huge-page entries
// and virtual addresses that were never mapped.
func TestUnmapErrorsAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}
	// Restore the overridden hooks once the test completes.
	defer func(origPtePtr func(uintptr) unsafe.Pointer, origNextAddrFn func(uintptr) uintptr, origFlushTLBEntryFn func(uintptr)) {
		ptePtrFn = origPtePtr
		nextAddrFn = origNextAddrFn
		flushTLBEntryFn = origFlushTLBEntryFn
	}(ptePtrFn, nextAddrFn, flushTLBEntryFn)
	var physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry
	t.Run("encounter huge page", func(t *testing.T) {
		physPages[0][0].SetFlags(FlagPresent | FlagHugePage)
		ptePtrFn = func(entry uintptr) unsafe.Pointer {
			// The last 12 bits encode the page table offset in bytes
			// which we need to convert to a uint64 entry
			pteIndex := (entry & uintptr(mem.PageSize-1)) >> mem.PointerShift
			return unsafe.Pointer(&physPages[0][pteIndex])
		}
		if err := Unmap(PageFromAddress(0)); err != errNoHugePageSupport {
			t.Fatalf("expected to get errNoHugePageSupport; got %v", err)
		}
	})
	t.Run("virtual address not mapped", func(t *testing.T) {
		// A non-present P4 entry means the address was never mapped.
		physPages[0][0].ClearFlags(FlagPresent)
		if err := Unmap(PageFromAddress(0)); err != ErrInvalidMapping {
			t.Fatalf("expected to get ErrInvalidMapping; got %v", err)
		}
	})
}

View File

@@ -0,0 +1,19 @@
package vmm
import "gopheros/kernel/mem"
// Page describes a virtual memory page index.
type Page uintptr

// Address returns the page-aligned virtual memory address where this Page
// begins. (The previous doc comment incorrectly described the result as a
// pointer; it is a plain uintptr value.)
func (p Page) Address() uintptr {
	// Receiver renamed from "f" (a leftover from the Frame type) to "p"
	// to match the Page type; behavior is unchanged.
	return uintptr(p << mem.PageShift)
}
// PageFromAddress returns the Page that contains the given virtual address.
// Both page-aligned and unaligned addresses are handled; an unaligned input
// is rounded down to the start of the page that contains it.
func PageFromAddress(virtAddr uintptr) Page {
	pageStart := virtAddr &^ uintptr(mem.PageSize-1)
	return Page(pageStart >> mem.PageShift)
}

View File

@@ -0,0 +1,34 @@
package vmm
import (
"gopheros/kernel/mem"
"testing"
)
// TestPageMethods checks that Page.Address returns the page index shifted by
// the page size for a range of indices.
func TestPageMethods(t *testing.T) {
	for idx := uint64(0); idx < 128; idx++ {
		p := Page(idx)
		want := uintptr(idx << mem.PageShift)
		if got := p.Address(); got != want {
			t.Errorf("expected page (%d, index: %d) call to Address() to return %x; got %x", p, idx, want, got)
		}
	}
}
// TestPageFromAddress checks that aligned and unaligned virtual addresses
// are rounded down to the page that contains them.
func TestPageFromAddress(t *testing.T) {
	cases := []struct {
		input   uintptr
		expPage Page
	}{
		{0, Page(0)},
		{4095, Page(0)},
		{4096, Page(1)},
		{4123, Page(1)},
	}
	for i, tc := range cases {
		if got := PageFromAddress(tc.input); got != tc.expPage {
			t.Errorf("[spec %d] expected returned page to be %v; got %v", i, tc.expPage, got)
		}
	}
}

View File

@@ -0,0 +1,135 @@
package vmm
import (
"gopheros/kernel"
"gopheros/kernel/cpu"
"gopheros/kernel/mem"
"gopheros/kernel/mem/pmm"
"unsafe"
)
var (
	// activePDTFn is used by tests to override calls to activePDT which
	// will cause a fault if called in user-mode.
	activePDTFn = cpu.ActivePDT
	// switchPDTFn is used by tests to override calls to switchPDT which
	// will cause a fault if called in user-mode.
	switchPDTFn = cpu.SwitchPDT
	// mapFn is used by tests and is automatically inlined by the compiler.
	mapFn = Map
	// mapTemporaryFn is used by tests and is automatically inlined by the compiler.
	mapTemporaryFn = MapTemporary
	// unmapFn is used by tests and is automatically inlined by the compiler.
	unmapFn = Unmap
)
// PageDirectoryTable describes the top-most table in a multi-level paging scheme.
type PageDirectoryTable struct {
	// pdtFrame is the physical frame where this PDT resides.
	pdtFrame pmm.Frame
}
// Init sets up the page table directory starting at the supplied physical
// address. If the supplied frame does not match the currently active PDT, then
// Init assumes that this is a new page table directory that needs
// bootstrapping. In such a case, a temporary mapping is established so that
// Init can:
// - call mem.Memset to clear the frame contents
// - setup a recursive mapping for the last table entry to the page itself.
func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame) *kernel.Error {
	pdt.pdtFrame = pdtFrame
	// Check active PDT physical address. If it matches the input pdt then
	// nothing more needs to be done
	activePdtAddr := activePDTFn()
	if pdtFrame.Address() == activePdtAddr {
		return nil
	}
	// Create a temporary mapping for the pdt frame so we can work on it
	pdtPage, err := mapTemporaryFn(pdtFrame)
	if err != nil {
		return err
	}
	// Clear the page contents and setup recursive mapping for the last PDT entry
	mem.Memset(pdtPage.Address(), 0, mem.PageSize)
	lastPdtEntry := (*pageTableEntry)(unsafe.Pointer(pdtPage.Address() + (((1 << pageLevelBits[0]) - 1) << mem.PointerShift)))
	*lastPdtEntry = 0
	lastPdtEntry.SetFlags(FlagPresent | FlagRW)
	lastPdtEntry.SetFrame(pdtFrame)
	// Remove temporary mapping.
	// NOTE(review): the unmap error is deliberately discarded here
	// (best-effort cleanup); confirm this is acceptable for callers.
	unmapFn(pdtPage)
	return nil
}
// Map establishes a mapping between a virtual page and a physical memory frame
// using this PDT. This method behaves in a similar fashion to the global Map()
// function with the difference that it also supports inactive page PDTs by
// establishing a temporary mapping so that Map() can access the inactive PDT
// entries.
func (pdt PageDirectoryTable) Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
	var (
		activePdtFrame   = pmm.Frame(activePDTFn() >> mem.PageShift)
		lastPdtEntryAddr uintptr
		lastPdtEntry     *pageTableEntry
	)
	// If this table is not active we need to temporarily map it to the
	// last entry in the active PDT so we can access it using the recursive
	// virtual address scheme.
	if activePdtFrame != pdt.pdtFrame {
		lastPdtEntryAddr = activePdtFrame.Address() + (((1 << pageLevelBits[0]) - 1) << mem.PointerShift)
		lastPdtEntry = (*pageTableEntry)(unsafe.Pointer(lastPdtEntryAddr))
		lastPdtEntry.SetFrame(pdt.pdtFrame)
		flushTLBEntryFn(lastPdtEntryAddr)
	}
	err := mapFn(page, frame, flags)
	// Restore the recursive mapping to the active PDT and flush the TLB
	// entry again so the stale translation is not reused.
	if activePdtFrame != pdt.pdtFrame {
		lastPdtEntry.SetFrame(activePdtFrame)
		flushTLBEntryFn(lastPdtEntryAddr)
	}
	return err
}
// Unmap removes a mapping previously installed by a call to Map() on this PDT.
// This method behaves in a similar fashion to the global Unmap() function with
// the difference that it also supports inactive page PDTs by establishing a
// temporary mapping so that Unmap() can access the inactive PDT entries.
func (pdt PageDirectoryTable) Unmap(page Page) *kernel.Error {
	var (
		activePdtFrame   = pmm.Frame(activePDTFn() >> mem.PageShift)
		lastPdtEntryAddr uintptr
		lastPdtEntry     *pageTableEntry
	)
	// If this table is not active we need to temporarily map it to the
	// last entry in the active PDT so we can access it using the recursive
	// virtual address scheme.
	if activePdtFrame != pdt.pdtFrame {
		lastPdtEntryAddr = activePdtFrame.Address() + (((1 << pageLevelBits[0]) - 1) << mem.PointerShift)
		lastPdtEntry = (*pageTableEntry)(unsafe.Pointer(lastPdtEntryAddr))
		lastPdtEntry.SetFrame(pdt.pdtFrame)
		flushTLBEntryFn(lastPdtEntryAddr)
	}
	err := unmapFn(page)
	// Restore the recursive mapping to the active PDT and flush the TLB
	// entry again so the stale translation is not reused.
	if activePdtFrame != pdt.pdtFrame {
		lastPdtEntry.SetFrame(activePdtFrame)
		flushTLBEntryFn(lastPdtEntryAddr)
	}
	return err
}
// Activate enables this page directory table and flushes the TLB
// (loading a new PDT address via switchPDT has that side effect).
func (pdt PageDirectoryTable) Activate() {
	switchPDTFn(pdt.pdtFrame.Address())
}

View File

@@ -0,0 +1,330 @@
package vmm
import (
"gopheros/kernel"
"gopheros/kernel/mem"
"gopheros/kernel/mem/pmm"
"runtime"
"testing"
"unsafe"
)
// TestPageDirectoryTableInitAmd64 exercises PageDirectoryTable.Init for the
// already-active PDT, inactive PDT and temporary-mapping-failure cases using
// stubbed vmm hooks.
func TestPageDirectoryTableInitAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}
	// Restore the overridden hooks once the test completes.
	defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMapTemporary func(pmm.Frame) (Page, *kernel.Error), origUnmap func(Page) *kernel.Error) {
		flushTLBEntryFn = origFlushTLBEntry
		activePDTFn = origActivePDT
		mapTemporaryFn = origMapTemporary
		unmapFn = origUnmap
	}(flushTLBEntryFn, activePDTFn, mapTemporaryFn, unmapFn)
	t.Run("already mapped PDT", func(t *testing.T) {
		var (
			pdt      PageDirectoryTable
			pdtFrame = pmm.Frame(123)
		)
		activePDTFn = func() uintptr {
			return pdtFrame.Address()
		}
		// Init must short-circuit without touching the frame contents.
		mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) {
			t.Fatal("unexpected call to MapTemporary")
			return 0, nil
		}
		unmapFn = func(_ Page) *kernel.Error {
			t.Fatal("unexpected call to Unmap")
			return nil
		}
		if err := pdt.Init(pdtFrame); err != nil {
			t.Fatal(err)
		}
	})
	t.Run("not mapped PDT", func(t *testing.T) {
		var (
			pdt      PageDirectoryTable
			pdtFrame = pmm.Frame(123)
			physPage [mem.PageSize >> mem.PointerShift]pageTableEntry
		)
		// Fill phys page with random junk
		mem.Memset(uintptr(unsafe.Pointer(&physPage[0])), 0xf0, mem.PageSize)
		activePDTFn = func() uintptr {
			return 0
		}
		mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) {
			return PageFromAddress(uintptr(unsafe.Pointer(&physPage[0]))), nil
		}
		flushTLBEntryFn = func(_ uintptr) {}
		unmapCallCount := 0
		unmapFn = func(_ Page) *kernel.Error {
			unmapCallCount++
			return nil
		}
		if err := pdt.Init(pdtFrame); err != nil {
			t.Fatal(err)
		}
		if unmapCallCount != 1 {
			t.Fatalf("expected Unmap to be called 1 time; called %d", unmapCallCount)
		}
		// Init must have zeroed every entry except the recursive one.
		for i := 0; i < len(physPage)-1; i++ {
			if physPage[i] != 0 {
				t.Errorf("expected PDT entry %d to be cleared; got %x", i, physPage[i])
			}
		}
		// The last page should be recursively mapped to the PDT
		lastPdtEntry := physPage[len(physPage)-1]
		if !lastPdtEntry.HasFlags(FlagPresent | FlagRW) {
			t.Fatal("expected last PDT entry to have FlagPresent and FlagRW set")
		}
		if lastPdtEntry.Frame() != pdtFrame {
			t.Fatalf("expected last PDT entry to be recursively mapped to physical frame %x; got %x", pdtFrame, lastPdtEntry.Frame())
		}
	})
	t.Run("temporary mapping failure", func(t *testing.T) {
		var (
			pdt      PageDirectoryTable
			pdtFrame = pmm.Frame(123)
		)
		activePDTFn = func() uintptr {
			return 0
		}
		expErr := &kernel.Error{Module: "test", Message: "error mapping page"}
		mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) {
			return 0, expErr
		}
		// A failed temporary mapping must be propagated without cleanup.
		unmapFn = func(_ Page) *kernel.Error {
			t.Fatal("unexpected call to Unmap")
			return nil
		}
		if err := pdt.Init(pdtFrame); err != expErr {
			t.Fatalf("expected to get error: %v; got %v", *expErr, err)
		}
	})
}
// TestPageDirectoryTableMapAmd64 verifies that PageDirectoryTable.Map only
// performs the temporary recursive re-mapping (with its two TLB flushes)
// when the target PDT is not the active one.
func TestPageDirectoryTableMapAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}
	// Restore the overridden hooks once the test completes.
	defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMap func(Page, pmm.Frame, PageTableEntryFlag) *kernel.Error) {
		flushTLBEntryFn = origFlushTLBEntry
		activePDTFn = origActivePDT
		mapFn = origMap
	}(flushTLBEntryFn, activePDTFn, mapFn)
	t.Run("already mapped PDT", func(t *testing.T) {
		var (
			pdtFrame = pmm.Frame(123)
			pdt      = PageDirectoryTable{pdtFrame: pdtFrame}
			page     = PageFromAddress(uintptr(100 * mem.Mb))
		)
		activePDTFn = func() uintptr {
			return pdtFrame.Address()
		}
		mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag) *kernel.Error {
			return nil
		}
		flushCallCount := 0
		flushTLBEntryFn = func(_ uintptr) {
			flushCallCount++
		}
		if err := pdt.Map(page, pmm.Frame(321), FlagRW); err != nil {
			t.Fatal(err)
		}
		// Active PDT: no re-mapping, so no TLB flushes are expected.
		if exp := 0; flushCallCount != exp {
			t.Fatalf("expected flushTLBEntry to be called %d times; called %d", exp, flushCallCount)
		}
	})
	t.Run("not mapped PDT", func(t *testing.T) {
		var (
			pdtFrame       = pmm.Frame(123)
			pdt            = PageDirectoryTable{pdtFrame: pdtFrame}
			page           = PageFromAddress(uintptr(100 * mem.Mb))
			activePhysPage [mem.PageSize >> mem.PointerShift]pageTableEntry
			activePdtFrame = pmm.Frame(uintptr(unsafe.Pointer(&activePhysPage[0])) >> mem.PageShift)
		)
		// Initially, activePhysPage is recursively mapped to itself
		activePhysPage[len(activePhysPage)-1].SetFlags(FlagPresent | FlagRW)
		activePhysPage[len(activePhysPage)-1].SetFrame(activePdtFrame)
		activePDTFn = func() uintptr {
			return activePdtFrame.Address()
		}
		mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag) *kernel.Error {
			return nil
		}
		flushCallCount := 0
		flushTLBEntryFn = func(_ uintptr) {
			switch flushCallCount {
			case 0:
				// the first time we flush the tlb entry, the last entry of
				// the active pdt should be pointing to pdtFrame
				if got := activePhysPage[len(activePhysPage)-1].Frame(); got != pdtFrame {
					t.Fatalf("expected last PDT entry of active PDT to be re-mapped to frame %x; got %x", pdtFrame, got)
				}
			case 1:
				// the second time we flush the tlb entry, the last entry of
				// the active pdt should be pointing back to activePdtFrame
				if got := activePhysPage[len(activePhysPage)-1].Frame(); got != activePdtFrame {
					t.Fatalf("expected last PDT entry of active PDT to be mapped back frame %x; got %x", activePdtFrame, got)
				}
			}
			flushCallCount++
		}
		if err := pdt.Map(page, pmm.Frame(321), FlagRW); err != nil {
			t.Fatal(err)
		}
		if exp := 2; flushCallCount != exp {
			t.Fatalf("expected flushTLBEntry to be called %d times; called %d", exp, flushCallCount)
		}
	})
}
// TestPageDirectoryTableUnmapAmd64 verifies that PageDirectoryTable.Unmap
// only performs the temporary recursive re-mapping (with its two TLB
// flushes) when the target PDT is not the active one.
func TestPageDirectoryTableUnmapAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}
	// Restore the overridden hooks once the test completes.
	defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origUnmap func(Page) *kernel.Error) {
		flushTLBEntryFn = origFlushTLBEntry
		activePDTFn = origActivePDT
		unmapFn = origUnmap
	}(flushTLBEntryFn, activePDTFn, unmapFn)
	t.Run("already mapped PDT", func(t *testing.T) {
		var (
			pdtFrame = pmm.Frame(123)
			pdt      = PageDirectoryTable{pdtFrame: pdtFrame}
			page     = PageFromAddress(uintptr(100 * mem.Mb))
		)
		activePDTFn = func() uintptr {
			return pdtFrame.Address()
		}
		unmapFn = func(_ Page) *kernel.Error {
			return nil
		}
		flushCallCount := 0
		flushTLBEntryFn = func(_ uintptr) {
			flushCallCount++
		}
		if err := pdt.Unmap(page); err != nil {
			t.Fatal(err)
		}
		// Active PDT: no re-mapping, so no TLB flushes are expected.
		if exp := 0; flushCallCount != exp {
			t.Fatalf("expected flushTLBEntry to be called %d times; called %d", exp, flushCallCount)
		}
	})
	t.Run("not mapped PDT", func(t *testing.T) {
		var (
			pdtFrame       = pmm.Frame(123)
			pdt            = PageDirectoryTable{pdtFrame: pdtFrame}
			page           = PageFromAddress(uintptr(100 * mem.Mb))
			activePhysPage [mem.PageSize >> mem.PointerShift]pageTableEntry
			activePdtFrame = pmm.Frame(uintptr(unsafe.Pointer(&activePhysPage[0])) >> mem.PageShift)
		)
		// Initially, activePhysPage is recursively mapped to itself
		activePhysPage[len(activePhysPage)-1].SetFlags(FlagPresent | FlagRW)
		activePhysPage[len(activePhysPage)-1].SetFrame(activePdtFrame)
		activePDTFn = func() uintptr {
			return activePdtFrame.Address()
		}
		unmapFn = func(_ Page) *kernel.Error {
			return nil
		}
		flushCallCount := 0
		flushTLBEntryFn = func(_ uintptr) {
			switch flushCallCount {
			case 0:
				// the first time we flush the tlb entry, the last entry of
				// the active pdt should be pointing to pdtFrame
				if got := activePhysPage[len(activePhysPage)-1].Frame(); got != pdtFrame {
					t.Fatalf("expected last PDT entry of active PDT to be re-mapped to frame %x; got %x", pdtFrame, got)
				}
			case 1:
				// the second time we flush the tlb entry, the last entry of
				// the active pdt should be pointing back to activePdtFrame
				if got := activePhysPage[len(activePhysPage)-1].Frame(); got != activePdtFrame {
					t.Fatalf("expected last PDT entry of active PDT to be mapped back frame %x; got %x", activePdtFrame, got)
				}
			}
			flushCallCount++
		}
		if err := pdt.Unmap(page); err != nil {
			t.Fatal(err)
		}
		if exp := 2; flushCallCount != exp {
			t.Fatalf("expected flushTLBEntry to be called %d times; called %d", exp, flushCallCount)
		}
	})
}
// TestPageDirectoryTableActivateAmd64 checks that Activate hands the PDT
// frame address to switchPDT exactly once.
func TestPageDirectoryTableActivateAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}
	origSwitchPDT := switchPDTFn
	defer func() { switchPDTFn = origSwitchPDT }()

	pdt := PageDirectoryTable{pdtFrame: pmm.Frame(123)}
	callCount := 0
	switchPDTFn = func(_ uintptr) {
		callCount++
	}
	pdt.Activate()
	if exp := 1; callCount != exp {
		t.Fatalf("expected switchPDT to be called %d times; called %d", exp, callCount)
	}
}

View File

@@ -0,0 +1,74 @@
package vmm
import (
"gopheros/kernel"
"gopheros/kernel/mem"
"gopheros/kernel/mem/pmm"
)
var (
	// ErrInvalidMapping is returned when trying to lookup a virtual memory address that is not yet mapped.
	ErrInvalidMapping = &kernel.Error{Module: "vmm", Message: "virtual address does not point to a mapped physical page"}
)

// PageTableEntryFlag describes a flag that can be applied to a page table entry.
type PageTableEntryFlag uintptr

// pageTableEntry describes a page table entry. These entries encode
// a physical frame address and a set of flags. The actual format
// of the entry and flags is architecture-dependent (the frame bits are
// selected via the arch-specific ptePhysPageMask constant).
type pageTableEntry uintptr
// HasFlags returns true if this entry has all the input flags set.
func (pte pageTableEntry) HasFlags(flags PageTableEntryFlag) bool {
	want := uintptr(flags)
	return uintptr(pte)&want == want
}
// HasAnyFlag returns true if this entry has at least one of the input flags set.
func (pte pageTableEntry) HasAnyFlag(flags PageTableEntryFlag) bool {
	masked := uintptr(pte) & uintptr(flags)
	return masked != 0
}
// SetFlags sets the input list of flags to the page table entry.
func (pte *pageTableEntry) SetFlags(flags PageTableEntryFlag) {
	*pte |= pageTableEntry(flags)
}
// ClearFlags unsets the input list of flags from the page table entry.
func (pte *pageTableEntry) ClearFlags(flags PageTableEntryFlag) {
	*pte &^= pageTableEntry(flags)
}
// Frame returns the physical page frame that this page table entry points to.
func (pte pageTableEntry) Frame() pmm.Frame {
	physAddr := uintptr(pte) & ptePhysPageMask
	return pmm.Frame(physAddr >> mem.PageShift)
}
// SetFrame updates the page table entry to point to the given physical frame.
func (pte *pageTableEntry) SetFrame(frame pmm.Frame) {
	cleared := *pte &^ pageTableEntry(ptePhysPageMask)
	*pte = cleared | pageTableEntry(frame.Address())
}
// pteForAddress returns the final page table entry that corresponds to a
// particular virtual address. The function performs a page table walk till it
// reaches the final page table entry, returning ErrInvalidMapping if any page
// along the way is not present.
func pteForAddress(virtAddr uintptr) (*pageTableEntry, *kernel.Error) {
	var (
		entry *pageTableEntry
		err   *kernel.Error
	)

	walk(virtAddr, func(_ uint8, pte *pageTableEntry) bool {
		// A present entry keeps the walk going; a missing one aborts it
		// and flags the address as unmapped.
		if pte.HasFlags(FlagPresent) {
			entry = pte
			return true
		}

		entry, err = nil, ErrInvalidMapping
		return false
	})

	return entry, err
}

View File

@@ -0,0 +1,60 @@
package vmm
import (
"gopheros/kernel/mem/pmm"
"testing"
)
// TestPageTableEntryFlags exercises setting, querying and clearing flag bits
// on a page table entry.
func TestPageTableEntryFlags(t *testing.T) {
	var pte pageTableEntry

	flag1 := PageTableEntryFlag(1 << 10)
	flag2 := PageTableEntryFlag(1 << 21)
	both := flag1 | flag2

	// A zero-valued entry carries no flags.
	if pte.HasAnyFlag(both) {
		t.Fatalf("expected HasAnyFlags to return false")
	}

	// With both flags set, both query methods must succeed.
	pte.SetFlags(both)
	if !pte.HasAnyFlag(both) {
		t.Fatalf("expected HasAnyFlags to return true")
	}
	if !pte.HasFlags(both) {
		t.Fatalf("expected HasFlags to return true")
	}

	// Clearing one flag leaves the other set: HasAnyFlag still succeeds
	// while HasFlags no longer does.
	pte.ClearFlags(flag1)
	if !pte.HasAnyFlag(both) {
		t.Fatalf("expected HasAnyFlags to return true")
	}
	if pte.HasFlags(both) {
		t.Fatalf("expected HasFlags to return false")
	}

	// Clearing everything restores the zero state.
	pte.ClearFlags(both)
	if pte.HasAnyFlag(both) {
		t.Fatalf("expected HasAnyFlags to return false")
	}
	if pte.HasFlags(both) {
		t.Fatalf("expected HasFlags to return false")
	}
}
// TestPageTableEntryFrameEncoding verifies that a frame stored with SetFrame
// is recovered unchanged by Frame.
func TestPageTableEntryFrameEncoding(t *testing.T) {
	var pte pageTableEntry

	physFrame := pmm.Frame(123)
	pte.SetFrame(physFrame)

	if got := pte.Frame(); got != physFrame {
		t.Fatalf("expected pte.Frame() to return %v; got %v", physFrame, got)
	}
}

View File

@@ -0,0 +1,19 @@
package vmm
import "gopheros/kernel"
// Translate returns the physical address that corresponds to the supplied
// virtual address or ErrInvalidMapping if the virtual address does not
// correspond to a mapped physical address.
func Translate(virtAddr uintptr) (uintptr, *kernel.Error) {
	pte, err := pteForAddress(virtAddr)
	if err != nil {
		return 0, err
	}

	// Combine the entry's physical frame address with the page-offset bits
	// of the virtual address (everything below the last level's shift).
	offsetMask := (uintptr(1) << pageLevelShifts[pageLevels-1]) - 1
	return pte.Frame().Address() + (virtAddr & offsetMask), nil
}

View File

@@ -0,0 +1,72 @@
package vmm
import (
"gopheros/kernel/mem/pmm"
"runtime"
"testing"
"unsafe"
)
// TestTranslateAmd64 checks Translate against mocked page table walks, where
// each spec toggles the present flag at one of the four amd64 paging levels.
// A single missing level must produce ErrInvalidMapping; a fully present walk
// must yield frame address + page offset.
func TestTranslateAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}

	// Restore the real pte pointer generator once the test completes.
	defer func(origPtePtr func(uintptr) unsafe.Pointer) {
		ptePtrFn = origPtePtr
	}(ptePtrFn)

	// the virtual address just contains the page offset
	virtAddr := uintptr(1234)
	expFrame := pmm.Frame(42)
	expPhysAddr := expFrame.Address() + virtAddr

	// Each spec lists, per paging level, whether the pte at that level is
	// present during the walk.
	specs := [][pageLevels]bool{
		{true, true, true, true},
		{false, true, true, true},
		{true, false, true, true},
		{true, true, false, true},
		{true, true, true, false},
	}

	for specIndex, spec := range specs {
		pteCallCount := 0

		// Serve a synthetic pte for each walk level; it is flagged present
		// only if the current spec says so. The walk aborts at the first
		// non-present level, so pteCallCount never exceeds pageLevels.
		ptePtrFn = func(entry uintptr) unsafe.Pointer {
			var pte pageTableEntry
			pte.SetFrame(expFrame)
			if specs[specIndex][pteCallCount] {
				pte.SetFlags(FlagPresent)
			}
			pteCallCount++
			return unsafe.Pointer(&pte)
		}

		// An error is expected if any page level contains a non-present page
		expError := false
		for _, hasMapping := range spec {
			if !hasMapping {
				expError = true
				break
			}
		}

		physAddr, err := Translate(virtAddr)
		switch {
		case expError && err != ErrInvalidMapping:
			t.Errorf("[spec %d] expected to get ErrInvalidMapping; got %v", specIndex, err)
		case !expError && err != nil:
			t.Errorf("[spec %d] unexpected error %v", specIndex, err)
		case !expError && physAddr != expPhysAddr:
			t.Errorf("[spec %d] expected phys addr to be 0x%x; got 0x%x", specIndex, expPhysAddr, physAddr)
		}
	}
}
/*
phys, err := vmm.Translate(uintptr(100 * mem.Mb))
if err != nil {
early.Printf("err: %s\n", err.Error())
} else {
early.Printf("phys: 0x%x\n", phys)
}
*/

View File

@@ -0,0 +1,155 @@
package vmm
import (
"gopheros/kernel"
"gopheros/kernel/cpu"
"gopheros/kernel/irq"
"gopheros/kernel/kfmt/early"
"gopheros/kernel/mem"
"gopheros/kernel/mem/pmm"
)
var (
	// frameAllocator points to a frame allocator function registered using
	// SetFrameAllocator.
	frameAllocator FrameAllocatorFn

	// the following functions are mocked by tests and are automatically
	// inlined by the compiler.
	handleExceptionWithCodeFn = irq.HandleExceptionWithCode
	readCR2Fn                 = cpu.ReadCR2

	// errUnrecoverableFault is panicked with when a page fault or general
	// protection fault cannot be recovered from.
	errUnrecoverableFault = &kernel.Error{Module: "vmm", Message: "page/gpf fault"}
)
// FrameAllocatorFn is a function that can allocate physical frames.
type FrameAllocatorFn func() (pmm.Frame, *kernel.Error)

// SetFrameAllocator registers a frame allocator function that will be used by
// the vmm code when new physical frames need to be allocated.
// NOTE(review): the assignment is not synchronized; presumably this is only
// called once during early kernel init — confirm before concurrent use.
func SetFrameAllocator(allocFn FrameAllocatorFn) {
	frameAllocator = allocFn
}
// pageFaultHandler services page fault exceptions. A fault on a present,
// non-writable page that carries FlagCopyOnWrite is transparently recovered
// by copying the page contents into a freshly allocated frame and remapping
// the faulting page as writable. Any other fault is non-recoverable and is
// routed to nonRecoverablePageFault, which prints diagnostics and panics.
func pageFaultHandler(errorCode uint64, frame *irq.Frame, regs *irq.Regs) {
	var (
		faultAddress = uintptr(readCR2Fn())
		faultPage    = PageFromAddress(faultAddress)
		pageEntry    *pageTableEntry
	)

	// Lookup entry for the page where the fault occurred
	walk(faultPage.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
		nextIsPresent := pte.HasFlags(FlagPresent)

		// Only capture the entry at the final (page) level, and only if present.
		if pteLevel == pageLevels-1 && nextIsPresent {
			pageEntry = pte
		}

		// Abort walk if the next page table entry is missing
		return nextIsPresent
	})

	// CoW is supported for RO pages with the CoW flag set
	if pageEntry != nil && !pageEntry.HasFlags(FlagRW) && pageEntry.HasFlags(FlagCopyOnWrite) {
		var (
			copy    pmm.Frame
			tmpPage Page
			err     *kernel.Error
		)

		// nonRecoverablePageFault panics, so the error branches below never
		// fall through to the recovery path.
		if copy, err = frameAllocator(); err != nil {
			nonRecoverablePageFault(faultAddress, errorCode, frame, regs, err)
		} else if tmpPage, err = mapTemporaryFn(copy); err != nil {
			nonRecoverablePageFault(faultAddress, errorCode, frame, regs, err)
		} else {
			// Copy page contents, mark as RW and remove CoW flag
			mem.Memcopy(faultPage.Address(), tmpPage.Address(), mem.PageSize)
			unmapFn(tmpPage)

			// Update mapping to point to the new frame, flag it as RW and
			// remove the CoW flag
			pageEntry.ClearFlags(FlagCopyOnWrite)
			pageEntry.SetFlags(FlagPresent | FlagRW)
			pageEntry.SetFrame(copy)
			flushTLBEntryFn(faultPage.Address())

			// Fault recovered; retry the instruction that caused the fault
			return
		}
	}

	nonRecoverablePageFault(faultAddress, errorCode, frame, regs, errUnrecoverableFault)
}
// nonRecoverablePageFault prints the faulting address, a human-readable
// decoding of the CPU-supplied page fault error code and the register/frame
// state, then panics with the supplied error. It never returns.
func nonRecoverablePageFault(faultAddress uintptr, errorCode uint64, frame *irq.Frame, regs *irq.Regs, err *kernel.Error) {
	early.Printf("\nPage fault while accessing address: 0x%16x\nReason: ", faultAddress)

	// Decode the error code into a reason string (no % verbs, safe to Printf).
	var reason string
	switch errorCode {
	case 0:
		reason = "read from non-present page"
	case 1:
		reason = "page protection violation (read)"
	case 2:
		reason = "write to non-present page"
	case 3:
		reason = "page protection violation (write)"
	case 4:
		reason = "page-fault in user-mode"
	case 8:
		reason = "page table has reserved bit set"
	case 16:
		reason = "instruction fetch"
	default:
		reason = "unknown"
	}
	early.Printf(reason)

	early.Printf("\n\nRegisters:\n")
	regs.Print()
	frame.Print()

	// TODO: Revisit this when user-mode tasks are implemented
	panic(err)
}
// generalProtectionFaultHandler dumps the faulting address (read from CR2),
// the register set and the stack frame to the active terminal and then
// panics with errUnrecoverableFault. It never returns.
func generalProtectionFaultHandler(_ uint64, frame *irq.Frame, regs *irq.Regs) {
	early.Printf("\nGeneral protection fault while accessing address: 0x%x\n", readCR2Fn())
	early.Printf("Registers:\n")
	regs.Print()
	frame.Print()

	// TODO: Revisit this when user-mode tasks are implemented
	panic(errUnrecoverableFault)
}
// reserveZeroedFrame reserves a physical frame to be used together with
// FlagCopyOnWrite for lazy allocation requests. The frame is temporarily
// mapped, zeroed out and unmapped again before being published as
// ReservedZeroedFrame.
func reserveZeroedFrame() *kernel.Error {
	var err *kernel.Error

	if ReservedZeroedFrame, err = frameAllocator(); err != nil {
		return err
	}

	tempPage, mapErr := mapTemporaryFn(ReservedZeroedFrame)
	if mapErr != nil {
		return mapErr
	}

	mem.Memset(tempPage.Address(), 0, mem.PageSize)
	unmapFn(tempPage)

	// From this point on, ReservedZeroedFrame cannot be mapped with a RW flag
	protectReservedZeroedPage = true

	return nil
}
// Init initializes the vmm system and installs paging-related exception
// handlers.
func Init() *kernel.Error {
	err := reserveZeroedFrame()
	if err != nil {
		return err
	}

	// Hook the page fault and general protection fault exceptions.
	handleExceptionWithCodeFn(irq.PageFaultException, pageFaultHandler)
	handleExceptionWithCodeFn(irq.GPFException, generalProtectionFaultHandler)

	return nil
}

View File

@@ -0,0 +1,281 @@
package vmm
import (
"bytes"
"fmt"
"gopheros/kernel"
"gopheros/kernel/cpu"
"gopheros/kernel/driver/video/console"
"gopheros/kernel/hal"
"gopheros/kernel/irq"
"gopheros/kernel/mem"
"gopheros/kernel/mem/pmm"
"strings"
"testing"
"unsafe"
)
// TestRecoverablePageFault drives pageFaultHandler through a table of page
// table entry states. Only a present, non-RW entry with the CoW flag set
// (and with working frame allocation/mapping) is recoverable; every other
// spec must panic via nonRecoverablePageFault.
func TestRecoverablePageFault(t *testing.T) {
	var (
		frame      irq.Frame
		regs       irq.Regs
		pageEntry  pageTableEntry
		origPage   = make([]byte, mem.PageSize)
		clonedPage = make([]byte, mem.PageSize)
		err        = &kernel.Error{Module: "test", Message: "something went wrong"}
	)

	// Restore all mocked package-level hooks once the test completes.
	defer func(origPtePtr func(uintptr) unsafe.Pointer) {
		ptePtrFn = origPtePtr
		readCR2Fn = cpu.ReadCR2
		frameAllocator = nil
		mapTemporaryFn = MapTemporary
		unmapFn = Unmap
		flushTLBEntryFn = cpu.FlushTLBEntry
	}(ptePtrFn)

	specs := []struct {
		pteFlags   PageTableEntryFlag
		allocError *kernel.Error
		mapError   *kernel.Error
		expPanic   bool
	}{
		// Missing page
		{0, nil, nil, true},
		// Page is present but CoW flag not set
		{FlagPresent, nil, nil, true},
		// Page is present but both CoW and RW flags set
		{FlagPresent | FlagRW | FlagCopyOnWrite, nil, nil, true},
		// Page is present with CoW flag set but allocating a page copy fails
		{FlagPresent | FlagCopyOnWrite, err, nil, true},
		// Page is present with CoW flag set but mapping the page copy fails
		{FlagPresent | FlagCopyOnWrite, nil, err, true},
		// Page is present with CoW flag set
		{FlagPresent | FlagCopyOnWrite, nil, nil, false},
	}

	mockTTY()

	// The handler always sees our synthetic pte, faults on origPage's
	// address and "allocates" clonedPage as the CoW copy target.
	ptePtrFn = func(entry uintptr) unsafe.Pointer { return unsafe.Pointer(&pageEntry) }
	readCR2Fn = func() uint64 { return uint64(uintptr(unsafe.Pointer(&origPage[0]))) }
	unmapFn = func(_ Page) *kernel.Error { return nil }
	flushTLBEntryFn = func(_ uintptr) {}

	for specIndex, spec := range specs {
		t.Run(fmt.Sprint(specIndex), func(t *testing.T) {
			defer func() {
				// Non-panicking specs must also have copied origPage into
				// clonedPage byte for byte.
				err := recover()
				if spec.expPanic && err == nil {
					t.Error("expected a panic")
				} else if !spec.expPanic {
					if err != nil {
						t.Error("unexpected panic")
						return
					}

					for i := 0; i < len(origPage); i++ {
						if origPage[i] != clonedPage[i] {
							t.Errorf("expected clone page to be a copy of the original page; mismatch at index %d", i)
						}
					}
				}
			}()

			mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), spec.mapError }
			SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
				addr := uintptr(unsafe.Pointer(&clonedPage[0]))
				return pmm.Frame(addr >> mem.PageShift), spec.allocError
			})

			// Reset both pages before each spec run.
			for i := 0; i < len(origPage); i++ {
				origPage[i] = byte(i % 256)
				clonedPage[i] = 0
			}

			pageEntry = 0
			pageEntry.SetFlags(spec.pteFlags)

			// error code 2 decodes to "write to non-present page".
			pageFaultHandler(2, &frame, &regs)
		})
	}
}
// TestNonRecoverablePageFault checks that nonRecoverablePageFault prints a
// human-readable reason matching each page fault error code and panics with
// the supplied error.
//
// Fix: the original test asserted on the terminal output *after* calling
// nonRecoverablePageFault, but the handler unconditionally panics, so that
// assertion was unreachable dead code. The output check now runs inside the
// deferred recover block, where it actually executes.
func TestNonRecoverablePageFault(t *testing.T) {
	// Each spec maps a CPU page fault error code to the reason string that
	// must appear in the terminal output.
	specs := []struct {
		errCode   uint64
		expReason string
	}{
		{
			0,
			"read from non-present page",
		},
		{
			1,
			"page protection violation (read)",
		},
		{
			2,
			"write to non-present page",
		},
		{
			3,
			"page protection violation (write)",
		},
		{
			4,
			"page-fault in user-mode",
		},
		{
			8,
			"page table has reserved bit set",
		},
		{
			16,
			"instruction fetch",
		},
		{
			0xf00,
			"unknown",
		},
	}

	var (
		regs  irq.Regs
		frame irq.Frame
	)

	for specIndex, spec := range specs {
		t.Run(fmt.Sprint(specIndex), func(t *testing.T) {
			fb := mockTTY()

			defer func() {
				// The handler must panic with the error we passed in.
				if err := recover(); err != errUnrecoverableFault {
					t.Errorf("expected a panic with errUnrecoverableFault; got %v", err)
				}

				// Verify the decoded fault reason after recovering from
				// the expected panic (the call below never returns).
				if got := readTTY(fb); !strings.Contains(got, spec.expReason) {
					t.Errorf("expected reason %q; got output:\n%q", spec.expReason, got)
				}
			}()

			nonRecoverablePageFault(0xbadf00d000, spec.errCode, &frame, &regs, errUnrecoverableFault)
		})
	}
}
// TestGPtHandler ensures the general protection fault handler reads the
// faulting address via the CR2 hook and panics with errUnrecoverableFault.
func TestGPtHandler(t *testing.T) {
	// Restore the real CR2 reader when the test finishes.
	defer func() { readCR2Fn = cpu.ReadCR2 }()

	// Report a fake faulting address through CR2.
	readCR2Fn = func() uint64 { return 0xbadf00d000 }

	// The handler is expected to panic; registered after the restore defer
	// so it runs first (LIFO), matching the original ordering.
	defer func() {
		if err := recover(); err != errUnrecoverableFault {
			t.Errorf("expected a panic with errUnrecoverableFault; got %v", err)
		}
	}()

	mockTTY()

	var (
		regs  irq.Regs
		frame irq.Frame
	)
	generalProtectionFaultHandler(0, &frame, &regs)
}
// TestInit exercises Init: the success path must zero out the reserved frame
// and install the exception handlers, while allocation or mapping failures
// for the zeroed frame must be propagated back to the caller.
func TestInit(t *testing.T) {
	// Restore the mocked package-level hooks when the test completes.
	defer func() {
		frameAllocator = nil
		mapTemporaryFn = MapTemporary
		unmapFn = Unmap
		handleExceptionWithCodeFn = irq.HandleExceptionWithCode
	}()

	// reserve space for an allocated page
	reservedPage := make([]byte, mem.PageSize)

	t.Run("success", func(t *testing.T) {
		// fill page with junk
		for i := 0; i < len(reservedPage); i++ {
			reservedPage[i] = byte(i % 256)
		}

		// The allocator hands out reservedPage's frame; map/unmap are no-ops.
		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return pmm.Frame(addr >> mem.PageShift), nil
		})
		unmapFn = func(p Page) *kernel.Error { return nil }
		mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil }
		handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {}

		if err := Init(); err != nil {
			t.Fatal(err)
		}

		// reserved page should be zeroed
		for i := 0; i < len(reservedPage); i++ {
			if reservedPage[i] != 0 {
				t.Errorf("expected reserved page to be zeroed; got byte %d at index %d", reservedPage[i], i)
			}
		}
	})

	t.Run("blank page allocation error", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "out of memory"}

		// The allocator fails; Init must surface the allocation error.
		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) { return pmm.InvalidFrame, expErr })
		unmapFn = func(p Page) *kernel.Error { return nil }
		mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil }
		handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {}

		if err := Init(); err != expErr {
			t.Fatalf("expected error: %v; got %v", expErr, err)
		}
	})

	t.Run("blank page mapping error", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "map failed"}

		// Allocation succeeds but the temporary mapping fails; Init must
		// surface the mapping error.
		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return pmm.Frame(addr >> mem.PageShift), nil
		})
		unmapFn = func(p Page) *kernel.Error { return nil }
		mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), expErr }
		handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {}

		if err := Init(); err != expErr {
			t.Fatalf("expected error: %v; got %v", expErr, err)
		}
	})
}
// readTTY extracts the printable characters from an EGA-style framebuffer
// (character/attribute byte pairs), emitting a newline whenever an empty
// cell is followed by a cell that contains more text.
func readTTY(fb []byte) string {
	var sb bytes.Buffer

	for pos := 0; pos < len(fb); pos += 2 {
		if ch := fb[pos]; ch != 0 {
			sb.WriteByte(ch)
			continue
		}

		// Empty cell: only break the line if more text follows.
		if pos+2 < len(fb) && fb[pos+2] != 0 {
			sb.WriteByte('\n')
		}
	}

	return sb.String()
}
// mockTTY attaches the active terminal to an in-memory 80x25 EGA framebuffer
// so tests can capture early.Printf output. The raw framebuffer is returned
// so callers can inspect it (e.g. via readTTY).
func mockTTY() []byte {
	// Mock a tty to handle early.Printf output; 160*25 = 80 columns x 25
	// rows of 2-byte (char + attribute) cells.
	mockConsoleFb := make([]byte, 160*25)
	mockConsole := &console.Ega{}
	mockConsole.Init(80, 25, uintptr(unsafe.Pointer(&mockConsoleFb[0])))
	hal.ActiveTerminal.AttachTo(mockConsole)

	return mockConsoleFb
}

View File

@@ -0,0 +1,55 @@
package vmm
import (
"gopheros/kernel/mem"
"unsafe"
)
var (
	// ptePtrFn returns a pointer to the supplied entry address. It is
	// used by tests to override the generated page table entry pointers so
	// walk() can be properly tested. When compiling the kernel this function
	// will be automatically inlined.
	ptePtrFn = func(entryAddr uintptr) unsafe.Pointer {
		return unsafe.Pointer(entryAddr)
	}
)

// pageTableWalker is a function that can be passed to the walk method. The
// function receives the current page level and page table entry as its
// arguments. If the function returns false, then the page walk is aborted.
type pageTableWalker func(pteLevel uint8, pte *pageTableEntry) bool
// walk performs a page table walk for the given virtual address. It calls the
// supplied walkFn with the page table entry that corresponds to each page
// table level. If walkFn returns false then the walk is aborted.
func walk(virtAddr uintptr, walkFn pageTableWalker) {
	var (
		level                            uint8
		tableAddr, entryAddr, entryIndex uintptr
		ok                               bool
	)

	// tableAddr is initially set to the recursively mapped virtual address for the
	// last entry in the top-most page table. Dereferencing a pointer to this address
	// will allow us to access the entries of that table.
	for level, tableAddr = uint8(0), pdtVirtualAddr; level < pageLevels; level, tableAddr = level+1, entryAddr {
		// Extract the bits from virtual address that correspond to the
		// index in this level's page table
		entryIndex = (virtAddr >> pageLevelShifts[level]) & ((1 << pageLevelBits[level]) - 1)

		// The entry lives entryIndex pointer-sized slots into the table
		// mapped at tableAddr.
		entryAddr = tableAddr + (entryIndex << mem.PointerShift)

		if ok = walkFn(level, (*pageTableEntry)(ptePtrFn(entryAddr))); !ok {
			return
		}

		// Shift left by the number of bits for this paging level to get
		// the virtual address of the table pointed to by entryAddr; this adds
		// a new level of indirection to the recursive mapping.
		entryAddr <<= pageLevelBits[level]
	}
}

View File

@@ -0,0 +1,75 @@
package vmm
import (
"gopheros/kernel/mem"
"runtime"
"testing"
"unsafe"
)
// TestPtePtrFn checks that the default ptePtrFn is an identity conversion
// from an address to an unsafe.Pointer.
func TestPtePtrFn(t *testing.T) {
	// Dummy test to keep coverage happy
	addr := uintptr(123)
	exp := unsafe.Pointer(addr)
	if got := ptePtrFn(addr); exp != got {
		t.Fatalf("expected ptePtrFn to return %v; got %v", exp, got)
	}
}
// TestWalkAmd64 verifies that walk() visits one page table entry per amd64
// paging level and that each generated entry address encodes the expected
// recursive-mapping indices plus the in-page entry offset for the target
// virtual address.
func TestWalkAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}

	// Restore the real pte pointer generator when the test completes.
	defer func(origPtePtr func(uintptr) unsafe.Pointer) {
		ptePtrFn = origPtePtr
	}(ptePtrFn)

	// This address breaks down to:
	// p4 index: 1
	// p3 index: 2
	// p2 index: 3
	// p1 index: 4
	// offset : 1024
	targetAddr := uintptr(0x8080604400)

	sizeofPteEntry := uintptr(unsafe.Sizeof(pageTableEntry(0)))

	// Expected index value per paging level for each generated entry
	// address, with the in-page byte offset of the entry in the last column.
	expEntryAddrBits := [pageLevels][pageLevels + 1]uintptr{
		{511, 511, 511, 511, 1 * sizeofPteEntry},
		{511, 511, 511, 1, 2 * sizeofPteEntry},
		{511, 511, 1, 2, 3 * sizeofPteEntry},
		{511, 1, 2, 3, 4 * sizeofPteEntry},
	}

	pteCallCount := 0
	ptePtrFn = func(entry uintptr) unsafe.Pointer {
		// walk must not request more entries than there are paging levels.
		if pteCallCount >= pageLevels {
			t.Fatalf("unexpected call to ptePtrFn; already called %d times", pageLevels)
		}

		// Check the index bits encoded for every paging level.
		for i := 0; i < pageLevels; i++ {
			pteIndex := (entry >> pageLevelShifts[i]) & ((1 << pageLevelBits[i]) - 1)
			if pteIndex != expEntryAddrBits[pteCallCount][i] {
				t.Errorf("[ptePtrFn call %d] expected pte entry for level %d to use offset %d; got %d", pteCallCount, i, expEntryAddrBits[pteCallCount][i], pteIndex)
			}
		}

		// Check the page offset
		pteIndex := entry & ((1 << mem.PageShift) - 1)
		if pteIndex != expEntryAddrBits[pteCallCount][pageLevels] {
			t.Errorf("[ptePtrFn call %d] expected pte offset to be %d; got %d", pteCallCount, expEntryAddrBits[pteCallCount][pageLevels], pteIndex)
		}

		pteCallCount++

		return unsafe.Pointer(uintptr(0xf00))
	}

	walkFnCallCount := 0
	walk(targetAddr, func(level uint8, entry *pageTableEntry) bool {
		walkFnCallCount++
		// Abort the walk on the final level to also exercise the early-exit path.
		return walkFnCallCount != pageLevels
	})

	if pteCallCount != pageLevels {
		t.Errorf("expected ptePtrFn to be called %d times; got %d", pageLevels, pteCallCount)
	}
}