
Use pwd as a workspace; move sources to src/gopheros and rewrite imports

By setting up pwd as a Go workspace, we can trim import paths from
something like "github.com/achilleasa/gopher-os/kernel" to just
"kernel".

These changes make forking easier and also allow us to move the code to
a different git hosting provider without having to rewrite the imports.
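
For illustration, the rewrite applied across the tree looks like this (hypothetical file shown; the old prefix is taken from the message above):

-import "github.com/achilleasa/gopher-os/kernel/mem"
+import "gopheros/kernel/mem"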
This commit is contained in:
Achilleas Anagnostopoulos
2017-07-01 20:37:09 +01:00
parent 7b93d01c6e
commit 8dfc5d4e92
61 changed files with 93 additions and 114 deletions

View File

@@ -0,0 +1,17 @@
// +build amd64

package mem
const (
// PointerShift is equal to log2(unsafe.Sizeof(uintptr)). The pointer
// size for this architecture is defined as (1 << PointerShift).
PointerShift = 3
// PageShift is equal to log2(PageSize). This constant is used when
// we need to convert a physical address to a page number (shift right by PageShift)
// and vice-versa.
PageShift = 12
// PageSize defines the system's page size in bytes.
PageSize = Size(1 << PageShift)
)
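
A quick sketch of how these constants fit together for address/page arithmetic (illustrative helpers, not part of the package):

// pageForAddr returns the page number that contains addr.
func pageForAddr(addr uintptr) uintptr { return addr >> PageShift }

// addrForPage returns the base address of the given page number.
func addrForPage(page uintptr) uintptr { return page << PageShift }

// e.g. with PageShift = 12: pageForAddr(0x1f42) == 1, addrForPage(1) == 0x1000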

View File

@@ -0,0 +1,49 @@
package mem
import (
"reflect"
"unsafe"
)
// Memset sets size bytes at the given address to the supplied value. The implementation
// is based on bytes.Repeat; instead of using a for loop, this function uses
// log2(size) copy calls which should give us a speed boost as page addresses
// are always aligned.
func Memset(addr uintptr, value byte, size Size) {
if size == 0 {
return
}
// overlay a slice on top of this address region
target := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
Len: int(size),
Cap: int(size),
Data: addr,
}))
// Set first element and make log2(size) optimized copies
target[0] = value
for index := Size(1); index < size; index *= 2 {
copy(target[index:], target[:index])
}
}
// Memcopy copies size bytes from src to dst.
func Memcopy(src, dst uintptr, size Size) {
if size == 0 {
return
}
srcSlice := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
Len: int(size),
Cap: int(size),
Data: src,
}))
dstSlice := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
Len: int(size),
Cap: int(size),
Data: dst,
}))
copy(dstSlice, srcSlice)
}
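
As an aside, the doubling-copy trick used by Memset is easy to verify in plain safe Go; a minimal standalone sketch (nothing here is part of the kernel):

package main

import "fmt"

func main() {
	buf := make([]byte, 4096)
	buf[0] = 0xFF
	calls := 0
	// each copy doubles the initialized prefix: 1, 2, 4, ... bytes
	for i := 1; i < len(buf); i *= 2 {
		copy(buf[i:], buf[:i])
		calls++
	}
	fmt.Println(calls) // prints 12 (log2(4096)) instead of looping 4096 times
}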

View File

@@ -0,0 +1,52 @@
package mem
import (
"testing"
"unsafe"
)
func TestMemset(t *testing.T) {
// memset with a 0 size should be a no-op
Memset(uintptr(0), 0x00, 0)
for pageCount := uint32(1); pageCount <= 10; pageCount++ {
buf := make([]byte, PageSize<<pageCount)
for i := 0; i < len(buf); i++ {
buf[i] = 0xFE
}
addr := uintptr(unsafe.Pointer(&buf[0]))
Memset(addr, 0x00, Size(len(buf)))
for i := 0; i < len(buf); i++ {
if got := buf[i]; got != 0x00 {
t.Errorf("[block with %d pages] expected byte: %d to be 0x00; got 0x%x", pageCount, i, got)
}
}
}
}
func TestMemcopy(t *testing.T) {
// memcopy with a 0 size should be a no-op
Memcopy(uintptr(0), uintptr(0), 0)
var (
src = make([]byte, PageSize)
dst = make([]byte, PageSize)
)
for i := 0; i < len(src); i++ {
src[i] = byte(i % 256)
}
Memcopy(
uintptr(unsafe.Pointer(&src[0])),
uintptr(unsafe.Pointer(&dst[0])),
PageSize,
)
for i := 0; i < len(src); i++ {
if got := dst[i]; got != src[i] {
t.Errorf("value mismatch between src and dst at index %d", i)
}
}
}

View File

@@ -0,0 +1,326 @@
package allocator
import (
"gopheros/kernel"
"gopheros/kernel/hal/multiboot"
"gopheros/kernel/kfmt/early"
"gopheros/kernel/mem"
"gopheros/kernel/mem/pmm"
"gopheros/kernel/mem/vmm"
"math"
"reflect"
"unsafe"
)
var (
// bitmapAllocator is a BitmapAllocator instance that serves as the
// primary allocator for reserving pages.
bitmapAllocator BitmapAllocator
errBitmapAllocOutOfMemory = &kernel.Error{Module: "bitmap_alloc", Message: "out of memory"}
errBitmapAllocFrameNotManaged = &kernel.Error{Module: "bitmap_alloc", Message: "frame not managed by this allocator"}
errBitmapAllocDoubleFree = &kernel.Error{Module: "bitmap_alloc", Message: "frame is already free"}
// The following functions are used by tests to mock calls to the vmm package
// and are automatically inlined by the compiler.
reserveRegionFn = vmm.EarlyReserveRegion
mapFn = vmm.Map
)
type markAs bool
const (
markReserved markAs = false
markFree = true
)
type framePool struct {
// startFrame is the frame number for the first page in this pool.
// each free bitmap entry i corresponds to frame (startFrame + i).
startFrame pmm.Frame
// endFrame tracks the last frame in the pool. The total number of
// frames is given by: (endFrame - startFrame) + 1
endFrame pmm.Frame
// freeCount tracks the available pages in this pool. The allocator
// can use this field to skip fully allocated pools without the need
// to scan the free bitmap.
freeCount uint32
// freeBitmap tracks used/free pages in the pool.
freeBitmap []uint64
freeBitmapHdr reflect.SliceHeader
}
// BitmapAllocator implements a physical frame allocator that tracks frame
// reservations across the available memory pools using bitmaps.
type BitmapAllocator struct {
// totalPages tracks the total number of pages across all pools.
totalPages uint32
// reservedPages tracks the number of reserved pages across all pools.
reservedPages uint32
pools []framePool
poolsHdr reflect.SliceHeader
}
// init allocates space for the allocator structures using the early bootmem
// allocator and flags any allocated pages as reserved.
func (alloc *BitmapAllocator) init() *kernel.Error {
if err := alloc.setupPoolBitmaps(); err != nil {
return err
}
alloc.reserveKernelFrames()
alloc.reserveEarlyAllocatorFrames()
alloc.printStats()
return nil
}
// setupPoolBitmaps uses the early allocator and vmm region reservation helper
// to initialize the list of available pools and their free bitmap slices.
func (alloc *BitmapAllocator) setupPoolBitmaps() *kernel.Error {
var (
err *kernel.Error
sizeofPool = unsafe.Sizeof(framePool{})
pageSizeMinus1 = uint64(mem.PageSize - 1)
requiredBitmapBytes mem.Size
)
// Detect available memory regions and calculate their pool bitmap
// requirements.
multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool {
if region.Type != multiboot.MemAvailable {
return true
}
alloc.poolsHdr.Len++
alloc.poolsHdr.Cap++
// Reported addresses may not be page-aligned; round up to get
// the start frame and round down to get the end frame
regionStartFrame := pmm.Frame(((region.PhysAddress + pageSizeMinus1) & ^pageSizeMinus1) >> mem.PageShift)
regionEndFrame := pmm.Frame(((region.PhysAddress+region.Length) & ^pageSizeMinus1)>>mem.PageShift) - 1
pageCount := uint32(regionEndFrame - regionStartFrame + 1)
alloc.totalPages += pageCount
// To represent the free page bitmap we need pageCount bits. Since our
// slice uses uint64 for storing the bitmap we need to round up the
// required bits so they are a multiple of 64 bits
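// (e.g. pageCount = 159 rounds up to 192 bits = 24 bytes = 3 uint64 blocks)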
requiredBitmapBytes += mem.Size(((pageCount + 63) &^ 63) >> 3)
return true
})
// Reserve enough pages to hold the allocator state
requiredBytes := mem.Size(((uint64(uintptr(alloc.poolsHdr.Len)*sizeofPool) + uint64(requiredBitmapBytes)) + pageSizeMinus1) & ^pageSizeMinus1)
requiredPages := requiredBytes >> mem.PageShift
alloc.poolsHdr.Data, err = reserveRegionFn(requiredBytes)
if err != nil {
return err
}
for page, index := vmm.PageFromAddress(alloc.poolsHdr.Data), mem.Size(0); index < requiredPages; page, index = page+1, index+1 {
nextFrame, err := earlyAllocFrame()
if err != nil {
return err
}
if err = mapFn(page, nextFrame, vmm.FlagPresent|vmm.FlagRW|vmm.FlagNoExecute); err != nil {
return err
}
mem.Memset(page.Address(), 0, mem.PageSize)
}
alloc.pools = *(*[]framePool)(unsafe.Pointer(&alloc.poolsHdr))
// Run a second pass to initialize the free bitmap slices for all pools
bitmapStartAddr := alloc.poolsHdr.Data + uintptr(alloc.poolsHdr.Len)*sizeofPool
poolIndex := 0
multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool {
if region.Type != multiboot.MemAvailable {
return true
}
regionStartFrame := pmm.Frame(((region.PhysAddress + pageSizeMinus1) & ^pageSizeMinus1) >> mem.PageShift)
regionEndFrame := pmm.Frame(((region.PhysAddress+region.Length) & ^pageSizeMinus1)>>mem.PageShift) - 1
bitmapBytes := uintptr((((regionEndFrame - regionStartFrame) + 63) &^ 63) >> 3)
alloc.pools[poolIndex].startFrame = regionStartFrame
alloc.pools[poolIndex].endFrame = regionEndFrame
alloc.pools[poolIndex].freeCount = uint32(regionEndFrame - regionStartFrame + 1)
alloc.pools[poolIndex].freeBitmapHdr.Len = int(bitmapBytes >> 3)
alloc.pools[poolIndex].freeBitmapHdr.Cap = alloc.pools[poolIndex].freeBitmapHdr.Len
alloc.pools[poolIndex].freeBitmapHdr.Data = bitmapStartAddr
alloc.pools[poolIndex].freeBitmap = *(*[]uint64)(unsafe.Pointer(&alloc.pools[poolIndex].freeBitmapHdr))
bitmapStartAddr += bitmapBytes
poolIndex++
return true
})
return nil
}
// markFrame updates the reservation flag for the bitmap entry that corresponds
// to the supplied frame.
func (alloc *BitmapAllocator) markFrame(poolIndex int, frame pmm.Frame, flag markAs) {
if poolIndex < 0 || frame > alloc.pools[poolIndex].endFrame {
return
}
// The offset in the block is given by: frame % 64. As the bitmap uses a
// big-endian bit ordering we need to set the bit at index: 63 - offset
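// (e.g. relFrame 70: block 1, offset 6, mask = 1 << 57)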
relFrame := frame - alloc.pools[poolIndex].startFrame
block := relFrame >> 6
mask := uint64(1 << (63 - (relFrame - block<<6)))
switch flag {
case markFree:
alloc.pools[poolIndex].freeBitmap[block] &^= mask
alloc.pools[poolIndex].freeCount++
alloc.reservedPages--
case markReserved:
alloc.pools[poolIndex].freeBitmap[block] |= mask
alloc.pools[poolIndex].freeCount--
alloc.reservedPages++
}
}
// poolForFrame returns the index of the pool that contains frame or -1 if
// the frame is not contained in any of the available memory pools (e.g. it
// points to a reserved memory region).
func (alloc *BitmapAllocator) poolForFrame(frame pmm.Frame) int {
for poolIndex, pool := range alloc.pools {
if frame >= pool.startFrame && frame <= pool.endFrame {
return poolIndex
}
}
return -1
}
// reserveKernelFrames marks as reserved the bitmap entries for the frames
// occupied by the kernel image.
func (alloc *BitmapAllocator) reserveKernelFrames() {
// Flag frames used by kernel image as reserved. Since the kernel must
// occupy a contiguous memory block we assume that all its frames will
// fall into one of the available memory pools
poolIndex := alloc.poolForFrame(earlyAllocator.kernelStartFrame)
for frame := earlyAllocator.kernelStartFrame; frame <= earlyAllocator.kernelEndFrame; frame++ {
alloc.markFrame(poolIndex, frame, markReserved)
}
}
// reserveEarlyAllocatorFrames marks as reserved the bitmap entries for the frames
// already allocated by the early allocator.
func (alloc *BitmapAllocator) reserveEarlyAllocatorFrames() {
// We now need to decommission the early allocator by flagging all frames
// allocated by it as reserved. The allocator itself does not track
// individual frames but only a counter of allocated frames. To get
// the list of frames we reset its internal state and "replay" the
// allocation requests to get the correct frames.
allocCount := earlyAllocator.allocCount
earlyAllocator.allocCount, earlyAllocator.lastAllocFrame = 0, 0
for i := uint64(0); i < allocCount; i++ {
frame, _ := earlyAllocator.AllocFrame()
alloc.markFrame(
alloc.poolForFrame(frame),
frame,
markReserved,
)
}
}
func (alloc *BitmapAllocator) printStats() {
early.Printf(
"[bitmap_alloc] page stats: free: %d/%d (%d reserved)\n",
alloc.totalPages-alloc.reservedPages,
alloc.totalPages,
alloc.reservedPages,
)
}
// AllocFrame reserves and returns a physical memory frame. An error will be
// returned if no more memory can be allocated.
func (alloc *BitmapAllocator) AllocFrame() (pmm.Frame, *kernel.Error) {
for poolIndex := 0; poolIndex < len(alloc.pools); poolIndex++ {
if alloc.pools[poolIndex].freeCount == 0 {
continue
}
fullBlock := uint64(math.MaxUint64)
for blockIndex, block := range alloc.pools[poolIndex].freeBitmap {
if block == fullBlock {
continue
}
// Block has at least one free slot; we need to scan its bits
for blockOffset, mask := 0, uint64(1<<63); mask > 0; blockOffset, mask = blockOffset+1, mask>>1 {
if block&mask != 0 {
continue
}
alloc.pools[poolIndex].freeCount--
alloc.pools[poolIndex].freeBitmap[blockIndex] |= mask
alloc.reservedPages++
return alloc.pools[poolIndex].startFrame + pmm.Frame((blockIndex<<6)+blockOffset), nil
}
}
}
return pmm.InvalidFrame, errBitmapAllocOutOfMemory
}
// FreeFrame releases a frame previously allocated via a call to AllocFrame.
// Trying to release a frame not part of the allocator pools or a frame that
// is already marked as free will cause an error to be returned.
func (alloc *BitmapAllocator) FreeFrame(frame pmm.Frame) *kernel.Error {
poolIndex := alloc.poolForFrame(frame)
if poolIndex < 0 {
return errBitmapAllocFrameNotManaged
}
relFrame := frame - alloc.pools[poolIndex].startFrame
block := relFrame >> 6
mask := uint64(1 << (63 - (relFrame - block<<6)))
if alloc.pools[poolIndex].freeBitmap[block]&mask == 0 {
return errBitmapAllocDoubleFree
}
alloc.pools[poolIndex].freeBitmap[block] &^= mask
alloc.pools[poolIndex].freeCount++
alloc.reservedPages--
return nil
}
// earlyAllocFrame is a helper that delegates a frame allocation request to the
// early allocator instance. This function is passed as an argument to
// vmm.SetFrameAllocator instead of earlyAllocator.AllocFrame. The latter
// confuses the compiler's escape analysis into thinking that
// earlyAllocator.Frame escapes to heap.
func earlyAllocFrame() (pmm.Frame, *kernel.Error) {
return earlyAllocator.AllocFrame()
}
// AllocFrame is a helper that delegates a frame allocation request to the
// bitmap allocator instance.
func AllocFrame() (pmm.Frame, *kernel.Error) {
return bitmapAllocator.AllocFrame()
}
// Init sets up the kernel physical memory allocation sub-system.
func Init(kernelStart, kernelEnd uintptr) *kernel.Error {
earlyAllocator.init(kernelStart, kernelEnd)
earlyAllocator.printMemoryMap()
vmm.SetFrameAllocator(earlyAllocFrame)
if err := bitmapAllocator.init(); err != nil {
return err
}
vmm.SetFrameAllocator(AllocFrame)
return nil
}
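
Putting the package API together, kernel initialization is expected to drive it roughly like this (a sketch; kernelStart/kernelEnd are assumed to come from the linker and error handling is elided):

// after the multiboot info pointer has been registered:
if err := allocator.Init(kernelStart, kernelEnd); err != nil {
	// no usable memory map; the kernel cannot continue
}
// from this point on, frames come from the bitmap allocator
frame, err := allocator.AllocFrame()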

View File

@@ -0,0 +1,431 @@
package allocator
import (
"gopheros/kernel"
"gopheros/kernel/hal/multiboot"
"gopheros/kernel/mem"
"gopheros/kernel/mem/pmm"
"gopheros/kernel/mem/vmm"
"math"
"strconv"
"testing"
"unsafe"
)
func TestSetupPoolBitmaps(t *testing.T) {
defer func() {
mapFn = vmm.Map
reserveRegionFn = vmm.EarlyReserveRegion
}()
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))
// The captured multiboot data corresponds to qemu running with 128M RAM.
// The allocator will need to reserve 2 pages to store the bitmap data.
var (
alloc BitmapAllocator
physMem = make([]byte, 2*mem.PageSize)
)
// Init phys mem with junk
for i := 0; i < len(physMem); i++ {
physMem[i] = 0xf0
}
mapCallCount := 0
mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
mapCallCount++
return nil
}
reserveCallCount := 0
reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
reserveCallCount++
return uintptr(unsafe.Pointer(&physMem[0])), nil
}
if err := alloc.setupPoolBitmaps(); err != nil {
t.Fatal(err)
}
if exp := 2; mapCallCount != exp {
t.Fatalf("expected allocator to call vmm.Map %d times; called %d", exp, mapCallCount)
}
if exp := 1; reserveCallCount != exp {
t.Fatalf("expected allocator to call vmm.EarlyReserveRegion %d times; called %d", exp, reserveCallCount)
}
if exp, got := 2, len(alloc.pools); got != exp {
t.Fatalf("expected allocator to initialize %d pools; got %d", exp, got)
}
for poolIndex, pool := range alloc.pools {
if expFreeCount := uint32(pool.endFrame - pool.startFrame + 1); pool.freeCount != expFreeCount {
t.Errorf("[pool %d] expected free count to be %d; got %d", poolIndex, expFreeCount, pool.freeCount)
}
if exp, got := int(math.Ceil(float64(pool.freeCount)/64.0)), len(pool.freeBitmap); got != exp {
t.Errorf("[pool %d] expected bitmap len to be %d; got %d", poolIndex, exp, got)
}
for blockIndex, block := range pool.freeBitmap {
if block != 0 {
t.Errorf("[pool %d] expected bitmap block %d to be cleared; got %d", poolIndex, blockIndex, block)
}
}
}
}
func TestSetupPoolBitmapsErrors(t *testing.T) {
defer func() {
mapFn = vmm.Map
reserveRegionFn = vmm.EarlyReserveRegion
}()
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))
var alloc BitmapAllocator
t.Run("vmm.EarlyReserveRegion returns an error", func(t *testing.T) {
expErr := &kernel.Error{Module: "test", Message: "something went wrong"}
reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
return 0, expErr
}
if err := alloc.setupPoolBitmaps(); err != expErr {
t.Fatalf("expected to get error: %v; got %v", expErr, err)
}
})
t.Run("vmm.Map returns an error", func(t *testing.T) {
expErr := &kernel.Error{Module: "test", Message: "something went wrong"}
reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
return 0, nil
}
mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
return expErr
}
if err := alloc.setupPoolBitmaps(); err != expErr {
t.Fatalf("expected to get error: %v; got %v", expErr, err)
}
})
t.Run("earlyAllocator returns an error", func(t *testing.T) {
emptyInfoData := []byte{
0, 0, 0, 0, // size
0, 0, 0, 0, // reserved
0, 0, 0, 0, // tag with type zero and length zero
0, 0, 0, 0,
}
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&emptyInfoData[0])))
if err := alloc.setupPoolBitmaps(); err != errBootAllocOutOfMemory {
t.Fatalf("expected to get error: %v; got %v", errBootAllocOutOfMemory, err)
}
})
}
func TestBitmapAllocatorMarkFrame(t *testing.T) {
var alloc = BitmapAllocator{
pools: []framePool{
{
startFrame: pmm.Frame(0),
endFrame: pmm.Frame(127),
freeCount: 128,
freeBitmap: make([]uint64, 2),
},
},
totalPages: 128,
}
lastFrame := pmm.Frame(alloc.totalPages)
for frame := pmm.Frame(0); frame < lastFrame; frame++ {
alloc.markFrame(0, frame, markReserved)
block := uint64(frame / 64)
blockOffset := uint64(frame % 64)
bitIndex := (63 - blockOffset)
bitMask := uint64(1 << bitIndex)
if alloc.pools[0].freeBitmap[block]&bitMask != bitMask {
t.Errorf("[frame %d] expected block[%d], bit %d to be set", frame, block, bitIndex)
}
alloc.markFrame(0, frame, markFree)
if alloc.pools[0].freeBitmap[block]&bitMask != 0 {
t.Errorf("[frame %d] expected block[%d], bit %d to be unset", frame, block, bitIndex)
}
}
// Calling markFrame with a frame not part of the pool should be a no-op
alloc.markFrame(0, pmm.Frame(0xbadf00d), markReserved)
for blockIndex, block := range alloc.pools[0].freeBitmap {
if block != 0 {
t.Errorf("expected all blocks to be set to 0; block %d is set to %d", blockIndex, block)
}
}
// Calling markFrame with a negative pool index should be a no-op
alloc.markFrame(-1, pmm.Frame(0), markReserved)
for blockIndex, block := range alloc.pools[0].freeBitmap {
if block != 0 {
t.Errorf("expected all blocks to be set to 0; block %d is set to %d", blockIndex, block)
}
}
}
func TestBitmapAllocatorPoolForFrame(t *testing.T) {
var alloc = BitmapAllocator{
pools: []framePool{
{
startFrame: pmm.Frame(0),
endFrame: pmm.Frame(63),
freeCount: 64,
freeBitmap: make([]uint64, 1),
},
{
startFrame: pmm.Frame(128),
endFrame: pmm.Frame(191),
freeCount: 64,
freeBitmap: make([]uint64, 1),
},
},
totalPages: 128,
}
specs := []struct {
frame pmm.Frame
expIndex int
}{
{pmm.Frame(0), 0},
{pmm.Frame(63), 0},
{pmm.Frame(64), -1},
{pmm.Frame(128), 1},
{pmm.Frame(192), -1},
}
for specIndex, spec := range specs {
if got := alloc.poolForFrame(spec.frame); got != spec.expIndex {
t.Errorf("[spec %d] expected to get pool index %d; got %d", specIndex, spec.expIndex, got)
}
}
}
func TestBitmapAllocatorReserveKernelFrames(t *testing.T) {
var alloc = BitmapAllocator{
pools: []framePool{
{
startFrame: pmm.Frame(0),
endFrame: pmm.Frame(7),
freeCount: 8,
freeBitmap: make([]uint64, 1),
},
{
startFrame: pmm.Frame(64),
endFrame: pmm.Frame(191),
freeCount: 128,
freeBitmap: make([]uint64, 2),
},
},
totalPages: 136,
}
// kernel occupies 16 frames and starts at the beginning of pool 1
earlyAllocator.kernelStartFrame = pmm.Frame(64)
earlyAllocator.kernelEndFrame = pmm.Frame(79)
kernelSizePages := uint32(earlyAllocator.kernelEndFrame - earlyAllocator.kernelStartFrame + 1)
alloc.reserveKernelFrames()
if exp, got := kernelSizePages, alloc.reservedPages; got != exp {
t.Fatalf("expected reserved page counter to be %d; got %d", exp, got)
}
if exp, got := uint32(8), alloc.pools[0].freeCount; got != exp {
t.Fatalf("expected free count for pool 0 to be %d; got %d", exp, got)
}
if exp, got := 128-kernelSizePages, alloc.pools[1].freeCount; got != exp {
t.Fatalf("expected free count for pool 1 to be %d; got %d", exp, got)
}
// The first 16 bits of block 0 in pool 1 should all be set to 1
if exp, got := uint64(((1<<16)-1)<<48), alloc.pools[1].freeBitmap[0]; got != exp {
t.Fatalf("expected block 0 in pool 1 to be:\n%064s\ngot:\n%064s",
strconv.FormatUint(exp, 2),
strconv.FormatUint(got, 2),
)
}
}
func TestBitmapAllocatorReserveEarlyAllocatorFrames(t *testing.T) {
var alloc = BitmapAllocator{
pools: []framePool{
{
startFrame: pmm.Frame(0),
endFrame: pmm.Frame(63),
freeCount: 64,
freeBitmap: make([]uint64, 1),
},
{
startFrame: pmm.Frame(64),
endFrame: pmm.Frame(191),
freeCount: 128,
freeBitmap: make([]uint64, 2),
},
},
totalPages: 64,
}
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))
// Simulate 16 allocations made using the early allocator in the first
// region reported by the multiboot data and move the kernel outside the pools
allocCount := uint32(16)
earlyAllocator.allocCount = uint64(allocCount)
earlyAllocator.kernelStartFrame = pmm.Frame(256)
earlyAllocator.kernelEndFrame = pmm.Frame(256)
alloc.reserveEarlyAllocatorFrames()
if exp, got := allocCount, alloc.reservedPages; got != exp {
t.Fatalf("expected reserved page counter to be %d; got %d", exp, got)
}
if exp, got := 64-allocCount, alloc.pools[0].freeCount; got != exp {
t.Fatalf("expected free count for pool 0 to be %d; got %d", exp, got)
}
if exp, got := uint32(128), alloc.pools[1].freeCount; got != exp {
t.Fatalf("expected free count for pool 1 to be %d; got %d", exp, got)
}
// The first 16 bits of block 0 in pool 0 should all be set to 1
if exp, got := uint64(((1<<16)-1)<<48), alloc.pools[0].freeBitmap[0]; got != exp {
t.Fatalf("expected block 0 in pool 0 to be:\n%064s\ngot:\n%064s",
strconv.FormatUint(exp, 2),
strconv.FormatUint(got, 2),
)
}
}
func TestBitmapAllocatorAllocAndFreeFrame(t *testing.T) {
var alloc = BitmapAllocator{
pools: []framePool{
{
startFrame: pmm.Frame(0),
endFrame: pmm.Frame(7),
freeCount: 8,
// only the first 8 bits of block 0 are used
freeBitmap: make([]uint64, 1),
},
{
startFrame: pmm.Frame(64),
endFrame: pmm.Frame(191),
freeCount: 128,
freeBitmap: make([]uint64, 2),
},
},
totalPages: 136,
}
// Test Alloc
for poolIndex, pool := range alloc.pools {
for expFrame := pool.startFrame; expFrame <= pool.endFrame; expFrame++ {
got, err := alloc.AllocFrame()
if err != nil {
t.Fatalf("[pool %d] unexpected error: %v", poolIndex, err)
}
if got != expFrame {
t.Errorf("[pool %d] expected allocated frame to be %d; got %d", poolIndex, expFrame, got)
}
}
if alloc.pools[poolIndex].freeCount != 0 {
t.Errorf("[pool %d] expected free count to be 0; got %d", poolIndex, alloc.pools[poolIndex].freeCount)
}
}
if alloc.reservedPages != alloc.totalPages {
t.Errorf("expected reservedPages to match totalPages(%d); got %d", alloc.totalPages, alloc.reservedPages)
}
if _, err := alloc.AllocFrame(); err != errBitmapAllocOutOfMemory {
t.Fatalf("expected error errBitmapAllocOutOfMemory; got %v", err)
}
// Test Free
expFreeCount := []uint32{8, 128}
for poolIndex, pool := range alloc.pools {
for frame := pool.startFrame; frame <= pool.endFrame; frame++ {
if err := alloc.FreeFrame(frame); err != nil {
t.Fatalf("[pool %d] unexpected error: %v", poolIndex, err)
}
}
if alloc.pools[poolIndex].freeCount != expFreeCount[poolIndex] {
t.Errorf("[pool %d] expected free count to be %d; got %d", poolIndex, expFreeCount[poolIndex], alloc.pools[poolIndex].freeCount)
}
}
if alloc.reservedPages != 0 {
t.Errorf("expected reservedPages to be 0; got %d", alloc.reservedPages)
}
// Test Free errors
if err := alloc.FreeFrame(pmm.Frame(0)); err != errBitmapAllocDoubleFree {
t.Fatalf("expected error errBitmapAllocDoubleFree; got %v", err)
}
if err := alloc.FreeFrame(pmm.Frame(0xbadf00d)); err != errBitmapAllocFrameNotManaged {
t.Fatalf("expected error errBitmapFrameNotManaged; got %v", err)
}
}
func TestAllocatorPackageInit(t *testing.T) {
defer func() {
mapFn = vmm.Map
reserveRegionFn = vmm.EarlyReserveRegion
}()
var (
physMem = make([]byte, 2*mem.PageSize)
)
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))
t.Run("success", func(t *testing.T) {
mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
return nil
}
reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
return uintptr(unsafe.Pointer(&physMem[0])), nil
}
mockTTY()
if err := Init(0x100000, 0x1fa7c8); err != nil {
t.Fatal(err)
}
// At this point AllocFrame should work
if _, err := AllocFrame(); err != nil {
t.Fatal(err)
}
})
t.Run("error", func(t *testing.T) {
expErr := &kernel.Error{Module: "test", Message: "something went wrong"}
mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
return expErr
}
if err := Init(0x100000, 0x1fa7c8); err != expErr {
t.Fatalf("expected to get error: %v; got %v", expErr, err)
}
})
}

View File

@@ -0,0 +1,136 @@
package allocator
import (
"gopheros/kernel"
"gopheros/kernel/hal/multiboot"
"gopheros/kernel/kfmt/early"
"gopheros/kernel/mem"
"gopheros/kernel/mem/pmm"
)
var (
// earlyAllocator is a boot mem allocator instance used for page
// allocations before switching to a more advanced allocator.
earlyAllocator bootMemAllocator
errBootAllocOutOfMemory = &kernel.Error{Module: "boot_mem_alloc", Message: "out of memory"}
)
// bootMemAllocator implements a rudimentary physical memory allocator which is
// used to bootstrap the kernel.
//
// The allocator implementation uses the memory region information provided by
// the bootloader to detect free memory blocks and return the next available
// free frame. Allocations are tracked via an internal counter that contains
// the last allocated frame.
//
// Due to the way that the allocator works, it is not possible to free
// allocated pages. Once the kernel is properly initialized, the allocated
// blocks will be handed over to a more advanced memory allocator that does
// support freeing.
type bootMemAllocator struct {
// allocCount tracks the total number of allocated frames.
allocCount uint64
// lastAllocFrame tracks the last allocated frame number.
lastAllocFrame pmm.Frame
// Keep track of kernel location so we exclude this region.
kernelStartAddr, kernelEndAddr uintptr
kernelStartFrame, kernelEndFrame pmm.Frame
}
// init sets up the boot memory allocator internal state.
func (alloc *bootMemAllocator) init(kernelStart, kernelEnd uintptr) {
// round down kernel start to the nearest page and round up kernel end
// to the nearest page.
pageSizeMinus1 := uintptr(mem.PageSize - 1)
alloc.kernelStartAddr = kernelStart
alloc.kernelEndAddr = kernelEnd
alloc.kernelStartFrame = pmm.Frame((kernelStart & ^pageSizeMinus1) >> mem.PageShift)
alloc.kernelEndFrame = pmm.Frame(((kernelEnd+pageSizeMinus1) & ^pageSizeMinus1)>>mem.PageShift) - 1
}
// AllocFrame scans the system memory regions reported by the bootloader and
// reserves the next available free frame.
//
// AllocFrame returns an error if no more memory can be allocated.
func (alloc *bootMemAllocator) AllocFrame() (pmm.Frame, *kernel.Error) {
var err = errBootAllocOutOfMemory
multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool {
// Ignore reserved regions and regions smaller than a single page
if region.Type != multiboot.MemAvailable || region.Length < uint64(mem.PageSize) {
return true
}
// Reported addresses may not be page-aligned; round up to get
// the start frame and round down to get the end frame
pageSizeMinus1 := uint64(mem.PageSize - 1)
regionStartFrame := pmm.Frame(((region.PhysAddress + pageSizeMinus1) & ^pageSizeMinus1) >> mem.PageShift)
regionEndFrame := pmm.Frame(((region.PhysAddress+region.Length) & ^pageSizeMinus1)>>mem.PageShift) - 1
// Skip over already allocated regions
if alloc.lastAllocFrame >= regionEndFrame {
return true
}
// If last frame used a different region and the kernel image
// is located at the beginning of this region OR we are in
// current region but lastAllocFrame + 1 points to the kernel
// start we need to jump to the page following the kernel end
// frame
if (alloc.lastAllocFrame <= regionStartFrame && alloc.kernelStartFrame == regionStartFrame) ||
(alloc.lastAllocFrame <= regionEndFrame && alloc.lastAllocFrame+1 == alloc.kernelStartFrame) {
//fmt.Printf("last: %d, case: 1, set last: %d\n", alloc.lastAllocFrame, alloc.kernelEndFrame+1)
alloc.lastAllocFrame = alloc.kernelEndFrame + 1
} else if alloc.lastAllocFrame < regionStartFrame || alloc.allocCount == 0 {
// we are in the previous region and need to jump to this one OR
// this is the first allocation and the region begins at frame 0
//fmt.Printf("last: %d, case: 2, set last: %d\n", alloc.lastAllocFrame, regionStartFrame)
alloc.lastAllocFrame = regionStartFrame
} else {
// we are in the region and we can select the next frame
//fmt.Printf("last: %d, case: 3, set last: %d\n", alloc.lastAllocFrame, alloc.lastAllocFrame+1)
alloc.lastAllocFrame++
}
// The above adjustment might push lastAllocFrame outside of the
// region end (e.g. the kernel ends at the last page in the region)
if alloc.lastAllocFrame > regionEndFrame {
return true
}
err = nil
return false
})
if err != nil {
return pmm.InvalidFrame, errBootAllocOutOfMemory
}
alloc.allocCount++
return alloc.lastAllocFrame, nil
}
// printMemoryMap scans the memory region information provided by the
// bootloader and prints out the system's memory map.
func (alloc *bootMemAllocator) printMemoryMap() {
early.Printf("[boot_mem_alloc] system memory map:\n")
var totalFree mem.Size
multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool {
early.Printf("\t[0x%10x - 0x%10x], size: %10d, type: %s\n", region.PhysAddress, region.PhysAddress+region.Length, region.Length, region.Type.String())
if region.Type == multiboot.MemAvailable {
totalFree += mem.Size(region.Length)
}
return true
})
early.Printf("[boot_mem_alloc] available memory: %dKb\n", uint64(totalFree/mem.Kb))
early.Printf("[boot_mem_alloc] kernel loaded at 0x%x - 0x%x\n", alloc.kernelStartAddr, alloc.kernelEndAddr)
early.Printf("[boot_mem_alloc] size: %d bytes, reserved pages: %d\n",
uint64(alloc.kernelEndAddr-alloc.kernelStartAddr),
uint64(alloc.kernelEndFrame-alloc.kernelStartFrame+1),
)
}

View File

@@ -0,0 +1,130 @@
package allocator
import (
"gopheros/kernel/driver/video/console"
"gopheros/kernel/hal"
"gopheros/kernel/hal/multiboot"
"testing"
"unsafe"
)
func TestBootMemoryAllocator(t *testing.T) {
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))
specs := []struct {
kernelStart, kernelEnd uintptr
expAllocCount uint64
}{
{
// the kernel is loaded in a reserved memory region
0xa0000,
0xa0000,
// region 1 extents get rounded to [0, 9f000] and provide 159 frames [0 to 158]
// region 2 uses the original extents [100000 - 7fe0000] and provides 32480 frames [256-32735]
159 + 32480,
},
{
// the kernel is loaded at the beginning of region 1 taking 2.5 pages
0x0,
0x2800,
// region 1 extents get rounded to [0, 9f000] and provide 159 frames [0 to 158]; out of these
// frames 0, 1 and 2 (round up kernel end) are used by the kernel
// region 2 uses the original extents [100000 - 7fe0000] and provides 32480 frames [256-32735]
159 - 3 + 32480,
},
{
// the kernel is loaded at the end of region 1 taking 2.5 pages
0x9c800,
0x9f000,
// region 1 extents get rounded to [0, 9f000] and provide 159 frames [0 to 158]; out of these
// frames 156, 157 and 158 (round down kernel start) are used by the kernel
// region 2 uses the original extents [100000 - 7fe0000] and provides 32480 frames [256-32735]
159 - 3 + 32480,
},
{
// the kernel (after rounding) uses the entire region 1
0x123,
0x9fc00,
// region 1 extents get rounded to [0, 9f000] and provide 159 frames [0 to 158]; all are used
// by the kernel
// region 2 uses the original extents [100000 - 7fe0000] and provides 32480 frames [256-32735]
32480,
},
{
// the kernel is loaded at region 2 start + 2K taking 1.5 pages
0x100800,
0x102000,
// region 1 extents get rounded to [0, 9f000] and provide 159 frames [0 to 158]
// region 2 uses the original extents [100000 - 7fe0000] and provides 32480 frames [256-32735];
// out of these, frames 256 (kernel start rounded down) and 257 are used by the kernel
159 + 32480 - 2,
},
}
var alloc bootMemAllocator
for specIndex, spec := range specs {
alloc.allocCount = 0
alloc.lastAllocFrame = 0
alloc.init(spec.kernelStart, spec.kernelEnd)
for {
frame, err := alloc.AllocFrame()
if err != nil {
if err == errBootAllocOutOfMemory {
break
}
t.Errorf("[spec %d] [frame %d] unexpected allocator error: %v", specIndex, alloc.allocCount, err)
break
}
if frame != alloc.lastAllocFrame {
t.Errorf("[spec %d] [frame %d] expected allocated frame to be %d; got %d", specIndex, alloc.allocCount, alloc.lastAllocFrame, frame)
}
if !frame.Valid() {
t.Errorf("[spec %d] [frame %d] expected IsValid() to return true", specIndex, alloc.allocCount)
}
}
if alloc.allocCount != spec.expAllocCount {
t.Errorf("[spec %d] expected allocator to allocate %d frames; allocated %d", specIndex, spec.expAllocCount, alloc.allocCount)
}
}
}
var (
// A dump of multiboot data when running under qemu containing only the
// memory region tag. The dump encodes the following available memory
// regions:
// [ 0 - 9fc00] length: 654336
// [100000 - 7fe0000] length: 133038080
multibootMemoryMap = []byte{
72, 5, 0, 0, 0, 0, 0, 0,
6, 0, 0, 0, 160, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 252, 9, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 252, 9, 0, 0, 0, 0, 0,
0, 4, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0,
0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0,
0, 0, 238, 7, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 254, 7, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0,
2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 252, 255, 0, 0, 0, 0,
0, 0, 4, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0,
9, 0, 0, 0, 212, 3, 0, 0, 24, 0, 0, 0, 40, 0, 0, 0,
21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 27, 0, 0, 0,
1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 16, 0, 0, 16, 0, 0,
24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
}
)
func mockTTY() []byte {
// Mock a tty to handle early.Printf output
mockConsoleFb := make([]byte, 160*25)
mockConsole := &console.Ega{}
mockConsole.Init(80, 25, uintptr(unsafe.Pointer(&mockConsoleFb[0])))
hal.ActiveTerminal.AttachTo(mockConsole)
return mockConsoleFb
}

View File

@@ -0,0 +1,26 @@
// Package pmm contains code that manages physical memory frame allocations.
package pmm
import (
"gopheros/kernel/mem"
"math"
)
// Frame describes a physical memory page index.
type Frame uintptr
const (
// InvalidFrame is returned by page allocators when
// they fail to reserve the requested frame.
InvalidFrame = Frame(math.MaxUint64)
)
// Valid returns true if this is a valid frame.
func (f Frame) Valid() bool {
return f != InvalidFrame
}
// Address returns a pointer to the physical memory address pointed to by this Frame.
func (f Frame) Address() uintptr {
return uintptr(f << mem.PageShift)
}

View File

@@ -0,0 +1,25 @@
package pmm
import (
"gopheros/kernel/mem"
"testing"
)
func TestFrameMethods(t *testing.T) {
for frameIndex := uint64(0); frameIndex < 128; frameIndex++ {
frame := Frame(frameIndex)
if !frame.Valid() {
t.Errorf("expected frame %d to be valid", frameIndex)
}
if exp, got := uintptr(frameIndex<<mem.PageShift), frame.Address(); got != exp {
t.Errorf("expected frame (%d, index: %d) call to Address() to return %x; got %x", frame, frameIndex, exp, got)
}
}
invalidFrame := InvalidFrame
if invalidFrame.Valid() {
t.Error("expected InvalidFrame.Valid() to return false")
}
}

View File

@@ -0,0 +1,12 @@
package mem
// Size represents a memory block size in bytes.
type Size uint64
// Common memory block sizes.
const (
Byte Size = 1
Kb = 1024 * Byte
Mb = 1024 * Kb
Gb = 1024 * Mb
)

View File

@@ -0,0 +1,35 @@
package vmm
import (
"gopheros/kernel"
"gopheros/kernel/mem"
)
var (
// earlyReserveLastUsed tracks the last reserved page address and is
// decreased after each allocation request. Initially, it points to
// tempMappingAddr which coincides with the end of the kernel address
// space.
earlyReserveLastUsed = tempMappingAddr
errEarlyReserveNoSpace = &kernel.Error{Module: "early_reserve", Message: "remaining virtual address space not large enough to satisfy reservation request"}
)
// EarlyReserveRegion reserves a page-aligned contiguous virtual memory region
// with the requested size in the kernel address space and returns its virtual
// address. If size is not a multiple of mem.PageSize it will be automatically
// rounded up.
//
// This function allocates regions starting at the end of the kernel address
// space. It should only be used during the early stages of kernel initialization.
func EarlyReserveRegion(size mem.Size) (uintptr, *kernel.Error) {
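// round size up to a multiple of the page size, e.g. 42 -> 4096 for 4KiB pages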
size = (size + (mem.PageSize - 1)) & ^(mem.PageSize - 1)
// reserving a region of the requested size will cause an underflow
if uintptr(size) > earlyReserveLastUsed {
return 0, errEarlyReserveNoSpace
}
earlyReserveLastUsed -= uintptr(size)
return earlyReserveLastUsed, nil
}

View File

@@ -0,0 +1,29 @@
package vmm
import (
"runtime"
"testing"
)
func TestEarlyReserveAmd64(t *testing.T) {
if runtime.GOARCH != "amd64" {
t.Skip("test requires amd64 runtime; skipping")
}
defer func(origLastUsed uintptr) {
earlyReserveLastUsed = origLastUsed
}(earlyReserveLastUsed)
earlyReserveLastUsed = 4096
next, err := EarlyReserveRegion(42)
if err != nil {
t.Fatal(err)
}
if exp := uintptr(0); next != exp {
t.Fatal("expected reservation request to be rounded to nearest page")
}
if _, err = EarlyReserveRegion(1); err != errEarlyReserveNoSpace {
t.Fatalf("expected to get errEarlyReserveNoSpace; got %v", err)
}
}

View File

@@ -0,0 +1,89 @@
// +build amd64

package vmm
import "math"
const (
// pageLevels indicates the number of page levels supported by the amd64 architecture.
pageLevels = 4
// ptePhysPageMask is a mask that allows us to extract the physical memory
// address pointed to by a page table entry. For this particular architecture,
// bits 12-51 contain the physical memory address.
ptePhysPageMask = uintptr(0x000ffffffffff000)
// tempMappingAddr is a reserved virtual page address used for
// temporary physical page mappings (e.g. when mapping inactive PDT
// pages). For amd64 this address uses the following table indices:
// 510, 511, 511, 511.
tempMappingAddr = uintptr(0xffffff7ffffff000)
)
var (
// pdtVirtualAddr is a special virtual address that exploits the
// recursive mapping used in the last PDT entry for each page directory
// to allow accessing the PDT (P4) table using the system's MMU address
// translation mechanism. By setting all page level bits to 1 the MMU
// keeps following the last P4 entry for all page levels, landing back on
// the P4 itself.
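// The resulting address is 0xfffffffffffff000 (all four indices 511, zero offset).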
pdtVirtualAddr = uintptr(math.MaxUint64 &^ ((1 << 12) - 1))
// pageLevelBits defines the number of virtual address bits that correspond to each
// page level. For the amd64 architecture each PageLevel uses 9 bits which amounts to
// 512 entries for each page level.
pageLevelBits = [pageLevels]uint8{
9,
9,
9,
9,
}
// pageLevelShifts defines the shift required to access each page table component
// of a virtual address.
pageLevelShifts = [pageLevels]uint8{
39,
30,
21,
12,
}
)
const (
// FlagPresent is set when the page is available in memory and not swapped out.
FlagPresent PageTableEntryFlag = 1 << iota
// FlagRW is set if the page can be written to.
FlagRW
// FlagUserAccessible is set if user-mode processes can access this page. If
// not set only kernel code can access this page.
FlagUserAccessible
// FlagWriteThroughCaching implies write-through caching when set and write-back
// caching if cleared.
FlagWriteThroughCaching
// FlagDoNotCache prevents this page from being cached if set.
FlagDoNotCache
// FlagAccessed is set by the CPU when this page is accessed.
FlagAccessed
// FlagDirty is set by the CPU when this page is modified.
FlagDirty
// FlagHugePage is set when using 2MB pages instead of 4KB pages.
FlagHugePage
// FlagGlobal, if set, prevents the TLB from flushing the cached memory address
// for this page when swapping page tables by updating the CR3 register.
FlagGlobal
// FlagCopyOnWrite is used to implement copy-on-write functionality. This
// flag and FlagRW are mutually exclusive.
FlagCopyOnWrite = 1 << 9
// FlagNoExecute, if set, indicates that the page contents must not be executed.
FlagNoExecute = 1 << 63
)

View File

@@ -0,0 +1,154 @@
package vmm
import (
"gopheros/kernel"
"gopheros/kernel/cpu"
"gopheros/kernel/mem"
"gopheros/kernel/mem/pmm"
"unsafe"
)
// ReservedZeroedFrame is a special zero-cleared frame allocated by the
// vmm package's Init function. The purpose of this frame is to assist
// in implementing on-demand memory allocation when mapping it in
// conjunction with the CopyOnWrite flag. Here is an example of how it
// can be used:
//
// func ReserveOnDemand(start vmm.Page, pageCount int) *kernel.Error {
// var err *kernel.Error
// mapFlags := vmm.FlagPresent|vmm.FlagCopyOnWrite
// for page := start; pageCount > 0; pageCount, page = pageCount-1, page+1 {
// if err = vmm.Map(page, vmm.ReservedZeroedFrame, mapFlags); err != nil {
// return err
// }
// }
// return nil
// }
//
// In the above example, page mappings are set up for the requested number of
// pages but no physical memory is reserved for their contents. A write to any
// of the above pages will trigger a page-fault causing a new frame to be
// allocated, cleared (the blank frame is copied to the new frame) and
// installed in-place with RW permissions.
var ReservedZeroedFrame pmm.Frame
var (
// protectReservedZeroedPage is set to true to prevent mapping ReservedZeroedFrame with a RW flag.
protectReservedZeroedPage bool
// nextAddrFn is used by tests to override the nextTableAddr
// calculations used by Map. When compiling the kernel this function
// will be automatically inlined.
nextAddrFn = func(entryAddr uintptr) uintptr {
return entryAddr
}
// flushTLBEntryFn is used by tests to override calls to flushTLBEntry
// which will cause a fault if called in user-mode.
flushTLBEntryFn = cpu.FlushTLBEntry
errNoHugePageSupport = &kernel.Error{Module: "vmm", Message: "huge pages are not supported"}
errAttemptToRWMapReservedFrame = &kernel.Error{Module: "vmm", Message: "reserved blank frame cannot be mapped with a RW flag"}
)
// Map establishes a mapping between a virtual page and a physical memory frame
// using the currently active page directory table. Calls to Map will use the
// supplied physical frame allocator to initialize missing page tables at each
// paging level supported by the MMU.
//
// Attempts to map ReservedZeroedFrame with a RW flag will result in an error.
func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
if protectReservedZeroedPage && frame == ReservedZeroedFrame && (flags&FlagRW) != 0 {
return errAttemptToRWMapReservedFrame
}
var err *kernel.Error
walk(page.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
// If we reached the last level all we need to do is to map the
// frame in place and flag it as present and flush its TLB entry
if pteLevel == pageLevels-1 {
*pte = 0
pte.SetFrame(frame)
pte.SetFlags(flags)
flushTLBEntryFn(page.Address())
return true
}
if pte.HasFlags(FlagHugePage) {
err = errNoHugePageSupport
return false
}
// Next table does not yet exist; we need to allocate a
// physical frame for it, map it and clear its contents.
if !pte.HasFlags(FlagPresent) {
var newTableFrame pmm.Frame
newTableFrame, err = frameAllocator()
if err != nil {
return false
}
*pte = 0
pte.SetFrame(newTableFrame)
pte.SetFlags(FlagPresent | FlagRW)
// The next pte entry becomes available but we need to
// make sure that the new page is properly cleared
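// (with the recursive PDT mapping, shifting the entry's virtual address
// left by 9 bits yields the virtual address of the table it points to)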
nextTableAddr := (uintptr(unsafe.Pointer(pte)) << pageLevelBits[pteLevel+1])
mem.Memset(nextAddrFn(nextTableAddr), 0, mem.PageSize)
}
return true
})
return err
}
// MapTemporary establishes a temporary RW mapping of a physical memory frame
// to a fixed virtual address overwriting any previous mapping. The temporary
// mapping mechanism is primarily used by the kernel to access and initialize
// inactive page tables.
//
// Attempts to map ReservedZeroedFrame will result in an error.
func MapTemporary(frame pmm.Frame) (Page, *kernel.Error) {
if protectReservedZeroedPage && frame == ReservedZeroedFrame {
return 0, errAttemptToRWMapReservedFrame
}
if err := Map(PageFromAddress(tempMappingAddr), frame, FlagPresent|FlagRW); err != nil {
return 0, err
}
return PageFromAddress(tempMappingAddr), nil
}
// Unmap removes a mapping previously installed via a call to Map or MapTemporary.
func Unmap(page Page) *kernel.Error {
var err *kernel.Error
walk(page.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
// If we reached the last level all we need to do is to set the
// page as non-present and flush its TLB entry
if pteLevel == pageLevels-1 {
pte.ClearFlags(FlagPresent)
flushTLBEntryFn(page.Address())
return true
}
// Next table is not present; this is an invalid mapping
if !pte.HasFlags(FlagPresent) {
err = ErrInvalidMapping
return false
}
if pte.HasFlags(FlagHugePage) {
err = errNoHugePageSupport
return false
}
return true
})
return err
}

View File

@@ -0,0 +1,270 @@
package vmm
import (
"gopheros/kernel"
"gopheros/kernel/mem"
"gopheros/kernel/mem/pmm"
"runtime"
"testing"
"unsafe"
)
func TestNextAddrFn(t *testing.T) {
// Dummy test to keep coverage happy
if exp, got := uintptr(123), nextAddrFn(uintptr(123)); exp != got {
t.Fatalf("expected nextAddrFn to return %v; got %v", exp, got)
}
}
func TestMapTemporaryAmd64(t *testing.T) {
if runtime.GOARCH != "amd64" {
t.Skip("test requires amd64 runtime; skipping")
}
defer func(origPtePtr func(uintptr) unsafe.Pointer, origNextAddrFn func(uintptr) uintptr, origFlushTLBEntryFn func(uintptr)) {
ptePtrFn = origPtePtr
nextAddrFn = origNextAddrFn
flushTLBEntryFn = origFlushTLBEntryFn
frameAllocator = nil
}(ptePtrFn, nextAddrFn, flushTLBEntryFn)
var physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry
nextPhysPage := 0
// allocFn returns pages from index 1; we keep index 0 for the P4 entry
SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
nextPhysPage++
pageAddr := unsafe.Pointer(&physPages[nextPhysPage][0])
return pmm.Frame(uintptr(pageAddr) >> mem.PageShift), nil
})
pteCallCount := 0
ptePtrFn = func(entry uintptr) unsafe.Pointer {
pteCallCount++
// The last 12 bits encode the page table offset in bytes
// which we need to convert to a uint64 entry
pteIndex := (entry & uintptr(mem.PageSize-1)) >> mem.PointerShift
return unsafe.Pointer(&physPages[pteCallCount-1][pteIndex])
}
nextAddrFn = func(entry uintptr) uintptr {
return uintptr(unsafe.Pointer(&physPages[nextPhysPage][0]))
}
flushTLBEntryCallCount := 0
flushTLBEntryFn = func(uintptr) {
flushTLBEntryCallCount++
}
// The temporary mapping address breaks down to:
// p4 index: 510
// p3 index: 511
// p2 index: 511
// p1 index: 511
frame := pmm.Frame(123)
levelIndices := []uint{510, 511, 511, 511}
page, err := MapTemporary(frame)
if err != nil {
t.Fatal(err)
}
if got := page.Address(); got != tempMappingAddr {
t.Fatalf("expected temp mapping virtual address to be %x; got %x", tempMappingAddr, got)
}
for level, physPage := range physPages {
pte := physPage[levelIndices[level]]
if !pte.HasFlags(FlagPresent | FlagRW) {
t.Errorf("[pte at level %d] expected entry to have FlagPresent and FlagRW set", level)
}
switch {
case level < pageLevels-1:
if exp, got := pmm.Frame(uintptr(unsafe.Pointer(&physPages[level+1][0]))>>mem.PageShift), pte.Frame(); got != exp {
t.Errorf("[pte at level %d] expected entry frame to be %d; got %d", level, exp, got)
}
default:
// The last pte entry should point to frame
if got := pte.Frame(); got != frame {
t.Errorf("[pte at level %d] expected entry frame to be %d; got %d", level, frame, got)
}
}
}
if exp := 1; flushTLBEntryCallCount != exp {
t.Errorf("expected flushTLBEntry to be called %d times; got %d", exp, flushTLBEntryCallCount)
}
}
func TestMapTemporaryErrorsAmd64(t *testing.T) {
if runtime.GOARCH != "amd64" {
t.Skip("test requires amd64 runtime; skipping")
}
defer func(origPtePtr func(uintptr) unsafe.Pointer, origNextAddrFn func(uintptr) uintptr, origFlushTLBEntryFn func(uintptr)) {
ptePtrFn = origPtePtr
nextAddrFn = origNextAddrFn
flushTLBEntryFn = origFlushTLBEntryFn
}(ptePtrFn, nextAddrFn, flushTLBEntryFn)
var physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry
// The reserved virt address uses the following page level indices: 510, 511, 511, 511
p4Index := 510
frame := pmm.Frame(123)
t.Run("encounter huge page", func(t *testing.T) {
physPages[0][p4Index].SetFlags(FlagPresent | FlagHugePage)
ptePtrFn = func(entry uintptr) unsafe.Pointer {
// The last 12 bits encode the page table offset in bytes
// which we need to convert to a uint64 entry
pteIndex := (entry & uintptr(mem.PageSize-1)) >> mem.PointerShift
return unsafe.Pointer(&physPages[0][pteIndex])
}
if _, err := MapTemporary(frame); err != errNoHugePageSupport {
t.Fatalf("expected to get errNoHugePageSupport; got %v", err)
}
})
t.Run("allocFn returns an error", func(t *testing.T) {
defer func() { frameAllocator = nil }()
physPages[0][p4Index] = 0
expErr := &kernel.Error{Module: "test", Message: "out of memory"}
SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
return 0, expErr
})
if _, err := MapTemporary(frame); err != expErr {
t.Fatalf("got unexpected error %v", err)
}
})
t.Run("map BlankReservedFrame RW", func(t *testing.T) {
defer func() { protectReservedZeroedPage = false }()
protectReservedZeroedPage = true
if err := Map(Page(0), ReservedZeroedFrame, FlagRW); err != errAttemptToRWMapReservedFrame {
t.Fatalf("expected errAttemptToRWMapReservedFrame; got: %v", err)
}
})
t.Run("temp-map BlankReservedFrame RW", func(t *testing.T) {
defer func() { protectReservedZeroedPage = false }()
protectReservedZeroedPage = true
if _, err := MapTemporary(ReservedZeroedFrame); err != errAttemptToRWMapReservedFrame {
t.Fatalf("expected errAttemptToRWMapReservedFrame; got: %v", err)
}
})
}
func TestUnmapAmd64(t *testing.T) {
if runtime.GOARCH != "amd64" {
t.Skip("test requires amd64 runtime; skipping")
}
defer func(origPtePtr func(uintptr) unsafe.Pointer, origFlushTLBEntryFn func(uintptr)) {
ptePtrFn = origPtePtr
flushTLBEntryFn = origFlushTLBEntryFn
}(ptePtrFn, flushTLBEntryFn)
var (
physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry
frame = pmm.Frame(123)
)
// Emulate a page mapped to virtAddr 0 across all page levels
for level := 0; level < pageLevels; level++ {
physPages[level][0].SetFlags(FlagPresent | FlagRW)
if level < pageLevels-1 {
physPages[level][0].SetFrame(pmm.Frame(uintptr(unsafe.Pointer(&physPages[level+1][0])) >> mem.PageShift))
} else {
physPages[level][0].SetFrame(frame)
}
}
pteCallCount := 0
ptePtrFn = func(entry uintptr) unsafe.Pointer {
pteCallCount++
return unsafe.Pointer(&physPages[pteCallCount-1][0])
}
flushTLBEntryCallCount := 0
flushTLBEntryFn = func(uintptr) {
flushTLBEntryCallCount++
}
if err := Unmap(PageFromAddress(0)); err != nil {
t.Fatal(err)
}
for level, physPage := range physPages {
pte := physPage[0]
switch {
case level < pageLevels-1:
if !pte.HasFlags(FlagPresent) {
t.Errorf("[pte at level %d] expected entry to retain have FlagPresent set", level)
}
if exp, got := pmm.Frame(uintptr(unsafe.Pointer(&physPages[level+1][0]))>>mem.PageShift), pte.Frame(); got != exp {
t.Errorf("[pte at level %d] expected entry frame to still be %d; got %d", level, exp, got)
}
default:
if pte.HasFlags(FlagPresent) {
t.Errorf("[pte at level %d] expected entry not to have FlagPresent set", level)
}
// The last pte entry should still point to frame
if got := pte.Frame(); got != frame {
t.Errorf("[pte at level %d] expected entry frame to be %d; got %d", level, frame, got)
}
}
}
if exp := 1; flushTLBEntryCallCount != exp {
t.Errorf("expected flushTLBEntry to be called %d times; got %d", exp, flushTLBEntryCallCount)
}
}
func TestUnmapErrorsAmd64(t *testing.T) {
if runtime.GOARCH != "amd64" {
t.Skip("test requires amd64 runtime; skipping")
}
defer func(origPtePtr func(uintptr) unsafe.Pointer, origNextAddrFn func(uintptr) uintptr, origFlushTLBEntryFn func(uintptr)) {
ptePtrFn = origPtePtr
nextAddrFn = origNextAddrFn
flushTLBEntryFn = origFlushTLBEntryFn
}(ptePtrFn, nextAddrFn, flushTLBEntryFn)
var physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry
t.Run("encounter huge page", func(t *testing.T) {
physPages[0][0].SetFlags(FlagPresent | FlagHugePage)
ptePtrFn = func(entry uintptr) unsafe.Pointer {
// The last 12 bits encode the page table offset in bytes
// which we need to convert to a uint64 entry
pteIndex := (entry & uintptr(mem.PageSize-1)) >> mem.PointerShift
return unsafe.Pointer(&physPages[0][pteIndex])
}
if err := Unmap(PageFromAddress(0)); err != errNoHugePageSupport {
t.Fatalf("expected to get errNoHugePageSupport; got %v", err)
}
})
t.Run("virtual address not mapped", func(t *testing.T) {
physPages[0][0].ClearFlags(FlagPresent)
if err := Unmap(PageFromAddress(0)); err != ErrInvalidMapping {
t.Fatalf("expected to get ErrInvalidMapping; got %v", err)
}
})
}

View File

@@ -0,0 +1,19 @@
package vmm
import "gopheros/kernel/mem"
// Page describes a virtual memory page index.
type Page uintptr
// Address returns a pointer to the virtual memory address pointed to by this Page.
func (f Page) Address() uintptr {
return uintptr(f << mem.PageShift)
}
// PageFromAddress returns a Page that corresponds to the given virtual
// address. This function can handle both page-aligned and not aligned virtual
// addresses. in the latter case, the input address will be rounded down to the
// page that contains it.
func PageFromAddress(virtAddr uintptr) Page {
return Page((virtAddr & ^(uintptr(mem.PageSize - 1))) >> mem.PageShift)
}

View File

@@ -0,0 +1,34 @@
package vmm
import (
"gopheros/kernel/mem"
"testing"
)
func TestPageMethods(t *testing.T) {
for pageIndex := uint64(0); pageIndex < 128; pageIndex++ {
page := Page(pageIndex)
if exp, got := uintptr(pageIndex<<mem.PageShift), page.Address(); got != exp {
t.Errorf("expected page (%d, index: %d) call to Address() to return %x; got %x", page, pageIndex, exp, got)
}
}
}
func TestPageFromAddress(t *testing.T) {
specs := []struct {
input uintptr
expPage Page
}{
{0, Page(0)},
{4095, Page(0)},
{4096, Page(1)},
{4123, Page(1)},
}
for specIndex, spec := range specs {
if got := PageFromAddress(spec.input); got != spec.expPage {
t.Errorf("[spec %d] expected returned page to be %v; got %v", specIndex, spec.expPage, got)
}
}
}

View File

@@ -0,0 +1,135 @@
package vmm
import (
"gopheros/kernel"
"gopheros/kernel/cpu"
"gopheros/kernel/mem"
"gopheros/kernel/mem/pmm"
"unsafe"
)
var (
// activePDTFn is used by tests to override calls to activePDT which
// will cause a fault if called in user-mode.
activePDTFn = cpu.ActivePDT
// switchPDTFn is used by tests to override calls to switchPDT which
// will cause a fault if called in user-mode.
switchPDTFn = cpu.SwitchPDT
// mapFn is used by tests and is automatically inlined by the compiler.
mapFn = Map
// mapTemporaryFn is used by tests and is automatically inlined by the compiler.
mapTemporaryFn = MapTemporary
// unmapFn is used by tests and is automatically inlined by the compiler.
unmapFn = Unmap
)
// PageDirectoryTable describes the top-most table in a multi-level paging scheme.
type PageDirectoryTable struct {
pdtFrame pmm.Frame
}
// Init sets up the page table directory starting at the supplied physical
// address. If the supplied frame does not match the currently active PDT, then
// Init assumes that this is a new page table directory that needs
// bootstrapping. In such a case, a temporary mapping is established so that
// Init can:
// - call mem.Memset to clear the frame contents
// - set up a recursive mapping from the last table entry back to the page itself.
func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame) *kernel.Error {
pdt.pdtFrame = pdtFrame
// Check active PDT physical address. If it matches the input pdt then
// nothing more needs to be done
activePdtAddr := activePDTFn()
if pdtFrame.Address() == activePdtAddr {
return nil
}
// Create a temporary mapping for the pdt frame so we can work on it
pdtPage, err := mapTemporaryFn(pdtFrame)
if err != nil {
return err
}
// Clear the page contents and set up a recursive mapping for the last PDT entry
mem.Memset(pdtPage.Address(), 0, mem.PageSize)
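// the last entry lives at byte offset ((1<<9)-1) << 3 = 4088 within the 4KiB page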
lastPdtEntry := (*pageTableEntry)(unsafe.Pointer(pdtPage.Address() + (((1 << pageLevelBits[0]) - 1) << mem.PointerShift)))
*lastPdtEntry = 0
lastPdtEntry.SetFlags(FlagPresent | FlagRW)
lastPdtEntry.SetFrame(pdtFrame)
// Remove temporary mapping
unmapFn(pdtPage)
return nil
}
// Map establishes a mapping between a virtual page and a physical memory frame
// using this PDT. This method behaves in a similar fashion to the global Map()
// function with the difference that it also supports inactive PDTs by
// establishing a temporary mapping so that Map() can access the inactive PDT
// entries.
func (pdt PageDirectoryTable) Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
var (
activePdtFrame = pmm.Frame(activePDTFn() >> mem.PageShift)
lastPdtEntryAddr uintptr
lastPdtEntry *pageTableEntry
)
// If this table is not active we need to temporarily map it to the
// last entry in the active PDT so we can access it using the recursive
// virtual address scheme.
if activePdtFrame != pdt.pdtFrame {
lastPdtEntryAddr = activePdtFrame.Address() + (((1 << pageLevelBits[0]) - 1) << mem.PointerShift)
lastPdtEntry = (*pageTableEntry)(unsafe.Pointer(lastPdtEntryAddr))
lastPdtEntry.SetFrame(pdt.pdtFrame)
flushTLBEntryFn(lastPdtEntryAddr)
}
err := mapFn(page, frame, flags)
if activePdtFrame != pdt.pdtFrame {
lastPdtEntry.SetFrame(activePdtFrame)
flushTLBEntryFn(lastPdtEntryAddr)
}
return err
}
// Unmap removes a mapping previously installed by a call to Map() on this PDT.
// This method behaves in a similar fashion to the global Unmap() function with
// the difference that it also supports inactive PDTs by establishing a
// temporary mapping so that Unmap() can access the inactive PDT entries.
func (pdt PageDirectoryTable) Unmap(page Page) *kernel.Error {
var (
activePdtFrame = pmm.Frame(activePDTFn() >> mem.PageShift)
lastPdtEntryAddr uintptr
lastPdtEntry *pageTableEntry
)
// If this table is not active we need to temporarily map it to the
// last entry in the active PDT so we can access it using the recursive
// virtual address scheme.
if activePdtFrame != pdt.pdtFrame {
lastPdtEntryAddr = activePdtFrame.Address() + (((1 << pageLevelBits[0]) - 1) << mem.PointerShift)
lastPdtEntry = (*pageTableEntry)(unsafe.Pointer(lastPdtEntryAddr))
lastPdtEntry.SetFrame(pdt.pdtFrame)
flushTLBEntryFn(lastPdtEntryAddr)
}
err := unmapFn(page)
if activePdtFrame != pdt.pdtFrame {
lastPdtEntry.SetFrame(activePdtFrame)
flushTLBEntryFn(lastPdtEntryAddr)
}
return err
}
// Activate sets this page directory table as the active one, flushing the TLB.
func (pdt PageDirectoryTable) Activate() {
switchPDTFn(pdt.pdtFrame.Address())
}

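A minimal sketch of the address arithmetic Init, Map and Unmap rely on for the recursive slot, assuming the amd64 values of 9 index bits per level and 8-byte entries; the base address below is purely hypothetical.

package main

import "fmt"

const (
	pageLevelBits0 = 9 // assumed: index bits per level on amd64 (512 entries)
	pointerShift   = 3 // assumed: log2 of the 8-byte entry size
)

func main() {
	tableBase := uintptr(0x1000) // hypothetical table base address
	// The recursive slot is the last entry: index 511, 8 bytes per entry.
	lastEntryAddr := tableBase + (((1 << pageLevelBits0) - 1) << pointerShift)
	fmt.Printf("last entry lives at %#x (offset %d bytes)\n",
		lastEntryAddr, lastEntryAddr-tableBase)
	// last entry lives at 0x1ff8 (offset 4088 bytes)
}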
View File

@@ -0,0 +1,330 @@
package vmm
import (
"gopheros/kernel"
"gopheros/kernel/mem"
"gopheros/kernel/mem/pmm"
"runtime"
"testing"
"unsafe"
)
func TestPageDirectoryTableInitAmd64(t *testing.T) {
if runtime.GOARCH != "amd64" {
t.Skip("test requires amd64 runtime; skipping")
}
defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMapTemporary func(pmm.Frame) (Page, *kernel.Error), origUnmap func(Page) *kernel.Error) {
flushTLBEntryFn = origFlushTLBEntry
activePDTFn = origActivePDT
mapTemporaryFn = origMapTemporary
unmapFn = origUnmap
}(flushTLBEntryFn, activePDTFn, mapTemporaryFn, unmapFn)
t.Run("already mapped PDT", func(t *testing.T) {
var (
pdt PageDirectoryTable
pdtFrame = pmm.Frame(123)
)
activePDTFn = func() uintptr {
return pdtFrame.Address()
}
mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) {
t.Fatal("unexpected call to MapTemporary")
return 0, nil
}
unmapFn = func(_ Page) *kernel.Error {
t.Fatal("unexpected call to Unmap")
return nil
}
if err := pdt.Init(pdtFrame); err != nil {
t.Fatal(err)
}
})
t.Run("not mapped PDT", func(t *testing.T) {
var (
pdt PageDirectoryTable
pdtFrame = pmm.Frame(123)
physPage [mem.PageSize >> mem.PointerShift]pageTableEntry
)
// Fill phys page with random junk
mem.Memset(uintptr(unsafe.Pointer(&physPage[0])), 0xf0, mem.PageSize)
activePDTFn = func() uintptr {
return 0
}
mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) {
return PageFromAddress(uintptr(unsafe.Pointer(&physPage[0]))), nil
}
flushTLBEntryFn = func(_ uintptr) {}
unmapCallCount := 0
unmapFn = func(_ Page) *kernel.Error {
unmapCallCount++
return nil
}
if err := pdt.Init(pdtFrame); err != nil {
t.Fatal(err)
}
if unmapCallCount != 1 {
t.Fatalf("expected Unmap to be called 1 time; called %d", unmapCallCount)
}
for i := 0; i < len(physPage)-1; i++ {
if physPage[i] != 0 {
t.Errorf("expected PDT entry %d to be cleared; got %x", i, physPage[i])
}
}
// The last PDT entry should be recursively mapped to the PDT frame
lastPdtEntry := physPage[len(physPage)-1]
if !lastPdtEntry.HasFlags(FlagPresent | FlagRW) {
t.Fatal("expected last PDT entry to have FlagPresent and FlagRW set")
}
if lastPdtEntry.Frame() != pdtFrame {
t.Fatalf("expected last PDT entry to be recursively mapped to physical frame %x; got %x", pdtFrame, lastPdtEntry.Frame())
}
})
t.Run("temporary mapping failure", func(t *testing.T) {
var (
pdt PageDirectoryTable
pdtFrame = pmm.Frame(123)
)
activePDTFn = func() uintptr {
return 0
}
expErr := &kernel.Error{Module: "test", Message: "error mapping page"}
mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) {
return 0, expErr
}
unmapFn = func(_ Page) *kernel.Error {
t.Fatal("unexpected call to Unmap")
return nil
}
if err := pdt.Init(pdtFrame); err != expErr {
t.Fatalf("expected to get error: %v; got %v", *expErr, err)
}
})
}
func TestPageDirectoryTableMapAmd64(t *testing.T) {
if runtime.GOARCH != "amd64" {
t.Skip("test requires amd64 runtime; skipping")
}
defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMap func(Page, pmm.Frame, PageTableEntryFlag) *kernel.Error) {
flushTLBEntryFn = origFlushTLBEntry
activePDTFn = origActivePDT
mapFn = origMap
}(flushTLBEntryFn, activePDTFn, mapFn)
t.Run("already mapped PDT", func(t *testing.T) {
var (
pdtFrame = pmm.Frame(123)
pdt = PageDirectoryTable{pdtFrame: pdtFrame}
page = PageFromAddress(uintptr(100 * mem.Mb))
)
activePDTFn = func() uintptr {
return pdtFrame.Address()
}
mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag) *kernel.Error {
return nil
}
flushCallCount := 0
flushTLBEntryFn = func(_ uintptr) {
flushCallCount++
}
if err := pdt.Map(page, pmm.Frame(321), FlagRW); err != nil {
t.Fatal(err)
}
if exp := 0; flushCallCount != exp {
t.Fatalf("expected flushTLBEntry to be called %d times; called %d", exp, flushCallCount)
}
})
t.Run("not mapped PDT", func(t *testing.T) {
var (
pdtFrame = pmm.Frame(123)
pdt = PageDirectoryTable{pdtFrame: pdtFrame}
page = PageFromAddress(uintptr(100 * mem.Mb))
activePhysPage [mem.PageSize >> mem.PointerShift]pageTableEntry
activePdtFrame = pmm.Frame(uintptr(unsafe.Pointer(&activePhysPage[0])) >> mem.PageShift)
)
// Initially, activePhysPage is recursively mapped to itself
activePhysPage[len(activePhysPage)-1].SetFlags(FlagPresent | FlagRW)
activePhysPage[len(activePhysPage)-1].SetFrame(activePdtFrame)
activePDTFn = func() uintptr {
return activePdtFrame.Address()
}
mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag) *kernel.Error {
return nil
}
flushCallCount := 0
flushTLBEntryFn = func(_ uintptr) {
switch flushCallCount {
case 0:
// the first time we flush the tlb entry, the last entry of
// the active pdt should be pointing to pdtFrame
if got := activePhysPage[len(activePhysPage)-1].Frame(); got != pdtFrame {
t.Fatalf("expected last PDT entry of active PDT to be re-mapped to frame %x; got %x", pdtFrame, got)
}
case 1:
// the second time we flush the tlb entry, the last entry of
// the active pdt should be pointing back to activePdtFrame
if got := activePhysPage[len(activePhysPage)-1].Frame(); got != activePdtFrame {
t.Fatalf("expected last PDT entry of active PDT to be mapped back frame %x; got %x", activePdtFrame, got)
}
}
flushCallCount++
}
if err := pdt.Map(page, pmm.Frame(321), FlagRW); err != nil {
t.Fatal(err)
}
if exp := 2; flushCallCount != exp {
t.Fatalf("expected flushTLBEntry to be called %d times; called %d", exp, flushCallCount)
}
})
}
func TestPageDirectoryTableUnmapAmd64(t *testing.T) {
if runtime.GOARCH != "amd64" {
t.Skip("test requires amd64 runtime; skipping")
}
defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origUnmap func(Page) *kernel.Error) {
flushTLBEntryFn = origFlushTLBEntry
activePDTFn = origActivePDT
unmapFn = origUnmap
}(flushTLBEntryFn, activePDTFn, unmapFn)
t.Run("already mapped PDT", func(t *testing.T) {
var (
pdtFrame = pmm.Frame(123)
pdt = PageDirectoryTable{pdtFrame: pdtFrame}
page = PageFromAddress(uintptr(100 * mem.Mb))
)
activePDTFn = func() uintptr {
return pdtFrame.Address()
}
unmapFn = func(_ Page) *kernel.Error {
return nil
}
flushCallCount := 0
flushTLBEntryFn = func(_ uintptr) {
flushCallCount++
}
if err := pdt.Unmap(page); err != nil {
t.Fatal(err)
}
if exp := 0; flushCallCount != exp {
t.Fatalf("expected flushTLBEntry to be called %d times; called %d", exp, flushCallCount)
}
})
t.Run("not mapped PDT", func(t *testing.T) {
var (
pdtFrame = pmm.Frame(123)
pdt = PageDirectoryTable{pdtFrame: pdtFrame}
page = PageFromAddress(uintptr(100 * mem.Mb))
activePhysPage [mem.PageSize >> mem.PointerShift]pageTableEntry
activePdtFrame = pmm.Frame(uintptr(unsafe.Pointer(&activePhysPage[0])) >> mem.PageShift)
)
// Initially, activePhysPage is recursively mapped to itself
activePhysPage[len(activePhysPage)-1].SetFlags(FlagPresent | FlagRW)
activePhysPage[len(activePhysPage)-1].SetFrame(activePdtFrame)
activePDTFn = func() uintptr {
return activePdtFrame.Address()
}
unmapFn = func(_ Page) *kernel.Error {
return nil
}
flushCallCount := 0
flushTLBEntryFn = func(_ uintptr) {
switch flushCallCount {
case 0:
// the first time we flush the tlb entry, the last entry of
// the active pdt should be pointing to pdtFrame
if got := activePhysPage[len(activePhysPage)-1].Frame(); got != pdtFrame {
t.Fatalf("expected last PDT entry of active PDT to be re-mapped to frame %x; got %x", pdtFrame, got)
}
case 1:
// the second time we flush the tlb entry, the last entry of
// the active pdt should be pointing back to activePdtFrame
if got := activePhysPage[len(activePhysPage)-1].Frame(); got != activePdtFrame {
t.Fatalf("expected last PDT entry of active PDT to be mapped back frame %x; got %x", activePdtFrame, got)
}
}
flushCallCount++
}
if err := pdt.Unmap(page); err != nil {
t.Fatal(err)
}
if exp := 2; flushCallCount != exp {
t.Fatalf("expected flushTLBEntry to be called %d times; called %d", exp, flushCallCount)
}
})
}
func TestPageDirectoryTableActivateAmd64(t *testing.T) {
if runtime.GOARCH != "amd64" {
t.Skip("test requires amd64 runtime; skipping")
}
defer func(origSwitchPDT func(uintptr)) {
switchPDTFn = origSwitchPDT
}(switchPDTFn)
var (
pdtFrame = pmm.Frame(123)
pdt = PageDirectoryTable{pdtFrame: pdtFrame}
)
switchPDTCallCount := 0
switchPDTFn = func(_ uintptr) {
switchPDTCallCount++
}
pdt.Activate()
if exp := 1; switchPDTCallCount != exp {
t.Fatalf("expected switchPDT to be called %d times; called %d", exp, switchPDTCallCount)
}
}

View File

@@ -0,0 +1,74 @@
package vmm
import (
"gopheros/kernel"
"gopheros/kernel/mem"
"gopheros/kernel/mem/pmm"
)
var (
// ErrInvalidMapping is returned when trying to lookup a virtual memory address that is not yet mapped.
ErrInvalidMapping = &kernel.Error{Module: "vmm", Message: "virtual address does not point to a mapped physical page"}
)
// PageTableEntryFlag describes a flag that can be applied to a page table entry.
type PageTableEntryFlag uintptr
// pageTableEntry describes a page table entry. These entries encode
// a physical frame address and a set of flags. The actual format
// of the entry and flags is architecture-dependent.
type pageTableEntry uintptr
// HasFlags returns true if this entry has all the input flags set.
func (pte pageTableEntry) HasFlags(flags PageTableEntryFlag) bool {
return (uintptr(pte) & uintptr(flags)) == uintptr(flags)
}
// HasAnyFlag returns true if this entry has at least one of the input flags set.
func (pte pageTableEntry) HasAnyFlag(flags PageTableEntryFlag) bool {
return (uintptr(pte) & uintptr(flags)) != 0
}
// SetFlags sets the input list of flags to the page table entry.
func (pte *pageTableEntry) SetFlags(flags PageTableEntryFlag) {
*pte = (pageTableEntry)(uintptr(*pte) | uintptr(flags))
}
// ClearFlags unsets the input list of flags from the page table entry.
func (pte *pageTableEntry) ClearFlags(flags PageTableEntryFlag) {
*pte = (pageTableEntry)(uintptr(*pte) &^ uintptr(flags))
}
// Frame returns the physical page frame that this page table entry points to.
func (pte pageTableEntry) Frame() pmm.Frame {
return pmm.Frame((uintptr(pte) & ptePhysPageMask) >> mem.PageShift)
}
// SetFrame updates the page table entry to point to the given physical frame.
func (pte *pageTableEntry) SetFrame(frame pmm.Frame) {
*pte = (pageTableEntry)((uintptr(*pte) &^ ptePhysPageMask) | frame.Address())
}
// pteForAddress returns the final page table entry that corresponds to a
// particular virtual address. The function performs a page table walk until it
// reaches the final page table entry, returning ErrInvalidMapping if the page
// is not present.
func pteForAddress(virtAddr uintptr) (*pageTableEntry, *kernel.Error) {
var (
err *kernel.Error
entry *pageTableEntry
)
walk(virtAddr, func(pteLevel uint8, pte *pageTableEntry) bool {
if !pte.HasFlags(FlagPresent) {
entry = nil
err = ErrInvalidMapping
return false
}
entry = pte
return true
})
return entry, err
}

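A freestanding sketch of the entry encoding these helpers assume: flags occupy dedicated bits while the frame address sits behind a mask. The mask and flag bit positions below are illustrative amd64-style choices, not necessarily the package's ptePhysPageMask or flag values.

package main

import "fmt"

const (
	pageShift    = 12
	physPageMask = uintptr(0xffffffffff000) // assumed amd64-style address mask
	flagPresent  = uintptr(1 << 0)          // assumed flag bit positions
	flagRW       = uintptr(1 << 1)
)

func main() {
	var pte uintptr
	// SetFrame: clear the address bits, then or-in the frame's base address.
	frame := uintptr(123)
	pte = (pte &^ physPageMask) | (frame << pageShift)
	// SetFlags: or-in flag bits without disturbing the address bits.
	pte |= flagPresent | flagRW
	fmt.Printf("frame=%d present=%t rw=%t\n",
		(pte&physPageMask)>>pageShift, pte&flagPresent != 0, pte&flagRW != 0)
	// frame=123 present=true rw=true
}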
View File

@@ -0,0 +1,60 @@
package vmm
import (
"gopheros/kernel/mem/pmm"
"testing"
)
func TestPageTableEntryFlags(t *testing.T) {
var (
pte pageTableEntry
flag1 = PageTableEntryFlag(1 << 10)
flag2 = PageTableEntryFlag(1 << 21)
)
if pte.HasAnyFlag(flag1 | flag2) {
t.Fatalf("expected HasAnyFlags to return false")
}
pte.SetFlags(flag1 | flag2)
if !pte.HasAnyFlag(flag1 | flag2) {
t.Fatalf("expected HasAnyFlags to return true")
}
if !pte.HasFlags(flag1 | flag2) {
t.Fatalf("expected HasFlags to return true")
}
pte.ClearFlags(flag1)
if !pte.HasAnyFlag(flag1 | flag2) {
t.Fatalf("expected HasAnyFlags to return true")
}
if pte.HasFlags(flag1 | flag2) {
t.Fatalf("expected HasFlags to return false")
}
pte.ClearFlags(flag1 | flag2)
if pte.HasAnyFlag(flag1 | flag2) {
t.Fatalf("expected HasAnyFlags to return false")
}
if pte.HasFlags(flag1 | flag2) {
t.Fatalf("expected HasFlags to return false")
}
}
func TestPageTableEntryFrameEncoding(t *testing.T) {
var (
pte pageTableEntry
physFrame = pmm.Frame(123)
)
pte.SetFrame(physFrame)
if got := pte.Frame(); got != physFrame {
t.Fatalf("expected pte.Frame() to return %v; got %v", physFrame, got)
}
}

View File

@@ -0,0 +1,19 @@
package vmm
import "gopheros/kernel"
// Translate returns the physical address that corresponds to the supplied
// virtual address or ErrInvalidMapping if the virtual address does not
// correspond to a mapped physical address.
func Translate(virtAddr uintptr) (uintptr, *kernel.Error) {
pte, err := pteForAddress(virtAddr)
if err != nil {
return 0, err
}
// Calculate the physical address by taking the physical frame address and
// adding the page offset from the virtual address
physAddr := pte.Frame().Address() + (virtAddr & ((1 << pageLevelShifts[pageLevels-1]) - 1))
return physAddr, nil
}

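The offset arithmetic in Translate, shown standalone under the assumption of 4 KiB pages: the low PageShift bits of the virtual address survive translation unchanged, matching the expectation in the test file that follows.

package main

import "fmt"

const pageShift = 12 // assumed: shift of the final (4 KiB) page level

func main() {
	frameBase := uintptr(42) << pageShift // physical frame base address
	virtAddr := uintptr(1234)             // low 12 bits carry the page offset
	physAddr := frameBase + (virtAddr & (1<<pageShift - 1))
	fmt.Printf("phys: %#x (frame base %#x + offset %d)\n",
		physAddr, frameBase, virtAddr&(1<<pageShift-1))
	// phys: 0x2a4d2 (frame base 0x2a000 + offset 1234)
}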
View File

@@ -0,0 +1,72 @@
package vmm
import (
"gopheros/kernel/mem/pmm"
"runtime"
"testing"
"unsafe"
)
func TestTranslateAmd64(t *testing.T) {
if runtime.GOARCH != "amd64" {
t.Skip("test requires amd64 runtime; skipping")
}
defer func(origPtePtr func(uintptr) unsafe.Pointer) {
ptePtrFn = origPtePtr
}(ptePtrFn)
// the virtual address just contains the page offset
virtAddr := uintptr(1234)
expFrame := pmm.Frame(42)
expPhysAddr := expFrame.Address() + virtAddr
specs := [][pageLevels]bool{
{true, true, true, true},
{false, true, true, true},
{true, false, true, true},
{true, true, false, true},
{true, true, true, false},
}
for specIndex, spec := range specs {
pteCallCount := 0
ptePtrFn = func(entry uintptr) unsafe.Pointer {
var pte pageTableEntry
pte.SetFrame(expFrame)
if specs[specIndex][pteCallCount] {
pte.SetFlags(FlagPresent)
}
pteCallCount++
return unsafe.Pointer(&pte)
}
// An error is expected if any page level contains a non-present page
expError := false
for _, hasMapping := range spec {
if !hasMapping {
expError = true
break
}
}
physAddr, err := Translate(virtAddr)
switch {
case expError && err != ErrInvalidMapping:
t.Errorf("[spec %d] expected to get ErrInvalidMapping; got %v", specIndex, err)
case !expError && err != nil:
t.Errorf("[spec %d] unexpected error %v", specIndex, err)
case !expError && physAddr != expPhysAddr:
t.Errorf("[spec %d] expected phys addr to be 0x%x; got 0x%x", specIndex, expPhysAddr, physAddr)
}
}
}

View File

@@ -0,0 +1,155 @@
package vmm
import (
"gopheros/kernel"
"gopheros/kernel/cpu"
"gopheros/kernel/irq"
"gopheros/kernel/kfmt/early"
"gopheros/kernel/mem"
"gopheros/kernel/mem/pmm"
)
var (
// frameAllocator points to a frame allocator function registered using
// SetFrameAllocator.
frameAllocator FrameAllocatorFn
// the following functions are mocked by tests and are automatically
// inlined by the compiler.
handleExceptionWithCodeFn = irq.HandleExceptionWithCode
readCR2Fn = cpu.ReadCR2
errUnrecoverableFault = &kernel.Error{Module: "vmm", Message: "page/gpf fault"}
)
// FrameAllocatorFn is a function that can allocate physical frames.
type FrameAllocatorFn func() (pmm.Frame, *kernel.Error)
// SetFrameAllocator registers a frame allocator function that will be used by
// the vmm code when new physical frames need to be allocated.
func SetFrameAllocator(allocFn FrameAllocatorFn) {
frameAllocator = allocFn
}
func pageFaultHandler(errorCode uint64, frame *irq.Frame, regs *irq.Regs) {
var (
faultAddress = uintptr(readCR2Fn())
faultPage = PageFromAddress(faultAddress)
pageEntry *pageTableEntry
)
// Lookup entry for the page where the fault occurred
walk(faultPage.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
nextIsPresent := pte.HasFlags(FlagPresent)
if pteLevel == pageLevels-1 && nextIsPresent {
pageEntry = pte
}
// Abort walk if the next page table entry is missing
return nextIsPresent
})
// CoW is supported for RO pages with the CoW flag set
if pageEntry != nil && !pageEntry.HasFlags(FlagRW) && pageEntry.HasFlags(FlagCopyOnWrite) {
var (
copy pmm.Frame
tmpPage Page
err *kernel.Error
)
if copy, err = frameAllocator(); err != nil {
nonRecoverablePageFault(faultAddress, errorCode, frame, regs, err)
} else if tmpPage, err = mapTemporaryFn(copy); err != nil {
nonRecoverablePageFault(faultAddress, errorCode, frame, regs, err)
} else {
// Copy page contents, mark as RW and remove CoW flag
mem.Memcopy(faultPage.Address(), tmpPage.Address(), mem.PageSize)
unmapFn(tmpPage)
// Update mapping to point to the new frame, flag it as RW and
// remove the CoW flag
pageEntry.ClearFlags(FlagCopyOnWrite)
pageEntry.SetFlags(FlagPresent | FlagRW)
pageEntry.SetFrame(copy)
flushTLBEntryFn(faultPage.Address())
// Fault recovered; retry the instruction that caused the fault
return
}
}
nonRecoverablePageFault(faultAddress, errorCode, frame, regs, errUnrecoverableFault)
}
func nonRecoverablePageFault(faultAddress uintptr, errorCode uint64, frame *irq.Frame, regs *irq.Regs, err *kernel.Error) {
early.Printf("\nPage fault while accessing address: 0x%16x\nReason: ", faultAddress)
switch {
case errorCode == 0:
early.Printf("read from non-present page")
case errorCode == 1:
early.Printf("page protection violation (read)")
case errorCode == 2:
early.Printf("write to non-present page")
case errorCode == 3:
early.Printf("page protection violation (write)")
case errorCode == 4:
early.Printf("page-fault in user-mode")
case errorCode == 8:
early.Printf("page table has reserved bit set")
case errorCode == 16:
early.Printf("instruction fetch")
default:
early.Printf("unknown")
}
early.Printf("\n\nRegisters:\n")
regs.Print()
frame.Print()
// TODO: Revisit this when user-mode tasks are implemented
panic(err)
}
func generalProtectionFaultHandler(_ uint64, frame *irq.Frame, regs *irq.Regs) {
early.Printf("\nGeneral protection fault while accessing address: 0x%x\n", readCR2Fn())
early.Printf("Registers:\n")
regs.Print()
frame.Print()
// TODO: Revisit this when user-mode tasks are implemented
panic(errUnrecoverableFault)
}
// reserveZeroedFrame reserves a physical frame to be used together with
// FlagCopyOnWrite for lazy allocation requests.
func reserveZeroedFrame() *kernel.Error {
var (
err *kernel.Error
tempPage Page
)
if ReservedZeroedFrame, err = frameAllocator(); err != nil {
return err
} else if tempPage, err = mapTemporaryFn(ReservedZeroedFrame); err != nil {
return err
}
mem.Memset(tempPage.Address(), 0, mem.PageSize)
unmapFn(tempPage)
// From this point on, ReservedZeroedFrame cannot be mapped with a RW flag
protectReservedZeroedPage = true
return nil
}
// Init initializes the vmm system and installs paging-related exception
// handlers.
func Init() *kernel.Error {
if err := reserveZeroedFrame(); err != nil {
return err
}
handleExceptionWithCodeFn(irq.PageFaultException, pageFaultHandler)
handleExceptionWithCodeFn(irq.GPFException, generalProtectionFaultHandler)
return nil
}

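A toy sketch of the recoverability check pageFaultHandler performs before attempting a copy-on-write clone; the type and helper below are illustrative stand-ins, not kernel code.

package main

import "fmt"

// entry is a toy stand-in for a page table entry's flag state.
type entry struct{ present, rw, cow bool }

// recoverable reports whether a write fault on e can be recovered by
// cloning the frame: the page must be present, read-only and CoW-tagged,
// mirroring the check in pageFaultHandler.
func recoverable(e entry) bool {
	return e.present && !e.rw && e.cow
}

func main() {
	fmt.Println(recoverable(entry{present: true, cow: true}))           // true
	fmt.Println(recoverable(entry{present: true, rw: true, cow: true})) // false: already writable
	fmt.Println(recoverable(entry{}))                                   // false: page not present
}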
View File

@@ -0,0 +1,281 @@
package vmm
import (
"bytes"
"fmt"
"gopheros/kernel"
"gopheros/kernel/cpu"
"gopheros/kernel/driver/video/console"
"gopheros/kernel/hal"
"gopheros/kernel/irq"
"gopheros/kernel/mem"
"gopheros/kernel/mem/pmm"
"strings"
"testing"
"unsafe"
)
func TestRecoverablePageFault(t *testing.T) {
var (
frame irq.Frame
regs irq.Regs
pageEntry pageTableEntry
origPage = make([]byte, mem.PageSize)
clonedPage = make([]byte, mem.PageSize)
err = &kernel.Error{Module: "test", Message: "something went wrong"}
)
defer func(origPtePtr func(uintptr) unsafe.Pointer) {
ptePtrFn = origPtePtr
readCR2Fn = cpu.ReadCR2
frameAllocator = nil
mapTemporaryFn = MapTemporary
unmapFn = Unmap
flushTLBEntryFn = cpu.FlushTLBEntry
}(ptePtrFn)
specs := []struct {
pteFlags PageTableEntryFlag
allocError *kernel.Error
mapError *kernel.Error
expPanic bool
}{
// Missing page
{0, nil, nil, true},
// Page is present but CoW flag not set
{FlagPresent, nil, nil, true},
// Page is present but both CoW and RW flags set
{FlagPresent | FlagRW | FlagCopyOnWrite, nil, nil, true},
// Page is present with CoW flag set but allocating a page copy fails
{FlagPresent | FlagCopyOnWrite, err, nil, true},
// Page is present with CoW flag set but mapping the page copy fails
{FlagPresent | FlagCopyOnWrite, nil, err, true},
// Page is present with CoW flag set
{FlagPresent | FlagCopyOnWrite, nil, nil, false},
}
mockTTY()
ptePtrFn = func(entry uintptr) unsafe.Pointer { return unsafe.Pointer(&pageEntry) }
readCR2Fn = func() uint64 { return uint64(uintptr(unsafe.Pointer(&origPage[0]))) }
unmapFn = func(_ Page) *kernel.Error { return nil }
flushTLBEntryFn = func(_ uintptr) {}
for specIndex, spec := range specs {
t.Run(fmt.Sprint(specIndex), func(t *testing.T) {
defer func() {
err := recover()
if spec.expPanic && err == nil {
t.Error("expected a panic")
} else if !spec.expPanic {
if err != nil {
t.Error("unexpected panic")
return
}
for i := 0; i < len(origPage); i++ {
if origPage[i] != clonedPage[i] {
t.Errorf("expected clone page to be a copy of the original page; mismatch at index %d", i)
}
}
}
}()
mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), spec.mapError }
SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
addr := uintptr(unsafe.Pointer(&clonedPage[0]))
return pmm.Frame(addr >> mem.PageShift), spec.allocError
})
for i := 0; i < len(origPage); i++ {
origPage[i] = byte(i % 256)
clonedPage[i] = 0
}
pageEntry = 0
pageEntry.SetFlags(spec.pteFlags)
pageFaultHandler(2, &frame, &regs)
})
}
}
func TestNonRecoverablePageFault(t *testing.T) {
specs := []struct {
errCode uint64
expReason string
}{
{
0,
"read from non-present page",
},
{
1,
"page protection violation (read)",
},
{
2,
"write to non-present page",
},
{
3,
"page protection violation (write)",
},
{
4,
"page-fault in user-mode",
},
{
8,
"page table has reserved bit set",
},
{
16,
"instruction fetch",
},
{
0xf00,
"unknown",
},
}
var (
regs irq.Regs
frame irq.Frame
)
for specIndex, spec := range specs {
t.Run(fmt.Sprint(specIndex), func(t *testing.T) {
defer func() {
if err := recover(); err != errUnrecoverableFault {
t.Errorf("expected a panic with errUnrecoverableFault; got %v", err)
}
}()
fb := mockTTY()
nonRecoverablePageFault(0xbadf00d000, spec.errCode, &frame, &regs, errUnrecoverableFault)
if got := readTTY(fb); !strings.Contains(got, spec.expReason) {
t.Errorf("expected reason %q; got output:\n%q", spec.expReason, got)
}
})
}
}
func TestGPFHandler(t *testing.T) {
defer func() {
readCR2Fn = cpu.ReadCR2
}()
var (
regs irq.Regs
frame irq.Frame
)
readCR2Fn = func() uint64 {
return 0xbadf00d000
}
defer func() {
if err := recover(); err != errUnrecoverableFault {
t.Errorf("expected a panic with errUnrecoverableFault; got %v", err)
}
}()
mockTTY()
generalProtectionFaultHandler(0, &frame, &regs)
}
func TestInit(t *testing.T) {
defer func() {
frameAllocator = nil
mapTemporaryFn = MapTemporary
unmapFn = Unmap
handleExceptionWithCodeFn = irq.HandleExceptionWithCode
}()
// reserve space for an allocated page
reservedPage := make([]byte, mem.PageSize)
t.Run("success", func(t *testing.T) {
// fill page with junk
for i := 0; i < len(reservedPage); i++ {
reservedPage[i] = byte(i % 256)
}
SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
addr := uintptr(unsafe.Pointer(&reservedPage[0]))
return pmm.Frame(addr >> mem.PageShift), nil
})
unmapFn = func(p Page) *kernel.Error { return nil }
mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil }
handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {}
if err := Init(); err != nil {
t.Fatal(err)
}
// reserved page should be zeroed
for i := 0; i < len(reservedPage); i++ {
if reservedPage[i] != 0 {
t.Errorf("expected reserved page to be zeroed; got byte %d at index %d", reservedPage[i], i)
}
}
})
t.Run("blank page allocation error", func(t *testing.T) {
expErr := &kernel.Error{Module: "test", Message: "out of memory"}
SetFrameAllocator(func() (pmm.Frame, *kernel.Error) { return pmm.InvalidFrame, expErr })
unmapFn = func(p Page) *kernel.Error { return nil }
mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil }
handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {}
if err := Init(); err != expErr {
t.Fatalf("expected error: %v; got %v", expErr, err)
}
})
t.Run("blank page mapping error", func(t *testing.T) {
expErr := &kernel.Error{Module: "test", Message: "map failed"}
SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
addr := uintptr(unsafe.Pointer(&reservedPage[0]))
return pmm.Frame(addr >> mem.PageShift), nil
})
unmapFn = func(p Page) *kernel.Error { return nil }
mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), expErr }
handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {}
if err := Init(); err != expErr {
t.Fatalf("expected error: %v; got %v", expErr, err)
}
})
}
func readTTY(fb []byte) string {
var buf bytes.Buffer
for i := 0; i < len(fb); i += 2 {
ch := fb[i]
if ch == 0 {
if i+2 < len(fb) && fb[i+2] != 0 {
buf.WriteByte('\n')
}
continue
}
buf.WriteByte(ch)
}
return buf.String()
}
func mockTTY() []byte {
// Mock a tty to handle early.Printf output
mockConsoleFb := make([]byte, 160*25)
mockConsole := &console.Ega{}
mockConsole.Init(80, 25, uintptr(unsafe.Pointer(&mockConsoleFb[0])))
hal.ActiveTerminal.AttachTo(mockConsole)
return mockConsoleFb
}

View File

@@ -0,0 +1,55 @@
package vmm
import (
"gopheros/kernel/mem"
"unsafe"
)
var (
// ptePtrFn returns a pointer to the supplied entry address. It is
// used by tests to override the generated page table entry pointers so
// walk() can be properly tested. When compiling the kernel this function
// will be automatically inlined.
ptePtrFn = func(entryAddr uintptr) unsafe.Pointer {
return unsafe.Pointer(entryAddr)
}
)
// pageTableWalker is a function that can be passed to the walk method. The
// function receives the current page level and page table entry as its
// arguments. If the function returns false, then the page walk is aborted.
type pageTableWalker func(pteLevel uint8, pte *pageTableEntry) bool
// walk performs a page table walk for the given virtual address. It calls the
// supplied walkFn with the page table entry that corresponds to each page
// table level. If walkFn returns false, the walk is aborted.
func walk(virtAddr uintptr, walkFn pageTableWalker) {
var (
level uint8
tableAddr, entryAddr, entryIndex uintptr
ok bool
)
// tableAddr is initially set to the recursively mapped virtual address for the
// last entry in the top-most page table. Dereferencing a pointer to this address
// will allow us to access the entries of the top-most page table.
for level, tableAddr = uint8(0), pdtVirtualAddr; level < pageLevels; level, tableAddr = level+1, entryAddr {
// Extract the bits from virtual address that correspond to the
// index in this level's page table
entryIndex = (virtAddr >> pageLevelShifts[level]) & ((1 << pageLevelBits[level]) - 1)
// By shifting the table virtual address left by pageLevelShifts[level] we add
// a new level of indirection to our recursive mapping allowing us to access
// the table pointed to by the page entry
entryAddr = tableAddr + (entryIndex << mem.PointerShift)
if ok = walkFn(level, (*pageTableEntry)(ptePtrFn(entryAddr))); !ok {
return
}
// Shift left by the number of bits for this paging level to get
// the virtual address of the table pointed to by entryAddr
entryAddr <<= pageLevelBits[level]
}
}

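The per-level index extraction that walk performs, shown standalone for an assumed amd64 long-mode layout (four levels of 9 index bits above a 12-bit offset); the sample address is the one the walk test below breaks down.

package main

import "fmt"

// Assumed amd64 long-mode layout: four levels of 9 index bits each
// above a 12-bit page offset.
var (
	levelShifts = [4]uint{39, 30, 21, 12}
	levelBits   = [4]uint64{9, 9, 9, 9}
)

func main() {
	virtAddr := uint64(0x8080604400)
	for level, shift := range levelShifts {
		index := (virtAddr >> shift) & (1<<levelBits[level] - 1)
		fmt.Printf("level %d index: %d\n", level, index)
	}
	fmt.Printf("page offset: %d\n", virtAddr&(1<<12-1))
	// level 0 index: 1
	// level 1 index: 2
	// level 2 index: 3
	// level 3 index: 4
	// page offset: 1024
}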
View File

@@ -0,0 +1,75 @@
package vmm
import (
"gopheros/kernel/mem"
"runtime"
"testing"
"unsafe"
)
func TestPtePtrFn(t *testing.T) {
// Dummy test to keep coverage happy
if exp, got := unsafe.Pointer(uintptr(123)), ptePtrFn(uintptr(123)); exp != got {
t.Fatalf("expected ptePtrFn to return %v; got %v", exp, got)
}
}
func TestWalkAmd64(t *testing.T) {
if runtime.GOARCH != "amd64" {
t.Skip("test requires amd64 runtime; skipping")
}
defer func(origPtePtr func(uintptr) unsafe.Pointer) {
ptePtrFn = origPtePtr
}(ptePtrFn)
// This address breaks down to:
// p4 index: 1
// p3 index: 2
// p2 index: 3
// p1 index: 4
// offset : 1024
targetAddr := uintptr(0x8080604400)
sizeofPteEntry := uintptr(unsafe.Sizeof(pageTableEntry(0)))
expEntryAddrBits := [pageLevels][pageLevels + 1]uintptr{
{511, 511, 511, 511, 1 * sizeofPteEntry},
{511, 511, 511, 1, 2 * sizeofPteEntry},
{511, 511, 1, 2, 3 * sizeofPteEntry},
{511, 1, 2, 3, 4 * sizeofPteEntry},
}
pteCallCount := 0
ptePtrFn = func(entry uintptr) unsafe.Pointer {
if pteCallCount >= pageLevels {
t.Fatalf("unexpected call to ptePtrFn; already called %d times", pageLevels)
}
for i := 0; i < pageLevels; i++ {
pteIndex := (entry >> pageLevelShifts[i]) & ((1 << pageLevelBits[i]) - 1)
if pteIndex != expEntryAddrBits[pteCallCount][i] {
t.Errorf("[ptePtrFn call %d] expected pte entry for level %d to use offset %d; got %d", pteCallCount, i, expEntryAddrBits[pteCallCount][i], pteIndex)
}
}
// Check the page offset
pteIndex := entry & ((1 << mem.PageShift) - 1)
if pteIndex != expEntryAddrBits[pteCallCount][pageLevels] {
t.Errorf("[ptePtrFn call %d] expected pte offset to be %d; got %d", pteCallCount, expEntryAddrBits[pteCallCount][pageLevels], pteIndex)
}
pteCallCount++
return unsafe.Pointer(uintptr(0xf00))
}
walkFnCallCount := 0
walk(targetAddr, func(level uint8, entry *pageTableEntry) bool {
walkFnCallCount++
return walkFnCallCount != pageLevels
})
if pteCallCount != pageLevels {
t.Errorf("expected ptePtrFn to be called %d times; got %d", pageLevels, pteCallCount)
}
}