Mirror of https://github.com/taigrr/gopher-os (synced 2025-01-18 04:43:13 -08:00)
Implement API for mapping virtual addresses to physical frames
The API provides the Map() and MapTemporary() functions that establish virtual -> physical address mappings using the currently active page directory table. Mapped pages can be unmapped using the Unmap() function. When unmapping virtual addresses, the page tables leading to them will not be automatically released even if they are empty. This will be addressed by a future commit.
parent 6e03af069a
commit 8e38ff969d
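For orientation, here is a minimal usage sketch of the new API, written against the exported functions added in this commit. The allocator and the example address are stand-ins; a real caller would plug in the kernel's physical frame allocator.

package example

import (
	"github.com/achilleasa/gopher-os/kernel"
	"github.com/achilleasa/gopher-os/kernel/mem"
	"github.com/achilleasa/gopher-os/kernel/mem/pmm"
	"github.com/achilleasa/gopher-os/kernel/mem/vmm"
)

// allocFrame is a stand-in FrameAllocator; the kernel would supply its own
// allocator backed by the physical memory manager.
var allocFrame vmm.FrameAllocator = func(_ mem.PageOrder) (pmm.Frame, *kernel.Error) {
	return pmm.Frame(0), nil // placeholder frame
}

func exampleMappings() *kernel.Error {
	page := vmm.PageFromAddress(uintptr(0xffff800000100000)) // arbitrary kernel-space page
	frame := pmm.Frame(123)

	// Establish a RW mapping in the currently active page directory table.
	if err := vmm.Map(page, frame, vmm.FlagRW, allocFrame); err != nil {
		return err
	}

	// Temporarily map the same frame at the reserved temporary-mapping page.
	tmp, err := vmm.MapTemporary(frame, allocFrame)
	if err != nil {
		return err
	}

	// Drop both mappings; page tables that become empty are not reclaimed
	// yet (left for a future commit).
	if err := vmm.Unmap(tmp); err != nil {
		return err
	}
	return vmm.Unmap(page)
}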
@@ -12,6 +12,12 @@ const (
	// address pointed to by a page table entry. For this particular architecture,
	// bits 12-51 contain the physical memory address.
	ptePhysPageMask = uintptr(0x000ffffffffff000)

	// tempMappingAddr is a reserved virtual page address used for
	// temporary physical page mappings (e.g. when mapping inactive PDT
	// pages). For amd64 this address uses the following table indices:
	// 510, 511, 511, 511.
	tempMappingAddr = uintptr(0xffffff7ffffff000)
)

var (
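As a quick check of the 510, 511, 511, 511 table indices mentioned in the comment above, the reserved address can be decomposed by hand: amd64 4-level paging uses a 9-bit index per level plus a 12-bit page offset. The standalone snippet below is not part of the commit and only mirrors the constant's value.

package main

import "fmt"

func main() {
	const tempMappingAddr = uintptr(0xffffff7ffffff000) // mirrors the kernel constant

	// P4 index lives in bits 39-47, P3 in 30-38, P2 in 21-29, P1 in 12-20.
	for level, name := range []string{"p4", "p3", "p2", "p1"} {
		shift := uint(39 - 9*level)
		fmt.Printf("%s index: %d\n", name, (tempMappingAddr>>shift)&0x1ff) // 510, 511, 511, 511
	}
	fmt.Printf("page offset: %#x\n", tempMappingAddr&0xfff) // 0x0
}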
117  kernel/mem/vmm/map.go  Normal file
@@ -0,0 +1,117 @@
package vmm

import (
	"unsafe"

	"github.com/achilleasa/gopher-os/kernel"
	"github.com/achilleasa/gopher-os/kernel/mem"
	"github.com/achilleasa/gopher-os/kernel/mem/pmm"
)

var (
	// nextAddrFn is used by tests to override the nextTableAddr
	// calculations used by Map. When compiling the kernel this function
	// will be automatically inlined.
	nextAddrFn = func(entryAddr uintptr) uintptr {
		return entryAddr
	}

	// flushTLBEntryFn is used by tests to override calls to flushTLBEntry
	// which would cause a fault if called in user-mode.
	flushTLBEntryFn = flushTLBEntry

	errNoHugePageSupport = &kernel.Error{Module: "vmm", Message: "huge pages are not supported"}
)

// FrameAllocator is a function that can allocate physical frames of the specified order.
type FrameAllocator func(mem.PageOrder) (pmm.Frame, *kernel.Error)

// Map establishes a mapping between a virtual page and a physical memory frame
// using the currently active page directory table. Calls to Map will use the
// supplied physical frame allocator to initialize missing page tables at each
// paging level supported by the MMU.
func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllocator) *kernel.Error {
	var err *kernel.Error

	walk(page.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
		// If we have reached the last level, all we need to do is map the
		// frame in place, flag it as present and flush its TLB entry.
		if pteLevel == pageLevels-1 {
			*pte = 0
			pte.SetFrame(frame)
			pte.SetFlags(FlagPresent | flags)
			flushTLBEntryFn(page.Address())
			return true
		}

		if pte.HasFlags(FlagHugePage) {
			err = errNoHugePageSupport
			return false
		}

		// The next table does not yet exist; we need to allocate a
		// physical frame for it, map it and clear its contents.
		if !pte.HasFlags(FlagPresent) {
			var newTableFrame pmm.Frame
			newTableFrame, err = allocFn(mem.PageOrder(0))
			if err != nil {
				return false
			}

			*pte = 0
			pte.SetFrame(newTableFrame)
			pte.SetFlags(FlagPresent | FlagRW)

			// The next page table is now reachable, but we need to
			// make sure that the newly allocated page is properly cleared.
			nextTableAddr := (uintptr(unsafe.Pointer(pte)) << pageLevelBits[pteLevel+1])
			mem.Memset(nextAddrFn(nextTableAddr), 0, mem.PageSize)
		}

		return true
	})

	return err
}

// MapTemporary establishes a temporary RW mapping of a physical memory frame
// to a fixed virtual address, overwriting any previous mapping. The temporary
// mapping mechanism is primarily used by the kernel to access and initialize
// inactive page tables.
func MapTemporary(frame pmm.Frame, allocFn FrameAllocator) (Page, *kernel.Error) {
	if err := Map(PageFromAddress(tempMappingAddr), frame, FlagRW, allocFn); err != nil {
		return 0, err
	}

	return PageFromAddress(tempMappingAddr), nil
}

// Unmap removes a mapping previously installed via a call to Map or MapTemporary.
func Unmap(page Page) *kernel.Error {
	var err *kernel.Error

	walk(page.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
		// If we have reached the last level, all we need to do is mark the
		// page as non-present and flush its TLB entry.
		if pteLevel == pageLevels-1 {
			pte.ClearFlags(FlagPresent)
			flushTLBEntryFn(page.Address())
			return true
		}

		// The next table is not present; this is an invalid mapping.
		if !pte.HasFlags(FlagPresent) {
			err = ErrInvalidMapping
			return false
		}

		if pte.HasFlags(FlagHugePage) {
			err = errNoHugePageSupport
			return false
		}

		return true
	})

	return err
}
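The temporary-mapping mechanism above lends itself to small helpers such as the hypothetical one below (written as if it lived in package vmm; it is not part of this commit), which zeroes a physical frame that has no permanent mapping, e.g. a frame that will back an inactive page table:

// zeroFrame is a hypothetical helper: map the frame at the reserved
// temporary page, clear it, then drop the temporary mapping again.
func zeroFrame(frame pmm.Frame, allocFn FrameAllocator) *kernel.Error {
	page, err := MapTemporary(frame, allocFn)
	if err != nil {
		return err
	}

	mem.Memset(page.Address(), 0, mem.PageSize)

	return Unmap(page)
}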
251  kernel/mem/vmm/map_test.go  Normal file
@@ -0,0 +1,251 @@
package vmm

import (
	"runtime"
	"testing"
	"unsafe"

	"github.com/achilleasa/gopher-os/kernel"
	"github.com/achilleasa/gopher-os/kernel/mem"
	"github.com/achilleasa/gopher-os/kernel/mem/pmm"
)

func TestNextAddrFn(t *testing.T) {
	// Dummy test to keep coverage happy
	if exp, got := uintptr(123), nextAddrFn(uintptr(123)); exp != got {
		t.Fatalf("expected nextAddrFn to return %v; got %v", exp, got)
	}
}

func TestMapTemporaryAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}

	defer func(origPtePtr func(uintptr) unsafe.Pointer, origNextAddrFn func(uintptr) uintptr, origFlushTLBEntryFn func(uintptr)) {
		ptePtrFn = origPtePtr
		nextAddrFn = origNextAddrFn
		flushTLBEntryFn = origFlushTLBEntryFn
	}(ptePtrFn, nextAddrFn, flushTLBEntryFn)

	var physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry
	nextPhysPage := 0

	// allocFn returns pages from index 1; we keep index 0 for the P4 entry
	allocFn := func(_ mem.PageOrder) (pmm.Frame, *kernel.Error) {
		nextPhysPage++
		pageAddr := unsafe.Pointer(&physPages[nextPhysPage][0])
		return pmm.Frame(uintptr(pageAddr) >> mem.PageShift), nil
	}

	pteCallCount := 0
	ptePtrFn = func(entry uintptr) unsafe.Pointer {
		pteCallCount++
		// The last 12 bits encode the page table offset in bytes
		// which we need to convert to a uint64 entry index
		pteIndex := (entry & uintptr(mem.PageSize-1)) >> mem.PointerShift
		return unsafe.Pointer(&physPages[pteCallCount-1][pteIndex])
	}

	nextAddrFn = func(entry uintptr) uintptr {
		return uintptr(unsafe.Pointer(&physPages[nextPhysPage][0]))
	}

	flushTLBEntryCallCount := 0
	flushTLBEntryFn = func(uintptr) {
		flushTLBEntryCallCount++
	}

	// The temporary mapping address breaks down to:
	// p4 index: 510
	// p3 index: 511
	// p2 index: 511
	// p1 index: 511
	frame := pmm.Frame(123)
	levelIndices := []uint{510, 511, 511, 511}

	page, err := MapTemporary(frame, allocFn)
	if err != nil {
		t.Fatal(err)
	}

	if got := page.Address(); got != tempMappingAddr {
		t.Fatalf("expected temp mapping virtual address to be %x; got %x", tempMappingAddr, got)
	}

	for level, physPage := range physPages {
		pte := physPage[levelIndices[level]]
		if !pte.HasFlags(FlagPresent | FlagRW) {
			t.Errorf("[pte at level %d] expected entry to have FlagPresent and FlagRW set", level)
		}

		switch {
		case level < pageLevels-1:
			if exp, got := pmm.Frame(uintptr(unsafe.Pointer(&physPages[level+1][0]))>>mem.PageShift), pte.Frame(); got != exp {
				t.Errorf("[pte at level %d] expected entry frame to be %d; got %d", level, exp, got)
			}
		default:
			// The last pte entry should point to frame
			if got := pte.Frame(); got != frame {
				t.Errorf("[pte at level %d] expected entry frame to be %d; got %d", level, frame, got)
			}
		}
	}

	if exp := 1; flushTLBEntryCallCount != exp {
		t.Errorf("expected flushTLBEntry to be called %d times; got %d", exp, flushTLBEntryCallCount)
	}
}

func TestMapTemporaryErrorsAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}

	defer func(origPtePtr func(uintptr) unsafe.Pointer, origNextAddrFn func(uintptr) uintptr, origFlushTLBEntryFn func(uintptr)) {
		ptePtrFn = origPtePtr
		nextAddrFn = origNextAddrFn
		flushTLBEntryFn = origFlushTLBEntryFn
	}(ptePtrFn, nextAddrFn, flushTLBEntryFn)

	var physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry

	// The reserved virt address uses the following page level indices: 510, 511, 511, 511
	p4Index := 510
	frame := pmm.Frame(123)

	t.Run("encounter huge page", func(t *testing.T) {
		physPages[0][p4Index].SetFlags(FlagPresent | FlagHugePage)

		ptePtrFn = func(entry uintptr) unsafe.Pointer {
			// The last 12 bits encode the page table offset in bytes
			// which we need to convert to a uint64 entry index
			pteIndex := (entry & uintptr(mem.PageSize-1)) >> mem.PointerShift
			return unsafe.Pointer(&physPages[0][pteIndex])
		}

		if _, err := MapTemporary(frame, nil); err != errNoHugePageSupport {
			t.Fatalf("expected to get errNoHugePageSupport; got %v", err)
		}
	})

	t.Run("allocFn returns an error", func(t *testing.T) {
		physPages[0][p4Index] = 0

		expErr := &kernel.Error{Module: "test", Message: "out of memory"}

		allocFn := func(_ mem.PageOrder) (pmm.Frame, *kernel.Error) {
			return 0, expErr
		}

		if _, err := MapTemporary(frame, allocFn); err != expErr {
			t.Fatalf("got unexpected error %v", err)
		}
	})
}

func TestUnmapAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}

	defer func(origPtePtr func(uintptr) unsafe.Pointer, origFlushTLBEntryFn func(uintptr)) {
		ptePtrFn = origPtePtr
		flushTLBEntryFn = origFlushTLBEntryFn
	}(ptePtrFn, flushTLBEntryFn)

	var (
		physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry
		frame     = pmm.Frame(123)
	)

	// Emulate a page mapped to virtAddr 0 across all page levels
	for level := 0; level < pageLevels; level++ {
		physPages[level][0].SetFlags(FlagPresent | FlagRW)
		if level < pageLevels-1 {
			physPages[level][0].SetFrame(pmm.Frame(uintptr(unsafe.Pointer(&physPages[level+1][0])) >> mem.PageShift))
		} else {
			physPages[level][0].SetFrame(frame)
		}
	}

	pteCallCount := 0
	ptePtrFn = func(entry uintptr) unsafe.Pointer {
		pteCallCount++
		return unsafe.Pointer(&physPages[pteCallCount-1][0])
	}

	flushTLBEntryCallCount := 0
	flushTLBEntryFn = func(uintptr) {
		flushTLBEntryCallCount++
	}

	if err := Unmap(PageFromAddress(0)); err != nil {
		t.Fatal(err)
	}

	for level, physPage := range physPages {
		pte := physPage[0]

		switch {
		case level < pageLevels-1:
			if !pte.HasFlags(FlagPresent) {
				t.Errorf("[pte at level %d] expected entry to still have FlagPresent set", level)
			}
			if exp, got := pmm.Frame(uintptr(unsafe.Pointer(&physPages[level+1][0]))>>mem.PageShift), pte.Frame(); got != exp {
				t.Errorf("[pte at level %d] expected entry frame to still be %d; got %d", level, exp, got)
			}
		default:
			if pte.HasFlags(FlagPresent) {
				t.Errorf("[pte at level %d] expected entry not to have FlagPresent set", level)
			}

			// The last pte entry should still point to frame
			if got := pte.Frame(); got != frame {
				t.Errorf("[pte at level %d] expected entry frame to be %d; got %d", level, frame, got)
			}
		}
	}

	if exp := 1; flushTLBEntryCallCount != exp {
		t.Errorf("expected flushTLBEntry to be called %d times; got %d", exp, flushTLBEntryCallCount)
	}
}

func TestUnmapErrorsAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}

	defer func(origPtePtr func(uintptr) unsafe.Pointer, origNextAddrFn func(uintptr) uintptr, origFlushTLBEntryFn func(uintptr)) {
		ptePtrFn = origPtePtr
		nextAddrFn = origNextAddrFn
		flushTLBEntryFn = origFlushTLBEntryFn
	}(ptePtrFn, nextAddrFn, flushTLBEntryFn)

	var physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry

	t.Run("encounter huge page", func(t *testing.T) {
		physPages[0][0].SetFlags(FlagPresent | FlagHugePage)

		ptePtrFn = func(entry uintptr) unsafe.Pointer {
			// The last 12 bits encode the page table offset in bytes
			// which we need to convert to a uint64 entry index
			pteIndex := (entry & uintptr(mem.PageSize-1)) >> mem.PointerShift
			return unsafe.Pointer(&physPages[0][pteIndex])
		}

		if err := Unmap(PageFromAddress(0)); err != errNoHugePageSupport {
			t.Fatalf("expected to get errNoHugePageSupport; got %v", err)
		}
	})

	t.Run("virtual address not mapped", func(t *testing.T) {
		physPages[0][0].ClearFlags(FlagPresent)

		if err := Unmap(PageFromAddress(0)); err != ErrInvalidMapping {
			t.Fatalf("expected to get ErrInvalidMapping; got %v", err)
		}
	})
}
11  kernel/mem/vmm/tlb.go  Normal file
@@ -0,0 +1,11 @@
package vmm

// flushTLBEntry flushes a TLB entry for a particular virtual address.
func flushTLBEntry(virtAddr uintptr)

// switchPDT sets the root page table directory to point to the specified
// physical address and flushes the TLB.
func switchPDT(pdtPhysAddr uintptr)

// activePDT returns the physical address of the currently active page table.
func activePDT() uintptr
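Since switchPDT and activePDT are unexported, a caller inside package vmm could combine them along the lines of the hypothetical helper below (not part of this commit) to run some work with a different PDT active and then restore the original one:

// withPDT is a hypothetical sketch: activate the PDT at pdtPhysAddr, run fn,
// then switch back to the previously active PDT. It assumes the mappings fn
// relies on are present in both page directory tables.
func withPDT(pdtPhysAddr uintptr, fn func()) {
	prevPDT := activePDT()
	switchPDT(pdtPhysAddr)
	fn()
	switchPDT(prevPDT)
}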
15  kernel/mem/vmm/tlb_amd64.s  Normal file
@@ -0,0 +1,15 @@
#include "textflag.h"

TEXT ·flushTLBEntry(SB),NOSPLIT,$0
	INVLPG virtAddr+0(FP)
	RET

TEXT ·switchPDT(SB),NOSPLIT,$0
	// loading CR3 also triggers a TLB flush
	MOVQ pdtPhysAddr+0(FP), CR3
	RET

TEXT ·activePDT(SB),NOSPLIT,$0
	MOVQ CR3, AX
	MOVQ AX, ret+0(FP)
	RET