From dbdf686d2771dc9a839528bfbb041ef79f07c9fc Mon Sep 17 00:00:00 2001
From: Achilleas Anagnostopoulos
Date: Thu, 15 Jun 2017 09:44:29 +0100
Subject: [PATCH] Provide helper for setting the active frame allocator

This allows us to remove the allocFn argument from the vmm functions,
which sometimes causes the compiler's escape analysis to incorrectly
flag it as escaping to the heap.
---
 kernel/mem/vmm/map.go      | 11 ++++-------
 kernel/mem/vmm/map_test.go | 16 +++++++++-------
 kernel/mem/vmm/pdt.go      |  8 ++++----
 kernel/mem/vmm/pdt_test.go | 24 ++++++++++++------------
 kernel/mem/vmm/vmm.go      | 17 +++++++++++++++++
 5 files changed, 46 insertions(+), 30 deletions(-)
 create mode 100644 kernel/mem/vmm/vmm.go

diff --git a/kernel/mem/vmm/map.go b/kernel/mem/vmm/map.go
index 68aadf9..4dfe64d 100644
--- a/kernel/mem/vmm/map.go
+++ b/kernel/mem/vmm/map.go
@@ -23,14 +23,11 @@ var (
 	errNoHugePageSupport = &kernel.Error{Module: "vmm", Message: "huge pages are not supported"}
 )
 
-// FrameAllocatorFn is a function that can allocate physical frames.
-type FrameAllocatorFn func() (pmm.Frame, *kernel.Error)
-
 // Map establishes a mapping between a virtual page and a physical memory frame
 // using the currently active page directory table. Calls to Map will use the
 // supplied physical frame allocator to initialize missing page tables at each
 // paging level supported by the MMU.
-func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllocatorFn) *kernel.Error {
+func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
 	var err *kernel.Error
 
 	walk(page.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
@@ -53,7 +50,7 @@ func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllo
 		// physical frame for it map it and clear its contents.
 		if !pte.HasFlags(FlagPresent) {
 			var newTableFrame pmm.Frame
-			newTableFrame, err = allocFn()
+			newTableFrame, err = frameAllocator()
 			if err != nil {
 				return false
 			}
@@ -78,8 +75,8 @@ func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllo
 // to a fixed virtual address overwriting any previous mapping. The temporary
 // mapping mechanism is primarily used by the kernel to access and initialize
 // inactive page tables.
-func MapTemporary(frame pmm.Frame, allocFn FrameAllocatorFn) (Page, *kernel.Error) {
-	if err := Map(PageFromAddress(tempMappingAddr), frame, FlagRW, allocFn); err != nil {
+func MapTemporary(frame pmm.Frame) (Page, *kernel.Error) {
+	if err := Map(PageFromAddress(tempMappingAddr), frame, FlagRW); err != nil {
 		return 0, err
 	}
 
diff --git a/kernel/mem/vmm/map_test.go b/kernel/mem/vmm/map_test.go
index 9c961ee..cee8379 100644
--- a/kernel/mem/vmm/map_test.go
+++ b/kernel/mem/vmm/map_test.go
@@ -26,17 +26,18 @@ func TestMapTemporaryAmd64(t *testing.T) {
 		ptePtrFn = origPtePtr
 		nextAddrFn = origNextAddrFn
 		flushTLBEntryFn = origFlushTLBEntryFn
+		frameAllocator = nil
 	}(ptePtrFn, nextAddrFn, flushTLBEntryFn)
 
 	var physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry
 	nextPhysPage := 0
 
 	// allocFn returns pages from index 1; we keep index 0 for the P4 entry
-	allocFn := func() (pmm.Frame, *kernel.Error) {
+	SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
 		nextPhysPage++
 		pageAddr := unsafe.Pointer(&physPages[nextPhysPage][0])
 		return pmm.Frame(uintptr(pageAddr) >> mem.PageShift), nil
-	}
+	})
 
 	pteCallCount := 0
 	ptePtrFn = func(entry uintptr) unsafe.Pointer {
@@ -64,7 +65,7 @@ func TestMapTemporaryAmd64(t *testing.T) {
 	frame := pmm.Frame(123)
 	levelIndices := []uint{510, 511, 511, 511}
 
-	page, err := MapTemporary(frame, allocFn)
+	page, err := MapTemporary(frame)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -124,21 +125,22 @@ func TestMapTemporaryErrorsAmd64(t *testing.T) {
 			return unsafe.Pointer(&physPages[0][pteIndex])
 		}
 
-		if _, err := MapTemporary(frame, nil); err != errNoHugePageSupport {
+		if _, err := MapTemporary(frame); err != errNoHugePageSupport {
 			t.Fatalf("expected to get errNoHugePageSupport; got %v", err)
 		}
 	})
 
 	t.Run("allocFn returns an error", func(t *testing.T) {
+		defer func() { frameAllocator = nil }()
 		physPages[0][p4Index] = 0
 
 		expErr := &kernel.Error{Module: "test", Message: "out of memory"}
 
-		allocFn := func() (pmm.Frame, *kernel.Error) {
+		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
 			return 0, expErr
-		}
+		})
 
-		if _, err := MapTemporary(frame, allocFn); err != expErr {
+		if _, err := MapTemporary(frame); err != expErr {
 			t.Fatalf("got unexpected error %v", err)
 		}
 	})
diff --git a/kernel/mem/vmm/pdt.go b/kernel/mem/vmm/pdt.go
index 8011d31..e60b336 100644
--- a/kernel/mem/vmm/pdt.go
+++ b/kernel/mem/vmm/pdt.go
@@ -39,7 +39,7 @@ type PageDirectoryTable struct {
 // Init can:
 // - call mem.Memset to clear the frame contents
 // - setup a recursive mapping for the last table entry to the page itself.
-func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame, allocFn FrameAllocatorFn) *kernel.Error {
+func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame) *kernel.Error {
 	pdt.pdtFrame = pdtFrame
 
 	// Check active PDT physical address. If it matches the input pdt then
@@ -50,7 +50,7 @@ func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame, allocFn FrameAllocatorFn
 	}
 
 	// Create a temporary mapping for the pdt frame so we can work on it
-	pdtPage, err := mapTemporaryFn(pdtFrame, allocFn)
+	pdtPage, err := mapTemporaryFn(pdtFrame)
 	if err != nil {
 		return err
 	}
@@ -73,7 +73,7 @@ func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame, allocFn FrameAllocatorFn
 // function with the difference that it also supports inactive page PDTs by
 // establishing a temporary mapping so that Map() can access the inactive PDT
 // entries.
-func (pdt PageDirectoryTable) Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllocatorFn) *kernel.Error {
+func (pdt PageDirectoryTable) Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
 	var (
 		activePdtFrame = pmm.Frame(activePDTFn() >> mem.PageShift)
 		lastPdtEntryAddr uintptr
@@ -89,7 +89,7 @@ func (pdt PageDirectoryTable) Map(page Page, frame pmm.Frame, flags PageTableEnt
 		flushTLBEntryFn(lastPdtEntryAddr)
 	}
 
-	err := mapFn(page, frame, flags, allocFn)
+	err := mapFn(page, frame, flags)
 
 	if activePdtFrame != pdt.pdtFrame {
 		lastPdtEntry.SetFrame(activePdtFrame)
diff --git a/kernel/mem/vmm/pdt_test.go b/kernel/mem/vmm/pdt_test.go
index 8bc9442..c971030 100644
--- a/kernel/mem/vmm/pdt_test.go
+++ b/kernel/mem/vmm/pdt_test.go
@@ -15,7 +15,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
 		t.Skip("test requires amd64 runtime; skipping")
 	}
 
-	defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMapTemporary func(pmm.Frame, FrameAllocatorFn) (Page, *kernel.Error), origUnmap func(Page) *kernel.Error) {
+	defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMapTemporary func(pmm.Frame) (Page, *kernel.Error), origUnmap func(Page) *kernel.Error) {
 		flushTLBEntryFn = origFlushTLBEntry
 		activePDTFn = origActivePDT
 		mapTemporaryFn = origMapTemporary
@@ -32,7 +32,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
 			return pdtFrame.Address()
 		}
 
-		mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocatorFn) (Page, *kernel.Error) {
+		mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) {
 			t.Fatal("unexpected call to MapTemporary")
 			return 0, nil
 		}
@@ -42,7 +42,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
 			return nil
 		}
 
-		if err := pdt.Init(pdtFrame, nil); err != nil {
+		if err := pdt.Init(pdtFrame); err != nil {
 			t.Fatal(err)
 		}
 	})
@@ -61,7 +61,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
 			return 0
 		}
 
-		mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocatorFn) (Page, *kernel.Error) {
+		mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) {
 			return PageFromAddress(uintptr(unsafe.Pointer(&physPage[0]))), nil
 		}
 
@@ -73,7 +73,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
 			return nil
 		}
 
-		if err := pdt.Init(pdtFrame, nil); err != nil {
+		if err := pdt.Init(pdtFrame); err != nil {
 			t.Fatal(err)
 		}
 
@@ -110,7 +110,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
 
 		expErr := &kernel.Error{Module: "test", Message: "error mapping page"}
 
-		mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocatorFn) (Page, *kernel.Error) {
+		mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) {
 			return 0, expErr
 		}
 
@@ -119,7 +119,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
 			return nil
 		}
 
-		if err := pdt.Init(pdtFrame, nil); err != expErr {
+		if err := pdt.Init(pdtFrame); err != expErr {
 			t.Fatalf("expected to get error: %v; got %v", *expErr, err)
 		}
 	})
@@ -130,7 +130,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) {
 		t.Skip("test requires amd64 runtime; skipping")
 	}
 
-	defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMap func(Page, pmm.Frame, PageTableEntryFlag, FrameAllocatorFn) *kernel.Error) {
+	defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMap func(Page, pmm.Frame, PageTableEntryFlag) *kernel.Error) {
 		flushTLBEntryFn = origFlushTLBEntry
 		activePDTFn = origActivePDT
 		mapFn = origMap
@@ -147,7 +147,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) {
 			return pdtFrame.Address()
 		}
 
-		mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag, _ FrameAllocatorFn) *kernel.Error {
+		mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag) *kernel.Error {
 			return nil
 		}
 
@@ -156,7 +156,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) {
 			flushCallCount++
 		}
 
-		if err := pdt.Map(page, pmm.Frame(321), FlagRW, nil); err != nil {
+		if err := pdt.Map(page, pmm.Frame(321), FlagRW); err != nil {
 			t.Fatal(err)
 		}
 
@@ -182,7 +182,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) {
 			return activePdtFrame.Address()
 		}
 
-		mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag, _ FrameAllocatorFn) *kernel.Error {
+		mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag) *kernel.Error {
 			return nil
 		}
 
@@ -205,7 +205,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) {
 			flushCallCount++
 		}
 
-		if err := pdt.Map(page, pmm.Frame(321), FlagRW, nil); err != nil {
+		if err := pdt.Map(page, pmm.Frame(321), FlagRW); err != nil {
 			t.Fatal(err)
 		}
diff --git a/kernel/mem/vmm/vmm.go b/kernel/mem/vmm/vmm.go
new file mode 100644
index 0000000..2ee2971
--- /dev/null
+++ b/kernel/mem/vmm/vmm.go
@@ -0,0 +1,17 @@
+package vmm
+
+import (
+	"github.com/achilleasa/gopher-os/kernel"
+	"github.com/achilleasa/gopher-os/kernel/mem/pmm"
+)
+
+var frameAllocator FrameAllocatorFn
+
+// FrameAllocatorFn is a function that can allocate physical frames.
+type FrameAllocatorFn func() (pmm.Frame, *kernel.Error)
+
+// SetFrameAllocator registers a frame allocator function that will be used by
+// the vmm code when new physical frames need to be allocated.
+func SetFrameAllocator(allocFn FrameAllocatorFn) {
+	frameAllocator = allocFn
+}
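
For reviewers, a minimal usage sketch of the new helper follows. The call site (kmain) and the earlyAllocFrame allocator are hypothetical stand-ins and not part of this patch; they only illustrate that the allocator is registered once via vmm.SetFrameAllocator and that vmm.Map/MapTemporary no longer take an allocFn argument.

package kmain // hypothetical call site, not part of this patch

import (
	"github.com/achilleasa/gopher-os/kernel"
	"github.com/achilleasa/gopher-os/kernel/mem/pmm"
	"github.com/achilleasa/gopher-os/kernel/mem/vmm"
)

// earlyAllocFrame is a placeholder for the kernel's real physical frame
// allocator; it only demonstrates the FrameAllocatorFn signature.
func earlyAllocFrame() (pmm.Frame, *kernel.Error) {
	return 0, &kernel.Error{Module: "pmm", Message: "allocator not wired up yet"}
}

func setupVMM() *kernel.Error {
	// Register the allocator once; vmm.Map and vmm.MapTemporary will call it
	// whenever a missing page table needs a fresh physical frame.
	vmm.SetFrameAllocator(earlyAllocFrame)

	// Example mapping: the virtual address and frame number are arbitrary.
	page := vmm.PageFromAddress(uintptr(0xffff800000100000))
	return vmm.Map(page, pmm.Frame(0x1234), vmm.FlagRW)
}

Keeping the allocator in a package-level variable instead of a per-call argument means the closure no longer travels through Map's call chain, which is what previously tripped the escape analysis.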