1
0
mirror of https://github.com/taigrr/gopher-os synced 2025-01-18 04:43:13 -08:00

Remove mem.PageOrder

Since the goal is to bootstrap the runtime's slab-like allocator, the kernel
should only deal with managing allocations at a single-page level.
This commit is contained in:
Achilleas Anagnostopoulos 2017-06-17 07:11:08 +01:00
parent 1d11af21c2
commit c0a9e07e83
7 changed files with 20 additions and 44 deletions

View File

@ -9,13 +9,7 @@ import (
)
var (
// EarlyAllocator points to a static instance of the boot memory allocator
// which is used to bootstrap the kernel before initializing a more
// advanced memory allocator.
EarlyAllocator BootMemAllocator
errBootAllocUnsupportedPageSize = &kernel.Error{Module: "boot_mem_alloc", Message: "allocator only support allocation requests of order(0)"}
errBootAllocOutOfMemory = &kernel.Error{Module: "boot_mem_alloc", Message: "out of memory"}
errBootAllocOutOfMemory = &kernel.Error{Module: "boot_mem_alloc", Message: "out of memory"}
)
// BootMemAllocator implements a rudimentary physical memory allocator which is used
@ -63,13 +57,8 @@ func (alloc *BootMemAllocator) Init() {
// AllocFrame scans the system memory regions reported by the bootloader and
// reserves the next available free frame.
//
// AllocFrame returns an error if no more memory can be allocated or when the
// requested page order is > 0.
func (alloc *BootMemAllocator) AllocFrame(order mem.PageOrder) (pmm.Frame, *kernel.Error) {
if order > 0 {
return pmm.InvalidFrame, errBootAllocUnsupportedPageSize
}
// AllocFrame returns an error if no more memory can be allocated.
func (alloc *BootMemAllocator) AllocFrame() (pmm.Frame, *kernel.Error) {
var (
foundPageIndex int64 = -1
regionStartPageIndex, regionEndPageIndex int64

View File

@ -36,7 +36,7 @@ func TestBootMemoryAllocator(t *testing.T) {
allocFrameCount uint64
)
for alloc.Init(); ; allocFrameCount++ {
frame, err := alloc.AllocFrame(mem.PageOrder(0))
frame, err := alloc.AllocFrame()
if err != nil {
if err == errBootAllocOutOfMemory {
break
@ -57,11 +57,6 @@ func TestBootMemoryAllocator(t *testing.T) {
if allocFrameCount != totalFreeFrames {
t.Fatalf("expected allocator to allocate %d frames; allocated %d", totalFreeFrames, allocFrameCount)
}
// This allocator only works with order(0) blocks
if frame, err := alloc.AllocFrame(mem.PageOrder(1)); err != errBootAllocUnsupportedPageSize || frame.Valid() {
t.Fatalf("expected allocator to return errBootAllocUnsupportedPageSize and an invalid frame when requested to allocate a block with order > 0; got %v, %v", err, frame)
}
}
var (

View File

@ -10,11 +10,3 @@ const (
Mb = 1024 * Kb
Gb = 1024 * Mb
)
// PageOrder represents a power-of-two multiple of the base page size and is
// used as an argument to page-based memory allocators.
//
// PageOrder(0) refers to a page with size PageSize << 0
// PageOrder(1) refers to a page with size PageSize << 1
// ...
type PageOrder uint8

View File

@ -23,14 +23,14 @@ var (
errNoHugePageSupport = &kernel.Error{Module: "vmm", Message: "huge pages are not supported"}
)
// FrameAllocator is a function that can allocate physical frames of the specified order.
type FrameAllocator func(mem.PageOrder) (pmm.Frame, *kernel.Error)
// FrameAllocatorFn is a function that can allocate physical frames.
type FrameAllocatorFn func() (pmm.Frame, *kernel.Error)
// Map establishes a mapping between a virtual page and a physical memory frame
// using the currently active page directory table. Calls to Map will use the
// supplied physical frame allocator to initialize missing page tables at each
// paging level supported by the MMU.
func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllocator) *kernel.Error {
func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllocatorFn) *kernel.Error {
var err *kernel.Error
walk(page.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
@ -53,7 +53,7 @@ func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllo
// physical frame for it map it and clear its contents.
if !pte.HasFlags(FlagPresent) {
var newTableFrame pmm.Frame
newTableFrame, err = allocFn(mem.PageOrder(0))
newTableFrame, err = allocFn()
if err != nil {
return false
}
@ -78,7 +78,7 @@ func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllo
// to a fixed virtual address overwriting any previous mapping. The temporary
// mapping mechanism is primarily used by the kernel to access and initialize
// inactive page tables.
func MapTemporary(frame pmm.Frame, allocFn FrameAllocator) (Page, *kernel.Error) {
func MapTemporary(frame pmm.Frame, allocFn FrameAllocatorFn) (Page, *kernel.Error) {
if err := Map(PageFromAddress(tempMappingAddr), frame, FlagRW, allocFn); err != nil {
return 0, err
}

View File

@ -32,7 +32,7 @@ func TestMapTemporaryAmd64(t *testing.T) {
nextPhysPage := 0
// allocFn returns pages from index 1; we keep index 0 for the P4 entry
allocFn := func(_ mem.PageOrder) (pmm.Frame, *kernel.Error) {
allocFn := func() (pmm.Frame, *kernel.Error) {
nextPhysPage++
pageAddr := unsafe.Pointer(&physPages[nextPhysPage][0])
return pmm.Frame(uintptr(pageAddr) >> mem.PageShift), nil
@ -134,7 +134,7 @@ func TestMapTemporaryErrorsAmd64(t *testing.T) {
expErr := &kernel.Error{Module: "test", Message: "out of memory"}
allocFn := func(_ mem.PageOrder) (pmm.Frame, *kernel.Error) {
allocFn := func() (pmm.Frame, *kernel.Error) {
return 0, expErr
}

View File

@ -39,7 +39,7 @@ type PageDirectoryTable struct {
// Init can:
// - call mem.Memset to clear the frame contents
// - setup a recursive mapping for the last table entry to the page itself.
func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame, allocFn FrameAllocator) *kernel.Error {
func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame, allocFn FrameAllocatorFn) *kernel.Error {
pdt.pdtFrame = pdtFrame
// Check active PDT physical address. If it matches the input pdt then
@ -73,7 +73,7 @@ func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame, allocFn FrameAllocator)
// function with the difference that it also supports inactive page PDTs by
// establishing a temporary mapping so that Map() can access the inactive PDT
// entries.
func (pdt PageDirectoryTable) Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllocator) *kernel.Error {
func (pdt PageDirectoryTable) Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllocatorFn) *kernel.Error {
var (
activePdtFrame = pmm.Frame(activePDTFn() >> mem.PageShift)
lastPdtEntryAddr uintptr

View File

@ -15,7 +15,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
t.Skip("test requires amd64 runtime; skipping")
}
defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMapTemporary func(pmm.Frame, FrameAllocator) (Page, *kernel.Error), origUnmap func(Page) *kernel.Error) {
defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMapTemporary func(pmm.Frame, FrameAllocatorFn) (Page, *kernel.Error), origUnmap func(Page) *kernel.Error) {
flushTLBEntryFn = origFlushTLBEntry
activePDTFn = origActivePDT
mapTemporaryFn = origMapTemporary
@ -32,7 +32,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
return pdtFrame.Address()
}
mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocator) (Page, *kernel.Error) {
mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocatorFn) (Page, *kernel.Error) {
t.Fatal("unexpected call to MapTemporary")
return 0, nil
}
@ -61,7 +61,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
return 0
}
mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocator) (Page, *kernel.Error) {
mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocatorFn) (Page, *kernel.Error) {
return PageFromAddress(uintptr(unsafe.Pointer(&physPage[0]))), nil
}
@ -110,7 +110,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
expErr := &kernel.Error{Module: "test", Message: "error mapping page"}
mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocator) (Page, *kernel.Error) {
mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocatorFn) (Page, *kernel.Error) {
return 0, expErr
}
@ -130,7 +130,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) {
t.Skip("test requires amd64 runtime; skipping")
}
defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMap func(Page, pmm.Frame, PageTableEntryFlag, FrameAllocator) *kernel.Error) {
defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMap func(Page, pmm.Frame, PageTableEntryFlag, FrameAllocatorFn) *kernel.Error) {
flushTLBEntryFn = origFlushTLBEntry
activePDTFn = origActivePDT
mapFn = origMap
@ -147,7 +147,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) {
return pdtFrame.Address()
}
mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag, _ FrameAllocator) *kernel.Error {
mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag, _ FrameAllocatorFn) *kernel.Error {
return nil
}
@ -182,7 +182,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) {
return activePdtFrame.Address()
}
mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag, _ FrameAllocator) *kernel.Error {
mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag, _ FrameAllocatorFn) *kernel.Error {
return nil
}