Remove mem.PageOrder
Since the goal is to bootstrap the runtime's slab-like allocator, the kernel itself only needs to manage memory at single-page granularity.
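The change boils down to the allocator callback's signature: the page-order parameter disappears because the only size ever requested is a single page. For quick reference, a sketch of the before/after types (copied from the hunks below; mem, pmm and kernel are the kernel's own packages and are assumed to be imported):

// Before: callers had to pass an allocation order, but only order 0 (one page) was ever requested.
type FrameAllocator func(mem.PageOrder) (pmm.Frame, *kernel.Error)

// After: the allocator always hands back exactly one physical frame.
type FrameAllocatorFn func() (pmm.Frame, *kernel.Error)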
@@ -23,14 +23,14 @@ var (
 	errNoHugePageSupport = &kernel.Error{Module: "vmm", Message: "huge pages are not supported"}
 )

-// FrameAllocator is a function that can allocate physical frames of the specified order.
-type FrameAllocator func(mem.PageOrder) (pmm.Frame, *kernel.Error)
+// FrameAllocatorFn is a function that can allocate physical frames.
+type FrameAllocatorFn func() (pmm.Frame, *kernel.Error)

 // Map establishes a mapping between a virtual page and a physical memory frame
 // using the currently active page directory table. Calls to Map will use the
 // supplied physical frame allocator to initialize missing page tables at each
 // paging level supported by the MMU.
-func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllocator) *kernel.Error {
+func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllocatorFn) *kernel.Error {
 	var err *kernel.Error

 	walk(page.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
@@ -53,7 +53,7 @@ func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllo
 		// physical frame for it map it and clear its contents.
 		if !pte.HasFlags(FlagPresent) {
 			var newTableFrame pmm.Frame
-			newTableFrame, err = allocFn(mem.PageOrder(0))
+			newTableFrame, err = allocFn()
 			if err != nil {
 				return false
 			}
@@ -78,7 +78,7 @@ func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllo
 // to a fixed virtual address overwriting any previous mapping. The temporary
 // mapping mechanism is primarily used by the kernel to access and initialize
 // inactive page tables.
-func MapTemporary(frame pmm.Frame, allocFn FrameAllocator) (Page, *kernel.Error) {
+func MapTemporary(frame pmm.Frame, allocFn FrameAllocatorFn) (Page, *kernel.Error) {
 	if err := Map(PageFromAddress(tempMappingAddr), frame, FlagRW, allocFn); err != nil {
 		return 0, err
 	}
@@ -32,7 +32,7 @@ func TestMapTemporaryAmd64(t *testing.T) {
 	nextPhysPage := 0

 	// allocFn returns pages from index 1; we keep index 0 for the P4 entry
-	allocFn := func(_ mem.PageOrder) (pmm.Frame, *kernel.Error) {
+	allocFn := func() (pmm.Frame, *kernel.Error) {
 		nextPhysPage++
 		pageAddr := unsafe.Pointer(&physPages[nextPhysPage][0])
 		return pmm.Frame(uintptr(pageAddr) >> mem.PageShift), nil
@@ -134,7 +134,7 @@ func TestMapTemporaryErrorsAmd64(t *testing.T) {

 	expErr := &kernel.Error{Module: "test", Message: "out of memory"}

-	allocFn := func(_ mem.PageOrder) (pmm.Frame, *kernel.Error) {
+	allocFn := func() (pmm.Frame, *kernel.Error) {
 		return 0, expErr
 	}

@@ -39,7 +39,7 @@ type PageDirectoryTable struct {
 // Init can:
 // - call mem.Memset to clear the frame contents
 // - setup a recursive mapping for the last table entry to the page itself.
-func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame, allocFn FrameAllocator) *kernel.Error {
+func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame, allocFn FrameAllocatorFn) *kernel.Error {
 	pdt.pdtFrame = pdtFrame

 	// Check active PDT physical address. If it matches the input pdt then
@@ -73,7 +73,7 @@ func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame, allocFn FrameAllocator)
 // function with the difference that it also supports inactive page PDTs by
 // establishing a temporary mapping so that Map() can access the inactive PDT
 // entries.
-func (pdt PageDirectoryTable) Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllocator) *kernel.Error {
+func (pdt PageDirectoryTable) Map(page Page, frame pmm.Frame, flags PageTableEntryFlag, allocFn FrameAllocatorFn) *kernel.Error {
 	var (
 		activePdtFrame   = pmm.Frame(activePDTFn() >> mem.PageShift)
 		lastPdtEntryAddr uintptr
@@ -15,7 +15,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
 		t.Skip("test requires amd64 runtime; skipping")
 	}

-	defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMapTemporary func(pmm.Frame, FrameAllocator) (Page, *kernel.Error), origUnmap func(Page) *kernel.Error) {
+	defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMapTemporary func(pmm.Frame, FrameAllocatorFn) (Page, *kernel.Error), origUnmap func(Page) *kernel.Error) {
 		flushTLBEntryFn = origFlushTLBEntry
 		activePDTFn = origActivePDT
 		mapTemporaryFn = origMapTemporary
@@ -32,7 +32,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
 		return pdtFrame.Address()
 	}

-	mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocator) (Page, *kernel.Error) {
+	mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocatorFn) (Page, *kernel.Error) {
 		t.Fatal("unexpected call to MapTemporary")
 		return 0, nil
 	}
@@ -61,7 +61,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {
 		return 0
 	}

-	mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocator) (Page, *kernel.Error) {
+	mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocatorFn) (Page, *kernel.Error) {
 		return PageFromAddress(uintptr(unsafe.Pointer(&physPage[0]))), nil
 	}

@@ -110,7 +110,7 @@ func TestPageDirectoryTableInitAmd64(t *testing.T) {

 	expErr := &kernel.Error{Module: "test", Message: "error mapping page"}

-	mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocator) (Page, *kernel.Error) {
+	mapTemporaryFn = func(_ pmm.Frame, _ FrameAllocatorFn) (Page, *kernel.Error) {
 		return 0, expErr
 	}

@@ -130,7 +130,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) {
 		t.Skip("test requires amd64 runtime; skipping")
 	}

-	defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMap func(Page, pmm.Frame, PageTableEntryFlag, FrameAllocator) *kernel.Error) {
+	defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMap func(Page, pmm.Frame, PageTableEntryFlag, FrameAllocatorFn) *kernel.Error) {
 		flushTLBEntryFn = origFlushTLBEntry
 		activePDTFn = origActivePDT
 		mapFn = origMap
@@ -147,7 +147,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) {
 		return pdtFrame.Address()
 	}

-	mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag, _ FrameAllocator) *kernel.Error {
+	mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag, _ FrameAllocatorFn) *kernel.Error {
 		return nil
 	}

@@ -182,7 +182,7 @@ func TestPageDirectoryTableMapAmd64(t *testing.T) {
 		return activePdtFrame.Address()
 	}

-	mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag, _ FrameAllocator) *kernel.Error {
+	mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag, _ FrameAllocatorFn) *kernel.Error {
 		return nil
 	}
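For call sites, the update is mechanical: drop the unused order argument from the allocator closure. A minimal sketch of the new shape, modeled on the test allocator above and assumed to live inside some kernel function; the physPages backing array, the placeholder virtual address and the 4096-byte page size (mem.PageShift == 12) are illustration-only assumptions, not part of the vmm package's API:

	var physPages [4][4096]byte // hypothetical backing storage, assuming 4 KB pages
	nextPhysPage := 0

	allocFn := func() (pmm.Frame, *kernel.Error) {
		// hand out the next backing page as a physical frame
		pageAddr := unsafe.Pointer(&physPages[nextPhysPage][0])
		nextPhysPage++
		return pmm.Frame(uintptr(pageAddr) >> mem.PageShift), nil
	}

	// In this sketch the frame being mapped comes from the same pool.
	frame, kerr := allocFn()
	if kerr != nil {
		// handle allocation failure
	}

	// Map a placeholder page read-write; Map calls allocFn for any
	// page-table levels that are still missing, as documented above.
	page := PageFromAddress(0xffff800000000000)
	if kerr := Map(page, frame, FlagRW, allocFn); kerr != nil {
		// the *kernel.Error returned by the allocator is propagated unchanged
	}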