Mirror of https://github.com/taigrr/gopher-os (synced 2025-01-18 04:43:13 -08:00)
mm: refactor package layout for the memory management code
Summary of changes:

- kernel/mem renamed to kernel/mm
- consolidated page/frame defs into one file which now lives in the kernel/mm package and is referenced by both the pmm and vmm pkgs
- consolidated parts of the vmm code (e.g. PDT+PTE)
- memcopy/memset helpers moved to the kernel package
- physical allocators moved to the kernel/mm/pmm package
- break vmm -> pmm pkg dependency by moving AllocFrame() into the mm package
This commit is contained in:
parent: 340b129e37
commit: e67e2644e2
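Before the diff itself, here is a minimal sketch of the consolidated kernel/mm surface this commit introduces, reconstructed from the call sites in the hunks below. The identifiers (mm.Frame, mm.Page, mm.PageShift, mm.PageSize, mm.FrameFromAddress, mm.PageFromAddress, mm.AllocFrame, mm.SetFrameAllocator, mm.InvalidFrame) all appear in the diff; the single-file layout and the exact shape of the allocator hook are assumptions, not the actual file contents:

```go
// Sketch of the consolidated kernel/mm package (layout assumed).
package mm

import (
	"gopheros/kernel"
	"math"
)

// PageShift/PageSize assume 4 KiB pages, matching the 4096-byte
// boundaries exercised by the tests in this commit.
const (
	PageShift = 12
	PageSize  = uintptr(1) << PageShift
)

// Frame describes a physical memory page index; Page a virtual one.
type (
	Frame uintptr
	Page  uintptr
)

// InvalidFrame is returned by frame allocators when they fail to reserve
// the requested frame (carried over from the deleted pmm/frame.go).
const InvalidFrame = Frame(math.MaxUint64)

// FrameFromAddress rounds physAddr down to the frame that contains it.
func FrameFromAddress(physAddr uintptr) Frame {
	return Frame((physAddr & ^(PageSize - 1)) >> PageShift)
}

// PageFromAddress rounds virtAddr down to the page that contains it.
func PageFromAddress(virtAddr uintptr) Page {
	return Page((virtAddr & ^(PageSize - 1)) >> PageShift)
}

// frameAllocFn is registered by the physical allocator (kernel/mm/pmm) at
// boot and swapped out by tests. This indirection is what breaks the old
// vmm -> pmm package dependency.
var frameAllocFn func() (Frame, *kernel.Error)

// SetFrameAllocator installs the active frame allocator.
func SetFrameAllocator(fn func() (Frame, *kernel.Error)) { frameAllocFn = fn }

// AllocFrame requests a free frame from the registered allocator.
func AllocFrame() (Frame, *kernel.Error) { return frameAllocFn() }
```

The SetFrameAllocator hook is the load-bearing piece: vmm and goruntime call mm.AllocFrame without importing the allocator package, and kernel/mm/pmm registers itself during boot.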
@@ -5,9 +5,8 @@ import (
 	"gopheros/device/acpi/table"
 	"gopheros/kernel"
 	"gopheros/kernel/kfmt"
-	"gopheros/kernel/mem"
-	"gopheros/kernel/mem/pmm"
-	"gopheros/kernel/mem/vmm"
+	"gopheros/kernel/mm"
+	"gopheros/kernel/mm/vmm"
 	"io"
 	"unsafe"
 )
@@ -165,18 +164,18 @@ func (drv *acpiDriver) enumerateTables(w io.Writer) *kernel.Error {
 // the mapping to cover the table contents and verifies the checksum before
 // returning a pointer to the table header.
 func mapACPITable(tableAddr uintptr) (header *table.SDTHeader, sizeofHeader uintptr, err *kernel.Error) {
-	var headerPage vmm.Page
+	var headerPage mm.Page
 
 	// Identity-map the table header so we can access its length field
 	sizeofHeader = unsafe.Sizeof(table.SDTHeader{})
-	if headerPage, err = identityMapFn(pmm.FrameFromAddress(tableAddr), mem.Size(sizeofHeader), vmm.FlagPresent); err != nil {
+	if headerPage, err = identityMapFn(mm.FrameFromAddress(tableAddr), sizeofHeader, vmm.FlagPresent); err != nil {
 		return nil, sizeofHeader, err
 	}
 
 	// Expand mapping to cover the table contents
 	headerPageAddr := headerPage.Address() + vmm.PageOffset(tableAddr)
 	header = (*table.SDTHeader)(unsafe.Pointer(headerPageAddr))
-	if _, err = identityMapFn(pmm.FrameFromAddress(tableAddr), mem.Size(header.Length), vmm.FlagPresent); err != nil {
+	if _, err = identityMapFn(mm.FrameFromAddress(tableAddr), uintptr(header.Length), vmm.FlagPresent); err != nil {
 		return nil, sizeofHeader, err
 	}
 
@@ -200,14 +199,14 @@ func locateRSDT() (uintptr, bool, *kernel.Error) {
 
 	// Cleanup temporary identity mappings when the function returns
 	defer func() {
-		for curPage := vmm.PageFromAddress(rsdpLocationLow); curPage <= vmm.PageFromAddress(rsdpLocationHi); curPage++ {
+		for curPage := mm.PageFromAddress(rsdpLocationLow); curPage <= mm.PageFromAddress(rsdpLocationHi); curPage++ {
 			unmapFn(curPage)
 		}
 	}()
 
 	// Setup temporary identity mapping so we can scan for the header
-	for curPage := vmm.PageFromAddress(rsdpLocationLow); curPage <= vmm.PageFromAddress(rsdpLocationHi); curPage++ {
-		if err := mapFn(curPage, pmm.Frame(curPage), vmm.FlagPresent); err != nil {
+	for curPage := mm.PageFromAddress(rsdpLocationLow); curPage <= mm.PageFromAddress(rsdpLocationHi); curPage++ {
+		if err := mapFn(curPage, mm.Frame(curPage), vmm.FlagPresent); err != nil {
 			return 0, false, err
 		}
 	}
@@ -3,9 +3,8 @@ package acpi
 import (
 	"gopheros/device/acpi/table"
 	"gopheros/kernel"
-	"gopheros/kernel/mem"
-	"gopheros/kernel/mem/pmm"
-	"gopheros/kernel/mem/vmm"
+	"gopheros/kernel/mm"
+	"gopheros/kernel/mm/vmm"
 	"io/ioutil"
 	"os"
 	"path/filepath"
@@ -28,8 +27,8 @@ func TestProbe(t *testing.T) {
 	}(rsdpLocationLow, rsdpLocationHi, rsdpAlignment)
 
 	t.Run("ACPI1", func(t *testing.T) {
-		mapFn = func(_ vmm.Page, _ pmm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error { return nil }
-		unmapFn = func(_ vmm.Page) *kernel.Error { return nil }
+		mapFn = func(_ mm.Page, _ mm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error { return nil }
+		unmapFn = func(_ mm.Page) *kernel.Error { return nil }
 
 		// Allocate space for 2 descriptors; leave the first entry
 		// blank to test that locateRSDT will jump over it and populate
@@ -68,8 +67,8 @@ func TestProbe(t *testing.T) {
 	})
 
 	t.Run("ACPI2+", func(t *testing.T) {
-		mapFn = func(_ vmm.Page, _ pmm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error { return nil }
-		unmapFn = func(_ vmm.Page) *kernel.Error { return nil }
+		mapFn = func(_ mm.Page, _ mm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error { return nil }
+		unmapFn = func(_ mm.Page) *kernel.Error { return nil }
 
 		// Allocate space for 2 descriptors; leave the first entry
 		// blank to test that locateRSDT will jump over it and populate
@@ -109,8 +108,8 @@ func TestProbe(t *testing.T) {
 	})
 
 	t.Run("RSDP ACPI1 checksum mismatch", func(t *testing.T) {
-		mapFn = func(_ vmm.Page, _ pmm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error { return nil }
-		unmapFn = func(_ vmm.Page) *kernel.Error { return nil }
+		mapFn = func(_ mm.Page, _ mm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error { return nil }
+		unmapFn = func(_ mm.Page) *kernel.Error { return nil }
 
 		sizeofRSDP := unsafe.Sizeof(table.RSDPDescriptor{})
 		buf := make([]byte, sizeofRSDP)
@@ -134,8 +133,8 @@ func TestProbe(t *testing.T) {
 	})
 
 	t.Run("RSDP ACPI2+ checksum mismatch", func(t *testing.T) {
-		mapFn = func(_ vmm.Page, _ pmm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error { return nil }
-		unmapFn = func(_ vmm.Page) *kernel.Error { return nil }
+		mapFn = func(_ mm.Page, _ mm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error { return nil }
+		unmapFn = func(_ mm.Page) *kernel.Error { return nil }
 
 		sizeofExtRSDP := unsafe.Sizeof(table.ExtRSDPDescriptor{})
 		buf := make([]byte, sizeofExtRSDP)
@@ -160,8 +159,8 @@ func TestProbe(t *testing.T) {
 
 	t.Run("error mapping rsdp memory block", func(t *testing.T) {
 		expErr := &kernel.Error{Module: "test", Message: "vmm.Map failed"}
-		mapFn = func(_ vmm.Page, _ pmm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error { return expErr }
-		unmapFn = func(_ vmm.Page) *kernel.Error { return nil }
+		mapFn = func(_ mm.Page, _ mm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error { return expErr }
+		unmapFn = func(_ mm.Page) *kernel.Error { return nil }
 
 		drv := probeForACPI()
 		if drv != nil {
@@ -177,8 +176,8 @@ func TestDriverInit(t *testing.T) {
 
 	t.Run("success", func(t *testing.T) {
 		rsdtAddr, _ := genTestRDST(t, acpiRev2Plus)
-		identityMapFn = func(frame pmm.Frame, _ mem.Size, _ vmm.PageTableEntryFlag) (vmm.Page, *kernel.Error) {
-			return vmm.Page(frame), nil
+		identityMapFn = func(frame mm.Frame, _ uintptr, _ vmm.PageTableEntryFlag) (mm.Page, *kernel.Error) {
+			return mm.Page(frame), nil
 		}
 
 		drv := &acpiDriver{
@@ -204,27 +203,27 @@ func TestDriverInit(t *testing.T) {
 		useXSDT: true,
 	}
 
-	specs := []func(frame pmm.Frame, _ mem.Size, _ vmm.PageTableEntryFlag) (vmm.Page, *kernel.Error){
-		func(frame pmm.Frame, _ mem.Size, _ vmm.PageTableEntryFlag) (vmm.Page, *kernel.Error) {
+	specs := []func(frame mm.Frame, _ uintptr, _ vmm.PageTableEntryFlag) (mm.Page, *kernel.Error){
+		func(frame mm.Frame, _ uintptr, _ vmm.PageTableEntryFlag) (mm.Page, *kernel.Error) {
 			// fail while trying to map RSDT
 			return 0, expErr
 		},
-		func(frame pmm.Frame, _ mem.Size, _ vmm.PageTableEntryFlag) (vmm.Page, *kernel.Error) {
+		func(frame mm.Frame, _ uintptr, _ vmm.PageTableEntryFlag) (mm.Page, *kernel.Error) {
 			// fail while trying to map any other ACPI table
 			callCount++
 			if callCount > 2 {
 				return 0, expErr
 			}
-			return vmm.Page(frame), nil
+			return mm.Page(frame), nil
 		},
-		func(frame pmm.Frame, size mem.Size, _ vmm.PageTableEntryFlag) (vmm.Page, *kernel.Error) {
+		func(frame mm.Frame, size uintptr, _ vmm.PageTableEntryFlag) (mm.Page, *kernel.Error) {
 			// fail while trying to map DSDT
 			for _, header := range tableList {
 				if header.Length == uint32(size) && string(header.Signature[:]) == dsdtSignature {
 					return 0, expErr
 				}
 			}
-			return vmm.Page(frame), nil
+			return mm.Page(frame), nil
 		},
 	}
 
@@ -249,16 +248,16 @@ func TestEnumerateTables(t *testing.T) {
 	t.Run("ACPI1", func(t *testing.T) {
 		rsdtAddr, tableList := genTestRDST(t, acpiRev1)
 
-		identityMapFn = func(frame pmm.Frame, _ mem.Size, _ vmm.PageTableEntryFlag) (vmm.Page, *kernel.Error) {
+		identityMapFn = func(frame mm.Frame, _ uintptr, _ vmm.PageTableEntryFlag) (mm.Page, *kernel.Error) {
 			// The frame encodes the table index we need to lookup (see genTestRDST)
 			nextTableIndex := int(frame)
 			if nextTableIndex >= len(tableList) {
 				// This is the RSDT
-				return vmm.Page(frame), nil
+				return mm.Page(frame), nil
 			}
 
 			header := tableList[nextTableIndex]
-			return vmm.PageFromAddress(uintptr(unsafe.Pointer(header))), nil
+			return mm.PageFromAddress(uintptr(unsafe.Pointer(header))), nil
 		}
 
 		drv := &acpiDriver{
@@ -285,8 +284,8 @@ func TestEnumerateTables(t *testing.T) {
 
 	t.Run("ACPI2+", func(t *testing.T) {
 		rsdtAddr, _ := genTestRDST(t, acpiRev2Plus)
-		identityMapFn = func(frame pmm.Frame, _ mem.Size, _ vmm.PageTableEntryFlag) (vmm.Page, *kernel.Error) {
-			return vmm.Page(frame), nil
+		identityMapFn = func(frame mm.Frame, _ uintptr, _ vmm.PageTableEntryFlag) (mm.Page, *kernel.Error) {
+			return mm.Page(frame), nil
 		}
 
 		drv := &acpiDriver{
@@ -311,8 +310,8 @@ func TestEnumerateTables(t *testing.T) {
 
 	t.Run("checksum mismatch", func(t *testing.T) {
 		rsdtAddr, tableList := genTestRDST(t, acpiRev2Plus)
-		identityMapFn = func(frame pmm.Frame, _ mem.Size, _ vmm.PageTableEntryFlag) (vmm.Page, *kernel.Error) {
-			return vmm.Page(frame), nil
+		identityMapFn = func(frame mm.Frame, _ uintptr, _ vmm.PageTableEntryFlag) (mm.Page, *kernel.Error) {
+			return mm.Page(frame), nil
 		}
 
 		// Set bad checksum for "SSDT" and "DSDT"
@@ -357,13 +356,13 @@ func TestMapACPITableErrors(t *testing.T) {
 		header table.SDTHeader
 	)
 
-	identityMapFn = func(frame pmm.Frame, _ mem.Size, _ vmm.PageTableEntryFlag) (vmm.Page, *kernel.Error) {
+	identityMapFn = func(frame mm.Frame, _ uintptr, _ vmm.PageTableEntryFlag) (mm.Page, *kernel.Error) {
 		callCount++
 		if callCount >= 2 {
 			return 0, expErr
 		}
 
-		return vmm.PageFromAddress(uintptr(unsafe.Pointer(&header))), nil
+		return mm.PageFromAddress(uintptr(unsafe.Pointer(&header))), nil
 	}
 
 	// Test errors while mapping the table contents and the table header
@@ -414,7 +413,7 @@ func genTestRDST(t *testing.T, acpiVersion uint8) (rsdtAddr uintptr, tableList [
 		// The test code will hook identityMapFn to reconstruct the
 		// correct pointer to the table contents.
 		offset := vmm.PageOffset(uintptr(unsafe.Pointer(dsdt)))
-		encodedTableLoc := (uintptr(dsdtIndex) << mem.PageShift) + offset
+		encodedTableLoc := (uintptr(dsdtIndex) << mm.PageShift) + offset
 		fadtHeader.Dsdt = uint32(encodedTableLoc)
 	} else {
 		fadtHeader.Ext.Dsdt = uint64(uintptr(unsafe.Pointer(dsdt)))
@@ -443,7 +442,7 @@ func genTestRDST(t *testing.T, acpiVersion uint8) (rsdtAddr uintptr, tableList [
 	// correct pointer to the table contents.
 	for index, tableHeader := range tableList {
 		offset := vmm.PageOffset(uintptr(unsafe.Pointer(tableHeader)))
-		encodedTableLoc := (uintptr(index) << mem.PageShift) + offset
+		encodedTableLoc := (uintptr(index) << mm.PageShift) + offset
 
 		*(*uint32)(unsafe.Pointer(&buf[rsdtHeader.Length])) = uint32(encodedTableLoc)
 		rsdtHeader.Length += 4
@@ -4,7 +4,7 @@ import (
 	"gopheros/device/video/console/font"
 	"gopheros/device/video/console/logo"
 	"gopheros/kernel/cpu"
-	"gopheros/kernel/mem/vmm"
+	"gopheros/kernel/mm/vmm"
 	"gopheros/multiboot"
 	"image/color"
 )
@@ -6,9 +6,8 @@ import (
 	"gopheros/device/video/console/logo"
 	"gopheros/kernel"
 	"gopheros/kernel/kfmt"
-	"gopheros/kernel/mem"
-	"gopheros/kernel/mem/pmm"
-	"gopheros/kernel/mem/vmm"
+	"gopheros/kernel/mm"
+	"gopheros/kernel/mm/vmm"
 	"gopheros/multiboot"
 	"image/color"
 	"io"
@@ -560,9 +559,9 @@ func (cons *VesaFbConsole) DriverVersion() (uint16, uint16, uint16) {
 // DriverInit initializes this driver.
 func (cons *VesaFbConsole) DriverInit(w io.Writer) *kernel.Error {
 	// Map the framebuffer so we can write to it
-	fbSize := mem.Size(cons.height * cons.pitch)
+	fbSize := uintptr(cons.height * cons.pitch)
 	fbPage, err := mapRegionFn(
-		pmm.Frame(cons.fbPhysAddr>>mem.PageShift),
+		mm.Frame(cons.fbPhysAddr>>mm.PageShift),
 		fbSize,
 		vmm.FlagPresent|vmm.FlagRW,
 	)
@@ -8,9 +8,8 @@ import (
 	"gopheros/device/video/console/logo"
 	"gopheros/kernel"
 	"gopheros/kernel/cpu"
-	"gopheros/kernel/mem"
-	"gopheros/kernel/mem/pmm"
-	"gopheros/kernel/mem/vmm"
+	"gopheros/kernel/mm"
+	"gopheros/kernel/mm/vmm"
 	"gopheros/multiboot"
 	"image/color"
 	"reflect"
@@ -1453,7 +1452,7 @@ func TestVesaFbDriverInterface(t *testing.T) {
 	}
 
 	t.Run("init success", func(t *testing.T) {
-		mapRegionFn = func(_ pmm.Frame, _ mem.Size, _ vmm.PageTableEntryFlag) (vmm.Page, *kernel.Error) {
+		mapRegionFn = func(_ mm.Frame, _ uintptr, _ vmm.PageTableEntryFlag) (mm.Page, *kernel.Error) {
 			return 0xa0000, nil
 		}
 
@@ -1466,7 +1465,7 @@ func TestVesaFbDriverInterface(t *testing.T) {
 
 	t.Run("init fail", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "something went wrong"}
-		mapRegionFn = func(_ pmm.Frame, _ mem.Size, _ vmm.PageTableEntryFlag) (vmm.Page, *kernel.Error) {
+		mapRegionFn = func(_ mm.Frame, _ uintptr, _ vmm.PageTableEntryFlag) (mm.Page, *kernel.Error) {
 			return 0, expErr
 		}
 
@@ -4,9 +4,8 @@ import (
 	"gopheros/device"
 	"gopheros/kernel"
 	"gopheros/kernel/kfmt"
-	"gopheros/kernel/mem"
-	"gopheros/kernel/mem/pmm"
-	"gopheros/kernel/mem/vmm"
+	"gopheros/kernel/mm"
+	"gopheros/kernel/mm/vmm"
 	"gopheros/multiboot"
 	"image/color"
 	"io"
@@ -209,9 +208,9 @@ func (cons *VgaTextConsole) DriverVersion() (uint16, uint16, uint16) {
 // DriverInit initializes this driver.
 func (cons *VgaTextConsole) DriverInit(w io.Writer) *kernel.Error {
 	// Map the framebuffer so we can write to it
-	fbSize := mem.Size(cons.width * cons.height * 2)
+	fbSize := uintptr(cons.width * cons.height * 2)
 	fbPage, err := mapRegionFn(
-		pmm.Frame(cons.fbPhysAddr>>mem.PageShift),
+		mm.Frame(cons.fbPhysAddr>>mm.PageShift),
 		fbSize,
 		vmm.FlagPresent|vmm.FlagRW,
 	)
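(An aside on the two framebuffer size calculations above: in VGA text mode each character cell occupies two bytes, a glyph byte plus a color-attribute byte, which is where `cons.width * cons.height * 2` comes from; the standard 80x25 mode therefore spans 80 x 25 x 2 = 4000 bytes, and 0xb8000 used in the test below is the conventional physical address of that framebuffer.)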
@@ -4,9 +4,8 @@ import (
 	"gopheros/device"
 	"gopheros/kernel"
 	"gopheros/kernel/cpu"
-	"gopheros/kernel/mem"
-	"gopheros/kernel/mem/pmm"
-	"gopheros/kernel/mem/vmm"
+	"gopheros/kernel/mm"
+	"gopheros/kernel/mm/vmm"
 	"gopheros/multiboot"
 	"image/color"
 	"testing"
@@ -339,7 +338,7 @@ func TestVgaTextDriverInterface(t *testing.T) {
 	}
 
 	t.Run("init success", func(t *testing.T) {
-		mapRegionFn = func(_ pmm.Frame, _ mem.Size, _ vmm.PageTableEntryFlag) (vmm.Page, *kernel.Error) {
+		mapRegionFn = func(_ mm.Frame, _ uintptr, _ vmm.PageTableEntryFlag) (mm.Page, *kernel.Error) {
 			return 0xb8000, nil
 		}
 
@@ -350,7 +349,7 @@ func TestVgaTextDriverInterface(t *testing.T) {
 
 	t.Run("init fail", func(t *testing.T) {
 		expErr := &kernel.Error{Module: "test", Message: "something went wrong"}
-		mapRegionFn = func(_ pmm.Frame, _ mem.Size, _ vmm.PageTableEntryFlag) (vmm.Page, *kernel.Error) {
+		mapRegionFn = func(_ mm.Frame, _ uintptr, _ vmm.PageTableEntryFlag) (mm.Page, *kernel.Error) {
 			return 0, expErr
 		}
 
@@ -4,17 +4,15 @@ package goruntime
 
 import (
 	"gopheros/kernel"
-	"gopheros/kernel/mem"
-	"gopheros/kernel/mem/pmm/allocator"
-	"gopheros/kernel/mem/vmm"
+	"gopheros/kernel/mm"
+	"gopheros/kernel/mm/vmm"
 	"unsafe"
 )
 
 var (
 	mapFn                = vmm.Map
 	earlyReserveRegionFn = vmm.EarlyReserveRegion
-	memsetFn             = mem.Memset
-	frameAllocFn         = allocator.AllocFrame
+	memsetFn             = kernel.Memset
 	mallocInitFn         = mallocInit
 	algInitFn            = algInit
 	modulesInitFn        = modulesInit
@@ -53,7 +51,7 @@ func runtimeInit() {
 //go:redirect-from runtime.sysReserve
 //go:nosplit
 func sysReserve(_ unsafe.Pointer, size uintptr, reserved *bool) unsafe.Pointer {
-	regionSize := (mem.Size(size) + mem.PageSize - 1) & ^(mem.PageSize - 1)
+	regionSize := (size + mm.PageSize - 1) & ^(mm.PageSize - 1)
 	regionStartAddr, err := earlyReserveRegionFn(regionSize)
 	if err != nil {
 		panic(err)
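The `(size + mm.PageSize - 1) & ^(mm.PageSize - 1)` expression in sysReserve (and again in sysMap/sysAlloc below) rounds a byte count up to the next page boundary. A standalone sanity check of that arithmetic, assuming the 4 KiB page size the tests use:

```go
package main

import "fmt"

// pageSize mirrors mm.PageSize on amd64 (assumed 1 << 12).
const pageSize uintptr = 4096

// roundUpToPage reproduces the alignment arithmetic from sysReserve/sysAlloc.
func roundUpToPage(size uintptr) uintptr {
	return (size + pageSize - 1) & ^(pageSize - 1)
}

func main() {
	fmt.Println(roundUpToPage(4 * pageSize))   // 16384: already page-aligned, unchanged
	fmt.Println(roundUpToPage(4*pageSize + 1)) // 20480: rounded up to a fifth page
	fmt.Println(roundUpToPage(2*pageSize - 1)) // 8192: matches the TestSysReserve spec
}
```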
@@ -77,12 +75,12 @@ func sysMap(virtAddr unsafe.Pointer, size uintptr, reserved bool, sysStat *uint6
 	}
 
 	// We trust the allocator to call sysMap with an address inside a reserved region.
-	regionStartAddr := (uintptr(virtAddr) + uintptr(mem.PageSize-1)) & ^uintptr(mem.PageSize-1)
-	regionSize := (mem.Size(size) + mem.PageSize - 1) & ^(mem.PageSize - 1)
-	pageCount := regionSize >> mem.PageShift
+	regionStartAddr := (uintptr(virtAddr) + uintptr(mm.PageSize-1)) & ^uintptr(mm.PageSize-1)
+	regionSize := (size + mm.PageSize - 1) & ^(mm.PageSize - 1)
+	pageCount := regionSize >> mm.PageShift
 
 	mapFlags := vmm.FlagPresent | vmm.FlagNoExecute | vmm.FlagCopyOnWrite
-	for page := vmm.PageFromAddress(regionStartAddr); pageCount > 0; pageCount, page = pageCount-1, page+1 {
+	for page := mm.PageFromAddress(regionStartAddr); pageCount > 0; pageCount, page = pageCount-1, page+1 {
 		if err := mapFn(page, vmm.ReservedZeroedFrame, mapFlags); err != nil {
 			return unsafe.Pointer(uintptr(0))
 		}
@@ -102,16 +100,16 @@ func sysMap(virtAddr unsafe.Pointer, size uintptr, reserved bool, sysStat *uint6
 //go:redirect-from runtime.sysAlloc
 //go:nosplit
 func sysAlloc(size uintptr, sysStat *uint64) unsafe.Pointer {
-	regionSize := (mem.Size(size) + mem.PageSize - 1) & ^(mem.PageSize - 1)
+	regionSize := (size + mm.PageSize - 1) & ^(mm.PageSize - 1)
 	regionStartAddr, err := earlyReserveRegionFn(regionSize)
 	if err != nil {
 		return unsafe.Pointer(uintptr(0))
 	}
 
 	mapFlags := vmm.FlagPresent | vmm.FlagNoExecute | vmm.FlagRW
-	pageCount := regionSize >> mem.PageShift
-	for page := vmm.PageFromAddress(regionStartAddr); pageCount > 0; pageCount, page = pageCount-1, page+1 {
-		frame, err := frameAllocFn()
+	pageCount := regionSize >> mm.PageShift
+	for page := mm.PageFromAddress(regionStartAddr); pageCount > 0; pageCount, page = pageCount-1, page+1 {
+		frame, err := mm.AllocFrame()
 		if err != nil {
 			return unsafe.Pointer(uintptr(0))
 		}
@@ -120,7 +118,7 @@ func sysAlloc(size uintptr, sysStat *uint64) unsafe.Pointer {
 			return unsafe.Pointer(uintptr(0))
 		}
 
-		memsetFn(page.Address(), 0, mem.PageSize)
+		memsetFn(page.Address(), 0, mm.PageSize)
 	}
 
 	mSysStatInc(sysStat, uintptr(regionSize))
@@ -2,10 +2,8 @@ package goruntime
 
 import (
 	"gopheros/kernel"
-	"gopheros/kernel/mem"
-	"gopheros/kernel/mem/pmm"
-	"gopheros/kernel/mem/pmm/allocator"
-	"gopheros/kernel/mem/vmm"
+	"gopheros/kernel/mm"
+	"gopheros/kernel/mm/vmm"
 	"reflect"
 	"testing"
 	"unsafe"
@@ -19,17 +17,17 @@ func TestSysReserve(t *testing.T) {
 
 	t.Run("success", func(t *testing.T) {
 		specs := []struct {
-			reqSize       mem.Size
-			expRegionSize mem.Size
+			reqSize       uintptr
+			expRegionSize uintptr
 		}{
 			// exact multiple of page size
-			{100 << mem.PageShift, 100 << mem.PageShift},
+			{100 << mm.PageShift, 100 << mm.PageShift},
 			// size should be rounded up to nearest page size
-			{2*mem.PageSize - 1, 2 * mem.PageSize},
+			{2*mm.PageSize - 1, 2 * mm.PageSize},
 		}
 
 		for specIndex, spec := range specs {
-			earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) {
+			earlyReserveRegionFn = func(rsvSize uintptr) (uintptr, *kernel.Error) {
 				if rsvSize != spec.expRegionSize {
 					t.Errorf("[spec %d] expected reservation size to be %d; got %d", specIndex, spec.expRegionSize, rsvSize)
 				}
@@ -52,7 +50,7 @@ func TestSysReserve(t *testing.T) {
 		}
 	}()
 
-	earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) {
+	earlyReserveRegionFn = func(rsvSize uintptr) (uintptr, *kernel.Error) {
 		return 0, &kernel.Error{Module: "test", Message: "consumed available address space"}
 	}
 
@@ -69,16 +67,16 @@ func TestSysMap(t *testing.T) {
 	t.Run("success", func(t *testing.T) {
 		specs := []struct {
 			reqAddr         uintptr
-			reqSize         mem.Size
+			reqSize         uintptr
 			expRsvAddr      uintptr
 			expMapCallCount int
 		}{
 			// exact multiple of page size
-			{100 << mem.PageShift, 4 * mem.PageSize, 100 << mem.PageShift, 4},
+			{100 << mm.PageShift, 4 * mm.PageSize, 100 << mm.PageShift, 4},
 			// address should be rounded up to nearest page size
-			{(100 << mem.PageShift) + 1, 4 * mem.PageSize, 101 << mem.PageShift, 4},
+			{(100 << mm.PageShift) + 1, 4 * mm.PageSize, 101 << mm.PageShift, 4},
 			// size should be rounded up to nearest page size
-			{1 << mem.PageShift, (4 * mem.PageSize) + 1, 1 << mem.PageShift, 5},
+			{1 << mm.PageShift, (4 * mm.PageSize) + 1, 1 << mm.PageShift, 5},
 		}
 
 		for specIndex, spec := range specs {
@@ -86,7 +84,7 @@ func TestSysMap(t *testing.T) {
 				sysStat      uint64
 				mapCallCount int
 			)
-			mapFn = func(_ vmm.Page, _ pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
+			mapFn = func(_ mm.Page, _ mm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
 				expFlags := vmm.FlagPresent | vmm.FlagCopyOnWrite | vmm.FlagNoExecute
 				if flags != expFlags {
 					t.Errorf("[spec %d] expected map flags to be %d; got %d", specIndex, expFlags, flags)
@@ -104,14 +102,14 @@ func TestSysMap(t *testing.T) {
 				t.Errorf("[spec %d] expected vmm.Map call count to be %d; got %d", specIndex, spec.expMapCallCount, mapCallCount)
 			}
 
-			if exp := uint64(spec.expMapCallCount << mem.PageShift); sysStat != exp {
+			if exp := uint64(spec.expMapCallCount << mm.PageShift); sysStat != exp {
 				t.Errorf("[spec %d] expected stat counter to be %d; got %d", specIndex, exp, sysStat)
 			}
 		}
 	})
 
 	t.Run("map fails", func(t *testing.T) {
-		mapFn = func(_ vmm.Page, _ pmm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error {
+		mapFn = func(_ mm.Page, _ mm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error {
 			return &kernel.Error{Module: "test", Message: "map failed"}
 		}
 
@@ -136,29 +134,29 @@ func TestSysAlloc(t *testing.T) {
 	defer func() {
 		earlyReserveRegionFn = vmm.EarlyReserveRegion
 		mapFn = vmm.Map
-		memsetFn = mem.Memset
-		frameAllocFn = allocator.AllocFrame
+		memsetFn = kernel.Memset
+		mm.SetFrameAllocator(nil)
 	}()
 
 	t.Run("success", func(t *testing.T) {
 		specs := []struct {
-			reqSize         mem.Size
+			reqSize         uintptr
 			expMapCallCount int
 		}{
 			// exact multiple of page size
-			{4 * mem.PageSize, 4},
+			{4 * mm.PageSize, 4},
 			// round up to nearest page size
-			{(4 * mem.PageSize) + 1, 5},
+			{(4 * mm.PageSize) + 1, 5},
 		}
 
-		expRegionStartAddr := uintptr(10 * mem.PageSize)
-		earlyReserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
+		expRegionStartAddr := uintptr(10 * mm.PageSize)
+		earlyReserveRegionFn = func(_ uintptr) (uintptr, *kernel.Error) {
 			return expRegionStartAddr, nil
 		}
 
-		frameAllocFn = func() (pmm.Frame, *kernel.Error) {
-			return pmm.Frame(0), nil
-		}
+		mm.SetFrameAllocator(func() (mm.Frame, *kernel.Error) {
+			return mm.Frame(0), nil
+		})
 
 		for specIndex, spec := range specs {
 			var (
@@ -167,11 +165,11 @@ func TestSysAlloc(t *testing.T) {
 				memsetCallCount int
 			)
 
-			memsetFn = func(_ uintptr, _ byte, _ mem.Size) {
+			memsetFn = func(_ uintptr, _ byte, _ uintptr) {
 				memsetCallCount++
 			}
 
-			mapFn = func(_ vmm.Page, _ pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
+			mapFn = func(_ mm.Page, _ mm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
 				expFlags := vmm.FlagPresent | vmm.FlagNoExecute | vmm.FlagRW
 				if flags != expFlags {
 					t.Errorf("[spec %d] expected map flags to be %d; got %d", specIndex, expFlags, flags)
@@ -193,14 +191,14 @@ func TestSysAlloc(t *testing.T) {
 				t.Errorf("[spec %d] expected mem.Memset call count to be %d; got %d", specIndex, spec.expMapCallCount, memsetCallCount)
 			}
 
-			if exp := uint64(spec.expMapCallCount << mem.PageShift); sysStat != exp {
+			if exp := uint64(spec.expMapCallCount << mm.PageShift); sysStat != exp {
 				t.Errorf("[spec %d] expected stat counter to be %d; got %d", specIndex, exp, sysStat)
 			}
 		}
 	})
 
 	t.Run("earlyReserveRegion fails", func(t *testing.T) {
-		earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) {
+		earlyReserveRegionFn = func(rsvSize uintptr) (uintptr, *kernel.Error) {
 			return 0, &kernel.Error{Module: "test", Message: "consumed available address space"}
 		}
 
@@ -211,14 +209,14 @@ func TestSysAlloc(t *testing.T) {
 	})
 
 	t.Run("frame allocation fails", func(t *testing.T) {
-		expRegionStartAddr := uintptr(10 * mem.PageSize)
-		earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) {
+		expRegionStartAddr := uintptr(10 * mm.PageSize)
+		earlyReserveRegionFn = func(rsvSize uintptr) (uintptr, *kernel.Error) {
 			return expRegionStartAddr, nil
 		}
 
-		frameAllocFn = func() (pmm.Frame, *kernel.Error) {
-			return pmm.InvalidFrame, &kernel.Error{Module: "test", Message: "out of memory"}
-		}
+		mm.SetFrameAllocator(func() (mm.Frame, *kernel.Error) {
+			return mm.InvalidFrame, &kernel.Error{Module: "test", Message: "out of memory"}
+		})
 
 		var sysStat uint64
 		if got := sysAlloc(1, &sysStat); got != unsafe.Pointer(uintptr(0)) {
@@ -227,16 +225,16 @@ func TestSysAlloc(t *testing.T) {
 	})
 
 	t.Run("map fails", func(t *testing.T) {
-		expRegionStartAddr := uintptr(10 * mem.PageSize)
-		earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) {
+		expRegionStartAddr := uintptr(10 * mm.PageSize)
+		earlyReserveRegionFn = func(rsvSize uintptr) (uintptr, *kernel.Error) {
 			return expRegionStartAddr, nil
 		}
 
-		frameAllocFn = func() (pmm.Frame, *kernel.Error) {
-			return pmm.Frame(0), nil
-		}
+		mm.SetFrameAllocator(func() (mm.Frame, *kernel.Error) {
+			return mm.Frame(0), nil
+		})
 
-		mapFn = func(_ vmm.Page, _ pmm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error {
+		mapFn = func(_ mm.Page, _ mm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error {
 			return &kernel.Error{Module: "test", Message: "map failed"}
 		}
 
@@ -4,10 +4,10 @@ import (
 	"gopheros/kernel"
 	"gopheros/kernel/goruntime"
 	"gopheros/kernel/hal"
-	"gopheros/kernel/hal/multiboot"
 	"gopheros/kernel/kfmt"
-	"gopheros/kernel/mem/pmm/allocator"
-	"gopheros/kernel/mem/vmm"
+	"gopheros/kernel/mm/pmm"
+	"gopheros/kernel/mm/vmm"
+	"gopheros/multiboot"
 )
 
 var (
@@ -31,7 +31,7 @@ func Kmain(multibootInfoPtr, kernelStart, kernelEnd, kernelPageOffset uintptr) {
 	multiboot.SetInfoPtr(multibootInfoPtr)
 
 	var err *kernel.Error
-	if err = allocator.Init(kernelStart, kernelEnd); err != nil {
+	if err = pmm.Init(kernelStart, kernelEnd); err != nil {
 		panic(err)
 	} else if err = vmm.Init(kernelPageOffset); err != nil {
 		panic(err)
@@ -1,35 +0,0 @@
-// Package pmm contains code that manages physical memory frame allocations.
-package pmm
-
-import (
-	"gopheros/kernel/mem"
-	"math"
-)
-
-// Frame describes a physical memory page index.
-type Frame uintptr
-
-const (
-	// InvalidFrame is returned by page allocators when
-	// they fail to reserve the requested frame.
-	InvalidFrame = Frame(math.MaxUint64)
-)
-
-// Valid returns true if this is a valid frame.
-func (f Frame) Valid() bool {
-	return f != InvalidFrame
-}
-
-// Address returns a pointer to the physical memory address pointed to by this Frame.
-func (f Frame) Address() uintptr {
-	return uintptr(f << mem.PageShift)
-}
-
-// FrameFromAddress returns a Frame that corresponds to
-// the given physical address. This function can handle
-// both page-aligned and not aligned addresses. in the
-// latter case, the input address will be rounded down
-// to the frame that contains it.
-func FrameFromAddress(physAddr uintptr) Frame {
-	return Frame((physAddr & ^(uintptr(mem.PageSize - 1))) >> mem.PageShift)
-}
@@ -1,43 +0,0 @@
-package pmm
-
-import (
-	"gopheros/kernel/mem"
-	"testing"
-)
-
-func TestFrameMethods(t *testing.T) {
-	for frameIndex := uint64(0); frameIndex < 128; frameIndex++ {
-		frame := Frame(frameIndex)
-
-		if !frame.Valid() {
-			t.Errorf("expected frame %d to be valid", frameIndex)
-		}
-
-		if exp, got := uintptr(frameIndex<<mem.PageShift), frame.Address(); got != exp {
-			t.Errorf("expected frame (%d, index: %d) call to Address() to return %x; got %x", frame, frameIndex, exp, got)
-		}
-	}
-
-	invalidFrame := InvalidFrame
-	if invalidFrame.Valid() {
-		t.Error("expected InvalidFrame.Valid() to return false")
-	}
-}
-
-func TestFrameFromAddress(t *testing.T) {
-	specs := []struct {
-		input    uintptr
-		expFrame Frame
-	}{
-		{0, Frame(0)},
-		{4095, Frame(0)},
-		{4096, Frame(1)},
-		{4123, Frame(1)},
-	}
-
-	for specIndex, spec := range specs {
-		if got := FrameFromAddress(spec.input); got != spec.expFrame {
-			t.Errorf("[spec %d] expected returned frame to be %v; got %v", specIndex, spec.expFrame, got)
-		}
-	}
-}
@@ -1,12 +0,0 @@
-package mem
-
-// Size represents a memory block size in bytes.
-type Size uint64
-
-// Common memory block sizes.
-const (
-	Byte Size = 1
-	Kb        = 1024 * Byte
-	Mb        = 1024 * Kb
-	Gb        = 1024 * Mb
-)
@@ -1,19 +0,0 @@
-package vmm
-
-import "gopheros/kernel/mem"
-
-// Page describes a virtual memory page index.
-type Page uintptr
-
-// Address returns a pointer to the virtual memory address pointed to by this Page.
-func (f Page) Address() uintptr {
-	return uintptr(f << mem.PageShift)
-}
-
-// PageFromAddress returns a Page that corresponds to the given virtual
-// address. This function can handle both page-aligned and not aligned virtual
-// addresses. in the latter case, the input address will be rounded down to the
-// page that contains it.
-func PageFromAddress(virtAddr uintptr) Page {
-	return Page((virtAddr & ^(uintptr(mem.PageSize - 1))) >> mem.PageShift)
-}
@@ -1,34 +0,0 @@
-package vmm
-
-import (
-	"gopheros/kernel/mem"
-	"testing"
-)
-
-func TestPageMethods(t *testing.T) {
-	for pageIndex := uint64(0); pageIndex < 128; pageIndex++ {
-		page := Page(pageIndex)
-
-		if exp, got := uintptr(pageIndex<<mem.PageShift), page.Address(); got != exp {
-			t.Errorf("expected page (%d, index: %d) call to Address() to return %x; got %x", page, pageIndex, exp, got)
-		}
-	}
-}
-
-func TestPageFromAddress(t *testing.T) {
-	specs := []struct {
-		input   uintptr
-		expPage Page
-	}{
-		{0, Page(0)},
-		{4095, Page(0)},
-		{4096, Page(1)},
-		{4123, Page(1)},
-	}
-
-	for specIndex, spec := range specs {
-		if got := PageFromAddress(spec.input); got != spec.expPage {
-			t.Errorf("[spec %d] expected returned page to be %v; got %v", specIndex, spec.expPage, got)
-		}
-	}
-}
@@ -1,135 +0,0 @@
-package vmm
-
-import (
-	"gopheros/kernel"
-	"gopheros/kernel/cpu"
-	"gopheros/kernel/mem"
-	"gopheros/kernel/mem/pmm"
-	"unsafe"
-)
-
-var (
-	// activePDTFn is used by tests to override calls to activePDT which
-	// will cause a fault if called in user-mode.
-	activePDTFn = cpu.ActivePDT
-
-	// switchPDTFn is used by tests to override calls to switchPDT which
-	// will cause a fault if called in user-mode.
-	switchPDTFn = cpu.SwitchPDT
-
-	// mapFn is used by tests and is automatically inlined by the compiler.
-	mapFn = Map
-
-	// mapTemporaryFn is used by tests and is automatically inlined by the compiler.
-	mapTemporaryFn = MapTemporary
-
-	// unmapmFn is used by tests and is automatically inlined by the compiler.
-	unmapFn = Unmap
-)
-
-// PageDirectoryTable describes the top-most table in a multi-level paging scheme.
-type PageDirectoryTable struct {
-	pdtFrame pmm.Frame
-}
-
-// Init sets up the page table directory starting at the supplied physical
-// address. If the supplied frame does not match the currently active PDT, then
-// Init assumes that this is a new page table directory that needs
-// bootstapping. In such a case, a temporary mapping is established so that
-// Init can:
-// - call mem.Memset to clear the frame contents
-// - setup a recursive mapping for the last table entry to the page itself.
-func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame) *kernel.Error {
-	pdt.pdtFrame = pdtFrame
-
-	// Check active PDT physical address. If it matches the input pdt then
-	// nothing more needs to be done
-	activePdtAddr := activePDTFn()
-	if pdtFrame.Address() == activePdtAddr {
-		return nil
-	}
-
-	// Create a temporary mapping for the pdt frame so we can work on it
-	pdtPage, err := mapTemporaryFn(pdtFrame)
-	if err != nil {
-		return err
-	}
-
-	// Clear the page contents and setup recursive mapping for the last PDT entry
-	mem.Memset(pdtPage.Address(), 0, mem.PageSize)
-	lastPdtEntry := (*pageTableEntry)(unsafe.Pointer(pdtPage.Address() + (((1 << pageLevelBits[0]) - 1) << mem.PointerShift)))
-	*lastPdtEntry = 0
-	lastPdtEntry.SetFlags(FlagPresent | FlagRW)
-	lastPdtEntry.SetFrame(pdtFrame)
-
-	// Remove temporary mapping
-	_ = unmapFn(pdtPage)
-
-	return nil
-}
-
-// Map establishes a mapping between a virtual page and a physical memory frame
-// using this PDT. This method behaves in a similar fashion to the global Map()
-// function with the difference that it also supports inactive page PDTs by
-// establishing a temporary mapping so that Map() can access the inactive PDT
-// entries.
-func (pdt PageDirectoryTable) Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
-	var (
-		activePdtFrame   = pmm.Frame(activePDTFn() >> mem.PageShift)
-		lastPdtEntryAddr uintptr
-		lastPdtEntry     *pageTableEntry
-	)
-	// If this table is not active we need to temporarily map it to the
-	// last entry in the active PDT so we can access it using the recursive
-	// virtual address scheme.
-	if activePdtFrame != pdt.pdtFrame {
-		lastPdtEntryAddr = activePdtFrame.Address() + (((1 << pageLevelBits[0]) - 1) << mem.PointerShift)
-		lastPdtEntry = (*pageTableEntry)(unsafe.Pointer(lastPdtEntryAddr))
-		lastPdtEntry.SetFrame(pdt.pdtFrame)
-		flushTLBEntryFn(lastPdtEntryAddr)
-	}
-
-	err := mapFn(page, frame, flags)
-
-	if activePdtFrame != pdt.pdtFrame {
-		lastPdtEntry.SetFrame(activePdtFrame)
-		flushTLBEntryFn(lastPdtEntryAddr)
-	}
-
-	return err
-}
-
-// Unmap removes a mapping previousle installed by a call to Map() on this PDT.
-// This method behaves in a similar fashion to the global Unmap() function with
-// the difference that it also supports inactive page PDTs by establishing a
-// temporary mapping so that Unmap() can access the inactive PDT entries.
-func (pdt PageDirectoryTable) Unmap(page Page) *kernel.Error {
-	var (
-		activePdtFrame   = pmm.Frame(activePDTFn() >> mem.PageShift)
-		lastPdtEntryAddr uintptr
-		lastPdtEntry     *pageTableEntry
-	)
-	// If this table is not active we need to temporarily map it to the
-	// last entry in the active PDT so we can access it using the recursive
-	// virtual address scheme.
-	if activePdtFrame != pdt.pdtFrame {
-		lastPdtEntryAddr = activePdtFrame.Address() + (((1 << pageLevelBits[0]) - 1) << mem.PointerShift)
-		lastPdtEntry = (*pageTableEntry)(unsafe.Pointer(lastPdtEntryAddr))
-		lastPdtEntry.SetFrame(pdt.pdtFrame)
-		flushTLBEntryFn(lastPdtEntryAddr)
-	}
-
-	err := unmapFn(page)
-
-	if activePdtFrame != pdt.pdtFrame {
-		lastPdtEntry.SetFrame(activePdtFrame)
-		flushTLBEntryFn(lastPdtEntryAddr)
-	}
-
-	return err
-}
-
-// Activate enables this page directory table and flushes the TLB
-func (pdt PageDirectoryTable) Activate() {
-	switchPDTFn(pdt.pdtFrame.Address())
-}
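The Init/Map/Unmap code above leans on the recursive page-table trick: the last entry of the active top-level table points back at the table's own frame, so a fixed virtual address always resolves to the (possibly inactive) table being edited. The entry offset it computes, `((1 << pageLevelBits[0]) - 1) << mem.PointerShift`, is just "last index times entry size"; a standalone check, assuming the amd64 values of 9 index bits per level and 8-byte entries:

```go
package main

import "fmt"

const (
	levelBits    = 9 // assumed value of pageLevelBits[0]: 512 entries per table on amd64
	pointerShift = 3 // 8-byte page table entries, mirroring mem.PointerShift
)

func main() {
	// Byte offset of the last entry inside a 4096-byte page table frame.
	lastEntryOffset := ((1 << levelBits) - 1) << pointerShift
	fmt.Println(lastEntryOffset) // 4088 = 511 * 8
}
```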
@@ -1,330 +0,0 @@
-package vmm
-
-import (
-	"gopheros/kernel"
-	"gopheros/kernel/mem"
-	"gopheros/kernel/mem/pmm"
-	"runtime"
-	"testing"
-	"unsafe"
-)
-
-func TestPageDirectoryTableInitAmd64(t *testing.T) {
-	if runtime.GOARCH != "amd64" {
-		t.Skip("test requires amd64 runtime; skipping")
-	}
-
-	defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMapTemporary func(pmm.Frame) (Page, *kernel.Error), origUnmap func(Page) *kernel.Error) {
-		flushTLBEntryFn = origFlushTLBEntry
-		activePDTFn = origActivePDT
-		mapTemporaryFn = origMapTemporary
-		unmapFn = origUnmap
-	}(flushTLBEntryFn, activePDTFn, mapTemporaryFn, unmapFn)
-
-	t.Run("already mapped PDT", func(t *testing.T) {
-		var (
-			pdt      PageDirectoryTable
-			pdtFrame = pmm.Frame(123)
-		)
-
-		activePDTFn = func() uintptr {
-			return pdtFrame.Address()
-		}
-
-		mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) {
-			t.Fatal("unexpected call to MapTemporary")
-			return 0, nil
-		}
-
-		unmapFn = func(_ Page) *kernel.Error {
-			t.Fatal("unexpected call to Unmap")
-			return nil
-		}
-
-		if err := pdt.Init(pdtFrame); err != nil {
-			t.Fatal(err)
-		}
-	})
-
-	t.Run("not mapped PDT", func(t *testing.T) {
-		var (
-			pdt      PageDirectoryTable
-			pdtFrame = pmm.Frame(123)
-			physPage [mem.PageSize >> mem.PointerShift]pageTableEntry
-		)
-
-		// Fill phys page with random junk
-		mem.Memset(uintptr(unsafe.Pointer(&physPage[0])), 0xf0, mem.PageSize)
-
-		activePDTFn = func() uintptr {
-			return 0
-		}
-
-		mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) {
-			return PageFromAddress(uintptr(unsafe.Pointer(&physPage[0]))), nil
-		}
-
-		flushTLBEntryFn = func(_ uintptr) {}
-
-		unmapCallCount := 0
-		unmapFn = func(_ Page) *kernel.Error {
-			unmapCallCount++
-			return nil
-		}
-
-		if err := pdt.Init(pdtFrame); err != nil {
-			t.Fatal(err)
-		}
-
-		if unmapCallCount != 1 {
-			t.Fatalf("expected Unmap to be called 1 time; called %d", unmapCallCount)
-		}
-
-		for i := 0; i < len(physPage)-1; i++ {
-			if physPage[i] != 0 {
-				t.Errorf("expected PDT entry %d to be cleared; got %x", i, physPage[i])
-			}
-		}
-
-		// The last page should be recursively mapped to the PDT
-		lastPdtEntry := physPage[len(physPage)-1]
-		if !lastPdtEntry.HasFlags(FlagPresent | FlagRW) {
-			t.Fatal("expected last PDT entry to have FlagPresent and FlagRW set")
-		}
-
-		if lastPdtEntry.Frame() != pdtFrame {
-			t.Fatalf("expected last PDT entry to be recursively mapped to physical frame %x; got %x", pdtFrame, lastPdtEntry.Frame())
-		}
-	})
-
-	t.Run("temporary mapping failure", func(t *testing.T) {
-		var (
-			pdt      PageDirectoryTable
-			pdtFrame = pmm.Frame(123)
-		)
-
-		activePDTFn = func() uintptr {
-			return 0
-		}
-
-		expErr := &kernel.Error{Module: "test", Message: "error mapping page"}
-
-		mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) {
-			return 0, expErr
-		}
-
-		unmapFn = func(_ Page) *kernel.Error {
-			t.Fatal("unexpected call to Unmap")
-			return nil
-		}
-
-		if err := pdt.Init(pdtFrame); err != expErr {
-			t.Fatalf("expected to get error: %v; got %v", *expErr, err)
-		}
-	})
-}
-
-func TestPageDirectoryTableMapAmd64(t *testing.T) {
-	if runtime.GOARCH != "amd64" {
-		t.Skip("test requires amd64 runtime; skipping")
-	}
-
-	defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMap func(Page, pmm.Frame, PageTableEntryFlag) *kernel.Error) {
-		flushTLBEntryFn = origFlushTLBEntry
-		activePDTFn = origActivePDT
-		mapFn = origMap
-	}(flushTLBEntryFn, activePDTFn, mapFn)
-
-	t.Run("already mapped PDT", func(t *testing.T) {
-		var (
-			pdtFrame = pmm.Frame(123)
-			pdt      = PageDirectoryTable{pdtFrame: pdtFrame}
-			page     = PageFromAddress(uintptr(100 * mem.Mb))
-		)
-
-		activePDTFn = func() uintptr {
-			return pdtFrame.Address()
-		}
-
-		mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag) *kernel.Error {
-			return nil
-		}
-
-		flushCallCount := 0
-		flushTLBEntryFn = func(_ uintptr) {
-			flushCallCount++
-		}
-
-		if err := pdt.Map(page, pmm.Frame(321), FlagRW); err != nil {
-			t.Fatal(err)
-		}
-
-		if exp := 0; flushCallCount != exp {
-			t.Fatalf("expected flushTLBEntry to be called %d times; called %d", exp, flushCallCount)
-		}
-	})
-
-	t.Run("not mapped PDT", func(t *testing.T) {
-		var (
-			pdtFrame       = pmm.Frame(123)
-			pdt            = PageDirectoryTable{pdtFrame: pdtFrame}
-			page           = PageFromAddress(uintptr(100 * mem.Mb))
-			activePhysPage [mem.PageSize >> mem.PointerShift]pageTableEntry
-			activePdtFrame = pmm.Frame(uintptr(unsafe.Pointer(&activePhysPage[0])) >> mem.PageShift)
-		)
-
-		// Initially, activePhysPage is recursively mapped to itself
-		activePhysPage[len(activePhysPage)-1].SetFlags(FlagPresent | FlagRW)
-		activePhysPage[len(activePhysPage)-1].SetFrame(activePdtFrame)
-
-		activePDTFn = func() uintptr {
-			return activePdtFrame.Address()
-		}
-
-		mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag) *kernel.Error {
-			return nil
-		}
-
-		flushCallCount := 0
-		flushTLBEntryFn = func(_ uintptr) {
-			switch flushCallCount {
-			case 0:
-				// the first time we flush the tlb entry, the last entry of
-				// the active pdt should be pointing to pdtFrame
-				if got := activePhysPage[len(activePhysPage)-1].Frame(); got != pdtFrame {
-					t.Fatalf("expected last PDT entry of active PDT to be re-mapped to frame %x; got %x", pdtFrame, got)
-				}
-			case 1:
-				// the second time we flush the tlb entry, the last entry of
-				// the active pdt should be pointing back to activePdtFrame
-				if got := activePhysPage[len(activePhysPage)-1].Frame(); got != activePdtFrame {
-					t.Fatalf("expected last PDT entry of active PDT to be mapped back frame %x; got %x", activePdtFrame, got)
-				}
-			}
-			flushCallCount++
-		}
-
-		if err := pdt.Map(page, pmm.Frame(321), FlagRW); err != nil {
-			t.Fatal(err)
-		}
-
-		if exp := 2; flushCallCount != exp {
-			t.Fatalf("expected flushTLBEntry to be called %d times; called %d", exp, flushCallCount)
-		}
-	})
-}
-
-func TestPageDirectoryTableUnmapAmd64(t *testing.T) {
-	if runtime.GOARCH != "amd64" {
-		t.Skip("test requires amd64 runtime; skipping")
-	}
-
-	defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origUnmap func(Page) *kernel.Error) {
-		flushTLBEntryFn = origFlushTLBEntry
-		activePDTFn = origActivePDT
-		unmapFn = origUnmap
-	}(flushTLBEntryFn, activePDTFn, unmapFn)
-
-	t.Run("already mapped PDT", func(t *testing.T) {
-		var (
-			pdtFrame = pmm.Frame(123)
-			pdt      = PageDirectoryTable{pdtFrame: pdtFrame}
-			page     = PageFromAddress(uintptr(100 * mem.Mb))
-		)
-
-		activePDTFn = func() uintptr {
-			return pdtFrame.Address()
-		}
-
-		unmapFn = func(_ Page) *kernel.Error {
-			return nil
-		}
-
-		flushCallCount := 0
-		flushTLBEntryFn = func(_ uintptr) {
-			flushCallCount++
-		}
-
-		if err := pdt.Unmap(page); err != nil {
-			t.Fatal(err)
-		}
-
-		if exp := 0; flushCallCount != exp {
-			t.Fatalf("expected flushTLBEntry to be called %d times; called %d", exp, flushCallCount)
-		}
-	})
-
-	t.Run("not mapped PDT", func(t *testing.T) {
-		var (
-			pdtFrame       = pmm.Frame(123)
-			pdt            = PageDirectoryTable{pdtFrame: pdtFrame}
-			page           = PageFromAddress(uintptr(100 * mem.Mb))
-			activePhysPage [mem.PageSize >> mem.PointerShift]pageTableEntry
-			activePdtFrame = pmm.Frame(uintptr(unsafe.Pointer(&activePhysPage[0])) >> mem.PageShift)
-		)
-
-		// Initially, activePhysPage is recursively mapped to itself
-		activePhysPage[len(activePhysPage)-1].SetFlags(FlagPresent | FlagRW)
-		activePhysPage[len(activePhysPage)-1].SetFrame(activePdtFrame)
-
-		activePDTFn = func() uintptr {
-			return activePdtFrame.Address()
-		}
-
-		unmapFn = func(_ Page) *kernel.Error {
-			return nil
-		}
-
-		flushCallCount := 0
-		flushTLBEntryFn = func(_ uintptr) {
|
|
||||||
switch flushCallCount {
|
|
||||||
case 0:
|
|
||||||
// the first time we flush the tlb entry, the last entry of
|
|
||||||
// the active pdt should be pointing to pdtFrame
|
|
||||||
if got := activePhysPage[len(activePhysPage)-1].Frame(); got != pdtFrame {
|
|
||||||
t.Fatalf("expected last PDT entry of active PDT to be re-mapped to frame %x; got %x", pdtFrame, got)
|
|
||||||
}
|
|
||||||
case 1:
|
|
||||||
// the second time we flush the tlb entry, the last entry of
|
|
||||||
// the active pdt should be pointing back to activePdtFrame
|
|
||||||
if got := activePhysPage[len(activePhysPage)-1].Frame(); got != activePdtFrame {
|
|
||||||
t.Fatalf("expected last PDT entry of active PDT to be mapped back frame %x; got %x", activePdtFrame, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
flushCallCount++
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := pdt.Unmap(page); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if exp := 2; flushCallCount != exp {
|
|
||||||
t.Fatalf("expected flushTLBEntry to be called %d times; called %d", exp, flushCallCount)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPageDirectoryTableActivateAmd64(t *testing.T) {
|
|
||||||
if runtime.GOARCH != "amd64" {
|
|
||||||
t.Skip("test requires amd64 runtime; skipping")
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func(origSwitchPDT func(uintptr)) {
|
|
||||||
switchPDTFn = origSwitchPDT
|
|
||||||
}(switchPDTFn)
|
|
||||||
|
|
||||||
var (
|
|
||||||
pdtFrame = pmm.Frame(123)
|
|
||||||
pdt = PageDirectoryTable{pdtFrame: pdtFrame}
|
|
||||||
)
|
|
||||||
|
|
||||||
switchPDTCallCount := 0
|
|
||||||
switchPDTFn = func(_ uintptr) {
|
|
||||||
switchPDTCallCount++
|
|
||||||
}
|
|
||||||
|
|
||||||
pdt.Activate()
|
|
||||||
if exp := 1; switchPDTCallCount != exp {
|
|
||||||
t.Fatalf("expected switchPDT to be called %d times; called %d", exp, switchPDTCallCount)
|
|
||||||
}
|
|
||||||
}
|
|
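The recursive self-mapping these tests assert is what later makes every page table reachable at a fixed virtual address. A small amd64-only sketch of that arithmetic, runnable on its own; the 512-entry tables, the index-511 self slot, and the shift constants are assumptions mirroring the scheme the tests exercise, not code from this package:

package main

import "fmt"

// With a self-referencing last entry (index 511), the page tables become
// visible at fixed virtual addresses. This computes where the PDT sees
// itself under the usual amd64 4-level layout (assumed constants).
func main() {
	const (
		selfIndex = uintptr(511)
		levels    = 4
	)

	// Start from the sign-extended canonical top and apply the self
	// index once per level (shifts 39, 30, 21, 12).
	addr := uintptr(0xffff000000000000)
	for level := 0; level < levels; level++ {
		shift := uintptr(12 + 9*(levels-1-level))
		addr |= selfIndex << shift
	}

	fmt.Printf("PDT virtual address: %#x\n", addr) // 0xfffffffffffff000
}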
@ -1,74 +0,0 @@
package vmm

import (
	"gopheros/kernel"
	"gopheros/kernel/mem"
	"gopheros/kernel/mem/pmm"
)

var (
	// ErrInvalidMapping is returned when trying to lookup a virtual memory address that is not yet mapped.
	ErrInvalidMapping = &kernel.Error{Module: "vmm", Message: "virtual address does not point to a mapped physical page"}
)

// PageTableEntryFlag describes a flag that can be applied to a page table entry.
type PageTableEntryFlag uintptr

// pageTableEntry describes a page table entry. These entries encode
// a physical frame address and a set of flags. The actual format
// of the entry and flags is architecture-dependent.
type pageTableEntry uintptr

// HasFlags returns true if this entry has all the input flags set.
func (pte pageTableEntry) HasFlags(flags PageTableEntryFlag) bool {
	return (uintptr(pte) & uintptr(flags)) == uintptr(flags)
}

// HasAnyFlag returns true if this entry has at least one of the input flags set.
func (pte pageTableEntry) HasAnyFlag(flags PageTableEntryFlag) bool {
	return (uintptr(pte) & uintptr(flags)) != 0
}

// SetFlags sets the input list of flags to the page table entry.
func (pte *pageTableEntry) SetFlags(flags PageTableEntryFlag) {
	*pte = (pageTableEntry)(uintptr(*pte) | uintptr(flags))
}

// ClearFlags unsets the input list of flags from the page table entry.
func (pte *pageTableEntry) ClearFlags(flags PageTableEntryFlag) {
	*pte = (pageTableEntry)(uintptr(*pte) &^ uintptr(flags))
}

// Frame returns the physical page frame that this page table entry points to.
func (pte pageTableEntry) Frame() pmm.Frame {
	return pmm.Frame((uintptr(pte) & ptePhysPageMask) >> mem.PageShift)
}

// SetFrame updates the page table entry to point to the given physical frame.
func (pte *pageTableEntry) SetFrame(frame pmm.Frame) {
	*pte = (pageTableEntry)((uintptr(*pte) &^ ptePhysPageMask) | frame.Address())
}

// pteForAddress returns the final page table entry that corresponds to a
// particular virtual address. The function performs a page table walk till it
// reaches the final page table entry, returning ErrInvalidMapping if the page
// is not present.
func pteForAddress(virtAddr uintptr) (*pageTableEntry, *kernel.Error) {
	var (
		err   *kernel.Error
		entry *pageTableEntry
	)

	walk(virtAddr, func(pteLevel uint8, pte *pageTableEntry) bool {
		if !pte.HasFlags(FlagPresent) {
			entry = nil
			err = ErrInvalidMapping
			return false
		}

		entry = pte
		return true
	})

	return entry, err
}
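Because the frame address and the flag bits occupy disjoint bit ranges of a single word, entries can be assembled and decomposed with plain mask arithmetic. A minimal standalone sketch of that encoding; the 4KiB page size, the flag positions, and physMask are assumptions standing in for ptePhysPageMask above:

package main

import "fmt"

const (
	pageShift = 12
	// physMask keeps bits 12-51, where x86-64 page table entries store
	// the physical frame address (an assumed stand-in for ptePhysPageMask).
	physMask = uintptr(0x000ffffffffff000)

	flagPresent = uintptr(1 << 0)
	flagRW      = uintptr(1 << 1)
)

func main() {
	frame := uintptr(123)

	// Assemble an entry: frame address in the high bits, flags in the low bits.
	entry := (frame << pageShift) | flagPresent | flagRW

	// Decompose it again.
	fmt.Printf("frame=%d present=%v rw=%v\n",
		(entry&physMask)>>pageShift,
		entry&flagPresent != 0,
		entry&flagRW != 0,
	)
}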
@ -1,60 +0,0 @@
package vmm

import (
	"gopheros/kernel/mem/pmm"
	"testing"
)

func TestPageTableEntryFlags(t *testing.T) {
	var (
		pte   pageTableEntry
		flag1 = PageTableEntryFlag(1 << 10)
		flag2 = PageTableEntryFlag(1 << 21)
	)

	if pte.HasAnyFlag(flag1 | flag2) {
		t.Fatalf("expected HasAnyFlags to return false")
	}

	pte.SetFlags(flag1 | flag2)

	if !pte.HasAnyFlag(flag1 | flag2) {
		t.Fatalf("expected HasAnyFlags to return true")
	}

	if !pte.HasFlags(flag1 | flag2) {
		t.Fatalf("expected HasFlags to return true")
	}

	pte.ClearFlags(flag1)

	if !pte.HasAnyFlag(flag1 | flag2) {
		t.Fatalf("expected HasAnyFlags to return true")
	}

	if pte.HasFlags(flag1 | flag2) {
		t.Fatalf("expected HasFlags to return false")
	}

	pte.ClearFlags(flag1 | flag2)

	if pte.HasAnyFlag(flag1 | flag2) {
		t.Fatalf("expected HasAnyFlags to return false")
	}

	if pte.HasFlags(flag1 | flag2) {
		t.Fatalf("expected HasFlags to return false")
	}
}

func TestPageTableEntryFrameEncoding(t *testing.T) {
	var (
		pte       pageTableEntry
		physFrame = pmm.Frame(123)
	)

	pte.SetFrame(physFrame)
	if got := pte.Frame(); got != physFrame {
		t.Fatalf("expected pte.Frame() to return %v; got %v", physFrame, got)
	}
}
@ -1,24 +0,0 @@
package vmm

import "gopheros/kernel"

// Translate returns the physical address that corresponds to the supplied
// virtual address or ErrInvalidMapping if the virtual address does not
// correspond to a mapped physical address.
func Translate(virtAddr uintptr) (uintptr, *kernel.Error) {
	pte, err := pteForAddress(virtAddr)
	if err != nil {
		return 0, err
	}

	// Calculate the physical address by taking the physical frame address and
	// appending the offset from the virtual address
	physAddr := pte.Frame().Address() + PageOffset(virtAddr)
	return physAddr, nil
}

// PageOffset returns the offset within the page specified by a virtual
// address.
func PageOffset(virtAddr uintptr) uintptr {
	return (virtAddr & ((1 << pageLevelShifts[pageLevels-1]) - 1))
}
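Translate glues the mapped frame's base address to the low bits of the virtual address. A quick standalone illustration of that split, assuming 4KiB pages:

package main

import "fmt"

// With 4KiB pages the low 12 bits of a virtual address are the in-page
// offset and the remaining bits select the page; Translate above appends
// that offset to the mapped frame's base address.
func main() {
	const pageShift = 12
	virt := uintptr(0x8080604400 + 1024)

	page := virt >> pageShift
	offset := virt & ((1 << pageShift) - 1)

	fmt.Printf("page=%#x offset=%#x\n", page, offset) // page=0x8080604 offset=0x400
}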
@ -1,63 +0,0 @@
package vmm

import (
	"gopheros/kernel/mem/pmm"
	"runtime"
	"testing"
	"unsafe"
)

func TestTranslateAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}

	defer func(origPtePtr func(uintptr) unsafe.Pointer) {
		ptePtrFn = origPtePtr
	}(ptePtrFn)

	// the virtual address just contains the page offset
	virtAddr := uintptr(1234)
	expFrame := pmm.Frame(42)
	expPhysAddr := expFrame.Address() + virtAddr
	specs := [][pageLevels]bool{
		{true, true, true, true},
		{false, true, true, true},
		{true, false, true, true},
		{true, true, false, true},
		{true, true, true, false},
	}

	for specIndex, spec := range specs {
		pteCallCount := 0
		ptePtrFn = func(entry uintptr) unsafe.Pointer {
			var pte pageTableEntry
			pte.SetFrame(expFrame)
			if specs[specIndex][pteCallCount] {
				pte.SetFlags(FlagPresent)
			}
			pteCallCount++

			return unsafe.Pointer(&pte)
		}

		// An error is expected if any page level contains a non-present page
		expError := false
		for _, hasMapping := range spec {
			if !hasMapping {
				expError = true
				break
			}
		}

		physAddr, err := Translate(virtAddr)
		switch {
		case expError && err != ErrInvalidMapping:
			t.Errorf("[spec %d] expected to get ErrInvalidMapping; got %v", specIndex, err)
		case !expError && err != nil:
			t.Errorf("[spec %d] unexpected error %v", specIndex, err)
		case !expError && physAddr != expPhysAddr:
			t.Errorf("[spec %d] expected phys addr to be 0x%x; got 0x%x", specIndex, expPhysAddr, physAddr)
		}
	}
}
@ -1,255 +0,0 @@
package vmm

import (
	"gopheros/kernel"
	"gopheros/kernel/cpu"
	"gopheros/kernel/hal/multiboot"
	"gopheros/kernel/irq"
	"gopheros/kernel/kfmt"
	"gopheros/kernel/mem"
	"gopheros/kernel/mem/pmm"
	"unsafe"
)

var (
	// frameAllocator points to a frame allocator function registered using
	// SetFrameAllocator.
	frameAllocator FrameAllocatorFn

	// the following functions are mocked by tests and are automatically
	// inlined by the compiler.
	handleExceptionWithCodeFn = irq.HandleExceptionWithCode
	readCR2Fn                 = cpu.ReadCR2
	translateFn               = Translate
	visitElfSectionsFn        = multiboot.VisitElfSections

	errUnrecoverableFault = &kernel.Error{Module: "vmm", Message: "page/gpf fault"}
)

// FrameAllocatorFn is a function that can allocate physical frames.
type FrameAllocatorFn func() (pmm.Frame, *kernel.Error)

// SetFrameAllocator registers a frame allocator function that will be used by
// the vmm code when new physical frames need to be allocated.
func SetFrameAllocator(allocFn FrameAllocatorFn) {
	frameAllocator = allocFn
}

func pageFaultHandler(errorCode uint64, frame *irq.Frame, regs *irq.Regs) {
	var (
		faultAddress = uintptr(readCR2Fn())
		faultPage    = PageFromAddress(faultAddress)
		pageEntry    *pageTableEntry
	)

	// Lookup entry for the page where the fault occurred
	walk(faultPage.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
		nextIsPresent := pte.HasFlags(FlagPresent)

		if pteLevel == pageLevels-1 && nextIsPresent {
			pageEntry = pte
		}

		// Abort walk if the next page table entry is missing
		return nextIsPresent
	})

	// CoW is supported for RO pages with the CoW flag set
	if pageEntry != nil && !pageEntry.HasFlags(FlagRW) && pageEntry.HasFlags(FlagCopyOnWrite) {
		var (
			copy    pmm.Frame
			tmpPage Page
			err     *kernel.Error
		)

		if copy, err = frameAllocator(); err != nil {
			nonRecoverablePageFault(faultAddress, errorCode, frame, regs, err)
		} else if tmpPage, err = mapTemporaryFn(copy); err != nil {
			nonRecoverablePageFault(faultAddress, errorCode, frame, regs, err)
		} else {
			// Copy page contents, mark as RW and remove CoW flag
			mem.Memcopy(faultPage.Address(), tmpPage.Address(), mem.PageSize)
			_ = unmapFn(tmpPage)

			// Update mapping to point to the new frame, flag it as RW and
			// remove the CoW flag
			pageEntry.ClearFlags(FlagCopyOnWrite)
			pageEntry.SetFlags(FlagPresent | FlagRW)
			pageEntry.SetFrame(copy)
			flushTLBEntryFn(faultPage.Address())

			// Fault recovered; retry the instruction that caused the fault
			return
		}
	}

	nonRecoverablePageFault(faultAddress, errorCode, frame, regs, errUnrecoverableFault)
}

func nonRecoverablePageFault(faultAddress uintptr, errorCode uint64, frame *irq.Frame, regs *irq.Regs, err *kernel.Error) {
	kfmt.Printf("\nPage fault while accessing address: 0x%16x\nReason: ", faultAddress)
	switch {
	case errorCode == 0:
		kfmt.Printf("read from non-present page")
	case errorCode == 1:
		kfmt.Printf("page protection violation (read)")
	case errorCode == 2:
		kfmt.Printf("write to non-present page")
	case errorCode == 3:
		kfmt.Printf("page protection violation (write)")
	case errorCode == 4:
		kfmt.Printf("page-fault in user-mode")
	case errorCode == 8:
		kfmt.Printf("page table has reserved bit set")
	case errorCode == 16:
		kfmt.Printf("instruction fetch")
	default:
		kfmt.Printf("unknown")
	}

	kfmt.Printf("\n\nRegisters:\n")
	regs.Print()
	frame.Print()

	// TODO: Revisit this when user-mode tasks are implemented
	panic(err)
}

func generalProtectionFaultHandler(_ uint64, frame *irq.Frame, regs *irq.Regs) {
	kfmt.Printf("\nGeneral protection fault while accessing address: 0x%x\n", readCR2Fn())
	kfmt.Printf("Registers:\n")
	regs.Print()
	frame.Print()

	// TODO: Revisit this when user-mode tasks are implemented
	panic(errUnrecoverableFault)
}

// reserveZeroedFrame reserves a physical frame to be used together with
// FlagCopyOnWrite for lazy allocation requests.
func reserveZeroedFrame() *kernel.Error {
	var (
		err      *kernel.Error
		tempPage Page
	)

	if ReservedZeroedFrame, err = frameAllocator(); err != nil {
		return err
	} else if tempPage, err = mapTemporaryFn(ReservedZeroedFrame); err != nil {
		return err
	}
	mem.Memset(tempPage.Address(), 0, mem.PageSize)
	_ = unmapFn(tempPage)

	// From this point on, ReservedZeroedFrame cannot be mapped with a RW flag
	protectReservedZeroedPage = true
	return nil
}

// Init initializes the vmm system, creates a granular PDT for the kernel and
// installs paging-related exception handlers.
func Init(kernelPageOffset uintptr) *kernel.Error {
	if err := setupPDTForKernel(kernelPageOffset); err != nil {
		return err
	}

	if err := reserveZeroedFrame(); err != nil {
		return err
	}

	handleExceptionWithCodeFn(irq.PageFaultException, pageFaultHandler)
	handleExceptionWithCodeFn(irq.GPFException, generalProtectionFaultHandler)
	return nil
}

// setupPDTForKernel queries the multiboot package for the ELF sections that
// correspond to the loaded kernel image and establishes a new granular PDT for
// the kernel's VMA using the appropriate flags (e.g. NX for data sections, RW
// for writable sections etc.).
func setupPDTForKernel(kernelPageOffset uintptr) *kernel.Error {
	var pdt PageDirectoryTable

	// Allocate frame for the page directory and initialize it
	pdtFrame, err := frameAllocator()
	if err != nil {
		return err
	}

	if err = pdt.Init(pdtFrame); err != nil {
		return err
	}

	// Query the ELF sections of the kernel image and establish mappings
	// for each one using the appropriate flags
	var visitor = func(_ string, secFlags multiboot.ElfSectionFlag, secAddress uintptr, secSize uint64) {
		// Bail out if we have encountered an error; also ignore sections
		// not using the kernel's VMA
		if err != nil || secAddress < kernelPageOffset {
			return
		}

		flags := FlagPresent

		if (secFlags & multiboot.ElfSectionExecutable) == 0 {
			flags |= FlagNoExecute
		}

		if (secFlags & multiboot.ElfSectionWritable) != 0 {
			flags |= FlagRW
		}

		// Map the start and end VMA addresses for the section contents
		// into a start and end (inclusive) page number. To figure out
		// the physical start frame we just need to subtract the
		// kernel's VMA offset from the virtual address and round that
		// down to the nearest frame number.
		curPage := PageFromAddress(secAddress)
		lastPage := PageFromAddress(secAddress + uintptr(secSize-1))
		curFrame := pmm.Frame((secAddress - kernelPageOffset) >> mem.PageShift)
		for ; curPage <= lastPage; curFrame, curPage = curFrame+1, curPage+1 {
			if err = pdt.Map(curPage, curFrame, flags); err != nil {
				return
			}
		}
	}

	// Use the noescape hack to prevent the compiler from leaking the visitor
	// function literal to the heap.
	visitElfSectionsFn(
		*(*multiboot.ElfSectionVisitor)(noEscape(unsafe.Pointer(&visitor))),
	)

	// If an error occurred while mapping the ELF sections bail out
	if err != nil {
		return err
	}

	// Ensure that any pages mapped by the memory allocator using
	// EarlyReserveRegion are copied to the new page directory.
	for rsvAddr := earlyReserveLastUsed; rsvAddr < tempMappingAddr; rsvAddr += uintptr(mem.PageSize) {
		page := PageFromAddress(rsvAddr)

		frameAddr, err := translateFn(rsvAddr)
		if err != nil {
			return err
		}

		if err = pdt.Map(page, pmm.Frame(frameAddr>>mem.PageShift), FlagPresent|FlagRW); err != nil {
			return err
		}
	}

	// Activate the new PDT. After this point, the identity mapping for the
	// physical memory addresses where the kernel is loaded becomes invalid.
	pdt.Activate()

	return nil
}

// noEscape hides a pointer from escape analysis. This function is copied over
// from runtime/stubs.go
//go:nosplit
func noEscape(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}
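The vmm code never allocates frames directly; everything funnels through the registered FrameAllocatorFn, which is also how the tests below swap in fakes. A self-contained sketch of the same injection pattern, with hypothetical stand-in types replacing the kernel ones:

package main

import "fmt"

// Minimal stand-ins so the sketch compiles on its own; in the real code
// these come from the kernel and pmm packages.
type kernelError struct{ Module, Message string }
type frame uintptr
type frameAllocatorFn func() (frame, *kernelError)

var frameAllocator frameAllocatorFn

func setFrameAllocator(fn frameAllocatorFn) { frameAllocator = fn }

func main() {
	// A toy bump allocator: hands out consecutive frame numbers.
	next := frame(0)
	setFrameAllocator(func() (frame, *kernelError) {
		f := next
		next++
		return f, nil
	})

	for i := 0; i < 3; i++ {
		f, _ := frameAllocator()
		fmt.Println("allocated frame", f)
	}
}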
@ -1,495 +0,0 @@
package vmm

import (
	"bytes"
	"fmt"
	"gopheros/kernel"
	"gopheros/kernel/cpu"
	"gopheros/kernel/hal/multiboot"
	"gopheros/kernel/irq"
	"gopheros/kernel/kfmt"
	"gopheros/kernel/mem"
	"gopheros/kernel/mem/pmm"
	"strings"
	"testing"
	"unsafe"
)

func TestRecoverablePageFault(t *testing.T) {
	var (
		frame      irq.Frame
		regs       irq.Regs
		pageEntry  pageTableEntry
		origPage   = make([]byte, mem.PageSize)
		clonedPage = make([]byte, mem.PageSize)
		err        = &kernel.Error{Module: "test", Message: "something went wrong"}
	)

	defer func(origPtePtr func(uintptr) unsafe.Pointer) {
		ptePtrFn = origPtePtr
		readCR2Fn = cpu.ReadCR2
		frameAllocator = nil
		mapTemporaryFn = MapTemporary
		unmapFn = Unmap
		flushTLBEntryFn = cpu.FlushTLBEntry
	}(ptePtrFn)

	specs := []struct {
		pteFlags   PageTableEntryFlag
		allocError *kernel.Error
		mapError   *kernel.Error
		expPanic   bool
	}{
		// Missing page
		{0, nil, nil, true},
		// Page is present but CoW flag not set
		{FlagPresent, nil, nil, true},
		// Page is present but both CoW and RW flags set
		{FlagPresent | FlagRW | FlagCopyOnWrite, nil, nil, true},
		// Page is present with CoW flag set but allocating a page copy fails
		{FlagPresent | FlagCopyOnWrite, err, nil, true},
		// Page is present with CoW flag set but mapping the page copy fails
		{FlagPresent | FlagCopyOnWrite, nil, err, true},
		// Page is present with CoW flag set
		{FlagPresent | FlagCopyOnWrite, nil, nil, false},
	}

	ptePtrFn = func(entry uintptr) unsafe.Pointer { return unsafe.Pointer(&pageEntry) }
	readCR2Fn = func() uint64 { return uint64(uintptr(unsafe.Pointer(&origPage[0]))) }
	unmapFn = func(_ Page) *kernel.Error { return nil }
	flushTLBEntryFn = func(_ uintptr) {}

	for specIndex, spec := range specs {
		t.Run(fmt.Sprint(specIndex), func(t *testing.T) {
			defer func() {
				err := recover()
				if spec.expPanic && err == nil {
					t.Error("expected a panic")
				} else if !spec.expPanic {
					if err != nil {
						t.Error("unexpected panic")
						return
					}

					for i := 0; i < len(origPage); i++ {
						if origPage[i] != clonedPage[i] {
							t.Errorf("expected clone page to be a copy of the original page; mismatch at index %d", i)
						}
					}
				}
			}()

			mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), spec.mapError }
			SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
				addr := uintptr(unsafe.Pointer(&clonedPage[0]))
				return pmm.Frame(addr >> mem.PageShift), spec.allocError
			})

			for i := 0; i < len(origPage); i++ {
				origPage[i] = byte(i % 256)
				clonedPage[i] = 0
			}

			pageEntry = 0
			pageEntry.SetFlags(spec.pteFlags)

			pageFaultHandler(2, &frame, &regs)
		})
	}
}

func TestNonRecoverablePageFault(t *testing.T) {
	defer func() {
		kfmt.SetOutputSink(nil)
	}()

	specs := []struct {
		errCode   uint64
		expReason string
	}{
		{0, "read from non-present page"},
		{1, "page protection violation (read)"},
		{2, "write to non-present page"},
		{3, "page protection violation (write)"},
		{4, "page-fault in user-mode"},
		{8, "page table has reserved bit set"},
		{16, "instruction fetch"},
		{0xf00, "unknown"},
	}

	var (
		regs  irq.Regs
		frame irq.Frame
		buf   bytes.Buffer
	)

	kfmt.SetOutputSink(&buf)
	for specIndex, spec := range specs {
		t.Run(fmt.Sprint(specIndex), func(t *testing.T) {
			buf.Reset()
			defer func() {
				if err := recover(); err != errUnrecoverableFault {
					t.Errorf("expected a panic with errUnrecoverableFault; got %v", err)
				}
			}()

			nonRecoverablePageFault(0xbadf00d000, spec.errCode, &frame, &regs, errUnrecoverableFault)
			if got := buf.String(); !strings.Contains(got, spec.expReason) {
				t.Errorf("expected reason %q; got output:\n%q", spec.expReason, got)
			}
		})
	}
}

func TestGPFHandler(t *testing.T) {
	defer func() {
		readCR2Fn = cpu.ReadCR2
	}()

	var (
		regs  irq.Regs
		frame irq.Frame
	)

	readCR2Fn = func() uint64 {
		return 0xbadf00d000
	}

	defer func() {
		if err := recover(); err != errUnrecoverableFault {
			t.Errorf("expected a panic with errUnrecoverableFault; got %v", err)
		}
	}()

	generalProtectionFaultHandler(0, &frame, &regs)
}

func TestInit(t *testing.T) {
	defer func() {
		frameAllocator = nil
		activePDTFn = cpu.ActivePDT
		switchPDTFn = cpu.SwitchPDT
		translateFn = Translate
		mapTemporaryFn = MapTemporary
		unmapFn = Unmap
		handleExceptionWithCodeFn = irq.HandleExceptionWithCode
	}()

	// reserve space for an allocated page
	reservedPage := make([]byte, mem.PageSize)

	multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&emptyInfoData[0])))

	t.Run("success", func(t *testing.T) {
		// fill page with junk
		for i := 0; i < len(reservedPage); i++ {
			reservedPage[i] = byte(i % 256)
		}

		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return pmm.Frame(addr >> mem.PageShift), nil
		})
		activePDTFn = func() uintptr {
			return uintptr(unsafe.Pointer(&reservedPage[0]))
		}
		switchPDTFn = func(_ uintptr) {}
		unmapFn = func(p Page) *kernel.Error { return nil }
		mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil }
		handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {}

		if err := Init(0); err != nil {
			t.Fatal(err)
		}

		// reserved page should be zeroed
		for i := 0; i < len(reservedPage); i++ {
			if reservedPage[i] != 0 {
				t.Errorf("expected reserved page to be zeroed; got byte %d at index %d", reservedPage[i], i)
			}
		}
	})

	t.Run("setupPDT fails", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "out of memory"}

		// Return an error as soon as the PDT frame is requested
		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
			return pmm.InvalidFrame, expErr
		})

		if err := Init(0); err != expErr {
			t.Fatalf("expected error: %v; got %v", expErr, err)
		}
	})

	t.Run("blank page allocation error", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "out of memory"}

		// Allow the PDT allocation to succeed and then return an error when
		// trying to allocate the blank frame
		var allocCount int
		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
			defer func() { allocCount++ }()

			if allocCount == 0 {
				addr := uintptr(unsafe.Pointer(&reservedPage[0]))
				return pmm.Frame(addr >> mem.PageShift), nil
			}

			return pmm.InvalidFrame, expErr
		})
		activePDTFn = func() uintptr {
			return uintptr(unsafe.Pointer(&reservedPage[0]))
		}
		switchPDTFn = func(_ uintptr) {}
		unmapFn = func(p Page) *kernel.Error { return nil }
		mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil }
		handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {}

		if err := Init(0); err != expErr {
			t.Fatalf("expected error: %v; got %v", expErr, err)
		}
	})

	t.Run("blank page mapping error", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "map failed"}

		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return pmm.Frame(addr >> mem.PageShift), nil
		})
		activePDTFn = func() uintptr {
			return uintptr(unsafe.Pointer(&reservedPage[0]))
		}
		switchPDTFn = func(_ uintptr) {}
		unmapFn = func(p Page) *kernel.Error { return nil }
		mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), expErr }
		handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {}

		if err := Init(0); err != expErr {
			t.Fatalf("expected error: %v; got %v", expErr, err)
		}
	})
}

func TestSetupPDTForKernel(t *testing.T) {
	defer func() {
		frameAllocator = nil
		activePDTFn = cpu.ActivePDT
		switchPDTFn = cpu.SwitchPDT
		translateFn = Translate
		mapFn = Map
		mapTemporaryFn = MapTemporary
		unmapFn = Unmap
		earlyReserveLastUsed = tempMappingAddr
	}()

	// reserve space for an allocated page
	reservedPage := make([]byte, mem.PageSize)

	multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&emptyInfoData[0])))

	t.Run("map kernel sections", func(t *testing.T) {
		defer func() { visitElfSectionsFn = multiboot.VisitElfSections }()

		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return pmm.Frame(addr >> mem.PageShift), nil
		})
		activePDTFn = func() uintptr {
			return uintptr(unsafe.Pointer(&reservedPage[0]))
		}
		switchPDTFn = func(_ uintptr) {}
		translateFn = func(_ uintptr) (uintptr, *kernel.Error) { return 0xbadf00d000, nil }
		mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil }
		visitElfSectionsFn = func(v multiboot.ElfSectionVisitor) {
			// address < VMA; should be ignored
			v(".debug", 0, 0, uint64(mem.PageSize>>1))
			// section uses 32-byte alignment instead of page alignment and has a size
			// equal to 1 page. Due to rounding, we need to actually map 2 pages.
			v(".text", multiboot.ElfSectionExecutable, 0x10032, uint64(mem.PageSize))
			v(".data", multiboot.ElfSectionWritable, 0x2000, uint64(mem.PageSize))
			// section is page-aligned and occupies exactly 2 pages
			v(".rodata", 0, 0x3000, uint64(mem.PageSize<<1))
		}
		mapCount := 0
		mapFn = func(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
			defer func() { mapCount++ }()

			var expFlags PageTableEntryFlag

			switch mapCount {
			case 0, 1:
				expFlags = FlagPresent
			case 2:
				expFlags = FlagPresent | FlagNoExecute | FlagRW
			case 3, 4:
				expFlags = FlagPresent | FlagNoExecute
			}

			if (flags & expFlags) != expFlags {
				t.Errorf("[map call %d] expected flags to be %d; got %d", mapCount, expFlags, flags)
			}

			return nil
		}

		if err := setupPDTForKernel(0x123); err != nil {
			t.Fatal(err)
		}

		if exp := 5; mapCount != exp {
			t.Errorf("expected Map to be called %d times; got %d", exp, mapCount)
		}
	})

	t.Run("map of kernel sections fails", func(t *testing.T) {
		defer func() { visitElfSectionsFn = multiboot.VisitElfSections }()
		expErr := &kernel.Error{Module: "test", Message: "map failed"}

		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return pmm.Frame(addr >> mem.PageShift), nil
		})
		activePDTFn = func() uintptr {
			return uintptr(unsafe.Pointer(&reservedPage[0]))
		}
		switchPDTFn = func(_ uintptr) {}
		translateFn = func(_ uintptr) (uintptr, *kernel.Error) { return 0xbadf00d000, nil }
		mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil }
		visitElfSectionsFn = func(v multiboot.ElfSectionVisitor) {
			v(".text", multiboot.ElfSectionExecutable, 0xbadc0ffee, uint64(mem.PageSize>>1))
		}
		mapFn = func(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
			return expErr
		}

		if err := setupPDTForKernel(0); err != expErr {
			t.Fatalf("expected error: %v; got %v", expErr, err)
		}
	})

	t.Run("copy allocator reservations to PDT", func(t *testing.T) {
		earlyReserveLastUsed = tempMappingAddr - uintptr(mem.PageSize)
		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return pmm.Frame(addr >> mem.PageShift), nil
		})
		activePDTFn = func() uintptr {
			return uintptr(unsafe.Pointer(&reservedPage[0]))
		}
		switchPDTFn = func(_ uintptr) {}
		translateFn = func(_ uintptr) (uintptr, *kernel.Error) { return 0xbadf00d000, nil }
		unmapFn = func(p Page) *kernel.Error { return nil }
		mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil }
		mapFn = func(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
			if exp := PageFromAddress(earlyReserveLastUsed); page != exp {
				t.Errorf("expected Map to be called with page %d; got %d", exp, page)
			}

			if exp := pmm.Frame(0xbadf00d000 >> mem.PageShift); frame != exp {
				t.Errorf("expected Map to be called with frame %d; got %d", exp, frame)
			}

			if flags&(FlagPresent|FlagRW) != (FlagPresent | FlagRW) {
				t.Error("expected Map to be called with FlagPresent | FlagRW")
			}
			return nil
		}

		if err := setupPDTForKernel(0); err != nil {
			t.Fatal(err)
		}
	})

	t.Run("pdt init fails", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "translate failed"}

		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return pmm.Frame(addr >> mem.PageShift), nil
		})
		activePDTFn = func() uintptr { return 0 }
		mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return 0, expErr }

		if err := setupPDTForKernel(0); err != expErr {
			t.Fatalf("expected error: %v; got %v", expErr, err)
		}
	})

	t.Run("translation fails for page in reserved address space", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "translate failed"}

		earlyReserveLastUsed = tempMappingAddr - uintptr(mem.PageSize)
		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return pmm.Frame(addr >> mem.PageShift), nil
		})
		activePDTFn = func() uintptr {
			return uintptr(unsafe.Pointer(&reservedPage[0]))
		}
		translateFn = func(_ uintptr) (uintptr, *kernel.Error) {
			return 0, expErr
		}

		if err := setupPDTForKernel(0); err != expErr {
			t.Fatalf("expected error: %v; got %v", expErr, err)
		}
	})

	t.Run("map fails for page in reserved address space", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "map failed"}

		earlyReserveLastUsed = tempMappingAddr - uintptr(mem.PageSize)
		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return pmm.Frame(addr >> mem.PageShift), nil
		})
		activePDTFn = func() uintptr {
			return uintptr(unsafe.Pointer(&reservedPage[0]))
		}
		translateFn = func(_ uintptr) (uintptr, *kernel.Error) { return 0xbadf00d000, nil }
		mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil }
		mapFn = func(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error { return expErr }

		if err := setupPDTForKernel(0); err != expErr {
			t.Fatalf("expected error: %v; got %v", expErr, err)
		}
	})
}

var (
	emptyInfoData = []byte{
		0, 0, 0, 0, // size
		0, 0, 0, 0, // reserved
		0, 0, 0, 0, // tag with type zero and length zero
		0, 0, 0, 0,
	}
)
@ -1,55 +0,0 @@
package vmm

import (
	"gopheros/kernel/mem"
	"unsafe"
)

var (
	// ptePtrFn returns a pointer to the supplied entry address. It is
	// used by tests to override the generated page table entry pointers so
	// walk() can be properly tested. When compiling the kernel this function
	// will be automatically inlined.
	ptePtrFn = func(entryAddr uintptr) unsafe.Pointer {
		return unsafe.Pointer(entryAddr)
	}
)

// pageTableWalker is a function that can be passed to the walk method. The
// function receives the current page level and page table entry as its
// arguments. If the function returns false, then the page walk is aborted.
type pageTableWalker func(pteLevel uint8, pte *pageTableEntry) bool

// walk performs a page table walk for the given virtual address. It calls the
// supplied walkFn with the page table entry that corresponds to each page
// table level. If walkFn returns false then the walk is aborted and control
// returns to the caller.
func walk(virtAddr uintptr, walkFn pageTableWalker) {
	var (
		level                            uint8
		tableAddr, entryAddr, entryIndex uintptr
		ok                               bool
	)

	// tableAddr is initially set to the recursively mapped virtual address for the
	// last entry in the top-most page table. Dereferencing a pointer to this address
	// will allow us to access the entries of the top-most table itself.
	for level, tableAddr = uint8(0), pdtVirtualAddr; level < pageLevels; level, tableAddr = level+1, entryAddr {
		// Extract the bits from virtual address that correspond to the
		// index in this level's page table
		entryIndex = (virtAddr >> pageLevelShifts[level]) & ((1 << pageLevelBits[level]) - 1)

		// By shifting the table virtual address left by pageLevelShifts[level] we add
		// a new level of indirection to our recursive mapping allowing us to access
		// the table pointed to by the page entry
		entryAddr = tableAddr + (entryIndex << mem.PointerShift)

		if ok = walkFn(level, (*pageTableEntry)(ptePtrFn(entryAddr))); !ok {
			return
		}

		// Shift left by the number of bits for this paging level to get
		// the virtual address of the table pointed to by entryAddr
		entryAddr <<= pageLevelBits[level]
	}
}
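The index extraction in walk can be sanity-checked in isolation. A standalone sketch, assuming the usual amd64 4-level split (9 index bits per level, 12-bit page offset) and using the same example address as the walk test below:

package main

import "fmt"

func main() {
	// 9 index bits per level, highest level first; the offset occupies bits 0-11.
	pageLevelShifts := []uintptr{39, 30, 21, 12}
	pageLevelBits := []uintptr{9, 9, 9, 9}

	// Breaks down to p4=1, p3=2, p2=3, p1=4, offset=1024.
	virtAddr := uintptr(0x8080604400)

	for level := 0; level < 4; level++ {
		idx := (virtAddr >> pageLevelShifts[level]) & ((1 << pageLevelBits[level]) - 1)
		fmt.Printf("level %d index = %d\n", level, idx)
	}
	fmt.Printf("offset = %d\n", virtAddr&((1<<12)-1))
}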
@ -1,75 +0,0 @@
package vmm

import (
	"gopheros/kernel/mem"
	"runtime"
	"testing"
	"unsafe"
)

func TestPtePtrFn(t *testing.T) {
	// Dummy test to keep coverage happy
	if exp, got := unsafe.Pointer(uintptr(123)), ptePtrFn(uintptr(123)); exp != got {
		t.Fatalf("expected ptePtrFn to return %v; got %v", exp, got)
	}
}

func TestWalkAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}

	defer func(origPtePtr func(uintptr) unsafe.Pointer) {
		ptePtrFn = origPtePtr
	}(ptePtrFn)

	// This address breaks down to:
	// p4 index: 1
	// p3 index: 2
	// p2 index: 3
	// p1 index: 4
	// offset  : 1024
	targetAddr := uintptr(0x8080604400)

	sizeofPteEntry := uintptr(unsafe.Sizeof(pageTableEntry(0)))
	expEntryAddrBits := [pageLevels][pageLevels + 1]uintptr{
		{511, 511, 511, 511, 1 * sizeofPteEntry},
		{511, 511, 511, 1, 2 * sizeofPteEntry},
		{511, 511, 1, 2, 3 * sizeofPteEntry},
		{511, 1, 2, 3, 4 * sizeofPteEntry},
	}

	pteCallCount := 0
	ptePtrFn = func(entry uintptr) unsafe.Pointer {
		if pteCallCount >= pageLevels {
			t.Fatalf("unexpected call to ptePtrFn; already called %d times", pageLevels)
		}

		for i := 0; i < pageLevels; i++ {
			pteIndex := (entry >> pageLevelShifts[i]) & ((1 << pageLevelBits[i]) - 1)
			if pteIndex != expEntryAddrBits[pteCallCount][i] {
				t.Errorf("[ptePtrFn call %d] expected pte entry for level %d to use offset %d; got %d", pteCallCount, i, expEntryAddrBits[pteCallCount][i], pteIndex)
			}
		}

		// Check the page offset
		pteIndex := entry & ((1 << mem.PageShift) - 1)
		if pteIndex != expEntryAddrBits[pteCallCount][pageLevels] {
			t.Errorf("[ptePtrFn call %d] expected pte offset to be %d; got %d", pteCallCount, expEntryAddrBits[pteCallCount][pageLevels], pteIndex)
		}

		pteCallCount++

		return unsafe.Pointer(uintptr(0xf00))
	}

	walkFnCallCount := 0
	walk(targetAddr, func(level uint8, entry *pageTableEntry) bool {
		walkFnCallCount++
		return walkFnCallCount != pageLevels
	})

	if pteCallCount != pageLevels {
		t.Errorf("expected ptePtrFn to be called %d times; got %d", pageLevels, pteCallCount)
	}
}
@ -1,4 +1,4 @@
-package mem
+package kernel

 import (
 	"reflect"
@ -9,7 +9,7 @@ import (
 // is based on bytes.Repeat; instead of using a for loop, this function uses
 // log2(size) copy calls which should give us a speed boost as page addresses
 // are always aligned.
-func Memset(addr uintptr, value byte, size Size) {
+func Memset(addr uintptr, value byte, size uintptr) {
 	if size == 0 {
 		return
 	}
@ -23,13 +23,13 @@ func Memset(addr uintptr, value byte, size Size) {

 	// Set first element and make log2(size) optimized copies
 	target[0] = value
-	for index := Size(1); index < size; index *= 2 {
+	for index := uintptr(1); index < size; index *= 2 {
 		copy(target[index:], target[:index])
 	}
 }

 // Memcopy copies size bytes from src to dst.
-func Memcopy(src, dst uintptr, size Size) {
+func Memcopy(src, dst uintptr, size uintptr) {
 	if size == 0 {
 		return
 	}
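The log2 trick doubles the initialized prefix on each pass instead of writing one byte per iteration. A standalone sketch of the same doubling idea on a plain byte slice (illustrative only, not the kernel implementation):

package main

import "fmt"

// fill sets every byte of buf to value using about log2(len(buf)) copy
// calls, mirroring the doubling strategy Memset uses above.
func fill(buf []byte, value byte) {
	if len(buf) == 0 {
		return
	}

	buf[0] = value
	for n := 1; n < len(buf); n *= 2 {
		copy(buf[n:], buf[:n]) // doubles the initialized prefix
	}
}

func main() {
	buf := make([]byte, 10)
	fill(buf, 0xFE)
	fmt.Println(buf) // [254 254 254 254 254 254 254 254 254 254]
}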
@ -1,22 +1,24 @@
-package mem
+package kernel

 import (
 	"testing"
 	"unsafe"
 )

+const pageSize = 4096
+
 func TestMemset(t *testing.T) {
 	// memset with a 0 size should be a no-op
 	Memset(uintptr(0), 0x00, 0)

 	for pageCount := uint32(1); pageCount <= 10; pageCount++ {
-		buf := make([]byte, PageSize<<pageCount)
+		buf := make([]byte, pageSize<<pageCount)
 		for i := 0; i < len(buf); i++ {
 			buf[i] = 0xFE
 		}

 		addr := uintptr(unsafe.Pointer(&buf[0]))
-		Memset(addr, 0x00, Size(len(buf)))
+		Memset(addr, 0x00, uintptr(len(buf)))

 		for i := 0; i < len(buf); i++ {
 			if got := buf[i]; got != 0x00 {
@ -31,8 +33,8 @@ func TestMemcopy(t *testing.T) {
 	Memcopy(uintptr(0), uintptr(0), 0)

 	var (
-		src = make([]byte, PageSize)
-		dst = make([]byte, PageSize)
+		src = make([]byte, pageSize)
+		dst = make([]byte, pageSize)
 	)
 	for i := 0; i < len(src); i++ {
 		src[i] = byte(i % 256)
@ -41,7 +43,7 @@ func TestMemcopy(t *testing.T) {
 	Memcopy(
 		uintptr(unsafe.Pointer(&src[0])),
 		uintptr(unsafe.Pointer(&dst[0])),
-		PageSize,
+		pageSize,
 	)

 	for i := 0; i < len(src); i++ {
@ -1,17 +1,15 @@
-// +build amd64
+package mm

-package mem
-
 const (
 	// PointerShift is equal to log2(unsafe.Sizeof(uintptr)). The pointer
 	// size for this architecture is defined as (1 << PointerShift).
-	PointerShift = 3
+	PointerShift = uintptr(3)

 	// PageShift is equal to log2(PageSize). This constant is used when
 	// we need to convert a physical address to a page number (shift right by PageShift)
 	// and vice-versa.
-	PageShift = 12
+	PageShift = uintptr(12)

 	// PageSize defines the system's page size in bytes.
-	PageSize = Size(1 << PageShift)
+	PageSize = uintptr(1 << PageShift)
 )
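These constants carry the whole address/page-number conversion: shift right by PageShift to get a page or frame index, shift left to get back the base address, mask with PageSize-1 for the in-page offset. A quick standalone check of that arithmetic:

package main

import "fmt"

const (
	pageShift = uintptr(12)
	pageSize  = uintptr(1 << pageShift)
)

func main() {
	addr := uintptr(0x203ff8) // somewhere inside page 0x203

	page := addr >> pageShift       // page number containing addr
	base := page << pageShift       // base address of that page
	offset := addr & (pageSize - 1) // position of addr within the page

	fmt.Printf("page=%#x base=%#x offset=%#x\n", page, base, offset)
}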
src/gopheros/kernel/mm/page.go (new file)
@ -0,0 +1,67 @@
package mm

import (
	"gopheros/kernel"
	"math"
)

// Frame describes a physical memory page index.
type Frame uintptr

const (
	// InvalidFrame is returned by page allocators when
	// they fail to reserve the requested frame.
	InvalidFrame = Frame(math.MaxUint64)
)

// Valid returns true if this is a valid frame.
func (f Frame) Valid() bool {
	return f != InvalidFrame
}

// Address returns a pointer to the physical memory address pointed to by this Frame.
func (f Frame) Address() uintptr {
	return uintptr(f << PageShift)
}

// FrameFromAddress returns a Frame that corresponds to
// the given physical address. This function can handle
// both page-aligned and non-aligned addresses. In the
// latter case, the input address will be rounded down
// to the frame that contains it.
func FrameFromAddress(physAddr uintptr) Frame {
	return Frame((physAddr & ^(uintptr(PageSize - 1))) >> PageShift)
}

var (
	// frameAllocator points to a frame allocator function registered using
	// SetFrameAllocator.
	frameAllocator FrameAllocatorFn
)

// FrameAllocatorFn is a function that can allocate physical frames.
type FrameAllocatorFn func() (Frame, *kernel.Error)

// SetFrameAllocator registers a frame allocator function that will be used by
// the vmm code when new physical frames need to be allocated.
func SetFrameAllocator(allocFn FrameAllocatorFn) { frameAllocator = allocFn }

// AllocFrame allocates a new physical frame using the currently active
// physical frame allocator.
func AllocFrame() (Frame, *kernel.Error) { return frameAllocator() }

// Page describes a virtual memory page index.
type Page uintptr

// Address returns a pointer to the virtual memory address pointed to by this Page.
func (f Page) Address() uintptr {
	return uintptr(f << PageShift)
}

// PageFromAddress returns a Page that corresponds to the given virtual
// address. This function can handle both page-aligned and non-aligned virtual
// addresses. In the latter case, the input address will be rounded down to the
// page that contains it.
func PageFromAddress(virtAddr uintptr) Page {
	return Page((virtAddr & ^(uintptr(PageSize - 1))) >> PageShift)
}
src/gopheros/kernel/mm/page_test.go (new file)
@ -0,0 +1,90 @@
package mm

import (
	"gopheros/kernel"
	"testing"
)

func TestFrameMethods(t *testing.T) {
	for frameIndex := uint64(0); frameIndex < 128; frameIndex++ {
		frame := Frame(frameIndex)

		if !frame.Valid() {
			t.Errorf("expected frame %d to be valid", frameIndex)
		}

		if exp, got := uintptr(frameIndex<<PageShift), frame.Address(); got != exp {
			t.Errorf("expected frame (%d, index: %d) call to Address() to return %x; got %x", frame, frameIndex, exp, got)
		}
	}

	invalidFrame := InvalidFrame
	if invalidFrame.Valid() {
		t.Error("expected InvalidFrame.Valid() to return false")
	}
}

func TestFrameFromAddress(t *testing.T) {
	specs := []struct {
		input    uintptr
		expFrame Frame
	}{
		{0, Frame(0)},
		{4095, Frame(0)},
		{4096, Frame(1)},
		{4123, Frame(1)},
	}

	for specIndex, spec := range specs {
		if got := FrameFromAddress(spec.input); got != spec.expFrame {
			t.Errorf("[spec %d] expected returned frame to be %v; got %v", specIndex, spec.expFrame, got)
		}
	}
}

func TestFrameAllocator(t *testing.T) {
	var allocCalled bool
	customAlloc := func() (Frame, *kernel.Error) {
		allocCalled = true
		return FrameFromAddress(0xbadf00), nil
	}

	defer SetFrameAllocator(nil)
	SetFrameAllocator(customAlloc)

	if _, err := AllocFrame(); err != nil {
		t.Fatalf(err.Error())
	}

	if !allocCalled {
		t.Fatal("expected custom allocator to be invoked after a call to AllocFrame")
	}
}

func TestPageMethods(t *testing.T) {
	for pageIndex := uint64(0); pageIndex < 128; pageIndex++ {
		page := Page(pageIndex)

		if exp, got := uintptr(pageIndex<<PageShift), page.Address(); got != exp {
			t.Errorf("expected page (%d, index: %d) call to Address() to return %x; got %x", page, pageIndex, exp, got)
		}
	}
}

func TestPageFromAddress(t *testing.T) {
	specs := []struct {
		input   uintptr
		expPage Page
	}{
		{0, Page(0)},
		{4095, Page(0)},
		{4096, Page(1)},
		{4123, Page(1)},
	}

	for specIndex, spec := range specs {
		if got := PageFromAddress(spec.input); got != spec.expPage {
			t.Errorf("[spec %d] expected returned page to be %v; got %v", specIndex, spec.expPage, got)
		}
	}
}
@ -1,22 +1,17 @@
-package allocator
+package pmm
 
 import (
 	"gopheros/kernel"
-	"gopheros/kernel/hal/multiboot"
 	"gopheros/kernel/kfmt"
-	"gopheros/kernel/mem"
-	"gopheros/kernel/mem/pmm"
-	"gopheros/kernel/mem/vmm"
+	"gopheros/kernel/mm"
+	"gopheros/kernel/mm/vmm"
+	"gopheros/multiboot"
 	"math"
 	"reflect"
 	"unsafe"
 )
 
 var (
-	// bitmapAllocator is a BitmapAllocator instance that serves as the
-	// primary allocator for reserving pages.
-	bitmapAllocator BitmapAllocator
-
 	errBitmapAllocOutOfMemory     = &kernel.Error{Module: "bitmap_alloc", Message: "out of memory"}
 	errBitmapAllocFrameNotManaged = &kernel.Error{Module: "bitmap_alloc", Message: "frame not managed by this allocator"}
 	errBitmapAllocDoubleFree      = &kernel.Error{Module: "bitmap_alloc", Message: "frame is already free"}
@ -37,11 +32,11 @@ const (
 type framePool struct {
 	// startFrame is the frame number for the first page in this pool.
 	// each free bitmap entry i corresponds to frame (startFrame + i).
-	startFrame pmm.Frame
+	startFrame mm.Frame
 
 	// endFrame tracks the last frame in the pool. The total number of
 	// frames is given by: (endFrame - startFrame) + 1
-	endFrame pmm.Frame
+	endFrame mm.Frame
 
 	// freeCount tracks the available pages in this pool. The allocator
 	// can use this field to skip fully allocated pools without the need
@ -85,8 +80,8 @@ func (alloc *BitmapAllocator) setupPoolBitmaps() *kernel.Error {
 	var (
 		err        *kernel.Error
 		sizeofPool = unsafe.Sizeof(framePool{})
-		pageSizeMinus1      = uint64(mem.PageSize - 1)
-		requiredBitmapBytes mem.Size
+		pageSizeMinus1      = mm.PageSize - 1
+		requiredBitmapBytes uint64
 	)
 
 	// Detect available memory regions and calculate their pool bitmap
@ -101,27 +96,27 @@ func (alloc *BitmapAllocator) setupPoolBitmaps() *kernel.Error {
 
 		// Reported addresses may not be page-aligned; round up to get
 		// the start frame and round down to get the end frame
-		regionStartFrame := pmm.Frame(((region.PhysAddress + pageSizeMinus1) & ^pageSizeMinus1) >> mem.PageShift)
-		regionEndFrame := pmm.Frame(((region.PhysAddress+region.Length) & ^pageSizeMinus1)>>mem.PageShift) - 1
+		regionStartFrame := mm.Frame(((uintptr(region.PhysAddress) + pageSizeMinus1) & ^pageSizeMinus1) >> mm.PageShift)
+		regionEndFrame := mm.Frame((uintptr(region.PhysAddress+region.Length) & ^pageSizeMinus1)>>mm.PageShift) - 1
 		pageCount := uint32(regionEndFrame - regionStartFrame)
 		alloc.totalPages += pageCount
 
 		// To represent the free page bitmap we need pageCount bits. Since our
 		// slice uses uint64 for storing the bitmap we need to round up the
 		// required bits so they are a multiple of 64 bits
-		requiredBitmapBytes += mem.Size(((pageCount + 63) &^ 63) >> 3)
+		requiredBitmapBytes += uint64(((pageCount + 63) &^ 63) >> 3)
 		return true
 	})
 
 	// Reserve enough pages to hold the allocator state
-	requiredBytes := mem.Size(((uint64(uintptr(alloc.poolsHdr.Len)*sizeofPool) + uint64(requiredBitmapBytes)) + pageSizeMinus1) & ^pageSizeMinus1)
-	requiredPages := requiredBytes >> mem.PageShift
+	requiredBytes := (uintptr(alloc.poolsHdr.Len)*sizeofPool + uintptr(requiredBitmapBytes) + pageSizeMinus1) & ^pageSizeMinus1
+	requiredPages := requiredBytes >> mm.PageShift
 	alloc.poolsHdr.Data, err = reserveRegionFn(requiredBytes)
 	if err != nil {
 		return err
 	}
 
-	for page, index := vmm.PageFromAddress(alloc.poolsHdr.Data), mem.Size(0); index < requiredPages; page, index = page+1, index+1 {
+	for page, index := mm.PageFromAddress(alloc.poolsHdr.Data), uintptr(0); index < requiredPages; page, index = page+1, index+1 {
 		nextFrame, err := earlyAllocFrame()
 		if err != nil {
 			return err
@ -131,7 +126,7 @@ func (alloc *BitmapAllocator) setupPoolBitmaps() *kernel.Error {
 			return err
 		}
 
-		mem.Memset(page.Address(), 0, mem.PageSize)
+		kernel.Memset(page.Address(), 0, mm.PageSize)
 	}
 
 	alloc.pools = *(*[]framePool)(unsafe.Pointer(&alloc.poolsHdr))
@ -144,9 +139,9 @@ func (alloc *BitmapAllocator) setupPoolBitmaps() *kernel.Error {
 			return true
 		}
 
-		regionStartFrame := pmm.Frame(((region.PhysAddress + pageSizeMinus1) & ^pageSizeMinus1) >> mem.PageShift)
-		regionEndFrame := pmm.Frame(((region.PhysAddress+region.Length) & ^pageSizeMinus1)>>mem.PageShift) - 1
-		bitmapBytes := uintptr((((regionEndFrame - regionStartFrame) + 63) &^ 63) >> 3)
+		regionStartFrame := mm.Frame(((uintptr(region.PhysAddress) + pageSizeMinus1) & ^pageSizeMinus1) >> mm.PageShift)
+		regionEndFrame := mm.Frame((uintptr(region.PhysAddress+region.Length) & ^pageSizeMinus1)>>mm.PageShift) - 1
+		bitmapBytes := ((uintptr(regionEndFrame-regionStartFrame) + 63) &^ 63) >> 3
 
 		alloc.pools[poolIndex].startFrame = regionStartFrame
 		alloc.pools[poolIndex].endFrame = regionEndFrame
@ -166,7 +161,7 @@ func (alloc *BitmapAllocator) setupPoolBitmaps() *kernel.Error {
 
 // markFrame updates the reservation flag for the bitmap entry that corresponds
 // to the supplied frame.
-func (alloc *BitmapAllocator) markFrame(poolIndex int, frame pmm.Frame, flag markAs) {
+func (alloc *BitmapAllocator) markFrame(poolIndex int, frame mm.Frame, flag markAs) {
 	if poolIndex < 0 || frame > alloc.pools[poolIndex].endFrame {
 		return
 	}
@ -191,7 +186,7 @@ func (alloc *BitmapAllocator) markFrame(poolIndex int, frame pmm.Frame, flag mar
 // poolForFrame returns the index of the pool that contains frame or -1 if
 // the frame is not contained in any of the available memory pools (e.g. it
 // points to a reserved memory region).
-func (alloc *BitmapAllocator) poolForFrame(frame pmm.Frame) int {
+func (alloc *BitmapAllocator) poolForFrame(frame mm.Frame) int {
 	for poolIndex, pool := range alloc.pools {
 		if frame >= pool.startFrame && frame <= pool.endFrame {
 			return poolIndex
@ -207,8 +202,8 @@ func (alloc *BitmapAllocator) reserveKernelFrames() {
 	// Flag frames used by kernel image as reserved. Since the kernel must
 	// occupy a contiguous memory block we assume that all its frames will
 	// fall into one of the available memory pools
-	poolIndex := alloc.poolForFrame(earlyAllocator.kernelStartFrame)
-	for frame := earlyAllocator.kernelStartFrame; frame <= earlyAllocator.kernelEndFrame; frame++ {
+	poolIndex := alloc.poolForFrame(bootMemAllocator.kernelStartFrame)
+	for frame := bootMemAllocator.kernelStartFrame; frame <= bootMemAllocator.kernelEndFrame; frame++ {
 		alloc.markFrame(poolIndex, frame, markReserved)
 	}
 }
@ -221,10 +216,10 @@ func (alloc *BitmapAllocator) reserveEarlyAllocatorFrames() {
 	// individual frames but only a counter of allocated frames. To get
 	// the list of frames we reset its internal state and "replay" the
 	// allocation requests to get the correct frames.
-	allocCount := earlyAllocator.allocCount
-	earlyAllocator.allocCount, earlyAllocator.lastAllocFrame = 0, 0
+	allocCount := bootMemAllocator.allocCount
+	bootMemAllocator.allocCount, bootMemAllocator.lastAllocFrame = 0, 0
 	for i := uint64(0); i < allocCount; i++ {
-		frame, _ := earlyAllocator.AllocFrame()
+		frame, _ := bootMemAllocator.AllocFrame()
 		alloc.markFrame(
 			alloc.poolForFrame(frame),
 			frame,
@ -244,7 +239,7 @@ func (alloc *BitmapAllocator) printStats() {
 
 // AllocFrame reserves and returns a physical memory frame. An error will be
 // returned if no more memory can be allocated.
-func (alloc *BitmapAllocator) AllocFrame() (pmm.Frame, *kernel.Error) {
+func (alloc *BitmapAllocator) AllocFrame() (mm.Frame, *kernel.Error) {
 	for poolIndex := 0; poolIndex < len(alloc.pools); poolIndex++ {
 		if alloc.pools[poolIndex].freeCount == 0 {
 			continue
@ -265,18 +260,18 @@ func (alloc *BitmapAllocator) AllocFrame() (pmm.Frame, *kernel.Error) {
 			alloc.pools[poolIndex].freeCount--
 			alloc.pools[poolIndex].freeBitmap[blockIndex] |= mask
 			alloc.reservedPages++
-			return alloc.pools[poolIndex].startFrame + pmm.Frame((blockIndex<<6)+blockOffset), nil
+			return alloc.pools[poolIndex].startFrame + mm.Frame((blockIndex<<6)+blockOffset), nil
 		}
 	}
 	}
 
-	return pmm.InvalidFrame, errBitmapAllocOutOfMemory
+	return mm.InvalidFrame, errBitmapAllocOutOfMemory
 }
 
 // FreeFrame releases a frame previously allocated via a call to AllocFrame.
 // Trying to release a frame not part of the allocator pools or a frame that
 // is already marked as free will cause an error to be returned.
-func (alloc *BitmapAllocator) FreeFrame(frame pmm.Frame) *kernel.Error {
+func (alloc *BitmapAllocator) FreeFrame(frame mm.Frame) *kernel.Error {
 	poolIndex := alloc.poolForFrame(frame)
 	if poolIndex < 0 {
 		return errBitmapAllocFrameNotManaged
@ -295,32 +290,3 @@ func (alloc *BitmapAllocator) FreeFrame(frame pmm.Frame) *kernel.Error {
 	alloc.reservedPages--
 	return nil
 }
-
-// earlyAllocFrame is a helper that delegates a frame allocation request to the
-// early allocator instance. This function is passed as an argument to
-// vmm.SetFrameAllocator instead of earlyAllocator.AllocFrame. The latter
-// confuses the compiler's escape analysis into thinking that
-// earlyAllocator.Frame escapes to heap.
-func earlyAllocFrame() (pmm.Frame, *kernel.Error) {
-	return earlyAllocator.AllocFrame()
-}
-
-// AllocFrame is a helper that delegates a frame allocation request to the
-// bitmap allocator instance.
-func AllocFrame() (pmm.Frame, *kernel.Error) {
-	return bitmapAllocator.AllocFrame()
-}
-
-// Init sets up the kernel physical memory allocation sub-system.
-func Init(kernelStart, kernelEnd uintptr) *kernel.Error {
-	earlyAllocator.init(kernelStart, kernelEnd)
-	earlyAllocator.printMemoryMap()
-
-	vmm.SetFrameAllocator(earlyAllocFrame)
-	if err := bitmapAllocator.init(); err != nil {
-		return err
-	}
-	vmm.SetFrameAllocator(AllocFrame)
-
-	return nil
-}
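For intuition, here is a standalone Go sketch (not from the commit) of the block/offset arithmetic the bitmap allocator relies on: each uint64 block in freeBitmap tracks 64 consecutive frames, so a frame maps to block frame>>6 and a bit within that block. The LSB-first bit order below is a simplification for illustration; the kernel's actual bit layout may differ.

package main

import "fmt"

// markReserved sets the bit that tracks frameIndex inside the pool bitmap.
func markReserved(bitmap []uint64, frameIndex uint64) {
	block := frameIndex >> 6               // 64 frames per uint64 block
	mask := uint64(1) << (frameIndex & 63) // bit position within the block
	bitmap[block] |= mask
}

func main() {
	bitmap := make([]uint64, 2) // enough for 128 frames, like the test pools
	markReserved(bitmap, 0)
	markReserved(bitmap, 65)
	fmt.Printf("block0=%#x block1=%#x\n", bitmap[0], bitmap[1]) // block0=0x1 block1=0x2
}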
@ -1,11 +1,10 @@
-package allocator
+package pmm
 
 import (
 	"gopheros/kernel"
-	"gopheros/kernel/hal/multiboot"
-	"gopheros/kernel/mem"
-	"gopheros/kernel/mem/pmm"
-	"gopheros/kernel/mem/vmm"
+	"gopheros/kernel/mm"
+	"gopheros/kernel/mm/vmm"
+	"gopheros/multiboot"
 	"math"
 	"strconv"
 	"testing"
@ -24,7 +23,7 @@ func TestSetupPoolBitmaps(t *testing.T) {
 	// The allocator will need to reserve 2 pages to store the bitmap data.
 	var (
 		alloc   BitmapAllocator
-		physMem = make([]byte, 2*mem.PageSize)
+		physMem = make([]byte, 2*mm.PageSize)
 	)
 
 	// Init phys mem with junk
@ -33,13 +32,13 @@
 	}
 
 	mapCallCount := 0
-	mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
+	mapFn = func(page mm.Page, frame mm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
 		mapCallCount++
 		return nil
 	}
 
 	reserveCallCount := 0
-	reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
+	reserveRegionFn = func(_ uintptr) (uintptr, *kernel.Error) {
 		reserveCallCount++
 		return uintptr(unsafe.Pointer(&physMem[0])), nil
 	}
@ -89,7 +88,7 @@ func TestSetupPoolBitmapsErrors(t *testing.T) {
 	t.Run("vmm.EarlyReserveRegion returns an error", func(t *testing.T) {
 		expErr := &kernel.Error{Module: "test", Message: "something went wrong"}
 
-		reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
+		reserveRegionFn = func(_ uintptr) (uintptr, *kernel.Error) {
 			return 0, expErr
 		}
 
@ -100,11 +99,11 @@
 	t.Run("vmm.Map returns an error", func(t *testing.T) {
 		expErr := &kernel.Error{Module: "test", Message: "something went wrong"}
 
-		reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
+		reserveRegionFn = func(_ uintptr) (uintptr, *kernel.Error) {
 			return 0, nil
 		}
 
-		mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
+		mapFn = func(page mm.Page, frame mm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
 			return expErr
 		}
 
@ -113,7 +112,7 @@
 		}
 	})
 
-	t.Run("earlyAllocator returns an error", func(t *testing.T) {
+	t.Run("bootMemAllocator returns an error", func(t *testing.T) {
 		emptyInfoData := []byte{
 			0, 0, 0, 0, // size
 			0, 0, 0, 0, // reserved
@ -133,8 +132,8 @@ func TestBitmapAllocatorMarkFrame(t *testing.T) {
 	var alloc = BitmapAllocator{
 		pools: []framePool{
 			{
-				startFrame: pmm.Frame(0),
-				endFrame:   pmm.Frame(127),
+				startFrame: mm.Frame(0),
+				endFrame:   mm.Frame(127),
 				freeCount:  128,
 				freeBitmap: make([]uint64, 2),
 			},
@ -142,8 +141,8 @@
 		totalPages: 128,
 	}
 
-	lastFrame := pmm.Frame(alloc.totalPages)
-	for frame := pmm.Frame(0); frame < lastFrame; frame++ {
+	lastFrame := mm.Frame(alloc.totalPages)
+	for frame := mm.Frame(0); frame < lastFrame; frame++ {
 		alloc.markFrame(0, frame, markReserved)
 
 		block := uint64(frame / 64)
@ -163,7 +162,7 @@
 	}
 
 	// Calling markFrame with a frame not part of the pool should be a no-op
-	alloc.markFrame(0, pmm.Frame(0xbadf00d), markReserved)
+	alloc.markFrame(0, mm.Frame(0xbadf00d), markReserved)
 	for blockIndex, block := range alloc.pools[0].freeBitmap {
 		if block != 0 {
 			t.Errorf("expected all blocks to be set to 0; block %d is set to %d", blockIndex, block)
@ -171,7 +170,7 @@
 	}
 
 	// Calling markFrame with a negative pool index should be a no-op
-	alloc.markFrame(-1, pmm.Frame(0), markReserved)
+	alloc.markFrame(-1, mm.Frame(0), markReserved)
 	for blockIndex, block := range alloc.pools[0].freeBitmap {
 		if block != 0 {
 			t.Errorf("expected all blocks to be set to 0; block %d is set to %d", blockIndex, block)
@ -183,14 +182,14 @@ func TestBitmapAllocatorPoolForFrame(t *testing.T) {
 	var alloc = BitmapAllocator{
 		pools: []framePool{
 			{
-				startFrame: pmm.Frame(0),
-				endFrame:   pmm.Frame(63),
+				startFrame: mm.Frame(0),
+				endFrame:   mm.Frame(63),
 				freeCount:  64,
 				freeBitmap: make([]uint64, 1),
 			},
 			{
-				startFrame: pmm.Frame(128),
-				endFrame:   pmm.Frame(191),
+				startFrame: mm.Frame(128),
+				endFrame:   mm.Frame(191),
 				freeCount:  64,
 				freeBitmap: make([]uint64, 1),
 			},
@ -199,14 +198,14 @@
 	}
 
 	specs := []struct {
-		frame    pmm.Frame
+		frame    mm.Frame
 		expIndex int
 	}{
-		{pmm.Frame(0), 0},
-		{pmm.Frame(63), 0},
-		{pmm.Frame(64), -1},
-		{pmm.Frame(128), 1},
-		{pmm.Frame(192), -1},
+		{mm.Frame(0), 0},
+		{mm.Frame(63), 0},
+		{mm.Frame(64), -1},
+		{mm.Frame(128), 1},
+		{mm.Frame(192), -1},
 	}
 
 	for specIndex, spec := range specs {
@ -220,14 +219,14 @@ func TestBitmapAllocatorReserveKernelFrames(t *testing.T) {
 	var alloc = BitmapAllocator{
 		pools: []framePool{
 			{
-				startFrame: pmm.Frame(0),
-				endFrame:   pmm.Frame(7),
+				startFrame: mm.Frame(0),
+				endFrame:   mm.Frame(7),
 				freeCount:  8,
 				freeBitmap: make([]uint64, 1),
 			},
 			{
-				startFrame: pmm.Frame(64),
-				endFrame:   pmm.Frame(191),
+				startFrame: mm.Frame(64),
+				endFrame:   mm.Frame(191),
 				freeCount:  128,
 				freeBitmap: make([]uint64, 2),
 			},
@ -236,9 +235,9 @@
 	}
 
 	// kernel occupies 16 frames and starts at the beginning of pool 1
-	earlyAllocator.kernelStartFrame = pmm.Frame(64)
-	earlyAllocator.kernelEndFrame = pmm.Frame(79)
-	kernelSizePages := uint32(earlyAllocator.kernelEndFrame - earlyAllocator.kernelStartFrame + 1)
+	bootMemAllocator.kernelStartFrame = mm.Frame(64)
+	bootMemAllocator.kernelEndFrame = mm.Frame(79)
+	kernelSizePages := uint32(bootMemAllocator.kernelEndFrame - bootMemAllocator.kernelStartFrame + 1)
 	alloc.reserveKernelFrames()
 
 	if exp, got := kernelSizePages, alloc.reservedPages; got != exp {
@ -266,14 +265,14 @@ func TestBitmapAllocatorReserveEarlyAllocatorFrames(t *testing.T) {
 	var alloc = BitmapAllocator{
 		pools: []framePool{
 			{
-				startFrame: pmm.Frame(0),
-				endFrame:   pmm.Frame(63),
+				startFrame: mm.Frame(0),
+				endFrame:   mm.Frame(63),
 				freeCount:  64,
 				freeBitmap: make([]uint64, 1),
 			},
 			{
-				startFrame: pmm.Frame(64),
-				endFrame:   pmm.Frame(191),
+				startFrame: mm.Frame(64),
+				endFrame:   mm.Frame(191),
 				freeCount:  128,
 				freeBitmap: make([]uint64, 2),
 			},
@ -286,9 +285,9 @@
 	// Simulate 16 allocations made using the early allocator in region 0
 	// as reported by the multiboot data and move the kernel to pool 1
 	allocCount := uint32(16)
-	earlyAllocator.allocCount = uint64(allocCount)
-	earlyAllocator.kernelStartFrame = pmm.Frame(256)
-	earlyAllocator.kernelEndFrame = pmm.Frame(256)
+	bootMemAllocator.allocCount = uint64(allocCount)
+	bootMemAllocator.kernelStartFrame = mm.Frame(256)
+	bootMemAllocator.kernelEndFrame = mm.Frame(256)
 	alloc.reserveEarlyAllocatorFrames()
 
 	if exp, got := allocCount, alloc.reservedPages; got != exp {
@ -316,15 +315,15 @@ func TestBitmapAllocatorAllocAndFreeFrame(t *testing.T) {
 	var alloc = BitmapAllocator{
 		pools: []framePool{
 			{
-				startFrame: pmm.Frame(0),
-				endFrame:   pmm.Frame(7),
+				startFrame: mm.Frame(0),
+				endFrame:   mm.Frame(7),
 				freeCount:  8,
 				// only the first 8 bits of block 0 are used
 				freeBitmap: make([]uint64, 1),
 			},
 			{
-				startFrame: pmm.Frame(64),
-				endFrame:   pmm.Frame(191),
+				startFrame: mm.Frame(64),
+				endFrame:   mm.Frame(191),
 				freeCount:  128,
 				freeBitmap: make([]uint64, 2),
 			},
@ -377,11 +376,11 @@
 	}
 
 	// Test Free errors
-	if err := alloc.FreeFrame(pmm.Frame(0)); err != errBitmapAllocDoubleFree {
+	if err := alloc.FreeFrame(mm.Frame(0)); err != errBitmapAllocDoubleFree {
 		t.Fatalf("expected error errBitmapAllocDoubleFree; got %v", err)
 	}
 
-	if err := alloc.FreeFrame(pmm.Frame(0xbadf00d)); err != errBitmapAllocFrameNotManaged {
+	if err := alloc.FreeFrame(mm.Frame(0xbadf00d)); err != errBitmapAllocFrameNotManaged {
 		t.Fatalf("expected error errBitmapFrameNotManaged; got %v", err)
 	}
 }
@ -393,16 +392,16 @@ func TestAllocatorPackageInit(t *testing.T) {
 	}()
 
 	var (
-		physMem = make([]byte, 2*mem.PageSize)
+		physMem = make([]byte, 2*mm.PageSize)
 	)
 	multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))
 
 	t.Run("success", func(t *testing.T) {
-		mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
+		mapFn = func(page mm.Page, frame mm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
 			return nil
 		}
 
-		reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
+		reserveRegionFn = func(_ uintptr) (uintptr, *kernel.Error) {
 			return uintptr(unsafe.Pointer(&physMem[0])), nil
 		}
 
@ -410,8 +409,8 @@
 			t.Fatal(err)
 		}
 
-		// At this point sysAllocFrame should work
-		if _, err := AllocFrame(); err != nil {
+		// At this point the bitmap allocator should be up and running
+		if _, err := bitmapAllocFrame(); err != nil {
 			t.Fatal(err)
 		}
 	})
@ -419,7 +418,7 @@
 	t.Run("error", func(t *testing.T) {
 		expErr := &kernel.Error{Module: "test", Message: "something went wrong"}
 
-		mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
+		mapFn = func(page mm.Page, frame mm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
 			return expErr
 		}
 
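These tests work because the package routes its dependencies (vmm.Map, vmm.EarlyReserveRegion) through package-level function variables such as mapFn and reserveRegionFn that a test can swap for stubs and later restore. A generic standalone sketch of that seam pattern (illustrative names, not from the commit):

package main

import "fmt"

// The production dependency is reached through a variable, not called directly.
var timeNow = func() int64 { return 1700000000 }

func parity() string {
	if timeNow()%2 == 0 {
		return "even second"
	}
	return "odd second"
}

func main() {
	orig := timeNow
	defer func() { timeNow = orig }() // restore the seam, as the kernel tests do

	timeNow = func() int64 { return 2 } // stub the dependency
	fmt.Println(parity())               // deterministic: "even second"
}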
@ -1,22 +1,17 @@
-package allocator
+package pmm
 
 import (
 	"gopheros/kernel"
-	"gopheros/kernel/hal/multiboot"
 	"gopheros/kernel/kfmt"
-	"gopheros/kernel/mem"
-	"gopheros/kernel/mem/pmm"
+	"gopheros/kernel/mm"
+	"gopheros/multiboot"
 )
 
 var (
-	// earlyAllocator is a boot mem allocator instance used for page
-	// allocations before switching to a more advanced allocator.
-	earlyAllocator bootMemAllocator
-
 	errBootAllocOutOfMemory = &kernel.Error{Module: "boot_mem_alloc", Message: "out of memory"}
 )
 
-// bootMemAllocator implements a rudimentary physical memory allocator which is
+// BootMemAllocator implements a rudimentary physical memory allocator which is
 // used to bootstrap the kernel.
 //
 // The allocator implementation uses the memory region information provided by
@ -28,27 +23,27 @@ var (
 // allocated pages. Once the kernel is properly initialized, the allocated
 // blocks will be handed over to a more advanced memory allocator that does
 // support freeing.
-type bootMemAllocator struct {
+type BootMemAllocator struct {
 	// allocCount tracks the total number of allocated frames.
 	allocCount uint64
 
 	// lastAllocFrame tracks the last allocated frame number.
-	lastAllocFrame pmm.Frame
+	lastAllocFrame mm.Frame
 
 	// Keep track of kernel location so we exclude this region.
 	kernelStartAddr, kernelEndAddr   uintptr
-	kernelStartFrame, kernelEndFrame pmm.Frame
+	kernelStartFrame, kernelEndFrame mm.Frame
 }
 
 // init sets up the boot memory allocator internal state.
-func (alloc *bootMemAllocator) init(kernelStart, kernelEnd uintptr) {
+func (alloc *BootMemAllocator) init(kernelStart, kernelEnd uintptr) {
 	// round down kernel start to the nearest page and round up kernel end
 	// to the nearest page.
-	pageSizeMinus1 := uintptr(mem.PageSize - 1)
+	pageSizeMinus1 := mm.PageSize - 1
 	alloc.kernelStartAddr = kernelStart
 	alloc.kernelEndAddr = kernelEnd
-	alloc.kernelStartFrame = pmm.Frame((kernelStart & ^pageSizeMinus1) >> mem.PageShift)
-	alloc.kernelEndFrame = pmm.Frame(((kernelEnd+pageSizeMinus1) & ^pageSizeMinus1)>>mem.PageShift) - 1
+	alloc.kernelStartFrame = mm.Frame((kernelStart & ^pageSizeMinus1) >> mm.PageShift)
+	alloc.kernelEndFrame = mm.Frame(((kernelEnd+pageSizeMinus1) & ^pageSizeMinus1)>>mm.PageShift) - 1
 }
 
@ -56,20 +51,20 @@ func (alloc *bootMemAllocator) init(kernelStart, kernelEnd uintptr) {
 // reserves the next available free frame.
 //
 // AllocFrame returns an error if no more memory can be allocated.
-func (alloc *bootMemAllocator) AllocFrame() (pmm.Frame, *kernel.Error) {
+func (alloc *BootMemAllocator) AllocFrame() (mm.Frame, *kernel.Error) {
 	var err = errBootAllocOutOfMemory
 
 	multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool {
 		// Ignore reserved regions and regions smaller than a single page
-		if region.Type != multiboot.MemAvailable || region.Length < uint64(mem.PageSize) {
+		if region.Type != multiboot.MemAvailable || region.Length < uint64(mm.PageSize) {
 			return true
 		}
 
 		// Reported addresses may not be page-aligned; round up to get
 		// the start frame and round down to get the end frame
-		pageSizeMinus1 := uint64(mem.PageSize - 1)
-		regionStartFrame := pmm.Frame(((region.PhysAddress + pageSizeMinus1) & ^pageSizeMinus1) >> mem.PageShift)
-		regionEndFrame := pmm.Frame(((region.PhysAddress+region.Length) & ^pageSizeMinus1)>>mem.PageShift) - 1
+		pageSizeMinus1 := uint64(mm.PageSize - 1)
+		regionStartFrame := mm.Frame(((region.PhysAddress + pageSizeMinus1) & ^pageSizeMinus1) >> mm.PageShift)
+		regionEndFrame := mm.Frame(((region.PhysAddress+region.Length) & ^pageSizeMinus1)>>mm.PageShift) - 1
 
 		// Skip over already allocated regions
 		if alloc.lastAllocFrame >= regionEndFrame {
@ -107,7 +102,7 @@ func (alloc *bootMemAllocator) AllocFrame() (pmm.Frame, *kernel.Error) {
 	})
 
 	if err != nil {
-		return pmm.InvalidFrame, errBootAllocOutOfMemory
+		return mm.InvalidFrame, errBootAllocOutOfMemory
 	}
 
 	alloc.allocCount++
@ -116,18 +111,18 @@ func (alloc *bootMemAllocator) AllocFrame() (pmm.Frame, *kernel.Error) {
 
 // printMemoryMap scans the memory region information provided by the
 // bootloader and prints out the system's memory map.
-func (alloc *bootMemAllocator) printMemoryMap() {
+func (alloc *BootMemAllocator) printMemoryMap() {
 	kfmt.Printf("[boot_mem_alloc] system memory map:\n")
-	var totalFree mem.Size
+	var totalFree uint64
 	multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool {
 		kfmt.Printf("\t[0x%10x - 0x%10x], size: %10d, type: %s\n", region.PhysAddress, region.PhysAddress+region.Length, region.Length, region.Type.String())
 
 		if region.Type == multiboot.MemAvailable {
-			totalFree += mem.Size(region.Length)
+			totalFree += region.Length
 		}
 		return true
 	})
-	kfmt.Printf("[boot_mem_alloc] available memory: %dKb\n", uint64(totalFree/mem.Kb))
+	kfmt.Printf("[boot_mem_alloc] available memory: %dKb\n", totalFree/1024)
 	kfmt.Printf("[boot_mem_alloc] kernel loaded at 0x%x - 0x%x\n", alloc.kernelStartAddr, alloc.kernelEndAddr)
 	kfmt.Printf("[boot_mem_alloc] size: %d bytes, reserved pages: %d\n",
 		uint64(alloc.kernelEndAddr-alloc.kernelStartAddr),
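As a worked example of the region rounding above (a standalone sketch, not from the commit): the start address rounds up and the end rounds down, so only whole pages inside the reported region are ever handed out.

package main

import "fmt"

const (
	pageShift      = 12
	pageSizeMinus1 = uint64(1)<<pageShift - 1
)

func main() {
	// An unaligned multiboot region: starts at 0x1234, spans 0x3000 bytes.
	physAddr, length := uint64(0x1234), uint64(0x3000)

	startFrame := ((physAddr + pageSizeMinus1) & ^pageSizeMinus1) >> pageShift // round up
	endFrame := ((physAddr+length)&^pageSizeMinus1)>>pageShift - 1             // round down

	fmt.Printf("usable frames: %d..%d\n", startFrame, endFrame) // 2..3
}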
@ -1,7 +1,7 @@
-package allocator
+package pmm
 
 import (
-	"gopheros/kernel/hal/multiboot"
+	"gopheros/multiboot"
 	"testing"
 	"unsafe"
 )
@ -59,7 +59,7 @@ func TestBootMemoryAllocator(t *testing.T) {
 		},
 	}
 
-	var alloc bootMemAllocator
+	var alloc BootMemAllocator
 	for specIndex, spec := range specs {
 		alloc.allocCount = 0
 		alloc.lastAllocFrame = 0
src/gopheros/kernel/mm/pmm/pmm.go (new file, 39 lines)
@ -0,0 +1,39 @@
package pmm

import (
	"gopheros/kernel"
	"gopheros/kernel/mm"
)

var (
	// bootMemAllocator is the page allocator used when the kernel boots.
	// It is used to bootstrap the bitmap allocator which is used for all
	// page allocations while the kernel runs.
	bootMemAllocator BootMemAllocator

	// bitmapAllocator is the standard allocator used by the kernel.
	bitmapAllocator BitmapAllocator
)

// Init sets up the kernel physical memory allocation sub-system.
func Init(kernelStart, kernelEnd uintptr) *kernel.Error {
	bootMemAllocator.init(kernelStart, kernelEnd)
	bootMemAllocator.printMemoryMap()
	mm.SetFrameAllocator(earlyAllocFrame)

	// Use the bootMemAllocator to bootstrap the bitmap allocator
	if err := bitmapAllocator.init(); err != nil {
		return err
	}
	mm.SetFrameAllocator(bitmapAllocFrame)

	return nil
}

func earlyAllocFrame() (mm.Frame, *kernel.Error) {
	return bootMemAllocator.AllocFrame()
}

func bitmapAllocFrame() (mm.Frame, *kernel.Error) {
	return bitmapAllocator.AllocFrame()
}
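The Init sequence above works by swapping the registered allocator function: frame requests made while the bitmap allocator builds its own state are served by the boot allocator, and once that succeeds the function pointer is re-registered. A minimal user-space sketch of this hand-off pattern (names here are illustrative, not from the commit):

package main

import "fmt"

type allocFn func() (uint64, error)

var frameAlloc allocFn // mirrors mm's registered allocator function

func main() {
	next := uint64(0)
	bootAlloc := func() (uint64, error) { next++; return next, nil } // bump allocator

	frameAlloc = bootAlloc // phase 1: boot allocator serves early requests
	f, _ := frameAlloc()
	fmt.Println("boot frame:", f)

	// phase 2: the long-lived allocator takes over once its state is ready
	frameAlloc = func() (uint64, error) { return 1000, nil }
	f, _ = frameAlloc()
	fmt.Println("bitmap frame:", f)
}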
@ -2,7 +2,7 @@ package vmm
 
 import (
 	"gopheros/kernel"
-	"gopheros/kernel/mem"
+	"gopheros/kernel/mm"
 )
 
 var (
@ -17,19 +17,19 @@ var (
 
 // EarlyReserveRegion reserves a page-aligned contiguous virtual memory region
 // with the requested size in the kernel address space and returns its virtual
-// address. If size is not a multiple of mem.PageSize it will be automatically
+// address. If size is not a multiple of mm.PageSize it will be automatically
 // rounded up.
 //
 // This function allocates regions starting at the end of the kernel address
 // space. It should only be used during the early stages of kernel initialization.
-func EarlyReserveRegion(size mem.Size) (uintptr, *kernel.Error) {
-	size = (size + (mem.PageSize - 1)) & ^(mem.PageSize - 1)
+func EarlyReserveRegion(size uintptr) (uintptr, *kernel.Error) {
+	size = (size + (mm.PageSize - 1)) & ^(mm.PageSize - 1)
 
 	// reserving a region of the requested size will cause an underflow
-	if uintptr(size) > earlyReserveLastUsed {
+	if size > earlyReserveLastUsed {
 		return 0, errEarlyReserveNoSpace
 	}
 
-	earlyReserveLastUsed -= uintptr(size)
+	earlyReserveLastUsed -= size
 	return earlyReserveLastUsed, nil
 }
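EarlyReserveRegion is effectively a downward bump allocator: it rounds the size up to a page multiple and moves a high-water mark toward lower addresses. A standalone sketch of the same arithmetic (the starting mark is an assumed value, not from the commit):

package main

import "fmt"

const pageSize = uintptr(1) << 12

var lastUsed uintptr = 1 << 31 // assumed top of the early-reserve area

func earlyReserve(size uintptr) (uintptr, error) {
	size = (size + (pageSize - 1)) & ^(pageSize - 1) // round up to a page multiple
	if size > lastUsed {
		return 0, fmt.Errorf("out of early reserve space") // would underflow
	}
	lastUsed -= size // regions grow downwards
	return lastUsed, nil
}

func main() {
	addr, _ := earlyReserve(100) // 100 bytes are rounded up to 4096
	fmt.Printf("reserved region at 0x%x\n", addr)
}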
src/gopheros/kernel/mm/vmm/fault.go (new file, 98 lines)
@ -0,0 +1,98 @@
package vmm

import (
	"gopheros/kernel"
	"gopheros/kernel/irq"
	"gopheros/kernel/kfmt"
	"gopheros/kernel/mm"
)

func pageFaultHandler(errorCode uint64, frame *irq.Frame, regs *irq.Regs) {
	var (
		faultAddress = uintptr(readCR2Fn())
		faultPage    = mm.PageFromAddress(faultAddress)
		pageEntry    *pageTableEntry
	)

	// Lookup entry for the page where the fault occurred
	walk(faultPage.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
		nextIsPresent := pte.HasFlags(FlagPresent)

		if pteLevel == pageLevels-1 && nextIsPresent {
			pageEntry = pte
		}

		// Abort walk if the next page table entry is missing
		return nextIsPresent
	})

	// CoW is supported for RO pages with the CoW flag set
	if pageEntry != nil && !pageEntry.HasFlags(FlagRW) && pageEntry.HasFlags(FlagCopyOnWrite) {
		var (
			copy    mm.Frame
			tmpPage mm.Page
			err     *kernel.Error
		)

		if copy, err = mm.AllocFrame(); err != nil {
			nonRecoverablePageFault(faultAddress, errorCode, frame, regs, err)
		} else if tmpPage, err = mapTemporaryFn(copy); err != nil {
			nonRecoverablePageFault(faultAddress, errorCode, frame, regs, err)
		} else {
			// Copy page contents, mark as RW and remove CoW flag
			kernel.Memcopy(faultPage.Address(), tmpPage.Address(), mm.PageSize)
			_ = unmapFn(tmpPage)

			// Update mapping to point to the new frame, flag it as RW and
			// remove the CoW flag
			pageEntry.ClearFlags(FlagCopyOnWrite)
			pageEntry.SetFlags(FlagPresent | FlagRW)
			pageEntry.SetFrame(copy)
			flushTLBEntryFn(faultPage.Address())

			// Fault recovered; retry the instruction that caused the fault
			return
		}
	}

	nonRecoverablePageFault(faultAddress, errorCode, frame, regs, errUnrecoverableFault)
}

func nonRecoverablePageFault(faultAddress uintptr, errorCode uint64, frame *irq.Frame, regs *irq.Regs, err *kernel.Error) {
	kfmt.Printf("\nPage fault while accessing address: 0x%16x\nReason: ", faultAddress)
	switch {
	case errorCode == 0:
		kfmt.Printf("read from non-present page")
	case errorCode == 1:
		kfmt.Printf("page protection violation (read)")
	case errorCode == 2:
		kfmt.Printf("write to non-present page")
	case errorCode == 3:
		kfmt.Printf("page protection violation (write)")
	case errorCode == 4:
		kfmt.Printf("page-fault in user-mode")
	case errorCode == 8:
		kfmt.Printf("page table has reserved bit set")
	case errorCode == 16:
		kfmt.Printf("instruction fetch")
	default:
		kfmt.Printf("unknown")
	}

	kfmt.Printf("\n\nRegisters:\n")
	regs.Print()
	frame.Print()

	// TODO: Revisit this when user-mode tasks are implemented
	panic(err)
}

func generalProtectionFaultHandler(_ uint64, frame *irq.Frame, regs *irq.Regs) {
	kfmt.Printf("\nGeneral protection fault while accessing address: 0x%x\n", readCR2Fn())
	kfmt.Printf("Registers:\n")
	regs.Print()
	frame.Print()

	// TODO: Revisit this when user-mode tasks are implemented
	panic(errUnrecoverableFault)
}
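To make the recovery path above concrete, here is a toy user-space simulation (assumed, not from the commit) of the copy-on-write step: on a write to a shared read-only page the handler allocates a fresh frame, copies the shared contents into it, and re-points the mapping so the retried write lands on the private copy.

package main

import "fmt"

type page struct {
	data []byte
	cow  bool // read-only, copy-on-write
}

// writeByte returns the page the write actually landed on.
func writeByte(p *page, off int, b byte) *page {
	if p.cow {
		// Fault path: clone the frame, then perform the write on the copy.
		clone := &page{data: append([]byte(nil), p.data...), cow: false}
		clone.data[off] = b
		return clone
	}
	p.data[off] = b
	return p
}

func main() {
	shared := &page{data: []byte("zeroed frame"), cow: true}
	private := writeByte(shared, 0, 'Z')
	fmt.Println(string(shared.data), "|", string(private.data)) // original untouched
}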
src/gopheros/kernel/mm/vmm/fault_test.go (new file, 188 lines)
@ -0,0 +1,188 @@
package vmm

import (
	"bytes"
	"fmt"
	"gopheros/kernel"
	"gopheros/kernel/cpu"
	"gopheros/kernel/irq"
	"gopheros/kernel/kfmt"
	"gopheros/kernel/mm"
	"strings"
	"testing"
	"unsafe"
)

func TestRecoverablePageFault(t *testing.T) {
	var (
		frame      irq.Frame
		regs       irq.Regs
		pageEntry  pageTableEntry
		origPage   = make([]byte, mm.PageSize)
		clonedPage = make([]byte, mm.PageSize)
		err        = &kernel.Error{Module: "test", Message: "something went wrong"}
	)

	defer func(origPtePtr func(uintptr) unsafe.Pointer) {
		ptePtrFn = origPtePtr
		readCR2Fn = cpu.ReadCR2
		mm.SetFrameAllocator(nil)
		mapTemporaryFn = MapTemporary
		unmapFn = Unmap
		flushTLBEntryFn = cpu.FlushTLBEntry
	}(ptePtrFn)

	specs := []struct {
		pteFlags   PageTableEntryFlag
		allocError *kernel.Error
		mapError   *kernel.Error
		expPanic   bool
	}{
		// Missing page
		{0, nil, nil, true},
		// Page is present but CoW flag not set
		{FlagPresent, nil, nil, true},
		// Page is present but both CoW and RW flags set
		{FlagPresent | FlagRW | FlagCopyOnWrite, nil, nil, true},
		// Page is present with CoW flag set but allocating a page copy fails
		{FlagPresent | FlagCopyOnWrite, err, nil, true},
		// Page is present with CoW flag set but mapping the page copy fails
		{FlagPresent | FlagCopyOnWrite, nil, err, true},
		// Page is present with CoW flag set
		{FlagPresent | FlagCopyOnWrite, nil, nil, false},
	}

	ptePtrFn = func(entry uintptr) unsafe.Pointer { return unsafe.Pointer(&pageEntry) }
	readCR2Fn = func() uint64 { return uint64(uintptr(unsafe.Pointer(&origPage[0]))) }
	unmapFn = func(_ mm.Page) *kernel.Error { return nil }
	flushTLBEntryFn = func(_ uintptr) {}

	for specIndex, spec := range specs {
		t.Run(fmt.Sprint(specIndex), func(t *testing.T) {
			defer func() {
				err := recover()
				if spec.expPanic && err == nil {
					t.Error("expected a panic")
				} else if !spec.expPanic {
					if err != nil {
						t.Error("unexpected panic")
						return
					}

					for i := 0; i < len(origPage); i++ {
						if origPage[i] != clonedPage[i] {
							t.Errorf("expected clone page to be a copy of the original page; mismatch at index %d", i)
						}
					}
				}
			}()

			mapTemporaryFn = func(f mm.Frame) (mm.Page, *kernel.Error) { return mm.Page(f), spec.mapError }
			mm.SetFrameAllocator(func() (mm.Frame, *kernel.Error) {
				addr := uintptr(unsafe.Pointer(&clonedPage[0]))
				return mm.Frame(addr >> mm.PageShift), spec.allocError
			})

			for i := 0; i < len(origPage); i++ {
				origPage[i] = byte(i % 256)
				clonedPage[i] = 0
			}

			pageEntry = 0
			pageEntry.SetFlags(spec.pteFlags)

			pageFaultHandler(2, &frame, &regs)
		})
	}
}

func TestNonRecoverablePageFault(t *testing.T) {
	defer func() {
		kfmt.SetOutputSink(nil)
	}()

	specs := []struct {
		errCode   uint64
		expReason string
	}{
		{0, "read from non-present page"},
		{1, "page protection violation (read)"},
		{2, "write to non-present page"},
		{3, "page protection violation (write)"},
		{4, "page-fault in user-mode"},
		{8, "page table has reserved bit set"},
		{16, "instruction fetch"},
		{0xf00, "unknown"},
	}

	var (
		regs  irq.Regs
		frame irq.Frame
		buf   bytes.Buffer
	)

	kfmt.SetOutputSink(&buf)
	for specIndex, spec := range specs {
		t.Run(fmt.Sprint(specIndex), func(t *testing.T) {
			buf.Reset()
			defer func() {
				if err := recover(); err != errUnrecoverableFault {
					t.Errorf("expected a panic with errUnrecoverableFault; got %v", err)
				}
			}()

			nonRecoverablePageFault(0xbadf00d000, spec.errCode, &frame, &regs, errUnrecoverableFault)
			if got := buf.String(); !strings.Contains(got, spec.expReason) {
				t.Errorf("expected reason %q; got output:\n%q", spec.expReason, got)
			}
		})
	}
}

func TestGPFHandler(t *testing.T) {
	defer func() {
		readCR2Fn = cpu.ReadCR2
	}()

	var (
		regs  irq.Regs
		frame irq.Frame
	)

	readCR2Fn = func() uint64 {
		return 0xbadf00d000
	}

	defer func() {
		if err := recover(); err != errUnrecoverableFault {
			t.Errorf("expected a panic with errUnrecoverableFault; got %v", err)
		}
	}()

	generalProtectionFaultHandler(0, &frame, &regs)
}
@ -3,14 +3,13 @@ package vmm
|
|||||||
import (
|
import (
|
||||||
"gopheros/kernel"
|
"gopheros/kernel"
|
||||||
"gopheros/kernel/cpu"
|
"gopheros/kernel/cpu"
|
||||||
"gopheros/kernel/mem"
|
"gopheros/kernel/mm"
|
||||||
"gopheros/kernel/mem/pmm"
|
|
||||||
"unsafe"
|
"unsafe"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ReservedZeroedFrame is a special zero-cleared frame allocated by the
|
// ReservedZeroedFrame is a special zero-cleared frame allocated by the
|
||||||
// vmm package's Init function. The purpose of this frame is to assist
|
// vmm package's Init function. The purpose of this frame is to assist
|
||||||
// in implementing on-demand memory allocation when mapping it in
|
// in implementing on-demand mmory allocation when mapping it in
|
||||||
// conjunction with the CopyOnWrite flag. Here is an example of how it
|
// conjunction with the CopyOnWrite flag. Here is an example of how it
|
||||||
// can be used:
|
// can be used:
|
||||||
//
|
//
|
||||||
@ -26,11 +25,11 @@ import (
|
|||||||
// }
|
// }
|
||||||
//
|
//
|
||||||
// In the above example, page mappings are set up for the requested number of
|
// In the above example, page mappings are set up for the requested number of
|
||||||
// pages but no physical memory is reserved for their contents. A write to any
|
// pages but no physical mmory is reserved for their contents. A write to any
|
||||||
// of the above pages will trigger a page-fault causing a new frame to be
|
// of the above pages will trigger a page-fault causing a new frame to be
|
||||||
// allocated, cleared (the blank frame is copied to the new frame) and
|
// allocated, cleared (the blank frame is copied to the new frame) and
|
||||||
// installed in-place with RW permissions.
|
// installed in-place with RW permissions.
|
||||||
var ReservedZeroedFrame pmm.Frame
|
var ReservedZeroedFrame mm.Frame
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// protectReservedZeroedPage is set to true to prevent mapping to
|
// protectReservedZeroedPage is set to true to prevent mapping to
|
||||||
@ -53,13 +52,13 @@ var (
 	errAttemptToRWMapReservedFrame = &kernel.Error{Module: "vmm", Message: "reserved blank frame cannot be mapped with a RW flag"}
 )

 // Map establishes a mapping between a virtual page and a physical memory frame
 // using the currently active page directory table. Calls to Map will use the
 // supplied physical frame allocator to initialize missing page tables at each
 // paging level supported by the MMU.
 //
 // Attempts to map ReservedZeroedFrame with a RW flag will result in an error.
-func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
+func Map(page mm.Page, frame mm.Frame, flags PageTableEntryFlag) *kernel.Error {
 	if protectReservedZeroedPage && frame == ReservedZeroedFrame && (flags&FlagRW) != 0 {
 		return errAttemptToRWMapReservedFrame
 	}
@ -85,8 +84,8 @@ func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
 		// Next table does not yet exist; we need to allocate a
 		// physical frame for it, map it and clear its contents.
 		if !pte.HasFlags(FlagPresent) {
-			var newTableFrame pmm.Frame
-			newTableFrame, err = frameAllocator()
+			var newTableFrame mm.Frame
+			newTableFrame, err = mm.AllocFrame()
 			if err != nil {
 				return false
 			}
@ -98,7 +97,7 @@ func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
 			// The next pte entry becomes available but we need to
 			// make sure that the new page is properly cleared
 			nextTableAddr := (uintptr(unsafe.Pointer(pte)) << pageLevelBits[pteLevel+1])
-			mem.Memset(nextAddrFn(nextTableAddr), 0, mem.PageSize)
+			kernel.Memset(nextAddrFn(nextTableAddr), 0, mm.PageSize)
 		}

 		return true
@ -107,40 +106,40 @@ func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
 	return err
 }

 // MapRegion establishes a mapping to the physical memory region which starts
 // at the given frame and ends at frame + pages(size). The size argument is
 // always rounded up to the nearest page boundary. MapRegion reserves the next
 // available region in the active virtual address space, establishes the
 // mapping and returns the Page that corresponds to the region start.
-func MapRegion(frame pmm.Frame, size mem.Size, flags PageTableEntryFlag) (Page, *kernel.Error) {
+func MapRegion(frame mm.Frame, size uintptr, flags PageTableEntryFlag) (mm.Page, *kernel.Error) {
 	// Reserve next free block in the address space
-	size = (size + (mem.PageSize - 1)) & ^(mem.PageSize - 1)
+	size = (size + (mm.PageSize - 1)) & ^(mm.PageSize - 1)
 	startPage, err := earlyReserveRegionFn(size)
 	if err != nil {
 		return 0, err
 	}

-	pageCount := size >> mem.PageShift
-	for page := PageFromAddress(startPage); pageCount > 0; pageCount, page, frame = pageCount-1, page+1, frame+1 {
+	pageCount := size >> mm.PageShift
+	for page := mm.PageFromAddress(startPage); pageCount > 0; pageCount, page, frame = pageCount-1, page+1, frame+1 {
 		if err := mapFn(page, frame, flags); err != nil {
 			return 0, err
 		}
 	}

-	return PageFromAddress(startPage), nil
+	return mm.PageFromAddress(startPage), nil
 }

 // IdentityMapRegion establishes an identity mapping to the physical memory
 // region which starts at the given frame and ends at frame + pages(size). The
 // size argument is always rounded up to the nearest page boundary.
 // IdentityMapRegion returns the Page that corresponds to the region
 // start.
-func IdentityMapRegion(startFrame pmm.Frame, size mem.Size, flags PageTableEntryFlag) (Page, *kernel.Error) {
-	startPage := Page(startFrame)
-	pageCount := Page(((size + (mem.PageSize - 1)) & ^(mem.PageSize - 1)) >> mem.PageShift)
+func IdentityMapRegion(startFrame mm.Frame, size uintptr, flags PageTableEntryFlag) (mm.Page, *kernel.Error) {
+	startPage := mm.Page(startFrame)
+	pageCount := mm.Page(((size + (mm.PageSize - 1)) & ^(mm.PageSize - 1)) >> mm.PageShift)

 	for curPage := startPage; curPage < startPage+pageCount; curPage++ {
-		if err := mapFn(curPage, pmm.Frame(curPage), flags); err != nil {
+		if err := mapFn(curPage, mm.Frame(curPage), flags); err != nil {
 			return 0, err
 		}
 	}
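The rounding expression used by both MapRegion and IdentityMapRegion above is worth spelling out. A standalone sketch of the same arithmetic, assuming the usual 4 KiB pages (the local constants mirror mm.PageSize and mm.PageShift):

package main

import "fmt"

const (
	pageShift = 12             // mirrors mm.PageShift on amd64 (assumption)
	pageSize  = 1 << pageShift // 4096
)

func main() {
	size := uintptr(4097)

	// Round up to the nearest page boundary, then derive the page count.
	rounded := (size + (pageSize - 1)) &^ (pageSize - 1)
	fmt.Println(rounded, rounded>>pageShift) // 8192 2
}

This is also why the tests further down pass a size of 4097 and expect two Map calls.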
@ -148,26 +147,26 @@ func IdentityMapRegion(startFrame pmm.Frame, size mem.Size, flags PageTableEntry
 	return startPage, nil
 }

 // MapTemporary establishes a temporary RW mapping of a physical memory frame
 // to a fixed virtual address, overwriting any previous mapping. The temporary
 // mapping mechanism is primarily used by the kernel to access and initialize
 // inactive page tables.
 //
 // Attempts to map ReservedZeroedFrame will result in an error.
-func MapTemporary(frame pmm.Frame) (Page, *kernel.Error) {
+func MapTemporary(frame mm.Frame) (mm.Page, *kernel.Error) {
 	if protectReservedZeroedPage && frame == ReservedZeroedFrame {
 		return 0, errAttemptToRWMapReservedFrame
 	}

-	if err := Map(PageFromAddress(tempMappingAddr), frame, FlagPresent|FlagRW); err != nil {
+	if err := Map(mm.PageFromAddress(tempMappingAddr), frame, FlagPresent|FlagRW); err != nil {
 		return 0, err
 	}

-	return PageFromAddress(tempMappingAddr), nil
+	return mm.PageFromAddress(tempMappingAddr), nil
 }

 // Unmap removes a mapping previously installed via a call to Map or MapTemporary.
-func Unmap(page Page) *kernel.Error {
+func Unmap(page mm.Page) *kernel.Error {
 	var err *kernel.Error

 	walk(page.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
@ -195,3 +194,24 @@ func Unmap(page Page) *kernel.Error {

 	return err
 }
+
+// Translate returns the physical address that corresponds to the supplied
+// virtual address or ErrInvalidMapping if the virtual address does not
+// correspond to a mapped physical address.
+func Translate(virtAddr uintptr) (uintptr, *kernel.Error) {
+	pte, err := pteForAddress(virtAddr)
+	if err != nil {
+		return 0, err
+	}
+
+	// Calculate the physical address by taking the physical frame address and
+	// appending the offset from the virtual address
+	physAddr := pte.Frame().Address() + PageOffset(virtAddr)
+	return physAddr, nil
+}
+
+// PageOffset returns the offset within the page specified by a virtual
+// address.
+func PageOffset(virtAddr uintptr) uintptr {
+	return (virtAddr & ((1 << pageLevelShifts[pageLevels-1]) - 1))
+}
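Translate and PageOffset are new in this commit. A standalone sketch of what PageOffset computes; the shift value is an assumption matching amd64's 4 KiB pages (pageLevelShifts[pageLevels-1] == 12):

package main

import "fmt"

const lastLevelShift = 12 // amd64 page offset bits (assumption)

func pageOffset(virtAddr uintptr) uintptr {
	return virtAddr & ((1 << lastLevelShift) - 1)
}

func main() {
	// Only the low 12 bits survive: the offset within the 4 KiB page.
	fmt.Printf("%#x\n", pageOffset(0x401234)) // 0x234
}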
@ -2,8 +2,7 @@ package vmm

 import (
 	"gopheros/kernel"
-	"gopheros/kernel/mem"
-	"gopheros/kernel/mem/pmm"
+	"gopheros/kernel/mm"
 	"runtime"
 	"testing"
 	"unsafe"
@ -25,17 +24,17 @@ func TestMapTemporaryAmd64(t *testing.T) {
 		ptePtrFn = origPtePtr
 		nextAddrFn = origNextAddrFn
 		flushTLBEntryFn = origFlushTLBEntryFn
-		frameAllocator = nil
+		mm.SetFrameAllocator(nil)
 	}(ptePtrFn, nextAddrFn, flushTLBEntryFn)

-	var physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry
+	var physPages [pageLevels][mm.PageSize >> mm.PointerShift]pageTableEntry
 	nextPhysPage := 0

 	// allocFn returns pages from index 1; we keep index 0 for the P4 entry
-	SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
+	mm.SetFrameAllocator(func() (mm.Frame, *kernel.Error) {
 		nextPhysPage++
 		pageAddr := unsafe.Pointer(&physPages[nextPhysPage][0])
-		return pmm.Frame(uintptr(pageAddr) >> mem.PageShift), nil
+		return mm.Frame(uintptr(pageAddr) >> mm.PageShift), nil
 	})

 	pteCallCount := 0
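The fake allocator above hands out "frames" backed by ordinary Go arrays by shifting their addresses right by the page shift; the pte.Frame() comparisons later in the test rely on the same conversion. As a standalone sketch (4 KiB pages assumed):

package main

import "fmt"

const pageShift = 12 // mirrors mm.PageShift (assumption)

func main() {
	addr := uintptr(0x203000) // a page-aligned address
	frame := addr >> pageShift

	// mm.Frame.Address() performs the inverse shift.
	fmt.Printf("frame %#x -> address %#x\n", frame, frame<<pageShift)
}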
@ -43,7 +42,7 @@ func TestMapTemporaryAmd64(t *testing.T) {
 		pteCallCount++
 		// The last 12 bits encode the page table offset in bytes
 		// which we need to convert to a uint64 entry
-		pteIndex := (entry & uintptr(mem.PageSize-1)) >> mem.PointerShift
+		pteIndex := (entry & uintptr(mm.PageSize-1)) >> mm.PointerShift
 		return unsafe.Pointer(&physPages[pteCallCount-1][pteIndex])
 	}

@ -61,7 +60,7 @@ func TestMapTemporaryAmd64(t *testing.T) {
 	// p3 index: 511
 	// p2 index: 511
 	// p1 index: 511
-	frame := pmm.Frame(123)
+	frame := mm.Frame(123)
 	levelIndices := []uint{510, 511, 511, 511}

 	page, err := MapTemporary(frame)
@ -81,7 +80,7 @@ func TestMapTemporaryAmd64(t *testing.T) {

 		switch {
 		case level < pageLevels-1:
-			if exp, got := pmm.Frame(uintptr(unsafe.Pointer(&physPages[level+1][0]))>>mem.PageShift), pte.Frame(); got != exp {
+			if exp, got := mm.Frame(uintptr(unsafe.Pointer(&physPages[level+1][0]))>>mm.PageShift), pte.Frame(); got != exp {
 				t.Errorf("[pte at level %d] expected entry frame to be %d; got %d", level, exp, got)
 			}
 		default:
@ -105,18 +104,18 @@ func TestMapRegion(t *testing.T) {

 	t.Run("success", func(t *testing.T) {
 		mapCallCount := 0
-		mapFn = func(_ Page, _ pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
+		mapFn = func(_ mm.Page, _ mm.Frame, flags PageTableEntryFlag) *kernel.Error {
 			mapCallCount++
 			return nil
 		}

 		earlyReserveRegionCallCount := 0
-		earlyReserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
+		earlyReserveRegionFn = func(_ uintptr) (uintptr, *kernel.Error) {
 			earlyReserveRegionCallCount++
 			return 0xf00, nil
 		}

-		if _, err := MapRegion(pmm.Frame(0xdf0000), 4097, FlagPresent|FlagRW); err != nil {
+		if _, err := MapRegion(mm.Frame(0xdf0000), 4097, FlagPresent|FlagRW); err != nil {
 			t.Fatal(err)
 		}

@ -132,11 +131,11 @@ func TestMapRegion(t *testing.T) {
 	t.Run("EarlyReserveRegion fails", func(t *testing.T) {
 		expErr := &kernel.Error{Module: "test", Message: "out of address space"}

-		earlyReserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
+		earlyReserveRegionFn = func(_ uintptr) (uintptr, *kernel.Error) {
 			return 0, expErr
 		}

-		if _, err := MapRegion(pmm.Frame(0xdf0000), 128000, FlagPresent|FlagRW); err != expErr {
+		if _, err := MapRegion(mm.Frame(0xdf0000), 128000, FlagPresent|FlagRW); err != expErr {
 			t.Fatalf("expected error: %v; got %v", expErr, err)
 		}
 	})
@ -145,16 +144,16 @@ func TestMapRegion(t *testing.T) {
 		expErr := &kernel.Error{Module: "test", Message: "map failed"}

 		earlyReserveRegionCallCount := 0
-		earlyReserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
+		earlyReserveRegionFn = func(_ uintptr) (uintptr, *kernel.Error) {
 			earlyReserveRegionCallCount++
 			return 0xf00, nil
 		}

-		mapFn = func(_ Page, _ pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
+		mapFn = func(_ mm.Page, _ mm.Frame, flags PageTableEntryFlag) *kernel.Error {
 			return expErr
 		}

-		if _, err := MapRegion(pmm.Frame(0xdf0000), 128000, FlagPresent|FlagRW); err != expErr {
+		if _, err := MapRegion(mm.Frame(0xdf0000), 128000, FlagPresent|FlagRW); err != expErr {
 			t.Fatalf("expected error: %v; got %v", expErr, err)
 		}

@ -171,12 +170,12 @@ func TestIdentityMapRegion(t *testing.T) {

 	t.Run("success", func(t *testing.T) {
 		mapCallCount := 0
-		mapFn = func(_ Page, _ pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
+		mapFn = func(_ mm.Page, _ mm.Frame, flags PageTableEntryFlag) *kernel.Error {
 			mapCallCount++
 			return nil
 		}

-		if _, err := IdentityMapRegion(pmm.Frame(0xdf0000), 4097, FlagPresent|FlagRW); err != nil {
+		if _, err := IdentityMapRegion(mm.Frame(0xdf0000), 4097, FlagPresent|FlagRW); err != nil {
 			t.Fatal(err)
 		}

@ -188,11 +187,11 @@ func TestIdentityMapRegion(t *testing.T) {
 	t.Run("Map fails", func(t *testing.T) {
 		expErr := &kernel.Error{Module: "test", Message: "map failed"}

-		mapFn = func(_ Page, _ pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
+		mapFn = func(_ mm.Page, _ mm.Frame, flags PageTableEntryFlag) *kernel.Error {
 			return expErr
 		}

-		if _, err := IdentityMapRegion(pmm.Frame(0xdf0000), 128000, FlagPresent|FlagRW); err != expErr {
+		if _, err := IdentityMapRegion(mm.Frame(0xdf0000), 128000, FlagPresent|FlagRW); err != expErr {
 			t.Fatalf("expected error: %v; got %v", expErr, err)
 		}
 	})
@ -209,11 +208,11 @@ func TestMapTemporaryErrorsAmd64(t *testing.T) {
 		flushTLBEntryFn = origFlushTLBEntryFn
 	}(ptePtrFn, nextAddrFn, flushTLBEntryFn)

-	var physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry
+	var physPages [pageLevels][mm.PageSize >> mm.PointerShift]pageTableEntry

 	// The reserved virt address uses the following page level indices: 510, 511, 511, 511
 	p4Index := 510
-	frame := pmm.Frame(123)
+	frame := mm.Frame(123)

 	t.Run("encounter huge page", func(t *testing.T) {
 		physPages[0][p4Index].SetFlags(FlagPresent | FlagHugePage)
@ -221,7 +220,7 @@ func TestMapTemporaryErrorsAmd64(t *testing.T) {
 		ptePtrFn = func(entry uintptr) unsafe.Pointer {
 			// The last 12 bits encode the page table offset in bytes
 			// which we need to convert to a uint64 entry
-			pteIndex := (entry & uintptr(mem.PageSize-1)) >> mem.PointerShift
+			pteIndex := (entry & uintptr(mm.PageSize-1)) >> mm.PointerShift
 			return unsafe.Pointer(&physPages[0][pteIndex])
 		}

@ -231,12 +230,12 @@ func TestMapTemporaryErrorsAmd64(t *testing.T) {
 	})

 	t.Run("allocFn returns an error", func(t *testing.T) {
-		defer func() { frameAllocator = nil }()
+		defer func() { mm.SetFrameAllocator(nil) }()
 		physPages[0][p4Index] = 0

 		expErr := &kernel.Error{Module: "test", Message: "out of memory"}

-		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
+		mm.SetFrameAllocator(func() (mm.Frame, *kernel.Error) {
 			return 0, expErr
 		})

@ -249,7 +248,7 @@ func TestMapTemporaryErrorsAmd64(t *testing.T) {
 		defer func() { protectReservedZeroedPage = false }()

 		protectReservedZeroedPage = true
-		if err := Map(Page(0), ReservedZeroedFrame, FlagRW); err != errAttemptToRWMapReservedFrame {
+		if err := Map(mm.Page(0), ReservedZeroedFrame, FlagRW); err != errAttemptToRWMapReservedFrame {
 			t.Fatalf("expected errAttemptToRWMapReservedFrame; got: %v", err)
 		}
 	})
@ -275,15 +274,15 @@ func TestUnmapAmd64(t *testing.T) {
 	}(ptePtrFn, flushTLBEntryFn)

 	var (
-		physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry
-		frame     = pmm.Frame(123)
+		physPages [pageLevels][mm.PageSize >> mm.PointerShift]pageTableEntry
+		frame     = mm.Frame(123)
 	)

 	// Emulate a page mapped to virtAddr 0 across all page levels
 	for level := 0; level < pageLevels; level++ {
 		physPages[level][0].SetFlags(FlagPresent | FlagRW)
 		if level < pageLevels-1 {
-			physPages[level][0].SetFrame(pmm.Frame(uintptr(unsafe.Pointer(&physPages[level+1][0])) >> mem.PageShift))
+			physPages[level][0].SetFrame(mm.Frame(uintptr(unsafe.Pointer(&physPages[level+1][0])) >> mm.PageShift))
 		} else {
 			physPages[level][0].SetFrame(frame)

@ -301,7 +300,7 @@ func TestUnmapAmd64(t *testing.T) {
 		flushTLBEntryCallCount++
 	}

-	if err := Unmap(PageFromAddress(0)); err != nil {
+	if err := Unmap(mm.PageFromAddress(0)); err != nil {
 		t.Fatal(err)
 	}

@ -313,7 +312,7 @@ func TestUnmapAmd64(t *testing.T) {
 			if !pte.HasFlags(FlagPresent) {
 				t.Errorf("[pte at level %d] expected entry to retain FlagPresent", level)
 			}
-			if exp, got := pmm.Frame(uintptr(unsafe.Pointer(&physPages[level+1][0]))>>mem.PageShift), pte.Frame(); got != exp {
+			if exp, got := mm.Frame(uintptr(unsafe.Pointer(&physPages[level+1][0]))>>mm.PageShift), pte.Frame(); got != exp {
 				t.Errorf("[pte at level %d] expected entry frame to still be %d; got %d", level, exp, got)
 			}
 		default:
@ -344,7 +343,7 @@ func TestUnmapErrorsAmd64(t *testing.T) {
 		flushTLBEntryFn = origFlushTLBEntryFn
 	}(ptePtrFn, nextAddrFn, flushTLBEntryFn)

-	var physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry
+	var physPages [pageLevels][mm.PageSize >> mm.PointerShift]pageTableEntry

 	t.Run("encounter huge page", func(t *testing.T) {
 		physPages[0][0].SetFlags(FlagPresent | FlagHugePage)
@ -352,11 +351,11 @@ func TestUnmapErrorsAmd64(t *testing.T) {
 		ptePtrFn = func(entry uintptr) unsafe.Pointer {
 			// The last 12 bits encode the page table offset in bytes
 			// which we need to convert to a uint64 entry
-			pteIndex := (entry & uintptr(mem.PageSize-1)) >> mem.PointerShift
+			pteIndex := (entry & uintptr(mm.PageSize-1)) >> mm.PointerShift
 			return unsafe.Pointer(&physPages[0][pteIndex])
 		}

-		if err := Unmap(PageFromAddress(0)); err != errNoHugePageSupport {
+		if err := Unmap(mm.PageFromAddress(0)); err != errNoHugePageSupport {
 			t.Fatalf("expected to get errNoHugePageSupport; got %v", err)
 		}
 	})
@ -364,8 +363,63 @@ func TestUnmapErrorsAmd64(t *testing.T) {
 	t.Run("virtual address not mapped", func(t *testing.T) {
 		physPages[0][0].ClearFlags(FlagPresent)

-		if err := Unmap(PageFromAddress(0)); err != ErrInvalidMapping {
+		if err := Unmap(mm.PageFromAddress(0)); err != ErrInvalidMapping {
 			t.Fatalf("expected to get ErrInvalidMapping; got %v", err)
 		}
 	})
 }
+
+func TestTranslateAmd64(t *testing.T) {
+	if runtime.GOARCH != "amd64" {
+		t.Skip("test requires amd64 runtime; skipping")
+	}
+
+	defer func(origPtePtr func(uintptr) unsafe.Pointer) {
+		ptePtrFn = origPtePtr
+	}(ptePtrFn)
+
+	// the virtual address just contains the page offset
+	virtAddr := uintptr(1234)
+	expFrame := mm.Frame(42)
+	expPhysAddr := expFrame.Address() + virtAddr
+	specs := [][pageLevels]bool{
+		{true, true, true, true},
+		{false, true, true, true},
+		{true, false, true, true},
+		{true, true, false, true},
+		{true, true, true, false},
+	}
+
+	for specIndex, spec := range specs {
+		pteCallCount := 0
+		ptePtrFn = func(entry uintptr) unsafe.Pointer {
+			var pte pageTableEntry
+			pte.SetFrame(expFrame)
+			if specs[specIndex][pteCallCount] {
+				pte.SetFlags(FlagPresent)
+			}
+			pteCallCount++
+
+			return unsafe.Pointer(&pte)
+		}
+
+		// An error is expected if any page level contains a non-present page
+		expError := false
+		for _, hasMapping := range spec {
+			if !hasMapping {
+				expError = true
+				break
+			}
+		}
+
+		physAddr, err := Translate(virtAddr)
+		switch {
+		case expError && err != ErrInvalidMapping:
+			t.Errorf("[spec %d] expected to get ErrInvalidMapping; got %v", specIndex, err)
+		case !expError && err != nil:
+			t.Errorf("[spec %d] unexpected error %v", specIndex, err)
+		case !expError && physAddr != expPhysAddr:
+			t.Errorf("[spec %d] expected phys addr to be 0x%x; got 0x%x", specIndex, expPhysAddr, physAddr)
+		}
+	}
+}
350	src/gopheros/kernel/mm/vmm/pdt.go	Normal file
@ -0,0 +1,350 @@
package vmm

import (
	"gopheros/kernel"
	"gopheros/kernel/cpu"
	"gopheros/kernel/mm"
	"gopheros/multiboot"
	"unsafe"
)

var (
	// activePDTFn is used by tests to override calls to activePDT which
	// will cause a fault if called in user-mode.
	activePDTFn = cpu.ActivePDT

	// switchPDTFn is used by tests to override calls to switchPDT which
	// will cause a fault if called in user-mode.
	switchPDTFn = cpu.SwitchPDT

	// mapFn is used by tests and is automatically inlined by the compiler.
	mapFn = Map

	// mapTemporaryFn is used by tests and is automatically inlined by the compiler.
	mapTemporaryFn = MapTemporary

	// unmapFn is used by tests and is automatically inlined by the compiler.
	unmapFn = Unmap

	// visitElfSectionsFn is used by tests and is automatically inlined
	// by the compiler.
	visitElfSectionsFn = multiboot.VisitElfSections

	// The granular PDT which is set up by the setupPDTForKernel call. Its
	// entries correspond to the various kernel section address/size tuples
	// as reported by the bootloader.
	kernelPDT PageDirectoryTable
)

// PageDirectoryTable describes the top-most table in a multi-level paging scheme.
type PageDirectoryTable struct {
	pdtFrame mm.Frame
}

// Init sets up the page table directory starting at the supplied physical
// address. If the supplied frame does not match the currently active PDT, then
// Init assumes that this is a new page table directory that needs
// bootstrapping. In such a case, a temporary mapping is established so that
// Init can:
// - call kernel.Memset to clear the frame contents
// - setup a recursive mapping for the last table entry to the page itself.
func (pdt *PageDirectoryTable) Init(pdtFrame mm.Frame) *kernel.Error {
	pdt.pdtFrame = pdtFrame

	// Check the active PDT physical address. If it matches the input pdt then
	// nothing more needs to be done.
	activePdtAddr := activePDTFn()
	if pdtFrame.Address() == activePdtAddr {
		return nil
	}

	// Create a temporary mapping for the pdt frame so we can work on it.
	pdtPage, err := mapTemporaryFn(pdtFrame)
	if err != nil {
		return err
	}

	// Clear the page contents and setup a recursive mapping for the last PDT entry.
	kernel.Memset(pdtPage.Address(), 0, mm.PageSize)
	lastPdtEntry := (*pageTableEntry)(unsafe.Pointer(pdtPage.Address() + (((1 << pageLevelBits[0]) - 1) << mm.PointerShift)))
	*lastPdtEntry = 0
	lastPdtEntry.SetFlags(FlagPresent | FlagRW)
	lastPdtEntry.SetFrame(pdtFrame)

	// Remove the temporary mapping.
	_ = unmapFn(pdtPage)

	return nil
}
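The lastPdtEntry expression in Init computes the byte offset of the final entry in the table. A standalone sketch with the amd64 values; both constants are assumptions mirroring pageLevelBits[0] and mm.PointerShift:

package main

import "fmt"

const (
	topLevelBits = 9 // 512 entries per table (assumption)
	pointerShift = 3 // 8-byte entries (assumption)
)

func main() {
	// Entry 511 of a 512-entry table of 8-byte entries sits 0xff8 bytes in.
	offset := ((1 << topLevelBits) - 1) << pointerShift
	fmt.Printf("last entry offset: %#x\n", offset) // 0xff8
}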
// Map establishes a mapping between a virtual page and a physical memory frame
// using this PDT. This method behaves in a similar fashion to the global Map()
// function with the difference that it also supports inactive page PDTs by
// establishing a temporary mapping so that Map() can access the inactive PDT
// entries.
func (pdt PageDirectoryTable) Map(page mm.Page, frame mm.Frame, flags PageTableEntryFlag) *kernel.Error {
	var (
		activePdtFrame   = mm.Frame(activePDTFn() >> mm.PageShift)
		lastPdtEntryAddr uintptr
		lastPdtEntry     *pageTableEntry
	)
	// If this table is not active we need to temporarily map it to the
	// last entry in the active PDT so we can access it using the recursive
	// virtual address scheme.
	if activePdtFrame != pdt.pdtFrame {
		lastPdtEntryAddr = activePdtFrame.Address() + (((1 << pageLevelBits[0]) - 1) << mm.PointerShift)
		lastPdtEntry = (*pageTableEntry)(unsafe.Pointer(lastPdtEntryAddr))
		lastPdtEntry.SetFrame(pdt.pdtFrame)
		flushTLBEntryFn(lastPdtEntryAddr)
	}

	err := mapFn(page, frame, flags)

	if activePdtFrame != pdt.pdtFrame {
		lastPdtEntry.SetFrame(activePdtFrame)
		flushTLBEntryFn(lastPdtEntryAddr)
	}

	return err
}

// Unmap removes a mapping previously installed by a call to Map() on this PDT.
// This method behaves in a similar fashion to the global Unmap() function with
// the difference that it also supports inactive page PDTs by establishing a
// temporary mapping so that Unmap() can access the inactive PDT entries.
func (pdt PageDirectoryTable) Unmap(page mm.Page) *kernel.Error {
	var (
		activePdtFrame   = mm.Frame(activePDTFn() >> mm.PageShift)
		lastPdtEntryAddr uintptr
		lastPdtEntry     *pageTableEntry
	)
	// If this table is not active we need to temporarily map it to the
	// last entry in the active PDT so we can access it using the recursive
	// virtual address scheme.
	if activePdtFrame != pdt.pdtFrame {
		lastPdtEntryAddr = activePdtFrame.Address() + (((1 << pageLevelBits[0]) - 1) << mm.PointerShift)
		lastPdtEntry = (*pageTableEntry)(unsafe.Pointer(lastPdtEntryAddr))
		lastPdtEntry.SetFrame(pdt.pdtFrame)
		flushTLBEntryFn(lastPdtEntryAddr)
	}

	err := unmapFn(page)

	if activePdtFrame != pdt.pdtFrame {
		lastPdtEntry.SetFrame(activePdtFrame)
		flushTLBEntryFn(lastPdtEntryAddr)
	}

	return err
}
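A hedged usage sketch of the PageDirectoryTable API above: bootstrap a fresh, inactive address space, populate it, then switch to it. The page and frame values are placeholders, not addresses the kernel actually uses:

func newAddressSpace() *kernel.Error {
	var pdt PageDirectoryTable

	pdtFrame, err := mm.AllocFrame()
	if err != nil {
		return err
	}

	// Init clears the frame and installs the recursive last entry.
	if err = pdt.Init(pdtFrame); err != nil {
		return err
	}

	// Map works even while pdt is inactive: the method temporarily
	// re-points the active PDT's last entry at pdtFrame.
	if err = pdt.Map(mm.PageFromAddress(0x100000), mm.Frame(0x1234), FlagPresent|FlagRW); err != nil {
		return err
	}

	pdt.Activate()
	return nil
}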
// Activate enables this page directory table and flushes the TLB.
func (pdt PageDirectoryTable) Activate() {
	switchPDTFn(pdt.pdtFrame.Address())
}

// setupPDTForKernel queries the multiboot package for the ELF sections that
// correspond to the loaded kernel image and establishes a new granular PDT for
// the kernel's VMA using the appropriate flags (e.g. NX for data sections, RW
// for writable sections etc.).
func setupPDTForKernel(kernelPageOffset uintptr) *kernel.Error {
	// Allocate a frame for the page directory and initialize it.
	kernelPDTFrame, err := mm.AllocFrame()
	if err != nil {
		return err
	}

	if err = kernelPDT.Init(kernelPDTFrame); err != nil {
		return err
	}

	// Query the ELF sections of the kernel image and establish mappings
	// for each one using the appropriate flags.
	var visitor = func(_ string, secFlags multiboot.ElfSectionFlag, secAddress uintptr, secSize uint64) {
		// Bail out if we have encountered an error; also ignore sections
		// not using the kernel's VMA.
		if err != nil || secAddress < kernelPageOffset {
			return
		}

		flags := FlagPresent

		if (secFlags & multiboot.ElfSectionExecutable) == 0 {
			flags |= FlagNoExecute
		}

		if (secFlags & multiboot.ElfSectionWritable) != 0 {
			flags |= FlagRW
		}

		// Map the start and end VMA addresses for the section contents
		// into a start and end (inclusive) page number. To figure out
		// the physical start frame we just need to subtract the
		// kernel's VMA offset from the virtual address and round that
		// down to the nearest frame number.
		curPage := mm.PageFromAddress(secAddress)
		lastPage := mm.PageFromAddress(secAddress + uintptr(secSize-1))
		curFrame := mm.Frame((secAddress - kernelPageOffset) >> mm.PageShift)
		for ; curPage <= lastPage; curFrame, curPage = curFrame+1, curPage+1 {
			if err = kernelPDT.Map(curPage, curFrame, flags); err != nil {
				return
			}
		}
	}

	// Use the noescape hack to prevent the compiler from leaking the visitor
	// function literal to the heap.
	visitElfSectionsFn(
		*(*multiboot.ElfSectionVisitor)(noEscape(unsafe.Pointer(&visitor))),
	)

	// If an error occurred while mapping the ELF sections, bail out.
	if err != nil {
		return err
	}

	// Ensure that any pages mapped by the memory allocator using
	// EarlyReserveRegion are copied to the new page directory.
	for rsvAddr := earlyReserveLastUsed; rsvAddr < tempMappingAddr; rsvAddr += mm.PageSize {
		page := mm.PageFromAddress(rsvAddr)

		frameAddr, err := translateFn(rsvAddr)
		if err != nil {
			return err
		}

		if err = kernelPDT.Map(page, mm.Frame(frameAddr>>mm.PageShift), FlagPresent|FlagRW); err != nil {
			return err
		}
	}

	// Activate the new PDT. After this point, the identity mapping for the
	// physical memory addresses where the kernel is loaded becomes invalid.
	kernelPDT.Activate()

	return nil
}
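The page/frame arithmetic in the visitor above is easy to get wrong for sections that are not page-aligned. A standalone sketch, assuming 4 KiB pages and an illustrative amd64 VMA base (this mirrors the misaligned .text case exercised by TestSetupPDTForKernel further down):

package main

import "fmt"

const (
	pageShift = 12
	pageSize  = 1 << pageShift
)

func main() {
	kernelPageOffset := uintptr(0xffff800000000000) // illustrative VMA base
	secAddress := kernelPageOffset + 0x10032        // 32-byte aligned start
	secSize := uintptr(pageSize)                    // one page of content

	curPage := secAddress >> pageShift
	lastPage := (secAddress + secSize - 1) >> pageShift
	curFrame := (secAddress - kernelPageOffset) >> pageShift

	// The misaligned start makes this one-page section span two pages.
	fmt.Println(lastPage-curPage+1, curFrame) // 2 16
}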
// noEscape hides a pointer from escape analysis. This function is copied over
// from runtime/stubs.go
//go:nosplit
func noEscape(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}

var (
	// ErrInvalidMapping is returned when trying to lookup a virtual memory address that is not yet mapped.
	ErrInvalidMapping = &kernel.Error{Module: "vmm", Message: "virtual address does not point to a mapped physical page"}
)

// PageTableEntryFlag describes a flag that can be applied to a page table entry.
type PageTableEntryFlag uintptr

// pageTableEntry describes a page table entry. These entries encode
// a physical frame address and a set of flags. The actual format
// of the entry and flags is architecture-dependent.
type pageTableEntry uintptr

// HasFlags returns true if this entry has all the input flags set.
func (pte pageTableEntry) HasFlags(flags PageTableEntryFlag) bool {
	return (uintptr(pte) & uintptr(flags)) == uintptr(flags)
}

// HasAnyFlag returns true if this entry has at least one of the input flags set.
func (pte pageTableEntry) HasAnyFlag(flags PageTableEntryFlag) bool {
	return (uintptr(pte) & uintptr(flags)) != 0
}

// SetFlags sets the input list of flags on the page table entry.
func (pte *pageTableEntry) SetFlags(flags PageTableEntryFlag) {
	*pte = (pageTableEntry)(uintptr(*pte) | uintptr(flags))
}

// ClearFlags unsets the input list of flags from the page table entry.
func (pte *pageTableEntry) ClearFlags(flags PageTableEntryFlag) {
	*pte = (pageTableEntry)(uintptr(*pte) &^ uintptr(flags))
}

// Frame returns the physical page frame that this page table entry points to.
func (pte pageTableEntry) Frame() mm.Frame {
	return mm.Frame((uintptr(pte) & ptePhysPageMask) >> mm.PageShift)
}

// SetFrame updates the page table entry to point to the given physical frame.
func (pte *pageTableEntry) SetFrame(frame mm.Frame) {
	*pte = (pageTableEntry)((uintptr(*pte) &^ ptePhysPageMask) | frame.Address())
}
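To make the bit-twiddling in Frame and SetFrame concrete, here is a standalone sketch of the entry layout; the mask shown is the conventional amd64 value for ptePhysPageMask and is an assumption here:

package main

import "fmt"

const (
	ptePhysPageMask uint64 = 0x000ffffffffff000 // bits 12-51 hold the frame
	flagPresent     uint64 = 1 << 0
	flagRW          uint64 = 1 << 1
	pageShift              = 12
)

func main() {
	var pte uint64

	// SetFrame: clear the address bits, then splice in frame<<pageShift.
	frame := uint64(123)
	pte = (pte &^ ptePhysPageMask) | (frame << pageShift)

	// SetFlags: OR in flag bits, which live outside the address mask.
	pte |= flagPresent | flagRW

	fmt.Printf("entry=%#x frame=%d present=%v\n",
		pte, (pte&ptePhysPageMask)>>pageShift, pte&flagPresent != 0)
}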
// pteForAddress returns the final page table entry that corresponds to a
// particular virtual address. The function performs a page table walk till it
// reaches the final page table entry, returning ErrInvalidMapping if the page
// is not present.
func pteForAddress(virtAddr uintptr) (*pageTableEntry, *kernel.Error) {
	var (
		err   *kernel.Error
		entry *pageTableEntry
	)

	walk(virtAddr, func(pteLevel uint8, pte *pageTableEntry) bool {
		if !pte.HasFlags(FlagPresent) {
			entry = nil
			err = ErrInvalidMapping
			return false
		}

		entry = pte
		return true
	})

	return entry, err
}

var (
	// ptePtrFn returns a pointer to the supplied entry address. It is
	// used by tests to override the generated page table entry pointers so
	// walk() can be properly tested. When compiling the kernel this function
	// will be automatically inlined.
	ptePtrFn = func(entryAddr uintptr) unsafe.Pointer {
		return unsafe.Pointer(entryAddr)
	}
)

// pageTableWalker is a function that can be passed to the walk method. The
// function receives the current page level and page table entry as its
// arguments. If the function returns false, then the page walk is aborted.
type pageTableWalker func(pteLevel uint8, pte *pageTableEntry) bool

// walk performs a page table walk for the given virtual address. It calls the
// supplied walkFn with the page table entry that corresponds to each page
// table level. If walkFn returns false, then the walk is aborted.
func walk(virtAddr uintptr, walkFn pageTableWalker) {
	var (
		level                            uint8
		tableAddr, entryAddr, entryIndex uintptr
		ok                               bool
	)

	// tableAddr is initially set to the recursively mapped virtual address for the
	// last entry in the top-most page table. Dereferencing a pointer to this address
	// will allow us to access the contents of the top-most page table.
	for level, tableAddr = uint8(0), pdtVirtualAddr; level < pageLevels; level, tableAddr = level+1, entryAddr {
		// Extract the bits from the virtual address that correspond to the
		// index in this level's page table.
		entryIndex = (virtAddr >> pageLevelShifts[level]) & ((1 << pageLevelBits[level]) - 1)

		// By shifting the table virtual address left by pageLevelBits[level] we add
		// a new level of indirection to our recursive mapping, allowing us to access
		// the table pointed to by the page entry.
		entryAddr = tableAddr + (entryIndex << mm.PointerShift)

		if ok = walkFn(level, (*pageTableEntry)(ptePtrFn(entryAddr))); !ok {
			return
		}

		// Shift left by the number of bits for this paging level to get
		// the virtual address of the table pointed to by entryAddr.
		entryAddr <<= pageLevelBits[level]
	}
}
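walk's index extraction is the heart of the recursive page-table scheme. A standalone decomposition of a virtual address using amd64-style tables; the shift and bit values are assumptions mirroring pageLevelShifts and pageLevelBits:

package main

import "fmt"

func main() {
	levelShifts := []uint{39, 30, 21, 12} // P4, P3, P2, P1 (amd64 assumption)
	levelBits := []uint{9, 9, 9, 9}       // 512 entries per level

	virtAddr := uintptr(0xffff800000401234)
	for level := range levelShifts {
		index := (virtAddr >> levelShifts[level]) & ((1 << levelBits[level]) - 1)
		fmt.Printf("level %d index: %d\n", level, index) // 256, 0, 2, 1
	}
	fmt.Printf("page offset: %#x\n", virtAddr&0xfff) // 0x234
}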
651	src/gopheros/kernel/mm/vmm/pdt_test.go	Normal file
@ -0,0 +1,651 @@
package vmm

import (
	"gopheros/kernel"
	"gopheros/kernel/cpu"
	"gopheros/kernel/mm"
	"gopheros/multiboot"
	"runtime"
	"testing"
	"unsafe"
)

const (
	oneMb = 1024 * 1024
)

func TestPageDirectoryTableInitAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}

	defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMapTemporary func(mm.Frame) (mm.Page, *kernel.Error), origUnmap func(mm.Page) *kernel.Error) {
		flushTLBEntryFn = origFlushTLBEntry
		activePDTFn = origActivePDT
		mapTemporaryFn = origMapTemporary
		unmapFn = origUnmap
	}(flushTLBEntryFn, activePDTFn, mapTemporaryFn, unmapFn)

	t.Run("already mapped PDT", func(t *testing.T) {
		var (
			pdt      PageDirectoryTable
			pdtFrame = mm.Frame(123)
		)

		activePDTFn = func() uintptr {
			return pdtFrame.Address()
		}

		mapTemporaryFn = func(_ mm.Frame) (mm.Page, *kernel.Error) {
			t.Fatal("unexpected call to MapTemporary")
			return 0, nil
		}

		unmapFn = func(_ mm.Page) *kernel.Error {
			t.Fatal("unexpected call to Unmap")
			return nil
		}

		if err := pdt.Init(pdtFrame); err != nil {
			t.Fatal(err)
		}
	})
	t.Run("not mapped PDT", func(t *testing.T) {
		var (
			pdt      PageDirectoryTable
			pdtFrame = mm.Frame(123)
			physPage [mm.PageSize >> mm.PointerShift]pageTableEntry
		)

		// Fill phys page with random junk
		kernel.Memset(uintptr(unsafe.Pointer(&physPage[0])), 0xf0, mm.PageSize)

		activePDTFn = func() uintptr {
			return 0
		}

		mapTemporaryFn = func(_ mm.Frame) (mm.Page, *kernel.Error) {
			return mm.PageFromAddress(uintptr(unsafe.Pointer(&physPage[0]))), nil
		}

		flushTLBEntryFn = func(_ uintptr) {}

		unmapCallCount := 0
		unmapFn = func(_ mm.Page) *kernel.Error {
			unmapCallCount++
			return nil
		}

		if err := pdt.Init(pdtFrame); err != nil {
			t.Fatal(err)
		}

		if unmapCallCount != 1 {
			t.Fatalf("expected Unmap to be called 1 time; called %d", unmapCallCount)
		}

		for i := 0; i < len(physPage)-1; i++ {
			if physPage[i] != 0 {
				t.Errorf("expected PDT entry %d to be cleared; got %x", i, physPage[i])
			}
		}

		// The last page should be recursively mapped to the PDT
		lastPdtEntry := physPage[len(physPage)-1]
		if !lastPdtEntry.HasFlags(FlagPresent | FlagRW) {
			t.Fatal("expected last PDT entry to have FlagPresent and FlagRW set")
		}

		if lastPdtEntry.Frame() != pdtFrame {
			t.Fatalf("expected last PDT entry to be recursively mapped to physical frame %x; got %x", pdtFrame, lastPdtEntry.Frame())
		}
	})
	t.Run("temporary mapping failure", func(t *testing.T) {
		var (
			pdt      PageDirectoryTable
			pdtFrame = mm.Frame(123)
		)

		activePDTFn = func() uintptr {
			return 0
		}

		expErr := &kernel.Error{Module: "test", Message: "error mapping page"}

		mapTemporaryFn = func(_ mm.Frame) (mm.Page, *kernel.Error) {
			return 0, expErr
		}

		unmapFn = func(_ mm.Page) *kernel.Error {
			t.Fatal("unexpected call to Unmap")
			return nil
		}

		if err := pdt.Init(pdtFrame); err != expErr {
			t.Fatalf("expected to get error: %v; got %v", *expErr, err)
		}
	})
}
func TestPageDirectoryTableMapAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}

	defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMap func(mm.Page, mm.Frame, PageTableEntryFlag) *kernel.Error) {
		flushTLBEntryFn = origFlushTLBEntry
		activePDTFn = origActivePDT
		mapFn = origMap
	}(flushTLBEntryFn, activePDTFn, mapFn)

	t.Run("already mapped PDT", func(t *testing.T) {
		var (
			pdtFrame = mm.Frame(123)
			pdt      = PageDirectoryTable{pdtFrame: pdtFrame}
			page     = mm.PageFromAddress(uintptr(100 * oneMb))
		)

		activePDTFn = func() uintptr {
			return pdtFrame.Address()
		}

		mapFn = func(_ mm.Page, _ mm.Frame, _ PageTableEntryFlag) *kernel.Error {
			return nil
		}

		flushCallCount := 0
		flushTLBEntryFn = func(_ uintptr) {
			flushCallCount++
		}

		if err := pdt.Map(page, mm.Frame(321), FlagRW); err != nil {
			t.Fatal(err)
		}

		if exp := 0; flushCallCount != exp {
			t.Fatalf("expected flushTLBEntry to be called %d times; called %d", exp, flushCallCount)
		}
	})

	t.Run("not mapped PDT", func(t *testing.T) {
		var (
			pdtFrame       = mm.Frame(123)
			pdt            = PageDirectoryTable{pdtFrame: pdtFrame}
			page           = mm.PageFromAddress(uintptr(100 * oneMb))
			activePhysPage [mm.PageSize >> mm.PointerShift]pageTableEntry
			activePdtFrame = mm.Frame(uintptr(unsafe.Pointer(&activePhysPage[0])) >> mm.PageShift)
		)

		// Initially, activePhysPage is recursively mapped to itself
		activePhysPage[len(activePhysPage)-1].SetFlags(FlagPresent | FlagRW)
		activePhysPage[len(activePhysPage)-1].SetFrame(activePdtFrame)

		activePDTFn = func() uintptr {
			return activePdtFrame.Address()
		}

		mapFn = func(_ mm.Page, _ mm.Frame, _ PageTableEntryFlag) *kernel.Error {
			return nil
		}

		flushCallCount := 0
		flushTLBEntryFn = func(_ uintptr) {
			switch flushCallCount {
			case 0:
				// the first time we flush the tlb entry, the last entry of
				// the active pdt should be pointing to pdtFrame
				if got := activePhysPage[len(activePhysPage)-1].Frame(); got != pdtFrame {
					t.Fatalf("expected last PDT entry of active PDT to be re-mapped to frame %x; got %x", pdtFrame, got)
				}
			case 1:
				// the second time we flush the tlb entry, the last entry of
				// the active pdt should be pointing back to activePdtFrame
				if got := activePhysPage[len(activePhysPage)-1].Frame(); got != activePdtFrame {
					t.Fatalf("expected last PDT entry of active PDT to be mapped back to frame %x; got %x", activePdtFrame, got)
				}
			}
			flushCallCount++
		}

		if err := pdt.Map(page, mm.Frame(321), FlagRW); err != nil {
			t.Fatal(err)
		}

		if exp := 2; flushCallCount != exp {
			t.Fatalf("expected flushTLBEntry to be called %d times; called %d", exp, flushCallCount)
		}
	})
}
func TestPageDirectoryTableUnmapAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}

	defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origUnmap func(mm.Page) *kernel.Error) {
		flushTLBEntryFn = origFlushTLBEntry
		activePDTFn = origActivePDT
		unmapFn = origUnmap
	}(flushTLBEntryFn, activePDTFn, unmapFn)

	t.Run("already mapped PDT", func(t *testing.T) {
		var (
			pdtFrame = mm.Frame(123)
			pdt      = PageDirectoryTable{pdtFrame: pdtFrame}
			page     = mm.PageFromAddress(uintptr(100 * oneMb))
		)

		activePDTFn = func() uintptr {
			return pdtFrame.Address()
		}

		unmapFn = func(_ mm.Page) *kernel.Error {
			return nil
		}

		flushCallCount := 0
		flushTLBEntryFn = func(_ uintptr) {
			flushCallCount++
		}

		if err := pdt.Unmap(page); err != nil {
			t.Fatal(err)
		}

		if exp := 0; flushCallCount != exp {
			t.Fatalf("expected flushTLBEntry to be called %d times; called %d", exp, flushCallCount)
		}
	})

	t.Run("not mapped PDT", func(t *testing.T) {
		var (
			pdtFrame       = mm.Frame(123)
			pdt            = PageDirectoryTable{pdtFrame: pdtFrame}
			page           = mm.PageFromAddress(uintptr(100 * oneMb))
			activePhysPage [mm.PageSize >> mm.PointerShift]pageTableEntry
			activePdtFrame = mm.Frame(uintptr(unsafe.Pointer(&activePhysPage[0])) >> mm.PageShift)
		)

		// Initially, activePhysPage is recursively mapped to itself
		activePhysPage[len(activePhysPage)-1].SetFlags(FlagPresent | FlagRW)
		activePhysPage[len(activePhysPage)-1].SetFrame(activePdtFrame)

		activePDTFn = func() uintptr {
			return activePdtFrame.Address()
		}

		unmapFn = func(_ mm.Page) *kernel.Error {
			return nil
		}

		flushCallCount := 0
		flushTLBEntryFn = func(_ uintptr) {
			switch flushCallCount {
			case 0:
				// the first time we flush the tlb entry, the last entry of
				// the active pdt should be pointing to pdtFrame
				if got := activePhysPage[len(activePhysPage)-1].Frame(); got != pdtFrame {
					t.Fatalf("expected last PDT entry of active PDT to be re-mapped to frame %x; got %x", pdtFrame, got)
				}
			case 1:
				// the second time we flush the tlb entry, the last entry of
				// the active pdt should be pointing back to activePdtFrame
				if got := activePhysPage[len(activePhysPage)-1].Frame(); got != activePdtFrame {
					t.Fatalf("expected last PDT entry of active PDT to be mapped back to frame %x; got %x", activePdtFrame, got)
				}
			}
			flushCallCount++
		}

		if err := pdt.Unmap(page); err != nil {
			t.Fatal(err)
		}

		if exp := 2; flushCallCount != exp {
			t.Fatalf("expected flushTLBEntry to be called %d times; called %d", exp, flushCallCount)
		}
	})
}
func TestPageDirectoryTableActivateAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}

	defer func(origSwitchPDT func(uintptr)) {
		switchPDTFn = origSwitchPDT
	}(switchPDTFn)

	var (
		pdtFrame = mm.Frame(123)
		pdt      = PageDirectoryTable{pdtFrame: pdtFrame}
	)

	switchPDTCallCount := 0
	switchPDTFn = func(_ uintptr) {
		switchPDTCallCount++
	}

	pdt.Activate()
	if exp := 1; switchPDTCallCount != exp {
		t.Fatalf("expected switchPDT to be called %d times; called %d", exp, switchPDTCallCount)
	}
}
func TestSetupPDTForKernel(t *testing.T) {
|
||||||
|
defer func() {
|
||||||
|
mm.SetFrameAllocator(nil)
|
||||||
|
activePDTFn = cpu.ActivePDT
|
||||||
|
switchPDTFn = cpu.SwitchPDT
|
||||||
|
translateFn = Translate
|
||||||
|
mapFn = Map
|
||||||
|
mapTemporaryFn = MapTemporary
|
||||||
|
unmapFn = Unmap
|
||||||
|
earlyReserveLastUsed = tempMappingAddr
|
||||||
|
}()
|
||||||
|
|
||||||
|
// reserve space for an allocated page
|
||||||
|
reservedPage := make([]byte, mm.PageSize)
|
||||||
|
|
||||||
|
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&emptyInfoData[0])))
|
||||||
|
|
||||||
|
t.Run("map kernel sections", func(t *testing.T) {
|
||||||
|
defer func() { visitElfSectionsFn = multiboot.VisitElfSections }()
|
||||||
|
|
||||||
|
mm.SetFrameAllocator(func() (mm.Frame, *kernel.Error) {
|
||||||
|
addr := uintptr(unsafe.Pointer(&reservedPage[0]))
|
||||||
|
return mm.Frame(addr >> mm.PageShift), nil
|
||||||
|
})
|
||||||
|
activePDTFn = func() uintptr {
|
||||||
|
return uintptr(unsafe.Pointer(&reservedPage[0]))
|
||||||
|
}
|
||||||
|
switchPDTFn = func(_ uintptr) {}
|
||||||
|
translateFn = func(_ uintptr) (uintptr, *kernel.Error) { return 0xbadf00d000, nil }
|
||||||
|
mapTemporaryFn = func(f mm.Frame) (mm.Page, *kernel.Error) { return mm.Page(f), nil }
|
||||||
|
visitElfSectionsFn = func(v multiboot.ElfSectionVisitor) {
|
||||||
|
// address < VMA; should be ignored
|
||||||
|
v(".debug", 0, 0, uint64(mm.PageSize>>1))
|
||||||
|
// section uses 32-byte alignment instead of page alignment and has a size
|
||||||
|
// equal to 1 page. Due to rounding, we need to actually map 2 pages.
|
||||||
|
v(".text", multiboot.ElfSectionExecutable, 0x10032, uint64(mm.PageSize))
|
||||||
|
v(".data", multiboot.ElfSectionWritable, 0x2000, uint64(mm.PageSize))
|
||||||
|
// section is page-aligned and occupies exactly 2 pages
|
||||||
|
v(".rodata", 0, 0x3000, uint64(mm.PageSize<<1))
|
||||||
|
}
|
||||||
|
mapCount := 0
|
||||||
|
mapFn = func(page mm.Page, frame mm.Frame, flags PageTableEntryFlag) *kernel.Error {
|
||||||
|
defer func() { mapCount++ }()
|
||||||
|
|
||||||
|
var expFlags PageTableEntryFlag
|
||||||
|
|
||||||
|
switch mapCount {
|
||||||
|
case 0, 1:
|
||||||
|
expFlags = FlagPresent
|
||||||
|
case 2:
|
||||||
|
expFlags = FlagPresent | FlagNoExecute | FlagRW
|
||||||
|
case 3, 4:
|
||||||
|
expFlags = FlagPresent | FlagNoExecute
|
||||||
|
}
|
||||||
|
|
||||||
|
if (flags & expFlags) != expFlags {
|
||||||
|
t.Errorf("[map call %d] expected flags to be %d; got %d", mapCount, expFlags, flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := setupPDTForKernel(0x123); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if exp := 5; mapCount != exp {
|
||||||
|
t.Errorf("expected Map to be called %d times; got %d", exp, mapCount)
|
||||||
|
}
|
||||||
|
})

	t.Run("map of kernel sections fails", func(t *testing.T) {
		defer func() { visitElfSectionsFn = multiboot.VisitElfSections }()
		expErr := &kernel.Error{Module: "test", Message: "map failed"}

		mm.SetFrameAllocator(func() (mm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return mm.Frame(addr >> mm.PageShift), nil
		})
		activePDTFn = func() uintptr {
			return uintptr(unsafe.Pointer(&reservedPage[0]))
		}
		switchPDTFn = func(_ uintptr) {}
		translateFn = func(_ uintptr) (uintptr, *kernel.Error) { return 0xbadf00d000, nil }
		mapTemporaryFn = func(f mm.Frame) (mm.Page, *kernel.Error) { return mm.Page(f), nil }
		visitElfSectionsFn = func(v multiboot.ElfSectionVisitor) {
			v(".text", multiboot.ElfSectionExecutable, 0xbadc0ffee, uint64(mm.PageSize>>1))
		}
		mapFn = func(page mm.Page, frame mm.Frame, flags PageTableEntryFlag) *kernel.Error {
			return expErr
		}

		if err := setupPDTForKernel(0); err != expErr {
			t.Fatalf("expected error: %v; got %v", expErr, err)
		}
	})

	t.Run("copy allocator reservations to PDT", func(t *testing.T) {
		earlyReserveLastUsed = tempMappingAddr - uintptr(mm.PageSize)
		mm.SetFrameAllocator(func() (mm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return mm.Frame(addr >> mm.PageShift), nil
		})
		activePDTFn = func() uintptr {
			return uintptr(unsafe.Pointer(&reservedPage[0]))
		}
		switchPDTFn = func(_ uintptr) {}
		translateFn = func(_ uintptr) (uintptr, *kernel.Error) { return 0xbadf00d000, nil }
		unmapFn = func(p mm.Page) *kernel.Error { return nil }
		mapTemporaryFn = func(f mm.Frame) (mm.Page, *kernel.Error) { return mm.Page(f), nil }
		mapFn = func(page mm.Page, frame mm.Frame, flags PageTableEntryFlag) *kernel.Error {
			if exp := mm.PageFromAddress(earlyReserveLastUsed); page != exp {
				t.Errorf("expected Map to be called with page %d; got %d", exp, page)
			}

			if exp := mm.Frame(0xbadf00d000 >> mm.PageShift); frame != exp {
				t.Errorf("expected Map to be called with frame %d; got %d", exp, frame)
			}

			if flags&(FlagPresent|FlagRW) != (FlagPresent | FlagRW) {
				t.Error("expected Map to be called with FlagPresent | FlagRW")
			}
			return nil
		}

		if err := setupPDTForKernel(0); err != nil {
			t.Fatal(err)
		}
	})

	t.Run("pdt init fails", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "translate failed"}

		mm.SetFrameAllocator(func() (mm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return mm.Frame(addr >> mm.PageShift), nil
		})
		activePDTFn = func() uintptr { return 0 }
		mapTemporaryFn = func(f mm.Frame) (mm.Page, *kernel.Error) { return 0, expErr }

		if err := setupPDTForKernel(0); err != expErr {
			t.Fatalf("expected error: %v; got %v", expErr, err)
		}
	})

	t.Run("translation fails for page in reserved address space", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "translate failed"}

		earlyReserveLastUsed = tempMappingAddr - uintptr(mm.PageSize)
		mm.SetFrameAllocator(func() (mm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return mm.Frame(addr >> mm.PageShift), nil
		})
		activePDTFn = func() uintptr {
			return uintptr(unsafe.Pointer(&reservedPage[0]))
		}
		translateFn = func(_ uintptr) (uintptr, *kernel.Error) {
			return 0, expErr
		}

		if err := setupPDTForKernel(0); err != expErr {
			t.Fatalf("expected error: %v; got %v", expErr, err)
		}
	})

	t.Run("map fails for page in reserved address space", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "map failed"}

		earlyReserveLastUsed = tempMappingAddr - uintptr(mm.PageSize)
		mm.SetFrameAllocator(func() (mm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return mm.Frame(addr >> mm.PageShift), nil
		})
		activePDTFn = func() uintptr {
			return uintptr(unsafe.Pointer(&reservedPage[0]))
		}
		translateFn = func(_ uintptr) (uintptr, *kernel.Error) { return 0xbadf00d000, nil }
		mapTemporaryFn = func(f mm.Frame) (mm.Page, *kernel.Error) { return mm.Page(f), nil }
		mapFn = func(page mm.Page, frame mm.Frame, flags PageTableEntryFlag) *kernel.Error { return expErr }

		if err := setupPDTForKernel(0); err != expErr {
			t.Fatalf("expected error: %v; got %v", expErr, err)
		}
	})
}
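
The rounding called out in the ".text" fixture comment above is why the test expects five Map calls in total: .debug is skipped, .text needs two pages despite being one page in size, .data needs one, and .rodata two. A minimal sketch of the page-rounding arithmetic, assuming 4 KiB pages (hypothetical helper, not the package's code):

	// pagesFor reports how many 4 KiB pages cover the byte range [addr, addr+size).
	func pagesFor(addr, size uintptr) uintptr {
		const pageSize = 4096
		start := addr &^ (pageSize - 1)                       // round start down to a page boundary
		end := (addr + size + pageSize - 1) &^ (pageSize - 1) // round end up to a page boundary
		return (end - start) / pageSize
	}

	// pagesFor(0x10032, 4096) == 2: the section straddles two pages even though
	// its size is exactly one page.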

var (
	emptyInfoData = []byte{
		0, 0, 0, 0, // size
		0, 0, 0, 0, // reserved
		0, 0, 0, 0, // tag with type zero and length zero
		0, 0, 0, 0,
	}
)
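
emptyInfoData mirrors the fixed framing of a multiboot2 info block: a total-size field and a reserved field, followed by a stream of (type, size) tags that a type-0 tag terminates. A rough sketch of the layout being faked here, assuming the multiboot2 wire format:

	// mbInfoFixedPart is the 8-byte header at the start of the info block.
	type mbInfoFixedPart struct {
		TotalSize uint32 // size of the entire block, this header included
		Reserved  uint32
	}

	// mbTagHeader precedes every tag; Type == 0 marks the end of the tag stream.
	type mbTagHeader struct {
		Type uint32
		Size uint32
	}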

func TestPageTableEntryFlags(t *testing.T) {
	var (
		pte   pageTableEntry
		flag1 = PageTableEntryFlag(1 << 10)
		flag2 = PageTableEntryFlag(1 << 21)
	)

	if pte.HasAnyFlag(flag1 | flag2) {
		t.Fatalf("expected HasAnyFlag to return false")
	}

	pte.SetFlags(flag1 | flag2)

	if !pte.HasAnyFlag(flag1 | flag2) {
		t.Fatalf("expected HasAnyFlag to return true")
	}

	if !pte.HasFlags(flag1 | flag2) {
		t.Fatalf("expected HasFlags to return true")
	}

	pte.ClearFlags(flag1)

	if !pte.HasAnyFlag(flag1 | flag2) {
		t.Fatalf("expected HasAnyFlag to return true")
	}

	if pte.HasFlags(flag1 | flag2) {
		t.Fatalf("expected HasFlags to return false")
	}

	pte.ClearFlags(flag1 | flag2)

	if pte.HasAnyFlag(flag1 | flag2) {
		t.Fatalf("expected HasAnyFlag to return false")
	}

	if pte.HasFlags(flag1 | flag2) {
		t.Fatalf("expected HasFlags to return false")
	}
}
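
The assertions pin down the difference between the two predicates: HasFlags is an all-of check while HasAnyFlag is an any-of check, which is why clearing just flag1 flips HasFlags to false while HasAnyFlag stays true. In sketch form, with hypothetical stand-ins for the real methods:

	func hasFlags(pte, mask uint64) bool   { return pte&mask == mask } // every bit in mask is set
	func hasAnyFlag(pte, mask uint64) bool { return pte&mask != 0 }    // at least one bit in mask is set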

func TestPageTableEntryFrameEncoding(t *testing.T) {
	var (
		pte       pageTableEntry
		physFrame = mm.Frame(123)
	)

	pte.SetFrame(physFrame)
	if got := pte.Frame(); got != physFrame {
		t.Fatalf("expected pte.Frame() to return %v; got %v", physFrame, got)
	}
}
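
For the round-trip above to work, the entry only needs to keep the frame number in a dedicated bit field; on amd64 the physical address conventionally occupies bits 12-51 of the entry. A sketch of that encoding (hypothetical helpers, not the package's actual methods):

	const pteFrameMask = uint64(0x000ffffffffff000) // bits 12-51 carry the physical frame address

	func setFrame(pte, frame uint64) uint64 { return (pte &^ pteFrameMask) | ((frame << 12) & pteFrameMask) }
	func frameOf(pte uint64) uint64         { return (pte & pteFrameMask) >> 12 }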

func TestPtePtrFn(t *testing.T) {
	// Dummy test to keep coverage happy
	if exp, got := unsafe.Pointer(uintptr(123)), ptePtrFn(uintptr(123)); exp != got {
		t.Fatalf("expected ptePtrFn to return %v; got %v", exp, got)
	}
}

func TestWalkAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}

	defer func(origPtePtr func(uintptr) unsafe.Pointer) {
		ptePtrFn = origPtePtr
	}(ptePtrFn)

	// This address breaks down to:
	// p4 index: 1
	// p3 index: 2
	// p2 index: 3
	// p1 index: 4
	// offset  : 1024
	targetAddr := uintptr(0x8080604400)
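	// Derivation, using the standard 4-level split (9 index bits per level,
	// 12 offset bits):
	//   p4  = (0x8080604400 >> 39) & 0x1ff = 1
	//   p3  = (0x8080604400 >> 30) & 0x1ff = 2
	//   p2  = (0x8080604400 >> 21) & 0x1ff = 3
	//   p1  = (0x8080604400 >> 12) & 0x1ff = 4
	//   off =  0x8080604400 & 0xfff        = 0x400 = 1024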

	sizeofPteEntry := uintptr(unsafe.Sizeof(pageTableEntry(0)))
	expEntryAddrBits := [pageLevels][pageLevels + 1]uintptr{
		{511, 511, 511, 511, 1 * sizeofPteEntry},
		{511, 511, 511, 1, 2 * sizeofPteEntry},
		{511, 511, 1, 2, 3 * sizeofPteEntry},
		{511, 1, 2, 3, 4 * sizeofPteEntry},
	}

	pteCallCount := 0
	ptePtrFn = func(entry uintptr) unsafe.Pointer {
		if pteCallCount >= pageLevels {
			t.Fatalf("unexpected call to ptePtrFn; already called %d times", pageLevels)
		}

		for i := 0; i < pageLevels; i++ {
			pteIndex := (entry >> pageLevelShifts[i]) & ((1 << pageLevelBits[i]) - 1)
			if pteIndex != expEntryAddrBits[pteCallCount][i] {
				t.Errorf("[ptePtrFn call %d] expected pte entry for level %d to use offset %d; got %d", pteCallCount, i, expEntryAddrBits[pteCallCount][i], pteIndex)
			}
		}

		// Check the page offset
		pteIndex := entry & ((1 << mm.PageShift) - 1)
		if pteIndex != expEntryAddrBits[pteCallCount][pageLevels] {
			t.Errorf("[ptePtrFn call %d] expected pte offset to be %d; got %d", pteCallCount, expEntryAddrBits[pteCallCount][pageLevels], pteIndex)
		}

		pteCallCount++

		return unsafe.Pointer(uintptr(0xf00))
	}

	walkFnCallCount := 0
	walk(targetAddr, func(level uint8, entry *pageTableEntry) bool {
		walkFnCallCount++
		return walkFnCallCount != pageLevels
	})

	if pteCallCount != pageLevels {
		t.Errorf("expected ptePtrFn to be called %d times; got %d", pageLevels, pteCallCount)
	}
}
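
The expected index patterns ({511,511,511,511}, {511,511,511,1}, ...) come from the recursive page-table technique: with PML4 slot 511 pointing back at the PML4 itself, the entry that maps any address at any level is itself reachable at a virtual address built by shifting the address right one level at a time and re-entering through slot 511. A minimal sketch of that address construction (hypothetical helper; the package's walk has its own formulation, and sign-extension of canonical addresses is ignored here):

	// entryVirtAddr returns the virtual address through which the paging entry
	// for addr can be accessed at the given level (3 = PT entry ... 0 = PML4 entry),
	// assuming the recursive slot-511 mapping.
	func entryVirtAddr(addr uintptr, level int) uintptr {
		for i := 0; i < 4-level; i++ {
			addr = (addr >> 9) &^ 7    // step up one table; keep 8-byte entry alignment
			addr |= uintptr(511) << 39 // route the top index through the recursive slot
		}
		return addr
	}

	// entryVirtAddr(0x8080604400, 3) yields indices {511, 1, 2, 3} with offset 4*8,
	// matching the last row of expEntryAddrBits above.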
55 src/gopheros/kernel/mm/vmm/vmm.go Normal file
@ -0,0 +1,55 @@

package vmm

import (
	"gopheros/kernel"
	"gopheros/kernel/cpu"
	"gopheros/kernel/irq"
	"gopheros/kernel/mm"
)

var (
	// the following functions are mocked by tests and are automatically
	// inlined by the compiler.
	handleExceptionWithCodeFn = irq.HandleExceptionWithCode
	readCR2Fn                 = cpu.ReadCR2
	translateFn               = Translate

	errUnrecoverableFault = &kernel.Error{Module: "vmm", Message: "page/gpf fault"}
)

// Init initializes the vmm system, creates a granular PDT for the kernel and
// installs paging-related exception handlers.
func Init(kernelPageOffset uintptr) *kernel.Error {
	if err := setupPDTForKernel(kernelPageOffset); err != nil {
		return err
	}

	if err := reserveZeroedFrame(); err != nil {
		return err
	}

	handleExceptionWithCodeFn(irq.PageFaultException, pageFaultHandler)
	handleExceptionWithCodeFn(irq.GPFException, generalProtectionFaultHandler)
	return nil
}

// reserveZeroedFrame reserves a physical frame to be used together with
// FlagCopyOnWrite for lazy allocation requests.
func reserveZeroedFrame() *kernel.Error {
	var (
		err      *kernel.Error
		tempPage mm.Page
	)

	if ReservedZeroedFrame, err = mm.AllocFrame(); err != nil {
		return err
	} else if tempPage, err = mapTemporaryFn(ReservedZeroedFrame); err != nil {
		return err
	}
	kernel.Memset(tempPage.Address(), 0, mm.PageSize)
	_ = unmapFn(tempPage)

	// From this point on, ReservedZeroedFrame cannot be mapped with a RW flag
	protectReservedZeroedPage = true
	return nil
}
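
ReservedZeroedFrame is the backing store for lazy allocation: a page can be mapped read-only onto this shared, zero-filled frame, and only the first write has to fault in a private frame. A minimal sketch of the fault-side remapping, reusing the package's own seams (hypothetical helper; the real logic lives in pageFaultHandler, which is not shown in this diff):

	// remapAsPrivate swaps a copy-on-write zero mapping for a writable private frame.
	func remapAsPrivate(page mm.Page) *kernel.Error {
		frame, err := mm.AllocFrame()
		if err != nil {
			return err
		}
		tmp, err := mapTemporaryFn(frame)
		if err != nil {
			return err
		}
		kernel.Memset(tmp.Address(), 0, mm.PageSize) // the new frame must look zeroed as well
		_ = unmapFn(tmp)
		return mapFn(page, frame, FlagPresent|FlagRW)
	}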
@ -1,5 +1,3 @@
-// +build amd64
-
 package vmm
 
 import "math"
121 src/gopheros/kernel/mm/vmm/vmm_test.go Normal file
@ -0,0 +1,121 @@

package vmm

import (
	"gopheros/kernel"
	"gopheros/kernel/cpu"
	"gopheros/kernel/irq"
	"gopheros/kernel/mm"
	"gopheros/multiboot"
	"testing"
	"unsafe"
)

func TestInit(t *testing.T) {
	defer func() {
		mm.SetFrameAllocator(nil)
		activePDTFn = cpu.ActivePDT
		switchPDTFn = cpu.SwitchPDT
		translateFn = Translate
		mapTemporaryFn = MapTemporary
		unmapFn = Unmap
		handleExceptionWithCodeFn = irq.HandleExceptionWithCode
	}()

	// reserve space for an allocated page
	reservedPage := make([]byte, mm.PageSize)

	multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&emptyInfoData[0])))

	t.Run("success", func(t *testing.T) {
		// fill page with junk
		for i := 0; i < len(reservedPage); i++ {
			reservedPage[i] = byte(i % 256)
		}

		mm.SetFrameAllocator(func() (mm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return mm.Frame(addr >> mm.PageShift), nil
		})
		activePDTFn = func() uintptr {
			return uintptr(unsafe.Pointer(&reservedPage[0]))
		}
		switchPDTFn = func(_ uintptr) {}
		unmapFn = func(p mm.Page) *kernel.Error { return nil }
		mapTemporaryFn = func(f mm.Frame) (mm.Page, *kernel.Error) { return mm.Page(f), nil }
		handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {}

		if err := Init(0); err != nil {
			t.Fatal(err)
		}

		// reserved page should be zeroed
		for i := 0; i < len(reservedPage); i++ {
			if reservedPage[i] != 0 {
				t.Errorf("expected reserved page to be zeroed; got byte %d at index %d", reservedPage[i], i)
			}
		}
	})

	t.Run("setupPDT fails", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "out of memory"}

		// Fail the very first frame allocation so that setting up the PDT
		// returns an error
		mm.SetFrameAllocator(func() (mm.Frame, *kernel.Error) {
			return mm.InvalidFrame, expErr
		})

		if err := Init(0); err != expErr {
			t.Fatalf("expected error: %v; got %v", expErr, err)
		}
	})

	t.Run("blank page allocation error", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "out of memory"}

		// Allow the PDT allocation to succeed and then return an error when
		// trying to allocate the blank frame
		var allocCount int
		mm.SetFrameAllocator(func() (mm.Frame, *kernel.Error) {
			defer func() { allocCount++ }()

			if allocCount == 0 {
				addr := uintptr(unsafe.Pointer(&reservedPage[0]))
				return mm.Frame(addr >> mm.PageShift), nil
			}

			return mm.InvalidFrame, expErr
		})
		activePDTFn = func() uintptr {
			return uintptr(unsafe.Pointer(&reservedPage[0]))
		}
		switchPDTFn = func(_ uintptr) {}
		unmapFn = func(p mm.Page) *kernel.Error { return nil }
		mapTemporaryFn = func(f mm.Frame) (mm.Page, *kernel.Error) { return mm.Page(f), nil }
		handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {}

		if err := Init(0); err != expErr {
			t.Fatalf("expected error: %v; got %v", expErr, err)
		}
	})

	t.Run("blank page mapping error", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "map failed"}

		mm.SetFrameAllocator(func() (mm.Frame, *kernel.Error) {
			addr := uintptr(unsafe.Pointer(&reservedPage[0]))
			return mm.Frame(addr >> mm.PageShift), nil
		})
		activePDTFn = func() uintptr {
			return uintptr(unsafe.Pointer(&reservedPage[0]))
		}
		switchPDTFn = func(_ uintptr) {}
		unmapFn = func(p mm.Page) *kernel.Error { return nil }
		mapTemporaryFn = func(f mm.Frame) (mm.Page, *kernel.Error) { return mm.Page(f), expErr }
		handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {}

		if err := Init(0); err != expErr {
			t.Fatalf("expected error: %v; got %v", expErr, err)
		}
	})
}
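
All of these subtests script the allocator the same way: mm.SetFrameAllocator receives a closure that hands back the frame backing reservedPage, or an error, in a fixed order. The pattern generalizes; a sketch of a reusable scripted allocator for tests like these (hypothetical helper, not part of the package):

	// scriptedAllocator serves the given frames in order, then fails with errOut.
	func scriptedAllocator(frames []mm.Frame, errOut *kernel.Error) func() (mm.Frame, *kernel.Error) {
		next := 0
		return func() (mm.Frame, *kernel.Error) {
			if next < len(frames) {
				next++
				return frames[next-1], nil
			}
			return mm.InvalidFrame, errOut
		}
	}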