mirror of https://github.com/taigrr/gopher-os
synced 2025-01-18 04:43:13 -08:00
Provide replacement for runtime.sysReserve
This commit is contained in:
parent 5e5e9f1c0b
commit 6ee95b439e
79 kernel/goruntime/bootstrap.go Normal file
@@ -0,0 +1,79 @@
// Package goruntime contains code for bootstrapping Go runtime features such
// as the memory allocator.
package goruntime

import (
	"unsafe"

	"github.com/achilleasa/gopher-os/kernel/mem"
	"github.com/achilleasa/gopher-os/kernel/mem/vmm"
)

var (
	mapFn                = vmm.Map
	earlyReserveRegionFn = vmm.EarlyReserveRegion
)

//go:linkname mSysStatInc runtime.mSysStatInc
func mSysStatInc(*uint64, uintptr)

// sysReserve reserves address space without allocating any memory or
// establishing any page mappings.
//
// This function replaces runtime.sysReserve and is required for initializing
// the Go allocator.
//
//go:redirect-from runtime.sysReserve
//go:nosplit
func sysReserve(_ unsafe.Pointer, size uintptr, reserved *bool) unsafe.Pointer {
	regionSize := (mem.Size(size) + mem.PageSize - 1) & ^(mem.PageSize - 1)
	regionStartAddr, err := earlyReserveRegionFn(regionSize)
	if err != nil {
		panic(err)
	}

	*reserved = true
	return unsafe.Pointer(regionStartAddr)
}

// sysMap establishes a copy-on-write mapping for a particular memory region
// that has been reserved previously via a call to sysReserve.
//
// This function replaces runtime.sysMap and is required for initializing
// the Go allocator.
//
//go:redirect-from runtime.sysMap
//go:nosplit
func sysMap(virtAddr unsafe.Pointer, size uintptr, reserved bool, sysStat *uint64) unsafe.Pointer {
	if !reserved {
		panic("sysMap should only be called with reserved=true")
	}

	// We trust the allocator to call sysMap with an address inside a reserved region.
	regionStartAddr := (uintptr(virtAddr) + uintptr(mem.PageSize-1)) & ^uintptr(mem.PageSize-1)
	regionSize := (mem.Size(size) + mem.PageSize - 1) & ^(mem.PageSize - 1)
	pageCount := regionSize >> mem.PageShift

	mapFlags := vmm.FlagPresent | vmm.FlagNoExecute | vmm.FlagCopyOnWrite
	for page := vmm.PageFromAddress(regionStartAddr); pageCount > 0; pageCount, page = pageCount-1, page+1 {
		if err := mapFn(page, vmm.ReservedZeroedFrame, mapFlags); err != nil {
			return unsafe.Pointer(uintptr(0))
		}
	}

	mSysStatInc(sysStat, uintptr(regionSize))
	return unsafe.Pointer(regionStartAddr)
}

func init() {
	// Dummy calls so the compiler does not optimize away the functions in
	// this file.
	var (
		reserved bool
		stat     uint64
		zeroPtr  = unsafe.Pointer(uintptr(0))
	)

	sysReserve(zeroPtr, 0, &reserved)
	sysMap(zeroPtr, 0, reserved, &stat)
}
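Both functions rely on the same round-up-to-page-boundary arithmetic, (size + PageSize - 1) & ^(PageSize - 1). A minimal, standalone sketch of that expression, assuming 4 KiB pages; the constants and names below are stand-ins for mem.PageShift/mem.PageSize and are not part of this commit:

package main

import "fmt"

const (
	pageShift = 12 // assumed: 4 KiB pages, matching a typical mem.PageShift
	pageSize  = uintptr(1) << pageShift
)

// roundUpToPage mirrors the (size + PageSize - 1) & ^(PageSize - 1) expression
// used by sysReserve and sysMap: add PageSize-1, then clear the low bits.
func roundUpToPage(size uintptr) uintptr {
	return (size + pageSize - 1) &^ (pageSize - 1)
}

func main() {
	fmt.Println(roundUpToPage(4 * pageSize))   // 16384: already page aligned, unchanged
	fmt.Println(roundUpToPage(4*pageSize + 1)) // 20480: one extra byte spills into a fifth page
	fmt.Println(roundUpToPage(2*pageSize - 1)) // 8192: rounded up to two full pages
}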
1 kernel/goruntime/bootstrap.s Normal file
@@ -0,0 +1 @@
// dummy file to prevent compiler errors for using go:linkname
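The empty assembly file is needed because a Go function declared without a body, like mSysStatInc in bootstrap.go, only compiles when its package also contains an assembly file; the //go:linkname directive then binds that declaration to a symbol defined in another package. A minimal sketch of the same pattern in an ordinary package, using runtime.nanotime as the borrowed symbol purely for illustration (not part of this commit):

// clock.go
package clock

import (
	_ "unsafe" // blank import required for //go:linkname
)

// nanotime has no Go body; the linker resolves it to runtime.nanotime.
//go:linkname nanotime runtime.nanotime
func nanotime() int64

// Since reports nanoseconds elapsed since a previous nanotime reading.
func Since(start int64) int64 { return nanotime() - start }

// clock.s
// (intentionally empty: its presence lets the compiler accept the
// body-less nanotime declaration above)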
132 kernel/goruntime/bootstrap_test.go Normal file
@@ -0,0 +1,132 @@
package goruntime

import (
	"testing"
	"unsafe"

	"github.com/achilleasa/gopher-os/kernel"
	"github.com/achilleasa/gopher-os/kernel/mem"
	"github.com/achilleasa/gopher-os/kernel/mem/pmm"
	"github.com/achilleasa/gopher-os/kernel/mem/vmm"
)

func TestSysReserve(t *testing.T) {
	defer func() {
		earlyReserveRegionFn = vmm.EarlyReserveRegion
	}()
	var reserved bool

	t.Run("success", func(t *testing.T) {
		specs := []struct {
			reqSize       mem.Size
			expRegionSize mem.Size
		}{
			// exact multiple of page size
			{100 << mem.PageShift, 100 << mem.PageShift},
			// size should be rounded up to nearest page size
			{2*mem.PageSize - 1, 2 * mem.PageSize},
		}

		for specIndex, spec := range specs {
			earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) {
				if rsvSize != spec.expRegionSize {
					t.Errorf("[spec %d] expected reservation size to be %d; got %d", specIndex, spec.expRegionSize, rsvSize)
				}

				return 0xbadf00d, nil
			}

			ptr := sysReserve(nil, uintptr(spec.reqSize), &reserved)
			if uintptr(ptr) == 0 {
				t.Errorf("[spec %d] sysReserve returned 0", specIndex)
				continue
			}
		}
	})

	t.Run("fail", func(t *testing.T) {
		defer func() {
			if err := recover(); err == nil {
				t.Fatal("expected sysReserve to panic")
			}
		}()

		earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) {
			return 0, &kernel.Error{Module: "test", Message: "consumed available address space"}
		}

		sysReserve(nil, uintptr(0xf00), &reserved)
	})
}

func TestSysMap(t *testing.T) {
	defer func() {
		earlyReserveRegionFn = vmm.EarlyReserveRegion
		mapFn = vmm.Map
	}()

	t.Run("success", func(t *testing.T) {
		specs := []struct {
			reqAddr         uintptr
			reqSize         mem.Size
			expRsvAddr      uintptr
			expMapCallCount int
		}{
			// exact multiple of page size
			{100 << mem.PageShift, 4 * mem.PageSize, 100 << mem.PageShift, 4},
			// address should be rounded up to nearest page size
			{(100 << mem.PageShift) + 1, 4 * mem.PageSize, 101 << mem.PageShift, 4},
			// size should be rounded up to nearest page size
			{1 << mem.PageShift, (4 * mem.PageSize) + 1, 1 << mem.PageShift, 5},
		}

		for specIndex, spec := range specs {
			var (
				sysStat      uint64
				mapCallCount int
			)
			mapFn = func(_ vmm.Page, _ pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
				expFlags := vmm.FlagPresent | vmm.FlagCopyOnWrite | vmm.FlagNoExecute
				if flags != expFlags {
					t.Errorf("[spec %d] expected map flags to be %d; got %d", specIndex, expFlags, flags)
				}
				mapCallCount++
				return nil
			}

			rsvPtr := sysMap(unsafe.Pointer(spec.reqAddr), uintptr(spec.reqSize), true, &sysStat)
			if got := uintptr(rsvPtr); got != spec.expRsvAddr {
				t.Errorf("[spec %d] expected mapped address 0x%x; got 0x%x", specIndex, spec.expRsvAddr, got)
			}

			if mapCallCount != spec.expMapCallCount {
				t.Errorf("[spec %d] expected vmm.Map call count to be %d; got %d", specIndex, spec.expMapCallCount, mapCallCount)
			}

			if exp := uint64(spec.expMapCallCount << mem.PageShift); sysStat != exp {
				t.Errorf("[spec %d] expected stat counter to be %d; got %d", specIndex, exp, sysStat)
			}
		}
	})

	t.Run("map fails", func(t *testing.T) {
		mapFn = func(_ vmm.Page, _ pmm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error {
			return &kernel.Error{Module: "test", Message: "map failed"}
		}

		var sysStat uint64
		if got := sysMap(unsafe.Pointer(uintptr(0xbadf00d)), 1, true, &sysStat); got != unsafe.Pointer(uintptr(0)) {
			t.Fatalf("expected sysMap to return 0x0 if Map returns an error; got 0x%x", uintptr(got))
		}
	})

	t.Run("panic if not reserved", func(t *testing.T) {
		defer func() {
			if err := recover(); err == nil {
				t.Fatal("expected sysMap to panic")
			}
		}()

		sysMap(nil, 0, false, nil)
	})
}
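These tests can intercept the mapping calls only because bootstrap.go routes them through the package-level variables mapFn and earlyReserveRegionFn rather than calling vmm.Map and vmm.EarlyReserveRegion directly. A stripped-down sketch of that test seam, with illustrative names that are not part of this commit:

package seam

// mapPage is the indirection the production code calls; tests may reassign it
// and restore the original in a deferred statement.
var mapPage = realMapPage

func realMapPage(page uintptr) error {
	// real page-table work would happen here
	return nil
}

// MapRegion maps pageCount consecutive pages starting at page, via the seam.
func MapRegion(page uintptr, pageCount int) error {
	for ; pageCount > 0; pageCount, page = pageCount-1, page+1 {
		if err := mapPage(page); err != nil {
			return err
		}
	}
	return nil
}

A test would assign mapPage = func(uintptr) error { ... } to count or fail calls and defer mapPage = realMapPage to restore the real implementation, just as the deferred statements at the top of TestSysReserve and TestSysMap restore vmm.EarlyReserveRegion and vmm.Map.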