From ce34763e23f815314e38d8aaaebbf6d1db63f4a7 Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Fri, 30 Jun 2017 06:54:51 +0100 Subject: [PATCH 1/6] Redirects tool should only consider comments attached to functions --- tools/redirects/redirects.go | 46 +++++++++++++++++------------------- 1 file changed, 22 insertions(+), 24 deletions(-) diff --git a/tools/redirects/redirects.go b/tools/redirects/redirects.go index 5e6d3b4..18ecdfb 100644 --- a/tools/redirects/redirects.go +++ b/tools/redirects/redirects.go @@ -76,35 +76,33 @@ func findRedirects(goFiles []string) ([]*redirect, error) { cmap := ast.NewCommentMap(fset, f, f.Comments) cmap.Filter(f) - for astNode, commentGroups := range cmap { + for astNode := range cmap { fnDecl, ok := astNode.(*ast.FuncDecl) - if !ok { + if !ok || fnDecl.Doc == nil { continue } - for _, commentGroup := range commentGroups { - for _, comment := range commentGroup.List { - if !strings.Contains(comment.Text, "go:redirect-from") { - continue - } - - // build qualified name to fn - fqName := fmt.Sprintf("%s/%s.%s", - prefix, - goFile[:strings.LastIndexByte(goFile, '/')], - fnDecl.Name, - ) - - fields := strings.Fields(comment.Text) - if len(fields) != 2 || fields[0] != "//go:redirect-from" { - return nil, fmt.Errorf("malformed go:redirect-from syntax for %q", fqName) - } - - redirects = append(redirects, &redirect{ - src: fields[1], - dst: fqName, - }) + for _, comment := range fnDecl.Doc.List { + if !strings.Contains(comment.Text, "go:redirect-from") { + continue } + + // build qualified name to fn + fqName := fmt.Sprintf("%s/%s.%s", + prefix, + goFile[:strings.LastIndexByte(goFile, '/')], + fnDecl.Name, + ) + + fields := strings.Fields(comment.Text) + if len(fields) != 2 || fields[0] != "//go:redirect-from" { + return nil, fmt.Errorf("malformed go:redirect-from syntax for %q\n-> %q", fqName, comment.Text) + } + + redirects = append(redirects, &redirect{ + src: fields[1], + dst: fqName, + }) } } } From 5e5e9f1c0b2b69ed86a6a57896eec1087d140333 Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Mon, 26 Jun 2017 08:09:21 +0100 Subject: [PATCH 2/6] Redirect runtime.throw to kernel.Panic via kernel.panicString --- kernel/panic.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/kernel/panic.go b/kernel/panic.go index b05e198..562e3eb 100644 --- a/kernel/panic.go +++ b/kernel/panic.go @@ -23,8 +23,8 @@ func Panic(e interface{}) { case *Error: err = t case string: - errRuntimePanic.Message = t - err = errRuntimePanic + panicString(t) + return case error: errRuntimePanic.Message = t.Error() err = errRuntimePanic @@ -39,3 +39,10 @@ func Panic(e interface{}) { cpuHaltFn() } + +// panicString serves as a redirect target for runtime.throw +//go:redirect-from runtime.throw +func panicString(msg string) { + errRuntimePanic.Message = msg + Panic(errRuntimePanic) +} From 6ee95b439e2639ff4d0c2898eb74a01917ca53d3 Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Fri, 30 Jun 2017 06:59:26 +0100 Subject: [PATCH 3/6] Provide replacement for runtime.sysReserve --- kernel/goruntime/bootstrap.go | 79 +++++++++++++++++ kernel/goruntime/bootstrap.s | 1 + kernel/goruntime/bootstrap_test.go | 132 +++++++++++++++++++++++++++++ 3 files changed, 212 insertions(+) create mode 100644 kernel/goruntime/bootstrap.go create mode 100644 kernel/goruntime/bootstrap.s create mode 100644 kernel/goruntime/bootstrap_test.go diff --git a/kernel/goruntime/bootstrap.go b/kernel/goruntime/bootstrap.go new file mode 100644 index 
0000000..981fb28
--- /dev/null
+++ b/kernel/goruntime/bootstrap.go
@@ -0,0 +1,79 @@
+// Package goruntime contains code for bootstrapping Go runtime features such
+// as the memory allocator.
+package goruntime
+
+import (
+	"unsafe"
+
+	"github.com/achilleasa/gopher-os/kernel/mem"
+	"github.com/achilleasa/gopher-os/kernel/mem/vmm"
+)
+
+var (
+	mapFn                = vmm.Map
+	earlyReserveRegionFn = vmm.EarlyReserveRegion
+)
+
+//go:linkname mSysStatInc runtime.mSysStatInc
+func mSysStatInc(*uint64, uintptr)
+
+// sysReserve reserves address space without allocating any memory or
+// establishing any page mappings.
+//
+// This function replaces runtime.sysReserve and is required for initializing
+// the Go allocator.
+//
+//go:redirect-from runtime.sysReserve
+//go:nosplit
+func sysReserve(_ unsafe.Pointer, size uintptr, reserved *bool) unsafe.Pointer {
+	regionSize := (mem.Size(size) + mem.PageSize - 1) & ^(mem.PageSize - 1)
+	regionStartAddr, err := earlyReserveRegionFn(regionSize)
+	if err != nil {
+		panic(err)
+	}
+
+	*reserved = true
+	return unsafe.Pointer(regionStartAddr)
+}
+
+// sysMap establishes a copy-on-write mapping for a particular memory region
+// that has been reserved previously via a call to sysReserve.
+//
+// This function replaces runtime.sysMap and is required for initializing
+// the Go allocator.
+//
+//go:redirect-from runtime.sysMap
+//go:nosplit
+func sysMap(virtAddr unsafe.Pointer, size uintptr, reserved bool, sysStat *uint64) unsafe.Pointer {
+	if !reserved {
+		panic("sysMap should only be called with reserved=true")
+	}
+
+	// We trust the allocator to call sysMap with an address inside a reserved region.
+	regionStartAddr := (uintptr(virtAddr) + uintptr(mem.PageSize-1)) & ^uintptr(mem.PageSize-1)
+	regionSize := (mem.Size(size) + mem.PageSize - 1) & ^(mem.PageSize - 1)
+	pageCount := regionSize >> mem.PageShift
+
+	mapFlags := vmm.FlagPresent | vmm.FlagNoExecute | vmm.FlagCopyOnWrite
+	for page := vmm.PageFromAddress(regionStartAddr); pageCount > 0; pageCount, page = pageCount-1, page+1 {
+		if err := mapFn(page, vmm.ReservedZeroedFrame, mapFlags); err != nil {
+			return unsafe.Pointer(uintptr(0))
+		}
+	}
+
+	mSysStatInc(sysStat, uintptr(regionSize))
+	return unsafe.Pointer(regionStartAddr)
+}
+
+func init() {
+	// Dummy calls so the compiler does not optimize away the functions in
+	// this file.
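// Illustration (not part of this patch): given the go:redirect-from
// annotations above, the redirects tool from the first patch would emit
// pairs like the following, assuming its prefix constant resolves to this
// repository's import path:
//
//	redirect{src: "runtime.sysReserve", dst: "github.com/achilleasa/gopher-os/kernel/goruntime.sysReserve"}
//	redirect{src: "runtime.sysMap", dst: "github.com/achilleasa/gopher-os/kernel/goruntime.sysMap"}
//
// Each annotation must be a single doc-comment line of exactly two fields,
// "//go:redirect-from <runtime symbol>", attached to the redirect target;
// anything else makes findRedirects return a malformed-syntax error.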
+ var ( + reserved bool + stat uint64 + zeroPtr = unsafe.Pointer(uintptr(0)) + ) + + sysReserve(zeroPtr, 0, &reserved) + sysMap(zeroPtr, 0, reserved, &stat) +} diff --git a/kernel/goruntime/bootstrap.s b/kernel/goruntime/bootstrap.s new file mode 100644 index 0000000..d590585 --- /dev/null +++ b/kernel/goruntime/bootstrap.s @@ -0,0 +1 @@ +// dummy file to prevent compiler errors for using go:linkname diff --git a/kernel/goruntime/bootstrap_test.go b/kernel/goruntime/bootstrap_test.go new file mode 100644 index 0000000..45d6148 --- /dev/null +++ b/kernel/goruntime/bootstrap_test.go @@ -0,0 +1,132 @@ +package goruntime + +import ( + "testing" + "unsafe" + + "github.com/achilleasa/gopher-os/kernel" + "github.com/achilleasa/gopher-os/kernel/mem" + "github.com/achilleasa/gopher-os/kernel/mem/pmm" + "github.com/achilleasa/gopher-os/kernel/mem/vmm" +) + +func TestSysReserve(t *testing.T) { + defer func() { + earlyReserveRegionFn = vmm.EarlyReserveRegion + }() + var reserved bool + + t.Run("success", func(t *testing.T) { + specs := []struct { + reqSize mem.Size + expRegionSize mem.Size + }{ + // exact multiple of page size + {100 << mem.PageShift, 100 << mem.PageShift}, + // size should be rounded up to nearest page size + {2*mem.PageSize - 1, 2 * mem.PageSize}, + } + + for specIndex, spec := range specs { + earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) { + if rsvSize != spec.expRegionSize { + t.Errorf("[spec %d] expected reservation size to be %d; got %d", specIndex, spec.expRegionSize, rsvSize) + } + + return 0xbadf00d, nil + } + + ptr := sysReserve(nil, uintptr(spec.reqSize), &reserved) + if uintptr(ptr) == 0 { + t.Errorf("[spec %d] sysReserve returned 0", specIndex) + continue + } + } + }) + + t.Run("fail", func(t *testing.T) { + defer func() { + if err := recover(); err == nil { + t.Fatal("expected sysReserve to panic") + } + }() + + earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) { + return 0, &kernel.Error{Module: "test", Message: "consumed available address space"} + } + + sysReserve(nil, uintptr(0xf00), &reserved) + }) +} + +func TestSysMap(t *testing.T) { + defer func() { + earlyReserveRegionFn = vmm.EarlyReserveRegion + mapFn = vmm.Map + }() + + t.Run("success", func(t *testing.T) { + specs := []struct { + reqAddr uintptr + reqSize mem.Size + expRsvAddr uintptr + expMapCallCount int + }{ + // exact multiple of page size + {100 << mem.PageShift, 4 * mem.PageSize, 100 << mem.PageShift, 4}, + // address should be rounded up to nearest page size + {(100 << mem.PageShift) + 1, 4 * mem.PageSize, 101 << mem.PageShift, 4}, + // size should be rounded up to nearest page size + {1 << mem.PageShift, (4 * mem.PageSize) + 1, 1 << mem.PageShift, 5}, + } + + for specIndex, spec := range specs { + var ( + sysStat uint64 + mapCallCount int + ) + mapFn = func(_ vmm.Page, _ pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error { + expFlags := vmm.FlagPresent | vmm.FlagCopyOnWrite | vmm.FlagNoExecute + if flags != expFlags { + t.Errorf("[spec %d] expected map flags to be %d; got %d", specIndex, expFlags, flags) + } + mapCallCount++ + return nil + } + + rsvPtr := sysMap(unsafe.Pointer(spec.reqAddr), uintptr(spec.reqSize), true, &sysStat) + if got := uintptr(rsvPtr); got != spec.expRsvAddr { + t.Errorf("[spec %d] expected mapped address 0x%x; got 0x%x", specIndex, spec.expRsvAddr, got) + } + + if mapCallCount != spec.expMapCallCount { + t.Errorf("[spec %d] expected vmm.Map call count to be %d; got %d", specIndex, spec.expMapCallCount, mapCallCount) + } + + if 
exp := uint64(spec.expMapCallCount << mem.PageShift); sysStat != exp { + t.Errorf("[spec %d] expected stat counter to be %d; got %d", specIndex, exp, sysStat) + } + } + }) + + t.Run("map fails", func(t *testing.T) { + mapFn = func(_ vmm.Page, _ pmm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error { + return &kernel.Error{Module: "test", Message: "map failed"} + } + + var sysStat uint64 + if got := sysMap(unsafe.Pointer(uintptr(0xbadf00d)), 1, true, &sysStat); got != unsafe.Pointer(uintptr(0)) { + t.Fatalf("expected sysMap to return 0x0 if Map returns an error; got 0x%x", uintptr(got)) + } + }) + + t.Run("panic if not reserved", func(t *testing.T) { + defer func() { + if err := recover(); err == nil { + t.Fatal("expected sysMap to panic") + } + }() + + sysMap(nil, 0, false, nil) + }) +} From 636220ab1d3a15b188026dca6755414386303653 Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Fri, 30 Jun 2017 07:48:37 +0100 Subject: [PATCH 4/6] Unexport bitmap allocator instance and export AllocFrame helper This allows us to mock calls to the frame allocator from other packages while testing. --- kernel/mem/pmm/allocator/bitmap_allocator.go | 14 +++++++------- kernel/mem/pmm/allocator/bitmap_allocator_test.go | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/kernel/mem/pmm/allocator/bitmap_allocator.go b/kernel/mem/pmm/allocator/bitmap_allocator.go index 392f237..f0e8602 100644 --- a/kernel/mem/pmm/allocator/bitmap_allocator.go +++ b/kernel/mem/pmm/allocator/bitmap_allocator.go @@ -14,9 +14,9 @@ import ( ) var ( - // FrameAllocator is a BitmapAllocator instance that serves as the + // bitmapAllocator is a BitmapAllocator instance that serves as the // primary allocator for reserving pages. - FrameAllocator BitmapAllocator + bitmapAllocator BitmapAllocator errBitmapAllocOutOfMemory = &kernel.Error{Module: "bitmap_alloc", Message: "out of memory"} errBitmapAllocFrameNotManaged = &kernel.Error{Module: "bitmap_alloc", Message: "frame not managed by this allocator"} @@ -306,10 +306,10 @@ func earlyAllocFrame() (pmm.Frame, *kernel.Error) { return earlyAllocator.AllocFrame() } -// sysAllocFrame is a helper that delegates a frame allocation request to the +// AllocFrame is a helper that delegates a frame allocation request to the // bitmap allocator instance. -func sysAllocFrame() (pmm.Frame, *kernel.Error) { - return FrameAllocator.AllocFrame() +func AllocFrame() (pmm.Frame, *kernel.Error) { + return bitmapAllocator.AllocFrame() } // Init sets up the kernel physical memory allocation sub-system. 
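// Illustration (not part of this patch): exporting AllocFrame lets a consumer
// package route frame allocations through a package-level function variable
// and swap in a stub while testing, which is exactly the pattern the
// goruntime bootstrap code below relies on:
//
//	var frameAllocFn = allocator.AllocFrame
//
//	func TestWithStubbedFrames(t *testing.T) { // hypothetical test name
//		defer func() { frameAllocFn = allocator.AllocFrame }()
//		frameAllocFn = func() (pmm.Frame, *kernel.Error) {
//			return pmm.Frame(0), nil // hand out a canned frame
//		}
//		// code under test now allocates frames from the stub instead of
//		// the bitmap allocator
//	}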
@@ -318,10 +318,10 @@ func Init(kernelStart, kernelEnd uintptr) *kernel.Error {
 	earlyAllocator.printMemoryMap()
 
 	vmm.SetFrameAllocator(earlyAllocFrame)
-	if err := FrameAllocator.init(); err != nil {
+	if err := bitmapAllocator.init(); err != nil {
 		return err
 	}
-	vmm.SetFrameAllocator(sysAllocFrame)
+	vmm.SetFrameAllocator(AllocFrame)
 
 	return nil
 }
diff --git a/kernel/mem/pmm/allocator/bitmap_allocator_test.go b/kernel/mem/pmm/allocator/bitmap_allocator_test.go
index 0bb97e6..d1d214c 100644
--- a/kernel/mem/pmm/allocator/bitmap_allocator_test.go
+++ b/kernel/mem/pmm/allocator/bitmap_allocator_test.go
@@ -413,7 +413,7 @@ func TestAllocatorPackageInit(t *testing.T) {
 		}
 
 		// At this point sysAllocFrame should work
-		if _, err := sysAllocFrame(); err != nil {
+		if _, err := AllocFrame(); err != nil {
 			t.Fatal(err)
 		}
 	})

From b4f4a9a738649370bfcf9b8fd711674804cbad7f Mon Sep 17 00:00:00 2001
From: Achilleas Anagnostopoulos
Date: Fri, 30 Jun 2017 07:55:12 +0100
Subject: [PATCH 5/6] Provide replacement for runtime.sysAlloc

---
 kernel/goruntime/bootstrap.go      |  36 ++++++++++
 kernel/goruntime/bootstrap_test.go | 105 +++++++++++++++++++++++++++++
 2 files changed, 141 insertions(+)

diff --git a/kernel/goruntime/bootstrap.go b/kernel/goruntime/bootstrap.go
index 981fb28..8e3b63f 100644
--- a/kernel/goruntime/bootstrap.go
+++ b/kernel/goruntime/bootstrap.go
@@ -6,12 +6,14 @@ import (
 	"unsafe"
 
 	"github.com/achilleasa/gopher-os/kernel/mem"
+	"github.com/achilleasa/gopher-os/kernel/mem/pmm/allocator"
 	"github.com/achilleasa/gopher-os/kernel/mem/vmm"
 )
 
 var (
 	mapFn                = vmm.Map
 	earlyReserveRegionFn = vmm.EarlyReserveRegion
+	frameAllocFn         = allocator.AllocFrame
 )
 
 //go:linkname mSysStatInc runtime.mSysStatInc
 func mSysStatInc(*uint64, uintptr)
@@ -65,6 +67,39 @@ func sysMap(virtAddr unsafe.Pointer, size uintptr, reserved bool, sysStat *uint6
 	return unsafe.Pointer(regionStartAddr)
 }
 
+// sysAlloc reserves enough physical frames to satisfy the allocation request
+// and establishes a contiguous virtual page mapping for them, returning the
+// pointer to the virtual region start.
+//
+// This function replaces runtime.sysAlloc and is required for initializing the
+// Go allocator.
+//
+//go:redirect-from runtime.sysAlloc
+//go:nosplit
+func sysAlloc(size uintptr, sysStat *uint64) unsafe.Pointer {
+	regionSize := (mem.Size(size) + mem.PageSize - 1) & ^(mem.PageSize - 1)
+	regionStartAddr, err := earlyReserveRegionFn(regionSize)
+	if err != nil {
+		return unsafe.Pointer(uintptr(0))
+	}
+
+	mapFlags := vmm.FlagPresent | vmm.FlagNoExecute | vmm.FlagRW
+	pageCount := regionSize >> mem.PageShift
+	for page := vmm.PageFromAddress(regionStartAddr); pageCount > 0; pageCount, page = pageCount-1, page+1 {
+		frame, err := frameAllocFn()
+		if err != nil {
+			return unsafe.Pointer(uintptr(0))
+		}
+
+		if err = mapFn(page, frame, mapFlags); err != nil {
+			return unsafe.Pointer(uintptr(0))
+		}
+	}
+
+	mSysStatInc(sysStat, uintptr(regionSize))
+	return unsafe.Pointer(regionStartAddr)
+}
+
 func init() {
 	// Dummy calls so the compiler does not optimize away the functions in
 	// this file.
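// Illustration (not part of this patch): assuming mem.PageSize is the usual
// 4 KiB, a call such as sysAlloc(16385, &stat) rounds the request up to
// 5 pages (20480 bytes), reserves a 20480-byte region via EarlyReserveRegion,
// performs five AllocFrame/Map pairs to back it with RW, no-execute pages,
// and finally bumps *stat by 20480. Any reservation, frame-allocation or
// mapping failure makes it return a nil pointer instead of panicking, which
// is what callers of the runtime's sysAlloc check for.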
@@ -76,4 +111,5 @@ func init() {
 
 	sysReserve(zeroPtr, 0, &reserved)
 	sysMap(zeroPtr, 0, reserved, &stat)
+	sysAlloc(0, &stat)
 }
diff --git a/kernel/goruntime/bootstrap_test.go b/kernel/goruntime/bootstrap_test.go
index 45d6148..603f123 100644
--- a/kernel/goruntime/bootstrap_test.go
+++ b/kernel/goruntime/bootstrap_test.go
@@ -7,6 +7,7 @@ import (
 	"github.com/achilleasa/gopher-os/kernel"
 	"github.com/achilleasa/gopher-os/kernel/mem"
 	"github.com/achilleasa/gopher-os/kernel/mem/pmm"
+	"github.com/achilleasa/gopher-os/kernel/mem/pmm/allocator"
 	"github.com/achilleasa/gopher-os/kernel/mem/vmm"
 )
 
@@ -130,3 +131,107 @@ func TestSysMap(t *testing.T) {
 		sysMap(nil, 0, false, nil)
 	})
 }
+
+func TestSysAlloc(t *testing.T) {
+	defer func() {
+		earlyReserveRegionFn = vmm.EarlyReserveRegion
+		mapFn = vmm.Map
+		frameAllocFn = allocator.AllocFrame
+	}()
+
+	t.Run("success", func(t *testing.T) {
+		specs := []struct {
+			reqSize         mem.Size
+			expMapCallCount int
+		}{
+			// exact multiple of page size
+			{4 * mem.PageSize, 4},
+			// round up to nearest page size
+			{(4 * mem.PageSize) + 1, 5},
+		}
+
+		expRegionStartAddr := uintptr(10 * mem.PageSize)
+		earlyReserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
+			return expRegionStartAddr, nil
+		}
+
+		frameAllocFn = func() (pmm.Frame, *kernel.Error) {
+			return pmm.Frame(0), nil
+		}
+
+		for specIndex, spec := range specs {
+			var (
+				sysStat      uint64
+				mapCallCount int
+			)
+
+			mapFn = func(_ vmm.Page, _ pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
+				expFlags := vmm.FlagPresent | vmm.FlagNoExecute | vmm.FlagRW
+				if flags != expFlags {
+					t.Errorf("[spec %d] expected map flags to be %d; got %d", specIndex, expFlags, flags)
+				}
+				mapCallCount++
+				return nil
+			}
+
+			if got := sysAlloc(uintptr(spec.reqSize), &sysStat); uintptr(got) != expRegionStartAddr {
+				t.Errorf("[spec %d] expected sysAlloc to return address 0x%x; got 0x%x", specIndex, expRegionStartAddr, uintptr(got))
+			}
+
+			if mapCallCount != spec.expMapCallCount {
+				t.Errorf("[spec %d] expected vmm.Map call count to be %d; got %d", specIndex, spec.expMapCallCount, mapCallCount)
+			}
+
+			if exp := uint64(spec.expMapCallCount << mem.PageShift); sysStat != exp {
+				t.Errorf("[spec %d] expected stat counter to be %d; got %d", specIndex, exp, sysStat)
+			}
+		}
+	})
+
+	t.Run("earlyReserveRegion fails", func(t *testing.T) {
+		earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) {
+			return 0, &kernel.Error{Module: "test", Message: "consumed available address space"}
+		}
+
+		var sysStat uint64
+		if got := sysAlloc(1, &sysStat); got != unsafe.Pointer(uintptr(0)) {
+			t.Fatalf("expected sysAlloc to return 0x0 if EarlyReserveRegion returns an error; got 0x%x", uintptr(got))
+		}
+	})
+
+	t.Run("frame allocation fails", func(t *testing.T) {
+		expRegionStartAddr := uintptr(10 * mem.PageSize)
+		earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) {
+			return expRegionStartAddr, nil
+		}
+
+		frameAllocFn = func() (pmm.Frame, *kernel.Error) {
+			return pmm.InvalidFrame, &kernel.Error{Module: "test", Message: "out of memory"}
+		}
+
+		var sysStat uint64
+		if got := sysAlloc(1, &sysStat); got != unsafe.Pointer(uintptr(0)) {
+			t.Fatalf("expected sysAlloc to return 0x0 if AllocFrame returns an error; got 0x%x", uintptr(got))
+		}
+	})
+
+	t.Run("map fails", func(t *testing.T) {
+		expRegionStartAddr := uintptr(10 * mem.PageSize)
+		earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) {
+			return expRegionStartAddr, nil
+		}
+
+		frameAllocFn = func() (pmm.Frame, *kernel.Error) {
+			return pmm.Frame(0), nil
+		}
+
+		mapFn = func(_ vmm.Page, _ pmm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error {
+			return &kernel.Error{Module: "test", Message: "map failed"}
+		}
+
+		var sysStat uint64
+		if got := sysAlloc(1, &sysStat); got != unsafe.Pointer(uintptr(0)) {
+			t.Fatalf("expected sysAlloc to return 0x0 if Map returns an error; got 0x%x", uintptr(got))
+		}
+	})
+}

From effc6710d97a71f1ffd419ad6e98f8b544b4f44f Mon Sep 17 00:00:00 2001
From: Achilleas Anagnostopoulos
Date: Fri, 30 Jun 2017 08:04:10 +0100
Subject: [PATCH 6/6] Initialize Go memory allocator

---
 kernel/goruntime/bootstrap.go      | 30 ++++++++++++++++++++++++++++++
 kernel/goruntime/bootstrap_test.go | 11 +++++++++++
 kernel/kmain/kmain.go              |  3 +++
 3 files changed, 44 insertions(+)

diff --git a/kernel/goruntime/bootstrap.go b/kernel/goruntime/bootstrap.go
index 8e3b63f..e78cc35 100644
--- a/kernel/goruntime/bootstrap.go
+++ b/kernel/goruntime/bootstrap.go
@@ -5,6 +5,7 @@ package goruntime
 import (
 	"unsafe"
 
+	"github.com/achilleasa/gopher-os/kernel"
 	"github.com/achilleasa/gopher-os/kernel/mem"
 	"github.com/achilleasa/gopher-os/kernel/mem/pmm/allocator"
 	"github.com/achilleasa/gopher-os/kernel/mem/vmm"
 )
@@ -14,8 +15,12 @@ var (
 	mapFn                = vmm.Map
 	earlyReserveRegionFn = vmm.EarlyReserveRegion
 	frameAllocFn         = allocator.AllocFrame
+	mallocInitFn         = mallocInit
 )
 
+//go:linkname mallocInit runtime.mallocinit
+func mallocInit()
+
 //go:linkname mSysStatInc runtime.mSysStatInc
 func mSysStatInc(*uint64, uintptr)
 
@@ -100,6 +105,30 @@ func sysAlloc(size uintptr, sysStat *uint64) unsafe.Pointer {
 	return unsafe.Pointer(regionStartAddr)
 }
 
+// nanotime returns a monotonically increasing clock value. This is a dummy
+// implementation and will be replaced when the timekeeper package is
+// implemented.
+//
+// This function replaces runtime.nanotime and is invoked by the Go allocator
+// when a span allocation is performed.
+//
+//go:redirect-from runtime.nanotime
+//go:nosplit
+func nanotime() uint64 {
+	// Use a dummy loop to prevent the compiler from inlining this function.
+	for i := 0; i < 100; i++ {
+	}
+	return 1
+}
+
+// Init enables support for various Go runtime features. After a call to Init
+// the following runtime features become available for use:
+// - heap memory allocation (new, make, etc.)
+func Init() *kernel.Error {
+	mallocInitFn()
+	return nil
+}
+
 func init() {
 	// Dummy calls so the compiler does not optimize away the functions in
 	// this file.
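// Illustration (not part of this patch): once Kmain wires in goruntime.Init
// (see the kmain.go hunk below) and mallocinit has been run against the
// sysReserve/sysMap/sysAlloc replacements above, ordinary heap allocation
// becomes usable inside the kernel, e.g.:
//
//	buf := make([]byte, 4096) // backed by frames from the bitmap allocator
//	_ = buf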
@@ -112,4 +141,5 @@ func init() {
 	sysReserve(zeroPtr, 0, &reserved)
 	sysMap(zeroPtr, 0, reserved, &stat)
 	sysAlloc(0, &stat)
+	stat = nanotime()
 }
diff --git a/kernel/goruntime/bootstrap_test.go b/kernel/goruntime/bootstrap_test.go
index 603f123..0ab567f 100644
--- a/kernel/goruntime/bootstrap_test.go
+++ b/kernel/goruntime/bootstrap_test.go
@@ -235,3 +235,14 @@ func TestSysAlloc(t *testing.T) {
 		}
 	})
 }
+
+func TestInit(t *testing.T) {
+	defer func() {
+		mallocInitFn = mallocInit
+	}()
+	mallocInitFn = func() {}
+
+	if err := Init(); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/kernel/kmain/kmain.go b/kernel/kmain/kmain.go
index 2c59342..631d67e 100644
--- a/kernel/kmain/kmain.go
+++ b/kernel/kmain/kmain.go
@@ -2,6 +2,7 @@ package kmain
 
 import (
 	"github.com/achilleasa/gopher-os/kernel"
+	"github.com/achilleasa/gopher-os/kernel/goruntime"
 	"github.com/achilleasa/gopher-os/kernel/hal"
 	"github.com/achilleasa/gopher-os/kernel/hal/multiboot"
 	"github.com/achilleasa/gopher-os/kernel/mem/pmm/allocator"
@@ -34,6 +35,8 @@ func Kmain(multibootInfoPtr, kernelStart, kernelEnd uintptr) {
 		panic(err)
 	} else if err = vmm.Init(); err != nil {
 		panic(err)
+	} else if err = goruntime.Init(); err != nil {
+		panic(err)
 	}
 
 	// Use kernel.Panic instead of panic to prevent the compiler from