Mirror of https://github.com/taigrr/gopher-os (synced 2025-01-18 04:43:13 -08:00)
Merge pull request #33 from achilleasa/bootstrap-go-allocator

Bootstrap Go allocator

Commit 264ea09b9a
kernel/goruntime/bootstrap.go (new file, 145 lines)
@@ -0,0 +1,145 @@
// Package goruntime contains code for bootstrapping Go runtime features such
// as the memory allocator.
package goruntime

import (
	"unsafe"

	"github.com/achilleasa/gopher-os/kernel"
	"github.com/achilleasa/gopher-os/kernel/mem"
	"github.com/achilleasa/gopher-os/kernel/mem/pmm/allocator"
	"github.com/achilleasa/gopher-os/kernel/mem/vmm"
)

var (
	mapFn                = vmm.Map
	earlyReserveRegionFn = vmm.EarlyReserveRegion
	frameAllocFn         = allocator.AllocFrame
	mallocInitFn         = mallocInit
)

//go:linkname mallocInit runtime.mallocinit
func mallocInit()

//go:linkname mSysStatInc runtime.mSysStatInc
func mSysStatInc(*uint64, uintptr)

// sysReserve reserves address space without allocating any memory or
// establishing any page mappings.
//
// This function replaces runtime.sysReserve and is required for initializing
// the Go allocator.
//
//go:redirect-from runtime.sysReserve
//go:nosplit
func sysReserve(_ unsafe.Pointer, size uintptr, reserved *bool) unsafe.Pointer {
	regionSize := (mem.Size(size) + mem.PageSize - 1) & ^(mem.PageSize - 1)
	regionStartAddr, err := earlyReserveRegionFn(regionSize)
	if err != nil {
		panic(err)
	}

	*reserved = true
	return unsafe.Pointer(regionStartAddr)
}

// sysMap establishes a copy-on-write mapping for a particular memory region
// that has been reserved previously via a call to sysReserve.
//
// This function replaces runtime.sysMap and is required for initializing
// the Go allocator.
//
//go:redirect-from runtime.sysMap
//go:nosplit
func sysMap(virtAddr unsafe.Pointer, size uintptr, reserved bool, sysStat *uint64) unsafe.Pointer {
	if !reserved {
		panic("sysMap should only be called with reserved=true")
	}

	// We trust the allocator to call sysMap with an address inside a reserved region.
	regionStartAddr := (uintptr(virtAddr) + uintptr(mem.PageSize-1)) & ^uintptr(mem.PageSize-1)
	regionSize := (mem.Size(size) + mem.PageSize - 1) & ^(mem.PageSize - 1)
	pageCount := regionSize >> mem.PageShift

	mapFlags := vmm.FlagPresent | vmm.FlagNoExecute | vmm.FlagCopyOnWrite
	for page := vmm.PageFromAddress(regionStartAddr); pageCount > 0; pageCount, page = pageCount-1, page+1 {
		if err := mapFn(page, vmm.ReservedZeroedFrame, mapFlags); err != nil {
			return unsafe.Pointer(uintptr(0))
		}
	}

	mSysStatInc(sysStat, uintptr(regionSize))
	return unsafe.Pointer(regionStartAddr)
}

// sysAlloc reserves enough physical frames to satisfy the allocation request
// and establishes a contiguous virtual page mapping for them, returning the
// pointer to the start of the virtual region.
//
// This function replaces runtime.sysAlloc and is required for initializing the
// Go allocator.
//
//go:redirect-from runtime.sysAlloc
//go:nosplit
func sysAlloc(size uintptr, sysStat *uint64) unsafe.Pointer {
	regionSize := (mem.Size(size) + mem.PageSize - 1) & ^(mem.PageSize - 1)
	regionStartAddr, err := earlyReserveRegionFn(regionSize)
	if err != nil {
		return unsafe.Pointer(uintptr(0))
	}

	mapFlags := vmm.FlagPresent | vmm.FlagNoExecute | vmm.FlagRW
	pageCount := regionSize >> mem.PageShift
	for page := vmm.PageFromAddress(regionStartAddr); pageCount > 0; pageCount, page = pageCount-1, page+1 {
		frame, err := frameAllocFn()
		if err != nil {
			return unsafe.Pointer(uintptr(0))
		}

		if err = mapFn(page, frame, mapFlags); err != nil {
			return unsafe.Pointer(uintptr(0))
		}
	}

	mSysStatInc(sysStat, uintptr(regionSize))
	return unsafe.Pointer(regionStartAddr)
}

// nanotime returns a monotonically increasing clock value. This is a dummy
// implementation and will be replaced when the timekeeper package is
// implemented.
//
// This function replaces runtime.nanotime and is invoked by the Go allocator
// when a span allocation is performed.
//
//go:redirect-from runtime.nanotime
//go:nosplit
func nanotime() uint64 {
	// Use a dummy loop to prevent the compiler from inlining this function.
	for i := 0; i < 100; i++ {
	}
	return 1
}

// Init enables support for various Go runtime features. After a call to Init
// the following runtime features become available for use:
//  - heap memory allocation (new, make, etc.)
func Init() *kernel.Error {
	mallocInitFn()
	return nil
}

func init() {
	// Dummy calls so the compiler does not optimize away the functions in
	// this file.
	var (
		reserved bool
		stat     uint64
		zeroPtr  = unsafe.Pointer(uintptr(0))
	)

	sysReserve(zeroPtr, 0, &reserved)
	sysMap(zeroPtr, 0, reserved, &stat)
	sysAlloc(0, &stat)
	stat = nanotime()
}
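All three shims above round the requested size up to a page boundary with the same bit trick. A minimal, standalone sketch of that arithmetic for reference (the 4 KiB page size and the helper name are illustrative, not taken from the kernel's mem package):

package main

import "fmt"

// pageSize stands in for mem.PageSize; 4 KiB is assumed here for illustration.
const pageSize = 4096

// roundUpToPage rounds size up to the next multiple of pageSize. It relies on
// pageSize being a power of two: adding pageSize-1 and then clearing the
// low-order bits snaps the value to a page boundary.
func roundUpToPage(size uintptr) uintptr {
	return (size + pageSize - 1) &^ (pageSize - 1)
}

func main() {
	fmt.Println(roundUpToPage(1))        // 4096
	fmt.Println(roundUpToPage(4096))     // 4096 (already aligned)
	fmt.Println(roundUpToPage(4097))     // 8192
	fmt.Println(roundUpToPage(5 * 4096)) // 20480
}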
kernel/goruntime/bootstrap.s (new file, 1 line)
@@ -0,0 +1 @@
// dummy file to prevent compiler errors when using go:linkname
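The single-line assembly file exists only to satisfy the toolchain: the Go compiler accepts a function declaration without a body (like the go:linkname targets in bootstrap.go) only when the package also contains an assembly file, and go:linkname itself is only honoured in files that import unsafe. A minimal sketch of the pattern, reusing a runtime symbol already linked by this PR:

// sketch of the go:linkname pattern used by kernel/goruntime/bootstrap.go:
// bind a local, body-less declaration to an unexported runtime symbol. The
// empty bootstrap.s sitting next to it is what lets the compiler accept the
// missing function body.
package goruntime

import (
	_ "unsafe" // blank import: required for //go:linkname to be honoured
)

//go:linkname mallocInit runtime.mallocinit
func mallocInit()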
kernel/goruntime/bootstrap_test.go (new file, 248 lines)
@@ -0,0 +1,248 @@
package goruntime

import (
	"testing"
	"unsafe"

	"github.com/achilleasa/gopher-os/kernel"
	"github.com/achilleasa/gopher-os/kernel/mem"
	"github.com/achilleasa/gopher-os/kernel/mem/pmm"
	"github.com/achilleasa/gopher-os/kernel/mem/pmm/allocator"
	"github.com/achilleasa/gopher-os/kernel/mem/vmm"
)

func TestSysReserve(t *testing.T) {
	defer func() {
		earlyReserveRegionFn = vmm.EarlyReserveRegion
	}()
	var reserved bool

	t.Run("success", func(t *testing.T) {
		specs := []struct {
			reqSize       mem.Size
			expRegionSize mem.Size
		}{
			// exact multiple of page size
			{100 << mem.PageShift, 100 << mem.PageShift},
			// size should be rounded up to nearest page size
			{2*mem.PageSize - 1, 2 * mem.PageSize},
		}

		for specIndex, spec := range specs {
			earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) {
				if rsvSize != spec.expRegionSize {
					t.Errorf("[spec %d] expected reservation size to be %d; got %d", specIndex, spec.expRegionSize, rsvSize)
				}

				return 0xbadf00d, nil
			}

			ptr := sysReserve(nil, uintptr(spec.reqSize), &reserved)
			if uintptr(ptr) == 0 {
				t.Errorf("[spec %d] sysReserve returned 0", specIndex)
				continue
			}
		}
	})

	t.Run("fail", func(t *testing.T) {
		defer func() {
			if err := recover(); err == nil {
				t.Fatal("expected sysReserve to panic")
			}
		}()

		earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) {
			return 0, &kernel.Error{Module: "test", Message: "consumed available address space"}
		}

		sysReserve(nil, uintptr(0xf00), &reserved)
	})
}

func TestSysMap(t *testing.T) {
	defer func() {
		earlyReserveRegionFn = vmm.EarlyReserveRegion
		mapFn = vmm.Map
	}()

	t.Run("success", func(t *testing.T) {
		specs := []struct {
			reqAddr         uintptr
			reqSize         mem.Size
			expRsvAddr      uintptr
			expMapCallCount int
		}{
			// exact multiple of page size
			{100 << mem.PageShift, 4 * mem.PageSize, 100 << mem.PageShift, 4},
			// address should be rounded up to nearest page size
			{(100 << mem.PageShift) + 1, 4 * mem.PageSize, 101 << mem.PageShift, 4},
			// size should be rounded up to nearest page size
			{1 << mem.PageShift, (4 * mem.PageSize) + 1, 1 << mem.PageShift, 5},
		}

		for specIndex, spec := range specs {
			var (
				sysStat      uint64
				mapCallCount int
			)
			mapFn = func(_ vmm.Page, _ pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
				expFlags := vmm.FlagPresent | vmm.FlagCopyOnWrite | vmm.FlagNoExecute
				if flags != expFlags {
					t.Errorf("[spec %d] expected map flags to be %d; got %d", specIndex, expFlags, flags)
				}
				mapCallCount++
				return nil
			}

			rsvPtr := sysMap(unsafe.Pointer(spec.reqAddr), uintptr(spec.reqSize), true, &sysStat)
			if got := uintptr(rsvPtr); got != spec.expRsvAddr {
				t.Errorf("[spec %d] expected mapped address 0x%x; got 0x%x", specIndex, spec.expRsvAddr, got)
			}

			if mapCallCount != spec.expMapCallCount {
				t.Errorf("[spec %d] expected vmm.Map call count to be %d; got %d", specIndex, spec.expMapCallCount, mapCallCount)
			}

			if exp := uint64(spec.expMapCallCount << mem.PageShift); sysStat != exp {
				t.Errorf("[spec %d] expected stat counter to be %d; got %d", specIndex, exp, sysStat)
			}
		}
	})

	t.Run("map fails", func(t *testing.T) {
		mapFn = func(_ vmm.Page, _ pmm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error {
			return &kernel.Error{Module: "test", Message: "map failed"}
		}

		var sysStat uint64
		if got := sysMap(unsafe.Pointer(uintptr(0xbadf00d)), 1, true, &sysStat); got != unsafe.Pointer(uintptr(0)) {
			t.Fatalf("expected sysMap to return 0x0 if Map returns an error; got 0x%x", uintptr(got))
		}
	})

	t.Run("panic if not reserved", func(t *testing.T) {
		defer func() {
			if err := recover(); err == nil {
				t.Fatal("expected sysMap to panic")
			}
		}()

		sysMap(nil, 0, false, nil)
	})
}

func TestSysAlloc(t *testing.T) {
	defer func() {
		earlyReserveRegionFn = vmm.EarlyReserveRegion
		mapFn = vmm.Map
		frameAllocFn = allocator.AllocFrame
	}()

	t.Run("success", func(t *testing.T) {
		specs := []struct {
			reqSize         mem.Size
			expMapCallCount int
		}{
			// exact multiple of page size
			{4 * mem.PageSize, 4},
			// round up to nearest page size
			{(4 * mem.PageSize) + 1, 5},
		}

		expRegionStartAddr := uintptr(10 * mem.PageSize)
		earlyReserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
			return expRegionStartAddr, nil
		}

		frameAllocFn = func() (pmm.Frame, *kernel.Error) {
			return pmm.Frame(0), nil
		}

		for specIndex, spec := range specs {
			var (
				sysStat      uint64
				mapCallCount int
			)

			mapFn = func(_ vmm.Page, _ pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
				expFlags := vmm.FlagPresent | vmm.FlagNoExecute | vmm.FlagRW
				if flags != expFlags {
					t.Errorf("[spec %d] expected map flags to be %d; got %d", specIndex, expFlags, flags)
				}
				mapCallCount++
				return nil
			}

			if got := sysAlloc(uintptr(spec.reqSize), &sysStat); uintptr(got) != expRegionStartAddr {
				t.Errorf("[spec %d] expected sysAlloc to return address 0x%x; got 0x%x", specIndex, expRegionStartAddr, uintptr(got))
			}

			if mapCallCount != spec.expMapCallCount {
				t.Errorf("[spec %d] expected vmm.Map call count to be %d; got %d", specIndex, spec.expMapCallCount, mapCallCount)
			}

			if exp := uint64(spec.expMapCallCount << mem.PageShift); sysStat != exp {
				t.Errorf("[spec %d] expected stat counter to be %d; got %d", specIndex, exp, sysStat)
			}
		}
	})

	t.Run("earlyReserveRegion fails", func(t *testing.T) {
		earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) {
			return 0, &kernel.Error{Module: "test", Message: "consumed available address space"}
		}

		var sysStat uint64
		if got := sysAlloc(1, &sysStat); got != unsafe.Pointer(uintptr(0)) {
			t.Fatalf("expected sysAlloc to return 0x0 if EarlyReserveRegion returns an error; got 0x%x", uintptr(got))
		}
	})

	t.Run("frame allocation fails", func(t *testing.T) {
		expRegionStartAddr := uintptr(10 * mem.PageSize)
		earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) {
			return expRegionStartAddr, nil
		}

		frameAllocFn = func() (pmm.Frame, *kernel.Error) {
			return pmm.InvalidFrame, &kernel.Error{Module: "test", Message: "out of memory"}
		}

		var sysStat uint64
		if got := sysAlloc(1, &sysStat); got != unsafe.Pointer(uintptr(0)) {
			t.Fatalf("expected sysAlloc to return 0x0 if AllocFrame returns an error; got 0x%x", uintptr(got))
		}
	})

	t.Run("map fails", func(t *testing.T) {
		expRegionStartAddr := uintptr(10 * mem.PageSize)
		earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) {
			return expRegionStartAddr, nil
		}

		frameAllocFn = func() (pmm.Frame, *kernel.Error) {
			return pmm.Frame(0), nil
		}

		mapFn = func(_ vmm.Page, _ pmm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error {
			return &kernel.Error{Module: "test", Message: "map failed"}
		}

		var sysStat uint64
		if got := sysAlloc(1, &sysStat); got != unsafe.Pointer(uintptr(0)) {
			t.Fatalf("expected sysAlloc to return 0x0 if Map returns an error; got 0x%x", uintptr(got))
		}
	})
}

func TestInit(t *testing.T) {
	defer func() {
		mallocInitFn = mallocInit
	}()
	mallocInitFn = func() {}

	if err := Init(); err != nil {
		t.Fatal(err)
	}
}
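The tests above exercise the package by swapping the function variables declared in bootstrap.go (mapFn, earlyReserveRegionFn, frameAllocFn, mallocInitFn) for stubs and restoring the real implementations with a deferred assignment. A minimal, self-contained sketch of that seam pattern, using hypothetical names and keeping everything in one test file for brevity:

// seam_test.go (hypothetical package, not part of the PR)
package seam

import "testing"

// nowFn is the seam: production code calls through the variable so tests
// can substitute a stub.
var nowFn = realNow

func realNow() uint64 { return 0 } // stand-in for a real clock source

// Uptime is the production entry point that uses the seam.
func Uptime() uint64 { return nowFn() }

func TestUptime(t *testing.T) {
	defer func() { nowFn = realNow }() // restore the real implementation

	nowFn = func() uint64 { return 42 } // stub for this test
	if got := Uptime(); got != 42 {
		t.Fatalf("expected Uptime to return 42; got %d", got)
	}
}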
@@ -2,6 +2,7 @@ package kmain
 
 import (
 	"github.com/achilleasa/gopher-os/kernel"
+	"github.com/achilleasa/gopher-os/kernel/goruntime"
 	"github.com/achilleasa/gopher-os/kernel/hal"
 	"github.com/achilleasa/gopher-os/kernel/hal/multiboot"
 	"github.com/achilleasa/gopher-os/kernel/mem/pmm/allocator"
@@ -34,6 +35,8 @@ func Kmain(multibootInfoPtr, kernelStart, kernelEnd uintptr) {
 		panic(err)
 	} else if err = vmm.Init(); err != nil {
 		panic(err)
+	} else if err = goruntime.Init(); err != nil {
+		panic(err)
 	}
 
 	// Use kernel.Panic instead of panic to prevent the compiler from
@@ -14,9 +14,9 @@ import (
 )
 
 var (
-	// FrameAllocator is a BitmapAllocator instance that serves as the
+	// bitmapAllocator is a BitmapAllocator instance that serves as the
 	// primary allocator for reserving pages.
-	FrameAllocator BitmapAllocator
+	bitmapAllocator BitmapAllocator
 
 	errBitmapAllocOutOfMemory = &kernel.Error{Module: "bitmap_alloc", Message: "out of memory"}
 	errBitmapAllocFrameNotManaged = &kernel.Error{Module: "bitmap_alloc", Message: "frame not managed by this allocator"}
@@ -306,10 +306,10 @@ func earlyAllocFrame() (pmm.Frame, *kernel.Error) {
 	return earlyAllocator.AllocFrame()
 }
 
-// sysAllocFrame is a helper that delegates a frame allocation request to the
+// AllocFrame is a helper that delegates a frame allocation request to the
 // bitmap allocator instance.
-func sysAllocFrame() (pmm.Frame, *kernel.Error) {
-	return FrameAllocator.AllocFrame()
+func AllocFrame() (pmm.Frame, *kernel.Error) {
+	return bitmapAllocator.AllocFrame()
 }
 
 // Init sets up the kernel physical memory allocation sub-system.
@@ -318,10 +318,10 @@ func Init(kernelStart, kernelEnd uintptr) *kernel.Error {
 	earlyAllocator.printMemoryMap()
 
 	vmm.SetFrameAllocator(earlyAllocFrame)
-	if err := FrameAllocator.init(); err != nil {
+	if err := bitmapAllocator.init(); err != nil {
 		return err
 	}
-	vmm.SetFrameAllocator(sysAllocFrame)
+	vmm.SetFrameAllocator(AllocFrame)
 
 	return nil
 }
@@ -413,7 +413,7 @@ func TestAllocatorPackageInit(t *testing.T) {
 		}
 
 		// At this point sysAllocFrame should work
-		if _, err := sysAllocFrame(); err != nil {
+		if _, err := AllocFrame(); err != nil {
 			t.Fatal(err)
 		}
 	})
@@ -23,8 +23,8 @@ func Panic(e interface{}) {
 	case *Error:
 		err = t
 	case string:
-		errRuntimePanic.Message = t
-		err = errRuntimePanic
+		panicString(t)
+		return
 	case error:
 		errRuntimePanic.Message = t.Error()
 		err = errRuntimePanic
@@ -39,3 +39,10 @@ func Panic(e interface{}) {
 
 	cpuHaltFn()
 }
+
+// panicString serves as a redirect target for runtime.throw
+//go:redirect-from runtime.throw
+func panicString(msg string) {
+	errRuntimePanic.Message = msg
+	Panic(errRuntimePanic)
+}
@@ -76,35 +76,33 @@ func findRedirects(goFiles []string) ([]*redirect, error) {
 
 		cmap := ast.NewCommentMap(fset, f, f.Comments)
 		cmap.Filter(f)
-		for astNode, commentGroups := range cmap {
+		for astNode := range cmap {
 			fnDecl, ok := astNode.(*ast.FuncDecl)
-			if !ok {
+			if !ok || fnDecl.Doc == nil {
 				continue
 			}
 
-			for _, commentGroup := range commentGroups {
-				for _, comment := range commentGroup.List {
-					if !strings.Contains(comment.Text, "go:redirect-from") {
-						continue
-					}
-
-					// build qualified name to fn
-					fqName := fmt.Sprintf("%s/%s.%s",
-						prefix,
-						goFile[:strings.LastIndexByte(goFile, '/')],
-						fnDecl.Name,
-					)
-
-					fields := strings.Fields(comment.Text)
-					if len(fields) != 2 || fields[0] != "//go:redirect-from" {
-						return nil, fmt.Errorf("malformed go:redirect-from syntax for %q", fqName)
-					}
-
-					redirects = append(redirects, &redirect{
-						src: fields[1],
-						dst: fqName,
-					})
-				}
+			for _, comment := range fnDecl.Doc.List {
+				if !strings.Contains(comment.Text, "go:redirect-from") {
+					continue
+				}
+
+				// build qualified name to fn
+				fqName := fmt.Sprintf("%s/%s.%s",
+					prefix,
+					goFile[:strings.LastIndexByte(goFile, '/')],
+					fnDecl.Name,
+				)
+
+				fields := strings.Fields(comment.Text)
+				if len(fields) != 2 || fields[0] != "//go:redirect-from" {
+					return nil, fmt.Errorf("malformed go:redirect-from syntax for %q\n-> %q", fqName, comment.Text)
+				}
+
+				redirects = append(redirects, &redirect{
+					src: fields[1],
+					dst: fqName,
+				})
 			}
 		}
 	}
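For reference, the directive format the reworked tool accepts: the annotation must sit in the function's doc comment (fnDecl.Doc) and split into exactly two whitespace-separated fields, the second of which becomes the redirect source. A small standalone sketch of that check using the same strings calls as the tool:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Well-formed: exactly two fields; fields[1] ("runtime.sysReserve")
	// becomes redirect.src in the tool.
	ok := "//go:redirect-from runtime.sysReserve"
	fmt.Println(strings.Fields(ok)) // [//go:redirect-from runtime.sysReserve]

	// Malformed: only one field, so the len(fields) != 2 check fails and
	// the tool reports the offending comment text.
	bad := "//go:redirect-from"
	fmt.Println(strings.Fields(bad)) // [//go:redirect-from]
}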