mirror of
https://github.com/taigrr/gopher-os
synced 2025-01-18 04:43:13 -08:00
Merge pull request #12 from achilleasa/early-physical-page-allocator
Early physical page allocator
This commit is contained in:
commit
412bcf8402
1
Makefile
1
Makefile
@ -138,3 +138,4 @@ lint-check-deps:
|
||||
@gometalinter.v1 --install >/dev/null
|
||||
|
||||
test:
|
||||
go test -cover ./...
|
||||
|
@ -4,7 +4,7 @@ set -e
|
||||
echo "" > coverage.txt
|
||||
|
||||
for d in $(go list ./... | grep -v vendor); do
|
||||
go test -race -coverprofile=profile.out -covermode=atomic $d
|
||||
go test -v -race -coverprofile=profile.out -covermode=atomic $d
|
||||
if [ -f profile.out ]; then
|
||||
cat profile.out >> coverage.txt
|
||||
rm profile.out
|
||||
|
@ -101,6 +101,15 @@ const (
|
||||
memUnknown
|
||||
)
|
||||
|
||||
var (
|
||||
infoData uintptr
|
||||
)
|
||||
|
||||
// MemRegionVisitor defines a visitor function that gets invoked by VisitMemRegions
|
||||
// for each memory region provided by the boot loader. The visitor must return true
|
||||
// to continue or false to abort the scan.
|
||||
type MemRegionVisitor func(entry *MemoryMapEntry) bool
|
||||
|
||||
// MemoryMapEntry describes a memory region entry, namely its physical address,
|
||||
// its length and its type.
|
||||
type MemoryMapEntry struct {
|
||||
@ -114,13 +123,21 @@ type MemoryMapEntry struct {
|
||||
Type MemoryEntryType
|
||||
}
|
||||
|
||||
var (
|
||||
infoData uintptr
|
||||
)
|
||||
|
||||
// MemRegionVisitor defines a visitor function that gets invoked by VisitMemRegions
|
||||
// for each memory region provided by the boot loader.
|
||||
type MemRegionVisitor func(entry *MemoryMapEntry)
|
||||
// String implements fmt.Stringer for MemoryEntryType.
|
||||
func (t MemoryEntryType) String() string {
|
||||
switch t {
|
||||
case MemAvailable:
|
||||
return "available"
|
||||
case MemReserved:
|
||||
return "reserved"
|
||||
case MemAcpiReclaimable:
|
||||
return "ACPI (reclaimable)"
|
||||
case MemNvs:
|
||||
return "NVS"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
// SetInfoPtr updates the internal multiboot information pointer to the given
|
||||
// value. This function must be invoked before invoking any other function
|
||||
@ -151,7 +168,9 @@ func VisitMemRegions(visitor MemRegionVisitor) {
|
||||
entry.Type = MemReserved
|
||||
}
|
||||
|
||||
visitor(entry)
|
||||
if !visitor(entry) {
|
||||
return
|
||||
}
|
||||
|
||||
curPtr += uintptr(ptrMapHeader.entrySize)
|
||||
}
|
||||
|
@ -59,8 +59,9 @@ func TestVisitMemRegion(t *testing.T) {
|
||||
var visitCount int
|
||||
|
||||
SetInfoPtr(uintptr(unsafe.Pointer(&emptyInfoData[0])))
|
||||
VisitMemRegions(func(_ *MemoryMapEntry) {
|
||||
VisitMemRegions(func(_ *MemoryMapEntry) bool {
|
||||
visitCount++
|
||||
return true
|
||||
})
|
||||
|
||||
if visitCount != 0 {
|
||||
@ -71,7 +72,7 @@ func TestVisitMemRegion(t *testing.T) {
|
||||
SetInfoPtr(uintptr(unsafe.Pointer(&multibootInfoTestData[0])))
|
||||
multibootInfoTestData[128] = 0xFF
|
||||
|
||||
VisitMemRegions(func(entry *MemoryMapEntry) {
|
||||
VisitMemRegions(func(entry *MemoryMapEntry) bool {
|
||||
if entry.PhysAddress != specs[visitCount].expPhys {
|
||||
t.Errorf("[visit %d] expected physical address to be %x; got %x", visitCount, specs[visitCount].expPhys, entry.PhysAddress)
|
||||
}
|
||||
@ -82,11 +83,42 @@ func TestVisitMemRegion(t *testing.T) {
|
||||
t.Errorf("[visit %d] expected region type to be %d; got %d", visitCount, specs[visitCount].expType, entry.Type)
|
||||
}
|
||||
visitCount++
|
||||
return true
|
||||
})
|
||||
|
||||
if visitCount != len(specs) {
|
||||
t.Errorf("expected the visitor func to be invoked %d times; got %d", len(specs), visitCount)
|
||||
}
|
||||
|
||||
// Test that the visitor function can abort the scan by returning false
|
||||
visitCount = 0
|
||||
VisitMemRegions(func(entry *MemoryMapEntry) bool {
|
||||
visitCount++
|
||||
return false
|
||||
})
|
||||
|
||||
if visitCount != 1 {
|
||||
t.Errorf("expected the visitor func to be invoked %d times; got %d", 1, visitCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryEntryTypeStringer(t *testing.T) {
|
||||
specs := []struct {
|
||||
input MemoryEntryType
|
||||
exp string
|
||||
}{
|
||||
{MemAvailable, "available"},
|
||||
{MemReserved, "reserved"},
|
||||
{MemAcpiReclaimable, "ACPI (reclaimable)"},
|
||||
{MemNvs, "NVS"},
|
||||
{MemoryEntryType(123), "unknown"},
|
||||
}
|
||||
|
||||
for specIndex, spec := range specs {
|
||||
if got := spec.input.String(); got != spec.exp {
|
||||
t.Errorf("[spec %d] expected MemoryEntryType(%d).String() to return %q; got %q", specIndex, spec.input, spec.exp, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetFramebufferInfo(t *testing.T) {
|
||||
|
@ -1,11 +1,8 @@
|
||||
package kernel
|
||||
|
||||
import (
|
||||
_ "unsafe" // required for go:linkname
|
||||
|
||||
"github.com/achilleasa/gopher-os/kernel/hal"
|
||||
"github.com/achilleasa/gopher-os/kernel/hal/multiboot"
|
||||
"github.com/achilleasa/gopher-os/kernel/kfmt/early"
|
||||
)
|
||||
|
||||
// Kmain is the only Go symbol that is visible (exported) from the rt0 initialization
|
||||
@ -25,5 +22,4 @@ func Kmain(multibootInfoPtr uintptr) {
|
||||
// Initialize and clear the terminal
|
||||
hal.InitTerminal()
|
||||
hal.ActiveTerminal.Clear()
|
||||
early.Printf("Starting gopher-os\n")
|
||||
}
|
||||
|
13
kernel/mem/constants_amd64.go
Normal file
13
kernel/mem/constants_amd64.go
Normal file
@ -0,0 +1,13 @@
|
||||
// +build amd64
|
||||
|
||||
package mem
|
||||
|
||||
const (
|
||||
// PageShift is equal to log2(PageSize). This constant is used when
|
||||
// we need to convert a physical address to a page number (shift right by PageShift)
|
||||
// and vice-versa.
|
||||
PageShift = 12
|
||||
|
||||
// PageSize defines the system's page size in bytes.
|
||||
PageSize = Size(1 << PageShift)
|
||||
)
|
29
kernel/mem/memset.go
Normal file
29
kernel/mem/memset.go
Normal file
@ -0,0 +1,29 @@
|
||||
package mem
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Memset sets size bytes at the given address to the supplied value. The implementation
|
||||
// is based on bytes.Repeat; instead of using a for loop, this function uses
|
||||
// log2(size) copy calls which should give us a speed boost as page addresses
|
||||
// are always aligned.
|
||||
func Memset(addr uintptr, value byte, size Size) {
|
||||
if size == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// overlay a slice on top of this address region
|
||||
target := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
|
||||
Len: int(size),
|
||||
Cap: int(size),
|
||||
Data: addr,
|
||||
}))
|
||||
|
||||
// Set first element and make log2(size) optimized copies
|
||||
target[0] = value
|
||||
for index := Size(1); index < size; index *= 2 {
|
||||
copy(target[index:], target[:index])
|
||||
}
|
||||
}
|
27
kernel/mem/memset_test.go
Normal file
27
kernel/mem/memset_test.go
Normal file
@ -0,0 +1,27 @@
|
||||
package mem
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func TestMemset(t *testing.T) {
|
||||
// memset with a 0 size should be a no-op
|
||||
Memset(uintptr(0), 0x00, 0)
|
||||
|
||||
for pageCount := uint32(1); pageCount <= 10; pageCount++ {
|
||||
buf := make([]byte, PageSize<<pageCount)
|
||||
for i := 0; i < len(buf); i++ {
|
||||
buf[i] = 0xFE
|
||||
}
|
||||
|
||||
addr := uintptr(unsafe.Pointer(&buf[0]))
|
||||
Memset(addr, 0x00, Size(len(buf)))
|
||||
|
||||
for i := 0; i < len(buf); i++ {
|
||||
if got := buf[i]; got != 0x00 {
|
||||
t.Errorf("[block with %d pages] expected byte: %d to be 0x00; got 0x%x", pageCount, i, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
120
kernel/mem/pfn/bootmem_allocator.go
Normal file
120
kernel/mem/pfn/bootmem_allocator.go
Normal file
@ -0,0 +1,120 @@
|
||||
package pfn
|
||||
|
||||
import (
|
||||
"github.com/achilleasa/gopher-os/kernel/hal/multiboot"
|
||||
"github.com/achilleasa/gopher-os/kernel/kfmt/early"
|
||||
"github.com/achilleasa/gopher-os/kernel/mem"
|
||||
)
|
||||
|
||||
var (
|
||||
// EarlyAllocator points to a static instance of the boot memory allocator
|
||||
// which is used to bootstrap the kernel before initializing a more
|
||||
// advanced memory allocator.
|
||||
EarlyAllocator BootMemAllocator
|
||||
)
|
||||
|
||||
// BootMemAllocator implements a rudimentary physical memory allocator which is used
|
||||
// to bootstrap the kernel.
|
||||
//
|
||||
// The allocator implementation uses the memory region information provided by
|
||||
// the bootloader to detect free memory blocks and return the next available
|
||||
// free frame.
|
||||
//
|
||||
// Allocations are tracked via an internal counter that contains the last
|
||||
// allocated frame index. The system memory regions are mapped into a linear
|
||||
// page index by aligning the region start address to the system's page size
|
||||
// and then dividing by the page size.
|
||||
//
|
||||
// Due to the way that the allocator works, it is not possible to free
|
||||
// allocated pages. Once the kernel is properly initialized, the allocated
|
||||
// blocks will be handed over to a more advanced memory allocator that does
|
||||
// support freeing.
|
||||
type BootMemAllocator struct {
|
||||
initialized bool
|
||||
|
||||
// allocCount tracks the total number of allocated frames.
|
||||
allocCount uint64
|
||||
|
||||
// lastAllocIndex tracks the last allocated frame index.
|
||||
lastAllocIndex int64
|
||||
}
|
||||
|
||||
// init sets up the boot memory allocator internal state and prints out the
|
||||
// system memory map.
|
||||
func (alloc *BootMemAllocator) init() {
|
||||
alloc.lastAllocIndex = -1
|
||||
alloc.initialized = true
|
||||
|
||||
early.Printf("[boot_mem_alloc] system memory map:\n")
|
||||
var totalFree mem.Size
|
||||
multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool {
|
||||
early.Printf("\t[0x%10x - 0x%10x], size: %10d, type: %s\n", region.PhysAddress, region.PhysAddress+region.Length, region.Length, region.Type.String())
|
||||
|
||||
if region.Type == multiboot.MemAvailable {
|
||||
totalFree += mem.Size(region.Length)
|
||||
}
|
||||
return true
|
||||
})
|
||||
early.Printf("[boot_mem_alloc] free memory: %dKb\n", uint64(totalFree/mem.Kb))
|
||||
}
|
||||
|
||||
// AllocFrame scans the system memory regions reported by the bootloader and
|
||||
// reseves the next available free frame. AllocFrame returns false if no more
|
||||
// memory can be allocated.
|
||||
//
|
||||
// The allocator only supports allocating blocks equal to the page size.
|
||||
// Requests for a page order > 0 will cause the allocator to return false.
|
||||
//
|
||||
// The use of a bool return value is intentional; if this method returned an
|
||||
// error then the compiler would call runtime.convT2I which in turn invokes the
|
||||
// yet uninitialized Go allocator.
|
||||
func (alloc *BootMemAllocator) AllocFrame(order mem.PageOrder) (Frame, bool) {
|
||||
if !alloc.initialized {
|
||||
alloc.init()
|
||||
}
|
||||
|
||||
if order > 0 {
|
||||
return InvalidFrame, false
|
||||
}
|
||||
|
||||
var (
|
||||
foundPageIndex int64 = -1
|
||||
regionStartPageIndex, regionEndPageIndex int64
|
||||
)
|
||||
multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool {
|
||||
if region.Type != multiboot.MemAvailable {
|
||||
return true
|
||||
}
|
||||
|
||||
// Align region start address to a page boundary and find the start
|
||||
// and end page indices for the region
|
||||
regionStartPageIndex = int64(((mem.Size(region.PhysAddress) + (mem.PageSize - 1)) & ^(mem.PageSize - 1)) >> mem.PageShift)
|
||||
regionEndPageIndex = int64(((mem.Size(region.PhysAddress+region.Length) - (mem.PageSize - 1)) & ^(mem.PageSize - 1)) >> mem.PageShift)
|
||||
|
||||
// Ignore already allocated regions
|
||||
if alloc.lastAllocIndex >= regionEndPageIndex {
|
||||
return true
|
||||
}
|
||||
|
||||
// We found a block that can be allocated. The last allocated
|
||||
// index will be either pointing to a previous region or will
|
||||
// point inside this region. In the first case we just need to
|
||||
// select the regionStartPageIndex. In the latter case we can
|
||||
// simply select the next available page in the current region.
|
||||
if alloc.lastAllocIndex < regionStartPageIndex {
|
||||
foundPageIndex = regionStartPageIndex
|
||||
} else {
|
||||
foundPageIndex = alloc.lastAllocIndex + 1
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
if foundPageIndex == -1 {
|
||||
return InvalidFrame, false
|
||||
}
|
||||
|
||||
alloc.allocCount++
|
||||
alloc.lastAllocIndex = foundPageIndex
|
||||
|
||||
return Frame(foundPageIndex), true
|
||||
}
|
93
kernel/mem/pfn/bootmem_allocator_test.go
Normal file
93
kernel/mem/pfn/bootmem_allocator_test.go
Normal file
@ -0,0 +1,93 @@
|
||||
package pfn
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"unsafe"
|
||||
|
||||
"github.com/achilleasa/gopher-os/kernel/driver/video/console"
|
||||
"github.com/achilleasa/gopher-os/kernel/hal"
|
||||
"github.com/achilleasa/gopher-os/kernel/hal/multiboot"
|
||||
"github.com/achilleasa/gopher-os/kernel/mem"
|
||||
)
|
||||
|
||||
func TestBootMemoryAllocator(t *testing.T) {
|
||||
// Mock a tty to handle early.Printf output
|
||||
mockConsoleFb := make([]byte, 160*25)
|
||||
mockConsole := &console.Ega{}
|
||||
mockConsole.Init(80, 25, uintptr(unsafe.Pointer(&mockConsoleFb[0])))
|
||||
hal.ActiveTerminal.AttachTo(mockConsole)
|
||||
|
||||
multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))
|
||||
|
||||
var totalFreeFrames uint64
|
||||
multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool {
|
||||
if region.Type == multiboot.MemAvailable {
|
||||
regionStartFrameIndex := uint64(((mem.Size(region.PhysAddress) + (mem.PageSize - 1)) & ^(mem.PageSize - 1)) >> mem.PageShift)
|
||||
regionEndFrameIndex := uint64(((mem.Size(region.PhysAddress+region.Length) - (mem.PageSize - 1)) & ^(mem.PageSize - 1)) >> mem.PageShift)
|
||||
|
||||
totalFreeFrames += regionEndFrameIndex - regionStartFrameIndex + 1
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
var (
|
||||
alloc BootMemAllocator
|
||||
allocFrameCount uint64
|
||||
)
|
||||
for ; ; allocFrameCount++ {
|
||||
frame, ok := alloc.AllocFrame(mem.PageOrder(0))
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
|
||||
expAddress := uintptr(uint64(alloc.lastAllocIndex) * uint64(mem.PageSize))
|
||||
if got := frame.Address(); got != expAddress {
|
||||
t.Errorf("[frame %d] expected frame address to be 0x%x; got 0x%x", allocFrameCount, expAddress, got)
|
||||
}
|
||||
|
||||
if !frame.IsValid() {
|
||||
t.Errorf("[frame %d] expected IsValid() to return true", allocFrameCount)
|
||||
}
|
||||
|
||||
if got := frame.PageOrder(); got != mem.PageOrder(0) {
|
||||
t.Errorf("[frame %d] expected allocated frame page order to be 0; got %d", allocFrameCount, got)
|
||||
}
|
||||
|
||||
if got := frame.Size(); got != mem.PageSize {
|
||||
t.Errorf("[frame %d] expected allocated frame size to be %d; got %d", allocFrameCount, mem.PageSize, got)
|
||||
}
|
||||
}
|
||||
|
||||
if allocFrameCount != totalFreeFrames {
|
||||
t.Fatalf("expected allocator to allocate %d frames; allocated %d", totalFreeFrames, allocFrameCount)
|
||||
}
|
||||
|
||||
// This allocator only works with order(0) blocks
|
||||
if frame, ok := alloc.AllocFrame(mem.PageOrder(1)); ok || frame.IsValid() {
|
||||
t.Fatalf("expected allocator to return false and an invalid frame when requested to allocate a block with order > 0; got %t, %v", ok, frame)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// A dump of multiboot data when running under qemu containing only the memory region tag.
|
||||
multibootMemoryMap = []byte{
|
||||
72, 5, 0, 0, 0, 0, 0, 0,
|
||||
6, 0, 0, 0, 160, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 252, 9, 0, 0, 0, 0, 0,
|
||||
1, 0, 0, 0, 0, 0, 0, 0, 0, 252, 9, 0, 0, 0, 0, 0,
|
||||
0, 4, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
|
||||
2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0,
|
||||
0, 0, 238, 7, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 254, 7, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0,
|
||||
2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 252, 255, 0, 0, 0, 0,
|
||||
0, 0, 4, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0,
|
||||
9, 0, 0, 0, 212, 3, 0, 0, 24, 0, 0, 0, 40, 0, 0, 0,
|
||||
21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 27, 0, 0, 0,
|
||||
1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 16, 0, 0, 16, 0, 0,
|
||||
24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
}
|
||||
)
|
39
kernel/mem/pfn/frame.go
Normal file
39
kernel/mem/pfn/frame.go
Normal file
@ -0,0 +1,39 @@
|
||||
// Package pfn provides physical memory allocator implementations that allow
|
||||
// allocations of physical memory frames.
|
||||
package pfn
|
||||
|
||||
import (
|
||||
"math"
|
||||
|
||||
"github.com/achilleasa/gopher-os/kernel/mem"
|
||||
)
|
||||
|
||||
// Frame describes a physical memory page index.
|
||||
type Frame uint64
|
||||
|
||||
const (
|
||||
// InvalidFrame is returned by page allocators when
|
||||
// they fail to reserve the requested frame.
|
||||
InvalidFrame = Frame(math.MaxUint64)
|
||||
)
|
||||
|
||||
// IsValid returns true if this is a valid frame.
|
||||
func (f Frame) IsValid() bool {
|
||||
return f != InvalidFrame
|
||||
}
|
||||
|
||||
// Address returns a pointer to the physical memory address pointed to by this Frame.
|
||||
func (f Frame) Address() uintptr {
|
||||
return uintptr(f << mem.PageShift)
|
||||
}
|
||||
|
||||
// PageOrder returns the page order of this frame. The page order is encoded in the
|
||||
// 8 MSB of the frame number.
|
||||
func (f Frame) PageOrder() mem.PageOrder {
|
||||
return mem.PageOrder((f >> 56) & 0xFF)
|
||||
}
|
||||
|
||||
// Size returns the size of this frame.
|
||||
func (f Frame) Size() mem.Size {
|
||||
return mem.PageSize << ((f >> 56) & 0xFF)
|
||||
}
|
31
kernel/mem/pfn/frame_test.go
Normal file
31
kernel/mem/pfn/frame_test.go
Normal file
@ -0,0 +1,31 @@
|
||||
package pfn
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/achilleasa/gopher-os/kernel/mem"
|
||||
)
|
||||
|
||||
func TestFrameMethods(t *testing.T) {
|
||||
for order := mem.PageOrder(0); order < mem.PageOrder(10); order++ {
|
||||
for frameIndex := uint64(0); frameIndex < 128; frameIndex++ {
|
||||
frame := Frame(frameIndex | (uint64(order) << 56))
|
||||
|
||||
if !frame.IsValid() {
|
||||
t.Errorf("[order %d] expected frame %d to be valid", order, frameIndex)
|
||||
}
|
||||
|
||||
if got := frame.PageOrder(); got != order {
|
||||
t.Errorf("[order %d] expected frame (%d, index: %d) call to PageOrder() to return %d; got %d", order, frame, frameIndex, order, got)
|
||||
}
|
||||
|
||||
if exp, got := uintptr(frameIndex<<mem.PageShift), frame.Address(); got != exp {
|
||||
t.Errorf("[order %d] expected frame (%d, index: %d) call to Address() to return %x; got %x", order, frame, frameIndex, exp, got)
|
||||
}
|
||||
|
||||
if exp, got := mem.Size(mem.PageSize<<order), frame.Size(); got != exp {
|
||||
t.Errorf("[order %d] expected frame (%d, index: %d) call to Size() to return %d; got %d", order, frame, frameIndex, exp, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
20
kernel/mem/size.go
Normal file
20
kernel/mem/size.go
Normal file
@ -0,0 +1,20 @@
|
||||
package mem
|
||||
|
||||
// Size represents a memory block size in bytes.
|
||||
type Size uint64
|
||||
|
||||
// Common memory block sizes.
|
||||
const (
|
||||
Byte Size = 1
|
||||
Kb = 1024 * Byte
|
||||
Mb = 1024 * Kb
|
||||
Gb = 1024 * Mb
|
||||
)
|
||||
|
||||
// PageOrder represents a power-of-two multiple of the base page size and is
|
||||
// used as an argument to page-based memory allocators.
|
||||
//
|
||||
// PageOrder(0) refers to a page with size PageSize << 0
|
||||
// PageOrder(1) refers to a page with size PageSize << 1
|
||||
// ...
|
||||
type PageOrder uint8
|
Loading…
x
Reference in New Issue
Block a user