Achilleas Anagnostopoulos 1fc9d20ed2 Reserve and protect zeroed frame when initializing vmm
The vmm package now exports ReservedZeroedFrame, which can be used to set up
a lazy physical page allocation scheme. This is implemented by mapping
ReservedZeroedFrame to each page in a virtual memory region using the
following flag combination: FlagPresent | FlagCopyOnWrite.

This has the effect that all reads from the virtual address region
target the contents of ReservedZeroedFrame (always returning zero). On
the other hand, writes to the virtual address region trigger a page
fault which is resolved as follows:
- a new physical frame is allocated and the contents of ReservedZeroedFrame
  are copied to it (effectively clearing the new frame).
- the page entry for the virtual address that caused the fault is
  updated to point to the new frame and its flags are changed to:
  FlagPresent | FlagRW
- execution control is returned to the code that caused the fault (sketched
  below)
2017-06-22 19:17:19 +01:00
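
The fault handler itself is not part of the file in this commit; the following
is a rough sketch of the resolution flow described above, where frameAllocator
and copyFrame are hypothetical stand-ins for the package's physical frame
allocator and frame-copy routine:

	func handleCOWFault(page vmm.Page) *kernel.Error {
		// Allocate a fresh physical frame to back the faulting page.
		newFrame, err := frameAllocator()
		if err != nil {
			return err
		}

		// Copy the contents of ReservedZeroedFrame into the new frame,
		// effectively clearing it.
		if err = copyFrame(vmm.ReservedZeroedFrame, newFrame); err != nil {
			return err
		}

		// Re-point the faulting page at the new frame with RW
		// permissions; returning resumes the faulting instruction.
		return vmm.Map(page, newFrame, vmm.FlagPresent|vmm.FlagRW)
	}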


package vmm

import (
	"unsafe"

	"github.com/achilleasa/gopher-os/kernel"
	"github.com/achilleasa/gopher-os/kernel/cpu"
	"github.com/achilleasa/gopher-os/kernel/mem"
	"github.com/achilleasa/gopher-os/kernel/mem/pmm"
)

// ReservedZeroedFrame is a special zero-cleared frame allocated by the
// vmm package's Init function. The purpose of this frame is to assist
// in implementing on-demand memory allocation when mapping it in
// conjunction with the CopyOnWrite flag. Here is an example of how it
// can be used:
//
//	func ReserveOnDemand(start vmm.Page, pageCount int) *kernel.Error {
//		var err *kernel.Error
//		mapFlags := vmm.FlagPresent | vmm.FlagCopyOnWrite
//		for page := start; pageCount > 0; pageCount, page = pageCount-1, page+1 {
//			if err = vmm.Map(page, vmm.ReservedZeroedFrame, mapFlags); err != nil {
//				return err
//			}
//		}
//		return nil
//	}
//
// In the above example, page mappings are set up for the requested number of
// pages but no physical memory is reserved for their contents. A write to any
// of the above pages will trigger a page fault causing a new frame to be
// allocated, cleared (the blank frame is copied to the new frame) and
// installed in-place with RW permissions.
var ReservedZeroedFrame pmm.Frame

var (
	// protectReservedZeroedPage is set to true to prevent mapping
	// ReservedZeroedFrame with a RW flag.
	protectReservedZeroedPage bool

	// nextAddrFn is used by tests to override the nextTableAddr
	// calculations used by Map. When compiling the kernel this function
	// will be automatically inlined.
	nextAddrFn = func(entryAddr uintptr) uintptr {
		return entryAddr
	}

	// flushTLBEntryFn is used by tests to override calls to flushTLBEntry
	// which will cause a fault if called in user-mode.
	flushTLBEntryFn = cpu.FlushTLBEntry

	errNoHugePageSupport           = &kernel.Error{Module: "vmm", Message: "huge pages are not supported"}
	errAttemptToRWMapReservedFrame = &kernel.Error{Module: "vmm", Message: "reserved blank frame cannot be mapped with a RW flag"}
)

// Map establishes a mapping between a virtual page and a physical memory frame
// using the currently active page directory table. Calls to Map will use the
// supplied physical frame allocator to initialize missing page tables at each
// paging level supported by the MMU.
//
// Attempts to map ReservedZeroedFrame with a RW flag will result in an error.
func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
	if protectReservedZeroedPage && frame == ReservedZeroedFrame && (flags&FlagRW) != 0 {
		return errAttemptToRWMapReservedFrame
	}

	var err *kernel.Error

	walk(page.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
		// If we reached the last level all we need to do is to map the
		// frame in place, flag it as present and flush its TLB entry.
		if pteLevel == pageLevels-1 {
			*pte = 0
			pte.SetFrame(frame)
			pte.SetFlags(flags)
			flushTLBEntryFn(page.Address())
			return true
		}

		if pte.HasFlags(FlagHugePage) {
			err = errNoHugePageSupport
			return false
		}

		// The next table does not yet exist; we need to allocate a
		// physical frame for it, map it and clear its contents.
		if !pte.HasFlags(FlagPresent) {
			var newTableFrame pmm.Frame
			newTableFrame, err = frameAllocator()
			if err != nil {
				return false
			}

			*pte = 0
			pte.SetFrame(newTableFrame)
			pte.SetFlags(FlagPresent | FlagRW)

			// The next pte entry becomes available but we need to
			// make sure that the new page is properly cleared.
			nextTableAddr := (uintptr(unsafe.Pointer(pte)) << pageLevelBits[pteLevel+1])
			mem.Memset(nextAddrFn(nextTableAddr), 0, mem.PageSize)
		}

		return true
	})

	return err
}

// MapTemporary establishes a temporary RW mapping of a physical memory frame
// to a fixed virtual address, overwriting any previous mapping. The temporary
// mapping mechanism is primarily used by the kernel to access and initialize
// inactive page tables.
//
// Attempts to map ReservedZeroedFrame will result in an error.
func MapTemporary(frame pmm.Frame) (Page, *kernel.Error) {
	if protectReservedZeroedPage && frame == ReservedZeroedFrame {
		return 0, errAttemptToRWMapReservedFrame
	}

	if err := Map(PageFromAddress(tempMappingAddr), frame, FlagPresent|FlagRW); err != nil {
		return 0, err
	}

	return PageFromAddress(tempMappingAddr), nil
}
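
// The following is an illustrative sketch (not part of the original file)
// showing how MapTemporary and Unmap can be combined, e.g. to zero-fill an
// arbitrary physical frame via the temporary mapping. clearFrame is a
// hypothetical helper; it only uses identifiers defined in this file:
//
//	func clearFrame(frame pmm.Frame) *kernel.Error {
//		page, err := MapTemporary(frame)
//		if err != nil {
//			return err
//		}
//		mem.Memset(page.Address(), 0, mem.PageSize)
//		return Unmap(page)
//	}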

// Unmap removes a mapping previously installed via a call to Map or MapTemporary.
func Unmap(page Page) *kernel.Error {
	var err *kernel.Error

	walk(page.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
		// If we reached the last level all we need to do is to set the
		// page as non-present and flush its TLB entry.
		if pteLevel == pageLevels-1 {
			pte.ClearFlags(FlagPresent)
			flushTLBEntryFn(page.Address())
			return true
		}

		// The next table is not present; this is an invalid mapping.
		if !pte.HasFlags(FlagPresent) {
			err = ErrInvalidMapping
			return false
		}

		if pte.HasFlags(FlagHugePage) {
			err = errNoHugePageSupport
			return false
		}

		return true
	})

	return err
}
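
// A final illustrative sketch (an assumption, not in the original file):
// tearing down a region that was reserved page by page, mirroring the
// ReserveOnDemand example above. Note that Unmap only clears the present
// flag; releasing the backing physical frames is a separate concern.
//
//	func unmapRegion(start Page, pageCount int) *kernel.Error {
//		for page := start; pageCount > 0; pageCount, page = pageCount-1, page+1 {
//			if err := Unmap(page); err != nil {
//				return err
//			}
//		}
//		return nil
//	}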