mirror of https://github.com/taigrr/gopher-os (synced 2025-01-18 04:43:13 -08:00)
Use pwd as a workspace; move sources to src/gopheros and rewrite imports
By setting up pwd as a Go workspace, we can trim import paths from something like "github.com/achilleasa/gopher-os/kernel" to just "kernel". These changes make forking easier and also allow us to move the code to a different git hosting provider without having to rewrite the imports.
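For illustration, this is what the layout change means for a consumer of the kernel packages. The file below is hypothetical (the package name and the blank identifier are made up for the sketch); the new import root matches the files added by this commit:

package consumer // hypothetical file living inside the workspace

// Before this commit the import would have been rooted at the hosting path:
//
//   import "github.com/achilleasa/gopher-os/kernel/driver/tty"
//
// With pwd set up as the workspace and sources under src/gopheros, the same
// package is imported via the local tree instead:
import "gopheros/kernel/driver/tty"

var _ tty.Tty // reference the import so this sketch compiles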
src/arch/x86_64/asm/cgo_stubs.s (new file, 26 lines)
@@ -0,0 +1,26 @@
; vim: set ft=nasm :

section .text
bits 64

global x_cgo_callers
global x_cgo_init
global x_cgo_mmap
global x_cgo_notify_runtime_init_done
global x_cgo_sigaction
global x_cgo_thread_start
global x_cgo_setenv
global x_cgo_unsetenv
global _cgo_yield

; Stubs for missing cgo functions to keep the linker happy
x_cgo_callers:
x_cgo_init:
x_cgo_mmap:
x_cgo_notify_runtime_init_done:
x_cgo_sigaction:
x_cgo_thread_start:
x_cgo_setenv:
x_cgo_unsetenv:
_cgo_yield:
    ret
src/arch/x86_64/asm/constants.inc (new file, 12 lines)
@@ -0,0 +1,12 @@
; vim: set ft=nasm :

; The bootloader loads the kernel at LOAD_ADDRESS and jumps to the rt0_32 entrypoint
; at this address.
LOAD_ADDRESS equ 0x100000

; Page offset is the start of the 48-bit upper half canonical memory region.
; The kernel is compiled with a VMA equal to PAGE_OFFSET + LOAD_ADDRESS but
; loaded at physical address LOAD_ADDRESS.
PAGE_OFFSET equ 0xffff800000000000
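As a rough illustration of how these two constants interact, here is a hedged Go sketch (not part of the commit; the helper names are made up) of the PAGE_OFFSET arithmetic the assembly performs with expressions like "stack_top - PAGE_OFFSET":

package main

import "fmt"

const (
	loadAddress = 0x100000           // physical load address (1 MiB)
	pageOffset  = 0xffff800000000000 // start of the higher-half region
)

// physToVirt/virtToPhys mirror the add/subtract-PAGE_OFFSET translation
// between the kernel's load address and its virtual (VMA) address.
func physToVirt(p uint64) uint64 { return p + pageOffset }
func virtToPhys(v uint64) uint64 { return v - pageOffset }

func main() {
	vma := physToVirt(loadAddress)
	fmt.Printf("kernel VMA:  %#x\n", vma)             // 0xffff800000100000
	fmt.Printf("back to LMA: %#x\n", virtToPhys(vma)) // 0x100000
}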
src/arch/x86_64/asm/data.s (new empty file)
src/arch/x86_64/asm/multiboot_header.s (new file, 41 lines)
@@ -0,0 +1,41 @@
; vim: set ft=nasm :

section .multiboot_header

MAGIC equ 0xe85250d6
ARCH  equ 0x0

; Define the multiboot header (multiboot 1.6)
; http://nongnu.askapache.com/grub/phcoder/multiboot.pdf
header_start:
    dd MAGIC                     ; magic number
    dd ARCH                      ; i386 protected mode
    dd header_end - header_start ; header length

    ; The field 'checksum' is a 32-bit unsigned value which, when added to the other
    ; magic fields (i.e. 'magic', 'architecture' and 'header_length'), must have a
    ; 32-bit unsigned sum of zero.
    dd (1 << 32) - (MAGIC + ARCH + (header_end - header_start))

    ; Console flags tag
    align 8     ; tags should be 64-bit aligned
    dw 4        ; type
    dw 0        ; flags
    dd 12       ; size
    dd 0x3      ; kernel supports EGA console

    ; Define graphics mode tag
    ;align 8    ; tags should be 64-bit aligned
    ;dw 5       ; type
    ;dw 0       ; flags
    ;dd 20      ; size
    ;dd 80      ; width (pixels or chars)
    ;dd 25      ; height (pixels or chars)
    ;dd 0       ; bpp (0 for text mode)

    ; According to page 6 of the spec, the tag list is terminated by a tag with
    ; type 0 and size 8
    align 8     ; tags should be 64-bit aligned
    dd 0        ; type & flag = 0
    dd 8        ; size
header_end:
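The checksum line above relies on 32-bit wraparound: the stored value is the two's complement of the sum of the other header fields, so a multiboot2 loader can check that all four fields sum to zero modulo 2^32. A small Go sketch of the same arithmetic (the header length value is a placeholder, since the real one depends on the assembled header):

package main

import "fmt"

func main() {
	var (
		magic     uint32 = 0xe85250d6
		arch      uint32 = 0
		headerLen uint32 = 16 // hypothetical value of header_end - header_start
	)

	// Equivalent of: dd (1 << 32) - (MAGIC + ARCH + (header_end - header_start)).
	// Unsigned arithmetic wraps modulo 2^32, so -x == (1<<32) - x here.
	checksum := -(magic + arch + headerLen)

	// The loader verifies that the four fields sum to zero (mod 2^32).
	fmt.Printf("checksum=%#x sum=%d\n", checksum, magic+arch+headerLen+checksum)
}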
src/arch/x86_64/asm/rt0_32.s (new file, 364 lines)
@@ -0,0 +1,364 @@
; vim: set ft=nasm :
%include "constants.inc"

section .data
align 4

; GDT definition
gdt0:
gdt0_nil_seg: dw 0         ; Limit (low)
              dw 0         ; Base (low)
              db 0         ; Base (middle)
              db 0         ; Access (exec/read)
              db 0         ; Granularity
              db 0         ; Base (high)
gdt0_cs_seg:  dw 0         ; Limit (low)
              dw 0         ; Base (low)
              db 0         ; Base (middle)
              db 10011010b ; Access (exec/read)
              db 00100000b ; Granularity
              db 0         ; Base (high)
gdt0_ds_seg:  dw 0         ; Limit (low)
              dw 0         ; Base (low)
              db 0         ; Base (middle)
              db 10010010b ; Access (read/write)
              db 00000000b ; Granularity
              db 0         ; Base (high)

gdt0_desc:
    dw $ - gdt0 - 1 ; gdt size should be 1 byte less than actual length
    dq gdt0 - PAGE_OFFSET

NULL_SEG equ gdt0_nil_seg - gdt0
CS_SEG   equ gdt0_cs_seg - gdt0
DS_SEG   equ gdt0_ds_seg - gdt0

;------------------------------------------------------------------------------
; Error messages
;------------------------------------------------------------------------------
err_unsupported_bootloader db '[rt0_32] kernel not loaded by multiboot-compliant bootloader', 0
err_multiboot_data_too_big db '[rt0_32] multiboot information data length exceeds local buffer size', 0
err_cpuid_not_supported    db '[rt0_32] the processor does not support the CPUID instruction', 0
err_longmode_not_supported db '[rt0_32] the processor does not support longmode which is required by this kernel', 0
err_sse_not_supported      db '[rt0_32] the processor does not support SSE instructions which are required by this kernel', 0

section .bss
align 4096

; Reserve 3 pages for the initial page tables
page_table_l4: resb 4096
page_table_l3: resb 4096
page_table_l2: resb 4096

; Reserve 16K for storing multiboot data and for the kernel stack
global multiboot_data ; Make this available to the 64-bit entrypoint
global stack_bottom
global stack_top
multiboot_data: resb 16384
stack_bottom:   resb 16384
stack_top:

section .rt0
bits 32
align 4

;------------------------------------------------------------------------------
; Kernel 32-bit entry point
;
; The boot loader will jump to this symbol after setting up the CPU according
; to the multiboot standard. At this point:
; - A20 is enabled
; - The CPU is using 32-bit protected mode
; - Interrupts are disabled
; - Paging is disabled
; - EAX contains the magic value '0x36d76289'; the presence of this value indicates
;   to the operating system that it was loaded by a Multiboot-compliant boot loader
; - EBX contains the 32-bit physical address of the Multiboot information structure
;------------------------------------------------------------------------------
global _rt0_32_entry
_rt0_32_entry:
    ; Provide a stack
    mov esp, stack_top - PAGE_OFFSET

    ; Ensure we were booted by a bootloader supporting multiboot
    cmp eax, 0x36d76289
    jne _rt0_32_entry.unsupported_bootloader

    ; Copy multiboot struct to our own buffer
    call _rt0_copy_multiboot_data

    ; Check processor features
    call _rt0_check_cpuid_support
    call _rt0_check_longmode_support
    call _rt0_check_sse_support

    ; Setup initial page tables, enable paging and enter longmode
    call _rt0_populate_initial_page_tables
    call _rt0_enter_long_mode

    call _rt0_64_entry_trampoline

.unsupported_bootloader:
    mov edi, err_unsupported_bootloader - PAGE_OFFSET
    call write_string
    jmp _rt0_32_entry.halt

.halt:
    cli
    hlt

;------------------------------------------------------------------------------
; Copy multiboot information blocks from the address pointed to by ebx into a
; local buffer. This enables the kernel code to access them once paging is enabled.
;------------------------------------------------------------------------------
_rt0_copy_multiboot_data:
    mov esi, ebx
    mov edi, multiboot_data - PAGE_OFFSET

    mov ecx, dword [esi]
    cmp ecx, 16384
    jle _rt0_copy_multiboot_data.copy

    mov edi, err_multiboot_data_too_big - PAGE_OFFSET
    call write_string
    jmp _rt0_32_entry.halt

.copy:
    test ecx, ecx
    jz _rt0_copy_multiboot_data.done

    mov eax, dword [esi]
    mov dword [edi], eax
    add esi, 4
    add edi, 4
    sub ecx, 4
    jmp _rt0_copy_multiboot_data.copy

.done:
    ret

;------------------------------------------------------------------------------
; Check that the processor supports the CPUID instruction.
;
; To check if CPUID is supported, we need to attempt to flip the ID bit (bit 21)
; in the FLAGS register. If that works, CPUID is available.
;
; Code taken from: http://wiki.osdev.org/Setting_Up_Long_Mode#x86_or_x86-64
;------------------------------------------------------------------------------
_rt0_check_cpuid_support:
    ; Copy FLAGS in to EAX via stack
    pushfd
    pop eax

    ; Copy to ECX as well for comparing later on
    mov ecx, eax

    ; Flip the ID bit
    xor eax, 1 << 21

    ; Copy EAX to FLAGS via the stack
    push eax
    popfd

    ; Copy FLAGS back to EAX (with the flipped bit if CPUID is supported)
    pushfd
    pop eax

    ; Restore FLAGS from the old version stored in ECX (i.e. flipping the
    ; ID bit back if it was ever flipped).
    push ecx
    popfd

    ; Compare EAX and ECX. If they are equal then that means the bit
    ; wasn't flipped, and CPUID isn't supported.
    cmp eax, ecx
    je _rt0_check_cpuid_support.no_cpuid
    ret

.no_cpuid:
    mov edi, err_cpuid_not_supported - PAGE_OFFSET
    call write_string
    jmp _rt0_32_entry.halt

;------------------------------------------------------------------------------
; Check that the processor supports long mode
; Code taken from: http://wiki.osdev.org/Setting_Up_Long_Mode#x86_or_x86-64
;------------------------------------------------------------------------------
_rt0_check_longmode_support:
    ; To check for longmode support we need to ensure that the CPUID instruction
    ; can report it. To do this we need to query it first.
    mov eax, 0x80000000 ; Set the A-register to 0x80000000.
    cpuid
    cmp eax, 0x80000001 ; We need at least 0x80000001 to check for long mode.
    jb _rt0_check_longmode_support.no_long_mode

    mov eax, 0x80000001 ; Set the A-register to 0x80000001.
    cpuid
    test edx, 1 << 29   ; Test if the LM-bit, which is bit 29, is set in the D-register.
    jz _rt0_check_longmode_support.no_long_mode
    ret

.no_long_mode:
    mov edi, err_longmode_not_supported - PAGE_OFFSET
    call write_string
    jmp _rt0_32_entry.halt

;------------------------------------------------------------------------------
; Check for and enable SSE support. Code taken from:
; http://wiki.osdev.org/SSE#Checking_for_SSE
;------------------------------------------------------------------------------
_rt0_check_sse_support:
    ; check for SSE
    mov eax, 0x1
    cpuid
    test edx, 1 << 25
    jz _rt0_check_sse_support.no_sse

    ; Enable SSE
    mov eax, cr0
    and ax, 0xfffb ; Clear coprocessor emulation CR0.EM
    or ax, 0x2     ; Set coprocessor monitoring CR0.MP
    mov cr0, eax
    mov eax, cr4
    or ax, 3 << 9  ; Set CR4.OSFXSR and CR4.OSXMMEXCPT at the same time
    mov cr4, eax

    ret
.no_sse:
    mov edi, err_sse_not_supported - PAGE_OFFSET
    call write_string
    jmp _rt0_32_entry.halt

;------------------------------------------------------------------------------
; Setup minimal page tables to allow access to the following regions:
; - 0 to 8M
; - PAGE_OFFSET to PAGE_OFFSET + 8M
;
; The second region mapping allows us to access the kernel at its VMA when
; paging is enabled.
;------------------------------------------------------------------------------
PAGE_PRESENT  equ (1 << 0)
PAGE_WRITABLE equ (1 << 1)
PAGE_2MB      equ (1 << 7)

_rt0_populate_initial_page_tables:
    ; The CPU uses bits 39-47 of the virtual address as an index to the P4 table.
    mov eax, page_table_l3 - PAGE_OFFSET
    or eax, PAGE_PRESENT | PAGE_WRITABLE
    mov ebx, page_table_l4 - PAGE_OFFSET
    mov [ebx], eax

    ; Recursively map the last P4 entry to itself. This allows us to use
    ; specially crafted memory addresses to access the page tables themselves
    mov ecx, ebx
    or ecx, PAGE_PRESENT | PAGE_WRITABLE
    mov [ebx + 511*8], ecx

    ; Also map the addresses starting at PAGE_OFFSET to the same P3 table.
    ; To find the P4 index for PAGE_OFFSET we need to extract bits 39-47
    ; of its address.
    mov ecx, (PAGE_OFFSET >> 39) & 511
    mov [ebx + ecx*8], eax

    ; The CPU uses bits 30-38 as an index to the P3 table. We just need to map
    ; entry 0 from the P3 table to point to the P2 table.
    mov eax, page_table_l2 - PAGE_OFFSET
    or eax, PAGE_PRESENT | PAGE_WRITABLE
    mov ebx, page_table_l3 - PAGE_OFFSET
    mov [ebx], eax

    ; For the L2 table we enable the huge page bit which allows us to specify
    ; 2M pages without needing to use the L1 table. To cover the required
    ; 0-8M region we need to provide 4 2M page entries at indices 0 through 3.
    mov ecx, 0
    mov ebx, page_table_l2 - PAGE_OFFSET
.next_page:
    mov eax, 1 << 21 ; 2M
    mul ecx          ; eax *= ecx
    or eax, PAGE_PRESENT | PAGE_WRITABLE | PAGE_2MB
    mov [ebx + ecx*8], eax

    inc ecx
    cmp ecx, 4
    jne _rt0_populate_initial_page_tables.next_page

    ret
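To make the index arithmetic in _rt0_populate_initial_page_tables concrete, here is a hedged Go sketch (illustration only, not code from this commit) that splits a virtual address into its 4-level table indices using the same bit ranges the comments above refer to:

package main

import "fmt"

const pageOffset = 0xffff800000000000

// tableIndices extracts the P4/P3/P2/P1 indices (9 bits each) that the MMU
// uses when walking a 4-level page table for the given virtual address.
func tableIndices(virt uint64) (p4, p3, p2, p1 uint64) {
	p4 = (virt >> 39) & 511 // bits 39-47
	p3 = (virt >> 30) & 511 // bits 30-38
	p2 = (virt >> 21) & 511 // bits 21-29
	p1 = (virt >> 12) & 511 // bits 12-20
	return
}

func main() {
	// PAGE_OFFSET lands in P4 entry 256, which is why the assembly stores the
	// P3 table's address at both P4 index 0 (identity map) and index 256.
	fmt.Println(tableIndices(pageOffset)) // 256 0 0 0
	fmt.Println(tableIndices(0x200000))   // 0 0 1 0: the second 2M page
}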
;------------------------------------------------------------------------------
; Load P4 table, enable PAE, enter long mode and finally enable paging
;------------------------------------------------------------------------------
_rt0_enter_long_mode:
    ; Load page table map pointer to cr3
    mov eax, page_table_l4 - PAGE_OFFSET
    mov cr3, eax

    ; Enable PAE support
    mov eax, cr4
    or eax, 1 << 5
    mov cr4, eax

    ; Now enable long mode (bit 8) and the no-execute support (bit 11) by
    ; modifying the EFER MSR
    mov ecx, 0xc0000080
    rdmsr ; read msr value to eax
    or eax, (1 << 8) | (1 << 11)
    wrmsr

    ; Finally enable paging (bit 31) and user/kernel page write protection (bit 16)
    mov eax, cr0
    or eax, (1 << 31) | (1 << 16)
    mov cr0, eax

    ; We are in 32-bit compatibility submode. We need to load a 64-bit GDT
    ; and perform a far jmp to switch to long mode
    mov eax, gdt0_desc - PAGE_OFFSET
    lgdt [eax]

    ; set the ds and es segments;
    ; to set the cs segment we need to perform a far jmp
    mov ax, DS_SEG
    mov ds, ax
    mov es, ax
    mov fs, ax
    mov gs, ax
    mov ss, ax

    jmp CS_SEG:.flush_gdt - PAGE_OFFSET
.flush_gdt:
    ret

;------------------------------------------------------------------------------
; Write the NULL-terminated string contained in edi to the screen using white
; text on red background. Assumes that text-mode is enabled and that its
; physical address is 0xb8000.
;------------------------------------------------------------------------------
write_string:
    mov ebx, 0xb8000
    mov ah, 0x4F
.next_char:
    mov al, byte [edi]
    test al, al
    jz write_string.done

    mov word [ebx], ax
    add ebx, 2
    inc edi
    jmp write_string.next_char

.done:
    ret

;------------------------------------------------------------------------------
; Set up the stack pointer to the virtual address of the stack and jump to the
; 64-bit entrypoint.
;------------------------------------------------------------------------------
bits 64
_rt0_64_entry_trampoline:
    mov rsp, stack_top ; now that paging is enabled we can load the stack
                       ; with the virtual address of the allocated stack.

    ; Jump to 64-bit entry
    extern _rt0_64_entry
    mov rax, _rt0_64_entry
    jmp rax
src/arch/x86_64/asm/rt0_64.s (new file, 397 lines)
@@ -0,0 +1,397 @@
; vim: set ft=nasm :
%include "constants.inc"

bits 64

section .bss
align 8

; Allocate space for the interrupt descriptor table (IDT).
; This arch supports up to 256 interrupt handlers
%define IDT_ENTRIES 0xff
_rt0_idt_start:
    resq 2 * IDT_ENTRIES ; each 64-bit IDT entry is 16 bytes
_rt0_idt_end:

_rt0_idt_desc:
    resw 1
    resq 1

; Allocate space for the IRQ handler pointers registered by the IRQ package
_rt0_irq_handlers resq IDT_ENTRIES

; The FS register is loaded with the address of r0_g_ptr. fs:0x00 should contain
; a pointer to the currently active g struct (in this case runtime.g0)
r0_g_ptr: resq 1

section .text

;------------------------------------------------------------------------------
; Kernel 64-bit entry point
;
; The 32-bit entrypoint code jumps to this entrypoint after:
; - it has entered long mode and enabled paging
; - it has loaded a 64-bit GDT
; - it has set up identity paging for the physical 0-8M region and the
;   PAGE_OFFSET to PAGE_OFFSET+8M region.
;------------------------------------------------------------------------------
global _rt0_64_entry
_rt0_64_entry:
    call _rt0_install_redirect_trampolines
    call _rt0_64_load_idt
    call _rt0_64_setup_go_runtime_structs

    ; Call the kernel entry point passing a pointer to the multiboot data
    ; copied by the 32-bit entry code
    extern multiboot_data
    extern _kernel_start
    extern _kernel_end
    extern kernel.Kmain

    mov rax, _kernel_end - PAGE_OFFSET
    push rax
    mov rax, _kernel_start - PAGE_OFFSET
    push rax
    mov rax, multiboot_data
    push rax
    call kernel.Kmain

    ; Kmain should never return; halt the CPU
    mov rdi, err_kmain_returned
    call write_string

    cli
    hlt

;------------------------------------------------------------------------------
; Setup m0, g0 and other symbols required for bootstrapping the Go runtime.
; For the definitions of g and m see the Go runtime src: src/runtime/runtime2.go
;------------------------------------------------------------------------------
_rt0_64_setup_go_runtime_structs:
    %include "go_asm_offsets.inc" ; generated by tools/offsets

    ; The Go allocator expects this symbol to be set to the system page size.
    ; As the kernel bypasses osinit() this needs to be set here.
    extern runtime.physPageSize
    mov rax, runtime.physPageSize
    mov qword [rax], 0x1000 ; 4096

    ; Setup r0_g stack limits using the reserved stack
    extern stack_top
    extern stack_bottom
    extern runtime.g0
    mov rax, stack_bottom
    mov rbx, stack_top
    mov rsi, runtime.g0
    mov qword [rsi+GO_G_STACK+GO_STACK_LO], rax ; g.stack.lo
    mov qword [rsi+GO_G_STACK+GO_STACK_HI], rbx ; g.stack.hi
    mov qword [rsi+GO_G_STACKGUARD0], rax       ; g.stackguard0

    ; Link m0 to the g0
    extern runtime.m0
    mov rbx, runtime.m0
    mov qword [rbx+GO_M_G0], rsi ; m.g0 = g0
    mov qword [rsi+GO_G_M], rbx  ; g.m = m

    ; Store the address of g0 in r0_g_ptr
    mov rax, r0_g_ptr
    mov qword [rax], rsi

    ; According to the x86_64 ABI, the fs register should contain the
    ; address just past the pointer to the user-space thread structure.
    ; This allows the Go runtime to retrieve the address of the currently
    ; active g structure by accessing fs:-0x8.
    ;
    ; Load 64-bit FS register address
    ; eax -> lower 32 bits
    ; edx -> upper 32 bits
    mov ecx, 0xc0000100 ; fs_base
    mov rsi, r0_g_ptr
    add rsi, 8   ; fs -> r0_g_ptr + 0x8
    mov rax, rsi ; lower 32 bits
    shr rsi, 32
    mov rdx, rsi ; high 32 bits
    wrmsr

    ret

;------------------------------------------------------------------------------
; Setup and load IDT. We preload each IDT entry with a pointer to a gate handler
; but set it as inactive. The code in irq_amd64 is responsible for enabling
; individual IDT entries when handlers are installed.
;------------------------------------------------------------------------------
_rt0_64_load_idt:
    mov rax, _rt0_idt_start

%assign gate_num 0
%rep IDT_ENTRIES
    mov rbx, _rt0_64_gate_entry_%+ gate_num
    mov word [rax], bx     ; gate entry bits 0-15
    mov word [rax+2], 0x8  ; GDT descriptor
    mov byte [rax+5], 0x0  ; Mark the entry as NOT present
    shr rbx, 16
    mov word [rax+6], bx   ; gate entry bits 16-31
    shr rbx, 16
    mov dword [rax+8], ebx ; gate entry bits 32-63

    add rax, 16 ; size of IDT entry
%assign gate_num gate_num+1
%endrep
    mov rax, _rt0_idt_desc
    mov word [rax], _rt0_idt_end - _rt0_idt_start - 1 ; similar to GDT this must be len(IDT) - 1
    mov rbx, _rt0_idt_start
    mov qword [rax+2], rbx
    lidt [rax]
    ret
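The %rep block above scatters the handler address across a 16-byte gate descriptor. The following Go sketch packs a handler address the same way the assembly does; the field layout follows the standard amd64 interrupt-gate format and is an illustration, not code from this commit:

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeIDTEntry packs a 64-bit gate descriptor as _rt0_64_load_idt does:
// offset bits 0-15, the 0x8 GDT code selector, a type/attribute byte left at
// zero ("not present", matching the assembly), then offset bits 16-31 and
// 32-63. The last 4 bytes of the 16-byte entry stay reserved/zero.
func encodeIDTEntry(handler uint64) [16]byte {
	var e [16]byte
	binary.LittleEndian.PutUint16(e[0:], uint16(handler))     // offset bits 0-15
	binary.LittleEndian.PutUint16(e[2:], 0x8)                 // GDT descriptor
	e[4] = 0                                                  // IST
	e[5] = 0                                                  // NOT present yet
	binary.LittleEndian.PutUint16(e[6:], uint16(handler>>16)) // offset bits 16-31
	binary.LittleEndian.PutUint32(e[8:], uint32(handler>>32)) // offset bits 32-63
	return e
}

func main() {
	fmt.Printf("% x\n", encodeIDTEntry(0xffff800000101234))
}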
;------------------------------------------------------------------------------
; Generate gate entries. Each gate handler pushes the address of the registered
; handler to the stack before jumping to a dispatcher function.
;
; Some exceptions push an error code to the stack after the stack frame. This
; code must be popped off the stack before calling iretq. The generated handlers
; are aware whether they need to deal with the code or not and jump to the
; appropriate gate dispatcher.
;------------------------------------------------------------------------------
%assign gate_num 0
%rep IDT_ENTRIES
extern _rt0_interrupt_handlers
_rt0_64_gate_entry_%+ gate_num:
    push rax
    mov rax, _rt0_interrupt_handlers
    add rax, 8*gate_num
    mov rax, [rax]
    xchg rax, [rsp] ; store handler address and restore original rax

    ; For a list of gate numbers that push an error code see:
    ; http://wiki.osdev.org/Exceptions
%if (gate_num == 8) || (gate_num >= 10 && gate_num <= 14) || (gate_num == 17) || (gate_num == 30)
    jmp _rt0_64_gate_dispatcher_with_code
%else
    jmp _rt0_64_gate_dispatcher_without_code
%endif
%assign gate_num gate_num+1
%endrep

%macro save_regs 0
    push r15
    push r14
    push r13
    push r12
    push r11
    push r10
    push r9
    push r8
    push rbp
    push rdi
    push rsi
    push rdx
    push rcx
    push rbx
    push rax
%endmacro

%macro restore_regs 0
    pop rax
    pop rbx
    pop rcx
    pop rdx
    pop rsi
    pop rdi
    pop rbp
    pop r8
    pop r9
    pop r10
    pop r11
    pop r12
    pop r13
    pop r14
    pop r15
%endmacro

;------------------------------------------------------------------------------
; This dispatcher is invoked by gate entries that expect a code to be pushed
; by the CPU to the stack. It performs the following functions:
; - save registers
; - push pointer to saved regs
; - push pointer to stack frame
; - read and push exception code
; - invoke handler(code, &frame, &regs)
; - restore registers
; - pop exception code from stack so rsp points to the stack frame
;------------------------------------------------------------------------------
_rt0_64_gate_dispatcher_with_code:
    ; This is what the stack looks like when entering this function:
    ; (each item is 8 bytes wide)
    ;
    ;------------------
    ; handler address | <- pushed by gate_entry_xxx (RSP points here)
    ;-----------------|
    ; Exception code  | <- needs to be removed from stack before calling iretq
    ;-----------------|
    ; RIP             | <- exception frame
    ; CS              |
    ; RFLAGS          |
    ; RSP             |
    ; SS              |
    ;-----------------
    cld

    ; save regs and push a pointer to them
    save_regs
    mov rax, rsp ; rax points to saved rax
    push rax     ; push pointer to saved regs

    ; push pointer to exception stack frame (we have used 15 qwords for the
    ; saved registers plus one qword for the data pushed by the gate entry
    ; plus one extra qword to jump over the exception code)
    add rax, 17*8
    push rax

    ; push exception code (located between the stack frame and the saved regs)
    sub rax, 8
    push qword [rax]

    call [rsp + 18*8] ; call registered irq handler

    add rsp, 3 * 8 ; unshift the pushed arguments so rsp points to the saved regs
    restore_regs

    add rsp, 16 ; pop handler address and exception code off the stack before returning
    iretq

;------------------------------------------------------------------------------
; This dispatcher is invoked by gate entries that do not use exception codes.
; It performs the following functions:
; - save registers
; - push pointer to saved regs
; - push pointer to stack frame
; - invoke handler(&frame, &regs)
; - restore registers
;------------------------------------------------------------------------------
_rt0_64_gate_dispatcher_without_code:
    ; This is what the stack looks like when entering this function:
    ; (each item is 8 bytes wide)
    ;
    ;------------------
    ; handler address | <- pushed by gate_entry_xxx (RSP points here)
    ;-----------------|
    ; RIP             | <- exception frame
    ; CS              |
    ; RFLAGS          |
    ; RSP             |
    ; SS              |
    ;-----------------
    cld

    ; save regs and push a pointer to them
    save_regs
    mov rax, rsp ; rax points to saved rax
    push rax     ; push pointer to saved regs

    ; push pointer to exception stack frame (we have used 15 qwords for the
    ; saved registers plus one qword for the data pushed by the gate entry)
    add rax, 16*8
    push rax

    call [rsp + 17*8] ; call registered irq handler

    add rsp, 2 * 8 ; unshift the pushed arguments so rsp points to the saved regs
    restore_regs

    add rsp, 8 ; pop handler address off the stack before returning
    iretq

;------------------------------------------------------------------------------
; Error messages
;------------------------------------------------------------------------------
err_kmain_returned db '[rt0_64] kmain returned', 0

;------------------------------------------------------------------------------
; Write the NULL-terminated string contained in rdi to the screen using white
; text on red background. Assumes that text-mode is enabled and that its
; physical address is 0xb8000.
;------------------------------------------------------------------------------
write_string:
    mov rbx, 0xb8000
    mov ah, 0x4F
.next_char:
    mov al, byte [rdi]
    test al, al
    jz write_string.done

    mov word [rbx], ax
    add rbx, 2
    inc rdi
    jmp write_string.next_char

.done:
    ret

;------------------------------------------------------------------------------
; Install redirect trampolines. This hack allows us to redirect calls to Go
; runtime functions to the kernel's own implementation without the need to
; export/globalize any symbols. This works by first setting up a redirect table
; (populated by a post-link step) that contains the addresses of the symbol to
; hook and the address where calls to that symbol should be redirected.
;
; This function iterates the redirect table entries and for each entry it
; sets up a trampoline to the dst symbol and overwrites the code in src with
; the 14-byte long _rt0_redirect_trampoline code.
;
; Note: this code modification is only possible because we are currently
; operating in supervisor mode with no memory protection enabled. Under normal
; conditions the .text section should be flagged as read-only.
;------------------------------------------------------------------------------
_rt0_install_redirect_trampolines:
    mov rax, _rt0_redirect_table
    mov rdx, NUM_REDIRECTS

_rt0_install_redirect_trampolines.next:
    mov rdi, [rax]   ; the symbol address to hook
    mov rbx, [rax+8] ; the symbol to redirect to

    ; setup trampoline target and copy it to the hooked symbol
    mov rsi, _rt0_redirect_trampoline
    mov qword [rsi+6], rbx
    mov rcx, 14
    rep movsb ; copy rcx bytes from rsi to rdi

    add rax, 16
    dec rdx
    jnz _rt0_install_redirect_trampolines.next

    ret

;------------------------------------------------------------------------------
; This trampoline exploits rip-relative addressing to allow a jump to a
; 64-bit address without the need to touch any registers. The generated
; code is equivalent to:
;
; jmp [rip+0]
; dq abs_address_to_jump_to
;------------------------------------------------------------------------------
_rt0_redirect_trampoline:
    db 0xff ; the first 6 bytes encode a "jmp [rip+0]" instruction
    db 0x25
    dd 0x00
    dq 0x00 ; the absolute address to jump to
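For reference, here is a hedged Go sketch (illustration only) of the 14 bytes the installer copies over each hooked symbol: 6 bytes encoding "jmp [rip+0]" followed by the 8-byte absolute target that the rip-relative jump lands on:

package main

import (
	"encoding/binary"
	"fmt"
)

// redirectTrampoline builds the same 14-byte sequence as
// _rt0_redirect_trampoline: ff 25 00 00 00 00 ("jmp qword [rip+0]") followed
// by the absolute 64-bit destination address.
func redirectTrampoline(target uint64) [14]byte {
	var t [14]byte
	t[0], t[1] = 0xff, 0x25 // jmp qword [rip+disp32]
	// bytes 2-5 stay zero: disp32 = 0, so RIP points at the qword below
	binary.LittleEndian.PutUint64(t[6:], target)
	return t
}

func main() {
	fmt.Printf("% x\n", redirectTrampoline(0xffff800000200000))
}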
;------------------------------------------------------------------------------
; The redirect table is placed in a dedicated section allowing us to easily
; find its offset in the kernel image file. As the VMA addresses of the src
; and target symbols for the redirect are not known in advance, we just reserve
; enough space for the src and dst addresses using the NUM_REDIRECTS define
; which is calculated by the Makefile and passed to nasm.
;------------------------------------------------------------------------------
section .goredirectstbl

_rt0_redirect_table:
%rep NUM_REDIRECTS
    dq 0 ; src: address of the symbol we want to redirect
    dq 0 ; dst: address of the symbol where calls to src are redirected to
%endrep
src/arch/x86_64/script/grub.cfg (new file, 7 lines)
@@ -0,0 +1,7 @@
set timeout=0
set default=0

menuentry "gopheros" {
    multiboot2 /boot/kernel.bin
    boot
}
src/arch/x86_64/script/linker.ld.in (new file, 52 lines)
@@ -0,0 +1,52 @@
VMA = PAGE_OFFSET + LOAD_ADDRESS;

ENTRY(_rt0_32_entry)

SECTIONS {
    /* Set the kernel VMA at PAGE_OFFSET + 1M
     * but load it at physical address 1M */
    . = VMA;

    _kernel_start = .;

    .text BLOCK(4K) : AT(ADDR(.text) - PAGE_OFFSET)
    {
        /* The multiboot header must be present in the first 4K of the kernel
         * image so that the bootloader can find it */
        *(.multiboot_header)

        *(.rt0)

        *(.text)
    }

    /* Read-only data. */
    .rodata ALIGN(4K) : AT(ADDR(.rodata) - PAGE_OFFSET)
    {
        *(.rodata)
    }

    /* Read-write data (initialized) */
    .data ALIGN(4K) : AT(ADDR(.data) - PAGE_OFFSET)
    {
        *(.data)
    }

    /* Read-write data (zeroed) */
    .bss ALIGN(4K) : AT(ADDR(.bss) - PAGE_OFFSET)
    {
        *(COMMON)
        *(.bss)
    }

    /* Go function redirection table. This table is used for hooking
     * Go runtime function symbols so that calls to them are redirected to
     * functions provided by the kernel.
     */
    .goredirectstbl ALIGN(4K) : AT(ADDR(.goredirectstbl) - PAGE_OFFSET)
    {
        *(.goredirectstbl)
    }

    _kernel_end = ALIGN(4K);
}
src/gopheros/kernel/cpu/cpu_amd64.go (new file, 23 lines)
@@ -0,0 +1,23 @@
package cpu

// EnableInterrupts enables interrupt handling.
func EnableInterrupts()

// DisableInterrupts disables interrupt handling.
func DisableInterrupts()

// Halt stops instruction execution.
func Halt()

// FlushTLBEntry flushes a TLB entry for a particular virtual address.
func FlushTLBEntry(virtAddr uintptr)

// SwitchPDT sets the root page table directory to point to the specified
// physical address and flushes the TLB.
func SwitchPDT(pdtPhysAddr uintptr)

// ActivePDT returns the physical address of the currently active page table.
func ActivePDT() uintptr

// ReadCR2 returns the value stored in the CR2 register.
func ReadCR2() uint64
src/gopheros/kernel/cpu/cpu_amd64.s (new file, 33 lines)
@@ -0,0 +1,33 @@
#include "textflag.h"

TEXT ·EnableInterrupts(SB),NOSPLIT,$0
	STI
	RET

TEXT ·DisableInterrupts(SB),NOSPLIT,$0
	CLI
	RET

TEXT ·Halt(SB),NOSPLIT,$0
	CLI
	HLT
	RET

TEXT ·FlushTLBEntry(SB),NOSPLIT,$0
	INVLPG virtAddr+0(FP)
	RET

TEXT ·SwitchPDT(SB),NOSPLIT,$0
	// loading CR3 also triggers a TLB flush
	MOVQ pdtPhysAddr+0(FP), CR3
	RET

TEXT ·ActivePDT(SB),NOSPLIT,$0
	MOVQ CR3, AX
	MOVQ AX, ret+0(FP)
	RET

TEXT ·ReadCR2(SB),NOSPLIT,$0
	MOVQ CR2, AX
	MOVQ AX, ret+0(FP)
	RET
src/gopheros/kernel/driver/tty/tty.go (new file, 19 lines)
@@ -0,0 +1,19 @@
package tty

import "io"

// Tty is implemented by objects that can register themselves as ttys.
type Tty interface {
	io.Writer
	io.ByteWriter

	// Position returns the current cursor position (x, y).
	Position() (uint16, uint16)

	// SetPosition sets the current cursor position to (x, y). Console implementations
	// must clip the provided cursor position if it exceeds the console dimensions.
	SetPosition(x, y uint16)

	// Clear clears the terminal.
	Clear()
}
src/gopheros/kernel/driver/tty/vt.go (new file, 128 lines)
@@ -0,0 +1,128 @@
package tty

import "gopheros/kernel/driver/video/console"

const (
	defaultFg = console.LightGrey
	defaultBg = console.Black
	tabWidth  = 4
)

// Vt implements a simple terminal that can process LF and CR characters. The
// terminal uses a console device for its output.
type Vt struct {
	// Go interfaces will not work before we can get memory allocation working.
	// Until then we need to use concrete types instead.
	cons *console.Ega

	width  uint16
	height uint16

	curX    uint16
	curY    uint16
	curAttr console.Attr
}

// AttachTo links the terminal with the specified console device and updates
// the terminal's dimensions to match the ones reported by the attached device.
func (t *Vt) AttachTo(cons *console.Ega) {
	t.cons = cons
	t.width, t.height = cons.Dimensions()
	t.curX = 0
	t.curY = 0

	// Default to lightgrey on black text.
	t.curAttr = makeAttr(defaultFg, defaultBg)
}

// Clear clears the terminal.
func (t *Vt) Clear() {
	t.clear()
}

// Position returns the current cursor position (x, y).
func (t *Vt) Position() (uint16, uint16) {
	return t.curX, t.curY
}

// SetPosition sets the current cursor position to (x, y).
func (t *Vt) SetPosition(x, y uint16) {
	if x >= t.width {
		x = t.width - 1
	}

	if y >= t.height {
		y = t.height - 1
	}

	t.curX, t.curY = x, y
}

// Write implements io.Writer.
func (t *Vt) Write(data []byte) (int, error) {
	for _, b := range data {
		t.WriteByte(b)
	}

	return len(data), nil
}

// WriteByte implements io.ByteWriter.
func (t *Vt) WriteByte(b byte) error {
	switch b {
	case '\r':
		t.cr()
	case '\n':
		t.cr()
		t.lf()
	case '\b':
		if t.curX > 0 {
			t.cons.Write(' ', t.curAttr, t.curX, t.curY)
			t.curX--
		}
	case '\t':
		for i := 0; i < tabWidth; i++ {
			t.cons.Write(' ', t.curAttr, t.curX, t.curY)
			t.curX++
			if t.curX == t.width {
				t.cr()
				t.lf()
			}
		}
	default:
		t.cons.Write(b, t.curAttr, t.curX, t.curY)
		t.curX++
		if t.curX == t.width {
			t.cr()
			t.lf()
		}
	}

	return nil
}

// clear clears the terminal.
func (t *Vt) clear() {
	t.cons.Clear(0, 0, t.width, t.height)
}

// cr resets the x coordinate of the terminal cursor to 0.
func (t *Vt) cr() {
	t.curX = 0
}

// lf advances the y coordinate of the terminal cursor by one line, scrolling
// the terminal contents if the end of the last terminal line is reached.
func (t *Vt) lf() {
	if t.curY+1 < t.height {
		t.curY++
		return
	}

	t.cons.Scroll(console.Up, 1)
	t.cons.Clear(0, t.height-1, t.width, 1)
}

func makeAttr(fg, bg console.Attr) console.Attr {
	return (bg << 4) | (fg & 0xF)
}
src/gopheros/kernel/driver/tty/vt_test.go (new file, 88 lines)
@@ -0,0 +1,88 @@
package tty

import (
	"gopheros/kernel/driver/video/console"
	"testing"
	"unsafe"
)

func TestVtPosition(t *testing.T) {
	specs := []struct {
		inX, inY   uint16
		expX, expY uint16
	}{
		{20, 20, 20, 20},
		{100, 20, 79, 20},
		{10, 200, 10, 24},
		{10, 200, 10, 24},
		{100, 100, 79, 24},
	}

	fb := make([]uint16, 80*25)
	var cons console.Ega
	cons.Init(80, 25, uintptr(unsafe.Pointer(&fb[0])))

	var vt Vt
	vt.AttachTo(&cons)

	for specIndex, spec := range specs {
		vt.SetPosition(spec.inX, spec.inY)
		if x, y := vt.Position(); x != spec.expX || y != spec.expY {
			t.Errorf("[spec %d] expected setting position to (%d, %d) to update the position to (%d, %d); got (%d, %d)", specIndex, spec.inX, spec.inY, spec.expX, spec.expY, x, y)
		}
	}
}

func TestWrite(t *testing.T) {
	fb := make([]uint16, 80*25)
	var cons console.Ega
	cons.Init(80, 25, uintptr(unsafe.Pointer(&fb[0])))

	var vt Vt
	vt.AttachTo(&cons)

	vt.Clear()
	vt.SetPosition(0, 1)
	vt.Write([]byte("12\n\t3\n4\r567\b8"))

	// Tab spanning rows
	vt.SetPosition(78, 4)
	vt.WriteByte('\t')
	vt.WriteByte('9')

	// Trigger scroll
	vt.SetPosition(79, 24)
	vt.Write([]byte{'!'})

	specs := []struct {
		x, y    uint16
		expChar byte
	}{
		{0, 0, '1'},
		{1, 0, '2'},
		// tabs
		{0, 1, ' '},
		{1, 1, ' '},
		{2, 1, ' '},
		{3, 1, ' '},
		{4, 1, '3'},
		// tab spanning 2 rows
		{78, 3, ' '},
		{79, 3, ' '},
		{0, 4, ' '},
		{1, 4, ' '},
		{2, 4, '9'},
		//
		{0, 2, '5'},
		{1, 2, '6'},
		{2, 2, '8'}, // overwritten by BS
		{79, 23, '!'},
	}

	for specIndex, spec := range specs {
		ch := (byte)(fb[(spec.y*vt.width)+spec.x] & 0xFF)
		if ch != spec.expChar {
			t.Errorf("[spec %d] expected char at (%d, %d) to be %c; got %c", specIndex, spec.x, spec.y, spec.expChar, ch)
		}
	}
}
src/gopheros/kernel/driver/video/console/console.go (new file, 48 lines)
@@ -0,0 +1,48 @@
package console

// Attr defines a color attribute.
type Attr uint16

// The set of attributes that can be passed to Write().
const (
	Black Attr = iota
	Blue
	Green
	Cyan
	Red
	Magenta
	Brown
	LightGrey
	Grey
	LightBlue
	LightGreen
	LightCyan
	LightRed
	LightMagenta
	LightBrown
	White
)

// ScrollDir defines a scroll direction.
type ScrollDir uint8

// The supported list of scroll directions for the console Scroll() calls.
const (
	Up ScrollDir = iota
	Down
)

// The Console interface is implemented by objects that can function as physical consoles.
type Console interface {
	// Dimensions returns the width and height of the console in characters.
	Dimensions() (uint16, uint16)

	// Clear clears the specified rectangular region
	Clear(x, y, width, height uint16)

	// Scroll a particular number of lines to the specified direction.
	Scroll(dir ScrollDir, lines uint16)

	// Write a char to the specified location.
	Write(ch byte, attr Attr, x, y uint16)
}
src/gopheros/kernel/driver/video/console/ega.go (new file, 100 lines)
@@ -0,0 +1,100 @@
package console

import (
	"reflect"
	"unsafe"
)

const (
	clearColor = Black
	clearChar  = byte(' ')
)

// Ega implements an EGA-compatible text console. At the moment, it uses the
// EGA console physical address as its output. After implementing a memory
// allocator, each console will use its own framebuffer while the active console
// will periodically sync its internal buffer with the physical screen buffer.
type Ega struct {
	width  uint16
	height uint16

	fb []uint16
}

// Init sets up the console.
func (cons *Ega) Init(width, height uint16, fbPhysAddr uintptr) {
	cons.width = width
	cons.height = height

	cons.fb = *(*[]uint16)(unsafe.Pointer(&reflect.SliceHeader{
		Len:  int(cons.width * cons.height),
		Cap:  int(cons.width * cons.height),
		Data: fbPhysAddr,
	}))
}

// Clear clears the specified rectangular region
func (cons *Ega) Clear(x, y, width, height uint16) {
	var (
		attr                 = uint16((clearColor << 4) | clearColor)
		clr                  = attr | uint16(clearChar)
		rowOffset, colOffset uint16
	)

	// clip rectangle
	if x >= cons.width {
		x = cons.width
	}
	if y >= cons.height {
		y = cons.height
	}

	if x+width > cons.width {
		width = cons.width - x
	}
	if y+height > cons.height {
		height = cons.height - y
	}

	rowOffset = (y * cons.width) + x
	for ; height > 0; height, rowOffset = height-1, rowOffset+cons.width {
		for colOffset = rowOffset; colOffset < rowOffset+width; colOffset++ {
			cons.fb[colOffset] = clr
		}
	}
}

// Dimensions returns the console width and height in characters.
func (cons *Ega) Dimensions() (uint16, uint16) {
	return cons.width, cons.height
}

// Scroll a particular number of lines to the specified direction.
func (cons *Ega) Scroll(dir ScrollDir, lines uint16) {
	if lines == 0 || lines > cons.height {
		return
	}

	var i uint16
	offset := lines * cons.width

	switch dir {
	case Up:
		for ; i < (cons.height-lines)*cons.width; i++ {
			cons.fb[i] = cons.fb[i+offset]
		}
	case Down:
		for i = cons.height*cons.width - 1; i >= lines*cons.width; i-- {
			cons.fb[i] = cons.fb[i-offset]
		}
	}
}

// Write a char to the specified location.
func (cons *Ega) Write(ch byte, attr Attr, x, y uint16) {
	if x >= cons.width || y >= cons.height {
		return
	}

	cons.fb[(y*cons.width)+x] = (uint16(attr) << 8) | uint16(ch)
}
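Each framebuffer cell written by Ega.Write is a single uint16: the attribute byte (background color in the high nibble, foreground in the low nibble, as produced by the tty package's makeAttr) occupies the high byte and the character occupies the low byte. A small standalone sketch of the packing (illustration only):

package main

import "fmt"

// packCell mirrors Ega.Write: attribute byte in the high 8 bits, ASCII char
// in the low 8 bits, where the attribute itself is (bg << 4) | (fg & 0xF).
func packCell(ch byte, fg, bg uint16) uint16 {
	attr := (bg << 4) | (fg & 0xF)
	return (attr << 8) | uint16(ch)
}

func main() {
	const (
		black = 0
		red   = 4
	)
	// '!' in red on black, matching the TestEgaWrite case in the test file
	// that follows: prints 0x421.
	fmt.Printf("%#x\n", packCell('!', red, black))
}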
src/gopheros/kernel/driver/video/console/ega_test.go (new file, 212 lines)
@@ -0,0 +1,212 @@
package console

import (
	"testing"
	"unsafe"
)

func TestEgaInit(t *testing.T) {
	var cons Ega
	cons.Init(80, 25, 0xB8000)

	var expWidth uint16 = 80
	var expHeight uint16 = 25

	if w, h := cons.Dimensions(); w != expWidth || h != expHeight {
		t.Fatalf("expected console dimensions after Init() to be (%d, %d); got (%d, %d)", expWidth, expHeight, w, h)
	}
}

func TestEgaClear(t *testing.T) {
	specs := []struct {
		// Input rect
		x, y, w, h uint16

		// Expected area to be cleared
		expX, expY, expW, expH uint16
	}{
		{
			0, 0, 500, 500,
			0, 0, 80, 25,
		},
		{
			10, 10, 11, 50,
			10, 10, 11, 15,
		},
		{
			10, 10, 110, 1,
			10, 10, 70, 1,
		},
		{
			70, 20, 20, 20,
			70, 20, 10, 5,
		},
		{
			90, 25, 20, 20,
			0, 0, 0, 0,
		},
		{
			12, 12, 5, 6,
			12, 12, 5, 6,
		},
	}

	fb := make([]uint16, 80*25)
	var cons Ega
	cons.Init(80, 25, uintptr(unsafe.Pointer(&fb[0])))

	testPat := uint16(0xDEAD)
	clearPat := (uint16(clearColor) << 8) | uint16(clearChar)

nextSpec:
	for specIndex, spec := range specs {
		// Fill FB with test pattern
		for i := 0; i < len(cons.fb); i++ {
			fb[i] = testPat
		}

		cons.Clear(spec.x, spec.y, spec.w, spec.h)

		var x, y uint16
		for y = 0; y < cons.height; y++ {
			for x = 0; x < cons.width; x++ {
				fbVal := fb[(y*cons.width)+x]

				if x < spec.expX || y < spec.expY || x >= spec.expX+spec.expW || y >= spec.expY+spec.expH {
					if fbVal != testPat {
						t.Errorf("[spec %d] expected char at (%d, %d) not to be cleared", specIndex, x, y)
						continue nextSpec
					}
				} else {
					if fbVal != clearPat {
						t.Errorf("[spec %d] expected char at (%d, %d) to be cleared", specIndex, x, y)
						continue nextSpec
					}
				}
			}
		}
	}
}

func TestEgaScrollUp(t *testing.T) {
	specs := []uint16{
		0,
		1,
		2,
	}

	fb := make([]uint16, 80*25)
	var cons Ega
	cons.Init(80, 25, uintptr(unsafe.Pointer(&fb[0])))

nextSpec:
	for specIndex, lines := range specs {
		// Fill buffer with test pattern
		var x, y, index uint16
		for y = 0; y < cons.height; y++ {
			for x = 0; x < cons.width; x++ {
				fb[index] = (y << 8) | x
				index++
			}
		}

		cons.Scroll(Up, lines)

		// Check that rows 1 to (height - lines) have been scrolled up
		index = 0
		for y = 0; y < cons.height-lines; y++ {
			for x = 0; x < cons.width; x++ {
				expVal := ((y + lines) << 8) | x
				if fb[index] != expVal {
					t.Errorf("[spec %d] expected value at (%d, %d) to be %d; got %d", specIndex, x, y, expVal, cons.fb[index])
					continue nextSpec
				}
				index++
			}
		}
	}
}

func TestEgaScrollDown(t *testing.T) {
	specs := []uint16{
		0,
		1,
		2,
	}

	fb := make([]uint16, 80*25)
	var cons Ega
	cons.Init(80, 25, uintptr(unsafe.Pointer(&fb[0])))

nextSpec:
	for specIndex, lines := range specs {
		// Fill buffer with test pattern
		var x, y, index uint16
		for y = 0; y < cons.height; y++ {
			for x = 0; x < cons.width; x++ {
				fb[index] = (y << 8) | x
				index++
			}
		}

		cons.Scroll(Down, lines)

		// Check that rows lines to height have been scrolled down
		index = lines * cons.width
		for y = lines; y < cons.height-lines; y++ {
			for x = 0; x < cons.width; x++ {
				expVal := ((y - lines) << 8) | x
				if fb[index] != expVal {
					t.Errorf("[spec %d] expected value at (%d, %d) to be %d; got %d", specIndex, x, y, expVal, cons.fb[index])
					continue nextSpec
				}
				index++
			}
		}
	}
}

func TestEgaWriteWithOffScreenCoords(t *testing.T) {
	fb := make([]uint16, 80*25)
	var cons Ega
	cons.Init(80, 25, uintptr(unsafe.Pointer(&fb[0])))

	specs := []struct {
		x, y uint16
	}{
		{80, 25},
		{90, 24},
		{79, 30},
		{100, 100},
	}

nextSpec:
	for specIndex, spec := range specs {
		for i := 0; i < len(cons.fb); i++ {
			fb[i] = 0
		}

		cons.Write('!', Red, spec.x, spec.y)

		for i := 0; i < len(cons.fb); i++ {
			if got := fb[i]; got != 0 {
				t.Errorf("[spec %d] expected Write() with off-screen coords to be a no-op", specIndex)
				continue nextSpec
			}
		}
	}
}

func TestEgaWrite(t *testing.T) {
	fb := make([]uint16, 80*25)
	var cons Ega
	cons.Init(80, 25, uintptr(unsafe.Pointer(&fb[0])))

	attr := (Black << 4) | Red
	cons.Write('!', attr, 0, 0)

	expVal := uint16(attr<<8) | uint16('!')
	if got := fb[0]; got != expVal {
		t.Errorf("expected call to Write() to set fb[0] to %d; got %d", expVal, got)
	}
}
src/gopheros/kernel/error.go (new file, 18 lines)
@@ -0,0 +1,18 @@
package kernel

// Error describes a kernel error. All kernel errors must be defined as global
// variables that are pointers to the Error structure. This requirement stems
// from the fact that the Go allocator is not available to us, so we cannot use
// errors.New.
type Error struct {
	// The module where the error occurred.
	Module string

	// The error message
	Message string
}

// Error implements the error interface.
func (e *Error) Error() string {
	return e.Message
}
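Since the doc comment requires kernel errors to be package-level pointer values, a typical definition and use would look like the following sketch (the package and identifiers are hypothetical):

package kmain // hypothetical consumer of the kernel package

import "gopheros/kernel"

// Errors are allocated once as globals, so returning them never needs the
// (not yet available) heap allocator.
var errBootFailed = &kernel.Error{
	Module:  "kmain",
	Message: "boot sequence failed",
}

func step() *kernel.Error {
	return errBootFailed
}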
src/gopheros/kernel/error_test.go (new file, 14 lines)
@@ -0,0 +1,14 @@
package kernel

import "testing"

func TestKernelError(t *testing.T) {
	err := &Error{
		Module:  "foo",
		Message: "error message",
	}

	if err.Error() != err.Message {
		t.Fatalf("expected err.Error() to return %q; got %q", err.Message, err.Error())
	}
}
src/gopheros/kernel/goruntime/bootstrap.go (new file, 183 lines)
@@ -0,0 +1,183 @@
// Package goruntime contains code for bootstrapping Go runtime features such
// as the memory allocator.
package goruntime

import (
	"gopheros/kernel"
	"gopheros/kernel/mem"
	"gopheros/kernel/mem/pmm/allocator"
	"gopheros/kernel/mem/vmm"
	"unsafe"
)

var (
	mapFn                = vmm.Map
	earlyReserveRegionFn = vmm.EarlyReserveRegion
	frameAllocFn         = allocator.AllocFrame
	mallocInitFn         = mallocInit
	algInitFn            = algInit
	modulesInitFn        = modulesInit
	typeLinksInitFn      = typeLinksInit
	itabsInitFn          = itabsInit

	// A seed for the pseudo-random number generator used by getRandomData
	prngSeed = 0xdeadc0de
)

//go:linkname algInit runtime.alginit
func algInit()

//go:linkname modulesInit runtime.modulesinit
func modulesInit()

//go:linkname typeLinksInit runtime.typelinksinit
func typeLinksInit()

//go:linkname itabsInit runtime.itabsinit
func itabsInit()

//go:linkname mallocInit runtime.mallocinit
func mallocInit()

//go:linkname mSysStatInc runtime.mSysStatInc
func mSysStatInc(*uint64, uintptr)

// sysReserve reserves address space without allocating any memory or
// establishing any page mappings.
//
// This function replaces runtime.sysReserve and is required for initializing
// the Go allocator.
//
//go:redirect-from runtime.sysReserve
//go:nosplit
func sysReserve(_ unsafe.Pointer, size uintptr, reserved *bool) unsafe.Pointer {
	regionSize := (mem.Size(size) + mem.PageSize - 1) & ^(mem.PageSize - 1)
	regionStartAddr, err := earlyReserveRegionFn(regionSize)
	if err != nil {
		panic(err)
	}

	*reserved = true
	return unsafe.Pointer(regionStartAddr)
}
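The rounding expression in sysReserve (and repeated in sysMap/sysAlloc below) is the standard power-of-two round-up. A quick standalone check, assuming a 4 KiB page size to match the value the assembly stores in runtime.physPageSize:

package main

import "fmt"

const pageSize = 4096 // assumed, matching mem.PageSize on amd64

// roundUp aligns size to the next multiple of pageSize, exactly like
// (size + mem.PageSize - 1) & ^(mem.PageSize - 1) in sysReserve.
func roundUp(size uint64) uint64 {
	return (size + pageSize - 1) &^ (pageSize - 1)
}

func main() {
	fmt.Println(roundUp(1))    // 4096
	fmt.Println(roundUp(4096)) // 4096
	fmt.Println(roundUp(4097)) // 8192
}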
|
||||
// sysMap establishes a copy-on-write mapping for a particular memory region
|
||||
// that has been reserved previously via a call to sysReserve.
|
||||
//
|
||||
// This function replaces runtime.sysReserve and is required for initializing
|
||||
// the Go allocator.
|
||||
//
|
||||
//go:redirect-from runtime.sysMap
|
||||
//go:nosplit
|
||||
func sysMap(virtAddr unsafe.Pointer, size uintptr, reserved bool, sysStat *uint64) unsafe.Pointer {
|
||||
if !reserved {
|
||||
panic("sysMap should only be called with reserved=true")
|
||||
}
|
||||
|
||||
// We trust the allocator to call sysMap with an address inside a reserved region.
|
||||
regionStartAddr := (uintptr(virtAddr) + uintptr(mem.PageSize-1)) & ^uintptr(mem.PageSize-1)
|
||||
regionSize := (mem.Size(size) + mem.PageSize - 1) & ^(mem.PageSize - 1)
|
||||
pageCount := regionSize >> mem.PageShift
|
||||
|
||||
mapFlags := vmm.FlagPresent | vmm.FlagNoExecute | vmm.FlagCopyOnWrite
	for page := vmm.PageFromAddress(regionStartAddr); pageCount > 0; pageCount, page = pageCount-1, page+1 {
		if err := mapFn(page, vmm.ReservedZeroedFrame, mapFlags); err != nil {
			return unsafe.Pointer(uintptr(0))
		}
	}

	mSysStatInc(sysStat, uintptr(regionSize))
	return unsafe.Pointer(regionStartAddr)
}

// sysAlloc reserves enough physical frames to satisfy the allocation request
// and establishes a contiguous virtual page mapping for them, returning a
// pointer to the start of the virtual region.
//
// This function replaces runtime.sysAlloc and is required for initializing the
// Go allocator.
//
//go:redirect-from runtime.sysAlloc
//go:nosplit
func sysAlloc(size uintptr, sysStat *uint64) unsafe.Pointer {
	regionSize := (mem.Size(size) + mem.PageSize - 1) & ^(mem.PageSize - 1)
	regionStartAddr, err := earlyReserveRegionFn(regionSize)
	if err != nil {
		return unsafe.Pointer(uintptr(0))
	}

	mapFlags := vmm.FlagPresent | vmm.FlagNoExecute | vmm.FlagRW
	pageCount := regionSize >> mem.PageShift
	for page := vmm.PageFromAddress(regionStartAddr); pageCount > 0; pageCount, page = pageCount-1, page+1 {
		frame, err := frameAllocFn()
		if err != nil {
			return unsafe.Pointer(uintptr(0))
		}

		if err = mapFn(page, frame, mapFlags); err != nil {
			return unsafe.Pointer(uintptr(0))
		}
	}

	mSysStatInc(sysStat, uintptr(regionSize))
	return unsafe.Pointer(regionStartAddr)
}
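
// Note the contrast with sysMap above: sysAlloc eagerly backs every page
// with a freshly allocated writable frame, while sysMap defers that cost
// via the shared zeroed frame and copy-on-write.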

// nanotime returns a monotonically increasing clock value. This is a dummy
// implementation and will be replaced when the timekeeper package is
// implemented.
//
// This function replaces runtime.nanotime and is invoked by the Go allocator
// when a span allocation is performed.
//
//go:redirect-from runtime.nanotime
//go:nosplit
func nanotime() uint64 {
	// Use a dummy loop to prevent the compiler from inlining this function.
	for i := 0; i < 100; i++ {
	}
	return 1
}

// getRandomData populates the given slice with random data. The runtime
// package implementation reads a random stream from /dev/random; as no such
// device is available to the kernel, a PRNG is used instead.
//
//go:redirect-from runtime.getRandomData
func getRandomData(r []byte) {
	for i := 0; i < len(r); i++ {
		prngSeed = (prngSeed * 58321) + 11113
		r[i] = byte((prngSeed >> 16) & 255)
	}
}
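
// The PRNG above is a simple linear congruential generator
// (seed' = seed*58321 + 11113) that emits bits 16-23 of each new seed value;
// it is deterministic and must not be treated as a source of secure
// randomness.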

// Init enables support for various Go runtime features. After a call to Init
// the following runtime features become available for use:
// - heap memory allocation (new, make etc.)
// - map primitives
// - interfaces
func Init() *kernel.Error {
	mallocInitFn()
	algInitFn()       // setup hash implementation for map keys
	modulesInitFn()   // provides activeModules
	typeLinksInitFn() // uses maps, activeModules
	itabsInitFn()     // uses activeModules

	return nil
}

func init() {
	// Dummy calls so the compiler does not optimize away the functions in
	// this file.
	var (
		reserved bool
		stat     uint64
		zeroPtr  = unsafe.Pointer(uintptr(0))
	)

	sysReserve(zeroPtr, 0, &reserved)
	sysMap(zeroPtr, 0, reserved, &stat)
	sysAlloc(0, &stat)
	getRandomData(nil)
	stat = nanotime()
}
1
src/gopheros/kernel/goruntime/bootstrap.s
Normal file
@@ -0,0 +1 @@
// dummy file to prevent compiler errors for using go:linkname
269
src/gopheros/kernel/goruntime/bootstrap_test.go
Normal file
@@ -0,0 +1,269 @@
package goruntime

import (
	"gopheros/kernel"
	"gopheros/kernel/mem"
	"gopheros/kernel/mem/pmm"
	"gopheros/kernel/mem/pmm/allocator"
	"gopheros/kernel/mem/vmm"
	"reflect"
	"testing"
	"unsafe"
)

func TestSysReserve(t *testing.T) {
	defer func() {
		earlyReserveRegionFn = vmm.EarlyReserveRegion
	}()
	var reserved bool

	t.Run("success", func(t *testing.T) {
		specs := []struct {
			reqSize       mem.Size
			expRegionSize mem.Size
		}{
			// exact multiple of page size
			{100 << mem.PageShift, 100 << mem.PageShift},
			// size should be rounded up to nearest page size
			{2*mem.PageSize - 1, 2 * mem.PageSize},
		}

		for specIndex, spec := range specs {
			earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) {
				if rsvSize != spec.expRegionSize {
					t.Errorf("[spec %d] expected reservation size to be %d; got %d", specIndex, spec.expRegionSize, rsvSize)
				}

				return 0xbadf00d, nil
			}

			ptr := sysReserve(nil, uintptr(spec.reqSize), &reserved)
			if uintptr(ptr) == 0 {
				t.Errorf("[spec %d] sysReserve returned 0", specIndex)
				continue
			}
		}
	})

	t.Run("fail", func(t *testing.T) {
		defer func() {
			if err := recover(); err == nil {
				t.Fatal("expected sysReserve to panic")
			}
		}()

		earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) {
			return 0, &kernel.Error{Module: "test", Message: "consumed available address space"}
		}

		sysReserve(nil, uintptr(0xf00), &reserved)
	})
}
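
// Note: the tests in this file exercise sysReserve/sysMap/sysAlloc through
// the package-level earlyReserveRegionFn, mapFn and frameAllocFn seams: each
// test swaps in a closure and restores the real vmm/allocator implementation
// via defer.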

func TestSysMap(t *testing.T) {
	defer func() {
		earlyReserveRegionFn = vmm.EarlyReserveRegion
		mapFn = vmm.Map
	}()

	t.Run("success", func(t *testing.T) {
		specs := []struct {
			reqAddr         uintptr
			reqSize         mem.Size
			expRsvAddr      uintptr
			expMapCallCount int
		}{
			// exact multiple of page size
			{100 << mem.PageShift, 4 * mem.PageSize, 100 << mem.PageShift, 4},
			// address should be rounded up to nearest page size
			{(100 << mem.PageShift) + 1, 4 * mem.PageSize, 101 << mem.PageShift, 4},
			// size should be rounded up to nearest page size
			{1 << mem.PageShift, (4 * mem.PageSize) + 1, 1 << mem.PageShift, 5},
		}

		for specIndex, spec := range specs {
			var (
				sysStat      uint64
				mapCallCount int
			)
			mapFn = func(_ vmm.Page, _ pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
				expFlags := vmm.FlagPresent | vmm.FlagCopyOnWrite | vmm.FlagNoExecute
				if flags != expFlags {
					t.Errorf("[spec %d] expected map flags to be %d; got %d", specIndex, expFlags, flags)
				}
				mapCallCount++
				return nil
			}

			rsvPtr := sysMap(unsafe.Pointer(spec.reqAddr), uintptr(spec.reqSize), true, &sysStat)
			if got := uintptr(rsvPtr); got != spec.expRsvAddr {
				t.Errorf("[spec %d] expected mapped address 0x%x; got 0x%x", specIndex, spec.expRsvAddr, got)
			}

			if mapCallCount != spec.expMapCallCount {
				t.Errorf("[spec %d] expected vmm.Map call count to be %d; got %d", specIndex, spec.expMapCallCount, mapCallCount)
			}

			if exp := uint64(spec.expMapCallCount << mem.PageShift); sysStat != exp {
				t.Errorf("[spec %d] expected stat counter to be %d; got %d", specIndex, exp, sysStat)
			}
		}
	})

	t.Run("map fails", func(t *testing.T) {
		mapFn = func(_ vmm.Page, _ pmm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error {
			return &kernel.Error{Module: "test", Message: "map failed"}
		}

		var sysStat uint64
		if got := sysMap(unsafe.Pointer(uintptr(0xbadf00d)), 1, true, &sysStat); got != unsafe.Pointer(uintptr(0)) {
			t.Fatalf("expected sysMap to return 0x0 if Map returns an error; got 0x%x", uintptr(got))
		}
	})

	t.Run("panic if not reserved", func(t *testing.T) {
		defer func() {
			if err := recover(); err == nil {
				t.Fatal("expected sysMap to panic")
			}
		}()

		sysMap(nil, 0, false, nil)
	})
}

func TestSysAlloc(t *testing.T) {
	defer func() {
		earlyReserveRegionFn = vmm.EarlyReserveRegion
		mapFn = vmm.Map
		frameAllocFn = allocator.AllocFrame
	}()

	t.Run("success", func(t *testing.T) {
		specs := []struct {
			reqSize         mem.Size
			expMapCallCount int
		}{
			// exact multiple of page size
			{4 * mem.PageSize, 4},
			// round up to nearest page size
			{(4 * mem.PageSize) + 1, 5},
		}

		expRegionStartAddr := uintptr(10 * mem.PageSize)
		earlyReserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
			return expRegionStartAddr, nil
		}

		frameAllocFn = func() (pmm.Frame, *kernel.Error) {
			return pmm.Frame(0), nil
		}

		for specIndex, spec := range specs {
			var (
				sysStat      uint64
				mapCallCount int
			)

			mapFn = func(_ vmm.Page, _ pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
				expFlags := vmm.FlagPresent | vmm.FlagNoExecute | vmm.FlagRW
				if flags != expFlags {
					t.Errorf("[spec %d] expected map flags to be %d; got %d", specIndex, expFlags, flags)
				}
				mapCallCount++
				return nil
			}

			if got := sysAlloc(uintptr(spec.reqSize), &sysStat); uintptr(got) != expRegionStartAddr {
				t.Errorf("[spec %d] expected sysAlloc to return address 0x%x; got 0x%x", specIndex, expRegionStartAddr, uintptr(got))
			}

			if mapCallCount != spec.expMapCallCount {
				t.Errorf("[spec %d] expected vmm.Map call count to be %d; got %d", specIndex, spec.expMapCallCount, mapCallCount)
			}

			if exp := uint64(spec.expMapCallCount << mem.PageShift); sysStat != exp {
				t.Errorf("[spec %d] expected stat counter to be %d; got %d", specIndex, exp, sysStat)
			}
		}
	})

	t.Run("earlyReserveRegion fails", func(t *testing.T) {
		earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) {
			return 0, &kernel.Error{Module: "test", Message: "consumed available address space"}
		}

		var sysStat uint64
		if got := sysAlloc(1, &sysStat); got != unsafe.Pointer(uintptr(0)) {
			t.Fatalf("expected sysAlloc to return 0x0 if EarlyReserveRegion returns an error; got 0x%x", uintptr(got))
		}
	})

	t.Run("frame allocation fails", func(t *testing.T) {
		expRegionStartAddr := uintptr(10 * mem.PageSize)
		earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) {
			return expRegionStartAddr, nil
		}

		frameAllocFn = func() (pmm.Frame, *kernel.Error) {
			return pmm.InvalidFrame, &kernel.Error{Module: "test", Message: "out of memory"}
		}

		var sysStat uint64
		if got := sysAlloc(1, &sysStat); got != unsafe.Pointer(uintptr(0)) {
			t.Fatalf("expected sysAlloc to return 0x0 if AllocFrame returns an error; got 0x%x", uintptr(got))
		}
	})

	t.Run("map fails", func(t *testing.T) {
		expRegionStartAddr := uintptr(10 * mem.PageSize)
		earlyReserveRegionFn = func(rsvSize mem.Size) (uintptr, *kernel.Error) {
			return expRegionStartAddr, nil
		}

		frameAllocFn = func() (pmm.Frame, *kernel.Error) {
			return pmm.Frame(0), nil
		}

		mapFn = func(_ vmm.Page, _ pmm.Frame, _ vmm.PageTableEntryFlag) *kernel.Error {
			return &kernel.Error{Module: "test", Message: "map failed"}
		}

		var sysStat uint64
		if got := sysAlloc(1, &sysStat); got != unsafe.Pointer(uintptr(0)) {
			t.Fatalf("expected sysAlloc to return 0x0 if Map returns an error; got 0x%x", uintptr(got))
		}
	})
}

func TestGetRandomData(t *testing.T) {
	sample1 := make([]byte, 128)
	sample2 := make([]byte, 128)

	getRandomData(sample1)
	getRandomData(sample2)

	if reflect.DeepEqual(sample1, sample2) {
		t.Fatal("expected getRandomData to return different values for each invocation")
	}
}

func TestInit(t *testing.T) {
	defer func() {
		mallocInitFn = mallocInit
		algInitFn = algInit
		modulesInitFn = modulesInit
		typeLinksInitFn = typeLinksInit
		itabsInitFn = itabsInit
	}()

	mallocInitFn = func() {}
	algInitFn = func() {}
	modulesInitFn = func() {}
	typeLinksInitFn = func() {}
	itabsInitFn = func() {}

	if err := Init(); err != nil {
		t.Fatal(err)
	}
}
23
src/gopheros/kernel/hal/hal.go
Normal file
@@ -0,0 +1,23 @@
package hal

import (
	"gopheros/kernel/driver/tty"
	"gopheros/kernel/driver/video/console"
	"gopheros/kernel/hal/multiboot"
)

var (
	egaConsole = &console.Ega{}

	// ActiveTerminal points to the currently active terminal.
	ActiveTerminal = &tty.Vt{}
)

// InitTerminal provides a basic terminal that allows the kernel to emit some
// output until everything is properly set up.
func InitTerminal() {
	fbInfo := multiboot.GetFramebufferInfo()

	egaConsole.Init(uint16(fbInfo.Width), uint16(fbInfo.Height), uintptr(fbInfo.PhysAddr))
	ActiveTerminal.AttachTo(egaConsole)
}
212
src/gopheros/kernel/hal/multiboot/multiboot.go
Normal file
@@ -0,0 +1,212 @@
package multiboot

import "unsafe"

type tagType uint32

// nolint
const (
	tagMbSectionEnd tagType = iota
	tagBootCmdLine
	tagBootLoaderName
	tagModules
	tagBasicMemoryInfo
	tagBiosBootDevice
	tagMemoryMap
	tagVbeInfo
	tagFramebufferInfo
	tagElfSymbols
	tagApmTable
)

// info describes the multiboot info section header.
type info struct {
	// Total size of multiboot info section.
	totalSize uint32

	// Always set to zero; reserved for future use
	reserved uint32
}

// tagHeader describes the header that precedes each tag.
type tagHeader struct {
	// The type of the tag
	tagType tagType

	// The size of the tag including the header but *not* including any
	// padding. According to the spec, each tag starts at an 8-byte aligned
	// address.
	size uint32
}

// mmapHeader describes the header for a memory map specification.
type mmapHeader struct {
	// The size of each entry.
	entrySize uint32

	// The version of the entries that follow.
	entryVersion uint32
}

// FramebufferType defines the type of the initialized framebuffer.
type FramebufferType uint8

const (
	// FrameBufferTypeIndexed specifies a 256-color palette.
	FrameBufferTypeIndexed FramebufferType = iota

	// FramebufferTypeRGB specifies direct RGB mode.
	FramebufferTypeRGB

	// FramebufferTypeEGA specifies EGA text mode.
	FramebufferTypeEGA
)

// FramebufferInfo provides information about the initialized framebuffer.
type FramebufferInfo struct {
	// The framebuffer physical address.
	PhysAddr uint64

	// Row pitch in bytes.
	Pitch uint32

	// Width and height in pixels (or characters if Type = FramebufferTypeEGA)
	Width, Height uint32

	// Bits per pixel (non-EGA modes only).
	Bpp uint8

	// Framebuffer type.
	Type FramebufferType
}
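
// In EGA text mode (see the test data in multiboot_test.go) the bootloader
// reports PhysAddr 0xB8000, an 80x25 character grid and a pitch of 160
// bytes, i.e. two bytes (character plus attribute) per cell.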

// MemoryEntryType defines the type of a MemoryMapEntry.
type MemoryEntryType uint32

const (
	// MemAvailable indicates that the memory region is available for use.
	MemAvailable MemoryEntryType = iota + 1

	// MemReserved indicates that the memory region is not available for use.
	MemReserved

	// MemAcpiReclaimable indicates a memory region that holds ACPI info that
	// can be reused by the OS.
	MemAcpiReclaimable

	// MemNvs indicates memory that must be preserved when hibernating.
	MemNvs

	// Any value >= memUnknown will be mapped to MemReserved.
	memUnknown
)

var (
	infoData uintptr
)

// MemRegionVisitor defines a visitor function that gets invoked by VisitMemRegions
// for each memory region provided by the boot loader. The visitor must return true
// to continue or false to abort the scan.
type MemRegionVisitor func(entry *MemoryMapEntry) bool

// MemoryMapEntry describes a memory region entry, namely its physical address,
// its length and its type.
type MemoryMapEntry struct {
	// The physical address for this memory region.
	PhysAddress uint64

	// The length of the memory region.
	Length uint64

	// The type of this entry.
	Type MemoryEntryType
}

// String implements fmt.Stringer for MemoryEntryType.
func (t MemoryEntryType) String() string {
	switch t {
	case MemAvailable:
		return "available"
	case MemReserved:
		return "reserved"
	case MemAcpiReclaimable:
		return "ACPI (reclaimable)"
	case MemNvs:
		return "NVS"
	default:
		return "unknown"
	}
}

// SetInfoPtr updates the internal multiboot information pointer to the given
// value. This function must be invoked before invoking any other function
// exported by this package.
func SetInfoPtr(ptr uintptr) {
	infoData = ptr
}

// VisitMemRegions will invoke the supplied visitor for each memory region that
// is defined by the multiboot info data that we received from the bootloader.
func VisitMemRegions(visitor MemRegionVisitor) {
	curPtr, size := findTagByType(tagMemoryMap)
	if size == 0 {
		return
	}

	// curPtr points to the memory map header (2 dwords long)
	ptrMapHeader := (*mmapHeader)(unsafe.Pointer(curPtr))
	endPtr := curPtr + uintptr(size)
	curPtr += 8

	var entry *MemoryMapEntry
	for curPtr != endPtr {
		entry = (*MemoryMapEntry)(unsafe.Pointer(curPtr))

		// Mark unknown entry types as reserved
		if entry.Type == 0 || entry.Type > memUnknown {
			entry.Type = MemReserved
		}

		if !visitor(entry) {
			return
		}

		curPtr += uintptr(ptrMapHeader.entrySize)
	}
}

// GetFramebufferInfo returns information about the framebuffer initialized by the
// bootloader. This function returns nil if no framebuffer info is available.
func GetFramebufferInfo() *FramebufferInfo {
	var info *FramebufferInfo

	curPtr, size := findTagByType(tagFramebufferInfo)
	if size != 0 {
		info = (*FramebufferInfo)(unsafe.Pointer(curPtr))
	}

	return info
}

// findTagByType scans the multiboot info data looking for the start of the
// tag with the specified type. It returns a pointer to the start of the tag
// contents and the content length excluding the tag header.
//
// If the tag is not present in the multiboot info, findTagByType returns
// (0, 0).
func findTagByType(tagType tagType) (uintptr, uint32) {
	var ptrTagHeader *tagHeader

	curPtr := infoData + 8
	for ptrTagHeader = (*tagHeader)(unsafe.Pointer(curPtr)); ptrTagHeader.tagType != tagMbSectionEnd; ptrTagHeader = (*tagHeader)(unsafe.Pointer(curPtr)) {
		if ptrTagHeader.tagType == tagType {
			return curPtr + 8, ptrTagHeader.size - 8
		}

		// Tags are aligned at 8-byte aligned addresses
		curPtr += uintptr(int32(ptrTagHeader.size+7) & ^7)
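		// e.g. a tag with size == 12 advances curPtr by (12+7) &^ 7 = 16 bytes.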
	}

	return 0, 0
}
247
src/gopheros/kernel/hal/multiboot/multiboot_test.go
Normal file
@@ -0,0 +1,247 @@
package multiboot

import (
	"testing"
	"unsafe"
)

func TestFindTagByType(t *testing.T) {
	specs := []struct {
		tagType tagType
		expSize uint32
	}{
		{tagBootCmdLine, 1},
		{tagBootLoaderName, 27},
		{tagBasicMemoryInfo, 8},
		{tagBiosBootDevice, 12},
		{tagMemoryMap, 152},
		{tagFramebufferInfo, 24},
		{tagElfSymbols, 972},
		{tagApmTable, 20},
	}

	SetInfoPtr(uintptr(unsafe.Pointer(&multibootInfoTestData[0])))

	for specIndex, spec := range specs {
		_, size := findTagByType(spec.tagType)

		if size != spec.expSize {
			t.Errorf("[spec %d] expected tag size for tag type %d to be %d; got %d", specIndex, spec.tagType, spec.expSize, size)
		}
	}
}

func TestFindTagByTypeWithMissingTag(t *testing.T) {
	SetInfoPtr(uintptr(unsafe.Pointer(&multibootInfoTestData[0])))

	if offset, size := findTagByType(tagModules); offset != 0 || size != 0 {
		t.Fatalf("expected findTagByType to return (0,0) for missing tag; got (%d, %d)", offset, size)
	}
}

func TestVisitMemRegion(t *testing.T) {
	specs := []struct {
		expPhys uint64
		expLen  uint64
		expType MemoryEntryType
	}{
		// This region type is actually MemAvailable but we patch it to
		// a bogus value to test whether it gets flagged as reserved
		{0, 654336, MemReserved},
		{654336, 1024, MemReserved},
		{983040, 65536, MemReserved},
		{1048576, 133038080, MemAvailable},
		{134086656, 131072, MemReserved},
		{4294705152, 262144, MemReserved},
	}

	var visitCount int

	SetInfoPtr(uintptr(unsafe.Pointer(&emptyInfoData[0])))
	VisitMemRegions(func(_ *MemoryMapEntry) bool {
		visitCount++
		return true
	})

	if visitCount != 0 {
		t.Fatal("expected visitor not to be invoked when no memory map tag is present")
	}

	// Set a bogus type for the first entry in the map
	SetInfoPtr(uintptr(unsafe.Pointer(&multibootInfoTestData[0])))
	multibootInfoTestData[128] = 0xFF

	VisitMemRegions(func(entry *MemoryMapEntry) bool {
		if entry.PhysAddress != specs[visitCount].expPhys {
			t.Errorf("[visit %d] expected physical address to be %x; got %x", visitCount, specs[visitCount].expPhys, entry.PhysAddress)
		}
		if entry.Length != specs[visitCount].expLen {
			t.Errorf("[visit %d] expected region len to be %x; got %x", visitCount, specs[visitCount].expLen, entry.Length)
		}
		if entry.Type != specs[visitCount].expType {
			t.Errorf("[visit %d] expected region type to be %d; got %d", visitCount, specs[visitCount].expType, entry.Type)
		}
		visitCount++
		return true
	})

	if visitCount != len(specs) {
		t.Errorf("expected the visitor func to be invoked %d times; got %d", len(specs), visitCount)
	}

	// Test that the visitor function can abort the scan by returning false
	visitCount = 0
	VisitMemRegions(func(entry *MemoryMapEntry) bool {
		visitCount++
		return false
	})

	if visitCount != 1 {
		t.Errorf("expected the visitor func to be invoked %d times; got %d", 1, visitCount)
	}
}

func TestMemoryEntryTypeStringer(t *testing.T) {
	specs := []struct {
		input MemoryEntryType
		exp   string
	}{
		{MemAvailable, "available"},
		{MemReserved, "reserved"},
		{MemAcpiReclaimable, "ACPI (reclaimable)"},
		{MemNvs, "NVS"},
		{MemoryEntryType(123), "unknown"},
	}

	for specIndex, spec := range specs {
		if got := spec.input.String(); got != spec.exp {
			t.Errorf("[spec %d] expected MemoryEntryType(%d).String() to return %q; got %q", specIndex, spec.input, spec.exp, got)
		}
	}
}

func TestGetFramebufferInfo(t *testing.T) {
	SetInfoPtr(uintptr(unsafe.Pointer(&emptyInfoData[0])))

	if GetFramebufferInfo() != nil {
		t.Fatalf("expected GetFramebufferInfo() to return nil when no framebuffer tag is present")
	}

	SetInfoPtr(uintptr(unsafe.Pointer(&multibootInfoTestData[0])))
	fbInfo := GetFramebufferInfo()

	if fbInfo.Type != FramebufferTypeEGA {
		t.Errorf("expected framebuffer type to be %d; got %d", FramebufferTypeEGA, fbInfo.Type)
	}

	if fbInfo.PhysAddr != 0xB8000 {
		t.Errorf("expected physical address for EGA text mode to be 0xB8000; got %x", fbInfo.PhysAddr)
	}

	if fbInfo.Width != 80 || fbInfo.Height != 25 {
		t.Errorf("expected framebuffer dimensions to be 80x25; got %dx%d", fbInfo.Width, fbInfo.Height)
	}

	if fbInfo.Pitch != 160 {
		t.Errorf("expected pitch to be 160; got %x", fbInfo.Pitch)
	}
}

var (
	emptyInfoData = []byte{
		0, 0, 0, 0, // size
		0, 0, 0, 0, // reserved
		0, 0, 0, 0, // tag with type zero and length zero
		0, 0, 0, 0,
	}

	// A dump of multiboot data when running under qemu.
	multibootInfoTestData = []byte{
		72, 5, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 9, 0, 0, 0,
		0, 171, 253, 7, 118, 119, 123, 0, 2, 0, 0, 0, 35, 0, 0, 0,
		71, 82, 85, 66, 32, 50, 46, 48, 50, 126, 98, 101, 116, 97, 50, 45,
		57, 117, 98, 117, 110, 116, 117, 49, 46, 54, 0, 0, 0, 0, 0, 0,
		10, 0, 0, 0, 28, 0, 0, 0, 2, 1, 0, 240, 4, 213, 0, 0,
		0, 240, 0, 240, 3, 0, 240, 255, 240, 255, 240, 255, 0, 0, 0, 0,
		6, 0, 0, 0, 160, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 252, 9, 0, 0, 0, 0, 0,
		1, 0, 0, 0, 0, 0, 0, 0, 0, 252, 9, 0, 0, 0, 0, 0,
		0, 4, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
		2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0,
		0, 0, 238, 7, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 254, 7, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0,
		2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 252, 255, 0, 0, 0, 0,
		0, 0, 4, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0,
		9, 0, 0, 0, 212, 3, 0, 0, 24, 0, 0, 0, 40, 0, 0, 0,
		21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 27, 0, 0, 0,
		1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 16, 0, 0, 16, 0, 0,
		24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0,
		0, 0, 0, 0, 38, 0, 0, 0, 1, 0, 0, 0, 6, 0, 0, 0,
		0, 16, 16, 0, 0, 32, 0, 0, 135, 26, 4, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 44, 0, 0, 0,
		1, 0, 0, 0, 2, 0, 0, 0, 0, 48, 20, 0, 0, 64, 4, 0,
		194, 167, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0,
		0, 0, 0, 0, 52, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0,
		224, 215, 21, 0, 224, 231, 5, 0, 176, 6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 62, 0, 0, 0,
		1, 0, 0, 0, 2, 0, 0, 0, 144, 222, 21, 0, 144, 238, 5, 0,
		4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,
		0, 0, 0, 0, 72, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0,
		160, 222, 21, 0, 160, 238, 5, 0, 119, 23, 2, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 83, 0, 0, 0,
		7, 0, 0, 0, 2, 0, 0, 0, 32, 246, 23, 0, 32, 6, 8, 0,
		56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0,
		0, 0, 0, 0, 100, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0,
		0, 0, 24, 0, 0, 16, 8, 0, 204, 5, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 106, 0, 0, 0,
		1, 0, 0, 0, 3, 0, 0, 0, 224, 5, 24, 0, 224, 21, 8, 0,
		178, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0,
		0, 0, 0, 0, 117, 0, 0, 0, 8, 0, 0, 0, 3, 4, 0, 0,
		148, 15, 24, 0, 146, 31, 8, 0, 4, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 123, 0, 0, 0,
		8, 0, 0, 0, 3, 0, 0, 0, 0, 16, 24, 0, 146, 31, 8, 0,
		176, 61, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0,
		0, 0, 0, 0, 128, 0, 0, 0, 8, 0, 0, 0, 3, 0, 0, 0,
		192, 77, 25, 0, 146, 31, 8, 0, 32, 56, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 138, 0, 0, 0,
		1, 0, 0, 0, 0, 0, 0, 0, 224, 133, 25, 0, 146, 31, 8, 0,
		64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,
		0, 0, 0, 0, 153, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
		32, 134, 25, 0, 210, 31, 8, 0, 129, 26, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 169, 0, 0, 0,
		1, 0, 0, 0, 0, 0, 0, 0, 161, 160, 25, 0, 83, 58, 8, 0,
		2, 201, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,
		0, 0, 0, 0, 181, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
		163, 105, 27, 0, 85, 3, 10, 0, 25, 1, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 195, 0, 0, 0,
		1, 0, 0, 0, 0, 0, 0, 0, 188, 106, 27, 0, 110, 4, 10, 0,
		67, 153, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,
		0, 0, 0, 0, 207, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
		0, 4, 28, 0, 184, 157, 10, 0, 252, 112, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 220, 0, 0, 0,
		1, 0, 0, 0, 0, 0, 0, 0, 252, 116, 28, 0, 180, 14, 11, 0,
		16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,
		0, 0, 0, 0, 231, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
		12, 117, 28, 0, 196, 14, 11, 0, 239, 79, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0,
		3, 0, 0, 0, 0, 0, 0, 0, 251, 196, 28, 0, 179, 94, 11, 0,
		247, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,
		0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0,
		244, 197, 28, 0, 108, 99, 11, 0, 80, 77, 0, 0, 23, 0, 0, 0,
		210, 4, 0, 0, 4, 0, 0, 0, 16, 0, 0, 0, 9, 0, 0, 0,
		3, 0, 0, 0, 0, 0, 0, 0, 68, 19, 29, 0, 188, 176, 11, 0,
		107, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 16, 0, 0, 0,
		127, 2, 0, 0, 128, 251, 1, 0, 5, 0, 0, 0, 20, 0, 0, 0,
		224, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0,
		8, 0, 0, 0, 32, 0, 0, 0, 0, 128, 11, 0, 0, 0, 0, 0,
		160, 0, 0, 0, 80, 0, 0, 0, 25, 0, 0, 0, 16, 2, 0, 0,
		14, 0, 0, 0, 28, 0, 0, 0, 82, 83, 68, 32, 80, 84, 82, 32,
		89, 66, 79, 67, 72, 83, 32, 0, 220, 24, 254, 7, 0, 0, 0, 0,
		0, 0, 0, 0, 8, 0, 0, 0,
	}
)
41
src/gopheros/kernel/irq/handler_amd64.go
Normal file
@@ -0,0 +1,41 @@
package irq

// ExceptionNum defines an exception number that can be
// passed to the HandleException and HandleExceptionWithCode
// functions.
type ExceptionNum uint8

const (
	// DoubleFault occurs when an exception is unhandled
	// or when an exception occurs while the CPU is
	// trying to call an exception handler.
	DoubleFault = ExceptionNum(8)

	// GPFException is raised when a general protection fault occurs.
	GPFException = ExceptionNum(13)

	// PageFaultException is raised when a PDT or
	// PDT-entry is not present or when a privilege
	// and/or RW protection check fails.
	PageFaultException = ExceptionNum(14)
)

// ExceptionHandler is a function that handles an exception that does not push
// an error code to the stack. If the handler returns, any modifications to the
// supplied Frame and/or Regs pointers will be propagated back to the location
// where the exception occurred.
type ExceptionHandler func(*Frame, *Regs)

// ExceptionHandlerWithCode is a function that handles an exception that pushes
// an error code to the stack. If the handler returns, any modifications to the
// supplied Frame and/or Regs pointers will be propagated back to the location
// where the exception occurred.
type ExceptionHandlerWithCode func(uint64, *Frame, *Regs)

// HandleException registers an exception handler (without an error code) for
// the given interrupt number.
func HandleException(exceptionNum ExceptionNum, handler ExceptionHandler)

// HandleExceptionWithCode registers an exception handler (with an error code)
// for the given interrupt number.
func HandleExceptionWithCode(exceptionNum ExceptionNum, handler ExceptionHandlerWithCode)
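
// A minimal usage sketch from another package (the handler body is
// hypothetical):
//
//	irq.HandleExceptionWithCode(irq.PageFaultException,
//		func(code uint64, frame *irq.Frame, regs *irq.Regs) {
//			frame.Print()
//			regs.Print()
//		})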
36
src/gopheros/kernel/irq/handler_amd64.s
Normal file
@@ -0,0 +1,36 @@
#include "textflag.h"

// The maximum number of interrupt handlers is 256 so we need to allocate space
// for 256 x 8-byte pointers. This symbol is made global by the Makefile so it
// can be accessed by the gate entries defined in the rt0 assembly code.
GLOBL _rt0_interrupt_handlers(SB), NOPTR, $2048

// In 64-bit mode SIDT stores 8+2 bytes for the IDT address and limit
GLOBL _rt0_idtr<>(SB), NOPTR, $10

TEXT ·HandleException(SB),NOSPLIT,$0
	JMP ·HandleExceptionWithCode(SB)
	RET

TEXT ·HandleExceptionWithCode(SB),NOSPLIT,$0
	// Install the handler address in _rt0_interrupt_handlers
	LEAQ _rt0_interrupt_handlers+0(SB), CX
	MOVBQZX exceptionNum+0(FP), AX // exceptionNum is a uint8 so we zero-extend it to 64bits
	MOVQ handler+8(FP), BX
	MOVQ 0(BX), BX // dereference pointer to handler fn
	MOVQ BX, (CX)(AX*8)

	// To enable the handler we need to lookup the appropriate IDT entry
	// and modify its type/attribute byte. To acquire the IDT base address
	// we use the SIDT instruction.
	MOVQ IDTR, _rt0_idtr<>+0(SB)
	LEAQ _rt0_idtr<>(SB), CX
	MOVQ 2(CX), CX // CX points to IDT base address
	SHLQ $4, AX    // Each IDT entry uses 16 bytes so we multiply num by 16
	ADDQ AX, CX    // and add it to CX to get the address of the IDT entry
	               // we want to tweak

	MOVB $0x8e, 5(CX) // 32/64-bit ring-0 interrupt gate that is present
	                  // see: http://wiki.osdev.org/Interrupt_Descriptor_Table

	RET
51
src/gopheros/kernel/irq/interrupt_amd64.go
Normal file
@@ -0,0 +1,51 @@
package irq

import "gopheros/kernel/kfmt/early"

// Regs contains a snapshot of the register values when an interrupt occurred.
type Regs struct {
	RAX uint64
	RBX uint64
	RCX uint64
	RDX uint64
	RSI uint64
	RDI uint64
	RBP uint64
	R8  uint64
	R9  uint64
	R10 uint64
	R11 uint64
	R12 uint64
	R13 uint64
	R14 uint64
	R15 uint64
}

// Print outputs a dump of the register values to the active console.
func (r *Regs) Print() {
	early.Printf("RAX = %16x RBX = %16x\n", r.RAX, r.RBX)
	early.Printf("RCX = %16x RDX = %16x\n", r.RCX, r.RDX)
	early.Printf("RSI = %16x RDI = %16x\n", r.RSI, r.RDI)
	early.Printf("RBP = %16x\n", r.RBP)
	early.Printf("R8 = %16x R9 = %16x\n", r.R8, r.R9)
	early.Printf("R10 = %16x R11 = %16x\n", r.R10, r.R11)
	early.Printf("R12 = %16x R13 = %16x\n", r.R12, r.R13)
	early.Printf("R14 = %16x R15 = %16x\n", r.R14, r.R15)
}

// Frame describes an exception frame that is automatically pushed by the CPU
// to the stack when an exception occurs.
type Frame struct {
	RIP    uint64
	CS     uint64
	RFlags uint64
	RSP    uint64
	SS     uint64
}

// Print outputs a dump of the exception frame to the active console.
func (f *Frame) Print() {
	early.Printf("RIP = %16x CS = %16x\n", f.RIP, f.CS)
	early.Printf("RSP = %16x SS = %16x\n", f.RSP, f.SS)
	early.Printf("RFL = %16x\n", f.RFlags)
}
83
src/gopheros/kernel/irq/interrupt_amd64_test.go
Normal file
@@ -0,0 +1,83 @@
package irq

import (
	"bytes"
	"gopheros/kernel/driver/video/console"
	"gopheros/kernel/hal"
	"testing"
	"unsafe"
)

func TestRegsPrint(t *testing.T) {
	fb := mockTTY()
	regs := Regs{
		RAX: 1,
		RBX: 2,
		RCX: 3,
		RDX: 4,
		RSI: 5,
		RDI: 6,
		RBP: 7,
		R8:  8,
		R9:  9,
		R10: 10,
		R11: 11,
		R12: 12,
		R13: 13,
		R14: 14,
		R15: 15,
	}
	regs.Print()

	exp := "RAX = 0000000000000001 RBX = 0000000000000002\nRCX = 0000000000000003 RDX = 0000000000000004\nRSI = 0000000000000005 RDI = 0000000000000006\nRBP = 0000000000000007\nR8 = 0000000000000008 R9 = 0000000000000009\nR10 = 000000000000000a R11 = 000000000000000b\nR12 = 000000000000000c R13 = 000000000000000d\nR14 = 000000000000000e R15 = 000000000000000f"

	if got := readTTY(fb); got != exp {
		t.Fatalf("expected to get:\n%q\ngot:\n%q", exp, got)
	}
}

func TestFramePrint(t *testing.T) {
	fb := mockTTY()
	frame := Frame{
		RIP:    1,
		CS:     2,
		RFlags: 3,
		RSP:    4,
		SS:     5,
	}
	frame.Print()

	exp := "RIP = 0000000000000001 CS = 0000000000000002\nRSP = 0000000000000004 SS = 0000000000000005\nRFL = 0000000000000003"

	if got := readTTY(fb); got != exp {
		t.Fatalf("expected to get:\n%q\ngot:\n%q", exp, got)
	}
}

func readTTY(fb []byte) string {
	var buf bytes.Buffer
	for i := 0; i < len(fb); i += 2 {
		ch := fb[i]
		if ch == 0 {
			if i+2 < len(fb) && fb[i+2] != 0 {
				buf.WriteByte('\n')
			}
			continue
		}

		buf.WriteByte(ch)
	}

	return buf.String()
}

func mockTTY() []byte {
	// Mock a tty to handle early.Printf output
	mockConsoleFb := make([]byte, 160*25)
	mockConsole := &console.Ega{}
	mockConsole.Init(80, 25, uintptr(unsafe.Pointer(&mockConsoleFb[0])))
	hal.ActiveTerminal.AttachTo(mockConsole)

	return mockConsoleFb
}
269
src/gopheros/kernel/kfmt/early/early_fmt.go
Normal file
@@ -0,0 +1,269 @@
package early

import "gopheros/kernel/hal"

var (
	errMissingArg   = []byte("(MISSING)")
	errWrongArgType = []byte("%!(WRONGTYPE)")
	errNoVerb       = []byte("%!(NOVERB)")
	errExtraArg     = []byte("%!(EXTRA)")
	padding         = byte(' ')
	trueValue       = []byte("true")
	falseValue      = []byte("false")
)

// Printf provides a minimal Printf implementation that can be used before the
// Go runtime has been properly initialized. This version of printf does not
// allocate any memory and uses hal.ActiveTerminal for its output.
//
// Similar to fmt.Printf, this version of printf supports the following subset
// of formatting verbs:
//
// Strings:
//	%s	the uninterpreted bytes of the string or byte slice
//
// Integers:
//	%o	base 8
//	%d	base 10
//	%x	base 16, with lower-case letters for a-f
//
// Booleans:
//	%t	"true" or "false"
//
// Width is specified by an optional decimal number immediately preceding the verb.
// If absent, the width is whatever is necessary to represent the value.
//
// String values with length less than the specified width will be left-padded with
// spaces. Integer values formatted as base-10 will also be left-padded with spaces.
// Finally, integer values formatted as base-16 will be left-padded with zeroes.
//
// Printf supports all built-in string and integer types but assumes that the
// Go itables have not been initialized yet, so it will not check whether its
// arguments support fmt.Stringer if they don't match one of the supported types.
//
// This function does not provide support for printing pointers (%p) as this
// requires importing the reflect package. By importing reflect, the Go compiler
// starts generating calls to runtime.convT2E (which calls runtime.newobject)
// when assembling the argument slice, which would crash the kernel since
// memory management is not yet available.
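//
// For example, Printf("%s=%4d 0x%x", "x", 42, uintptr(0xb8000)) writes
// "x=  42 0xb8000" to the active terminal.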
func Printf(format string, args ...interface{}) {
	var (
		nextCh                       byte
		nextArgIndex                 int
		blockStart, blockEnd, padLen int
		fmtLen                       = len(format)
	)

	for blockEnd < fmtLen {
		nextCh = format[blockEnd]
		if nextCh != '%' {
			blockEnd++
			continue
		}

		if blockStart < blockEnd {
			for i := blockStart; i < blockEnd; i++ {
				hal.ActiveTerminal.WriteByte(format[i])
			}
		}

		// Scan til we hit the format character
		padLen = 0
		blockEnd++
	parseFmt:
		for ; blockEnd < fmtLen; blockEnd++ {
			nextCh = format[blockEnd]
			switch {
			case nextCh == '%':
				hal.ActiveTerminal.Write([]byte{'%'})
				break parseFmt
			case nextCh >= '0' && nextCh <= '9':
				padLen = (padLen * 10) + int(nextCh-'0')
				continue
			case nextCh == 'd' || nextCh == 'x' || nextCh == 'o' || nextCh == 's' || nextCh == 't':
				// Run out of args to print
				if nextArgIndex >= len(args) {
					hal.ActiveTerminal.Write(errMissingArg)
					break parseFmt
				}

				switch nextCh {
				case 'o':
					fmtInt(args[nextArgIndex], 8, padLen)
				case 'd':
					fmtInt(args[nextArgIndex], 10, padLen)
				case 'x':
					fmtInt(args[nextArgIndex], 16, padLen)
				case 's':
					fmtString(args[nextArgIndex], padLen)
				case 't':
					fmtBool(args[nextArgIndex])
				}

				nextArgIndex++
				break parseFmt
			}

			// reached end of formatting string without finding a verb
			hal.ActiveTerminal.Write(errNoVerb)
		}
		blockStart, blockEnd = blockEnd+1, blockEnd+1
	}

	if blockStart != blockEnd {
		for i := blockStart; i < blockEnd; i++ {
			hal.ActiveTerminal.WriteByte(format[i])
		}
	}

	// Check for unused args
	for ; nextArgIndex < len(args); nextArgIndex++ {
		hal.ActiveTerminal.Write(errExtraArg)
	}
}

// fmtBool prints a formatted version of boolean value v using hal.ActiveTerminal
// for its output.
func fmtBool(v interface{}) {
	switch bVal := v.(type) {
	case bool:
		switch bVal {
		case true:
			hal.ActiveTerminal.Write(trueValue)
		case false:
			hal.ActiveTerminal.Write(falseValue)
		}
	default:
		hal.ActiveTerminal.Write(errWrongArgType)
		return
	}
}

// fmtString prints a formatted version of string or []byte value v, applying the
// padding specified by padLen. This function uses hal.ActiveTerminal for its
// output.
func fmtString(v interface{}, padLen int) {
	switch castedVal := v.(type) {
	case string:
		fmtRepeat(padding, padLen-len(castedVal))
		for i := 0; i < len(castedVal); i++ {
			hal.ActiveTerminal.WriteByte(castedVal[i])
		}
	case []byte:
		fmtRepeat(padding, padLen-len(castedVal))
		hal.ActiveTerminal.Write(castedVal)
	default:
		hal.ActiveTerminal.Write(errWrongArgType)
	}
}

// fmtRepeat writes count bytes with value ch to the hal.ActiveTerminal.
func fmtRepeat(ch byte, count int) {
	for i := 0; i < count; i++ {
		hal.ActiveTerminal.WriteByte(ch)
	}
}

// fmtInt prints out a formatted version of v in the requested base, applying the
// padding specified by padLen. This function uses hal.ActiveTerminal for its
// output, supports all built-in signed and unsigned integer types and supports
// base 8, 10 and 16 output.
func fmtInt(v interface{}, base, padLen int) {
	var (
		sval             int64
		uval             uint64
		divider          uint64
		remainder        uint64
		buf              [20]byte
		padCh            byte
		left, right, end int
	)

	switch base {
	case 8:
		divider = 8
		padCh = '0'
	case 10:
		divider = 10
		padCh = ' '
	case 16:
		divider = 16
		padCh = '0'
	}

	switch v.(type) {
	case uint8:
		uval = uint64(v.(uint8))
	case uint16:
		uval = uint64(v.(uint16))
	case uint32:
		uval = uint64(v.(uint32))
	case uint64:
		uval = v.(uint64)
	case uintptr:
		uval = uint64(v.(uintptr))
	case int8:
		sval = int64(v.(int8))
	case int16:
		sval = int64(v.(int16))
	case int32:
		sval = int64(v.(int32))
	case int64:
		sval = v.(int64)
	case int:
		sval = int64(v.(int))
	default:
		hal.ActiveTerminal.Write(errWrongArgType)
		return
	}

	// Handle signs
	if sval < 0 {
		uval = uint64(-sval)
	} else if sval > 0 {
		uval = uint64(sval)
	}

	for {
		remainder = uval % divider
		if remainder < 10 {
			buf[right] = byte(remainder) + '0'
		} else {
			// map values from 10 to 15 -> a-f
			buf[right] = byte(remainder-10) + 'a'
		}

		right++

		uval /= divider
		if uval == 0 {
			break
		}
	}
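
	// buf now holds the digits in reverse order (least-significant digit
	// first): e.g. 0xbadf00d in base 16 is stored as "d00fdab" until the
	// in-place reversal below.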

	// Apply padding if required
	for ; right-left < padLen; right++ {
		buf[right] = padCh
	}

	// Apply negative sign to the rightmost blank character (if using enough padding);
	// otherwise append the sign as a new char
	if sval < 0 {
		for end = right - 1; buf[end] == ' '; end-- {
		}

		if end == right-1 {
			right++
		}

		buf[end+1] = '-'
	}

	// Reverse in place
	end = right
	for right = right - 1; left < right; left, right = left+1, right-1 {
		buf[left], buf[right] = buf[right], buf[left]
	}

	hal.ActiveTerminal.Write(buf[0:end])
}
180
src/gopheros/kernel/kfmt/early/early_fmt_test.go
Normal file
@@ -0,0 +1,180 @@
package early

import (
	"bytes"
	"gopheros/kernel/driver/tty"
	"gopheros/kernel/driver/video/console"
	"gopheros/kernel/hal"
	"testing"
	"unsafe"
)

func TestPrintf(t *testing.T) {
	origTerm := hal.ActiveTerminal
	defer func() {
		hal.ActiveTerminal = origTerm
	}()

	// mute vet warnings about malformed printf formatting strings
	printfn := Printf

	ega := &console.Ega{}
	fb := make([]uint8, 160*25)
	ega.Init(80, 25, uintptr(unsafe.Pointer(&fb[0])))

	vt := &tty.Vt{}
	vt.AttachTo(ega)
	hal.ActiveTerminal = vt

	specs := []struct {
		fn        func()
		expOutput string
	}{
		{
			func() { printfn("no args") },
			"no args",
		},
		// bool values
		{
			func() { printfn("%t", true) },
			"true",
		},
		{
			func() { printfn("%41t", false) },
			"false",
		},
		// strings and byte slices
		{
			func() { printfn("%s arg", "STRING") },
			"STRING arg",
		},
		{
			func() { printfn("%s arg", []byte("BYTE SLICE")) },
			"BYTE SLICE arg",
		},
		{
			func() { printfn("'%4s' arg with padding", "ABC") },
			"' ABC' arg with padding",
		},
		{
			func() { printfn("'%4s' arg longer than padding", "ABCDE") },
			"'ABCDE' arg longer than padding",
		},
		// uints
		{
			func() { printfn("uint arg: %d", uint8(10)) },
			"uint arg: 10",
		},
		{
			func() { printfn("uint arg: %o", uint16(0777)) },
			"uint arg: 777",
		},
		{
			func() { printfn("uint arg: 0x%x", uint32(0xbadf00d)) },
			"uint arg: 0xbadf00d",
		},
		{
			func() { printfn("uint arg with padding: '%10d'", uint64(123)) },
			"uint arg with padding: '       123'",
		},
		{
			func() { printfn("uint arg with padding: '%4o'", uint64(0777)) },
			"uint arg with padding: '0777'",
		},
		{
			func() { printfn("uint arg with padding: '0x%10x'", uint64(0xbadf00d)) },
			"uint arg with padding: '0x000badf00d'",
		},
		{
			func() { printfn("uint arg longer than padding: '0x%5x'", int64(0xbadf00d)) },
			"uint arg longer than padding: '0xbadf00d'",
		},
		// pointers
		{
			func() { printfn("uintptr 0x%x", uintptr(0xb8000)) },
			"uintptr 0xb8000",
		},
		// ints
		{
			func() { printfn("int arg: %d", int8(-10)) },
			"int arg: -10",
		},
		{
			func() { printfn("int arg: %o", int16(0777)) },
			"int arg: 777",
		},
		{
			func() { printfn("int arg: %x", int32(-0xbadf00d)) },
			"int arg: -badf00d",
		},
		{
			func() { printfn("int arg with padding: '%10d'", int64(-12345678)) },
			"int arg with padding: ' -12345678'",
		},
		{
			func() { printfn("int arg with padding: '%10d'", int64(-123456789)) },
			"int arg with padding: '-123456789'",
		},
		{
			func() { printfn("int arg with padding: '%10d'", int64(-1234567890)) },
			"int arg with padding: '-1234567890'",
		},
		{
			func() { printfn("int arg longer than padding: '%5x'", int(-0xbadf00d)) },
			"int arg longer than padding: '-badf00d'",
		},
		// multiple arguments
		{
			func() { printfn("%%%s%d%t", "foo", 123, true) },
			`%foo123true`,
		},
		// errors
		{
			func() { printfn("more args", "foo", "bar", "baz") },
			`more args%!(EXTRA)%!(EXTRA)%!(EXTRA)`,
		},
		{
			func() { printfn("missing args %s") },
			`missing args (MISSING)`,
		},
		{
			func() { printfn("bad verb %Q") },
			`bad verb %!(NOVERB)`,
		},
		{
			func() { printfn("not bool %t", "foo") },
			`not bool %!(WRONGTYPE)`,
		},
		{
			func() { printfn("not int %d", "foo") },
			`not int %!(WRONGTYPE)`,
		},
		{
			func() { printfn("not string %s", 123) },
			`not string %!(WRONGTYPE)`,
		},
	}

	for specIndex, spec := range specs {
		for index := 0; index < len(fb); index++ {
			fb[index] = 0
		}
		vt.SetPosition(0, 0)

		spec.fn()

		var buf bytes.Buffer
		for index := 0; ; index += 2 {
			if fb[index] == 0 {
				break
			}

			buf.WriteByte(fb[index])
		}

		if got := buf.String(); got != spec.expOutput {
			t.Errorf("[spec %d] expected to get %q; got %q", specIndex, spec.expOutput, got)
		}
	}
}
45
src/gopheros/kernel/kmain/kmain.go
Normal file
@@ -0,0 +1,45 @@
package kmain

import (
	"gopheros/kernel"
	"gopheros/kernel/goruntime"
	"gopheros/kernel/hal"
	"gopheros/kernel/hal/multiboot"
	"gopheros/kernel/mem/pmm/allocator"
	"gopheros/kernel/mem/vmm"
)

var (
	errKmainReturned = &kernel.Error{Module: "kmain", Message: "Kmain returned"}
)

// Kmain is the only Go symbol that is visible (exported) from the rt0 initialization
// code. This function is invoked by the rt0 assembly code after setting up the GDT
// and a minimal g0 struct that allows Go code to use the 4K stack allocated by
// the assembly code.
//
// The rt0 code passes the address of the multiboot info payload provided by the
// bootloader as well as the physical addresses for the kernel start/end.
//
// Kmain is not expected to return. If it does, the rt0 code will halt the CPU.
//
//go:noinline
func Kmain(multibootInfoPtr, kernelStart, kernelEnd uintptr) {
	multiboot.SetInfoPtr(multibootInfoPtr)

	hal.InitTerminal()
	hal.ActiveTerminal.Clear()

	var err *kernel.Error
	if err = allocator.Init(kernelStart, kernelEnd); err != nil {
		panic(err)
	} else if err = vmm.Init(); err != nil {
		panic(err)
	} else if err = goruntime.Init(); err != nil {
		panic(err)
	}

	// Use kernel.Panic instead of panic to prevent the compiler from
	// treating kernel.Panic as dead-code and eliminating it.
	kernel.Panic(errKmainReturned)
}
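
// The initialization order in Kmain is significant: the physical frame
// allocator must be ready before the VMM can map pages, and both must be in
// place before the Go runtime bootstrap can hand memory to the allocator.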
17
src/gopheros/kernel/mem/constants_amd64.go
Normal file
@@ -0,0 +1,17 @@
// +build amd64

package mem

const (
	// PointerShift is equal to log2(unsafe.Sizeof(uintptr)). The pointer
	// size for this architecture is defined as (1 << PointerShift).
	PointerShift = 3

	// PageShift is equal to log2(PageSize). This constant is used when
	// we need to convert a physical address to a page number (shift right by PageShift)
	// and vice-versa.
	PageShift = 12

	// PageSize defines the system's page size in bytes.
	PageSize = Size(1 << PageShift)
)
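
// For example, with PageShift == 12 the physical address 0x100000 (1 MiB)
// maps to page/frame number 0x100000 >> PageShift == 256.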
49
src/gopheros/kernel/mem/mem.go
Normal file
@@ -0,0 +1,49 @@
package mem

import (
	"reflect"
	"unsafe"
)

// Memset sets size bytes at the given address to the supplied value. The implementation
// is based on bytes.Repeat; instead of using a for loop, this function uses
// log2(size) copy calls which should give us a speed boost as page addresses
// are always aligned.
func Memset(addr uintptr, value byte, size Size) {
	if size == 0 {
		return
	}

	// overlay a slice on top of this address region
	target := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
		Len:  int(size),
		Cap:  int(size),
		Data: addr,
	}))

	// Set first element and make log2(size) optimized copies
	target[0] = value
	for index := Size(1); index < size; index *= 2 {
		copy(target[index:], target[:index])
	}
}
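
// Each iteration of the copy loop in Memset doubles the initialized prefix
// (1, 2, 4, ... bytes), so a 4096-byte page is filled with 12 copy calls
// instead of 4096 single-byte stores.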

// Memcopy copies size bytes from src to dst.
func Memcopy(src, dst uintptr, size Size) {
	if size == 0 {
		return
	}

	srcSlice := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
		Len:  int(size),
		Cap:  int(size),
		Data: src,
	}))
	dstSlice := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
		Len:  int(size),
		Cap:  int(size),
		Data: dst,
	}))

	copy(dstSlice, srcSlice)
}
52
src/gopheros/kernel/mem/mem_test.go
Normal file
@@ -0,0 +1,52 @@
package mem

import (
	"testing"
	"unsafe"
)

func TestMemset(t *testing.T) {
	// memset with a 0 size should be a no-op
	Memset(uintptr(0), 0x00, 0)

	for pageCount := uint32(1); pageCount <= 10; pageCount++ {
		buf := make([]byte, PageSize<<pageCount)
		for i := 0; i < len(buf); i++ {
			buf[i] = 0xFE
		}

		addr := uintptr(unsafe.Pointer(&buf[0]))
		Memset(addr, 0x00, Size(len(buf)))

		for i := 0; i < len(buf); i++ {
			if got := buf[i]; got != 0x00 {
				t.Errorf("[block with %d pages] expected byte: %d to be 0x00; got 0x%x", pageCount, i, got)
			}
		}
	}
}

func TestMemcopy(t *testing.T) {
	// memcopy with a 0 size should be a no-op
	Memcopy(uintptr(0), uintptr(0), 0)

	var (
		src = make([]byte, PageSize)
		dst = make([]byte, PageSize)
	)
	for i := 0; i < len(src); i++ {
		src[i] = byte(i % 256)
	}

	Memcopy(
		uintptr(unsafe.Pointer(&src[0])),
		uintptr(unsafe.Pointer(&dst[0])),
		PageSize,
	)

	for i := 0; i < len(src); i++ {
		if got := dst[i]; got != src[i] {
			t.Errorf("value mismatch between src and dst at index %d", i)
		}
	}
}
326
src/gopheros/kernel/mem/pmm/allocator/bitmap_allocator.go
Normal file
@@ -0,0 +1,326 @@
package allocator

import (
	"gopheros/kernel"
	"gopheros/kernel/hal/multiboot"
	"gopheros/kernel/kfmt/early"
	"gopheros/kernel/mem"
	"gopheros/kernel/mem/pmm"
	"gopheros/kernel/mem/vmm"
	"math"
	"reflect"
	"unsafe"
)

var (
	// bitmapAllocator is a BitmapAllocator instance that serves as the
	// primary allocator for reserving pages.
	bitmapAllocator BitmapAllocator

	errBitmapAllocOutOfMemory     = &kernel.Error{Module: "bitmap_alloc", Message: "out of memory"}
	errBitmapAllocFrameNotManaged = &kernel.Error{Module: "bitmap_alloc", Message: "frame not managed by this allocator"}
	errBitmapAllocDoubleFree      = &kernel.Error{Module: "bitmap_alloc", Message: "frame is already free"}

	// The following functions are used by tests to mock calls to the vmm package
	// and are automatically inlined by the compiler.
	reserveRegionFn = vmm.EarlyReserveRegion
	mapFn           = vmm.Map
)

type markAs bool

const (
	markReserved markAs = false
	markFree            = true
)

type framePool struct {
	// startFrame is the frame number for the first page in this pool.
	// Each free bitmap entry i corresponds to frame (startFrame + i).
	startFrame pmm.Frame

	// endFrame tracks the last frame in the pool. The total number of
	// frames is given by: (endFrame - startFrame) + 1
	endFrame pmm.Frame

	// freeCount tracks the available pages in this pool. The allocator
	// can use this field to skip fully allocated pools without the need
	// to scan the free bitmap.
	freeCount uint32

	// freeBitmap tracks used/free pages in the pool.
	freeBitmap    []uint64
	freeBitmapHdr reflect.SliceHeader
}
|
||||
|
||||
// BitmapAllocator implements a physical frame allocator that tracks frame
|
||||
// reservations across the available memory pools using bitmaps.
|
||||
type BitmapAllocator struct {
|
||||
// totalPages tracks the total number of pages across all pools.
|
||||
totalPages uint32
|
||||
|
||||
// reservedPages tracks the number of reserved pages across all pools.
|
||||
reservedPages uint32
|
||||
|
||||
pools []framePool
|
||||
poolsHdr reflect.SliceHeader
|
||||
}
|
||||
|
||||
// init allocates space for the allocator structures using the early bootmem
|
||||
// allocator and flags any allocated pages as reserved.
|
||||
func (alloc *BitmapAllocator) init() *kernel.Error {
|
||||
if err := alloc.setupPoolBitmaps(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
alloc.reserveKernelFrames()
|
||||
alloc.reserveEarlyAllocatorFrames()
|
||||
alloc.printStats()
|
||||
return nil
|
||||
}
|
||||
|
||||
// setupPoolBitmaps uses the early allocator and vmm region reservation helper
|
||||
// to initialize the list of available pools and their free bitmap slices.
|
||||
func (alloc *BitmapAllocator) setupPoolBitmaps() *kernel.Error {
|
||||
var (
|
||||
err *kernel.Error
|
||||
sizeofPool = unsafe.Sizeof(framePool{})
|
||||
pageSizeMinus1 = uint64(mem.PageSize - 1)
|
||||
requiredBitmapBytes mem.Size
|
||||
)
|
||||
|
||||
// Detect available memory regions and calculate their pool bitmap
|
||||
// requirements.
|
||||
multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool {
|
||||
if region.Type != multiboot.MemAvailable {
|
||||
return true
|
||||
}
|
||||
|
||||
alloc.poolsHdr.Len++
|
||||
alloc.poolsHdr.Cap++
|
||||
|
||||
// Reported addresses may not be page-aligned; round up to get
|
||||
// the start frame and round down to get the end frame
|
||||
regionStartFrame := pmm.Frame(((region.PhysAddress + pageSizeMinus1) & ^pageSizeMinus1) >> mem.PageShift)
|
||||
regionEndFrame := pmm.Frame(((region.PhysAddress+region.Length) & ^pageSizeMinus1)>>mem.PageShift) - 1
|
||||
pageCount := uint32(regionEndFrame - regionStartFrame)
|
||||
alloc.totalPages += pageCount
|
||||
|
||||
// To represent the free page bitmap we need pageCount bits. Since our
|
||||
// slice uses uint64 for storing the bitmap we need to round up the
|
||||
// required bits so they are a multiple of 64 bits
|
||||
requiredBitmapBytes += mem.Size(((pageCount + 63) &^ 63) >> 3)
|
||||
return true
|
||||
})
|
||||
|
||||
// Reserve enough pages to hold the allocator state
|
||||
requiredBytes := mem.Size(((uint64(uintptr(alloc.poolsHdr.Len)*sizeofPool) + uint64(requiredBitmapBytes)) + pageSizeMinus1) & ^pageSizeMinus1)
|
||||
requiredPages := requiredBytes >> mem.PageShift
|
||||
alloc.poolsHdr.Data, err = reserveRegionFn(requiredBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for page, index := vmm.PageFromAddress(alloc.poolsHdr.Data), mem.Size(0); index < requiredPages; page, index = page+1, index+1 {
|
||||
nextFrame, err := earlyAllocFrame()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = mapFn(page, nextFrame, vmm.FlagPresent|vmm.FlagRW|vmm.FlagNoExecute); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mem.Memset(page.Address(), 0, mem.PageSize)
|
||||
}
|
||||
|
||||
alloc.pools = *(*[]framePool)(unsafe.Pointer(&alloc.poolsHdr))
|
||||
|
||||
// Run a second pass to initialize the free bitmap slices for all pools
|
||||
bitmapStartAddr := alloc.poolsHdr.Data + uintptr(alloc.poolsHdr.Len)*sizeofPool
|
||||
poolIndex := 0
|
||||
multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool {
|
||||
if region.Type != multiboot.MemAvailable {
|
||||
return true
|
||||
}
|
||||
|
||||
regionStartFrame := pmm.Frame(((region.PhysAddress + pageSizeMinus1) & ^pageSizeMinus1) >> mem.PageShift)
|
||||
regionEndFrame := pmm.Frame(((region.PhysAddress+region.Length) & ^pageSizeMinus1)>>mem.PageShift) - 1
|
||||
bitmapBytes := uintptr((((regionEndFrame - regionStartFrame) + 63) &^ 63) >> 3)
|
||||
|
||||
alloc.pools[poolIndex].startFrame = regionStartFrame
|
||||
alloc.pools[poolIndex].endFrame = regionEndFrame
|
||||
alloc.pools[poolIndex].freeCount = uint32(regionEndFrame - regionStartFrame + 1)
|
||||
alloc.pools[poolIndex].freeBitmapHdr.Len = int(bitmapBytes >> 3)
|
||||
alloc.pools[poolIndex].freeBitmapHdr.Cap = alloc.pools[poolIndex].freeBitmapHdr.Len
|
||||
alloc.pools[poolIndex].freeBitmapHdr.Data = bitmapStartAddr
|
||||
alloc.pools[poolIndex].freeBitmap = *(*[]uint64)(unsafe.Pointer(&alloc.pools[poolIndex].freeBitmapHdr))
|
||||
|
||||
bitmapStartAddr += bitmapBytes
|
||||
poolIndex++
|
||||
return true
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// markFrame updates the reservation flag for the bitmap entry that corresponds
|
||||
// to the supplied frame.
|
||||
func (alloc *BitmapAllocator) markFrame(poolIndex int, frame pmm.Frame, flag markAs) {
|
||||
if poolIndex < 0 || frame > alloc.pools[poolIndex].endFrame {
|
||||
return
|
||||
}
|
||||
|
||||
// The offset in the block is given by: frame % 64. As the bitmap uses a
|
||||
// big-ending representation we need to set the bit at index: 63 - offset
|
||||
relFrame := frame - alloc.pools[poolIndex].startFrame
|
||||
block := relFrame >> 6
|
||||
mask := uint64(1 << (63 - (relFrame - block<<6)))
|
||||
switch flag {
|
||||
case markFree:
|
||||
alloc.pools[poolIndex].freeBitmap[block] &^= mask
|
||||
alloc.pools[poolIndex].freeCount++
|
||||
alloc.reservedPages--
|
||||
case markReserved:
|
||||
alloc.pools[poolIndex].freeBitmap[block] |= mask
|
||||
alloc.pools[poolIndex].freeCount--
|
||||
alloc.reservedPages++
|
||||
}
|
||||
}
|
||||
|
||||
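The index math in markFrame packs 64 frames into each uint64 block, MSB first. A small self-contained sketch (blockAndMask is a hypothetical helper, not part of this commit) shows how a pool-relative frame number maps to a block index and a big-endian bit mask:

package main

import "fmt"

// blockAndMask mirrors markFrame: 64 frames per uint64 block, with bit 63
// representing the first frame in the block.
func blockAndMask(relFrame uint64) (block, mask uint64) {
	block = relFrame >> 6                      // relFrame / 64
	mask = uint64(1) << (63 - (relFrame & 63)) // big-endian bit within the block
	return block, mask
}

func main() {
	for _, f := range []uint64{0, 1, 63, 64, 130} {
		block, mask := blockAndMask(f)
		fmt.Printf("frame %3d -> block %d, mask %064b\n", f, block, mask)
	}
}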
// poolForFrame returns the index of the pool that contains frame or -1 if
// the frame is not contained in any of the available memory pools (e.g. it
// points to a reserved memory region).
func (alloc *BitmapAllocator) poolForFrame(frame pmm.Frame) int {
	for poolIndex, pool := range alloc.pools {
		if frame >= pool.startFrame && frame <= pool.endFrame {
			return poolIndex
		}
	}

	return -1
}

// reserveKernelFrames marks as reserved the bitmap entries for the frames
// occupied by the kernel image.
func (alloc *BitmapAllocator) reserveKernelFrames() {
	// Flag frames used by the kernel image as reserved. Since the kernel must
	// occupy a contiguous memory block we assume that all its frames will
	// fall into one of the available memory pools
	poolIndex := alloc.poolForFrame(earlyAllocator.kernelStartFrame)
	for frame := earlyAllocator.kernelStartFrame; frame <= earlyAllocator.kernelEndFrame; frame++ {
		alloc.markFrame(poolIndex, frame, markReserved)
	}
}

// reserveEarlyAllocatorFrames marks as reserved the bitmap entries for the frames
// already allocated by the early allocator.
func (alloc *BitmapAllocator) reserveEarlyAllocatorFrames() {
	// We now need to decommission the early allocator by flagging all frames
	// allocated by it as reserved. The allocator itself does not track
	// individual frames but only a counter of allocated frames. To get
	// the list of frames we reset its internal state and "replay" the
	// allocation requests to get the correct frames.
	allocCount := earlyAllocator.allocCount
	earlyAllocator.allocCount, earlyAllocator.lastAllocFrame = 0, 0
	for i := uint64(0); i < allocCount; i++ {
		frame, _ := earlyAllocator.AllocFrame()
		alloc.markFrame(
			alloc.poolForFrame(frame),
			frame,
			markReserved,
		)
	}
}

func (alloc *BitmapAllocator) printStats() {
	early.Printf(
		"[bitmap_alloc] page stats: free: %d/%d (%d reserved)\n",
		alloc.totalPages-alloc.reservedPages,
		alloc.totalPages,
		alloc.reservedPages,
	)
}

// AllocFrame reserves and returns a physical memory frame. An error will be
// returned if no more memory can be allocated.
func (alloc *BitmapAllocator) AllocFrame() (pmm.Frame, *kernel.Error) {
	for poolIndex := 0; poolIndex < len(alloc.pools); poolIndex++ {
		if alloc.pools[poolIndex].freeCount == 0 {
			continue
		}

		fullBlock := uint64(math.MaxUint64)
		for blockIndex, block := range alloc.pools[poolIndex].freeBitmap {
			if block == fullBlock {
				continue
			}

			// Block has at least one free slot; we need to scan its bits
			for blockOffset, mask := 0, uint64(1<<63); mask > 0; blockOffset, mask = blockOffset+1, mask>>1 {
				if block&mask != 0 {
					continue
				}

				alloc.pools[poolIndex].freeCount--
				alloc.pools[poolIndex].freeBitmap[blockIndex] |= mask
				alloc.reservedPages++
				return alloc.pools[poolIndex].startFrame + pmm.Frame((blockIndex<<6)+blockOffset), nil
			}
		}
	}

	return pmm.InvalidFrame, errBitmapAllocOutOfMemory
}
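AllocFrame scans a non-full block one bit at a time from the MSB down. As an aside, math/bits could jump straight to the first clear bit; the sketch below is an alternative, not what this commit does, since the kernel deliberately avoids most of the standard library:

package main

import (
	"fmt"
	"math/bits"
)

// firstFreeOffset returns the big-endian offset (0 = bit 63) of the first
// clear bit in a bitmap block, or 64 when the block is fully reserved.
func firstFreeOffset(block uint64) int {
	return bits.LeadingZeros64(^block)
}

func main() {
	block := uint64(7) << 61                 // first three frames reserved
	fmt.Println(firstFreeOffset(block))      // 3
	fmt.Println(firstFreeOffset(^uint64(0))) // 64: block is full
}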
// FreeFrame releases a frame previously allocated via a call to AllocFrame.
// Trying to release a frame not part of the allocator pools or a frame that
// is already marked as free will cause an error to be returned.
func (alloc *BitmapAllocator) FreeFrame(frame pmm.Frame) *kernel.Error {
	poolIndex := alloc.poolForFrame(frame)
	if poolIndex < 0 {
		return errBitmapAllocFrameNotManaged
	}

	relFrame := frame - alloc.pools[poolIndex].startFrame
	block := relFrame >> 6
	mask := uint64(1 << (63 - (relFrame - block<<6)))

	if alloc.pools[poolIndex].freeBitmap[block]&mask == 0 {
		return errBitmapAllocDoubleFree
	}

	alloc.pools[poolIndex].freeBitmap[block] &^= mask
	alloc.pools[poolIndex].freeCount++
	alloc.reservedPages--
	return nil
}

// earlyAllocFrame is a helper that delegates a frame allocation request to the
// early allocator instance. This function is passed as an argument to
// vmm.SetFrameAllocator instead of earlyAllocator.AllocFrame. The latter
// confuses the compiler's escape analysis into thinking that
// earlyAllocator escapes to the heap.
func earlyAllocFrame() (pmm.Frame, *kernel.Error) {
	return earlyAllocator.AllocFrame()
}

// AllocFrame is a helper that delegates a frame allocation request to the
// bitmap allocator instance.
func AllocFrame() (pmm.Frame, *kernel.Error) {
	return bitmapAllocator.AllocFrame()
}

// Init sets up the kernel physical memory allocation sub-system.
func Init(kernelStart, kernelEnd uintptr) *kernel.Error {
	earlyAllocator.init(kernelStart, kernelEnd)
	earlyAllocator.printMemoryMap()

	vmm.SetFrameAllocator(earlyAllocFrame)
	if err := bitmapAllocator.init(); err != nil {
		return err
	}
	vmm.SetFrameAllocator(AllocFrame)

	return nil
}
431
src/gopheros/kernel/mem/pmm/allocator/bitmap_allocator_test.go
Normal file
@@ -0,0 +1,431 @@
package allocator

import (
	"gopheros/kernel"
	"gopheros/kernel/hal/multiboot"
	"gopheros/kernel/mem"
	"gopheros/kernel/mem/pmm"
	"gopheros/kernel/mem/vmm"
	"math"
	"strconv"
	"testing"
	"unsafe"
)

func TestSetupPoolBitmaps(t *testing.T) {
	defer func() {
		mapFn = vmm.Map
		reserveRegionFn = vmm.EarlyReserveRegion
	}()

	multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))

	// The captured multiboot data corresponds to qemu running with 128M RAM.
	// The allocator will need to reserve 2 pages to store the bitmap data.
	var (
		alloc   BitmapAllocator
		physMem = make([]byte, 2*mem.PageSize)
	)

	// Init phys mem with junk
	for i := 0; i < len(physMem); i++ {
		physMem[i] = 0xf0
	}

	mapCallCount := 0
	mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
		mapCallCount++
		return nil
	}

	reserveCallCount := 0
	reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
		reserveCallCount++
		return uintptr(unsafe.Pointer(&physMem[0])), nil
	}

	if err := alloc.setupPoolBitmaps(); err != nil {
		t.Fatal(err)
	}

	if exp := 2; mapCallCount != exp {
		t.Fatalf("expected allocator to call vmm.Map %d times; called %d", exp, mapCallCount)
	}

	if exp := 1; reserveCallCount != exp {
		t.Fatalf("expected allocator to call vmm.EarlyReserveRegion %d times; called %d", exp, reserveCallCount)
	}

	if exp, got := 2, len(alloc.pools); got != exp {
		t.Fatalf("expected allocator to initialize %d pools; got %d", exp, got)
	}

	for poolIndex, pool := range alloc.pools {
		if expFreeCount := uint32(pool.endFrame - pool.startFrame + 1); pool.freeCount != expFreeCount {
			t.Errorf("[pool %d] expected free count to be %d; got %d", poolIndex, expFreeCount, pool.freeCount)
		}

		if exp, got := int(math.Ceil(float64(pool.freeCount)/64.0)), len(pool.freeBitmap); got != exp {
			t.Errorf("[pool %d] expected bitmap len to be %d; got %d", poolIndex, exp, got)
		}

		for blockIndex, block := range pool.freeBitmap {
			if block != 0 {
				t.Errorf("[pool %d] expected bitmap block %d to be cleared; got %d", poolIndex, blockIndex, block)
			}
		}
	}
}

func TestSetupPoolBitmapsErrors(t *testing.T) {
	defer func() {
		mapFn = vmm.Map
		reserveRegionFn = vmm.EarlyReserveRegion
	}()

	multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))
	var alloc BitmapAllocator

	t.Run("vmm.EarlyReserveRegion returns an error", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "something went wrong"}

		reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
			return 0, expErr
		}

		if err := alloc.setupPoolBitmaps(); err != expErr {
			t.Fatalf("expected to get error: %v; got %v", expErr, err)
		}
	})

	t.Run("vmm.Map returns an error", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "something went wrong"}

		reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
			return 0, nil
		}

		mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
			return expErr
		}

		if err := alloc.setupPoolBitmaps(); err != expErr {
			t.Fatalf("expected to get error: %v; got %v", expErr, err)
		}
	})

	t.Run("earlyAllocator returns an error", func(t *testing.T) {
		emptyInfoData := []byte{
			0, 0, 0, 0, // size
			0, 0, 0, 0, // reserved
			0, 0, 0, 0, // tag with type zero and length zero
			0, 0, 0, 0,
		}

		multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&emptyInfoData[0])))

		if err := alloc.setupPoolBitmaps(); err != errBootAllocOutOfMemory {
			t.Fatalf("expected to get error: %v; got %v", errBootAllocOutOfMemory, err)
		}
	})
}

func TestBitmapAllocatorMarkFrame(t *testing.T) {
	var alloc = BitmapAllocator{
		pools: []framePool{
			{
				startFrame: pmm.Frame(0),
				endFrame:   pmm.Frame(127),
				freeCount:  128,
				freeBitmap: make([]uint64, 2),
			},
		},
		totalPages: 128,
	}

	lastFrame := pmm.Frame(alloc.totalPages)
	for frame := pmm.Frame(0); frame < lastFrame; frame++ {
		alloc.markFrame(0, frame, markReserved)

		block := uint64(frame / 64)
		blockOffset := uint64(frame % 64)
		bitIndex := (63 - blockOffset)
		bitMask := uint64(1 << bitIndex)

		if alloc.pools[0].freeBitmap[block]&bitMask != bitMask {
			t.Errorf("[frame %d] expected block[%d], bit %d to be set", frame, block, bitIndex)
		}

		alloc.markFrame(0, frame, markFree)

		if alloc.pools[0].freeBitmap[block]&bitMask != 0 {
			t.Errorf("[frame %d] expected block[%d], bit %d to be unset", frame, block, bitIndex)
		}
	}

	// Calling markFrame with a frame not part of the pool should be a no-op
	alloc.markFrame(0, pmm.Frame(0xbadf00d), markReserved)
	for blockIndex, block := range alloc.pools[0].freeBitmap {
		if block != 0 {
			t.Errorf("expected all blocks to be set to 0; block %d is set to %d", blockIndex, block)
		}
	}

	// Calling markFrame with a negative pool index should be a no-op
	alloc.markFrame(-1, pmm.Frame(0), markReserved)
	for blockIndex, block := range alloc.pools[0].freeBitmap {
		if block != 0 {
			t.Errorf("expected all blocks to be set to 0; block %d is set to %d", blockIndex, block)
		}
	}
}

func TestBitmapAllocatorPoolForFrame(t *testing.T) {
	var alloc = BitmapAllocator{
		pools: []framePool{
			{
				startFrame: pmm.Frame(0),
				endFrame:   pmm.Frame(63),
				freeCount:  64,
				freeBitmap: make([]uint64, 1),
			},
			{
				startFrame: pmm.Frame(128),
				endFrame:   pmm.Frame(191),
				freeCount:  64,
				freeBitmap: make([]uint64, 1),
			},
		},
		totalPages: 128,
	}

	specs := []struct {
		frame    pmm.Frame
		expIndex int
	}{
		{pmm.Frame(0), 0},
		{pmm.Frame(63), 0},
		{pmm.Frame(64), -1},
		{pmm.Frame(128), 1},
		{pmm.Frame(192), -1},
	}

	for specIndex, spec := range specs {
		if got := alloc.poolForFrame(spec.frame); got != spec.expIndex {
			t.Errorf("[spec %d] expected to get pool index %d; got %d", specIndex, spec.expIndex, got)
		}
	}
}

func TestBitmapAllocatorReserveKernelFrames(t *testing.T) {
	var alloc = BitmapAllocator{
		pools: []framePool{
			{
				startFrame: pmm.Frame(0),
				endFrame:   pmm.Frame(7),
				freeCount:  8,
				freeBitmap: make([]uint64, 1),
			},
			{
				startFrame: pmm.Frame(64),
				endFrame:   pmm.Frame(191),
				freeCount:  128,
				freeBitmap: make([]uint64, 2),
			},
		},
		totalPages: 136,
	}

	// kernel occupies 16 frames and starts at the beginning of pool 1
	earlyAllocator.kernelStartFrame = pmm.Frame(64)
	earlyAllocator.kernelEndFrame = pmm.Frame(79)
	kernelSizePages := uint32(earlyAllocator.kernelEndFrame - earlyAllocator.kernelStartFrame + 1)
	alloc.reserveKernelFrames()

	if exp, got := kernelSizePages, alloc.reservedPages; got != exp {
		t.Fatalf("expected reserved page counter to be %d; got %d", exp, got)
	}

	if exp, got := uint32(8), alloc.pools[0].freeCount; got != exp {
		t.Fatalf("expected free count for pool 0 to be %d; got %d", exp, got)
	}

	if exp, got := 128-kernelSizePages, alloc.pools[1].freeCount; got != exp {
		t.Fatalf("expected free count for pool 1 to be %d; got %d", exp, got)
	}

	// The first 16 bits of block 0 in pool 1 should all be set to 1
	if exp, got := uint64(((1<<16)-1)<<48), alloc.pools[1].freeBitmap[0]; got != exp {
		t.Fatalf("expected block 0 in pool 1 to be:\n%064s\ngot:\n%064s",
			strconv.FormatUint(exp, 2),
			strconv.FormatUint(got, 2),
		)
	}
}

func TestBitmapAllocatorReserveEarlyAllocatorFrames(t *testing.T) {
	var alloc = BitmapAllocator{
		pools: []framePool{
			{
				startFrame: pmm.Frame(0),
				endFrame:   pmm.Frame(63),
				freeCount:  64,
				freeBitmap: make([]uint64, 1),
			},
			{
				startFrame: pmm.Frame(64),
				endFrame:   pmm.Frame(191),
				freeCount:  128,
				freeBitmap: make([]uint64, 2),
			},
		},
		totalPages: 64,
	}

	multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))

	// Simulate 16 allocations made using the early allocator in region 0
	// as reported by the multiboot data and move the kernel to pool 1
	allocCount := uint32(16)
	earlyAllocator.allocCount = uint64(allocCount)
	earlyAllocator.kernelStartFrame = pmm.Frame(256)
	earlyAllocator.kernelEndFrame = pmm.Frame(256)
	alloc.reserveEarlyAllocatorFrames()

	if exp, got := allocCount, alloc.reservedPages; got != exp {
		t.Fatalf("expected reserved page counter to be %d; got %d", exp, got)
	}

	if exp, got := 64-allocCount, alloc.pools[0].freeCount; got != exp {
		t.Fatalf("expected free count for pool 0 to be %d; got %d", exp, got)
	}

	if exp, got := uint32(128), alloc.pools[1].freeCount; got != exp {
		t.Fatalf("expected free count for pool 1 to be %d; got %d", exp, got)
	}

	// The first 16 bits of block 0 in pool 0 should all be set to 1
	if exp, got := uint64(((1<<16)-1)<<48), alloc.pools[0].freeBitmap[0]; got != exp {
		t.Fatalf("expected block 0 in pool 0 to be:\n%064s\ngot:\n%064s",
			strconv.FormatUint(exp, 2),
			strconv.FormatUint(got, 2),
		)
	}
}

func TestBitmapAllocatorAllocAndFreeFrame(t *testing.T) {
	var alloc = BitmapAllocator{
		pools: []framePool{
			{
				startFrame: pmm.Frame(0),
				endFrame:   pmm.Frame(7),
				freeCount:  8,
				// only the first 8 bits of block 0 are used
				freeBitmap: make([]uint64, 1),
			},
			{
				startFrame: pmm.Frame(64),
				endFrame:   pmm.Frame(191),
				freeCount:  128,
				freeBitmap: make([]uint64, 2),
			},
		},
		totalPages: 136,
	}

	// Test Alloc
	for poolIndex, pool := range alloc.pools {
		for expFrame := pool.startFrame; expFrame <= pool.endFrame; expFrame++ {
			got, err := alloc.AllocFrame()
			if err != nil {
				t.Fatalf("[pool %d] unexpected error: %v", poolIndex, err)
			}

			if got != expFrame {
				t.Errorf("[pool %d] expected allocated frame to be %d; got %d", poolIndex, expFrame, got)
			}
		}

		if alloc.pools[poolIndex].freeCount != 0 {
			t.Errorf("[pool %d] expected free count to be 0; got %d", poolIndex, alloc.pools[poolIndex].freeCount)
		}
	}

	if alloc.reservedPages != alloc.totalPages {
		t.Errorf("expected reservedPages to match totalPages(%d); got %d", alloc.totalPages, alloc.reservedPages)
	}

	if _, err := alloc.AllocFrame(); err != errBitmapAllocOutOfMemory {
		t.Fatalf("expected error errBitmapAllocOutOfMemory; got %v", err)
	}

	// Test Free
	expFreeCount := []uint32{8, 128}
	for poolIndex, pool := range alloc.pools {
		for frame := pool.startFrame; frame <= pool.endFrame; frame++ {
			if err := alloc.FreeFrame(frame); err != nil {
				t.Fatalf("[pool %d] unexpected error: %v", poolIndex, err)
			}
		}

		if alloc.pools[poolIndex].freeCount != expFreeCount[poolIndex] {
			t.Errorf("[pool %d] expected free count to be %d; got %d", poolIndex, expFreeCount[poolIndex], alloc.pools[poolIndex].freeCount)
		}
	}

	if alloc.reservedPages != 0 {
		t.Errorf("expected reservedPages to be 0; got %d", alloc.reservedPages)
	}

	// Test Free errors
	if err := alloc.FreeFrame(pmm.Frame(0)); err != errBitmapAllocDoubleFree {
		t.Fatalf("expected error errBitmapAllocDoubleFree; got %v", err)
	}

	if err := alloc.FreeFrame(pmm.Frame(0xbadf00d)); err != errBitmapAllocFrameNotManaged {
		t.Fatalf("expected error errBitmapAllocFrameNotManaged; got %v", err)
	}
}

func TestAllocatorPackageInit(t *testing.T) {
	defer func() {
		mapFn = vmm.Map
		reserveRegionFn = vmm.EarlyReserveRegion
	}()

	var (
		physMem = make([]byte, 2*mem.PageSize)
	)
	multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))

	t.Run("success", func(t *testing.T) {
		mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
			return nil
		}

		reserveRegionFn = func(_ mem.Size) (uintptr, *kernel.Error) {
			return uintptr(unsafe.Pointer(&physMem[0])), nil
		}

		mockTTY()
		if err := Init(0x100000, 0x1fa7c8); err != nil {
			t.Fatal(err)
		}

		// At this point AllocFrame should work
		if _, err := AllocFrame(); err != nil {
			t.Fatal(err)
		}
	})

	t.Run("error", func(t *testing.T) {
		expErr := &kernel.Error{Module: "test", Message: "something went wrong"}

		mapFn = func(page vmm.Page, frame pmm.Frame, flags vmm.PageTableEntryFlag) *kernel.Error {
			return expErr
		}

		if err := Init(0x100000, 0x1fa7c8); err != expErr {
			t.Fatalf("expected to get error: %v; got %v", expErr, err)
		}
	})
}
136
src/gopheros/kernel/mem/pmm/allocator/bootmem.go
Normal file
@@ -0,0 +1,136 @@
package allocator

import (
	"gopheros/kernel"
	"gopheros/kernel/hal/multiboot"
	"gopheros/kernel/kfmt/early"
	"gopheros/kernel/mem"
	"gopheros/kernel/mem/pmm"
)

var (
	// earlyAllocator is a boot mem allocator instance used for page
	// allocations before switching to a more advanced allocator.
	earlyAllocator bootMemAllocator

	errBootAllocOutOfMemory = &kernel.Error{Module: "boot_mem_alloc", Message: "out of memory"}
)

// bootMemAllocator implements a rudimentary physical memory allocator which is
// used to bootstrap the kernel.
//
// The allocator implementation uses the memory region information provided by
// the bootloader to detect free memory blocks and return the next available
// free frame. Allocations are tracked via an internal counter that contains
// the last allocated frame.
//
// Due to the way that the allocator works, it is not possible to free
// allocated pages. Once the kernel is properly initialized, the allocated
// blocks will be handed over to a more advanced memory allocator that does
// support freeing.
type bootMemAllocator struct {
	// allocCount tracks the total number of allocated frames.
	allocCount uint64

	// lastAllocFrame tracks the last allocated frame number.
	lastAllocFrame pmm.Frame

	// Keep track of the kernel location so we exclude this region.
	kernelStartAddr, kernelEndAddr   uintptr
	kernelStartFrame, kernelEndFrame pmm.Frame
}

// init sets up the boot memory allocator internal state.
func (alloc *bootMemAllocator) init(kernelStart, kernelEnd uintptr) {
	// Round down the kernel start to the nearest page and round up the
	// kernel end to the nearest page.
	pageSizeMinus1 := uintptr(mem.PageSize - 1)
	alloc.kernelStartAddr = kernelStart
	alloc.kernelEndAddr = kernelEnd
	alloc.kernelStartFrame = pmm.Frame((kernelStart & ^pageSizeMinus1) >> mem.PageShift)
	alloc.kernelEndFrame = pmm.Frame(((kernelEnd+pageSizeMinus1) & ^pageSizeMinus1)>>mem.PageShift) - 1
}

// AllocFrame scans the system memory regions reported by the bootloader and
// reserves the next available free frame.
//
// AllocFrame returns an error if no more memory can be allocated.
func (alloc *bootMemAllocator) AllocFrame() (pmm.Frame, *kernel.Error) {
	var err = errBootAllocOutOfMemory

	multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool {
		// Ignore reserved regions and regions smaller than a single page
		if region.Type != multiboot.MemAvailable || region.Length < uint64(mem.PageSize) {
			return true
		}

		// Reported addresses may not be page-aligned; round up to get
		// the start frame and round down to get the end frame
		pageSizeMinus1 := uint64(mem.PageSize - 1)
		regionStartFrame := pmm.Frame(((region.PhysAddress + pageSizeMinus1) & ^pageSizeMinus1) >> mem.PageShift)
		regionEndFrame := pmm.Frame(((region.PhysAddress+region.Length) & ^pageSizeMinus1)>>mem.PageShift) - 1

		// Skip over already allocated regions
		if alloc.lastAllocFrame >= regionEndFrame {
			return true
		}

		// If the last frame used a different region and the kernel image
		// is located at the beginning of this region, OR we are in the
		// current region but lastAllocFrame + 1 points to the kernel
		// start, we need to jump to the page following the kernel end
		// frame
		if (alloc.lastAllocFrame <= regionStartFrame && alloc.kernelStartFrame == regionStartFrame) ||
			(alloc.lastAllocFrame <= regionEndFrame && alloc.lastAllocFrame+1 == alloc.kernelStartFrame) {
			alloc.lastAllocFrame = alloc.kernelEndFrame + 1
		} else if alloc.lastAllocFrame < regionStartFrame || alloc.allocCount == 0 {
			// we are in the previous region and need to jump to this one OR
			// this is the first allocation and the region begins at frame 0
			alloc.lastAllocFrame = regionStartFrame
		} else {
			// we are in the region and we can select the next frame
			alloc.lastAllocFrame++
		}

		// The above adjustment might push lastAllocFrame outside of the
		// region end (e.g. the kernel ends at the last page in the region)
		if alloc.lastAllocFrame > regionEndFrame {
			return true
		}

		err = nil
		return false
	})

	if err != nil {
		return pmm.InvalidFrame, errBootAllocOutOfMemory
	}

	alloc.allocCount++
	return alloc.lastAllocFrame, nil
}
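Both allocators round a region's start address up and its end address down so that only whole pages are counted. A standalone sketch of that rounding (regionFrames is a hypothetical helper, not part of this commit), applied to the first qemu region from the multiboot dump used by the tests:

package main

import "fmt"

const pageShift = 12
const pageSize = 1 << pageShift

// regionFrames mirrors the allocators' rounding: round the start address
// up and the end address down to page boundaries, then convert to frames.
func regionFrames(physAddr, length uint64) (start, end uint64) {
	const mask = uint64(pageSize - 1)
	start = ((physAddr + mask) &^ mask) >> pageShift
	end = ((physAddr+length)&^mask)>>pageShift - 1
	return start, end
}

func main() {
	// First available qemu region: [0, 0x9fc00)
	s, e := regionFrames(0x0, 0x9fc00)
	fmt.Printf("frames %d..%d (%d usable pages)\n", s, e, e-s+1) // 0..158, 159 pages
}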
// printMemoryMap scans the memory region information provided by the
// bootloader and prints out the system's memory map.
func (alloc *bootMemAllocator) printMemoryMap() {
	early.Printf("[boot_mem_alloc] system memory map:\n")
	var totalFree mem.Size
	multiboot.VisitMemRegions(func(region *multiboot.MemoryMapEntry) bool {
		early.Printf("\t[0x%10x - 0x%10x], size: %10d, type: %s\n", region.PhysAddress, region.PhysAddress+region.Length, region.Length, region.Type.String())

		if region.Type == multiboot.MemAvailable {
			totalFree += mem.Size(region.Length)
		}
		return true
	})
	early.Printf("[boot_mem_alloc] available memory: %dKb\n", uint64(totalFree/mem.Kb))
	early.Printf("[boot_mem_alloc] kernel loaded at 0x%x - 0x%x\n", alloc.kernelStartAddr, alloc.kernelEndAddr)
	early.Printf("[boot_mem_alloc] size: %d bytes, reserved pages: %d\n",
		uint64(alloc.kernelEndAddr-alloc.kernelStartAddr),
		uint64(alloc.kernelEndFrame-alloc.kernelStartFrame+1),
	)
}
130
src/gopheros/kernel/mem/pmm/allocator/bootmem_test.go
Normal file
@@ -0,0 +1,130 @@
package allocator

import (
	"gopheros/kernel/driver/video/console"
	"gopheros/kernel/hal"
	"gopheros/kernel/hal/multiboot"
	"testing"
	"unsafe"
)

func TestBootMemoryAllocator(t *testing.T) {
	multiboot.SetInfoPtr(uintptr(unsafe.Pointer(&multibootMemoryMap[0])))

	specs := []struct {
		kernelStart, kernelEnd uintptr
		expAllocCount          uint64
	}{
		{
			// the kernel is loaded in a reserved memory region
			0xa0000,
			0xa0000,
			// region 1 extents get rounded to [0, 9f000] and provide 159 frames [0 to 158]
			// region 2 uses the original extents [100000 - 7fe0000] and provides 32480 frames [256-32735]
			159 + 32480,
		},
		{
			// the kernel is loaded at the beginning of region 1 taking 2.5 pages
			0x0,
			0x2800,
			// region 1 extents get rounded to [0, 9f000] and provide 159 frames [0 to 158]; out of these,
			// frames 0, 1 and 2 (round up kernel end) are used by the kernel
			// region 2 uses the original extents [100000 - 7fe0000] and provides 32480 frames [256-32735]
			159 - 3 + 32480,
		},
		{
			// the kernel is loaded at the end of region 1 taking 2.5 pages
			0x9c800,
			0x9f000,
			// region 1 extents get rounded to [0, 9f000] and provide 159 frames [0 to 158]; out of these,
			// frames 156, 157 and 158 (round down kernel start) are used by the kernel
			// region 2 uses the original extents [100000 - 7fe0000] and provides 32480 frames [256-32735]
			159 - 3 + 32480,
		},
		{
			// the kernel (after rounding) uses the entire region 1
			0x123,
			0x9fc00,
			// region 1 extents get rounded to [0, 9f000] and provide 159 frames [0 to 158]; all are used
			// by the kernel
			// region 2 uses the original extents [100000 - 7fe0000] and provides 32480 frames [256-32735]
			32480,
		},
		{
			// the kernel is loaded at region 2 start + 2K taking 1.5 pages
			0x100800,
			0x102000,
			// region 1 extents get rounded to [0, 9f000] and provide 159 frames [0 to 158]
			// region 2 uses the original extents [100000 - 7fe0000] and provides 32480 frames [256-32735];
			// out of these, frames 256 (kernel start rounded down) and 257 are used by the kernel
			159 + 32480 - 2,
		},
	}

	var alloc bootMemAllocator
	for specIndex, spec := range specs {
		alloc.allocCount = 0
		alloc.lastAllocFrame = 0
		alloc.init(spec.kernelStart, spec.kernelEnd)

		for {
			frame, err := alloc.AllocFrame()
			if err != nil {
				if err == errBootAllocOutOfMemory {
					break
				}
				t.Errorf("[spec %d] [frame %d] unexpected allocator error: %v", specIndex, alloc.allocCount, err)
				break
			}

			if frame != alloc.lastAllocFrame {
				t.Errorf("[spec %d] [frame %d] expected allocated frame to be %d; got %d", specIndex, alloc.allocCount, alloc.lastAllocFrame, frame)
			}

			if !frame.Valid() {
				t.Errorf("[spec %d] [frame %d] expected Valid() to return true", specIndex, alloc.allocCount)
			}
		}

		if alloc.allocCount != spec.expAllocCount {
			t.Errorf("[spec %d] expected allocator to allocate %d frames; allocated %d", specIndex, spec.expAllocCount, alloc.allocCount)
		}
	}
}

var (
	// A dump of multiboot data when running under qemu containing only the
	// memory region tag. The dump encodes the following available memory
	// regions:
	// [      0 -   9fc00] length:    654336
	// [ 100000 - 7fe0000] length: 133038080
	multibootMemoryMap = []byte{
		72, 5, 0, 0, 0, 0, 0, 0,
		6, 0, 0, 0, 160, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 252, 9, 0, 0, 0, 0, 0,
		1, 0, 0, 0, 0, 0, 0, 0, 0, 252, 9, 0, 0, 0, 0, 0,
		0, 4, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
		2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0,
		0, 0, 238, 7, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 254, 7, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0,
		2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 252, 255, 0, 0, 0, 0,
		0, 0, 4, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0,
		9, 0, 0, 0, 212, 3, 0, 0, 24, 0, 0, 0, 40, 0, 0, 0,
		21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 27, 0, 0, 0,
		1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 16, 0, 0, 16, 0, 0,
		24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	}
)

func mockTTY() []byte {
	// Mock a tty to handle early.Printf output
	mockConsoleFb := make([]byte, 160*25)
	mockConsole := &console.Ega{}
	mockConsole.Init(80, 25, uintptr(unsafe.Pointer(&mockConsoleFb[0])))
	hal.ActiveTerminal.AttachTo(mockConsole)

	return mockConsoleFb
}
26
src/gopheros/kernel/mem/pmm/frame.go
Normal file
@@ -0,0 +1,26 @@
// Package pmm contains code that manages physical memory frame allocations.
package pmm

import (
	"gopheros/kernel/mem"
	"math"
)

// Frame describes a physical memory page index.
type Frame uintptr

const (
	// InvalidFrame is returned by page allocators when
	// they fail to reserve the requested frame.
	InvalidFrame = Frame(math.MaxUint64)
)

// Valid returns true if this is a valid frame.
func (f Frame) Valid() bool {
	return f != InvalidFrame
}

// Address returns the physical memory address where this Frame begins.
func (f Frame) Address() uintptr {
	return uintptr(f << mem.PageShift)
}
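Frame arithmetic is deliberately trivial: a frame is a page index, and PageShift converts between indices and byte addresses. A tiny standalone sketch (illustrative only, not part of this commit):

package main

import "fmt"

const pageShift = 12

func main() {
	// Shifting a frame index left by PageShift yields the physical byte
	// address; shifting right converts an address back to its frame.
	frame := uintptr(123)
	addr := frame << pageShift
	fmt.Printf("frame %d -> address 0x%x -> frame %d\n", frame, addr, addr>>pageShift)
}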
25
src/gopheros/kernel/mem/pmm/frame_test.go
Normal file
@@ -0,0 +1,25 @@
package pmm

import (
	"gopheros/kernel/mem"
	"testing"
)

func TestFrameMethods(t *testing.T) {
	for frameIndex := uint64(0); frameIndex < 128; frameIndex++ {
		frame := Frame(frameIndex)

		if !frame.Valid() {
			t.Errorf("expected frame %d to be valid", frameIndex)
		}

		if exp, got := uintptr(frameIndex<<mem.PageShift), frame.Address(); got != exp {
			t.Errorf("expected frame (%d, index: %d) call to Address() to return %x; got %x", frame, frameIndex, exp, got)
		}
	}

	invalidFrame := InvalidFrame
	if invalidFrame.Valid() {
		t.Error("expected InvalidFrame.Valid() to return false")
	}
}
12
src/gopheros/kernel/mem/size.go
Normal file
@@ -0,0 +1,12 @@
package mem

// Size represents a memory block size in bytes.
type Size uint64

// Common memory block sizes.
const (
	Byte Size = 1
	Kb        = 1024 * Byte
	Mb        = 1024 * Kb
	Gb        = 1024 * Mb
)
35
src/gopheros/kernel/mem/vmm/addr_space.go
Normal file
@@ -0,0 +1,35 @@
package vmm

import (
	"gopheros/kernel"
	"gopheros/kernel/mem"
)

var (
	// earlyReserveLastUsed tracks the last reserved page address and is
	// decreased after each allocation request. Initially, it points to
	// tempMappingAddr which coincides with the end of the kernel address
	// space.
	earlyReserveLastUsed = tempMappingAddr

	errEarlyReserveNoSpace = &kernel.Error{Module: "early_reserve", Message: "remaining virtual address space not large enough to satisfy reservation request"}
)

// EarlyReserveRegion reserves a page-aligned contiguous virtual memory region
// with the requested size in the kernel address space and returns its virtual
// address. If size is not a multiple of mem.PageSize it will be automatically
// rounded up.
//
// This function allocates regions starting at the end of the kernel address
// space. It should only be used during the early stages of kernel initialization.
func EarlyReserveRegion(size mem.Size) (uintptr, *kernel.Error) {
	size = (size + (mem.PageSize - 1)) & ^(mem.PageSize - 1)

	// reserving a region of the requested size would cause an underflow
	if uintptr(size) > earlyReserveLastUsed {
		return 0, errEarlyReserveNoSpace
	}

	earlyReserveLastUsed -= uintptr(size)
	return earlyReserveLastUsed, nil
}
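EarlyReserveRegion carves regions downwards from a high watermark, rounding each request up to a page multiple. A self-contained sketch of that policy (the reserve helper and the literals are illustrative, not part of this commit):

package main

import "fmt"

const pageSize = 4096

// reserve mimics EarlyReserveRegion: round the size up to a page multiple,
// then move the watermark down and hand out the new watermark address.
func reserve(lastUsed *uintptr, size uintptr) (uintptr, bool) {
	size = (size + (pageSize - 1)) &^ (pageSize - 1)
	if size > *lastUsed {
		return 0, false // request would underflow the address space
	}
	*lastUsed -= size
	return *lastUsed, true
}

func main() {
	last := uintptr(0x10000000)
	addr, _ := reserve(&last, 42) // rounded up to one full page
	fmt.Printf("reserved at 0x%x, watermark now 0x%x\n", addr, last)
}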
29
src/gopheros/kernel/mem/vmm/addr_space_test.go
Normal file
@@ -0,0 +1,29 @@
package vmm

import (
	"runtime"
	"testing"
)

func TestEarlyReserveAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}

	defer func(origLastUsed uintptr) {
		earlyReserveLastUsed = origLastUsed
	}(earlyReserveLastUsed)

	earlyReserveLastUsed = 4096
	next, err := EarlyReserveRegion(42)
	if err != nil {
		t.Fatal(err)
	}
	if exp := uintptr(0); next != exp {
		t.Fatal("expected reservation request to be rounded to nearest page")
	}

	if _, err = EarlyReserveRegion(1); err != errEarlyReserveNoSpace {
		t.Fatalf("expected to get errEarlyReserveNoSpace; got %v", err)
	}
}
89
src/gopheros/kernel/mem/vmm/constants_amd64.go
Normal file
@@ -0,0 +1,89 @@
// +build amd64

package vmm

import "math"

const (
	// pageLevels indicates the number of page levels supported by the amd64 architecture.
	pageLevels = 4

	// ptePhysPageMask is a mask that allows us to extract the physical memory
	// address pointed to by a page table entry. For this particular architecture,
	// bits 12-51 contain the physical memory address.
	ptePhysPageMask = uintptr(0x000ffffffffff000)

	// tempMappingAddr is a reserved virtual page address used for
	// temporary physical page mappings (e.g. when mapping inactive PDT
	// pages). For amd64 this address uses the following table indices:
	// 510, 511, 511, 511.
	tempMappingAddr = uintptr(0xffffff7ffffff000)
)

var (
	// pdtVirtualAddr is a special virtual address that exploits the
	// recursive mapping used in the last PDT entry for each page directory
	// to allow accessing the PDT (P4) table using the system's MMU address
	// translation mechanism. By setting all page level bits to 1 the MMU
	// keeps following the last P4 entry for all page levels, landing on the
	// P4.
	pdtVirtualAddr = uintptr(math.MaxUint64 &^ ((1 << 12) - 1))

	// pageLevelBits defines the number of virtual address bits that correspond to each
	// page level. For the amd64 architecture each page level uses 9 bits, which amounts to
	// 512 entries for each page level.
	pageLevelBits = [pageLevels]uint8{
		9,
		9,
		9,
		9,
	}

	// pageLevelShifts defines the shift required to access each page table component
	// of a virtual address.
	pageLevelShifts = [pageLevels]uint8{
		39,
		30,
		21,
		12,
	}
)

const (
	// FlagPresent is set when the page is available in memory and not swapped out.
	FlagPresent PageTableEntryFlag = 1 << iota

	// FlagRW is set if the page can be written to.
	FlagRW

	// FlagUserAccessible is set if user-mode processes can access this page. If
	// not set only kernel code can access this page.
	FlagUserAccessible

	// FlagWriteThroughCaching implies write-through caching when set and write-back
	// caching if cleared.
	FlagWriteThroughCaching

	// FlagDoNotCache prevents this page from being cached if set.
	FlagDoNotCache

	// FlagAccessed is set by the CPU when this page is accessed.
	FlagAccessed

	// FlagDirty is set by the CPU when this page is modified.
	FlagDirty

	// FlagHugePage is set when using 2Mb pages instead of 4K pages.
	FlagHugePage

	// FlagGlobal if set, prevents the TLB from flushing the cached memory address
	// for this page when swapping page tables by updating the CR3 register.
	FlagGlobal

	// FlagCopyOnWrite is used to implement copy-on-write functionality. This
	// flag and FlagRW are mutually exclusive.
	FlagCopyOnWrite = 1 << 9

	// FlagNoExecute if set, indicates that a page contains non-executable code.
	FlagNoExecute = 1 << 63
)
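The table indices quoted for tempMappingAddr (510, 511, 511, 511) follow directly from the 9-bits-per-level split. A standalone sketch (levelIndices is a hypothetical helper, not part of this commit) that decomposes a canonical amd64 virtual address using the shifts above:

package main

import "fmt"

// levelIndices splits a virtual address into its four 9-bit page table
// indices (P4..P1), using the same shifts as pageLevelShifts.
func levelIndices(virt uint64) [4]uint64 {
	shifts := [4]uint64{39, 30, 21, 12}
	var idx [4]uint64
	for i, s := range shifts {
		idx[i] = (virt >> s) & 0x1ff // 9 bits per level
	}
	return idx
}

func main() {
	// tempMappingAddr from the constants above
	fmt.Println(levelIndices(0xffffff7ffffff000)) // [510 511 511 511]
}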
154
src/gopheros/kernel/mem/vmm/map.go
Normal file
@@ -0,0 +1,154 @@
package vmm

import (
	"gopheros/kernel"
	"gopheros/kernel/cpu"
	"gopheros/kernel/mem"
	"gopheros/kernel/mem/pmm"
	"unsafe"
)

// ReservedZeroedFrame is a special zero-cleared frame allocated by the
// vmm package's Init function. The purpose of this frame is to assist
// in implementing on-demand memory allocation when mapping it in
// conjunction with the CopyOnWrite flag. Here is an example of how it
// can be used:
//
//	func ReserveOnDemand(start vmm.Page, pageCount int) *kernel.Error {
//		var err *kernel.Error
//		mapFlags := vmm.FlagPresent | vmm.FlagCopyOnWrite
//		for page := start; pageCount > 0; pageCount, page = pageCount-1, page+1 {
//			if err = vmm.Map(page, vmm.ReservedZeroedFrame, mapFlags); err != nil {
//				return err
//			}
//		}
//		return nil
//	}
//
// In the above example, page mappings are set up for the requested number of
// pages but no physical memory is reserved for their contents. A write to any
// of the above pages will trigger a page-fault causing a new frame to be
// allocated, cleared (the blank frame is copied to the new frame) and
// installed in-place with RW permissions.
var ReservedZeroedFrame pmm.Frame

var (
	// protectReservedZeroedPage is set to true to prevent mapping
	// ReservedZeroedFrame with a RW flag.
	protectReservedZeroedPage bool

	// nextAddrFn is used by tests to override the nextTableAddr
	// calculations used by Map. When compiling the kernel this function
	// will be automatically inlined.
	nextAddrFn = func(entryAddr uintptr) uintptr {
		return entryAddr
	}

	// flushTLBEntryFn is used by tests to override calls to flushTLBEntry
	// which will cause a fault if called in user-mode.
	flushTLBEntryFn = cpu.FlushTLBEntry

	errNoHugePageSupport           = &kernel.Error{Module: "vmm", Message: "huge pages are not supported"}
	errAttemptToRWMapReservedFrame = &kernel.Error{Module: "vmm", Message: "reserved blank frame cannot be mapped with a RW flag"}
)

// Map establishes a mapping between a virtual page and a physical memory frame
// using the currently active page directory table. Calls to Map will use the
// supplied physical frame allocator to initialize missing page tables at each
// paging level supported by the MMU.
//
// Attempts to map ReservedZeroedFrame with a RW flag will result in an error.
func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
	if protectReservedZeroedPage && frame == ReservedZeroedFrame && (flags&FlagRW) != 0 {
		return errAttemptToRWMapReservedFrame
	}

	var err *kernel.Error

	walk(page.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
		// If we reached the last level all we need to do is to map the
		// frame in place, flag it as present and flush its TLB entry
		if pteLevel == pageLevels-1 {
			*pte = 0
			pte.SetFrame(frame)
			pte.SetFlags(flags)
			flushTLBEntryFn(page.Address())
			return true
		}

		if pte.HasFlags(FlagHugePage) {
			err = errNoHugePageSupport
			return false
		}

		// Next table does not yet exist; we need to allocate a
		// physical frame for it, map it and clear its contents.
		if !pte.HasFlags(FlagPresent) {
			var newTableFrame pmm.Frame
			newTableFrame, err = frameAllocator()
			if err != nil {
				return false
			}

			*pte = 0
			pte.SetFrame(newTableFrame)
			pte.SetFlags(FlagPresent | FlagRW)

			// The next pte entry becomes available but we need to
			// make sure that the new page is properly cleared
			nextTableAddr := (uintptr(unsafe.Pointer(pte)) << pageLevelBits[pteLevel+1])
			mem.Memset(nextAddrFn(nextTableAddr), 0, mem.PageSize)
		}

		return true
	})

	return err
}

// MapTemporary establishes a temporary RW mapping of a physical memory frame
// to a fixed virtual address overwriting any previous mapping. The temporary
// mapping mechanism is primarily used by the kernel to access and initialize
// inactive page tables.
//
// Attempts to map ReservedZeroedFrame will result in an error.
func MapTemporary(frame pmm.Frame) (Page, *kernel.Error) {
	if protectReservedZeroedPage && frame == ReservedZeroedFrame {
		return 0, errAttemptToRWMapReservedFrame
	}

	if err := Map(PageFromAddress(tempMappingAddr), frame, FlagPresent|FlagRW); err != nil {
		return 0, err
	}

	return PageFromAddress(tempMappingAddr), nil
}

// Unmap removes a mapping previously installed via a call to Map or MapTemporary.
func Unmap(page Page) *kernel.Error {
	var err *kernel.Error

	walk(page.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
		// If we reached the last level all we need to do is to set the
		// page as non-present and flush its TLB entry
		if pteLevel == pageLevels-1 {
			pte.ClearFlags(FlagPresent)
			flushTLBEntryFn(page.Address())
			return true
		}

		// Next table is not present; this is an invalid mapping
		if !pte.HasFlags(FlagPresent) {
			err = ErrInvalidMapping
			return false
		}

		if pte.HasFlags(FlagHugePage) {
			err = errNoHugePageSupport
			return false
		}

		return true
	})

	return err
}
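A hedged usage sketch for the API above (mapOnePage is a hypothetical caller, not part of this commit; it assumes allocator.Init from earlier in this commit has already run, so a frame allocator is registered): reserve one frame and map it read-write, letting Map build any missing intermediate tables.

package example

import (
	"gopheros/kernel"
	"gopheros/kernel/mem/pmm/allocator"
	"gopheros/kernel/mem/vmm"
)

// mapOnePage reserves a physical frame via the bitmap allocator and maps
// it read-write at the supplied virtual page.
func mapOnePage(page vmm.Page) *kernel.Error {
	frame, err := allocator.AllocFrame()
	if err != nil {
		return err
	}
	// Map initializes any missing intermediate page tables using the
	// frame allocator registered via vmm.SetFrameAllocator.
	return vmm.Map(page, frame, vmm.FlagPresent|vmm.FlagRW)
}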
270
src/gopheros/kernel/mem/vmm/map_test.go
Normal file
@@ -0,0 +1,270 @@
package vmm

import (
	"gopheros/kernel"
	"gopheros/kernel/mem"
	"gopheros/kernel/mem/pmm"
	"runtime"
	"testing"
	"unsafe"
)

func TestNextAddrFn(t *testing.T) {
	// Dummy test to keep coverage happy
	if exp, got := uintptr(123), nextAddrFn(uintptr(123)); exp != got {
		t.Fatalf("expected nextAddrFn to return %v; got %v", exp, got)
	}
}

func TestMapTemporaryAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}

	defer func(origPtePtr func(uintptr) unsafe.Pointer, origNextAddrFn func(uintptr) uintptr, origFlushTLBEntryFn func(uintptr)) {
		ptePtrFn = origPtePtr
		nextAddrFn = origNextAddrFn
		flushTLBEntryFn = origFlushTLBEntryFn
		frameAllocator = nil
	}(ptePtrFn, nextAddrFn, flushTLBEntryFn)

	var physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry
	nextPhysPage := 0

	// allocFn returns pages from index 1; we keep index 0 for the P4 entry
	SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
		nextPhysPage++
		pageAddr := unsafe.Pointer(&physPages[nextPhysPage][0])
		return pmm.Frame(uintptr(pageAddr) >> mem.PageShift), nil
	})

	pteCallCount := 0
	ptePtrFn = func(entry uintptr) unsafe.Pointer {
		pteCallCount++
		// The last 12 bits encode the page table offset in bytes
		// which we need to convert to a uint64 entry
		pteIndex := (entry & uintptr(mem.PageSize-1)) >> mem.PointerShift
		return unsafe.Pointer(&physPages[pteCallCount-1][pteIndex])
	}

	nextAddrFn = func(entry uintptr) uintptr {
		return uintptr(unsafe.Pointer(&physPages[nextPhysPage][0]))
	}

	flushTLBEntryCallCount := 0
	flushTLBEntryFn = func(uintptr) {
		flushTLBEntryCallCount++
	}

	// The temporary mapping address breaks down to:
	// p4 index: 510
	// p3 index: 511
	// p2 index: 511
	// p1 index: 511
	frame := pmm.Frame(123)
	levelIndices := []uint{510, 511, 511, 511}

	page, err := MapTemporary(frame)
	if err != nil {
		t.Fatal(err)
	}

	if got := page.Address(); got != tempMappingAddr {
		t.Fatalf("expected temp mapping virtual address to be %x; got %x", tempMappingAddr, got)
	}

	for level, physPage := range physPages {
		pte := physPage[levelIndices[level]]
		if !pte.HasFlags(FlagPresent | FlagRW) {
			t.Errorf("[pte at level %d] expected entry to have FlagPresent and FlagRW set", level)
		}

		switch {
		case level < pageLevels-1:
			if exp, got := pmm.Frame(uintptr(unsafe.Pointer(&physPages[level+1][0]))>>mem.PageShift), pte.Frame(); got != exp {
				t.Errorf("[pte at level %d] expected entry frame to be %d; got %d", level, exp, got)
			}
		default:
			// The last pte entry should point to frame
			if got := pte.Frame(); got != frame {
				t.Errorf("[pte at level %d] expected entry frame to be %d; got %d", level, frame, got)
			}
		}
	}

	if exp := 1; flushTLBEntryCallCount != exp {
		t.Errorf("expected flushTLBEntry to be called %d times; got %d", exp, flushTLBEntryCallCount)
	}
}

func TestMapTemporaryErrorsAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}

	defer func(origPtePtr func(uintptr) unsafe.Pointer, origNextAddrFn func(uintptr) uintptr, origFlushTLBEntryFn func(uintptr)) {
		ptePtrFn = origPtePtr
		nextAddrFn = origNextAddrFn
		flushTLBEntryFn = origFlushTLBEntryFn
	}(ptePtrFn, nextAddrFn, flushTLBEntryFn)

	var physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry

	// The reserved virt address uses the following page level indices: 510, 511, 511, 511
	p4Index := 510
	frame := pmm.Frame(123)

	t.Run("encounter huge page", func(t *testing.T) {
		physPages[0][p4Index].SetFlags(FlagPresent | FlagHugePage)

		ptePtrFn = func(entry uintptr) unsafe.Pointer {
			// The last 12 bits encode the page table offset in bytes
			// which we need to convert to a uint64 entry
			pteIndex := (entry & uintptr(mem.PageSize-1)) >> mem.PointerShift
			return unsafe.Pointer(&physPages[0][pteIndex])
		}

		if _, err := MapTemporary(frame); err != errNoHugePageSupport {
			t.Fatalf("expected to get errNoHugePageSupport; got %v", err)
		}
	})

	t.Run("allocFn returns an error", func(t *testing.T) {
		defer func() { frameAllocator = nil }()
		physPages[0][p4Index] = 0

		expErr := &kernel.Error{Module: "test", Message: "out of memory"}

		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
			return 0, expErr
		})

		if _, err := MapTemporary(frame); err != expErr {
			t.Fatalf("got unexpected error %v", err)
		}
	})

	t.Run("map ReservedZeroedFrame RW", func(t *testing.T) {
		defer func() { protectReservedZeroedPage = false }()

		protectReservedZeroedPage = true
		if err := Map(Page(0), ReservedZeroedFrame, FlagRW); err != errAttemptToRWMapReservedFrame {
			t.Fatalf("expected errAttemptToRWMapReservedFrame; got: %v", err)
		}
	})

	t.Run("temp-map ReservedZeroedFrame RW", func(t *testing.T) {
		defer func() { protectReservedZeroedPage = false }()

		protectReservedZeroedPage = true
		if _, err := MapTemporary(ReservedZeroedFrame); err != errAttemptToRWMapReservedFrame {
			t.Fatalf("expected errAttemptToRWMapReservedFrame; got: %v", err)
		}
	})
}

func TestUnmapAmd64(t *testing.T) {
	if runtime.GOARCH != "amd64" {
		t.Skip("test requires amd64 runtime; skipping")
	}

	defer func(origPtePtr func(uintptr) unsafe.Pointer, origFlushTLBEntryFn func(uintptr)) {
		ptePtrFn = origPtePtr
		flushTLBEntryFn = origFlushTLBEntryFn
	}(ptePtrFn, flushTLBEntryFn)

	var (
		physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry
		frame     = pmm.Frame(123)
	)

	// Emulate a page mapped to virtAddr 0 across all page levels
	for level := 0; level < pageLevels; level++ {
		physPages[level][0].SetFlags(FlagPresent | FlagRW)
		if level < pageLevels-1 {
			physPages[level][0].SetFrame(pmm.Frame(uintptr(unsafe.Pointer(&physPages[level+1][0])) >> mem.PageShift))
		} else {
			physPages[level][0].SetFrame(frame)
		}
	}

	pteCallCount := 0
	ptePtrFn = func(entry uintptr) unsafe.Pointer {
		pteCallCount++
		return unsafe.Pointer(&physPages[pteCallCount-1][0])
	}

	flushTLBEntryCallCount := 0
	flushTLBEntryFn = func(uintptr) {
		flushTLBEntryCallCount++
	}

	if err := Unmap(PageFromAddress(0)); err != nil {
		t.Fatal(err)
	}

	for level, physPage := range physPages {
|
||||
pte := physPage[0]
|
||||
|
||||
switch {
|
||||
case level < pageLevels-1:
|
||||
if !pte.HasFlags(FlagPresent) {
|
||||
t.Errorf("[pte at level %d] expected entry to retain have FlagPresent set", level)
|
||||
}
|
||||
if exp, got := pmm.Frame(uintptr(unsafe.Pointer(&physPages[level+1][0]))>>mem.PageShift), pte.Frame(); got != exp {
|
||||
t.Errorf("[pte at level %d] expected entry frame to still be %d; got %d", level, exp, got)
|
||||
}
|
||||
default:
|
||||
if pte.HasFlags(FlagPresent) {
|
||||
t.Errorf("[pte at level %d] expected entry not to have FlagPresent set", level)
|
||||
}
|
||||
|
||||
// The last pte entry should still point to frame
|
||||
if got := pte.Frame(); got != frame {
|
||||
t.Errorf("[pte at level %d] expected entry frame to be %d; got %d", level, frame, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if exp := 1; flushTLBEntryCallCount != exp {
|
||||
t.Errorf("expected flushTLBEntry to be called %d times; got %d", exp, flushTLBEntryCallCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmapErrorsAmd64(t *testing.T) {
|
||||
if runtime.GOARCH != "amd64" {
|
||||
t.Skip("test requires amd64 runtime; skipping")
|
||||
}
|
||||
|
||||
defer func(origPtePtr func(uintptr) unsafe.Pointer, origNextAddrFn func(uintptr) uintptr, origFlushTLBEntryFn func(uintptr)) {
|
||||
ptePtrFn = origPtePtr
|
||||
nextAddrFn = origNextAddrFn
|
||||
flushTLBEntryFn = origFlushTLBEntryFn
|
||||
}(ptePtrFn, nextAddrFn, flushTLBEntryFn)
|
||||
|
||||
var physPages [pageLevels][mem.PageSize >> mem.PointerShift]pageTableEntry
|
||||
|
||||
t.Run("encounter huge page", func(t *testing.T) {
|
||||
physPages[0][0].SetFlags(FlagPresent | FlagHugePage)
|
||||
|
||||
ptePtrFn = func(entry uintptr) unsafe.Pointer {
|
||||
// The last 12 bits encode the page table offset in bytes
|
||||
// which we need to convert to a uint64 entry
|
||||
pteIndex := (entry & uintptr(mem.PageSize-1)) >> mem.PointerShift
|
||||
return unsafe.Pointer(&physPages[0][pteIndex])
|
||||
}
|
||||
|
||||
if err := Unmap(PageFromAddress(0)); err != errNoHugePageSupport {
|
||||
t.Fatalf("expected to get errNoHugePageSupport; got %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("virtual address not mapped", func(t *testing.T) {
|
||||
physPages[0][0].ClearFlags(FlagPresent)
|
||||
|
||||
if err := Unmap(PageFromAddress(0)); err != ErrInvalidMapping {
|
||||
t.Fatalf("expected to get ErrInvalidMapping; got %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
19
src/gopheros/kernel/mem/vmm/page.go
Normal file
@@ -0,0 +1,19 @@
package vmm

import "gopheros/kernel/mem"

// Page describes a virtual memory page index.
type Page uintptr

// Address returns the virtual memory address pointed to by this Page.
func (f Page) Address() uintptr {
    return uintptr(f << mem.PageShift)
}

// PageFromAddress returns a Page that corresponds to the given virtual
// address. This function can handle both page-aligned and unaligned virtual
// addresses. In the latter case, the input address will be rounded down to
// the page that contains it.
func PageFromAddress(virtAddr uintptr) Page {
    return Page((virtAddr & ^(uintptr(mem.PageSize - 1))) >> mem.PageShift)
}
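With the usual amd64 constants (mem.PageShift == 12, mem.PageSize == 4096; an assumption here, since those constants live in the mem package), the arithmetic above rounds any address down to its containing page. A minimal sketch with hypothetical values:

// Hypothetical values for illustration only.
page := PageFromAddress(0x1a37) // 0x1a37 & ^0xfff == 0x1000 -> Page(1)
addr := page.Address()          // Page(1) << 12 == 0x1000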
34
src/gopheros/kernel/mem/vmm/page_test.go
Normal file
@@ -0,0 +1,34 @@
package vmm

import (
    "gopheros/kernel/mem"
    "testing"
)

func TestPageMethods(t *testing.T) {
    for pageIndex := uint64(0); pageIndex < 128; pageIndex++ {
        page := Page(pageIndex)

        if exp, got := uintptr(pageIndex<<mem.PageShift), page.Address(); got != exp {
            t.Errorf("expected page (%d, index: %d) call to Address() to return %x; got %x", page, pageIndex, exp, got)
        }
    }
}

func TestPageFromAddress(t *testing.T) {
    specs := []struct {
        input   uintptr
        expPage Page
    }{
        {0, Page(0)},
        {4095, Page(0)},
        {4096, Page(1)},
        {4123, Page(1)},
    }

    for specIndex, spec := range specs {
        if got := PageFromAddress(spec.input); got != spec.expPage {
            t.Errorf("[spec %d] expected returned page to be %v; got %v", specIndex, spec.expPage, got)
        }
    }
}
135
src/gopheros/kernel/mem/vmm/pdt.go
Normal file
@@ -0,0 +1,135 @@
package vmm

import (
    "gopheros/kernel"
    "gopheros/kernel/cpu"
    "gopheros/kernel/mem"
    "gopheros/kernel/mem/pmm"
    "unsafe"
)

var (
    // activePDTFn is used by tests to override calls to activePDT which
    // will cause a fault if called in user-mode.
    activePDTFn = cpu.ActivePDT

    // switchPDTFn is used by tests to override calls to switchPDT which
    // will cause a fault if called in user-mode.
    switchPDTFn = cpu.SwitchPDT

    // mapFn is used by tests and is automatically inlined by the compiler.
    mapFn = Map

    // mapTemporaryFn is used by tests and is automatically inlined by the compiler.
    mapTemporaryFn = MapTemporary

    // unmapFn is used by tests and is automatically inlined by the compiler.
    unmapFn = Unmap
)

// PageDirectoryTable describes the top-most table in a multi-level paging scheme.
type PageDirectoryTable struct {
    pdtFrame pmm.Frame
}

// Init sets up the page table directory starting at the supplied physical
// address. If the supplied frame does not match the currently active PDT, then
// Init assumes that this is a new page table directory that needs
// bootstrapping. In such a case, a temporary mapping is established so that
// Init can:
//  - call mem.Memset to clear the frame contents
//  - setup a recursive mapping for the last table entry to the page itself.
func (pdt *PageDirectoryTable) Init(pdtFrame pmm.Frame) *kernel.Error {
    pdt.pdtFrame = pdtFrame

    // Check the active PDT physical address. If it matches the input pdt
    // then nothing more needs to be done.
    activePdtAddr := activePDTFn()
    if pdtFrame.Address() == activePdtAddr {
        return nil
    }

    // Create a temporary mapping for the pdt frame so we can work on it
    pdtPage, err := mapTemporaryFn(pdtFrame)
    if err != nil {
        return err
    }

    // Clear the page contents and setup recursive mapping for the last PDT entry
    mem.Memset(pdtPage.Address(), 0, mem.PageSize)
    lastPdtEntry := (*pageTableEntry)(unsafe.Pointer(pdtPage.Address() + (((1 << pageLevelBits[0]) - 1) << mem.PointerShift)))
    *lastPdtEntry = 0
    lastPdtEntry.SetFlags(FlagPresent | FlagRW)
    lastPdtEntry.SetFrame(pdtFrame)

    // Remove temporary mapping
    unmapFn(pdtPage)

    return nil
}

// Map establishes a mapping between a virtual page and a physical memory frame
// using this PDT. This method behaves in a similar fashion to the global Map()
// function with the difference that it also supports inactive PDTs by
// establishing a temporary mapping so that Map() can access the inactive PDT
// entries.
func (pdt PageDirectoryTable) Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
    var (
        activePdtFrame   = pmm.Frame(activePDTFn() >> mem.PageShift)
        lastPdtEntryAddr uintptr
        lastPdtEntry     *pageTableEntry
    )
    // If this table is not active we need to temporarily map it to the
    // last entry in the active PDT so we can access it using the recursive
    // virtual address scheme.
    if activePdtFrame != pdt.pdtFrame {
        lastPdtEntryAddr = activePdtFrame.Address() + (((1 << pageLevelBits[0]) - 1) << mem.PointerShift)
        lastPdtEntry = (*pageTableEntry)(unsafe.Pointer(lastPdtEntryAddr))
        lastPdtEntry.SetFrame(pdt.pdtFrame)
        flushTLBEntryFn(lastPdtEntryAddr)
    }

    err := mapFn(page, frame, flags)

    if activePdtFrame != pdt.pdtFrame {
        lastPdtEntry.SetFrame(activePdtFrame)
        flushTLBEntryFn(lastPdtEntryAddr)
    }

    return err
}

// Unmap removes a mapping previously installed by a call to Map() on this PDT.
// This method behaves in a similar fashion to the global Unmap() function with
// the difference that it also supports inactive PDTs by establishing a
// temporary mapping so that Unmap() can access the inactive PDT entries.
func (pdt PageDirectoryTable) Unmap(page Page) *kernel.Error {
    var (
        activePdtFrame   = pmm.Frame(activePDTFn() >> mem.PageShift)
        lastPdtEntryAddr uintptr
        lastPdtEntry     *pageTableEntry
    )
    // If this table is not active we need to temporarily map it to the
    // last entry in the active PDT so we can access it using the recursive
    // virtual address scheme.
    if activePdtFrame != pdt.pdtFrame {
        lastPdtEntryAddr = activePdtFrame.Address() + (((1 << pageLevelBits[0]) - 1) << mem.PointerShift)
        lastPdtEntry = (*pageTableEntry)(unsafe.Pointer(lastPdtEntryAddr))
        lastPdtEntry.SetFrame(pdt.pdtFrame)
        flushTLBEntryFn(lastPdtEntryAddr)
    }

    err := unmapFn(page)

    if activePdtFrame != pdt.pdtFrame {
        lastPdtEntry.SetFrame(activePdtFrame)
        flushTLBEntryFn(lastPdtEntryAddr)
    }

    return err
}

// Activate enables this page directory table and flushes the TLB.
func (pdt PageDirectoryTable) Activate() {
    switchPDTFn(pdt.pdtFrame.Address())
}
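The last-entry address used by Init, Map and Unmap above is plain index arithmetic. Assuming the amd64 values pageLevelBits[0] == 9 and mem.PointerShift == 3 (512 entries of 8 bytes per table), the expression selects entry 511 of the table:

// ((1 << 9) - 1) << 3 == 511 * 8 == 0xff8
// lastPdtEntryAddr = activePdtFrame.Address() + 0xff8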
330
src/gopheros/kernel/mem/vmm/pdt_test.go
Normal file
@@ -0,0 +1,330 @@
package vmm

import (
    "gopheros/kernel"
    "gopheros/kernel/mem"
    "gopheros/kernel/mem/pmm"
    "runtime"
    "testing"
    "unsafe"
)

func TestPageDirectoryTableInitAmd64(t *testing.T) {
    if runtime.GOARCH != "amd64" {
        t.Skip("test requires amd64 runtime; skipping")
    }

    defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMapTemporary func(pmm.Frame) (Page, *kernel.Error), origUnmap func(Page) *kernel.Error) {
        flushTLBEntryFn = origFlushTLBEntry
        activePDTFn = origActivePDT
        mapTemporaryFn = origMapTemporary
        unmapFn = origUnmap
    }(flushTLBEntryFn, activePDTFn, mapTemporaryFn, unmapFn)

    t.Run("already mapped PDT", func(t *testing.T) {
        var (
            pdt      PageDirectoryTable
            pdtFrame = pmm.Frame(123)
        )

        activePDTFn = func() uintptr {
            return pdtFrame.Address()
        }

        mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) {
            t.Fatal("unexpected call to MapTemporary")
            return 0, nil
        }

        unmapFn = func(_ Page) *kernel.Error {
            t.Fatal("unexpected call to Unmap")
            return nil
        }

        if err := pdt.Init(pdtFrame); err != nil {
            t.Fatal(err)
        }
    })

    t.Run("not mapped PDT", func(t *testing.T) {
        var (
            pdt      PageDirectoryTable
            pdtFrame = pmm.Frame(123)
            physPage [mem.PageSize >> mem.PointerShift]pageTableEntry
        )

        // Fill phys page with random junk
        mem.Memset(uintptr(unsafe.Pointer(&physPage[0])), 0xf0, mem.PageSize)

        activePDTFn = func() uintptr {
            return 0
        }

        mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) {
            return PageFromAddress(uintptr(unsafe.Pointer(&physPage[0]))), nil
        }

        flushTLBEntryFn = func(_ uintptr) {}

        unmapCallCount := 0
        unmapFn = func(_ Page) *kernel.Error {
            unmapCallCount++
            return nil
        }

        if err := pdt.Init(pdtFrame); err != nil {
            t.Fatal(err)
        }

        if unmapCallCount != 1 {
            t.Fatalf("expected Unmap to be called 1 time; called %d", unmapCallCount)
        }

        for i := 0; i < len(physPage)-1; i++ {
            if physPage[i] != 0 {
                t.Errorf("expected PDT entry %d to be cleared; got %x", i, physPage[i])
            }
        }

        // The last page should be recursively mapped to the PDT
        lastPdtEntry := physPage[len(physPage)-1]
        if !lastPdtEntry.HasFlags(FlagPresent | FlagRW) {
            t.Fatal("expected last PDT entry to have FlagPresent and FlagRW set")
        }

        if lastPdtEntry.Frame() != pdtFrame {
            t.Fatalf("expected last PDT entry to be recursively mapped to physical frame %x; got %x", pdtFrame, lastPdtEntry.Frame())
        }
    })

    t.Run("temporary mapping failure", func(t *testing.T) {
        var (
            pdt      PageDirectoryTable
            pdtFrame = pmm.Frame(123)
        )

        activePDTFn = func() uintptr {
            return 0
        }

        expErr := &kernel.Error{Module: "test", Message: "error mapping page"}

        mapTemporaryFn = func(_ pmm.Frame) (Page, *kernel.Error) {
            return 0, expErr
        }

        unmapFn = func(_ Page) *kernel.Error {
            t.Fatal("unexpected call to Unmap")
            return nil
        }

        if err := pdt.Init(pdtFrame); err != expErr {
            t.Fatalf("expected to get error: %v; got %v", *expErr, err)
        }
    })
}

func TestPageDirectoryTableMapAmd64(t *testing.T) {
    if runtime.GOARCH != "amd64" {
        t.Skip("test requires amd64 runtime; skipping")
    }

    defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origMap func(Page, pmm.Frame, PageTableEntryFlag) *kernel.Error) {
        flushTLBEntryFn = origFlushTLBEntry
        activePDTFn = origActivePDT
        mapFn = origMap
    }(flushTLBEntryFn, activePDTFn, mapFn)

    t.Run("already mapped PDT", func(t *testing.T) {
        var (
            pdtFrame = pmm.Frame(123)
            pdt      = PageDirectoryTable{pdtFrame: pdtFrame}
            page     = PageFromAddress(uintptr(100 * mem.Mb))
        )

        activePDTFn = func() uintptr {
            return pdtFrame.Address()
        }

        mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag) *kernel.Error {
            return nil
        }

        flushCallCount := 0
        flushTLBEntryFn = func(_ uintptr) {
            flushCallCount++
        }

        if err := pdt.Map(page, pmm.Frame(321), FlagRW); err != nil {
            t.Fatal(err)
        }

        if exp := 0; flushCallCount != exp {
            t.Fatalf("expected flushTLBEntry to be called %d times; called %d", exp, flushCallCount)
        }
    })

    t.Run("not mapped PDT", func(t *testing.T) {
        var (
            pdtFrame       = pmm.Frame(123)
            pdt            = PageDirectoryTable{pdtFrame: pdtFrame}
            page           = PageFromAddress(uintptr(100 * mem.Mb))
            activePhysPage [mem.PageSize >> mem.PointerShift]pageTableEntry
            activePdtFrame = pmm.Frame(uintptr(unsafe.Pointer(&activePhysPage[0])) >> mem.PageShift)
        )

        // Initially, activePhysPage is recursively mapped to itself
        activePhysPage[len(activePhysPage)-1].SetFlags(FlagPresent | FlagRW)
        activePhysPage[len(activePhysPage)-1].SetFrame(activePdtFrame)

        activePDTFn = func() uintptr {
            return activePdtFrame.Address()
        }

        mapFn = func(_ Page, _ pmm.Frame, _ PageTableEntryFlag) *kernel.Error {
            return nil
        }

        flushCallCount := 0
        flushTLBEntryFn = func(_ uintptr) {
            switch flushCallCount {
            case 0:
                // the first time we flush the tlb entry, the last entry of
                // the active pdt should be pointing to pdtFrame
                if got := activePhysPage[len(activePhysPage)-1].Frame(); got != pdtFrame {
                    t.Fatalf("expected last PDT entry of active PDT to be re-mapped to frame %x; got %x", pdtFrame, got)
                }
            case 1:
                // the second time we flush the tlb entry, the last entry of
                // the active pdt should be pointing back to activePdtFrame
                if got := activePhysPage[len(activePhysPage)-1].Frame(); got != activePdtFrame {
                    t.Fatalf("expected last PDT entry of active PDT to be mapped back to frame %x; got %x", activePdtFrame, got)
                }
            }
            flushCallCount++
        }

        if err := pdt.Map(page, pmm.Frame(321), FlagRW); err != nil {
            t.Fatal(err)
        }

        if exp := 2; flushCallCount != exp {
            t.Fatalf("expected flushTLBEntry to be called %d times; called %d", exp, flushCallCount)
        }
    })
}

func TestPageDirectoryTableUnmapAmd64(t *testing.T) {
    if runtime.GOARCH != "amd64" {
        t.Skip("test requires amd64 runtime; skipping")
    }

    defer func(origFlushTLBEntry func(uintptr), origActivePDT func() uintptr, origUnmap func(Page) *kernel.Error) {
        flushTLBEntryFn = origFlushTLBEntry
        activePDTFn = origActivePDT
        unmapFn = origUnmap
    }(flushTLBEntryFn, activePDTFn, unmapFn)

    t.Run("already mapped PDT", func(t *testing.T) {
        var (
            pdtFrame = pmm.Frame(123)
            pdt      = PageDirectoryTable{pdtFrame: pdtFrame}
            page     = PageFromAddress(uintptr(100 * mem.Mb))
        )

        activePDTFn = func() uintptr {
            return pdtFrame.Address()
        }

        unmapFn = func(_ Page) *kernel.Error {
            return nil
        }

        flushCallCount := 0
        flushTLBEntryFn = func(_ uintptr) {
            flushCallCount++
        }

        if err := pdt.Unmap(page); err != nil {
            t.Fatal(err)
        }

        if exp := 0; flushCallCount != exp {
            t.Fatalf("expected flushTLBEntry to be called %d times; called %d", exp, flushCallCount)
        }
    })

    t.Run("not mapped PDT", func(t *testing.T) {
        var (
            pdtFrame       = pmm.Frame(123)
            pdt            = PageDirectoryTable{pdtFrame: pdtFrame}
            page           = PageFromAddress(uintptr(100 * mem.Mb))
            activePhysPage [mem.PageSize >> mem.PointerShift]pageTableEntry
            activePdtFrame = pmm.Frame(uintptr(unsafe.Pointer(&activePhysPage[0])) >> mem.PageShift)
        )

        // Initially, activePhysPage is recursively mapped to itself
        activePhysPage[len(activePhysPage)-1].SetFlags(FlagPresent | FlagRW)
        activePhysPage[len(activePhysPage)-1].SetFrame(activePdtFrame)

        activePDTFn = func() uintptr {
            return activePdtFrame.Address()
        }

        unmapFn = func(_ Page) *kernel.Error {
            return nil
        }

        flushCallCount := 0
        flushTLBEntryFn = func(_ uintptr) {
            switch flushCallCount {
            case 0:
                // the first time we flush the tlb entry, the last entry of
                // the active pdt should be pointing to pdtFrame
                if got := activePhysPage[len(activePhysPage)-1].Frame(); got != pdtFrame {
                    t.Fatalf("expected last PDT entry of active PDT to be re-mapped to frame %x; got %x", pdtFrame, got)
                }
            case 1:
                // the second time we flush the tlb entry, the last entry of
                // the active pdt should be pointing back to activePdtFrame
                if got := activePhysPage[len(activePhysPage)-1].Frame(); got != activePdtFrame {
                    t.Fatalf("expected last PDT entry of active PDT to be mapped back to frame %x; got %x", activePdtFrame, got)
                }
            }
            flushCallCount++
        }

        if err := pdt.Unmap(page); err != nil {
            t.Fatal(err)
        }

        if exp := 2; flushCallCount != exp {
            t.Fatalf("expected flushTLBEntry to be called %d times; called %d", exp, flushCallCount)
        }
    })
}

func TestPageDirectoryTableActivateAmd64(t *testing.T) {
    if runtime.GOARCH != "amd64" {
        t.Skip("test requires amd64 runtime; skipping")
    }

    defer func(origSwitchPDT func(uintptr)) {
        switchPDTFn = origSwitchPDT
    }(switchPDTFn)

    var (
        pdtFrame = pmm.Frame(123)
        pdt      = PageDirectoryTable{pdtFrame: pdtFrame}
    )

    switchPDTCallCount := 0
    switchPDTFn = func(_ uintptr) {
        switchPDTCallCount++
    }

    pdt.Activate()
    if exp := 1; switchPDTCallCount != exp {
        t.Fatalf("expected switchPDT to be called %d times; called %d", exp, switchPDTCallCount)
    }
}
74
src/gopheros/kernel/mem/vmm/pte.go
Normal file
@@ -0,0 +1,74 @@
package vmm

import (
    "gopheros/kernel"
    "gopheros/kernel/mem"
    "gopheros/kernel/mem/pmm"
)

var (
    // ErrInvalidMapping is returned when trying to lookup a virtual memory address that is not yet mapped.
    ErrInvalidMapping = &kernel.Error{Module: "vmm", Message: "virtual address does not point to a mapped physical page"}
)

// PageTableEntryFlag describes a flag that can be applied to a page table entry.
type PageTableEntryFlag uintptr

// pageTableEntry describes a page table entry. These entries encode
// a physical frame address and a set of flags. The actual format
// of the entry and flags is architecture-dependent.
type pageTableEntry uintptr

// HasFlags returns true if this entry has all the input flags set.
func (pte pageTableEntry) HasFlags(flags PageTableEntryFlag) bool {
    return (uintptr(pte) & uintptr(flags)) == uintptr(flags)
}

// HasAnyFlag returns true if this entry has at least one of the input flags set.
func (pte pageTableEntry) HasAnyFlag(flags PageTableEntryFlag) bool {
    return (uintptr(pte) & uintptr(flags)) != 0
}

// SetFlags sets the input list of flags on the page table entry.
func (pte *pageTableEntry) SetFlags(flags PageTableEntryFlag) {
    *pte = (pageTableEntry)(uintptr(*pte) | uintptr(flags))
}

// ClearFlags unsets the input list of flags from the page table entry.
func (pte *pageTableEntry) ClearFlags(flags PageTableEntryFlag) {
    *pte = (pageTableEntry)(uintptr(*pte) &^ uintptr(flags))
}

// Frame returns the physical page frame that this page table entry points to.
func (pte pageTableEntry) Frame() pmm.Frame {
    return pmm.Frame((uintptr(pte) & ptePhysPageMask) >> mem.PageShift)
}

// SetFrame updates the page table entry to point to the given physical frame.
func (pte *pageTableEntry) SetFrame(frame pmm.Frame) {
    *pte = (pageTableEntry)((uintptr(*pte) &^ ptePhysPageMask) | frame.Address())
}

// pteForAddress returns the final page table entry that corresponds to a
// particular virtual address. The function performs a page table walk till it
// reaches the final page table entry, returning ErrInvalidMapping if the page
// is not present.
func pteForAddress(virtAddr uintptr) (*pageTableEntry, *kernel.Error) {
    var (
        err   *kernel.Error
        entry *pageTableEntry
    )

    walk(virtAddr, func(pteLevel uint8, pte *pageTableEntry) bool {
        if !pte.HasFlags(FlagPresent) {
            entry = nil
            err = ErrInvalidMapping
            return false
        }

        entry = pte
        return true
    })

    return entry, err
}
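A quick illustration of the frame encoding above, assuming a 4 KiB page size; the exact ptePhysPageMask value is architecture-dependent and defined elsewhere in the package:

// Hypothetical values for illustration only.
var pte pageTableEntry
pte.SetFrame(pmm.Frame(123)) // the masked bits now hold 123 << 12
_ = pte.Frame()              // (entry & ptePhysPageMask) >> 12 == 123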
60
src/gopheros/kernel/mem/vmm/pte_test.go
Normal file
@@ -0,0 +1,60 @@
package vmm

import (
    "gopheros/kernel/mem/pmm"
    "testing"
)

func TestPageTableEntryFlags(t *testing.T) {
    var (
        pte   pageTableEntry
        flag1 = PageTableEntryFlag(1 << 10)
        flag2 = PageTableEntryFlag(1 << 21)
    )

    if pte.HasAnyFlag(flag1 | flag2) {
        t.Fatalf("expected HasAnyFlag to return false")
    }

    pte.SetFlags(flag1 | flag2)

    if !pte.HasAnyFlag(flag1 | flag2) {
        t.Fatalf("expected HasAnyFlag to return true")
    }

    if !pte.HasFlags(flag1 | flag2) {
        t.Fatalf("expected HasFlags to return true")
    }

    pte.ClearFlags(flag1)

    if !pte.HasAnyFlag(flag1 | flag2) {
        t.Fatalf("expected HasAnyFlag to return true")
    }

    if pte.HasFlags(flag1 | flag2) {
        t.Fatalf("expected HasFlags to return false")
    }

    pte.ClearFlags(flag1 | flag2)

    if pte.HasAnyFlag(flag1 | flag2) {
        t.Fatalf("expected HasAnyFlag to return false")
    }

    if pte.HasFlags(flag1 | flag2) {
        t.Fatalf("expected HasFlags to return false")
    }
}

func TestPageTableEntryFrameEncoding(t *testing.T) {
    var (
        pte       pageTableEntry
        physFrame = pmm.Frame(123)
    )

    pte.SetFrame(physFrame)
    if got := pte.Frame(); got != physFrame {
        t.Fatalf("expected pte.Frame() to return %v; got %v", physFrame, got)
    }
}
19
src/gopheros/kernel/mem/vmm/translate.go
Normal file
@@ -0,0 +1,19 @@
package vmm

import "gopheros/kernel"

// Translate returns the physical address that corresponds to the supplied
// virtual address or ErrInvalidMapping if the virtual address does not
// correspond to a mapped physical address.
func Translate(virtAddr uintptr) (uintptr, *kernel.Error) {
    pte, err := pteForAddress(virtAddr)
    if err != nil {
        return 0, err
    }

    // Calculate the physical address by taking the physical frame address and
    // appending the offset from the virtual address
    physAddr := pte.Frame().Address() + (virtAddr & ((1 << pageLevelShifts[pageLevels-1]) - 1))

    return physAddr, nil
}
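Since pageLevelShifts[pageLevels-1] is the page-offset width (12 bits for 4 KiB pages, an assumption that matches the tests in this commit), the mask keeps only the offset inside the page. Using the values from the test below (frame 42, virtAddr 1234):

// physAddr = (42 << 12) + (1234 & 0xfff)
//          = 0x2a000 + 0x4d2
//          = 0x2a4d2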
72
src/gopheros/kernel/mem/vmm/translate_test.go
Normal file
@@ -0,0 +1,72 @@
package vmm

import (
    "gopheros/kernel/mem/pmm"
    "runtime"
    "testing"
    "unsafe"
)

func TestTranslateAmd64(t *testing.T) {
    if runtime.GOARCH != "amd64" {
        t.Skip("test requires amd64 runtime; skipping")
    }

    defer func(origPtePtr func(uintptr) unsafe.Pointer) {
        ptePtrFn = origPtePtr
    }(ptePtrFn)

    // the virtual address just contains the page offset
    virtAddr := uintptr(1234)
    expFrame := pmm.Frame(42)
    expPhysAddr := expFrame.Address() + virtAddr
    specs := [][pageLevels]bool{
        {true, true, true, true},
        {false, true, true, true},
        {true, false, true, true},
        {true, true, false, true},
        {true, true, true, false},
    }

    for specIndex, spec := range specs {
        pteCallCount := 0
        ptePtrFn = func(entry uintptr) unsafe.Pointer {
            var pte pageTableEntry
            pte.SetFrame(expFrame)
            if specs[specIndex][pteCallCount] {
                pte.SetFlags(FlagPresent)
            }
            pteCallCount++

            return unsafe.Pointer(&pte)
        }

        // An error is expected if any page level contains a non-present page
        expError := false
        for _, hasMapping := range spec {
            if !hasMapping {
                expError = true
                break
            }
        }

        physAddr, err := Translate(virtAddr)
        switch {
        case expError && err != ErrInvalidMapping:
            t.Errorf("[spec %d] expected to get ErrInvalidMapping; got %v", specIndex, err)
        case !expError && err != nil:
            t.Errorf("[spec %d] unexpected error %v", specIndex, err)
        case !expError && physAddr != expPhysAddr:
            t.Errorf("[spec %d] expected phys addr to be 0x%x; got 0x%x", specIndex, expPhysAddr, physAddr)
        }
    }
}

/*
    phys, err := vmm.Translate(uintptr(100 * mem.Mb))
    if err != nil {
        early.Printf("err: %s\n", err.Error())
    } else {
        early.Printf("phys: 0x%x\n", phys)
    }
*/
155
src/gopheros/kernel/mem/vmm/vmm.go
Normal file
@@ -0,0 +1,155 @@
package vmm

import (
    "gopheros/kernel"
    "gopheros/kernel/cpu"
    "gopheros/kernel/irq"
    "gopheros/kernel/kfmt/early"
    "gopheros/kernel/mem"
    "gopheros/kernel/mem/pmm"
)

var (
    // frameAllocator points to a frame allocator function registered using
    // SetFrameAllocator.
    frameAllocator FrameAllocatorFn

    // the following functions are mocked by tests and are automatically
    // inlined by the compiler.
    handleExceptionWithCodeFn = irq.HandleExceptionWithCode
    readCR2Fn                 = cpu.ReadCR2

    errUnrecoverableFault = &kernel.Error{Module: "vmm", Message: "page/gpf fault"}
)

// FrameAllocatorFn is a function that can allocate physical frames.
type FrameAllocatorFn func() (pmm.Frame, *kernel.Error)

// SetFrameAllocator registers a frame allocator function that will be used by
// the vmm code when new physical frames need to be allocated.
func SetFrameAllocator(allocFn FrameAllocatorFn) {
    frameAllocator = allocFn
}

func pageFaultHandler(errorCode uint64, frame *irq.Frame, regs *irq.Regs) {
    var (
        faultAddress = uintptr(readCR2Fn())
        faultPage    = PageFromAddress(faultAddress)
        pageEntry    *pageTableEntry
    )

    // Lookup entry for the page where the fault occurred
    walk(faultPage.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
        nextIsPresent := pte.HasFlags(FlagPresent)

        if pteLevel == pageLevels-1 && nextIsPresent {
            pageEntry = pte
        }

        // Abort walk if the next page table entry is missing
        return nextIsPresent
    })

    // CoW is supported for RO pages with the CoW flag set
    if pageEntry != nil && !pageEntry.HasFlags(FlagRW) && pageEntry.HasFlags(FlagCopyOnWrite) {
        var (
            copy    pmm.Frame
            tmpPage Page
            err     *kernel.Error
        )

        if copy, err = frameAllocator(); err != nil {
            nonRecoverablePageFault(faultAddress, errorCode, frame, regs, err)
        } else if tmpPage, err = mapTemporaryFn(copy); err != nil {
            nonRecoverablePageFault(faultAddress, errorCode, frame, regs, err)
        } else {
            // Copy page contents, mark as RW and remove CoW flag
            mem.Memcopy(faultPage.Address(), tmpPage.Address(), mem.PageSize)
            unmapFn(tmpPage)

            // Update mapping to point to the new frame, flag it as RW and
            // remove the CoW flag
            pageEntry.ClearFlags(FlagCopyOnWrite)
            pageEntry.SetFlags(FlagPresent | FlagRW)
            pageEntry.SetFrame(copy)
            flushTLBEntryFn(faultPage.Address())

            // Fault recovered; retry the instruction that caused the fault
            return
        }
    }

    nonRecoverablePageFault(faultAddress, errorCode, frame, regs, errUnrecoverableFault)
}

func nonRecoverablePageFault(faultAddress uintptr, errorCode uint64, frame *irq.Frame, regs *irq.Regs, err *kernel.Error) {
    early.Printf("\nPage fault while accessing address: 0x%16x\nReason: ", faultAddress)
    switch {
    case errorCode == 0:
        early.Printf("read from non-present page")
    case errorCode == 1:
        early.Printf("page protection violation (read)")
    case errorCode == 2:
        early.Printf("write to non-present page")
    case errorCode == 3:
        early.Printf("page protection violation (write)")
    case errorCode == 4:
        early.Printf("page-fault in user-mode")
    case errorCode == 8:
        early.Printf("page table has reserved bit set")
    case errorCode == 16:
        early.Printf("instruction fetch")
    default:
        early.Printf("unknown")
    }

    early.Printf("\n\nRegisters:\n")
    regs.Print()
    frame.Print()

    // TODO: Revisit this when user-mode tasks are implemented
    panic(err)
}

func generalProtectionFaultHandler(_ uint64, frame *irq.Frame, regs *irq.Regs) {
    early.Printf("\nGeneral protection fault while accessing address: 0x%x\n", readCR2Fn())
    early.Printf("Registers:\n")
    regs.Print()
    frame.Print()

    // TODO: Revisit this when user-mode tasks are implemented
    panic(errUnrecoverableFault)
}

// reserveZeroedFrame reserves a physical frame to be used together with
// FlagCopyOnWrite for lazy allocation requests.
func reserveZeroedFrame() *kernel.Error {
    var (
        err      *kernel.Error
        tempPage Page
    )

    if ReservedZeroedFrame, err = frameAllocator(); err != nil {
        return err
    } else if tempPage, err = mapTemporaryFn(ReservedZeroedFrame); err != nil {
        return err
    }
    mem.Memset(tempPage.Address(), 0, mem.PageSize)
    unmapFn(tempPage)

    // From this point on, ReservedZeroedFrame cannot be mapped with a RW flag
    protectReservedZeroedPage = true
    return nil
}

// Init initializes the vmm system and installs paging-related exception
// handlers.
func Init() *kernel.Error {
    if err := reserveZeroedFrame(); err != nil {
        return err
    }

    handleExceptionWithCodeFn(irq.PageFaultException, pageFaultHandler)
    handleExceptionWithCodeFn(irq.GPFException, generalProtectionFaultHandler)
    return nil
}
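A side note on the errorCode switch in nonRecoverablePageFault: the values it matches line up with the x86 page-fault error-code bits, so an equivalent and slightly more general reading treats the code as a bitfield. A minimal sketch, not part of the commit:

// x86 page-fault error-code bits (architecture-defined):
const (
    pfPresent    = 1 << 0 // 0: non-present page, 1: protection violation
    pfWrite      = 1 << 1 // 0: read access, 1: write access
    pfUser       = 1 << 2 // fault occurred while in user-mode
    pfRsvd       = 1 << 3 // a page table entry has a reserved bit set
    pfInstrFetch = 1 << 4 // fault caused by an instruction fetch
)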
281
src/gopheros/kernel/mem/vmm/vmm_test.go
Normal file
@@ -0,0 +1,281 @@
package vmm

import (
    "bytes"
    "fmt"
    "gopheros/kernel"
    "gopheros/kernel/cpu"
    "gopheros/kernel/driver/video/console"
    "gopheros/kernel/hal"
    "gopheros/kernel/irq"
    "gopheros/kernel/mem"
    "gopheros/kernel/mem/pmm"
    "strings"
    "testing"
    "unsafe"
)

func TestRecoverablePageFault(t *testing.T) {
    var (
        frame      irq.Frame
        regs       irq.Regs
        pageEntry  pageTableEntry
        origPage   = make([]byte, mem.PageSize)
        clonedPage = make([]byte, mem.PageSize)
        err        = &kernel.Error{Module: "test", Message: "something went wrong"}
    )

    defer func(origPtePtr func(uintptr) unsafe.Pointer) {
        ptePtrFn = origPtePtr
        readCR2Fn = cpu.ReadCR2
        frameAllocator = nil
        mapTemporaryFn = MapTemporary
        unmapFn = Unmap
        flushTLBEntryFn = cpu.FlushTLBEntry
    }(ptePtrFn)

    specs := []struct {
        pteFlags   PageTableEntryFlag
        allocError *kernel.Error
        mapError   *kernel.Error
        expPanic   bool
    }{
        // Missing page
        {0, nil, nil, true},
        // Page is present but CoW flag not set
        {FlagPresent, nil, nil, true},
        // Page is present but both CoW and RW flags set
        {FlagPresent | FlagRW | FlagCopyOnWrite, nil, nil, true},
        // Page is present with CoW flag set but allocating a page copy fails
        {FlagPresent | FlagCopyOnWrite, err, nil, true},
        // Page is present with CoW flag set but mapping the page copy fails
        {FlagPresent | FlagCopyOnWrite, nil, err, true},
        // Page is present with CoW flag set
        {FlagPresent | FlagCopyOnWrite, nil, nil, false},
    }

    mockTTY()

    ptePtrFn = func(entry uintptr) unsafe.Pointer { return unsafe.Pointer(&pageEntry) }
    readCR2Fn = func() uint64 { return uint64(uintptr(unsafe.Pointer(&origPage[0]))) }
    unmapFn = func(_ Page) *kernel.Error { return nil }
    flushTLBEntryFn = func(_ uintptr) {}

    for specIndex, spec := range specs {
        t.Run(fmt.Sprint(specIndex), func(t *testing.T) {
            defer func() {
                err := recover()
                if spec.expPanic && err == nil {
                    t.Error("expected a panic")
                } else if !spec.expPanic {
                    if err != nil {
                        t.Error("unexpected panic")
                        return
                    }

                    for i := 0; i < len(origPage); i++ {
                        if origPage[i] != clonedPage[i] {
                            t.Errorf("expected clone page to be a copy of the original page; mismatch at index %d", i)
                        }
                    }
                }
            }()

            mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), spec.mapError }
            SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
                addr := uintptr(unsafe.Pointer(&clonedPage[0]))
                return pmm.Frame(addr >> mem.PageShift), spec.allocError
            })

            for i := 0; i < len(origPage); i++ {
                origPage[i] = byte(i % 256)
                clonedPage[i] = 0
            }

            pageEntry = 0
            pageEntry.SetFlags(spec.pteFlags)

            pageFaultHandler(2, &frame, &regs)
        })
    }
}

func TestNonRecoverablePageFault(t *testing.T) {
    specs := []struct {
        errCode   uint64
        expReason string
    }{
        {0, "read from non-present page"},
        {1, "page protection violation (read)"},
        {2, "write to non-present page"},
        {3, "page protection violation (write)"},
        {4, "page-fault in user-mode"},
        {8, "page table has reserved bit set"},
        {16, "instruction fetch"},
        {0xf00, "unknown"},
    }

    var (
        regs  irq.Regs
        frame irq.Frame
    )

    for specIndex, spec := range specs {
        t.Run(fmt.Sprint(specIndex), func(t *testing.T) {
            fb := mockTTY()

            defer func() {
                if err := recover(); err != errUnrecoverableFault {
                    t.Errorf("expected a panic with errUnrecoverableFault; got %v", err)
                }

                // nonRecoverablePageFault panics before control returns to
                // the test body, so the emitted reason must be checked here
                // after recovering.
                if got := readTTY(fb); !strings.Contains(got, spec.expReason) {
                    t.Errorf("expected reason %q; got output:\n%q", spec.expReason, got)
                }
            }()

            nonRecoverablePageFault(0xbadf00d000, spec.errCode, &frame, &regs, errUnrecoverableFault)
        })
    }
}

func TestGPtHandler(t *testing.T) {
    defer func() {
        readCR2Fn = cpu.ReadCR2
    }()

    var (
        regs  irq.Regs
        frame irq.Frame
    )

    readCR2Fn = func() uint64 {
        return 0xbadf00d000
    }

    defer func() {
        if err := recover(); err != errUnrecoverableFault {
            t.Errorf("expected a panic with errUnrecoverableFault; got %v", err)
        }
    }()

    mockTTY()
    generalProtectionFaultHandler(0, &frame, &regs)
}

func TestInit(t *testing.T) {
    defer func() {
        frameAllocator = nil
        mapTemporaryFn = MapTemporary
        unmapFn = Unmap
        handleExceptionWithCodeFn = irq.HandleExceptionWithCode
    }()

    // reserve space for an allocated page
    reservedPage := make([]byte, mem.PageSize)

    t.Run("success", func(t *testing.T) {
        // fill page with junk
        for i := 0; i < len(reservedPage); i++ {
            reservedPage[i] = byte(i % 256)
        }

        SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
            addr := uintptr(unsafe.Pointer(&reservedPage[0]))
            return pmm.Frame(addr >> mem.PageShift), nil
        })
        unmapFn = func(p Page) *kernel.Error { return nil }
        mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil }
        handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {}

        if err := Init(); err != nil {
            t.Fatal(err)
        }

        // reserved page should be zeroed
        for i := 0; i < len(reservedPage); i++ {
            if reservedPage[i] != 0 {
                t.Errorf("expected reserved page to be zeroed; got byte %d at index %d", reservedPage[i], i)
            }
        }
    })

    t.Run("blank page allocation error", func(t *testing.T) {
        expErr := &kernel.Error{Module: "test", Message: "out of memory"}

        SetFrameAllocator(func() (pmm.Frame, *kernel.Error) { return pmm.InvalidFrame, expErr })
        unmapFn = func(p Page) *kernel.Error { return nil }
        mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil }
        handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {}

        if err := Init(); err != expErr {
            t.Fatalf("expected error: %v; got %v", expErr, err)
        }
    })

    t.Run("blank page mapping error", func(t *testing.T) {
        expErr := &kernel.Error{Module: "test", Message: "map failed"}

        SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
            addr := uintptr(unsafe.Pointer(&reservedPage[0]))
            return pmm.Frame(addr >> mem.PageShift), nil
        })
        unmapFn = func(p Page) *kernel.Error { return nil }
        mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), expErr }
        handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {}

        if err := Init(); err != expErr {
            t.Fatalf("expected error: %v; got %v", expErr, err)
        }
    })
}

func readTTY(fb []byte) string {
    var buf bytes.Buffer
    for i := 0; i < len(fb); i += 2 {
        ch := fb[i]
        if ch == 0 {
            if i+2 < len(fb) && fb[i+2] != 0 {
                buf.WriteByte('\n')
            }
            continue
        }

        buf.WriteByte(ch)
    }

    return buf.String()
}

func mockTTY() []byte {
    // Mock a tty to handle early.Printf output
    mockConsoleFb := make([]byte, 160*25)
    mockConsole := &console.Ega{}
    mockConsole.Init(80, 25, uintptr(unsafe.Pointer(&mockConsoleFb[0])))
    hal.ActiveTerminal.AttachTo(mockConsole)

    return mockConsoleFb
}
55
src/gopheros/kernel/mem/vmm/walk.go
Normal file
@@ -0,0 +1,55 @@
package vmm

import (
    "gopheros/kernel/mem"
    "unsafe"
)

var (
    // ptePtrFn returns a pointer to the supplied entry address. It is
    // used by tests to override the generated page table entry pointers so
    // walk() can be properly tested. When compiling the kernel this function
    // will be automatically inlined.
    ptePtrFn = func(entryAddr uintptr) unsafe.Pointer {
        return unsafe.Pointer(entryAddr)
    }
)

// pageTableWalker is a function that can be passed to the walk method. The
// function receives the current page level and page table entry as its
// arguments. If the function returns false, then the page walk is aborted.
type pageTableWalker func(pteLevel uint8, pte *pageTableEntry) bool

// walk performs a page table walk for the given virtual address. It calls the
// supplied walkFn with the page table entry that corresponds to each page
// table level. If walkFn returns false, the walk is aborted.
func walk(virtAddr uintptr, walkFn pageTableWalker) {
    var (
        level                            uint8
        tableAddr, entryAddr, entryIndex uintptr
        ok                               bool
    )

    // tableAddr is initially set to the recursively mapped virtual address
    // for the last entry in the top-most page table. Dereferencing a pointer
    // to this address will allow us to access the entries of the top-most
    // table.
    for level, tableAddr = uint8(0), pdtVirtualAddr; level < pageLevels; level, tableAddr = level+1, entryAddr {
        // Extract the bits from virtual address that correspond to the
        // index in this level's page table
        entryIndex = (virtAddr >> pageLevelShifts[level]) & ((1 << pageLevelBits[level]) - 1)

        // By shifting the table virtual address left by pageLevelBits[level] we add
        // a new level of indirection to our recursive mapping allowing us to access
        // the table pointed to by the page entry
        entryAddr = tableAddr + (entryIndex << mem.PointerShift)

        if ok = walkFn(level, (*pageTableEntry)(ptePtrFn(entryAddr))); !ok {
            return
        }

        // Shift left by the number of bits for this paging level to get
        // the virtual address of the table pointed to by entryAddr
        entryAddr <<= pageLevelBits[level]
    }
}
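The index extraction above is easiest to follow with concrete numbers. Assuming the amd64 layout used by these tests (9 bits per level, 12-bit page offset), the address 0x8080604400 from walk_test.go below decomposes as:

// (0x8080604400 >> 39) & 0x1ff == 1     (p4 index)
// (0x8080604400 >> 30) & 0x1ff == 2     (p3 index)
// (0x8080604400 >> 21) & 0x1ff == 3     (p2 index)
// (0x8080604400 >> 12) & 0x1ff == 4     (p1 index)
//  0x8080604400 & 0xfff        == 0x400 (page offset 1024)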
75
src/gopheros/kernel/mem/vmm/walk_test.go
Normal file
@@ -0,0 +1,75 @@
package vmm

import (
    "gopheros/kernel/mem"
    "runtime"
    "testing"
    "unsafe"
)

func TestPtePtrFn(t *testing.T) {
    // Dummy test to keep coverage happy
    if exp, got := unsafe.Pointer(uintptr(123)), ptePtrFn(uintptr(123)); exp != got {
        t.Fatalf("expected ptePtrFn to return %v; got %v", exp, got)
    }
}

func TestWalkAmd64(t *testing.T) {
    if runtime.GOARCH != "amd64" {
        t.Skip("test requires amd64 runtime; skipping")
    }

    defer func(origPtePtr func(uintptr) unsafe.Pointer) {
        ptePtrFn = origPtePtr
    }(ptePtrFn)

    // This address breaks down to:
    // p4 index: 1
    // p3 index: 2
    // p2 index: 3
    // p1 index: 4
    // offset  : 1024
    targetAddr := uintptr(0x8080604400)

    sizeofPteEntry := uintptr(unsafe.Sizeof(pageTableEntry(0)))
    expEntryAddrBits := [pageLevels][pageLevels + 1]uintptr{
        {511, 511, 511, 511, 1 * sizeofPteEntry},
        {511, 511, 511, 1, 2 * sizeofPteEntry},
        {511, 511, 1, 2, 3 * sizeofPteEntry},
        {511, 1, 2, 3, 4 * sizeofPteEntry},
    }

    pteCallCount := 0
    ptePtrFn = func(entry uintptr) unsafe.Pointer {
        if pteCallCount >= pageLevels {
            t.Fatalf("unexpected call to ptePtrFn; already called %d times", pageLevels)
        }

        for i := 0; i < pageLevels; i++ {
            pteIndex := (entry >> pageLevelShifts[i]) & ((1 << pageLevelBits[i]) - 1)
            if pteIndex != expEntryAddrBits[pteCallCount][i] {
                t.Errorf("[ptePtrFn call %d] expected pte entry for level %d to use offset %d; got %d", pteCallCount, i, expEntryAddrBits[pteCallCount][i], pteIndex)
            }
        }

        // Check the page offset
        pteIndex := entry & ((1 << mem.PageShift) - 1)
        if pteIndex != expEntryAddrBits[pteCallCount][pageLevels] {
            t.Errorf("[ptePtrFn call %d] expected pte offset to be %d; got %d", pteCallCount, expEntryAddrBits[pteCallCount][pageLevels], pteIndex)
        }

        pteCallCount++

        return unsafe.Pointer(uintptr(0xf00))
    }

    walkFnCallCount := 0
    walk(targetAddr, func(level uint8, entry *pageTableEntry) bool {
        walkFnCallCount++
        return walkFnCallCount != pageLevels
    })

    if pteCallCount != pageLevels {
        t.Errorf("expected ptePtrFn to be called %d times; got %d", pageLevels, pteCallCount)
    }
}
48
src/gopheros/kernel/panic.go
Normal file
@@ -0,0 +1,48 @@
package kernel

import (
    "gopheros/kernel/cpu"
    "gopheros/kernel/kfmt/early"
)

var (
    // cpuHaltFn is mocked by tests and is automatically inlined by the compiler.
    cpuHaltFn = cpu.Halt

    errRuntimePanic = &Error{Module: "rt", Message: "unknown cause"}
)

// Panic outputs the supplied error (if not nil) to the console and halts the
// CPU. Calls to Panic never return. Panic also works as a redirection target
// for calls to panic() (resolved via runtime.gopanic)
//go:redirect-from runtime.gopanic
func Panic(e interface{}) {
    var err *Error

    switch t := e.(type) {
    case *Error:
        err = t
    case string:
        panicString(t)
        return
    case error:
        errRuntimePanic.Message = t.Error()
        err = errRuntimePanic
    }

    early.Printf("\n-----------------------------------\n")
    if err != nil {
        early.Printf("[%s] unrecoverable error: %s\n", err.Module, err.Message)
    }
    early.Printf("*** kernel panic: system halted ***")
    early.Printf("\n-----------------------------------\n")

    cpuHaltFn()
}

// panicString serves as a redirect target for runtime.throw
//go:redirect-from runtime.throw
func panicString(msg string) {
    errRuntimePanic.Message = msg
    Panic(errRuntimePanic)
}
120
src/gopheros/kernel/panic_test.go
Normal file
@@ -0,0 +1,120 @@
package kernel

import (
    "bytes"
    "errors"
    "gopheros/kernel/cpu"
    "gopheros/kernel/driver/video/console"
    "gopheros/kernel/hal"
    "testing"
    "unsafe"
)

func TestPanic(t *testing.T) {
    defer func() {
        cpuHaltFn = cpu.Halt
    }()

    var cpuHaltCalled bool
    cpuHaltFn = func() {
        cpuHaltCalled = true
    }

    t.Run("with *kernel.Error", func(t *testing.T) {
        cpuHaltCalled = false
        fb := mockTTY()
        err := &Error{Module: "test", Message: "panic test"}

        Panic(err)

        exp := "\n-----------------------------------\n[test] unrecoverable error: panic test\n*** kernel panic: system halted ***\n-----------------------------------"

        if got := readTTY(fb); got != exp {
            t.Fatalf("expected to get:\n%q\ngot:\n%q", exp, got)
        }

        if !cpuHaltCalled {
            t.Fatal("expected cpu.Halt() to be called by Panic")
        }
    })

    t.Run("with error", func(t *testing.T) {
        cpuHaltCalled = false
        fb := mockTTY()
        err := errors.New("go error")

        Panic(err)

        exp := "\n-----------------------------------\n[rt] unrecoverable error: go error\n*** kernel panic: system halted ***\n-----------------------------------"

        if got := readTTY(fb); got != exp {
            t.Fatalf("expected to get:\n%q\ngot:\n%q", exp, got)
        }

        if !cpuHaltCalled {
            t.Fatal("expected cpu.Halt() to be called by Panic")
        }
    })

    t.Run("with string", func(t *testing.T) {
        cpuHaltCalled = false
        fb := mockTTY()
        err := "string error"

        Panic(err)

        exp := "\n-----------------------------------\n[rt] unrecoverable error: string error\n*** kernel panic: system halted ***\n-----------------------------------"

        if got := readTTY(fb); got != exp {
            t.Fatalf("expected to get:\n%q\ngot:\n%q", exp, got)
        }

        if !cpuHaltCalled {
            t.Fatal("expected cpu.Halt() to be called by Panic")
        }
    })

    t.Run("without error", func(t *testing.T) {
        cpuHaltCalled = false
        fb := mockTTY()

        Panic(nil)

        exp := "\n-----------------------------------\n*** kernel panic: system halted ***\n-----------------------------------"

        if got := readTTY(fb); got != exp {
            t.Fatalf("expected to get:\n%q\ngot:\n%q", exp, got)
        }

        if !cpuHaltCalled {
            t.Fatal("expected cpu.Halt() to be called by Panic")
        }
    })
}

func readTTY(fb []byte) string {
    var buf bytes.Buffer
    for i := 0; i < len(fb); i += 2 {
        ch := fb[i]
        if ch == 0 {
            if i+2 < len(fb) && fb[i+2] != 0 {
                buf.WriteByte('\n')
            }
            continue
        }

        buf.WriteByte(ch)
    }

    return buf.String()
}

func mockTTY() []byte {
    // Mock a tty to handle early.Printf output
    mockConsoleFb := make([]byte, 160*25)
    mockConsole := &console.Ega{}
    mockConsole.Init(80, 25, uintptr(unsafe.Pointer(&mockConsoleFb[0])))
    hal.ActiveTerminal.AttachTo(mockConsole)

    return mockConsoleFb
}
15
src/gopheros/stub.go
Normal file
@@ -0,0 +1,15 @@
package main

import "gopheros/kernel/kmain"

var multibootInfoPtr uintptr

// main makes a dummy call to the actual kernel main entrypoint function. It
// is intentionally defined to prevent the Go compiler from optimizing away the
// real kernel code.
//
// A global variable is passed as an argument to Kmain to prevent the compiler
// from inlining the actual call and removing Kmain from the generated .o file.
func main() {
    kmain.Kmain(multibootInfoPtr, 0, 0)
}