From 71dfc9ae709fb5be64bb0cbaf238526e5b4bbd54 Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Thu, 22 Jun 2017 05:41:32 +0100 Subject: [PATCH 1/5] FlagPresent must be explicitly specified in calls to Map --- kernel/mem/vmm/map.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/mem/vmm/map.go b/kernel/mem/vmm/map.go index fc472da..298e29a 100644 --- a/kernel/mem/vmm/map.go +++ b/kernel/mem/vmm/map.go @@ -37,7 +37,7 @@ func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error { if pteLevel == pageLevels-1 { *pte = 0 pte.SetFrame(frame) - pte.SetFlags(FlagPresent | flags) + pte.SetFlags(flags) flushTLBEntryFn(page.Address()) return true } @@ -77,7 +77,7 @@ func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error { // mapping mechanism is primarily used by the kernel to access and initialize // inactive page tables. func MapTemporary(frame pmm.Frame) (Page, *kernel.Error) { - if err := Map(PageFromAddress(tempMappingAddr), frame, FlagRW); err != nil { + if err := Map(PageFromAddress(tempMappingAddr), frame, FlagPresent|FlagRW); err != nil { return 0, err } From 42ee2d13259681ae722dfcc9828c0ec2c678772c Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Thu, 22 Jun 2017 06:06:05 +0100 Subject: [PATCH 2/5] Implement memcopy --- kernel/mem/{memset.go => mem.go} | 20 +++++++++++++++++ kernel/mem/{memset_test.go => mem_test.go} | 25 ++++++++++++++++++++++ 2 files changed, 45 insertions(+) rename kernel/mem/{memset.go => mem.go} (65%) rename kernel/mem/{memset_test.go => mem_test.go} (53%) diff --git a/kernel/mem/memset.go b/kernel/mem/mem.go similarity index 65% rename from kernel/mem/memset.go rename to kernel/mem/mem.go index 6041487..74b944e 100644 --- a/kernel/mem/memset.go +++ b/kernel/mem/mem.go @@ -27,3 +27,23 @@ func Memset(addr uintptr, value byte, size Size) { copy(target[index:], target[:index]) } } + +// Memcopy copies size bytes from src to dst. +func Memcopy(src, dst uintptr, size Size) { + if size == 0 { + return + } + + srcSlice := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Len: int(size), + Cap: int(size), + Data: src, + })) + dstSlice := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Len: int(size), + Cap: int(size), + Data: dst, + })) + + copy(dstSlice, srcSlice) +} diff --git a/kernel/mem/memset_test.go b/kernel/mem/mem_test.go similarity index 53% rename from kernel/mem/memset_test.go rename to kernel/mem/mem_test.go index c4af743..eef37da 100644 --- a/kernel/mem/memset_test.go +++ b/kernel/mem/mem_test.go @@ -25,3 +25,28 @@ func TestMemset(t *testing.T) { } } } + +func TestMemcopy(t *testing.T) { + // memcopy with a 0 size should be a no-op + Memcopy(uintptr(0), uintptr(0), 0) + + var ( + src = make([]byte, PageSize) + dst = make([]byte, PageSize) + ) + for i := 0; i < len(src); i++ { + src[i] = byte(i % 256) + } + + Memcopy( + uintptr(unsafe.Pointer(&src[0])), + uintptr(unsafe.Pointer(&dst[0])), + PageSize, + ) + + for i := 0; i < len(src); i++ { + if got := dst[i]; got != src[i] { + t.Errorf("value mismatch between src and dst at index %d", i) + } + } +} From 56d23f50ae2c549d058e4b9f226084548c323636 Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Thu, 22 Jun 2017 06:24:30 +0100 Subject: [PATCH 3/5] Enable page write protection for both kernel and user space If the WP bit in CR0 is not set then write protection for pages is only enforced for user-land code. 
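For reference, here is the CR0 change expressed as a small Go-style sketch. The constant
names below are illustrative only and do not exist in the kernel source; the actual update
is performed in the 32-bit bootstrap assembly (rt0_32.s) shown below:

	// Illustrative constants only; the real change is the two-line assembly diff below.
	const (
		cr0PagingBit       = 1 << 31 // PG: enable paging
		cr0WriteProtectBit = 1 << 16 // WP: enforce read-only pages for ring 0 (kernel) code as well
	)
	// cr0 |= cr0PagingBit | cr0WriteProtectBit

With WP set, kernel-mode writes to read-only pages also fault, which the copy-on-write
page fault handling introduced later in this series relies on.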
--- arch/x86_64/asm/rt0_32.s | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86_64/asm/rt0_32.s b/arch/x86_64/asm/rt0_32.s index 8480a1f..ae4b124 100644 --- a/arch/x86_64/asm/rt0_32.s +++ b/arch/x86_64/asm/rt0_32.s @@ -304,9 +304,9 @@ _rt0_enter_long_mode: or eax, (1 << 8) | (1<<11) wrmsr - ; Finally enable paging + ; Finally enable paging (bit 31) and user/kernel page write protection (bit 16) mov eax, cr0 - or eax, 1 << 31 + or eax, (1 << 31) | (1<<16) mov cr0, eax ; We are in 32-bit compatibility submode. We need to load a 64bit GDT From 32a10601ac3d9a62a5d984b4889f1d826212fd74 Mon Sep 17 00:00:00 2001 From: Achilleas Anagnostopoulos Date: Thu, 22 Jun 2017 07:44:55 +0100 Subject: [PATCH 4/5] Add support for copy-on-write to the page fault handler Page faults occurring on RO pages with the CopyOnWrite flag set will be handled by the page handler as follows: - allocate new frame - establish temporary mapping for new frame - copy original page to new frame - update entry for the page where the fault occurred: - set physical frame address to the allocated frame - clear CoW flag and set Present, RW flags - return from the fault handler to resume execution at the instruction that caused the fault Any other page faults will still cause a kernel panic --- kernel/mem/vmm/constants_amd64.go | 4 ++ kernel/mem/vmm/vmm.go | 56 +++++++++++++++++- kernel/mem/vmm/vmm_test.go | 96 ++++++++++++++++++++++++++++--- 3 files changed, 147 insertions(+), 9 deletions(-) diff --git a/kernel/mem/vmm/constants_amd64.go b/kernel/mem/vmm/constants_amd64.go index 5e3b95d..c785244 100644 --- a/kernel/mem/vmm/constants_amd64.go +++ b/kernel/mem/vmm/constants_amd64.go @@ -80,6 +80,10 @@ const ( // for this page when the swapping page tables by updating the CR3 register. FlagGlobal + // FlagCopyOnWrite is used to implement copy-on-write functionality. This + // flag and FlagRW are mutually exclusive. + FlagCopyOnWrite = 1 << 9 + // FlagNoExecute if set, indicates that a page contains non-executable code. 
FlagNoExecute = 1 << 63 ) diff --git a/kernel/mem/vmm/vmm.go b/kernel/mem/vmm/vmm.go index ba7cf37..be04f6f 100644 --- a/kernel/mem/vmm/vmm.go +++ b/kernel/mem/vmm/vmm.go @@ -5,6 +5,7 @@ import ( "github.com/achilleasa/gopher-os/kernel/cpu" "github.com/achilleasa/gopher-os/kernel/irq" "github.com/achilleasa/gopher-os/kernel/kfmt/early" + "github.com/achilleasa/gopher-os/kernel/mem" "github.com/achilleasa/gopher-os/kernel/mem/pmm" ) @@ -30,7 +31,58 @@ func SetFrameAllocator(allocFn FrameAllocatorFn) { } func pageFaultHandler(errorCode uint64, frame *irq.Frame, regs *irq.Regs) { - early.Printf("\nPage fault while accessing address: 0x%16x\nReason: ", readCR2Fn()) + var ( + faultAddress = uintptr(readCR2Fn()) + faultPage = PageFromAddress(faultAddress) + pageEntry *pageTableEntry + ) + + // Lookup entry for the page where the fault occurred + walk(faultPage.Address(), func(pteLevel uint8, pte *pageTableEntry) bool { + nextIsPresent := pte.HasFlags(FlagPresent) + + if pteLevel == pageLevels-1 && nextIsPresent { + pageEntry = pte + } + + // Abort walk if the next page table entry is missing + return nextIsPresent + }) + + // CoW is supported for RO pages with the CoW flag set + if pageEntry != nil && !pageEntry.HasFlags(FlagRW) && pageEntry.HasFlags(FlagCopyOnWrite) { + var ( + copy pmm.Frame + tmpPage Page + err *kernel.Error + ) + + if copy, err = frameAllocator(); err != nil { + nonRecoverablePageFault(faultAddress, errorCode, frame, regs, err) + } else if tmpPage, err = mapTemporaryFn(copy); err != nil { + nonRecoverablePageFault(faultAddress, errorCode, frame, regs, err) + } else { + // Copy page contents, mark as RW and remove CoW flag + mem.Memcopy(faultPage.Address(), tmpPage.Address(), mem.PageSize) + unmapFn(tmpPage) + + // Update mapping to point to the new frame, flag it as RW and + // remove the CoW flag + pageEntry.ClearFlags(FlagCopyOnWrite) + pageEntry.SetFlags(FlagPresent | FlagRW) + pageEntry.SetFrame(copy) + flushTLBEntryFn(faultPage.Address()) + + // Fault recovered; retry the instruction that caused the fault + return + } + } + + nonRecoverablePageFault(faultAddress, errorCode, frame, regs, nil) +} + +func nonRecoverablePageFault(faultAddress uintptr, errorCode uint64, frame *irq.Frame, regs *irq.Regs, err *kernel.Error) { + early.Printf("\nPage fault while accessing address: 0x%16x\nReason: ", faultAddress) switch { case errorCode == 0: early.Printf("read from non-present page") @@ -55,7 +107,7 @@ func pageFaultHandler(errorCode uint64, frame *irq.Frame, regs *irq.Regs) { frame.Print() // TODO: Revisit this when user-mode tasks are implemented - panicFn(nil) + panicFn(err) } func generalProtectionFaultHandler(_ uint64, frame *irq.Frame, regs *irq.Regs) { diff --git a/kernel/mem/vmm/vmm_test.go b/kernel/mem/vmm/vmm_test.go index d1aedd3..f7e732b 100644 --- a/kernel/mem/vmm/vmm_test.go +++ b/kernel/mem/vmm/vmm_test.go @@ -11,12 +11,98 @@ import ( "github.com/achilleasa/gopher-os/kernel/driver/video/console" "github.com/achilleasa/gopher-os/kernel/hal" "github.com/achilleasa/gopher-os/kernel/irq" + "github.com/achilleasa/gopher-os/kernel/mem" + "github.com/achilleasa/gopher-os/kernel/mem/pmm" ) -func TestPageFaultHandler(t *testing.T) { - defer func() { +func TestRecoverablePageFault(t *testing.T) { + var ( + frame irq.Frame + regs irq.Regs + panicCalled bool + pageEntry pageTableEntry + origPage = make([]byte, mem.PageSize) + clonedPage = make([]byte, mem.PageSize) + err = &kernel.Error{Module: "test", Message: "something went wrong"} + ) + + defer func(origPtePtr 
func(uintptr) unsafe.Pointer) {
+		ptePtrFn = origPtePtr
 		panicFn = kernel.Panic
 		readCR2Fn = cpu.ReadCR2
+		frameAllocator = nil
+		mapTemporaryFn = MapTemporary
+		unmapFn = Unmap
+		flushTLBEntryFn = cpu.FlushTLBEntry
+	}(ptePtrFn)
+
+	specs := []struct {
+		pteFlags   PageTableEntryFlag
+		allocError *kernel.Error
+		mapError   *kernel.Error
+		expPanic   bool
+	}{
+		// Missing page
+		{0, nil, nil, true},
+		// Page is present but CoW flag not set
+		{FlagPresent, nil, nil, true},
+		// Page is present but both CoW and RW flags set
+		{FlagPresent | FlagRW | FlagCopyOnWrite, nil, nil, true},
+		// Page is present with CoW flag set but allocating a page copy fails
+		{FlagPresent | FlagCopyOnWrite, err, nil, true},
+		// Page is present with CoW flag set but mapping the page copy fails
+		{FlagPresent | FlagCopyOnWrite, nil, err, true},
+		// Page is present with CoW flag set
+		{FlagPresent | FlagCopyOnWrite, nil, nil, false},
+	}
+
+	mockTTY()
+
+	panicFn = func(_ *kernel.Error) {
+		panicCalled = true
+	}
+
+	ptePtrFn = func(entry uintptr) unsafe.Pointer { return unsafe.Pointer(&pageEntry) }
+	readCR2Fn = func() uint64 { return uint64(uintptr(unsafe.Pointer(&origPage[0]))) }
+	unmapFn = func(_ Page) *kernel.Error { return nil }
+	flushTLBEntryFn = func(_ uintptr) {}
+
+	for specIndex, spec := range specs {
+		mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), spec.mapError }
+		SetFrameAllocator(func() (pmm.Frame, *kernel.Error) {
+			addr := uintptr(unsafe.Pointer(&clonedPage[0]))
+			return pmm.Frame(addr >> mem.PageShift), spec.allocError
+		})
+
+		for i := 0; i < len(origPage); i++ {
+			origPage[i] = byte(i % 256)
+			clonedPage[i] = 0
+		}
+
+		panicCalled = false
+		pageEntry = 0
+		pageEntry.SetFlags(spec.pteFlags)
+
+		pageFaultHandler(2, &frame, &regs)
+
+		if spec.expPanic != panicCalled {
+			t.Errorf("[spec %d] expected panic %t; got %t", specIndex, spec.expPanic, panicCalled)
+		}
+
+		if !spec.expPanic {
+			for i := 0; i < len(origPage); i++ {
+				if origPage[i] != clonedPage[i] {
+					t.Errorf("[spec %d] expected clone page to be a copy of the original page; mismatch at index %d", specIndex, i)
+				}
+			}
+		}
+	}
+
+}
+
+func TestNonRecoverablePageFault(t *testing.T) {
+	defer func() {
+		panicFn = kernel.Panic
 	}()
 
 	specs := []struct {
@@ -71,10 +157,6 @@ func TestPageFaultHandler(t *testing.T) {
 		frame irq.Frame
 	)
 
-	readCR2Fn = func() uint64 {
-		return 0xbadf00d000
-	}
-
 	panicCalled := false
 	panicFn = func(_ *kernel.Error) {
 		panicCalled = true
@@ -84,7 +166,7 @@ func TestPageFaultHandler(t *testing.T) {
 		fb := mockTTY()
 		panicCalled = false
-		pageFaultHandler(spec.errCode, &frame, &regs)
+		nonRecoverablePageFault(0xbadf00d000, spec.errCode, &frame, &regs, nil)
 
 		if got := readTTY(fb); !strings.Contains(got, spec.expReason) {
 			t.Errorf("[spec %d] expected reason %q; got output:\n%q", specIndex, spec.expReason, got)
 			continue

From 1fc9d20ed2aba3928bde2299ec039179f79576ce Mon Sep 17 00:00:00 2001
From: Achilleas Anagnostopoulos
Date: Thu, 22 Jun 2017 19:17:19 +0100
Subject: [PATCH 5/5] Reserve and protect zeroed frame when initializing vmm

The vmm package exports ReservedZeroedFrame which can be used to set up
a lazy physical page allocation scheme. This is implemented by mapping
ReservedZeroedFrame to each page in a virtual memory region using the
following flag combination: FlagPresent | FlagCopyOnWrite.

This has the effect that all reads from the virtual address region
target the contents of ReservedZeroedFrame (always returning zero).

On the other hand, writes to the virtual address region trigger a page
fault which is resolved as follows:
- a new physical frame is allocated and the contents of ReservedZeroedFrame
  are copied to it (effectively clearing the new frame).
- the page entry for the virtual address that caused the fault is updated to
  point to the new frame and its flags are changed to: FlagPresent | FlagRW
- execution control is returned to the code that caused the fault
---
 kernel/mem/vmm/map.go      | 42 ++++++++++++++++++++++++-
 kernel/mem/vmm/map_test.go | 18 +++++++++++
 kernel/mem/vmm/vmm.go      | 26 +++++++++++++++-
 kernel/mem/vmm/vmm_test.go | 63 +++++++++++++++++++++++++++++++++++---
 4 files changed, 143 insertions(+), 6 deletions(-)

diff --git a/kernel/mem/vmm/map.go b/kernel/mem/vmm/map.go
index 298e29a..ccfa128 100644
--- a/kernel/mem/vmm/map.go
+++ b/kernel/mem/vmm/map.go
@@ -9,7 +9,34 @@ import (
 	"github.com/achilleasa/gopher-os/kernel/mem/pmm"
 )
 
+// ReservedZeroedFrame is a special zero-cleared frame allocated by the
+// vmm package's Init function. The purpose of this frame is to assist
+// in implementing on-demand memory allocation when mapping it in
+// conjunction with the CopyOnWrite flag. Here is an example of how it
+// can be used:
+//
+//	func ReserveOnDemand(start vmm.Page, pageCount int) *kernel.Error {
+//		var err *kernel.Error
+//		mapFlags := vmm.FlagPresent|vmm.FlagCopyOnWrite
+//		for page := start; pageCount > 0; pageCount, page = pageCount-1, page+1 {
+//			if err = vmm.Map(page, vmm.ReservedZeroedFrame, mapFlags); err != nil {
+//				return err
+//			}
+//		}
+//		return nil
+//	}
+//
+// In the above example, page mappings are set up for the requested number of
+// pages but no physical memory is reserved for their contents. A write to any
+// of the above pages will trigger a page-fault causing a new frame to be
+// allocated, cleared (the blank frame is copied to the new frame) and
+// installed in-place with RW permissions.
+var ReservedZeroedFrame pmm.Frame
+
 var (
+	// protectReservedZeroedPage is set to true to prevent mapping ReservedZeroedFrame with a RW flag.
+	protectReservedZeroedPage bool
+
 	// nextAddrFn is used by used by tests to override the nextTableAddr
 	// calculations used by Map. When compiling the kernel this function
 	// will be automatically inlined.
@@ -21,14 +48,21 @@ var (
 	// which will cause a fault if called in user-mode.
 	flushTLBEntryFn = cpu.FlushTLBEntry
 
-	errNoHugePageSupport = &kernel.Error{Module: "vmm", Message: "huge pages are not supported"}
+	errNoHugePageSupport           = &kernel.Error{Module: "vmm", Message: "huge pages are not supported"}
+	errAttemptToRWMapReservedFrame = &kernel.Error{Module: "vmm", Message: "reserved blank frame cannot be mapped with a RW flag"}
 )
 
 // Map establishes a mapping between a virtual page and a physical memory frame
 // using the currently active page directory table. Calls to Map will use the
 // supplied physical frame allocator to initialize missing page tables at each
 // paging level supported by the MMU.
+//
+// Attempts to map ReservedZeroedFrame with a RW flag will result in an error.
 func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
+	if protectReservedZeroedPage && frame == ReservedZeroedFrame && (flags&FlagRW) != 0 {
+		return errAttemptToRWMapReservedFrame
+	}
+
 	var err *kernel.Error
 
 	walk(page.Address(), func(pteLevel uint8, pte *pageTableEntry) bool {
@@ -76,7 +110,13 @@ func Map(page Page, frame pmm.Frame, flags PageTableEntryFlag) *kernel.Error {
 // to a fixed virtual address overwriting any previous mapping.
The temporary // mapping mechanism is primarily used by the kernel to access and initialize // inactive page tables. +// +// Attempts to map ReservedZeroedFrame will result in an error. func MapTemporary(frame pmm.Frame) (Page, *kernel.Error) { + if protectReservedZeroedPage && frame == ReservedZeroedFrame { + return 0, errAttemptToRWMapReservedFrame + } + if err := Map(PageFromAddress(tempMappingAddr), frame, FlagPresent|FlagRW); err != nil { return 0, err } diff --git a/kernel/mem/vmm/map_test.go b/kernel/mem/vmm/map_test.go index cee8379..1ecd15c 100644 --- a/kernel/mem/vmm/map_test.go +++ b/kernel/mem/vmm/map_test.go @@ -144,6 +144,24 @@ func TestMapTemporaryErrorsAmd64(t *testing.T) { t.Fatalf("got unexpected error %v", err) } }) + + t.Run("map BlankReservedFrame RW", func(t *testing.T) { + defer func() { protectReservedZeroedPage = false }() + + protectReservedZeroedPage = true + if err := Map(Page(0), ReservedZeroedFrame, FlagRW); err != errAttemptToRWMapReservedFrame { + t.Fatalf("expected errAttemptToRWMapReservedFrame; got: %v", err) + } + }) + + t.Run("temp-map BlankReservedFrame RW", func(t *testing.T) { + defer func() { protectReservedZeroedPage = false }() + + protectReservedZeroedPage = true + if _, err := MapTemporary(ReservedZeroedFrame); err != errAttemptToRWMapReservedFrame { + t.Fatalf("expected errAttemptToRWMapReservedFrame; got: %v", err) + } + }) } func TestUnmapAmd64(t *testing.T) { diff --git a/kernel/mem/vmm/vmm.go b/kernel/mem/vmm/vmm.go index be04f6f..b698f5c 100644 --- a/kernel/mem/vmm/vmm.go +++ b/kernel/mem/vmm/vmm.go @@ -120,11 +120,35 @@ func generalProtectionFaultHandler(_ uint64, frame *irq.Frame, regs *irq.Regs) { panicFn(nil) } +// reserveZeroedFrame reserves a physical frame to be used together with +// FlagCopyOnWrite for lazy allocation requests. +func reserveZeroedFrame() *kernel.Error { + var ( + err *kernel.Error + tempPage Page + ) + + if ReservedZeroedFrame, err = frameAllocator(); err != nil { + return err + } else if tempPage, err = mapTemporaryFn(ReservedZeroedFrame); err != nil { + return err + } + mem.Memset(tempPage.Address(), 0, mem.PageSize) + unmapFn(tempPage) + + // From this point on, ReservedZeroedFrame cannot be mapped with a RW flag + protectReservedZeroedPage = true + return nil +} + // Init initializes the vmm system and installs paging-related exception // handlers. 
func Init() *kernel.Error { + if err := reserveZeroedFrame(); err != nil { + return err + } + handleExceptionWithCodeFn(irq.PageFaultException, pageFaultHandler) handleExceptionWithCodeFn(irq.GPFException, generalProtectionFaultHandler) - return nil } diff --git a/kernel/mem/vmm/vmm_test.go b/kernel/mem/vmm/vmm_test.go index f7e732b..f4ecbb0 100644 --- a/kernel/mem/vmm/vmm_test.go +++ b/kernel/mem/vmm/vmm_test.go @@ -213,14 +213,69 @@ func TestGPtHandler(t *testing.T) { func TestInit(t *testing.T) { defer func() { + frameAllocator = nil + mapTemporaryFn = MapTemporary + unmapFn = Unmap handleExceptionWithCodeFn = irq.HandleExceptionWithCode }() - handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {} + // reserve space for an allocated page + reservedPage := make([]byte, mem.PageSize) - if err := Init(); err != nil { - t.Fatal(err) - } + t.Run("success", func(t *testing.T) { + // fill page with junk + for i := 0; i < len(reservedPage); i++ { + reservedPage[i] = byte(i % 256) + } + + SetFrameAllocator(func() (pmm.Frame, *kernel.Error) { + addr := uintptr(unsafe.Pointer(&reservedPage[0])) + return pmm.Frame(addr >> mem.PageShift), nil + }) + unmapFn = func(p Page) *kernel.Error { return nil } + mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil } + handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {} + + if err := Init(); err != nil { + t.Fatal(err) + } + + // reserved page should be zeroed + for i := 0; i < len(reservedPage); i++ { + if reservedPage[i] != 0 { + t.Errorf("expected reserved page to be zeroed; got byte %d at index %d", reservedPage[i], i) + } + } + }) + + t.Run("blank page allocation error", func(t *testing.T) { + expErr := &kernel.Error{Module: "test", Message: "out of memory"} + + SetFrameAllocator(func() (pmm.Frame, *kernel.Error) { return pmm.InvalidFrame, expErr }) + unmapFn = func(p Page) *kernel.Error { return nil } + mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), nil } + handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {} + + if err := Init(); err != expErr { + t.Fatalf("expected error: %v; got %v", expErr, err) + } + }) + + t.Run("blank page mapping error", func(t *testing.T) { + expErr := &kernel.Error{Module: "test", Message: "map failed"} + + SetFrameAllocator(func() (pmm.Frame, *kernel.Error) { + addr := uintptr(unsafe.Pointer(&reservedPage[0])) + return pmm.Frame(addr >> mem.PageShift), nil + }) + unmapFn = func(p Page) *kernel.Error { return nil } + mapTemporaryFn = func(f pmm.Frame) (Page, *kernel.Error) { return Page(f), expErr } + handleExceptionWithCodeFn = func(_ irq.ExceptionNum, _ irq.ExceptionHandlerWithCode) {} + + if err := Init(); err != expErr { + t.Fatalf("expected error: %v; got %v", expErr, err) + } + }) } func readTTY(fb []byte) string {