Mirror of https://github.com/taigrr/gopher-os, synced 2025-01-18 04:43:13 -08:00
acpi: refactor block exec code and add support for stack traces
The VM.execBlock method has been converted to a standalone function; access to the VM is facilitated via the ctx argument. The VM now calculates the start and end instruction pointer (IP) offsets for all scoped blocks in a pre-processing pass, which is initiated by the VM.checkEntities call when it encounters a Method entity. As opcodes may trigger the execution of nested blocks of opcodes (e.g. if/else) or even mutate the execution flow (e.g. a break inside a while loop), the VM needs to keep track of the IP offsets of all scoped blocks so it can provide accurate IP values for stack traces.

The stack trace is included as part of the *Error struct and is populated automatically by execBlock whenever an error occurs. A convenience Error.StackTrace() method is provided for obtaining a formatted version of the stack trace as a string. Each stack trace entry contains the name of the method inside which the error occurred, the name of the table where the method was defined, as well as the opcode type and the IP offset (relative to the method start) where the error occurred. Stack traces are also preserved across method invocations.

An example flow that generates a fatal error is included in the vm-testsuite-DSDT.dsl file (method \NST2). Calling this method with the appropriate arguments generates a stack trace that looks like this:

Stack trace:
[000] [DSDT] [NST2():0x2] opcode: Store
[001] [DSDT] [NST3():0x1] opcode: Add
[002] [DSDT] [NST4():0x8] opcode: If
[003] [DSDT] [NST4():0x9] opcode: Fatal
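For illustration, here is a small, self-contained Go sketch (not part of this commit) of the trace formatting behaviour described above: frames are appended innermost-error-first as the error bubbles up through execBlock/execMethod, so the formatter walks the slice in LIFO order. It reuses the frame fields and output layout introduced by the commit, but substitutes fmt for the kernel's kfmt helper and a %03d index format so it runs as a regular Go program.

package main

import (
	"bytes"
	"fmt"
)

// frame mirrors the trace entry added by the commit: the table that defined
// the method, the method name, the IP offset relative to the method start,
// and the opcode that was executing when the error occurred.
type frame struct {
	table  string
	method string
	IP     uint32
	instr  string
}

func main() {
	// Frames are appended as the error unwinds, innermost frame first, so the
	// slice is printed back-to-front to read outermost call first.
	trace := []*frame{
		{table: "DSDT", method: "NST4", IP: 0x9, instr: "Fatal"},
		{table: "DSDT", method: "NST4", IP: 0x8, instr: "If"},
		{table: "DSDT", method: "NST3", IP: 0x1, instr: "Add"},
		{table: "DSDT", method: "NST2", IP: 0x2, instr: "Store"},
	}

	var buf bytes.Buffer
	buf.WriteString("Stack trace:\n")
	for index, offset := 0, len(trace)-1; offset >= 0; index, offset = index+1, offset-1 {
		entry := trace[offset]
		fmt.Fprintf(&buf, "[%03d] [%s] [%s():0x%x] opcode: %s\n", index, entry.table, entry.method, entry.IP, entry.instr)
	}

	fmt.Print(buf.String())
}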
parent 540986cb0b
commit ef88921fb9
@@ -33,6 +33,10 @@ type ScopeEntity interface {
 
 	removeChild(Entity)
 	lastChild() Entity
+
+	setBlockIPOffsets(uint32, uint32)
+	blockStartIPOffset() uint32
+	blockEndIPOffset() uint32
 }
 
 // unnamedEntity defines an unnamed entity that can be attached to a parent scope.
@@ -136,6 +140,12 @@ type scopeEntity struct {
 
 	name     string
 	children []Entity
+
+	// The VM keeps track of the start and end instruction offsets for each
+	// scope entity relative to its parent scope. This allows the VM to report
+	// accurate IP values when emitting stack traces.
+	blockStartIP uint32
+	blockEndIP   uint32
 }
 
 func (ent *scopeEntity) getOpcode() opcode { return ent.op }
@@ -174,6 +184,11 @@ func (ent *scopeEntity) removeChild(child Entity) {
 }
 func (ent *scopeEntity) TableHandle() uint8     { return ent.tableHandle }
 func (ent *scopeEntity) setTableHandle(h uint8) { ent.tableHandle = h }
+func (ent *scopeEntity) setBlockIPOffsets(start, end uint32) {
+	ent.blockStartIP, ent.blockEndIP = start, end
+}
+func (ent *scopeEntity) blockStartIPOffset() uint32 { return ent.blockStartIP }
+func (ent *scopeEntity) blockEndIPOffset() uint32   { return ent.blockEndIP }
 
 // bufferEntity defines a buffer object.
 type bufferEntity struct {
@@ -1,7 +1,9 @@
 package aml
 
 import (
+	"bytes"
 	"gopheros/device/acpi/table"
+	"gopheros/kernel/kfmt"
 	"io"
 )
 
@@ -61,11 +63,29 @@ type execContext struct {
 	retVal interface{}
 
 	vm *VM
+	IP uint32
+}
+
+// frame contains information about the location within a method (the VM
+// instruction pointer) and the actual AML opcode that the VM was processing
+// when an error occurred. Entry also contains information about the method
+// name and the ACPI table that defined it.
+type frame struct {
+	table  string
+	method string
+	IP     uint32
+	instr  string
 }
 
 // Error describes errors that occur while executing AML code.
 type Error struct {
 	message string
+
+	// trace contains a list of trace entries that correspond to the AML method
+	// invocations up to the point where an error occurred. To construct the
+	// correct execution tree from a Trace, its entries must be processed in
+	// LIFO order.
+	trace []*frame
 }
 
 // Error implements the error interface.
@@ -73,6 +93,24 @@ func (e *Error) Error() string {
 	return e.message
 }
 
+// StackTrace returns a formatted stack trace for this error.
+func (e *Error) StackTrace() string {
+	if len(e.trace) == 0 {
+		return "No stack trace available"
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString("Stack trace:\n")
+
+	// We need to process the trace list in LIFO order.
+	for index, offset := 0, len(e.trace)-1; index < len(e.trace); index, offset = index+1, offset-1 {
+		entry := e.trace[offset]
+		kfmt.Fprintf(&buf, "[%3x] [%s] [%s():0x%x] opcode: %s\n", index, entry.table, entry.method, entry.IP, entry.instr)
+	}
+
+	return buf.String()
+}
+
 // VM is a structure that stores the output of the AML bytecode parser and
 // provides methods for interpreting any executable opcode.
 type VM struct {
@@ -89,7 +127,8 @@ type VM struct {
 	// value so that it can be used by the data conversion helpers.
 	sizeOfIntInBits int
 
 	jumpTable         [numOpcodes + 1]opHandler
+	tableHandleToName map[uint8]string
 }
 
 // NewVM creates a new AML VM and initializes it with the default scope
@@ -108,12 +147,14 @@ func NewVM(errWriter io.Writer, resolver table.Resolver) *VM {
 // Init attempts to locate and parse the AML byte-code contained in the
 // system's DSDT and SSDT tables.
 func (vm *VM) Init() *Error {
-	for tableHandle, tableName := range []string{"DSDT", "SSDT"} {
+	for _, tableName := range []string{"DSDT", "SSDT"} {
 		header := vm.tableResolver.LookupTable(tableName)
 		if header == nil {
 			continue
 		}
-		if err := vm.tableParser.ParseAML(uint8(tableHandle+1), tableName, header); err != nil {
+
+		tableHandle := vm.allocateTableHandle(tableName)
+		if err := vm.tableParser.ParseAML(tableHandle, tableName, header); err != nil {
 			return &Error{message: err.Module + ": " + err.Error()}
 		}
 
@@ -129,6 +170,18 @@ func (vm *VM) Init() *Error {
 	return vm.checkEntities()
 }
 
+// allocateTableHandle reserves a handle for tableName and updates the internal
+// tableHandleToName map.
+func (vm *VM) allocateTableHandle(tableName string) uint8 {
+	if vm.tableHandleToName == nil {
+		vm.tableHandleToName = make(map[uint8]string)
+	}
+
+	nextHandle := uint8(len(vm.tableHandleToName) + 1)
+	vm.tableHandleToName[nextHandle] = tableName
+	return nextHandle
+}
+
 // Lookup traverses a potentially nested absolute AML path and returns the
 // Entity reachable via that path or nil if the path does not point to a
 // defined Entity.
@@ -169,8 +222,10 @@ func (vm *VM) checkEntities() *Error {
 
 		switch typ := ent.(type) {
 		case *Method:
-			// Do not recurse into methods; at this stage we are only interested in
-			// initializing static entities.
+			// Calculate the start and end IP value for each scoped entity inside the
+			// method. This is required for emitting accurate stack traces when the
+			// method is invoked.
+			_ = calcIPOffsets(typ, 0)
 			return false
 		case *bufferEntity:
 			// According to p.911-912 of the spec:
@@ -222,17 +277,35 @@ func (vm *VM) execMethod(ctx *execContext, method *Method, args ...interface{})
 	for argIndex := 0; argIndex < len(args); argIndex++ {
 		invCtx.methodArg[argIndex], err = vmLoad(ctx, args[argIndex])
 		if err != nil {
+			err.trace = append(err.trace, &frame{
+				table:  vm.tableHandleToName[method.TableHandle()],
+				method: method.Name(),
+				IP:     0,
+				instr:  "read method args",
+			})
 			return err
 		}
 	}
 
 	// Execute method and resolve the return value before storing it to the
 	// parent context's retVal.
-	err = vm.execBlock(&invCtx, method)
-	if err == nil {
+	if err = execBlock(&invCtx, method); err == nil {
 		ctx.retVal, err = vmLoad(&invCtx, invCtx.retVal)
 	}
 
+	// Populate missing data in captured trace till we reach a frame that has its
+	// table name field populated.
+	if err != nil {
+		for index := len(err.trace) - 1; index >= 0; index-- {
+			if err.trace[index].table != "" {
+				break
+			}
+
+			err.trace[index].table = vm.tableHandleToName[method.TableHandle()]
+			err.trace[index].method = method.Name()
+		}
+	}
+
 	return err
 }
 
@@ -240,13 +313,29 @@ func (vm *VM) execMethod(ctx *execContext, method *Method, args ...interface{})
 // If all opcodes are successfully executed, the provided execContext will be
 // updated to reflect the current VM state. Otherwise, an error will be
 // returned.
-func (vm *VM) execBlock(ctx *execContext, block ScopeEntity) *Error {
-	instrList := block.Children()
-	numInstr := len(instrList)
+func execBlock(ctx *execContext, block ScopeEntity) *Error {
+	var (
+		instrList  = block.Children()
+		numInstr   = len(instrList)
+		instrIndex int
+		lastIP     uint32
+	)
 
-	for instrIndex := 0; instrIndex < numInstr && ctx.ctrlFlow == ctrlFlowTypeNextOpcode; instrIndex++ {
+	for ctx.IP, instrIndex = block.blockStartIPOffset(), 0; instrIndex < numInstr && ctx.ctrlFlow == ctrlFlowTypeNextOpcode; instrIndex++ {
+		// If the opcode executes a scoped block then ctx.IP will be modified and
+		// unless we keep track of its original value we will not be able to
+		// provide an accurate trace if the opcode handler returns back an error.
+		ctx.IP++
+		lastIP = ctx.IP
+
 		instr := instrList[instrIndex]
-		if err := vm.jumpTable[instr.getOpcode()](ctx, instr); err != nil {
+		if err := ctx.vm.jumpTable[instr.getOpcode()](ctx, instr); err != nil {
+			// Append an entry to the stack trace; the parent execMethod call will
+			// automatically populate the missing method and table information.
+			err.trace = append(err.trace, &frame{
+				IP:    lastIP,
+				instr: instr.getOpcode().String(),
+			})
 			return err
 		}
 	}
@@ -254,6 +343,34 @@ func (vm *VM) execBlock(ctx *execContext, block ScopeEntity) *Error {
 	return nil
 }
 
+// calcIPOffsets visits all scoped entities inside the method m and updates
+// their start and end IP offset values relative to the provided relIP value.
+func calcIPOffsets(scope ScopeEntity, relIP uint32) uint32 {
+	var startIP = relIP
+
+	for _, ent := range scope.Children() {
+		relIP++
+
+		switch ent.getOpcode() {
+		case opIf, opWhile:
+			// arg 0 is the predicate which we must exclude from the calculation
+			for argIndex, arg := range ent.getArgs() {
+				if argIndex == 0 {
+					continue
+				}
+
+				if argEnt, isScopedEnt := arg.(ScopeEntity); isScopedEnt {
+					// Recursively visit scoped entities and adjust the current IP
+					relIP = calcIPOffsets(argEnt, relIP)
+				}
+			}
+		}
+	}
+
+	scope.setBlockIPOffsets(startIP, relIP)
+	return relIP
+}
+
 // defaultACPIScopes constructs a tree of scoped entities that correspond to
 // the predefined scopes contained in the ACPI specification and returns back
 // its root node.
@@ -366,7 +366,7 @@ func TestVMConvert(t *testing.T) {
 			&unnamedEntity{op: 0}, // uses our patched jumpTable[0] that always errors
 			valueTypeString,
 			nil,
-			&Error{message: "vmLoad: something went wrong"},
+			&Error{message: "something went wrong"},
 		},
 	}
 
@@ -16,6 +16,7 @@ func (vm *VM) populateJumpTable() {
 	vm.jumpTable[opContinue] = vmOpContinue
 	vm.jumpTable[opWhile] = vmOpWhile
 	vm.jumpTable[opIf] = vmOpIf
+	vm.jumpTable[opFatal] = vmOpFatal
 
 	// ALU opcodes
 	vm.jumpTable[opAdd] = vmOpAdd
@@ -25,7 +25,6 @@ func vmLoad(ctx *execContext, arg interface{}) (interface{}, *Error) {
 		// In this case, try evaluating the opcode and replace arg with the
 		// output value that gets stored stored into ctx.retVal
 		if err := ctx.vm.jumpTable[typ.getOpcode()](ctx, typ); err != nil {
-			err.message = "vmLoad: " + err.message
 			return nil, err
 		}
 
@@ -91,7 +91,7 @@ func TestVMLoad(t *testing.T) {
 			&execContext{vm: vm},
 			&unnamedEntity{op: 0}, // uses our patched jumpTable[0] that always errors
 			nil,
-			&Error{message: "vmLoad: something went wrong"},
+			&Error{message: "something went wrong"},
 		},
 		// nested opcode which does not return an error
 		{
@@ -53,7 +53,7 @@ func TestArithmeticExpressions(t *testing.T) {
 			vm: vm,
 		}
 
-		if err := vm.execBlock(ctx, method); err != nil {
+		if err := execBlock(ctx, method); err != nil {
 			t.Errorf("[spec %02d] %s: invocation failed: %v\n", specIndex, spec.method, err)
 			continue
 		}
@@ -174,7 +174,7 @@ func TestBitwiseExpressions(t *testing.T) {
 			vm: vm,
 		}
 
-		if err := vm.execBlock(ctx, method); err != nil {
+		if err := execBlock(ctx, method); err != nil {
 			t.Errorf("[spec %02d] %s: invocation failed: %v\n", specIndex, spec.method, err)
 			continue
 		}
@@ -323,7 +323,7 @@ func TestLogicExpressions(t *testing.T) {
 			vm: vm,
 		}
 
-		if err := vm.execBlock(ctx, method); err != nil {
+		if err := execBlock(ctx, method); err != nil {
 			t.Errorf("[spec %02d] %s: invocation failed: %v\n", specIndex, spec.method, err)
 			continue
 		}
@@ -1,5 +1,10 @@
 package aml
 
+import (
+	"bytes"
+	"gopheros/kernel/kfmt"
+)
+
 // Args: val
 // Set val as the return value in ctx and change the ctrlFlow
 // type to ctrlFlowTypeFnReturn.
@@ -56,13 +61,15 @@ func vmOpWhile(ctx *execContext, ent Entity) *Error {
 			break
 		}
 
-		err = ctx.vm.execBlock(ctx, whileBlock)
+		err = execBlock(ctx, whileBlock)
 		if ctx.ctrlFlow == ctrlFlowTypeFnReturn {
 			// Preserve return flow type so we exit the innermost function
 			break
 		} else if ctx.ctrlFlow == ctrlFlowTypeBreak {
 			// Exit while block and switch to sequential execution for the code
-			// that follows
+			// that follows. The current IP needs to be adjusted to point to the
+			// end of the current block
+			ctx.IP = whileBlock.blockEndIPOffset()
 			ctx.ctrlFlow = ctrlFlowTypeNextOpcode
 			break
 		}
@@ -110,9 +117,9 @@ func vmOpIf(ctx *execContext, ent Entity) *Error {
 	}
 
 	if predResAsUint, isUint := predRes.(uint64); !isUint || predResAsUint == 1 {
-		return ctx.vm.execBlock(ctx, ifBlock)
+		return execBlock(ctx, ifBlock)
 	} else if elseBlock != nil {
-		return ctx.vm.execBlock(ctx, elseBlock)
+		return execBlock(ctx, elseBlock)
 	}
 
 	return nil
@@ -131,3 +138,28 @@ func vmOpMethodInvocation(ctx *execContext, ent Entity) *Error {
 
 	return ctx.vm.execMethod(ctx, inv.method, ent.getArgs()...)
 }
+
+// Args: type, code, arg
+//
+// Generate an OEM-defined fatal error. The OSPM must catch this error,
+// optionally log it and perform a controlled system shutdown
+func vmOpFatal(ctx *execContext, ent Entity) *Error {
+	var (
+		buf     bytes.Buffer
+		errType uint64
+		errCode uint64
+		errArg  uint64
+		err     *Error
+	)
+
+	if errType, err = vmToIntArg(ctx, ent, 0); err != nil {
+		return err
+	}
+
+	if errCode, errArg, err = vmToIntArgs2(ctx, ent, 1, 2); err != nil {
+		return err
+	}
+
+	kfmt.Fprintf(&buf, "fatal OEM-defined error (type: 0x%x, code: 0x%x, arg: 0x%x)", errType, errCode, errArg)
+	return &Error{message: buf.String()}
+}
@@ -53,7 +53,7 @@ func TestVMFlowChanges(t *testing.T) {
 			vm: vm,
 		}
 
-		if err := vm.execBlock(ctx, method); err != nil {
+		if err := execBlock(ctx, method); err != nil {
 			t.Errorf("[spec %02d] %s: invocation failed: %v\n", specIndex, spec.method, err)
 			continue
 		}
@@ -178,6 +178,24 @@ func TestVMFlowOpErrors(t *testing.T) {
 			},
 			op2Err,
 		},
+		{
+			vmOpFatal,
+			[]interface{}{
+				&scopeEntity{},
+				uint64(42),
+				uint64(128),
+			},
+			op0Err,
+		},
+		{
+			vmOpFatal,
+			[]interface{}{
+				uint64(42),
+				&scopeEntity{},
+				uint64(128),
+			},
+			op0Err,
+		},
 	}
 
 	ctx := &execContext{vm: vm}
@@ -222,9 +240,16 @@ func TestVMNestedMethodCalls(t *testing.T) {
 
 		ctx := &execContext{vm: vm}
 		expErr := "call to undefined method: UNDEFINED"
-		if err := vmOpMethodInvocation(ctx, inv); err == nil || err.Error() != expErr {
+		err := vmOpMethodInvocation(ctx, inv)
+		if err == nil || err.Error() != expErr {
 			t.Fatalf("expected error: %s; got %v", expErr, err)
 		}
+
+		// Since we are invoking the method directly instead of within an execBlock
+		// call, the error stack trace will not be populated
+		if exp, got := "No stack trace available", err.StackTrace(); got != exp {
+			t.Fatalf("expected error.StackTrace() to return:\n%s\ngot:\n%s", exp, got)
+		}
 	})
 
 	t.Run("method arg load error", func(t *testing.T) {
@@ -245,4 +270,29 @@ func TestVMNestedMethodCalls(t *testing.T) {
 			t.Fatalf("expected error: %s; got %v", op0Err, err)
 		}
 	})
+
+	t.Run("method raises fatal error", func(t *testing.T) {
+		inv := &methodInvocationEntity{
+			unnamedEntity: unnamedEntity{args: []interface{}{uint64(0x42)}},
+			methodName:    `\NST2`,
+		}
+
+		ctx := &execContext{vm: vm}
+		err := vmOpMethodInvocation(ctx, inv)
+		expErr := "fatal OEM-defined error (type: 0xde, code: 0xad, arg: 0xc0de)"
+		if err == nil || err.Error() != expErr {
+			t.Fatalf("expected to get error: %s; got %v", expErr, err)
+		}
+
+		expTrace := `Stack trace:
+[000] [DSDT] [NST2():0x2] opcode: Store
+[001] [DSDT] [NST3():0x1] opcode: Add
+[002] [DSDT] [NST4():0x8] opcode: If
+[003] [DSDT] [NST4():0x9] opcode: Fatal
+`
+
+		if got := err.StackTrace(); got != expTrace {
+			t.Fatalf("expected error.StackTrace() to return:\n%s\ngot:\n%s", expTrace, got)
+		}
+	})
 }
@@ -119,8 +119,8 @@ func TestVMExecBlockControlFlows(t *testing.T) {
 			return nil
 		}
 
-		ctx := new(execContext)
-		if err := vm.execBlock(ctx, block); err != nil {
+		ctx := &execContext{vm: vm}
+		if err := execBlock(ctx, block); err != nil {
 			t.Fatal(err)
 		}
 
@@ -157,8 +157,8 @@ func TestVMExecBlockControlFlows(t *testing.T) {
 			return nil
 		}
 
-		ctx := new(execContext)
-		if err := vm.execBlock(ctx, block); err != nil {
+		ctx := &execContext{vm: vm}
+		if err := execBlock(ctx, block); err != nil {
 			t.Fatal(err)
 		}
 
@@ -197,8 +197,8 @@ func TestVMExecBlockControlFlows(t *testing.T) {
 			return nil
 		}
 
-		ctx := new(execContext)
-		if err := vm.execBlock(ctx, block); err != nil {
+		ctx := &execContext{vm: vm}
+		if err := execBlock(ctx, block); err != nil {
 			t.Fatal(err)
 		}
 
@@ -233,8 +233,8 @@ func TestVMExecBlockControlFlows(t *testing.T) {
 			return nil
 		}
 
-		ctx := new(execContext)
-		if err := vm.execBlock(ctx, block); err != nil {
+		ctx := &execContext{vm: vm}
+		if err := execBlock(ctx, block); err != nil {
 			t.Fatal(err)
 		}
 
@@ -262,9 +262,9 @@ func TestVMExecBlockControlFlows(t *testing.T) {
 
 		vm.jumpTable[0] = opExecNotImplemented
 
-		ctx := new(execContext)
+		ctx := &execContext{vm: vm}
 		expErr := &Error{message: "opcode Zero not implemented"}
-		if err := vm.execBlock(ctx, block); err == nil || err.Error() != expErr.Error() {
+		if err := execBlock(ctx, block); err == nil || err.Error() != expErr.Error() {
 			t.Errorf("expected to get error: %v; got: %v", expErr, err)
 		}
 	})
Binary file not shown.
@@ -247,4 +247,36 @@ DefinitionBlock ("vm-testsuite-DSDT.aml", "DSDT", 2, "GOPHER", "GOPHEROS", 0x000
         Return(Arg0+42)
     }
 
+    // Nested method invocations that trigger an error. This block tests the
+    // generation of execution traces by the AML VM
+    Method(NST2, 1, NotSerialized)
+    {
+        Local1 = Arg0
+        Local2 = NST3(Local1)
+        Return(Local2)
+    }
+
+    Method(NST3, 1, NotSerialized)
+    {
+        Local1 = Arg0 + NST4(0x42)
+        Return(Local1)
+    }
+
+    Method(NST4, 1, NotSerialized)
+    {
+        Local0 = 0;
+        Local1 = 1;
+        While(Local0 != 10){
+            Local0++
+            if( Local0 == 5 ) {
+                Break
+            }
+            Local1++
+        }
+
+        if(Arg0 == 0x42){
+            Fatal(0xde, 0xad, 0xc0de)
+        }
+        Return(0)
+    }
 }