diff options
author | upstream source tree <ports@midipix.org> | 2015-03-15 20:14:05 -0400 |
---|---|---|
committer | upstream source tree <ports@midipix.org> | 2015-03-15 20:14:05 -0400 |
commit | 554fd8c5195424bdbcabf5de30fdc183aba391bd (patch) | |
tree | 976dc5ab7fddf506dadce60ae936f43f58787092 /libgo/go/exp | |
download | cbb-gcc-4.6.4-554fd8c5195424bdbcabf5de30fdc183aba391bd.tar.bz2 cbb-gcc-4.6.4-554fd8c5195424bdbcabf5de30fdc183aba391bd.tar.xz |
obtained gcc-4.6.4.tar.bz2 from upstream website; [branch: upstream]
verified gcc-4.6.4.tar.bz2.sig;
imported gcc-4.6.4 source tree from verified upstream tarball.
downloading a git-generated archive based on the 'upstream' tag
should provide you with a source tree that is binary identical
to the one extracted from the above tarball.
if you have obtained the source via the command 'git clone',
however, do note that line-endings of files in your working
directory might differ from line-endings of the respective
files in the upstream repository.
Diffstat (limited to 'libgo/go/exp')
36 files changed, 15057 insertions, 0 deletions
diff --git a/libgo/go/exp/README b/libgo/go/exp/README new file mode 100644 index 000000000..e602e3ac9 --- /dev/null +++ b/libgo/go/exp/README @@ -0,0 +1,3 @@ +This directory tree contains experimental packages and +unfinished code that is subject to even more change than the +rest of the Go tree. diff --git a/libgo/go/exp/datafmt/datafmt.go b/libgo/go/exp/datafmt/datafmt.go new file mode 100644 index 000000000..46c412342 --- /dev/null +++ b/libgo/go/exp/datafmt/datafmt.go @@ -0,0 +1,731 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* The datafmt package implements syntax-directed, type-driven formatting + of arbitrary data structures. Formatting a data structure consists of + two phases: first, a parser reads a format specification and builds a + "compiled" format. Then, the format can be applied repeatedly to + arbitrary values. Applying a format to a value evaluates to a []byte + containing the formatted value bytes, or nil. + + A format specification is a set of package declarations and format rules: + + Format = [ Entry { ";" Entry } [ ";" ] ] . + Entry = PackageDecl | FormatRule . + + (The syntax of a format specification is presented in the same EBNF + notation as used in the Go language specification. The syntax of white + space, comments, identifiers, and string literals is the same as in Go.) + + A package declaration binds a package name (such as 'ast') to a + package import path (such as '"go/ast"'). Each package used (in + a type name, see below) must be declared once before use. + + PackageDecl = PackageName ImportPath . + PackageName = identifier . + ImportPath = string . + + A format rule binds a rule name to a format expression. A rule name + may be a type name or one of the special names 'default' or '/'. 
+ A type name may be the name of a predeclared type (for example, 'int', + 'float32', etc.), the package-qualified name of a user-defined type + (for example, 'ast.MapType'), or an identifier indicating the structure + of unnamed composite types ('array', 'chan', 'func', 'interface', 'map', + or 'ptr'). Each rule must have a unique name; rules can be declared in + any order. + + FormatRule = RuleName "=" Expression . + RuleName = TypeName | "default" | "/" . + TypeName = [ PackageName "." ] identifier . + + To format a value, the value's type name is used to select the format rule + (there is an override mechanism, see below). The format expression of the + selected rule specifies how the value is formatted. Each format expression, + when applied to a value, evaluates to a byte sequence or nil. + + In its most general form, a format expression is a list of alternatives, + each of which is a sequence of operands: + + Expression = [ Sequence ] { "|" [ Sequence ] } . + Sequence = Operand { Operand } . + + The formatted result produced by an expression is the result of the first + alternative sequence that evaluates to a non-nil result; if there is no + such alternative, the expression evaluates to nil. The result produced by + an operand sequence is the concatenation of the results of its operands. + If any operand in the sequence evaluates to nil, the entire sequence + evaluates to nil. + + There are five kinds of operands: + + Operand = Literal | Field | Group | Option | Repetition . + + Literals evaluate to themselves, with two substitutions. First, + %-formats expand in the manner of fmt.Printf, with the current value + passed as the parameter. Second, the current indentation (see below) + is inserted after every newline or form feed character. + + Literal = string . 
+ + This table shows string literals applied to the value 42 and the + corresponding formatted result: + + "foo" foo + "%x" 2a + "x = %d" x = 42 + "%#x = %d" 0x2a = 42 + + A field operand is a field name optionally followed by an alternate + rule name. The field name may be an identifier or one of the special + names @ or *. + + Field = FieldName [ ":" RuleName ] . + FieldName = identifier | "@" | "*" . + + If the field name is an identifier, the current value must be a struct, + and there must be a field with that name in the struct. The same lookup + rules apply as in the Go language (for instance, the name of an anonymous + field is the unqualified type name). The field name denotes the field + value in the struct. If the field is not found, formatting is aborted + and an error message is returned. (TODO consider changing the semantics + such that if a field is not found, it evaluates to nil). + + The special name '@' denotes the current value. + + The meaning of the special name '*' depends on the type of the current + value: + + array, slice types array, slice element (inside {} only, see below) + interfaces value stored in interface + pointers value pointed to by pointer + + (Implementation restriction: channel, function and map types are not + supported due to missing reflection support). + + Fields are evaluated as follows: If the field value is nil, or an array + or slice element does not exist, the result is nil (see below for details + on array/slice elements). If the value is not nil the field value is + formatted (recursively) using the rule corresponding to its type name, + or the alternate rule name, if given. + + The following example shows a complete format specification for a + struct 'myPackage.Point'. 
Assume the package + + package myPackage // in directory myDir/myPackage + type Point struct { + name string; + x, y int; + } + + Applying the format specification + + myPackage "myDir/myPackage"; + int = "%d"; + hexInt = "0x%x"; + string = "---%s---"; + myPackage.Point = name "{" x ", " y:hexInt "}"; + + to the value myPackage.Point{"foo", 3, 15} results in + + ---foo---{3, 0xf} + + Finally, an operand may be a grouped, optional, or repeated expression. + A grouped expression ("group") groups a more complex expression (body) + so that it can be used in place of a single operand: + + Group = "(" [ Indentation ">>" ] Body ")" . + Indentation = Expression . + Body = Expression . + + A group body may be prefixed by an indentation expression followed by '>>'. + The indentation expression is applied to the current value like any other + expression and the result, if not nil, is appended to the current indentation + during the evaluation of the body (see also formatting state, below). + + An optional expression ("option") is enclosed in '[]' brackets. + + Option = "[" Body "]" . + + An option evaluates to its body, except that if the body evaluates to nil, + the option expression evaluates to an empty []byte. Thus an option's purpose + is to protect the expression containing the option from a nil operand. + + A repeated expression ("repetition") is enclosed in '{}' braces. + + Repetition = "{" Body [ "/" Separator ] "}" . + Separator = Expression . + + A repeated expression is evaluated as follows: The body is evaluated + repeatedly and its results are concatenated until the body evaluates + to nil. The result of the repetition is the (possibly empty) concatenation, + but it is never nil. An implicit index is supplied for the evaluation of + the body: that index is used to address elements of arrays or slices. If + the corresponding elements do not exist, the field denoting the element + evaluates to nil (which in turn may terminate the repetition). 
+ + The body of a repetition may be followed by a '/' and a "separator" + expression. If the separator is present, it is invoked between repetitions + of the body. + + The following example shows a complete format specification for formatting + a slice of unnamed type. Applying the specification + + int = "%b"; + array = { * / ", " }; // array is the type name for an unnamed slice + + to the value '[]int{2, 3, 5, 7}' results in + + 10, 11, 101, 111 + + Default rule: If a format rule named 'default' is present, it is used for + formatting a value if no other rule was found. A common default rule is + + default = "%v" + + to provide default formatting for basic types without having to specify + a specific rule for each basic type. + + Global separator rule: If a format rule named '/' is present, it is + invoked with the current value between literals. If the separator + expression evaluates to nil, it is ignored. + + For instance, a global separator rule may be used to punctuate a sequence + of values with commas. The rules: + + default = "%v"; + / = ", "; + + will format an argument list by printing each one in its default format, + separated by a comma and a space. +*/ +package datafmt + +import ( + "bytes" + "fmt" + "go/token" + "io" + "os" + "reflect" + "runtime" +) + + +// ---------------------------------------------------------------------------- +// Format representation + +// Custom formatters implement the Formatter function type. +// A formatter is invoked with the current formatting state, the +// value to format, and the rule name under which the formatter +// was installed (the same formatter function may be installed +// under different names). The formatter may access the current state +// to guide formatting and use State.Write to append to the state's +// output. +// +// A formatter must return a boolean value indicating if it evaluated +// to a non-nil value (true), or a nil value (false). 
+// +type Formatter func(state *State, value interface{}, ruleName string) bool + + +// A FormatterMap is a set of custom formatters. +// It maps a rule name to a formatter function. +// +type FormatterMap map[string]Formatter + + +// A parsed format expression is built from the following nodes. +// +type ( + expr interface{} + + alternatives []expr // x | y | z + + sequence []expr // x y z + + literal [][]byte // a list of string segments, possibly starting with '%' + + field struct { + fieldName string // including "@", "*" + ruleName string // "" if no rule name specified + } + + group struct { + indent, body expr // (indent >> body) + } + + option struct { + body expr // [body] + } + + repetition struct { + body, separator expr // {body / separator} + } + + custom struct { + ruleName string + fun Formatter + } +) + + +// A Format is the result of parsing a format specification. +// The format may be applied repeatedly to format values. +// +type Format map[string]expr + + +// ---------------------------------------------------------------------------- +// Formatting + +// An application-specific environment may be provided to Format.Eval; +// the environment is available inside custom formatters via State.Env(). +// Environments must implement copying; the Copy method must return a +// complete copy of the receiver. This is necessary so that the formatter +// can save and restore an environment (in case of an absent expression). +// +// If the Environment doesn't change during formatting (this is under +// control of the custom formatters), the Copy function can simply return +// the receiver, and thus can be very light-weight. +// +type Environment interface { + Copy() Environment +} + + +// State represents the current formatting state. +// It is provided as argument to custom formatters. 
+// +type State struct { + fmt Format // format in use + env Environment // user-supplied environment + errors chan os.Error // not chan *Error (errors <- nil would be wrong!) + hasOutput bool // true after the first literal has been written + indent bytes.Buffer // current indentation + output bytes.Buffer // format output + linePos token.Position // position of line beginning (Column == 0) + default_ expr // possibly nil + separator expr // possibly nil +} + + +func newState(fmt Format, env Environment, errors chan os.Error) *State { + s := new(State) + s.fmt = fmt + s.env = env + s.errors = errors + s.linePos = token.Position{Line: 1} + + // if we have a default rule, cache its expression for fast access + if x, found := fmt["default"]; found { + s.default_ = x + } + + // if we have a global separator rule, cache its expression for fast access + if x, found := fmt["/"]; found { + s.separator = x + } + + return s +} + + +// Env returns the environment passed to Format.Eval. +func (s *State) Env() interface{} { return s.env } + + +// LinePos returns the position of the current line beginning +// in the state's output buffer. Line numbers start at 1. +// +func (s *State) LinePos() token.Position { return s.linePos } + + +// Pos returns the position of the next byte to be written to the +// output buffer. Line numbers start at 1. +// +func (s *State) Pos() token.Position { + offs := s.output.Len() + return token.Position{Line: s.linePos.Line, Column: offs - s.linePos.Offset, Offset: offs} +} + + +// Write writes data to the output buffer, inserting the indentation +// string after each newline or form feed character. It cannot return an error. 
+// +func (s *State) Write(data []byte) (int, os.Error) { + n := 0 + i0 := 0 + for i, ch := range data { + if ch == '\n' || ch == '\f' { + // write text segment and indentation + n1, _ := s.output.Write(data[i0 : i+1]) + n2, _ := s.output.Write(s.indent.Bytes()) + n += n1 + n2 + i0 = i + 1 + s.linePos.Offset = s.output.Len() + s.linePos.Line++ + } + } + n3, _ := s.output.Write(data[i0:]) + return n + n3, nil +} + + +type checkpoint struct { + env Environment + hasOutput bool + outputLen int + linePos token.Position +} + + +func (s *State) save() checkpoint { + saved := checkpoint{nil, s.hasOutput, s.output.Len(), s.linePos} + if s.env != nil { + saved.env = s.env.Copy() + } + return saved +} + + +func (s *State) restore(m checkpoint) { + s.env = m.env + s.output.Truncate(m.outputLen) +} + + +func (s *State) error(msg string) { + s.errors <- os.NewError(msg) + runtime.Goexit() +} + + +// TODO At the moment, unnamed types are simply mapped to the default +// names below. For instance, all unnamed arrays are mapped to +// 'array' which is not really sufficient. Eventually one may want +// to be able to specify rules for say an unnamed slice of T. +// + +func typename(typ reflect.Type) string { + switch typ.(type) { + case *reflect.ArrayType: + return "array" + case *reflect.SliceType: + return "array" + case *reflect.ChanType: + return "chan" + case *reflect.FuncType: + return "func" + case *reflect.InterfaceType: + return "interface" + case *reflect.MapType: + return "map" + case *reflect.PtrType: + return "ptr" + } + return typ.String() +} + +func (s *State) getFormat(name string) expr { + if fexpr, found := s.fmt[name]; found { + return fexpr + } + + if s.default_ != nil { + return s.default_ + } + + s.error(fmt.Sprintf("no format rule for type: '%s'", name)) + return nil +} + + +// eval applies a format expression fexpr to a value. 
If the expression +// evaluates internally to a non-nil []byte, that slice is appended to +// the state's output buffer and eval returns true. Otherwise, eval +// returns false and the state remains unchanged. +// +func (s *State) eval(fexpr expr, value reflect.Value, index int) bool { + // an empty format expression always evaluates + // to a non-nil (but empty) []byte + if fexpr == nil { + return true + } + + switch t := fexpr.(type) { + case alternatives: + // append the result of the first alternative that evaluates to + // a non-nil []byte to the state's output + mark := s.save() + for _, x := range t { + if s.eval(x, value, index) { + return true + } + s.restore(mark) + } + return false + + case sequence: + // append the result of all operands to the state's output + // unless a nil result is encountered + mark := s.save() + for _, x := range t { + if !s.eval(x, value, index) { + s.restore(mark) + return false + } + } + return true + + case literal: + // write separator, if any + if s.hasOutput { + // not the first literal + if s.separator != nil { + sep := s.separator // save current separator + s.separator = nil // and disable it (avoid recursion) + mark := s.save() + if !s.eval(sep, value, index) { + s.restore(mark) + } + s.separator = sep // enable it again + } + } + s.hasOutput = true + // write literal segments + for _, lit := range t { + if len(lit) > 1 && lit[0] == '%' { + // segment contains a %-format at the beginning + if lit[1] == '%' { + // "%%" is printed as a single "%" + s.Write(lit[1:]) + } else { + // use s instead of s.output to get indentation right + fmt.Fprintf(s, string(lit), value.Interface()) + } + } else { + // segment contains no %-formats + s.Write(lit) + } + } + return true // a literal never evaluates to nil + + case *field: + // determine field value + switch t.fieldName { + case "@": + // field value is current value + + case "*": + // indirection: operation is type-specific + switch v := value.(type) { + case 
*reflect.ArrayValue: + if v.Len() <= index { + return false + } + value = v.Elem(index) + + case *reflect.SliceValue: + if v.IsNil() || v.Len() <= index { + return false + } + value = v.Elem(index) + + case *reflect.MapValue: + s.error("reflection support for maps incomplete") + + case *reflect.PtrValue: + if v.IsNil() { + return false + } + value = v.Elem() + + case *reflect.InterfaceValue: + if v.IsNil() { + return false + } + value = v.Elem() + + case *reflect.ChanValue: + s.error("reflection support for chans incomplete") + + case *reflect.FuncValue: + s.error("reflection support for funcs incomplete") + + default: + s.error(fmt.Sprintf("error: * does not apply to `%s`", value.Type())) + } + + default: + // value is value of named field + var field reflect.Value + if sval, ok := value.(*reflect.StructValue); ok { + field = sval.FieldByName(t.fieldName) + if field == nil { + // TODO consider just returning false in this case + s.error(fmt.Sprintf("error: no field `%s` in `%s`", t.fieldName, value.Type())) + } + } + value = field + } + + // determine rule + ruleName := t.ruleName + if ruleName == "" { + // no alternate rule name, value type determines rule + ruleName = typename(value.Type()) + } + fexpr = s.getFormat(ruleName) + + mark := s.save() + if !s.eval(fexpr, value, index) { + s.restore(mark) + return false + } + return true + + case *group: + // remember current indentation + indentLen := s.indent.Len() + + // update current indentation + mark := s.save() + s.eval(t.indent, value, index) + // if the indentation evaluates to nil, the state's output buffer + // didn't change - either way it's ok to append the difference to + // the current indentation + s.indent.Write(s.output.Bytes()[mark.outputLen:s.output.Len()]) + s.restore(mark) + + // format group body + mark = s.save() + b := true + if !s.eval(t.body, value, index) { + s.restore(mark) + b = false + } + + // reset indentation + s.indent.Truncate(indentLen) + return b + + case *option: + // evaluate 
the body and append the result to the state's output + // buffer unless the result is nil + mark := s.save() + if !s.eval(t.body, value, 0) { // TODO is 0 index correct? + s.restore(mark) + } + return true // an option never evaluates to nil + + case *repetition: + // evaluate the body and append the result to the state's output + // buffer until a result is nil + for i := 0; ; i++ { + mark := s.save() + // write separator, if any + if i > 0 && t.separator != nil { + // nil result from separator is ignored + mark := s.save() + if !s.eval(t.separator, value, i) { + s.restore(mark) + } + } + if !s.eval(t.body, value, i) { + s.restore(mark) + break + } + } + return true // a repetition never evaluates to nil + + case *custom: + // invoke the custom formatter to obtain the result + mark := s.save() + if !t.fun(s, value.Interface(), t.ruleName) { + s.restore(mark) + return false + } + return true + } + + panic("unreachable") + return false +} + + +// Eval formats each argument according to the format +// f and returns the resulting []byte and os.Error. If +// an error occurred, the []byte contains the partially +// formatted result. An environment env may be passed +// in which is available in custom formatters through +// the state parameter. +// +func (f Format) Eval(env Environment, args ...interface{}) ([]byte, os.Error) { + if f == nil { + return nil, os.NewError("format is nil") + } + + errors := make(chan os.Error) + s := newState(f, env, errors) + + go func() { + for _, v := range args { + fld := reflect.NewValue(v) + if fld == nil { + errors <- os.NewError("nil argument") + return + } + mark := s.save() + if !s.eval(s.getFormat(typename(fld.Type())), fld, 0) { // TODO is 0 index correct? 
+ s.restore(mark) + } + } + errors <- nil // no errors + }() + + err := <-errors + return s.output.Bytes(), err +} + + +// ---------------------------------------------------------------------------- +// Convenience functions + +// Fprint formats each argument according to the format f +// and writes to w. The result is the total number of bytes +// written and an os.Error, if any. +// +func (f Format) Fprint(w io.Writer, env Environment, args ...interface{}) (int, os.Error) { + data, err := f.Eval(env, args...) + if err != nil { + // TODO should we print partial result in case of error? + return 0, err + } + return w.Write(data) +} + + +// Print formats each argument according to the format f +// and writes to standard output. The result is the total +// number of bytes written and an os.Error, if any. +// +func (f Format) Print(args ...interface{}) (int, os.Error) { + return f.Fprint(os.Stdout, nil, args...) +} + + +// Sprint formats each argument according to the format f +// and returns the resulting string. If an error occurs +// during formatting, the result string contains the +// partially formatted result followed by an error message. +// +func (f Format) Sprint(args ...interface{}) string { + var buf bytes.Buffer + _, err := f.Fprint(&buf, nil, args...) + if err != nil { + var i interface{} = args + fmt.Fprintf(&buf, "--- Sprint(%s) failed: %v", fmt.Sprint(i), err) + } + return buf.String() +} diff --git a/libgo/go/exp/datafmt/datafmt_test.go b/libgo/go/exp/datafmt/datafmt_test.go new file mode 100644 index 000000000..d7c70b21d --- /dev/null +++ b/libgo/go/exp/datafmt/datafmt_test.go @@ -0,0 +1,351 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package datafmt + +import ( + "fmt" + "testing" + "go/token" +) + + +var fset = token.NewFileSet() + + +func parse(t *testing.T, form string, fmap FormatterMap) Format { + f, err := Parse(fset, "", []byte(form), fmap) + if err != nil { + t.Errorf("Parse(%s): %v", form, err) + return nil + } + return f +} + + +func verify(t *testing.T, f Format, expected string, args ...interface{}) { + if f == nil { + return // allow other tests to run + } + result := f.Sprint(args...) + if result != expected { + t.Errorf( + "result : `%s`\nexpected: `%s`\n\n", + result, expected) + } +} + + +func formatter(s *State, value interface{}, rule_name string) bool { + switch rule_name { + case "/": + fmt.Fprintf(s, "%d %d %d", s.Pos().Line, s.LinePos().Column, s.Pos().Column) + return true + case "blank": + s.Write([]byte{' '}) + return true + case "int": + if value.(int)&1 == 0 { + fmt.Fprint(s, "even ") + } else { + fmt.Fprint(s, "odd ") + } + return true + case "nil": + return false + case "testing.T": + s.Write([]byte("testing.T")) + return true + } + panic("unreachable") + return false +} + + +func TestCustomFormatters(t *testing.T) { + fmap0 := FormatterMap{"/": formatter} + fmap1 := FormatterMap{"int": formatter, "blank": formatter, "nil": formatter} + fmap2 := FormatterMap{"testing.T": formatter} + + f := parse(t, `int=`, fmap0) + verify(t, f, ``, 1, 2, 3) + + f = parse(t, `int="#"`, nil) + verify(t, f, `###`, 1, 2, 3) + + f = parse(t, `int="#";string="%s"`, fmap0) + verify(t, f, "#1 0 1#1 0 7#1 0 13\n2 0 0foo2 0 8\n", 1, 2, 3, "\n", "foo", "\n") + + f = parse(t, ``, fmap1) + verify(t, f, `even odd even odd `, 0, 1, 2, 3) + + f = parse(t, `/ =@:blank; float64="#"`, fmap1) + verify(t, f, `# # #`, 0.0, 1.0, 2.0) + + f = parse(t, `float64=@:nil`, fmap1) + verify(t, f, ``, 0.0, 1.0, 2.0) + + f = parse(t, `testing "testing"; ptr=*`, fmap2) + verify(t, f, `testing.T`, t) + + // TODO needs more tests +} + + +// 
---------------------------------------------------------------------------- +// Formatting of basic and simple composite types + +func check(t *testing.T, form, expected string, args ...interface{}) { + f := parse(t, form, nil) + if f == nil { + return // allow other tests to run + } + result := f.Sprint(args...) + if result != expected { + t.Errorf( + "format : %s\nresult : `%s`\nexpected: `%s`\n\n", + form, result, expected) + } +} + + +func TestBasicTypes(t *testing.T) { + check(t, ``, ``) + check(t, `bool=":%v"`, `:true:false`, true, false) + check(t, `int="%b %d %o 0x%x"`, `101010 42 52 0x2a`, 42) + + check(t, `int="%"`, `%`, 42) + check(t, `int="%%"`, `%`, 42) + check(t, `int="**%%**"`, `**%**`, 42) + check(t, `int="%%%%%%"`, `%%%`, 42) + check(t, `int="%%%d%%"`, `%42%`, 42) + + const i = -42 + const is = `-42` + check(t, `int ="%d"`, is, i) + check(t, `int8 ="%d"`, is, int8(i)) + check(t, `int16="%d"`, is, int16(i)) + check(t, `int32="%d"`, is, int32(i)) + check(t, `int64="%d"`, is, int64(i)) + + const u = 42 + const us = `42` + check(t, `uint ="%d"`, us, uint(u)) + check(t, `uint8 ="%d"`, us, uint8(u)) + check(t, `uint16="%d"`, us, uint16(u)) + check(t, `uint32="%d"`, us, uint32(u)) + check(t, `uint64="%d"`, us, uint64(u)) + + const f = 3.141592 + const fs = `3.141592` + check(t, `float64="%g"`, fs, f) + check(t, `float32="%g"`, fs, float32(f)) + check(t, `float64="%g"`, fs, float64(f)) +} + + +func TestArrayTypes(t *testing.T) { + var a0 [10]int + check(t, `array="array";`, `array`, a0) + + a1 := [...]int{1, 2, 3} + check(t, `array="array";`, `array`, a1) + check(t, `array={*}; int="%d";`, `123`, a1) + check(t, `array={* / ", "}; int="%d";`, `1, 2, 3`, a1) + check(t, `array={* / *}; int="%d";`, `12233`, a1) + + a2 := []interface{}{42, "foo", 3.14} + check(t, `array={* / ", "}; interface=*; string="bar"; default="%v";`, `42, bar, 3.14`, a2) +} + + +func TestChanTypes(t *testing.T) { + var c0 chan int + check(t, `chan="chan"`, `chan`, c0) + + c1 := 
make(chan int) + go func() { c1 <- 42 }() + check(t, `chan="chan"`, `chan`, c1) + // check(t, `chan=*`, `42`, c1); // reflection support for chans incomplete +} + + +func TestFuncTypes(t *testing.T) { + var f0 func() int + check(t, `func="func"`, `func`, f0) + + f1 := func() int { return 42 } + check(t, `func="func"`, `func`, f1) + // check(t, `func=*`, `42`, f1); // reflection support for funcs incomplete +} + + +func TestMapTypes(t *testing.T) { + var m0 map[string]int + check(t, `map="map"`, `map`, m0) + + m1 := map[string]int{} + check(t, `map="map"`, `map`, m1) + // check(t, `map=*`, ``, m1); // reflection support for maps incomplete +} + + +func TestPointerTypes(t *testing.T) { + var p0 *int + check(t, `ptr="ptr"`, `ptr`, p0) + check(t, `ptr=*`, ``, p0) + check(t, `ptr=*|"nil"`, `nil`, p0) + + x := 99991 + p1 := &x + check(t, `ptr="ptr"`, `ptr`, p1) + check(t, `ptr=*; int="%d"`, `99991`, p1) +} + + +func TestDefaultRule(t *testing.T) { + check(t, `default="%v"`, `42foo3.14`, 42, "foo", 3.14) + check(t, `default="%v"; int="%x"`, `abcdef`, 10, 11, 12, 13, 14, 15) + check(t, `default="%v"; int="%x"`, `ab**ef`, 10, 11, "**", 14, 15) + check(t, `default="%x"; int=@:default`, `abcdef`, 10, 11, 12, 13, 14, 15) +} + + +func TestGlobalSeparatorRule(t *testing.T) { + check(t, `int="%d"; / ="-"`, `1-2-3-4`, 1, 2, 3, 4) + check(t, `int="%x%x"; / ="*"`, `aa*aa`, 10, 10) +} + + +// ---------------------------------------------------------------------------- +// Formatting of a struct + +type T1 struct { + a int +} + +const F1 = `datafmt "datafmt";` + + `int = "%d";` + + `datafmt.T1 = "<" a ">";` + +func TestStruct1(t *testing.T) { check(t, F1, "<42>", T1{42}) } + + +// ---------------------------------------------------------------------------- +// Formatting of a struct with an optional field (ptr) + +type T2 struct { + s string + p *T1 +} + +const F2a = F1 + + `string = "%s";` + + `ptr = *;` + + `datafmt.T2 = s ["-" p "-"];` + +const F2b = F1 + + `string = "%s";` + + 
`ptr = *;` + + `datafmt.T2 = s ("-" p "-" | "empty");` + +func TestStruct2(t *testing.T) { + check(t, F2a, "foo", T2{"foo", nil}) + check(t, F2a, "bar-<17>-", T2{"bar", &T1{17}}) + check(t, F2b, "fooempty", T2{"foo", nil}) +} + + +// ---------------------------------------------------------------------------- +// Formatting of a struct with a repetitive field (slice) + +type T3 struct { + s string + a []int +} + +const F3a = `datafmt "datafmt";` + + `default = "%v";` + + `array = *;` + + `datafmt.T3 = s {" " a a / ","};` + +const F3b = `datafmt "datafmt";` + + `int = "%d";` + + `string = "%s";` + + `array = *;` + + `nil = ;` + + `empty = *:nil;` + + `datafmt.T3 = s [a:empty ": " {a / "-"}]` + +func TestStruct3(t *testing.T) { + check(t, F3a, "foo", T3{"foo", nil}) + check(t, F3a, "foo 00, 11, 22", T3{"foo", []int{0, 1, 2}}) + check(t, F3b, "bar", T3{"bar", nil}) + check(t, F3b, "bal: 2-3-5", T3{"bal", []int{2, 3, 5}}) +} + + +// ---------------------------------------------------------------------------- +// Formatting of a struct with alternative field + +type T4 struct { + x *int + a []int +} + +const F4a = `datafmt "datafmt";` + + `int = "%d";` + + `ptr = *;` + + `array = *;` + + `nil = ;` + + `empty = *:nil;` + + `datafmt.T4 = "<" (x:empty x | "-") ">" ` + +const F4b = `datafmt "datafmt";` + + `int = "%d";` + + `ptr = *;` + + `array = *;` + + `nil = ;` + + `empty = *:nil;` + + `datafmt.T4 = "<" (a:empty {a / ", "} | "-") ">" ` + +func TestStruct4(t *testing.T) { + x := 7 + check(t, F4a, "<->", T4{nil, nil}) + check(t, F4a, "<7>", T4{&x, nil}) + check(t, F4b, "<->", T4{nil, nil}) + check(t, F4b, "<2, 3, 7>", T4{nil, []int{2, 3, 7}}) +} + + +// ---------------------------------------------------------------------------- +// Formatting a struct (documentation example) + +type Point struct { + name string + x, y int +} + +const FPoint = `datafmt "datafmt";` + + `int = "%d";` + + `hexInt = "0x%x";` + + `string = "---%s---";` + + `datafmt.Point = name "{" x ", " 
y:hexInt "}";` + +func TestStructPoint(t *testing.T) { + p := Point{"foo", 3, 15} + check(t, FPoint, "---foo---{3, 0xf}", p) +} + + +// ---------------------------------------------------------------------------- +// Formatting a slice (documentation example) + +const FSlice = `int = "%b";` + + `array = { * / ", " }` + +func TestSlice(t *testing.T) { check(t, FSlice, "10, 11, 101, 111", []int{2, 3, 5, 7}) } + + +// TODO add more tests diff --git a/libgo/go/exp/datafmt/parser.go b/libgo/go/exp/datafmt/parser.go new file mode 100644 index 000000000..c6d140264 --- /dev/null +++ b/libgo/go/exp/datafmt/parser.go @@ -0,0 +1,386 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package datafmt + +import ( + "container/vector" + "go/scanner" + "go/token" + "os" + "strconv" + "strings" +) + +// ---------------------------------------------------------------------------- +// Parsing + +type parser struct { + scanner.ErrorVector + scanner scanner.Scanner + file *token.File + pos token.Pos // token position + tok token.Token // one token look-ahead + lit []byte // token literal + + packs map[string]string // PackageName -> ImportPath + rules map[string]expr // RuleName -> Expression +} + + +func (p *parser) next() { + p.pos, p.tok, p.lit = p.scanner.Scan() + switch p.tok { + case token.CHAN, token.FUNC, token.INTERFACE, token.MAP, token.STRUCT: + // Go keywords for composite types are type names + // returned by reflect. Accept them as identifiers. 
+ p.tok = token.IDENT // p.lit is already set correctly + } +} + + +func (p *parser) init(fset *token.FileSet, filename string, src []byte) { + p.ErrorVector.Reset() + p.file = fset.AddFile(filename, fset.Base(), len(src)) + p.scanner.Init(p.file, src, p, scanner.AllowIllegalChars) // return '@' as token.ILLEGAL w/o error message + p.next() // initializes pos, tok, lit + p.packs = make(map[string]string) + p.rules = make(map[string]expr) +} + + +func (p *parser) error(pos token.Pos, msg string) { + p.Error(p.file.Position(pos), msg) +} + + +func (p *parser) errorExpected(pos token.Pos, msg string) { + msg = "expected " + msg + if pos == p.pos { + // the error happened at the current position; + // make the error message more specific + msg += ", found '" + p.tok.String() + "'" + if p.tok.IsLiteral() { + msg += " " + string(p.lit) + } + } + p.error(pos, msg) +} + + +func (p *parser) expect(tok token.Token) token.Pos { + pos := p.pos + if p.tok != tok { + p.errorExpected(pos, "'"+tok.String()+"'") + } + p.next() // make progress in any case + return pos +} + + +func (p *parser) parseIdentifier() string { + name := string(p.lit) + p.expect(token.IDENT) + return name +} + + +func (p *parser) parseTypeName() (string, bool) { + pos := p.pos + name, isIdent := p.parseIdentifier(), true + if p.tok == token.PERIOD { + // got a package name, lookup package + if importPath, found := p.packs[name]; found { + name = importPath + } else { + p.error(pos, "package not declared: "+name) + } + p.next() + name, isIdent = name+"."+p.parseIdentifier(), false + } + return name, isIdent +} + + +// Parses a rule name and returns it. If the rule name is +// a package-qualified type name, the package name is resolved. +// The 2nd result value is true iff the rule name consists of a +// single identifier only (and thus could be a package name). 
+// +func (p *parser) parseRuleName() (string, bool) { + name, isIdent := "", false + switch p.tok { + case token.IDENT: + name, isIdent = p.parseTypeName() + case token.DEFAULT: + name = "default" + p.next() + case token.QUO: + name = "/" + p.next() + default: + p.errorExpected(p.pos, "rule name") + p.next() // make progress in any case + } + return name, isIdent +} + + +func (p *parser) parseString() string { + s := "" + if p.tok == token.STRING { + s, _ = strconv.Unquote(string(p.lit)) + // Unquote may fail with an error, but only if the scanner found + // an illegal string in the first place. In this case the error + // has already been reported. + p.next() + return s + } else { + p.expect(token.STRING) + } + return s +} + + +func (p *parser) parseLiteral() literal { + s := []byte(p.parseString()) + + // A string literal may contain %-format specifiers. To simplify + // and speed up printing of the literal, split it into segments + // that start with "%" possibly followed by a last segment that + // starts with some other character. 
+ var list vector.Vector + i0 := 0 + for i := 0; i < len(s); i++ { + if s[i] == '%' && i+1 < len(s) { + // the next segment starts with a % format + if i0 < i { + // the current segment is not empty, split it off + list.Push(s[i0:i]) + i0 = i + } + i++ // skip %; let loop skip over char after % + } + } + // the final segment may start with any character + // (it is empty iff the string is empty) + list.Push(s[i0:]) + + // convert list into a literal + lit := make(literal, list.Len()) + for i := 0; i < list.Len(); i++ { + lit[i] = list.At(i).([]byte) + } + + return lit +} + + +func (p *parser) parseField() expr { + var fname string + switch p.tok { + case token.ILLEGAL: + if string(p.lit) != "@" { + return nil + } + fname = "@" + p.next() + case token.MUL: + fname = "*" + p.next() + case token.IDENT: + fname = p.parseIdentifier() + default: + return nil + } + + var ruleName string + if p.tok == token.COLON { + p.next() + ruleName, _ = p.parseRuleName() + } + + return &field{fname, ruleName} +} + + +func (p *parser) parseOperand() (x expr) { + switch p.tok { + case token.STRING: + x = p.parseLiteral() + + case token.LPAREN: + p.next() + x = p.parseExpression() + if p.tok == token.SHR { + p.next() + x = &group{x, p.parseExpression()} + } + p.expect(token.RPAREN) + + case token.LBRACK: + p.next() + x = &option{p.parseExpression()} + p.expect(token.RBRACK) + + case token.LBRACE: + p.next() + x = p.parseExpression() + var div expr + if p.tok == token.QUO { + p.next() + div = p.parseExpression() + } + x = &repetition{x, div} + p.expect(token.RBRACE) + + default: + x = p.parseField() // may be nil + } + + return x +} + + +func (p *parser) parseSequence() expr { + var list vector.Vector + + for x := p.parseOperand(); x != nil; x = p.parseOperand() { + list.Push(x) + } + + // no need for a sequence if list.Len() < 2 + switch list.Len() { + case 0: + return nil + case 1: + return list.At(0).(expr) + } + + // convert list into a sequence + seq := make(sequence, list.Len()) + 
for i := 0; i < list.Len(); i++ { + seq[i] = list.At(i).(expr) + } + return seq +} + + +func (p *parser) parseExpression() expr { + var list vector.Vector + + for { + x := p.parseSequence() + if x != nil { + list.Push(x) + } + if p.tok != token.OR { + break + } + p.next() + } + + // no need for an alternatives if list.Len() < 2 + switch list.Len() { + case 0: + return nil + case 1: + return list.At(0).(expr) + } + + // convert list into a alternatives + alt := make(alternatives, list.Len()) + for i := 0; i < list.Len(); i++ { + alt[i] = list.At(i).(expr) + } + return alt +} + + +func (p *parser) parseFormat() { + for p.tok != token.EOF { + pos := p.pos + + name, isIdent := p.parseRuleName() + switch p.tok { + case token.STRING: + // package declaration + importPath := p.parseString() + + // add package declaration + if !isIdent { + p.error(pos, "illegal package name: "+name) + } else if _, found := p.packs[name]; !found { + p.packs[name] = importPath + } else { + p.error(pos, "package already declared: "+name) + } + + case token.ASSIGN: + // format rule + p.next() + x := p.parseExpression() + + // add rule + if _, found := p.rules[name]; !found { + p.rules[name] = x + } else { + p.error(pos, "format rule already declared: "+name) + } + + default: + p.errorExpected(p.pos, "package declaration or format rule") + p.next() // make progress in any case + } + + if p.tok == token.SEMICOLON { + p.next() + } else { + break + } + } + p.expect(token.EOF) +} + + +func remap(p *parser, name string) string { + i := strings.Index(name, ".") + if i >= 0 { + packageName, suffix := name[0:i], name[i:] + // lookup package + if importPath, found := p.packs[packageName]; found { + name = importPath + suffix + } else { + var invalidPos token.Position + p.Error(invalidPos, "package not declared: "+packageName) + } + } + return name +} + + +// Parse parses a set of format productions from source src. Custom +// formatters may be provided via a map of formatter functions. 
If +// there are no errors, the result is a Format and the error is nil. +// Otherwise the format is nil and a non-empty ErrorList is returned. +// +func Parse(fset *token.FileSet, filename string, src []byte, fmap FormatterMap) (Format, os.Error) { + // parse source + var p parser + p.init(fset, filename, src) + p.parseFormat() + + // add custom formatters, if any + for name, form := range fmap { + name = remap(&p, name) + if _, found := p.rules[name]; !found { + p.rules[name] = &custom{name, form} + } else { + var invalidPos token.Position + p.Error(invalidPos, "formatter already declared: "+name) + } + } + + return p.rules, p.GetError(scanner.NoMultiples) +} diff --git a/libgo/go/exp/draw/draw.go b/libgo/go/exp/draw/draw.go new file mode 100644 index 000000000..1d0729d92 --- /dev/null +++ b/libgo/go/exp/draw/draw.go @@ -0,0 +1,363 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package draw provides basic graphics and drawing primitives, +// in the style of the Plan 9 graphics library +// (see http://plan9.bell-labs.com/magic/man2html/2/draw) +// and the X Render extension. +package draw + +import "image" + +// m is the maximum color value returned by image.Color.RGBA. +const m = 1<<16 - 1 + +// A Porter-Duff compositing operator. +type Op int + +const ( + // Over specifies ``(src in mask) over dst''. + Over Op = iota + // Src specifies ``src in mask''. + Src +) + +var zeroColor image.Color = image.AlphaColor{0} + +// A draw.Image is an image.Image with a Set method to change a single pixel. +type Image interface { + image.Image + Set(x, y int, c image.Color) +} + +// Draw calls DrawMask with a nil mask and an Over op. 
+func Draw(dst Image, r image.Rectangle, src image.Image, sp image.Point) { + DrawMask(dst, r, src, sp, nil, image.ZP, Over) +} + +// DrawMask aligns r.Min in dst with sp in src and mp in mask and then replaces the rectangle r +// in dst with the result of a Porter-Duff composition. A nil mask is treated as opaque. +func DrawMask(dst Image, r image.Rectangle, src image.Image, sp image.Point, mask image.Image, mp image.Point, op Op) { + sb := src.Bounds() + dx, dy := sb.Max.X-sp.X, sb.Max.Y-sp.Y + if mask != nil { + mb := mask.Bounds() + if dx > mb.Max.X-mp.X { + dx = mb.Max.X - mp.X + } + if dy > mb.Max.Y-mp.Y { + dy = mb.Max.Y - mp.Y + } + } + if r.Dx() > dx { + r.Max.X = r.Min.X + dx + } + if r.Dy() > dy { + r.Max.Y = r.Min.Y + dy + } + r = r.Intersect(dst.Bounds()) + if r.Empty() { + return + } + + // Fast paths for special cases. If none of them apply, then we fall back to a general but slow implementation. + if dst0, ok := dst.(*image.RGBA); ok { + if op == Over { + if mask == nil { + if src0, ok := src.(*image.ColorImage); ok { + drawFillOver(dst0, r, src0) + return + } + if src0, ok := src.(*image.RGBA); ok { + drawCopyOver(dst0, r, src0, sp) + return + } + } else if mask0, ok := mask.(*image.Alpha); ok { + if src0, ok := src.(*image.ColorImage); ok { + drawGlyphOver(dst0, r, src0, mask0, mp) + return + } + } + } else { + if mask == nil { + if src0, ok := src.(*image.ColorImage); ok { + drawFillSrc(dst0, r, src0) + return + } + if src0, ok := src.(*image.RGBA); ok { + drawCopySrc(dst0, r, src0, sp) + return + } + } + } + drawRGBA(dst0, r, src, sp, mask, mp, op) + return + } + + x0, x1, dx := r.Min.X, r.Max.X, 1 + y0, y1, dy := r.Min.Y, r.Max.Y, 1 + if image.Image(dst) == src && r.Overlaps(r.Add(sp.Sub(r.Min))) { + // Rectangles overlap: process backward? 
+ if sp.Y < r.Min.Y || sp.Y == r.Min.Y && sp.X < r.Min.X { + x0, x1, dx = x1-1, x0-1, -1 + y0, y1, dy = y1-1, y0-1, -1 + } + } + + var out *image.RGBA64Color + sy := sp.Y + y0 - r.Min.Y + my := mp.Y + y0 - r.Min.Y + for y := y0; y != y1; y, sy, my = y+dy, sy+dy, my+dy { + sx := sp.X + x0 - r.Min.X + mx := mp.X + x0 - r.Min.X + for x := x0; x != x1; x, sx, mx = x+dx, sx+dx, mx+dx { + ma := uint32(m) + if mask != nil { + _, _, _, ma = mask.At(mx, my).RGBA() + } + switch { + case ma == 0: + if op == Over { + // No-op. + } else { + dst.Set(x, y, zeroColor) + } + case ma == m && op == Src: + dst.Set(x, y, src.At(sx, sy)) + default: + sr, sg, sb, sa := src.At(sx, sy).RGBA() + if out == nil { + out = new(image.RGBA64Color) + } + if op == Over { + dr, dg, db, da := dst.At(x, y).RGBA() + a := m - (sa * ma / m) + out.R = uint16((dr*a + sr*ma) / m) + out.G = uint16((dg*a + sg*ma) / m) + out.B = uint16((db*a + sb*ma) / m) + out.A = uint16((da*a + sa*ma) / m) + } else { + out.R = uint16(sr * ma / m) + out.G = uint16(sg * ma / m) + out.B = uint16(sb * ma / m) + out.A = uint16(sa * ma / m) + } + dst.Set(x, y, out) + } + } + } +} + +func drawFillOver(dst *image.RGBA, r image.Rectangle, src *image.ColorImage) { + cr, cg, cb, ca := src.RGBA() + // The 0x101 is here for the same reason as in drawRGBA. 
+ a := (m - ca) * 0x101 + x0, x1 := r.Min.X, r.Max.X + y0, y1 := r.Min.Y, r.Max.Y + for y := y0; y != y1; y++ { + dbase := y * dst.Stride + dpix := dst.Pix[dbase+x0 : dbase+x1] + for i, rgba := range dpix { + dr := (uint32(rgba.R)*a)/m + cr + dg := (uint32(rgba.G)*a)/m + cg + db := (uint32(rgba.B)*a)/m + cb + da := (uint32(rgba.A)*a)/m + ca + dpix[i] = image.RGBAColor{uint8(dr >> 8), uint8(dg >> 8), uint8(db >> 8), uint8(da >> 8)} + } + } +} + +func drawCopyOver(dst *image.RGBA, r image.Rectangle, src *image.RGBA, sp image.Point) { + dx0, dx1 := r.Min.X, r.Max.X + dy0, dy1 := r.Min.Y, r.Max.Y + nrows := dy1 - dy0 + sx0, sx1 := sp.X, sp.X+dx1-dx0 + d0 := dy0*dst.Stride + dx0 + d1 := dy0*dst.Stride + dx1 + s0 := sp.Y*src.Stride + sx0 + s1 := sp.Y*src.Stride + sx1 + var ( + ddelta, sdelta int + i0, i1, idelta int + ) + if r.Min.Y < sp.Y || r.Min.Y == sp.Y && r.Min.X <= sp.X { + ddelta = dst.Stride + sdelta = src.Stride + i0, i1, idelta = 0, d1-d0, +1 + } else { + // If the source start point is higher than the destination start point, or equal height but to the left, + // then we compose the rows in right-to-left, bottom-up order instead of left-to-right, top-down. + d0 += (nrows - 1) * dst.Stride + d1 += (nrows - 1) * dst.Stride + s0 += (nrows - 1) * src.Stride + s1 += (nrows - 1) * src.Stride + ddelta = -dst.Stride + sdelta = -src.Stride + i0, i1, idelta = d1-d0-1, -1, -1 + } + for ; nrows > 0; nrows-- { + dpix := dst.Pix[d0:d1] + spix := src.Pix[s0:s1] + for i := i0; i != i1; i += idelta { + // For unknown reasons, even though both dpix[i] and spix[i] are + // image.RGBAColors, on an x86 CPU it seems fastest to call RGBA + // for the source but to do it manually for the destination. + sr, sg, sb, sa := spix[i].RGBA() + rgba := dpix[i] + dr := uint32(rgba.R) + dg := uint32(rgba.G) + db := uint32(rgba.B) + da := uint32(rgba.A) + // The 0x101 is here for the same reason as in drawRGBA. 
+ a := (m - sa) * 0x101 + dr = (dr*a)/m + sr + dg = (dg*a)/m + sg + db = (db*a)/m + sb + da = (da*a)/m + sa + dpix[i] = image.RGBAColor{uint8(dr >> 8), uint8(dg >> 8), uint8(db >> 8), uint8(da >> 8)} + } + d0 += ddelta + d1 += ddelta + s0 += sdelta + s1 += sdelta + } +} + +func drawGlyphOver(dst *image.RGBA, r image.Rectangle, src *image.ColorImage, mask *image.Alpha, mp image.Point) { + x0, x1 := r.Min.X, r.Max.X + y0, y1 := r.Min.Y, r.Max.Y + cr, cg, cb, ca := src.RGBA() + for y, my := y0, mp.Y; y != y1; y, my = y+1, my+1 { + dbase := y * dst.Stride + dpix := dst.Pix[dbase+x0 : dbase+x1] + mbase := my * mask.Stride + mpix := mask.Pix[mbase+mp.X:] + for i, rgba := range dpix { + ma := uint32(mpix[i].A) + if ma == 0 { + continue + } + ma |= ma << 8 + dr := uint32(rgba.R) + dg := uint32(rgba.G) + db := uint32(rgba.B) + da := uint32(rgba.A) + // The 0x101 is here for the same reason as in drawRGBA. + a := (m - (ca * ma / m)) * 0x101 + dr = (dr*a + cr*ma) / m + dg = (dg*a + cg*ma) / m + db = (db*a + cb*ma) / m + da = (da*a + ca*ma) / m + dpix[i] = image.RGBAColor{uint8(dr >> 8), uint8(dg >> 8), uint8(db >> 8), uint8(da >> 8)} + } + } +} + +func drawFillSrc(dst *image.RGBA, r image.Rectangle, src *image.ColorImage) { + if r.Dy() < 1 { + return + } + cr, cg, cb, ca := src.RGBA() + color := image.RGBAColor{uint8(cr >> 8), uint8(cg >> 8), uint8(cb >> 8), uint8(ca >> 8)} + // The built-in copy function is faster than a straightforward for loop to fill the destination with + // the color, but copy requires a slice source. We therefore use a for loop to fill the first row, and + // then use the first row as the slice source for the remaining rows. 
+ dx0, dx1 := r.Min.X, r.Max.X + dy0, dy1 := r.Min.Y, r.Max.Y + dbase := dy0 * dst.Stride + i0, i1 := dbase+dx0, dbase+dx1 + firstRow := dst.Pix[i0:i1] + for i := range firstRow { + firstRow[i] = color + } + for y := dy0 + 1; y < dy1; y++ { + i0 += dst.Stride + i1 += dst.Stride + copy(dst.Pix[i0:i1], firstRow) + } +} + +func drawCopySrc(dst *image.RGBA, r image.Rectangle, src *image.RGBA, sp image.Point) { + dx0, dx1 := r.Min.X, r.Max.X + dy0, dy1 := r.Min.Y, r.Max.Y + nrows := dy1 - dy0 + sx0, sx1 := sp.X, sp.X+dx1-dx0 + d0 := dy0*dst.Stride + dx0 + d1 := dy0*dst.Stride + dx1 + s0 := sp.Y*src.Stride + sx0 + s1 := sp.Y*src.Stride + sx1 + var ddelta, sdelta int + if r.Min.Y <= sp.Y { + ddelta = dst.Stride + sdelta = src.Stride + } else { + // If the source start point is higher than the destination start point, then we compose the rows + // in bottom-up order instead of top-down. Unlike the drawCopyOver function, we don't have to + // check the x co-ordinates because the built-in copy function can handle overlapping slices. 
+ d0 += (nrows - 1) * dst.Stride + d1 += (nrows - 1) * dst.Stride + s0 += (nrows - 1) * src.Stride + s1 += (nrows - 1) * src.Stride + ddelta = -dst.Stride + sdelta = -src.Stride + } + for ; nrows > 0; nrows-- { + copy(dst.Pix[d0:d1], src.Pix[s0:s1]) + d0 += ddelta + d1 += ddelta + s0 += sdelta + s1 += sdelta + } +} + +func drawRGBA(dst *image.RGBA, r image.Rectangle, src image.Image, sp image.Point, mask image.Image, mp image.Point, op Op) { + x0, x1, dx := r.Min.X, r.Max.X, 1 + y0, y1, dy := r.Min.Y, r.Max.Y, 1 + if image.Image(dst) == src && r.Overlaps(r.Add(sp.Sub(r.Min))) { + if sp.Y < r.Min.Y || sp.Y == r.Min.Y && sp.X < r.Min.X { + x0, x1, dx = x1-1, x0-1, -1 + y0, y1, dy = y1-1, y0-1, -1 + } + } + + sy := sp.Y + y0 - r.Min.Y + my := mp.Y + y0 - r.Min.Y + for y := y0; y != y1; y, sy, my = y+dy, sy+dy, my+dy { + sx := sp.X + x0 - r.Min.X + mx := mp.X + x0 - r.Min.X + dpix := dst.Pix[y*dst.Stride : (y+1)*dst.Stride] + for x := x0; x != x1; x, sx, mx = x+dx, sx+dx, mx+dx { + ma := uint32(m) + if mask != nil { + _, _, _, ma = mask.At(mx, my).RGBA() + } + sr, sg, sb, sa := src.At(sx, sy).RGBA() + var dr, dg, db, da uint32 + if op == Over { + rgba := dpix[x] + dr = uint32(rgba.R) + dg = uint32(rgba.G) + db = uint32(rgba.B) + da = uint32(rgba.A) + // dr, dg, db and da are all 8-bit color at the moment, ranging in [0,255]. + // We work in 16-bit color, and so would normally do: + // dr |= dr << 8 + // and similarly for dg, db and da, but instead we multiply a + // (which is a 16-bit color, ranging in [0,65535]) by 0x101. + // This yields the same result, but is fewer arithmetic operations. 
+ a := (m - (sa * ma / m)) * 0x101 + dr = (dr*a + sr*ma) / m + dg = (dg*a + sg*ma) / m + db = (db*a + sb*ma) / m + da = (da*a + sa*ma) / m + } else { + dr = sr * ma / m + dg = sg * ma / m + db = sb * ma / m + da = sa * ma / m + } + dpix[x] = image.RGBAColor{uint8(dr >> 8), uint8(dg >> 8), uint8(db >> 8), uint8(da >> 8)} + } + } +} diff --git a/libgo/go/exp/draw/draw_test.go b/libgo/go/exp/draw/draw_test.go new file mode 100644 index 000000000..90c9e823d --- /dev/null +++ b/libgo/go/exp/draw/draw_test.go @@ -0,0 +1,228 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package draw + +import ( + "image" + "testing" +) + +func eq(c0, c1 image.Color) bool { + r0, g0, b0, a0 := c0.RGBA() + r1, g1, b1, a1 := c1.RGBA() + return r0 == r1 && g0 == g1 && b0 == b1 && a0 == a1 +} + +func fillBlue(alpha int) image.Image { + return image.NewColorImage(image.RGBAColor{0, 0, uint8(alpha), uint8(alpha)}) +} + +func fillAlpha(alpha int) image.Image { + return image.NewColorImage(image.AlphaColor{uint8(alpha)}) +} + +func vgradGreen(alpha int) image.Image { + m := image.NewRGBA(16, 16) + for y := 0; y < 16; y++ { + for x := 0; x < 16; x++ { + m.Set(x, y, image.RGBAColor{0, uint8(y * alpha / 15), 0, uint8(alpha)}) + } + } + return m +} + +func vgradAlpha(alpha int) image.Image { + m := image.NewAlpha(16, 16) + for y := 0; y < 16; y++ { + for x := 0; x < 16; x++ { + m.Set(x, y, image.AlphaColor{uint8(y * alpha / 15)}) + } + } + return m +} + +func hgradRed(alpha int) Image { + m := image.NewRGBA(16, 16) + for y := 0; y < 16; y++ { + for x := 0; x < 16; x++ { + m.Set(x, y, image.RGBAColor{uint8(x * alpha / 15), 0, 0, uint8(alpha)}) + } + } + return m +} + +func gradYellow(alpha int) Image { + m := image.NewRGBA(16, 16) + for y := 0; y < 16; y++ { + for x := 0; x < 16; x++ { + m.Set(x, y, image.RGBAColor{uint8(x * alpha / 15), uint8(y * alpha / 15), 0, uint8(alpha)}) + } 
+ } + return m +} + +type drawTest struct { + desc string + src image.Image + mask image.Image + op Op + expected image.Color +} + +var drawTests = []drawTest{ + // Uniform mask (0% opaque). + {"nop", vgradGreen(255), fillAlpha(0), Over, image.RGBAColor{136, 0, 0, 255}}, + {"clear", vgradGreen(255), fillAlpha(0), Src, image.RGBAColor{0, 0, 0, 0}}, + // Uniform mask (100%, 75%, nil) and uniform source. + // At (x, y) == (8, 8): + // The destination pixel is {136, 0, 0, 255}. + // The source pixel is {0, 0, 90, 90}. + {"fill", fillBlue(90), fillAlpha(255), Over, image.RGBAColor{88, 0, 90, 255}}, + {"fillSrc", fillBlue(90), fillAlpha(255), Src, image.RGBAColor{0, 0, 90, 90}}, + {"fillAlpha", fillBlue(90), fillAlpha(192), Over, image.RGBAColor{100, 0, 68, 255}}, + {"fillAlphaSrc", fillBlue(90), fillAlpha(192), Src, image.RGBAColor{0, 0, 68, 68}}, + {"fillNil", fillBlue(90), nil, Over, image.RGBAColor{88, 0, 90, 255}}, + {"fillNilSrc", fillBlue(90), nil, Src, image.RGBAColor{0, 0, 90, 90}}, + // Uniform mask (100%, 75%, nil) and variable source. + // At (x, y) == (8, 8): + // The destination pixel is {136, 0, 0, 255}. + // The source pixel is {0, 48, 0, 90}. + {"copy", vgradGreen(90), fillAlpha(255), Over, image.RGBAColor{88, 48, 0, 255}}, + {"copySrc", vgradGreen(90), fillAlpha(255), Src, image.RGBAColor{0, 48, 0, 90}}, + {"copyAlpha", vgradGreen(90), fillAlpha(192), Over, image.RGBAColor{100, 36, 0, 255}}, + {"copyAlphaSrc", vgradGreen(90), fillAlpha(192), Src, image.RGBAColor{0, 36, 0, 68}}, + {"copyNil", vgradGreen(90), nil, Over, image.RGBAColor{88, 48, 0, 255}}, + {"copyNilSrc", vgradGreen(90), nil, Src, image.RGBAColor{0, 48, 0, 90}}, + // Variable mask and variable source. + // At (x, y) == (8, 8): + // The destination pixel is {136, 0, 0, 255}. + // The source pixel is {0, 0, 255, 255}. + // The mask pixel's alpha is 102, or 40%. 
+ {"generic", fillBlue(255), vgradAlpha(192), Over, image.RGBAColor{81, 0, 102, 255}}, + {"genericSrc", fillBlue(255), vgradAlpha(192), Src, image.RGBAColor{0, 0, 102, 102}}, +} + +func makeGolden(dst, src, mask image.Image, op Op) image.Image { + // Since golden is a newly allocated image, we don't have to check if the + // input source and mask images and the output golden image overlap. + b := dst.Bounds() + sx0 := src.Bounds().Min.X - b.Min.X + sy0 := src.Bounds().Min.Y - b.Min.Y + var mx0, my0 int + if mask != nil { + mx0 = mask.Bounds().Min.X - b.Min.X + my0 = mask.Bounds().Min.Y - b.Min.Y + } + golden := image.NewRGBA(b.Max.X, b.Max.Y) + for y := b.Min.Y; y < b.Max.Y; y++ { + my, sy := my0+y, sy0+y + for x := b.Min.X; x < b.Max.X; x++ { + mx, sx := mx0+x, sx0+x + const M = 1<<16 - 1 + var dr, dg, db, da uint32 + if op == Over { + dr, dg, db, da = dst.At(x, y).RGBA() + } + sr, sg, sb, sa := src.At(sx, sy).RGBA() + ma := uint32(M) + if mask != nil { + _, _, _, ma = mask.At(mx, my).RGBA() + } + a := M - (sa * ma / M) + golden.Set(x, y, image.RGBA64Color{ + uint16((dr*a + sr*ma) / M), + uint16((dg*a + sg*ma) / M), + uint16((db*a + sb*ma) / M), + uint16((da*a + sa*ma) / M), + }) + } + } + golden.Rect = b + return golden +} + +func TestDraw(t *testing.T) { +loop: + for _, test := range drawTests { + dst := hgradRed(255) + // Draw the (src, mask, op) onto a copy of dst using a slow but obviously correct implementation. + golden := makeGolden(dst, test.src, test.mask, test.op) + b := dst.Bounds() + if !b.Eq(golden.Bounds()) { + t.Errorf("draw %s: bounds %v versus %v", test.desc, dst.Bounds(), golden.Bounds()) + continue + } + // Draw the same combination onto the actual dst using the optimized DrawMask implementation. + DrawMask(dst, b, test.src, image.ZP, test.mask, image.ZP, test.op) + // Check that the resultant pixel at (8, 8) matches what we expect + // (the expected value can be verified by hand). 
+ if !eq(dst.At(8, 8), test.expected) { + t.Errorf("draw %s: at (8, 8) %v versus %v", test.desc, dst.At(8, 8), test.expected) + continue + } + // Check that the resultant dst image matches the golden output. + for y := b.Min.Y; y < b.Max.Y; y++ { + for x := b.Min.X; x < b.Max.X; x++ { + if !eq(dst.At(x, y), golden.At(x, y)) { + t.Errorf("draw %s: at (%d, %d), %v versus golden %v", test.desc, x, y, dst.At(x, y), golden.At(x, y)) + continue loop + } + } + } + } +} + +func TestDrawOverlap(t *testing.T) { + for _, op := range []Op{Over, Src} { + for yoff := -2; yoff <= 2; yoff++ { + loop: + for xoff := -2; xoff <= 2; xoff++ { + m := gradYellow(127).(*image.RGBA) + dst := &image.RGBA{ + Pix: m.Pix, + Stride: m.Stride, + Rect: image.Rect(5, 5, 10, 10), + } + src := &image.RGBA{ + Pix: m.Pix, + Stride: m.Stride, + Rect: image.Rect(5+xoff, 5+yoff, 10+xoff, 10+yoff), + } + // Draw the (src, mask, op) onto a copy of dst using a slow but obviously correct implementation. + golden := makeGolden(dst, src, nil, op) + b := dst.Bounds() + if !b.Eq(golden.Bounds()) { + t.Errorf("drawOverlap xoff=%d,yoff=%d: bounds %v versus %v", xoff, yoff, dst.Bounds(), golden.Bounds()) + continue + } + // Draw the same combination onto the actual dst using the optimized DrawMask implementation. + DrawMask(dst, b, src, src.Bounds().Min, nil, image.ZP, op) + // Check that the resultant dst image matches the golden output. + for y := b.Min.Y; y < b.Max.Y; y++ { + for x := b.Min.X; x < b.Max.X; x++ { + if !eq(dst.At(x, y), golden.At(x, y)) { + t.Errorf("drawOverlap xoff=%d,yoff=%d: at (%d, %d), %v versus golden %v", xoff, yoff, x, y, dst.At(x, y), golden.At(x, y)) + continue loop + } + } + } + } + } + } +} + +// TestIssue836 verifies http://code.google.com/p/go/issues/detail?id=836. 
+func TestIssue836(t *testing.T) { + a := image.NewRGBA(1, 1) + b := image.NewRGBA(2, 2) + b.Set(0, 0, image.RGBAColor{0, 0, 0, 5}) + b.Set(1, 0, image.RGBAColor{0, 0, 5, 5}) + b.Set(0, 1, image.RGBAColor{0, 5, 0, 5}) + b.Set(1, 1, image.RGBAColor{5, 0, 0, 5}) + Draw(a, image.Rect(0, 0, 1, 1), b, image.Pt(1, 1)) + if !eq(image.RGBAColor{5, 0, 0, 5}, a.At(0, 0)) { + t.Errorf("Issue 836: want %v got %v", image.RGBAColor{5, 0, 0, 5}, a.At(0, 0)) + } +} diff --git a/libgo/go/exp/draw/event.go b/libgo/go/exp/draw/event.go new file mode 100644 index 000000000..b777d912e --- /dev/null +++ b/libgo/go/exp/draw/event.go @@ -0,0 +1,56 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package draw + +import ( + "image" + "os" +) + +// A Window represents a single graphics window. +type Window interface { + // Screen returns an editable Image for the window. + Screen() Image + // FlushImage flushes changes made to Screen() back to screen. + FlushImage() + // EventChan returns a channel carrying UI events such as key presses, + // mouse movements and window resizes. + EventChan() <-chan interface{} + // Close closes the window. + Close() os.Error +} + +// A KeyEvent is sent for a key press or release. +type KeyEvent struct { + // The value k represents key k being pressed. + // The value -k represents key k being released. + // The specific set of key values is not specified, + // but ordinary characters represent themselves. + Key int +} + +// A MouseEvent is sent for a button press or release or for a mouse movement. +type MouseEvent struct { + // Buttons is a bit mask of buttons: 1<<0 is left, 1<<1 middle, 1<<2 right. + // It represents button state and not necessarily the state delta: bit 0 + // being on means that the left mouse button is down, but does not imply + // that the same button was up in the previous MouseEvent. 
+ Buttons int + // Loc is the location of the cursor. + Loc image.Point + // Nsec is the event's timestamp. + Nsec int64 +} + +// A ConfigEvent is sent each time the window's color model or size changes. +// The client should respond by calling Window.Screen to obtain a new image. +type ConfigEvent struct { + Config image.Config +} + +// An ErrEvent is sent when an error occurs. +type ErrEvent struct { + Err os.Error +} diff --git a/libgo/go/exp/draw/x11/auth.go b/libgo/go/exp/draw/x11/auth.go new file mode 100644 index 000000000..896dedf05 --- /dev/null +++ b/libgo/go/exp/draw/x11/auth.go @@ -0,0 +1,93 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x11 + +import ( + "bufio" + "io" + "os" +) + +// readU16BE reads a big-endian uint16 from r, using b as a scratch buffer. +func readU16BE(r io.Reader, b []byte) (uint16, os.Error) { + _, err := io.ReadFull(r, b[0:2]) + if err != nil { + return 0, err + } + return uint16(b[0])<<8 + uint16(b[1]), nil +} + +// readStr reads a length-prefixed string from r, using b as a scratch buffer. +func readStr(r io.Reader, b []byte) (string, os.Error) { + n, err := readU16BE(r, b) + if err != nil { + return "", err + } + if int(n) > len(b) { + return "", os.NewError("Xauthority entry too long for buffer") + } + _, err = io.ReadFull(r, b[0:n]) + if err != nil { + return "", err + } + return string(b[0:n]), nil +} + +// readAuth reads the X authority file and returns the name/data pair for the display. +// displayStr is the "12" out of a $DISPLAY like ":12.0". +func readAuth(displayStr string) (name, data string, err os.Error) { + // b is a scratch buffer to use and should be at least 256 bytes long + // (i.e. it should be able to hold a hostname). + var b [256]byte + // As per /usr/include/X11/Xauth.h. 
+ const familyLocal = 256 + + fn := os.Getenv("XAUTHORITY") + if fn == "" { + home := os.Getenv("HOME") + if home == "" { + err = os.NewError("Xauthority not found: $XAUTHORITY, $HOME not set") + return + } + fn = home + "/.Xauthority" + } + r, err := os.Open(fn, os.O_RDONLY, 0444) + if err != nil { + return + } + defer r.Close() + br := bufio.NewReader(r) + + hostname, err := os.Hostname() + if err != nil { + return + } + for { + family, err := readU16BE(br, b[0:2]) + if err != nil { + return + } + addr, err := readStr(br, b[0:]) + if err != nil { + return + } + disp, err := readStr(br, b[0:]) + if err != nil { + return + } + name0, err := readStr(br, b[0:]) + if err != nil { + return + } + data0, err := readStr(br, b[0:]) + if err != nil { + return + } + if family == familyLocal && addr == hostname && disp == displayStr { + return name0, data0, nil + } + } + panic("unreachable") +} diff --git a/libgo/go/exp/draw/x11/conn.go b/libgo/go/exp/draw/x11/conn.go new file mode 100644 index 000000000..da2181536 --- /dev/null +++ b/libgo/go/exp/draw/x11/conn.go @@ -0,0 +1,622 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This package implements an X11 backend for the exp/draw package. +// +// The X protocol specification is at ftp://ftp.x.org/pub/X11R7.0/doc/PDF/proto.pdf. +// A summary of the wire format can be found in XCB's xproto.xml. +package x11 + +import ( + "bufio" + "exp/draw" + "image" + "io" + "log" + "net" + "os" + "strconv" + "strings" + "time" +) + +type resID uint32 // X resource IDs. + +// TODO(nigeltao): Handle window resizes. 
+const ( + windowHeight = 600 + windowWidth = 800 +) + +const ( + keymapLo = 8 + keymapHi = 255 +) + +type conn struct { + c io.Closer + r *bufio.Reader + w *bufio.Writer + + gc, window, root, visual resID + + img *image.RGBA + eventc chan interface{} + mouseState draw.MouseEvent + + buf [256]byte // General purpose scratch buffer. + + flush chan bool + flushBuf0 [24]byte + flushBuf1 [4 * 1024]byte +} + +// writeSocket runs in its own goroutine, serving both FlushImage calls +// directly from the exp/draw client and indirectly from X expose events. +// It paints c.img to the X server via PutImage requests. +func (c *conn) writeSocket() { + defer c.c.Close() + for _ = range c.flush { + b := c.img.Bounds() + if b.Empty() { + continue + } + // Each X request has a 16-bit length (in terms of 4-byte units). To avoid going over + // this limit, we send PutImage for each row of the image, rather than trying to paint + // the entire image in one X request. This approach could easily be optimized (or the + // X protocol may have an escape sequence to delimit very large requests). + // TODO(nigeltao): See what XCB's xcb_put_image does in this situation. + units := 6 + b.Dx() + if units > 0xffff || b.Dy() > 0xffff { + log.Print("x11: window is too large for PutImage") + return + } + + c.flushBuf0[0] = 0x48 // PutImage opcode. + c.flushBuf0[1] = 0x02 // XCB_IMAGE_FORMAT_Z_PIXMAP. + c.flushBuf0[2] = uint8(units) + c.flushBuf0[3] = uint8(units >> 8) + setU32LE(c.flushBuf0[4:8], uint32(c.window)) + setU32LE(c.flushBuf0[8:12], uint32(c.gc)) + setU32LE(c.flushBuf0[12:16], 1<<16|uint32(b.Dx())) + c.flushBuf0[21] = 0x18 // depth = 24 bits. 
+ + for y := b.Min.Y; y < b.Max.Y; y++ { + setU32LE(c.flushBuf0[16:20], uint32(y<<16)) + if _, err := c.w.Write(c.flushBuf0[0:24]); err != nil { + if err != os.EOF { + log.Println("x11:", err.String()) + } + return + } + p := c.img.Pix[y*c.img.Stride : (y+1)*c.img.Stride] + for x := b.Min.X; x < b.Max.X; { + nx := b.Max.X - x + if nx > len(c.flushBuf1)/4 { + nx = len(c.flushBuf1) / 4 + } + for i, rgba := range p[x : x+nx] { + c.flushBuf1[4*i+0] = rgba.B + c.flushBuf1[4*i+1] = rgba.G + c.flushBuf1[4*i+2] = rgba.R + } + x += nx + if _, err := c.w.Write(c.flushBuf1[0 : 4*nx]); err != nil { + if err != os.EOF { + log.Println("x11:", err.String()) + } + return + } + } + } + if err := c.w.Flush(); err != nil { + if err != os.EOF { + log.Println("x11:", err.String()) + } + return + } + } +} + +func (c *conn) Screen() draw.Image { return c.img } + +func (c *conn) FlushImage() { + // We do the send (the <- operator) in an expression context, rather than in + // a statement context, so that it does not block, and fails if the buffered + // channel is full (in which case there already is a flush request pending). + _ = c.flush <- false +} + +func (c *conn) Close() os.Error { + // Shut down the writeSocket goroutine. This will close the socket to the + // X11 server, which will cause c.eventc to close. + close(c.flush) + for _ = range c.eventc { + // Drain the channel to allow the readSocket goroutine to shut down. + } + return nil +} + +func (c *conn) EventChan() <-chan interface{} { return c.eventc } + +// readSocket runs in its own goroutine, reading X events and sending draw +// events on c's EventChan. +func (c *conn) readSocket() { + var ( + keymap [256][]int + keysymsPerKeycode int + ) + defer close(c.eventc) + for { + // X events are always 32 bytes long. + if _, err := io.ReadFull(c.r, c.buf[0:32]); err != nil { + if err != os.EOF { + c.eventc <- draw.ErrEvent{err} + } + return + } + switch c.buf[0] { + case 0x01: // Reply from a request (e.g. GetKeyboardMapping). 
+ cookie := int(c.buf[3])<<8 | int(c.buf[2]) + if cookie != 1 { + // We issued only one request (GetKeyboardMapping) with a cookie of 1, + // so we shouldn't get any other reply from the X server. + c.eventc <- draw.ErrEvent{os.NewError("x11: unexpected cookie")} + return + } + keysymsPerKeycode = int(c.buf[1]) + b := make([]int, 256*keysymsPerKeycode) + for i := range keymap { + keymap[i] = b[i*keysymsPerKeycode : (i+1)*keysymsPerKeycode] + } + for i := keymapLo; i <= keymapHi; i++ { + m := keymap[i] + for j := range m { + u, err := readU32LE(c.r, c.buf[0:4]) + if err != nil { + if err != os.EOF { + c.eventc <- draw.ErrEvent{err} + } + return + } + m[j] = int(u) + } + } + case 0x02, 0x03: // Key press, key release. + // X Keyboard Encoding is documented at http://tronche.com/gui/x/xlib/input/keyboard-encoding.html + // TODO(nigeltao): Do we need to implement the "MODE SWITCH / group modifier" feature + // or is that some no-longer-used X construct? + if keysymsPerKeycode < 2 { + // Either we haven't yet received the GetKeyboardMapping reply or + // the X server has sent one that's too short. + continue + } + keycode := int(c.buf[1]) + shift := int(c.buf[28]) & 0x01 + keysym := keymap[keycode][shift] + if keysym == 0 { + keysym = keymap[keycode][0] + } + // TODO(nigeltao): Should we send KeyEvents for Shift/Ctrl/Alt? Should Shift-A send + // the same int down the channel as the sent on just the A key? + // TODO(nigeltao): How should IME events (e.g. key presses that should generate CJK text) work? Or + // is that outside the scope of the draw.Window interface? + if c.buf[0] == 0x03 { + keysym = -keysym + } + c.eventc <- draw.KeyEvent{keysym} + case 0x04, 0x05: // Button press, button release. + mask := 1 << (c.buf[1] - 1) + if c.buf[0] == 0x04 { + c.mouseState.Buttons |= mask + } else { + c.mouseState.Buttons &^= mask + } + c.mouseState.Nsec = time.Nanoseconds() + c.eventc <- c.mouseState + case 0x06: // Motion notify. 
+ c.mouseState.Loc.X = int(int16(c.buf[25])<<8 | int16(c.buf[24])) + c.mouseState.Loc.Y = int(int16(c.buf[27])<<8 | int16(c.buf[26])) + c.mouseState.Nsec = time.Nanoseconds() + c.eventc <- c.mouseState + case 0x0c: // Expose. + // A single user action could trigger multiple expose events (e.g. if moving another + // window with XShape'd rounded corners over our window). In that case, the X server will + // send a uint16 count (in bytes 16-17) of the number of additional expose events coming. + // We could parse each event for the (x, y, width, height) and maintain a minimal dirty + // rectangle, but for now, the simplest approach is to paint the entire window, when + // receiving the final event in the series. + if c.buf[17] == 0 && c.buf[16] == 0 { + // TODO(nigeltao): Should we ignore the very first expose event? A freshly mapped window + // will trigger expose, but until the first c.FlushImage call, there's probably nothing to + // paint but black. For an 800x600 window, at 4 bytes per pixel, each repaint writes about + // 2MB over the socket. + c.FlushImage() + } + // TODO(nigeltao): Should we listen to DestroyNotify (0x11) and ResizeRequest (0x19) events? + // What about EnterNotify (0x07) and LeaveNotify (0x08)? + } + } +} + +// connect connects to the X server given by the full X11 display name (e.g. +// ":12.0") and returns the connection as well as the portion of the full name +// that is the display number (e.g. "12"). 
+// Examples: +// connect(":1") // calls net.Dial("unix", "", "/tmp/.X11-unix/X1"), displayStr="1" +// connect("/tmp/launch-123/:0") // calls net.Dial("unix", "", "/tmp/launch-123/:0"), displayStr="0" +// connect("hostname:2.1") // calls net.Dial("tcp", "", "hostname:6002"), displayStr="2" +// connect("tcp/hostname:1.0") // calls net.Dial("tcp", "", "hostname:6001"), displayStr="1" +func connect(display string) (conn net.Conn, displayStr string, err os.Error) { + colonIdx := strings.LastIndex(display, ":") + if colonIdx < 0 { + return nil, "", os.NewError("bad display: " + display) + } + // Parse the section before the colon. + var protocol, host, socket string + if display[0] == '/' { + socket = display[0:colonIdx] + } else { + if i := strings.LastIndex(display, "/"); i < 0 { + // The default protocol is TCP. + protocol = "tcp" + host = display[0:colonIdx] + } else { + protocol = display[0:i] + host = display[i+1 : colonIdx] + } + } + // Parse the section after the colon. + after := display[colonIdx+1:] + if after == "" { + return nil, "", os.NewError("bad display: " + display) + } + if i := strings.LastIndex(after, "."); i < 0 { + displayStr = after + } else { + displayStr = after[0:i] + } + displayInt, err := strconv.Atoi(displayStr) + if err != nil || displayInt < 0 { + return nil, "", os.NewError("bad display: " + display) + } + // Make the connection. + if socket != "" { + conn, err = net.Dial("unix", "", socket+":"+displayStr) + } else if host != "" { + conn, err = net.Dial(protocol, "", host+":"+strconv.Itoa(6000+displayInt)) + } else { + conn, err = net.Dial("unix", "", "/tmp/.X11-unix/X"+displayStr) + } + if err != nil { + return nil, "", os.NewError("cannot connect to " + display + ": " + err.String()) + } + return +} + +// authenticate authenticates ourselves with the X server. +// displayStr is the "12" out of ":12.0". 
+func authenticate(w *bufio.Writer, displayStr string) os.Error { + key, value, err := readAuth(displayStr) + if err != nil { + return err + } + // Assume that the authentication protocol is "MIT-MAGIC-COOKIE-1". + if len(key) != 18 || len(value) != 16 { + return os.NewError("unsupported Xauth") + } + // 0x006c means little-endian. 0x000b, 0x0000 means X major version 11, minor version 0. + // 0x0012 and 0x0010 means the auth key and value have lenths 18 and 16. + // The final 0x0000 is padding, so that the string length is a multiple of 4. + _, err = io.WriteString(w, "\x6c\x00\x0b\x00\x00\x00\x12\x00\x10\x00\x00\x00") + if err != nil { + return err + } + _, err = io.WriteString(w, key) + if err != nil { + return err + } + // Again, the 0x0000 is padding. + _, err = io.WriteString(w, "\x00\x00") + if err != nil { + return err + } + _, err = io.WriteString(w, value) + if err != nil { + return err + } + err = w.Flush() + if err != nil { + return err + } + return nil +} + +// readU8 reads a uint8 from r, using b as a scratch buffer. +func readU8(r io.Reader, b []byte) (uint8, os.Error) { + _, err := io.ReadFull(r, b[0:1]) + if err != nil { + return 0, err + } + return uint8(b[0]), nil +} + +// readU16LE reads a little-endian uint16 from r, using b as a scratch buffer. +func readU16LE(r io.Reader, b []byte) (uint16, os.Error) { + _, err := io.ReadFull(r, b[0:2]) + if err != nil { + return 0, err + } + return uint16(b[0]) | uint16(b[1])<<8, nil +} + +// readU32LE reads a little-endian uint32 from r, using b as a scratch buffer. +func readU32LE(r io.Reader, b []byte) (uint32, os.Error) { + _, err := io.ReadFull(r, b[0:4]) + if err != nil { + return 0, err + } + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24, nil +} + +// setU32LE sets b[0:4] to be the little-endian representation of u. 
+func setU32LE(b []byte, u uint32) { + b[0] = byte((u >> 0) & 0xff) + b[1] = byte((u >> 8) & 0xff) + b[2] = byte((u >> 16) & 0xff) + b[3] = byte((u >> 24) & 0xff) +} + +// checkPixmapFormats checks that we have an agreeable X pixmap Format. +func checkPixmapFormats(r io.Reader, b []byte, n int) (agree bool, err os.Error) { + for i := 0; i < n; i++ { + _, err = io.ReadFull(r, b[0:8]) + if err != nil { + return + } + // Byte 0 is depth, byte 1 is bits-per-pixel, byte 2 is scanline-pad, the rest (5) is padding. + if b[0] == 24 && b[1] == 32 { + agree = true + } + } + return +} + +// checkDepths checks that we have an agreeable X Depth (i.e. one that has an agreeable X VisualType). +func checkDepths(r io.Reader, b []byte, n int, visual uint32) (agree bool, err os.Error) { + for i := 0; i < n; i++ { + depth, err := readU16LE(r, b) + if err != nil { + return + } + depth &= 0xff + visualsLen, err := readU16LE(r, b) + if err != nil { + return + } + // Ignore 4 bytes of padding. + _, err = io.ReadFull(r, b[0:4]) + if err != nil { + return + } + for j := 0; j < int(visualsLen); j++ { + // Read 24 bytes: visual(4), class(1), bits per rgb value(1), colormap entries(2), + // red mask(4), green mask(4), blue mask(4), padding(4). + v, err := readU32LE(r, b) + _, err = readU32LE(r, b) + rm, err := readU32LE(r, b) + gm, err := readU32LE(r, b) + bm, err := readU32LE(r, b) + _, err = readU32LE(r, b) + if err != nil { + return + } + if v == visual && rm == 0xff0000 && gm == 0xff00 && bm == 0xff && depth == 24 { + agree = true + } + } + } + return +} + +// checkScreens checks that we have an agreeable X Screen. +func checkScreens(r io.Reader, b []byte, n int) (root, visual uint32, err os.Error) { + for i := 0; i < n; i++ { + root0, err := readU32LE(r, b) + if err != nil { + return + } + // Ignore the next 7x4 bytes, which is: colormap, whitepixel, blackpixel, current input masks, + // width and height (pixels), width and height (mm), min and max installed maps. 
+ _, err = io.ReadFull(r, b[0:28]) + if err != nil { + return + } + visual0, err := readU32LE(r, b) + if err != nil { + return + } + // Next 4 bytes: backing stores, save unders, root depth, allowed depths length. + x, err := readU32LE(r, b) + if err != nil { + return + } + nDepths := int(x >> 24) + agree, err := checkDepths(r, b, nDepths, visual0) + if err != nil { + return + } + if agree && root == 0 { + root = root0 + visual = visual0 + } + } + return +} + +// handshake performs the protocol handshake with the X server, and ensures +// that the server provides a compatible Screen, Depth, etc. +func (c *conn) handshake() os.Error { + _, err := io.ReadFull(c.r, c.buf[0:8]) + if err != nil { + return err + } + // Byte 0:1 should be 1 (success), bytes 2:6 should be 0xb0000000 (major/minor version 11.0). + if c.buf[0] != 1 || c.buf[2] != 11 || c.buf[3] != 0 || c.buf[4] != 0 || c.buf[5] != 0 { + return os.NewError("unsupported X version") + } + // Ignore the release number. + _, err = io.ReadFull(c.r, c.buf[0:4]) + if err != nil { + return err + } + // Read the resource ID base. + resourceIdBase, err := readU32LE(c.r, c.buf[0:4]) + if err != nil { + return err + } + // Read the resource ID mask. + resourceIdMask, err := readU32LE(c.r, c.buf[0:4]) + if err != nil { + return err + } + if resourceIdMask < 256 { + return os.NewError("X resource ID mask is too small") + } + // Ignore the motion buffer size. + _, err = io.ReadFull(c.r, c.buf[0:4]) + if err != nil { + return err + } + // Read the vendor length and round it up to a multiple of 4, + // for X11 protocol alignment reasons. + vendorLen, err := readU16LE(c.r, c.buf[0:2]) + if err != nil { + return err + } + vendorLen = (vendorLen + 3) &^ 3 + // Read the maximum request length. + maxReqLen, err := readU16LE(c.r, c.buf[0:2]) + if err != nil { + return err + } + if maxReqLen != 0xffff { + return os.NewError("unsupported X maximum request length") + } + // Read the roots length. 
+ rootsLen, err := readU8(c.r, c.buf[0:1]) + if err != nil { + return err + } + // Read the pixmap formats length. + pixmapFormatsLen, err := readU8(c.r, c.buf[0:1]) + if err != nil { + return err + } + // Ignore some things that we don't care about (totalling 10 + vendorLen bytes): + // imageByteOrder(1), bitmapFormatBitOrder(1), bitmapFormatScanlineUnit(1) bitmapFormatScanlinePad(1), + // minKeycode(1), maxKeycode(1), padding(4), vendor (vendorLen). + if 10+int(vendorLen) > cap(c.buf) { + return os.NewError("unsupported X vendor") + } + _, err = io.ReadFull(c.r, c.buf[0:10+int(vendorLen)]) + if err != nil { + return err + } + // Check that we have an agreeable pixmap format. + agree, err := checkPixmapFormats(c.r, c.buf[0:8], int(pixmapFormatsLen)) + if err != nil { + return err + } + if !agree { + return os.NewError("unsupported X pixmap formats") + } + // Check that we have an agreeable screen. + root, visual, err := checkScreens(c.r, c.buf[0:24], int(rootsLen)) + if err != nil { + return err + } + if root == 0 || visual == 0 { + return os.NewError("unsupported X screen") + } + c.gc = resID(resourceIdBase) + c.window = resID(resourceIdBase + 1) + c.root = resID(root) + c.visual = resID(visual) + return nil +} + +// NewWindow calls NewWindowDisplay with $DISPLAY. +func NewWindow() (draw.Window, os.Error) { + display := os.Getenv("DISPLAY") + if len(display) == 0 { + return nil, os.NewError("$DISPLAY not set") + } + return NewWindowDisplay(display) +} + +// NewWindowDisplay returns a new draw.Window, backed by a newly created and +// mapped X11 window. The X server to connect to is specified by the display +// string, such as ":1". 
+func NewWindowDisplay(display string) (draw.Window, os.Error) { + socket, displayStr, err := connect(display) + if err != nil { + return nil, err + } + c := new(conn) + c.c = socket + c.r = bufio.NewReader(socket) + c.w = bufio.NewWriter(socket) + err = authenticate(c.w, displayStr) + if err != nil { + return nil, err + } + err = c.handshake() + if err != nil { + return nil, err + } + + // Now that we're connected, show a window, via three X protocol messages. + // First, issue a GetKeyboardMapping request. This is the first request, and + // will be associated with a cookie of 1. + setU32LE(c.buf[0:4], 0x00020065) // 0x65 is the GetKeyboardMapping opcode, and the message is 2 x 4 bytes long. + setU32LE(c.buf[4:8], uint32((keymapHi-keymapLo+1)<<8|keymapLo)) + // Second, create a graphics context (GC). + setU32LE(c.buf[8:12], 0x00060037) // 0x37 is the CreateGC opcode, and the message is 6 x 4 bytes long. + setU32LE(c.buf[12:16], uint32(c.gc)) + setU32LE(c.buf[16:20], uint32(c.root)) + setU32LE(c.buf[20:24], 0x00010004) // Bit 2 is XCB_GC_FOREGROUND, bit 16 is XCB_GC_GRAPHICS_EXPOSURES. + setU32LE(c.buf[24:28], 0x00000000) // The Foreground is black. + setU32LE(c.buf[28:32], 0x00000000) // GraphicsExposures' value is unused. + // Third, create the window. + setU32LE(c.buf[32:36], 0x000a0001) // 0x01 is the CreateWindow opcode, and the message is 10 x 4 bytes long. + setU32LE(c.buf[36:40], uint32(c.window)) + setU32LE(c.buf[40:44], uint32(c.root)) + setU32LE(c.buf[44:48], 0x00000000) // Initial (x, y) is (0, 0). + setU32LE(c.buf[48:52], windowHeight<<16|windowWidth) + setU32LE(c.buf[52:56], 0x00010000) // Border width is 0, XCB_WINDOW_CLASS_INPUT_OUTPUT is 1. + setU32LE(c.buf[56:60], uint32(c.visual)) + setU32LE(c.buf[60:64], 0x00000802) // Bit 1 is XCB_CW_BACK_PIXEL, bit 11 is XCB_CW_EVENT_MASK. + setU32LE(c.buf[64:68], 0x00000000) // The Back-Pixel is black. 
+ setU32LE(c.buf[68:72], 0x0000804f) // Key/button press and release, pointer motion, and expose event masks. + // Fourth, map the window. + setU32LE(c.buf[72:76], 0x00020008) // 0x08 is the MapWindow opcode, and the message is 2 x 4 bytes long. + setU32LE(c.buf[76:80], uint32(c.window)) + // Write the bytes. + _, err = c.w.Write(c.buf[0:80]) + if err != nil { + return nil, err + } + err = c.w.Flush() + if err != nil { + return nil, err + } + + c.img = image.NewRGBA(windowWidth, windowHeight) + c.eventc = make(chan interface{}, 16) + c.flush = make(chan bool, 1) + go c.readSocket() + go c.writeSocket() + return c, nil +} diff --git a/libgo/go/exp/eval/abort.go b/libgo/go/exp/eval/abort.go new file mode 100644 index 000000000..22e17cec4 --- /dev/null +++ b/libgo/go/exp/eval/abort.go @@ -0,0 +1,85 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package eval + +import ( + "fmt" + "os" + "runtime" +) + +// Abort aborts the thread's current computation, +// causing the innermost Try to return err. +func (t *Thread) Abort(err os.Error) { + if t.abort == nil { + panic("abort: " + err.String()) + } + t.abort <- err + runtime.Goexit() +} + +// Try executes a computation; if the computation +// Aborts, Try returns the error passed to abort. 
+func (t *Thread) Try(f func(t *Thread)) os.Error { + oc := t.abort + c := make(chan os.Error) + t.abort = c + go func() { + f(t) + c <- nil + }() + err := <-c + t.abort = oc + return err +} + +type DivByZeroError struct{} + +func (DivByZeroError) String() string { return "divide by zero" } + +type NilPointerError struct{} + +func (NilPointerError) String() string { return "nil pointer dereference" } + +type IndexError struct { + Idx, Len int64 +} + +func (e IndexError) String() string { + if e.Idx < 0 { + return fmt.Sprintf("negative index: %d", e.Idx) + } + return fmt.Sprintf("index %d exceeds length %d", e.Idx, e.Len) +} + +type SliceError struct { + Lo, Hi, Cap int64 +} + +func (e SliceError) String() string { + return fmt.Sprintf("slice [%d:%d]; cap %d", e.Lo, e.Hi, e.Cap) +} + +type KeyError struct { + Key interface{} +} + +func (e KeyError) String() string { return fmt.Sprintf("key '%v' not found in map", e.Key) } + +type NegativeLengthError struct { + Len int64 +} + +func (e NegativeLengthError) String() string { + return fmt.Sprintf("negative length: %d", e.Len) +} + +type NegativeCapacityError struct { + Len int64 +} + +func (e NegativeCapacityError) String() string { + return fmt.Sprintf("negative capacity: %d", e.Len) +} diff --git a/libgo/go/exp/eval/bridge.go b/libgo/go/exp/eval/bridge.go new file mode 100644 index 000000000..12835c4c0 --- /dev/null +++ b/libgo/go/exp/eval/bridge.go @@ -0,0 +1,169 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package eval + +import ( + "log" + "go/token" + "reflect" +) + +/* + * Type bridging + */ + +var ( + evalTypes = make(map[reflect.Type]Type) + nativeTypes = make(map[Type]reflect.Type) +) + +// TypeFromNative converts a regular Go type into a the corresponding +// interpreter Type. 
+func TypeFromNative(t reflect.Type) Type { + if et, ok := evalTypes[t]; ok { + return et + } + + var nt *NamedType + if t.Name() != "" { + name := t.PkgPath() + "·" + t.Name() + nt = &NamedType{token.NoPos, name, nil, true, make(map[string]Method)} + evalTypes[t] = nt + } + + var et Type + switch t := t.(type) { + case *reflect.BoolType: + et = BoolType + case *reflect.FloatType: + switch t.Kind() { + case reflect.Float32: + et = Float32Type + case reflect.Float64: + et = Float64Type + } + case *reflect.IntType: + switch t.Kind() { + case reflect.Int16: + et = Int16Type + case reflect.Int32: + et = Int32Type + case reflect.Int64: + et = Int64Type + case reflect.Int8: + et = Int8Type + case reflect.Int: + et = IntType + } + case *reflect.UintType: + switch t.Kind() { + case reflect.Uint16: + et = Uint16Type + case reflect.Uint32: + et = Uint32Type + case reflect.Uint64: + et = Uint64Type + case reflect.Uint8: + et = Uint8Type + case reflect.Uint: + et = UintType + case reflect.Uintptr: + et = UintptrType + } + case *reflect.StringType: + et = StringType + case *reflect.ArrayType: + et = NewArrayType(int64(t.Len()), TypeFromNative(t.Elem())) + case *reflect.ChanType: + log.Panicf("%T not implemented", t) + case *reflect.FuncType: + nin := t.NumIn() + // Variadic functions have DotDotDotType at the end + variadic := t.DotDotDot() + if variadic { + nin-- + } + in := make([]Type, nin) + for i := range in { + in[i] = TypeFromNative(t.In(i)) + } + out := make([]Type, t.NumOut()) + for i := range out { + out[i] = TypeFromNative(t.Out(i)) + } + et = NewFuncType(in, variadic, out) + case *reflect.InterfaceType: + log.Panicf("%T not implemented", t) + case *reflect.MapType: + log.Panicf("%T not implemented", t) + case *reflect.PtrType: + et = NewPtrType(TypeFromNative(t.Elem())) + case *reflect.SliceType: + et = NewSliceType(TypeFromNative(t.Elem())) + case *reflect.StructType: + n := t.NumField() + fields := make([]StructField, n) + for i := 0; i < n; i++ { + sf := 
t.Field(i) + // TODO(austin) What to do about private fields? + fields[i].Name = sf.Name + fields[i].Type = TypeFromNative(sf.Type) + fields[i].Anonymous = sf.Anonymous + } + et = NewStructType(fields) + case *reflect.UnsafePointerType: + log.Panicf("%T not implemented", t) + default: + log.Panicf("unexpected reflect.Type: %T", t) + } + + if nt != nil { + if _, ok := et.(*NamedType); !ok { + nt.Complete(et) + et = nt + } + } + + nativeTypes[et] = t + evalTypes[t] = et + + return et +} + +// TypeOfNative returns the interpreter Type of a regular Go value. +func TypeOfNative(v interface{}) Type { return TypeFromNative(reflect.Typeof(v)) } + +/* + * Function bridging + */ + +type nativeFunc struct { + fn func(*Thread, []Value, []Value) + in, out int +} + +func (f *nativeFunc) NewFrame() *Frame { + vars := make([]Value, f.in+f.out) + return &Frame{nil, vars} +} + +func (f *nativeFunc) Call(t *Thread) { f.fn(t, t.f.Vars[0:f.in], t.f.Vars[f.in:f.in+f.out]) } + +// FuncFromNative creates an interpreter function from a native +// function that takes its in and out arguments as slices of +// interpreter Value's. While somewhat inconvenient, this avoids +// value marshalling. +func FuncFromNative(fn func(*Thread, []Value, []Value), t *FuncType) FuncValue { + return &funcV{&nativeFunc{fn, len(t.In), len(t.Out)}} +} + +// FuncFromNativeTyped is like FuncFromNative, but constructs the +// function type from a function pointer using reflection. Typically, +// the type will be given as a nil pointer to a function with the +// desired signature. +func FuncFromNativeTyped(fn func(*Thread, []Value, []Value), t interface{}) (*FuncType, FuncValue) { + ft := TypeOfNative(t).(*FuncType) + return ft, FuncFromNative(fn, ft) +} diff --git a/libgo/go/exp/eval/compiler.go b/libgo/go/exp/eval/compiler.go new file mode 100644 index 000000000..9d2923bfc --- /dev/null +++ b/libgo/go/exp/eval/compiler.go @@ -0,0 +1,92 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package eval + +import ( + "fmt" + "go/scanner" + "go/token" +) + + +// A compiler captures information used throughout an entire +// compilation. Currently it includes only the error handler. +// +// TODO(austin) This might actually represent package level, in which +// case it should be package compiler. +type compiler struct { + fset *token.FileSet + errors scanner.ErrorHandler + numErrors int + silentErrors int +} + +func (a *compiler) diagAt(pos token.Pos, format string, args ...interface{}) { + a.errors.Error(a.fset.Position(pos), fmt.Sprintf(format, args...)) + a.numErrors++ +} + +func (a *compiler) numError() int { return a.numErrors + a.silentErrors } + +// The universal scope +func newUniverse() *Scope { + sc := &Scope{nil, 0} + sc.block = &block{ + offset: 0, + scope: sc, + global: true, + defs: make(map[string]Def), + } + return sc +} + +var universe *Scope = newUniverse() + + +// TODO(austin) These can all go in stmt.go now +type label struct { + name string + desc string + // The PC goto statements should jump to, or nil if this label + // cannot be goto'd (such as an anonymous for loop label). + gotoPC *uint + // The PC break statements should jump to, or nil if a break + // statement is invalid. + breakPC *uint + // The PC continue statements should jump to, or nil if a + // continue statement is invalid. + continuePC *uint + // The position where this label was resolved. If it has not + // been resolved yet, an invalid position. + resolved token.Pos + // The position where this label was first jumped to. + used token.Pos +} + +// A funcCompiler captures information used throughout the compilation +// of a single function body. +type funcCompiler struct { + *compiler + fnType *FuncType + // Whether the out variables are named. This affects what + // kinds of return statements are legal. 
+ outVarsNamed bool + *codeBuf + flow *flowBuf + labels map[string]*label +} + +// A blockCompiler captures information used throughout the compilation +// of a single block within a function. +type blockCompiler struct { + *funcCompiler + block *block + // The label of this block, used for finding break and + // continue labels. + label *label + // The blockCompiler for the block enclosing this one, or nil + // for a function-level block. + parent *blockCompiler +} diff --git a/libgo/go/exp/eval/eval_test.go b/libgo/go/exp/eval/eval_test.go new file mode 100644 index 000000000..ff28cf1a9 --- /dev/null +++ b/libgo/go/exp/eval/eval_test.go @@ -0,0 +1,259 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package eval + +import ( + "big" + "flag" + "fmt" + "go/token" + "log" + "os" + "reflect" + "regexp" + "testing" +) + +// All tests are done using the same file set. +var fset = token.NewFileSet() + +// Print each statement or expression before parsing it +var noisy = false + +func init() { flag.BoolVar(&noisy, "noisy", false, "chatter during eval tests") } + +/* + * Generic statement/expression test framework + */ + +type test []job + +type job struct { + code string + cerr string + rterr string + val Value + noval bool +} + +func runTests(t *testing.T, baseName string, tests []test) { + for i, test := range tests { + name := fmt.Sprintf("%s[%d]", baseName, i) + test.run(t, name) + } +} + +func (a test) run(t *testing.T, name string) { + w := newTestWorld() + for _, j := range a { + src := j.code + ";" // trailing semicolon to finish statement + if noisy { + println("code:", src) + } + + code, err := w.Compile(fset, src) + if err != nil { + if j.cerr == "" { + t.Errorf("%s: Compile %s: %v", name, src, err) + break + } + if !match(t, err, j.cerr) { + t.Errorf("%s: Compile %s = error %s; want %v", name, src, err, j.cerr) + break + } + continue + } + if 
j.cerr != "" { + t.Errorf("%s: Compile %s succeeded; want %s", name, src, j.cerr) + break + } + + val, err := code.Run() + if err != nil { + if j.rterr == "" { + t.Errorf("%s: Run %s: %v", name, src, err) + break + } + if !match(t, err, j.rterr) { + t.Errorf("%s: Run %s = error %s; want %v", name, src, err, j.rterr) + break + } + continue + } + if j.rterr != "" { + t.Errorf("%s: Run %s succeeded; want %s", name, src, j.rterr) + break + } + + if !j.noval && !reflect.DeepEqual(val, j.val) { + t.Errorf("%s: Run %s = %T(%v) want %T(%v)", name, src, val, val, j.val, j.val) + } + } +} + +func match(t *testing.T, err os.Error, pat string) bool { + ok, err1 := regexp.MatchString(pat, err.String()) + if err1 != nil { + t.Fatalf("compile regexp %s: %v", pat, err1) + } + return ok +} + + +/* + * Test constructors + */ + +// Expression compile error +func CErr(expr string, cerr string) test { return test([]job{{code: expr, cerr: cerr}}) } + +// Expression runtime error +func RErr(expr string, rterr string) test { return test([]job{{code: expr, rterr: rterr}}) } + +// Expression value +func Val(expr string, val interface{}) test { + return test([]job{{code: expr, val: toValue(val)}}) +} + +// Statement runs without error +func Run(stmts string) test { return test([]job{{code: stmts, noval: true}}) } + +// Two statements without error. +// TODO(rsc): Should be possible with Run but the parser +// won't let us do both top-level and non-top-level statements. 
+func Run2(stmt1, stmt2 string) test { + return test([]job{{code: stmt1, noval: true}, {code: stmt2, noval: true}}) +} + +// Statement runs and test one expression's value +func Val1(stmts string, expr1 string, val1 interface{}) test { + return test([]job{ + {code: stmts, noval: true}, + {code: expr1, val: toValue(val1)}, + }) +} + +// Statement runs and test two expressions' values +func Val2(stmts string, expr1 string, val1 interface{}, expr2 string, val2 interface{}) test { + return test([]job{ + {code: stmts, noval: true}, + {code: expr1, val: toValue(val1)}, + {code: expr2, val: toValue(val2)}, + }) +} + +/* + * Value constructors + */ + +type vstruct []interface{} + +type varray []interface{} + +type vslice struct { + arr varray + len, cap int +} + +func toValue(val interface{}) Value { + switch val := val.(type) { + case bool: + r := boolV(val) + return &r + case uint8: + r := uint8V(val) + return &r + case uint: + r := uintV(val) + return &r + case int: + r := intV(val) + return &r + case *big.Int: + return &idealIntV{val} + case float64: + r := float64V(val) + return &r + case *big.Rat: + return &idealFloatV{val} + case string: + r := stringV(val) + return &r + case vstruct: + elems := make([]Value, len(val)) + for i, e := range val { + elems[i] = toValue(e) + } + r := structV(elems) + return &r + case varray: + elems := make([]Value, len(val)) + for i, e := range val { + elems[i] = toValue(e) + } + r := arrayV(elems) + return &r + case vslice: + return &sliceV{Slice{toValue(val.arr).(ArrayValue), int64(val.len), int64(val.cap)}} + case Func: + return &funcV{val} + } + log.Panicf("toValue(%T) not implemented", val) + panic("unreachable") +} + +/* + * Default test scope + */ + +type testFunc struct{} + +func (*testFunc) NewFrame() *Frame { return &Frame{nil, make([]Value, 2)} } + +func (*testFunc) Call(t *Thread) { + n := t.f.Vars[0].(IntValue).Get(t) + + res := n + 1 + + t.f.Vars[1].(IntValue).Set(t, res) +} + +type oneTwoFunc struct{} + +func 
(*oneTwoFunc) NewFrame() *Frame { return &Frame{nil, make([]Value, 2)} } + +func (*oneTwoFunc) Call(t *Thread) { + t.f.Vars[0].(IntValue).Set(t, 1) + t.f.Vars[1].(IntValue).Set(t, 2) +} + +type voidFunc struct{} + +func (*voidFunc) NewFrame() *Frame { return &Frame{nil, []Value{}} } + +func (*voidFunc) Call(t *Thread) {} + +func newTestWorld() *World { + w := NewWorld() + + def := func(name string, t Type, val interface{}) { w.DefineVar(name, t, toValue(val)) } + + w.DefineConst("c", IdealIntType, toValue(big.NewInt(1))) + def("i", IntType, 1) + def("i2", IntType, 2) + def("u", UintType, uint(1)) + def("f", Float64Type, 1.0) + def("s", StringType, "abc") + def("t", NewStructType([]StructField{{"a", IntType, false}}), vstruct{1}) + def("ai", NewArrayType(2, IntType), varray{1, 2}) + def("aai", NewArrayType(2, NewArrayType(2, IntType)), varray{varray{1, 2}, varray{3, 4}}) + def("aai2", NewArrayType(2, NewArrayType(2, IntType)), varray{varray{5, 6}, varray{7, 8}}) + def("fn", NewFuncType([]Type{IntType}, false, []Type{IntType}), &testFunc{}) + def("oneTwo", NewFuncType([]Type{}, false, []Type{IntType, IntType}), &oneTwoFunc{}) + def("void", NewFuncType([]Type{}, false, []Type{}), &voidFunc{}) + def("sli", NewSliceType(IntType), vslice{varray{1, 2, 3}, 2, 3}) + + return w +} diff --git a/libgo/go/exp/eval/expr.go b/libgo/go/exp/eval/expr.go new file mode 100644 index 000000000..e65f47617 --- /dev/null +++ b/libgo/go/exp/eval/expr.go @@ -0,0 +1,2015 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package eval + +import ( + "big" + "fmt" + "go/ast" + "go/token" + "log" + "strconv" + "strings" + "os" +) + +var ( + idealZero = big.NewInt(0) + idealOne = big.NewInt(1) +) + +// An expr is the result of compiling an expression. It stores the +// type of the expression and its evaluator function. 
+type expr struct { + *exprInfo + t Type + + // Evaluate this node as the given type. + eval interface{} + + // Map index expressions permit special forms of assignment, + // for which we need to know the Map and key. + evalMapValue func(t *Thread) (Map, interface{}) + + // Evaluate to the "address of" this value; that is, the + // settable Value object. nil for expressions whose address + // cannot be taken. + evalAddr func(t *Thread) Value + + // Execute this expression as a statement. Only expressions + // that are valid expression statements should set this. + exec func(t *Thread) + + // If this expression is a type, this is its compiled type. + // This is only permitted in the function position of a call + // expression. In this case, t should be nil. + valType Type + + // A short string describing this expression for error + // messages. + desc string +} + +// exprInfo stores information needed to compile any expression node. +// Each expr also stores its exprInfo so further expressions can be +// compiled from it. +type exprInfo struct { + *compiler + pos token.Pos +} + +func (a *exprInfo) newExpr(t Type, desc string) *expr { + return &expr{exprInfo: a, t: t, desc: desc} +} + +func (a *exprInfo) diag(format string, args ...interface{}) { + a.diagAt(a.pos, format, args...) +} + +func (a *exprInfo) diagOpType(op token.Token, vt Type) { + a.diag("illegal operand type for '%v' operator\n\t%v", op, vt) +} + +func (a *exprInfo) diagOpTypes(op token.Token, lt Type, rt Type) { + a.diag("illegal operand types for '%v' operator\n\t%v\n\t%v", op, lt, rt) +} + +/* + * Common expression manipulations + */ + +// a.convertTo(t) converts the value of the analyzed expression a, +// which must be a constant, ideal number, to a new analyzed +// expression with a constant value of type t. +// +// TODO(austin) Rename to resolveIdeal or something? 
+func (a *expr) convertTo(t Type) *expr { + if !a.t.isIdeal() { + log.Panicf("attempted to convert from %v, expected ideal", a.t) + } + + var rat *big.Rat + + // XXX(Spec) The spec says "It is erroneous". + // + // It is an error to assign a value with a non-zero fractional + // part to an integer, or if the assignment would overflow or + // underflow, or in general if the value cannot be represented + // by the type of the variable. + switch a.t { + case IdealFloatType: + rat = a.asIdealFloat()() + if t.isInteger() && !rat.IsInt() { + a.diag("constant %v truncated to integer", rat.FloatString(6)) + return nil + } + case IdealIntType: + i := a.asIdealInt()() + rat = new(big.Rat).SetInt(i) + default: + log.Panicf("unexpected ideal type %v", a.t) + } + + // Check bounds + if t, ok := t.lit().(BoundedType); ok { + if rat.Cmp(t.minVal()) < 0 { + a.diag("constant %v underflows %v", rat.FloatString(6), t) + return nil + } + if rat.Cmp(t.maxVal()) > 0 { + a.diag("constant %v overflows %v", rat.FloatString(6), t) + return nil + } + } + + // Convert rat to type t. + res := a.newExpr(t, a.desc) + switch t := t.lit().(type) { + case *uintType: + n, d := rat.Num(), rat.Denom() + f := new(big.Int).Quo(n, d) + f = f.Abs(f) + v := uint64(f.Int64()) + res.eval = func(*Thread) uint64 { return v } + case *intType: + n, d := rat.Num(), rat.Denom() + f := new(big.Int).Quo(n, d) + v := f.Int64() + res.eval = func(*Thread) int64 { return v } + case *idealIntType: + n, d := rat.Num(), rat.Denom() + f := new(big.Int).Quo(n, d) + res.eval = func() *big.Int { return f } + case *floatType: + n, d := rat.Num(), rat.Denom() + v := float64(n.Int64()) / float64(d.Int64()) + res.eval = func(*Thread) float64 { return v } + case *idealFloatType: + res.eval = func() *big.Rat { return rat } + default: + log.Panicf("cannot convert to type %T", t) + } + + return res +} + +// convertToInt converts this expression to an integer, if possible, +// or produces an error if not. 
This accepts ideal ints, uints, and +// ints. If max is not -1, produces an error if possible if the value +// exceeds max. If negErr is not "", produces an error if possible if +// the value is negative. +func (a *expr) convertToInt(max int64, negErr string, errOp string) *expr { + switch a.t.lit().(type) { + case *idealIntType: + val := a.asIdealInt()() + if negErr != "" && val.Sign() < 0 { + a.diag("negative %s: %s", negErr, val) + return nil + } + bound := max + if negErr == "slice" { + bound++ + } + if max != -1 && val.Cmp(big.NewInt(bound)) >= 0 { + a.diag("index %s exceeds length %d", val, max) + return nil + } + return a.convertTo(IntType) + + case *uintType: + // Convert to int + na := a.newExpr(IntType, a.desc) + af := a.asUint() + na.eval = func(t *Thread) int64 { return int64(af(t)) } + return na + + case *intType: + // Good as is + return a + } + + a.diag("illegal operand type for %s\n\t%v", errOp, a.t) + return nil +} + +// derefArray returns an expression of array type if the given +// expression is a *array type. Otherwise, returns the given +// expression. +func (a *expr) derefArray() *expr { + if pt, ok := a.t.lit().(*PtrType); ok { + if _, ok := pt.Elem.lit().(*ArrayType); ok { + deref := a.compileStarExpr(a) + if deref == nil { + log.Panicf("failed to dereference *array") + } + return deref + } + } + return a +} + +/* + * Assignments + */ + +// An assignCompiler compiles assignment operations. Anything other +// than short declarations should use the compileAssign wrapper. +// +// There are three valid types of assignment: +// 1) T = T +// Assigning a single expression with single-valued type to a +// single-valued type. +// 2) MT = T, T, ... +// Assigning multiple expressions with single-valued types to a +// multi-valued type. +// 3) MT = MT +// Assigning a single expression with multi-valued type to a +// multi-valued type. +type assignCompiler struct { + *compiler + pos token.Pos + // The RHS expressions. 
This may include nil's for + // expressions that failed to compile. + rs []*expr + // The (possibly unary) MultiType of the RHS. + rmt *MultiType + // Whether this is an unpack assignment (case 3). + isUnpack bool + // Whether map special assignment forms are allowed. + allowMap bool + // Whether this is a "r, ok = a[x]" assignment. + isMapUnpack bool + // The operation name to use in error messages, such as + // "assignment" or "function call". + errOp string + // The name to use for positions in error messages, such as + // "argument". + errPosName string +} + +// Type check the RHS of an assignment, returning a new assignCompiler +// and indicating if the type check succeeded. This always returns an +// assignCompiler with rmt set, but if type checking fails, slots in +// the MultiType may be nil. If rs contains nil's, type checking will +// fail and these expressions given a nil type. +func (a *compiler) checkAssign(pos token.Pos, rs []*expr, errOp, errPosName string) (*assignCompiler, bool) { + c := &assignCompiler{ + compiler: a, + pos: pos, + rs: rs, + errOp: errOp, + errPosName: errPosName, + } + + // Is this an unpack? + if len(rs) == 1 && rs[0] != nil { + if rmt, isUnpack := rs[0].t.(*MultiType); isUnpack { + c.rmt = rmt + c.isUnpack = true + return c, true + } + } + + // Create MultiType for RHS and check that all RHS expressions + // are single-valued. 
+ rts := make([]Type, len(rs)) + ok := true + for i, r := range rs { + if r == nil { + ok = false + continue + } + + if _, isMT := r.t.(*MultiType); isMT { + r.diag("multi-valued expression not allowed in %s", errOp) + ok = false + continue + } + + rts[i] = r.t + } + + c.rmt = NewMultiType(rts) + return c, ok +} + +func (a *assignCompiler) allowMapForms(nls int) { + a.allowMap = true + + // Update unpacking info if this is r, ok = a[x] + if nls == 2 && len(a.rs) == 1 && a.rs[0] != nil && a.rs[0].evalMapValue != nil { + a.isUnpack = true + a.rmt = NewMultiType([]Type{a.rs[0].t, BoolType}) + a.isMapUnpack = true + } +} + +// compile type checks and compiles an assignment operation, returning +// a function that expects an l-value and the frame in which to +// evaluate the RHS expressions. The l-value must have exactly the +// type given by lt. Returns nil if type checking fails. +func (a *assignCompiler) compile(b *block, lt Type) func(Value, *Thread) { + lmt, isMT := lt.(*MultiType) + rmt, isUnpack := a.rmt, a.isUnpack + + // Create unary MultiType for single LHS + if !isMT { + lmt = NewMultiType([]Type{lt}) + } + + // Check that the assignment count matches + lcount := len(lmt.Elems) + rcount := len(rmt.Elems) + if lcount != rcount { + msg := "not enough" + pos := a.pos + if rcount > lcount { + msg = "too many" + if lcount > 0 { + pos = a.rs[lcount-1].pos + } + } + a.diagAt(pos, "%s %ss for %s\n\t%s\n\t%s", msg, a.errPosName, a.errOp, lt, rmt) + return nil + } + + bad := false + + // If this is an unpack, create a temporary to store the + // multi-value and replace the RHS with expressions to pull + // out values from the temporary. Technically, this is only + // necessary when we need to perform assignment conversions. + var effect func(*Thread) + if isUnpack { + // This leaks a slot, but is definitely safe. 
+ temp := b.DefineTemp(a.rmt) + tempIdx := temp.Index + if tempIdx < 0 { + panic(fmt.Sprintln("tempidx", tempIdx)) + } + if a.isMapUnpack { + rf := a.rs[0].evalMapValue + vt := a.rmt.Elems[0] + effect = func(t *Thread) { + m, k := rf(t) + v := m.Elem(t, k) + found := boolV(true) + if v == nil { + found = boolV(false) + v = vt.Zero() + } + t.f.Vars[tempIdx] = multiV([]Value{v, &found}) + } + } else { + rf := a.rs[0].asMulti() + effect = func(t *Thread) { t.f.Vars[tempIdx] = multiV(rf(t)) } + } + orig := a.rs[0] + a.rs = make([]*expr, len(a.rmt.Elems)) + for i, t := range a.rmt.Elems { + if t.isIdeal() { + log.Panicf("Right side of unpack contains ideal: %s", rmt) + } + a.rs[i] = orig.newExpr(t, orig.desc) + index := i + a.rs[i].genValue(func(t *Thread) Value { return t.f.Vars[tempIdx].(multiV)[index] }) + } + } + // Now len(a.rs) == len(a.rmt) and we've reduced any unpacking + // to multi-assignment. + + // TODO(austin) Deal with assignment special cases. + + // Values of any type may always be assigned to variables of + // compatible static type. + for i, lt := range lmt.Elems { + rt := rmt.Elems[i] + + // When [an ideal is] (used in an expression) assigned + // to a variable or typed constant, the destination + // must be able to represent the assigned value. + if rt.isIdeal() { + a.rs[i] = a.rs[i].convertTo(lmt.Elems[i]) + if a.rs[i] == nil { + bad = true + continue + } + rt = a.rs[i].t + } + + // A pointer p to an array can be assigned to a slice + // variable v with compatible element type if the type + // of p or v is unnamed. 
+ if rpt, ok := rt.lit().(*PtrType); ok { + if at, ok := rpt.Elem.lit().(*ArrayType); ok { + if lst, ok := lt.lit().(*SliceType); ok { + if lst.Elem.compat(at.Elem, false) && (rt.lit() == Type(rt) || lt.lit() == Type(lt)) { + rf := a.rs[i].asPtr() + a.rs[i] = a.rs[i].newExpr(lt, a.rs[i].desc) + len := at.Len + a.rs[i].eval = func(t *Thread) Slice { return Slice{rf(t).(ArrayValue), len, len} } + rt = a.rs[i].t + } + } + } + } + + if !lt.compat(rt, false) { + if len(a.rs) == 1 { + a.rs[0].diag("illegal operand types for %s\n\t%v\n\t%v", a.errOp, lt, rt) + } else { + a.rs[i].diag("illegal operand types in %s %d of %s\n\t%v\n\t%v", a.errPosName, i+1, a.errOp, lt, rt) + } + bad = true + } + } + if bad { + return nil + } + + // Compile + if !isMT { + // Case 1 + return genAssign(lt, a.rs[0]) + } + // Case 2 or 3 + as := make([]func(lv Value, t *Thread), len(a.rs)) + for i, r := range a.rs { + as[i] = genAssign(lmt.Elems[i], r) + } + return func(lv Value, t *Thread) { + if effect != nil { + effect(t) + } + lmv := lv.(multiV) + for i, a := range as { + a(lmv[i], t) + } + } +} + +// compileAssign compiles an assignment operation without the full +// generality of an assignCompiler. See assignCompiler for a +// description of the arguments. +func (a *compiler) compileAssign(pos token.Pos, b *block, lt Type, rs []*expr, errOp, errPosName string) func(Value, *Thread) { + ac, ok := a.checkAssign(pos, rs, errOp, errPosName) + if !ok { + return nil + } + return ac.compile(b, lt) +} + +/* + * Expression compiler + */ + +// An exprCompiler stores information used throughout the compilation +// of a single expression. It does not embed funcCompiler because +// expressions can appear at top level. +type exprCompiler struct { + *compiler + // The block this expression is being compiled in. + block *block + // Whether this expression is used in a constant context. + constant bool +} + +// compile compiles an expression AST. 
callCtx should be true if this +// AST is in the function position of a function call node; it allows +// the returned expression to be a type or a built-in function (which +// otherwise result in errors). +func (a *exprCompiler) compile(x ast.Expr, callCtx bool) *expr { + ei := &exprInfo{a.compiler, x.Pos()} + + switch x := x.(type) { + // Literals + case *ast.BasicLit: + switch x.Kind { + case token.INT: + return ei.compileIntLit(string(x.Value)) + case token.FLOAT: + return ei.compileFloatLit(string(x.Value)) + case token.CHAR: + return ei.compileCharLit(string(x.Value)) + case token.STRING: + return ei.compileStringLit(string(x.Value)) + default: + log.Panicf("unexpected basic literal type %v", x.Kind) + } + + case *ast.CompositeLit: + goto notimpl + + case *ast.FuncLit: + decl := ei.compileFuncType(a.block, x.Type) + if decl == nil { + // TODO(austin) Try compiling the body, + // perhaps with dummy argument definitions + return nil + } + fn := ei.compileFunc(a.block, decl, x.Body) + if fn == nil { + return nil + } + if a.constant { + a.diagAt(x.Pos(), "function literal used in constant expression") + return nil + } + return ei.compileFuncLit(decl, fn) + + // Types + case *ast.ArrayType: + // TODO(austin) Use a multi-type case + goto typeexpr + + case *ast.ChanType: + goto typeexpr + + case *ast.Ellipsis: + goto typeexpr + + case *ast.FuncType: + goto typeexpr + + case *ast.InterfaceType: + goto typeexpr + + case *ast.MapType: + goto typeexpr + + // Remaining expressions + case *ast.BadExpr: + // Error already reported by parser + a.silentErrors++ + return nil + + case *ast.BinaryExpr: + l, r := a.compile(x.X, false), a.compile(x.Y, false) + if l == nil || r == nil { + return nil + } + return ei.compileBinaryExpr(x.Op, l, r) + + case *ast.CallExpr: + l := a.compile(x.Fun, true) + args := make([]*expr, len(x.Args)) + bad := false + for i, arg := range x.Args { + if i == 0 && l != nil && (l.t == Type(makeType) || l.t == Type(newType)) { + argei := 
&exprInfo{a.compiler, arg.Pos()} + args[i] = argei.exprFromType(a.compileType(a.block, arg)) + } else { + args[i] = a.compile(arg, false) + } + if args[i] == nil { + bad = true + } + } + if bad || l == nil { + return nil + } + if a.constant { + a.diagAt(x.Pos(), "function call in constant context") + return nil + } + + if l.valType != nil { + a.diagAt(x.Pos(), "type conversions not implemented") + return nil + } else if ft, ok := l.t.(*FuncType); ok && ft.builtin != "" { + return ei.compileBuiltinCallExpr(a.block, ft, args) + } else { + return ei.compileCallExpr(a.block, l, args) + } + + case *ast.Ident: + return ei.compileIdent(a.block, a.constant, callCtx, x.Name) + + case *ast.IndexExpr: + l, r := a.compile(x.X, false), a.compile(x.Index, false) + if l == nil || r == nil { + return nil + } + return ei.compileIndexExpr(l, r) + + case *ast.SliceExpr: + var lo, hi *expr + arr := a.compile(x.X, false) + if x.Low == nil { + // beginning was omitted, so we need to provide it + ei := &exprInfo{a.compiler, x.Pos()} + lo = ei.compileIntLit("0") + } else { + lo = a.compile(x.Low, false) + } + if x.High == nil { + // End was omitted, so we need to compute len(x.X) + ei := &exprInfo{a.compiler, x.Pos()} + hi = ei.compileBuiltinCallExpr(a.block, lenType, []*expr{arr}) + } else { + hi = a.compile(x.High, false) + } + if arr == nil || lo == nil || hi == nil { + return nil + } + return ei.compileSliceExpr(arr, lo, hi) + + case *ast.KeyValueExpr: + goto notimpl + + case *ast.ParenExpr: + return a.compile(x.X, callCtx) + + case *ast.SelectorExpr: + v := a.compile(x.X, false) + if v == nil { + return nil + } + return ei.compileSelectorExpr(v, x.Sel.Name) + + case *ast.StarExpr: + // We pass down our call context because this could be + // a pointer type (and thus a type conversion) + v := a.compile(x.X, callCtx) + if v == nil { + return nil + } + if v.valType != nil { + // Turns out this was a pointer type, not a dereference + return ei.exprFromType(NewPtrType(v.valType)) + } + 
return ei.compileStarExpr(v) + + case *ast.StructType: + goto notimpl + + case *ast.TypeAssertExpr: + goto notimpl + + case *ast.UnaryExpr: + v := a.compile(x.X, false) + if v == nil { + return nil + } + return ei.compileUnaryExpr(x.Op, v) + } + log.Panicf("unexpected ast node type %T", x) + panic("unreachable") + +typeexpr: + if !callCtx { + a.diagAt(x.Pos(), "type used as expression") + return nil + } + return ei.exprFromType(a.compileType(a.block, x)) + +notimpl: + a.diagAt(x.Pos(), "%T expression node not implemented", x) + return nil +} + +func (a *exprInfo) exprFromType(t Type) *expr { + if t == nil { + return nil + } + expr := a.newExpr(nil, "type") + expr.valType = t + return expr +} + +func (a *exprInfo) compileIdent(b *block, constant bool, callCtx bool, name string) *expr { + bl, level, def := b.Lookup(name) + if def == nil { + a.diag("%s: undefined", name) + return nil + } + switch def := def.(type) { + case *Constant: + expr := a.newExpr(def.Type, "constant") + if ft, ok := def.Type.(*FuncType); ok && ft.builtin != "" { + // XXX(Spec) I don't think anything says that + // built-in functions can't be used as values. 
+ if !callCtx { + a.diag("built-in function %s cannot be used as a value", ft.builtin) + return nil + } + // Otherwise, we leave the evaluators empty + // because this is handled specially + } else { + expr.genConstant(def.Value) + } + return expr + case *Variable: + if constant { + a.diag("variable %s used in constant expression", name) + return nil + } + if bl.global { + return a.compileGlobalVariable(def) + } + return a.compileVariable(level, def) + case Type: + if callCtx { + return a.exprFromType(def) + } + a.diag("type %v used as expression", name) + return nil + } + log.Panicf("name %s has unknown type %T", name, def) + panic("unreachable") +} + +func (a *exprInfo) compileVariable(level int, v *Variable) *expr { + if v.Type == nil { + // Placeholder definition from an earlier error + a.silentErrors++ + return nil + } + expr := a.newExpr(v.Type, "variable") + expr.genIdentOp(level, v.Index) + return expr +} + +func (a *exprInfo) compileGlobalVariable(v *Variable) *expr { + if v.Type == nil { + // Placeholder definition from an earlier error + a.silentErrors++ + return nil + } + if v.Init == nil { + v.Init = v.Type.Zero() + } + expr := a.newExpr(v.Type, "variable") + val := v.Init + expr.genValue(func(t *Thread) Value { return val }) + return expr +} + +func (a *exprInfo) compileIdealInt(i *big.Int, desc string) *expr { + expr := a.newExpr(IdealIntType, desc) + expr.eval = func() *big.Int { return i } + return expr +} + +func (a *exprInfo) compileIntLit(lit string) *expr { + i, _ := new(big.Int).SetString(lit, 0) + return a.compileIdealInt(i, "integer literal") +} + +func (a *exprInfo) compileCharLit(lit string) *expr { + if lit[0] != '\'' { + // Caught by parser + a.silentErrors++ + return nil + } + v, _, tail, err := strconv.UnquoteChar(lit[1:], '\'') + if err != nil || tail != "'" { + // Caught by parser + a.silentErrors++ + return nil + } + return a.compileIdealInt(big.NewInt(int64(v)), "character literal") +} + +func (a *exprInfo) compileFloatLit(lit 
string) *expr { + f, ok := new(big.Rat).SetString(lit) + if !ok { + log.Panicf("malformed float literal %s at %v passed parser", lit, a.pos) + } + expr := a.newExpr(IdealFloatType, "float literal") + expr.eval = func() *big.Rat { return f } + return expr +} + +func (a *exprInfo) compileString(s string) *expr { + // Ideal strings don't have a named type but they are + // compatible with type string. + + // TODO(austin) Use unnamed string type. + expr := a.newExpr(StringType, "string literal") + expr.eval = func(*Thread) string { return s } + return expr +} + +func (a *exprInfo) compileStringLit(lit string) *expr { + s, err := strconv.Unquote(lit) + if err != nil { + a.diag("illegal string literal, %v", err) + return nil + } + return a.compileString(s) +} + +func (a *exprInfo) compileStringList(list []*expr) *expr { + ss := make([]string, len(list)) + for i, s := range list { + ss[i] = s.asString()(nil) + } + return a.compileString(strings.Join(ss, "")) +} + +func (a *exprInfo) compileFuncLit(decl *FuncDecl, fn func(*Thread) Func) *expr { + expr := a.newExpr(decl.Type, "function literal") + expr.eval = fn + return expr +} + +func (a *exprInfo) compileSelectorExpr(v *expr, name string) *expr { + // mark marks a field that matches the selector name. It + // tracks the best depth found so far and whether more than + // one field has been found at that depth. + bestDepth := -1 + ambig := false + amberr := "" + mark := func(depth int, pathName string) { + switch { + case bestDepth == -1 || depth < bestDepth: + bestDepth = depth + ambig = false + amberr = "" + + case depth == bestDepth: + ambig = true + + default: + log.Panicf("Marked field at depth %d, but already found one at depth %d", depth, bestDepth) + } + amberr += "\n\t" + pathName[1:] + } + + visited := make(map[Type]bool) + + // find recursively searches for the named field, starting at + // type t. 
If it finds the named field, it returns a function + // which takes an expr that represents a value of type 't' and + // returns an expr that retrieves the named field. We delay + // expr construction to avoid producing lots of useless expr's + // as we search. + // + // TODO(austin) Now that the expression compiler works on + // semantic values instead of AST's, there should be a much + // better way of doing this. + var find func(Type, int, string) func(*expr) *expr + find = func(t Type, depth int, pathName string) func(*expr) *expr { + // Don't bother looking if we've found something shallower + if bestDepth != -1 && bestDepth < depth { + return nil + } + + // Don't check the same type twice and avoid loops + if visited[t] { + return nil + } + visited[t] = true + + // Implicit dereference + deref := false + if ti, ok := t.(*PtrType); ok { + deref = true + t = ti.Elem + } + + // If it's a named type, look for methods + if ti, ok := t.(*NamedType); ok { + _, ok := ti.methods[name] + if ok { + mark(depth, pathName+"."+name) + log.Panic("Methods not implemented") + } + t = ti.Def + } + + // If it's a struct type, check fields and embedded types + var builder func(*expr) *expr + if t, ok := t.(*StructType); ok { + for i, f := range t.Elems { + var sub func(*expr) *expr + switch { + case f.Name == name: + mark(depth, pathName+"."+name) + sub = func(e *expr) *expr { return e } + + case f.Anonymous: + sub = find(f.Type, depth+1, pathName+"."+f.Name) + if sub == nil { + continue + } + + default: + continue + } + + // We found something. Create a + // builder for accessing this field. 
+ ft := f.Type + index := i + builder = func(parent *expr) *expr { + if deref { + parent = a.compileStarExpr(parent) + } + expr := a.newExpr(ft, "selector expression") + pf := parent.asStruct() + evalAddr := func(t *Thread) Value { return pf(t).Field(t, index) } + expr.genValue(evalAddr) + return sub(expr) + } + } + } + + return builder + } + + builder := find(v.t, 0, "") + if builder == nil { + a.diag("type %v has no field or method %s", v.t, name) + return nil + } + if ambig { + a.diag("field %s is ambiguous in type %v%s", name, v.t, amberr) + return nil + } + + return builder(v) +} + +func (a *exprInfo) compileSliceExpr(arr, lo, hi *expr) *expr { + // Type check object + arr = arr.derefArray() + + var at Type + var maxIndex int64 = -1 + + switch lt := arr.t.lit().(type) { + case *ArrayType: + at = NewSliceType(lt.Elem) + maxIndex = lt.Len + + case *SliceType: + at = lt + + case *stringType: + at = lt + + default: + a.diag("cannot slice %v", arr.t) + return nil + } + + // Type check index and convert to int + // XXX(Spec) It's unclear if ideal floats with no + // fractional part are allowed here. 6g allows it. I + // believe that's wrong. 
+ lo = lo.convertToInt(maxIndex, "slice", "slice") + hi = hi.convertToInt(maxIndex, "slice", "slice") + if lo == nil || hi == nil { + return nil + } + + expr := a.newExpr(at, "slice expression") + + // Compile + lof := lo.asInt() + hif := hi.asInt() + switch lt := arr.t.lit().(type) { + case *ArrayType: + arrf := arr.asArray() + bound := lt.Len + expr.eval = func(t *Thread) Slice { + arr, lo, hi := arrf(t), lof(t), hif(t) + if lo > hi || hi > bound || lo < 0 { + t.Abort(SliceError{lo, hi, bound}) + } + return Slice{arr.Sub(lo, bound-lo), hi - lo, bound - lo} + } + + case *SliceType: + arrf := arr.asSlice() + expr.eval = func(t *Thread) Slice { + arr, lo, hi := arrf(t), lof(t), hif(t) + if lo > hi || hi > arr.Cap || lo < 0 { + t.Abort(SliceError{lo, hi, arr.Cap}) + } + return Slice{arr.Base.Sub(lo, arr.Cap-lo), hi - lo, arr.Cap - lo} + } + + case *stringType: + arrf := arr.asString() + // TODO(austin) This pulls over the whole string in a + // remote setting, instead of creating a substring backed + // by remote memory. 
+ expr.eval = func(t *Thread) string { + arr, lo, hi := arrf(t), lof(t), hif(t) + if lo > hi || hi > int64(len(arr)) || lo < 0 { + t.Abort(SliceError{lo, hi, int64(len(arr))}) + } + return arr[lo:hi] + } + + default: + log.Panicf("unexpected left operand type %T", arr.t.lit()) + } + + return expr +} + +func (a *exprInfo) compileIndexExpr(l, r *expr) *expr { + // Type check object + l = l.derefArray() + + var at Type + intIndex := false + var maxIndex int64 = -1 + + switch lt := l.t.lit().(type) { + case *ArrayType: + at = lt.Elem + intIndex = true + maxIndex = lt.Len + + case *SliceType: + at = lt.Elem + intIndex = true + + case *stringType: + at = Uint8Type + intIndex = true + + case *MapType: + at = lt.Elem + if r.t.isIdeal() { + r = r.convertTo(lt.Key) + if r == nil { + return nil + } + } + if !lt.Key.compat(r.t, false) { + a.diag("cannot use %s as index into %s", r.t, lt) + return nil + } + + default: + a.diag("cannot index into %v", l.t) + return nil + } + + // Type check index and convert to int if necessary + if intIndex { + // XXX(Spec) It's unclear if ideal floats with no + // fractional part are allowed here. 6g allows it. I + // believe that's wrong. 
+ r = r.convertToInt(maxIndex, "index", "index") + if r == nil { + return nil + } + } + + expr := a.newExpr(at, "index expression") + + // Compile + switch lt := l.t.lit().(type) { + case *ArrayType: + lf := l.asArray() + rf := r.asInt() + bound := lt.Len + expr.genValue(func(t *Thread) Value { + l, r := lf(t), rf(t) + if r < 0 || r >= bound { + t.Abort(IndexError{r, bound}) + } + return l.Elem(t, r) + }) + + case *SliceType: + lf := l.asSlice() + rf := r.asInt() + expr.genValue(func(t *Thread) Value { + l, r := lf(t), rf(t) + if l.Base == nil { + t.Abort(NilPointerError{}) + } + if r < 0 || r >= l.Len { + t.Abort(IndexError{r, l.Len}) + } + return l.Base.Elem(t, r) + }) + + case *stringType: + lf := l.asString() + rf := r.asInt() + // TODO(austin) This pulls over the whole string in a + // remote setting, instead of just the one character. + expr.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + if r < 0 || r >= int64(len(l)) { + t.Abort(IndexError{r, int64(len(l))}) + } + return uint64(l[r]) + } + + case *MapType: + lf := l.asMap() + rf := r.asInterface() + expr.genValue(func(t *Thread) Value { + m := lf(t) + k := rf(t) + if m == nil { + t.Abort(NilPointerError{}) + } + e := m.Elem(t, k) + if e == nil { + t.Abort(KeyError{k}) + } + return e + }) + // genValue makes things addressable, but map values + // aren't addressable. + expr.evalAddr = nil + expr.evalMapValue = func(t *Thread) (Map, interface{}) { + // TODO(austin) Key check? nil check? + return lf(t), rf(t) + } + + default: + log.Panicf("unexpected left operand type %T", l.t.lit()) + } + + return expr +} + +func (a *exprInfo) compileCallExpr(b *block, l *expr, as []*expr) *expr { + // TODO(austin) Variadic functions. + + // Type check + + // XXX(Spec) Calling a named function type is okay. I really + // think there needs to be a general discussion of named + // types. A named type creates a new, distinct type, but the + // type of that type is still whatever it's defined to. 
Thus, + // in "type Foo int", Foo is still an integer type and in + // "type Foo func()", Foo is a function type. + lt, ok := l.t.lit().(*FuncType) + if !ok { + a.diag("cannot call non-function type %v", l.t) + return nil + } + + // The arguments must be single-valued expressions assignment + // compatible with the parameters of F. + // + // XXX(Spec) The spec is wrong. It can also be a single + // multi-valued expression. + nin := len(lt.In) + assign := a.compileAssign(a.pos, b, NewMultiType(lt.In), as, "function call", "argument") + if assign == nil { + return nil + } + + var t Type + nout := len(lt.Out) + switch nout { + case 0: + t = EmptyType + case 1: + t = lt.Out[0] + default: + t = NewMultiType(lt.Out) + } + expr := a.newExpr(t, "function call") + + // Gather argument and out types to initialize frame variables + vts := make([]Type, nin+nout) + copy(vts, lt.In) + copy(vts[nin:], lt.Out) + + // Compile + lf := l.asFunc() + call := func(t *Thread) []Value { + fun := lf(t) + fr := fun.NewFrame() + for i, t := range vts { + fr.Vars[i] = t.Zero() + } + assign(multiV(fr.Vars[0:nin]), t) + oldf := t.f + t.f = fr + fun.Call(t) + t.f = oldf + return fr.Vars[nin : nin+nout] + } + expr.genFuncCall(call) + + return expr +} + +func (a *exprInfo) compileBuiltinCallExpr(b *block, ft *FuncType, as []*expr) *expr { + checkCount := func(min, max int) bool { + if len(as) < min { + a.diag("not enough arguments to %s", ft.builtin) + return false + } else if len(as) > max { + a.diag("too many arguments to %s", ft.builtin) + return false + } + return true + } + + switch ft { + case capType: + if !checkCount(1, 1) { + return nil + } + arg := as[0].derefArray() + expr := a.newExpr(IntType, "function call") + switch t := arg.t.lit().(type) { + case *ArrayType: + // TODO(austin) It would be nice if this could + // be a constant int. 
+ v := t.Len + expr.eval = func(t *Thread) int64 { return v } + + case *SliceType: + vf := arg.asSlice() + expr.eval = func(t *Thread) int64 { return vf(t).Cap } + + //case *ChanType: + + default: + a.diag("illegal argument type for cap function\n\t%v", arg.t) + return nil + } + return expr + + case copyType: + if !checkCount(2, 2) { + return nil + } + src := as[1] + dst := as[0] + if src.t != dst.t { + a.diag("arguments to built-in function 'copy' must have same type\nsrc: %s\ndst: %s\n", src.t, dst.t) + return nil + } + if _, ok := src.t.lit().(*SliceType); !ok { + a.diag("src argument to 'copy' must be a slice (got: %s)", src.t) + return nil + } + if _, ok := dst.t.lit().(*SliceType); !ok { + a.diag("dst argument to 'copy' must be a slice (got: %s)", dst.t) + return nil + } + expr := a.newExpr(IntType, "function call") + srcf := src.asSlice() + dstf := dst.asSlice() + expr.eval = func(t *Thread) int64 { + src, dst := srcf(t), dstf(t) + nelems := src.Len + if nelems > dst.Len { + nelems = dst.Len + } + dst.Base.Sub(0, nelems).Assign(t, src.Base.Sub(0, nelems)) + return nelems + } + return expr + + case lenType: + if !checkCount(1, 1) { + return nil + } + arg := as[0].derefArray() + expr := a.newExpr(IntType, "function call") + switch t := arg.t.lit().(type) { + case *stringType: + vf := arg.asString() + expr.eval = func(t *Thread) int64 { return int64(len(vf(t))) } + + case *ArrayType: + // TODO(austin) It would be nice if this could + // be a constant int. + v := t.Len + expr.eval = func(t *Thread) int64 { return v } + + case *SliceType: + vf := arg.asSlice() + expr.eval = func(t *Thread) int64 { return vf(t).Len } + + case *MapType: + vf := arg.asMap() + expr.eval = func(t *Thread) int64 { + // XXX(Spec) What's the len of an + // uninitialized map? 
+ m := vf(t) + if m == nil { + return 0 + } + return m.Len(t) + } + + //case *ChanType: + + default: + a.diag("illegal argument type for len function\n\t%v", arg.t) + return nil + } + return expr + + case makeType: + if !checkCount(1, 3) { + return nil + } + // XXX(Spec) What are the types of the + // arguments? Do they have to be ints? 6g + // accepts any integral type. + var lenexpr, capexpr *expr + var lenf, capf func(*Thread) int64 + if len(as) > 1 { + lenexpr = as[1].convertToInt(-1, "length", "make function") + if lenexpr == nil { + return nil + } + lenf = lenexpr.asInt() + } + if len(as) > 2 { + capexpr = as[2].convertToInt(-1, "capacity", "make function") + if capexpr == nil { + return nil + } + capf = capexpr.asInt() + } + + switch t := as[0].valType.lit().(type) { + case *SliceType: + // A new, initialized slice value for a given + // element type T is made using the built-in + // function make, which takes a slice type and + // parameters specifying the length and + // optionally the capacity. + if !checkCount(2, 3) { + return nil + } + et := t.Elem + expr := a.newExpr(t, "function call") + expr.eval = func(t *Thread) Slice { + l := lenf(t) + // XXX(Spec) What if len or cap is + // negative? The runtime panics. + if l < 0 { + t.Abort(NegativeLengthError{l}) + } + c := l + if capf != nil { + c = capf(t) + if c < 0 { + t.Abort(NegativeCapacityError{c}) + } + // XXX(Spec) What happens if + // len > cap? The runtime + // sets cap to len. + if l > c { + c = l + } + } + base := arrayV(make([]Value, c)) + for i := int64(0); i < c; i++ { + base[i] = et.Zero() + } + return Slice{&base, l, c} + } + return expr + + case *MapType: + // A new, empty map value is made using the + // built-in function make, which takes the map + // type and an optional capacity hint as + // arguments. 
+ if !checkCount(1, 2) { + return nil + } + expr := a.newExpr(t, "function call") + expr.eval = func(t *Thread) Map { + if lenf == nil { + return make(evalMap) + } + l := lenf(t) + return make(evalMap, l) + } + return expr + + //case *ChanType: + + default: + a.diag("illegal argument type for make function\n\t%v", as[0].valType) + return nil + } + + case closeType, closedType: + a.diag("built-in function %s not implemented", ft.builtin) + return nil + + case newType: + if !checkCount(1, 1) { + return nil + } + + t := as[0].valType + expr := a.newExpr(NewPtrType(t), "new") + expr.eval = func(*Thread) Value { return t.Zero() } + return expr + + case panicType, printType, printlnType: + evals := make([]func(*Thread) interface{}, len(as)) + for i, x := range as { + evals[i] = x.asInterface() + } + spaces := ft == printlnType + newline := ft != printType + printer := func(t *Thread) { + for i, eval := range evals { + if i > 0 && spaces { + print(" ") + } + v := eval(t) + type stringer interface { + String() string + } + switch v1 := v.(type) { + case bool: + print(v1) + case uint64: + print(v1) + case int64: + print(v1) + case float64: + print(v1) + case string: + print(v1) + case stringer: + print(v1.String()) + default: + print("???") + } + } + if newline { + print("\n") + } + } + expr := a.newExpr(EmptyType, "print") + expr.exec = printer + if ft == panicType { + expr.exec = func(t *Thread) { + printer(t) + t.Abort(os.NewError("panic")) + } + } + return expr + } + + log.Panicf("unexpected built-in function '%s'", ft.builtin) + panic("unreachable") +} + +func (a *exprInfo) compileStarExpr(v *expr) *expr { + switch vt := v.t.lit().(type) { + case *PtrType: + expr := a.newExpr(vt.Elem, "indirect expression") + vf := v.asPtr() + expr.genValue(func(t *Thread) Value { + v := vf(t) + if v == nil { + t.Abort(NilPointerError{}) + } + return v + }) + return expr + } + + a.diagOpType(token.MUL, v.t) + return nil +} + +var unaryOpDescs = make(map[token.Token]string) + +func (a 
*exprInfo) compileUnaryExpr(op token.Token, v *expr) *expr { + // Type check + var t Type + switch op { + case token.ADD, token.SUB: + if !v.t.isInteger() && !v.t.isFloat() { + a.diagOpType(op, v.t) + return nil + } + t = v.t + + case token.NOT: + if !v.t.isBoolean() { + a.diagOpType(op, v.t) + return nil + } + t = BoolType + + case token.XOR: + if !v.t.isInteger() { + a.diagOpType(op, v.t) + return nil + } + t = v.t + + case token.AND: + // The unary prefix address-of operator & generates + // the address of its operand, which must be a + // variable, pointer indirection, field selector, or + // array or slice indexing operation. + if v.evalAddr == nil { + a.diag("cannot take the address of %s", v.desc) + return nil + } + + // TODO(austin) Implement "It is illegal to take the + // address of a function result variable" once I have + // function result variables. + + t = NewPtrType(v.t) + + case token.ARROW: + log.Panicf("Unary op %v not implemented", op) + + default: + log.Panicf("unknown unary operator %v", op) + } + + desc, ok := unaryOpDescs[op] + if !ok { + desc = "unary " + op.String() + " expression" + unaryOpDescs[op] = desc + } + + // Compile + expr := a.newExpr(t, desc) + switch op { + case token.ADD: + // Just compile it out + expr = v + expr.desc = desc + + case token.SUB: + expr.genUnaryOpNeg(v) + + case token.NOT: + expr.genUnaryOpNot(v) + + case token.XOR: + expr.genUnaryOpXor(v) + + case token.AND: + vf := v.evalAddr + expr.eval = func(t *Thread) Value { return vf(t) } + + default: + log.Panicf("Compilation of unary op %v not implemented", op) + } + + return expr +} + +var binOpDescs = make(map[token.Token]string) + +func (a *exprInfo) compileBinaryExpr(op token.Token, l, r *expr) *expr { + // Save the original types of l.t and r.t for error messages. + origlt := l.t + origrt := r.t + + // XXX(Spec) What is the exact definition of a "named type"? 
+ + // XXX(Spec) Arithmetic operators: "Integer types" apparently + // means all types compatible with basic integer types, though + // this is never explained. Likewise for float types, etc. + // This relates to the missing explanation of named types. + + // XXX(Spec) Operators: "If both operands are ideal numbers, + // the conversion is to ideal floats if one of the operands is + // an ideal float (relevant for / and %)." How is that + // relevant only for / and %? If I add an ideal int and an + // ideal float, I get an ideal float. + + if op != token.SHL && op != token.SHR { + // Except in shift expressions, if one operand has + // numeric type and the other operand is an ideal + // number, the ideal number is converted to match the + // type of the other operand. + if (l.t.isInteger() || l.t.isFloat()) && !l.t.isIdeal() && r.t.isIdeal() { + r = r.convertTo(l.t) + } else if (r.t.isInteger() || r.t.isFloat()) && !r.t.isIdeal() && l.t.isIdeal() { + l = l.convertTo(r.t) + } + if l == nil || r == nil { + return nil + } + + // Except in shift expressions, if both operands are + // ideal numbers and one is an ideal float, the other + // is converted to ideal float. + if l.t.isIdeal() && r.t.isIdeal() { + if l.t.isInteger() && r.t.isFloat() { + l = l.convertTo(r.t) + } else if l.t.isFloat() && r.t.isInteger() { + r = r.convertTo(l.t) + } + if l == nil || r == nil { + return nil + } + } + } + + // Useful type predicates + // TODO(austin) CL 33668 mandates identical types except for comparisons. 
+ compat := func() bool { return l.t.compat(r.t, false) } + integers := func() bool { return l.t.isInteger() && r.t.isInteger() } + floats := func() bool { return l.t.isFloat() && r.t.isFloat() } + strings := func() bool { + // TODO(austin) Deal with named types + return l.t == StringType && r.t == StringType + } + booleans := func() bool { return l.t.isBoolean() && r.t.isBoolean() } + + // Type check + var t Type + switch op { + case token.ADD: + if !compat() || (!integers() && !floats() && !strings()) { + a.diagOpTypes(op, origlt, origrt) + return nil + } + t = l.t + + case token.SUB, token.MUL, token.QUO: + if !compat() || (!integers() && !floats()) { + a.diagOpTypes(op, origlt, origrt) + return nil + } + t = l.t + + case token.REM, token.AND, token.OR, token.XOR, token.AND_NOT: + if !compat() || !integers() { + a.diagOpTypes(op, origlt, origrt) + return nil + } + t = l.t + + case token.SHL, token.SHR: + // XXX(Spec) Is it okay for the right operand to be an + // ideal float with no fractional part? "The right + // operand in a shift operation must be always be of + // unsigned integer type or an ideal number that can + // be safely converted into an unsigned integer type + // (§Arithmetic operators)" suggests so and 6g agrees. + + if !l.t.isInteger() || !(r.t.isInteger() || r.t.isIdeal()) { + a.diagOpTypes(op, origlt, origrt) + return nil + } + + // The right operand in a shift operation must be + // always be of unsigned integer type or an ideal + // number that can be safely converted into an + // unsigned integer type. + if r.t.isIdeal() { + r2 := r.convertTo(UintType) + if r2 == nil { + return nil + } + + // If the left operand is not ideal, convert + // the right to not ideal. + if !l.t.isIdeal() { + r = r2 + } + + // If both are ideal, but the right side isn't + // an ideal int, convert it to simplify things. 
+ if l.t.isIdeal() && !r.t.isInteger() { + r = r.convertTo(IdealIntType) + if r == nil { + log.Panicf("conversion to uintType succeeded, but conversion to idealIntType failed") + } + } + } else if _, ok := r.t.lit().(*uintType); !ok { + a.diag("right operand of shift must be unsigned") + return nil + } + + if l.t.isIdeal() && !r.t.isIdeal() { + // XXX(Spec) What is the meaning of "ideal >> + // non-ideal"? Russ says the ideal should be + // converted to an int. 6g propagates the + // type down from assignments as a hint. + + l = l.convertTo(IntType) + if l == nil { + return nil + } + } + + // At this point, we should have one of three cases: + // 1) uint SHIFT uint + // 2) int SHIFT uint + // 3) ideal int SHIFT ideal int + + t = l.t + + case token.LOR, token.LAND: + if !booleans() { + return nil + } + // XXX(Spec) There's no mention of *which* boolean + // type the logical operators return. From poking at + // 6g, it appears to be the named boolean type, NOT + // the type of the left operand, and NOT an unnamed + // boolean type. + + t = BoolType + + case token.ARROW: + // The operands in channel sends differ in type: one + // is always a channel and the other is a variable or + // value of the channel's element type. + log.Panic("Binary op <- not implemented") + t = BoolType + + case token.LSS, token.GTR, token.LEQ, token.GEQ: + // XXX(Spec) It's really unclear what types which + // comparison operators apply to. I feel like the + // text is trying to paint a Venn diagram for me, + // which it's really pretty simple: <, <=, >, >= apply + // only to numeric types and strings. == and != apply + // to everything except arrays and structs, and there + // are some restrictions on when it applies to slices. 
+ + if !compat() || (!integers() && !floats() && !strings()) { + a.diagOpTypes(op, origlt, origrt) + return nil + } + t = BoolType + + case token.EQL, token.NEQ: + // XXX(Spec) The rules for type checking comparison + // operators are spread across three places that all + // partially overlap with each other: the Comparison + // Compatibility section, the Operators section, and + // the Comparison Operators section. The Operators + // section should just say that operators require + // identical types (as it does currently) except that + // there a few special cases for comparison, which are + // described in section X. Currently it includes just + // one of the four special cases. The Comparison + // Compatibility section and the Comparison Operators + // section should either be merged, or at least the + // Comparison Compatibility section should be + // exclusively about type checking and the Comparison + // Operators section should be exclusively about + // semantics. + + // XXX(Spec) Comparison operators: "All comparison + // operators apply to basic types except bools." This + // is very difficult to parse. It's explained much + // better in the Comparison Compatibility section. + + // XXX(Spec) Comparison compatibility: "Function + // values are equal if they refer to the same + // function." is rather vague. It should probably be + // similar to the way the rule for map values is + // written: Function values are equal if they were + // created by the same execution of a function literal + // or refer to the same function declaration. This is + // *almost* but not quite waht 6g implements. If a + // function literals does not capture any variables, + // then multiple executions of it will result in the + // same closure. Russ says he'll change that. + + // TODO(austin) Deal with remaining special cases + + if !compat() { + a.diagOpTypes(op, origlt, origrt) + return nil + } + // Arrays and structs may not be compared to anything. 
+ switch l.t.(type) { + case *ArrayType, *StructType: + a.diagOpTypes(op, origlt, origrt) + return nil + } + t = BoolType + + default: + log.Panicf("unknown binary operator %v", op) + } + + desc, ok := binOpDescs[op] + if !ok { + desc = op.String() + " expression" + binOpDescs[op] = desc + } + + // Check for ideal divide by zero + switch op { + case token.QUO, token.REM: + if r.t.isIdeal() { + if (r.t.isInteger() && r.asIdealInt()().Sign() == 0) || + (r.t.isFloat() && r.asIdealFloat()().Sign() == 0) { + a.diag("divide by zero") + return nil + } + } + } + + // Compile + expr := a.newExpr(t, desc) + switch op { + case token.ADD: + expr.genBinOpAdd(l, r) + + case token.SUB: + expr.genBinOpSub(l, r) + + case token.MUL: + expr.genBinOpMul(l, r) + + case token.QUO: + expr.genBinOpQuo(l, r) + + case token.REM: + expr.genBinOpRem(l, r) + + case token.AND: + expr.genBinOpAnd(l, r) + + case token.OR: + expr.genBinOpOr(l, r) + + case token.XOR: + expr.genBinOpXor(l, r) + + case token.AND_NOT: + expr.genBinOpAndNot(l, r) + + case token.SHL: + if l.t.isIdeal() { + lv := l.asIdealInt()() + rv := r.asIdealInt()() + const maxShift = 99999 + if rv.Cmp(big.NewInt(maxShift)) > 0 { + a.diag("left shift by %v; exceeds implementation limit of %v", rv, maxShift) + expr.t = nil + return nil + } + val := new(big.Int).Lsh(lv, uint(rv.Int64())) + expr.eval = func() *big.Int { return val } + } else { + expr.genBinOpShl(l, r) + } + + case token.SHR: + if l.t.isIdeal() { + lv := l.asIdealInt()() + rv := r.asIdealInt()() + val := new(big.Int).Rsh(lv, uint(rv.Int64())) + expr.eval = func() *big.Int { return val } + } else { + expr.genBinOpShr(l, r) + } + + case token.LSS: + expr.genBinOpLss(l, r) + + case token.GTR: + expr.genBinOpGtr(l, r) + + case token.LEQ: + expr.genBinOpLeq(l, r) + + case token.GEQ: + expr.genBinOpGeq(l, r) + + case token.EQL: + expr.genBinOpEql(l, r) + + case token.NEQ: + expr.genBinOpNeq(l, r) + + case token.LAND: + expr.genBinOpLogAnd(l, r) + + case token.LOR: + 
expr.genBinOpLogOr(l, r) + + default: + log.Panicf("Compilation of binary op %v not implemented", op) + } + + return expr +} + +// TODO(austin) This is a hack to eliminate a circular dependency +// between type.go and expr.go +func (a *compiler) compileArrayLen(b *block, expr ast.Expr) (int64, bool) { + lenExpr := a.compileExpr(b, true, expr) + if lenExpr == nil { + return 0, false + } + + // XXX(Spec) Are ideal floats with no fractional part okay? + if lenExpr.t.isIdeal() { + lenExpr = lenExpr.convertTo(IntType) + if lenExpr == nil { + return 0, false + } + } + + if !lenExpr.t.isInteger() { + a.diagAt(expr.Pos(), "array size must be an integer") + return 0, false + } + + switch lenExpr.t.lit().(type) { + case *intType: + return lenExpr.asInt()(nil), true + case *uintType: + return int64(lenExpr.asUint()(nil)), true + } + log.Panicf("unexpected integer type %T", lenExpr.t) + return 0, false +} + +func (a *compiler) compileExpr(b *block, constant bool, expr ast.Expr) *expr { + ec := &exprCompiler{a, b, constant} + nerr := a.numError() + e := ec.compile(expr, false) + if e == nil && nerr == a.numError() { + log.Panicf("expression compilation failed without reporting errors") + } + return e +} + +// extractEffect separates out any effects that the expression may +// have, returning a function that will perform those effects and a +// new exprCompiler that is guaranteed to be side-effect free. These +// are the moral equivalents of "temp := expr" and "temp" (or "temp := +// &expr" and "*temp" for addressable exprs). Because this creates a +// temporary variable, the caller should create a temporary block for +// the compilation of this expression and the evaluation of the +// results. 
+func (a *expr) extractEffect(b *block, errOp string) (func(*Thread), *expr) { + // Create "&a" if a is addressable + rhs := a + if a.evalAddr != nil { + rhs = a.compileUnaryExpr(token.AND, rhs) + } + + // Create temp + ac, ok := a.checkAssign(a.pos, []*expr{rhs}, errOp, "") + if !ok { + return nil, nil + } + if len(ac.rmt.Elems) != 1 { + a.diag("multi-valued expression not allowed in %s", errOp) + return nil, nil + } + tempType := ac.rmt.Elems[0] + if tempType.isIdeal() { + // It's too bad we have to duplicate this rule. + switch { + case tempType.isInteger(): + tempType = IntType + case tempType.isFloat(): + tempType = Float64Type + default: + log.Panicf("unexpected ideal type %v", tempType) + } + } + temp := b.DefineTemp(tempType) + tempIdx := temp.Index + + // Create "temp := rhs" + assign := ac.compile(b, tempType) + if assign == nil { + log.Panicf("compileAssign type check failed") + } + + effect := func(t *Thread) { + tempVal := tempType.Zero() + t.f.Vars[tempIdx] = tempVal + assign(tempVal, t) + } + + // Generate "temp" or "*temp" + getTemp := a.compileVariable(0, temp) + if a.evalAddr == nil { + return effect, getTemp + } + + deref := a.compileStarExpr(getTemp) + if deref == nil { + return nil, nil + } + return effect, deref +} diff --git a/libgo/go/exp/eval/expr1.go b/libgo/go/exp/eval/expr1.go new file mode 100644 index 000000000..5d0e50000 --- /dev/null +++ b/libgo/go/exp/eval/expr1.go @@ -0,0 +1,1874 @@ +// This file is machine generated by gen.go. +// 6g gen.go && 6l gen.6 && ./6.out >expr1.go + +package eval + +import ( + "big" + "log" +) + +/* + * "As" functions. These retrieve evaluator functions from an + * expr, panicking if the requested evaluator has the wrong type. 
+ */ +func (a *expr) asBool() func(*Thread) bool { + return a.eval.(func(*Thread) bool) +} +func (a *expr) asUint() func(*Thread) uint64 { + return a.eval.(func(*Thread) uint64) +} +func (a *expr) asInt() func(*Thread) int64 { + return a.eval.(func(*Thread) int64) +} +func (a *expr) asIdealInt() func() *big.Int { + return a.eval.(func() *big.Int) +} +func (a *expr) asFloat() func(*Thread) float64 { + return a.eval.(func(*Thread) float64) +} +func (a *expr) asIdealFloat() func() *big.Rat { + return a.eval.(func() *big.Rat) +} +func (a *expr) asString() func(*Thread) string { + return a.eval.(func(*Thread) string) +} +func (a *expr) asArray() func(*Thread) ArrayValue { + return a.eval.(func(*Thread) ArrayValue) +} +func (a *expr) asStruct() func(*Thread) StructValue { + return a.eval.(func(*Thread) StructValue) +} +func (a *expr) asPtr() func(*Thread) Value { + return a.eval.(func(*Thread) Value) +} +func (a *expr) asFunc() func(*Thread) Func { + return a.eval.(func(*Thread) Func) +} +func (a *expr) asSlice() func(*Thread) Slice { + return a.eval.(func(*Thread) Slice) +} +func (a *expr) asMap() func(*Thread) Map { + return a.eval.(func(*Thread) Map) +} +func (a *expr) asMulti() func(*Thread) []Value { + return a.eval.(func(*Thread) []Value) +} + +func (a *expr) asInterface() func(*Thread) interface{} { + switch sf := a.eval.(type) { + case func(t *Thread) bool: + return func(t *Thread) interface{} { return sf(t) } + case func(t *Thread) uint64: + return func(t *Thread) interface{} { return sf(t) } + case func(t *Thread) int64: + return func(t *Thread) interface{} { return sf(t) } + case func() *big.Int: + return func(*Thread) interface{} { return sf() } + case func(t *Thread) float64: + return func(t *Thread) interface{} { return sf(t) } + case func() *big.Rat: + return func(*Thread) interface{} { return sf() } + case func(t *Thread) string: + return func(t *Thread) interface{} { return sf(t) } + case func(t *Thread) ArrayValue: + return func(t *Thread) interface{} { 
return sf(t) } + case func(t *Thread) StructValue: + return func(t *Thread) interface{} { return sf(t) } + case func(t *Thread) Value: + return func(t *Thread) interface{} { return sf(t) } + case func(t *Thread) Func: + return func(t *Thread) interface{} { return sf(t) } + case func(t *Thread) Slice: + return func(t *Thread) interface{} { return sf(t) } + case func(t *Thread) Map: + return func(t *Thread) interface{} { return sf(t) } + default: + log.Panicf("unexpected expression node type %T at %v", a.eval, a.pos) + } + panic("fail") +} + +/* + * Operator generators. + */ + +func (a *expr) genConstant(v Value) { + switch a.t.lit().(type) { + case *boolType: + a.eval = func(t *Thread) bool { return v.(BoolValue).Get(t) } + case *uintType: + a.eval = func(t *Thread) uint64 { return v.(UintValue).Get(t) } + case *intType: + a.eval = func(t *Thread) int64 { return v.(IntValue).Get(t) } + case *idealIntType: + val := v.(IdealIntValue).Get() + a.eval = func() *big.Int { return val } + case *floatType: + a.eval = func(t *Thread) float64 { return v.(FloatValue).Get(t) } + case *idealFloatType: + val := v.(IdealFloatValue).Get() + a.eval = func() *big.Rat { return val } + case *stringType: + a.eval = func(t *Thread) string { return v.(StringValue).Get(t) } + case *ArrayType: + a.eval = func(t *Thread) ArrayValue { return v.(ArrayValue).Get(t) } + case *StructType: + a.eval = func(t *Thread) StructValue { return v.(StructValue).Get(t) } + case *PtrType: + a.eval = func(t *Thread) Value { return v.(PtrValue).Get(t) } + case *FuncType: + a.eval = func(t *Thread) Func { return v.(FuncValue).Get(t) } + case *SliceType: + a.eval = func(t *Thread) Slice { return v.(SliceValue).Get(t) } + case *MapType: + a.eval = func(t *Thread) Map { return v.(MapValue).Get(t) } + default: + log.Panicf("unexpected constant type %v at %v", a.t, a.pos) + } +} + +func (a *expr) genIdentOp(level, index int) { + a.evalAddr = func(t *Thread) Value { return t.f.Get(level, index) } + switch 
a.t.lit().(type) { + case *boolType: + a.eval = func(t *Thread) bool { return t.f.Get(level, index).(BoolValue).Get(t) } + case *uintType: + a.eval = func(t *Thread) uint64 { return t.f.Get(level, index).(UintValue).Get(t) } + case *intType: + a.eval = func(t *Thread) int64 { return t.f.Get(level, index).(IntValue).Get(t) } + case *floatType: + a.eval = func(t *Thread) float64 { return t.f.Get(level, index).(FloatValue).Get(t) } + case *stringType: + a.eval = func(t *Thread) string { return t.f.Get(level, index).(StringValue).Get(t) } + case *ArrayType: + a.eval = func(t *Thread) ArrayValue { return t.f.Get(level, index).(ArrayValue).Get(t) } + case *StructType: + a.eval = func(t *Thread) StructValue { return t.f.Get(level, index).(StructValue).Get(t) } + case *PtrType: + a.eval = func(t *Thread) Value { return t.f.Get(level, index).(PtrValue).Get(t) } + case *FuncType: + a.eval = func(t *Thread) Func { return t.f.Get(level, index).(FuncValue).Get(t) } + case *SliceType: + a.eval = func(t *Thread) Slice { return t.f.Get(level, index).(SliceValue).Get(t) } + case *MapType: + a.eval = func(t *Thread) Map { return t.f.Get(level, index).(MapValue).Get(t) } + default: + log.Panicf("unexpected identifier type %v at %v", a.t, a.pos) + } +} + +func (a *expr) genFuncCall(call func(t *Thread) []Value) { + a.exec = func(t *Thread) { call(t) } + switch a.t.lit().(type) { + case *boolType: + a.eval = func(t *Thread) bool { return call(t)[0].(BoolValue).Get(t) } + case *uintType: + a.eval = func(t *Thread) uint64 { return call(t)[0].(UintValue).Get(t) } + case *intType: + a.eval = func(t *Thread) int64 { return call(t)[0].(IntValue).Get(t) } + case *floatType: + a.eval = func(t *Thread) float64 { return call(t)[0].(FloatValue).Get(t) } + case *stringType: + a.eval = func(t *Thread) string { return call(t)[0].(StringValue).Get(t) } + case *ArrayType: + a.eval = func(t *Thread) ArrayValue { return call(t)[0].(ArrayValue).Get(t) } + case *StructType: + a.eval = func(t *Thread) 
StructValue { return call(t)[0].(StructValue).Get(t) } + case *PtrType: + a.eval = func(t *Thread) Value { return call(t)[0].(PtrValue).Get(t) } + case *FuncType: + a.eval = func(t *Thread) Func { return call(t)[0].(FuncValue).Get(t) } + case *SliceType: + a.eval = func(t *Thread) Slice { return call(t)[0].(SliceValue).Get(t) } + case *MapType: + a.eval = func(t *Thread) Map { return call(t)[0].(MapValue).Get(t) } + case *MultiType: + a.eval = func(t *Thread) []Value { return call(t) } + default: + log.Panicf("unexpected result type %v at %v", a.t, a.pos) + } +} + +func (a *expr) genValue(vf func(*Thread) Value) { + a.evalAddr = vf + switch a.t.lit().(type) { + case *boolType: + a.eval = func(t *Thread) bool { return vf(t).(BoolValue).Get(t) } + case *uintType: + a.eval = func(t *Thread) uint64 { return vf(t).(UintValue).Get(t) } + case *intType: + a.eval = func(t *Thread) int64 { return vf(t).(IntValue).Get(t) } + case *floatType: + a.eval = func(t *Thread) float64 { return vf(t).(FloatValue).Get(t) } + case *stringType: + a.eval = func(t *Thread) string { return vf(t).(StringValue).Get(t) } + case *ArrayType: + a.eval = func(t *Thread) ArrayValue { return vf(t).(ArrayValue).Get(t) } + case *StructType: + a.eval = func(t *Thread) StructValue { return vf(t).(StructValue).Get(t) } + case *PtrType: + a.eval = func(t *Thread) Value { return vf(t).(PtrValue).Get(t) } + case *FuncType: + a.eval = func(t *Thread) Func { return vf(t).(FuncValue).Get(t) } + case *SliceType: + a.eval = func(t *Thread) Slice { return vf(t).(SliceValue).Get(t) } + case *MapType: + a.eval = func(t *Thread) Map { return vf(t).(MapValue).Get(t) } + default: + log.Panicf("unexpected result type %v at %v", a.t, a.pos) + } +} + +func (a *expr) genUnaryOpNeg(v *expr) { + switch a.t.lit().(type) { + case *uintType: + vf := v.asUint() + a.eval = func(t *Thread) uint64 { v := vf(t); return -v } + case *intType: + vf := v.asInt() + a.eval = func(t *Thread) int64 { v := vf(t); return -v } + case 
*idealIntType: + val := v.asIdealInt()() + val.Neg(val) + a.eval = func() *big.Int { return val } + case *floatType: + vf := v.asFloat() + a.eval = func(t *Thread) float64 { v := vf(t); return -v } + case *idealFloatType: + val := v.asIdealFloat()() + val.Neg(val) + a.eval = func() *big.Rat { return val } + default: + log.Panicf("unexpected type %v at %v", a.t, a.pos) + } +} + +func (a *expr) genUnaryOpNot(v *expr) { + switch a.t.lit().(type) { + case *boolType: + vf := v.asBool() + a.eval = func(t *Thread) bool { v := vf(t); return !v } + default: + log.Panicf("unexpected type %v at %v", a.t, a.pos) + } +} + +func (a *expr) genUnaryOpXor(v *expr) { + switch a.t.lit().(type) { + case *uintType: + vf := v.asUint() + a.eval = func(t *Thread) uint64 { v := vf(t); return ^v } + case *intType: + vf := v.asInt() + a.eval = func(t *Thread) int64 { v := vf(t); return ^v } + case *idealIntType: + val := v.asIdealInt()() + val.Not(val) + a.eval = func() *big.Int { return val } + default: + log.Panicf("unexpected type %v at %v", a.t, a.pos) + } +} + +func (a *expr) genBinOpLogAnd(l, r *expr) { + lf := l.asBool() + rf := r.asBool() + a.eval = func(t *Thread) bool { return lf(t) && rf(t) } +} + +func (a *expr) genBinOpLogOr(l, r *expr) { + lf := l.asBool() + rf := r.asBool() + a.eval = func(t *Thread) bool { return lf(t) || rf(t) } +} + +func (a *expr) genBinOpAdd(l, r *expr) { + switch t := l.t.lit().(type) { + case *uintType: + lf := l.asUint() + rf := r.asUint() + switch t.Bits { + case 8: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l + r + return uint64(uint8(ret)) + } + case 16: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l + r + return uint64(uint16(ret)) + } + case 32: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l + r + return uint64(uint32(ret)) + } + case 64: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l + r + 
return uint64(uint64(ret)) + } + case 0: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l + r + return uint64(uint(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *intType: + lf := l.asInt() + rf := r.asInt() + switch t.Bits { + case 8: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l + r + return int64(int8(ret)) + } + case 16: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l + r + return int64(int16(ret)) + } + case 32: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l + r + return int64(int32(ret)) + } + case 64: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l + r + return int64(int64(ret)) + } + case 0: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l + r + return int64(int(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *idealIntType: + l := l.asIdealInt()() + r := r.asIdealInt()() + val := l.Add(l, r) + a.eval = func() *big.Int { return val } + case *floatType: + lf := l.asFloat() + rf := r.asFloat() + switch t.Bits { + case 32: + a.eval = func(t *Thread) float64 { + l, r := lf(t), rf(t) + var ret float64 + ret = l + r + return float64(float32(ret)) + } + case 64: + a.eval = func(t *Thread) float64 { + l, r := lf(t), rf(t) + var ret float64 + ret = l + r + return float64(float64(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *idealFloatType: + l := l.asIdealFloat()() + r := r.asIdealFloat()() + val := l.Add(l, r) + a.eval = func() *big.Rat { return val } + case *stringType: + lf := l.asString() + rf := r.asString() + a.eval = func(t *Thread) string { + l, r := lf(t), rf(t) + return l + r + } + default: + log.Panicf("unexpected type %v at %v", l.t, a.pos) + } +} + +func (a *expr) 
genBinOpSub(l, r *expr) { + switch t := l.t.lit().(type) { + case *uintType: + lf := l.asUint() + rf := r.asUint() + switch t.Bits { + case 8: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l - r + return uint64(uint8(ret)) + } + case 16: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l - r + return uint64(uint16(ret)) + } + case 32: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l - r + return uint64(uint32(ret)) + } + case 64: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l - r + return uint64(uint64(ret)) + } + case 0: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l - r + return uint64(uint(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *intType: + lf := l.asInt() + rf := r.asInt() + switch t.Bits { + case 8: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l - r + return int64(int8(ret)) + } + case 16: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l - r + return int64(int16(ret)) + } + case 32: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l - r + return int64(int32(ret)) + } + case 64: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l - r + return int64(int64(ret)) + } + case 0: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l - r + return int64(int(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *idealIntType: + l := l.asIdealInt()() + r := r.asIdealInt()() + val := l.Sub(l, r) + a.eval = func() *big.Int { return val } + case *floatType: + lf := l.asFloat() + rf := r.asFloat() + switch t.Bits { + case 32: + a.eval = func(t *Thread) float64 { + l, r := lf(t), rf(t) + var ret float64 + ret = l - r + 
return float64(float32(ret)) + } + case 64: + a.eval = func(t *Thread) float64 { + l, r := lf(t), rf(t) + var ret float64 + ret = l - r + return float64(float64(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *idealFloatType: + l := l.asIdealFloat()() + r := r.asIdealFloat()() + val := l.Sub(l, r) + a.eval = func() *big.Rat { return val } + default: + log.Panicf("unexpected type %v at %v", l.t, a.pos) + } +} + +func (a *expr) genBinOpMul(l, r *expr) { + switch t := l.t.lit().(type) { + case *uintType: + lf := l.asUint() + rf := r.asUint() + switch t.Bits { + case 8: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l * r + return uint64(uint8(ret)) + } + case 16: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l * r + return uint64(uint16(ret)) + } + case 32: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l * r + return uint64(uint32(ret)) + } + case 64: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l * r + return uint64(uint64(ret)) + } + case 0: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l * r + return uint64(uint(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *intType: + lf := l.asInt() + rf := r.asInt() + switch t.Bits { + case 8: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l * r + return int64(int8(ret)) + } + case 16: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l * r + return int64(int16(ret)) + } + case 32: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l * r + return int64(int32(ret)) + } + case 64: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l * r + return int64(int64(ret)) + } + case 0: + a.eval = func(t *Thread) int64 
{ + l, r := lf(t), rf(t) + var ret int64 + ret = l * r + return int64(int(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *idealIntType: + l := l.asIdealInt()() + r := r.asIdealInt()() + val := l.Mul(l, r) + a.eval = func() *big.Int { return val } + case *floatType: + lf := l.asFloat() + rf := r.asFloat() + switch t.Bits { + case 32: + a.eval = func(t *Thread) float64 { + l, r := lf(t), rf(t) + var ret float64 + ret = l * r + return float64(float32(ret)) + } + case 64: + a.eval = func(t *Thread) float64 { + l, r := lf(t), rf(t) + var ret float64 + ret = l * r + return float64(float64(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *idealFloatType: + l := l.asIdealFloat()() + r := r.asIdealFloat()() + val := l.Mul(l, r) + a.eval = func() *big.Rat { return val } + default: + log.Panicf("unexpected type %v at %v", l.t, a.pos) + } +} + +func (a *expr) genBinOpQuo(l, r *expr) { + switch t := l.t.lit().(type) { + case *uintType: + lf := l.asUint() + rf := r.asUint() + switch t.Bits { + case 8: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l / r + return uint64(uint8(ret)) + } + case 16: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l / r + return uint64(uint16(ret)) + } + case 32: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l / r + return uint64(uint32(ret)) + } + case 64: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l / r + return uint64(uint64(ret)) + } + case 0: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l / r + return uint64(uint(ret)) + } + 
default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *intType: + lf := l.asInt() + rf := r.asInt() + switch t.Bits { + case 8: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l / r + return int64(int8(ret)) + } + case 16: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l / r + return int64(int16(ret)) + } + case 32: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l / r + return int64(int32(ret)) + } + case 64: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l / r + return int64(int64(ret)) + } + case 0: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l / r + return int64(int(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *idealIntType: + l := l.asIdealInt()() + r := r.asIdealInt()() + val := l.Quo(l, r) + a.eval = func() *big.Int { return val } + case *floatType: + lf := l.asFloat() + rf := r.asFloat() + switch t.Bits { + case 32: + a.eval = func(t *Thread) float64 { + l, r := lf(t), rf(t) + var ret float64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l / r + return float64(float32(ret)) + } + case 64: + a.eval = func(t *Thread) float64 { + l, r := lf(t), rf(t) + var ret float64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l / r + return float64(float64(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *idealFloatType: + l := l.asIdealFloat()() + r := r.asIdealFloat()() + val := l.Quo(l, r) + a.eval = func() *big.Rat { return val } + default: + log.Panicf("unexpected type %v at %v", l.t, a.pos) + } +} + +func (a 
*expr) genBinOpRem(l, r *expr) { + switch t := l.t.lit().(type) { + case *uintType: + lf := l.asUint() + rf := r.asUint() + switch t.Bits { + case 8: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l % r + return uint64(uint8(ret)) + } + case 16: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l % r + return uint64(uint16(ret)) + } + case 32: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l % r + return uint64(uint32(ret)) + } + case 64: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l % r + return uint64(uint64(ret)) + } + case 0: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l % r + return uint64(uint(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *intType: + lf := l.asInt() + rf := r.asInt() + switch t.Bits { + case 8: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l % r + return int64(int8(ret)) + } + case 16: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l % r + return int64(int16(ret)) + } + case 32: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l % r + return int64(int32(ret)) + } + case 64: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + if r == 0 { + t.Abort(DivByZeroError{}) + } + ret = l % r + return int64(int64(ret)) + } + case 0: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + if r == 0 { + 
t.Abort(DivByZeroError{}) + } + ret = l % r + return int64(int(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *idealIntType: + l := l.asIdealInt()() + r := r.asIdealInt()() + val := l.Rem(l, r) + a.eval = func() *big.Int { return val } + default: + log.Panicf("unexpected type %v at %v", l.t, a.pos) + } +} + +func (a *expr) genBinOpAnd(l, r *expr) { + switch t := l.t.lit().(type) { + case *uintType: + lf := l.asUint() + rf := r.asUint() + switch t.Bits { + case 8: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l & r + return uint64(uint8(ret)) + } + case 16: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l & r + return uint64(uint16(ret)) + } + case 32: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l & r + return uint64(uint32(ret)) + } + case 64: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l & r + return uint64(uint64(ret)) + } + case 0: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l & r + return uint64(uint(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *intType: + lf := l.asInt() + rf := r.asInt() + switch t.Bits { + case 8: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l & r + return int64(int8(ret)) + } + case 16: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l & r + return int64(int16(ret)) + } + case 32: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l & r + return int64(int32(ret)) + } + case 64: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l & r + return int64(int64(ret)) + } + case 0: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l & r + return int64(int(ret)) + } + default: + 
log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *idealIntType: + l := l.asIdealInt()() + r := r.asIdealInt()() + val := l.And(l, r) + a.eval = func() *big.Int { return val } + default: + log.Panicf("unexpected type %v at %v", l.t, a.pos) + } +} + +func (a *expr) genBinOpOr(l, r *expr) { + switch t := l.t.lit().(type) { + case *uintType: + lf := l.asUint() + rf := r.asUint() + switch t.Bits { + case 8: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l | r + return uint64(uint8(ret)) + } + case 16: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l | r + return uint64(uint16(ret)) + } + case 32: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l | r + return uint64(uint32(ret)) + } + case 64: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l | r + return uint64(uint64(ret)) + } + case 0: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l | r + return uint64(uint(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *intType: + lf := l.asInt() + rf := r.asInt() + switch t.Bits { + case 8: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l | r + return int64(int8(ret)) + } + case 16: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l | r + return int64(int16(ret)) + } + case 32: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l | r + return int64(int32(ret)) + } + case 64: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l | r + return int64(int64(ret)) + } + case 0: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l | r + return int64(int(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *idealIntType: 
+ l := l.asIdealInt()() + r := r.asIdealInt()() + val := l.Or(l, r) + a.eval = func() *big.Int { return val } + default: + log.Panicf("unexpected type %v at %v", l.t, a.pos) + } +} + +func (a *expr) genBinOpXor(l, r *expr) { + switch t := l.t.lit().(type) { + case *uintType: + lf := l.asUint() + rf := r.asUint() + switch t.Bits { + case 8: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l ^ r + return uint64(uint8(ret)) + } + case 16: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l ^ r + return uint64(uint16(ret)) + } + case 32: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l ^ r + return uint64(uint32(ret)) + } + case 64: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l ^ r + return uint64(uint64(ret)) + } + case 0: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l ^ r + return uint64(uint(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *intType: + lf := l.asInt() + rf := r.asInt() + switch t.Bits { + case 8: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l ^ r + return int64(int8(ret)) + } + case 16: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l ^ r + return int64(int16(ret)) + } + case 32: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l ^ r + return int64(int32(ret)) + } + case 64: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l ^ r + return int64(int64(ret)) + } + case 0: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l ^ r + return int64(int(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *idealIntType: + l := l.asIdealInt()() + r := r.asIdealInt()() + val := l.Xor(l, r) + a.eval = func() 
*big.Int { return val } + default: + log.Panicf("unexpected type %v at %v", l.t, a.pos) + } +} + +func (a *expr) genBinOpAndNot(l, r *expr) { + switch t := l.t.lit().(type) { + case *uintType: + lf := l.asUint() + rf := r.asUint() + switch t.Bits { + case 8: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l &^ r + return uint64(uint8(ret)) + } + case 16: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l &^ r + return uint64(uint16(ret)) + } + case 32: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l &^ r + return uint64(uint32(ret)) + } + case 64: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l &^ r + return uint64(uint64(ret)) + } + case 0: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l &^ r + return uint64(uint(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *intType: + lf := l.asInt() + rf := r.asInt() + switch t.Bits { + case 8: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l &^ r + return int64(int8(ret)) + } + case 16: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l &^ r + return int64(int16(ret)) + } + case 32: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l &^ r + return int64(int32(ret)) + } + case 64: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l &^ r + return int64(int64(ret)) + } + case 0: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l &^ r + return int64(int(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *idealIntType: + l := l.asIdealInt()() + r := r.asIdealInt()() + val := l.AndNot(l, r) + a.eval = func() *big.Int { return val } + default: + log.Panicf("unexpected type %v at %v", 
l.t, a.pos)
+	}
+}
+
+// genBinOpShl sets a.eval to a closure computing l << r for sized
+// integer operands.  The result is truncated to the operand's bit
+// width before being widened back to the uint64/int64 carrier type.
+// Ideal (untyped constant) operands are not handled here; they fall
+// through to the default panic.
+func (a *expr) genBinOpShl(l, r *expr) {
+	switch t := l.t.lit().(type) {
+	case *uintType:
+		lf := l.asUint()
+		rf := r.asUint()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l << r
+				return uint64(uint8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l << r
+				return uint64(uint16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l << r
+				return uint64(uint32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l << r
+				return uint64(uint64(ret))
+			}
+		case 0:
+			// Bits == 0 denotes the implementation-sized uint.
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l << r
+				return uint64(uint(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *intType:
+		lf := l.asInt()
+		// Shift counts are always evaluated as unsigned, even when
+		// the left operand is signed.
+		rf := r.asUint()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l << r
+				return int64(int8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l << r
+				return int64(int16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l << r
+				return int64(int32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l << r
+				return int64(int64(ret))
+			}
+		case 0:
+			// Bits == 0 denotes the implementation-sized int.
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l << r
+				return int64(int(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	default:
+		log.Panicf("unexpected type %v at %v", l.t, a.pos)
+	}
+}
+
+// genBinOpShr sets a.eval to a closure computing l >> r; like
+// genBinOpShl, the shift count is read as unsigned and the result is
+// truncated to the operand width.
+func (a *expr) genBinOpShr(l, r *expr) {
+	switch t := l.t.lit().(type) {
+	case *uintType:
+		lf := l.asUint()
+		rf := r.asUint()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) uint64 {
+ l, r := lf(t), rf(t) + var ret uint64 + ret = l >> r + return uint64(uint8(ret)) + } + case 16: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l >> r + return uint64(uint16(ret)) + } + case 32: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l >> r + return uint64(uint32(ret)) + } + case 64: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l >> r + return uint64(uint64(ret)) + } + case 0: + a.eval = func(t *Thread) uint64 { + l, r := lf(t), rf(t) + var ret uint64 + ret = l >> r + return uint64(uint(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + case *intType: + lf := l.asInt() + rf := r.asUint() + switch t.Bits { + case 8: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l >> r + return int64(int8(ret)) + } + case 16: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l >> r + return int64(int16(ret)) + } + case 32: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l >> r + return int64(int32(ret)) + } + case 64: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l >> r + return int64(int64(ret)) + } + case 0: + a.eval = func(t *Thread) int64 { + l, r := lf(t), rf(t) + var ret int64 + ret = l >> r + return int64(int(ret)) + } + default: + log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos) + } + default: + log.Panicf("unexpected type %v at %v", l.t, a.pos) + } +} + +func (a *expr) genBinOpLss(l, r *expr) { + switch t := l.t.lit().(type) { + case *uintType: + lf := l.asUint() + rf := r.asUint() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l < r + } + case *intType: + lf := l.asInt() + rf := r.asInt() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l < r + } + case *idealIntType: + l := l.asIdealInt()() + r := r.asIdealInt()() + 
val := l.Cmp(r) < 0
+		a.eval = func(t *Thread) bool { return val }
+	case *floatType:
+		lf := l.asFloat()
+		rf := r.asFloat()
+		a.eval = func(t *Thread) bool {
+			l, r := lf(t), rf(t)
+			return l < r
+		}
+	case *idealFloatType:
+		l := l.asIdealFloat()()
+		r := r.asIdealFloat()()
+		val := l.Cmp(r) < 0
+		a.eval = func(t *Thread) bool { return val }
+	case *stringType:
+		lf := l.asString()
+		rf := r.asString()
+		a.eval = func(t *Thread) bool {
+			l, r := lf(t), rf(t)
+			return l < r
+		}
+	default:
+		log.Panicf("unexpected type %v at %v", l.t, a.pos)
+	}
+}
+
+// genBinOpGtr sets a.eval to a closure computing l > r, dispatching
+// on the literal type of the left operand (uint, int, float, string).
+// Ideal (untyped constant) operands are compared once here, at
+// compile time, and the boolean result is captured in the closure.
+func (a *expr) genBinOpGtr(l, r *expr) {
+	switch t := l.t.lit().(type) {
+	case *uintType:
+		lf := l.asUint()
+		rf := r.asUint()
+		a.eval = func(t *Thread) bool {
+			l, r := lf(t), rf(t)
+			return l > r
+		}
+	case *intType:
+		lf := l.asInt()
+		rf := r.asInt()
+		a.eval = func(t *Thread) bool {
+			l, r := lf(t), rf(t)
+			return l > r
+		}
+	case *idealIntType:
+		l := l.asIdealInt()()
+		r := r.asIdealInt()()
+		val := l.Cmp(r) > 0
+		a.eval = func(t *Thread) bool { return val }
+	case *floatType:
+		lf := l.asFloat()
+		rf := r.asFloat()
+		a.eval = func(t *Thread) bool {
+			l, r := lf(t), rf(t)
+			return l > r
+		}
+	case *idealFloatType:
+		l := l.asIdealFloat()()
+		r := r.asIdealFloat()()
+		val := l.Cmp(r) > 0
+		a.eval = func(t *Thread) bool { return val }
+	case *stringType:
+		lf := l.asString()
+		rf := r.asString()
+		a.eval = func(t *Thread) bool {
+			l, r := lf(t), rf(t)
+			return l > r
+		}
+	default:
+		log.Panicf("unexpected type %v at %v", l.t, a.pos)
+	}
+}
+
+// genBinOpLeq sets a.eval to a closure computing l <= r; same
+// dispatch and constant-folding scheme as genBinOpGtr.
+func (a *expr) genBinOpLeq(l, r *expr) {
+	switch t := l.t.lit().(type) {
+	case *uintType:
+		lf := l.asUint()
+		rf := r.asUint()
+		a.eval = func(t *Thread) bool {
+			l, r := lf(t), rf(t)
+			return l <= r
+		}
+	case *intType:
+		lf := l.asInt()
+		rf := r.asInt()
+		a.eval = func(t *Thread) bool {
+			l, r := lf(t), rf(t)
+			return l <= r
+		}
+	case *idealIntType:
+		l := l.asIdealInt()()
+		r := r.asIdealInt()()
+		val := l.Cmp(r) <= 0
+		a.eval = func(t *Thread) bool { return val
} + case *floatType: + lf := l.asFloat() + rf := r.asFloat() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l <= r + } + case *idealFloatType: + l := l.asIdealFloat()() + r := r.asIdealFloat()() + val := l.Cmp(r) <= 0 + a.eval = func(t *Thread) bool { return val } + case *stringType: + lf := l.asString() + rf := r.asString() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l <= r + } + default: + log.Panicf("unexpected type %v at %v", l.t, a.pos) + } +} + +func (a *expr) genBinOpGeq(l, r *expr) { + switch t := l.t.lit().(type) { + case *uintType: + lf := l.asUint() + rf := r.asUint() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l >= r + } + case *intType: + lf := l.asInt() + rf := r.asInt() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l >= r + } + case *idealIntType: + l := l.asIdealInt()() + r := r.asIdealInt()() + val := l.Cmp(r) >= 0 + a.eval = func(t *Thread) bool { return val } + case *floatType: + lf := l.asFloat() + rf := r.asFloat() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l >= r + } + case *idealFloatType: + l := l.asIdealFloat()() + r := r.asIdealFloat()() + val := l.Cmp(r) >= 0 + a.eval = func(t *Thread) bool { return val } + case *stringType: + lf := l.asString() + rf := r.asString() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l >= r + } + default: + log.Panicf("unexpected type %v at %v", l.t, a.pos) + } +} + +func (a *expr) genBinOpEql(l, r *expr) { + switch t := l.t.lit().(type) { + case *boolType: + lf := l.asBool() + rf := r.asBool() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l == r + } + case *uintType: + lf := l.asUint() + rf := r.asUint() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l == r + } + case *intType: + lf := l.asInt() + rf := r.asInt() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l == r + } + case *idealIntType: + l := l.asIdealInt()() + r := 
r.asIdealInt()() + val := l.Cmp(r) == 0 + a.eval = func(t *Thread) bool { return val } + case *floatType: + lf := l.asFloat() + rf := r.asFloat() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l == r + } + case *idealFloatType: + l := l.asIdealFloat()() + r := r.asIdealFloat()() + val := l.Cmp(r) == 0 + a.eval = func(t *Thread) bool { return val } + case *stringType: + lf := l.asString() + rf := r.asString() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l == r + } + case *PtrType: + lf := l.asPtr() + rf := r.asPtr() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l == r + } + case *FuncType: + lf := l.asFunc() + rf := r.asFunc() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l == r + } + case *MapType: + lf := l.asMap() + rf := r.asMap() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l == r + } + default: + log.Panicf("unexpected type %v at %v", l.t, a.pos) + } +} + +func (a *expr) genBinOpNeq(l, r *expr) { + switch t := l.t.lit().(type) { + case *boolType: + lf := l.asBool() + rf := r.asBool() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l != r + } + case *uintType: + lf := l.asUint() + rf := r.asUint() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l != r + } + case *intType: + lf := l.asInt() + rf := r.asInt() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l != r + } + case *idealIntType: + l := l.asIdealInt()() + r := r.asIdealInt()() + val := l.Cmp(r) != 0 + a.eval = func(t *Thread) bool { return val } + case *floatType: + lf := l.asFloat() + rf := r.asFloat() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l != r + } + case *idealFloatType: + l := l.asIdealFloat()() + r := r.asIdealFloat()() + val := l.Cmp(r) != 0 + a.eval = func(t *Thread) bool { return val } + case *stringType: + lf := l.asString() + rf := r.asString() + a.eval = func(t *Thread) bool { + l, r := lf(t), rf(t) + return l != 
r
+	}
+	case *PtrType:
+		lf := l.asPtr()
+		rf := r.asPtr()
+		a.eval = func(t *Thread) bool {
+			l, r := lf(t), rf(t)
+			return l != r
+		}
+	case *FuncType:
+		lf := l.asFunc()
+		rf := r.asFunc()
+		a.eval = func(t *Thread) bool {
+			l, r := lf(t), rf(t)
+			return l != r
+		}
+	case *MapType:
+		lf := l.asMap()
+		rf := r.asMap()
+		a.eval = func(t *Thread) bool {
+			l, r := lf(t), rf(t)
+			return l != r
+		}
+	default:
+		log.Panicf("unexpected type %v at %v", l.t, a.pos)
+	}
+}
+
+// genAssign returns a closure that stores the value produced by r
+// into an lvalue of type lt.  The closure asserts lv down to the
+// concrete Value interface matching lt's literal type and calls its
+// Set method (or Assign for composite array/struct values).
+func genAssign(lt Type, r *expr) func(lv Value, t *Thread) {
+	switch lt.lit().(type) {
+	case *boolType:
+		rf := r.asBool()
+		return func(lv Value, t *Thread) { lv.(BoolValue).Set(t, rf(t)) }
+	case *uintType:
+		rf := r.asUint()
+		return func(lv Value, t *Thread) { lv.(UintValue).Set(t, rf(t)) }
+	case *intType:
+		rf := r.asInt()
+		return func(lv Value, t *Thread) { lv.(IntValue).Set(t, rf(t)) }
+	case *floatType:
+		rf := r.asFloat()
+		return func(lv Value, t *Thread) { lv.(FloatValue).Set(t, rf(t)) }
+	case *stringType:
+		rf := r.asString()
+		return func(lv Value, t *Thread) { lv.(StringValue).Set(t, rf(t)) }
+	case *ArrayType:
+		rf := r.asArray()
+		return func(lv Value, t *Thread) { lv.Assign(t, rf(t)) }
+	case *StructType:
+		rf := r.asStruct()
+		return func(lv Value, t *Thread) { lv.Assign(t, rf(t)) }
+	case *PtrType:
+		rf := r.asPtr()
+		return func(lv Value, t *Thread) { lv.(PtrValue).Set(t, rf(t)) }
+	case *FuncType:
+		rf := r.asFunc()
+		return func(lv Value, t *Thread) { lv.(FuncValue).Set(t, rf(t)) }
+	case *SliceType:
+		rf := r.asSlice()
+		return func(lv Value, t *Thread) { lv.(SliceValue).Set(t, rf(t)) }
+	case *MapType:
+		rf := r.asMap()
+		return func(lv Value, t *Thread) { lv.(MapValue).Set(t, rf(t)) }
+	default:
+		log.Panicf("unexpected left operand type %v at %v", lt, r.pos)
+	}
+	// Never reached: log.Panicf above panics.  Present to satisfy the
+	// compiler's terminating-statement analysis.
+	panic("fail")
+}
diff --git a/libgo/go/exp/eval/expr_test.go b/libgo/go/exp/eval/expr_test.go
new file mode 100644
index 000000000..0dbce4315
--- /dev/null
+++ b/libgo/go/exp/eval/expr_test.go
@@ -0,0 +1,355 @@
+// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package eval + +import ( + "big" + "testing" +) + +var undefined = "undefined" +var typeAsExpr = "type .* used as expression" +var badCharLit = "character literal" +var unknownEscape = "unknown escape sequence" +var opTypes = "illegal (operand|argument) type|cannot index into" +var badAddrOf = "cannot take the address" +var constantTruncated = "constant [^ ]* truncated" +var constantUnderflows = "constant [^ ]* underflows" +var constantOverflows = "constant [^ ]* overflows" +var implLimit = "implementation limit" +var mustBeUnsigned = "must be unsigned" +var divByZero = "divide by zero" + +var hugeInteger = new(big.Int).Lsh(idealOne, 64) + +var exprTests = []test{ + Val("i", 1), + CErr("zzz", undefined), + // TODO(austin) Test variable in constant context + //CErr("t", typeAsExpr), + + Val("'a'", big.NewInt('a')), + Val("'\\uffff'", big.NewInt('\uffff')), + Val("'\\n'", big.NewInt('\n')), + CErr("''+x", badCharLit), + // Produces two parse errors + //CErr("'''", ""), + CErr("'\n'", badCharLit), + CErr("'\\z'", unknownEscape), + CErr("'ab'", badCharLit), + + Val("1.0", big.NewRat(1, 1)), + Val("1.", big.NewRat(1, 1)), + Val(".1", big.NewRat(1, 10)), + Val("1e2", big.NewRat(100, 1)), + + Val("\"abc\"", "abc"), + Val("\"\"", ""), + Val("\"\\n\\\"\"", "\n\""), + CErr("\"\\z\"", unknownEscape), + CErr("\"abc", "string not terminated"), + + Val("(i)", 1), + + Val("ai[0]", 1), + Val("(&ai)[0]", 1), + Val("ai[1]", 2), + Val("ai[i]", 2), + Val("ai[u]", 2), + CErr("ai[f]", opTypes), + CErr("ai[0][0]", opTypes), + CErr("ai[2]", "index 2 exceeds"), + CErr("ai[1+1]", "index 2 exceeds"), + CErr("ai[-1]", "negative index"), + RErr("ai[i+i]", "index 2 exceeds"), + RErr("ai[-i]", "negative index"), + CErr("i[0]", opTypes), + CErr("f[0]", opTypes), + + Val("aai[0][0]", 1), + Val("aai[1][1]", 4), + CErr("aai[2][0]", 
"index 2 exceeds"), + CErr("aai[0][2]", "index 2 exceeds"), + + Val("sli[0]", 1), + Val("sli[1]", 2), + CErr("sli[-1]", "negative index"), + RErr("sli[-i]", "negative index"), + RErr("sli[2]", "index 2 exceeds"), + + Val("s[0]", uint8('a')), + Val("s[1]", uint8('b')), + CErr("s[-1]", "negative index"), + RErr("s[-i]", "negative index"), + RErr("s[3]", "index 3 exceeds"), + + Val("ai[0:2]", vslice{varray{1, 2}, 2, 2}), + Val("ai[0:1]", vslice{varray{1, 2}, 1, 2}), + Val("ai[0:]", vslice{varray{1, 2}, 2, 2}), + Val("ai[i:]", vslice{varray{2}, 1, 1}), + + Val("sli[0:2]", vslice{varray{1, 2, 3}, 2, 3}), + Val("sli[0:i]", vslice{varray{1, 2, 3}, 1, 3}), + Val("sli[1:]", vslice{varray{2, 3}, 1, 2}), + + CErr("1(2)", "cannot call"), + CErr("fn(1,2)", "too many"), + CErr("fn()", "not enough"), + CErr("fn(true)", opTypes), + CErr("fn(true)", "function call"), + // Single argument functions don't say which argument. + //CErr("fn(true)", "argument 1"), + Val("fn(1)", 2), + Val("fn(1.0)", 2), + CErr("fn(1.5)", constantTruncated), + Val("fn(i)", 2), + CErr("fn(u)", opTypes), + + CErr("void()+2", opTypes), + CErr("oneTwo()+2", opTypes), + + Val("cap(ai)", 2), + Val("cap(&ai)", 2), + Val("cap(aai)", 2), + Val("cap(sli)", 3), + CErr("cap(0)", opTypes), + CErr("cap(i)", opTypes), + CErr("cap(s)", opTypes), + + Val("len(s)", 3), + Val("len(ai)", 2), + Val("len(&ai)", 2), + Val("len(ai[0:])", 2), + Val("len(ai[1:])", 1), + Val("len(ai[2:])", 0), + Val("len(aai)", 2), + Val("len(sli)", 2), + Val("len(sli[0:])", 2), + Val("len(sli[1:])", 1), + Val("len(sli[2:])", 0), + // TODO(austin) Test len of map + CErr("len(0)", opTypes), + CErr("len(i)", opTypes), + + CErr("*i", opTypes), + Val("*&i", 1), + Val("*&(i)", 1), + CErr("&1", badAddrOf), + CErr("&c", badAddrOf), + Val("*(&ai[0])", 1), + + Val("+1", big.NewInt(+1)), + Val("+1.0", big.NewRat(1, 1)), + Val("01.5", big.NewRat(15, 10)), + CErr("+\"x\"", opTypes), + + Val("-42", big.NewInt(-42)), + Val("-i", -1), + Val("-f", -1.0), + // 6g 
bug? + //Val("-(f-1)", -0.0), + CErr("-\"x\"", opTypes), + + // TODO(austin) Test unary ! + + Val("^2", big.NewInt(^2)), + Val("^(-2)", big.NewInt(^(-2))), + CErr("^2.0", opTypes), + CErr("^2.5", opTypes), + Val("^i", ^1), + Val("^u", ^uint(1)), + CErr("^f", opTypes), + + Val("1+i", 2), + Val("1+u", uint(2)), + Val("3.0+i", 4), + Val("1+1", big.NewInt(2)), + Val("f+f", 2.0), + Val("1+f", 2.0), + Val("1.0+1", big.NewRat(2, 1)), + Val("\"abc\" + \"def\"", "abcdef"), + CErr("i+u", opTypes), + CErr("-1+u", constantUnderflows), + // TODO(austin) Test named types + + Val("2-1", big.NewInt(1)), + Val("2.0-1", big.NewRat(1, 1)), + Val("f-2", -1.0), + Val("-0.0", big.NewRat(0, 1)), + Val("2*2", big.NewInt(4)), + Val("2*i", 2), + Val("3/2", big.NewInt(1)), + Val("3/i", 3), + CErr("1/0", divByZero), + CErr("1.0/0", divByZero), + RErr("i/0", divByZero), + Val("3%2", big.NewInt(1)), + Val("i%2", 1), + CErr("3%0", divByZero), + CErr("3.0%0", opTypes), + RErr("i%0", divByZero), + + // Examples from "Arithmetic operators" + Val("5/3", big.NewInt(1)), + Val("(i+4)/(i+2)", 1), + Val("5%3", big.NewInt(2)), + Val("(i+4)%(i+2)", 2), + Val("-5/3", big.NewInt(-1)), + Val("(i-6)/(i+2)", -1), + Val("-5%3", big.NewInt(-2)), + Val("(i-6)%(i+2)", -2), + Val("5/-3", big.NewInt(-1)), + Val("(i+4)/(i-4)", -1), + Val("5%-3", big.NewInt(2)), + Val("(i+4)%(i-4)", 2), + Val("-5/-3", big.NewInt(1)), + Val("(i-6)/(i-4)", 1), + Val("-5%-3", big.NewInt(-2)), + Val("(i-6)%(i-4)", -2), + + // Examples from "Arithmetic operators" + Val("11/4", big.NewInt(2)), + Val("(i+10)/4", 2), + Val("11%4", big.NewInt(3)), + Val("(i+10)%4", 3), + Val("11>>2", big.NewInt(2)), + Val("(i+10)>>2", 2), + Val("11&3", big.NewInt(3)), + Val("(i+10)&3", 3), + Val("-11/4", big.NewInt(-2)), + Val("(i-12)/4", -2), + Val("-11%4", big.NewInt(-3)), + Val("(i-12)%4", -3), + Val("-11>>2", big.NewInt(-3)), + Val("(i-12)>>2", -3), + Val("-11&3", big.NewInt(1)), + Val("(i-12)&3", 1), + + // TODO(austin) Test bit ops + + // For shift, we 
try nearly every combination of positive + // ideal int, negative ideal int, big ideal int, ideal + // fractional float, ideal non-fractional float, int, uint, + // and float. + Val("2<<2", big.NewInt(2<<2)), + CErr("2<<(-1)", constantUnderflows), + CErr("2<<0x10000000000000000", constantOverflows), + CErr("2<<2.5", constantTruncated), + Val("2<<2.0", big.NewInt(2<<2.0)), + CErr("2<<i", mustBeUnsigned), + Val("2<<u", 2<<1), + CErr("2<<f", opTypes), + + Val("-2<<2", big.NewInt(-2<<2)), + CErr("-2<<(-1)", constantUnderflows), + CErr("-2<<0x10000000000000000", constantOverflows), + CErr("-2<<2.5", constantTruncated), + Val("-2<<2.0", big.NewInt(-2<<2.0)), + CErr("-2<<i", mustBeUnsigned), + Val("-2<<u", -2<<1), + CErr("-2<<f", opTypes), + + Val("0x10000000000000000<<2", new(big.Int).Lsh(hugeInteger, 2)), + CErr("0x10000000000000000<<(-1)", constantUnderflows), + CErr("0x10000000000000000<<0x10000000000000000", constantOverflows), + CErr("0x10000000000000000<<2.5", constantTruncated), + Val("0x10000000000000000<<2.0", new(big.Int).Lsh(hugeInteger, 2)), + CErr("0x10000000000000000<<i", mustBeUnsigned), + CErr("0x10000000000000000<<u", constantOverflows), + CErr("0x10000000000000000<<f", opTypes), + + CErr("2.5<<2", opTypes), + CErr("2.0<<2", opTypes), + + Val("i<<2", 1<<2), + CErr("i<<(-1)", constantUnderflows), + CErr("i<<0x10000000000000000", constantOverflows), + CErr("i<<2.5", constantTruncated), + Val("i<<2.0", 1<<2), + CErr("i<<i", mustBeUnsigned), + Val("i<<u", 1<<1), + CErr("i<<f", opTypes), + Val("i<<u", 1<<1), + + Val("u<<2", uint(1<<2)), + CErr("u<<(-1)", constantUnderflows), + CErr("u<<0x10000000000000000", constantOverflows), + CErr("u<<2.5", constantTruncated), + Val("u<<2.0", uint(1<<2)), + CErr("u<<i", mustBeUnsigned), + Val("u<<u", uint(1<<1)), + CErr("u<<f", opTypes), + Val("u<<u", uint(1<<1)), + + CErr("f<<2", opTypes), + + // <, <=, >, >= + Val("1<2", 1 < 2), + Val("1<=2", 1 <= 2), + Val("2<=2", 2 <= 2), + Val("1>2", 1 > 2), + Val("1>=2", 1 >= 2), + 
Val("2>=2", 2 >= 2), + + Val("i<2", 1 < 2), + Val("i<=2", 1 <= 2), + Val("i+1<=2", 2 <= 2), + Val("i>2", 1 > 2), + Val("i>=2", 1 >= 2), + Val("i+1>=2", 2 >= 2), + + Val("u<2", 1 < 2), + Val("f<2", 1 < 2), + + Val("s<\"b\"", true), + Val("s<\"a\"", false), + Val("s<=\"abc\"", true), + Val("s>\"aa\"", true), + Val("s>\"ac\"", false), + Val("s>=\"abc\"", true), + + CErr("i<u", opTypes), + CErr("i<f", opTypes), + CErr("i<s", opTypes), + CErr("&i<&i", opTypes), + CErr("ai<ai", opTypes), + + // ==, != + Val("1==1", true), + Val("1!=1", false), + Val("1==2", false), + Val("1!=2", true), + + Val("1.0==1", true), + Val("1.5==1", false), + + Val("i==1", true), + Val("i!=1", false), + Val("i==2", false), + Val("i!=2", true), + + Val("u==1", true), + Val("f==1", true), + + Val("s==\"abc\"", true), + Val("s!=\"abc\"", false), + Val("s==\"abcd\"", false), + Val("s!=\"abcd\"", true), + + Val("&i==&i", true), + Val("&i==&i2", false), + + Val("fn==fn", true), + Val("fn==func(int)int{return 0}", false), + + CErr("i==u", opTypes), + CErr("i==f", opTypes), + CErr("&i==&f", opTypes), + CErr("ai==ai", opTypes), + CErr("t==t", opTypes), + CErr("fn==oneTwo", opTypes), +} + +func TestExpr(t *testing.T) { runTests(t, "exprTests", exprTests) } diff --git a/libgo/go/exp/eval/func.go b/libgo/go/exp/eval/func.go new file mode 100644 index 000000000..cb1b579e4 --- /dev/null +++ b/libgo/go/exp/eval/func.go @@ -0,0 +1,70 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package eval + +import "os" + +/* + * Virtual machine + */ + +type Thread struct { + abort chan os.Error + pc uint + // The execution frame of this function. This remains the + // same throughout a function invocation. 
+ f *Frame +} + +type code []func(*Thread) + +func (i code) exec(t *Thread) { + opc := t.pc + t.pc = 0 + l := uint(len(i)) + for t.pc < l { + pc := t.pc + t.pc++ + i[pc](t) + } + t.pc = opc +} + +/* + * Code buffer + */ + +type codeBuf struct { + instrs code +} + +func newCodeBuf() *codeBuf { return &codeBuf{make(code, 0, 16)} } + +func (b *codeBuf) push(instr func(*Thread)) { + b.instrs = append(b.instrs, instr) +} + +func (b *codeBuf) nextPC() uint { return uint(len(b.instrs)) } + +func (b *codeBuf) get() code { + // Freeze this buffer into an array of exactly the right size + a := make(code, len(b.instrs)) + copy(a, b.instrs) + return code(a) +} + +/* + * User-defined functions + */ + +type evalFunc struct { + outer *Frame + frameSize int + code code +} + +func (f *evalFunc) NewFrame() *Frame { return f.outer.child(f.frameSize) } + +func (f *evalFunc) Call(t *Thread) { f.code.exec(t) } diff --git a/libgo/go/exp/eval/scope.go b/libgo/go/exp/eval/scope.go new file mode 100644 index 000000000..66305de25 --- /dev/null +++ b/libgo/go/exp/eval/scope.go @@ -0,0 +1,207 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package eval + +import ( + "go/token" + "log" +) + +/* + * Blocks and scopes + */ + +// A definition can be a *Variable, *Constant, or Type. +type Def interface { + Pos() token.Pos +} + +type Variable struct { + VarPos token.Pos + // Index of this variable in the Frame structure + Index int + // Static type of this variable + Type Type + // Value of this variable. This is only used by Scope.NewFrame; + // therefore, it is useful for global scopes but cannot be used + // in function scopes. 
+ Init Value +} + +func (v *Variable) Pos() token.Pos { + return v.VarPos +} + +type Constant struct { + ConstPos token.Pos + Type Type + Value Value +} + +func (c *Constant) Pos() token.Pos { + return c.ConstPos +} + +// A block represents a definition block in which a name may not be +// defined more than once. +type block struct { + // The block enclosing this one, including blocks in other + // scopes. + outer *block + // The nested block currently being compiled, or nil. + inner *block + // The Scope containing this block. + scope *Scope + // The Variables, Constants, and Types defined in this block. + defs map[string]Def + // The index of the first variable defined in this block. + // This must be greater than the index of any variable defined + // in any parent of this block within the same Scope at the + // time this block is entered. + offset int + // The number of Variables defined in this block. + numVars int + // If global, do not allocate new vars and consts in + // the frame; assume that the refs will be compiled in + // using defs[name].Init. + global bool +} + +// A Scope is the compile-time analogue of a Frame, which captures +// some subtree of blocks. +type Scope struct { + // The root block of this scope. + *block + // The maximum number of variables required at any point in + // this Scope. This determines the number of slots needed in + // Frame's created from this Scope at run-time. 
+ maxVars int +} + +func (b *block) enterChild() *block { + if b.inner != nil && b.inner.scope == b.scope { + log.Panic("Failed to exit child block before entering another child") + } + sub := &block{ + outer: b, + scope: b.scope, + defs: make(map[string]Def), + offset: b.offset + b.numVars, + } + b.inner = sub + return sub +} + +func (b *block) exit() { + if b.outer == nil { + log.Panic("Cannot exit top-level block") + } + if b.outer.scope == b.scope { + if b.outer.inner != b { + log.Panic("Already exited block") + } + if b.inner != nil && b.inner.scope == b.scope { + log.Panic("Exit of parent block without exit of child block") + } + } + b.outer.inner = nil +} + +func (b *block) ChildScope() *Scope { + if b.inner != nil && b.inner.scope == b.scope { + log.Panic("Failed to exit child block before entering a child scope") + } + sub := b.enterChild() + sub.offset = 0 + sub.scope = &Scope{sub, 0} + return sub.scope +} + +func (b *block) DefineVar(name string, pos token.Pos, t Type) (*Variable, Def) { + if prev, ok := b.defs[name]; ok { + return nil, prev + } + v := b.defineSlot(t, false) + v.VarPos = pos + b.defs[name] = v + return v, nil +} + +func (b *block) DefineTemp(t Type) *Variable { return b.defineSlot(t, true) } + +func (b *block) defineSlot(t Type, temp bool) *Variable { + if b.inner != nil && b.inner.scope == b.scope { + log.Panic("Failed to exit child block before defining variable") + } + index := -1 + if !b.global || temp { + index = b.offset + b.numVars + b.numVars++ + if index >= b.scope.maxVars { + b.scope.maxVars = index + 1 + } + } + v := &Variable{token.NoPos, index, t, nil} + return v +} + +func (b *block) DefineConst(name string, pos token.Pos, t Type, v Value) (*Constant, Def) { + if prev, ok := b.defs[name]; ok { + return nil, prev + } + c := &Constant{pos, t, v} + b.defs[name] = c + return c, nil +} + +func (b *block) DefineType(name string, pos token.Pos, t Type) Type { + if _, ok := b.defs[name]; ok { + return nil + } + nt := 
&NamedType{pos, name, nil, true, make(map[string]Method)} + if t != nil { + nt.Complete(t) + } + b.defs[name] = nt + return nt +} + +func (b *block) Lookup(name string) (bl *block, level int, def Def) { + for b != nil { + if d, ok := b.defs[name]; ok { + return b, level, d + } + if b.outer != nil && b.scope != b.outer.scope { + level++ + } + b = b.outer + } + return nil, 0, nil +} + +func (s *Scope) NewFrame(outer *Frame) *Frame { return outer.child(s.maxVars) } + +/* + * Frames + */ + +type Frame struct { + Outer *Frame + Vars []Value +} + +func (f *Frame) Get(level int, index int) Value { + for ; level > 0; level-- { + f = f.Outer + } + return f.Vars[index] +} + +func (f *Frame) child(numVars int) *Frame { + // TODO(austin) This is probably rather expensive. All values + // require heap allocation and zeroing them when we execute a + // definition typically requires some computation. + return &Frame{f, make([]Value, numVars)} +} diff --git a/libgo/go/exp/eval/stmt.go b/libgo/go/exp/eval/stmt.go new file mode 100644 index 000000000..77ff066d0 --- /dev/null +++ b/libgo/go/exp/eval/stmt.go @@ -0,0 +1,1302 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package eval + +import ( + "big" + "log" + "go/ast" + "go/token" +) + +const ( + returnPC = ^uint(0) + badPC = ^uint(1) +) + +/* + * Statement compiler + */ + +type stmtCompiler struct { + *blockCompiler + pos token.Pos + // This statement's label, or nil if it is not labeled. + stmtLabel *label +} + +func (a *stmtCompiler) diag(format string, args ...interface{}) { + a.diagAt(a.pos, format, args...) +} + +/* + * Flow checker + */ + +type flowEnt struct { + // Whether this flow entry is conditional. If true, flow can + // continue to the next PC. + cond bool + // True if this will terminate flow (e.g., a return statement). + // cond must be false and jumps must be nil if this is true. 
+ term bool + // PC's that can be reached from this flow entry. + jumps []*uint + // Whether this flow entry has been visited by reachesEnd. + visited bool +} + +type flowBlock struct { + // If this is a goto, the target label. + target string + // The inner-most block containing definitions. + block *block + // The numVars from each block leading to the root of the + // scope, starting at block. + numVars []int +} + +type flowBuf struct { + cb *codeBuf + // ents is a map from PC's to flow entries. Any PC missing + // from this map is assumed to reach only PC+1. + ents map[uint]*flowEnt + // gotos is a map from goto positions to information on the + // block at the point of the goto. + gotos map[token.Pos]*flowBlock + // labels is a map from label name to information on the block + // at the point of the label. labels are tracked by name, + // since mutliple labels at the same PC can have different + // blocks. + labels map[string]*flowBlock +} + +func newFlowBuf(cb *codeBuf) *flowBuf { + return &flowBuf{cb, make(map[uint]*flowEnt), make(map[token.Pos]*flowBlock), make(map[string]*flowBlock)} +} + +// put creates a flow control point for the next PC in the code buffer. +// This should be done before pushing the instruction into the code buffer. +func (f *flowBuf) put(cond bool, term bool, jumps []*uint) { + pc := f.cb.nextPC() + if ent, ok := f.ents[pc]; ok { + log.Panicf("Flow entry already exists at PC %d: %+v", pc, ent) + } + f.ents[pc] = &flowEnt{cond, term, jumps, false} +} + +// putTerm creates a flow control point at the next PC that +// unconditionally terminates execution. +func (f *flowBuf) putTerm() { f.put(false, true, nil) } + +// put1 creates a flow control point at the next PC that jumps to one +// PC and, if cond is true, can also continue to the PC following the +// next PC. 
+func (f *flowBuf) put1(cond bool, jumpPC *uint) { + f.put(cond, false, []*uint{jumpPC}) +} + +func newFlowBlock(target string, b *block) *flowBlock { + // Find the inner-most block containing definitions + for b.numVars == 0 && b.outer != nil && b.outer.scope == b.scope { + b = b.outer + } + + // Count parents leading to the root of the scope + n := 0 + for bp := b; bp.scope == b.scope; bp = bp.outer { + n++ + } + + // Capture numVars from each block to the root of the scope + numVars := make([]int, n) + i := 0 + for bp := b; i < n; bp = bp.outer { + numVars[i] = bp.numVars + i++ + } + + return &flowBlock{target, b, numVars} +} + +// putGoto captures the block at a goto statement. This should be +// called in addition to putting a flow control point. +func (f *flowBuf) putGoto(pos token.Pos, target string, b *block) { + f.gotos[pos] = newFlowBlock(target, b) +} + +// putLabel captures the block at a label. +func (f *flowBuf) putLabel(name string, b *block) { + f.labels[name] = newFlowBlock("", b) +} + +// reachesEnd returns true if the end of f's code buffer can be +// reached from the given program counter. Error reporting is the +// caller's responsibility. +func (f *flowBuf) reachesEnd(pc uint) bool { + endPC := f.cb.nextPC() + if pc > endPC { + log.Panicf("Reached bad PC %d past end PC %d", pc, endPC) + } + + for ; pc < endPC; pc++ { + ent, ok := f.ents[pc] + if !ok { + continue + } + + if ent.visited { + return false + } + ent.visited = true + + if ent.term { + return false + } + + // If anything can reach the end, we can reach the end + // from pc. + for _, j := range ent.jumps { + if f.reachesEnd(*j) { + return true + } + } + // If the jump was conditional, we can reach the next + // PC, so try reaching the end from it. + if ent.cond { + continue + } + return false + } + return true +} + +// gotosObeyScopes returns true if no goto statement causes any +// variables to come into scope that were not in scope at the point of +// the goto. 
Reports any errors using the given compiler. +func (f *flowBuf) gotosObeyScopes(a *compiler) { + for pos, src := range f.gotos { + tgt := f.labels[src.target] + + // The target block must be a parent of this block + numVars := src.numVars + b := src.block + for len(numVars) > 0 && b != tgt.block { + b = b.outer + numVars = numVars[1:] + } + if b != tgt.block { + // We jumped into a deeper block + a.diagAt(pos, "goto causes variables to come into scope") + return + } + + // There must be no variables in the target block that + // did not exist at the jump + tgtNumVars := tgt.numVars + for i := range numVars { + if tgtNumVars[i] > numVars[i] { + a.diagAt(pos, "goto causes variables to come into scope") + return + } + } + } +} + +/* + * Statement generation helpers + */ + +func (a *stmtCompiler) defineVar(ident *ast.Ident, t Type) *Variable { + v, prev := a.block.DefineVar(ident.Name, ident.Pos(), t) + if prev != nil { + if prev.Pos().IsValid() { + a.diagAt(ident.Pos(), "variable %s redeclared in this block\n\tprevious declaration at %s", ident.Name, a.fset.Position(prev.Pos())) + } else { + a.diagAt(ident.Pos(), "variable %s redeclared in this block", ident.Name) + } + return nil + } + + // Initialize the variable + index := v.Index + if v.Index >= 0 { + a.push(func(v *Thread) { v.f.Vars[index] = t.Zero() }) + } + return v +} + +// TODO(austin) Move doAssign to here + +/* + * Statement compiler + */ + +func (a *stmtCompiler) compile(s ast.Stmt) { + if a.block.inner != nil { + log.Panic("Child scope still entered") + } + + notimpl := false + switch s := s.(type) { + case *ast.BadStmt: + // Error already reported by parser. + a.silentErrors++ + + case *ast.DeclStmt: + a.compileDeclStmt(s) + + case *ast.EmptyStmt: + // Do nothing. 
+ + case *ast.LabeledStmt: + a.compileLabeledStmt(s) + + case *ast.ExprStmt: + a.compileExprStmt(s) + + case *ast.IncDecStmt: + a.compileIncDecStmt(s) + + case *ast.AssignStmt: + a.compileAssignStmt(s) + + case *ast.GoStmt: + notimpl = true + + case *ast.DeferStmt: + notimpl = true + + case *ast.ReturnStmt: + a.compileReturnStmt(s) + + case *ast.BranchStmt: + a.compileBranchStmt(s) + + case *ast.BlockStmt: + a.compileBlockStmt(s) + + case *ast.IfStmt: + a.compileIfStmt(s) + + case *ast.CaseClause: + a.diag("case clause outside switch") + + case *ast.SwitchStmt: + a.compileSwitchStmt(s) + + case *ast.TypeCaseClause: + notimpl = true + + case *ast.TypeSwitchStmt: + notimpl = true + + case *ast.CommClause: + notimpl = true + + case *ast.SelectStmt: + notimpl = true + + case *ast.ForStmt: + a.compileForStmt(s) + + case *ast.RangeStmt: + notimpl = true + + default: + log.Panicf("unexpected ast node type %T", s) + } + + if notimpl { + a.diag("%T statment node not implemented", s) + } + + if a.block.inner != nil { + log.Panic("Forgot to exit child scope") + } +} + +func (a *stmtCompiler) compileDeclStmt(s *ast.DeclStmt) { + switch decl := s.Decl.(type) { + case *ast.BadDecl: + // Do nothing. Already reported by parser. 
+ a.silentErrors++ + + case *ast.FuncDecl: + if !a.block.global { + log.Panic("FuncDecl at statement level") + } + + case *ast.GenDecl: + if decl.Tok == token.IMPORT && !a.block.global { + log.Panic("import at statement level") + } + + default: + log.Panicf("Unexpected Decl type %T", s.Decl) + } + a.compileDecl(s.Decl) +} + +func (a *stmtCompiler) compileVarDecl(decl *ast.GenDecl) { + for _, spec := range decl.Specs { + spec := spec.(*ast.ValueSpec) + if spec.Values == nil { + // Declaration without assignment + if spec.Type == nil { + // Parser should have caught + log.Panic("Type and Values nil") + } + t := a.compileType(a.block, spec.Type) + // Define placeholders even if type compile failed + for _, n := range spec.Names { + a.defineVar(n, t) + } + } else { + // Declaration with assignment + lhs := make([]ast.Expr, len(spec.Names)) + for i, n := range spec.Names { + lhs[i] = n + } + a.doAssign(lhs, spec.Values, decl.Tok, spec.Type) + } + } +} + +func (a *stmtCompiler) compileDecl(decl ast.Decl) { + switch d := decl.(type) { + case *ast.BadDecl: + // Do nothing. Already reported by parser. + a.silentErrors++ + + case *ast.FuncDecl: + decl := a.compileFuncType(a.block, d.Type) + if decl == nil { + return + } + // Declare and initialize v before compiling func + // so that body can refer to itself. 
+ c, prev := a.block.DefineConst(d.Name.Name, a.pos, decl.Type, decl.Type.Zero()) + if prev != nil { + pos := prev.Pos() + if pos.IsValid() { + a.diagAt(d.Name.Pos(), "identifier %s redeclared in this block\n\tprevious declaration at %s", d.Name.Name, a.fset.Position(pos)) + } else { + a.diagAt(d.Name.Pos(), "identifier %s redeclared in this block", d.Name.Name) + } + } + fn := a.compileFunc(a.block, decl, d.Body) + if c == nil || fn == nil { + return + } + var zeroThread Thread + c.Value.(FuncValue).Set(nil, fn(&zeroThread)) + + case *ast.GenDecl: + switch d.Tok { + case token.IMPORT: + log.Panicf("%v not implemented", d.Tok) + case token.CONST: + log.Panicf("%v not implemented", d.Tok) + case token.TYPE: + a.compileTypeDecl(a.block, d) + case token.VAR: + a.compileVarDecl(d) + } + + default: + log.Panicf("Unexpected Decl type %T", decl) + } +} + +func (a *stmtCompiler) compileLabeledStmt(s *ast.LabeledStmt) { + // Define label + l, ok := a.labels[s.Label.Name] + if ok { + if l.resolved.IsValid() { + a.diag("label %s redeclared in this block\n\tprevious declaration at %s", s.Label.Name, a.fset.Position(l.resolved)) + } + } else { + pc := badPC + l = &label{name: s.Label.Name, gotoPC: &pc} + a.labels[l.name] = l + } + l.desc = "regular label" + l.resolved = s.Pos() + + // Set goto PC + *l.gotoPC = a.nextPC() + + // Define flow entry so we can check for jumps over declarations. + a.flow.putLabel(l.name, a.block) + + // Compile the statement. Reuse our stmtCompiler for simplicity. 
+ sc := &stmtCompiler{a.blockCompiler, s.Stmt.Pos(), l} + sc.compile(s.Stmt) +} + +func (a *stmtCompiler) compileExprStmt(s *ast.ExprStmt) { + bc := a.enterChild() + defer bc.exit() + + e := a.compileExpr(bc.block, false, s.X) + if e == nil { + return + } + + if e.exec == nil { + a.diag("%s cannot be used as expression statement", e.desc) + return + } + + a.push(e.exec) +} + +func (a *stmtCompiler) compileIncDecStmt(s *ast.IncDecStmt) { + // Create temporary block for extractEffect + bc := a.enterChild() + defer bc.exit() + + l := a.compileExpr(bc.block, false, s.X) + if l == nil { + return + } + + if l.evalAddr == nil { + l.diag("cannot assign to %s", l.desc) + return + } + if !(l.t.isInteger() || l.t.isFloat()) { + l.diagOpType(s.Tok, l.t) + return + } + + var op token.Token + var desc string + switch s.Tok { + case token.INC: + op = token.ADD + desc = "increment statement" + case token.DEC: + op = token.SUB + desc = "decrement statement" + default: + log.Panicf("Unexpected IncDec token %v", s.Tok) + } + + effect, l := l.extractEffect(bc.block, desc) + + one := l.newExpr(IdealIntType, "constant") + one.pos = s.Pos() + one.eval = func() *big.Int { return big.NewInt(1) } + + binop := l.compileBinaryExpr(op, l, one) + if binop == nil { + return + } + + assign := a.compileAssign(s.Pos(), bc.block, l.t, []*expr{binop}, "", "") + if assign == nil { + log.Panicf("compileAssign type check failed") + } + + lf := l.evalAddr + a.push(func(v *Thread) { + effect(v) + assign(lf(v), v) + }) +} + +func (a *stmtCompiler) doAssign(lhs []ast.Expr, rhs []ast.Expr, tok token.Token, declTypeExpr ast.Expr) { + nerr := a.numError() + + // Compile right side first so we have the types when + // compiling the left side and so we don't see definitions + // made on the left side. 
+ rs := make([]*expr, len(rhs)) + for i, re := range rhs { + rs[i] = a.compileExpr(a.block, false, re) + } + + errOp := "assignment" + if tok == token.DEFINE || tok == token.VAR { + errOp = "declaration" + } + ac, ok := a.checkAssign(a.pos, rs, errOp, "value") + ac.allowMapForms(len(lhs)) + + // If this is a definition and the LHS is too big, we won't be + // able to produce the usual error message because we can't + // begin to infer the types of the LHS. + if (tok == token.DEFINE || tok == token.VAR) && len(lhs) > len(ac.rmt.Elems) { + a.diag("not enough values for definition") + } + + // Compile left type if there is one + var declType Type + if declTypeExpr != nil { + declType = a.compileType(a.block, declTypeExpr) + } + + // Compile left side + ls := make([]*expr, len(lhs)) + nDefs := 0 + for i, le := range lhs { + // If this is a definition, get the identifier and its type + var ident *ast.Ident + var lt Type + switch tok { + case token.DEFINE: + // Check that it's an identifier + ident, ok = le.(*ast.Ident) + if !ok { + a.diagAt(le.Pos(), "left side of := must be a name") + // Suppress new defitions errors + nDefs++ + continue + } + + // Is this simply an assignment? + if _, ok := a.block.defs[ident.Name]; ok { + ident = nil + break + } + nDefs++ + + case token.VAR: + ident = le.(*ast.Ident) + } + + // If it's a definition, get or infer its type. + if ident != nil { + // Compute the identifier's type from the RHS + // type. We use the computed MultiType so we + // don't have to worry about unpacking. + switch { + case declTypeExpr != nil: + // We have a declaration type, use it. + // If declType is nil, we gave an + // error when we compiled it. + lt = declType + + case i >= len(ac.rmt.Elems): + // Define a placeholder. We already + // gave the "not enough" error above. + lt = nil + + case ac.rmt.Elems[i] == nil: + // We gave the error when we compiled + // the RHS. 
+ lt = nil + + case ac.rmt.Elems[i].isIdeal(): + // If the type is absent and the + // corresponding expression is a + // constant expression of ideal + // integer or ideal float type, the + // type of the declared variable is + // int or float respectively. + switch { + case ac.rmt.Elems[i].isInteger(): + lt = IntType + case ac.rmt.Elems[i].isFloat(): + lt = Float64Type + default: + log.Panicf("unexpected ideal type %v", rs[i].t) + } + + default: + lt = ac.rmt.Elems[i] + } + } + + // If it's a definition, define the identifier + if ident != nil { + if a.defineVar(ident, lt) == nil { + continue + } + } + + // Compile LHS + ls[i] = a.compileExpr(a.block, false, le) + if ls[i] == nil { + continue + } + + if ls[i].evalMapValue != nil { + // Map indexes are not generally addressable, + // but they are assignable. + // + // TODO(austin) Now that the expression + // compiler uses semantic values, this might + // be easier to implement as a function call. + sub := ls[i] + ls[i] = ls[i].newExpr(sub.t, sub.desc) + ls[i].evalMapValue = sub.evalMapValue + mvf := sub.evalMapValue + et := sub.t + ls[i].evalAddr = func(t *Thread) Value { + m, k := mvf(t) + e := m.Elem(t, k) + if e == nil { + e = et.Zero() + m.SetElem(t, k, e) + } + return e + } + } else if ls[i].evalAddr == nil { + ls[i].diag("cannot assign to %s", ls[i].desc) + continue + } + } + + // A short variable declaration may redeclare variables + // provided they were originally declared in the same block + // with the same type, and at least one of the variables is + // new. + if tok == token.DEFINE && nDefs == 0 { + a.diag("at least one new variable must be declared") + return + } + + // If there have been errors, our arrays are full of nil's so + // get out of here now. 
+ if nerr != a.numError() { + return + } + + // Check for 'a[x] = r, ok' + if len(ls) == 1 && len(rs) == 2 && ls[0].evalMapValue != nil { + a.diag("a[x] = r, ok form not implemented") + return + } + + // Create assigner + var lt Type + n := len(lhs) + if n == 1 { + lt = ls[0].t + } else { + lts := make([]Type, len(ls)) + for i, l := range ls { + if l != nil { + lts[i] = l.t + } + } + lt = NewMultiType(lts) + } + bc := a.enterChild() + defer bc.exit() + assign := ac.compile(bc.block, lt) + if assign == nil { + return + } + + // Compile + if n == 1 { + // Don't need temporaries and can avoid []Value. + lf := ls[0].evalAddr + a.push(func(t *Thread) { assign(lf(t), t) }) + } else if tok == token.VAR || (tok == token.DEFINE && nDefs == n) { + // Don't need temporaries + lfs := make([]func(*Thread) Value, n) + for i, l := range ls { + lfs[i] = l.evalAddr + } + a.push(func(t *Thread) { + dest := make([]Value, n) + for i, lf := range lfs { + dest[i] = lf(t) + } + assign(multiV(dest), t) + }) + } else { + // Need temporaries + lmt := lt.(*MultiType) + lfs := make([]func(*Thread) Value, n) + for i, l := range ls { + lfs[i] = l.evalAddr + } + a.push(func(t *Thread) { + temp := lmt.Zero().(multiV) + assign(temp, t) + // Copy to destination + for i := 0; i < n; i++ { + // TODO(austin) Need to evaluate LHS + // before RHS + lfs[i](t).Assign(t, temp[i]) + } + }) + } +} + +var assignOpToOp = map[token.Token]token.Token{ + token.ADD_ASSIGN: token.ADD, + token.SUB_ASSIGN: token.SUB, + token.MUL_ASSIGN: token.MUL, + token.QUO_ASSIGN: token.QUO, + token.REM_ASSIGN: token.REM, + + token.AND_ASSIGN: token.AND, + token.OR_ASSIGN: token.OR, + token.XOR_ASSIGN: token.XOR, + token.SHL_ASSIGN: token.SHL, + token.SHR_ASSIGN: token.SHR, + token.AND_NOT_ASSIGN: token.AND_NOT, +} + +func (a *stmtCompiler) doAssignOp(s *ast.AssignStmt) { + if len(s.Lhs) != 1 || len(s.Rhs) != 1 { + a.diag("tuple assignment cannot be combined with an arithmetic operation") + return + } + + // Create temporary block 
for extractEffect + bc := a.enterChild() + defer bc.exit() + + l := a.compileExpr(bc.block, false, s.Lhs[0]) + r := a.compileExpr(bc.block, false, s.Rhs[0]) + if l == nil || r == nil { + return + } + + if l.evalAddr == nil { + l.diag("cannot assign to %s", l.desc) + return + } + + effect, l := l.extractEffect(bc.block, "operator-assignment") + + binop := r.compileBinaryExpr(assignOpToOp[s.Tok], l, r) + if binop == nil { + return + } + + assign := a.compileAssign(s.Pos(), bc.block, l.t, []*expr{binop}, "assignment", "value") + if assign == nil { + log.Panicf("compileAssign type check failed") + } + + lf := l.evalAddr + a.push(func(t *Thread) { + effect(t) + assign(lf(t), t) + }) +} + +func (a *stmtCompiler) compileAssignStmt(s *ast.AssignStmt) { + switch s.Tok { + case token.ASSIGN, token.DEFINE: + a.doAssign(s.Lhs, s.Rhs, s.Tok, nil) + + default: + a.doAssignOp(s) + } +} + +func (a *stmtCompiler) compileReturnStmt(s *ast.ReturnStmt) { + if a.fnType == nil { + a.diag("cannot return at the top level") + return + } + + if len(s.Results) == 0 && (len(a.fnType.Out) == 0 || a.outVarsNamed) { + // Simple case. Simply exit from the function. + a.flow.putTerm() + a.push(func(v *Thread) { v.pc = returnPC }) + return + } + + bc := a.enterChild() + defer bc.exit() + + // Compile expressions + bad := false + rs := make([]*expr, len(s.Results)) + for i, re := range s.Results { + rs[i] = a.compileExpr(bc.block, false, re) + if rs[i] == nil { + bad = true + } + } + if bad { + return + } + + // Create assigner + + // However, if the expression list in the "return" statement + // is a single call to a multi-valued function, the values + // returned from the called function will be returned from + // this one. + assign := a.compileAssign(s.Pos(), bc.block, NewMultiType(a.fnType.Out), rs, "return", "value") + + // XXX(Spec) "The result types of the current function and the + // called function must match." Match is fuzzy. It should + // say that they must be assignment compatible. 
+ + // Compile + start := len(a.fnType.In) + nout := len(a.fnType.Out) + a.flow.putTerm() + a.push(func(t *Thread) { + assign(multiV(t.f.Vars[start:start+nout]), t) + t.pc = returnPC + }) +} + +func (a *stmtCompiler) findLexicalLabel(name *ast.Ident, pred func(*label) bool, errOp, errCtx string) *label { + bc := a.blockCompiler + for ; bc != nil; bc = bc.parent { + if bc.label == nil { + continue + } + l := bc.label + if name == nil && pred(l) { + return l + } + if name != nil && l.name == name.Name { + if !pred(l) { + a.diag("cannot %s to %s %s", errOp, l.desc, l.name) + return nil + } + return l + } + } + if name == nil { + a.diag("%s outside %s", errOp, errCtx) + } else { + a.diag("%s label %s not defined", errOp, name.Name) + } + return nil +} + +func (a *stmtCompiler) compileBranchStmt(s *ast.BranchStmt) { + var pc *uint + + switch s.Tok { + case token.BREAK: + l := a.findLexicalLabel(s.Label, func(l *label) bool { return l.breakPC != nil }, "break", "for loop, switch, or select") + if l == nil { + return + } + pc = l.breakPC + + case token.CONTINUE: + l := a.findLexicalLabel(s.Label, func(l *label) bool { return l.continuePC != nil }, "continue", "for loop") + if l == nil { + return + } + pc = l.continuePC + + case token.GOTO: + l, ok := a.labels[s.Label.Name] + if !ok { + pc := badPC + l = &label{name: s.Label.Name, desc: "unresolved label", gotoPC: &pc, used: s.Pos()} + a.labels[l.name] = l + } + + pc = l.gotoPC + a.flow.putGoto(s.Pos(), l.name, a.block) + + case token.FALLTHROUGH: + a.diag("fallthrough outside switch") + return + + default: + log.Panic("Unexpected branch token %v", s.Tok) + } + + a.flow.put1(false, pc) + a.push(func(v *Thread) { v.pc = *pc }) +} + +func (a *stmtCompiler) compileBlockStmt(s *ast.BlockStmt) { + bc := a.enterChild() + bc.compileStmts(s) + bc.exit() +} + +func (a *stmtCompiler) compileIfStmt(s *ast.IfStmt) { + // The scope of any variables declared by [the init] statement + // extends to the end of the "if" statement and the 
variables + // are initialized once before the statement is entered. + // + // XXX(Spec) What this really wants to say is that there's an + // implicit scope wrapping every if, for, and switch + // statement. This is subtly different from what it actually + // says when there's a non-block else clause, because that + // else claus has to execute in a scope that is *not* the + // surrounding scope. + bc := a.enterChild() + defer bc.exit() + + // Compile init statement, if any + if s.Init != nil { + bc.compileStmt(s.Init) + } + + elsePC := badPC + endPC := badPC + + // Compile condition, if any. If there is no condition, we + // fall through to the body. + if s.Cond != nil { + e := bc.compileExpr(bc.block, false, s.Cond) + switch { + case e == nil: + // Error reported by compileExpr + case !e.t.isBoolean(): + e.diag("'if' condition must be boolean\n\t%v", e.t) + default: + eval := e.asBool() + a.flow.put1(true, &elsePC) + a.push(func(t *Thread) { + if !eval(t) { + t.pc = elsePC + } + }) + } + } + + // Compile body + body := bc.enterChild() + body.compileStmts(s.Body) + body.exit() + + // Compile else + if s.Else != nil { + // Skip over else if we executed the body + a.flow.put1(false, &endPC) + a.push(func(v *Thread) { v.pc = endPC }) + elsePC = a.nextPC() + bc.compileStmt(s.Else) + } else { + elsePC = a.nextPC() + } + endPC = a.nextPC() +} + +func (a *stmtCompiler) compileSwitchStmt(s *ast.SwitchStmt) { + // Create implicit scope around switch + bc := a.enterChild() + defer bc.exit() + + // Compile init statement, if any + if s.Init != nil { + bc.compileStmt(s.Init) + } + + // Compile condition, if any, and extract its effects + var cond *expr + condbc := bc.enterChild() + if s.Tag != nil { + e := condbc.compileExpr(condbc.block, false, s.Tag) + if e != nil { + var effect func(*Thread) + effect, cond = e.extractEffect(condbc.block, "switch") + a.push(effect) + } + } + + // Count cases + ncases := 0 + hasDefault := false + for _, c := range s.Body.List { + clause, ok 
:= c.(*ast.CaseClause) + if !ok { + a.diagAt(clause.Pos(), "switch statement must contain case clauses") + continue + } + if clause.Values == nil { + if hasDefault { + a.diagAt(clause.Pos(), "switch statement contains more than one default case") + } + hasDefault = true + } else { + ncases += len(clause.Values) + } + } + + // Compile case expressions + cases := make([]func(*Thread) bool, ncases) + i := 0 + for _, c := range s.Body.List { + clause, ok := c.(*ast.CaseClause) + if !ok { + continue + } + for _, v := range clause.Values { + e := condbc.compileExpr(condbc.block, false, v) + switch { + case e == nil: + // Error reported by compileExpr + case cond == nil && !e.t.isBoolean(): + a.diagAt(v.Pos(), "'case' condition must be boolean") + case cond == nil: + cases[i] = e.asBool() + case cond != nil: + // Create comparison + // TOOD(austin) This produces bad error messages + compare := e.compileBinaryExpr(token.EQL, cond, e) + if compare != nil { + cases[i] = compare.asBool() + } + } + i++ + } + } + + // Emit condition + casePCs := make([]*uint, ncases+1) + endPC := badPC + + a.flow.put(false, false, casePCs) + a.push(func(t *Thread) { + for i, c := range cases { + if c(t) { + t.pc = *casePCs[i] + return + } + } + t.pc = *casePCs[ncases] + }) + condbc.exit() + + // Compile cases + i = 0 + for _, c := range s.Body.List { + clause, ok := c.(*ast.CaseClause) + if !ok { + continue + } + + // Save jump PC's + pc := a.nextPC() + if clause.Values != nil { + for _ = range clause.Values { + casePCs[i] = &pc + i++ + } + } else { + // Default clause + casePCs[ncases] = &pc + } + + // Compile body + fall := false + for j, s := range clause.Body { + if br, ok := s.(*ast.BranchStmt); ok && br.Tok == token.FALLTHROUGH { + // println("Found fallthrough"); + // It may be used only as the final + // non-empty statement in a case or + // default clause in an expression + // "switch" statement. 
+ for _, s2 := range clause.Body[j+1:] { + // XXX(Spec) 6g also considers + // empty blocks to be empty + // statements. + if _, ok := s2.(*ast.EmptyStmt); !ok { + a.diagAt(s.Pos(), "fallthrough statement must be final statement in case") + break + } + } + fall = true + } else { + bc.compileStmt(s) + } + } + // Jump out of switch, unless there was a fallthrough + if !fall { + a.flow.put1(false, &endPC) + a.push(func(v *Thread) { v.pc = endPC }) + } + } + + // Get end PC + endPC = a.nextPC() + if !hasDefault { + casePCs[ncases] = &endPC + } +} + +func (a *stmtCompiler) compileForStmt(s *ast.ForStmt) { + // Wrap the entire for in a block. + bc := a.enterChild() + defer bc.exit() + + // Compile init statement, if any + if s.Init != nil { + bc.compileStmt(s.Init) + } + + bodyPC := badPC + postPC := badPC + checkPC := badPC + endPC := badPC + + // Jump to condition check. We generate slightly less code by + // placing the condition check after the body. + a.flow.put1(false, &checkPC) + a.push(func(v *Thread) { v.pc = checkPC }) + + // Compile body + bodyPC = a.nextPC() + body := bc.enterChild() + if a.stmtLabel != nil { + body.label = a.stmtLabel + } else { + body.label = &label{resolved: s.Pos()} + } + body.label.desc = "for loop" + body.label.breakPC = &endPC + body.label.continuePC = &postPC + body.compileStmts(s.Body) + body.exit() + + // Compile post, if any + postPC = a.nextPC() + if s.Post != nil { + // TODO(austin) Does the parser disallow short + // declarations in s.Post? + bc.compileStmt(s.Post) + } + + // Compile condition check, if any + checkPC = a.nextPC() + if s.Cond == nil { + // If the condition is absent, it is equivalent to true. 
+ a.flow.put1(false, &bodyPC) + a.push(func(v *Thread) { v.pc = bodyPC }) + } else { + e := bc.compileExpr(bc.block, false, s.Cond) + switch { + case e == nil: + // Error reported by compileExpr + case !e.t.isBoolean(): + a.diag("'for' condition must be boolean\n\t%v", e.t) + default: + eval := e.asBool() + a.flow.put1(true, &bodyPC) + a.push(func(t *Thread) { + if eval(t) { + t.pc = bodyPC + } + }) + } + } + + endPC = a.nextPC() +} + +/* + * Block compiler + */ + +func (a *blockCompiler) compileStmt(s ast.Stmt) { + sc := &stmtCompiler{a, s.Pos(), nil} + sc.compile(s) +} + +func (a *blockCompiler) compileStmts(block *ast.BlockStmt) { + for _, sub := range block.List { + a.compileStmt(sub) + } +} + +func (a *blockCompiler) enterChild() *blockCompiler { + block := a.block.enterChild() + return &blockCompiler{ + funcCompiler: a.funcCompiler, + block: block, + parent: a, + } +} + +func (a *blockCompiler) exit() { a.block.exit() } + +/* + * Function compiler + */ + +func (a *compiler) compileFunc(b *block, decl *FuncDecl, body *ast.BlockStmt) func(*Thread) Func { + // Create body scope + // + // The scope of a parameter or result is the body of the + // corresponding function. 
+ bodyScope := b.ChildScope() + defer bodyScope.exit() + for i, t := range decl.Type.In { + if decl.InNames[i] != nil { + bodyScope.DefineVar(decl.InNames[i].Name, decl.InNames[i].Pos(), t) + } else { + bodyScope.DefineTemp(t) + } + } + for i, t := range decl.Type.Out { + if decl.OutNames[i] != nil { + bodyScope.DefineVar(decl.OutNames[i].Name, decl.OutNames[i].Pos(), t) + } else { + bodyScope.DefineTemp(t) + } + } + + // Create block context + cb := newCodeBuf() + fc := &funcCompiler{ + compiler: a, + fnType: decl.Type, + outVarsNamed: len(decl.OutNames) > 0 && decl.OutNames[0] != nil, + codeBuf: cb, + flow: newFlowBuf(cb), + labels: make(map[string]*label), + } + bc := &blockCompiler{ + funcCompiler: fc, + block: bodyScope.block, + } + + // Compile body + nerr := a.numError() + bc.compileStmts(body) + fc.checkLabels() + if nerr != a.numError() { + return nil + } + + // Check that the body returned if necessary. We only check + // this if there were no errors compiling the body. + if len(decl.Type.Out) > 0 && fc.flow.reachesEnd(0) { + // XXX(Spec) Not specified. + a.diagAt(body.Rbrace, "function ends without a return statement") + return nil + } + + code := fc.get() + maxVars := bodyScope.maxVars + return func(t *Thread) Func { return &evalFunc{t.f, maxVars, code} } +} + +// Checks that labels were resolved and that all jumps obey scoping +// rules. Reports an error and set fc.err if any check fails. +func (a *funcCompiler) checkLabels() { + nerr := a.numError() + for _, l := range a.labels { + if !l.resolved.IsValid() { + a.diagAt(l.used, "label %s not defined", l.name) + } + } + if nerr != a.numError() { + // Don't check scopes if we have unresolved labels + return + } + + // Executing the "goto" statement must not cause any variables + // to come into scope that were not already in scope at the + // point of the goto. 
+ a.flow.gotosObeyScopes(a.compiler) +} diff --git a/libgo/go/exp/eval/stmt_test.go b/libgo/go/exp/eval/stmt_test.go new file mode 100644 index 000000000..a14a288d9 --- /dev/null +++ b/libgo/go/exp/eval/stmt_test.go @@ -0,0 +1,343 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package eval + +import "testing" + +var atLeastOneDecl = "at least one new variable must be declared" + +var stmtTests = []test{ + // Short declarations + Val1("x := i", "x", 1), + Val1("x := f", "x", 1.0), + // Type defaulting + Val1("a := 42", "a", 42), + Val1("a := 1.0", "a", 1.0), + // Parallel assignment + Val2("a, b := 1, 2", "a", 1, "b", 2), + Val2("a, i := 1, 2", "a", 1, "i", 2), + CErr("a, i := 1, f", opTypes), + CErr("a, b := 1, 2, 3", "too many"), + CErr("a := 1, 2", "too many"), + CErr("a, b := 1", "not enough"), + // Mixed declarations + CErr("i := 1", atLeastOneDecl), + CErr("i, u := 1, 2", atLeastOneDecl), + Val2("i, x := 2, f", "i", 2, "x", 1.0), + // Various errors + CErr("1 := 2", "left side of := must be a name"), + CErr("c, a := 1, 1", "cannot assign"), + // Unpacking + Val2("x, y := oneTwo()", "x", 1, "y", 2), + CErr("x := oneTwo()", "too many"), + CErr("x, y, z := oneTwo()", "not enough"), + CErr("x, y := oneTwo(), 2", "multi-valued"), + CErr("x := oneTwo()+2", opTypes), + // TOOD(austin) This error message is weird + CErr("x := void()", "not enough"), + // Placeholders + CErr("x := 1+\"x\"; i=x+1", opTypes), + + // Assignment + Val1("i = 2", "i", 2), + Val1("(i) = 2", "i", 2), + CErr("1 = 2", "cannot assign"), + CErr("1-1 = 2", "- expression"), + Val1("i = 2.0", "i", 2), + CErr("i = 2.2", constantTruncated), + CErr("u = -2", constantUnderflows), + CErr("i = f", opTypes), + CErr("i, u = 0, f", opTypes), + CErr("i, u = 0, f", "value 2"), + Val2("i, i2 = i2, i", "i", 2, "i2", 1), + CErr("c = 1", "cannot assign"), + + Val1("x := &i; *x = 2", "i", 2), + + 
Val1("ai[0] = 42", "ai", varray{42, 2}), + Val1("aai[1] = ai; ai[0] = 42", "aai", varray{varray{1, 2}, varray{1, 2}}), + Val1("aai = aai2", "aai", varray{varray{5, 6}, varray{7, 8}}), + + // Assignment conversions + Run("var sl []int; sl = &ai"), + CErr("type ST []int; type AT *[2]int; var x AT = &ai; var y ST = x", opTypes), + Run("type ST []int; var y ST = &ai"), + Run("type AT *[2]int; var x AT = &ai; var y []int = x"), + + // Op-assignment + Val1("i += 2", "i", 3), + Val("i", 1), + Val1("f += 2", "f", 3.0), + CErr("2 += 2", "cannot assign"), + CErr("i, j += 2", "cannot be combined"), + CErr("i += 2, 3", "cannot be combined"), + Val2("s2 := s; s += \"def\"", "s2", "abc", "s", "abcdef"), + CErr("s += 1", opTypes), + // Single evaluation + Val2("ai[func()int{i+=1;return 0}()] *= 3; i2 = ai[0]", "i", 2, "i2", 3), + + // Type declarations + // Identifiers + Run("type T int"), + CErr("type T x", "undefined"), + CErr("type T c", "constant"), + CErr("type T i", "variable"), + CErr("type T T", "recursive"), + CErr("type T x; type U T; var v U; v = 1", "undefined"), + // Pointer types + Run("type T *int"), + Run("type T *T"), + // Array types + Run("type T [5]int"), + Run("type T [c+42/2]int"), + Run("type T [2.0]int"), + CErr("type T [i]int", "constant expression"), + CErr("type T [2.5]int", constantTruncated), + CErr("type T [-1]int", "negative"), + CErr("type T [2]T", "recursive"), + // Struct types + Run("type T struct { a int; b int }"), + Run("type T struct { a int; int }"), + Run("type T struct { x *T }"), + Run("type T int; type U struct { T }"), + CErr("type T *int; type U struct { T }", "embedded.*pointer"), + CErr("type T *struct { T }", "embedded.*pointer"), + CErr("type T struct { a int; a int }", " a .*redeclared.*:1:17"), + CErr("type T struct { int; int }", "int .*redeclared.*:1:17"), + CErr("type T struct { int int; int }", "int .*redeclared.*:1:17"), + Run("type T struct { x *struct { T } }"), + CErr("type T struct { x struct { T } }", "recursive"), + 
CErr("type T struct { x }; type U struct { T }", "undefined"), + // Function types + Run("type T func()"), + Run("type T func(a, b int) int"), + Run("type T func(a, b int) (x int, y int)"), + Run("type T func(a, a int) (a int, a int)"), + Run("type T func(a, b int) (x, y int)"), + Run("type T func(int, int) (int, int)"), + CErr("type T func(x); type U T", "undefined"), + CErr("type T func(a T)", "recursive"), + // Interface types + Run("type T interface {x(a, b int) int}"), + Run("type T interface {x(a, b int) int}; type U interface {T; y(c int)}"), + CErr("type T interface {x(a int); x()}", "method x redeclared"), + CErr("type T interface {x()}; type U interface {T; x()}", "method x redeclared"), + CErr("type T int; type U interface {T}", "embedded type"), + // Parens + Run("type T (int)"), + + // Variable declarations + Val2("var x int", "i", 1, "x", 0), + Val1("var x = 1", "x", 1), + Val1("var x = 1.0", "x", 1.0), + Val1("var x int = 1.0", "x", 1), + // Placeholders + CErr("var x foo; x = 1", "undefined"), + CErr("var x foo = 1; x = 1", "undefined"), + // Redeclaration + CErr("var i, x int", " i .*redeclared"), + CErr("var x int; var x int", " x .*redeclared.*:1:5"), + + // Expression statements + CErr("x := func(){ 1-1 }", "expression statement"), + CErr("x := func(){ 1-1 }", "- expression"), + Val1("fn(2)", "i", 1), + + // IncDec statements + Val1("i++", "i", 2), + Val1("i--", "i", 0), + Val1("u++", "u", uint(2)), + Val1("u--", "u", uint(0)), + Val1("f++", "f", 2.0), + Val1("f--", "f", 0.0), + // Single evaluation + Val2("ai[func()int{i+=1;return 0}()]++; i2 = ai[0]", "i", 2, "i2", 2), + // Operand types + CErr("s++", opTypes), + CErr("s++", "'\\+\\+'"), + CErr("2++", "cannot assign"), + CErr("c++", "cannot assign"), + + // Function scoping + Val1("fn1 := func() { i=2 }; fn1()", "i", 2), + Val1("fn1 := func() { i:=2 }; fn1()", "i", 1), + Val2("fn1 := func() int { i=2; i:=3; i=4; return i }; x := fn1()", "i", 2, "x", 4), + + // Basic returns + CErr("fn1 := 
func() int {}", "return"), + Run("fn1 := func() {}"), + CErr("fn1 := func() (r int) {}", "return"), + Val1("fn1 := func() (r int) {return}; i = fn1()", "i", 0), + Val1("fn1 := func() (r int) {r = 2; return}; i = fn1()", "i", 2), + Val1("fn1 := func() (r int) {return 2}; i = fn1()", "i", 2), + Val1("fn1 := func(int) int {return 2}; i = fn1(1)", "i", 2), + + // Multi-valued returns + Val2("fn1 := func() (bool, int) {return true, 2}; x, y := fn1()", "x", true, "y", 2), + CErr("fn1 := func() int {return}", "not enough values"), + CErr("fn1 := func() int {return 1,2}", "too many values"), + CErr("fn1 := func() {return 1}", "too many values"), + CErr("fn1 := func() (int,int,int) {return 1,2}", "not enough values"), + Val2("fn1 := func() (int, int) {return oneTwo()}; x, y := fn1()", "x", 1, "y", 2), + CErr("fn1 := func() int {return oneTwo()}", "too many values"), + CErr("fn1 := func() (int,int,int) {return oneTwo()}", "not enough values"), + Val1("fn1 := func(x,y int) int {return x+y}; x := fn1(oneTwo())", "x", 3), + + // Return control flow + Val2("fn1 := func(x *int) bool { *x = 2; return true; *x = 3; }; x := fn1(&i)", "i", 2, "x", true), + + // Break/continue/goto/fallthrough + CErr("break", "outside"), + CErr("break foo", "break.*foo.*not defined"), + CErr("continue", "outside"), + CErr("continue foo", "continue.*foo.*not defined"), + CErr("fallthrough", "outside"), + CErr("goto foo", "foo.*not defined"), + CErr(" foo: foo:;", "foo.*redeclared.*:1:2"), + Val1("i+=2; goto L; i+=4; L: i+=8", "i", 1+2+8), + // Return checking + CErr("fn1 := func() int { goto L; return 1; L: }", "return"), + Run("fn1 := func() int { L: goto L; i = 2 }"), + Run("fn1 := func() int { return 1; L: goto L }"), + // Scope checking + Run("fn1 := func() { { L: x:=1 }; goto L }"), + CErr("fn1 := func() { { x:=1; L: }; goto L }", "into scope"), + CErr("fn1 := func() { goto L; x:=1; L: }", "into scope"), + Run("fn1 := func() { goto L; { L: x:=1 } }"), + CErr("fn1 := func() { goto L; { x:=1; L: } 
}", "into scope"), + + // Blocks + CErr("fn1 := func() int {{}}", "return"), + Val1("fn1 := func() bool { { return true } }; b := fn1()", "b", true), + + // If + Val2("if true { i = 2 } else { i = 3 }; i2 = 4", "i", 2, "i2", 4), + Val2("if false { i = 2 } else { i = 3 }; i2 = 4", "i", 3, "i2", 4), + Val2("if i == i2 { i = 2 } else { i = 3 }; i2 = 4", "i", 3, "i2", 4), + // Omit optional parts + Val2("if { i = 2 } else { i = 3 }; i2 = 4", "i", 2, "i2", 4), + Val2("if true { i = 2 }; i2 = 4", "i", 2, "i2", 4), + Val2("if false { i = 2 }; i2 = 4", "i", 1, "i2", 4), + // Init + Val2("if x := true; x { i = 2 } else { i = 3 }; i2 = 4", "i", 2, "i2", 4), + Val2("if x := false; x { i = 2 } else { i = 3 }; i2 = 4", "i", 3, "i2", 4), + // Statement else + Val2("if true { i = 2 } else i = 3; i2 = 4", "i", 2, "i2", 4), + Val2("if false { i = 2 } else i = 3; i2 = 4", "i", 3, "i2", 4), + // Scoping + Val2("if true { i := 2 } else { i := 3 }; i2 = i", "i", 1, "i2", 1), + Val2("if false { i := 2 } else { i := 3 }; i2 = i", "i", 1, "i2", 1), + Val2("if false { i := 2 } else i := 3; i2 = i", "i", 1, "i2", 1), + CErr("if true { x := 2 }; x = 4", undefined), + Val2("if i := 2; true { i2 = i; i := 3 }", "i", 1, "i2", 2), + Val2("if i := 2; false {} else { i2 = i; i := 3 }", "i", 1, "i2", 2), + // Return checking + Run("fn1 := func() int { if true { return 1 } else { return 2 } }"), + Run("fn1 := func() int { if true { return 1 } else return 2 }"), + CErr("fn1 := func() int { if true { return 1 } else { } }", "return"), + CErr("fn1 := func() int { if true { } else { return 1 } }", "return"), + CErr("fn1 := func() int { if true { } else return 1 }", "return"), + CErr("fn1 := func() int { if true { } else { } }", "return"), + CErr("fn1 := func() int { if true { return 1 } }", "return"), + CErr("fn1 := func() int { if true { } }", "return"), + Run("fn1 := func() int { if true { }; return 1 }"), + CErr("fn1 := func() int { if { } }", "return"), + CErr("fn1 := func() int { if { } else { 
return 2 } }", "return"), + Run("fn1 := func() int { if { return 1 } }"), + Run("fn1 := func() int { if { return 1 } else { } }"), + Run("fn1 := func() int { if { return 1 } else { } }"), + + // Switch + Val1("switch { case false: i += 2; case true: i += 4; default: i += 8 }", "i", 1+4), + Val1("switch { default: i += 2; case false: i += 4; case true: i += 8 }", "i", 1+8), + CErr("switch { default: i += 2; default: i += 4 }", "more than one"), + Val1("switch false { case false: i += 2; case true: i += 4; default: i += 8 }", "i", 1+2), + CErr("switch s { case 1: }", opTypes), + CErr("switch ai { case ai: i += 2 }", opTypes), + Val1("switch 1.0 { case 1: i += 2; case 2: i += 4 }", "i", 1+2), + Val1("switch 1.5 { case 1: i += 2; case 2: i += 4 }", "i", 1), + CErr("switch oneTwo() {}", "multi-valued expression"), + Val1("switch 2 { case 1: i += 2; fallthrough; case 2: i += 4; fallthrough; case 3: i += 8; fallthrough }", "i", 1+4+8), + Val1("switch 5 { case 1: i += 2; fallthrough; default: i += 4; fallthrough; case 2: i += 8; fallthrough; case 3: i += 16; fallthrough }", "i", 1+4+8+16), + CErr("switch { case true: fallthrough; i += 2 }", "final statement"), + Val1("switch { case true: i += 2; fallthrough; ; ; case false: i += 4 }", "i", 1+2+4), + Val1("switch 2 { case 0, 1: i += 2; case 2, 3: i += 4 }", "i", 1+4), + Val2("switch func()int{i2++;return 5}() { case 1, 2: i += 2; case 4, 5: i += 4 }", "i", 1+4, "i2", 3), + Run("switch i { case i: }"), + // TODO(austin) Why doesn't this fail? 
+ //CErr("case 1:", "XXX"), + + // For + Val2("for x := 1; x < 5; x++ { i+=x }; i2 = 4", "i", 11, "i2", 4), + Val2("for x := 1; x < 5; x++ { i+=x; break; i++ }; i2 = 4", "i", 2, "i2", 4), + Val2("for x := 1; x < 5; x++ { i+=x; continue; i++ }; i2 = 4", "i", 11, "i2", 4), + Val2("for i = 2; false; i = 3 { i = 4 }; i2 = 4", "i", 2, "i2", 4), + Val2("for i < 5 { i++ }; i2 = 4", "i", 5, "i2", 4), + Val2("for i < 0 { i++ }; i2 = 4", "i", 1, "i2", 4), + // Scoping + Val2("for i := 2; true; { i2 = i; i := 3; break }", "i", 1, "i2", 2), + // Labeled break/continue + Val1("L1: for { L2: for { i+=2; break L1; i+=4 }; i+=8 }", "i", 1+2), + Val1("L1: for { L2: for { i+=2; break L2; i+=4 }; i+=8; break; i+=16 }", "i", 1+2+8), + CErr("L1: { for { break L1 } }", "break.*not defined"), + CErr("L1: for {}; for { break L1 }", "break.*not defined"), + CErr("L1:; for { break L1 }", "break.*not defined"), + Val2("L1: for i = 0; i < 2; i++ { L2: for { i2++; continue L1; i2++ } }", "i", 2, "i2", 4), + CErr("L1: { for { continue L1 } }", "continue.*not defined"), + CErr("L1:; for { continue L1 }", "continue.*not defined"), + // Return checking + Run("fn1 := func() int{ for {} }"), + CErr("fn1 := func() int{ for true {} }", "return"), + CErr("fn1 := func() int{ for true {return 1} }", "return"), + CErr("fn1 := func() int{ for {break} }", "return"), + Run("fn1 := func() int{ for { for {break} } }"), + CErr("fn1 := func() int{ L1: for { for {break L1} } }", "return"), + Run("fn1 := func() int{ for true {}; return 1 }"), + + // Selectors + Val1("var x struct { a int; b int }; x.a = 42; i = x.a", "i", 42), + Val1("type T struct { x int }; var y struct { T }; y.x = 42; i = y.x", "i", 42), + Val2("type T struct { x int }; var y struct { T; x int }; y.x = 42; i = y.x; i2 = y.T.x", "i", 42, "i2", 0), + Run("type T struct { x int }; var y struct { *T }; a := func(){i=y.x}"), + CErr("type T struct { x int }; var x T; x.y = 42", "no field"), + CErr("type T struct { x int }; type U struct { x int }; 
var y struct { T; U }; y.x = 42", "ambiguous.*\tT\\.x\n\tU\\.x"), + CErr("type T struct { *T }; var x T; x.foo", "no field"), + + Val1("fib := func(int) int{return 0;}; fib = func(v int) int { if v < 2 { return 1 }; return fib(v-1)+fib(v-2) }; i = fib(20)", "i", 10946), + + // Make slice + Val2("x := make([]int, 2); x[0] = 42; i, i2 = x[0], x[1]", "i", 42, "i2", 0), + Val2("x := make([]int, 2); x[1] = 42; i, i2 = x[0], x[1]", "i", 0, "i2", 42), + RErr("x := make([]int, 2); x[-i] = 42", "negative index"), + RErr("x := make([]int, 2); x[2] = 42", "index 2 exceeds"), + Val2("x := make([]int, 2, 3); i, i2 = len(x), cap(x)", "i", 2, "i2", 3), + Val2("x := make([]int, 3, 2); i, i2 = len(x), cap(x)", "i", 3, "i2", 3), + RErr("x := make([]int, -i)", "negative length"), + RErr("x := make([]int, 2, -i)", "negative capacity"), + RErr("x := make([]int, 2, 3); x[2] = 42", "index 2 exceeds"), + CErr("x := make([]int, 2, 3, 4)", "too many"), + CErr("x := make([]int)", "not enough"), + + // TODO(austin) Test make map + + // Maps + Val1("x := make(map[int] int); x[1] = 42; i = x[1]", "i", 42), + Val2("x := make(map[int] int); x[1] = 42; i, y := x[1]", "i", 42, "y", true), + Val2("x := make(map[int] int); x[1] = 42; i, y := x[2]", "i", 0, "y", false), + // Not implemented + //Val1("x := make(map[int] int); x[1] = 42, true; i = x[1]", "i", 42), + //Val2("x := make(map[int] int); x[1] = 42; x[1] = 42, false; i, y := x[1]", "i", 0, "y", false), + Run("var x int; a := make(map[int] int); a[0], x = 1, 2"), + CErr("x := make(map[int] int); (func(a,b int){})(x[0])", "not enough"), + CErr("x := make(map[int] int); x[1] = oneTwo()", "too many"), + RErr("x := make(map[int] int); i = x[1]", "key '1' not found"), + + // Functions + Val2("func fib(n int) int { if n <= 2 { return n }; return fib(n-1) + fib(n-2) }", "fib(4)", 5, "fib(10)", 89), + Run("func f1(){}"), + Run2("func f1(){}", "f1()"), +} + +func TestStmt(t *testing.T) { runTests(t, "stmtTests", stmtTests) } diff --git 
a/libgo/go/exp/eval/type.go b/libgo/go/exp/eval/type.go new file mode 100644 index 000000000..3f272ce4b --- /dev/null +++ b/libgo/go/exp/eval/type.go @@ -0,0 +1,1252 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package eval + +import ( + "big" + "go/ast" + "go/token" + "log" + "reflect" + "sort" + "unsafe" // For Sizeof +) + + +// XXX(Spec) The type compatibility section is very confusing because +// it makes it seem like there are three distinct types of +// compatibility: plain compatibility, assignment compatibility, and +// comparison compatibility. As I understand it, there's really only +// assignment compatibility and comparison and conversion have some +// restrictions and have special meaning in some cases where the types +// are not otherwise assignment compatible. The comparison +// compatibility section is almost all about the semantics of +// comparison, not the type checking of it, so it would make much more +// sense in the comparison operators section. The compatibility and +// assignment compatibility sections should be rolled into one. + +type Type interface { + // compat returns whether this type is compatible with another + // type. If conv is false, this is normal compatibility, + // where two named types are compatible only if they are the + // same named type. If conv if true, this is conversion + // compatibility, where two named types are conversion + // compatible if their definitions are conversion compatible. + // + // TODO(austin) Deal with recursive types + compat(o Type, conv bool) bool + // lit returns this type's literal. If this is a named type, + // this is the unnamed underlying type. Otherwise, this is an + // identity operation. + lit() Type + // isBoolean returns true if this is a boolean type. + isBoolean() bool + // isInteger returns true if this is an integer type. 
+ isInteger() bool + // isFloat returns true if this is a floating type. + isFloat() bool + // isIdeal returns true if this is an ideal int or float. + isIdeal() bool + // Zero returns a new zero value of this type. + Zero() Value + // String returns the string representation of this type. + String() string + // The position where this type was defined, if any. + Pos() token.Pos +} + +type BoundedType interface { + Type + // minVal returns the smallest value of this type. + minVal() *big.Rat + // maxVal returns the largest value of this type. + maxVal() *big.Rat +} + +var universePos = token.NoPos + +/* + * Type array maps. These are used to memoize composite types. + */ + +type typeArrayMapEntry struct { + key []Type + v interface{} + next *typeArrayMapEntry +} + +type typeArrayMap map[uintptr]*typeArrayMapEntry + +func hashTypeArray(key []Type) uintptr { + hash := uintptr(0) + for _, t := range key { + hash = hash * 33 + if t == nil { + continue + } + addr := reflect.NewValue(t).(*reflect.PtrValue).Get() + hash ^= addr + } + return hash +} + +func newTypeArrayMap() typeArrayMap { return make(map[uintptr]*typeArrayMapEntry) } + +func (m typeArrayMap) Get(key []Type) interface{} { + ent, ok := m[hashTypeArray(key)] + if !ok { + return nil + } + +nextEnt: + for ; ent != nil; ent = ent.next { + if len(key) != len(ent.key) { + continue + } + for i := 0; i < len(key); i++ { + if key[i] != ent.key[i] { + continue nextEnt + } + } + // Found it + return ent.v + } + + return nil +} + +func (m typeArrayMap) Put(key []Type, v interface{}) interface{} { + hash := hashTypeArray(key) + ent := m[hash] + + new := &typeArrayMapEntry{key, v, ent} + m[hash] = new + return v +} + +/* + * Common type + */ + +type commonType struct{} + +func (commonType) isBoolean() bool { return false } + +func (commonType) isInteger() bool { return false } + +func (commonType) isFloat() bool { return false } + +func (commonType) isIdeal() bool { return false } + +func (commonType) Pos() token.Pos { 
return token.NoPos } + +/* + * Bool + */ + +type boolType struct { + commonType +} + +var BoolType = universe.DefineType("bool", universePos, &boolType{}) + +func (t *boolType) compat(o Type, conv bool) bool { + _, ok := o.lit().(*boolType) + return ok +} + +func (t *boolType) lit() Type { return t } + +func (t *boolType) isBoolean() bool { return true } + +func (boolType) String() string { + // Use angle brackets as a convention for printing the + // underlying, unnamed type. This should only show up in + // debug output. + return "<bool>" +} + +func (t *boolType) Zero() Value { + res := boolV(false) + return &res +} + +/* + * Uint + */ + +type uintType struct { + commonType + + // 0 for architecture-dependent types + Bits uint + // true for uintptr, false for all others + Ptr bool + name string +} + +var ( + Uint8Type = universe.DefineType("uint8", universePos, &uintType{commonType{}, 8, false, "uint8"}) + Uint16Type = universe.DefineType("uint16", universePos, &uintType{commonType{}, 16, false, "uint16"}) + Uint32Type = universe.DefineType("uint32", universePos, &uintType{commonType{}, 32, false, "uint32"}) + Uint64Type = universe.DefineType("uint64", universePos, &uintType{commonType{}, 64, false, "uint64"}) + + UintType = universe.DefineType("uint", universePos, &uintType{commonType{}, 0, false, "uint"}) + UintptrType = universe.DefineType("uintptr", universePos, &uintType{commonType{}, 0, true, "uintptr"}) +) + +func (t *uintType) compat(o Type, conv bool) bool { + t2, ok := o.lit().(*uintType) + return ok && t == t2 +} + +func (t *uintType) lit() Type { return t } + +func (t *uintType) isInteger() bool { return true } + +func (t *uintType) String() string { return "<" + t.name + ">" } + +func (t *uintType) Zero() Value { + switch t.Bits { + case 0: + if t.Ptr { + res := uintptrV(0) + return &res + } else { + res := uintV(0) + return &res + } + case 8: + res := uint8V(0) + return &res + case 16: + res := uint16V(0) + return &res + case 32: + res := uint32V(0) 
+ return &res + case 64: + res := uint64V(0) + return &res + } + panic("unexpected uint bit count") +} + +func (t *uintType) minVal() *big.Rat { return big.NewRat(0, 1) } + +func (t *uintType) maxVal() *big.Rat { + bits := t.Bits + if bits == 0 { + if t.Ptr { + bits = uint(8 * unsafe.Sizeof(uintptr(0))) + } else { + bits = uint(8 * unsafe.Sizeof(uint(0))) + } + } + numer := big.NewInt(1) + numer.Lsh(numer, bits) + numer.Sub(numer, idealOne) + return new(big.Rat).SetInt(numer) +} + +/* + * Int + */ + +type intType struct { + commonType + + // XXX(Spec) Numeric types: "There is also a set of + // architecture-independent basic numeric types whose size + // depends on the architecture." Should that be + // architecture-dependent? + + // 0 for architecture-dependent types + Bits uint + name string +} + +var ( + Int8Type = universe.DefineType("int8", universePos, &intType{commonType{}, 8, "int8"}) + Int16Type = universe.DefineType("int16", universePos, &intType{commonType{}, 16, "int16"}) + Int32Type = universe.DefineType("int32", universePos, &intType{commonType{}, 32, "int32"}) + Int64Type = universe.DefineType("int64", universePos, &intType{commonType{}, 64, "int64"}) + + IntType = universe.DefineType("int", universePos, &intType{commonType{}, 0, "int"}) +) + +func (t *intType) compat(o Type, conv bool) bool { + t2, ok := o.lit().(*intType) + return ok && t == t2 +} + +func (t *intType) lit() Type { return t } + +func (t *intType) isInteger() bool { return true } + +func (t *intType) String() string { return "<" + t.name + ">" } + +func (t *intType) Zero() Value { + switch t.Bits { + case 8: + res := int8V(0) + return &res + case 16: + res := int16V(0) + return &res + case 32: + res := int32V(0) + return &res + case 64: + res := int64V(0) + return &res + + case 0: + res := intV(0) + return &res + } + panic("unexpected int bit count") +} + +func (t *intType) minVal() *big.Rat { + bits := t.Bits + if bits == 0 { + bits = uint(8 * unsafe.Sizeof(int(0))) + } + numer := 
big.NewInt(-1) + numer.Lsh(numer, bits-1) + return new(big.Rat).SetInt(numer) +} + +func (t *intType) maxVal() *big.Rat { + bits := t.Bits + if bits == 0 { + bits = uint(8 * unsafe.Sizeof(int(0))) + } + numer := big.NewInt(1) + numer.Lsh(numer, bits-1) + numer.Sub(numer, idealOne) + return new(big.Rat).SetInt(numer) +} + +/* + * Ideal int + */ + +type idealIntType struct { + commonType +} + +var IdealIntType Type = &idealIntType{} + +func (t *idealIntType) compat(o Type, conv bool) bool { + _, ok := o.lit().(*idealIntType) + return ok +} + +func (t *idealIntType) lit() Type { return t } + +func (t *idealIntType) isInteger() bool { return true } + +func (t *idealIntType) isIdeal() bool { return true } + +func (t *idealIntType) String() string { return "ideal integer" } + +func (t *idealIntType) Zero() Value { return &idealIntV{idealZero} } + +/* + * Float + */ + +type floatType struct { + commonType + + // 0 for architecture-dependent type + Bits uint + + name string +} + +var ( + Float32Type = universe.DefineType("float32", universePos, &floatType{commonType{}, 32, "float32"}) + Float64Type = universe.DefineType("float64", universePos, &floatType{commonType{}, 64, "float64"}) +) + +func (t *floatType) compat(o Type, conv bool) bool { + t2, ok := o.lit().(*floatType) + return ok && t == t2 +} + +func (t *floatType) lit() Type { return t } + +func (t *floatType) isFloat() bool { return true } + +func (t *floatType) String() string { return "<" + t.name + ">" } + +func (t *floatType) Zero() Value { + switch t.Bits { + case 32: + res := float32V(0) + return &res + case 64: + res := float64V(0) + return &res + } + panic("unexpected float bit count") +} + +var maxFloat32Val *big.Rat +var maxFloat64Val *big.Rat +var minFloat32Val *big.Rat +var minFloat64Val *big.Rat + +func (t *floatType) minVal() *big.Rat { + bits := t.Bits + switch bits { + case 32: + return minFloat32Val + case 64: + return minFloat64Val + } + log.Panicf("unexpected floating point bit count: %d", bits) 
+ panic("unreachable") +} + +func (t *floatType) maxVal() *big.Rat { + bits := t.Bits + switch bits { + case 32: + return maxFloat32Val + case 64: + return maxFloat64Val + } + log.Panicf("unexpected floating point bit count: %d", bits) + panic("unreachable") +} + +/* + * Ideal float + */ + +type idealFloatType struct { + commonType +} + +var IdealFloatType Type = &idealFloatType{} + +func (t *idealFloatType) compat(o Type, conv bool) bool { + _, ok := o.lit().(*idealFloatType) + return ok +} + +func (t *idealFloatType) lit() Type { return t } + +func (t *idealFloatType) isFloat() bool { return true } + +func (t *idealFloatType) isIdeal() bool { return true } + +func (t *idealFloatType) String() string { return "ideal float" } + +func (t *idealFloatType) Zero() Value { return &idealFloatV{big.NewRat(0, 1)} } + +/* + * String + */ + +type stringType struct { + commonType +} + +var StringType = universe.DefineType("string", universePos, &stringType{}) + +func (t *stringType) compat(o Type, conv bool) bool { + _, ok := o.lit().(*stringType) + return ok +} + +func (t *stringType) lit() Type { return t } + +func (t *stringType) String() string { return "<string>" } + +func (t *stringType) Zero() Value { + res := stringV("") + return &res +} + +/* + * Array + */ + +type ArrayType struct { + commonType + Len int64 + Elem Type +} + +var arrayTypes = make(map[int64]map[Type]*ArrayType) + +// Two array types are identical if they have identical element types +// and the same array length. 
+ +func NewArrayType(len int64, elem Type) *ArrayType { + ts, ok := arrayTypes[len] + if !ok { + ts = make(map[Type]*ArrayType) + arrayTypes[len] = ts + } + t, ok := ts[elem] + if !ok { + t = &ArrayType{commonType{}, len, elem} + ts[elem] = t + } + return t +} + +func (t *ArrayType) compat(o Type, conv bool) bool { + t2, ok := o.lit().(*ArrayType) + if !ok { + return false + } + return t.Len == t2.Len && t.Elem.compat(t2.Elem, conv) +} + +func (t *ArrayType) lit() Type { return t } + +func (t *ArrayType) String() string { return "[]" + t.Elem.String() } + +func (t *ArrayType) Zero() Value { + res := arrayV(make([]Value, t.Len)) + // TODO(austin) It's unfortunate that each element is + // separately heap allocated. We could add ZeroArray to + // everything, though that doesn't help with multidimensional + // arrays. Or we could do something unsafe. We'll have this + // same problem with structs. + for i := int64(0); i < t.Len; i++ { + res[i] = t.Elem.Zero() + } + return &res +} + +/* + * Struct + */ + +type StructField struct { + Name string + Type Type + Anonymous bool +} + +type StructType struct { + commonType + Elems []StructField +} + +var structTypes = newTypeArrayMap() + +// Two struct types are identical if they have the same sequence of +// fields, and if corresponding fields have the same names and +// identical types. Two anonymous fields are considered to have the +// same name. + +func NewStructType(fields []StructField) *StructType { + // Start by looking up just the types + fts := make([]Type, len(fields)) + for i, f := range fields { + fts[i] = f.Type + } + tMapI := structTypes.Get(fts) + if tMapI == nil { + tMapI = structTypes.Put(fts, make(map[string]*StructType)) + } + tMap := tMapI.(map[string]*StructType) + + // Construct key for field names + key := "" + for _, f := range fields { + // XXX(Spec) It's not clear if struct { T } and struct + // { T T } are either identical or compatible. 
The + // "Struct Types" section says that the name of that + // field is "T", which suggests that they are + // identical, but it really means that it's the name + // for the purpose of selector expressions and nothing + // else. We decided that they should be neither + // identical or compatible. + if f.Anonymous { + key += "!" + } + key += f.Name + " " + } + + // XXX(Spec) Do the tags also have to be identical for the + // types to be identical? I certainly hope so, because + // otherwise, this is the only case where two distinct type + // objects can represent identical types. + + t, ok := tMap[key] + if !ok { + // Create new struct type + t = &StructType{commonType{}, fields} + tMap[key] = t + } + return t +} + +func (t *StructType) compat(o Type, conv bool) bool { + t2, ok := o.lit().(*StructType) + if !ok { + return false + } + if len(t.Elems) != len(t2.Elems) { + return false + } + for i, e := range t.Elems { + e2 := t2.Elems[i] + // XXX(Spec) An anonymous and a non-anonymous field + // are neither identical nor compatible. + if e.Anonymous != e2.Anonymous || + (!e.Anonymous && e.Name != e2.Name) || + !e.Type.compat(e2.Type, conv) { + return false + } + } + return true +} + +func (t *StructType) lit() Type { return t } + +func (t *StructType) String() string { + s := "struct {" + for i, f := range t.Elems { + if i > 0 { + s += "; " + } + if !f.Anonymous { + s += f.Name + " " + } + s += f.Type.String() + } + return s + "}" +} + +func (t *StructType) Zero() Value { + res := structV(make([]Value, len(t.Elems))) + for i, f := range t.Elems { + res[i] = f.Type.Zero() + } + return &res +} + +/* + * Pointer + */ + +type PtrType struct { + commonType + Elem Type +} + +var ptrTypes = make(map[Type]*PtrType) + +// Two pointer types are identical if they have identical base types. 
+ +func NewPtrType(elem Type) *PtrType { + t, ok := ptrTypes[elem] + if !ok { + t = &PtrType{commonType{}, elem} + ptrTypes[elem] = t + } + return t +} + +func (t *PtrType) compat(o Type, conv bool) bool { + t2, ok := o.lit().(*PtrType) + if !ok { + return false + } + return t.Elem.compat(t2.Elem, conv) +} + +func (t *PtrType) lit() Type { return t } + +func (t *PtrType) String() string { return "*" + t.Elem.String() } + +func (t *PtrType) Zero() Value { return &ptrV{nil} } + +/* + * Function + */ + +type FuncType struct { + commonType + // TODO(austin) Separate receiver Type for methods? + In []Type + Variadic bool + Out []Type + builtin string +} + +var funcTypes = newTypeArrayMap() +var variadicFuncTypes = newTypeArrayMap() + +// Create singleton function types for magic built-in functions +var ( + capType = &FuncType{builtin: "cap"} + closeType = &FuncType{builtin: "close"} + closedType = &FuncType{builtin: "closed"} + lenType = &FuncType{builtin: "len"} + makeType = &FuncType{builtin: "make"} + newType = &FuncType{builtin: "new"} + panicType = &FuncType{builtin: "panic"} + printType = &FuncType{builtin: "print"} + printlnType = &FuncType{builtin: "println"} + copyType = &FuncType{builtin: "copy"} +) + +// Two function types are identical if they have the same number of +// parameters and result values and if corresponding parameter and +// result types are identical. All "..." parameters have identical +// type. Parameter and result names are not required to match. 
+ +func NewFuncType(in []Type, variadic bool, out []Type) *FuncType { + inMap := funcTypes + if variadic { + inMap = variadicFuncTypes + } + + outMapI := inMap.Get(in) + if outMapI == nil { + outMapI = inMap.Put(in, newTypeArrayMap()) + } + outMap := outMapI.(typeArrayMap) + + tI := outMap.Get(out) + if tI != nil { + return tI.(*FuncType) + } + + t := &FuncType{commonType{}, in, variadic, out, ""} + outMap.Put(out, t) + return t +} + +func (t *FuncType) compat(o Type, conv bool) bool { + t2, ok := o.lit().(*FuncType) + if !ok { + return false + } + if len(t.In) != len(t2.In) || t.Variadic != t2.Variadic || len(t.Out) != len(t2.Out) { + return false + } + for i := range t.In { + if !t.In[i].compat(t2.In[i], conv) { + return false + } + } + for i := range t.Out { + if !t.Out[i].compat(t2.Out[i], conv) { + return false + } + } + return true +} + +func (t *FuncType) lit() Type { return t } + +func typeListString(ts []Type, ns []*ast.Ident) string { + s := "" + for i, t := range ts { + if i > 0 { + s += ", " + } + if ns != nil && ns[i] != nil { + s += ns[i].Name + " " + } + if t == nil { + // Some places use nil types to represent errors + s += "<none>" + } else { + s += t.String() + } + } + return s +} + +func (t *FuncType) String() string { + if t.builtin != "" { + return "built-in function " + t.builtin + } + args := typeListString(t.In, nil) + if t.Variadic { + if len(args) > 0 { + args += ", " + } + args += "..." + } + s := "func(" + args + ")" + if len(t.Out) > 0 { + s += " (" + typeListString(t.Out, nil) + ")" + } + return s +} + +func (t *FuncType) Zero() Value { return &funcV{nil} } + +type FuncDecl struct { + Type *FuncType + Name *ast.Ident // nil for function literals + // InNames will be one longer than Type.In if this function is + // variadic. 
+ InNames []*ast.Ident + OutNames []*ast.Ident +} + +func (t *FuncDecl) String() string { + s := "func" + if t.Name != nil { + s += " " + t.Name.Name + } + s += funcTypeString(t.Type, t.InNames, t.OutNames) + return s +} + +func funcTypeString(ft *FuncType, ins []*ast.Ident, outs []*ast.Ident) string { + s := "(" + s += typeListString(ft.In, ins) + if ft.Variadic { + if len(ft.In) > 0 { + s += ", " + } + s += "..." + } + s += ")" + if len(ft.Out) > 0 { + s += " (" + typeListString(ft.Out, outs) + ")" + } + return s +} + +/* + * Interface + */ + +// TODO(austin) Interface values, types, and type compilation are +// implemented, but none of the type checking or semantics of +// interfaces are. + +type InterfaceType struct { + commonType + // TODO(austin) This should be a map from names to + // *FuncType's. We only need the sorted list for generating + // the type map key. It's detrimental for everything else. + methods []IMethod +} + +type IMethod struct { + Name string + Type *FuncType +} + +var interfaceTypes = newTypeArrayMap() + +func NewInterfaceType(methods []IMethod, embeds []*InterfaceType) *InterfaceType { + // Count methods of embedded interfaces + nMethods := len(methods) + for _, e := range embeds { + nMethods += len(e.methods) + } + + // Combine methods + allMethods := make([]IMethod, nMethods) + copy(allMethods, methods) + n := len(methods) + for _, e := range embeds { + for _, m := range e.methods { + allMethods[n] = m + n++ + } + } + + // Sort methods + sort.Sort(iMethodSorter(allMethods)) + + mts := make([]Type, len(allMethods)) + for i, m := range methods { + mts[i] = m.Type + } + tMapI := interfaceTypes.Get(mts) + if tMapI == nil { + tMapI = interfaceTypes.Put(mts, make(map[string]*InterfaceType)) + } + tMap := tMapI.(map[string]*InterfaceType) + + key := "" + for _, m := range allMethods { + key += m.Name + " " + } + + t, ok := tMap[key] + if !ok { + t = &InterfaceType{commonType{}, allMethods} + tMap[key] = t + } + return t +} + +type 
iMethodSorter []IMethod + +func (s iMethodSorter) Less(a, b int) bool { return s[a].Name < s[b].Name } + +func (s iMethodSorter) Swap(a, b int) { s[a], s[b] = s[b], s[a] } + +func (s iMethodSorter) Len() int { return len(s) } + +func (t *InterfaceType) compat(o Type, conv bool) bool { + t2, ok := o.lit().(*InterfaceType) + if !ok { + return false + } + if len(t.methods) != len(t2.methods) { + return false + } + for i, e := range t.methods { + e2 := t2.methods[i] + if e.Name != e2.Name || !e.Type.compat(e2.Type, conv) { + return false + } + } + return true +} + +func (t *InterfaceType) lit() Type { return t } + +func (t *InterfaceType) String() string { + // TODO(austin) Instead of showing embedded interfaces, this + // shows their methods. + s := "interface {" + for i, m := range t.methods { + if i > 0 { + s += "; " + } + s += m.Name + funcTypeString(m.Type, nil, nil) + } + return s + "}" +} + +// implementedBy tests if o implements t, returning nil, true if it does. +// Otherwise, it returns a method of t that o is missing and false. +func (t *InterfaceType) implementedBy(o Type) (*IMethod, bool) { + if len(t.methods) == 0 { + return nil, true + } + + // The methods of a named interface types are those of the + // underlying type. + if it, ok := o.lit().(*InterfaceType); ok { + o = it + } + + // XXX(Spec) Interface types: "A type implements any interface + // comprising any subset of its methods" It's unclear if + // methods must have identical or compatible types. 6g + // requires identical types. 
+ + switch o := o.(type) { + case *NamedType: + for _, tm := range t.methods { + sm, ok := o.methods[tm.Name] + if !ok || sm.decl.Type != tm.Type { + return &tm, false + } + } + return nil, true + + case *InterfaceType: + var ti, oi int + for ti < len(t.methods) && oi < len(o.methods) { + tm, om := &t.methods[ti], &o.methods[oi] + switch { + case tm.Name == om.Name: + if tm.Type != om.Type { + return tm, false + } + ti++ + oi++ + case tm.Name > om.Name: + oi++ + default: + return tm, false + } + } + if ti < len(t.methods) { + return &t.methods[ti], false + } + return nil, true + } + + return &t.methods[0], false +} + +func (t *InterfaceType) Zero() Value { return &interfaceV{} } + +/* + * Slice + */ + +type SliceType struct { + commonType + Elem Type +} + +var sliceTypes = make(map[Type]*SliceType) + +// Two slice types are identical if they have identical element types. + +func NewSliceType(elem Type) *SliceType { + t, ok := sliceTypes[elem] + if !ok { + t = &SliceType{commonType{}, elem} + sliceTypes[elem] = t + } + return t +} + +func (t *SliceType) compat(o Type, conv bool) bool { + t2, ok := o.lit().(*SliceType) + if !ok { + return false + } + return t.Elem.compat(t2.Elem, conv) +} + +func (t *SliceType) lit() Type { return t } + +func (t *SliceType) String() string { return "[]" + t.Elem.String() } + +func (t *SliceType) Zero() Value { + // The value of an uninitialized slice is nil. The length and + // capacity of a nil slice are 0. 
+ return &sliceV{Slice{nil, 0, 0}} +} + +/* + * Map type + */ + +type MapType struct { + commonType + Key Type + Elem Type +} + +var mapTypes = make(map[Type]map[Type]*MapType) + +func NewMapType(key Type, elem Type) *MapType { + ts, ok := mapTypes[key] + if !ok { + ts = make(map[Type]*MapType) + mapTypes[key] = ts + } + t, ok := ts[elem] + if !ok { + t = &MapType{commonType{}, key, elem} + ts[elem] = t + } + return t +} + +func (t *MapType) compat(o Type, conv bool) bool { + t2, ok := o.lit().(*MapType) + if !ok { + return false + } + return t.Elem.compat(t2.Elem, conv) && t.Key.compat(t2.Key, conv) +} + +func (t *MapType) lit() Type { return t } + +func (t *MapType) String() string { return "map[" + t.Key.String() + "] " + t.Elem.String() } + +func (t *MapType) Zero() Value { + // The value of an uninitialized map is nil. + return &mapV{nil} +} + +/* +type ChanType struct { + // TODO(austin) +} +*/ + +/* + * Named types + */ + +type Method struct { + decl *FuncDecl + fn Func +} + +type NamedType struct { + NamePos token.Pos + Name string + // Underlying type. If incomplete is true, this will be nil. + // If incomplete is false and this is still nil, then this is + // a placeholder type representing an error. + Def Type + // True while this type is being defined. + incomplete bool + methods map[string]Method +} + +// TODO(austin) This is temporarily needed by the debugger's remote +// type parser. This should only be possible with block.DefineType. +func NewNamedType(name string) *NamedType { + return &NamedType{token.NoPos, name, nil, true, make(map[string]Method)} +} + +func (t *NamedType) Pos() token.Pos { + return t.NamePos +} + +func (t *NamedType) Complete(def Type) { + if !t.incomplete { + log.Panicf("cannot complete already completed NamedType %+v", *t) + } + // We strip the name from def because multiple levels of + // naming are useless. 
+ if ndef, ok := def.(*NamedType); ok { + def = ndef.Def + } + t.Def = def + t.incomplete = false +} + +func (t *NamedType) compat(o Type, conv bool) bool { + t2, ok := o.(*NamedType) + if ok { + if conv { + // Two named types are conversion compatible + // if their literals are conversion + // compatible. + return t.Def.compat(t2.Def, conv) + } else { + // Two named types are compatible if their + // type names originate in the same type + // declaration. + return t == t2 + } + } + // A named and an unnamed type are compatible if the + // respective type literals are compatible. + return o.compat(t.Def, conv) +} + +func (t *NamedType) lit() Type { return t.Def.lit() } + +func (t *NamedType) isBoolean() bool { return t.Def.isBoolean() } + +func (t *NamedType) isInteger() bool { return t.Def.isInteger() } + +func (t *NamedType) isFloat() bool { return t.Def.isFloat() } + +func (t *NamedType) isIdeal() bool { return false } + +func (t *NamedType) String() string { return t.Name } + +func (t *NamedType) Zero() Value { return t.Def.Zero() } + +/* + * Multi-valued type + */ + +// MultiType is a special type used for multi-valued expressions, akin +// to a tuple type. It's not generally accessible within the +// language. 
+type MultiType struct { + commonType + Elems []Type +} + +var multiTypes = newTypeArrayMap() + +func NewMultiType(elems []Type) *MultiType { + if t := multiTypes.Get(elems); t != nil { + return t.(*MultiType) + } + + t := &MultiType{commonType{}, elems} + multiTypes.Put(elems, t) + return t +} + +func (t *MultiType) compat(o Type, conv bool) bool { + t2, ok := o.lit().(*MultiType) + if !ok { + return false + } + if len(t.Elems) != len(t2.Elems) { + return false + } + for i := range t.Elems { + if !t.Elems[i].compat(t2.Elems[i], conv) { + return false + } + } + return true +} + +var EmptyType Type = NewMultiType([]Type{}) + +func (t *MultiType) lit() Type { return t } + +func (t *MultiType) String() string { + if len(t.Elems) == 0 { + return "<none>" + } + return typeListString(t.Elems, nil) +} + +func (t *MultiType) Zero() Value { + res := make([]Value, len(t.Elems)) + for i, t := range t.Elems { + res[i] = t.Zero() + } + return multiV(res) +} + +/* + * Initialize the universe + */ + +func init() { + numer := big.NewInt(0xffffff) + numer.Lsh(numer, 127-23) + maxFloat32Val = new(big.Rat).SetInt(numer) + numer.SetInt64(0x1fffffffffffff) + numer.Lsh(numer, 1023-52) + maxFloat64Val = new(big.Rat).SetInt(numer) + minFloat32Val = new(big.Rat).Neg(maxFloat32Val) + minFloat64Val = new(big.Rat).Neg(maxFloat64Val) + + // To avoid portability issues all numeric types are distinct + // except byte, which is an alias for uint8. + + // Make byte an alias for the named type uint8. Type aliases + // are otherwise impossible in Go, so just hack it here. 
+ universe.defs["byte"] = universe.defs["uint8"] + + // Built-in functions + universe.DefineConst("cap", universePos, capType, nil) + universe.DefineConst("close", universePos, closeType, nil) + universe.DefineConst("closed", universePos, closedType, nil) + universe.DefineConst("copy", universePos, copyType, nil) + universe.DefineConst("len", universePos, lenType, nil) + universe.DefineConst("make", universePos, makeType, nil) + universe.DefineConst("new", universePos, newType, nil) + universe.DefineConst("panic", universePos, panicType, nil) + universe.DefineConst("print", universePos, printType, nil) + universe.DefineConst("println", universePos, printlnType, nil) +} diff --git a/libgo/go/exp/eval/typec.go b/libgo/go/exp/eval/typec.go new file mode 100644 index 000000000..de90cf664 --- /dev/null +++ b/libgo/go/exp/eval/typec.go @@ -0,0 +1,409 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package eval + +import ( + "go/ast" + "go/token" + "log" +) + + +/* + * Type compiler + */ + +type typeCompiler struct { + *compiler + block *block + // Check to be performed after a type declaration is compiled. + // + // TODO(austin) This will probably have to change after we + // eliminate forward declarations. 
+ lateCheck func() bool +} + +func (a *typeCompiler) compileIdent(x *ast.Ident, allowRec bool) Type { + _, _, def := a.block.Lookup(x.Name) + if def == nil { + a.diagAt(x.Pos(), "%s: undefined", x.Name) + return nil + } + switch def := def.(type) { + case *Constant: + a.diagAt(x.Pos(), "constant %v used as type", x.Name) + return nil + case *Variable: + a.diagAt(x.Pos(), "variable %v used as type", x.Name) + return nil + case *NamedType: + if !allowRec && def.incomplete { + a.diagAt(x.Pos(), "illegal recursive type") + return nil + } + if !def.incomplete && def.Def == nil { + // Placeholder type from an earlier error + return nil + } + return def + case Type: + return def + } + log.Panicf("name %s has unknown type %T", x.Name, def) + return nil +} + +func (a *typeCompiler) compileArrayType(x *ast.ArrayType, allowRec bool) Type { + // Compile element type + elem := a.compileType(x.Elt, allowRec) + + // Compile length expression + if x.Len == nil { + if elem == nil { + return nil + } + return NewSliceType(elem) + } + + if _, ok := x.Len.(*ast.Ellipsis); ok { + a.diagAt(x.Len.Pos(), "... 
array initailizers not implemented") + return nil + } + l, ok := a.compileArrayLen(a.block, x.Len) + if !ok { + return nil + } + if l < 0 { + a.diagAt(x.Len.Pos(), "array length must be non-negative") + return nil + } + if elem == nil { + return nil + } + + return NewArrayType(l, elem) +} + +func (a *typeCompiler) compileFields(fields *ast.FieldList, allowRec bool) ([]Type, []*ast.Ident, []token.Pos, bool) { + n := fields.NumFields() + ts := make([]Type, n) + ns := make([]*ast.Ident, n) + ps := make([]token.Pos, n) + bad := false + + if fields != nil { + i := 0 + for _, f := range fields.List { + t := a.compileType(f.Type, allowRec) + if t == nil { + bad = true + } + if f.Names == nil { + ns[i] = nil + ts[i] = t + ps[i] = f.Type.Pos() + i++ + continue + } + for _, n := range f.Names { + ns[i] = n + ts[i] = t + ps[i] = n.Pos() + i++ + } + } + } + + return ts, ns, ps, bad +} + +func (a *typeCompiler) compileStructType(x *ast.StructType, allowRec bool) Type { + ts, names, poss, bad := a.compileFields(x.Fields, allowRec) + + // XXX(Spec) The spec claims that field identifiers must be + // unique, but 6g only checks this when they are accessed. I + // think the spec is better in this regard: if I write two + // fields with the same name in the same struct type, clearly + // that's a mistake. This definition does *not* descend into + // anonymous fields, so it doesn't matter if those change. + // There's separate language in the spec about checking + // uniqueness of field names inherited from anonymous fields + // at use time. + fields := make([]StructField, len(ts)) + nameSet := make(map[string]token.Pos, len(ts)) + for i := range fields { + // Compute field name and check anonymous fields + var name string + if names[i] != nil { + name = names[i].Name + } else { + if ts[i] == nil { + continue + } + + var nt *NamedType + // [For anonymous fields,] the unqualified + // type name acts as the field identifier. 
+ switch t := ts[i].(type) { + case *NamedType: + name = t.Name + nt = t + case *PtrType: + switch t := t.Elem.(type) { + case *NamedType: + name = t.Name + nt = t + } + } + // [An anonymous field] must be specified as a + // type name T or as a pointer to a type name + // *T, and T itself, may not be a pointer or + // interface type. + if nt == nil { + a.diagAt(poss[i], "embedded type must T or *T, where T is a named type") + bad = true + continue + } + // The check for embedded pointer types must + // be deferred because of things like + // type T *struct { T } + lateCheck := a.lateCheck + a.lateCheck = func() bool { + if _, ok := nt.lit().(*PtrType); ok { + a.diagAt(poss[i], "embedded type %v is a pointer type", nt) + return false + } + return lateCheck() + } + } + + // Check name uniqueness + if prev, ok := nameSet[name]; ok { + a.diagAt(poss[i], "field %s redeclared\n\tprevious declaration at %s", name, a.fset.Position(prev)) + bad = true + continue + } + nameSet[name] = poss[i] + + // Create field + fields[i].Name = name + fields[i].Type = ts[i] + fields[i].Anonymous = (names[i] == nil) + } + + if bad { + return nil + } + + return NewStructType(fields) +} + +func (a *typeCompiler) compilePtrType(x *ast.StarExpr) Type { + elem := a.compileType(x.X, true) + if elem == nil { + return nil + } + return NewPtrType(elem) +} + +func (a *typeCompiler) compileFuncType(x *ast.FuncType, allowRec bool) *FuncDecl { + // TODO(austin) Variadic function types + + // The types of parameters and results must be complete. + // + // TODO(austin) It's not clear they actually have to be complete. 
+ in, inNames, _, inBad := a.compileFields(x.Params, allowRec) + out, outNames, _, outBad := a.compileFields(x.Results, allowRec) + + if inBad || outBad { + return nil + } + return &FuncDecl{NewFuncType(in, false, out), nil, inNames, outNames} +} + +func (a *typeCompiler) compileInterfaceType(x *ast.InterfaceType, allowRec bool) *InterfaceType { + ts, names, poss, bad := a.compileFields(x.Methods, allowRec) + + methods := make([]IMethod, len(ts)) + nameSet := make(map[string]token.Pos, len(ts)) + embeds := make([]*InterfaceType, len(ts)) + + var nm, ne int + for i := range ts { + if ts[i] == nil { + continue + } + + if names[i] != nil { + name := names[i].Name + methods[nm].Name = name + methods[nm].Type = ts[i].(*FuncType) + nm++ + if prev, ok := nameSet[name]; ok { + a.diagAt(poss[i], "method %s redeclared\n\tprevious declaration at %s", name, a.fset.Position(prev)) + bad = true + continue + } + nameSet[name] = poss[i] + } else { + // Embedded interface + it, ok := ts[i].lit().(*InterfaceType) + if !ok { + a.diagAt(poss[i], "embedded type must be an interface") + bad = true + continue + } + embeds[ne] = it + ne++ + for _, m := range it.methods { + if prev, ok := nameSet[m.Name]; ok { + a.diagAt(poss[i], "method %s redeclared\n\tprevious declaration at %s", m.Name, a.fset.Position(prev)) + bad = true + continue + } + nameSet[m.Name] = poss[i] + } + } + } + + if bad { + return nil + } + + methods = methods[0:nm] + embeds = embeds[0:ne] + + return NewInterfaceType(methods, embeds) +} + +func (a *typeCompiler) compileMapType(x *ast.MapType) Type { + key := a.compileType(x.Key, true) + val := a.compileType(x.Value, true) + if key == nil || val == nil { + return nil + } + // XXX(Spec) The Map types section explicitly lists all types + // that can be map keys except for function types. 
+ switch key.lit().(type) { + case *StructType: + a.diagAt(x.Pos(), "map key cannot be a struct type") + return nil + case *ArrayType: + a.diagAt(x.Pos(), "map key cannot be an array type") + return nil + case *SliceType: + a.diagAt(x.Pos(), "map key cannot be a slice type") + return nil + } + return NewMapType(key, val) +} + +func (a *typeCompiler) compileType(x ast.Expr, allowRec bool) Type { + switch x := x.(type) { + case *ast.BadExpr: + // Error already reported by parser + a.silentErrors++ + return nil + + case *ast.Ident: + return a.compileIdent(x, allowRec) + + case *ast.ArrayType: + return a.compileArrayType(x, allowRec) + + case *ast.StructType: + return a.compileStructType(x, allowRec) + + case *ast.StarExpr: + return a.compilePtrType(x) + + case *ast.FuncType: + fd := a.compileFuncType(x, allowRec) + if fd == nil { + return nil + } + return fd.Type + + case *ast.InterfaceType: + return a.compileInterfaceType(x, allowRec) + + case *ast.MapType: + return a.compileMapType(x) + + case *ast.ChanType: + goto notimpl + + case *ast.ParenExpr: + return a.compileType(x.X, allowRec) + + case *ast.Ellipsis: + a.diagAt(x.Pos(), "illegal use of ellipsis") + return nil + } + a.diagAt(x.Pos(), "expression used as type") + return nil + +notimpl: + a.diagAt(x.Pos(), "compileType: %T not implemented", x) + return nil +} + +/* + * Type compiler interface + */ + +func noLateCheck() bool { return true } + +func (a *compiler) compileType(b *block, typ ast.Expr) Type { + tc := &typeCompiler{a, b, noLateCheck} + t := tc.compileType(typ, false) + if !tc.lateCheck() { + t = nil + } + return t +} + +func (a *compiler) compileTypeDecl(b *block, decl *ast.GenDecl) bool { + ok := true + for _, spec := range decl.Specs { + spec := spec.(*ast.TypeSpec) + // Create incomplete type for this type + nt := b.DefineType(spec.Name.Name, spec.Name.Pos(), nil) + if nt != nil { + nt.(*NamedType).incomplete = true + } + // Compile type + tc := &typeCompiler{a, b, noLateCheck} + t := 
tc.compileType(spec.Type, false) + if t == nil { + // Create a placeholder type + ok = false + } + // Fill incomplete type + if nt != nil { + nt.(*NamedType).Complete(t) + } + // Perform late type checking with complete type + if !tc.lateCheck() { + ok = false + if nt != nil { + // Make the type a placeholder + nt.(*NamedType).Def = nil + } + } + } + return ok +} + +func (a *compiler) compileFuncType(b *block, typ *ast.FuncType) *FuncDecl { + tc := &typeCompiler{a, b, noLateCheck} + res := tc.compileFuncType(typ, false) + if res != nil { + if !tc.lateCheck() { + res = nil + } + } + return res +} diff --git a/libgo/go/exp/eval/value.go b/libgo/go/exp/eval/value.go new file mode 100644 index 000000000..daa691897 --- /dev/null +++ b/libgo/go/exp/eval/value.go @@ -0,0 +1,586 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package eval + +import ( + "big" + "fmt" +) + +type Value interface { + String() string + // Assign copies another value into this one. It should + // assume that the other value satisfies the same specific + // value interface (BoolValue, etc.), but must not assume + // anything about its specific type. + Assign(t *Thread, o Value) +} + +type BoolValue interface { + Value + Get(*Thread) bool + Set(*Thread, bool) +} + +type UintValue interface { + Value + Get(*Thread) uint64 + Set(*Thread, uint64) +} + +type IntValue interface { + Value + Get(*Thread) int64 + Set(*Thread, int64) +} + +// TODO(austin) IdealIntValue and IdealFloatValue should not exist +// because ideals are not l-values. 
+type IdealIntValue interface { + Value + Get() *big.Int +} + +type FloatValue interface { + Value + Get(*Thread) float64 + Set(*Thread, float64) +} + +type IdealFloatValue interface { + Value + Get() *big.Rat +} + +type StringValue interface { + Value + Get(*Thread) string + Set(*Thread, string) +} + +type ArrayValue interface { + Value + // TODO(austin) Get() is here for uniformity, but is + // completely useless. If a lot of other types have similarly + // useless Get methods, just special-case these uses. + Get(*Thread) ArrayValue + Elem(*Thread, int64) Value + // Sub returns an ArrayValue backed by the same array that + // starts from element i and has length len. + Sub(i int64, len int64) ArrayValue +} + +type StructValue interface { + Value + // TODO(austin) This is another useless Get() + Get(*Thread) StructValue + Field(*Thread, int) Value +} + +type PtrValue interface { + Value + Get(*Thread) Value + Set(*Thread, Value) +} + +type Func interface { + NewFrame() *Frame + Call(*Thread) +} + +type FuncValue interface { + Value + Get(*Thread) Func + Set(*Thread, Func) +} + +type Interface struct { + Type Type + Value Value +} + +type InterfaceValue interface { + Value + Get(*Thread) Interface + Set(*Thread, Interface) +} + +type Slice struct { + Base ArrayValue + Len, Cap int64 +} + +type SliceValue interface { + Value + Get(*Thread) Slice + Set(*Thread, Slice) +} + +type Map interface { + Len(*Thread) int64 + // Retrieve an element from the map, returning nil if it does + // not exist. + Elem(t *Thread, key interface{}) Value + // Set an entry in the map. If val is nil, delete the entry. + SetElem(t *Thread, key interface{}, val Value) + // TODO(austin) Perhaps there should be an iterator interface instead. 
+ Iter(func(key interface{}, val Value) bool) +} + +type MapValue interface { + Value + Get(*Thread) Map + Set(*Thread, Map) +} + +/* + * Bool + */ + +type boolV bool + +func (v *boolV) String() string { return fmt.Sprint(*v) } + +func (v *boolV) Assign(t *Thread, o Value) { *v = boolV(o.(BoolValue).Get(t)) } + +func (v *boolV) Get(*Thread) bool { return bool(*v) } + +func (v *boolV) Set(t *Thread, x bool) { *v = boolV(x) } + +/* + * Uint + */ + +type uint8V uint8 + +func (v *uint8V) String() string { return fmt.Sprint(*v) } + +func (v *uint8V) Assign(t *Thread, o Value) { *v = uint8V(o.(UintValue).Get(t)) } + +func (v *uint8V) Get(*Thread) uint64 { return uint64(*v) } + +func (v *uint8V) Set(t *Thread, x uint64) { *v = uint8V(x) } + +type uint16V uint16 + +func (v *uint16V) String() string { return fmt.Sprint(*v) } + +func (v *uint16V) Assign(t *Thread, o Value) { *v = uint16V(o.(UintValue).Get(t)) } + +func (v *uint16V) Get(*Thread) uint64 { return uint64(*v) } + +func (v *uint16V) Set(t *Thread, x uint64) { *v = uint16V(x) } + +type uint32V uint32 + +func (v *uint32V) String() string { return fmt.Sprint(*v) } + +func (v *uint32V) Assign(t *Thread, o Value) { *v = uint32V(o.(UintValue).Get(t)) } + +func (v *uint32V) Get(*Thread) uint64 { return uint64(*v) } + +func (v *uint32V) Set(t *Thread, x uint64) { *v = uint32V(x) } + +type uint64V uint64 + +func (v *uint64V) String() string { return fmt.Sprint(*v) } + +func (v *uint64V) Assign(t *Thread, o Value) { *v = uint64V(o.(UintValue).Get(t)) } + +func (v *uint64V) Get(*Thread) uint64 { return uint64(*v) } + +func (v *uint64V) Set(t *Thread, x uint64) { *v = uint64V(x) } + +type uintV uint + +func (v *uintV) String() string { return fmt.Sprint(*v) } + +func (v *uintV) Assign(t *Thread, o Value) { *v = uintV(o.(UintValue).Get(t)) } + +func (v *uintV) Get(*Thread) uint64 { return uint64(*v) } + +func (v *uintV) Set(t *Thread, x uint64) { *v = uintV(x) } + +type uintptrV uintptr + +func (v *uintptrV) String() string { 
return fmt.Sprint(*v) } + +func (v *uintptrV) Assign(t *Thread, o Value) { *v = uintptrV(o.(UintValue).Get(t)) } + +func (v *uintptrV) Get(*Thread) uint64 { return uint64(*v) } + +func (v *uintptrV) Set(t *Thread, x uint64) { *v = uintptrV(x) } + +/* + * Int + */ + +type int8V int8 + +func (v *int8V) String() string { return fmt.Sprint(*v) } + +func (v *int8V) Assign(t *Thread, o Value) { *v = int8V(o.(IntValue).Get(t)) } + +func (v *int8V) Get(*Thread) int64 { return int64(*v) } + +func (v *int8V) Set(t *Thread, x int64) { *v = int8V(x) } + +type int16V int16 + +func (v *int16V) String() string { return fmt.Sprint(*v) } + +func (v *int16V) Assign(t *Thread, o Value) { *v = int16V(o.(IntValue).Get(t)) } + +func (v *int16V) Get(*Thread) int64 { return int64(*v) } + +func (v *int16V) Set(t *Thread, x int64) { *v = int16V(x) } + +type int32V int32 + +func (v *int32V) String() string { return fmt.Sprint(*v) } + +func (v *int32V) Assign(t *Thread, o Value) { *v = int32V(o.(IntValue).Get(t)) } + +func (v *int32V) Get(*Thread) int64 { return int64(*v) } + +func (v *int32V) Set(t *Thread, x int64) { *v = int32V(x) } + +type int64V int64 + +func (v *int64V) String() string { return fmt.Sprint(*v) } + +func (v *int64V) Assign(t *Thread, o Value) { *v = int64V(o.(IntValue).Get(t)) } + +func (v *int64V) Get(*Thread) int64 { return int64(*v) } + +func (v *int64V) Set(t *Thread, x int64) { *v = int64V(x) } + +type intV int + +func (v *intV) String() string { return fmt.Sprint(*v) } + +func (v *intV) Assign(t *Thread, o Value) { *v = intV(o.(IntValue).Get(t)) } + +func (v *intV) Get(*Thread) int64 { return int64(*v) } + +func (v *intV) Set(t *Thread, x int64) { *v = intV(x) } + +/* + * Ideal int + */ + +type idealIntV struct { + V *big.Int +} + +func (v *idealIntV) String() string { return v.V.String() } + +func (v *idealIntV) Assign(t *Thread, o Value) { + v.V = o.(IdealIntValue).Get() +} + +func (v *idealIntV) Get() *big.Int { return v.V } + +/* + * Float + */ + +type float32V 
float32 + +func (v *float32V) String() string { return fmt.Sprint(*v) } + +func (v *float32V) Assign(t *Thread, o Value) { *v = float32V(o.(FloatValue).Get(t)) } + +func (v *float32V) Get(*Thread) float64 { return float64(*v) } + +func (v *float32V) Set(t *Thread, x float64) { *v = float32V(x) } + +type float64V float64 + +func (v *float64V) String() string { return fmt.Sprint(*v) } + +func (v *float64V) Assign(t *Thread, o Value) { *v = float64V(o.(FloatValue).Get(t)) } + +func (v *float64V) Get(*Thread) float64 { return float64(*v) } + +func (v *float64V) Set(t *Thread, x float64) { *v = float64V(x) } + +/* + * Ideal float + */ + +type idealFloatV struct { + V *big.Rat +} + +func (v *idealFloatV) String() string { return v.V.FloatString(6) } + +func (v *idealFloatV) Assign(t *Thread, o Value) { + v.V = o.(IdealFloatValue).Get() +} + +func (v *idealFloatV) Get() *big.Rat { return v.V } + +/* + * String + */ + +type stringV string + +func (v *stringV) String() string { return fmt.Sprint(*v) } + +func (v *stringV) Assign(t *Thread, o Value) { *v = stringV(o.(StringValue).Get(t)) } + +func (v *stringV) Get(*Thread) string { return string(*v) } + +func (v *stringV) Set(t *Thread, x string) { *v = stringV(x) } + +/* + * Array + */ + +type arrayV []Value + +func (v *arrayV) String() string { + res := "{" + for i, e := range *v { + if i > 0 { + res += ", " + } + res += e.String() + } + return res + "}" +} + +func (v *arrayV) Assign(t *Thread, o Value) { + oa := o.(ArrayValue) + l := int64(len(*v)) + for i := int64(0); i < l; i++ { + (*v)[i].Assign(t, oa.Elem(t, i)) + } +} + +func (v *arrayV) Get(*Thread) ArrayValue { return v } + +func (v *arrayV) Elem(t *Thread, i int64) Value { + return (*v)[i] +} + +func (v *arrayV) Sub(i int64, len int64) ArrayValue { + res := (*v)[i : i+len] + return &res +} + +/* + * Struct + */ + +type structV []Value + +// TODO(austin) Should these methods (and arrayV's) be on structV +// instead of *structV? 
+func (v *structV) String() string { + res := "{" + for i, v := range *v { + if i > 0 { + res += ", " + } + res += v.String() + } + return res + "}" +} + +func (v *structV) Assign(t *Thread, o Value) { + oa := o.(StructValue) + l := len(*v) + for i := 0; i < l; i++ { + (*v)[i].Assign(t, oa.Field(t, i)) + } +} + +func (v *structV) Get(*Thread) StructValue { return v } + +func (v *structV) Field(t *Thread, i int) Value { + return (*v)[i] +} + +/* + * Pointer + */ + +type ptrV struct { + // nil if the pointer is nil + target Value +} + +func (v *ptrV) String() string { + if v.target == nil { + return "<nil>" + } + return "&" + v.target.String() +} + +func (v *ptrV) Assign(t *Thread, o Value) { v.target = o.(PtrValue).Get(t) } + +func (v *ptrV) Get(*Thread) Value { return v.target } + +func (v *ptrV) Set(t *Thread, x Value) { v.target = x } + +/* + * Functions + */ + +type funcV struct { + target Func +} + +func (v *funcV) String() string { + // TODO(austin) Rob wants to see the definition + return "func {...}" +} + +func (v *funcV) Assign(t *Thread, o Value) { v.target = o.(FuncValue).Get(t) } + +func (v *funcV) Get(*Thread) Func { return v.target } + +func (v *funcV) Set(t *Thread, x Func) { v.target = x } + +/* + * Interfaces + */ + +type interfaceV struct { + Interface +} + +func (v *interfaceV) String() string { + if v.Type == nil || v.Value == nil { + return "<nil>" + } + return v.Value.String() +} + +func (v *interfaceV) Assign(t *Thread, o Value) { + v.Interface = o.(InterfaceValue).Get(t) +} + +func (v *interfaceV) Get(*Thread) Interface { return v.Interface } + +func (v *interfaceV) Set(t *Thread, x Interface) { + v.Interface = x +} + +/* + * Slices + */ + +type sliceV struct { + Slice +} + +func (v *sliceV) String() string { + if v.Base == nil { + return "<nil>" + } + return v.Base.Sub(0, v.Len).String() +} + +func (v *sliceV) Assign(t *Thread, o Value) { v.Slice = o.(SliceValue).Get(t) } + +func (v *sliceV) Get(*Thread) Slice { return v.Slice } + +func (v 
*sliceV) Set(t *Thread, x Slice) { v.Slice = x } + +/* + * Maps + */ + +type mapV struct { + target Map +} + +func (v *mapV) String() string { + if v.target == nil { + return "<nil>" + } + res := "map[" + i := 0 + v.target.Iter(func(key interface{}, val Value) bool { + if i > 0 { + res += ", " + } + i++ + res += fmt.Sprint(key) + ":" + val.String() + return true + }) + return res + "]" +} + +func (v *mapV) Assign(t *Thread, o Value) { v.target = o.(MapValue).Get(t) } + +func (v *mapV) Get(*Thread) Map { return v.target } + +func (v *mapV) Set(t *Thread, x Map) { v.target = x } + +type evalMap map[interface{}]Value + +func (m evalMap) Len(t *Thread) int64 { return int64(len(m)) } + +func (m evalMap) Elem(t *Thread, key interface{}) Value { + return m[key] +} + +func (m evalMap) SetElem(t *Thread, key interface{}, val Value) { + if val == nil { + m[key] = nil, false + } else { + m[key] = val + } +} + +func (m evalMap) Iter(cb func(key interface{}, val Value) bool) { + for k, v := range m { + if !cb(k, v) { + break + } + } +} + +/* + * Multi-values + */ + +type multiV []Value + +func (v multiV) String() string { + res := "(" + for i, v := range v { + if i > 0 { + res += ", " + } + res += v.String() + } + return res + ")" +} + +func (v multiV) Assign(t *Thread, o Value) { + omv := o.(multiV) + for i := range v { + v[i].Assign(t, omv[i]) + } +} + +/* + * Universal constants + */ + +func init() { + s := universe + + true := boolV(true) + s.DefineConst("true", universePos, BoolType, &true) + false := boolV(false) + s.DefineConst("false", universePos, BoolType, &false) +} diff --git a/libgo/go/exp/eval/world.go b/libgo/go/exp/eval/world.go new file mode 100644 index 000000000..02d18bd79 --- /dev/null +++ b/libgo/go/exp/eval/world.go @@ -0,0 +1,188 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This package is the beginning of an interpreter for Go. 
+// It can run simple Go programs but does not implement +// interface values or packages. +package eval + +import ( + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "os" +) + +type World struct { + scope *Scope + frame *Frame +} + +func NewWorld() *World { + w := new(World) + w.scope = universe.ChildScope() + w.scope.global = true // this block's vars allocate directly + return w +} + +type Code interface { + // The type of the value Run returns, or nil if Run returns nil. + Type() Type + + // Run runs the code; if the code is a single expression + // with a value, it returns the value; otherwise it returns nil. + Run() (Value, os.Error) +} + +type stmtCode struct { + w *World + code code +} + +func (w *World) CompileStmtList(fset *token.FileSet, stmts []ast.Stmt) (Code, os.Error) { + if len(stmts) == 1 { + if s, ok := stmts[0].(*ast.ExprStmt); ok { + return w.CompileExpr(fset, s.X) + } + } + errors := new(scanner.ErrorVector) + cc := &compiler{fset, errors, 0, 0} + cb := newCodeBuf() + fc := &funcCompiler{ + compiler: cc, + fnType: nil, + outVarsNamed: false, + codeBuf: cb, + flow: newFlowBuf(cb), + labels: make(map[string]*label), + } + bc := &blockCompiler{ + funcCompiler: fc, + block: w.scope.block, + } + nerr := cc.numError() + for _, stmt := range stmts { + bc.compileStmt(stmt) + } + fc.checkLabels() + if nerr != cc.numError() { + return nil, errors.GetError(scanner.Sorted) + } + return &stmtCode{w, fc.get()}, nil +} + +func (w *World) CompileDeclList(fset *token.FileSet, decls []ast.Decl) (Code, os.Error) { + stmts := make([]ast.Stmt, len(decls)) + for i, d := range decls { + stmts[i] = &ast.DeclStmt{d} + } + return w.CompileStmtList(fset, stmts) +} + +func (s *stmtCode) Type() Type { return nil } + +func (s *stmtCode) Run() (Value, os.Error) { + t := new(Thread) + t.f = s.w.scope.NewFrame(nil) + return nil, t.Try(func(t *Thread) { s.code.exec(t) }) +} + +type exprCode struct { + w *World + e *expr + eval func(Value, *Thread) +} + +func (w *World) 
CompileExpr(fset *token.FileSet, e ast.Expr) (Code, os.Error) { + errors := new(scanner.ErrorVector) + cc := &compiler{fset, errors, 0, 0} + + ec := cc.compileExpr(w.scope.block, false, e) + if ec == nil { + return nil, errors.GetError(scanner.Sorted) + } + var eval func(Value, *Thread) + switch t := ec.t.(type) { + case *idealIntType: + // nothing + case *idealFloatType: + // nothing + default: + if tm, ok := t.(*MultiType); ok && len(tm.Elems) == 0 { + return &stmtCode{w, code{ec.exec}}, nil + } + eval = genAssign(ec.t, ec) + } + return &exprCode{w, ec, eval}, nil +} + +func (e *exprCode) Type() Type { return e.e.t } + +func (e *exprCode) Run() (Value, os.Error) { + t := new(Thread) + t.f = e.w.scope.NewFrame(nil) + switch e.e.t.(type) { + case *idealIntType: + return &idealIntV{e.e.asIdealInt()()}, nil + case *idealFloatType: + return &idealFloatV{e.e.asIdealFloat()()}, nil + } + v := e.e.t.Zero() + eval := e.eval + err := t.Try(func(t *Thread) { eval(v, t) }) + return v, err +} + +func (w *World) Compile(fset *token.FileSet, text string) (Code, os.Error) { + stmts, err := parser.ParseStmtList(fset, "input", text) + if err == nil { + return w.CompileStmtList(fset, stmts) + } + + // Otherwise try as DeclList. + decls, err1 := parser.ParseDeclList(fset, "input", text) + if err1 == nil { + return w.CompileDeclList(fset, decls) + } + + // Have to pick an error. + // Parsing as statement list admits more forms, + // its error is more likely to be useful. 
+ return nil, err +} + +type RedefinitionError struct { + Name string + Prev Def +} + +func (e *RedefinitionError) String() string { + res := "identifier " + e.Name + " redeclared" + pos := e.Prev.Pos() + if pos.IsValid() { + // TODO: fix this - currently this code is not reached by the tests + // need to get a file set (fset) from somewhere + //res += "; previous declaration at " + fset.Position(pos).String() + panic(0) + } + return res +} + +func (w *World) DefineConst(name string, t Type, val Value) os.Error { + _, prev := w.scope.DefineConst(name, token.NoPos, t, val) + if prev != nil { + return &RedefinitionError{name, prev} + } + return nil +} + +func (w *World) DefineVar(name string, t Type, val Value) os.Error { + v, prev := w.scope.DefineVar(name, token.NoPos, t) + if prev != nil { + return &RedefinitionError{name, prev} + } + v.Init = val + return nil +} diff --git a/libgo/go/exp/ogle/abort.go b/libgo/go/exp/ogle/abort.go new file mode 100644 index 000000000..311a7b38e --- /dev/null +++ b/libgo/go/exp/ogle/abort.go @@ -0,0 +1,35 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ogle + +import ( + "os" + "runtime" +) + +// An aborter aborts the thread's current computation, usually +// passing the error to a waiting thread. +type aborter interface { + Abort(err os.Error) +} + +type ogleAborter chan os.Error + +func (a ogleAborter) Abort(err os.Error) { + a <- err + runtime.Goexit() +} + +// try executes a computation; if the computation Aborts, try returns +// the error passed to abort. +func try(f func(a aborter)) os.Error { + a := make(ogleAborter) + go func() { + f(a) + a <- nil + }() + err := <-a + return err +} diff --git a/libgo/go/exp/ogle/arch.go b/libgo/go/exp/ogle/arch.go new file mode 100644 index 000000000..52b1c9757 --- /dev/null +++ b/libgo/go/exp/ogle/arch.go @@ -0,0 +1,125 @@ +// Copyright 2009 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ogle + +import ( + "debug/proc" + "math" +) + +type Arch interface { + // ToWord converts an array of up to 8 bytes in memory order + // to a word. + ToWord(data []byte) proc.Word + // FromWord converts a word to an array of up to 8 bytes in + // memory order. + FromWord(v proc.Word, out []byte) + // ToFloat32 converts a word to a float. The order of this + // word will be the order returned by ToWord on the memory + // representation of a float, and thus may require reversing. + ToFloat32(bits uint32) float32 + // FromFloat32 converts a float to a word. This should return + // a word that can be passed to FromWord to get the memory + // representation of a float on this architecture. + FromFloat32(f float32) uint32 + // ToFloat64 is to float64 as ToFloat32 is to float32. + ToFloat64(bits uint64) float64 + // FromFloat64 is to float64 as FromFloat32 is to float32. + FromFloat64(f float64) uint64 + + // IntSize returns the number of bytes in an 'int'. + IntSize() int + // PtrSize returns the number of bytes in a 'uintptr'. + PtrSize() int + // FloatSize returns the number of bytes in a 'float'. + FloatSize() int + // Align rounds offset up to the appropriate offset for a + // basic type with the given width. + Align(offset, width int) int + + // G returns the current G pointer. + G(regs proc.Regs) proc.Word + + // ClosureSize returns the number of bytes expected by + // ParseClosure. + ClosureSize() int + // ParseClosure takes ClosureSize bytes read from a return PC + // in a remote process, determines if the code is a closure, + // and returns the frame size of the closure if it is. 
+ ParseClosure(data []byte) (frame int, ok bool) +} + +type ArchLSB struct{} + +func (ArchLSB) ToWord(data []byte) proc.Word { + var v proc.Word + for i, b := range data { + v |= proc.Word(b) << (uint(i) * 8) + } + return v +} + +func (ArchLSB) FromWord(v proc.Word, out []byte) { + for i := range out { + out[i] = byte(v) + v >>= 8 + } +} + +func (ArchLSB) ToFloat32(bits uint32) float32 { + // TODO(austin) Do these definitions depend on my current + // architecture? + return math.Float32frombits(bits) +} + +func (ArchLSB) FromFloat32(f float32) uint32 { return math.Float32bits(f) } + +func (ArchLSB) ToFloat64(bits uint64) float64 { return math.Float64frombits(bits) } + +func (ArchLSB) FromFloat64(f float64) uint64 { return math.Float64bits(f) } + +type ArchAlignedMultiple struct{} + +func (ArchAlignedMultiple) Align(offset, width int) int { + return ((offset - 1) | (width - 1)) + 1 +} + +type amd64 struct { + ArchLSB + ArchAlignedMultiple + gReg int +} + +func (a *amd64) IntSize() int { return 4 } + +func (a *amd64) PtrSize() int { return 8 } + +func (a *amd64) FloatSize() int { return 4 } + +func (a *amd64) G(regs proc.Regs) proc.Word { + // See src/pkg/runtime/mkasmh + if a.gReg == -1 { + ns := regs.Names() + for i, n := range ns { + if n == "r15" { + a.gReg = i + break + } + } + } + + return regs.Get(a.gReg) +} + +func (a *amd64) ClosureSize() int { return 8 } + +func (a *amd64) ParseClosure(data []byte) (int, bool) { + if data[0] == 0x48 && data[1] == 0x81 && data[2] == 0xc4 && data[7] == 0xc3 { + return int(a.ToWord(data[3:7]) + 8), true + } + return 0, false +} + +var Amd64 = &amd64{gReg: -1} diff --git a/libgo/go/exp/ogle/cmd.go b/libgo/go/exp/ogle/cmd.go new file mode 100644 index 000000000..4f67032d0 --- /dev/null +++ b/libgo/go/exp/ogle/cmd.go @@ -0,0 +1,373 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Ogle is the beginning of a debugger for Go. +package ogle + +import ( + "bufio" + "debug/elf" + "debug/proc" + "exp/eval" + "fmt" + "go/scanner" + "go/token" + "os" + "strconv" + "strings" +) + +var fset = token.NewFileSet() +var world *eval.World +var curProc *Process + +func Main() { + world = eval.NewWorld() + defineFuncs() + r := bufio.NewReader(os.Stdin) + for { + print("; ") + line, err := r.ReadSlice('\n') + if err != nil { + break + } + + // Try line as a command + cmd, rest := getCmd(line) + if cmd != nil { + err := cmd.handler(rest) + if err != nil { + scanner.PrintError(os.Stderr, err) + } + continue + } + + // Try line as code + code, err := world.Compile(fset, string(line)) + if err != nil { + scanner.PrintError(os.Stderr, err) + continue + } + v, err := code.Run() + if err != nil { + fmt.Fprintf(os.Stderr, err.String()) + continue + } + if v != nil { + println(v.String()) + } + } +} + +// newScanner creates a new scanner that scans that given input bytes. +func newScanner(input []byte) (*scanner.Scanner, *scanner.ErrorVector) { + sc := new(scanner.Scanner) + ev := new(scanner.ErrorVector) + file := fset.AddFile("input", fset.Base(), len(input)) + sc.Init(file, input, ev, 0) + return sc, ev +} + +/* + * Commands + */ + +// A UsageError occurs when a command is called with illegal arguments. +type UsageError string + +func (e UsageError) String() string { return string(e) } + +// A cmd represents a single command with a handler. +type cmd struct { + cmd string + handler func([]byte) os.Error +} + +var cmds = []cmd{ + {"load", cmdLoad}, + {"bt", cmdBt}, +} + +// getCmd attempts to parse an input line as a registered command. If +// successful, it returns the command and the bytes remaining after +// the command, which should be passed to the command. 
+func getCmd(line []byte) (*cmd, []byte) { + sc, _ := newScanner(line) + pos, tok, lit := sc.Scan() + if sc.ErrorCount != 0 || tok != token.IDENT { + return nil, nil + } + + slit := string(lit) + for i := range cmds { + if cmds[i].cmd == slit { + return &cmds[i], line[fset.Position(pos).Offset+len(lit):] + } + } + return nil, nil +} + +// cmdLoad starts or attaches to a process. Its form is similar to +// import: +// +// load [sym] "path" [;] +// +// sym specifies the name to give to the process. If not given, the +// name is derived from the path of the process. If ".", then the +// packages from the remote process are defined into the current +// namespace. If given, this symbol is defined as a package +// containing the process' packages. +// +// path gives the path of the process to start or attach to. If it is +// "pid:<num>", then attach to the given PID. Otherwise, treat it as +// a file path and space-separated arguments and start a new process. +// +// load always sets the current process to the loaded process. +func cmdLoad(args []byte) os.Error { + ident, path, err := parseLoad(args) + if err != nil { + return err + } + if curProc != nil { + return UsageError("multiple processes not implemented") + } + if ident != "." 
{ + return UsageError("process identifiers not implemented") + } + + // Parse argument and start or attach to process + var fname string + var tproc proc.Process + if len(path) >= 4 && path[0:4] == "pid:" { + pid, err := strconv.Atoi(path[4:]) + if err != nil { + return err + } + fname, err = os.Readlink(fmt.Sprintf("/proc/%d/exe", pid)) + if err != nil { + return err + } + tproc, err = proc.Attach(pid) + if err != nil { + return err + } + println("Attached to", pid) + } else { + parts := strings.Split(path, " ", -1) + if len(parts) == 0 { + fname = "" + } else { + fname = parts[0] + } + tproc, err = proc.ForkExec(fname, parts, os.Environ(), "", []*os.File{os.Stdin, os.Stdout, os.Stderr}) + if err != nil { + return err + } + println("Started", path) + // TODO(austin) If we fail after this point, kill tproc + // before detaching. + } + + // Get symbols + f, err := os.Open(fname, os.O_RDONLY, 0) + if err != nil { + tproc.Detach() + return err + } + defer f.Close() + elf, err := elf.NewFile(f) + if err != nil { + tproc.Detach() + return err + } + curProc, err = NewProcessElf(tproc, elf) + if err != nil { + tproc.Detach() + return err + } + + // Prepare new process + curProc.OnGoroutineCreate().AddHandler(EventPrint) + curProc.OnGoroutineExit().AddHandler(EventPrint) + + err = curProc.populateWorld(world) + if err != nil { + tproc.Detach() + return err + } + + return nil +} + +func parseLoad(args []byte) (ident string, path string, err os.Error) { + err = UsageError("Usage: load [sym] \"path\"") + sc, ev := newScanner(args) + + var toks [4]token.Token + var lits [4][]byte + for i := range toks { + _, toks[i], lits[i] = sc.Scan() + } + if sc.ErrorCount != 0 { + err = ev.GetError(scanner.NoMultiples) + return + } + + i := 0 + switch toks[i] { + case token.PERIOD, token.IDENT: + ident = string(lits[i]) + i++ + } + + if toks[i] != token.STRING { + return + } + path, uerr := strconv.Unquote(string(lits[i])) + if uerr != nil { + err = uerr + return + } + i++ + + if toks[i] 
== token.SEMICOLON { + i++ + } + if toks[i] != token.EOF { + return + } + + return ident, path, nil +} + +// cmdBt prints a backtrace for the current goroutine. It takes no +// arguments. +func cmdBt(args []byte) os.Error { + err := parseNoArgs(args, "Usage: bt") + if err != nil { + return err + } + + if curProc == nil || curProc.curGoroutine == nil { + return NoCurrentGoroutine{} + } + + f := curProc.curGoroutine.frame + if f == nil { + fmt.Println("No frames on stack") + return nil + } + + for f.Inner() != nil { + f = f.Inner() + } + + for i := 0; i < 100; i++ { + if f == curProc.curGoroutine.frame { + fmt.Printf("=> ") + } else { + fmt.Printf(" ") + } + fmt.Printf("%8x %v\n", f.pc, f) + f, err = f.Outer() + if err != nil { + return err + } + if f == nil { + return nil + } + } + + fmt.Println("...") + return nil +} + +func parseNoArgs(args []byte, usage string) os.Error { + sc, ev := newScanner(args) + _, tok, _ := sc.Scan() + if sc.ErrorCount != 0 { + return ev.GetError(scanner.NoMultiples) + } + if tok != token.EOF { + return UsageError(usage) + } + return nil +} + +/* + * Functions + */ + +// defineFuncs populates world with the built-in functions. +func defineFuncs() { + t, v := eval.FuncFromNativeTyped(fnOut, fnOutSig) + world.DefineConst("Out", t, v) + t, v = eval.FuncFromNativeTyped(fnContWait, fnContWaitSig) + world.DefineConst("ContWait", t, v) + t, v = eval.FuncFromNativeTyped(fnBpSet, fnBpSetSig) + world.DefineConst("BpSet", t, v) +} + +// printCurFrame prints the current stack frame, as it would appear in +// a backtrace. +func printCurFrame() { + if curProc == nil || curProc.curGoroutine == nil { + return + } + f := curProc.curGoroutine.frame + if f == nil { + return + } + fmt.Printf("=> %8x %v\n", f.pc, f) +} + +// fnOut moves the current frame to the caller of the current frame. 
+func fnOutSig() {} +func fnOut(t *eval.Thread, args []eval.Value, res []eval.Value) { + if curProc == nil { + t.Abort(NoCurrentGoroutine{}) + } + err := curProc.Out() + if err != nil { + t.Abort(err) + } + // TODO(austin) Only in the command form + printCurFrame() +} + +// fnContWait continues the current process and waits for a stopping event. +func fnContWaitSig() {} +func fnContWait(t *eval.Thread, args []eval.Value, res []eval.Value) { + if curProc == nil { + t.Abort(NoCurrentGoroutine{}) + } + err := curProc.ContWait() + if err != nil { + t.Abort(err) + } + // TODO(austin) Only in the command form + ev := curProc.Event() + if ev != nil { + fmt.Printf("%v\n", ev) + } + printCurFrame() +} + +// fnBpSet sets a breakpoint at the entry to the named function. +func fnBpSetSig(string) {} +func fnBpSet(t *eval.Thread, args []eval.Value, res []eval.Value) { + // TODO(austin) This probably shouldn't take a symbol name. + // Perhaps it should take an interface that provides PC's. + // Functions and instructions can implement that interface and + // we can have something to translate file:line pairs. + if curProc == nil { + t.Abort(NoCurrentGoroutine{}) + } + name := args[0].(eval.StringValue).Get(t) + fn := curProc.syms.LookupFunc(name) + if fn == nil { + t.Abort(UsageError("no such function " + name)) + } + curProc.OnBreakpoint(proc.Word(fn.Entry)).AddHandler(EventStop) +} diff --git a/libgo/go/exp/ogle/event.go b/libgo/go/exp/ogle/event.go new file mode 100644 index 000000000..d7092ded3 --- /dev/null +++ b/libgo/go/exp/ogle/event.go @@ -0,0 +1,280 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ogle + +import ( + "debug/proc" + "fmt" + "os" +) + +/* + * Hooks and events + */ + +// An EventHandler is a function that takes an event and returns a +// response to that event and possibly an error. 
If an event handler +// returns an error, the process stops and no other handlers for that +// event are executed. +type EventHandler func(e Event) (EventAction, os.Error) + +// An EventAction is an event handler's response to an event. If all +// of an event's handlers execute without returning errors, their +// results are combined as follows: If any handler returned +// EAContinue, then the process resumes (without returning from +// WaitStop); otherwise, if any handler returned EAStop, the process +// remains stopped; otherwise, if all handlers returned EADefault, the +// process resumes. A handler may return EARemoveSelf bit-wise or'd +// with any other action to indicate that the handler should be +// removed from the hook. +type EventAction int + +const ( + EARemoveSelf EventAction = 0x100 + EADefault EventAction = iota + EAStop + EAContinue +) + +// A EventHook allows event handlers to be added and removed. +type EventHook interface { + AddHandler(EventHandler) + RemoveHandler(EventHandler) + NumHandler() int + handle(e Event) (EventAction, os.Error) + String() string +} + +// EventHook is almost, but not quite, suitable for user-defined +// events. If we want user-defined events, make EventHook a struct, +// special-case adding and removing handlers in breakpoint hooks, and +// provide a public interface for posting events to hooks. + +type Event interface { + Process() *Process + Goroutine() *Goroutine + String() string +} + +type commonHook struct { + // Head of handler chain + head *handler + // Number of non-internal handlers + len int +} + +type handler struct { + eh EventHandler + // True if this handler must be run before user-defined + // handlers in order to ensure correctness. + internal bool + // True if this handler has been removed from the chain. 
+ removed bool + next *handler +} + +func (h *commonHook) AddHandler(eh EventHandler) { + h.addHandler(eh, false) +} + +func (h *commonHook) addHandler(eh EventHandler, internal bool) { + // Ensure uniqueness of handlers + h.RemoveHandler(eh) + + if !internal { + h.len++ + } + // Add internal handlers to the beginning + if internal || h.head == nil { + h.head = &handler{eh, internal, false, h.head} + return + } + // Add handler after internal handlers + // TODO(austin) This should probably go on the end instead + prev := h.head + for prev.next != nil && prev.internal { + prev = prev.next + } + prev.next = &handler{eh, internal, false, prev.next} +} + +func (h *commonHook) RemoveHandler(eh EventHandler) { + plink := &h.head + for l := *plink; l != nil; plink, l = &l.next, l.next { + if l.eh == eh { + if !l.internal { + h.len-- + } + l.removed = true + *plink = l.next + break + } + } +} + +func (h *commonHook) NumHandler() int { return h.len } + +func (h *commonHook) handle(e Event) (EventAction, os.Error) { + action := EADefault + plink := &h.head + for l := *plink; l != nil; plink, l = &l.next, l.next { + if l.removed { + continue + } + a, err := l.eh(e) + if a&EARemoveSelf == EARemoveSelf { + if !l.internal { + h.len-- + } + l.removed = true + *plink = l.next + a &^= EARemoveSelf + } + if err != nil { + return EAStop, err + } + if a > action { + action = a + } + } + return action, nil +} + +type commonEvent struct { + // The process of this event + p *Process + // The goroutine of this event. + t *Goroutine +} + +func (e *commonEvent) Process() *Process { return e.p } + +func (e *commonEvent) Goroutine() *Goroutine { return e.t } + +/* + * Standard event handlers + */ + +// EventPrint is a standard event handler that prints events as they +// occur. It will not cause the process to stop. +func EventPrint(ev Event) (EventAction, os.Error) { + // TODO(austin) Include process name here? 
+ fmt.Fprintf(os.Stderr, "*** %v\n", ev.String()) + return EADefault, nil +} + +// EventStop is a standard event handler that causes the process to stop. +func EventStop(ev Event) (EventAction, os.Error) { + return EAStop, nil +} + +/* + * Breakpoints + */ + +type breakpointHook struct { + commonHook + p *Process + pc proc.Word +} + +// A Breakpoint event occurs when a process reaches a particular +// program counter. When this event is handled, the current goroutine +// will be the goroutine that reached the program counter. +type Breakpoint struct { + commonEvent + osThread proc.Thread + pc proc.Word +} + +func (h *breakpointHook) AddHandler(eh EventHandler) { + h.addHandler(eh, false) +} + +func (h *breakpointHook) addHandler(eh EventHandler, internal bool) { + // We register breakpoint events lazily to avoid holding + // references to breakpoints without handlers. Be sure to use + // the "canonical" breakpoint if there is one. + if cur, ok := h.p.breakpointHooks[h.pc]; ok { + h = cur + } + oldhead := h.head + h.commonHook.addHandler(eh, internal) + if oldhead == nil && h.head != nil { + h.p.proc.AddBreakpoint(h.pc) + h.p.breakpointHooks[h.pc] = h + } +} + +func (h *breakpointHook) RemoveHandler(eh EventHandler) { + oldhead := h.head + h.commonHook.RemoveHandler(eh) + if oldhead != nil && h.head == nil { + h.p.proc.RemoveBreakpoint(h.pc) + h.p.breakpointHooks[h.pc] = nil, false + } +} + +func (h *breakpointHook) String() string { + // TODO(austin) Include process name? 
+ // TODO(austin) Use line:pc or at least sym+%#x + return fmt.Sprintf("breakpoint at %#x", h.pc) +} + +func (b *Breakpoint) PC() proc.Word { return b.pc } + +func (b *Breakpoint) String() string { + // TODO(austin) Include process name and goroutine + // TODO(austin) Use line:pc or at least sym+%#x + return fmt.Sprintf("breakpoint at %#x", b.pc) +} + +/* + * Goroutine create/exit + */ + +type goroutineCreateHook struct { + commonHook +} + +func (h *goroutineCreateHook) String() string { return "goroutine create" } + +// A GoroutineCreate event occurs when a process creates a new +// goroutine. When this event is handled, the current goroutine will +// be the newly created goroutine. +type GoroutineCreate struct { + commonEvent + parent *Goroutine +} + +// Parent returns the goroutine that created this goroutine. May be +// nil if this event is the creation of the first goroutine. +func (e *GoroutineCreate) Parent() *Goroutine { return e.parent } + +func (e *GoroutineCreate) String() string { + // TODO(austin) Include process name + if e.parent == nil { + return fmt.Sprintf("%v created", e.t) + } + return fmt.Sprintf("%v created by %v", e.t, e.parent) +} + +type goroutineExitHook struct { + commonHook +} + +func (h *goroutineExitHook) String() string { return "goroutine exit" } + +// A GoroutineExit event occurs when a Go goroutine exits. +type GoroutineExit struct { + commonEvent +} + +func (e *GoroutineExit) String() string { + // TODO(austin) Include process name + //return fmt.Sprintf("%v exited", e.t); + // For debugging purposes + return fmt.Sprintf("goroutine %#x exited", e.t.g.addr().base) +} diff --git a/libgo/go/exp/ogle/frame.go b/libgo/go/exp/ogle/frame.go new file mode 100644 index 000000000..1538362ba --- /dev/null +++ b/libgo/go/exp/ogle/frame.go @@ -0,0 +1,212 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ogle + +import ( + "debug/gosym" + "debug/proc" + "fmt" + "os" +) + +// A Frame represents a single frame on a remote call stack. +type Frame struct { + // pc is the PC of the next instruction that will execute in + // this frame. For lower frames, this is the instruction + // following the CALL instruction. + pc, sp, fp proc.Word + // The runtime.Stktop of the active stack segment + stk remoteStruct + // The function this stack frame is in + fn *gosym.Func + // The path and line of the CALL or current instruction. Note + // that this differs slightly from the meaning of Frame.pc. + path string + line int + // The inner and outer frames of this frame. outer is filled + // in lazily. + inner, outer *Frame +} + +// newFrame returns the top-most Frame of the given g's thread. +func newFrame(g remoteStruct) (*Frame, os.Error) { + var f *Frame + err := try(func(a aborter) { f = aNewFrame(a, g) }) + return f, err +} + +func aNewFrame(a aborter, g remoteStruct) *Frame { + p := g.r.p + var pc, sp proc.Word + + // Is this G alive? + switch g.field(p.f.G.Status).(remoteInt).aGet(a) { + case p.runtime.Gidle, p.runtime.Gmoribund, p.runtime.Gdead: + return nil + } + + // Find the OS thread for this G + + // TODO(austin) Ideally, we could look at the G's state and + // figure out if it's on an OS thread or not. However, this + // is difficult because the state isn't updated atomically + // with scheduling changes. + for _, t := range p.proc.Threads() { + regs, err := t.Regs() + if err != nil { + // TODO(austin) What to do? + continue + } + thisg := p.G(regs) + if thisg == g.addr().base { + // Found this G's OS thread + pc = regs.PC() + sp = regs.SP() + + // If this thread crashed, try to recover it + if pc == 0 { + pc = p.peekUintptr(a, pc) + sp += 8 + } + + break + } + } + + if pc == 0 && sp == 0 { + // G is not mapped to an OS thread. Use the + // scheduler's stored PC and SP. 
+ sched := g.field(p.f.G.Sched).(remoteStruct) + pc = proc.Word(sched.field(p.f.Gobuf.Pc).(remoteUint).aGet(a)) + sp = proc.Word(sched.field(p.f.Gobuf.Sp).(remoteUint).aGet(a)) + } + + // Get Stktop + stk := g.field(p.f.G.Stackbase).(remotePtr).aGet(a).(remoteStruct) + + return prepareFrame(a, pc, sp, stk, nil) +} + +// prepareFrame creates a Frame from the PC and SP within that frame, +// as well as the active stack segment. This function takes care of +// traversing stack breaks and unwinding closures. +func prepareFrame(a aborter, pc, sp proc.Word, stk remoteStruct, inner *Frame) *Frame { + // Based on src/pkg/runtime/amd64/traceback.c:traceback + p := stk.r.p + top := inner == nil + + // Get function + var path string + var line int + var fn *gosym.Func + + for i := 0; i < 100; i++ { + // Traverse segmented stack breaks + if p.sys.lessstack != nil && pc == proc.Word(p.sys.lessstack.Value) { + // Get stk->gobuf.pc + pc = proc.Word(stk.field(p.f.Stktop.Gobuf).(remoteStruct).field(p.f.Gobuf.Pc).(remoteUint).aGet(a)) + // Get stk->gobuf.sp + sp = proc.Word(stk.field(p.f.Stktop.Gobuf).(remoteStruct).field(p.f.Gobuf.Sp).(remoteUint).aGet(a)) + // Get stk->stackbase + stk = stk.field(p.f.Stktop.Stackbase).(remotePtr).aGet(a).(remoteStruct) + continue + } + + // Get the PC of the call instruction + callpc := pc + if !top && (p.sys.goexit == nil || pc != proc.Word(p.sys.goexit.Value)) { + callpc-- + } + + // Look up function + path, line, fn = p.syms.PCToLine(uint64(callpc)) + if fn != nil { + break + } + + // Closure? 
+ var buf = make([]byte, p.ClosureSize()) + if _, err := p.Peek(pc, buf); err != nil { + break + } + spdelta, ok := p.ParseClosure(buf) + if ok { + sp += proc.Word(spdelta) + pc = p.peekUintptr(a, sp-proc.Word(p.PtrSize())) + } + } + if fn == nil { + return nil + } + + // Compute frame pointer + var fp proc.Word + if fn.FrameSize < p.PtrSize() { + fp = sp + proc.Word(p.PtrSize()) + } else { + fp = sp + proc.Word(fn.FrameSize) + } + // TODO(austin) To really figure out if we're in the prologue, + // we need to disassemble the function and look for the call + // to morestack. For now, just special case the entry point. + // + // TODO(austin) What if we're in the call to morestack in the + // prologue? Then top == false. + if top && pc == proc.Word(fn.Entry) { + // We're in the function prologue, before SP + // has been adjusted for the frame. + fp -= proc.Word(fn.FrameSize - p.PtrSize()) + } + + return &Frame{pc, sp, fp, stk, fn, path, line, inner, nil} +} + +// Outer returns the Frame that called this Frame, or nil if this is +// the outermost frame. +func (f *Frame) Outer() (*Frame, os.Error) { + var fr *Frame + err := try(func(a aborter) { fr = f.aOuter(a) }) + return fr, err +} + +func (f *Frame) aOuter(a aborter) *Frame { + // Is there a cached outer frame + if f.outer != nil { + return f.outer + } + + p := f.stk.r.p + + sp := f.fp + if f.fn == p.sys.newproc && f.fn == p.sys.deferproc { + // TODO(rsc) The compiler inserts two push/pop's + // around calls to go and defer. Russ says this + // should get fixed in the compiler, but we account + // for it for now. + sp += proc.Word(2 * p.PtrSize()) + } + + pc := p.peekUintptr(a, f.fp-proc.Word(p.PtrSize())) + if pc < 0x1000 { + return nil + } + + // TODO(austin) Register this frame for shoot-down. + + f.outer = prepareFrame(a, pc, sp, f.stk, f) + return f.outer +} + +// Inner returns the Frame called by this Frame, or nil if this is the +// innermost frame. 
+func (f *Frame) Inner() *Frame { return f.inner } + +func (f *Frame) String() string { + res := f.fn.Name + if f.pc > proc.Word(f.fn.Value) { + res += fmt.Sprintf("+%#x", f.pc-proc.Word(f.fn.Entry)) + } + return res + fmt.Sprintf(" %s:%d", f.path, f.line) +} diff --git a/libgo/go/exp/ogle/goroutine.go b/libgo/go/exp/ogle/goroutine.go new file mode 100644 index 000000000..5104ec6d4 --- /dev/null +++ b/libgo/go/exp/ogle/goroutine.go @@ -0,0 +1,117 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ogle + +import ( + "debug/proc" + "exp/eval" + "fmt" + "os" +) + +// A Goroutine represents a goroutine in a remote process. +type Goroutine struct { + g remoteStruct + frame *Frame + dead bool +} + +func (t *Goroutine) String() string { + if t.dead { + return "<dead thread>" + } + // TODO(austin) Give threads friendly ID's, possibly including + // the name of the entry function. + return fmt.Sprintf("thread %#x", t.g.addr().base) +} + +// isG0 returns true if this thread if the internal idle thread +func (t *Goroutine) isG0() bool { return t.g.addr().base == t.g.r.p.sys.g0.addr().base } + +func (t *Goroutine) resetFrame() (err os.Error) { + // TODO(austin) Reuse any live part of the current frame stack + // so existing references to Frame's keep working. + t.frame, err = newFrame(t.g) + return +} + +// Out selects the caller frame of the current frame. +func (t *Goroutine) Out() os.Error { + f, err := t.frame.Outer() + if f != nil { + t.frame = f + } + return err +} + +// In selects the frame called by the current frame. +func (t *Goroutine) In() os.Error { + f := t.frame.Inner() + if f != nil { + t.frame = f + } + return nil +} + +func readylockedBP(ev Event) (EventAction, os.Error) { + b := ev.(*Breakpoint) + p := b.Process() + + // The new g is the only argument to this function, so the + // stack will have the return address, then the G*. 
+ regs, err := b.osThread.Regs() + if err != nil { + return EAStop, err + } + sp := regs.SP() + addr := sp + proc.Word(p.PtrSize()) + arg := remotePtr{remote{addr, p}, p.runtime.G} + var gp eval.Value + err = try(func(a aborter) { gp = arg.aGet(a) }) + if err != nil { + return EAStop, err + } + if gp == nil { + return EAStop, UnknownGoroutine{b.osThread, 0} + } + gs := gp.(remoteStruct) + g := &Goroutine{gs, nil, false} + p.goroutines[gs.addr().base] = g + + // Enqueue goroutine creation event + parent := b.Goroutine() + if parent.isG0() { + parent = nil + } + p.postEvent(&GoroutineCreate{commonEvent{p, g}, parent}) + + // If we don't have any thread selected, select this one + if p.curGoroutine == nil { + p.curGoroutine = g + } + + return EADefault, nil +} + +func goexitBP(ev Event) (EventAction, os.Error) { + b := ev.(*Breakpoint) + p := b.Process() + + g := b.Goroutine() + g.dead = true + + addr := g.g.addr().base + p.goroutines[addr] = nil, false + + // Enqueue thread exit event + p.postEvent(&GoroutineExit{commonEvent{p, g}}) + + // If we just exited our selected goroutine, selected another + if p.curGoroutine == g { + p.selectSomeGoroutine() + } + + return EADefault, nil +} diff --git a/libgo/go/exp/ogle/main.go b/libgo/go/exp/ogle/main.go new file mode 100644 index 000000000..1999eccca --- /dev/null +++ b/libgo/go/exp/ogle/main.go @@ -0,0 +1,9 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "exp/ogle" + +func main() { ogle.Main() } diff --git a/libgo/go/exp/ogle/process.go b/libgo/go/exp/ogle/process.go new file mode 100644 index 000000000..58e830aa6 --- /dev/null +++ b/libgo/go/exp/ogle/process.go @@ -0,0 +1,521 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ogle + +import ( + "debug/elf" + "debug/gosym" + "debug/proc" + "exp/eval" + "fmt" + "log" + "os" + "reflect" +) + +// A FormatError indicates a failure to process information in or +// about a remote process, such as unexpected or missing information +// in the object file or runtime structures. +type FormatError string + +func (e FormatError) String() string { return string(e) } + +// An UnknownArchitecture occurs when trying to load an object file +// that indicates an architecture not supported by the debugger. +type UnknownArchitecture elf.Machine + +func (e UnknownArchitecture) String() string { + return "unknown architecture: " + elf.Machine(e).String() +} + +// A ProcessNotStopped error occurs when attempting to read or write +// memory or registers of a process that is not stopped. +type ProcessNotStopped struct{} + +func (e ProcessNotStopped) String() string { return "process not stopped" } + +// An UnknownGoroutine error is an internal error representing an +// unrecognized G structure pointer. +type UnknownGoroutine struct { + OSThread proc.Thread + Goroutine proc.Word +} + +func (e UnknownGoroutine) String() string { + return fmt.Sprintf("internal error: unknown goroutine (G %#x)", e.Goroutine) +} + +// A NoCurrentGoroutine error occurs when no goroutine is currently +// selected in a process (or when there are no goroutines in a +// process). +type NoCurrentGoroutine struct{} + +func (e NoCurrentGoroutine) String() string { return "no current goroutine" } + +// A Process represents a remote attached process. 
+type Process struct { + Arch + proc proc.Process + + // The symbol table of this process + syms *gosym.Table + + // A possibly-stopped OS thread, or nil + threadCache proc.Thread + + // Types parsed from the remote process + types map[proc.Word]*remoteType + + // Types and values from the remote runtime package + runtime runtimeValues + + // Runtime field indexes + f runtimeIndexes + + // Globals from the sys package (or from no package) + sys struct { + lessstack, goexit, newproc, deferproc, newprocreadylocked *gosym.Func + allg remotePtr + g0 remoteStruct + } + + // Event queue + posted []Event + pending []Event + event Event + + // Event hooks + breakpointHooks map[proc.Word]*breakpointHook + goroutineCreateHook *goroutineCreateHook + goroutineExitHook *goroutineExitHook + + // Current goroutine, or nil if there are no goroutines + curGoroutine *Goroutine + + // Goroutines by the address of their G structure + goroutines map[proc.Word]*Goroutine +} + +/* + * Process creation + */ + +// NewProcess constructs a new remote process around a traced +// process, an architecture, and a symbol table. 
+func NewProcess(tproc proc.Process, arch Arch, syms *gosym.Table) (*Process, os.Error) { + p := &Process{ + Arch: arch, + proc: tproc, + syms: syms, + types: make(map[proc.Word]*remoteType), + breakpointHooks: make(map[proc.Word]*breakpointHook), + goroutineCreateHook: new(goroutineCreateHook), + goroutineExitHook: new(goroutineExitHook), + goroutines: make(map[proc.Word]*Goroutine), + } + + // Fill in remote runtime + p.bootstrap() + + switch { + case p.sys.allg.addr().base == 0: + return nil, FormatError("failed to find runtime symbol 'allg'") + case p.sys.g0.addr().base == 0: + return nil, FormatError("failed to find runtime symbol 'g0'") + case p.sys.newprocreadylocked == nil: + return nil, FormatError("failed to find runtime symbol 'newprocreadylocked'") + case p.sys.goexit == nil: + return nil, FormatError("failed to find runtime symbol 'sys.goexit'") + } + + // Get current goroutines + p.goroutines[p.sys.g0.addr().base] = &Goroutine{p.sys.g0, nil, false} + err := try(func(a aborter) { + g := p.sys.allg.aGet(a) + for g != nil { + gs := g.(remoteStruct) + fmt.Printf("*** Found goroutine at %#x\n", gs.addr().base) + p.goroutines[gs.addr().base] = &Goroutine{gs, nil, false} + g = gs.field(p.f.G.Alllink).(remotePtr).aGet(a) + } + }) + if err != nil { + return nil, err + } + + // Create internal breakpoints to catch new and exited goroutines + p.OnBreakpoint(proc.Word(p.sys.newprocreadylocked.Entry)).(*breakpointHook).addHandler(readylockedBP, true) + p.OnBreakpoint(proc.Word(p.sys.goexit.Entry)).(*breakpointHook).addHandler(goexitBP, true) + + // Select current frames + for _, g := range p.goroutines { + g.resetFrame() + } + + p.selectSomeGoroutine() + + return p, nil +} + +func elfGoSyms(f *elf.File) (*gosym.Table, os.Error) { + text := f.Section(".text") + symtab := f.Section(".gosymtab") + pclntab := f.Section(".gopclntab") + if text == nil || symtab == nil || pclntab == nil { + return nil, nil + } + + symdat, err := symtab.Data() + if err != nil { + return 
nil, err + } + pclndat, err := pclntab.Data() + if err != nil { + return nil, err + } + + pcln := gosym.NewLineTable(pclndat, text.Addr) + tab, err := gosym.NewTable(symdat, pcln) + if err != nil { + return nil, err + } + + return tab, nil +} + +// NewProcessElf constructs a new remote process around a traced +// process and the process' ELF object. +func NewProcessElf(tproc proc.Process, f *elf.File) (*Process, os.Error) { + syms, err := elfGoSyms(f) + if err != nil { + return nil, err + } + if syms == nil { + return nil, FormatError("Failed to find symbol table") + } + var arch Arch + switch f.Machine { + case elf.EM_X86_64: + arch = Amd64 + default: + return nil, UnknownArchitecture(f.Machine) + } + return NewProcess(tproc, arch, syms) +} + +// bootstrap constructs the runtime structure of a remote process. +func (p *Process) bootstrap() { + // Manually construct runtime types + p.runtime.String = newManualType(eval.TypeOfNative(rt1String{}), p.Arch) + p.runtime.Slice = newManualType(eval.TypeOfNative(rt1Slice{}), p.Arch) + p.runtime.Eface = newManualType(eval.TypeOfNative(rt1Eface{}), p.Arch) + + p.runtime.Type = newManualType(eval.TypeOfNative(rt1Type{}), p.Arch) + p.runtime.CommonType = newManualType(eval.TypeOfNative(rt1CommonType{}), p.Arch) + p.runtime.UncommonType = newManualType(eval.TypeOfNative(rt1UncommonType{}), p.Arch) + p.runtime.StructField = newManualType(eval.TypeOfNative(rt1StructField{}), p.Arch) + p.runtime.StructType = newManualType(eval.TypeOfNative(rt1StructType{}), p.Arch) + p.runtime.PtrType = newManualType(eval.TypeOfNative(rt1PtrType{}), p.Arch) + p.runtime.ArrayType = newManualType(eval.TypeOfNative(rt1ArrayType{}), p.Arch) + p.runtime.SliceType = newManualType(eval.TypeOfNative(rt1SliceType{}), p.Arch) + + p.runtime.Stktop = newManualType(eval.TypeOfNative(rt1Stktop{}), p.Arch) + p.runtime.Gobuf = newManualType(eval.TypeOfNative(rt1Gobuf{}), p.Arch) + p.runtime.G = newManualType(eval.TypeOfNative(rt1G{}), p.Arch) + + // Get addresses 
of type.*runtime.XType for discrimination. + rtv := reflect.Indirect(reflect.NewValue(&p.runtime)).(*reflect.StructValue) + rtvt := rtv.Type().(*reflect.StructType) + for i := 0; i < rtv.NumField(); i++ { + n := rtvt.Field(i).Name + if n[0] != 'P' || n[1] < 'A' || n[1] > 'Z' { + continue + } + sym := p.syms.LookupSym("type.*runtime." + n[1:]) + if sym == nil { + continue + } + rtv.Field(i).(*reflect.UintValue).Set(sym.Value) + } + + // Get runtime field indexes + fillRuntimeIndexes(&p.runtime, &p.f) + + // Fill G status + p.runtime.runtimeGStatus = rt1GStatus + + // Get globals + p.sys.lessstack = p.syms.LookupFunc("sys.lessstack") + p.sys.goexit = p.syms.LookupFunc("goexit") + p.sys.newproc = p.syms.LookupFunc("sys.newproc") + p.sys.deferproc = p.syms.LookupFunc("sys.deferproc") + p.sys.newprocreadylocked = p.syms.LookupFunc("newprocreadylocked") + if allg := p.syms.LookupSym("allg"); allg != nil { + p.sys.allg = remotePtr{remote{proc.Word(allg.Value), p}, p.runtime.G} + } + if g0 := p.syms.LookupSym("g0"); g0 != nil { + p.sys.g0 = p.runtime.G.mk(remote{proc.Word(g0.Value), p}).(remoteStruct) + } +} + +func (p *Process) selectSomeGoroutine() { + // Once we have friendly goroutine ID's, there might be a more + // reasonable behavior for this. 
+ p.curGoroutine = nil + for _, g := range p.goroutines { + if !g.isG0() && g.frame != nil { + p.curGoroutine = g + return + } + } +} + +/* + * Process memory + */ + +func (p *Process) someStoppedOSThread() proc.Thread { + if p.threadCache != nil { + if _, err := p.threadCache.Stopped(); err == nil { + return p.threadCache + } + } + + for _, t := range p.proc.Threads() { + if _, err := t.Stopped(); err == nil { + p.threadCache = t + return t + } + } + return nil +} + +func (p *Process) Peek(addr proc.Word, out []byte) (int, os.Error) { + thr := p.someStoppedOSThread() + if thr == nil { + return 0, ProcessNotStopped{} + } + return thr.Peek(addr, out) +} + +func (p *Process) Poke(addr proc.Word, b []byte) (int, os.Error) { + thr := p.someStoppedOSThread() + if thr == nil { + return 0, ProcessNotStopped{} + } + return thr.Poke(addr, b) +} + +func (p *Process) peekUintptr(a aborter, addr proc.Word) proc.Word { + return proc.Word(mkUintptr(remote{addr, p}).(remoteUint).aGet(a)) +} + +/* + * Events + */ + +// OnBreakpoint returns the hook that is run when the program reaches +// the given program counter. +func (p *Process) OnBreakpoint(pc proc.Word) EventHook { + if bp, ok := p.breakpointHooks[pc]; ok { + return bp + } + // The breakpoint will register itself when a handler is added + return &breakpointHook{commonHook{nil, 0}, p, pc} +} + +// OnGoroutineCreate returns the hook that is run when a goroutine is created. +func (p *Process) OnGoroutineCreate() EventHook { + return p.goroutineCreateHook +} + +// OnGoroutineExit returns the hook that is run when a goroutine exits. +func (p *Process) OnGoroutineExit() EventHook { return p.goroutineExitHook } + +// osThreadToGoroutine looks up the goroutine running on an OS thread. 
+func (p *Process) osThreadToGoroutine(t proc.Thread) (*Goroutine, os.Error) { + regs, err := t.Regs() + if err != nil { + return nil, err + } + g := p.G(regs) + gt, ok := p.goroutines[g] + if !ok { + return nil, UnknownGoroutine{t, g} + } + return gt, nil +} + +// causesToEvents translates the stop causes of the underlying process +// into an event queue. +func (p *Process) causesToEvents() ([]Event, os.Error) { + // Count causes we're interested in + nev := 0 + for _, t := range p.proc.Threads() { + if c, err := t.Stopped(); err == nil { + switch c := c.(type) { + case proc.Breakpoint: + nev++ + case proc.Signal: + // TODO(austin) + //nev++; + } + } + } + + // Translate causes to events + events := make([]Event, nev) + i := 0 + for _, t := range p.proc.Threads() { + if c, err := t.Stopped(); err == nil { + switch c := c.(type) { + case proc.Breakpoint: + gt, err := p.osThreadToGoroutine(t) + if err != nil { + return nil, err + } + events[i] = &Breakpoint{commonEvent{p, gt}, t, proc.Word(c)} + i++ + case proc.Signal: + // TODO(austin) + } + } + } + + return events, nil +} + +// postEvent appends an event to the posted queue. These events will +// be processed before any currently pending events. +func (p *Process) postEvent(ev Event) { + p.posted = append(p.posted, ev) +} + +// processEvents processes events in the event queue until no events +// remain, a handler returns EAStop, or a handler returns an error. +// It returns either EAStop or EAContinue and possibly an error. 
+func (p *Process) processEvents() (EventAction, os.Error) { + var ev Event + for len(p.posted) > 0 { + ev, p.posted = p.posted[0], p.posted[1:] + action, err := p.processEvent(ev) + if action == EAStop { + return action, err + } + } + + for len(p.pending) > 0 { + ev, p.pending = p.pending[0], p.pending[1:] + action, err := p.processEvent(ev) + if action == EAStop { + return action, err + } + } + + return EAContinue, nil +} + +// processEvent processes a single event, without manipulating the +// event queues. It returns either EAStop or EAContinue and possibly +// an error. +func (p *Process) processEvent(ev Event) (EventAction, os.Error) { + p.event = ev + + var action EventAction + var err os.Error + switch ev := p.event.(type) { + case *Breakpoint: + hook, ok := p.breakpointHooks[ev.pc] + if !ok { + break + } + p.curGoroutine = ev.Goroutine() + action, err = hook.handle(ev) + + case *GoroutineCreate: + p.curGoroutine = ev.Goroutine() + action, err = p.goroutineCreateHook.handle(ev) + + case *GoroutineExit: + action, err = p.goroutineExitHook.handle(ev) + + default: + log.Panicf("Unknown event type %T in queue", p.event) + } + + if err != nil { + return EAStop, err + } else if action == EAStop { + return EAStop, nil + } + return EAContinue, nil +} + +// Event returns the last event that caused the process to stop. This +// may return nil if the process has never been stopped by an event. +// +// TODO(austin) Return nil if the user calls p.Stop()? +func (p *Process) Event() Event { return p.event } + +/* + * Process control + */ + +// TODO(austin) Cont, WaitStop, and Stop. Need to figure out how +// event handling works with these. Originally I did it only in +// WaitStop, but if you Cont and there are pending events, then you +// have to not actually continue and wait until a WaitStop to process +// them, even if the event handlers will tell you to continue. 
We +// could handle them in both Cont and WaitStop to avoid this problem, +// but it's still weird if an event happens after the Cont and before +// the WaitStop that the handlers say to continue from. Or we could +// handle them on a separate thread. Then obviously you get weird +// asynchronous things, like prints while the user it typing a command, +// but that's not necessarily a bad thing. + +// ContWait resumes process execution and waits for an event to occur +// that stops the process. +func (p *Process) ContWait() os.Error { + for { + a, err := p.processEvents() + if err != nil { + return err + } else if a == EAStop { + break + } + err = p.proc.Continue() + if err != nil { + return err + } + err = p.proc.WaitStop() + if err != nil { + return err + } + for _, g := range p.goroutines { + g.resetFrame() + } + p.pending, err = p.causesToEvents() + if err != nil { + return err + } + } + return nil +} + +// Out selects the caller frame of the current frame. +func (p *Process) Out() os.Error { + if p.curGoroutine == nil { + return NoCurrentGoroutine{} + } + return p.curGoroutine.Out() +} + +// In selects the frame called by the current frame. +func (p *Process) In() os.Error { + if p.curGoroutine == nil { + return NoCurrentGoroutine{} + } + return p.curGoroutine.In() +} diff --git a/libgo/go/exp/ogle/rruntime.go b/libgo/go/exp/ogle/rruntime.go new file mode 100644 index 000000000..33f1935b8 --- /dev/null +++ b/libgo/go/exp/ogle/rruntime.go @@ -0,0 +1,271 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ogle + +import ( + "debug/proc" + "exp/eval" + "reflect" +) + +// This file contains remote runtime definitions. Using reflection, +// we convert all of these to interpreter types and layout their +// remote representations using the architecture rules. 
+// +// We could get most of these definitions from our own runtime +// package; however, some of them differ in convenient ways, some of +// them are not defined or exported by the runtime, and having our own +// definitions makes it easy to support multiple remote runtime +// versions. This may turn out to be overkill. +// +// All of these structures are prefixed with rt1 to indicate the +// runtime version and to mark them as types used only as templates +// for remote types. + +/* + * Runtime data headers + * + * See $GOROOT/src/pkg/runtime/runtime.h + */ + +type rt1String struct { + str uintptr + len int +} + +type rt1Slice struct { + array uintptr + len int + cap int +} + +type rt1Eface struct { + typ uintptr + ptr uintptr +} + +/* + * Runtime type structures + * + * See $GOROOT/src/pkg/runtime/type.h and $GOROOT/src/pkg/runtime/type.go + */ + +type rt1UncommonType struct { + name *string + pkgPath *string + //methods []method; +} + +type rt1CommonType struct { + size uintptr + hash uint32 + alg, align, fieldAlign uint8 + string *string + uncommonType *rt1UncommonType +} + +type rt1Type struct { + // While Type is technically an Eface, treating the + // discriminator as an opaque pointer and taking advantage of + // the commonType prologue on all Type's makes type parsing + // much simpler. 
+ typ uintptr + ptr *rt1CommonType +} + +type rt1StructField struct { + name *string + pkgPath *string + typ *rt1Type + tag *string + offset uintptr +} + +type rt1StructType struct { + rt1CommonType + fields []rt1StructField +} + +type rt1PtrType struct { + rt1CommonType + elem *rt1Type +} + +type rt1SliceType struct { + rt1CommonType + elem *rt1Type +} + +type rt1ArrayType struct { + rt1CommonType + elem *rt1Type + len uintptr +} + +/* + * Runtime scheduler structures + * + * See $GOROOT/src/pkg/runtime/runtime.h + */ + +// Fields beginning with _ are only for padding + +type rt1Stktop struct { + stackguard uintptr + stackbase *rt1Stktop + gobuf rt1Gobuf + _args uint32 + _fp uintptr +} + +type rt1Gobuf struct { + sp uintptr + pc uintptr + g *rt1G + r0 uintptr +} + +type rt1G struct { + _stackguard uintptr + stackbase *rt1Stktop + _defer uintptr + sched rt1Gobuf + _stack0 uintptr + _entry uintptr + alllink *rt1G + _param uintptr + status int16 + // Incomplete +} + +var rt1GStatus = runtimeGStatus{ + Gidle: 0, + Grunnable: 1, + Grunning: 2, + Gsyscall: 3, + Gwaiting: 4, + Gmoribund: 5, + Gdead: 6, +} + +// runtimeIndexes stores the indexes of fields in the runtime +// structures. It is filled in using reflection, so the name of the +// fields must match the names of the remoteType's in runtimeValues +// exactly and the names of the index fields must be the capitalized +// version of the names of the fields in the runtime structures above. 
+type runtimeIndexes struct { + String struct { + Str, Len int + } + Slice struct { + Array, Len, Cap int + } + Eface struct { + Typ, Ptr int + } + + UncommonType struct { + Name, PkgPath int + } + CommonType struct { + Size, Hash, Alg, Align, FieldAlign, String, UncommonType int + } + Type struct { + Typ, Ptr int + } + StructField struct { + Name, PkgPath, Typ, Tag, Offset int + } + StructType struct { + Fields int + } + PtrType struct { + Elem int + } + SliceType struct { + Elem int + } + ArrayType struct { + Elem, Len int + } + + Stktop struct { + Stackguard, Stackbase, Gobuf int + } + Gobuf struct { + Sp, Pc, G int + } + G struct { + Stackbase, Sched, Status, Alllink int + } +} + +// Values of G status codes +type runtimeGStatus struct { + Gidle, Grunnable, Grunning, Gsyscall, Gwaiting, Gmoribund, Gdead int64 +} + +// runtimeValues stores the types and values that correspond to those +// in the remote runtime package. +type runtimeValues struct { + // Runtime data headers + String, Slice, Eface *remoteType + // Runtime type structures + Type, CommonType, UncommonType, StructField, StructType, PtrType, + ArrayType, SliceType *remoteType + // Runtime scheduler structures + Stktop, Gobuf, G *remoteType + // Addresses of *runtime.XType types. These are the + // discriminators on the runtime.Type interface. We use local + // reflection to fill these in from the remote symbol table, + // so the names must match the runtime names. 
+ PBoolType, + PUint8Type, PUint16Type, PUint32Type, PUint64Type, PUintType, PUintptrType, + PInt8Type, PInt16Type, PInt32Type, PInt64Type, PIntType, + PFloat32Type, PFloat64Type, PFloatType, + PArrayType, PStringType, PStructType, PPtrType, PFuncType, + PInterfaceType, PSliceType, PMapType, PChanType, + PDotDotDotType, PUnsafePointerType proc.Word + // G status values + runtimeGStatus +} + +// fillRuntimeIndexes fills a runtimeIndexes structure will the field +// indexes gathered from the remoteTypes recorded in a runtimeValues +// structure. +func fillRuntimeIndexes(runtime *runtimeValues, out *runtimeIndexes) { + outv := reflect.Indirect(reflect.NewValue(out)).(*reflect.StructValue) + outt := outv.Type().(*reflect.StructType) + runtimev := reflect.Indirect(reflect.NewValue(runtime)).(*reflect.StructValue) + + // out contains fields corresponding to each runtime type + for i := 0; i < outt.NumField(); i++ { + // Find the interpreter type for this runtime type + name := outt.Field(i).Name + et := runtimev.FieldByName(name).Interface().(*remoteType).Type.(*eval.StructType) + + // Get the field indexes of the interpreter struct type + indexes := make(map[string]int, len(et.Elems)) + for j, f := range et.Elems { + if f.Anonymous { + continue + } + name := f.Name + if name[0] >= 'a' && name[0] <= 'z' { + name = string(name[0]+'A'-'a') + name[1:] + } + indexes[name] = j + } + + // Fill this field of out + outStructv := outv.Field(i).(*reflect.StructValue) + outStructt := outStructv.Type().(*reflect.StructType) + for j := 0; j < outStructt.NumField(); j++ { + f := outStructv.Field(j).(*reflect.IntValue) + name := outStructt.Field(j).Name + f.Set(int64(indexes[name])) + } + } +} diff --git a/libgo/go/exp/ogle/rtype.go b/libgo/go/exp/ogle/rtype.go new file mode 100644 index 000000000..b3c35575a --- /dev/null +++ b/libgo/go/exp/ogle/rtype.go @@ -0,0 +1,288 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ogle + +import ( + "debug/proc" + "exp/eval" + "fmt" + "log" +) + +const debugParseRemoteType = false + +// A remoteType is the local representation of a type in a remote process. +type remoteType struct { + eval.Type + // The size of values of this type in bytes. + size int + // The field alignment of this type. Only used for + // manually-constructed types. + fieldAlign int + // The maker function to turn a remote address of a value of + // this type into an interpreter Value. + mk maker +} + +var manualTypes = make(map[Arch]map[eval.Type]*remoteType) + +// newManualType constructs a remote type from an interpreter Type +// using the size and alignment properties of the given architecture. +// Most types are parsed directly out of the remote process, but to do +// so we need to layout the structures that describe those types ourselves. +func newManualType(t eval.Type, arch Arch) *remoteType { + if nt, ok := t.(*eval.NamedType); ok { + t = nt.Def + } + + // Get the type map for this architecture + typeMap := manualTypes[arch] + if typeMap == nil { + typeMap = make(map[eval.Type]*remoteType) + manualTypes[arch] = typeMap + + // Construct basic types for this architecture + basicType := func(t eval.Type, mk maker, size int, fieldAlign int) { + t = t.(*eval.NamedType).Def + if fieldAlign == 0 { + fieldAlign = size + } + typeMap[t] = &remoteType{t, size, fieldAlign, mk} + } + basicType(eval.Uint8Type, mkUint8, 1, 0) + basicType(eval.Uint32Type, mkUint32, 4, 0) + basicType(eval.UintptrType, mkUintptr, arch.PtrSize(), 0) + basicType(eval.Int16Type, mkInt16, 2, 0) + basicType(eval.Int32Type, mkInt32, 4, 0) + basicType(eval.IntType, mkInt, arch.IntSize(), 0) + basicType(eval.StringType, mkString, arch.PtrSize()+arch.IntSize(), arch.PtrSize()) + } + + if rt, ok := typeMap[t]; ok { + return rt + } + + var rt *remoteType + switch t := t.(type) { + case 
*eval.PtrType: + var elem *remoteType + mk := func(r remote) eval.Value { return remotePtr{r, elem} } + rt = &remoteType{t, arch.PtrSize(), arch.PtrSize(), mk} + // Construct the element type after registering the + // type to break cycles. + typeMap[eval.Type(t)] = rt + elem = newManualType(t.Elem, arch) + + case *eval.ArrayType: + elem := newManualType(t.Elem, arch) + mk := func(r remote) eval.Value { return remoteArray{r, t.Len, elem} } + rt = &remoteType{t, elem.size * int(t.Len), elem.fieldAlign, mk} + + case *eval.SliceType: + elem := newManualType(t.Elem, arch) + mk := func(r remote) eval.Value { return remoteSlice{r, elem} } + rt = &remoteType{t, arch.PtrSize() + 2*arch.IntSize(), arch.PtrSize(), mk} + + case *eval.StructType: + layout := make([]remoteStructField, len(t.Elems)) + offset := 0 + fieldAlign := 0 + for i, f := range t.Elems { + elem := newManualType(f.Type, arch) + if fieldAlign == 0 { + fieldAlign = elem.fieldAlign + } + offset = arch.Align(offset, elem.fieldAlign) + layout[i].offset = offset + layout[i].fieldType = elem + offset += elem.size + } + mk := func(r remote) eval.Value { return remoteStruct{r, layout} } + rt = &remoteType{t, offset, fieldAlign, mk} + + default: + log.Panicf("cannot manually construct type %T", t) + } + + typeMap[t] = rt + return rt +} + +var prtIndent = "" + +// parseRemoteType parses a Type structure in a remote process to +// construct the corresponding interpreter type and remote type. +func parseRemoteType(a aborter, rs remoteStruct) *remoteType { + addr := rs.addr().base + p := rs.addr().p + + // We deal with circular types by discovering cycles at + // NamedTypes. If a type cycles back to something other than + // a named type, we're guaranteed that there will be a named + // type somewhere in that cycle. Thus, we continue down, + // re-parsing types until we reach the named type in the + // cycle. 
In order to still create one remoteType per remote + // type, we insert an empty remoteType in the type map the + // first time we encounter the type and re-use that structure + // the second time we encounter it. + + rt, ok := p.types[addr] + if ok && rt.Type != nil { + return rt + } else if !ok { + rt = &remoteType{} + p.types[addr] = rt + } + + if debugParseRemoteType { + sym := p.syms.SymByAddr(uint64(addr)) + name := "<unknown>" + if sym != nil { + name = sym.Name + } + log.Printf("%sParsing type at %#x (%s)", prtIndent, addr, name) + prtIndent += " " + defer func() { prtIndent = prtIndent[0 : len(prtIndent)-1] }() + } + + // Get Type header + itype := proc.Word(rs.field(p.f.Type.Typ).(remoteUint).aGet(a)) + typ := rs.field(p.f.Type.Ptr).(remotePtr).aGet(a).(remoteStruct) + + // Is this a named type? + var nt *eval.NamedType + uncommon := typ.field(p.f.CommonType.UncommonType).(remotePtr).aGet(a) + if uncommon != nil { + name := uncommon.(remoteStruct).field(p.f.UncommonType.Name).(remotePtr).aGet(a) + if name != nil { + // TODO(austin) Declare type in appropriate remote package + nt = eval.NewNamedType(name.(remoteString).aGet(a)) + rt.Type = nt + } + } + + // Create type + var t eval.Type + var mk maker + switch itype { + case p.runtime.PBoolType: + t = eval.BoolType + mk = mkBool + case p.runtime.PUint8Type: + t = eval.Uint8Type + mk = mkUint8 + case p.runtime.PUint16Type: + t = eval.Uint16Type + mk = mkUint16 + case p.runtime.PUint32Type: + t = eval.Uint32Type + mk = mkUint32 + case p.runtime.PUint64Type: + t = eval.Uint64Type + mk = mkUint64 + case p.runtime.PUintType: + t = eval.UintType + mk = mkUint + case p.runtime.PUintptrType: + t = eval.UintptrType + mk = mkUintptr + case p.runtime.PInt8Type: + t = eval.Int8Type + mk = mkInt8 + case p.runtime.PInt16Type: + t = eval.Int16Type + mk = mkInt16 + case p.runtime.PInt32Type: + t = eval.Int32Type + mk = mkInt32 + case p.runtime.PInt64Type: + t = eval.Int64Type + mk = mkInt64 + case p.runtime.PIntType: + t 
= eval.IntType + mk = mkInt + case p.runtime.PFloat32Type: + t = eval.Float32Type + mk = mkFloat32 + case p.runtime.PFloat64Type: + t = eval.Float64Type + mk = mkFloat64 + case p.runtime.PStringType: + t = eval.StringType + mk = mkString + + case p.runtime.PArrayType: + // Cast to an ArrayType + typ := p.runtime.ArrayType.mk(typ.addr()).(remoteStruct) + len := int64(typ.field(p.f.ArrayType.Len).(remoteUint).aGet(a)) + elem := parseRemoteType(a, typ.field(p.f.ArrayType.Elem).(remotePtr).aGet(a).(remoteStruct)) + t = eval.NewArrayType(len, elem.Type) + mk = func(r remote) eval.Value { return remoteArray{r, len, elem} } + + case p.runtime.PStructType: + // Cast to a StructType + typ := p.runtime.StructType.mk(typ.addr()).(remoteStruct) + fs := typ.field(p.f.StructType.Fields).(remoteSlice).aGet(a) + + fields := make([]eval.StructField, fs.Len) + layout := make([]remoteStructField, fs.Len) + for i := range fields { + f := fs.Base.(remoteArray).elem(int64(i)).(remoteStruct) + elemrs := f.field(p.f.StructField.Typ).(remotePtr).aGet(a).(remoteStruct) + elem := parseRemoteType(a, elemrs) + fields[i].Type = elem.Type + name := f.field(p.f.StructField.Name).(remotePtr).aGet(a) + if name == nil { + fields[i].Anonymous = true + } else { + fields[i].Name = name.(remoteString).aGet(a) + } + layout[i].offset = int(f.field(p.f.StructField.Offset).(remoteUint).aGet(a)) + layout[i].fieldType = elem + } + + t = eval.NewStructType(fields) + mk = func(r remote) eval.Value { return remoteStruct{r, layout} } + + case p.runtime.PPtrType: + // Cast to a PtrType + typ := p.runtime.PtrType.mk(typ.addr()).(remoteStruct) + elem := parseRemoteType(a, typ.field(p.f.PtrType.Elem).(remotePtr).aGet(a).(remoteStruct)) + t = eval.NewPtrType(elem.Type) + mk = func(r remote) eval.Value { return remotePtr{r, elem} } + + case p.runtime.PSliceType: + // Cast to a SliceType + typ := p.runtime.SliceType.mk(typ.addr()).(remoteStruct) + elem := parseRemoteType(a, 
typ.field(p.f.SliceType.Elem).(remotePtr).aGet(a).(remoteStruct)) + t = eval.NewSliceType(elem.Type) + mk = func(r remote) eval.Value { return remoteSlice{r, elem} } + + case p.runtime.PMapType, p.runtime.PChanType, p.runtime.PFuncType, p.runtime.PInterfaceType, p.runtime.PUnsafePointerType, p.runtime.PDotDotDotType: + // TODO(austin) + t = eval.UintptrType + mk = mkUintptr + + default: + sym := p.syms.SymByAddr(uint64(itype)) + name := "<unknown symbol>" + if sym != nil { + name = sym.Name + } + err := fmt.Sprintf("runtime type at %#x has unexpected type %#x (%s)", addr, itype, name) + a.Abort(FormatError(err)) + } + + // Fill in the remote type + if nt != nil { + nt.Complete(t) + } else { + rt.Type = t + } + rt.size = int(typ.field(p.f.CommonType.Size).(remoteUint).aGet(a)) + rt.mk = mk + + return rt +} diff --git a/libgo/go/exp/ogle/rvalue.go b/libgo/go/exp/ogle/rvalue.go new file mode 100644 index 000000000..3d630f936 --- /dev/null +++ b/libgo/go/exp/ogle/rvalue.go @@ -0,0 +1,515 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ogle + +import ( + "debug/proc" + "exp/eval" + "fmt" +) + +// A RemoteMismatchError occurs when an operation that requires two +// identical remote processes is given different process. For +// example, this occurs when trying to set a pointer in one process to +// point to something in another process. +type RemoteMismatchError string + +func (e RemoteMismatchError) String() string { return string(e) } + +// A ReadOnlyError occurs when attempting to set or assign to a +// read-only value. +type ReadOnlyError string + +func (e ReadOnlyError) String() string { return string(e) } + +// A maker is a function that converts a remote address into an +// interpreter Value. +type maker func(remote) eval.Value + +type remoteValue interface { + addr() remote +} + +// remote represents an address in a remote process. 
+type remote struct { + base proc.Word + p *Process +} + +func (v remote) Get(a aborter, size int) uint64 { + // TODO(austin) This variable might temporarily be in a + // register. We could trace the assembly back from the + // current PC, looking for the beginning of the function or a + // call (both of which guarantee that the variable is in + // memory), or an instruction that loads the variable into a + // register. + // + // TODO(austin) If this is a local variable, it might not be + // live at this PC. In fact, because the compiler reuses + // slots, there might even be a different local variable at + // this location right now. A simple solution to both + // problems is to include the range of PC's over which a local + // variable is live in the symbol table. + // + // TODO(austin) We need to prevent the remote garbage + // collector from collecting objects out from under us. + var arr [8]byte + buf := arr[0:size] + _, err := v.p.Peek(v.base, buf) + if err != nil { + a.Abort(err) + } + return uint64(v.p.ToWord(buf)) +} + +func (v remote) Set(a aborter, size int, x uint64) { + var arr [8]byte + buf := arr[0:size] + v.p.FromWord(proc.Word(x), buf) + _, err := v.p.Poke(v.base, buf) + if err != nil { + a.Abort(err) + } +} + +func (v remote) plus(x proc.Word) remote { return remote{v.base + x, v.p} } + +func tryRVString(f func(a aborter) string) string { + var s string + err := try(func(a aborter) { s = f(a) }) + if err != nil { + return fmt.Sprintf("<error: %v>", err) + } + return s +} + +/* + * Bool + */ + +type remoteBool struct { + r remote +} + +func (v remoteBool) String() string { + return tryRVString(func(a aborter) string { return fmt.Sprintf("%v", v.aGet(a)) }) +} + +func (v remoteBool) Assign(t *eval.Thread, o eval.Value) { + v.Set(t, o.(eval.BoolValue).Get(t)) +} + +func (v remoteBool) Get(t *eval.Thread) bool { return v.aGet(t) } + +func (v remoteBool) aGet(a aborter) bool { return v.r.Get(a, 1) != 0 } + +func (v remoteBool) Set(t *eval.Thread, x 
bool) { + v.aSet(t, x) +} + +func (v remoteBool) aSet(a aborter, x bool) { + if x { + v.r.Set(a, 1, 1) + } else { + v.r.Set(a, 1, 0) + } +} + +func (v remoteBool) addr() remote { return v.r } + +func mkBool(r remote) eval.Value { return remoteBool{r} } + +/* + * Uint + */ + +type remoteUint struct { + r remote + size int +} + +func (v remoteUint) String() string { + return tryRVString(func(a aborter) string { return fmt.Sprintf("%v", v.aGet(a)) }) +} + +func (v remoteUint) Assign(t *eval.Thread, o eval.Value) { + v.Set(t, o.(eval.UintValue).Get(t)) +} + +func (v remoteUint) Get(t *eval.Thread) uint64 { + return v.aGet(t) +} + +func (v remoteUint) aGet(a aborter) uint64 { return v.r.Get(a, v.size) } + +func (v remoteUint) Set(t *eval.Thread, x uint64) { + v.aSet(t, x) +} + +func (v remoteUint) aSet(a aborter, x uint64) { v.r.Set(a, v.size, x) } + +func (v remoteUint) addr() remote { return v.r } + +func mkUint8(r remote) eval.Value { return remoteUint{r, 1} } + +func mkUint16(r remote) eval.Value { return remoteUint{r, 2} } + +func mkUint32(r remote) eval.Value { return remoteUint{r, 4} } + +func mkUint64(r remote) eval.Value { return remoteUint{r, 8} } + +func mkUint(r remote) eval.Value { return remoteUint{r, r.p.IntSize()} } + +func mkUintptr(r remote) eval.Value { return remoteUint{r, r.p.PtrSize()} } + +/* + * Int + */ + +type remoteInt struct { + r remote + size int +} + +func (v remoteInt) String() string { + return tryRVString(func(a aborter) string { return fmt.Sprintf("%v", v.aGet(a)) }) +} + +func (v remoteInt) Assign(t *eval.Thread, o eval.Value) { + v.Set(t, o.(eval.IntValue).Get(t)) +} + +func (v remoteInt) Get(t *eval.Thread) int64 { return v.aGet(t) } + +func (v remoteInt) aGet(a aborter) int64 { return int64(v.r.Get(a, v.size)) } + +func (v remoteInt) Set(t *eval.Thread, x int64) { + v.aSet(t, x) +} + +func (v remoteInt) aSet(a aborter, x int64) { v.r.Set(a, v.size, uint64(x)) } + +func (v remoteInt) addr() remote { return v.r } + +func mkInt8(r 
remote) eval.Value { return remoteInt{r, 1} } + +func mkInt16(r remote) eval.Value { return remoteInt{r, 2} } + +func mkInt32(r remote) eval.Value { return remoteInt{r, 4} } + +func mkInt64(r remote) eval.Value { return remoteInt{r, 8} } + +func mkInt(r remote) eval.Value { return remoteInt{r, r.p.IntSize()} } + +/* + * Float + */ + +type remoteFloat struct { + r remote + size int +} + +func (v remoteFloat) String() string { + return tryRVString(func(a aborter) string { return fmt.Sprintf("%v", v.aGet(a)) }) +} + +func (v remoteFloat) Assign(t *eval.Thread, o eval.Value) { + v.Set(t, o.(eval.FloatValue).Get(t)) +} + +func (v remoteFloat) Get(t *eval.Thread) float64 { + return v.aGet(t) +} + +func (v remoteFloat) aGet(a aborter) float64 { + bits := v.r.Get(a, v.size) + switch v.size { + case 4: + return float64(v.r.p.ToFloat32(uint32(bits))) + case 8: + return v.r.p.ToFloat64(bits) + } + panic("Unexpected float size") +} + +func (v remoteFloat) Set(t *eval.Thread, x float64) { + v.aSet(t, x) +} + +func (v remoteFloat) aSet(a aborter, x float64) { + var bits uint64 + switch v.size { + case 4: + bits = uint64(v.r.p.FromFloat32(float32(x))) + case 8: + bits = v.r.p.FromFloat64(x) + default: + panic("Unexpected float size") + } + v.r.Set(a, v.size, bits) +} + +func (v remoteFloat) addr() remote { return v.r } + +func mkFloat32(r remote) eval.Value { return remoteFloat{r, 4} } + +func mkFloat64(r remote) eval.Value { return remoteFloat{r, 8} } + +func mkFloat(r remote) eval.Value { return remoteFloat{r, r.p.FloatSize()} } + +/* + * String + */ + +type remoteString struct { + r remote +} + +func (v remoteString) String() string { + return tryRVString(func(a aborter) string { return v.aGet(a) }) +} + +func (v remoteString) Assign(t *eval.Thread, o eval.Value) { + v.Set(t, o.(eval.StringValue).Get(t)) +} + +func (v remoteString) Get(t *eval.Thread) string { + return v.aGet(t) +} + +func (v remoteString) aGet(a aborter) string { + rs := 
v.r.p.runtime.String.mk(v.r).(remoteStruct) + str := proc.Word(rs.field(v.r.p.f.String.Str).(remoteUint).aGet(a)) + len := rs.field(v.r.p.f.String.Len).(remoteInt).aGet(a) + + bytes := make([]uint8, len) + _, err := v.r.p.Peek(str, bytes) + if err != nil { + a.Abort(err) + } + return string(bytes) +} + +func (v remoteString) Set(t *eval.Thread, x string) { + v.aSet(t, x) +} + +func (v remoteString) aSet(a aborter, x string) { + // TODO(austin) This isn't generally possible without the + // ability to allocate remote memory. + a.Abort(ReadOnlyError("remote strings cannot be assigned to")) +} + +func mkString(r remote) eval.Value { return remoteString{r} } + +/* + * Array + */ + +type remoteArray struct { + r remote + len int64 + elemType *remoteType +} + +func (v remoteArray) String() string { + res := "{" + for i := int64(0); i < v.len; i++ { + if i > 0 { + res += ", " + } + res += v.elem(i).String() + } + return res + "}" +} + +func (v remoteArray) Assign(t *eval.Thread, o eval.Value) { + // TODO(austin) Could do a bigger memcpy if o is a + // remoteArray in the same Process. 
+ oa := o.(eval.ArrayValue) + for i := int64(0); i < v.len; i++ { + v.Elem(t, i).Assign(t, oa.Elem(t, i)) + } +} + +func (v remoteArray) Get(t *eval.Thread) eval.ArrayValue { + return v +} + +func (v remoteArray) Elem(t *eval.Thread, i int64) eval.Value { + return v.elem(i) +} + +func (v remoteArray) elem(i int64) eval.Value { + return v.elemType.mk(v.r.plus(proc.Word(int64(v.elemType.size) * i))) +} + +func (v remoteArray) Sub(i int64, len int64) eval.ArrayValue { + return remoteArray{v.r.plus(proc.Word(int64(v.elemType.size) * i)), len, v.elemType} +} + +/* + * Struct + */ + +type remoteStruct struct { + r remote + layout []remoteStructField +} + +type remoteStructField struct { + offset int + fieldType *remoteType +} + +func (v remoteStruct) String() string { + res := "{" + for i := range v.layout { + if i > 0 { + res += ", " + } + res += v.field(i).String() + } + return res + "}" +} + +func (v remoteStruct) Assign(t *eval.Thread, o eval.Value) { + // TODO(austin) Could do a bigger memcpy. + oa := o.(eval.StructValue) + l := len(v.layout) + for i := 0; i < l; i++ { + v.Field(t, i).Assign(t, oa.Field(t, i)) + } +} + +func (v remoteStruct) Get(t *eval.Thread) eval.StructValue { + return v +} + +func (v remoteStruct) Field(t *eval.Thread, i int) eval.Value { + return v.field(i) +} + +func (v remoteStruct) field(i int) eval.Value { + f := &v.layout[i] + return f.fieldType.mk(v.r.plus(proc.Word(f.offset))) +} + +func (v remoteStruct) addr() remote { return v.r } + +/* + * Pointer + */ + +// TODO(austin) Comparing two remote pointers for equality in the +// interpreter will crash it because the Value's returned from +// remotePtr.Get() will be structs. 
+ +type remotePtr struct { + r remote + elemType *remoteType +} + +func (v remotePtr) String() string { + return tryRVString(func(a aborter) string { + e := v.aGet(a) + if e == nil { + return "<nil>" + } + return "&" + e.String() + }) +} + +func (v remotePtr) Assign(t *eval.Thread, o eval.Value) { + v.Set(t, o.(eval.PtrValue).Get(t)) +} + +func (v remotePtr) Get(t *eval.Thread) eval.Value { + return v.aGet(t) +} + +func (v remotePtr) aGet(a aborter) eval.Value { + addr := proc.Word(v.r.Get(a, v.r.p.PtrSize())) + if addr == 0 { + return nil + } + return v.elemType.mk(remote{addr, v.r.p}) +} + +func (v remotePtr) Set(t *eval.Thread, x eval.Value) { + v.aSet(t, x) +} + +func (v remotePtr) aSet(a aborter, x eval.Value) { + if x == nil { + v.r.Set(a, v.r.p.PtrSize(), 0) + return + } + xr, ok := x.(remoteValue) + if !ok || v.r.p != xr.addr().p { + a.Abort(RemoteMismatchError("remote pointer must point within the same process")) + } + v.r.Set(a, v.r.p.PtrSize(), uint64(xr.addr().base)) +} + +func (v remotePtr) addr() remote { return v.r } + +/* + * Slice + */ + +type remoteSlice struct { + r remote + elemType *remoteType +} + +func (v remoteSlice) String() string { + return tryRVString(func(a aborter) string { + b := v.aGet(a).Base + if b == nil { + return "<nil>" + } + return b.String() + }) +} + +func (v remoteSlice) Assign(t *eval.Thread, o eval.Value) { + v.Set(t, o.(eval.SliceValue).Get(t)) +} + +func (v remoteSlice) Get(t *eval.Thread) eval.Slice { + return v.aGet(t) +} + +func (v remoteSlice) aGet(a aborter) eval.Slice { + rs := v.r.p.runtime.Slice.mk(v.r).(remoteStruct) + base := proc.Word(rs.field(v.r.p.f.Slice.Array).(remoteUint).aGet(a)) + nel := rs.field(v.r.p.f.Slice.Len).(remoteInt).aGet(a) + cap := rs.field(v.r.p.f.Slice.Cap).(remoteInt).aGet(a) + if base == 0 { + return eval.Slice{nil, nel, cap} + } + return eval.Slice{remoteArray{remote{base, v.r.p}, nel, v.elemType}, nel, cap} +} + +func (v remoteSlice) Set(t *eval.Thread, x eval.Slice) { + v.aSet(t, x) 
+} + +func (v remoteSlice) aSet(a aborter, x eval.Slice) { + rs := v.r.p.runtime.Slice.mk(v.r).(remoteStruct) + if x.Base == nil { + rs.field(v.r.p.f.Slice.Array).(remoteUint).aSet(a, 0) + } else { + ar, ok := x.Base.(remoteArray) + if !ok || v.r.p != ar.r.p { + a.Abort(RemoteMismatchError("remote slice must point within the same process")) + } + rs.field(v.r.p.f.Slice.Array).(remoteUint).aSet(a, uint64(ar.r.base)) + } + rs.field(v.r.p.f.Slice.Len).(remoteInt).aSet(a, x.Len) + rs.field(v.r.p.f.Slice.Cap).(remoteInt).aSet(a, x.Cap) +} diff --git a/libgo/go/exp/ogle/vars.go b/libgo/go/exp/ogle/vars.go new file mode 100644 index 000000000..8a3a14791 --- /dev/null +++ b/libgo/go/exp/ogle/vars.go @@ -0,0 +1,272 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ogle + +import ( + "debug/gosym" + "debug/proc" + "exp/eval" + "log" + "os" +) + +/* + * Remote frame pointers + */ + +// A NotOnStack error occurs when attempting to access a variable in a +// remote frame where that remote frame is not on the current stack. +type NotOnStack struct { + Fn *gosym.Func + Goroutine *Goroutine +} + +func (e NotOnStack) String() string { + return "function " + e.Fn.Name + " not on " + e.Goroutine.String() + "'s stack" +} + +// A remoteFramePtr is an implementation of eval.PtrValue that +// represents a pointer to a function frame in a remote process. When +// accessed, this locates the function on the current goroutine's +// stack and returns a structure containing the local variables of +// that function. 
+type remoteFramePtr struct { + p *Process + fn *gosym.Func + rt *remoteType +} + +func (v remoteFramePtr) String() string { + // TODO(austin): This could be a really awesome string method + return "<remote frame>" +} + +func (v remoteFramePtr) Assign(t *eval.Thread, o eval.Value) { + v.Set(t, o.(eval.PtrValue).Get(t)) +} + +func (v remoteFramePtr) Get(t *eval.Thread) eval.Value { + g := v.p.curGoroutine + if g == nil || g.frame == nil { + t.Abort(NoCurrentGoroutine{}) + } + + for f := g.frame; f != nil; f = f.aOuter(t) { + if f.fn != v.fn { + continue + } + + // TODO(austin): Register for shootdown with f + return v.rt.mk(remote{f.fp, v.p}) + } + + t.Abort(NotOnStack{v.fn, g}) + panic("fail") +} + +func (v remoteFramePtr) Set(t *eval.Thread, x eval.Value) { + // Theoretically this could be a static error. If remote + // packages were packages, remote frames could just be defined + // as constants. + t.Abort(ReadOnlyError("remote frames cannot be assigned to")) +} + +/* + * Remote packages + */ + +// TODO(austin): Remote packages are implemented as structs right now, +// which has some weird consequences. You can attempt to assign to a +// remote package. It also produces terrible error messages. +// Ideally, these would actually be packages, but somehow first-class +// so they could be assigned to other names. + +// A remotePackage is an implementation of eval.StructValue that +// represents a package in a remote process. It's essentially a +// regular struct, except it cannot be assigned to. 
+type remotePackage struct { + defs []eval.Value +} + +func (v remotePackage) String() string { return "<remote package>" } + +func (v remotePackage) Assign(t *eval.Thread, o eval.Value) { + t.Abort(ReadOnlyError("remote packages cannot be assigned to")) +} + +func (v remotePackage) Get(t *eval.Thread) eval.StructValue { + return v +} + +func (v remotePackage) Field(t *eval.Thread, i int) eval.Value { + return v.defs[i] +} + +/* + * Remote variables + */ + +// populateWorld defines constants in the given world for each package +// in this process. These packages are structs that, in turn, contain +// fields for each global and function in that package. +func (p *Process) populateWorld(w *eval.World) os.Error { + type def struct { + t eval.Type + v eval.Value + } + packages := make(map[string]map[string]def) + + for _, s := range p.syms.Syms { + if s.ReceiverName() != "" { + // TODO(austin) + continue + } + + // Package + pkgName := s.PackageName() + switch pkgName { + case "", "type", "extratype", "string", "go": + // "go" is really "go.string" + continue + } + pkg, ok := packages[pkgName] + if !ok { + pkg = make(map[string]def) + packages[pkgName] = pkg + } + + // Symbol name + name := s.BaseName() + if _, ok := pkg[name]; ok { + log.Printf("Multiple definitions of symbol %s", s.Name) + continue + } + + // Symbol type + rt, err := p.typeOfSym(&s) + if err != nil { + return err + } + + // Definition + switch s.Type { + case 'D', 'd', 'B', 'b': + // Global variable + if rt == nil { + continue + } + pkg[name] = def{rt.Type, rt.mk(remote{proc.Word(s.Value), p})} + + case 'T', 't', 'L', 'l': + // Function + s := s.Func + // TODO(austin): Ideally, this would *also* be + // callable. How does that interact with type + // conversion syntax? 
+ rt, err := p.makeFrameType(s) + if err != nil { + return err + } + pkg[name] = def{eval.NewPtrType(rt.Type), remoteFramePtr{p, s, rt}} + } + } + + // TODO(austin): Define remote types + + // Define packages + for pkgName, defs := range packages { + fields := make([]eval.StructField, len(defs)) + vals := make([]eval.Value, len(defs)) + i := 0 + for name, def := range defs { + fields[i].Name = name + fields[i].Type = def.t + vals[i] = def.v + i++ + } + pkgType := eval.NewStructType(fields) + pkgVal := remotePackage{vals} + + err := w.DefineConst(pkgName, pkgType, pkgVal) + if err != nil { + log.Printf("while defining package %s: %v", pkgName, err) + } + } + + return nil +} + +// typeOfSym returns the type associated with a symbol. If the symbol +// has no type, returns nil. +func (p *Process) typeOfSym(s *gosym.Sym) (*remoteType, os.Error) { + if s.GoType == 0 { + return nil, nil + } + addr := proc.Word(s.GoType) + var rt *remoteType + err := try(func(a aborter) { rt = parseRemoteType(a, p.runtime.Type.mk(remote{addr, p}).(remoteStruct)) }) + if err != nil { + return nil, err + } + return rt, nil +} + +// makeFrameType constructs a struct type for the frame of a function. +// The offsets in this struct type are such that the struct can be +// instantiated at this function's frame pointer. +func (p *Process) makeFrameType(s *gosym.Func) (*remoteType, os.Error) { + n := len(s.Params) + len(s.Locals) + fields := make([]eval.StructField, n) + layout := make([]remoteStructField, n) + i := 0 + + // TODO(austin): There can be multiple locals/parameters with + // the same name. We probably need liveness information to do + // anything about this. Once we have that, perhaps we give + // such fields interface{} type? Or perhaps we disambiguate + // the names with numbers. Disambiguation is annoying for + // things like "i", where there's an obvious right answer. 
+ + for _, param := range s.Params { + rt, err := p.typeOfSym(param) + if err != nil { + return nil, err + } + if rt == nil { + //fmt.Printf(" (no type)\n"); + continue + } + // TODO(austin): Why do local variables carry their + // package name? + fields[i].Name = param.BaseName() + fields[i].Type = rt.Type + // Parameters have positive offsets from FP + layout[i].offset = int(param.Value) + layout[i].fieldType = rt + i++ + } + + for _, local := range s.Locals { + rt, err := p.typeOfSym(local) + if err != nil { + return nil, err + } + if rt == nil { + continue + } + fields[i].Name = local.BaseName() + fields[i].Type = rt.Type + // Locals have negative offsets from FP - PtrSize + layout[i].offset = -int(local.Value) - p.PtrSize() + layout[i].fieldType = rt + i++ + } + + fields = fields[0:i] + layout = layout[0:i] + t := eval.NewStructType(fields) + mk := func(r remote) eval.Value { return remoteStruct{r, layout} } + return &remoteType{t, 0, 0, mk}, nil +} |