summaryrefslogtreecommitdiff
path: root/libgo/go/exp/eval
diff options
context:
space:
mode:
authorupstream source tree <ports@midipix.org>2015-03-15 20:14:05 -0400
committerupstream source tree <ports@midipix.org>2015-03-15 20:14:05 -0400
commit554fd8c5195424bdbcabf5de30fdc183aba391bd (patch)
tree976dc5ab7fddf506dadce60ae936f43f58787092 /libgo/go/exp/eval
downloadcbb-gcc-4.6.4-554fd8c5195424bdbcabf5de30fdc183aba391bd.tar.bz2
cbb-gcc-4.6.4-554fd8c5195424bdbcabf5de30fdc183aba391bd.tar.xz
obtained gcc-4.6.4.tar.bz2 from upstream website;upstream
verified gcc-4.6.4.tar.bz2.sig; imported gcc-4.6.4 source tree from verified upstream tarball. downloading a git-generated archive based on the 'upstream' tag should provide you with a source tree that is binary identical to the one extracted from the above tarball. if you have obtained the source via the command 'git clone', however, do note that line-endings of files in your working directory might differ from line-endings of the respective files in the upstream repository.
Diffstat (limited to 'libgo/go/exp/eval')
-rw-r--r--libgo/go/exp/eval/abort.go85
-rw-r--r--libgo/go/exp/eval/bridge.go169
-rw-r--r--libgo/go/exp/eval/compiler.go92
-rw-r--r--libgo/go/exp/eval/eval_test.go259
-rw-r--r--libgo/go/exp/eval/expr.go2015
-rw-r--r--libgo/go/exp/eval/expr1.go1874
-rw-r--r--libgo/go/exp/eval/expr_test.go355
-rw-r--r--libgo/go/exp/eval/func.go70
-rw-r--r--libgo/go/exp/eval/scope.go207
-rw-r--r--libgo/go/exp/eval/stmt.go1302
-rw-r--r--libgo/go/exp/eval/stmt_test.go343
-rw-r--r--libgo/go/exp/eval/type.go1252
-rw-r--r--libgo/go/exp/eval/typec.go409
-rw-r--r--libgo/go/exp/eval/value.go586
-rw-r--r--libgo/go/exp/eval/world.go188
15 files changed, 9206 insertions, 0 deletions
diff --git a/libgo/go/exp/eval/abort.go b/libgo/go/exp/eval/abort.go
new file mode 100644
index 000000000..22e17cec4
--- /dev/null
+++ b/libgo/go/exp/eval/abort.go
@@ -0,0 +1,85 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package eval
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+)
+
+// Abort aborts the thread's current computation,
+// causing the innermost Try to return err.
+func (t *Thread) Abort(err os.Error) {
+ // No enclosing Try means there is no abort channel to deliver
+ // err on, so the only option is to panic.
+ if t.abort == nil {
+ panic("abort: " + err.String())
+ }
+ // Hand the error to the goroutine blocked in Try, then kill
+ // this goroutine so the aborted computation stops here.
+ t.abort <- err
+ runtime.Goexit()
+}
+
+// Try executes a computation; if the computation
+// Aborts, Try returns the error passed to abort.
+func (t *Thread) Try(f func(t *Thread)) os.Error {
+ // Save the enclosing Try's abort channel so nested Trys
+ // restore it on the way out.
+ oc := t.abort
+ c := make(chan os.Error)
+ t.abort = c
+ go func() {
+ f(t)
+ // Normal completion reports a nil error on the same channel.
+ c <- nil
+ }()
+ err := <-c
+ t.abort = oc
+ return err
+}
+
+// DivByZeroError is the error delivered when a computation divides
+// by zero.
+type DivByZeroError struct{}
+
+func (DivByZeroError) String() string { return "divide by zero" }
+
+// NilPointerError is the error delivered on a nil pointer
+// dereference.
+type NilPointerError struct{}
+
+func (NilPointerError) String() string { return "nil pointer dereference" }
+
+// IndexError reports index Idx being negative or exceeding length
+// Len.
+type IndexError struct {
+ Idx, Len int64
+}
+
+func (e IndexError) String() string {
+ if e.Idx < 0 {
+ return fmt.Sprintf("negative index: %d", e.Idx)
+ }
+ return fmt.Sprintf("index %d exceeds length %d", e.Idx, e.Len)
+}
+
+// SliceError reports an invalid slice expression [Lo:Hi] against a
+// value with capacity Cap.
+type SliceError struct {
+ Lo, Hi, Cap int64
+}
+
+func (e SliceError) String() string {
+ return fmt.Sprintf("slice [%d:%d]; cap %d", e.Lo, e.Hi, e.Cap)
+}
+
+// KeyError reports a map access whose key was not present.
+type KeyError struct {
+ Key interface{}
+}
+
+func (e KeyError) String() string { return fmt.Sprintf("key '%v' not found in map", e.Key) }
+
+// NegativeLengthError reports a negative length value.
+type NegativeLengthError struct {
+ Len int64
+}
+
+func (e NegativeLengthError) String() string {
+ return fmt.Sprintf("negative length: %d", e.Len)
+}
+
+// NegativeCapacityError reports a negative capacity value.
+// NOTE(review): the field is named Len but, per the message below, it
+// holds a capacity — renaming would change the exported interface, so
+// confirm with callers before touching it.
+type NegativeCapacityError struct {
+ Len int64
+}
+
+func (e NegativeCapacityError) String() string {
+ return fmt.Sprintf("negative capacity: %d", e.Len)
+}
diff --git a/libgo/go/exp/eval/bridge.go b/libgo/go/exp/eval/bridge.go
new file mode 100644
index 000000000..12835c4c0
--- /dev/null
+++ b/libgo/go/exp/eval/bridge.go
@@ -0,0 +1,169 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package eval
+
+import (
+ "log"
+ "go/token"
+ "reflect"
+)
+
+/*
+ * Type bridging
+ */
+
+// evalTypes and nativeTypes memoize conversions in each direction so
+// repeated bridging of the same type returns the identical Type
+// object (which also terminates recursion for self-referential types).
+var (
+	evalTypes   = make(map[reflect.Type]Type)
+	nativeTypes = make(map[Type]reflect.Type)
+)
+
+// TypeFromNative converts a regular Go type into the corresponding
+// interpreter Type.
+func TypeFromNative(t reflect.Type) Type {
+ if et, ok := evalTypes[t]; ok {
+ return et
+ }
+
+ // For named types, enter an incomplete NamedType into the memo
+ // table before recursing so recursive type references resolve to
+ // the placeholder instead of looping forever.
+ var nt *NamedType
+ if t.Name() != "" {
+ // NOTE(review): the separator literal appears mojibake-damaged by
+ // the scrape (likely a middle-dot); it is a runtime string, left
+ // untouched here — verify against the upstream tarball.
+ name := t.PkgPath() + "ยท" + t.Name()
+ nt = &NamedType{token.NoPos, name, nil, true, make(map[string]Method)}
+ evalTypes[t] = nt
+ }
+
+ // Dispatch on the concrete reflect.Type (pre-Go-1 reflect API,
+ // where each kind is a distinct struct type).
+ var et Type
+ switch t := t.(type) {
+ case *reflect.BoolType:
+ et = BoolType
+ case *reflect.FloatType:
+ switch t.Kind() {
+ case reflect.Float32:
+ et = Float32Type
+ case reflect.Float64:
+ et = Float64Type
+ }
+ case *reflect.IntType:
+ switch t.Kind() {
+ case reflect.Int16:
+ et = Int16Type
+ case reflect.Int32:
+ et = Int32Type
+ case reflect.Int64:
+ et = Int64Type
+ case reflect.Int8:
+ et = Int8Type
+ case reflect.Int:
+ et = IntType
+ }
+ case *reflect.UintType:
+ switch t.Kind() {
+ case reflect.Uint16:
+ et = Uint16Type
+ case reflect.Uint32:
+ et = Uint32Type
+ case reflect.Uint64:
+ et = Uint64Type
+ case reflect.Uint8:
+ et = Uint8Type
+ case reflect.Uint:
+ et = UintType
+ case reflect.Uintptr:
+ et = UintptrType
+ }
+ case *reflect.StringType:
+ et = StringType
+ case *reflect.ArrayType:
+ et = NewArrayType(int64(t.Len()), TypeFromNative(t.Elem()))
+ case *reflect.ChanType:
+ log.Panicf("%T not implemented", t)
+ case *reflect.FuncType:
+ nin := t.NumIn()
+ // Variadic functions have DotDotDotType at the end
+ variadic := t.DotDotDot()
+ if variadic {
+ nin--
+ }
+ in := make([]Type, nin)
+ for i := range in {
+ in[i] = TypeFromNative(t.In(i))
+ }
+ out := make([]Type, t.NumOut())
+ for i := range out {
+ out[i] = TypeFromNative(t.Out(i))
+ }
+ et = NewFuncType(in, variadic, out)
+ case *reflect.InterfaceType:
+ log.Panicf("%T not implemented", t)
+ case *reflect.MapType:
+ log.Panicf("%T not implemented", t)
+ case *reflect.PtrType:
+ et = NewPtrType(TypeFromNative(t.Elem()))
+ case *reflect.SliceType:
+ et = NewSliceType(TypeFromNative(t.Elem()))
+ case *reflect.StructType:
+ n := t.NumField()
+ fields := make([]StructField, n)
+ for i := 0; i < n; i++ {
+ sf := t.Field(i)
+ // TODO(austin) What to do about private fields?
+ fields[i].Name = sf.Name
+ fields[i].Type = TypeFromNative(sf.Type)
+ fields[i].Anonymous = sf.Anonymous
+ }
+ et = NewStructType(fields)
+ case *reflect.UnsafePointerType:
+ log.Panicf("%T not implemented", t)
+ default:
+ log.Panicf("unexpected reflect.Type: %T", t)
+ }
+
+ // If a placeholder NamedType was created above, complete it with
+ // the underlying type just computed and return the named wrapper.
+ if nt != nil {
+ if _, ok := et.(*NamedType); !ok {
+ nt.Complete(et)
+ et = nt
+ }
+ }
+
+ nativeTypes[et] = t
+ evalTypes[t] = et
+
+ return et
+}
+
+// TypeOfNative returns the interpreter Type of a regular Go value.
+func TypeOfNative(v interface{}) Type { return TypeFromNative(reflect.Typeof(v)) }
+
+/*
+ * Function bridging
+ */
+
+// nativeFunc wraps a native Go function so the interpreter can call
+// it. fn receives the thread plus slices of the in and out argument
+// Values; in and out record the respective argument counts.
+type nativeFunc struct {
+ fn func(*Thread, []Value, []Value)
+ in, out int
+}
+
+// NewFrame allocates a frame with one Value slot per in and out
+// argument.
+func (f *nativeFunc) NewFrame() *Frame {
+ vars := make([]Value, f.in+f.out)
+ return &Frame{nil, vars}
+}
+
+// Call invokes fn, splitting the frame's variables into the in and
+// out argument slices.
+func (f *nativeFunc) Call(t *Thread) { f.fn(t, t.f.Vars[0:f.in], t.f.Vars[f.in:f.in+f.out]) }
+
+// FuncFromNative creates an interpreter function from a native
+// function that takes its in and out arguments as slices of
+// interpreter Value's. While somewhat inconvenient, this avoids
+// value marshalling.
+func FuncFromNative(fn func(*Thread, []Value, []Value), t *FuncType) FuncValue {
+ return &funcV{&nativeFunc{fn, len(t.In), len(t.Out)}}
+}
+
+// FuncFromNativeTyped is like FuncFromNative, but constructs the
+// function type from a function pointer using reflection. Typically,
+// the type will be given as a nil pointer to a function with the
+// desired signature.
+func FuncFromNativeTyped(fn func(*Thread, []Value, []Value), t interface{}) (*FuncType, FuncValue) {
+ ft := TypeOfNative(t).(*FuncType)
+ return ft, FuncFromNative(fn, ft)
+}
diff --git a/libgo/go/exp/eval/compiler.go b/libgo/go/exp/eval/compiler.go
new file mode 100644
index 000000000..9d2923bfc
--- /dev/null
+++ b/libgo/go/exp/eval/compiler.go
@@ -0,0 +1,92 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package eval
+
+import (
+ "fmt"
+ "go/scanner"
+ "go/token"
+)
+
+
+// A compiler captures information used throughout an entire
+// compilation. Currently it includes only the error handler.
+//
+// TODO(austin) This might actually represent package level, in which
+// case it should be package compiler.
+type compiler struct {
+ fset *token.FileSet
+ // errors receives each diagnostic as it is reported.
+ errors scanner.ErrorHandler
+ // numErrors counts diagnostics reported through diagAt.
+ numErrors int
+ // silentErrors counts errors noted without a diagnostic (e.g.
+ // parse errors the parser already reported).
+ silentErrors int
+}
+
+// diagAt reports a formatted diagnostic at pos through the error
+// handler and bumps the error count.
+func (a *compiler) diagAt(pos token.Pos, format string, args ...interface{}) {
+ a.errors.Error(a.fset.Position(pos), fmt.Sprintf(format, args...))
+ a.numErrors++
+}
+
+// numError returns the total error count, including silent ones.
+func (a *compiler) numError() int { return a.numErrors + a.silentErrors }
+
+// The universal scope
+func newUniverse() *Scope {
+ sc := &Scope{nil, 0}
+ sc.block = &block{
+ offset: 0,
+ scope: sc,
+ global: true,
+ defs: make(map[string]Def),
+ }
+ return sc
+}
+
+// universe is the single shared universal scope.
+var universe *Scope = newUniverse()
+
+
+// TODO(austin) These can all go in stmt.go now
+
+// A label collects the jump targets and bookkeeping for one
+// (possibly anonymous) statement label.
+type label struct {
+ // name is the label identifier; desc describes it for
+ // diagnostics.
+ name string
+ desc string
+ // The PC goto statements should jump to, or nil if this label
+ // cannot be goto'd (such as an anonymous for loop label).
+ gotoPC *uint
+ // The PC break statements should jump to, or nil if a break
+ // statement is invalid.
+ breakPC *uint
+ // The PC continue statements should jump to, or nil if a
+ // continue statement is invalid.
+ continuePC *uint
+ // The position where this label was resolved. If it has not
+ // been resolved yet, an invalid position.
+ resolved token.Pos
+ // The position where this label was first jumped to.
+ used token.Pos
+}
+
+// A funcCompiler captures information used throughout the compilation
+// of a single function body.
+type funcCompiler struct {
+ *compiler
+ fnType *FuncType
+ // Whether the out variables are named. This affects what
+ // kinds of return statements are legal.
+ outVarsNamed bool
+ *codeBuf
+ flow *flowBuf
+ labels map[string]*label
+}
+
+// A blockCompiler captures information used throughout the compilation
+// of a single block within a function.
+type blockCompiler struct {
+ *funcCompiler
+ block *block
+ // The label of this block, used for finding break and
+ // continue labels.
+ label *label
+ // The blockCompiler for the block enclosing this one, or nil
+ // for a function-level block.
+ parent *blockCompiler
+}
diff --git a/libgo/go/exp/eval/eval_test.go b/libgo/go/exp/eval/eval_test.go
new file mode 100644
index 000000000..ff28cf1a9
--- /dev/null
+++ b/libgo/go/exp/eval/eval_test.go
@@ -0,0 +1,259 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package eval
+
+import (
+ "big"
+ "flag"
+ "fmt"
+ "go/token"
+ "log"
+ "os"
+ "reflect"
+ "regexp"
+ "testing"
+)
+
+// All tests are done using the same file set.
+var fset = token.NewFileSet()
+
+// Print each statement or expression before parsing it
+var noisy = false
+
+func init() { flag.BoolVar(&noisy, "noisy", false, "chatter during eval tests") }
+
+/*
+ * Generic statement/expression test framework
+ */
+
+// A test is a sequence of jobs run in one fresh world.
+type test []job
+
+// A job is one compile/run step: code to evaluate, the expected
+// compile error (cerr) or runtime error (rterr) pattern, and either
+// an expected value (val) or no value check (noval).
+type job struct {
+ code string
+ cerr string
+ rterr string
+ val Value
+ noval bool
+}
+
+// runTests runs each test, naming it baseName[index] for messages.
+func runTests(t *testing.T, baseName string, tests []test) {
+ for i, test := range tests {
+ name := fmt.Sprintf("%s[%d]", baseName, i)
+ test.run(t, name)
+ }
+}
+
+// run executes each job of the test in order in a shared test world,
+// stopping the sequence at the first mismatch.
+func (a test) run(t *testing.T, name string) {
+ w := newTestWorld()
+ for _, j := range a {
+ src := j.code + ";" // trailing semicolon to finish statement
+ if noisy {
+ println("code:", src)
+ }
+
+ // Compile phase: an error here must match cerr, and an
+ // expected cerr must actually occur.
+ code, err := w.Compile(fset, src)
+ if err != nil {
+ if j.cerr == "" {
+ t.Errorf("%s: Compile %s: %v", name, src, err)
+ break
+ }
+ if !match(t, err, j.cerr) {
+ t.Errorf("%s: Compile %s = error %s; want %v", name, src, err, j.cerr)
+ break
+ }
+ continue
+ }
+ if j.cerr != "" {
+ t.Errorf("%s: Compile %s succeeded; want %s", name, src, j.cerr)
+ break
+ }
+
+ // Run phase: same discipline for rterr.
+ val, err := code.Run()
+ if err != nil {
+ if j.rterr == "" {
+ t.Errorf("%s: Run %s: %v", name, src, err)
+ break
+ }
+ if !match(t, err, j.rterr) {
+ t.Errorf("%s: Run %s = error %s; want %v", name, src, err, j.rterr)
+ break
+ }
+ continue
+ }
+ if j.rterr != "" {
+ t.Errorf("%s: Run %s succeeded; want %s", name, src, j.rterr)
+ break
+ }
+
+ // Value check, unless the job explicitly has no value.
+ if !j.noval && !reflect.DeepEqual(val, j.val) {
+ t.Errorf("%s: Run %s = %T(%v) want %T(%v)", name, src, val, val, j.val, j.val)
+ }
+ }
+}
+
+// match reports whether err's message matches the regexp pat,
+// failing the test if pat itself does not compile.
+func match(t *testing.T, err os.Error, pat string) bool {
+ ok, err1 := regexp.MatchString(pat, err.String())
+ if err1 != nil {
+ t.Fatalf("compile regexp %s: %v", pat, err1)
+ }
+ return ok
+}
+
+
+/*
+ * Test constructors
+ */
+
+// Expression compile error
+func CErr(expr string, cerr string) test { return test([]job{{code: expr, cerr: cerr}}) }
+
+// Expression runtime error
+func RErr(expr string, rterr string) test { return test([]job{{code: expr, rterr: rterr}}) }
+
+// Expression value
+func Val(expr string, val interface{}) test {
+ return test([]job{{code: expr, val: toValue(val)}})
+}
+
+// Statement runs without error
+func Run(stmts string) test { return test([]job{{code: stmts, noval: true}}) }
+
+// Two statements without error.
+// TODO(rsc): Should be possible with Run but the parser
+// won't let us do both top-level and non-top-level statements.
+func Run2(stmt1, stmt2 string) test {
+ return test([]job{{code: stmt1, noval: true}, {code: stmt2, noval: true}})
+}
+
+// Statement runs and test one expression's value
+func Val1(stmts string, expr1 string, val1 interface{}) test {
+ return test([]job{
+ {code: stmts, noval: true},
+ {code: expr1, val: toValue(val1)},
+ })
+}
+
+// Statement runs and test two expressions' values
+func Val2(stmts string, expr1 string, val1 interface{}, expr2 string, val2 interface{}) test {
+ return test([]job{
+ {code: stmts, noval: true},
+ {code: expr1, val: toValue(val1)},
+ {code: expr2, val: toValue(val2)},
+ })
+}
+
+/*
+ * Value constructors
+ */
+
+// vstruct and varray describe struct and array literals for toValue.
+type vstruct []interface{}
+
+type varray []interface{}
+
+// vslice describes a slice literal: a backing array plus len and cap.
+type vslice struct {
+ arr varray
+ len, cap int
+}
+
+// toValue converts a native Go value (or one of the v* literal
+// helpers above) into the corresponding interpreter Value, recursing
+// for composite elements. Panics for unsupported types.
+func toValue(val interface{}) Value {
+ switch val := val.(type) {
+ case bool:
+ r := boolV(val)
+ return &r
+ case uint8:
+ r := uint8V(val)
+ return &r
+ case uint:
+ r := uintV(val)
+ return &r
+ case int:
+ r := intV(val)
+ return &r
+ case *big.Int:
+ return &idealIntV{val}
+ case float64:
+ r := float64V(val)
+ return &r
+ case *big.Rat:
+ return &idealFloatV{val}
+ case string:
+ r := stringV(val)
+ return &r
+ case vstruct:
+ elems := make([]Value, len(val))
+ for i, e := range val {
+ elems[i] = toValue(e)
+ }
+ r := structV(elems)
+ return &r
+ case varray:
+ elems := make([]Value, len(val))
+ for i, e := range val {
+ elems[i] = toValue(e)
+ }
+ r := arrayV(elems)
+ return &r
+ case vslice:
+ return &sliceV{Slice{toValue(val.arr).(ArrayValue), int64(val.len), int64(val.cap)}}
+ case Func:
+ return &funcV{val}
+ }
+ log.Panicf("toValue(%T) not implemented", val)
+ panic("unreachable")
+}
+
+/*
+ * Default test scope
+ */
+
+// testFunc is a one-in one-out function: out = in + 1.
+type testFunc struct{}
+
+func (*testFunc) NewFrame() *Frame { return &Frame{nil, make([]Value, 2)} }
+
+func (*testFunc) Call(t *Thread) {
+ n := t.f.Vars[0].(IntValue).Get(t)
+
+ res := n + 1
+
+ t.f.Vars[1].(IntValue).Set(t, res)
+}
+
+// oneTwoFunc returns the two values 1 and 2.
+type oneTwoFunc struct{}
+
+func (*oneTwoFunc) NewFrame() *Frame { return &Frame{nil, make([]Value, 2)} }
+
+func (*oneTwoFunc) Call(t *Thread) {
+ t.f.Vars[0].(IntValue).Set(t, 1)
+ t.f.Vars[1].(IntValue).Set(t, 2)
+}
+
+// voidFunc takes no arguments and does nothing.
+type voidFunc struct{}
+
+func (*voidFunc) NewFrame() *Frame { return &Frame{nil, []Value{}} }
+
+func (*voidFunc) Call(t *Thread) {}
+
+// newTestWorld builds a World pre-populated with the constants,
+// variables, and functions the eval tests reference by name.
+func newTestWorld() *World {
+ w := NewWorld()
+
+ def := func(name string, t Type, val interface{}) { w.DefineVar(name, t, toValue(val)) }
+
+ w.DefineConst("c", IdealIntType, toValue(big.NewInt(1)))
+ def("i", IntType, 1)
+ def("i2", IntType, 2)
+ def("u", UintType, uint(1))
+ def("f", Float64Type, 1.0)
+ def("s", StringType, "abc")
+ def("t", NewStructType([]StructField{{"a", IntType, false}}), vstruct{1})
+ def("ai", NewArrayType(2, IntType), varray{1, 2})
+ def("aai", NewArrayType(2, NewArrayType(2, IntType)), varray{varray{1, 2}, varray{3, 4}})
+ def("aai2", NewArrayType(2, NewArrayType(2, IntType)), varray{varray{5, 6}, varray{7, 8}})
+ def("fn", NewFuncType([]Type{IntType}, false, []Type{IntType}), &testFunc{})
+ def("oneTwo", NewFuncType([]Type{}, false, []Type{IntType, IntType}), &oneTwoFunc{})
+ def("void", NewFuncType([]Type{}, false, []Type{}), &voidFunc{})
+ def("sli", NewSliceType(IntType), vslice{varray{1, 2, 3}, 2, 3})
+
+ return w
+}
diff --git a/libgo/go/exp/eval/expr.go b/libgo/go/exp/eval/expr.go
new file mode 100644
index 000000000..e65f47617
--- /dev/null
+++ b/libgo/go/exp/eval/expr.go
@@ -0,0 +1,2015 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package eval
+
+import (
+ "big"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "log"
+ "strconv"
+ "strings"
+ "os"
+)
+
+// Shared ideal constants used by the expression compiler.
+var (
+	idealZero = big.NewInt(0)
+	idealOne  = big.NewInt(1)
+)
+
+// An expr is the result of compiling an expression. It stores the
+// type of the expression and its evaluator function.
+type expr struct {
+ *exprInfo
+ t Type
+
+ // Evaluate this node as the given type.
+ eval interface{}
+
+ // Map index expressions permit special forms of assignment,
+ // for which we need to know the Map and key.
+ evalMapValue func(t *Thread) (Map, interface{})
+
+ // Evaluate to the "address of" this value; that is, the
+ // settable Value object. nil for expressions whose address
+ // cannot be taken.
+ evalAddr func(t *Thread) Value
+
+ // Execute this expression as a statement. Only expressions
+ // that are valid expression statements should set this.
+ exec func(t *Thread)
+
+ // If this expression is a type, this is its compiled type.
+ // This is only permitted in the function position of a call
+ // expression. In this case, t should be nil.
+ valType Type
+
+ // A short string describing this expression for error
+ // messages.
+ desc string
+}
+
+// exprInfo stores information needed to compile any expression node.
+// Each expr also stores its exprInfo so further expressions can be
+// compiled from it.
+type exprInfo struct {
+ *compiler
+ pos token.Pos
+}
+
+// newExpr creates an expr of type t described by desc, sharing this
+// node's compiler and position.
+func (a *exprInfo) newExpr(t Type, desc string) *expr {
+ return &expr{exprInfo: a, t: t, desc: desc}
+}
+
+// diag reports a diagnostic at this node's position.
+func (a *exprInfo) diag(format string, args ...interface{}) {
+ a.diagAt(a.pos, format, args...)
+}
+
+// diagOpType reports an illegal unary operand type for op.
+func (a *exprInfo) diagOpType(op token.Token, vt Type) {
+ a.diag("illegal operand type for '%v' operator\n\t%v", op, vt)
+}
+
+// diagOpTypes reports illegal binary operand types for op.
+func (a *exprInfo) diagOpTypes(op token.Token, lt Type, rt Type) {
+ a.diag("illegal operand types for '%v' operator\n\t%v\n\t%v", op, lt, rt)
+}
+
+/*
+ * Common expression manipulations
+ */
+
+// a.convertTo(t) converts the value of the analyzed expression a,
+// which must be a constant, ideal number, to a new analyzed
+// expression with a constant value of type t.
+//
+// Returns nil (after reporting a diagnostic) if the constant cannot
+// be represented in t.
+//
+// TODO(austin) Rename to resolveIdeal or something?
+func (a *expr) convertTo(t Type) *expr {
+ if !a.t.isIdeal() {
+ log.Panicf("attempted to convert from %v, expected ideal", a.t)
+ }
+
+ // Normalize the constant into a rational for range checking.
+ var rat *big.Rat
+
+ // XXX(Spec) The spec says "It is erroneous".
+ //
+ // It is an error to assign a value with a non-zero fractional
+ // part to an integer, or if the assignment would overflow or
+ // underflow, or in general if the value cannot be represented
+ // by the type of the variable.
+ switch a.t {
+ case IdealFloatType:
+ rat = a.asIdealFloat()()
+ if t.isInteger() && !rat.IsInt() {
+ a.diag("constant %v truncated to integer", rat.FloatString(6))
+ return nil
+ }
+ case IdealIntType:
+ i := a.asIdealInt()()
+ rat = new(big.Rat).SetInt(i)
+ default:
+ log.Panicf("unexpected ideal type %v", a.t)
+ }
+
+ // Check bounds
+ if t, ok := t.lit().(BoundedType); ok {
+ if rat.Cmp(t.minVal()) < 0 {
+ a.diag("constant %v underflows %v", rat.FloatString(6), t)
+ return nil
+ }
+ if rat.Cmp(t.maxVal()) > 0 {
+ a.diag("constant %v overflows %v", rat.FloatString(6), t)
+ return nil
+ }
+ }
+
+ // Convert rat to type t.
+ res := a.newExpr(t, a.desc)
+ switch t := t.lit().(type) {
+ case *uintType:
+ n, d := rat.Num(), rat.Denom()
+ f := new(big.Int).Quo(n, d)
+ // NOTE(review): a negative constant is converted via absolute
+ // value here rather than being rejected or wrapped — confirm
+ // this is the intended semantics.
+ f = f.Abs(f)
+ v := uint64(f.Int64())
+ res.eval = func(*Thread) uint64 { return v }
+ case *intType:
+ n, d := rat.Num(), rat.Denom()
+ f := new(big.Int).Quo(n, d)
+ v := f.Int64()
+ res.eval = func(*Thread) int64 { return v }
+ case *idealIntType:
+ n, d := rat.Num(), rat.Denom()
+ f := new(big.Int).Quo(n, d)
+ res.eval = func() *big.Int { return f }
+ case *floatType:
+ n, d := rat.Num(), rat.Denom()
+ v := float64(n.Int64()) / float64(d.Int64())
+ res.eval = func(*Thread) float64 { return v }
+ case *idealFloatType:
+ res.eval = func() *big.Rat { return rat }
+ default:
+ log.Panicf("cannot convert to type %T", t)
+ }
+
+ return res
+}
+
+// convertToInt converts this expression to an integer, if possible,
+// or produces an error if not. This accepts ideal ints, uints, and
+// ints. If max is not -1, produces an error if possible if the value
+// exceeds max. If negErr is not "", produces an error if possible if
+// the value is negative.
+func (a *expr) convertToInt(max int64, negErr string, errOp string) *expr {
+ switch a.t.lit().(type) {
+ case *idealIntType:
+ // Constant: check sign and bound at compile time.
+ val := a.asIdealInt()()
+ if negErr != "" && val.Sign() < 0 {
+ a.diag("negative %s: %s", negErr, val)
+ return nil
+ }
+ bound := max
+ // Slices may address one past the end, so widen the bound.
+ if negErr == "slice" {
+ bound++
+ }
+ if max != -1 && val.Cmp(big.NewInt(bound)) >= 0 {
+ a.diag("index %s exceeds length %d", val, max)
+ return nil
+ }
+ return a.convertTo(IntType)
+
+ case *uintType:
+ // Convert to int
+ na := a.newExpr(IntType, a.desc)
+ af := a.asUint()
+ na.eval = func(t *Thread) int64 { return int64(af(t)) }
+ return na
+
+ case *intType:
+ // Good as is
+ return a
+ }
+
+ a.diag("illegal operand type for %s\n\t%v", errOp, a.t)
+ return nil
+}
+
+// derefArray returns an expression of array type if the given
+// expression is a *array type. Otherwise, returns the given
+// expression.
+func (a *expr) derefArray() *expr {
+ if pt, ok := a.t.lit().(*PtrType); ok {
+ if _, ok := pt.Elem.lit().(*ArrayType); ok {
+ deref := a.compileStarExpr(a)
+ if deref == nil {
+ log.Panicf("failed to dereference *array")
+ }
+ return deref
+ }
+ }
+ return a
+}
+
+/*
+ * Assignments
+ */
+
+// An assignCompiler compiles assignment operations. Anything other
+// than short declarations should use the compileAssign wrapper.
+//
+// There are three valid types of assignment:
+// 1) T = T
+// Assigning a single expression with single-valued type to a
+// single-valued type.
+// 2) MT = T, T, ...
+// Assigning multiple expressions with single-valued types to a
+// multi-valued type.
+// 3) MT = MT
+// Assigning a single expression with multi-valued type to a
+// multi-valued type.
+type assignCompiler struct {
+ *compiler
+ pos token.Pos
+ // The RHS expressions. This may include nil's for
+ // expressions that failed to compile.
+ rs []*expr
+ // The (possibly unary) MultiType of the RHS.
+ rmt *MultiType
+ // Whether this is an unpack assignment (case 3).
+ isUnpack bool
+ // Whether map special assignment forms are allowed.
+ allowMap bool
+ // Whether this is a "r, ok = a[x]" assignment.
+ isMapUnpack bool
+ // The operation name to use in error messages, such as
+ // "assignment" or "function call".
+ errOp string
+ // The name to use for positions in error messages, such as
+ // "argument".
+ errPosName string
+}
+
+// Type check the RHS of an assignment, returning a new assignCompiler
+// and indicating if the type check succeeded. This always returns an
+// assignCompiler with rmt set, but if type checking fails, slots in
+// the MultiType may be nil. If rs contains nil's, type checking will
+// fail and these expressions given a nil type.
+func (a *compiler) checkAssign(pos token.Pos, rs []*expr, errOp, errPosName string) (*assignCompiler, bool) {
+ c := &assignCompiler{
+ compiler: a,
+ pos: pos,
+ rs: rs,
+ errOp: errOp,
+ errPosName: errPosName,
+ }
+
+ // Is this an unpack?
+ if len(rs) == 1 && rs[0] != nil {
+ if rmt, isUnpack := rs[0].t.(*MultiType); isUnpack {
+ c.rmt = rmt
+ c.isUnpack = true
+ return c, true
+ }
+ }
+
+ // Create MultiType for RHS and check that all RHS expressions
+ // are single-valued.
+ rts := make([]Type, len(rs))
+ ok := true
+ for i, r := range rs {
+ if r == nil {
+ ok = false
+ continue
+ }
+
+ if _, isMT := r.t.(*MultiType); isMT {
+ r.diag("multi-valued expression not allowed in %s", errOp)
+ ok = false
+ continue
+ }
+
+ rts[i] = r.t
+ }
+
+ c.rmt = NewMultiType(rts)
+ return c, ok
+}
+
+// allowMapForms enables the map-specific assignment forms for an
+// assignment with nls left-hand sides.
+func (a *assignCompiler) allowMapForms(nls int) {
+ a.allowMap = true
+
+ // Update unpacking info if this is r, ok = a[x]
+ if nls == 2 && len(a.rs) == 1 && a.rs[0] != nil && a.rs[0].evalMapValue != nil {
+ a.isUnpack = true
+ a.rmt = NewMultiType([]Type{a.rs[0].t, BoolType})
+ a.isMapUnpack = true
+ }
+}
+
+// compile type checks and compiles an assignment operation, returning
+// a function that expects an l-value and the frame in which to
+// evaluate the RHS expressions. The l-value must have exactly the
+// type given by lt. Returns nil if type checking fails.
+func (a *assignCompiler) compile(b *block, lt Type) func(Value, *Thread) {
+ lmt, isMT := lt.(*MultiType)
+ rmt, isUnpack := a.rmt, a.isUnpack
+
+ // Create unary MultiType for single LHS
+ if !isMT {
+ lmt = NewMultiType([]Type{lt})
+ }
+
+ // Check that the assignment count matches
+ lcount := len(lmt.Elems)
+ rcount := len(rmt.Elems)
+ if lcount != rcount {
+ msg := "not enough"
+ pos := a.pos
+ if rcount > lcount {
+ msg = "too many"
+ if lcount > 0 {
+ pos = a.rs[lcount-1].pos
+ }
+ }
+ a.diagAt(pos, "%s %ss for %s\n\t%s\n\t%s", msg, a.errPosName, a.errOp, lt, rmt)
+ return nil
+ }
+
+ bad := false
+
+ // If this is an unpack, create a temporary to store the
+ // multi-value and replace the RHS with expressions to pull
+ // out values from the temporary. Technically, this is only
+ // necessary when we need to perform assignment conversions.
+ var effect func(*Thread)
+ if isUnpack {
+ // This leaks a slot, but is definitely safe.
+ temp := b.DefineTemp(a.rmt)
+ tempIdx := temp.Index
+ if tempIdx < 0 {
+ panic(fmt.Sprintln("tempidx", tempIdx))
+ }
+ if a.isMapUnpack {
+ // r, ok = m[k] form: effect looks the key up and stores
+ // {value-or-zero, found} into the temporary.
+ rf := a.rs[0].evalMapValue
+ vt := a.rmt.Elems[0]
+ effect = func(t *Thread) {
+ m, k := rf(t)
+ v := m.Elem(t, k)
+ found := boolV(true)
+ if v == nil {
+ found = boolV(false)
+ v = vt.Zero()
+ }
+ t.f.Vars[tempIdx] = multiV([]Value{v, &found})
+ }
+ } else {
+ rf := a.rs[0].asMulti()
+ effect = func(t *Thread) { t.f.Vars[tempIdx] = multiV(rf(t)) }
+ }
+ // Replace the single multi-valued RHS with one synthetic
+ // expression per element, each reading its slot of the
+ // temporary.
+ orig := a.rs[0]
+ a.rs = make([]*expr, len(a.rmt.Elems))
+ for i, t := range a.rmt.Elems {
+ if t.isIdeal() {
+ log.Panicf("Right side of unpack contains ideal: %s", rmt)
+ }
+ a.rs[i] = orig.newExpr(t, orig.desc)
+ index := i
+ a.rs[i].genValue(func(t *Thread) Value { return t.f.Vars[tempIdx].(multiV)[index] })
+ }
+ }
+ // Now len(a.rs) == len(a.rmt) and we've reduced any unpacking
+ // to multi-assignment.
+
+ // TODO(austin) Deal with assignment special cases.
+
+ // Values of any type may always be assigned to variables of
+ // compatible static type.
+ for i, lt := range lmt.Elems {
+ rt := rmt.Elems[i]
+
+ // When [an ideal is] (used in an expression) assigned
+ // to a variable or typed constant, the destination
+ // must be able to represent the assigned value.
+ if rt.isIdeal() {
+ a.rs[i] = a.rs[i].convertTo(lmt.Elems[i])
+ if a.rs[i] == nil {
+ bad = true
+ continue
+ }
+ rt = a.rs[i].t
+ }
+
+ // A pointer p to an array can be assigned to a slice
+ // variable v with compatible element type if the type
+ // of p or v is unnamed.
+ if rpt, ok := rt.lit().(*PtrType); ok {
+ if at, ok := rpt.Elem.lit().(*ArrayType); ok {
+ if lst, ok := lt.lit().(*SliceType); ok {
+ if lst.Elem.compat(at.Elem, false) && (rt.lit() == Type(rt) || lt.lit() == Type(lt)) {
+ rf := a.rs[i].asPtr()
+ a.rs[i] = a.rs[i].newExpr(lt, a.rs[i].desc)
+ len := at.Len
+ a.rs[i].eval = func(t *Thread) Slice { return Slice{rf(t).(ArrayValue), len, len} }
+ rt = a.rs[i].t
+ }
+ }
+ }
+ }
+
+ if !lt.compat(rt, false) {
+ if len(a.rs) == 1 {
+ a.rs[0].diag("illegal operand types for %s\n\t%v\n\t%v", a.errOp, lt, rt)
+ } else {
+ a.rs[i].diag("illegal operand types in %s %d of %s\n\t%v\n\t%v", a.errPosName, i+1, a.errOp, lt, rt)
+ }
+ bad = true
+ }
+ }
+ if bad {
+ return nil
+ }
+
+ // Compile
+ if !isMT {
+ // Case 1
+ return genAssign(lt, a.rs[0])
+ }
+ // Case 2 or 3
+ as := make([]func(lv Value, t *Thread), len(a.rs))
+ for i, r := range a.rs {
+ as[i] = genAssign(lmt.Elems[i], r)
+ }
+ return func(lv Value, t *Thread) {
+ // Run the unpack side effect (if any) before distributing
+ // the element assignments.
+ if effect != nil {
+ effect(t)
+ }
+ lmv := lv.(multiV)
+ for i, a := range as {
+ a(lmv[i], t)
+ }
+ }
+}
+
+// compileAssign compiles an assignment operation without the full
+// generality of an assignCompiler. See assignCompiler for a
+// description of the arguments. Returns nil if type checking fails.
+func (a *compiler) compileAssign(pos token.Pos, b *block, lt Type, rs []*expr, errOp, errPosName string) func(Value, *Thread) {
+ ac, ok := a.checkAssign(pos, rs, errOp, errPosName)
+ if !ok {
+ return nil
+ }
+ return ac.compile(b, lt)
+}
+
+/*
+ * Expression compiler
+ */
+
+// An exprCompiler stores information used throughout the compilation
+// of a single expression. It does not embed funcCompiler because
+// expressions can appear at top level.
+type exprCompiler struct {
+ *compiler
+ // The block this expression is being compiled in.
+ block *block
+ // Whether this expression is used in a constant context.
+ constant bool
+}
+
+// compile compiles an expression AST. callCtx should be true if this
+// AST is in the function position of a function call node; it allows
+// the returned expression to be a type or a built-in function (which
+// otherwise result in errors).
+func (a *exprCompiler) compile(x ast.Expr, callCtx bool) *expr {
+ ei := &exprInfo{a.compiler, x.Pos()}
+
+ switch x := x.(type) {
+ // Literals
+ case *ast.BasicLit:
+ switch x.Kind {
+ case token.INT:
+ return ei.compileIntLit(string(x.Value))
+ case token.FLOAT:
+ return ei.compileFloatLit(string(x.Value))
+ case token.CHAR:
+ return ei.compileCharLit(string(x.Value))
+ case token.STRING:
+ return ei.compileStringLit(string(x.Value))
+ default:
+ log.Panicf("unexpected basic literal type %v", x.Kind)
+ }
+
+ case *ast.CompositeLit:
+ goto notimpl
+
+ case *ast.FuncLit:
+ decl := ei.compileFuncType(a.block, x.Type)
+ if decl == nil {
+ // TODO(austin) Try compiling the body,
+ // perhaps with dummy argument definitions
+ return nil
+ }
+ fn := ei.compileFunc(a.block, decl, x.Body)
+ if fn == nil {
+ return nil
+ }
+ if a.constant {
+ a.diagAt(x.Pos(), "function literal used in constant expression")
+ return nil
+ }
+ return ei.compileFuncLit(decl, fn)
+
+ // Types
+ case *ast.ArrayType:
+ // TODO(austin) Use a multi-type case
+ goto typeexpr
+
+ case *ast.ChanType:
+ goto typeexpr
+
+ case *ast.Ellipsis:
+ goto typeexpr
+
+ case *ast.FuncType:
+ goto typeexpr
+
+ case *ast.InterfaceType:
+ goto typeexpr
+
+ case *ast.MapType:
+ goto typeexpr
+
+ // Remaining expressions
+ case *ast.BadExpr:
+ // Error already reported by parser
+ a.silentErrors++
+ return nil
+
+ case *ast.BinaryExpr:
+ l, r := a.compile(x.X, false), a.compile(x.Y, false)
+ if l == nil || r == nil {
+ return nil
+ }
+ return ei.compileBinaryExpr(x.Op, l, r)
+
+ case *ast.CallExpr:
+ // Compile the callee in call context so that it may be a
+ // type (conversion) or a built-in function.
+ l := a.compile(x.Fun, true)
+ args := make([]*expr, len(x.Args))
+ bad := false
+ for i, arg := range x.Args {
+ // The first argument to make() and new() names a type,
+ // so it must be compiled as a type, not a value.
+ if i == 0 && l != nil && (l.t == Type(makeType) || l.t == Type(newType)) {
+ argei := &exprInfo{a.compiler, arg.Pos()}
+ args[i] = argei.exprFromType(a.compileType(a.block, arg))
+ } else {
+ args[i] = a.compile(arg, false)
+ }
+ if args[i] == nil {
+ bad = true
+ }
+ }
+ if bad || l == nil {
+ return nil
+ }
+ if a.constant {
+ a.diagAt(x.Pos(), "function call in constant context")
+ return nil
+ }
+
+ if l.valType != nil {
+ a.diagAt(x.Pos(), "type conversions not implemented")
+ return nil
+ } else if ft, ok := l.t.(*FuncType); ok && ft.builtin != "" {
+ return ei.compileBuiltinCallExpr(a.block, ft, args)
+ } else {
+ return ei.compileCallExpr(a.block, l, args)
+ }
+
+ case *ast.Ident:
+ return ei.compileIdent(a.block, a.constant, callCtx, x.Name)
+
+ case *ast.IndexExpr:
+ l, r := a.compile(x.X, false), a.compile(x.Index, false)
+ if l == nil || r == nil {
+ return nil
+ }
+ return ei.compileIndexExpr(l, r)
+
+ case *ast.SliceExpr:
+ var lo, hi *expr
+ arr := a.compile(x.X, false)
+ if x.Low == nil {
+ // beginning was omitted, so we need to provide it
+ ei := &exprInfo{a.compiler, x.Pos()}
+ lo = ei.compileIntLit("0")
+ } else {
+ lo = a.compile(x.Low, false)
+ }
+ if x.High == nil {
+ // End was omitted, so we need to compute len(x.X)
+ ei := &exprInfo{a.compiler, x.Pos()}
+ hi = ei.compileBuiltinCallExpr(a.block, lenType, []*expr{arr})
+ } else {
+ hi = a.compile(x.High, false)
+ }
+ if arr == nil || lo == nil || hi == nil {
+ return nil
+ }
+ return ei.compileSliceExpr(arr, lo, hi)
+
+ case *ast.KeyValueExpr:
+ goto notimpl
+
+ case *ast.ParenExpr:
+ // Parentheses are transparent; preserve the call context.
+ return a.compile(x.X, callCtx)
+
+ case *ast.SelectorExpr:
+ v := a.compile(x.X, false)
+ if v == nil {
+ return nil
+ }
+ return ei.compileSelectorExpr(v, x.Sel.Name)
+
+ case *ast.StarExpr:
+ // We pass down our call context because this could be
+ // a pointer type (and thus a type conversion)
+ v := a.compile(x.X, callCtx)
+ if v == nil {
+ return nil
+ }
+ if v.valType != nil {
+ // Turns out this was a pointer type, not a dereference
+ return ei.exprFromType(NewPtrType(v.valType))
+ }
+ return ei.compileStarExpr(v)
+
+ case *ast.StructType:
+ goto notimpl
+
+ case *ast.TypeAssertExpr:
+ goto notimpl
+
+ case *ast.UnaryExpr:
+ v := a.compile(x.X, false)
+ if v == nil {
+ return nil
+ }
+ return ei.compileUnaryExpr(x.Op, v)
+ }
+ log.Panicf("unexpected ast node type %T", x)
+ panic("unreachable")
+
+typeexpr:
+ // Reached via goto for AST nodes that denote types; a type is
+ // only legal in call position (conversions, make, new).
+ if !callCtx {
+ a.diagAt(x.Pos(), "type used as expression")
+ return nil
+ }
+ return ei.exprFromType(a.compileType(a.block, x))
+
+notimpl:
+ a.diagAt(x.Pos(), "%T expression node not implemented", x)
+ return nil
+}
+
+// exprFromType wraps a type in a placeholder expression (desc "type",
+// no value type) so type names can flow through the expression
+// compiler, e.g. in call position. Returns nil if t is nil, so earlier
+// compilation errors propagate.
+func (a *exprInfo) exprFromType(t Type) *expr {
+ if t == nil {
+ return nil
+ }
+ expr := a.newExpr(nil, "type")
+ expr.valType = t
+ return expr
+}
+
+// compileIdent resolves name in block b and compiles a reference to
+// the definition found: a constant, a variable (local or global), or a
+// type. constant reports whether we are in a constant context, in
+// which variables are illegal; callCtx permits types and built-in
+// functions, which are only meaningful in call position.
+func (a *exprInfo) compileIdent(b *block, constant bool, callCtx bool, name string) *expr {
+ bl, level, def := b.Lookup(name)
+ if def == nil {
+ a.diag("%s: undefined", name)
+ return nil
+ }
+ switch def := def.(type) {
+ case *Constant:
+ expr := a.newExpr(def.Type, "constant")
+ if ft, ok := def.Type.(*FuncType); ok && ft.builtin != "" {
+ // XXX(Spec) I don't think anything says that
+ // built-in functions can't be used as values.
+ if !callCtx {
+ a.diag("built-in function %s cannot be used as a value", ft.builtin)
+ return nil
+ }
+ // Otherwise, we leave the evaluators empty
+ // because this is handled specially
+ } else {
+ expr.genConstant(def.Value)
+ }
+ return expr
+ case *Variable:
+ if constant {
+ a.diag("variable %s used in constant expression", name)
+ return nil
+ }
+ if bl.global {
+ return a.compileGlobalVariable(def)
+ }
+ return a.compileVariable(level, def)
+ case Type:
+ if callCtx {
+ return a.exprFromType(def)
+ }
+ a.diag("type %v used as expression", name)
+ return nil
+ }
+ log.Panicf("name %s has unknown type %T", name, def)
+ panic("unreachable")
+}
+
+// compileVariable compiles a reference to the local variable v,
+// declared level frames up from the current one.
+func (a *exprInfo) compileVariable(level int, v *Variable) *expr {
+ if v.Type == nil {
+ // Placeholder definition from an earlier error
+ a.silentErrors++
+ return nil
+ }
+ expr := a.newExpr(v.Type, "variable")
+ expr.genIdentOp(level, v.Index)
+ return expr
+}
+
+// compileGlobalVariable compiles a reference to the global variable v,
+// lazily initializing its storage to the type's zero value if needed.
+func (a *exprInfo) compileGlobalVariable(v *Variable) *expr {
+ if v.Type == nil {
+ // Placeholder definition from an earlier error
+ a.silentErrors++
+ return nil
+ }
+ if v.Init == nil {
+ v.Init = v.Type.Zero()
+ }
+ expr := a.newExpr(v.Type, "variable")
+ // Capture the value itself so the closure does not depend on v.
+ val := v.Init
+ expr.genValue(func(t *Thread) Value { return val })
+ return expr
+}
+
+// compileIdealInt compiles a constant ideal-integer expression with
+// the given description, e.g. "integer literal".
+func (a *exprInfo) compileIdealInt(i *big.Int, desc string) *expr {
+ expr := a.newExpr(IdealIntType, desc)
+ expr.eval = func() *big.Int { return i }
+ return expr
+}
+
+// compileIntLit compiles an integer literal token into an ideal int.
+func (a *exprInfo) compileIntLit(lit string) *expr {
+ i, _ := new(big.Int).SetString(lit, 0)
+ return a.compileIdealInt(i, "integer literal")
+}
+
+// compileCharLit compiles a character literal token (quotes included)
+// into an ideal int holding the code point.
+func (a *exprInfo) compileCharLit(lit string) *expr {
+ if lit[0] != '\'' {
+ // Caught by parser
+ a.silentErrors++
+ return nil
+ }
+ v, _, tail, err := strconv.UnquoteChar(lit[1:], '\'')
+ if err != nil || tail != "'" {
+ // Caught by parser
+ a.silentErrors++
+ return nil
+ }
+ return a.compileIdealInt(big.NewInt(int64(v)), "character literal")
+}
+
+// compileFloatLit compiles a float literal token into an ideal float.
+// It panics on malformed input, which the parser should have rejected.
+func (a *exprInfo) compileFloatLit(lit string) *expr {
+ f, ok := new(big.Rat).SetString(lit)
+ if !ok {
+ log.Panicf("malformed float literal %s at %v passed parser", lit, a.pos)
+ }
+ expr := a.newExpr(IdealFloatType, "float literal")
+ expr.eval = func() *big.Rat { return f }
+ return expr
+}
+
+// compileString compiles an already-unquoted constant string value.
+func (a *exprInfo) compileString(s string) *expr {
+ // Ideal strings don't have a named type but they are
+ // compatible with type string.
+
+ // TODO(austin) Use unnamed string type.
+ expr := a.newExpr(StringType, "string literal")
+ expr.eval = func(*Thread) string { return s }
+ return expr
+}
+
+// compileStringLit unquotes and compiles a string literal token.
+func (a *exprInfo) compileStringLit(lit string) *expr {
+ s, err := strconv.Unquote(lit)
+ if err != nil {
+ a.diag("illegal string literal, %v", err)
+ return nil
+ }
+ return a.compileString(s)
+}
+
+// compileStringList concatenates a list of compile-time string
+// constants into a single string constant.
+func (a *exprInfo) compileStringList(list []*expr) *expr {
+ ss := make([]string, len(list))
+ for i, s := range list {
+ ss[i] = s.asString()(nil)
+ }
+ return a.compileString(strings.Join(ss, ""))
+}
+
+// compileFuncLit compiles a function literal from its compiled
+// declaration and body evaluator.
+func (a *exprInfo) compileFuncLit(decl *FuncDecl, fn func(*Thread) Func) *expr {
+ expr := a.newExpr(decl.Type, "function literal")
+ expr.eval = fn
+ return expr
+}
+
+// compileSelectorExpr compiles the field selection v.name, searching
+// v's type — with implicit pointer dereference and recursion into
+// anonymous (embedded) struct fields — for the shallowest unambiguous
+// match. Method selection is detected but not implemented.
+func (a *exprInfo) compileSelectorExpr(v *expr, name string) *expr {
+ // mark marks a field that matches the selector name. It
+ // tracks the best depth found so far and whether more than
+ // one field has been found at that depth.
+ bestDepth := -1
+ ambig := false
+ amberr := ""
+ mark := func(depth int, pathName string) {
+ switch {
+ case bestDepth == -1 || depth < bestDepth:
+ bestDepth = depth
+ ambig = false
+ amberr = ""
+
+ case depth == bestDepth:
+ ambig = true
+
+ default:
+ log.Panicf("Marked field at depth %d, but already found one at depth %d", depth, bestDepth)
+ }
+ // Record the path for a possible ambiguity diagnostic.
+ amberr += "\n\t" + pathName[1:]
+ }
+
+ visited := make(map[Type]bool)
+
+ // find recursively searches for the named field, starting at
+ // type t. If it finds the named field, it returns a function
+ // which takes an expr that represents a value of type 't' and
+ // returns an expr that retrieves the named field. We delay
+ // expr construction to avoid producing lots of useless expr's
+ // as we search.
+ //
+ // TODO(austin) Now that the expression compiler works on
+ // semantic values instead of AST's, there should be a much
+ // better way of doing this.
+ var find func(Type, int, string) func(*expr) *expr
+ find = func(t Type, depth int, pathName string) func(*expr) *expr {
+ // Don't bother looking if we've found something shallower
+ if bestDepth != -1 && bestDepth < depth {
+ return nil
+ }
+
+ // Don't check the same type twice and avoid loops
+ if visited[t] {
+ return nil
+ }
+ visited[t] = true
+
+ // Implicit dereference
+ deref := false
+ if ti, ok := t.(*PtrType); ok {
+ deref = true
+ t = ti.Elem
+ }
+
+ // If it's a named type, look for methods
+ if ti, ok := t.(*NamedType); ok {
+ _, ok := ti.methods[name]
+ if ok {
+ mark(depth, pathName+"."+name)
+ log.Panic("Methods not implemented")
+ }
+ t = ti.Def
+ }
+
+ // If it's a struct type, check fields and embedded types
+ var builder func(*expr) *expr
+ if t, ok := t.(*StructType); ok {
+ for i, f := range t.Elems {
+ var sub func(*expr) *expr
+ switch {
+ case f.Name == name:
+ mark(depth, pathName+"."+name)
+ sub = func(e *expr) *expr { return e }
+
+ case f.Anonymous:
+ sub = find(f.Type, depth+1, pathName+"."+f.Name)
+ if sub == nil {
+ continue
+ }
+
+ default:
+ continue
+ }
+
+ // We found something. Create a
+ // builder for accessing this field.
+ ft := f.Type
+ index := i
+ builder = func(parent *expr) *expr {
+ if deref {
+ parent = a.compileStarExpr(parent)
+ }
+ expr := a.newExpr(ft, "selector expression")
+ pf := parent.asStruct()
+ evalAddr := func(t *Thread) Value { return pf(t).Field(t, index) }
+ expr.genValue(evalAddr)
+ return sub(expr)
+ }
+ }
+ }
+
+ return builder
+ }
+
+ builder := find(v.t, 0, "")
+ if builder == nil {
+ a.diag("type %v has no field or method %s", v.t, name)
+ return nil
+ }
+ if ambig {
+ a.diag("field %s is ambiguous in type %v%s", name, v.t, amberr)
+ return nil
+ }
+
+ return builder(v)
+}
+
+// compileSliceExpr compiles arr[lo:hi] for arrays, slices, and
+// strings. Indices are converted to int; bounds are checked at run
+// time and violations abort the thread with SliceError.
+func (a *exprInfo) compileSliceExpr(arr, lo, hi *expr) *expr {
+ // Type check object
+ arr = arr.derefArray()
+
+ var at Type
+ var maxIndex int64 = -1
+
+ switch lt := arr.t.lit().(type) {
+ case *ArrayType:
+ at = NewSliceType(lt.Elem)
+ maxIndex = lt.Len
+
+ case *SliceType:
+ at = lt
+
+ case *stringType:
+ at = lt
+
+ default:
+ a.diag("cannot slice %v", arr.t)
+ return nil
+ }
+
+ // Type check index and convert to int
+ // XXX(Spec) It's unclear if ideal floats with no
+ // fractional part are allowed here. 6g allows it. I
+ // believe that's wrong.
+ lo = lo.convertToInt(maxIndex, "slice", "slice")
+ hi = hi.convertToInt(maxIndex, "slice", "slice")
+ if lo == nil || hi == nil {
+ return nil
+ }
+
+ expr := a.newExpr(at, "slice expression")
+
+ // Compile
+ lof := lo.asInt()
+ hif := hi.asInt()
+ switch lt := arr.t.lit().(type) {
+ case *ArrayType:
+ arrf := arr.asArray()
+ bound := lt.Len
+ expr.eval = func(t *Thread) Slice {
+ arr, lo, hi := arrf(t), lof(t), hif(t)
+ if lo > hi || hi > bound || lo < 0 {
+ t.Abort(SliceError{lo, hi, bound})
+ }
+ return Slice{arr.Sub(lo, bound-lo), hi - lo, bound - lo}
+ }
+
+ case *SliceType:
+ arrf := arr.asSlice()
+ expr.eval = func(t *Thread) Slice {
+ arr, lo, hi := arrf(t), lof(t), hif(t)
+ if lo > hi || hi > arr.Cap || lo < 0 {
+ t.Abort(SliceError{lo, hi, arr.Cap})
+ }
+ return Slice{arr.Base.Sub(lo, arr.Cap-lo), hi - lo, arr.Cap - lo}
+ }
+
+ case *stringType:
+ arrf := arr.asString()
+ // TODO(austin) This pulls over the whole string in a
+ // remote setting, instead of creating a substring backed
+ // by remote memory.
+ expr.eval = func(t *Thread) string {
+ arr, lo, hi := arrf(t), lof(t), hif(t)
+ if lo > hi || hi > int64(len(arr)) || lo < 0 {
+ t.Abort(SliceError{lo, hi, int64(len(arr))})
+ }
+ return arr[lo:hi]
+ }
+
+ default:
+ log.Panicf("unexpected left operand type %T", arr.t.lit())
+ }
+
+ return expr
+}
+
+// compileIndexExpr compiles l[r] for arrays, slices, strings, and
+// maps. Integer indexing is bounds-checked at run time (IndexError);
+// nil slices/maps abort with NilPointerError; missing map keys abort
+// with KeyError.
+func (a *exprInfo) compileIndexExpr(l, r *expr) *expr {
+ // Type check object
+ l = l.derefArray()
+
+ var at Type
+ intIndex := false
+ var maxIndex int64 = -1
+
+ switch lt := l.t.lit().(type) {
+ case *ArrayType:
+ at = lt.Elem
+ intIndex = true
+ maxIndex = lt.Len
+
+ case *SliceType:
+ at = lt.Elem
+ intIndex = true
+
+ case *stringType:
+ at = Uint8Type
+ intIndex = true
+
+ case *MapType:
+ at = lt.Elem
+ if r.t.isIdeal() {
+ r = r.convertTo(lt.Key)
+ if r == nil {
+ return nil
+ }
+ }
+ if !lt.Key.compat(r.t, false) {
+ a.diag("cannot use %s as index into %s", r.t, lt)
+ return nil
+ }
+
+ default:
+ a.diag("cannot index into %v", l.t)
+ return nil
+ }
+
+ // Type check index and convert to int if necessary
+ if intIndex {
+ // XXX(Spec) It's unclear if ideal floats with no
+ // fractional part are allowed here. 6g allows it. I
+ // believe that's wrong.
+ r = r.convertToInt(maxIndex, "index", "index")
+ if r == nil {
+ return nil
+ }
+ }
+
+ expr := a.newExpr(at, "index expression")
+
+ // Compile
+ switch lt := l.t.lit().(type) {
+ case *ArrayType:
+ lf := l.asArray()
+ rf := r.asInt()
+ bound := lt.Len
+ expr.genValue(func(t *Thread) Value {
+ l, r := lf(t), rf(t)
+ if r < 0 || r >= bound {
+ t.Abort(IndexError{r, bound})
+ }
+ return l.Elem(t, r)
+ })
+
+ case *SliceType:
+ lf := l.asSlice()
+ rf := r.asInt()
+ expr.genValue(func(t *Thread) Value {
+ l, r := lf(t), rf(t)
+ if l.Base == nil {
+ t.Abort(NilPointerError{})
+ }
+ if r < 0 || r >= l.Len {
+ t.Abort(IndexError{r, l.Len})
+ }
+ return l.Base.Elem(t, r)
+ })
+
+ case *stringType:
+ lf := l.asString()
+ rf := r.asInt()
+ // TODO(austin) This pulls over the whole string in a
+ // remote setting, instead of just the one character.
+ expr.eval = func(t *Thread) uint64 {
+ l, r := lf(t), rf(t)
+ if r < 0 || r >= int64(len(l)) {
+ t.Abort(IndexError{r, int64(len(l))})
+ }
+ return uint64(l[r])
+ }
+
+ case *MapType:
+ lf := l.asMap()
+ rf := r.asInterface()
+ expr.genValue(func(t *Thread) Value {
+ m := lf(t)
+ k := rf(t)
+ if m == nil {
+ t.Abort(NilPointerError{})
+ }
+ e := m.Elem(t, k)
+ if e == nil {
+ t.Abort(KeyError{k})
+ }
+ return e
+ })
+ // genValue makes things addressable, but map values
+ // aren't addressable.
+ expr.evalAddr = nil
+ expr.evalMapValue = func(t *Thread) (Map, interface{}) {
+ // TODO(austin) Key check? nil check?
+ return lf(t), rf(t)
+ }
+
+ default:
+ log.Panicf("unexpected left operand type %T", l.t.lit())
+ }
+
+ return expr
+}
+
+// compileCallExpr compiles a call of the non-built-in function l with
+// arguments as. The result type is empty, single, or multi-valued
+// according to the function's out-parameter count.
+func (a *exprInfo) compileCallExpr(b *block, l *expr, as []*expr) *expr {
+ // TODO(austin) Variadic functions.
+
+ // Type check
+
+ // XXX(Spec) Calling a named function type is okay. I really
+ // think there needs to be a general discussion of named
+ // types. A named type creates a new, distinct type, but the
+ // type of that type is still whatever it's defined to. Thus,
+ // in "type Foo int", Foo is still an integer type and in
+ // "type Foo func()", Foo is a function type.
+ lt, ok := l.t.lit().(*FuncType)
+ if !ok {
+ a.diag("cannot call non-function type %v", l.t)
+ return nil
+ }
+
+ // The arguments must be single-valued expressions assignment
+ // compatible with the parameters of F.
+ //
+ // XXX(Spec) The spec is wrong. It can also be a single
+ // multi-valued expression.
+ nin := len(lt.In)
+ assign := a.compileAssign(a.pos, b, NewMultiType(lt.In), as, "function call", "argument")
+ if assign == nil {
+ return nil
+ }
+
+ var t Type
+ nout := len(lt.Out)
+ switch nout {
+ case 0:
+ t = EmptyType
+ case 1:
+ t = lt.Out[0]
+ default:
+ t = NewMultiType(lt.Out)
+ }
+ expr := a.newExpr(t, "function call")
+
+ // Gather argument and out types to initialize frame variables
+ vts := make([]Type, nin+nout)
+ copy(vts, lt.In)
+ copy(vts[nin:], lt.Out)
+
+ // Compile
+ lf := l.asFunc()
+ call := func(t *Thread) []Value {
+ fun := lf(t)
+ fr := fun.NewFrame()
+ for i, t := range vts {
+ fr.Vars[i] = t.Zero()
+ }
+ assign(multiV(fr.Vars[0:nin]), t)
+ // Temporarily install the callee's frame for the call.
+ oldf := t.f
+ t.f = fr
+ fun.Call(t)
+ t.f = oldf
+ // The out parameters live after the in parameters.
+ return fr.Vars[nin : nin+nout]
+ }
+ expr.genFuncCall(call)
+
+ return expr
+}
+
+// compileBuiltinCallExpr compiles a call to the built-in function ft
+// (cap, copy, len, make, new, panic, print, println; close/closed are
+// diagnosed as unimplemented) with arguments as.
+func (a *exprInfo) compileBuiltinCallExpr(b *block, ft *FuncType, as []*expr) *expr {
+ // checkCount reports whether the argument count is within
+ // [min, max], diagnosing otherwise.
+ checkCount := func(min, max int) bool {
+ if len(as) < min {
+ a.diag("not enough arguments to %s", ft.builtin)
+ return false
+ } else if len(as) > max {
+ a.diag("too many arguments to %s", ft.builtin)
+ return false
+ }
+ return true
+ }
+
+ switch ft {
+ case capType:
+ if !checkCount(1, 1) {
+ return nil
+ }
+ arg := as[0].derefArray()
+ expr := a.newExpr(IntType, "function call")
+ switch t := arg.t.lit().(type) {
+ case *ArrayType:
+ // TODO(austin) It would be nice if this could
+ // be a constant int.
+ v := t.Len
+ expr.eval = func(t *Thread) int64 { return v }
+
+ case *SliceType:
+ vf := arg.asSlice()
+ expr.eval = func(t *Thread) int64 { return vf(t).Cap }
+
+ //case *ChanType:
+
+ default:
+ a.diag("illegal argument type for cap function\n\t%v", arg.t)
+ return nil
+ }
+ return expr
+
+ case copyType:
+ if !checkCount(2, 2) {
+ return nil
+ }
+ src := as[1]
+ dst := as[0]
+ if src.t != dst.t {
+ a.diag("arguments to built-in function 'copy' must have same type\nsrc: %s\ndst: %s\n", src.t, dst.t)
+ return nil
+ }
+ if _, ok := src.t.lit().(*SliceType); !ok {
+ a.diag("src argument to 'copy' must be a slice (got: %s)", src.t)
+ return nil
+ }
+ if _, ok := dst.t.lit().(*SliceType); !ok {
+ a.diag("dst argument to 'copy' must be a slice (got: %s)", dst.t)
+ return nil
+ }
+ expr := a.newExpr(IntType, "function call")
+ srcf := src.asSlice()
+ dstf := dst.asSlice()
+ expr.eval = func(t *Thread) int64 {
+ src, dst := srcf(t), dstf(t)
+ // Copy min(len(src), len(dst)) elements.
+ nelems := src.Len
+ if nelems > dst.Len {
+ nelems = dst.Len
+ }
+ dst.Base.Sub(0, nelems).Assign(t, src.Base.Sub(0, nelems))
+ return nelems
+ }
+ return expr
+
+ case lenType:
+ if !checkCount(1, 1) {
+ return nil
+ }
+ arg := as[0].derefArray()
+ expr := a.newExpr(IntType, "function call")
+ switch t := arg.t.lit().(type) {
+ case *stringType:
+ vf := arg.asString()
+ expr.eval = func(t *Thread) int64 { return int64(len(vf(t))) }
+
+ case *ArrayType:
+ // TODO(austin) It would be nice if this could
+ // be a constant int.
+ v := t.Len
+ expr.eval = func(t *Thread) int64 { return v }
+
+ case *SliceType:
+ vf := arg.asSlice()
+ expr.eval = func(t *Thread) int64 { return vf(t).Len }
+
+ case *MapType:
+ vf := arg.asMap()
+ expr.eval = func(t *Thread) int64 {
+ // XXX(Spec) What's the len of an
+ // uninitialized map?
+ m := vf(t)
+ if m == nil {
+ return 0
+ }
+ return m.Len(t)
+ }
+
+ //case *ChanType:
+
+ default:
+ a.diag("illegal argument type for len function\n\t%v", arg.t)
+ return nil
+ }
+ return expr
+
+ case makeType:
+ if !checkCount(1, 3) {
+ return nil
+ }
+ // XXX(Spec) What are the types of the
+ // arguments? Do they have to be ints? 6g
+ // accepts any integral type.
+ var lenexpr, capexpr *expr
+ var lenf, capf func(*Thread) int64
+ if len(as) > 1 {
+ lenexpr = as[1].convertToInt(-1, "length", "make function")
+ if lenexpr == nil {
+ return nil
+ }
+ lenf = lenexpr.asInt()
+ }
+ if len(as) > 2 {
+ capexpr = as[2].convertToInt(-1, "capacity", "make function")
+ if capexpr == nil {
+ return nil
+ }
+ capf = capexpr.asInt()
+ }
+
+ // The first argument was compiled as a type (see compile).
+ switch t := as[0].valType.lit().(type) {
+ case *SliceType:
+ // A new, initialized slice value for a given
+ // element type T is made using the built-in
+ // function make, which takes a slice type and
+ // parameters specifying the length and
+ // optionally the capacity.
+ if !checkCount(2, 3) {
+ return nil
+ }
+ et := t.Elem
+ expr := a.newExpr(t, "function call")
+ expr.eval = func(t *Thread) Slice {
+ l := lenf(t)
+ // XXX(Spec) What if len or cap is
+ // negative? The runtime panics.
+ if l < 0 {
+ t.Abort(NegativeLengthError{l})
+ }
+ c := l
+ if capf != nil {
+ c = capf(t)
+ if c < 0 {
+ t.Abort(NegativeCapacityError{c})
+ }
+ // XXX(Spec) What happens if
+ // len > cap? The runtime
+ // sets cap to len.
+ if l > c {
+ c = l
+ }
+ }
+ base := arrayV(make([]Value, c))
+ for i := int64(0); i < c; i++ {
+ base[i] = et.Zero()
+ }
+ return Slice{&base, l, c}
+ }
+ return expr
+
+ case *MapType:
+ // A new, empty map value is made using the
+ // built-in function make, which takes the map
+ // type and an optional capacity hint as
+ // arguments.
+ if !checkCount(1, 2) {
+ return nil
+ }
+ expr := a.newExpr(t, "function call")
+ expr.eval = func(t *Thread) Map {
+ if lenf == nil {
+ return make(evalMap)
+ }
+ l := lenf(t)
+ return make(evalMap, l)
+ }
+ return expr
+
+ //case *ChanType:
+
+ default:
+ a.diag("illegal argument type for make function\n\t%v", as[0].valType)
+ return nil
+ }
+
+ case closeType, closedType:
+ a.diag("built-in function %s not implemented", ft.builtin)
+ return nil
+
+ case newType:
+ if !checkCount(1, 1) {
+ return nil
+ }
+
+ t := as[0].valType
+ expr := a.newExpr(NewPtrType(t), "new")
+ expr.eval = func(*Thread) Value { return t.Zero() }
+ return expr
+
+ case panicType, printType, printlnType:
+ evals := make([]func(*Thread) interface{}, len(as))
+ for i, x := range as {
+ evals[i] = x.asInterface()
+ }
+ // println separates with spaces; panic and println add a
+ // trailing newline.
+ spaces := ft == printlnType
+ newline := ft != printType
+ printer := func(t *Thread) {
+ for i, eval := range evals {
+ if i > 0 && spaces {
+ print(" ")
+ }
+ v := eval(t)
+ type stringer interface {
+ String() string
+ }
+ switch v1 := v.(type) {
+ case bool:
+ print(v1)
+ case uint64:
+ print(v1)
+ case int64:
+ print(v1)
+ case float64:
+ print(v1)
+ case string:
+ print(v1)
+ case stringer:
+ print(v1.String())
+ default:
+ print("???")
+ }
+ }
+ if newline {
+ print("\n")
+ }
+ }
+ expr := a.newExpr(EmptyType, "print")
+ expr.exec = printer
+ if ft == panicType {
+ // panic prints its arguments, then aborts the thread.
+ expr.exec = func(t *Thread) {
+ printer(t)
+ t.Abort(os.NewError("panic"))
+ }
+ }
+ return expr
+ }
+
+ log.Panicf("unexpected built-in function '%s'", ft.builtin)
+ panic("unreachable")
+}
+
+// compileStarExpr compiles the pointer dereference *v. Dereferencing
+// a nil pointer aborts the thread with NilPointerError at run time.
+func (a *exprInfo) compileStarExpr(v *expr) *expr {
+ switch vt := v.t.lit().(type) {
+ case *PtrType:
+ expr := a.newExpr(vt.Elem, "indirect expression")
+ vf := v.asPtr()
+ expr.genValue(func(t *Thread) Value {
+ v := vf(t)
+ if v == nil {
+ t.Abort(NilPointerError{})
+ }
+ return v
+ })
+ return expr
+ }
+
+ a.diagOpType(token.MUL, v.t)
+ return nil
+}
+
+// unaryOpDescs caches a human-readable description per unary operator.
+var unaryOpDescs = make(map[token.Token]string)
+
+// compileUnaryExpr type-checks and compiles the unary expression
+// "op v" for +, -, !, ^ (bitwise complement), and & (address-of).
+// <- (receive) is not implemented.
+func (a *exprInfo) compileUnaryExpr(op token.Token, v *expr) *expr {
+ // Type check
+ var t Type
+ switch op {
+ case token.ADD, token.SUB:
+ if !v.t.isInteger() && !v.t.isFloat() {
+ a.diagOpType(op, v.t)
+ return nil
+ }
+ t = v.t
+
+ case token.NOT:
+ if !v.t.isBoolean() {
+ a.diagOpType(op, v.t)
+ return nil
+ }
+ t = BoolType
+
+ case token.XOR:
+ if !v.t.isInteger() {
+ a.diagOpType(op, v.t)
+ return nil
+ }
+ t = v.t
+
+ case token.AND:
+ // The unary prefix address-of operator & generates
+ // the address of its operand, which must be a
+ // variable, pointer indirection, field selector, or
+ // array or slice indexing operation.
+ if v.evalAddr == nil {
+ a.diag("cannot take the address of %s", v.desc)
+ return nil
+ }
+
+ // TODO(austin) Implement "It is illegal to take the
+ // address of a function result variable" once I have
+ // function result variables.
+
+ t = NewPtrType(v.t)
+
+ case token.ARROW:
+ log.Panicf("Unary op %v not implemented", op)
+
+ default:
+ log.Panicf("unknown unary operator %v", op)
+ }
+
+ desc, ok := unaryOpDescs[op]
+ if !ok {
+ desc = "unary " + op.String() + " expression"
+ unaryOpDescs[op] = desc
+ }
+
+ // Compile
+ expr := a.newExpr(t, desc)
+ switch op {
+ case token.ADD:
+ // Just compile it out
+ expr = v
+ expr.desc = desc
+
+ case token.SUB:
+ expr.genUnaryOpNeg(v)
+
+ case token.NOT:
+ expr.genUnaryOpNot(v)
+
+ case token.XOR:
+ expr.genUnaryOpXor(v)
+
+ case token.AND:
+ // Address-of just exposes the operand's address evaluator.
+ vf := v.evalAddr
+ expr.eval = func(t *Thread) Value { return vf(t) }
+
+ default:
+ log.Panicf("Compilation of unary op %v not implemented", op)
+ }
+
+ return expr
+}
+
+// binOpDescs caches a human-readable description per binary operator.
+var binOpDescs = make(map[token.Token]string)
+
+// compileBinaryExpr type-checks and compiles the binary expression
+// "l op r". It first applies the ideal-constant conversion rules,
+// rejects ideal division by zero, and constant-folds shifts whose left
+// operand is ideal. <- (channel send) is not implemented.
+func (a *exprInfo) compileBinaryExpr(op token.Token, l, r *expr) *expr {
+ // Save the original types of l.t and r.t for error messages.
+ origlt := l.t
+ origrt := r.t
+
+ // XXX(Spec) What is the exact definition of a "named type"?
+
+ // XXX(Spec) Arithmetic operators: "Integer types" apparently
+ // means all types compatible with basic integer types, though
+ // this is never explained. Likewise for float types, etc.
+ // This relates to the missing explanation of named types.
+
+ // XXX(Spec) Operators: "If both operands are ideal numbers,
+ // the conversion is to ideal floats if one of the operands is
+ // an ideal float (relevant for / and %)." How is that
+ // relevant only for / and %? If I add an ideal int and an
+ // ideal float, I get an ideal float.
+
+ if op != token.SHL && op != token.SHR {
+ // Except in shift expressions, if one operand has
+ // numeric type and the other operand is an ideal
+ // number, the ideal number is converted to match the
+ // type of the other operand.
+ if (l.t.isInteger() || l.t.isFloat()) && !l.t.isIdeal() && r.t.isIdeal() {
+ r = r.convertTo(l.t)
+ } else if (r.t.isInteger() || r.t.isFloat()) && !r.t.isIdeal() && l.t.isIdeal() {
+ l = l.convertTo(r.t)
+ }
+ if l == nil || r == nil {
+ return nil
+ }
+
+ // Except in shift expressions, if both operands are
+ // ideal numbers and one is an ideal float, the other
+ // is converted to ideal float.
+ if l.t.isIdeal() && r.t.isIdeal() {
+ if l.t.isInteger() && r.t.isFloat() {
+ l = l.convertTo(r.t)
+ } else if l.t.isFloat() && r.t.isInteger() {
+ r = r.convertTo(l.t)
+ }
+ if l == nil || r == nil {
+ return nil
+ }
+ }
+ }
+
+ // Useful type predicates
+ // TODO(austin) CL 33668 mandates identical types except for comparisons.
+ compat := func() bool { return l.t.compat(r.t, false) }
+ integers := func() bool { return l.t.isInteger() && r.t.isInteger() }
+ floats := func() bool { return l.t.isFloat() && r.t.isFloat() }
+ strings := func() bool {
+ // TODO(austin) Deal with named types
+ return l.t == StringType && r.t == StringType
+ }
+ booleans := func() bool { return l.t.isBoolean() && r.t.isBoolean() }
+
+ // Type check
+ var t Type
+ switch op {
+ case token.ADD:
+ if !compat() || (!integers() && !floats() && !strings()) {
+ a.diagOpTypes(op, origlt, origrt)
+ return nil
+ }
+ t = l.t
+
+ case token.SUB, token.MUL, token.QUO:
+ if !compat() || (!integers() && !floats()) {
+ a.diagOpTypes(op, origlt, origrt)
+ return nil
+ }
+ t = l.t
+
+ case token.REM, token.AND, token.OR, token.XOR, token.AND_NOT:
+ if !compat() || !integers() {
+ a.diagOpTypes(op, origlt, origrt)
+ return nil
+ }
+ t = l.t
+
+ case token.SHL, token.SHR:
+ // XXX(Spec) Is it okay for the right operand to be an
+ // ideal float with no fractional part? "The right
+ // operand in a shift operation must always be of
+ // unsigned integer type or an ideal number that can
+ // be safely converted into an unsigned integer type
+ // (§Arithmetic operators)" suggests so and 6g agrees.
+
+ if !l.t.isInteger() || !(r.t.isInteger() || r.t.isIdeal()) {
+ a.diagOpTypes(op, origlt, origrt)
+ return nil
+ }
+
+ // The right operand in a shift operation must
+ // always be of unsigned integer type or an ideal
+ // number that can be safely converted into an
+ // unsigned integer type.
+ if r.t.isIdeal() {
+ r2 := r.convertTo(UintType)
+ if r2 == nil {
+ return nil
+ }
+
+ // If the left operand is not ideal, convert
+ // the right to not ideal.
+ if !l.t.isIdeal() {
+ r = r2
+ }
+
+ // If both are ideal, but the right side isn't
+ // an ideal int, convert it to simplify things.
+ if l.t.isIdeal() && !r.t.isInteger() {
+ r = r.convertTo(IdealIntType)
+ if r == nil {
+ log.Panicf("conversion to uintType succeeded, but conversion to idealIntType failed")
+ }
+ }
+ } else if _, ok := r.t.lit().(*uintType); !ok {
+ a.diag("right operand of shift must be unsigned")
+ return nil
+ }
+
+ if l.t.isIdeal() && !r.t.isIdeal() {
+ // XXX(Spec) What is the meaning of "ideal >>
+ // non-ideal"? Russ says the ideal should be
+ // converted to an int. 6g propagates the
+ // type down from assignments as a hint.
+
+ l = l.convertTo(IntType)
+ if l == nil {
+ return nil
+ }
+ }
+
+ // At this point, we should have one of three cases:
+ // 1) uint SHIFT uint
+ // 2) int SHIFT uint
+ // 3) ideal int SHIFT ideal int
+
+ t = l.t
+
+ case token.LOR, token.LAND:
+ if !booleans() {
+ return nil
+ }
+ // XXX(Spec) There's no mention of *which* boolean
+ // type the logical operators return. From poking at
+ // 6g, it appears to be the named boolean type, NOT
+ // the type of the left operand, and NOT an unnamed
+ // boolean type.
+
+ t = BoolType
+
+ case token.ARROW:
+ // The operands in channel sends differ in type: one
+ // is always a channel and the other is a variable or
+ // value of the channel's element type.
+ log.Panic("Binary op <- not implemented")
+ t = BoolType
+
+ case token.LSS, token.GTR, token.LEQ, token.GEQ:
+ // XXX(Spec) It's really unclear what types which
+ // comparison operators apply to. I feel like the
+ // text is trying to paint a Venn diagram for me,
+ // which it's really pretty simple: <, <=, >, >= apply
+ // only to numeric types and strings. == and != apply
+ // to everything except arrays and structs, and there
+ // are some restrictions on when it applies to slices.
+
+ if !compat() || (!integers() && !floats() && !strings()) {
+ a.diagOpTypes(op, origlt, origrt)
+ return nil
+ }
+ t = BoolType
+
+ case token.EQL, token.NEQ:
+ // XXX(Spec) The rules for type checking comparison
+ // operators are spread across three places that all
+ // partially overlap with each other: the Comparison
+ // Compatibility section, the Operators section, and
+ // the Comparison Operators section. The Operators
+ // section should just say that operators require
+ // identical types (as it does currently) except that
+ // there a few special cases for comparison, which are
+ // described in section X. Currently it includes just
+ // one of the four special cases. The Comparison
+ // Compatibility section and the Comparison Operators
+ // section should either be merged, or at least the
+ // Comparison Compatibility section should be
+ // exclusively about type checking and the Comparison
+ // Operators section should be exclusively about
+ // semantics.
+
+ // XXX(Spec) Comparison operators: "All comparison
+ // operators apply to basic types except bools." This
+ // is very difficult to parse. It's explained much
+ // better in the Comparison Compatibility section.
+
+ // XXX(Spec) Comparison compatibility: "Function
+ // values are equal if they refer to the same
+ // function." is rather vague. It should probably be
+ // similar to the way the rule for map values is
+ // written: Function values are equal if they were
+ // created by the same execution of a function literal
+ // or refer to the same function declaration. This is
+ // *almost* but not quite what 6g implements. If a
+ // function literals does not capture any variables,
+ // then multiple executions of it will result in the
+ // same closure. Russ says he'll change that.
+
+ // TODO(austin) Deal with remaining special cases
+
+ if !compat() {
+ a.diagOpTypes(op, origlt, origrt)
+ return nil
+ }
+ // Arrays and structs may not be compared to anything.
+ switch l.t.(type) {
+ case *ArrayType, *StructType:
+ a.diagOpTypes(op, origlt, origrt)
+ return nil
+ }
+ t = BoolType
+
+ default:
+ log.Panicf("unknown binary operator %v", op)
+ }
+
+ desc, ok := binOpDescs[op]
+ if !ok {
+ desc = op.String() + " expression"
+ binOpDescs[op] = desc
+ }
+
+ // Check for ideal divide by zero
+ switch op {
+ case token.QUO, token.REM:
+ if r.t.isIdeal() {
+ if (r.t.isInteger() && r.asIdealInt()().Sign() == 0) ||
+ (r.t.isFloat() && r.asIdealFloat()().Sign() == 0) {
+ a.diag("divide by zero")
+ return nil
+ }
+ }
+ }
+
+ // Compile
+ expr := a.newExpr(t, desc)
+ switch op {
+ case token.ADD:
+ expr.genBinOpAdd(l, r)
+
+ case token.SUB:
+ expr.genBinOpSub(l, r)
+
+ case token.MUL:
+ expr.genBinOpMul(l, r)
+
+ case token.QUO:
+ expr.genBinOpQuo(l, r)
+
+ case token.REM:
+ expr.genBinOpRem(l, r)
+
+ case token.AND:
+ expr.genBinOpAnd(l, r)
+
+ case token.OR:
+ expr.genBinOpOr(l, r)
+
+ case token.XOR:
+ expr.genBinOpXor(l, r)
+
+ case token.AND_NOT:
+ expr.genBinOpAndNot(l, r)
+
+ case token.SHL:
+ // Fold constant shifts of ideal ints at compile time.
+ if l.t.isIdeal() {
+ lv := l.asIdealInt()()
+ rv := r.asIdealInt()()
+ const maxShift = 99999
+ if rv.Cmp(big.NewInt(maxShift)) > 0 {
+ a.diag("left shift by %v; exceeds implementation limit of %v", rv, maxShift)
+ expr.t = nil
+ return nil
+ }
+ val := new(big.Int).Lsh(lv, uint(rv.Int64()))
+ expr.eval = func() *big.Int { return val }
+ } else {
+ expr.genBinOpShl(l, r)
+ }
+
+ case token.SHR:
+ if l.t.isIdeal() {
+ lv := l.asIdealInt()()
+ rv := r.asIdealInt()()
+ val := new(big.Int).Rsh(lv, uint(rv.Int64()))
+ expr.eval = func() *big.Int { return val }
+ } else {
+ expr.genBinOpShr(l, r)
+ }
+
+ case token.LSS:
+ expr.genBinOpLss(l, r)
+
+ case token.GTR:
+ expr.genBinOpGtr(l, r)
+
+ case token.LEQ:
+ expr.genBinOpLeq(l, r)
+
+ case token.GEQ:
+ expr.genBinOpGeq(l, r)
+
+ case token.EQL:
+ expr.genBinOpEql(l, r)
+
+ case token.NEQ:
+ expr.genBinOpNeq(l, r)
+
+ case token.LAND:
+ expr.genBinOpLogAnd(l, r)
+
+ case token.LOR:
+ expr.genBinOpLogOr(l, r)
+
+ default:
+ log.Panicf("Compilation of binary op %v not implemented", op)
+ }
+
+ return expr
+}
+
+// TODO(austin) This is a hack to eliminate a circular dependency
+// between type.go and expr.go
+//
+// compileArrayLen compiles and evaluates a constant array-length
+// expression in block b, returning its value and whether the
+// compilation and evaluation succeeded.
+func (a *compiler) compileArrayLen(b *block, expr ast.Expr) (int64, bool) {
+ lenExpr := a.compileExpr(b, true, expr)
+ if lenExpr == nil {
+ return 0, false
+ }
+
+ // XXX(Spec) Are ideal floats with no fractional part okay?
+ if lenExpr.t.isIdeal() {
+ lenExpr = lenExpr.convertTo(IntType)
+ if lenExpr == nil {
+ return 0, false
+ }
+ }
+
+ if !lenExpr.t.isInteger() {
+ a.diagAt(expr.Pos(), "array size must be an integer")
+ return 0, false
+ }
+
+ // The length is a constant, so it can be evaluated with a nil Thread.
+ switch lenExpr.t.lit().(type) {
+ case *intType:
+ return lenExpr.asInt()(nil), true
+ case *uintType:
+ return int64(lenExpr.asUint()(nil)), true
+ }
+ log.Panicf("unexpected integer type %T", lenExpr.t)
+ return 0, false
+}
+
+// compileExpr compiles expr in block b; constant indicates a constant
+// context. It returns nil on error, and panics if compilation failed
+// without reporting any diagnostic.
+func (a *compiler) compileExpr(b *block, constant bool, expr ast.Expr) *expr {
+ ec := &exprCompiler{a, b, constant}
+ nerr := a.numError()
+ e := ec.compile(expr, false)
+ if e == nil && nerr == a.numError() {
+ log.Panicf("expression compilation failed without reporting errors")
+ }
+ return e
+}
+
+// extractEffect separates out any effects that the expression may
+// have, returning a function that will perform those effects and a
+// new expr that is guaranteed to be side-effect free. These
+// are the moral equivalents of "temp := expr" and "temp" (or "temp :=
+// &expr" and "*temp" for addressable exprs). Because this creates a
+// temporary variable, the caller should create a temporary block for
+// the compilation of this expression and the evaluation of the
+// results.
+func (a *expr) extractEffect(b *block, errOp string) (func(*Thread), *expr) {
+ // Create "&a" if a is addressable
+ rhs := a
+ if a.evalAddr != nil {
+ rhs = a.compileUnaryExpr(token.AND, rhs)
+ }
+
+ // Create temp
+ ac, ok := a.checkAssign(a.pos, []*expr{rhs}, errOp, "")
+ if !ok {
+ return nil, nil
+ }
+ if len(ac.rmt.Elems) != 1 {
+ a.diag("multi-valued expression not allowed in %s", errOp)
+ return nil, nil
+ }
+ tempType := ac.rmt.Elems[0]
+ if tempType.isIdeal() {
+ // It's too bad we have to duplicate this rule.
+ switch {
+ case tempType.isInteger():
+ tempType = IntType
+ case tempType.isFloat():
+ tempType = Float64Type
+ default:
+ log.Panicf("unexpected ideal type %v", tempType)
+ }
+ }
+ temp := b.DefineTemp(tempType)
+ tempIdx := temp.Index
+
+ // Create "temp := rhs"
+ assign := ac.compile(b, tempType)
+ if assign == nil {
+ log.Panicf("compileAssign type check failed")
+ }
+
+ // The effect allocates the temp's slot and assigns rhs into it.
+ effect := func(t *Thread) {
+ tempVal := tempType.Zero()
+ t.f.Vars[tempIdx] = tempVal
+ assign(tempVal, t)
+ }
+
+ // Generate "temp" or "*temp"
+ getTemp := a.compileVariable(0, temp)
+ if a.evalAddr == nil {
+ return effect, getTemp
+ }
+
+ deref := a.compileStarExpr(getTemp)
+ if deref == nil {
+ return nil, nil
+ }
+ return effect, deref
+}
diff --git a/libgo/go/exp/eval/expr1.go b/libgo/go/exp/eval/expr1.go
new file mode 100644
index 000000000..5d0e50000
--- /dev/null
+++ b/libgo/go/exp/eval/expr1.go
@@ -0,0 +1,1874 @@
+// This file is machine generated by gen.go.
+// 6g gen.go && 6l gen.6 && ./6.out >expr1.go
+
+package eval
+
+import (
+ "big"
+ "log"
+)
+
+/*
+ * "As" functions. These retrieve evaluator functions from an
+ * expr, panicking if the requested evaluator has the wrong type.
+ */
+// asBool returns a's evaluator as a bool thunk; panics if a is not a bool expression.
+func (a *expr) asBool() func(*Thread) bool {
+	return a.eval.(func(*Thread) bool)
+}
+// asUint returns a's evaluator as a uint64 thunk; panics on type mismatch.
+func (a *expr) asUint() func(*Thread) uint64 {
+	return a.eval.(func(*Thread) uint64)
+}
+// asInt returns a's evaluator as an int64 thunk; panics on type mismatch.
+func (a *expr) asInt() func(*Thread) int64 {
+	return a.eval.(func(*Thread) int64)
+}
+// asIdealInt returns the ideal-integer evaluator; ideal constants need
+// no Thread, so the thunk takes no arguments.  Panics on type mismatch.
+func (a *expr) asIdealInt() func() *big.Int {
+	return a.eval.(func() *big.Int)
+}
+// asFloat returns a's evaluator as a float64 thunk; panics on type mismatch.
+func (a *expr) asFloat() func(*Thread) float64 {
+	return a.eval.(func(*Thread) float64)
+}
+// asIdealFloat returns the ideal-float evaluator (no Thread needed);
+// panics on type mismatch.
+func (a *expr) asIdealFloat() func() *big.Rat {
+	return a.eval.(func() *big.Rat)
+}
+// asString returns a's evaluator as a string thunk; panics on type mismatch.
+func (a *expr) asString() func(*Thread) string {
+	return a.eval.(func(*Thread) string)
+}
+// asArray returns a's evaluator as an ArrayValue thunk; panics on type mismatch.
+func (a *expr) asArray() func(*Thread) ArrayValue {
+	return a.eval.(func(*Thread) ArrayValue)
+}
+// asStruct returns a's evaluator as a StructValue thunk; panics on type mismatch.
+func (a *expr) asStruct() func(*Thread) StructValue {
+	return a.eval.(func(*Thread) StructValue)
+}
+// asPtr returns a's evaluator as a Value (pointee) thunk; panics on type mismatch.
+func (a *expr) asPtr() func(*Thread) Value {
+	return a.eval.(func(*Thread) Value)
+}
+// asFunc returns a's evaluator as a Func thunk; panics on type mismatch.
+func (a *expr) asFunc() func(*Thread) Func {
+	return a.eval.(func(*Thread) Func)
+}
+// asSlice returns a's evaluator as a Slice thunk; panics on type mismatch.
+func (a *expr) asSlice() func(*Thread) Slice {
+	return a.eval.(func(*Thread) Slice)
+}
+// asMap returns a's evaluator as a Map thunk; panics on type mismatch.
+func (a *expr) asMap() func(*Thread) Map {
+	return a.eval.(func(*Thread) Map)
+}
+// asMulti returns the evaluator of a multi-valued expression (e.g. a
+// multi-result call) as a []Value thunk; panics on type mismatch.
+func (a *expr) asMulti() func(*Thread) []Value {
+	return a.eval.(func(*Thread) []Value)
+}
+
+// asInterface wraps a's evaluator, whatever its concrete signature, in
+// a uniform func(*Thread) interface{}.  The two ideal-constant shapes
+// take no Thread, so their wrappers simply ignore the argument.
+// Panics on an evaluator signature not in the list below.
+func (a *expr) asInterface() func(*Thread) interface{} {
+	switch sf := a.eval.(type) {
+	case func(t *Thread) bool:
+		return func(t *Thread) interface{} { return sf(t) }
+	case func(t *Thread) uint64:
+		return func(t *Thread) interface{} { return sf(t) }
+	case func(t *Thread) int64:
+		return func(t *Thread) interface{} { return sf(t) }
+	case func() *big.Int:
+		return func(*Thread) interface{} { return sf() }
+	case func(t *Thread) float64:
+		return func(t *Thread) interface{} { return sf(t) }
+	case func() *big.Rat:
+		return func(*Thread) interface{} { return sf() }
+	case func(t *Thread) string:
+		return func(t *Thread) interface{} { return sf(t) }
+	case func(t *Thread) ArrayValue:
+		return func(t *Thread) interface{} { return sf(t) }
+	case func(t *Thread) StructValue:
+		return func(t *Thread) interface{} { return sf(t) }
+	case func(t *Thread) Value:
+		return func(t *Thread) interface{} { return sf(t) }
+	case func(t *Thread) Func:
+		return func(t *Thread) interface{} { return sf(t) }
+	case func(t *Thread) Slice:
+		return func(t *Thread) interface{} { return sf(t) }
+	case func(t *Thread) Map:
+		return func(t *Thread) interface{} { return sf(t) }
+	default:
+		log.Panicf("unexpected expression node type %T at %v", a.eval, a.pos)
+	}
+	// Unreachable: log.Panicf above does not return.
+	panic("fail")
+}
+
+/*
+ * Operator generators.
+ */
+
+// genConstant installs an evaluator that yields the fixed value v,
+// dispatching on a's literal type.  Typed values are re-read through
+// the matching Value interface on each evaluation; ideal (untyped
+// constant) values are snapshotted once since they need no Thread.
+func (a *expr) genConstant(v Value) {
+	switch a.t.lit().(type) {
+	case *boolType:
+		a.eval = func(t *Thread) bool { return v.(BoolValue).Get(t) }
+	case *uintType:
+		a.eval = func(t *Thread) uint64 { return v.(UintValue).Get(t) }
+	case *intType:
+		a.eval = func(t *Thread) int64 { return v.(IntValue).Get(t) }
+	case *idealIntType:
+		val := v.(IdealIntValue).Get()
+		a.eval = func() *big.Int { return val }
+	case *floatType:
+		a.eval = func(t *Thread) float64 { return v.(FloatValue).Get(t) }
+	case *idealFloatType:
+		val := v.(IdealFloatValue).Get()
+		a.eval = func() *big.Rat { return val }
+	case *stringType:
+		a.eval = func(t *Thread) string { return v.(StringValue).Get(t) }
+	case *ArrayType:
+		a.eval = func(t *Thread) ArrayValue { return v.(ArrayValue).Get(t) }
+	case *StructType:
+		a.eval = func(t *Thread) StructValue { return v.(StructValue).Get(t) }
+	case *PtrType:
+		a.eval = func(t *Thread) Value { return v.(PtrValue).Get(t) }
+	case *FuncType:
+		a.eval = func(t *Thread) Func { return v.(FuncValue).Get(t) }
+	case *SliceType:
+		a.eval = func(t *Thread) Slice { return v.(SliceValue).Get(t) }
+	case *MapType:
+		a.eval = func(t *Thread) Map { return v.(MapValue).Get(t) }
+	default:
+		log.Panicf("unexpected constant type %v at %v", a.t, a.pos)
+	}
+}
+
+// genIdentOp generates access to the variable stored at (level, index)
+// in the thread's frame chain.  evalAddr yields the slot itself (the
+// l-value used for assignment); eval loads the slot and unwraps the
+// value according to a's static type.  Ideal types do not appear here
+// — variables always have a concrete type.
+func (a *expr) genIdentOp(level, index int) {
+	a.evalAddr = func(t *Thread) Value { return t.f.Get(level, index) }
+	switch a.t.lit().(type) {
+	case *boolType:
+		a.eval = func(t *Thread) bool { return t.f.Get(level, index).(BoolValue).Get(t) }
+	case *uintType:
+		a.eval = func(t *Thread) uint64 { return t.f.Get(level, index).(UintValue).Get(t) }
+	case *intType:
+		a.eval = func(t *Thread) int64 { return t.f.Get(level, index).(IntValue).Get(t) }
+	case *floatType:
+		a.eval = func(t *Thread) float64 { return t.f.Get(level, index).(FloatValue).Get(t) }
+	case *stringType:
+		a.eval = func(t *Thread) string { return t.f.Get(level, index).(StringValue).Get(t) }
+	case *ArrayType:
+		a.eval = func(t *Thread) ArrayValue { return t.f.Get(level, index).(ArrayValue).Get(t) }
+	case *StructType:
+		a.eval = func(t *Thread) StructValue { return t.f.Get(level, index).(StructValue).Get(t) }
+	case *PtrType:
+		a.eval = func(t *Thread) Value { return t.f.Get(level, index).(PtrValue).Get(t) }
+	case *FuncType:
+		a.eval = func(t *Thread) Func { return t.f.Get(level, index).(FuncValue).Get(t) }
+	case *SliceType:
+		a.eval = func(t *Thread) Slice { return t.f.Get(level, index).(SliceValue).Get(t) }
+	case *MapType:
+		a.eval = func(t *Thread) Map { return t.f.Get(level, index).(MapValue).Get(t) }
+	default:
+		log.Panicf("unexpected identifier type %v at %v", a.t, a.pos)
+	}
+}
+
+// genFuncCall installs evaluators around a call thunk that returns the
+// callee's full result slice.  exec runs the call and discards the
+// results (statement context); eval extracts result [0] and unwraps it
+// per the static type, or returns the raw slice for multi-valued
+// (*MultiType) calls.
+func (a *expr) genFuncCall(call func(t *Thread) []Value) {
+	a.exec = func(t *Thread) { call(t) }
+	switch a.t.lit().(type) {
+	case *boolType:
+		a.eval = func(t *Thread) bool { return call(t)[0].(BoolValue).Get(t) }
+	case *uintType:
+		a.eval = func(t *Thread) uint64 { return call(t)[0].(UintValue).Get(t) }
+	case *intType:
+		a.eval = func(t *Thread) int64 { return call(t)[0].(IntValue).Get(t) }
+	case *floatType:
+		a.eval = func(t *Thread) float64 { return call(t)[0].(FloatValue).Get(t) }
+	case *stringType:
+		a.eval = func(t *Thread) string { return call(t)[0].(StringValue).Get(t) }
+	case *ArrayType:
+		a.eval = func(t *Thread) ArrayValue { return call(t)[0].(ArrayValue).Get(t) }
+	case *StructType:
+		a.eval = func(t *Thread) StructValue { return call(t)[0].(StructValue).Get(t) }
+	case *PtrType:
+		a.eval = func(t *Thread) Value { return call(t)[0].(PtrValue).Get(t) }
+	case *FuncType:
+		a.eval = func(t *Thread) Func { return call(t)[0].(FuncValue).Get(t) }
+	case *SliceType:
+		a.eval = func(t *Thread) Slice { return call(t)[0].(SliceValue).Get(t) }
+	case *MapType:
+		a.eval = func(t *Thread) Map { return call(t)[0].(MapValue).Get(t) }
+	case *MultiType:
+		a.eval = func(t *Thread) []Value { return call(t) }
+	default:
+		log.Panicf("unexpected result type %v at %v", a.t, a.pos)
+	}
+}
+
+// genValue makes a an addressable expression whose l-value is produced
+// by vf (e.g. an indexing or field-selection result).  evalAddr is vf
+// itself; eval fetches the Value and unwraps it per the static type.
+func (a *expr) genValue(vf func(*Thread) Value) {
+	a.evalAddr = vf
+	switch a.t.lit().(type) {
+	case *boolType:
+		a.eval = func(t *Thread) bool { return vf(t).(BoolValue).Get(t) }
+	case *uintType:
+		a.eval = func(t *Thread) uint64 { return vf(t).(UintValue).Get(t) }
+	case *intType:
+		a.eval = func(t *Thread) int64 { return vf(t).(IntValue).Get(t) }
+	case *floatType:
+		a.eval = func(t *Thread) float64 { return vf(t).(FloatValue).Get(t) }
+	case *stringType:
+		a.eval = func(t *Thread) string { return vf(t).(StringValue).Get(t) }
+	case *ArrayType:
+		a.eval = func(t *Thread) ArrayValue { return vf(t).(ArrayValue).Get(t) }
+	case *StructType:
+		a.eval = func(t *Thread) StructValue { return vf(t).(StructValue).Get(t) }
+	case *PtrType:
+		a.eval = func(t *Thread) Value { return vf(t).(PtrValue).Get(t) }
+	case *FuncType:
+		a.eval = func(t *Thread) Func { return vf(t).(FuncValue).Get(t) }
+	case *SliceType:
+		a.eval = func(t *Thread) Slice { return vf(t).(SliceValue).Get(t) }
+	case *MapType:
+		a.eval = func(t *Thread) Map { return vf(t).(MapValue).Get(t) }
+	default:
+		log.Panicf("unexpected result type %v at %v", a.t, a.pos)
+	}
+}
+
+// genUnaryOpNeg generates -v.  Ideal (untyped constant) operands are
+// negated once at compile time; note that big.Int.Neg / big.Rat.Neg
+// mutate the operand's value in place and the closure then returns
+// that same object.
+func (a *expr) genUnaryOpNeg(v *expr) {
+	switch a.t.lit().(type) {
+	case *uintType:
+		vf := v.asUint()
+		a.eval = func(t *Thread) uint64 { v := vf(t); return -v }
+	case *intType:
+		vf := v.asInt()
+		a.eval = func(t *Thread) int64 { v := vf(t); return -v }
+	case *idealIntType:
+		val := v.asIdealInt()()
+		val.Neg(val)
+		a.eval = func() *big.Int { return val }
+	case *floatType:
+		vf := v.asFloat()
+		a.eval = func(t *Thread) float64 { v := vf(t); return -v }
+	case *idealFloatType:
+		val := v.asIdealFloat()()
+		val.Neg(val)
+		a.eval = func() *big.Rat { return val }
+	default:
+		log.Panicf("unexpected type %v at %v", a.t, a.pos)
+	}
+}
+
+// genUnaryOpNot generates !v; only defined for bool operands.
+func (a *expr) genUnaryOpNot(v *expr) {
+	switch a.t.lit().(type) {
+	case *boolType:
+		vf := v.asBool()
+		a.eval = func(t *Thread) bool { v := vf(t); return !v }
+	default:
+		log.Panicf("unexpected type %v at %v", a.t, a.pos)
+	}
+}
+
+// genUnaryOpXor generates ^v (bitwise complement).  Ideal-integer
+// operands are complemented at compile time via big.Int.Not, which
+// mutates the constant's value in place.
+func (a *expr) genUnaryOpXor(v *expr) {
+	switch a.t.lit().(type) {
+	case *uintType:
+		vf := v.asUint()
+		a.eval = func(t *Thread) uint64 { v := vf(t); return ^v }
+	case *intType:
+		vf := v.asInt()
+		a.eval = func(t *Thread) int64 { v := vf(t); return ^v }
+	case *idealIntType:
+		val := v.asIdealInt()()
+		val.Not(val)
+		a.eval = func() *big.Int { return val }
+	default:
+		log.Panicf("unexpected type %v at %v", a.t, a.pos)
+	}
+}
+
+// genBinOpLogAnd generates l && r with short-circuit evaluation:
+// rf runs only when lf yields true.
+func (a *expr) genBinOpLogAnd(l, r *expr) {
+	lf := l.asBool()
+	rf := r.asBool()
+	a.eval = func(t *Thread) bool { return lf(t) && rf(t) }
+}
+
+// genBinOpLogOr generates l || r with short-circuit evaluation:
+// rf runs only when lf yields false.
+func (a *expr) genBinOpLogOr(l, r *expr) {
+	lf := l.asBool()
+	rf := r.asBool()
+	a.eval = func(t *Thread) bool { return lf(t) || rf(t) }
+}
+
+// genBinOpAdd generates l + r.  The type checker has already unified
+// the operand types, so dispatch is on l's literal type only.  Sized
+// integer results are truncated to the declared width by converting
+// through the matching machine type; Bits == 0 selects the native-
+// sized uint/int.  Ideal operands are folded at compile time (big
+// Add mutates l's value in place); string + is concatenation.
+func (a *expr) genBinOpAdd(l, r *expr) {
+	switch t := l.t.lit().(type) {
+	case *uintType:
+		lf := l.asUint()
+		rf := r.asUint()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l + r
+				return uint64(uint8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l + r
+				return uint64(uint16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l + r
+				return uint64(uint32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l + r
+				return uint64(uint64(ret))
+			}
+		case 0:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l + r
+				return uint64(uint(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *intType:
+		lf := l.asInt()
+		rf := r.asInt()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l + r
+				return int64(int8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l + r
+				return int64(int16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l + r
+				return int64(int32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l + r
+				return int64(int64(ret))
+			}
+		case 0:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l + r
+				return int64(int(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *idealIntType:
+		l := l.asIdealInt()()
+		r := r.asIdealInt()()
+		val := l.Add(l, r)
+		a.eval = func() *big.Int { return val }
+	case *floatType:
+		lf := l.asFloat()
+		rf := r.asFloat()
+		switch t.Bits {
+		case 32:
+			a.eval = func(t *Thread) float64 {
+				l, r := lf(t), rf(t)
+				var ret float64
+				ret = l + r
+				return float64(float32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) float64 {
+				l, r := lf(t), rf(t)
+				var ret float64
+				ret = l + r
+				return float64(float64(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *idealFloatType:
+		l := l.asIdealFloat()()
+		r := r.asIdealFloat()()
+		val := l.Add(l, r)
+		a.eval = func() *big.Rat { return val }
+	case *stringType:
+		lf := l.asString()
+		rf := r.asString()
+		a.eval = func(t *Thread) string {
+			l, r := lf(t), rf(t)
+			return l + r
+		}
+	default:
+		log.Panicf("unexpected type %v at %v", l.t, a.pos)
+	}
+}
+
+// genBinOpSub generates l - r.  Same structure as genBinOpAdd (width
+// truncation via the machine type, Bits == 0 meaning native size,
+// compile-time folding of ideal operands) but without a string case.
+func (a *expr) genBinOpSub(l, r *expr) {
+	switch t := l.t.lit().(type) {
+	case *uintType:
+		lf := l.asUint()
+		rf := r.asUint()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l - r
+				return uint64(uint8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l - r
+				return uint64(uint16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l - r
+				return uint64(uint32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l - r
+				return uint64(uint64(ret))
+			}
+		case 0:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l - r
+				return uint64(uint(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *intType:
+		lf := l.asInt()
+		rf := r.asInt()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l - r
+				return int64(int8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l - r
+				return int64(int16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l - r
+				return int64(int32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l - r
+				return int64(int64(ret))
+			}
+		case 0:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l - r
+				return int64(int(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *idealIntType:
+		l := l.asIdealInt()()
+		r := r.asIdealInt()()
+		val := l.Sub(l, r)
+		a.eval = func() *big.Int { return val }
+	case *floatType:
+		lf := l.asFloat()
+		rf := r.asFloat()
+		switch t.Bits {
+		case 32:
+			a.eval = func(t *Thread) float64 {
+				l, r := lf(t), rf(t)
+				var ret float64
+				ret = l - r
+				return float64(float32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) float64 {
+				l, r := lf(t), rf(t)
+				var ret float64
+				ret = l - r
+				return float64(float64(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *idealFloatType:
+		l := l.asIdealFloat()()
+		r := r.asIdealFloat()()
+		val := l.Sub(l, r)
+		a.eval = func() *big.Rat { return val }
+	default:
+		log.Panicf("unexpected type %v at %v", l.t, a.pos)
+	}
+}
+
+// genBinOpMul generates l * r.  Same structure as genBinOpSub: sized
+// results are truncated to the declared width, Bits == 0 means the
+// native-sized uint/int, and ideal operands are multiplied once at
+// compile time (big Mul mutates l's value in place).
+func (a *expr) genBinOpMul(l, r *expr) {
+	switch t := l.t.lit().(type) {
+	case *uintType:
+		lf := l.asUint()
+		rf := r.asUint()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l * r
+				return uint64(uint8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l * r
+				return uint64(uint16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l * r
+				return uint64(uint32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l * r
+				return uint64(uint64(ret))
+			}
+		case 0:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l * r
+				return uint64(uint(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *intType:
+		lf := l.asInt()
+		rf := r.asInt()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l * r
+				return int64(int8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l * r
+				return int64(int16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l * r
+				return int64(int32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l * r
+				return int64(int64(ret))
+			}
+		case 0:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l * r
+				return int64(int(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *idealIntType:
+		l := l.asIdealInt()()
+		r := r.asIdealInt()()
+		val := l.Mul(l, r)
+		a.eval = func() *big.Int { return val }
+	case *floatType:
+		lf := l.asFloat()
+		rf := r.asFloat()
+		switch t.Bits {
+		case 32:
+			a.eval = func(t *Thread) float64 {
+				l, r := lf(t), rf(t)
+				var ret float64
+				ret = l * r
+				return float64(float32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) float64 {
+				l, r := lf(t), rf(t)
+				var ret float64
+				ret = l * r
+				return float64(float64(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *idealFloatType:
+		l := l.asIdealFloat()()
+		r := r.asIdealFloat()()
+		val := l.Mul(l, r)
+		a.eval = func() *big.Rat { return val }
+	default:
+		log.Panicf("unexpected type %v at %v", l.t, a.pos)
+	}
+}
+
+// genBinOpQuo generates l / r.  A zero divisor aborts the running
+// thread via t.Abort(DivByZeroError{}) — including for floats, where
+// native Go would produce an IEEE infinity instead.  (Abort is
+// presumed not to return normally, since the division follows it —
+// NOTE(review): confirm against abort.go.)  Sized results are
+// truncated to the declared width; ideal operands are folded at
+// compile time with no zero check.
+func (a *expr) genBinOpQuo(l, r *expr) {
+	switch t := l.t.lit().(type) {
+	case *uintType:
+		lf := l.asUint()
+		rf := r.asUint()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l / r
+				return uint64(uint8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l / r
+				return uint64(uint16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l / r
+				return uint64(uint32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l / r
+				return uint64(uint64(ret))
+			}
+		case 0:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l / r
+				return uint64(uint(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *intType:
+		lf := l.asInt()
+		rf := r.asInt()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l / r
+				return int64(int8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l / r
+				return int64(int16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l / r
+				return int64(int32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l / r
+				return int64(int64(ret))
+			}
+		case 0:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l / r
+				return int64(int(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *idealIntType:
+		l := l.asIdealInt()()
+		r := r.asIdealInt()()
+		val := l.Quo(l, r)
+		a.eval = func() *big.Int { return val }
+	case *floatType:
+		lf := l.asFloat()
+		rf := r.asFloat()
+		switch t.Bits {
+		case 32:
+			a.eval = func(t *Thread) float64 {
+				l, r := lf(t), rf(t)
+				var ret float64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l / r
+				return float64(float32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) float64 {
+				l, r := lf(t), rf(t)
+				var ret float64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l / r
+				return float64(float64(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *idealFloatType:
+		l := l.asIdealFloat()()
+		r := r.asIdealFloat()()
+		val := l.Quo(l, r)
+		a.eval = func() *big.Rat { return val }
+	default:
+		log.Panicf("unexpected type %v at %v", l.t, a.pos)
+	}
+}
+
+// genBinOpRem generates l % r.  Like genBinOpQuo, a zero divisor
+// aborts the thread with DivByZeroError; there is no float case since
+// % is integer-only.  Ideal operands are folded at compile time.
+func (a *expr) genBinOpRem(l, r *expr) {
+	switch t := l.t.lit().(type) {
+	case *uintType:
+		lf := l.asUint()
+		rf := r.asUint()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l % r
+				return uint64(uint8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l % r
+				return uint64(uint16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l % r
+				return uint64(uint32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l % r
+				return uint64(uint64(ret))
+			}
+		case 0:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l % r
+				return uint64(uint(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *intType:
+		lf := l.asInt()
+		rf := r.asInt()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l % r
+				return int64(int8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l % r
+				return int64(int16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l % r
+				return int64(int32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l % r
+				return int64(int64(ret))
+			}
+		case 0:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				if r == 0 {
+					t.Abort(DivByZeroError{})
+				}
+				ret = l % r
+				return int64(int(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *idealIntType:
+		l := l.asIdealInt()()
+		r := r.asIdealInt()()
+		val := l.Rem(l, r)
+		a.eval = func() *big.Int { return val }
+	default:
+		log.Panicf("unexpected type %v at %v", l.t, a.pos)
+	}
+}
+
+// genBinOpAnd generates l & r (bitwise AND).  Integer-only: sized
+// results are truncated to the declared width, Bits == 0 means the
+// native-sized uint/int, and ideal operands are folded at compile
+// time (big.Int.And mutates l's value in place).
+func (a *expr) genBinOpAnd(l, r *expr) {
+	switch t := l.t.lit().(type) {
+	case *uintType:
+		lf := l.asUint()
+		rf := r.asUint()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l & r
+				return uint64(uint8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l & r
+				return uint64(uint16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l & r
+				return uint64(uint32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l & r
+				return uint64(uint64(ret))
+			}
+		case 0:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l & r
+				return uint64(uint(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *intType:
+		lf := l.asInt()
+		rf := r.asInt()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l & r
+				return int64(int8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l & r
+				return int64(int16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l & r
+				return int64(int32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l & r
+				return int64(int64(ret))
+			}
+		case 0:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l & r
+				return int64(int(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *idealIntType:
+		l := l.asIdealInt()()
+		r := r.asIdealInt()()
+		val := l.And(l, r)
+		a.eval = func() *big.Int { return val }
+	default:
+		log.Panicf("unexpected type %v at %v", l.t, a.pos)
+	}
+}
+
+// genBinOpOr generates l | r (bitwise OR).  Same structure as
+// genBinOpAnd.
+func (a *expr) genBinOpOr(l, r *expr) {
+	switch t := l.t.lit().(type) {
+	case *uintType:
+		lf := l.asUint()
+		rf := r.asUint()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l | r
+				return uint64(uint8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l | r
+				return uint64(uint16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l | r
+				return uint64(uint32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l | r
+				return uint64(uint64(ret))
+			}
+		case 0:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l | r
+				return uint64(uint(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *intType:
+		lf := l.asInt()
+		rf := r.asInt()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l | r
+				return int64(int8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l | r
+				return int64(int16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l | r
+				return int64(int32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l | r
+				return int64(int64(ret))
+			}
+		case 0:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l | r
+				return int64(int(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *idealIntType:
+		l := l.asIdealInt()()
+		r := r.asIdealInt()()
+		val := l.Or(l, r)
+		a.eval = func() *big.Int { return val }
+	default:
+		log.Panicf("unexpected type %v at %v", l.t, a.pos)
+	}
+}
+
+// genBinOpXor generates l ^ r (bitwise XOR).  Same structure as
+// genBinOpAnd.
+func (a *expr) genBinOpXor(l, r *expr) {
+	switch t := l.t.lit().(type) {
+	case *uintType:
+		lf := l.asUint()
+		rf := r.asUint()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l ^ r
+				return uint64(uint8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l ^ r
+				return uint64(uint16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l ^ r
+				return uint64(uint32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l ^ r
+				return uint64(uint64(ret))
+			}
+		case 0:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l ^ r
+				return uint64(uint(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *intType:
+		lf := l.asInt()
+		rf := r.asInt()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l ^ r
+				return int64(int8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l ^ r
+				return int64(int16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l ^ r
+				return int64(int32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l ^ r
+				return int64(int64(ret))
+			}
+		case 0:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l ^ r
+				return int64(int(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *idealIntType:
+		l := l.asIdealInt()()
+		r := r.asIdealInt()()
+		val := l.Xor(l, r)
+		a.eval = func() *big.Int { return val }
+	default:
+		log.Panicf("unexpected type %v at %v", l.t, a.pos)
+	}
+}
+
+// genBinOpAndNot generates l &^ r (bit clear).  Same structure as
+// genBinOpAnd.
+func (a *expr) genBinOpAndNot(l, r *expr) {
+	switch t := l.t.lit().(type) {
+	case *uintType:
+		lf := l.asUint()
+		rf := r.asUint()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l &^ r
+				return uint64(uint8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l &^ r
+				return uint64(uint16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l &^ r
+				return uint64(uint32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l &^ r
+				return uint64(uint64(ret))
+			}
+		case 0:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l &^ r
+				return uint64(uint(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *intType:
+		lf := l.asInt()
+		rf := r.asInt()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l &^ r
+				return int64(int8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l &^ r
+				return int64(int16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l &^ r
+				return int64(int32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l &^ r
+				return int64(int64(ret))
+			}
+		case 0:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l &^ r
+				return int64(int(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *idealIntType:
+		l := l.asIdealInt()()
+		r := r.asIdealInt()()
+		val := l.AndNot(l, r)
+		a.eval = func() *big.Int { return val }
+	default:
+		log.Panicf("unexpected type %v at %v", l.t, a.pos)
+	}
+}
+
+// genBinOpShl generates l << r.  The shift count is always fetched as
+// uint64 (r.asUint even when l is signed, per Go's unsigned-shift-
+// count rule); results are truncated to the declared width.  No ideal
+// case here — constant shifts are presumably resolved elsewhere.
+func (a *expr) genBinOpShl(l, r *expr) {
+	switch t := l.t.lit().(type) {
+	case *uintType:
+		lf := l.asUint()
+		rf := r.asUint()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l << r
+				return uint64(uint8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l << r
+				return uint64(uint16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l << r
+				return uint64(uint32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l << r
+				return uint64(uint64(ret))
+			}
+		case 0:
+			a.eval = func(t *Thread) uint64 {
+				l, r := lf(t), rf(t)
+				var ret uint64
+				ret = l << r
+				return uint64(uint(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	case *intType:
+		lf := l.asInt()
+		rf := r.asUint()
+		switch t.Bits {
+		case 8:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l << r
+				return int64(int8(ret))
+			}
+		case 16:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l << r
+				return int64(int16(ret))
+			}
+		case 32:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l << r
+				return int64(int32(ret))
+			}
+		case 64:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l << r
+				return int64(int64(ret))
+			}
+		case 0:
+			a.eval = func(t *Thread) int64 {
+				l, r := lf(t), rf(t)
+				var ret int64
+				ret = l << r
+				return int64(int(ret))
+			}
+		default:
+			log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+		}
+	default:
+		log.Panicf("unexpected type %v at %v", l.t, a.pos)
+	}
+}
+
+func (a *expr) genBinOpShr(l, r *expr) {
+ switch t := l.t.lit().(type) {
+ case *uintType:
+ lf := l.asUint()
+ rf := r.asUint()
+ switch t.Bits {
+ case 8:
+ a.eval = func(t *Thread) uint64 {
+ l, r := lf(t), rf(t)
+ var ret uint64
+ ret = l >> r
+ return uint64(uint8(ret))
+ }
+ case 16:
+ a.eval = func(t *Thread) uint64 {
+ l, r := lf(t), rf(t)
+ var ret uint64
+ ret = l >> r
+ return uint64(uint16(ret))
+ }
+ case 32:
+ a.eval = func(t *Thread) uint64 {
+ l, r := lf(t), rf(t)
+ var ret uint64
+ ret = l >> r
+ return uint64(uint32(ret))
+ }
+ case 64:
+ a.eval = func(t *Thread) uint64 {
+ l, r := lf(t), rf(t)
+ var ret uint64
+ ret = l >> r
+ return uint64(uint64(ret))
+ }
+ case 0:
+ a.eval = func(t *Thread) uint64 {
+ l, r := lf(t), rf(t)
+ var ret uint64
+ ret = l >> r
+ return uint64(uint(ret))
+ }
+ default:
+ log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+ }
+ case *intType:
+ lf := l.asInt()
+ rf := r.asUint()
+ switch t.Bits {
+ case 8:
+ a.eval = func(t *Thread) int64 {
+ l, r := lf(t), rf(t)
+ var ret int64
+ ret = l >> r
+ return int64(int8(ret))
+ }
+ case 16:
+ a.eval = func(t *Thread) int64 {
+ l, r := lf(t), rf(t)
+ var ret int64
+ ret = l >> r
+ return int64(int16(ret))
+ }
+ case 32:
+ a.eval = func(t *Thread) int64 {
+ l, r := lf(t), rf(t)
+ var ret int64
+ ret = l >> r
+ return int64(int32(ret))
+ }
+ case 64:
+ a.eval = func(t *Thread) int64 {
+ l, r := lf(t), rf(t)
+ var ret int64
+ ret = l >> r
+ return int64(int64(ret))
+ }
+ case 0:
+ a.eval = func(t *Thread) int64 {
+ l, r := lf(t), rf(t)
+ var ret int64
+ ret = l >> r
+ return int64(int(ret))
+ }
+ default:
+ log.Panicf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
+ }
+ default:
+ log.Panicf("unexpected type %v at %v", l.t, a.pos)
+ }
+}
+
+func (a *expr) genBinOpLss(l, r *expr) {
+ switch t := l.t.lit().(type) {
+ case *uintType:
+ lf := l.asUint()
+ rf := r.asUint()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l < r
+ }
+ case *intType:
+ lf := l.asInt()
+ rf := r.asInt()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l < r
+ }
+ case *idealIntType:
+ l := l.asIdealInt()()
+ r := r.asIdealInt()()
+ val := l.Cmp(r) < 0
+ a.eval = func(t *Thread) bool { return val }
+ case *floatType:
+ lf := l.asFloat()
+ rf := r.asFloat()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l < r
+ }
+ case *idealFloatType:
+ l := l.asIdealFloat()()
+ r := r.asIdealFloat()()
+ val := l.Cmp(r) < 0
+ a.eval = func(t *Thread) bool { return val }
+ case *stringType:
+ lf := l.asString()
+ rf := r.asString()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l < r
+ }
+ default:
+ log.Panicf("unexpected type %v at %v", l.t, a.pos)
+ }
+}
+
+func (a *expr) genBinOpGtr(l, r *expr) {
+ switch t := l.t.lit().(type) {
+ case *uintType:
+ lf := l.asUint()
+ rf := r.asUint()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l > r
+ }
+ case *intType:
+ lf := l.asInt()
+ rf := r.asInt()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l > r
+ }
+ case *idealIntType:
+ l := l.asIdealInt()()
+ r := r.asIdealInt()()
+ val := l.Cmp(r) > 0
+ a.eval = func(t *Thread) bool { return val }
+ case *floatType:
+ lf := l.asFloat()
+ rf := r.asFloat()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l > r
+ }
+ case *idealFloatType:
+ l := l.asIdealFloat()()
+ r := r.asIdealFloat()()
+ val := l.Cmp(r) > 0
+ a.eval = func(t *Thread) bool { return val }
+ case *stringType:
+ lf := l.asString()
+ rf := r.asString()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l > r
+ }
+ default:
+ log.Panicf("unexpected type %v at %v", l.t, a.pos)
+ }
+}
+
+func (a *expr) genBinOpLeq(l, r *expr) {
+ switch t := l.t.lit().(type) {
+ case *uintType:
+ lf := l.asUint()
+ rf := r.asUint()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l <= r
+ }
+ case *intType:
+ lf := l.asInt()
+ rf := r.asInt()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l <= r
+ }
+ case *idealIntType:
+ l := l.asIdealInt()()
+ r := r.asIdealInt()()
+ val := l.Cmp(r) <= 0
+ a.eval = func(t *Thread) bool { return val }
+ case *floatType:
+ lf := l.asFloat()
+ rf := r.asFloat()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l <= r
+ }
+ case *idealFloatType:
+ l := l.asIdealFloat()()
+ r := r.asIdealFloat()()
+ val := l.Cmp(r) <= 0
+ a.eval = func(t *Thread) bool { return val }
+ case *stringType:
+ lf := l.asString()
+ rf := r.asString()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l <= r
+ }
+ default:
+ log.Panicf("unexpected type %v at %v", l.t, a.pos)
+ }
+}
+
+func (a *expr) genBinOpGeq(l, r *expr) {
+ switch t := l.t.lit().(type) {
+ case *uintType:
+ lf := l.asUint()
+ rf := r.asUint()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l >= r
+ }
+ case *intType:
+ lf := l.asInt()
+ rf := r.asInt()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l >= r
+ }
+ case *idealIntType:
+ l := l.asIdealInt()()
+ r := r.asIdealInt()()
+ val := l.Cmp(r) >= 0
+ a.eval = func(t *Thread) bool { return val }
+ case *floatType:
+ lf := l.asFloat()
+ rf := r.asFloat()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l >= r
+ }
+ case *idealFloatType:
+ l := l.asIdealFloat()()
+ r := r.asIdealFloat()()
+ val := l.Cmp(r) >= 0
+ a.eval = func(t *Thread) bool { return val }
+ case *stringType:
+ lf := l.asString()
+ rf := r.asString()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l >= r
+ }
+ default:
+ log.Panicf("unexpected type %v at %v", l.t, a.pos)
+ }
+}
+
+func (a *expr) genBinOpEql(l, r *expr) {
+ switch t := l.t.lit().(type) {
+ case *boolType:
+ lf := l.asBool()
+ rf := r.asBool()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l == r
+ }
+ case *uintType:
+ lf := l.asUint()
+ rf := r.asUint()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l == r
+ }
+ case *intType:
+ lf := l.asInt()
+ rf := r.asInt()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l == r
+ }
+ case *idealIntType:
+ l := l.asIdealInt()()
+ r := r.asIdealInt()()
+ val := l.Cmp(r) == 0
+ a.eval = func(t *Thread) bool { return val }
+ case *floatType:
+ lf := l.asFloat()
+ rf := r.asFloat()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l == r
+ }
+ case *idealFloatType:
+ l := l.asIdealFloat()()
+ r := r.asIdealFloat()()
+ val := l.Cmp(r) == 0
+ a.eval = func(t *Thread) bool { return val }
+ case *stringType:
+ lf := l.asString()
+ rf := r.asString()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l == r
+ }
+ case *PtrType:
+ lf := l.asPtr()
+ rf := r.asPtr()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l == r
+ }
+ case *FuncType:
+ lf := l.asFunc()
+ rf := r.asFunc()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l == r
+ }
+ case *MapType:
+ lf := l.asMap()
+ rf := r.asMap()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l == r
+ }
+ default:
+ log.Panicf("unexpected type %v at %v", l.t, a.pos)
+ }
+}
+
+func (a *expr) genBinOpNeq(l, r *expr) {
+ switch t := l.t.lit().(type) {
+ case *boolType:
+ lf := l.asBool()
+ rf := r.asBool()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l != r
+ }
+ case *uintType:
+ lf := l.asUint()
+ rf := r.asUint()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l != r
+ }
+ case *intType:
+ lf := l.asInt()
+ rf := r.asInt()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l != r
+ }
+ case *idealIntType:
+ l := l.asIdealInt()()
+ r := r.asIdealInt()()
+ val := l.Cmp(r) != 0
+ a.eval = func(t *Thread) bool { return val }
+ case *floatType:
+ lf := l.asFloat()
+ rf := r.asFloat()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l != r
+ }
+ case *idealFloatType:
+ l := l.asIdealFloat()()
+ r := r.asIdealFloat()()
+ val := l.Cmp(r) != 0
+ a.eval = func(t *Thread) bool { return val }
+ case *stringType:
+ lf := l.asString()
+ rf := r.asString()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l != r
+ }
+ case *PtrType:
+ lf := l.asPtr()
+ rf := r.asPtr()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l != r
+ }
+ case *FuncType:
+ lf := l.asFunc()
+ rf := r.asFunc()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l != r
+ }
+ case *MapType:
+ lf := l.asMap()
+ rf := r.asMap()
+ a.eval = func(t *Thread) bool {
+ l, r := lf(t), rf(t)
+ return l != r
+ }
+ default:
+ log.Panicf("unexpected type %v at %v", l.t, a.pos)
+ }
+}
+
+func genAssign(lt Type, r *expr) func(lv Value, t *Thread) {
+ switch lt.lit().(type) {
+ case *boolType:
+ rf := r.asBool()
+ return func(lv Value, t *Thread) { lv.(BoolValue).Set(t, rf(t)) }
+ case *uintType:
+ rf := r.asUint()
+ return func(lv Value, t *Thread) { lv.(UintValue).Set(t, rf(t)) }
+ case *intType:
+ rf := r.asInt()
+ return func(lv Value, t *Thread) { lv.(IntValue).Set(t, rf(t)) }
+ case *floatType:
+ rf := r.asFloat()
+ return func(lv Value, t *Thread) { lv.(FloatValue).Set(t, rf(t)) }
+ case *stringType:
+ rf := r.asString()
+ return func(lv Value, t *Thread) { lv.(StringValue).Set(t, rf(t)) }
+ case *ArrayType:
+ rf := r.asArray()
+ return func(lv Value, t *Thread) { lv.Assign(t, rf(t)) }
+ case *StructType:
+ rf := r.asStruct()
+ return func(lv Value, t *Thread) { lv.Assign(t, rf(t)) }
+ case *PtrType:
+ rf := r.asPtr()
+ return func(lv Value, t *Thread) { lv.(PtrValue).Set(t, rf(t)) }
+ case *FuncType:
+ rf := r.asFunc()
+ return func(lv Value, t *Thread) { lv.(FuncValue).Set(t, rf(t)) }
+ case *SliceType:
+ rf := r.asSlice()
+ return func(lv Value, t *Thread) { lv.(SliceValue).Set(t, rf(t)) }
+ case *MapType:
+ rf := r.asMap()
+ return func(lv Value, t *Thread) { lv.(MapValue).Set(t, rf(t)) }
+ default:
+ log.Panicf("unexpected left operand type %v at %v", lt, r.pos)
+ }
+ panic("fail")
+}
diff --git a/libgo/go/exp/eval/expr_test.go b/libgo/go/exp/eval/expr_test.go
new file mode 100644
index 000000000..0dbce4315
--- /dev/null
+++ b/libgo/go/exp/eval/expr_test.go
@@ -0,0 +1,355 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package eval
+
+import (
+ "big"
+ "testing"
+)
+
+var undefined = "undefined"
+var typeAsExpr = "type .* used as expression"
+var badCharLit = "character literal"
+var unknownEscape = "unknown escape sequence"
+var opTypes = "illegal (operand|argument) type|cannot index into"
+var badAddrOf = "cannot take the address"
+var constantTruncated = "constant [^ ]* truncated"
+var constantUnderflows = "constant [^ ]* underflows"
+var constantOverflows = "constant [^ ]* overflows"
+var implLimit = "implementation limit"
+var mustBeUnsigned = "must be unsigned"
+var divByZero = "divide by zero"
+
+var hugeInteger = new(big.Int).Lsh(idealOne, 64)
+
+var exprTests = []test{
+ Val("i", 1),
+ CErr("zzz", undefined),
+ // TODO(austin) Test variable in constant context
+ //CErr("t", typeAsExpr),
+
+ Val("'a'", big.NewInt('a')),
+ Val("'\\uffff'", big.NewInt('\uffff')),
+ Val("'\\n'", big.NewInt('\n')),
+ CErr("''+x", badCharLit),
+ // Produces two parse errors
+ //CErr("'''", ""),
+ CErr("'\n'", badCharLit),
+ CErr("'\\z'", unknownEscape),
+ CErr("'ab'", badCharLit),
+
+ Val("1.0", big.NewRat(1, 1)),
+ Val("1.", big.NewRat(1, 1)),
+ Val(".1", big.NewRat(1, 10)),
+ Val("1e2", big.NewRat(100, 1)),
+
+ Val("\"abc\"", "abc"),
+ Val("\"\"", ""),
+ Val("\"\\n\\\"\"", "\n\""),
+ CErr("\"\\z\"", unknownEscape),
+ CErr("\"abc", "string not terminated"),
+
+ Val("(i)", 1),
+
+ Val("ai[0]", 1),
+ Val("(&ai)[0]", 1),
+ Val("ai[1]", 2),
+ Val("ai[i]", 2),
+ Val("ai[u]", 2),
+ CErr("ai[f]", opTypes),
+ CErr("ai[0][0]", opTypes),
+ CErr("ai[2]", "index 2 exceeds"),
+ CErr("ai[1+1]", "index 2 exceeds"),
+ CErr("ai[-1]", "negative index"),
+ RErr("ai[i+i]", "index 2 exceeds"),
+ RErr("ai[-i]", "negative index"),
+ CErr("i[0]", opTypes),
+ CErr("f[0]", opTypes),
+
+ Val("aai[0][0]", 1),
+ Val("aai[1][1]", 4),
+ CErr("aai[2][0]", "index 2 exceeds"),
+ CErr("aai[0][2]", "index 2 exceeds"),
+
+ Val("sli[0]", 1),
+ Val("sli[1]", 2),
+ CErr("sli[-1]", "negative index"),
+ RErr("sli[-i]", "negative index"),
+ RErr("sli[2]", "index 2 exceeds"),
+
+ Val("s[0]", uint8('a')),
+ Val("s[1]", uint8('b')),
+ CErr("s[-1]", "negative index"),
+ RErr("s[-i]", "negative index"),
+ RErr("s[3]", "index 3 exceeds"),
+
+ Val("ai[0:2]", vslice{varray{1, 2}, 2, 2}),
+ Val("ai[0:1]", vslice{varray{1, 2}, 1, 2}),
+ Val("ai[0:]", vslice{varray{1, 2}, 2, 2}),
+ Val("ai[i:]", vslice{varray{2}, 1, 1}),
+
+ Val("sli[0:2]", vslice{varray{1, 2, 3}, 2, 3}),
+ Val("sli[0:i]", vslice{varray{1, 2, 3}, 1, 3}),
+ Val("sli[1:]", vslice{varray{2, 3}, 1, 2}),
+
+ CErr("1(2)", "cannot call"),
+ CErr("fn(1,2)", "too many"),
+ CErr("fn()", "not enough"),
+ CErr("fn(true)", opTypes),
+ CErr("fn(true)", "function call"),
+ // Single argument functions don't say which argument.
+ //CErr("fn(true)", "argument 1"),
+ Val("fn(1)", 2),
+ Val("fn(1.0)", 2),
+ CErr("fn(1.5)", constantTruncated),
+ Val("fn(i)", 2),
+ CErr("fn(u)", opTypes),
+
+ CErr("void()+2", opTypes),
+ CErr("oneTwo()+2", opTypes),
+
+ Val("cap(ai)", 2),
+ Val("cap(&ai)", 2),
+ Val("cap(aai)", 2),
+ Val("cap(sli)", 3),
+ CErr("cap(0)", opTypes),
+ CErr("cap(i)", opTypes),
+ CErr("cap(s)", opTypes),
+
+ Val("len(s)", 3),
+ Val("len(ai)", 2),
+ Val("len(&ai)", 2),
+ Val("len(ai[0:])", 2),
+ Val("len(ai[1:])", 1),
+ Val("len(ai[2:])", 0),
+ Val("len(aai)", 2),
+ Val("len(sli)", 2),
+ Val("len(sli[0:])", 2),
+ Val("len(sli[1:])", 1),
+ Val("len(sli[2:])", 0),
+ // TODO(austin) Test len of map
+ CErr("len(0)", opTypes),
+ CErr("len(i)", opTypes),
+
+ CErr("*i", opTypes),
+ Val("*&i", 1),
+ Val("*&(i)", 1),
+ CErr("&1", badAddrOf),
+ CErr("&c", badAddrOf),
+ Val("*(&ai[0])", 1),
+
+ Val("+1", big.NewInt(+1)),
+ Val("+1.0", big.NewRat(1, 1)),
+ Val("01.5", big.NewRat(15, 10)),
+ CErr("+\"x\"", opTypes),
+
+ Val("-42", big.NewInt(-42)),
+ Val("-i", -1),
+ Val("-f", -1.0),
+ // 6g bug?
+ //Val("-(f-1)", -0.0),
+ CErr("-\"x\"", opTypes),
+
+ // TODO(austin) Test unary !
+
+ Val("^2", big.NewInt(^2)),
+ Val("^(-2)", big.NewInt(^(-2))),
+ CErr("^2.0", opTypes),
+ CErr("^2.5", opTypes),
+ Val("^i", ^1),
+ Val("^u", ^uint(1)),
+ CErr("^f", opTypes),
+
+ Val("1+i", 2),
+ Val("1+u", uint(2)),
+ Val("3.0+i", 4),
+ Val("1+1", big.NewInt(2)),
+ Val("f+f", 2.0),
+ Val("1+f", 2.0),
+ Val("1.0+1", big.NewRat(2, 1)),
+ Val("\"abc\" + \"def\"", "abcdef"),
+ CErr("i+u", opTypes),
+ CErr("-1+u", constantUnderflows),
+ // TODO(austin) Test named types
+
+ Val("2-1", big.NewInt(1)),
+ Val("2.0-1", big.NewRat(1, 1)),
+ Val("f-2", -1.0),
+ Val("-0.0", big.NewRat(0, 1)),
+ Val("2*2", big.NewInt(4)),
+ Val("2*i", 2),
+ Val("3/2", big.NewInt(1)),
+ Val("3/i", 3),
+ CErr("1/0", divByZero),
+ CErr("1.0/0", divByZero),
+ RErr("i/0", divByZero),
+ Val("3%2", big.NewInt(1)),
+ Val("i%2", 1),
+ CErr("3%0", divByZero),
+ CErr("3.0%0", opTypes),
+ RErr("i%0", divByZero),
+
+ // Examples from "Arithmetic operators"
+ Val("5/3", big.NewInt(1)),
+ Val("(i+4)/(i+2)", 1),
+ Val("5%3", big.NewInt(2)),
+ Val("(i+4)%(i+2)", 2),
+ Val("-5/3", big.NewInt(-1)),
+ Val("(i-6)/(i+2)", -1),
+ Val("-5%3", big.NewInt(-2)),
+ Val("(i-6)%(i+2)", -2),
+ Val("5/-3", big.NewInt(-1)),
+ Val("(i+4)/(i-4)", -1),
+ Val("5%-3", big.NewInt(2)),
+ Val("(i+4)%(i-4)", 2),
+ Val("-5/-3", big.NewInt(1)),
+ Val("(i-6)/(i-4)", 1),
+ Val("-5%-3", big.NewInt(-2)),
+ Val("(i-6)%(i-4)", -2),
+
+ // Examples from "Arithmetic operators"
+ Val("11/4", big.NewInt(2)),
+ Val("(i+10)/4", 2),
+ Val("11%4", big.NewInt(3)),
+ Val("(i+10)%4", 3),
+ Val("11>>2", big.NewInt(2)),
+ Val("(i+10)>>2", 2),
+ Val("11&3", big.NewInt(3)),
+ Val("(i+10)&3", 3),
+ Val("-11/4", big.NewInt(-2)),
+ Val("(i-12)/4", -2),
+ Val("-11%4", big.NewInt(-3)),
+ Val("(i-12)%4", -3),
+ Val("-11>>2", big.NewInt(-3)),
+ Val("(i-12)>>2", -3),
+ Val("-11&3", big.NewInt(1)),
+ Val("(i-12)&3", 1),
+
+ // TODO(austin) Test bit ops
+
+ // For shift, we try nearly every combination of positive
+ // ideal int, negative ideal int, big ideal int, ideal
+ // fractional float, ideal non-fractional float, int, uint,
+ // and float.
+ Val("2<<2", big.NewInt(2<<2)),
+ CErr("2<<(-1)", constantUnderflows),
+ CErr("2<<0x10000000000000000", constantOverflows),
+ CErr("2<<2.5", constantTruncated),
+ Val("2<<2.0", big.NewInt(2<<2.0)),
+ CErr("2<<i", mustBeUnsigned),
+ Val("2<<u", 2<<1),
+ CErr("2<<f", opTypes),
+
+ Val("-2<<2", big.NewInt(-2<<2)),
+ CErr("-2<<(-1)", constantUnderflows),
+ CErr("-2<<0x10000000000000000", constantOverflows),
+ CErr("-2<<2.5", constantTruncated),
+ Val("-2<<2.0", big.NewInt(-2<<2.0)),
+ CErr("-2<<i", mustBeUnsigned),
+ Val("-2<<u", -2<<1),
+ CErr("-2<<f", opTypes),
+
+ Val("0x10000000000000000<<2", new(big.Int).Lsh(hugeInteger, 2)),
+ CErr("0x10000000000000000<<(-1)", constantUnderflows),
+ CErr("0x10000000000000000<<0x10000000000000000", constantOverflows),
+ CErr("0x10000000000000000<<2.5", constantTruncated),
+ Val("0x10000000000000000<<2.0", new(big.Int).Lsh(hugeInteger, 2)),
+ CErr("0x10000000000000000<<i", mustBeUnsigned),
+ CErr("0x10000000000000000<<u", constantOverflows),
+ CErr("0x10000000000000000<<f", opTypes),
+
+ CErr("2.5<<2", opTypes),
+ CErr("2.0<<2", opTypes),
+
+ Val("i<<2", 1<<2),
+ CErr("i<<(-1)", constantUnderflows),
+ CErr("i<<0x10000000000000000", constantOverflows),
+ CErr("i<<2.5", constantTruncated),
+ Val("i<<2.0", 1<<2),
+ CErr("i<<i", mustBeUnsigned),
+ Val("i<<u", 1<<1),
+ CErr("i<<f", opTypes),
+ Val("i<<u", 1<<1),
+
+ Val("u<<2", uint(1<<2)),
+ CErr("u<<(-1)", constantUnderflows),
+ CErr("u<<0x10000000000000000", constantOverflows),
+ CErr("u<<2.5", constantTruncated),
+ Val("u<<2.0", uint(1<<2)),
+ CErr("u<<i", mustBeUnsigned),
+ Val("u<<u", uint(1<<1)),
+ CErr("u<<f", opTypes),
+ Val("u<<u", uint(1<<1)),
+
+ CErr("f<<2", opTypes),
+
+ // <, <=, >, >=
+ Val("1<2", 1 < 2),
+ Val("1<=2", 1 <= 2),
+ Val("2<=2", 2 <= 2),
+ Val("1>2", 1 > 2),
+ Val("1>=2", 1 >= 2),
+ Val("2>=2", 2 >= 2),
+
+ Val("i<2", 1 < 2),
+ Val("i<=2", 1 <= 2),
+ Val("i+1<=2", 2 <= 2),
+ Val("i>2", 1 > 2),
+ Val("i>=2", 1 >= 2),
+ Val("i+1>=2", 2 >= 2),
+
+ Val("u<2", 1 < 2),
+ Val("f<2", 1 < 2),
+
+ Val("s<\"b\"", true),
+ Val("s<\"a\"", false),
+ Val("s<=\"abc\"", true),
+ Val("s>\"aa\"", true),
+ Val("s>\"ac\"", false),
+ Val("s>=\"abc\"", true),
+
+ CErr("i<u", opTypes),
+ CErr("i<f", opTypes),
+ CErr("i<s", opTypes),
+ CErr("&i<&i", opTypes),
+ CErr("ai<ai", opTypes),
+
+ // ==, !=
+ Val("1==1", true),
+ Val("1!=1", false),
+ Val("1==2", false),
+ Val("1!=2", true),
+
+ Val("1.0==1", true),
+ Val("1.5==1", false),
+
+ Val("i==1", true),
+ Val("i!=1", false),
+ Val("i==2", false),
+ Val("i!=2", true),
+
+ Val("u==1", true),
+ Val("f==1", true),
+
+ Val("s==\"abc\"", true),
+ Val("s!=\"abc\"", false),
+ Val("s==\"abcd\"", false),
+ Val("s!=\"abcd\"", true),
+
+ Val("&i==&i", true),
+ Val("&i==&i2", false),
+
+ Val("fn==fn", true),
+ Val("fn==func(int)int{return 0}", false),
+
+ CErr("i==u", opTypes),
+ CErr("i==f", opTypes),
+ CErr("&i==&f", opTypes),
+ CErr("ai==ai", opTypes),
+ CErr("t==t", opTypes),
+ CErr("fn==oneTwo", opTypes),
+}
+
+func TestExpr(t *testing.T) { runTests(t, "exprTests", exprTests) }
diff --git a/libgo/go/exp/eval/func.go b/libgo/go/exp/eval/func.go
new file mode 100644
index 000000000..cb1b579e4
--- /dev/null
+++ b/libgo/go/exp/eval/func.go
@@ -0,0 +1,70 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package eval
+
+import "os"
+
+/*
+ * Virtual machine
+ */
+
+type Thread struct {
+ abort chan os.Error
+ pc uint
+ // The execution frame of this function. This remains the
+ // same throughout a function invocation.
+ f *Frame
+}
+
+type code []func(*Thread)
+
+func (i code) exec(t *Thread) {
+ opc := t.pc
+ t.pc = 0
+ l := uint(len(i))
+ for t.pc < l {
+ pc := t.pc
+ t.pc++
+ i[pc](t)
+ }
+ t.pc = opc
+}
+
+/*
+ * Code buffer
+ */
+
+type codeBuf struct {
+ instrs code
+}
+
+func newCodeBuf() *codeBuf { return &codeBuf{make(code, 0, 16)} }
+
+func (b *codeBuf) push(instr func(*Thread)) {
+ b.instrs = append(b.instrs, instr)
+}
+
+func (b *codeBuf) nextPC() uint { return uint(len(b.instrs)) }
+
+func (b *codeBuf) get() code {
+ // Freeze this buffer into an array of exactly the right size
+ a := make(code, len(b.instrs))
+ copy(a, b.instrs)
+ return code(a)
+}
+
+/*
+ * User-defined functions
+ */
+
+type evalFunc struct {
+ outer *Frame
+ frameSize int
+ code code
+}
+
+func (f *evalFunc) NewFrame() *Frame { return f.outer.child(f.frameSize) }
+
+func (f *evalFunc) Call(t *Thread) { f.code.exec(t) }
diff --git a/libgo/go/exp/eval/scope.go b/libgo/go/exp/eval/scope.go
new file mode 100644
index 000000000..66305de25
--- /dev/null
+++ b/libgo/go/exp/eval/scope.go
@@ -0,0 +1,207 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package eval
+
+import (
+ "go/token"
+ "log"
+)
+
+/*
+ * Blocks and scopes
+ */
+
+// A definition can be a *Variable, *Constant, or Type.
+type Def interface {
+ Pos() token.Pos
+}
+
+type Variable struct {
+ VarPos token.Pos
+ // Index of this variable in the Frame structure
+ Index int
+ // Static type of this variable
+ Type Type
+ // Value of this variable. This is only used by Scope.NewFrame;
+ // therefore, it is useful for global scopes but cannot be used
+ // in function scopes.
+ Init Value
+}
+
+func (v *Variable) Pos() token.Pos {
+ return v.VarPos
+}
+
+type Constant struct {
+ ConstPos token.Pos
+ Type Type
+ Value Value
+}
+
+func (c *Constant) Pos() token.Pos {
+ return c.ConstPos
+}
+
+// A block represents a definition block in which a name may not be
+// defined more than once.
+type block struct {
+ // The block enclosing this one, including blocks in other
+ // scopes.
+ outer *block
+ // The nested block currently being compiled, or nil.
+ inner *block
+ // The Scope containing this block.
+ scope *Scope
+ // The Variables, Constants, and Types defined in this block.
+ defs map[string]Def
+ // The index of the first variable defined in this block.
+ // This must be greater than the index of any variable defined
+ // in any parent of this block within the same Scope at the
+ // time this block is entered.
+ offset int
+ // The number of Variables defined in this block.
+ numVars int
+ // If global, do not allocate new vars and consts in
+ // the frame; assume that the refs will be compiled in
+ // using defs[name].Init.
+ global bool
+}
+
+// A Scope is the compile-time analogue of a Frame, which captures
+// some subtree of blocks.
+type Scope struct {
+ // The root block of this scope.
+ *block
+ // The maximum number of variables required at any point in
+ // this Scope. This determines the number of slots needed in
+	// Frames created from this Scope at run-time.
+ maxVars int
+}
+
+func (b *block) enterChild() *block {
+ if b.inner != nil && b.inner.scope == b.scope {
+ log.Panic("Failed to exit child block before entering another child")
+ }
+ sub := &block{
+ outer: b,
+ scope: b.scope,
+ defs: make(map[string]Def),
+ offset: b.offset + b.numVars,
+ }
+ b.inner = sub
+ return sub
+}
+
+func (b *block) exit() {
+ if b.outer == nil {
+ log.Panic("Cannot exit top-level block")
+ }
+ if b.outer.scope == b.scope {
+ if b.outer.inner != b {
+ log.Panic("Already exited block")
+ }
+ if b.inner != nil && b.inner.scope == b.scope {
+ log.Panic("Exit of parent block without exit of child block")
+ }
+ }
+ b.outer.inner = nil
+}
+
+func (b *block) ChildScope() *Scope {
+ if b.inner != nil && b.inner.scope == b.scope {
+ log.Panic("Failed to exit child block before entering a child scope")
+ }
+ sub := b.enterChild()
+ sub.offset = 0
+ sub.scope = &Scope{sub, 0}
+ return sub.scope
+}
+
+func (b *block) DefineVar(name string, pos token.Pos, t Type) (*Variable, Def) {
+ if prev, ok := b.defs[name]; ok {
+ return nil, prev
+ }
+ v := b.defineSlot(t, false)
+ v.VarPos = pos
+ b.defs[name] = v
+ return v, nil
+}
+
+func (b *block) DefineTemp(t Type) *Variable { return b.defineSlot(t, true) }
+
+func (b *block) defineSlot(t Type, temp bool) *Variable {
+ if b.inner != nil && b.inner.scope == b.scope {
+ log.Panic("Failed to exit child block before defining variable")
+ }
+ index := -1
+ if !b.global || temp {
+ index = b.offset + b.numVars
+ b.numVars++
+ if index >= b.scope.maxVars {
+ b.scope.maxVars = index + 1
+ }
+ }
+ v := &Variable{token.NoPos, index, t, nil}
+ return v
+}
+
+func (b *block) DefineConst(name string, pos token.Pos, t Type, v Value) (*Constant, Def) {
+ if prev, ok := b.defs[name]; ok {
+ return nil, prev
+ }
+ c := &Constant{pos, t, v}
+ b.defs[name] = c
+ return c, nil
+}
+
+func (b *block) DefineType(name string, pos token.Pos, t Type) Type {
+ if _, ok := b.defs[name]; ok {
+ return nil
+ }
+ nt := &NamedType{pos, name, nil, true, make(map[string]Method)}
+ if t != nil {
+ nt.Complete(t)
+ }
+ b.defs[name] = nt
+ return nt
+}
+
+func (b *block) Lookup(name string) (bl *block, level int, def Def) {
+ for b != nil {
+ if d, ok := b.defs[name]; ok {
+ return b, level, d
+ }
+ if b.outer != nil && b.scope != b.outer.scope {
+ level++
+ }
+ b = b.outer
+ }
+ return nil, 0, nil
+}
+
+func (s *Scope) NewFrame(outer *Frame) *Frame { return outer.child(s.maxVars) }
+
+/*
+ * Frames
+ */
+
+type Frame struct {
+ Outer *Frame
+ Vars []Value
+}
+
+func (f *Frame) Get(level int, index int) Value {
+ for ; level > 0; level-- {
+ f = f.Outer
+ }
+ return f.Vars[index]
+}
+
+func (f *Frame) child(numVars int) *Frame {
+ // TODO(austin) This is probably rather expensive. All values
+ // require heap allocation and zeroing them when we execute a
+ // definition typically requires some computation.
+ return &Frame{f, make([]Value, numVars)}
+}
diff --git a/libgo/go/exp/eval/stmt.go b/libgo/go/exp/eval/stmt.go
new file mode 100644
index 000000000..77ff066d0
--- /dev/null
+++ b/libgo/go/exp/eval/stmt.go
@@ -0,0 +1,1302 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package eval
+
+import (
+ "big"
+ "log"
+ "go/ast"
+ "go/token"
+)
+
+const (
+ returnPC = ^uint(0)
+ badPC = ^uint(1)
+)
+
+/*
+ * Statement compiler
+ */
+
+type stmtCompiler struct {
+ *blockCompiler
+ pos token.Pos
+ // This statement's label, or nil if it is not labeled.
+ stmtLabel *label
+}
+
+func (a *stmtCompiler) diag(format string, args ...interface{}) {
+ a.diagAt(a.pos, format, args...)
+}
+
+/*
+ * Flow checker
+ */
+
+type flowEnt struct {
+ // Whether this flow entry is conditional. If true, flow can
+ // continue to the next PC.
+ cond bool
+ // True if this will terminate flow (e.g., a return statement).
+ // cond must be false and jumps must be nil if this is true.
+ term bool
+ // PC's that can be reached from this flow entry.
+ jumps []*uint
+ // Whether this flow entry has been visited by reachesEnd.
+ visited bool
+}
+
+type flowBlock struct {
+ // If this is a goto, the target label.
+ target string
+ // The inner-most block containing definitions.
+ block *block
+ // The numVars from each block leading to the root of the
+ // scope, starting at block.
+ numVars []int
+}
+
+type flowBuf struct {
+ cb *codeBuf
+ // ents is a map from PC's to flow entries. Any PC missing
+ // from this map is assumed to reach only PC+1.
+ ents map[uint]*flowEnt
+ // gotos is a map from goto positions to information on the
+ // block at the point of the goto.
+ gotos map[token.Pos]*flowBlock
+ // labels is a map from label name to information on the block
+ // at the point of the label. labels are tracked by name,
+ // since mutliple labels at the same PC can have different
+ // blocks.
+ labels map[string]*flowBlock
+}
+
+func newFlowBuf(cb *codeBuf) *flowBuf {
+ return &flowBuf{cb, make(map[uint]*flowEnt), make(map[token.Pos]*flowBlock), make(map[string]*flowBlock)}
+}
+
+// put creates a flow control point for the next PC in the code buffer.
+// This should be done before pushing the instruction into the code buffer.
+func (f *flowBuf) put(cond bool, term bool, jumps []*uint) {
+ pc := f.cb.nextPC()
+ if ent, ok := f.ents[pc]; ok {
+ log.Panicf("Flow entry already exists at PC %d: %+v", pc, ent)
+ }
+ f.ents[pc] = &flowEnt{cond, term, jumps, false}
+}
+
+// putTerm creates a flow control point at the next PC that
+// unconditionally terminates execution.
+func (f *flowBuf) putTerm() { f.put(false, true, nil) }
+
+// put1 creates a flow control point at the next PC that jumps to one
+// PC and, if cond is true, can also continue to the PC following the
+// next PC.
+func (f *flowBuf) put1(cond bool, jumpPC *uint) {
+ f.put(cond, false, []*uint{jumpPC})
+}
+
+func newFlowBlock(target string, b *block) *flowBlock {
+ // Find the inner-most block containing definitions
+ for b.numVars == 0 && b.outer != nil && b.outer.scope == b.scope {
+ b = b.outer
+ }
+
+ // Count parents leading to the root of the scope
+ n := 0
+ for bp := b; bp.scope == b.scope; bp = bp.outer {
+ n++
+ }
+
+ // Capture numVars from each block to the root of the scope
+ numVars := make([]int, n)
+ i := 0
+ for bp := b; i < n; bp = bp.outer {
+ numVars[i] = bp.numVars
+ i++
+ }
+
+ return &flowBlock{target, b, numVars}
+}
+
+// putGoto captures the block at a goto statement. This should be
+// called in addition to putting a flow control point.
+func (f *flowBuf) putGoto(pos token.Pos, target string, b *block) {
+ f.gotos[pos] = newFlowBlock(target, b)
+}
+
+// putLabel captures the block at a label.
+func (f *flowBuf) putLabel(name string, b *block) {
+ f.labels[name] = newFlowBlock("", b)
+}
+
+// reachesEnd returns true if the end of f's code buffer can be
+// reached from the given program counter. Error reporting is the
+// caller's responsibility.
+func (f *flowBuf) reachesEnd(pc uint) bool {
+ endPC := f.cb.nextPC()
+ if pc > endPC {
+ log.Panicf("Reached bad PC %d past end PC %d", pc, endPC)
+ }
+
+ for ; pc < endPC; pc++ {
+ ent, ok := f.ents[pc]
+ if !ok {
+ continue
+ }
+
+ if ent.visited {
+ return false
+ }
+ ent.visited = true
+
+ if ent.term {
+ return false
+ }
+
+ // If anything can reach the end, we can reach the end
+ // from pc.
+ for _, j := range ent.jumps {
+ if f.reachesEnd(*j) {
+ return true
+ }
+ }
+ // If the jump was conditional, we can reach the next
+ // PC, so try reaching the end from it.
+ if ent.cond {
+ continue
+ }
+ return false
+ }
+ return true
+}
+
+// gotosObeyScopes returns true if no goto statement causes any
+// variables to come into scope that were not in scope at the point of
+// the goto. Reports any errors using the given compiler.
+func (f *flowBuf) gotosObeyScopes(a *compiler) {
+ for pos, src := range f.gotos {
+ tgt := f.labels[src.target]
+
+ // The target block must be a parent of this block
+ numVars := src.numVars
+ b := src.block
+ for len(numVars) > 0 && b != tgt.block {
+ b = b.outer
+ numVars = numVars[1:]
+ }
+ if b != tgt.block {
+ // We jumped into a deeper block
+ a.diagAt(pos, "goto causes variables to come into scope")
+ return
+ }
+
+ // There must be no variables in the target block that
+ // did not exist at the jump
+ tgtNumVars := tgt.numVars
+ for i := range numVars {
+ if tgtNumVars[i] > numVars[i] {
+ a.diagAt(pos, "goto causes variables to come into scope")
+ return
+ }
+ }
+ }
+}
+
+/*
+ * Statement generation helpers
+ */
+
+func (a *stmtCompiler) defineVar(ident *ast.Ident, t Type) *Variable {
+ v, prev := a.block.DefineVar(ident.Name, ident.Pos(), t)
+ if prev != nil {
+ if prev.Pos().IsValid() {
+ a.diagAt(ident.Pos(), "variable %s redeclared in this block\n\tprevious declaration at %s", ident.Name, a.fset.Position(prev.Pos()))
+ } else {
+ a.diagAt(ident.Pos(), "variable %s redeclared in this block", ident.Name)
+ }
+ return nil
+ }
+
+ // Initialize the variable
+ index := v.Index
+ if v.Index >= 0 {
+ a.push(func(v *Thread) { v.f.Vars[index] = t.Zero() })
+ }
+ return v
+}
+
+// TODO(austin) Move doAssign to here
+
+/*
+ * Statement compiler
+ */
+
+func (a *stmtCompiler) compile(s ast.Stmt) {
+ if a.block.inner != nil {
+ log.Panic("Child scope still entered")
+ }
+
+ notimpl := false
+ switch s := s.(type) {
+ case *ast.BadStmt:
+ // Error already reported by parser.
+ a.silentErrors++
+
+ case *ast.DeclStmt:
+ a.compileDeclStmt(s)
+
+ case *ast.EmptyStmt:
+ // Do nothing.
+
+ case *ast.LabeledStmt:
+ a.compileLabeledStmt(s)
+
+ case *ast.ExprStmt:
+ a.compileExprStmt(s)
+
+ case *ast.IncDecStmt:
+ a.compileIncDecStmt(s)
+
+ case *ast.AssignStmt:
+ a.compileAssignStmt(s)
+
+ case *ast.GoStmt:
+ notimpl = true
+
+ case *ast.DeferStmt:
+ notimpl = true
+
+ case *ast.ReturnStmt:
+ a.compileReturnStmt(s)
+
+ case *ast.BranchStmt:
+ a.compileBranchStmt(s)
+
+ case *ast.BlockStmt:
+ a.compileBlockStmt(s)
+
+ case *ast.IfStmt:
+ a.compileIfStmt(s)
+
+ case *ast.CaseClause:
+ a.diag("case clause outside switch")
+
+ case *ast.SwitchStmt:
+ a.compileSwitchStmt(s)
+
+ case *ast.TypeCaseClause:
+ notimpl = true
+
+ case *ast.TypeSwitchStmt:
+ notimpl = true
+
+ case *ast.CommClause:
+ notimpl = true
+
+ case *ast.SelectStmt:
+ notimpl = true
+
+ case *ast.ForStmt:
+ a.compileForStmt(s)
+
+ case *ast.RangeStmt:
+ notimpl = true
+
+ default:
+ log.Panicf("unexpected ast node type %T", s)
+ }
+
+ if notimpl {
+ a.diag("%T statment node not implemented", s)
+ }
+
+ if a.block.inner != nil {
+ log.Panic("Forgot to exit child scope")
+ }
+}
+
+func (a *stmtCompiler) compileDeclStmt(s *ast.DeclStmt) {
+ switch decl := s.Decl.(type) {
+ case *ast.BadDecl:
+ // Do nothing. Already reported by parser.
+ a.silentErrors++
+
+ case *ast.FuncDecl:
+ if !a.block.global {
+ log.Panic("FuncDecl at statement level")
+ }
+
+ case *ast.GenDecl:
+ if decl.Tok == token.IMPORT && !a.block.global {
+ log.Panic("import at statement level")
+ }
+
+ default:
+ log.Panicf("Unexpected Decl type %T", s.Decl)
+ }
+ a.compileDecl(s.Decl)
+}
+
+func (a *stmtCompiler) compileVarDecl(decl *ast.GenDecl) {
+ for _, spec := range decl.Specs {
+ spec := spec.(*ast.ValueSpec)
+ if spec.Values == nil {
+ // Declaration without assignment
+ if spec.Type == nil {
+ // Parser should have caught
+ log.Panic("Type and Values nil")
+ }
+ t := a.compileType(a.block, spec.Type)
+ // Define placeholders even if type compile failed
+ for _, n := range spec.Names {
+ a.defineVar(n, t)
+ }
+ } else {
+ // Declaration with assignment
+ lhs := make([]ast.Expr, len(spec.Names))
+ for i, n := range spec.Names {
+ lhs[i] = n
+ }
+ a.doAssign(lhs, spec.Values, decl.Tok, spec.Type)
+ }
+ }
+}
+
+func (a *stmtCompiler) compileDecl(decl ast.Decl) {
+ switch d := decl.(type) {
+ case *ast.BadDecl:
+ // Do nothing. Already reported by parser.
+ a.silentErrors++
+
+ case *ast.FuncDecl:
+ decl := a.compileFuncType(a.block, d.Type)
+ if decl == nil {
+ return
+ }
+ // Declare and initialize v before compiling func
+ // so that body can refer to itself.
+ c, prev := a.block.DefineConst(d.Name.Name, a.pos, decl.Type, decl.Type.Zero())
+ if prev != nil {
+ pos := prev.Pos()
+ if pos.IsValid() {
+ a.diagAt(d.Name.Pos(), "identifier %s redeclared in this block\n\tprevious declaration at %s", d.Name.Name, a.fset.Position(pos))
+ } else {
+ a.diagAt(d.Name.Pos(), "identifier %s redeclared in this block", d.Name.Name)
+ }
+ }
+ fn := a.compileFunc(a.block, decl, d.Body)
+ if c == nil || fn == nil {
+ return
+ }
+ var zeroThread Thread
+ c.Value.(FuncValue).Set(nil, fn(&zeroThread))
+
+ case *ast.GenDecl:
+ switch d.Tok {
+ case token.IMPORT:
+ log.Panicf("%v not implemented", d.Tok)
+ case token.CONST:
+ log.Panicf("%v not implemented", d.Tok)
+ case token.TYPE:
+ a.compileTypeDecl(a.block, d)
+ case token.VAR:
+ a.compileVarDecl(d)
+ }
+
+ default:
+ log.Panicf("Unexpected Decl type %T", decl)
+ }
+}
+
+func (a *stmtCompiler) compileLabeledStmt(s *ast.LabeledStmt) {
+ // Define label
+ l, ok := a.labels[s.Label.Name]
+ if ok {
+ if l.resolved.IsValid() {
+ a.diag("label %s redeclared in this block\n\tprevious declaration at %s", s.Label.Name, a.fset.Position(l.resolved))
+ }
+ } else {
+ pc := badPC
+ l = &label{name: s.Label.Name, gotoPC: &pc}
+ a.labels[l.name] = l
+ }
+ l.desc = "regular label"
+ l.resolved = s.Pos()
+
+ // Set goto PC
+ *l.gotoPC = a.nextPC()
+
+ // Define flow entry so we can check for jumps over declarations.
+ a.flow.putLabel(l.name, a.block)
+
+ // Compile the statement. Reuse our stmtCompiler for simplicity.
+ sc := &stmtCompiler{a.blockCompiler, s.Stmt.Pos(), l}
+ sc.compile(s.Stmt)
+}
+
+func (a *stmtCompiler) compileExprStmt(s *ast.ExprStmt) {
+ bc := a.enterChild()
+ defer bc.exit()
+
+ e := a.compileExpr(bc.block, false, s.X)
+ if e == nil {
+ return
+ }
+
+ if e.exec == nil {
+ a.diag("%s cannot be used as expression statement", e.desc)
+ return
+ }
+
+ a.push(e.exec)
+}
+
+func (a *stmtCompiler) compileIncDecStmt(s *ast.IncDecStmt) {
+ // Create temporary block for extractEffect
+ bc := a.enterChild()
+ defer bc.exit()
+
+ l := a.compileExpr(bc.block, false, s.X)
+ if l == nil {
+ return
+ }
+
+ if l.evalAddr == nil {
+ l.diag("cannot assign to %s", l.desc)
+ return
+ }
+ if !(l.t.isInteger() || l.t.isFloat()) {
+ l.diagOpType(s.Tok, l.t)
+ return
+ }
+
+ var op token.Token
+ var desc string
+ switch s.Tok {
+ case token.INC:
+ op = token.ADD
+ desc = "increment statement"
+ case token.DEC:
+ op = token.SUB
+ desc = "decrement statement"
+ default:
+ log.Panicf("Unexpected IncDec token %v", s.Tok)
+ }
+
+ effect, l := l.extractEffect(bc.block, desc)
+
+ one := l.newExpr(IdealIntType, "constant")
+ one.pos = s.Pos()
+ one.eval = func() *big.Int { return big.NewInt(1) }
+
+ binop := l.compileBinaryExpr(op, l, one)
+ if binop == nil {
+ return
+ }
+
+ assign := a.compileAssign(s.Pos(), bc.block, l.t, []*expr{binop}, "", "")
+ if assign == nil {
+ log.Panicf("compileAssign type check failed")
+ }
+
+ lf := l.evalAddr
+ a.push(func(v *Thread) {
+ effect(v)
+ assign(lf(v), v)
+ })
+}
+
+func (a *stmtCompiler) doAssign(lhs []ast.Expr, rhs []ast.Expr, tok token.Token, declTypeExpr ast.Expr) {
+ nerr := a.numError()
+
+ // Compile right side first so we have the types when
+ // compiling the left side and so we don't see definitions
+ // made on the left side.
+ rs := make([]*expr, len(rhs))
+ for i, re := range rhs {
+ rs[i] = a.compileExpr(a.block, false, re)
+ }
+
+ errOp := "assignment"
+ if tok == token.DEFINE || tok == token.VAR {
+ errOp = "declaration"
+ }
+ ac, ok := a.checkAssign(a.pos, rs, errOp, "value")
+ ac.allowMapForms(len(lhs))
+
+ // If this is a definition and the LHS is too big, we won't be
+ // able to produce the usual error message because we can't
+ // begin to infer the types of the LHS.
+ if (tok == token.DEFINE || tok == token.VAR) && len(lhs) > len(ac.rmt.Elems) {
+ a.diag("not enough values for definition")
+ }
+
+ // Compile left type if there is one
+ var declType Type
+ if declTypeExpr != nil {
+ declType = a.compileType(a.block, declTypeExpr)
+ }
+
+ // Compile left side
+ ls := make([]*expr, len(lhs))
+ nDefs := 0
+ for i, le := range lhs {
+ // If this is a definition, get the identifier and its type
+ var ident *ast.Ident
+ var lt Type
+ switch tok {
+ case token.DEFINE:
+ // Check that it's an identifier
+ ident, ok = le.(*ast.Ident)
+ if !ok {
+ a.diagAt(le.Pos(), "left side of := must be a name")
+ // Suppress new defitions errors
+ nDefs++
+ continue
+ }
+
+ // Is this simply an assignment?
+ if _, ok := a.block.defs[ident.Name]; ok {
+ ident = nil
+ break
+ }
+ nDefs++
+
+ case token.VAR:
+ ident = le.(*ast.Ident)
+ }
+
+ // If it's a definition, get or infer its type.
+ if ident != nil {
+ // Compute the identifier's type from the RHS
+ // type. We use the computed MultiType so we
+ // don't have to worry about unpacking.
+ switch {
+ case declTypeExpr != nil:
+ // We have a declaration type, use it.
+ // If declType is nil, we gave an
+ // error when we compiled it.
+ lt = declType
+
+ case i >= len(ac.rmt.Elems):
+ // Define a placeholder. We already
+ // gave the "not enough" error above.
+ lt = nil
+
+ case ac.rmt.Elems[i] == nil:
+ // We gave the error when we compiled
+ // the RHS.
+ lt = nil
+
+ case ac.rmt.Elems[i].isIdeal():
+ // If the type is absent and the
+ // corresponding expression is a
+ // constant expression of ideal
+ // integer or ideal float type, the
+ // type of the declared variable is
+ // int or float respectively.
+ switch {
+ case ac.rmt.Elems[i].isInteger():
+ lt = IntType
+ case ac.rmt.Elems[i].isFloat():
+ lt = Float64Type
+ default:
+ log.Panicf("unexpected ideal type %v", rs[i].t)
+ }
+
+ default:
+ lt = ac.rmt.Elems[i]
+ }
+ }
+
+ // If it's a definition, define the identifier
+ if ident != nil {
+ if a.defineVar(ident, lt) == nil {
+ continue
+ }
+ }
+
+ // Compile LHS
+ ls[i] = a.compileExpr(a.block, false, le)
+ if ls[i] == nil {
+ continue
+ }
+
+ if ls[i].evalMapValue != nil {
+ // Map indexes are not generally addressable,
+ // but they are assignable.
+ //
+ // TODO(austin) Now that the expression
+ // compiler uses semantic values, this might
+ // be easier to implement as a function call.
+ sub := ls[i]
+ ls[i] = ls[i].newExpr(sub.t, sub.desc)
+ ls[i].evalMapValue = sub.evalMapValue
+ mvf := sub.evalMapValue
+ et := sub.t
+ ls[i].evalAddr = func(t *Thread) Value {
+ m, k := mvf(t)
+ e := m.Elem(t, k)
+ if e == nil {
+ e = et.Zero()
+ m.SetElem(t, k, e)
+ }
+ return e
+ }
+ } else if ls[i].evalAddr == nil {
+ ls[i].diag("cannot assign to %s", ls[i].desc)
+ continue
+ }
+ }
+
+ // A short variable declaration may redeclare variables
+ // provided they were originally declared in the same block
+ // with the same type, and at least one of the variables is
+ // new.
+ if tok == token.DEFINE && nDefs == 0 {
+ a.diag("at least one new variable must be declared")
+ return
+ }
+
+ // If there have been errors, our arrays are full of nil's so
+ // get out of here now.
+ if nerr != a.numError() {
+ return
+ }
+
+ // Check for 'a[x] = r, ok'
+ if len(ls) == 1 && len(rs) == 2 && ls[0].evalMapValue != nil {
+ a.diag("a[x] = r, ok form not implemented")
+ return
+ }
+
+ // Create assigner
+ var lt Type
+ n := len(lhs)
+ if n == 1 {
+ lt = ls[0].t
+ } else {
+ lts := make([]Type, len(ls))
+ for i, l := range ls {
+ if l != nil {
+ lts[i] = l.t
+ }
+ }
+ lt = NewMultiType(lts)
+ }
+ bc := a.enterChild()
+ defer bc.exit()
+ assign := ac.compile(bc.block, lt)
+ if assign == nil {
+ return
+ }
+
+ // Compile
+ if n == 1 {
+ // Don't need temporaries and can avoid []Value.
+ lf := ls[0].evalAddr
+ a.push(func(t *Thread) { assign(lf(t), t) })
+ } else if tok == token.VAR || (tok == token.DEFINE && nDefs == n) {
+ // Don't need temporaries
+ lfs := make([]func(*Thread) Value, n)
+ for i, l := range ls {
+ lfs[i] = l.evalAddr
+ }
+ a.push(func(t *Thread) {
+ dest := make([]Value, n)
+ for i, lf := range lfs {
+ dest[i] = lf(t)
+ }
+ assign(multiV(dest), t)
+ })
+ } else {
+ // Need temporaries
+ lmt := lt.(*MultiType)
+ lfs := make([]func(*Thread) Value, n)
+ for i, l := range ls {
+ lfs[i] = l.evalAddr
+ }
+ a.push(func(t *Thread) {
+ temp := lmt.Zero().(multiV)
+ assign(temp, t)
+ // Copy to destination
+ for i := 0; i < n; i++ {
+ // TODO(austin) Need to evaluate LHS
+ // before RHS
+ lfs[i](t).Assign(t, temp[i])
+ }
+ })
+ }
+}
+
+var assignOpToOp = map[token.Token]token.Token{
+ token.ADD_ASSIGN: token.ADD,
+ token.SUB_ASSIGN: token.SUB,
+ token.MUL_ASSIGN: token.MUL,
+ token.QUO_ASSIGN: token.QUO,
+ token.REM_ASSIGN: token.REM,
+
+ token.AND_ASSIGN: token.AND,
+ token.OR_ASSIGN: token.OR,
+ token.XOR_ASSIGN: token.XOR,
+ token.SHL_ASSIGN: token.SHL,
+ token.SHR_ASSIGN: token.SHR,
+ token.AND_NOT_ASSIGN: token.AND_NOT,
+}
+
+func (a *stmtCompiler) doAssignOp(s *ast.AssignStmt) {
+ if len(s.Lhs) != 1 || len(s.Rhs) != 1 {
+ a.diag("tuple assignment cannot be combined with an arithmetic operation")
+ return
+ }
+
+ // Create temporary block for extractEffect
+ bc := a.enterChild()
+ defer bc.exit()
+
+ l := a.compileExpr(bc.block, false, s.Lhs[0])
+ r := a.compileExpr(bc.block, false, s.Rhs[0])
+ if l == nil || r == nil {
+ return
+ }
+
+ if l.evalAddr == nil {
+ l.diag("cannot assign to %s", l.desc)
+ return
+ }
+
+ effect, l := l.extractEffect(bc.block, "operator-assignment")
+
+ binop := r.compileBinaryExpr(assignOpToOp[s.Tok], l, r)
+ if binop == nil {
+ return
+ }
+
+ assign := a.compileAssign(s.Pos(), bc.block, l.t, []*expr{binop}, "assignment", "value")
+ if assign == nil {
+ log.Panicf("compileAssign type check failed")
+ }
+
+ lf := l.evalAddr
+ a.push(func(t *Thread) {
+ effect(t)
+ assign(lf(t), t)
+ })
+}
+
+func (a *stmtCompiler) compileAssignStmt(s *ast.AssignStmt) {
+ switch s.Tok {
+ case token.ASSIGN, token.DEFINE:
+ a.doAssign(s.Lhs, s.Rhs, s.Tok, nil)
+
+ default:
+ a.doAssignOp(s)
+ }
+}
+
+func (a *stmtCompiler) compileReturnStmt(s *ast.ReturnStmt) {
+ if a.fnType == nil {
+ a.diag("cannot return at the top level")
+ return
+ }
+
+ if len(s.Results) == 0 && (len(a.fnType.Out) == 0 || a.outVarsNamed) {
+ // Simple case. Simply exit from the function.
+ a.flow.putTerm()
+ a.push(func(v *Thread) { v.pc = returnPC })
+ return
+ }
+
+ bc := a.enterChild()
+ defer bc.exit()
+
+ // Compile expressions
+ bad := false
+ rs := make([]*expr, len(s.Results))
+ for i, re := range s.Results {
+ rs[i] = a.compileExpr(bc.block, false, re)
+ if rs[i] == nil {
+ bad = true
+ }
+ }
+ if bad {
+ return
+ }
+
+ // Create assigner
+
+ // However, if the expression list in the "return" statement
+ // is a single call to a multi-valued function, the values
+ // returned from the called function will be returned from
+ // this one.
+ assign := a.compileAssign(s.Pos(), bc.block, NewMultiType(a.fnType.Out), rs, "return", "value")
+
+ // XXX(Spec) "The result types of the current function and the
+ // called function must match." Match is fuzzy. It should
+ // say that they must be assignment compatible.
+
+ // Compile
+ start := len(a.fnType.In)
+ nout := len(a.fnType.Out)
+ a.flow.putTerm()
+ a.push(func(t *Thread) {
+ assign(multiV(t.f.Vars[start:start+nout]), t)
+ t.pc = returnPC
+ })
+}
+
+func (a *stmtCompiler) findLexicalLabel(name *ast.Ident, pred func(*label) bool, errOp, errCtx string) *label {
+ bc := a.blockCompiler
+ for ; bc != nil; bc = bc.parent {
+ if bc.label == nil {
+ continue
+ }
+ l := bc.label
+ if name == nil && pred(l) {
+ return l
+ }
+ if name != nil && l.name == name.Name {
+ if !pred(l) {
+ a.diag("cannot %s to %s %s", errOp, l.desc, l.name)
+ return nil
+ }
+ return l
+ }
+ }
+ if name == nil {
+ a.diag("%s outside %s", errOp, errCtx)
+ } else {
+ a.diag("%s label %s not defined", errOp, name.Name)
+ }
+ return nil
+}
+
+func (a *stmtCompiler) compileBranchStmt(s *ast.BranchStmt) {
+ var pc *uint
+
+ switch s.Tok {
+ case token.BREAK:
+ l := a.findLexicalLabel(s.Label, func(l *label) bool { return l.breakPC != nil }, "break", "for loop, switch, or select")
+ if l == nil {
+ return
+ }
+ pc = l.breakPC
+
+ case token.CONTINUE:
+ l := a.findLexicalLabel(s.Label, func(l *label) bool { return l.continuePC != nil }, "continue", "for loop")
+ if l == nil {
+ return
+ }
+ pc = l.continuePC
+
+ case token.GOTO:
+ l, ok := a.labels[s.Label.Name]
+ if !ok {
+ pc := badPC
+ l = &label{name: s.Label.Name, desc: "unresolved label", gotoPC: &pc, used: s.Pos()}
+ a.labels[l.name] = l
+ }
+
+ pc = l.gotoPC
+ a.flow.putGoto(s.Pos(), l.name, a.block)
+
+ case token.FALLTHROUGH:
+ a.diag("fallthrough outside switch")
+ return
+
+ default:
+ log.Panic("Unexpected branch token %v", s.Tok)
+ }
+
+ a.flow.put1(false, pc)
+ a.push(func(v *Thread) { v.pc = *pc })
+}
+
+func (a *stmtCompiler) compileBlockStmt(s *ast.BlockStmt) {
+ bc := a.enterChild()
+ bc.compileStmts(s)
+ bc.exit()
+}
+
+func (a *stmtCompiler) compileIfStmt(s *ast.IfStmt) {
+ // The scope of any variables declared by [the init] statement
+ // extends to the end of the "if" statement and the variables
+ // are initialized once before the statement is entered.
+ //
+ // XXX(Spec) What this really wants to say is that there's an
+ // implicit scope wrapping every if, for, and switch
+ // statement. This is subtly different from what it actually
+ // says when there's a non-block else clause, because that
+ // else claus has to execute in a scope that is *not* the
+ // surrounding scope.
+ bc := a.enterChild()
+ defer bc.exit()
+
+ // Compile init statement, if any
+ if s.Init != nil {
+ bc.compileStmt(s.Init)
+ }
+
+ elsePC := badPC
+ endPC := badPC
+
+ // Compile condition, if any. If there is no condition, we
+ // fall through to the body.
+ if s.Cond != nil {
+ e := bc.compileExpr(bc.block, false, s.Cond)
+ switch {
+ case e == nil:
+ // Error reported by compileExpr
+ case !e.t.isBoolean():
+ e.diag("'if' condition must be boolean\n\t%v", e.t)
+ default:
+ eval := e.asBool()
+ a.flow.put1(true, &elsePC)
+ a.push(func(t *Thread) {
+ if !eval(t) {
+ t.pc = elsePC
+ }
+ })
+ }
+ }
+
+ // Compile body
+ body := bc.enterChild()
+ body.compileStmts(s.Body)
+ body.exit()
+
+ // Compile else
+ if s.Else != nil {
+ // Skip over else if we executed the body
+ a.flow.put1(false, &endPC)
+ a.push(func(v *Thread) { v.pc = endPC })
+ elsePC = a.nextPC()
+ bc.compileStmt(s.Else)
+ } else {
+ elsePC = a.nextPC()
+ }
+ endPC = a.nextPC()
+}
+
+func (a *stmtCompiler) compileSwitchStmt(s *ast.SwitchStmt) {
+ // Create implicit scope around switch
+ bc := a.enterChild()
+ defer bc.exit()
+
+ // Compile init statement, if any
+ if s.Init != nil {
+ bc.compileStmt(s.Init)
+ }
+
+ // Compile condition, if any, and extract its effects
+ var cond *expr
+ condbc := bc.enterChild()
+ if s.Tag != nil {
+ e := condbc.compileExpr(condbc.block, false, s.Tag)
+ if e != nil {
+ var effect func(*Thread)
+ effect, cond = e.extractEffect(condbc.block, "switch")
+ a.push(effect)
+ }
+ }
+
+ // Count cases
+ ncases := 0
+ hasDefault := false
+ for _, c := range s.Body.List {
+ clause, ok := c.(*ast.CaseClause)
+ if !ok {
+ a.diagAt(clause.Pos(), "switch statement must contain case clauses")
+ continue
+ }
+ if clause.Values == nil {
+ if hasDefault {
+ a.diagAt(clause.Pos(), "switch statement contains more than one default case")
+ }
+ hasDefault = true
+ } else {
+ ncases += len(clause.Values)
+ }
+ }
+
+ // Compile case expressions
+ cases := make([]func(*Thread) bool, ncases)
+ i := 0
+ for _, c := range s.Body.List {
+ clause, ok := c.(*ast.CaseClause)
+ if !ok {
+ continue
+ }
+ for _, v := range clause.Values {
+ e := condbc.compileExpr(condbc.block, false, v)
+ switch {
+ case e == nil:
+ // Error reported by compileExpr
+ case cond == nil && !e.t.isBoolean():
+ a.diagAt(v.Pos(), "'case' condition must be boolean")
+ case cond == nil:
+ cases[i] = e.asBool()
+ case cond != nil:
+ // Create comparison
+ // TOOD(austin) This produces bad error messages
+ compare := e.compileBinaryExpr(token.EQL, cond, e)
+ if compare != nil {
+ cases[i] = compare.asBool()
+ }
+ }
+ i++
+ }
+ }
+
+ // Emit condition
+ casePCs := make([]*uint, ncases+1)
+ endPC := badPC
+
+ a.flow.put(false, false, casePCs)
+ a.push(func(t *Thread) {
+ for i, c := range cases {
+ if c(t) {
+ t.pc = *casePCs[i]
+ return
+ }
+ }
+ t.pc = *casePCs[ncases]
+ })
+ condbc.exit()
+
+ // Compile cases
+ i = 0
+ for _, c := range s.Body.List {
+ clause, ok := c.(*ast.CaseClause)
+ if !ok {
+ continue
+ }
+
+ // Save jump PC's
+ pc := a.nextPC()
+ if clause.Values != nil {
+ for _ = range clause.Values {
+ casePCs[i] = &pc
+ i++
+ }
+ } else {
+ // Default clause
+ casePCs[ncases] = &pc
+ }
+
+ // Compile body
+ fall := false
+ for j, s := range clause.Body {
+ if br, ok := s.(*ast.BranchStmt); ok && br.Tok == token.FALLTHROUGH {
+ // println("Found fallthrough");
+ // It may be used only as the final
+ // non-empty statement in a case or
+ // default clause in an expression
+ // "switch" statement.
+ for _, s2 := range clause.Body[j+1:] {
+ // XXX(Spec) 6g also considers
+ // empty blocks to be empty
+ // statements.
+ if _, ok := s2.(*ast.EmptyStmt); !ok {
+ a.diagAt(s.Pos(), "fallthrough statement must be final statement in case")
+ break
+ }
+ }
+ fall = true
+ } else {
+ bc.compileStmt(s)
+ }
+ }
+ // Jump out of switch, unless there was a fallthrough
+ if !fall {
+ a.flow.put1(false, &endPC)
+ a.push(func(v *Thread) { v.pc = endPC })
+ }
+ }
+
+ // Get end PC
+ endPC = a.nextPC()
+ if !hasDefault {
+ casePCs[ncases] = &endPC
+ }
+}
+
+func (a *stmtCompiler) compileForStmt(s *ast.ForStmt) {
+ // Wrap the entire for in a block.
+ bc := a.enterChild()
+ defer bc.exit()
+
+ // Compile init statement, if any
+ if s.Init != nil {
+ bc.compileStmt(s.Init)
+ }
+
+ bodyPC := badPC
+ postPC := badPC
+ checkPC := badPC
+ endPC := badPC
+
+ // Jump to condition check. We generate slightly less code by
+ // placing the condition check after the body.
+ a.flow.put1(false, &checkPC)
+ a.push(func(v *Thread) { v.pc = checkPC })
+
+ // Compile body
+ bodyPC = a.nextPC()
+ body := bc.enterChild()
+ if a.stmtLabel != nil {
+ body.label = a.stmtLabel
+ } else {
+ body.label = &label{resolved: s.Pos()}
+ }
+ body.label.desc = "for loop"
+ body.label.breakPC = &endPC
+ body.label.continuePC = &postPC
+ body.compileStmts(s.Body)
+ body.exit()
+
+ // Compile post, if any
+ postPC = a.nextPC()
+ if s.Post != nil {
+ // TODO(austin) Does the parser disallow short
+ // declarations in s.Post?
+ bc.compileStmt(s.Post)
+ }
+
+ // Compile condition check, if any
+ checkPC = a.nextPC()
+ if s.Cond == nil {
+ // If the condition is absent, it is equivalent to true.
+ a.flow.put1(false, &bodyPC)
+ a.push(func(v *Thread) { v.pc = bodyPC })
+ } else {
+ e := bc.compileExpr(bc.block, false, s.Cond)
+ switch {
+ case e == nil:
+ // Error reported by compileExpr
+ case !e.t.isBoolean():
+ a.diag("'for' condition must be boolean\n\t%v", e.t)
+ default:
+ eval := e.asBool()
+ a.flow.put1(true, &bodyPC)
+ a.push(func(t *Thread) {
+ if eval(t) {
+ t.pc = bodyPC
+ }
+ })
+ }
+ }
+
+ endPC = a.nextPC()
+}
+
+/*
+ * Block compiler
+ */
+
+func (a *blockCompiler) compileStmt(s ast.Stmt) {
+ sc := &stmtCompiler{a, s.Pos(), nil}
+ sc.compile(s)
+}
+
+func (a *blockCompiler) compileStmts(block *ast.BlockStmt) {
+ for _, sub := range block.List {
+ a.compileStmt(sub)
+ }
+}
+
+func (a *blockCompiler) enterChild() *blockCompiler {
+ block := a.block.enterChild()
+ return &blockCompiler{
+ funcCompiler: a.funcCompiler,
+ block: block,
+ parent: a,
+ }
+}
+
+func (a *blockCompiler) exit() { a.block.exit() }
+
+/*
+ * Function compiler
+ */
+
+func (a *compiler) compileFunc(b *block, decl *FuncDecl, body *ast.BlockStmt) func(*Thread) Func {
+ // Create body scope
+ //
+ // The scope of a parameter or result is the body of the
+ // corresponding function.
+ bodyScope := b.ChildScope()
+ defer bodyScope.exit()
+ for i, t := range decl.Type.In {
+ if decl.InNames[i] != nil {
+ bodyScope.DefineVar(decl.InNames[i].Name, decl.InNames[i].Pos(), t)
+ } else {
+ bodyScope.DefineTemp(t)
+ }
+ }
+ for i, t := range decl.Type.Out {
+ if decl.OutNames[i] != nil {
+ bodyScope.DefineVar(decl.OutNames[i].Name, decl.OutNames[i].Pos(), t)
+ } else {
+ bodyScope.DefineTemp(t)
+ }
+ }
+
+ // Create block context
+ cb := newCodeBuf()
+ fc := &funcCompiler{
+ compiler: a,
+ fnType: decl.Type,
+ outVarsNamed: len(decl.OutNames) > 0 && decl.OutNames[0] != nil,
+ codeBuf: cb,
+ flow: newFlowBuf(cb),
+ labels: make(map[string]*label),
+ }
+ bc := &blockCompiler{
+ funcCompiler: fc,
+ block: bodyScope.block,
+ }
+
+ // Compile body
+ nerr := a.numError()
+ bc.compileStmts(body)
+ fc.checkLabels()
+ if nerr != a.numError() {
+ return nil
+ }
+
+ // Check that the body returned if necessary. We only check
+ // this if there were no errors compiling the body.
+ if len(decl.Type.Out) > 0 && fc.flow.reachesEnd(0) {
+ // XXX(Spec) Not specified.
+ a.diagAt(body.Rbrace, "function ends without a return statement")
+ return nil
+ }
+
+ code := fc.get()
+ maxVars := bodyScope.maxVars
+ return func(t *Thread) Func { return &evalFunc{t.f, maxVars, code} }
+}
+
+// Checks that labels were resolved and that all jumps obey scoping
+// rules. Reports an error and set fc.err if any check fails.
+func (a *funcCompiler) checkLabels() {
+ nerr := a.numError()
+ for _, l := range a.labels {
+ if !l.resolved.IsValid() {
+ a.diagAt(l.used, "label %s not defined", l.name)
+ }
+ }
+ if nerr != a.numError() {
+ // Don't check scopes if we have unresolved labels
+ return
+ }
+
+ // Executing the "goto" statement must not cause any variables
+ // to come into scope that were not already in scope at the
+ // point of the goto.
+ a.flow.gotosObeyScopes(a.compiler)
+}
diff --git a/libgo/go/exp/eval/stmt_test.go b/libgo/go/exp/eval/stmt_test.go
new file mode 100644
index 000000000..a14a288d9
--- /dev/null
+++ b/libgo/go/exp/eval/stmt_test.go
@@ -0,0 +1,343 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package eval
+
+import "testing"
+
+// atLeastOneDecl is the compiler error text expected when a short
+// variable declaration fails to declare any new variable.
+var atLeastOneDecl = "at least one new variable must be declared"
+
+// stmtTests exercises statement compilation and execution: short
+// declarations, assignments, type and variable declarations, control
+// flow (if/switch/for, labels, goto), return-path checking, selectors,
+// slices, maps, and function declarations.
+var stmtTests = []test{
+	// Short declarations
+	Val1("x := i", "x", 1),
+	Val1("x := f", "x", 1.0),
+	// Type defaulting
+	Val1("a := 42", "a", 42),
+	Val1("a := 1.0", "a", 1.0),
+	// Parallel assignment
+	Val2("a, b := 1, 2", "a", 1, "b", 2),
+	Val2("a, i := 1, 2", "a", 1, "i", 2),
+	CErr("a, i := 1, f", opTypes),
+	CErr("a, b := 1, 2, 3", "too many"),
+	CErr("a := 1, 2", "too many"),
+	CErr("a, b := 1", "not enough"),
+	// Mixed declarations
+	CErr("i := 1", atLeastOneDecl),
+	CErr("i, u := 1, 2", atLeastOneDecl),
+	Val2("i, x := 2, f", "i", 2, "x", 1.0),
+	// Various errors
+	CErr("1 := 2", "left side of := must be a name"),
+	CErr("c, a := 1, 1", "cannot assign"),
+	// Unpacking
+	Val2("x, y := oneTwo()", "x", 1, "y", 2),
+	CErr("x := oneTwo()", "too many"),
+	CErr("x, y, z := oneTwo()", "not enough"),
+	CErr("x, y := oneTwo(), 2", "multi-valued"),
+	CErr("x := oneTwo()+2", opTypes),
+	// TODO(austin) This error message is weird
+	CErr("x := void()", "not enough"),
+	// Placeholders
+	CErr("x := 1+\"x\"; i=x+1", opTypes),
+
+	// Assignment
+	Val1("i = 2", "i", 2),
+	Val1("(i) = 2", "i", 2),
+	CErr("1 = 2", "cannot assign"),
+	CErr("1-1 = 2", "- expression"),
+	Val1("i = 2.0", "i", 2),
+	CErr("i = 2.2", constantTruncated),
+	CErr("u = -2", constantUnderflows),
+	CErr("i = f", opTypes),
+	CErr("i, u = 0, f", opTypes),
+	CErr("i, u = 0, f", "value 2"),
+	Val2("i, i2 = i2, i", "i", 2, "i2", 1),
+	CErr("c = 1", "cannot assign"),
+
+	Val1("x := &i; *x = 2", "i", 2),
+
+	Val1("ai[0] = 42", "ai", varray{42, 2}),
+	Val1("aai[1] = ai; ai[0] = 42", "aai", varray{varray{1, 2}, varray{1, 2}}),
+	Val1("aai = aai2", "aai", varray{varray{5, 6}, varray{7, 8}}),
+
+	// Assignment conversions
+	Run("var sl []int; sl = &ai"),
+	CErr("type ST []int; type AT *[2]int; var x AT = &ai; var y ST = x", opTypes),
+	Run("type ST []int; var y ST = &ai"),
+	Run("type AT *[2]int; var x AT = &ai; var y []int = x"),
+
+	// Op-assignment
+	Val1("i += 2", "i", 3),
+	Val("i", 1),
+	Val1("f += 2", "f", 3.0),
+	CErr("2 += 2", "cannot assign"),
+	CErr("i, j += 2", "cannot be combined"),
+	CErr("i += 2, 3", "cannot be combined"),
+	Val2("s2 := s; s += \"def\"", "s2", "abc", "s", "abcdef"),
+	CErr("s += 1", opTypes),
+	// Single evaluation
+	Val2("ai[func()int{i+=1;return 0}()] *= 3; i2 = ai[0]", "i", 2, "i2", 3),
+
+	// Type declarations
+	// Identifiers
+	Run("type T int"),
+	CErr("type T x", "undefined"),
+	CErr("type T c", "constant"),
+	CErr("type T i", "variable"),
+	CErr("type T T", "recursive"),
+	CErr("type T x; type U T; var v U; v = 1", "undefined"),
+	// Pointer types
+	Run("type T *int"),
+	Run("type T *T"),
+	// Array types
+	Run("type T [5]int"),
+	Run("type T [c+42/2]int"),
+	Run("type T [2.0]int"),
+	CErr("type T [i]int", "constant expression"),
+	CErr("type T [2.5]int", constantTruncated),
+	CErr("type T [-1]int", "negative"),
+	CErr("type T [2]T", "recursive"),
+	// Struct types
+	Run("type T struct { a int; b int }"),
+	Run("type T struct { a int; int }"),
+	Run("type T struct { x *T }"),
+	Run("type T int; type U struct { T }"),
+	CErr("type T *int; type U struct { T }", "embedded.*pointer"),
+	CErr("type T *struct { T }", "embedded.*pointer"),
+	CErr("type T struct { a int; a int }", " a .*redeclared.*:1:17"),
+	CErr("type T struct { int; int }", "int .*redeclared.*:1:17"),
+	CErr("type T struct { int int; int }", "int .*redeclared.*:1:17"),
+	Run("type T struct { x *struct { T } }"),
+	CErr("type T struct { x struct { T } }", "recursive"),
+	CErr("type T struct { x }; type U struct { T }", "undefined"),
+	// Function types
+	Run("type T func()"),
+	Run("type T func(a, b int) int"),
+	Run("type T func(a, b int) (x int, y int)"),
+	Run("type T func(a, a int) (a int, a int)"),
+	Run("type T func(a, b int) (x, y int)"),
+	Run("type T func(int, int) (int, int)"),
+	CErr("type T func(x); type U T", "undefined"),
+	CErr("type T func(a T)", "recursive"),
+	// Interface types
+	Run("type T interface {x(a, b int) int}"),
+	Run("type T interface {x(a, b int) int}; type U interface {T; y(c int)}"),
+	CErr("type T interface {x(a int); x()}", "method x redeclared"),
+	CErr("type T interface {x()}; type U interface {T; x()}", "method x redeclared"),
+	CErr("type T int; type U interface {T}", "embedded type"),
+	// Parens
+	Run("type T (int)"),
+
+	// Variable declarations
+	Val2("var x int", "i", 1, "x", 0),
+	Val1("var x = 1", "x", 1),
+	Val1("var x = 1.0", "x", 1.0),
+	Val1("var x int = 1.0", "x", 1),
+	// Placeholders
+	CErr("var x foo; x = 1", "undefined"),
+	CErr("var x foo = 1; x = 1", "undefined"),
+	// Redeclaration
+	CErr("var i, x int", " i .*redeclared"),
+	CErr("var x int; var x int", " x .*redeclared.*:1:5"),
+
+	// Expression statements
+	CErr("x := func(){ 1-1 }", "expression statement"),
+	CErr("x := func(){ 1-1 }", "- expression"),
+	Val1("fn(2)", "i", 1),
+
+	// IncDec statements
+	Val1("i++", "i", 2),
+	Val1("i--", "i", 0),
+	Val1("u++", "u", uint(2)),
+	Val1("u--", "u", uint(0)),
+	Val1("f++", "f", 2.0),
+	Val1("f--", "f", 0.0),
+	// Single evaluation
+	Val2("ai[func()int{i+=1;return 0}()]++; i2 = ai[0]", "i", 2, "i2", 2),
+	// Operand types
+	CErr("s++", opTypes),
+	CErr("s++", "'\\+\\+'"),
+	CErr("2++", "cannot assign"),
+	CErr("c++", "cannot assign"),
+
+	// Function scoping
+	Val1("fn1 := func() { i=2 }; fn1()", "i", 2),
+	Val1("fn1 := func() { i:=2 }; fn1()", "i", 1),
+	Val2("fn1 := func() int { i=2; i:=3; i=4; return i }; x := fn1()", "i", 2, "x", 4),
+
+	// Basic returns
+	CErr("fn1 := func() int {}", "return"),
+	Run("fn1 := func() {}"),
+	CErr("fn1 := func() (r int) {}", "return"),
+	Val1("fn1 := func() (r int) {return}; i = fn1()", "i", 0),
+	Val1("fn1 := func() (r int) {r = 2; return}; i = fn1()", "i", 2),
+	Val1("fn1 := func() (r int) {return 2}; i = fn1()", "i", 2),
+	Val1("fn1 := func(int) int {return 2}; i = fn1(1)", "i", 2),
+
+	// Multi-valued returns
+	Val2("fn1 := func() (bool, int) {return true, 2}; x, y := fn1()", "x", true, "y", 2),
+	CErr("fn1 := func() int {return}", "not enough values"),
+	CErr("fn1 := func() int {return 1,2}", "too many values"),
+	CErr("fn1 := func() {return 1}", "too many values"),
+	CErr("fn1 := func() (int,int,int) {return 1,2}", "not enough values"),
+	Val2("fn1 := func() (int, int) {return oneTwo()}; x, y := fn1()", "x", 1, "y", 2),
+	CErr("fn1 := func() int {return oneTwo()}", "too many values"),
+	CErr("fn1 := func() (int,int,int) {return oneTwo()}", "not enough values"),
+	Val1("fn1 := func(x,y int) int {return x+y}; x := fn1(oneTwo())", "x", 3),
+
+	// Return control flow
+	Val2("fn1 := func(x *int) bool { *x = 2; return true; *x = 3; }; x := fn1(&i)", "i", 2, "x", true),
+
+	// Break/continue/goto/fallthrough
+	CErr("break", "outside"),
+	CErr("break foo", "break.*foo.*not defined"),
+	CErr("continue", "outside"),
+	CErr("continue foo", "continue.*foo.*not defined"),
+	CErr("fallthrough", "outside"),
+	CErr("goto foo", "foo.*not defined"),
+	CErr(" foo: foo:;", "foo.*redeclared.*:1:2"),
+	Val1("i+=2; goto L; i+=4; L: i+=8", "i", 1+2+8),
+	// Return checking
+	CErr("fn1 := func() int { goto L; return 1; L: }", "return"),
+	Run("fn1 := func() int { L: goto L; i = 2 }"),
+	Run("fn1 := func() int { return 1; L: goto L }"),
+	// Scope checking
+	Run("fn1 := func() { { L: x:=1 }; goto L }"),
+	CErr("fn1 := func() { { x:=1; L: }; goto L }", "into scope"),
+	CErr("fn1 := func() { goto L; x:=1; L: }", "into scope"),
+	Run("fn1 := func() { goto L; { L: x:=1 } }"),
+	CErr("fn1 := func() { goto L; { x:=1; L: } }", "into scope"),
+
+	// Blocks
+	CErr("fn1 := func() int {{}}", "return"),
+	Val1("fn1 := func() bool { { return true } }; b := fn1()", "b", true),
+
+	// If
+	Val2("if true { i = 2 } else { i = 3 }; i2 = 4", "i", 2, "i2", 4),
+	Val2("if false { i = 2 } else { i = 3 }; i2 = 4", "i", 3, "i2", 4),
+	Val2("if i == i2 { i = 2 } else { i = 3 }; i2 = 4", "i", 3, "i2", 4),
+	// Omit optional parts
+	Val2("if { i = 2 } else { i = 3 }; i2 = 4", "i", 2, "i2", 4),
+	Val2("if true { i = 2 }; i2 = 4", "i", 2, "i2", 4),
+	Val2("if false { i = 2 }; i2 = 4", "i", 1, "i2", 4),
+	// Init
+	Val2("if x := true; x { i = 2 } else { i = 3 }; i2 = 4", "i", 2, "i2", 4),
+	Val2("if x := false; x { i = 2 } else { i = 3 }; i2 = 4", "i", 3, "i2", 4),
+	// Statement else
+	Val2("if true { i = 2 } else i = 3; i2 = 4", "i", 2, "i2", 4),
+	Val2("if false { i = 2 } else i = 3; i2 = 4", "i", 3, "i2", 4),
+	// Scoping
+	Val2("if true { i := 2 } else { i := 3 }; i2 = i", "i", 1, "i2", 1),
+	Val2("if false { i := 2 } else { i := 3 }; i2 = i", "i", 1, "i2", 1),
+	Val2("if false { i := 2 } else i := 3; i2 = i", "i", 1, "i2", 1),
+	CErr("if true { x := 2 }; x = 4", undefined),
+	Val2("if i := 2; true { i2 = i; i := 3 }", "i", 1, "i2", 2),
+	Val2("if i := 2; false {} else { i2 = i; i := 3 }", "i", 1, "i2", 2),
+	// Return checking
+	Run("fn1 := func() int { if true { return 1 } else { return 2 } }"),
+	Run("fn1 := func() int { if true { return 1 } else return 2 }"),
+	CErr("fn1 := func() int { if true { return 1 } else { } }", "return"),
+	CErr("fn1 := func() int { if true { } else { return 1 } }", "return"),
+	CErr("fn1 := func() int { if true { } else return 1 }", "return"),
+	CErr("fn1 := func() int { if true { } else { } }", "return"),
+	CErr("fn1 := func() int { if true { return 1 } }", "return"),
+	CErr("fn1 := func() int { if true { } }", "return"),
+	Run("fn1 := func() int { if true { }; return 1 }"),
+	CErr("fn1 := func() int { if { } }", "return"),
+	CErr("fn1 := func() int { if { } else { return 2 } }", "return"),
+	Run("fn1 := func() int { if { return 1 } }"),
+	Run("fn1 := func() int { if { return 1 } else { } }"),
+	Run("fn1 := func() int { if { return 1 } else { } }"),
+
+	// Switch
+	Val1("switch { case false: i += 2; case true: i += 4; default: i += 8 }", "i", 1+4),
+	Val1("switch { default: i += 2; case false: i += 4; case true: i += 8 }", "i", 1+8),
+	CErr("switch { default: i += 2; default: i += 4 }", "more than one"),
+	Val1("switch false { case false: i += 2; case true: i += 4; default: i += 8 }", "i", 1+2),
+	CErr("switch s { case 1: }", opTypes),
+	CErr("switch ai { case ai: i += 2 }", opTypes),
+	Val1("switch 1.0 { case 1: i += 2; case 2: i += 4 }", "i", 1+2),
+	Val1("switch 1.5 { case 1: i += 2; case 2: i += 4 }", "i", 1),
+	CErr("switch oneTwo() {}", "multi-valued expression"),
+	Val1("switch 2 { case 1: i += 2; fallthrough; case 2: i += 4; fallthrough; case 3: i += 8; fallthrough }", "i", 1+4+8),
+	Val1("switch 5 { case 1: i += 2; fallthrough; default: i += 4; fallthrough; case 2: i += 8; fallthrough; case 3: i += 16; fallthrough }", "i", 1+4+8+16),
+	CErr("switch { case true: fallthrough; i += 2 }", "final statement"),
+	Val1("switch { case true: i += 2; fallthrough; ; ; case false: i += 4 }", "i", 1+2+4),
+	Val1("switch 2 { case 0, 1: i += 2; case 2, 3: i += 4 }", "i", 1+4),
+	Val2("switch func()int{i2++;return 5}() { case 1, 2: i += 2; case 4, 5: i += 4 }", "i", 1+4, "i2", 3),
+	Run("switch i { case i: }"),
+	// TODO(austin) Why doesn't this fail?
+	//CErr("case 1:", "XXX"),
+
+	// For
+	Val2("for x := 1; x < 5; x++ { i+=x }; i2 = 4", "i", 11, "i2", 4),
+	Val2("for x := 1; x < 5; x++ { i+=x; break; i++ }; i2 = 4", "i", 2, "i2", 4),
+	Val2("for x := 1; x < 5; x++ { i+=x; continue; i++ }; i2 = 4", "i", 11, "i2", 4),
+	Val2("for i = 2; false; i = 3 { i = 4 }; i2 = 4", "i", 2, "i2", 4),
+	Val2("for i < 5 { i++ }; i2 = 4", "i", 5, "i2", 4),
+	Val2("for i < 0 { i++ }; i2 = 4", "i", 1, "i2", 4),
+	// Scoping
+	Val2("for i := 2; true; { i2 = i; i := 3; break }", "i", 1, "i2", 2),
+	// Labeled break/continue
+	Val1("L1: for { L2: for { i+=2; break L1; i+=4 }; i+=8 }", "i", 1+2),
+	Val1("L1: for { L2: for { i+=2; break L2; i+=4 }; i+=8; break; i+=16 }", "i", 1+2+8),
+	CErr("L1: { for { break L1 } }", "break.*not defined"),
+	CErr("L1: for {}; for { break L1 }", "break.*not defined"),
+	CErr("L1:; for { break L1 }", "break.*not defined"),
+	Val2("L1: for i = 0; i < 2; i++ { L2: for { i2++; continue L1; i2++ } }", "i", 2, "i2", 4),
+	CErr("L1: { for { continue L1 } }", "continue.*not defined"),
+	CErr("L1:; for { continue L1 }", "continue.*not defined"),
+	// Return checking
+	Run("fn1 := func() int{ for {} }"),
+	CErr("fn1 := func() int{ for true {} }", "return"),
+	CErr("fn1 := func() int{ for true {return 1} }", "return"),
+	CErr("fn1 := func() int{ for {break} }", "return"),
+	Run("fn1 := func() int{ for { for {break} } }"),
+	CErr("fn1 := func() int{ L1: for { for {break L1} } }", "return"),
+	Run("fn1 := func() int{ for true {}; return 1 }"),
+
+	// Selectors
+	Val1("var x struct { a int; b int }; x.a = 42; i = x.a", "i", 42),
+	Val1("type T struct { x int }; var y struct { T }; y.x = 42; i = y.x", "i", 42),
+	Val2("type T struct { x int }; var y struct { T; x int }; y.x = 42; i = y.x; i2 = y.T.x", "i", 42, "i2", 0),
+	Run("type T struct { x int }; var y struct { *T }; a := func(){i=y.x}"),
+	CErr("type T struct { x int }; var x T; x.y = 42", "no field"),
+	CErr("type T struct { x int }; type U struct { x int }; var y struct { T; U }; y.x = 42", "ambiguous.*\tT\\.x\n\tU\\.x"),
+	CErr("type T struct { *T }; var x T; x.foo", "no field"),
+
+	Val1("fib := func(int) int{return 0;}; fib = func(v int) int { if v < 2 { return 1 }; return fib(v-1)+fib(v-2) }; i = fib(20)", "i", 10946),
+
+	// Make slice
+	Val2("x := make([]int, 2); x[0] = 42; i, i2 = x[0], x[1]", "i", 42, "i2", 0),
+	Val2("x := make([]int, 2); x[1] = 42; i, i2 = x[0], x[1]", "i", 0, "i2", 42),
+	RErr("x := make([]int, 2); x[-i] = 42", "negative index"),
+	RErr("x := make([]int, 2); x[2] = 42", "index 2 exceeds"),
+	Val2("x := make([]int, 2, 3); i, i2 = len(x), cap(x)", "i", 2, "i2", 3),
+	Val2("x := make([]int, 3, 2); i, i2 = len(x), cap(x)", "i", 3, "i2", 3),
+	RErr("x := make([]int, -i)", "negative length"),
+	RErr("x := make([]int, 2, -i)", "negative capacity"),
+	RErr("x := make([]int, 2, 3); x[2] = 42", "index 2 exceeds"),
+	CErr("x := make([]int, 2, 3, 4)", "too many"),
+	CErr("x := make([]int)", "not enough"),
+
+	// TODO(austin) Test make map
+
+	// Maps
+	Val1("x := make(map[int] int); x[1] = 42; i = x[1]", "i", 42),
+	Val2("x := make(map[int] int); x[1] = 42; i, y := x[1]", "i", 42, "y", true),
+	Val2("x := make(map[int] int); x[1] = 42; i, y := x[2]", "i", 0, "y", false),
+	// Not implemented
+	//Val1("x := make(map[int] int); x[1] = 42, true; i = x[1]", "i", 42),
+	//Val2("x := make(map[int] int); x[1] = 42; x[1] = 42, false; i, y := x[1]", "i", 0, "y", false),
+	Run("var x int; a := make(map[int] int); a[0], x = 1, 2"),
+	CErr("x := make(map[int] int); (func(a,b int){})(x[0])", "not enough"),
+	CErr("x := make(map[int] int); x[1] = oneTwo()", "too many"),
+	RErr("x := make(map[int] int); i = x[1]", "key '1' not found"),
+
+	// Functions
+	Val2("func fib(n int) int { if n <= 2 { return n }; return fib(n-1) + fib(n-2) }", "fib(4)", 5, "fib(10)", 89),
+	Run("func f1(){}"),
+	Run2("func f1(){}", "f1()"),
+}
+
+// TestStmt runs the statement test table through the shared harness.
+func TestStmt(t *testing.T) {
+	runTests(t, "stmtTests", stmtTests)
+}
diff --git a/libgo/go/exp/eval/type.go b/libgo/go/exp/eval/type.go
new file mode 100644
index 000000000..3f272ce4b
--- /dev/null
+++ b/libgo/go/exp/eval/type.go
@@ -0,0 +1,1252 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package eval
+
+import (
+ "big"
+ "go/ast"
+ "go/token"
+ "log"
+ "reflect"
+ "sort"
+ "unsafe" // For Sizeof
+)
+
+
+// XXX(Spec) The type compatibility section is very confusing because
+// it makes it seem like there are three distinct types of
+// compatibility: plain compatibility, assignment compatibility, and
+// comparison compatibility. As I understand it, there's really only
+// assignment compatibility and comparison and conversion have some
+// restrictions and have special meaning in some cases where the types
+// are not otherwise assignment compatible. The comparison
+// compatibility section is almost all about the semantics of
+// comparison, not the type checking of it, so it would make much more
+// sense in the comparison operators section. The compatibility and
+// assignment compatibility sections should be rolled into one.
+
+// Type is the interface implemented by all types in the evaluator.
+type Type interface {
+	// compat returns whether this type is compatible with another
+	// type. If conv is false, this is normal compatibility,
+	// where two named types are compatible only if they are the
+	// same named type. If conv is true, this is conversion
+	// compatibility, where two named types are conversion
+	// compatible if their definitions are conversion compatible.
+	//
+	// TODO(austin) Deal with recursive types
+	compat(o Type, conv bool) bool
+	// lit returns this type's literal. If this is a named type,
+	// this is the unnamed underlying type. Otherwise, this is an
+	// identity operation.
+	lit() Type
+	// isBoolean returns true if this is a boolean type.
+	isBoolean() bool
+	// isInteger returns true if this is an integer type.
+	isInteger() bool
+	// isFloat returns true if this is a floating type.
+	isFloat() bool
+	// isIdeal returns true if this is an ideal int or float.
+	isIdeal() bool
+	// Zero returns a new zero value of this type.
+	Zero() Value
+	// String returns the string representation of this type.
+	String() string
+	// Pos returns the position where this type was defined, if any.
+	Pos() token.Pos
+}
+
+// BoundedType is a Type with a finite range of representable values.
+// In this file it is implemented by the fixed-width numeric types
+// (uintType, intType, floatType), which provide minVal/maxVal.
+type BoundedType interface {
+	Type
+	// minVal returns the smallest value of this type.
+	minVal() *big.Rat
+	// maxVal returns the largest value of this type.
+	maxVal() *big.Rat
+}
+
+// universePos is the (non-)position used when defining predeclared
+// types in the universe scope.
+var universePos = token.NoPos
+
+/*
+ * Type array maps. These are used to memoize composite types.
+ */
+
+// typeArrayMapEntry is one chained bucket entry of a typeArrayMap:
+// the []Type key, the memoized value, and the next entry sharing the
+// same hash.
+type typeArrayMapEntry struct {
+	key  []Type
+	v    interface{}
+	next *typeArrayMapEntry
+}
+
+// typeArrayMap memoizes values keyed by a slice of Types. Keys hash
+// to a bucket chain; collisions are resolved by linear search.
+type typeArrayMap map[uintptr]*typeArrayMapEntry
+
+// hashTypeArray combines the addresses of the given types into a
+// single hash. nil entries contribute no address but still shift the
+// hash through the multiplier, so position matters.
+func hashTypeArray(key []Type) (hash uintptr) {
+	for _, t := range key {
+		hash *= 33
+		if t != nil {
+			hash ^= reflect.NewValue(t).(*reflect.PtrValue).Get()
+		}
+	}
+	return hash
+}
+
+// newTypeArrayMap returns an empty, ready-to-use typeArrayMap.
+func newTypeArrayMap() typeArrayMap {
+	return typeArrayMap{}
+}
+
+// Get returns the value memoized under key, or nil if no entry with an
+// element-wise identical key exists.
+func (m typeArrayMap) Get(key []Type) interface{} {
+	// A missing hash yields a nil chain, so the loop simply does
+	// not run and we fall through to the nil return.
+	for ent := m[hashTypeArray(key)]; ent != nil; ent = ent.next {
+		if len(ent.key) != len(key) {
+			continue
+		}
+		match := true
+		for i := range key {
+			if ent.key[i] != key[i] {
+				match = false
+				break
+			}
+		}
+		if match {
+			return ent.v
+		}
+	}
+	return nil
+}
+
+// Put memoizes v under key by prepending a fresh entry to the bucket
+// chain. It does not check for an existing entry with the same key;
+// callers are expected to consult Get first. Returns v for chaining.
+func (m typeArrayMap) Put(key []Type, v interface{}) interface{} {
+	hash := hashTypeArray(key)
+	// Renamed from "new", which shadowed the builtin allocator.
+	ent := &typeArrayMapEntry{key, v, m[hash]}
+	m[hash] = ent
+	return v
+}
+
+/*
+ * Common type
+ */
+
+// commonType supplies default (all-false / no-position) answers for
+// the Type predicate methods. Concrete types embed it and override
+// only the predicates that apply to them.
+type commonType struct{}
+
+func (commonType) isBoolean() bool { return false }
+
+func (commonType) isInteger() bool { return false }
+
+func (commonType) isFloat() bool { return false }
+
+func (commonType) isIdeal() bool { return false }
+
+// Pos returns token.NoPos; predeclared types carry no source position.
+func (commonType) Pos() token.Pos { return token.NoPos }
+
+/*
+ * Bool
+ */
+
+// boolType is the predeclared bool type.
+type boolType struct {
+	commonType
+}
+
+// BoolType is the universe-scope bool type object.
+var BoolType = universe.DefineType("bool", universePos, &boolType{})
+
+// compat: any type whose literal is a boolType is compatible.
+func (t *boolType) compat(o Type, conv bool) bool {
+	_, ok := o.lit().(*boolType)
+	return ok
+}
+
+func (t *boolType) lit() Type { return t }
+
+func (t *boolType) isBoolean() bool { return true }
+
+func (boolType) String() string {
+	// Use angle brackets as a convention for printing the
+	// underlying, unnamed type. This should only show up in
+	// debug output.
+	return "<bool>"
+}
+
+// Zero returns a freshly allocated false value.
+func (t *boolType) Zero() Value {
+	res := boolV(false)
+	return &res
+}
+
+/*
+ * Uint
+ */
+
+// uintType represents the unsigned integer types: the fixed-width
+// uint8..uint64 plus the architecture-dependent uint and uintptr.
+type uintType struct {
+	commonType
+
+	// 0 for architecture-dependent types
+	Bits uint
+	// true for uintptr, false for all others
+	Ptr  bool
+	name string
+}
+
+var (
+	Uint8Type  = universe.DefineType("uint8", universePos, &uintType{commonType{}, 8, false, "uint8"})
+	Uint16Type = universe.DefineType("uint16", universePos, &uintType{commonType{}, 16, false, "uint16"})
+	Uint32Type = universe.DefineType("uint32", universePos, &uintType{commonType{}, 32, false, "uint32"})
+	Uint64Type = universe.DefineType("uint64", universePos, &uintType{commonType{}, 64, false, "uint64"})
+
+	UintType    = universe.DefineType("uint", universePos, &uintType{commonType{}, 0, false, "uint"})
+	UintptrType = universe.DefineType("uintptr", universePos, &uintType{commonType{}, 0, true, "uintptr"})
+)
+
+// compat: uint types are compared by pointer identity, so distinct
+// unsigned types are never compatible (conv is ignored).
+func (t *uintType) compat(o Type, conv bool) bool {
+	t2, ok := o.lit().(*uintType)
+	return ok && t == t2
+}
+
+func (t *uintType) lit() Type { return t }
+
+func (t *uintType) isInteger() bool { return true }
+
+func (t *uintType) String() string { return "<" + t.name + ">" }
+
+// Zero returns a freshly allocated zero of the appropriate width;
+// Bits==0 selects the native uint or uintptr representation.
+func (t *uintType) Zero() Value {
+	switch t.Bits {
+	case 0:
+		if t.Ptr {
+			res := uintptrV(0)
+			return &res
+		} else {
+			res := uintV(0)
+			return &res
+		}
+	case 8:
+		res := uint8V(0)
+		return &res
+	case 16:
+		res := uint16V(0)
+		return &res
+	case 32:
+		res := uint32V(0)
+		return &res
+	case 64:
+		res := uint64V(0)
+		return &res
+	}
+	panic("unexpected uint bit count")
+}
+
+// minVal is always 0 for unsigned types.
+func (t *uintType) minVal() *big.Rat { return big.NewRat(0, 1) }
+
+// maxVal returns 2^bits - 1, resolving architecture-dependent widths
+// via unsafe.Sizeof on the host.
+func (t *uintType) maxVal() *big.Rat {
+	bits := t.Bits
+	if bits == 0 {
+		if t.Ptr {
+			bits = uint(8 * unsafe.Sizeof(uintptr(0)))
+		} else {
+			bits = uint(8 * unsafe.Sizeof(uint(0)))
+		}
+	}
+	numer := big.NewInt(1)
+	numer.Lsh(numer, bits)
+	numer.Sub(numer, idealOne)
+	return new(big.Rat).SetInt(numer)
+}
+
+/*
+ * Int
+ */
+
+// intType represents the signed integer types: the fixed-width
+// int8..int64 plus the architecture-dependent int.
+type intType struct {
+	commonType
+
+	// XXX(Spec) Numeric types: "There is also a set of
+	// architecture-independent basic numeric types whose size
+	// depends on the architecture." Should that be
+	// architecture-dependent?
+
+	// 0 for architecture-dependent types
+	Bits uint
+	name string
+}
+
+var (
+	Int8Type  = universe.DefineType("int8", universePos, &intType{commonType{}, 8, "int8"})
+	Int16Type = universe.DefineType("int16", universePos, &intType{commonType{}, 16, "int16"})
+	Int32Type = universe.DefineType("int32", universePos, &intType{commonType{}, 32, "int32"})
+	Int64Type = universe.DefineType("int64", universePos, &intType{commonType{}, 64, "int64"})
+
+	IntType = universe.DefineType("int", universePos, &intType{commonType{}, 0, "int"})
+)
+
+// compat: int types are compared by pointer identity, so distinct
+// signed types are never compatible (conv is ignored).
+func (t *intType) compat(o Type, conv bool) bool {
+	t2, ok := o.lit().(*intType)
+	return ok && t == t2
+}
+
+func (t *intType) lit() Type { return t }
+
+func (t *intType) isInteger() bool { return true }
+
+func (t *intType) String() string { return "<" + t.name + ">" }
+
+// Zero returns a freshly allocated zero of the appropriate width;
+// Bits==0 selects the native int representation.
+func (t *intType) Zero() Value {
+	switch t.Bits {
+	case 8:
+		res := int8V(0)
+		return &res
+	case 16:
+		res := int16V(0)
+		return &res
+	case 32:
+		res := int32V(0)
+		return &res
+	case 64:
+		res := int64V(0)
+		return &res
+
+	case 0:
+		res := intV(0)
+		return &res
+	}
+	panic("unexpected int bit count")
+}
+
+// minVal returns -2^(bits-1), resolving the native width via
+// unsafe.Sizeof when Bits is 0.
+func (t *intType) minVal() *big.Rat {
+	bits := t.Bits
+	if bits == 0 {
+		bits = uint(8 * unsafe.Sizeof(int(0)))
+	}
+	numer := big.NewInt(-1)
+	numer.Lsh(numer, bits-1)
+	return new(big.Rat).SetInt(numer)
+}
+
+// maxVal returns 2^(bits-1) - 1, resolving the native width via
+// unsafe.Sizeof when Bits is 0.
+func (t *intType) maxVal() *big.Rat {
+	bits := t.Bits
+	if bits == 0 {
+		bits = uint(8 * unsafe.Sizeof(int(0)))
+	}
+	numer := big.NewInt(1)
+	numer.Lsh(numer, bits-1)
+	numer.Sub(numer, idealOne)
+	return new(big.Rat).SetInt(numer)
+}
+
+/*
+ * Ideal int
+ */
+
+// idealIntType is the type of untyped ("ideal") integer constants,
+// which have arbitrary precision and no bound.
+type idealIntType struct {
+	commonType
+}
+
+var IdealIntType Type = &idealIntType{}
+
+// compat: any type whose literal is an idealIntType is compatible.
+func (t *idealIntType) compat(o Type, conv bool) bool {
+	_, ok := o.lit().(*idealIntType)
+	return ok
+}
+
+func (t *idealIntType) lit() Type { return t }
+
+func (t *idealIntType) isInteger() bool { return true }
+
+func (t *idealIntType) isIdeal() bool { return true }
+
+func (t *idealIntType) String() string { return "ideal integer" }
+
+func (t *idealIntType) Zero() Value { return &idealIntV{idealZero} }
+
+/*
+ * Float
+ */
+
+// floatType represents the floating-point types float32 and float64.
+type floatType struct {
+	commonType
+
+	// 0 for architecture-dependent type
+	Bits uint
+
+	name string
+}
+
+var (
+	Float32Type = universe.DefineType("float32", universePos, &floatType{commonType{}, 32, "float32"})
+	Float64Type = universe.DefineType("float64", universePos, &floatType{commonType{}, 64, "float64"})
+)
+
+// compat: float types are compared by pointer identity, so float32
+// and float64 are never compatible with each other (conv is ignored).
+func (t *floatType) compat(o Type, conv bool) bool {
+	t2, ok := o.lit().(*floatType)
+	return ok && t == t2
+}
+
+func (t *floatType) lit() Type { return t }
+
+func (t *floatType) isFloat() bool { return true }
+
+func (t *floatType) String() string { return "<" + t.name + ">" }
+
+// Zero returns a freshly allocated zero of the appropriate width.
+// Unlike the integer types, only 32 and 64 bits are supported.
+func (t *floatType) Zero() Value {
+	switch t.Bits {
+	case 32:
+		res := float32V(0)
+		return &res
+	case 64:
+		res := float64V(0)
+		return &res
+	}
+	panic("unexpected float bit count")
+}
+
+// Bounds for the float types. NOTE(review): these are declared here
+// but initialized elsewhere in the package (not in this file chunk) —
+// presumably in an init function; verify before relying on them.
+var maxFloat32Val *big.Rat
+var maxFloat64Val *big.Rat
+var minFloat32Val *big.Rat
+var minFloat64Val *big.Rat
+
+func (t *floatType) minVal() *big.Rat {
+	bits := t.Bits
+	switch bits {
+	case 32:
+		return minFloat32Val
+	case 64:
+		return minFloat64Val
+	}
+	log.Panicf("unexpected floating point bit count: %d", bits)
+	panic("unreachable")
+}
+
+func (t *floatType) maxVal() *big.Rat {
+	bits := t.Bits
+	switch bits {
+	case 32:
+		return maxFloat32Val
+	case 64:
+		return maxFloat64Val
+	}
+	log.Panicf("unexpected floating point bit count: %d", bits)
+	panic("unreachable")
+}
+
+/*
+ * Ideal float
+ */
+
+// idealFloatType is the type of untyped ("ideal") float constants,
+// represented as arbitrary-precision rationals.
+type idealFloatType struct {
+	commonType
+}
+
+var IdealFloatType Type = &idealFloatType{}
+
+// compat: any type whose literal is an idealFloatType is compatible.
+func (t *idealFloatType) compat(o Type, conv bool) bool {
+	_, ok := o.lit().(*idealFloatType)
+	return ok
+}
+
+func (t *idealFloatType) lit() Type { return t }
+
+func (t *idealFloatType) isFloat() bool { return true }
+
+func (t *idealFloatType) isIdeal() bool { return true }
+
+func (t *idealFloatType) String() string { return "ideal float" }
+
+func (t *idealFloatType) Zero() Value { return &idealFloatV{big.NewRat(0, 1)} }
+
+/*
+ * String
+ */
+
+// stringType is the predeclared string type.
+type stringType struct {
+	commonType
+}
+
+var StringType = universe.DefineType("string", universePos, &stringType{})
+
+// compat: any type whose literal is a stringType is compatible.
+func (t *stringType) compat(o Type, conv bool) bool {
+	_, ok := o.lit().(*stringType)
+	return ok
+}
+
+func (t *stringType) lit() Type { return t }
+
+func (t *stringType) String() string { return "<string>" }
+
+// Zero returns a freshly allocated empty string value.
+func (t *stringType) Zero() Value {
+	res := stringV("")
+	return &res
+}
+
+/*
+ * Array
+ */
+
+// ArrayType represents a fixed-length array type [Len]Elem.
+type ArrayType struct {
+	commonType
+	Len  int64
+	Elem Type
+}
+
+// arrayTypes memoizes array types by length, then by element type, so
+// identical array types are pointer-equal.
+var arrayTypes = make(map[int64]map[Type]*ArrayType)
+
+// Two array types are identical if they have identical element types
+// and the same array length.
+
+// NewArrayType returns the canonical array type with the given length
+// and element type, creating and caching it on first use.
+func NewArrayType(len int64, elem Type) *ArrayType {
+	ts, ok := arrayTypes[len]
+	if !ok {
+		ts = make(map[Type]*ArrayType)
+		arrayTypes[len] = ts
+	}
+	t, ok := ts[elem]
+	if !ok {
+		t = &ArrayType{commonType{}, len, elem}
+		ts[elem] = t
+	}
+	return t
+}
+
+// compat: array types match when lengths are equal and element types
+// are (recursively) compatible.
+func (t *ArrayType) compat(o Type, conv bool) bool {
+	t2, ok := o.lit().(*ArrayType)
+	if !ok {
+		return false
+	}
+	return t.Len == t2.Len && t.Elem.compat(t2.Elem, conv)
+}
+
+func (t *ArrayType) lit() Type { return t }
+
+// NOTE(review): this prints like a slice ("[]elem") and omits Len;
+// debug-only output, but "[N]elem" would be more faithful — confirm
+// nothing matches on this string before changing it.
+func (t *ArrayType) String() string { return "[]" + t.Elem.String() }
+
+// Zero returns an array value with every element set to the element
+// type's zero value.
+func (t *ArrayType) Zero() Value {
+	res := arrayV(make([]Value, t.Len))
+	// TODO(austin) It's unfortunate that each element is
+	// separately heap allocated. We could add ZeroArray to
+	// everything, though that doesn't help with multidimensional
+	// arrays. Or we could do something unsafe. We'll have this
+	// same problem with structs.
+	for i := int64(0); i < t.Len; i++ {
+		res[i] = t.Elem.Zero()
+	}
+	return &res
+}
+
+/*
+ * Struct
+ */
+
+// StructField describes one field of a struct type.
+type StructField struct {
+	Name      string
+	Type      Type
+	Anonymous bool
+}
+
+// StructType represents a struct type as an ordered list of fields.
+type StructType struct {
+	commonType
+	Elems []StructField
+}
+
+// structTypes memoizes struct types: first by the array of field
+// types, then by a string key encoding the field names.
+var structTypes = newTypeArrayMap()
+
+// Two struct types are identical if they have the same sequence of
+// fields, and if corresponding fields have the same names and
+// identical types. Two anonymous fields are considered to have the
+// same name.
+
+// NewStructType returns the canonical struct type with the given
+// fields, creating and caching it on first use.
+func NewStructType(fields []StructField) *StructType {
+	// Start by looking up just the types
+	fts := make([]Type, len(fields))
+	for i, f := range fields {
+		fts[i] = f.Type
+	}
+	tMapI := structTypes.Get(fts)
+	if tMapI == nil {
+		tMapI = structTypes.Put(fts, make(map[string]*StructType))
+	}
+	tMap := tMapI.(map[string]*StructType)
+
+	// Construct key for field names; anonymous fields are marked
+	// with "!" so struct{T} and struct{T T} get distinct keys.
+	key := ""
+	for _, f := range fields {
+		// XXX(Spec) It's not clear if struct { T } and struct
+		// { T T } are either identical or compatible. The
+		// "Struct Types" section says that the name of that
+		// field is "T", which suggests that they are
+		// identical, but it really means that it's the name
+		// for the purpose of selector expressions and nothing
+		// else. We decided that they should be neither
+		// identical or compatible.
+		if f.Anonymous {
+			key += "!"
+		}
+		key += f.Name + " "
+	}
+
+	// XXX(Spec) Do the tags also have to be identical for the
+	// types to be identical? I certainly hope so, because
+	// otherwise, this is the only case where two distinct type
+	// objects can represent identical types.
+
+	t, ok := tMap[key]
+	if !ok {
+		// Create new struct type
+		t = &StructType{commonType{}, fields}
+		tMap[key] = t
+	}
+	return t
+}
+
+// compat: structs match field-by-field — same anonymity, same name
+// (for named fields), and recursively compatible field types.
+func (t *StructType) compat(o Type, conv bool) bool {
+	t2, ok := o.lit().(*StructType)
+	if !ok {
+		return false
+	}
+	if len(t.Elems) != len(t2.Elems) {
+		return false
+	}
+	for i, e := range t.Elems {
+		e2 := t2.Elems[i]
+		// XXX(Spec) An anonymous and a non-anonymous field
+		// are neither identical nor compatible.
+		if e.Anonymous != e2.Anonymous ||
+			(!e.Anonymous && e.Name != e2.Name) ||
+			!e.Type.compat(e2.Type, conv) {
+			return false
+		}
+	}
+	return true
+}
+
+func (t *StructType) lit() Type { return t }
+
+func (t *StructType) String() string {
+	s := "struct {"
+	for i, f := range t.Elems {
+		if i > 0 {
+			s += "; "
+		}
+		if !f.Anonymous {
+			s += f.Name + " "
+		}
+		s += f.Type.String()
+	}
+	return s + "}"
+}
+
+// Zero returns a struct value with every field set to its type's zero
+// value.
+func (t *StructType) Zero() Value {
+	res := structV(make([]Value, len(t.Elems)))
+	for i, f := range t.Elems {
+		res[i] = f.Type.Zero()
+	}
+	return &res
+}
+
+/*
+ * Pointer
+ */
+
+// PtrType represents a pointer type *Elem.
+type PtrType struct {
+	commonType
+	Elem Type
+}
+
+// ptrTypes memoizes pointer types by element type, so identical
+// pointer types are pointer-equal.
+var ptrTypes = make(map[Type]*PtrType)
+
+// Two pointer types are identical if they have identical base types.
+
+// NewPtrType returns the canonical pointer type to elem, creating and
+// caching it on first use.
+func NewPtrType(elem Type) *PtrType {
+	t, ok := ptrTypes[elem]
+	if !ok {
+		t = &PtrType{commonType{}, elem}
+		ptrTypes[elem] = t
+	}
+	return t
+}
+
+// compat: pointer types match when their element types are
+// (recursively) compatible.
+func (t *PtrType) compat(o Type, conv bool) bool {
+	t2, ok := o.lit().(*PtrType)
+	if !ok {
+		return false
+	}
+	return t.Elem.compat(t2.Elem, conv)
+}
+
+func (t *PtrType) lit() Type { return t }
+
+func (t *PtrType) String() string { return "*" + t.Elem.String() }
+
+// Zero returns a nil pointer value.
+func (t *PtrType) Zero() Value { return &ptrV{nil} }
+
+/*
+ * Function
+ */
+
+// FuncType represents a function type. For the magic built-in
+// functions (cap, len, make, ...), builtin holds the function's name
+// and the signature fields are unused.
+type FuncType struct {
+	commonType
+	// TODO(austin) Separate receiver Type for methods?
+	In       []Type
+	Variadic bool
+	Out      []Type
+	builtin  string
+}
+
+// Memoization maps for function types; variadic signatures are kept
+// in a separate map so they never collide with non-variadic ones.
+var funcTypes = newTypeArrayMap()
+var variadicFuncTypes = newTypeArrayMap()
+
+// Create singleton function types for magic built-in functions
+var (
+	capType     = &FuncType{builtin: "cap"}
+	closeType   = &FuncType{builtin: "close"}
+	closedType  = &FuncType{builtin: "closed"}
+	lenType     = &FuncType{builtin: "len"}
+	makeType    = &FuncType{builtin: "make"}
+	newType     = &FuncType{builtin: "new"}
+	panicType   = &FuncType{builtin: "panic"}
+	printType   = &FuncType{builtin: "print"}
+	printlnType = &FuncType{builtin: "println"}
+	copyType    = &FuncType{builtin: "copy"}
+)
+
+// Two function types are identical if they have the same number of
+// parameters and result values and if corresponding parameter and
+// result types are identical. All "..." parameters have identical
+// type. Parameter and result names are not required to match.
+
+// NewFuncType returns the canonical function type with the given
+// signature, creating and caching it on first use. Memoization is
+// two-level: by input types, then by output types.
+func NewFuncType(in []Type, variadic bool, out []Type) *FuncType {
+	inMap := funcTypes
+	if variadic {
+		inMap = variadicFuncTypes
+	}
+
+	outMapI := inMap.Get(in)
+	if outMapI == nil {
+		outMapI = inMap.Put(in, newTypeArrayMap())
+	}
+	outMap := outMapI.(typeArrayMap)
+
+	tI := outMap.Get(out)
+	if tI != nil {
+		return tI.(*FuncType)
+	}
+
+	t := &FuncType{commonType{}, in, variadic, out, ""}
+	outMap.Put(out, t)
+	return t
+}
+
+func (t *FuncType) compat(o Type, conv bool) bool {
+ t2, ok := o.lit().(*FuncType)
+ if !ok {
+ return false
+ }
+ if len(t.In) != len(t2.In) || t.Variadic != t2.Variadic || len(t.Out) != len(t2.Out) {
+ return false
+ }
+ for i := range t.In {
+ if !t.In[i].compat(t2.In[i], conv) {
+ return false
+ }
+ }
+ for i := range t.Out {
+ if !t.Out[i].compat(t2.Out[i], conv) {
+ return false
+ }
+ }
+ return true
+}
+
+func (t *FuncType) lit() Type { return t }
+
+// typeListString formats a comma-separated parameter or result list.
+// ns, when non-nil, supplies an optional name to print before each
+// type (nil entries are skipped). A nil type renders as "<none>",
+// which some callers use to represent errors.
+func typeListString(ts []Type, ns []*ast.Ident) string {
+ parts := make([]string, len(ts))
+ for i, t := range ts {
+ p := ""
+ if ns != nil && ns[i] != nil {
+ p = ns[i].Name + " "
+ }
+ if t == nil {
+ // Some places use nil types to represent errors
+ p += "<none>"
+ } else {
+ p += t.String()
+ }
+ parts[i] = p
+ }
+ s := ""
+ for i, p := range parts {
+ if i > 0 {
+ s += ", "
+ }
+ s += p
+ }
+ return s
+}
+
+// String renders the function type, e.g. "func(int, ...) (bool)".
+// Built-in function singletons are rendered specially since they
+// have no real signature.
+func (t *FuncType) String() string {
+ if t.builtin != "" {
+ return "built-in function " + t.builtin
+ }
+ args := typeListString(t.In, nil)
+ if t.Variadic {
+ if len(args) > 0 {
+ args += ", "
+ }
+ args += "..."
+ }
+ s := "func(" + args + ")"
+ if len(t.Out) > 0 {
+ s += " (" + typeListString(t.Out, nil) + ")"
+ }
+ return s
+}
+
+// Zero returns a nil function value.
+func (t *FuncType) Zero() Value { return &funcV{nil} }
+
+// FuncDecl pairs a function type with the declared names of the
+// function and its parameters/results.
+type FuncDecl struct {
+ Type *FuncType
+ Name *ast.Ident // nil for function literals
+ // InNames will be one longer than Type.In if this function is
+ // variadic.
+ InNames []*ast.Ident
+ OutNames []*ast.Ident
+}
+
+// String renders the declaration, including the function name (if
+// any) and the declared parameter/result names.
+func (t *FuncDecl) String() string {
+ s := "func"
+ if t.Name != nil {
+ s += " " + t.Name.Name
+ }
+ s += funcTypeString(t.Type, t.InNames, t.OutNames)
+ return s
+}
+
+// funcTypeString renders just the signature part "(...) (...)",
+// with the given parameter and result names interleaved.
+func funcTypeString(ft *FuncType, ins []*ast.Ident, outs []*ast.Ident) string {
+ s := "("
+ s += typeListString(ft.In, ins)
+ if ft.Variadic {
+ if len(ft.In) > 0 {
+ s += ", "
+ }
+ s += "..."
+ }
+ s += ")"
+ if len(ft.Out) > 0 {
+ s += " (" + typeListString(ft.Out, outs) + ")"
+ }
+ return s
+}
+
+/*
+ * Interface
+ */
+
+// TODO(austin) Interface values, types, and type compilation are
+// implemented, but none of the type checking or semantics of
+// interfaces are.
+
+// InterfaceType represents an interface type as a sorted list of
+// its methods (including those inherited from embedded interfaces).
+type InterfaceType struct {
+ commonType
+ // TODO(austin) This should be a map from names to
+ // *FuncType's. We only need the sorted list for generating
+ // the type map key. It's detrimental for everything else.
+ methods []IMethod
+}
+
+// IMethod is a single interface method: a name and its signature.
+type IMethod struct {
+ Name string
+ Type *FuncType
+}
+
+// Canonicalization map keyed by the method types, then (inside) by
+// the concatenated method names.
+var interfaceTypes = newTypeArrayMap()
+
+// NewInterfaceType returns the canonical interface type with the
+// given explicit methods plus every method of the embedded
+// interfaces. Method sets that are identical after sorting by name
+// yield the same *InterfaceType pointer.
+func NewInterfaceType(methods []IMethod, embeds []*InterfaceType) *InterfaceType {
+ // Count methods of embedded interfaces
+ nMethods := len(methods)
+ for _, e := range embeds {
+ nMethods += len(e.methods)
+ }
+
+ // Combine methods
+ allMethods := make([]IMethod, nMethods)
+ copy(allMethods, methods)
+ n := len(methods)
+ for _, e := range embeds {
+ for _, m := range e.methods {
+ allMethods[n] = m
+ n++
+ }
+ }
+
+ // Sort methods
+ sort.Sort(iMethodSorter(allMethods))
+
+ // Key the canonicalization map by the full, sorted method set.
+ // Bug fix: this previously ranged over methods (the explicit
+ // methods only), leaving nil holes in mts for every method
+ // contributed by an embedded interface.
+ mts := make([]Type, len(allMethods))
+ for i, m := range allMethods {
+ mts[i] = m.Type
+ }
+ tMapI := interfaceTypes.Get(mts)
+ if tMapI == nil {
+ tMapI = interfaceTypes.Put(mts, make(map[string]*InterfaceType))
+ }
+ tMap := tMapI.(map[string]*InterfaceType)
+
+ // Within one method-type key, disambiguate by the method names.
+ key := ""
+ for _, m := range allMethods {
+ key += m.Name + " "
+ }
+
+ t, ok := tMap[key]
+ if !ok {
+ t = &InterfaceType{commonType{}, allMethods}
+ tMap[key] = t
+ }
+ return t
+}
+
+// iMethodSorter implements sort.Interface to order methods by name.
+type iMethodSorter []IMethod
+
+func (s iMethodSorter) Less(a, b int) bool { return s[a].Name < s[b].Name }
+
+func (s iMethodSorter) Swap(a, b int) { s[a], s[b] = s[b], s[a] }
+
+func (s iMethodSorter) Len() int { return len(s) }
+
+// compat reports whether o is compatible with t: o's literal type
+// must be an interface with the same (sorted) method names and
+// pairwise-compatible method signatures.
+func (t *InterfaceType) compat(o Type, conv bool) bool {
+ t2, ok := o.lit().(*InterfaceType)
+ if !ok {
+ return false
+ }
+ if len(t.methods) != len(t2.methods) {
+ return false
+ }
+ for i, e := range t.methods {
+ e2 := t2.methods[i]
+ if e.Name != e2.Name || !e.Type.compat(e2.Type, conv) {
+ return false
+ }
+ }
+ return true
+}
+
+func (t *InterfaceType) lit() Type { return t }
+
+func (t *InterfaceType) String() string {
+ // TODO(austin) Instead of showing embedded interfaces, this
+ // shows their methods.
+ s := "interface {"
+ for i, m := range t.methods {
+ if i > 0 {
+ s += "; "
+ }
+ s += m.Name + funcTypeString(m.Type, nil, nil)
+ }
+ return s + "}"
+}
+
+// implementedBy tests if o implements t, returning nil, true if it does.
+// Otherwise, it returns a method of t that o is missing and false.
+func (t *InterfaceType) implementedBy(o Type) (*IMethod, bool) {
+ // The empty interface is implemented by everything.
+ if len(t.methods) == 0 {
+ return nil, true
+ }
+
+ // The methods of a named interface type are those of the
+ // underlying type.
+ if it, ok := o.lit().(*InterfaceType); ok {
+ o = it
+ }
+
+ // XXX(Spec) Interface types: "A type implements any interface
+ // comprising any subset of its methods" It's unclear if
+ // methods must have identical or compatible types. 6g
+ // requires identical types.
+
+ switch o := o.(type) {
+ case *NamedType:
+ // Every interface method must appear in o's method map
+ // with an identical (pointer-equal, i.e. canonical)
+ // signature.
+ for _, tm := range t.methods {
+ sm, ok := o.methods[tm.Name]
+ if !ok || sm.decl.Type != tm.Type {
+ return &tm, false
+ }
+ }
+ return nil, true
+
+ case *InterfaceType:
+ // Both method lists are sorted by name, so walk them in
+ // lockstep like a merge: o may have extra methods, but
+ // every method of t must be matched exactly.
+ var ti, oi int
+ for ti < len(t.methods) && oi < len(o.methods) {
+ tm, om := &t.methods[ti], &o.methods[oi]
+ switch {
+ case tm.Name == om.Name:
+ if tm.Type != om.Type {
+ return tm, false
+ }
+ ti++
+ oi++
+ case tm.Name > om.Name:
+ oi++
+ default:
+ return tm, false
+ }
+ }
+ if ti < len(t.methods) {
+ return &t.methods[ti], false
+ }
+ return nil, true
+ }
+
+ // Non-named, non-interface types have no methods at all.
+ return &t.methods[0], false
+}
+
+// Zero returns a nil interface value (no type, no value).
+func (t *InterfaceType) Zero() Value { return &interfaceV{} }
+
+/*
+ * Slice
+ */
+
+// SliceType represents the slice type []Elem.
+type SliceType struct {
+ commonType
+ Elem Type
+}
+
+// Canonicalization map: one *SliceType per element type.
+var sliceTypes = make(map[Type]*SliceType)
+
+// Two slice types are identical if they have identical element types.
+
+// NewSliceType returns the canonical slice type for elem, creating
+// and caching it on first use.
+func NewSliceType(elem Type) *SliceType {
+ t, ok := sliceTypes[elem]
+ if !ok {
+ t = &SliceType{commonType{}, elem}
+ sliceTypes[elem] = t
+ }
+ return t
+}
+
+// compat reports whether o is compatible with t: o's literal type
+// must be a slice type with a compatible element type.
+func (t *SliceType) compat(o Type, conv bool) bool {
+ t2, ok := o.lit().(*SliceType)
+ if !ok {
+ return false
+ }
+ return t.Elem.compat(t2.Elem, conv)
+}
+
+func (t *SliceType) lit() Type { return t }
+
+func (t *SliceType) String() string { return "[]" + t.Elem.String() }
+
+func (t *SliceType) Zero() Value {
+ // The value of an uninitialized slice is nil. The length and
+ // capacity of a nil slice are 0.
+ return &sliceV{Slice{nil, 0, 0}}
+}
+
+/*
+ * Map type
+ */
+
+// MapType represents the map type map[Key]Elem.
+type MapType struct {
+ commonType
+ Key Type
+ Elem Type
+}
+
+// Two-level canonicalization map keyed by key type, then element type.
+var mapTypes = make(map[Type]map[Type]*MapType)
+
+// NewMapType returns the canonical map type for the given key and
+// element types, creating and caching it on first use.
+func NewMapType(key Type, elem Type) *MapType {
+ ts, ok := mapTypes[key]
+ if !ok {
+ ts = make(map[Type]*MapType)
+ mapTypes[key] = ts
+ }
+ t, ok := ts[elem]
+ if !ok {
+ t = &MapType{commonType{}, key, elem}
+ ts[elem] = t
+ }
+ return t
+}
+
+// compat reports whether o is compatible with t: o's literal type
+// must be a map type with compatible key and element types.
+func (t *MapType) compat(o Type, conv bool) bool {
+ t2, ok := o.lit().(*MapType)
+ if !ok {
+ return false
+ }
+ return t.Elem.compat(t2.Elem, conv) && t.Key.compat(t2.Key, conv)
+}
+
+func (t *MapType) lit() Type { return t }
+
+func (t *MapType) String() string { return "map[" + t.Key.String() + "] " + t.Elem.String() }
+
+func (t *MapType) Zero() Value {
+ // The value of an uninitialized map is nil.
+ return &mapV{nil}
+}
+
+/*
+type ChanType struct {
+ // TODO(austin)
+}
+*/
+
+/*
+ * Named types
+ */
+
+// Method is a method bound to a named type: its declaration plus
+// the compiled function.
+type Method struct {
+ decl *FuncDecl
+ fn Func
+}
+
+// NamedType represents a type introduced by a type declaration.
+type NamedType struct {
+ NamePos token.Pos
+ Name string
+ // Underlying type. If incomplete is true, this will be nil.
+ // If incomplete is false and this is still nil, then this is
+ // a placeholder type representing an error.
+ Def Type
+ // True while this type is being defined.
+ incomplete bool
+ methods map[string]Method
+}
+
+// TODO(austin) This is temporarily needed by the debugger's remote
+// type parser. This should only be possible with block.DefineType.
+func NewNamedType(name string) *NamedType {
+ return &NamedType{token.NoPos, name, nil, true, make(map[string]Method)}
+}
+
+func (t *NamedType) Pos() token.Pos {
+ return t.NamePos
+}
+
+// Complete sets the underlying type of an in-progress named type and
+// marks it complete. Panics if the type was already completed.
+func (t *NamedType) Complete(def Type) {
+ if !t.incomplete {
+ log.Panicf("cannot complete already completed NamedType %+v", *t)
+ }
+ // We strip the name from def because multiple levels of
+ // naming are useless.
+ if ndef, ok := def.(*NamedType); ok {
+ def = ndef.Def
+ }
+ t.Def = def
+ t.incomplete = false
+}
+
+// compat reports compatibility for named types. Without conversion,
+// two named types are compatible only if they are the same
+// declaration; with conversion, compatibility of the underlying
+// types suffices.
+func (t *NamedType) compat(o Type, conv bool) bool {
+ t2, ok := o.(*NamedType)
+ if ok {
+ if conv {
+ // Two named types are conversion compatible
+ // if their literals are conversion
+ // compatible.
+ return t.Def.compat(t2.Def, conv)
+ } else {
+ // Two named types are compatible if their
+ // type names originate in the same type
+ // declaration.
+ return t == t2
+ }
+ }
+ // A named and an unnamed type are compatible if the
+ // respective type literals are compatible.
+ return o.compat(t.Def, conv)
+}
+
+// The remaining predicates simply delegate to the underlying type;
+// a named type is never an ideal (untyped constant) type.
+func (t *NamedType) lit() Type { return t.Def.lit() }
+
+func (t *NamedType) isBoolean() bool { return t.Def.isBoolean() }
+
+func (t *NamedType) isInteger() bool { return t.Def.isInteger() }
+
+func (t *NamedType) isFloat() bool { return t.Def.isFloat() }
+
+func (t *NamedType) isIdeal() bool { return false }
+
+func (t *NamedType) String() string { return t.Name }
+
+func (t *NamedType) Zero() Value { return t.Def.Zero() }
+
+/*
+ * Multi-valued type
+ */
+
+// MultiType is a special type used for multi-valued expressions, akin
+// to a tuple type. It's not generally accessible within the
+// language.
+// MultiType is a special type used for multi-valued expressions, akin
+// to a tuple type. It's not generally accessible within the
+// language.
+type MultiType struct {
+ commonType
+ Elems []Type
+}
+
+// Canonicalization map keyed by the element type list.
+var multiTypes = newTypeArrayMap()
+
+// NewMultiType returns the canonical multi-value type for the given
+// element types.
+func NewMultiType(elems []Type) *MultiType {
+ if t := multiTypes.Get(elems); t != nil {
+ return t.(*MultiType)
+ }
+
+ t := &MultiType{commonType{}, elems}
+ multiTypes.Put(elems, t)
+ return t
+}
+
+// compat reports whether o is a multi-value type of the same arity
+// with pairwise-compatible element types.
+func (t *MultiType) compat(o Type, conv bool) bool {
+ t2, ok := o.lit().(*MultiType)
+ if !ok {
+ return false
+ }
+ if len(t.Elems) != len(t2.Elems) {
+ return false
+ }
+ for i := range t.Elems {
+ if !t.Elems[i].compat(t2.Elems[i], conv) {
+ return false
+ }
+ }
+ return true
+}
+
+// EmptyType is the type of expressions with no value at all.
+var EmptyType Type = NewMultiType([]Type{})
+
+func (t *MultiType) lit() Type { return t }
+
+func (t *MultiType) String() string {
+ if len(t.Elems) == 0 {
+ return "<none>"
+ }
+ return typeListString(t.Elems, nil)
+}
+
+func (t *MultiType) Zero() Value {
+ res := make([]Value, len(t.Elems))
+ for i, t := range t.Elems {
+ res[i] = t.Zero()
+ }
+ return multiV(res)
+}
+
+/*
+ * Initialize the universe
+ */
+
+func init() {
+ // Compute the float bounds as exact rationals:
+ // maxFloat32 = (2^24-1) << (127-23) and
+ // maxFloat64 = (2^53-1) << (1023-52), i.e. the largest
+ // finite values of each IEEE format; the minima are their
+ // negations.
+ numer := big.NewInt(0xffffff)
+ numer.Lsh(numer, 127-23)
+ maxFloat32Val = new(big.Rat).SetInt(numer)
+ numer.SetInt64(0x1fffffffffffff)
+ numer.Lsh(numer, 1023-52)
+ maxFloat64Val = new(big.Rat).SetInt(numer)
+ minFloat32Val = new(big.Rat).Neg(maxFloat32Val)
+ minFloat64Val = new(big.Rat).Neg(maxFloat64Val)
+
+ // To avoid portability issues all numeric types are distinct
+ // except byte, which is an alias for uint8.
+
+ // Make byte an alias for the named type uint8. Type aliases
+ // are otherwise impossible in Go, so just hack it here.
+ universe.defs["byte"] = universe.defs["uint8"]
+
+ // Built-in functions
+ universe.DefineConst("cap", universePos, capType, nil)
+ universe.DefineConst("close", universePos, closeType, nil)
+ universe.DefineConst("closed", universePos, closedType, nil)
+ universe.DefineConst("copy", universePos, copyType, nil)
+ universe.DefineConst("len", universePos, lenType, nil)
+ universe.DefineConst("make", universePos, makeType, nil)
+ universe.DefineConst("new", universePos, newType, nil)
+ universe.DefineConst("panic", universePos, panicType, nil)
+ universe.DefineConst("print", universePos, printType, nil)
+ universe.DefineConst("println", universePos, printlnType, nil)
+}
diff --git a/libgo/go/exp/eval/typec.go b/libgo/go/exp/eval/typec.go
new file mode 100644
index 000000000..de90cf664
--- /dev/null
+++ b/libgo/go/exp/eval/typec.go
@@ -0,0 +1,409 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package eval
+
+import (
+ "go/ast"
+ "go/token"
+ "log"
+)
+
+
+/*
+ * Type compiler
+ */
+
+// typeCompiler compiles ast type expressions into eval Types within
+// a particular scope block.
+type typeCompiler struct {
+ *compiler
+ block *block
+ // Check to be performed after a type declaration is compiled.
+ //
+ // TODO(austin) This will probably have to change after we
+ // eliminate forward declarations.
+ lateCheck func() bool
+}
+
+// compileIdent resolves an identifier used as a type. allowRec
+// permits references to the named type currently being defined
+// (e.g. through a pointer), which is otherwise an illegal recursion.
+// Returns nil after reporting a diagnostic on error.
+func (a *typeCompiler) compileIdent(x *ast.Ident, allowRec bool) Type {
+ _, _, def := a.block.Lookup(x.Name)
+ if def == nil {
+ a.diagAt(x.Pos(), "%s: undefined", x.Name)
+ return nil
+ }
+ switch def := def.(type) {
+ case *Constant:
+ a.diagAt(x.Pos(), "constant %v used as type", x.Name)
+ return nil
+ case *Variable:
+ a.diagAt(x.Pos(), "variable %v used as type", x.Name)
+ return nil
+ case *NamedType:
+ if !allowRec && def.incomplete {
+ a.diagAt(x.Pos(), "illegal recursive type")
+ return nil
+ }
+ if !def.incomplete && def.Def == nil {
+ // Placeholder type from an earlier error
+ return nil
+ }
+ return def
+ case Type:
+ return def
+ }
+ // Lookup returned something that is neither a constant,
+ // variable, nor type - an internal invariant violation.
+ log.Panicf("name %s has unknown type %T", x.Name, def)
+ return nil
+}
+
+// compileArrayType compiles [N]T, []T, and rejects [...]T (not
+// implemented). The element type is compiled first so its errors
+// are reported even when the length expression is also bad.
+func (a *typeCompiler) compileArrayType(x *ast.ArrayType, allowRec bool) Type {
+ // Compile element type
+ elem := a.compileType(x.Elt, allowRec)
+
+ // Compile length expression
+ if x.Len == nil {
+ if elem == nil {
+ return nil
+ }
+ return NewSliceType(elem)
+ }
+
+ if _, ok := x.Len.(*ast.Ellipsis); ok {
+ // Fixed diagnostic typo: "initailizers" -> "initializers".
+ a.diagAt(x.Len.Pos(), "... array initializers not implemented")
+ return nil
+ }
+ l, ok := a.compileArrayLen(a.block, x.Len)
+ if !ok {
+ return nil
+ }
+ if l < 0 {
+ a.diagAt(x.Len.Pos(), "array length must be non-negative")
+ return nil
+ }
+ if elem == nil {
+ return nil
+ }
+
+ return NewArrayType(l, elem)
+}
+
+// compileFields compiles a field list (struct fields, interface
+// methods, or function parameters/results) into parallel slices of
+// types, names, and positions, expanding "a, b T" groups into one
+// entry per name. bad reports whether any field's type failed to
+// compile (the corresponding type entry is nil).
+//
+// NOTE: fields.NumFields() is called before the nil check below;
+// this is safe because ast.FieldList.NumFields handles a nil
+// receiver.
+func (a *typeCompiler) compileFields(fields *ast.FieldList, allowRec bool) ([]Type, []*ast.Ident, []token.Pos, bool) {
+ n := fields.NumFields()
+ ts := make([]Type, n)
+ ns := make([]*ast.Ident, n)
+ ps := make([]token.Pos, n)
+ bad := false
+
+ if fields != nil {
+ i := 0
+ for _, f := range fields.List {
+ t := a.compileType(f.Type, allowRec)
+ if t == nil {
+ bad = true
+ }
+ if f.Names == nil {
+ // Anonymous field or unnamed parameter:
+ // one entry with a nil name, positioned at
+ // the type.
+ ns[i] = nil
+ ts[i] = t
+ ps[i] = f.Type.Pos()
+ i++
+ continue
+ }
+ for _, n := range f.Names {
+ ns[i] = n
+ ts[i] = t
+ ps[i] = n.Pos()
+ i++
+ }
+ }
+ }
+
+ return ts, ns, ps, bad
+}
+
+// compileStructType compiles a struct type literal, checking field
+// name uniqueness and the anonymous-field rules. Checks that depend
+// on the completed type (embedded pointer types) are deferred via
+// a.lateCheck.
+func (a *typeCompiler) compileStructType(x *ast.StructType, allowRec bool) Type {
+ ts, names, poss, bad := a.compileFields(x.Fields, allowRec)
+
+ // XXX(Spec) The spec claims that field identifiers must be
+ // unique, but 6g only checks this when they are accessed. I
+ // think the spec is better in this regard: if I write two
+ // fields with the same name in the same struct type, clearly
+ // that's a mistake. This definition does *not* descend into
+ // anonymous fields, so it doesn't matter if those change.
+ // There's separate language in the spec about checking
+ // uniqueness of field names inherited from anonymous fields
+ // at use time.
+ fields := make([]StructField, len(ts))
+ nameSet := make(map[string]token.Pos, len(ts))
+ for i := range fields {
+ // Compute field name and check anonymous fields
+ var name string
+ if names[i] != nil {
+ name = names[i].Name
+ } else {
+ if ts[i] == nil {
+ continue
+ }
+
+ var nt *NamedType
+ // [For anonymous fields,] the unqualified
+ // type name acts as the field identifier.
+ switch t := ts[i].(type) {
+ case *NamedType:
+ name = t.Name
+ nt = t
+ case *PtrType:
+ switch t := t.Elem.(type) {
+ case *NamedType:
+ name = t.Name
+ nt = t
+ }
+ }
+ // [An anonymous field] must be specified as a
+ // type name T or as a pointer to a type name
+ // *T, and T itself, may not be a pointer or
+ // interface type.
+ if nt == nil {
+ // Fixed diagnostic typo: "must T" -> "must be T".
+ a.diagAt(poss[i], "embedded type must be T or *T, where T is a named type")
+ bad = true
+ continue
+ }
+ // The check for embedded pointer types must
+ // be deferred because of things like
+ // type T *struct { T }
+ //
+ // Bug fix: capture this field's position now. The
+ // closure previously read poss[i] through the shared
+ // loop variable i, so by the time lateCheck ran it
+ // reported the position of the last field instead of
+ // the offending embedded field.
+ pos := poss[i]
+ lateCheck := a.lateCheck
+ a.lateCheck = func() bool {
+ if _, ok := nt.lit().(*PtrType); ok {
+ a.diagAt(pos, "embedded type %v is a pointer type", nt)
+ return false
+ }
+ return lateCheck()
+ }
+ }
+
+ // Check name uniqueness
+ if prev, ok := nameSet[name]; ok {
+ a.diagAt(poss[i], "field %s redeclared\n\tprevious declaration at %s", name, a.fset.Position(prev))
+ bad = true
+ continue
+ }
+ nameSet[name] = poss[i]
+
+ // Create field
+ fields[i].Name = name
+ fields[i].Type = ts[i]
+ fields[i].Anonymous = (names[i] == nil)
+ }
+
+ if bad {
+ return nil
+ }
+
+ return NewStructType(fields)
+}
+
+// compilePtrType compiles *T. Recursion through a pointer's element
+// type is always permitted (allowRec is true).
+func (a *typeCompiler) compilePtrType(x *ast.StarExpr) Type {
+ if elem := a.compileType(x.X, true); elem != nil {
+ return NewPtrType(elem)
+ }
+ return nil
+}
+
+// compileFuncType compiles a function type literal into a FuncDecl
+// (the type plus declared parameter/result names). Variadic
+// signatures are not yet handled.
+func (a *typeCompiler) compileFuncType(x *ast.FuncType, allowRec bool) *FuncDecl {
+ // TODO(austin) Variadic function types
+
+ // The types of parameters and results must be complete.
+ //
+ // TODO(austin) It's not clear they actually have to be complete.
+ in, inNames, _, inBad := a.compileFields(x.Params, allowRec)
+ out, outNames, _, outBad := a.compileFields(x.Results, allowRec)
+
+ if inBad || outBad {
+ return nil
+ }
+ return &FuncDecl{NewFuncType(in, false, out), nil, inNames, outNames}
+}
+
+// compileInterfaceType compiles an interface type literal, splitting
+// the field list into explicit methods (named entries) and embedded
+// interfaces (unnamed entries), and checking for duplicate method
+// names across both.
+func (a *typeCompiler) compileInterfaceType(x *ast.InterfaceType, allowRec bool) *InterfaceType {
+ ts, names, poss, bad := a.compileFields(x.Methods, allowRec)
+
+ methods := make([]IMethod, len(ts))
+ nameSet := make(map[string]token.Pos, len(ts))
+ embeds := make([]*InterfaceType, len(ts))
+
+ var nm, ne int
+ for i := range ts {
+ if ts[i] == nil {
+ continue
+ }
+
+ if names[i] != nil {
+ name := names[i].Name
+ // NOTE(review): the method is appended (and nm
+ // incremented) before the redeclaration check, so a
+ // duplicate stays in the slice; harmless in practice
+ // because bad forces a nil return below.
+ methods[nm].Name = name
+ // NOTE(review): this assertion presumably holds
+ // because named interface entries are parsed as
+ // *ast.FuncType - confirm it cannot see a non-func
+ // named type here.
+ methods[nm].Type = ts[i].(*FuncType)
+ nm++
+ if prev, ok := nameSet[name]; ok {
+ a.diagAt(poss[i], "method %s redeclared\n\tprevious declaration at %s", name, a.fset.Position(prev))
+ bad = true
+ continue
+ }
+ nameSet[name] = poss[i]
+ } else {
+ // Embedded interface
+ it, ok := ts[i].lit().(*InterfaceType)
+ if !ok {
+ a.diagAt(poss[i], "embedded type must be an interface")
+ bad = true
+ continue
+ }
+ embeds[ne] = it
+ ne++
+ for _, m := range it.methods {
+ if prev, ok := nameSet[m.Name]; ok {
+ a.diagAt(poss[i], "method %s redeclared\n\tprevious declaration at %s", m.Name, a.fset.Position(prev))
+ bad = true
+ continue
+ }
+ nameSet[m.Name] = poss[i]
+ }
+ }
+ }
+
+ if bad {
+ return nil
+ }
+
+ // Trim to the entries actually filled in.
+ methods = methods[0:nm]
+ embeds = embeds[0:ne]
+
+ return NewInterfaceType(methods, embeds)
+}
+
+// compileMapType compiles map[K]V, rejecting the key types the
+// implementation cannot hash.
+func (a *typeCompiler) compileMapType(x *ast.MapType) Type {
+ key := a.compileType(x.Key, true)
+ val := a.compileType(x.Value, true)
+ if key == nil || val == nil {
+ return nil
+ }
+ // XXX(Spec) The Map types section explicitly lists all types
+ // that can be map keys except for function types.
+ switch key.lit().(type) {
+ case *StructType:
+ a.diagAt(x.Pos(), "map key cannot be a struct type")
+ return nil
+ case *ArrayType:
+ a.diagAt(x.Pos(), "map key cannot be an array type")
+ return nil
+ case *SliceType:
+ a.diagAt(x.Pos(), "map key cannot be a slice type")
+ return nil
+ }
+ return NewMapType(key, val)
+}
+
+// compileType dispatches on the syntactic form of a type expression
+// and compiles it. Returns nil after reporting a diagnostic on any
+// error.
+func (a *typeCompiler) compileType(x ast.Expr, allowRec bool) Type {
+ switch x := x.(type) {
+ case *ast.BadExpr:
+ // Error already reported by parser
+ a.silentErrors++
+ return nil
+
+ case *ast.Ident:
+ return a.compileIdent(x, allowRec)
+
+ case *ast.ArrayType:
+ return a.compileArrayType(x, allowRec)
+
+ case *ast.StructType:
+ return a.compileStructType(x, allowRec)
+
+ case *ast.StarExpr:
+ return a.compilePtrType(x)
+
+ case *ast.FuncType:
+ fd := a.compileFuncType(x, allowRec)
+ if fd == nil {
+ return nil
+ }
+ return fd.Type
+
+ case *ast.InterfaceType:
+ return a.compileInterfaceType(x, allowRec)
+
+ case *ast.MapType:
+ return a.compileMapType(x)
+
+ case *ast.ChanType:
+ // Channel types are the one unimplemented form.
+ goto notimpl
+
+ case *ast.ParenExpr:
+ // Parenthesized type: unwrap and recurse.
+ return a.compileType(x.X, allowRec)
+
+ case *ast.Ellipsis:
+ a.diagAt(x.Pos(), "illegal use of ellipsis")
+ return nil
+ }
+ a.diagAt(x.Pos(), "expression used as type")
+ return nil
+
+notimpl:
+ a.diagAt(x.Pos(), "compileType: %T not implemented", x)
+ return nil
+}
+
+/*
+ * Type compiler interface
+ */
+
+// noLateCheck is the default lateCheck: nothing deferred, no error.
+func noLateCheck() bool { return true }
+
+// compileType compiles a type expression in block b, running any
+// deferred checks; returns nil if compilation or a late check fails.
+func (a *compiler) compileType(b *block, typ ast.Expr) Type {
+ tc := &typeCompiler{a, b, noLateCheck}
+ t := tc.compileType(typ, false)
+ if !tc.lateCheck() {
+ t = nil
+ }
+ return t
+}
+
+// compileTypeDecl compiles a "type" GenDecl. Each spec first gets
+// an incomplete NamedType so the body may refer to itself (through
+// pointers), then the definition is compiled and the named type is
+// completed. On any failure the named type is left as a nil-Def
+// placeholder and false is returned.
+func (a *compiler) compileTypeDecl(b *block, decl *ast.GenDecl) bool {
+ ok := true
+ for _, spec := range decl.Specs {
+ spec := spec.(*ast.TypeSpec)
+ // Create incomplete type for this type
+ nt := b.DefineType(spec.Name.Name, spec.Name.Pos(), nil)
+ if nt != nil {
+ nt.(*NamedType).incomplete = true
+ }
+ // Compile type
+ tc := &typeCompiler{a, b, noLateCheck}
+ t := tc.compileType(spec.Type, false)
+ if t == nil {
+ // Create a placeholder type
+ ok = false
+ }
+ // Fill incomplete type
+ if nt != nil {
+ nt.(*NamedType).Complete(t)
+ }
+ // Perform late type checking with complete type
+ if !tc.lateCheck() {
+ ok = false
+ if nt != nil {
+ // Make the type a placeholder
+ nt.(*NamedType).Def = nil
+ }
+ }
+ }
+ return ok
+}
+
+// compileFuncType compiles a function signature in block b, running
+// any deferred checks; returns nil on failure.
+func (a *compiler) compileFuncType(b *block, typ *ast.FuncType) *FuncDecl {
+ tc := &typeCompiler{a, b, noLateCheck}
+ res := tc.compileFuncType(typ, false)
+ if res != nil {
+ if !tc.lateCheck() {
+ res = nil
+ }
+ }
+ return res
+}
diff --git a/libgo/go/exp/eval/value.go b/libgo/go/exp/eval/value.go
new file mode 100644
index 000000000..daa691897
--- /dev/null
+++ b/libgo/go/exp/eval/value.go
@@ -0,0 +1,586 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package eval
+
+import (
+ "big"
+ "fmt"
+)
+
+// Value is the interface satisfied by every run-time value in the
+// interpreter.
+type Value interface {
+ String() string
+ // Assign copies another value into this one. It should
+ // assume that the other value satisfies the same specific
+ // value interface (BoolValue, etc.), but must not assume
+ // anything about its specific type.
+ Assign(t *Thread, o Value)
+}
+
+// BoolValue is a mutable boolean cell.
+type BoolValue interface {
+ Value
+ Get(*Thread) bool
+ Set(*Thread, bool)
+}
+
+// UintValue is a mutable unsigned-integer cell; all widths are
+// widened to uint64 at this interface.
+type UintValue interface {
+ Value
+ Get(*Thread) uint64
+ Set(*Thread, uint64)
+}
+
+// IntValue is a mutable signed-integer cell; all widths are widened
+// to int64 at this interface.
+type IntValue interface {
+ Value
+ Get(*Thread) int64
+ Set(*Thread, int64)
+}
+
+// TODO(austin) IdealIntValue and IdealFloatValue should not exist
+// because ideals are not l-values.
+type IdealIntValue interface {
+ Value
+ Get() *big.Int
+}
+
+// FloatValue is a mutable floating-point cell; all widths are
+// widened to float64 at this interface.
+type FloatValue interface {
+ Value
+ Get(*Thread) float64
+ Set(*Thread, float64)
+}
+
+// IdealFloatValue is an untyped floating-point constant, held as an
+// exact rational.
+type IdealFloatValue interface {
+ Value
+ Get() *big.Rat
+}
+
+// StringValue is a mutable string cell.
+type StringValue interface {
+ Value
+ Get(*Thread) string
+ Set(*Thread, string)
+}
+
+// ArrayValue is a fixed-length sequence of element Values.
+type ArrayValue interface {
+ Value
+ // TODO(austin) Get() is here for uniformity, but is
+ // completely useless. If a lot of other types have similarly
+ // useless Get methods, just special-case these uses.
+ Get(*Thread) ArrayValue
+ Elem(*Thread, int64) Value
+ // Sub returns an ArrayValue backed by the same array that
+ // starts from element i and has length len.
+ Sub(i int64, len int64) ArrayValue
+}
+
+// StructValue is a sequence of field Values, addressed by index.
+type StructValue interface {
+ Value
+ // TODO(austin) This is another useless Get()
+ Get(*Thread) StructValue
+ Field(*Thread, int) Value
+}
+
+// PtrValue is a mutable pointer cell; the pointee is itself a Value.
+type PtrValue interface {
+ Value
+ Get(*Thread) Value
+ Set(*Thread, Value)
+}
+
+// Func is a callable: it allocates its own activation frame and is
+// invoked on a Thread.
+type Func interface {
+ NewFrame() *Frame
+ Call(*Thread)
+}
+
+// FuncValue is a mutable function-valued cell.
+type FuncValue interface {
+ Value
+ Get(*Thread) Func
+ Set(*Thread, Func)
+}
+
+// Interface is a (dynamic type, dynamic value) pair.
+type Interface struct {
+ Type Type
+ Value Value
+}
+
+// InterfaceValue is a mutable interface-valued cell.
+type InterfaceValue interface {
+ Value
+ Get(*Thread) Interface
+ Set(*Thread, Interface)
+}
+
+// Slice is a view (with length and capacity) onto a backing array.
+type Slice struct {
+ Base ArrayValue
+ Len, Cap int64
+}
+
+// SliceValue is a mutable slice-valued cell.
+type SliceValue interface {
+ Value
+ Get(*Thread) Slice
+ Set(*Thread, Slice)
+}
+
+// Map is the behavior required of a map implementation.
+type Map interface {
+ Len(*Thread) int64
+ // Retrieve an element from the map, returning nil if it does
+ // not exist.
+ Elem(t *Thread, key interface{}) Value
+ // Set an entry in the map. If val is nil, delete the entry.
+ SetElem(t *Thread, key interface{}, val Value)
+ // TODO(austin) Perhaps there should be an iterator interface instead.
+ Iter(func(key interface{}, val Value) bool)
+}
+
+// MapValue is a mutable map-valued cell.
+type MapValue interface {
+ Value
+ Get(*Thread) Map
+ Set(*Thread, Map)
+}
+
+/*
+ * Bool
+ */
+
+// boolV implements BoolValue as a simple cell. All of the primitive
+// implementations below follow the same pattern: a defined type over
+// the underlying Go type, with String/Assign/Get/Set that convert
+// through the widest representation (uint64, int64, float64).
+type boolV bool
+
+func (v *boolV) String() string { return fmt.Sprint(*v) }
+
+func (v *boolV) Assign(t *Thread, o Value) { *v = boolV(o.(BoolValue).Get(t)) }
+
+func (v *boolV) Get(*Thread) bool { return bool(*v) }
+
+func (v *boolV) Set(t *Thread, x bool) { *v = boolV(x) }
+
+/*
+ * Uint
+ */
+
+// The unsigned cells: each stores at its native width and truncates
+// on Set via the type conversion.
+type uint8V uint8
+
+func (v *uint8V) String() string { return fmt.Sprint(*v) }
+
+func (v *uint8V) Assign(t *Thread, o Value) { *v = uint8V(o.(UintValue).Get(t)) }
+
+func (v *uint8V) Get(*Thread) uint64 { return uint64(*v) }
+
+func (v *uint8V) Set(t *Thread, x uint64) { *v = uint8V(x) }
+
+type uint16V uint16
+
+func (v *uint16V) String() string { return fmt.Sprint(*v) }
+
+func (v *uint16V) Assign(t *Thread, o Value) { *v = uint16V(o.(UintValue).Get(t)) }
+
+func (v *uint16V) Get(*Thread) uint64 { return uint64(*v) }
+
+func (v *uint16V) Set(t *Thread, x uint64) { *v = uint16V(x) }
+
+type uint32V uint32
+
+func (v *uint32V) String() string { return fmt.Sprint(*v) }
+
+func (v *uint32V) Assign(t *Thread, o Value) { *v = uint32V(o.(UintValue).Get(t)) }
+
+func (v *uint32V) Get(*Thread) uint64 { return uint64(*v) }
+
+func (v *uint32V) Set(t *Thread, x uint64) { *v = uint32V(x) }
+
+type uint64V uint64
+
+func (v *uint64V) String() string { return fmt.Sprint(*v) }
+
+func (v *uint64V) Assign(t *Thread, o Value) { *v = uint64V(o.(UintValue).Get(t)) }
+
+func (v *uint64V) Get(*Thread) uint64 { return uint64(*v) }
+
+func (v *uint64V) Set(t *Thread, x uint64) { *v = uint64V(x) }
+
+type uintV uint
+
+func (v *uintV) String() string { return fmt.Sprint(*v) }
+
+func (v *uintV) Assign(t *Thread, o Value) { *v = uintV(o.(UintValue).Get(t)) }
+
+func (v *uintV) Get(*Thread) uint64 { return uint64(*v) }
+
+func (v *uintV) Set(t *Thread, x uint64) { *v = uintV(x) }
+
+type uintptrV uintptr
+
+func (v *uintptrV) String() string { return fmt.Sprint(*v) }
+
+func (v *uintptrV) Assign(t *Thread, o Value) { *v = uintptrV(o.(UintValue).Get(t)) }
+
+func (v *uintptrV) Get(*Thread) uint64 { return uint64(*v) }
+
+func (v *uintptrV) Set(t *Thread, x uint64) { *v = uintptrV(x) }
+
+/*
+ * Int
+ */
+
+// The signed cells, mirroring the unsigned ones above.
+type int8V int8
+
+func (v *int8V) String() string { return fmt.Sprint(*v) }
+
+func (v *int8V) Assign(t *Thread, o Value) { *v = int8V(o.(IntValue).Get(t)) }
+
+func (v *int8V) Get(*Thread) int64 { return int64(*v) }
+
+func (v *int8V) Set(t *Thread, x int64) { *v = int8V(x) }
+
+type int16V int16
+
+func (v *int16V) String() string { return fmt.Sprint(*v) }
+
+func (v *int16V) Assign(t *Thread, o Value) { *v = int16V(o.(IntValue).Get(t)) }
+
+func (v *int16V) Get(*Thread) int64 { return int64(*v) }
+
+func (v *int16V) Set(t *Thread, x int64) { *v = int16V(x) }
+
+type int32V int32
+
+func (v *int32V) String() string { return fmt.Sprint(*v) }
+
+func (v *int32V) Assign(t *Thread, o Value) { *v = int32V(o.(IntValue).Get(t)) }
+
+func (v *int32V) Get(*Thread) int64 { return int64(*v) }
+
+func (v *int32V) Set(t *Thread, x int64) { *v = int32V(x) }
+
+type int64V int64
+
+func (v *int64V) String() string { return fmt.Sprint(*v) }
+
+func (v *int64V) Assign(t *Thread, o Value) { *v = int64V(o.(IntValue).Get(t)) }
+
+func (v *int64V) Get(*Thread) int64 { return int64(*v) }
+
+func (v *int64V) Set(t *Thread, x int64) { *v = int64V(x) }
+
+type intV int
+
+func (v *intV) String() string { return fmt.Sprint(*v) }
+
+func (v *intV) Assign(t *Thread, o Value) { *v = intV(o.(IntValue).Get(t)) }
+
+func (v *intV) Get(*Thread) int64 { return int64(*v) }
+
+func (v *intV) Set(t *Thread, x int64) { *v = intV(x) }
+
+/*
+ * Ideal int
+ */
+
+// idealIntV holds an untyped integer constant as an arbitrary
+// precision integer. Assign shares the *big.Int rather than copying
+// it; ideals are immutable in practice.
+type idealIntV struct {
+ V *big.Int
+}
+
+func (v *idealIntV) String() string { return v.V.String() }
+
+func (v *idealIntV) Assign(t *Thread, o Value) {
+ v.V = o.(IdealIntValue).Get()
+}
+
+func (v *idealIntV) Get() *big.Int { return v.V }
+
+/*
+ * Float
+ */
+
+// The floating-point cells, same pattern as the integer cells.
+type float32V float32
+
+func (v *float32V) String() string { return fmt.Sprint(*v) }
+
+func (v *float32V) Assign(t *Thread, o Value) { *v = float32V(o.(FloatValue).Get(t)) }
+
+func (v *float32V) Get(*Thread) float64 { return float64(*v) }
+
+func (v *float32V) Set(t *Thread, x float64) { *v = float32V(x) }
+
+type float64V float64
+
+func (v *float64V) String() string { return fmt.Sprint(*v) }
+
+func (v *float64V) Assign(t *Thread, o Value) { *v = float64V(o.(FloatValue).Get(t)) }
+
+func (v *float64V) Get(*Thread) float64 { return float64(*v) }
+
+func (v *float64V) Set(t *Thread, x float64) { *v = float64V(x) }
+
+/*
+ * Ideal float
+ */
+
+// idealFloatV holds an untyped float constant as an exact rational;
+// String prints it with 6 decimal places.
+type idealFloatV struct {
+ V *big.Rat
+}
+
+func (v *idealFloatV) String() string { return v.V.FloatString(6) }
+
+func (v *idealFloatV) Assign(t *Thread, o Value) {
+ v.V = o.(IdealFloatValue).Get()
+}
+
+func (v *idealFloatV) Get() *big.Rat { return v.V }
+
+/*
+ * String
+ */
+
+// stringV is the mutable string cell.
+type stringV string
+
+func (v *stringV) String() string { return fmt.Sprint(*v) }
+
+func (v *stringV) Assign(t *Thread, o Value) { *v = stringV(o.(StringValue).Get(t)) }
+
+func (v *stringV) Get(*Thread) string { return string(*v) }
+
+func (v *stringV) Set(t *Thread, x string) { *v = stringV(x) }
+
+/*
+ * Array
+ */
+
+// arrayV implements ArrayValue as a slice of element cells.
+type arrayV []Value
+
+func (v *arrayV) String() string {
+ res := "{"
+ for i, e := range *v {
+ if i > 0 {
+ res += ", "
+ }
+ res += e.String()
+ }
+ return res + "}"
+}
+
+// Assign copies element-by-element so each cell's own Assign
+// performs the width conversion for its type.
+func (v *arrayV) Assign(t *Thread, o Value) {
+ oa := o.(ArrayValue)
+ l := int64(len(*v))
+ for i := int64(0); i < l; i++ {
+ (*v)[i].Assign(t, oa.Elem(t, i))
+ }
+}
+
+func (v *arrayV) Get(*Thread) ArrayValue { return v }
+
+func (v *arrayV) Elem(t *Thread, i int64) Value {
+ return (*v)[i]
+}
+
+// Sub returns a view sharing this array's backing storage.
+func (v *arrayV) Sub(i int64, len int64) ArrayValue {
+ res := (*v)[i : i+len]
+ return &res
+}
+
+/*
+ * Struct
+ */
+
+// structV implements StructValue as a slice of field cells.
+type structV []Value
+
+// TODO(austin) Should these methods (and arrayV's) be on structV
+// instead of *structV?
+func (v *structV) String() string {
+ res := "{"
+ for i, v := range *v {
+ if i > 0 {
+ res += ", "
+ }
+ res += v.String()
+ }
+ return res + "}"
+}
+
+// Assign copies field-by-field, delegating conversion to each
+// field's own Assign.
+func (v *structV) Assign(t *Thread, o Value) {
+ oa := o.(StructValue)
+ l := len(*v)
+ for i := 0; i < l; i++ {
+ (*v)[i].Assign(t, oa.Field(t, i))
+ }
+}
+
+func (v *structV) Get(*Thread) StructValue { return v }
+
+func (v *structV) Field(t *Thread, i int) Value {
+ return (*v)[i]
+}
+
+/*
+ * Pointer
+ */
+
+// ptrV is the mutable pointer cell; a nil target is the nil pointer.
+type ptrV struct {
+ // nil if the pointer is nil
+ target Value
+}
+
+func (v *ptrV) String() string {
+ if v.target == nil {
+ return "<nil>"
+ }
+ return "&" + v.target.String()
+}
+
+func (v *ptrV) Assign(t *Thread, o Value) { v.target = o.(PtrValue).Get(t) }
+
+func (v *ptrV) Get(*Thread) Value { return v.target }
+
+func (v *ptrV) Set(t *Thread, x Value) { v.target = x }
+
+/*
+ * Functions
+ */
+
+// funcV is the mutable function cell.
+type funcV struct {
+ target Func
+}
+
+func (v *funcV) String() string {
+ // TODO(austin) Rob wants to see the definition
+ return "func {...}"
+}
+
+func (v *funcV) Assign(t *Thread, o Value) { v.target = o.(FuncValue).Get(t) }
+
+func (v *funcV) Get(*Thread) Func { return v.target }
+
+func (v *funcV) Set(t *Thread, x Func) { v.target = x }
+
+/*
+ * Interfaces
+ */
+
+// interfaceV embeds the (type, value) pair directly; the zero value
+// is the nil interface.
+type interfaceV struct {
+ Interface
+}
+
+func (v *interfaceV) String() string {
+ if v.Type == nil || v.Value == nil {
+ return "<nil>"
+ }
+ return v.Value.String()
+}
+
+func (v *interfaceV) Assign(t *Thread, o Value) {
+ v.Interface = o.(InterfaceValue).Get(t)
+}
+
+func (v *interfaceV) Get(*Thread) Interface { return v.Interface }
+
+func (v *interfaceV) Set(t *Thread, x Interface) {
+ v.Interface = x
+}
+
+/*
+ * Slices
+ */
+
+// sliceV embeds the slice header directly; a nil Base is the nil
+// slice.
+type sliceV struct {
+ Slice
+}
+
+func (v *sliceV) String() string {
+ if v.Base == nil {
+ return "<nil>"
+ }
+ // Print only the first Len elements of the backing array.
+ return v.Base.Sub(0, v.Len).String()
+}
+
+func (v *sliceV) Assign(t *Thread, o Value) { v.Slice = o.(SliceValue).Get(t) }
+
+func (v *sliceV) Get(*Thread) Slice { return v.Slice }
+
+func (v *sliceV) Set(t *Thread, x Slice) { v.Slice = x }
+
+/*
+ * Maps
+ */
+
+// mapV is the mutable map cell; a nil target is the nil map.
+type mapV struct {
+ target Map
+}
+
+func (v *mapV) String() string {
+ if v.target == nil {
+ return "<nil>"
+ }
+ res := "map["
+ i := 0
+ v.target.Iter(func(key interface{}, val Value) bool {
+ if i > 0 {
+ res += ", "
+ }
+ i++
+ res += fmt.Sprint(key) + ":" + val.String()
+ return true
+ })
+ return res + "]"
+}
+
+func (v *mapV) Assign(t *Thread, o Value) { v.target = o.(MapValue).Get(t) }
+
+func (v *mapV) Get(*Thread) Map { return v.target }
+
+func (v *mapV) Set(t *Thread, x Map) { v.target = x }
+
+// evalMap implements Map on top of a native Go map.
+type evalMap map[interface{}]Value
+
+func (m evalMap) Len(t *Thread) int64 { return int64(len(m)) }
+
+func (m evalMap) Elem(t *Thread, key interface{}) Value {
+ return m[key]
+}
+
+func (m evalMap) SetElem(t *Thread, key interface{}, val Value) {
+ if val == nil {
+ // Pre-Go 1 map-deletion form; equivalent to
+ // delete(m, key) in modern Go.
+ m[key] = nil, false
+ } else {
+ m[key] = val
+ }
+}
+
+// Iter calls cb for each entry (in unspecified order) until cb
+// returns false.
+func (m evalMap) Iter(cb func(key interface{}, val Value) bool) {
+ for k, v := range m {
+ if !cb(k, v) {
+ break
+ }
+ }
+}
+
+/*
+ * Multi-values
+ */
+
+// multiV is a tuple of values for multi-valued expressions; note
+// the value (not pointer) receivers.
+type multiV []Value
+
+func (v multiV) String() string {
+ res := "("
+ for i, v := range v {
+ if i > 0 {
+ res += ", "
+ }
+ res += v.String()
+ }
+ return res + ")"
+}
+
+// Assign copies component-wise; the source must have the same arity.
+func (v multiV) Assign(t *Thread, o Value) {
+ omv := o.(multiV)
+ for i := range v {
+ v[i].Assign(t, omv[i])
+ }
+}
+
+/*
+ * Universal constants
+ */
+
+// Define the predeclared constants true and false in the universe
+// scope.
+func init() {
+ s := universe
+
+ true := boolV(true)
+ s.DefineConst("true", universePos, BoolType, &true)
+ false := boolV(false)
+ s.DefineConst("false", universePos, BoolType, &false)
+}
diff --git a/libgo/go/exp/eval/world.go b/libgo/go/exp/eval/world.go
new file mode 100644
index 000000000..02d18bd79
--- /dev/null
+++ b/libgo/go/exp/eval/world.go
@@ -0,0 +1,188 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This package is the beginning of an interpreter for Go.
+// It can run simple Go programs but does not implement
+// interface values or packages.
+package eval
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "os"
+)
+
+// A World holds interpreter state shared across compilations: the
+// global scope in which code is compiled and its frame.
+type World struct {
+	scope *Scope
+	frame *Frame
+}
+
+// NewWorld creates a fresh interpreter world whose scope is a child of
+// the universe scope.
+func NewWorld() *World {
+	w := &World{scope: universe.ChildScope()}
+	w.scope.global = true // this block's vars allocate directly
+	return w
+}
+
+// Code is a compiled unit of input, ready to run.
+type Code interface {
+	// The type of the value Run returns, or nil if Run returns nil.
+	Type() Type
+
+	// Run runs the code; if the code is a single expression
+	// with a value, it returns the value; otherwise it returns nil.
+	Run() (Value, os.Error)
+}
+
+// stmtCode is a compiled statement list; running it yields no value.
+type stmtCode struct {
+	w    *World
+	code code
+}
+
+// CompileStmtList compiles a list of statements in the world's global
+// block.  A lone expression statement is compiled via CompileExpr so
+// that its value can be returned from Run.
+func (w *World) CompileStmtList(fset *token.FileSet, stmts []ast.Stmt) (Code, os.Error) {
+	if len(stmts) == 1 {
+		if s, ok := stmts[0].(*ast.ExprStmt); ok {
+			return w.CompileExpr(fset, s.X)
+		}
+	}
+	errors := new(scanner.ErrorVector)
+	cc := &compiler{fset, errors, 0, 0}
+	cb := newCodeBuf()
+	// Statement lists compile as the body of an anonymous, untyped
+	// function frame.
+	fc := &funcCompiler{
+		compiler:     cc,
+		fnType:       nil,
+		outVarsNamed: false,
+		codeBuf:      cb,
+		flow:         newFlowBuf(cb),
+		labels:       make(map[string]*label),
+	}
+	bc := &blockCompiler{
+		funcCompiler: fc,
+		block:        w.scope.block,
+	}
+	// Snapshot the error count so only errors from this compilation
+	// are reported.
+	nerr := cc.numError()
+	for _, stmt := range stmts {
+		bc.compileStmt(stmt)
+	}
+	fc.checkLabels()
+	if nerr != cc.numError() {
+		return nil, errors.GetError(scanner.Sorted)
+	}
+	return &stmtCode{w, fc.get()}, nil
+}
+
+// CompileDeclList compiles a list of declarations by wrapping each one
+// in an ast.DeclStmt and delegating to CompileStmtList.
+func (w *World) CompileDeclList(fset *token.FileSet, decls []ast.Decl) (Code, os.Error) {
+	stmts := make([]ast.Stmt, len(decls))
+	for i := range decls {
+		stmts[i] = &ast.DeclStmt{decls[i]}
+	}
+	return w.CompileStmtList(fset, stmts)
+}
+
+// Type returns nil: statement lists produce no value.
+func (s *stmtCode) Type() Type { return nil }
+
+// Run executes the compiled statements in a new thread whose frame is
+// rooted at the world's global scope.  The Value result is always nil.
+func (s *stmtCode) Run() (Value, os.Error) {
+	t := new(Thread)
+	t.f = s.w.scope.NewFrame(nil)
+	err := t.Try(func(t *Thread) { s.code.exec(t) })
+	return nil, err
+}
+
+// exprCode is a compiled single expression.  For typed (non-ideal)
+// expressions, eval assigns the expression's value into a
+// caller-supplied Value; for ideal constants eval is nil and Run
+// extracts the constant directly.
+type exprCode struct {
+	w    *World
+	e    *expr
+	eval func(Value, *Thread)
+}
+
+// CompileExpr compiles a single expression in the world's global block.
+// Ideal (untyped constant) expressions need no evaluator; an empty
+// multi-value compiles to statement code; everything else gets an
+// assignment function that evaluates into a caller-provided Value.
+func (w *World) CompileExpr(fset *token.FileSet, e ast.Expr) (Code, os.Error) {
+	errors := new(scanner.ErrorVector)
+	cc := &compiler{fset, errors, 0, 0}
+
+	ec := cc.compileExpr(w.scope.block, false, e)
+	if ec == nil {
+		return nil, errors.GetError(scanner.Sorted)
+	}
+	var eval func(Value, *Thread)
+	switch t := ec.t.(type) {
+	case *idealIntType:
+		// nothing
+	case *idealFloatType:
+		// nothing
+	default:
+		// An expression of type () produces no value; run it as a
+		// statement instead.
+		if tm, ok := t.(*MultiType); ok && len(tm.Elems) == 0 {
+			return &stmtCode{w, code{ec.exec}}, nil
+		}
+		eval = genAssign(ec.t, ec)
+	}
+	return &exprCode{w, ec, eval}, nil
+}
+
+// Type returns the static type of the compiled expression.
+func (e *exprCode) Type() Type { return e.e.t }
+
+// Run evaluates the expression in a fresh thread and returns its value.
+// Ideal constants are wrapped directly; typed expressions are evaluated
+// into a zero Value of the expression's type.
+func (e *exprCode) Run() (Value, os.Error) {
+	t := new(Thread)
+	t.f = e.w.scope.NewFrame(nil)
+	switch e.e.t.(type) {
+	case *idealIntType:
+		return &idealIntV{e.e.asIdealInt()()}, nil
+	case *idealFloatType:
+		return &idealFloatV{e.e.asIdealFloat()()}, nil
+	}
+	v := e.e.t.Zero()
+	eval := e.eval
+	err := t.Try(func(t *Thread) { eval(v, t) })
+	return v, err
+}
+
+// Compile parses and compiles text, trying it first as a statement
+// list and then, failing that, as a declaration list.
+func (w *World) Compile(fset *token.FileSet, text string) (Code, os.Error) {
+	stmts, err := parser.ParseStmtList(fset, "input", text)
+	if err == nil {
+		return w.CompileStmtList(fset, stmts)
+	}
+
+	// Otherwise try as DeclList.
+	if decls, err1 := parser.ParseDeclList(fset, "input", text); err1 == nil {
+		return w.CompileDeclList(fset, decls)
+	}
+
+	// Have to pick an error.  Parsing as a statement list admits more
+	// forms, so its error is more likely to be useful.
+	return nil, err
+}
+
+// A RedefinitionError reports an attempt to define a name that is
+// already defined; Prev is the previous definition.
+type RedefinitionError struct {
+	Name string
+	Prev Def
+}
+
+// String formats the error message.  Note: the previous-position
+// branch is unfinished upstream and deliberately panics.
+func (e *RedefinitionError) String() string {
+	res := "identifier " + e.Name + " redeclared"
+	pos := e.Prev.Pos()
+	if pos.IsValid() {
+		// TODO: fix this - currently this code is not reached by the tests
+		// need to get a file set (fset) from somewhere
+		//res += "; previous declaration at " + fset.Position(pos).String()
+		panic(0)
+	}
+	return res
+}
+
+// DefineConst binds name to the constant val of type t in the world's
+// scope.  It returns a RedefinitionError if name is already defined.
+func (w *World) DefineConst(name string, t Type, val Value) os.Error {
+	if _, prev := w.scope.DefineConst(name, token.NoPos, t, val); prev != nil {
+		return &RedefinitionError{name, prev}
+	}
+	return nil
+}
+
+// DefineVar declares name as a variable of type t in the world's scope
+// and records val as its initial value.  It returns a
+// RedefinitionError if name is already defined.
+func (w *World) DefineVar(name string, t Type, val Value) os.Error {
+	cell, prev := w.scope.DefineVar(name, token.NoPos, t)
+	if prev != nil {
+		return &RedefinitionError{name, prev}
+	}
+	cell.Init = val
+	return nil
+}