Diffstat (limited to 'libgo/go/runtime')
-rw-r--r--  libgo/go/runtime/chan_defs.go            56
-rw-r--r--  libgo/go/runtime/debug.go               159
-rw-r--r--  libgo/go/runtime/debug/stack.go          90
-rw-r--r--  libgo/go/runtime/debug/stack_test.go     55
-rw-r--r--  libgo/go/runtime/error.go               133
-rw-r--r--  libgo/go/runtime/export_test.go          17
-rw-r--r--  libgo/go/runtime/extern.go              163
-rw-r--r--  libgo/go/runtime/hashmap_defs.go         51
-rw-r--r--  libgo/go/runtime/iface_defs.go           18
-rw-r--r--  libgo/go/runtime/malloc_defs.go         130
-rw-r--r--  libgo/go/runtime/mheapmap32_defs.go      23
-rw-r--r--  libgo/go/runtime/mheapmap64_defs.go      31
-rw-r--r--  libgo/go/runtime/pprof/pprof.go         108
-rw-r--r--  libgo/go/runtime/runtime_defs.go        200
-rw-r--r--  libgo/go/runtime/sig.go                  16
-rw-r--r--  libgo/go/runtime/softfloat64.go         498
-rw-r--r--  libgo/go/runtime/softfloat64_test.go    198
-rw-r--r--  libgo/go/runtime/type.go                206
18 files changed, 2152 insertions, 0 deletions
diff --git a/libgo/go/runtime/chan_defs.go b/libgo/go/runtime/chan_defs.go
new file mode 100644
index 000000000..5cfea6e15
--- /dev/null
+++ b/libgo/go/runtime/chan_defs.go
@@ -0,0 +1,56 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Go definitions of internal structures. Master is chan.c
+
+package runtime
+
+type sudoG struct {
+ g *g_
+ selgen uint32
+ offset int16
+ isfree int8
+ link *sudoG
+ elem [8]byte
+}
+
+type waitQ struct {
+ first *sudoG
+ last *sudoG
+}
+
+type hChan struct {
+ qcount uint32
+ dataqsiz uint32
+ elemsize uint16
+ closed uint16
+ elemalign uint8
+ elemalg *alg
+ senddataq *link
+ recvdataq *link
+ recvq waitQ
+ sendq waitQ
+ free sudoG
+ lock
+}
+
+type link struct {
+ link *link
+ elem [8]byte
+}
+
+type scase struct {
+ chan_ *hChan
+ pc *byte
+ send uint16
+ so uint16
+ elemp *byte // union elem [8]byte
+}
+
+type select_ struct {
+ tcase uint16
+ ncase uint16
+ link *select_
+ scase [1]*scase
+}
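
As a rough illustration of how these fields line up with an ordinary channel (a hypothetical standalone sketch, assuming the fields mean what their names suggest; not part of this commit):

package main

func main() {
	// A buffered channel like this is backed by an hChan whose dataqsiz
	// is the ring capacity (4) and whose qcount tracks the number of
	// queued elements; blocked senders and receivers park as sudoG
	// records on sendq and recvq.
	c := make(chan int, 4)
	c <- 1
	c <- 2
	println(len(c), cap(c)) // 2 4: mirrors qcount and dataqsiz
}
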
diff --git a/libgo/go/runtime/debug.go b/libgo/go/runtime/debug.go
new file mode 100644
index 000000000..803ea4921
--- /dev/null
+++ b/libgo/go/runtime/debug.go
@@ -0,0 +1,159 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// Breakpoint() executes a breakpoint trap.
+func Breakpoint()
+
+// LockOSThread wires the calling goroutine to its current operating system thread.
+// Until the calling goroutine exits or calls UnlockOSThread, it will always
+// execute in that thread, and no other goroutine can.
+// LockOSThread cannot be used during init functions.
+func LockOSThread()
+
+// UnlockOSThread unwires the calling goroutine from its fixed operating system thread.
+// If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op.
+func UnlockOSThread()
+
+// GOMAXPROCS sets the maximum number of CPUs that can be executing
+// simultaneously and returns the previous setting. If n < 1, it does not
+// change the current setting.
+// This call will go away when the scheduler improves.
+func GOMAXPROCS(n int) int
+
+// Cgocalls returns the number of cgo calls made by the current process.
+func Cgocalls() int64
+
+// Goroutines returns the number of goroutines that currently exist.
+func Goroutines() int32
+
+type MemStatsType struct {
+ // General statistics.
+ // Not locked during update; approximate.
+ Alloc uint64 // bytes allocated and still in use
+ TotalAlloc uint64 // bytes allocated (even if freed)
+ Sys uint64 // bytes obtained from system (should be sum of XxxSys below)
+ Lookups uint64 // number of pointer lookups
+ Mallocs uint64 // number of mallocs
+ Frees uint64 // number of frees
+
+ // Main allocation heap statistics.
+ HeapAlloc uint64 // bytes allocated and still in use
+ HeapSys uint64 // bytes obtained from system
+ HeapIdle uint64 // bytes in idle spans
+ HeapInuse uint64 // bytes in non-idle span
+ HeapObjects uint64 // total number of allocated objects
+
+ // Low-level fixed-size structure allocator statistics.
+ // Inuse is bytes used now.
+ // Sys is bytes obtained from system.
+ StackInuse uint64 // bootstrap stacks
+ StackSys uint64
+ MSpanInuse uint64 // mspan structures
+ MSpanSys uint64
+ MCacheInuse uint64 // mcache structures
+ MCacheSys uint64
+ MHeapMapSys uint64 // heap map
+ BuckHashSys uint64 // profiling bucket hash table
+
+ // Garbage collector statistics.
+ NextGC uint64
+ PauseTotalNs uint64
+ PauseNs [256]uint64 // most recent GC pause times
+ NumGC uint32
+ EnableGC bool
+ DebugGC bool
+
+ // Per-size allocation statistics.
+ // Not locked during update; approximate.
+ BySize [67]struct {
+ Size uint32
+ Mallocs uint64
+ Frees uint64
+ }
+}
+
+var Sizeof_C_MStats int // filled in by malloc.goc
+
+func init() {
+ if Sizeof_C_MStats != unsafe.Sizeof(MemStats) {
+ println(Sizeof_C_MStats, unsafe.Sizeof(MemStats))
+ panic("MStats vs MemStatsType size mismatch")
+ }
+}
+
+// MemStats holds statistics about the memory system.
+// The statistics are only approximate, as they are not interlocked on update.
+var MemStats MemStatsType
+
+// Alloc allocates a block of the given size.
+// FOR TESTING AND DEBUGGING ONLY.
+func Alloc(uintptr) *byte
+
+// Free frees the block starting at the given pointer.
+// FOR TESTING AND DEBUGGING ONLY.
+func Free(*byte)
+
+// Lookup returns the base and size of the block containing the given pointer.
+// FOR TESTING AND DEBUGGING ONLY.
+func Lookup(*byte) (*byte, uintptr)
+
+// GC runs a garbage collection.
+func GC()
+
+// MemProfileRate controls the fraction of memory allocations
+// that are recorded and reported in the memory profile.
+// The profiler aims to sample an average of
+// one allocation per MemProfileRate bytes allocated.
+//
+// To include every allocated block in the profile, set MemProfileRate to 1.
+// To turn off profiling entirely, set MemProfileRate to 0.
+//
+// The tools that process the memory profiles assume that the
+// profile rate is constant across the lifetime of the program
+// and equal to the current value. Programs that change the
+// memory profiling rate should do so just once, as early as
+// possible in the execution of the program (for example,
+// at the beginning of main).
+var MemProfileRate int = 512 * 1024
+
+// A MemProfileRecord describes the live objects allocated
+// by a particular call sequence (stack trace).
+type MemProfileRecord struct {
+ AllocBytes, FreeBytes int64 // number of bytes allocated, freed
+ AllocObjects, FreeObjects int64 // number of objects allocated, freed
+ Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
+}
+
+// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
+func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }
+
+// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
+func (r *MemProfileRecord) InUseObjects() int64 {
+ return r.AllocObjects - r.FreeObjects
+}
+
+// Stack returns the stack trace associated with the record,
+// a prefix of r.Stack0.
+func (r *MemProfileRecord) Stack() []uintptr {
+ for i, v := range r.Stack0 {
+ if v == 0 {
+ return r.Stack0[0:i]
+ }
+ }
+ return r.Stack0[0:]
+}
+
+// MemProfile returns n, the number of records in the current memory profile.
+// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
+// If len(p) < n, MemProfile does not change p and returns n, false.
+//
+// If inuseZero is true, the profile includes allocation records
+// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
+// These are sites where memory was allocated, but it has all
+// been released back to the runtime.
+func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool)
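
The two-call sizing pattern this comment implies is the one pprof.go below actually uses; as a minimal standalone sketch:

package main

import "runtime"

func main() {
	// First call learns the record count; the loop retries with slack
	// in case records are added between the two calls.
	n, _ := runtime.MemProfile(nil, false)
	var p []runtime.MemProfileRecord
	for {
		p = make([]runtime.MemProfileRecord, n+50)
		var ok bool
		n, ok = runtime.MemProfile(p, false)
		if ok {
			p = p[0:n]
			break
		}
	}
	for i := range p {
		r := &p[i]
		println(r.InUseObjects(), "objects,", r.InUseBytes(), "bytes in use")
	}
}
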
diff --git a/libgo/go/runtime/debug/stack.go b/libgo/go/runtime/debug/stack.go
new file mode 100644
index 000000000..e7d56ac23
--- /dev/null
+++ b/libgo/go/runtime/debug/stack.go
@@ -0,0 +1,90 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The debug package contains facilities for programs to debug themselves
+// while they are running.
+package debug
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "runtime"
+)
+
+var (
+ dunno = []byte("???")
+ centerDot = []byte("·")
+ dot = []byte(".")
+)
+
+// PrintStack prints to standard error the stack trace returned by Stack.
+func PrintStack() {
+ os.Stderr.Write(stack())
+}
+
+// Stack returns a formatted stack trace of the goroutine that calls it.
+// For each routine, it includes the source line information and PC value,
+// then attempts to discover, for Go functions, the calling function or
+// method and the text of the line containing the invocation.
+func Stack() []byte {
+ return stack()
+}
+
+// stack implements Stack, skipping 2 frames
+func stack() []byte {
+ buf := new(bytes.Buffer) // the returned data
+ // As we loop, we open files and read them. These variables record the currently
+ // loaded file.
+ var lines [][]byte
+ var lastFile string
+ for i := 2; ; i++ { // Caller we care about is the user, 2 frames up
+ pc, file, line, ok := runtime.Caller(i)
+ if !ok {
+ break
+ }
+ // Print this much at least. If we can't find the source, it won't show.
+ fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc)
+ if file != lastFile {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ continue
+ }
+ lines = bytes.Split(data, []byte{'\n'}, -1)
+ lastFile = file
+ }
+ line-- // in stack trace, lines are 1-indexed but our array is 0-indexed
+ fmt.Fprintf(buf, "\t%s: %s\n", function(pc), source(lines, line))
+ }
+ return buf.Bytes()
+}
+
+// source returns a space-trimmed slice of the n'th line.
+func source(lines [][]byte, n int) []byte {
+ if n < 0 || n >= len(lines) {
+ return dunno
+ }
+ return bytes.Trim(lines[n], " \t")
+}
+
+// function returns, if possible, the name of the function containing the PC.
+func function(pc uintptr) []byte {
+ fn := runtime.FuncForPC(pc)
+ if fn == nil {
+ return dunno
+ }
+ name := []byte(fn.Name())
+ // The name includes the path name to the package, which is unnecessary
+ // since the file name is already included. Plus, it has center dots.
+ // That is, we see
+ // runtime/debug.*T·ptrmethod
+ // and want
+ // *T.ptrmethod
+ if period := bytes.Index(name, dot); period >= 0 {
+ name = name[period+1:]
+ }
+ name = bytes.Replace(name, centerDot, dot, -1)
+ return name
+}
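
A typical use of this package is dumping the annotated trace while recovering from a panic; a minimal sketch:

package main

import "runtime/debug"

func main() {
	defer func() {
		if e := recover(); e != nil {
			// Prints each frame with file:line and PC, plus the invoking
			// source line when the source files are readable at run time.
			debug.PrintStack()
		}
	}()
	panic("show the stack")
}
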
diff --git a/libgo/go/runtime/debug/stack_test.go b/libgo/go/runtime/debug/stack_test.go
new file mode 100644
index 000000000..f4bdc4624
--- /dev/null
+++ b/libgo/go/runtime/debug/stack_test.go
@@ -0,0 +1,55 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug
+
+import (
+ "strings"
+ "testing"
+)
+
+type T int
+
+func (t *T) ptrmethod() []byte {
+ return Stack()
+}
+func (t T) method() []byte {
+ return t.ptrmethod()
+}
+
+/*
+ The traceback should look something like this, modulo line numbers and hex constants.
+ Don't worry much about the base levels, but check the ones in our own package.
+
+ /Users/r/go/src/pkg/runtime/debug/stack_test.go:15 (0x13878)
+ *T.ptrmethod: return Stack()
+ /Users/r/go/src/pkg/runtime/debug/stack_test.go:18 (0x138dd)
+ T.method: return t.ptrmethod()
+ /Users/r/go/src/pkg/runtime/debug/stack_test.go:23 (0x13920)
+ TestStack: b := T(0).method()
+ /Users/r/go/src/pkg/testing/testing.go:132 (0x14a7a)
+ tRunner: test.F(t)
+ /Users/r/go/src/pkg/runtime/proc.c:145 (0xc970)
+ ???: runtime·unlock(&runtime·sched);
+*/
+func TestStack(t *testing.T) {
+ b := T(0).method()
+ lines := strings.Split(string(b), "\n", -1)
+ if len(lines) <= 6 {
+ t.Fatal("too few lines")
+ }
+ check(t, lines[0], "src/pkg/runtime/debug/stack_test.go")
+ check(t, lines[1], "\t*T.ptrmethod: return Stack()")
+ check(t, lines[2], "src/pkg/runtime/debug/stack_test.go")
+ check(t, lines[3], "\tT.method: return t.ptrmethod()")
+ check(t, lines[4], "src/pkg/runtime/debug/stack_test.go")
+ check(t, lines[5], "\tTestStack: b := T(0).method()")
+ check(t, lines[6], "src/pkg/testing/testing.go")
+}
+
+func check(t *testing.T, line, has string) {
+ if strings.Index(line, has) < 0 {
+ t.Errorf("expected %q in %q", has, line)
+ }
+}
diff --git a/libgo/go/runtime/error.go b/libgo/go/runtime/error.go
new file mode 100644
index 000000000..2515722aa
--- /dev/null
+++ b/libgo/go/runtime/error.go
@@ -0,0 +1,133 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// The Error interface identifies a run time error.
+type Error interface {
+ String() string
+
+ // RuntimeError is a no-op function but
+ // serves to distinguish types that are runtime
+ // errors from ordinary os.Errors: a type is a
+ // runtime error if it has a RuntimeError method.
+ RuntimeError()
+}
+
+// A TypeAssertionError explains a failed type assertion.
+type TypeAssertionError struct {
+ interfaceType *Type // interface had this type
+ concreteType *Type // concrete value had this type
+ assertedType *Type // asserted type
+ interfaceString string
+ concreteString string
+ assertedString string
+ missingMethod string // one method needed by Interface, missing from Concrete
+}
+
+func (*TypeAssertionError) RuntimeError() {}
+
+func (e *TypeAssertionError) String() string {
+ inter := e.interfaceString
+ if inter == "" {
+ inter = "interface"
+ }
+ if e.concreteType == nil {
+ return "interface conversion: " + inter + " is nil, not " + e.assertedString
+ }
+ if e.missingMethod == "" {
+ return "interface conversion: " + inter + " is " + e.concreteString +
+ ", not " + e.assertedString
+ }
+ return "interface conversion: " + e.concreteString + " is not " + e.assertedString +
+ ": missing method " + e.missingMethod
+}
+
+// Concrete returns the type of the concrete value in the failed type assertion.
+// If the interface value was nil, Concrete returns nil.
+func (e *TypeAssertionError) Concrete() *Type {
+ return e.concreteType
+}
+
+// Asserted returns the type incorrectly asserted by the type assertion.
+func (e *TypeAssertionError) Asserted() *Type {
+ return e.assertedType
+}
+
+// If the type assertion is to an interface type, MissingMethod returns the
+// name of a method needed to satisfy that interface type but not implemented
+// by Concrete. If there are multiple such methods,
+// MissingMethod returns one; which one is unspecified.
+// If the type assertion is not to an interface type, MissingMethod returns an empty string.
+func (e *TypeAssertionError) MissingMethod() string {
+ return e.missingMethod
+}
+
+// For calling from C.
+func NewTypeAssertionError(pt1, pt2, pt3 *Type, ps1, ps2, ps3 *string, pmeth *string, ret *interface{}) {
+ var t1, t2, t3 *Type
+ var s1, s2, s3, meth string
+
+ if pt1 != nil {
+ t1 = pt1
+ }
+ if pt2 != nil {
+ t2 = pt2
+ }
+ if pt3 != nil {
+ t3 = pt3
+ }
+ if ps1 != nil {
+ s1 = *ps1
+ }
+ if ps2 != nil {
+ s2 = *ps2
+ }
+ if ps3 != nil {
+ s3 = *ps3
+ }
+ if pmeth != nil {
+ meth = *pmeth
+ }
+ *ret = &TypeAssertionError{t1, t2, t3, s1, s2, s3, meth}
+}
+
+// An errorString represents a runtime error described by a single string.
+type errorString string
+
+func (e errorString) RuntimeError() {}
+
+func (e errorString) String() string {
+ return "runtime error: " + string(e)
+}
+
+// For calling from C.
+func NewErrorString(s string, ret *interface{}) {
+ *ret = errorString(s)
+}
+
+type stringer interface {
+ String() string
+}
+
+func typestring(interface{}) string
+
+// For calling from C.
+// Prints an argument passed to panic.
+// There's room for arbitrary complexity here, but we keep it
+// simple and handle just a few important cases: int, string, and Stringer.
+func Printany(i interface{}) {
+ switch v := i.(type) {
+ case nil:
+ print("nil")
+ case stringer:
+ print(v.String())
+ case int:
+ print(v)
+ case string:
+ print(v)
+ default:
+ print("(", typestring(i), ") ", i)
+ }
+}
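
To see a TypeAssertionError arise in practice, a small sketch in this library's pre-Go 1 style (runtime.Error still has a String method here):

package main

import "runtime"

func main() {
	defer func() {
		// The recovered value satisfies runtime.Error; its String reads
		// like "interface conversion: interface is int, not string".
		if e, ok := recover().(runtime.Error); ok {
			println(e.String())
		}
	}()
	var i interface{} = 42
	_ = i.(string) // concrete type int, asserted type string
}
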
diff --git a/libgo/go/runtime/export_test.go b/libgo/go/runtime/export_test.go
new file mode 100644
index 000000000..58631c7b4
--- /dev/null
+++ b/libgo/go/runtime/export_test.go
@@ -0,0 +1,17 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Export guts for testing.
+
+package runtime
+
+var Fadd64 = fadd64
+var Fsub64 = fsub64
+var Fmul64 = fmul64
+var Fdiv64 = fdiv64
+var F64to32 = f64to32
+var F32to64 = f32to64
+var Fcmp64 = fcmp64
+var Fintto64 = fintto64
+var F64toint = f64toint
diff --git a/libgo/go/runtime/extern.go b/libgo/go/runtime/extern.go
new file mode 100644
index 000000000..77c3e8e3a
--- /dev/null
+++ b/libgo/go/runtime/extern.go
@@ -0,0 +1,163 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ The runtime package contains operations that interact with Go's runtime system,
+ such as functions to control goroutines. It also includes the low-level type information
+ used by the reflect package; see reflect's documentation for the programmable
+ interface to the run-time type system.
+*/
+package runtime
+
+// Gosched yields the processor, allowing other goroutines to run. It does not
+// suspend the current goroutine, so execution resumes automatically.
+func Gosched()
+
+// Goexit terminates the goroutine that calls it. No other goroutine is affected.
+// Goexit runs all deferred calls before terminating the goroutine.
+func Goexit()
+
+// Caller reports file and line number information about function invocations on
+// the calling goroutine's stack. The argument skip is the number of stack frames to
+// ascend, with 0 identifying the caller of Caller. The return values report the
+// program counter, file name, and line number within the file of the corresponding
+// call. The boolean ok is false if it was not possible to recover the information.
+func Caller(skip int) (pc uintptr, file string, line int, ok bool)
+
+// Callers fills the slice pc with the program counters of function invocations
+// on the calling goroutine's stack. The argument skip is the number of stack frames
+// to skip before recording in pc, with 0 starting at the caller of Callers.
+// It returns the number of entries written to pc.
+func Callers(skip int, pc []uintptr) int
+
+// FuncForPC returns a *Func describing the function that contains the
+// given program counter address, or else nil.
+func FuncForPC(pc uintptr) *Func
+
+// Name returns the name of the function.
+func (f *Func) Name() string { return f.name }
+
+// Entry returns the entry address of the function.
+func (f *Func) Entry() uintptr { return f.entry }
+
+// FileLine returns the file name and line number of the
+// source code corresponding to the program counter pc.
+// The result will not be accurate if pc is not a program
+// counter within f.
+func (f *Func) FileLine(pc uintptr) (file string, line int) {
+ // NOTE(rsc): If you edit this function, also edit
+ // symtab.c:/^funcline.
+ var pcQuant uintptr = 1
+ if GOARCH == "arm" {
+ pcQuant = 4
+ }
+
+ targetpc := pc
+ p := f.pcln
+ pc = f.pc0
+ line = int(f.ln0)
+ file = f.src
+ for i := 0; i < len(p) && pc <= targetpc; i++ {
+ switch {
+ case p[i] == 0:
+ line += int(p[i+1]<<24) | int(p[i+2]<<16) | int(p[i+3]<<8) | int(p[i+4])
+ i += 4
+ case p[i] <= 64:
+ line += int(p[i])
+ case p[i] <= 128:
+ line -= int(p[i] - 64)
+ default:
+ pc += pcQuant * uintptr(p[i]-129)
+ }
+ pc += pcQuant
+ }
+ return
+}
+
+// mid returns the current os thread (m) id.
+func mid() uint32
+
+// Semacquire waits until *s > 0 and then atomically decrements it.
+// It is intended as a simple sleep primitive for use by the synchronization
+// library and should not be used directly.
+func Semacquire(s *uint32)
+
+// Semrelease atomically increments *s and notifies a waiting goroutine
+// if one is blocked in Semacquire.
+// It is intended as a simple wakeup primitive for use by the synchronization
+// library and should not be used directly.
+func Semrelease(s *uint32)
+
+// SetFinalizer sets the finalizer associated with x to f.
+// When the garbage collector finds an unreachable block
+// with an associated finalizer, it clears the association and runs
+// f(x) in a separate goroutine. This makes x reachable again, but
+// now without an associated finalizer. Assuming that SetFinalizer
+// is not called again, the next time the garbage collector sees
+// that x is unreachable, it will free x.
+//
+// SetFinalizer(x, nil) clears any finalizer associated with x.
+//
+// The argument x must be a pointer to an object allocated by
+// calling new or by taking the address of a composite literal.
+// The argument f must be a function that takes a single argument
+// of x's type and returns no arguments. If either of these is not
+// true, SetFinalizer aborts the program.
+//
+// Finalizers are run in dependency order: if A points at B, both have
+// finalizers, and they are otherwise unreachable, only the finalizer
+// for A runs; once A is freed, the finalizer for B can run.
+// If a cyclic structure includes a block with a finalizer, that
+// cycle is not guaranteed to be garbage collected and the finalizer
+// is not guaranteed to run, because there is no ordering that
+// respects the dependencies.
+//
+// The finalizer for x is scheduled to run at some arbitrary time after
+// x becomes unreachable.
+// There is no guarantee that finalizers will run before a program exits,
+// so typically they are useful only for releasing non-memory resources
+// associated with an object during a long-running program.
+// For example, an os.File object could use a finalizer to close the
+// associated operating system file descriptor when a program discards
+// an os.File without calling Close, but it would be a mistake
+// to depend on a finalizer to flush an in-memory I/O buffer such as a
+// bufio.Writer, because the buffer would not be flushed at program exit.
+//
+// A single goroutine runs all finalizers for a program, sequentially.
+// If a finalizer must run for a long time, it should do so by starting
+// a new goroutine.
+//
+// TODO(rsc): allow f to have (ignored) return values
+//
+func SetFinalizer(x, f interface{})
+
+func getgoroot() string
+
+// GOROOT returns the root of the Go tree.
+// It uses the GOROOT environment variable, if set,
+// or else the root used during the Go build.
+func GOROOT() string {
+ s := getgoroot()
+ if s != "" {
+ return s
+ }
+ return defaultGoroot
+}
+
+// Version returns the Go tree's version string.
+// It is either a sequence number or, when possible,
+// a release tag like "release.2010-03-04".
+// A trailing + indicates that the tree had local modifications
+// at the time of the build.
+func Version() string {
+ return theVersion
+}
+
+// GOOS is the Go tree's operating system target:
+// one of darwin, freebsd, linux, and so on.
+const GOOS string = theGoos
+
+// GOARCH is the Go tree's architecture target:
+// 386, amd64, or arm.
+const GOARCH string = theGoarch
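
Caller's skip argument counts frames to ascend; a minimal sketch in which a helper reports its own call site (hypothetical names):

package main

import "runtime"

// where prints the location of its caller: skip=1 ascends one frame
// past where itself.
func where() {
	pc, file, line, ok := runtime.Caller(1)
	if !ok {
		return
	}
	name := "???"
	if f := runtime.FuncForPC(pc); f != nil {
		name = f.Name()
	}
	println(name, file, line)
}

func main() { where() }
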
diff --git a/libgo/go/runtime/hashmap_defs.go b/libgo/go/runtime/hashmap_defs.go
new file mode 100644
index 000000000..57780df87
--- /dev/null
+++ b/libgo/go/runtime/hashmap_defs.go
@@ -0,0 +1,51 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Go definitions of internal structures. Master is hashmap.[c,h]
+
+package runtime
+
+type hash_hash uintptr
+
+type hash_entry struct {
+ hash hash_hash
+ key byte // dwarf.c substitutes the real type
+ val byte // for key and val
+}
+
+type hash_subtable struct {
+ power uint8
+ used uint8
+ datasize uint8
+ max_probes uint8
+ limit_bytes int16
+ end *hash_entry
+ entry hash_entry // TODO: [0]hash_entry
+}
+
+type hash struct {
+ count uint32
+ datasize uint8
+ max_power uint8
+ max_probes uint8
+ indirectval uint8
+ changes int32
+ data_hash func(uint32, uintptr) hash_hash
+ data_eq func(uint32, uintptr, uintptr) uint32
+ data_del func(uint32, uintptr, uintptr)
+ st *hash_subtable
+ keysize uint32
+ valsize uint32
+ datavo uint32
+ ko0 uint32
+ vo0 uint32
+ ko1 uint32
+ vo1 uint32
+ po1 uint32
+ ko2 uint32
+ vo2 uint32
+ po2 uint32
+ keyalg *alg
+ valalg *alg
+}
diff --git a/libgo/go/runtime/iface_defs.go b/libgo/go/runtime/iface_defs.go
new file mode 100644
index 000000000..69d52ef9a
--- /dev/null
+++ b/libgo/go/runtime/iface_defs.go
@@ -0,0 +1,18 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+/*
+ * Must match iface.c:/Itable and compilers.
+ * NOTE: type.go has an Itable, that is the version of Itab used by the reflection code.
+ */
+type itab struct {
+ Itype *Type
+ Type *Type
+ link *itab
+ bad int32
+ unused int32
+ Fn func() // TODO: [0]func()
+}
diff --git a/libgo/go/runtime/malloc_defs.go b/libgo/go/runtime/malloc_defs.go
new file mode 100644
index 000000000..11d6627e1
--- /dev/null
+++ b/libgo/go/runtime/malloc_defs.go
@@ -0,0 +1,130 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Go definitions of internal structures. Master is malloc.h
+
+package runtime
+
+import "unsafe"
+
+const (
+ pageShift = 12
+ pageSize = 1 << pageShift
+ pageMask = pageSize - 1
+)
+
+type pageID uintptr
+
+const (
+ numSizeClasses = 67
+ maxSmallSize = 32 << 10
+ fixAllocChunk = 128 << 10
+ maxMCacheListLen = 256
+ maxMCacheSize = 2 << 20
+ maxMHeapList = 1 << 8 // 1 << (20 - pageShift)
+ heapAllocChunk = 1 << 20
+)
+
+type mLink struct {
+ next *mLink
+}
+
+type fixAlloc struct {
+ size uintptr
+ alloc func(uintptr)
+ first func(unsafe.Pointer, *byte)
+ arg unsafe.Pointer
+ list *mLink
+ chunk *byte
+ nchunk uint32
+ inuse uintptr
+ sys uintptr
+}
+
+
+// MStats? used to be in extern.go
+
+type mCacheList struct {
+ list *mLink
+ nlist uint32
+ nlistmin uint32
+}
+
+type mCache struct {
+ list [numSizeClasses]mCacheList
+ size uint64
+ local_alloc int64
+ local_objects int64
+ next_sample int32
+}
+
+type mSpan struct {
+ next *mSpan
+ prev *mSpan
+ allnext *mSpan
+ start pageID
+ npages uintptr
+ freelist *mLink
+ ref uint32
+ sizeclass uint32
+ state uint32
+ // union {
+ gcref *uint32 // sizeclass > 0
+ // gcref0 uint32; // sizeclass == 0
+ // }
+}
+
+type mCentral struct {
+ // lock
+ sizeclass int32
+ nonempty mSpan
+ empty mSpan
+ nfree int32
+}
+
+type mHeap struct {
+ // lock
+ free [maxMHeapList]mSpan
+ large mSpan
+ allspans *mSpan
+ // map_ mHeapMap
+ min *byte
+ max *byte
+ closure_min *byte
+ closure_max *byte
+
+ central [numSizeClasses]struct {
+ pad [64]byte
+ // union: mCentral
+ }
+
+ spanalloc fixAlloc
+ cachealloc fixAlloc
+}
+
+const (
+ refFree = iota
+ refStack
+ refNone
+ refSome
+ refcountOverhead = 4
+ refNoPointers = 0x80000000
+ refHasFinalizer = 0x40000000
+ refProfiled = 0x20000000
+ refNoProfiling = 0x10000000
+ refFlags = 0xFFFF0000
+)
+
+const (
+ mProf_None = iota
+ mProf_Sample
+ mProf_All
+)
+
+type finalizer struct {
+ next *finalizer
+ fn func(unsafe.Pointer)
+ arg unsafe.Pointer
+ nret int32
+}
diff --git a/libgo/go/runtime/mheapmap32_defs.go b/libgo/go/runtime/mheapmap32_defs.go
new file mode 100644
index 000000000..755725b46
--- /dev/null
+++ b/libgo/go/runtime/mheapmap32_defs.go
@@ -0,0 +1,23 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ mHeapMap_Level1Bits = 10
+ mHeapMap_Level2Bits = 10
+ mHeapMap_TotalBits = mHeapMap_Level1Bits + mHeapMap_Level2Bits
+
+ mHeapMap_Level1Mask = (1 << mHeapMap_Level1Bits) - 1
+ mHeapMap_Level2Mask = (1 << mHeapMap_Level2Bits) - 1
+)
+
+type mHeapMap struct {
+ allocator func(uintptr)
+ p [1 << mHeapMap_Level1Bits]*mHeapMapNode2
+}
+
+type mHeapMapNode2 struct {
+ s [1 << mHeapMap_Level2Bits]*mSpan
+}
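
With pageShift = 12 from malloc_defs.go, the arithmetic works out: the two levels index 10 + 10 = 20 bits of page number, and 20 + 12 = 32 bits covers the entire 32-bit address space, one *mSpan slot per page.
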
diff --git a/libgo/go/runtime/mheapmap64_defs.go b/libgo/go/runtime/mheapmap64_defs.go
new file mode 100644
index 000000000..d7ba2b420
--- /dev/null
+++ b/libgo/go/runtime/mheapmap64_defs.go
@@ -0,0 +1,31 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ mHeapMap_Level1Bits = 18
+ mHeapMap_Level2Bits = 18
+ mHeapMap_Level3Bits = 16
+ mHeapMap_TotalBits = mHeapMap_Level1Bits + mHeapMap_Level2Bits + mHeapMap_Level3Bits
+
+ mHeapMap_Level1Mask = (1 << mHeapMap_Level1Bits) - 1
+ mHeapMap_Level2Mask = (1 << mHeapMap_Level2Bits) - 1
+ mHeapMap_Level3Mask = (1 << mHeapMap_Level3Bits) - 1
+)
+
+type mHeapMap struct {
+ allocator func(uintptr)
+ p [1 << mHeapMap_Level1Bits]*mHeapMapNode2
+}
+
+
+type mHeapMapNode2 struct {
+ p [1 << mHeapMap_Level2Bits]*mHeapMapNode3
+}
+
+
+type mHeapMapNode3 struct {
+ s [1 << mHeapMap_Level3Bits]*mSpan
+}
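
Here the three levels index 18 + 18 + 16 = 52 bits of page number, and 52 + 12 = 64 bits once the page offset is added, so the map spans the full 64-bit address space; splitting it into levels presumably lets the allocator populate only the regions actually touched.
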
diff --git a/libgo/go/runtime/pprof/pprof.go b/libgo/go/runtime/pprof/pprof.go
new file mode 100644
index 000000000..d0cc73089
--- /dev/null
+++ b/libgo/go/runtime/pprof/pprof.go
@@ -0,0 +1,108 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pprof writes runtime profiling data in the format expected
+// by the pprof visualization tool.
+// For more information about pprof, see
+// http://code.google.com/p/google-perftools/.
+package pprof
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+)
+
+// WriteHeapProfile writes a pprof-formatted heap profile to w.
+// If a write to w returns an error, WriteHeapProfile returns that error.
+// Otherwise, WriteHeapProfile returns nil.
+func WriteHeapProfile(w io.Writer) os.Error {
+ // Find out how many records there are (MemProfile(nil, false)),
+ // allocate that many records, and get the data.
+ // There's a race—more records might be added between
+ // the two calls—so allocate a few extra records for safety
+ // and also try again if we're very unlucky.
+ // The loop should only execute one iteration in the common case.
+ var p []runtime.MemProfileRecord
+ n, ok := runtime.MemProfile(nil, false)
+ for {
+ // Allocate room for a slightly bigger profile,
+ // in case a few more entries have been added
+ // since the call to MemProfile.
+ p = make([]runtime.MemProfileRecord, n+50)
+ n, ok = runtime.MemProfile(p, false)
+ if ok {
+ p = p[0:n]
+ break
+ }
+ // Profile grew; try again.
+ }
+
+ var total runtime.MemProfileRecord
+ for i := range p {
+ r := &p[i]
+ total.AllocBytes += r.AllocBytes
+ total.AllocObjects += r.AllocObjects
+ total.FreeBytes += r.FreeBytes
+ total.FreeObjects += r.FreeObjects
+ }
+
+ // Technically the rate is MemProfileRate not 2*MemProfileRate,
+ // but early versions of the C++ heap profiler reported 2*MemProfileRate,
+ // so that's what pprof has come to expect.
+ b := bufio.NewWriter(w)
+ fmt.Fprintf(b, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
+ total.InUseObjects(), total.InUseBytes(),
+ total.AllocObjects, total.AllocBytes,
+ 2*runtime.MemProfileRate)
+
+ for i := range p {
+ r := &p[i]
+ fmt.Fprintf(b, "%d: %d [%d: %d] @",
+ r.InUseObjects(), r.InUseBytes(),
+ r.AllocObjects, r.AllocBytes)
+ for _, pc := range r.Stack() {
+ fmt.Fprintf(b, " %#x", pc)
+ }
+ fmt.Fprintf(b, "\n")
+ }
+
+ // Print memstats information too.
+ // Pprof will ignore, but useful for people.
+ s := &runtime.MemStats
+ fmt.Fprintf(b, "\n# runtime.MemStats\n")
+ fmt.Fprintf(b, "# Alloc = %d\n", s.Alloc)
+ fmt.Fprintf(b, "# TotalAlloc = %d\n", s.TotalAlloc)
+ fmt.Fprintf(b, "# Sys = %d\n", s.Sys)
+ fmt.Fprintf(b, "# Lookups = %d\n", s.Lookups)
+ fmt.Fprintf(b, "# Mallocs = %d\n", s.Mallocs)
+
+ fmt.Fprintf(b, "# HeapAlloc = %d\n", s.HeapAlloc)
+ fmt.Fprintf(b, "# HeapSys = %d\n", s.HeapSys)
+ fmt.Fprintf(b, "# HeapIdle = %d\n", s.HeapIdle)
+ fmt.Fprintf(b, "# HeapInuse = %d\n", s.HeapInuse)
+
+ fmt.Fprintf(b, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
+ fmt.Fprintf(b, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
+ fmt.Fprintf(b, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
+ fmt.Fprintf(b, "# MHeapMapSys = %d\n", s.MHeapMapSys)
+ fmt.Fprintf(b, "# BuckHashSys = %d\n", s.BuckHashSys)
+
+ fmt.Fprintf(b, "# NextGC = %d\n", s.NextGC)
+ fmt.Fprintf(b, "# PauseNs = %d\n", s.PauseNs)
+ fmt.Fprintf(b, "# NumGC = %d\n", s.NumGC)
+ fmt.Fprintf(b, "# EnableGC = %v\n", s.EnableGC)
+ fmt.Fprintf(b, "# DebugGC = %v\n", s.DebugGC)
+
+ fmt.Fprintf(b, "# BySize = Size * (Active = Mallocs - Frees)\n")
+ fmt.Fprintf(b, "# (Excluding large blocks.)\n")
+ for _, t := range s.BySize {
+ if t.Mallocs > 0 {
+ fmt.Fprintf(b, "# %d * (%d = %d - %d)\n", t.Size, t.Mallocs-t.Frees, t.Mallocs, t.Frees)
+ }
+ }
+ return b.Flush()
+}
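
A minimal caller sketch ("heap.prof" is an arbitrary file name for illustration):

package main

import (
	"os"
	"runtime/pprof"
)

func main() {
	f, err := os.Create("heap.prof")
	if err != nil {
		println("create failed")
		return
	}
	defer f.Close()
	if err := pprof.WriteHeapProfile(f); err != nil {
		println("profile write failed")
	}
}
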
diff --git a/libgo/go/runtime/runtime_defs.go b/libgo/go/runtime/runtime_defs.go
new file mode 100644
index 000000000..deea320b5
--- /dev/null
+++ b/libgo/go/runtime/runtime_defs.go
@@ -0,0 +1,200 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Go definitions of internal structures. Master is runtime.h
+
+// TODO(lvd): automate conversion to all the _defs.go files
+
+package runtime
+
+import "unsafe"
+
+const (
+ gidle = iota
+ grunnable
+ grunning
+ gsyscall
+ gwaiting
+ gmoribund
+ gdead
+ grecovery
+)
+
+// const ( Structrnd = sizeof(uintptr) )
+
+type string_ struct {
+ str *byte
+ len int32
+}
+
+type iface struct {
+ // tab *itab
+ data unsafe.Pointer
+}
+
+type eface struct {
+ type_ *Type
+ data unsafe.Pointer
+}
+
+type complex64 struct {
+ real float32
+ imag float32
+}
+
+type complex128 struct {
+ real float64
+ imag float64
+}
+
+type slice struct {
+ array *byte
+ len uint32
+ cap uint32
+}
+
+type gobuf struct {
+ sp unsafe.Pointer
+ pc unsafe.Pointer
+ g *g_
+}
+
+type g_ struct {
+ stackguard unsafe.Pointer
+ stackbase unsafe.Pointer
+ defer_ *defer_
+ panic_ *panic_
+ sched gobuf
+ stack0 unsafe.Pointer
+ entry unsafe.Pointer
+ alllink *g_
+ param unsafe.Pointer
+ status int16
+ goid int32
+ selgen uint32
+ schedlink *g_
+ readyonstop bool
+ ispanic bool
+ m *m_
+ lockedm *m_
+ sig int32
+ sigcode0 uintptr
+ sigcode1 uintptr
+}
+
+type m_ struct {
+ g0 *g_
+ morepc unsafe.Pointer
+ moreargp unsafe.Pointer
+ morebuf gobuf
+ moreframesize uint32
+ moreargsize uint32
+ cret uintptr
+ procid uint64
+ gsignal *g_
+ tls [8]uint32
+ sched gobuf
+ curg *g_
+ id int32
+ mallocing int32
+ gcing int32
+ locks int32
+ nomemprof int32
+ waitnextg int32
+ // havenextg note
+ nextg *g_
+ alllink *m_
+ schedlink *m_
+ machport uint32
+ mcache *mCache
+ lockedg *g_
+ freg [8]uint64
+ // gostack unsafe.Pointer // __WINDOWS__
+}
+
+type stktop struct {
+ stackguard *uint8
+ stackbase *uint8
+ gobuf gobuf
+ args uint32
+ fp *uint8
+ free bool
+ panic_ bool
+}
+
+type alg struct {
+ hash func(uint32, unsafe.Pointer) uintptr
+ equal func(uint32, unsafe.Pointer, unsafe.Pointer) uint32
+ print func(uint32, unsafe.Pointer)
+ copy func(uint32, unsafe.Pointer, unsafe.Pointer)
+}
+
+type sigtab struct {
+ flags int32
+ name *int8
+}
+
+const (
+ sigCatch = (1 << iota)
+ sigIgnore
+ sigRestart
+ sigQueue
+ sigPanic
+)
+
+type Func struct {
+ name string
+ typ string
+ src string
+ pcln []byte
+ entry uintptr
+ pc0 uintptr
+ ln0 int32
+ frame int32
+ args int32
+ locals int32
+}
+
+const (
+ aMEM = iota
+ aNOEQ
+ aSTRING
+ aINTER
+ aNILINTER
+ aMEMWORD
+ amax
+)
+
+type defer_ struct {
+ siz int32
+ sp unsafe.Pointer
+ pc unsafe.Pointer
+ fn unsafe.Pointer
+ link *defer_
+ args [8]byte // padded to actual size
+}
+
+type panic_ struct {
+ arg eface
+ stackbase unsafe.Pointer
+ link *panic_
+ recovered bool
+}
+
+/*
+ * External data.
+ */
+
+var (
+ algarray [amax]alg
+ emptystring string
+ allg *g_
+ allm *m_
+ goidgen int32
+ gomaxprocs int32
+ panicking int32
+ fd int32
+ gcwaiting int32
+ goos *int8
+)
diff --git a/libgo/go/runtime/sig.go b/libgo/go/runtime/sig.go
new file mode 100644
index 000000000..6d560b900
--- /dev/null
+++ b/libgo/go/runtime/sig.go
@@ -0,0 +1,16 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// Sigrecv returns a bitmask of signals that have arrived since the last call to Sigrecv.
+// It blocks until at least one signal arrives.
+func Sigrecv() uint32
+
+// Signame returns a string describing the signal, or "" if the signal is unknown.
+func Signame(sig int32) string
+
+// Siginit enables receipt of signals via Sigrecv. It should typically
+// be called during initialization.
+func Siginit()
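
Taken together, the documented protocol is: call Siginit once, then loop on Sigrecv and decode the returned bitmask. A hedged sketch of that loop (roughly how the old os/signal machinery consumed these hooks; the 32-signal bound is an assumption):

package main

import "runtime"

func main() {
	runtime.Siginit()
	for {
		m := runtime.Sigrecv() // blocks until at least one signal arrives
		for sig := int32(0); sig < 32; sig++ {
			if m&(1<<uint(sig)) != 0 {
				println(runtime.Signame(sig))
			}
		}
	}
}
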
diff --git a/libgo/go/runtime/softfloat64.go b/libgo/go/runtime/softfloat64.go
new file mode 100644
index 000000000..d9bbe5def
--- /dev/null
+++ b/libgo/go/runtime/softfloat64.go
@@ -0,0 +1,498 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Software IEEE754 64-bit floating point.
+// Only referred to (and thus linked in) by arm port
+// and by gotest in this directory.
+
+package runtime
+
+const (
+ mantbits64 uint = 52
+ expbits64 uint = 11
+ bias64 = -1<<(expbits64-1) + 1
+
+ nan64 uint64 = (1<<expbits64-1)<<mantbits64 + 1
+ inf64 uint64 = (1<<expbits64 - 1) << mantbits64
+ neg64 uint64 = 1 << (expbits64 + mantbits64)
+
+ mantbits32 uint = 23
+ expbits32 uint = 8
+ bias32 = -1<<(expbits32-1) + 1
+
+ nan32 uint32 = (1<<expbits32-1)<<mantbits32 + 1
+ inf32 uint32 = (1<<expbits32 - 1) << mantbits32
+ neg32 uint32 = 1 << (expbits32 + mantbits32)
+)
+
+func funpack64(f uint64) (sign, mant uint64, exp int, inf, nan bool) {
+ sign = f & (1 << (mantbits64 + expbits64))
+ mant = f & (1<<mantbits64 - 1)
+ exp = int(f>>mantbits64) & (1<<expbits64 - 1)
+
+ switch exp {
+ case 1<<expbits64 - 1:
+ if mant != 0 {
+ nan = true
+ return
+ }
+ inf = true
+ return
+
+ case 0:
+ // denormalized
+ if mant != 0 {
+ exp += bias64 + 1
+ for mant < 1<<mantbits64 {
+ mant <<= 1
+ exp--
+ }
+ }
+
+ default:
+ // add implicit top bit
+ mant |= 1 << mantbits64
+ exp += bias64
+ }
+ return
+}
+
+func funpack32(f uint32) (sign, mant uint32, exp int, inf, nan bool) {
+ sign = f & (1 << (mantbits32 + expbits32))
+ mant = f & (1<<mantbits32 - 1)
+ exp = int(f>>mantbits32) & (1<<expbits32 - 1)
+
+ switch exp {
+ case 1<<expbits32 - 1:
+ if mant != 0 {
+ nan = true
+ return
+ }
+ inf = true
+ return
+
+ case 0:
+ // denormalized
+ if mant != 0 {
+ exp += bias32 + 1
+ for mant < 1<<mantbits32 {
+ mant <<= 1
+ exp--
+ }
+ }
+
+ default:
+ // add implicit top bit
+ mant |= 1 << mantbits32
+ exp += bias32
+ }
+ return
+}
+
+func fpack64(sign, mant uint64, exp int, trunc uint64) uint64 {
+ mant0, exp0, trunc0 := mant, exp, trunc
+ if mant == 0 {
+ return sign
+ }
+ for mant < 1<<mantbits64 {
+ mant <<= 1
+ exp--
+ }
+ for mant >= 4<<mantbits64 {
+ trunc |= mant & 1
+ mant >>= 1
+ exp++
+ }
+ if mant >= 2<<mantbits64 {
+ if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
+ mant++
+ if mant >= 4<<mantbits64 {
+ mant >>= 1
+ exp++
+ }
+ }
+ mant >>= 1
+ exp++
+ }
+ if exp >= 1<<expbits64-1+bias64 {
+ return sign ^ inf64
+ }
+ if exp < bias64+1 {
+ if exp < bias64-int(mantbits64) {
+ return sign | 0
+ }
+ // repeat expecting denormal
+ mant, exp, trunc = mant0, exp0, trunc0
+ for exp < bias64 {
+ trunc |= mant & 1
+ mant >>= 1
+ exp++
+ }
+ if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
+ mant++
+ }
+ mant >>= 1
+ exp++
+ if mant < 1<<mantbits64 {
+ return sign | mant
+ }
+ }
+ return sign | uint64(exp-bias64)<<mantbits64 | mant&(1<<mantbits64-1)
+}
+
+func fpack32(sign, mant uint32, exp int, trunc uint32) uint32 {
+ mant0, exp0, trunc0 := mant, exp, trunc
+ if mant == 0 {
+ return sign
+ }
+ for mant < 1<<mantbits32 {
+ mant <<= 1
+ exp--
+ }
+ for mant >= 4<<mantbits32 {
+ trunc |= mant & 1
+ mant >>= 1
+ exp++
+ }
+ if mant >= 2<<mantbits32 {
+ if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
+ mant++
+ if mant >= 4<<mantbits32 {
+ mant >>= 1
+ exp++
+ }
+ }
+ mant >>= 1
+ exp++
+ }
+ if exp >= 1<<expbits32-1+bias32 {
+ return sign ^ inf32
+ }
+ if exp < bias32+1 {
+ if exp < bias32-int(mantbits32) {
+ return sign | 0
+ }
+ // repeat expecting denormal
+ mant, exp, trunc = mant0, exp0, trunc0
+ for exp < bias32 {
+ trunc |= mant & 1
+ mant >>= 1
+ exp++
+ }
+ if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
+ mant++
+ }
+ mant >>= 1
+ exp++
+ if mant < 1<<mantbits32 {
+ return sign | mant
+ }
+ }
+ return sign | uint32(exp-bias32)<<mantbits32 | mant&(1<<mantbits32-1)
+}
+
+func fadd64(f, g uint64) uint64 {
+ fs, fm, fe, fi, fn := funpack64(f)
+ gs, gm, ge, gi, gn := funpack64(g)
+
+ // Special cases.
+ switch {
+ case fn || gn: // NaN + x or x + NaN = NaN
+ return nan64
+
+ case fi && gi && fs != gs: // +Inf + -Inf or -Inf + +Inf = NaN
+ return nan64
+
+ case fi: // ±Inf + g = ±Inf
+ return f
+
+ case gi: // f + ±Inf = ±Inf
+ return g
+
+ case fm == 0 && gm == 0 && fs != 0 && gs != 0: // -0 + -0 = -0
+ return f
+
+ case fm == 0: // 0 + g = g but 0 + -0 = +0
+ if gm == 0 {
+ g ^= gs
+ }
+ return g
+
+ case gm == 0: // f + 0 = f
+ return f
+
+ }
+
+ if fe < ge || fe == ge && fm < gm {
+ f, g, fs, fm, fe, gs, gm, ge = g, f, gs, gm, ge, fs, fm, fe
+ }
+
+ shift := uint(fe - ge)
+ fm <<= 2
+ gm <<= 2
+ trunc := gm & (1<<shift - 1)
+ gm >>= shift
+ if fs == gs {
+ fm += gm
+ } else {
+ fm -= gm
+ if trunc != 0 {
+ fm--
+ }
+ }
+ if fm == 0 {
+ fs = 0
+ }
+ return fpack64(fs, fm, fe-2, trunc)
+}
+
+func fsub64(f, g uint64) uint64 {
+ return fadd64(f, fneg64(g))
+}
+
+func fneg64(f uint64) uint64 {
+ return f ^ (1 << (mantbits64 + expbits64))
+}
+
+func fmul64(f, g uint64) uint64 {
+ fs, fm, fe, fi, fn := funpack64(f)
+ gs, gm, ge, gi, gn := funpack64(g)
+
+ // Special cases.
+ switch {
+ case fn || gn: // NaN * g or f * NaN = NaN
+ return nan64
+
+ case fi && gi: // Inf * Inf = Inf (with sign adjusted)
+ return f ^ gs
+
+ case fi && gm == 0, fm == 0 && gi: // 0 * Inf = Inf * 0 = NaN
+ return nan64
+
+ case fm == 0: // 0 * x = 0 (with sign adjusted)
+ return f ^ gs
+
+ case gm == 0: // x * 0 = 0 (with sign adjusted)
+ return g ^ fs
+ }
+
+ // 53-bit * 53-bit = 107- or 108-bit
+ lo, hi := mullu(fm, gm)
+ shift := mantbits64 - 1
+ trunc := lo & (1<<shift - 1)
+ mant := hi<<(64-shift) | lo>>shift
+ return fpack64(fs^gs, mant, fe+ge-1, trunc)
+}
+
+func fdiv64(f, g uint64) uint64 {
+ fs, fm, fe, fi, fn := funpack64(f)
+ gs, gm, ge, gi, gn := funpack64(g)
+
+ // Special cases.
+ switch {
+ case fn || gn: // NaN / g = f / NaN = NaN
+ return nan64
+
+ case fi && gi: // ±Inf / ±Inf = NaN
+ return nan64
+
+ case !fi && !gi && fm == 0 && gm == 0: // 0 / 0 = NaN
+ return nan64
+
+ case fi, !gi && gm == 0: // Inf / g = f / 0 = Inf
+ return fs ^ gs ^ inf64
+
+	case gi, fm == 0: // f / Inf = 0 / g = 0
+ return fs ^ gs ^ 0
+ }
+ _, _, _, _ = fi, fn, gi, gn
+
+ // 53-bit<<54 / 53-bit = 53- or 54-bit.
+ shift := mantbits64 + 2
+ q, r := divlu(fm>>(64-shift), fm<<shift, gm)
+ return fpack64(fs^gs, q, fe-ge-2, r)
+}
+
+func f64to32(f uint64) uint32 {
+ fs, fm, fe, fi, fn := funpack64(f)
+ if fn {
+ return nan32
+ }
+ fs32 := uint32(fs >> 32)
+ if fi {
+ return fs32 ^ inf32
+ }
+ const d = mantbits64 - mantbits32 - 1
+ return fpack32(fs32, uint32(fm>>d), fe-1, uint32(fm&(1<<d-1)))
+}
+
+func f32to64(f uint32) uint64 {
+ const d = mantbits64 - mantbits32
+ fs, fm, fe, fi, fn := funpack32(f)
+ if fn {
+ return nan64
+ }
+ fs64 := uint64(fs) << 32
+ if fi {
+ return fs64 ^ inf64
+ }
+ return fpack64(fs64, uint64(fm)<<d, fe, 0)
+}
+
+func fcmp64(f, g uint64) (cmp int, isnan bool) {
+ fs, fm, _, fi, fn := funpack64(f)
+ gs, gm, _, gi, gn := funpack64(g)
+
+ switch {
+ case fn, gn: // flag NaN
+ return 0, true
+
+ case !fi && !gi && fm == 0 && gm == 0: // ±0 == ±0
+ return 0, false
+
+ case fs > gs: // f < 0, g > 0
+ return -1, false
+
+ case fs < gs: // f > 0, g < 0
+ return +1, false
+
+ // Same sign, not NaN.
+ // Can compare encodings directly now.
+ // Reverse for sign.
+ case fs == 0 && f < g, fs != 0 && f > g:
+ return -1, false
+
+ case fs == 0 && f > g, fs != 0 && f < g:
+ return +1, false
+ }
+
+ // f == g
+ return 0, false
+}
+
+func f64toint(f uint64) (val int64, ok bool) {
+ fs, fm, fe, fi, fn := funpack64(f)
+
+ switch {
+ case fi, fn: // NaN
+ return 0, false
+
+ case fe < -1: // f < 0.5
+ return 0, false
+
+ case fe > 63: // f >= 2^63
+ if fs != 0 && fm == 0 { // f == -2^63
+ return -1 << 63, true
+ }
+ if fs != 0 {
+ return 0, false
+ }
+ return 0, false
+ }
+
+ for fe > int(mantbits64) {
+ fe--
+ fm <<= 1
+ }
+ for fe < int(mantbits64) {
+ fe++
+ fm >>= 1
+ }
+ val = int64(fm)
+ if fs != 0 {
+ val = -val
+ }
+ return val, true
+}
+
+func fintto64(val int64) (f uint64) {
+ fs := uint64(val) & (1 << 63)
+ mant := uint64(val)
+ if fs != 0 {
+ mant = -mant
+ }
+ return fpack64(fs, mant, int(mantbits64), 0)
+}
+
+// 64x64 -> 128 multiply.
+// adapted from hacker's delight.
+func mullu(u, v uint64) (lo, hi uint64) {
+ const (
+ s = 32
+ mask = 1<<s - 1
+ )
+ u0 := u & mask
+ u1 := u >> s
+ v0 := v & mask
+ v1 := v >> s
+ w0 := u0 * v0
+ t := u1*v0 + w0>>s
+ w1 := t & mask
+ w2 := t >> s
+ w1 += u0 * v1
+ return u * v, u1*v1 + w2 + w1>>s
+}
+
+// 128/64 -> 64 quotient, 64 remainder.
+// adapted from hacker's delight
+func divlu(u1, u0, v uint64) (q, r uint64) {
+ const b = 1 << 32
+
+ if u1 >= v {
+ return 1<<64 - 1, 1<<64 - 1
+ }
+
+ // s = nlz(v); v <<= s
+ s := uint(0)
+ for v&(1<<63) == 0 {
+ s++
+ v <<= 1
+ }
+
+ vn1 := v >> 32
+ vn0 := v & (1<<32 - 1)
+ un32 := u1<<s | u0>>(64-s)
+ un10 := u0 << s
+ un1 := un10 >> 32
+ un0 := un10 & (1<<32 - 1)
+ q1 := un32 / vn1
+ rhat := un32 - q1*vn1
+
+again1:
+ if q1 >= b || q1*vn0 > b*rhat+un1 {
+ q1--
+ rhat += vn1
+ if rhat < b {
+ goto again1
+ }
+ }
+
+ un21 := un32*b + un1 - q1*v
+ q0 := un21 / vn1
+ rhat = un21 - q0*vn1
+
+again2:
+ if q0 >= b || q0*vn0 > b*rhat+un0 {
+ q0--
+ rhat += vn1
+ if rhat < b {
+ goto again2
+ }
+ }
+
+ return q1*b + q0, (un21*b + un0 - q0*v) >> s
+}
+
+// callable from C
+
+func fadd64c(f, g uint64, ret *uint64) { *ret = fadd64(f, g) }
+func fsub64c(f, g uint64, ret *uint64) { *ret = fsub64(f, g) }
+func fmul64c(f, g uint64, ret *uint64) { *ret = fmul64(f, g) }
+func fdiv64c(f, g uint64, ret *uint64) { *ret = fdiv64(f, g) }
+func fneg64c(f uint64, ret *uint64) { *ret = fneg64(f) }
+func f32to64c(f uint32, ret *uint64) { *ret = f32to64(f) }
+func f64to32c(f uint64, ret *uint32) { *ret = f64to32(f) }
+func fcmp64c(f, g uint64, ret *int, retnan *bool) { *ret, *retnan = fcmp64(f, g) }
+func fintto64c(val int64, ret *uint64) { *ret = fintto64(val) }
+func f64tointc(f uint64, ret *int64, retok *bool) { *ret, *retok = f64toint(f) }
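
The constants at the top of this file encode the IEEE 754 double layout: 1 sign bit, expbits64 = 11 exponent bits, mantbits64 = 52 mantissa bits. A worked sketch of unpacking 1.5 by hand, matching what funpack64 computes:

package main

import "math"

func main() {
	// 1.5 is binary 1.1 * 2^0.
	bits := math.Float64bits(1.5)
	sign := bits >> 63                 // 0
	exp := int(bits>>52) & (1<<11 - 1) // 1023; adding bias64 (-1023) gives 0
	mant := bits & (1<<52 - 1)         // 0x8000000000000, the fraction .1
	// funpack64 then ORs in the implicit leading 1: mant |= 1 << 52.
	println(sign, exp, mant)
}
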
diff --git a/libgo/go/runtime/softfloat64_test.go b/libgo/go/runtime/softfloat64_test.go
new file mode 100644
index 000000000..fb7f3d3c0
--- /dev/null
+++ b/libgo/go/runtime/softfloat64_test.go
@@ -0,0 +1,198 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "math"
+ "rand"
+ . "runtime"
+ "testing"
+)
+
+// turn uint64 op into float64 op
+func fop(f func(x, y uint64) uint64) func(x, y float64) float64 {
+ return func(x, y float64) float64 {
+ bx := math.Float64bits(x)
+ by := math.Float64bits(y)
+ return math.Float64frombits(f(bx, by))
+ }
+}
+
+func add(x, y float64) float64 { return x + y }
+func sub(x, y float64) float64 { return x - y }
+func mul(x, y float64) float64 { return x * y }
+func div(x, y float64) float64 { return x / y }
+
+func TestFloat64(t *testing.T) {
+ base := []float64{
+ 0,
+ math.Copysign(0, -1),
+ -1,
+ 1,
+ math.NaN(),
+ math.Inf(+1),
+ math.Inf(-1),
+ 0.1,
+ 1.5,
+ 1.9999999999999998, // all 1s mantissa
+ 1.3333333333333333, // 1.010101010101...
+ 1.1428571428571428, // 1.001001001001...
+ 1.112536929253601e-308, // first normal
+ 2,
+ 4,
+ 8,
+ 16,
+ 32,
+ 64,
+ 128,
+ 256,
+ 3,
+ 12,
+ 1234,
+ 123456,
+ -0.1,
+ -1.5,
+ -1.9999999999999998,
+ -1.3333333333333333,
+ -1.1428571428571428,
+ -2,
+ -3,
+ 1e-200,
+ 1e-300,
+ 1e-310,
+ 5e-324,
+ 1e-105,
+ 1e-305,
+ 1e+200,
+ 1e+306,
+ 1e+307,
+ 1e+308,
+ }
+ all := make([]float64, 200)
+ copy(all, base)
+ for i := len(base); i < len(all); i++ {
+ all[i] = rand.NormFloat64()
+ }
+
+ test(t, "+", add, fop(Fadd64), all)
+ test(t, "-", sub, fop(Fsub64), all)
+ if GOARCH != "386" { // 386 is not precise!
+ test(t, "*", mul, fop(Fmul64), all)
+ test(t, "/", div, fop(Fdiv64), all)
+ }
+}
+
+// 64 -hw-> 32 -hw-> 64
+func trunc32(f float64) float64 {
+ return float64(float32(f))
+}
+
+// 64 -sw->32 -hw-> 64
+func to32sw(f float64) float64 {
+ return float64(math.Float32frombits(F64to32(math.Float64bits(f))))
+}
+
+// 64 -hw->32 -sw-> 64
+func to64sw(f float64) float64 {
+ return math.Float64frombits(F32to64(math.Float32bits(float32(f))))
+}
+
+// float64 -hw-> int64 -hw-> float64
+func hwint64(f float64) float64 {
+ return float64(int64(f))
+}
+
+// float64 -hw-> int32 -hw-> float64
+func hwint32(f float64) float64 {
+ return float64(int32(f))
+}
+
+// float64 -sw-> int64 -hw-> float64
+func toint64sw(f float64) float64 {
+ i, ok := F64toint(math.Float64bits(f))
+ if !ok {
+ // There's no right answer for out of range.
+ // Match the hardware to pass the test.
+ i = int64(f)
+ }
+ return float64(i)
+}
+
+// float64 -hw-> int64 -sw-> float64
+func fromint64sw(f float64) float64 {
+ return math.Float64frombits(Fintto64(int64(f)))
+}
+
+var nerr int
+
+func err(t *testing.T, format string, args ...interface{}) {
+ t.Errorf(format, args...)
+
+ // cut errors off after a while.
+ // otherwise we spend all our time
+ // allocating memory to hold the
+ // formatted output.
+ if nerr++; nerr >= 10 {
+ t.Fatal("too many errors")
+ }
+}
+
+func test(t *testing.T, op string, hw, sw func(float64, float64) float64, all []float64) {
+ for _, f := range all {
+ for _, g := range all {
+ h := hw(f, g)
+ s := sw(f, g)
+ if !same(h, s) {
+ err(t, "%g %s %g = sw %g, hw %g\n", f, op, g, s, h)
+ }
+ testu(t, "to32", trunc32, to32sw, h)
+ testu(t, "to64", trunc32, to64sw, h)
+ testu(t, "toint64", hwint64, toint64sw, h)
+ testu(t, "fromint64", hwint64, fromint64sw, h)
+ testcmp(t, f, h)
+ testcmp(t, h, f)
+ testcmp(t, g, h)
+ testcmp(t, h, g)
+ }
+ }
+}
+
+func testu(t *testing.T, op string, hw, sw func(float64) float64, v float64) {
+ h := hw(v)
+ s := sw(v)
+ if !same(h, s) {
+ err(t, "%s %g = sw %g, hw %g\n", op, v, s, h)
+ }
+}
+
+func hwcmp(f, g float64) (cmp int, isnan bool) {
+ switch {
+ case f < g:
+ return -1, false
+ case f > g:
+ return +1, false
+ case f == g:
+ return 0, false
+ }
+ return 0, true // must be NaN
+}
+
+func testcmp(t *testing.T, f, g float64) {
+ hcmp, hisnan := hwcmp(f, g)
+ scmp, sisnan := Fcmp64(math.Float64bits(f), math.Float64bits(g))
+ if hcmp != scmp || hisnan != sisnan {
+ err(t, "cmp(%g, %g) = sw %v, %v, hw %v, %v\n", f, g, scmp, sisnan, hcmp, hisnan)
+ }
+}
+
+func same(f, g float64) bool {
+ if math.IsNaN(f) && math.IsNaN(g) {
+ return true
+ }
+ if math.Copysign(1, f) != math.Copysign(1, g) {
+ return false
+ }
+ return f == g
+}
diff --git a/libgo/go/runtime/type.go b/libgo/go/runtime/type.go
new file mode 100644
index 000000000..645e3647e
--- /dev/null
+++ b/libgo/go/runtime/type.go
@@ -0,0 +1,206 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Runtime type representation.
+ *
+ * The following files know the exact layout of these
+ * data structures and must be kept in sync with this file:
+ *
+ * ../../cmd/gc/reflect.c
+ * ../../cmd/ld/dwarf.c
+ * ../reflect/type.go
+ * type.h
+ */
+
+package runtime
+
+import "unsafe"
+
+// All types begin with a few common fields needed for
+// the interface runtime.
+type commonType struct {
+ Kind uint8 // type kind
+ align uint8 // alignment of variable with this type
+ fieldAlign uint8 // alignment of struct field with this type
+ size uintptr // size in bytes
+ hash uint32 // hash of type; avoids computation in hash tables
+
+ hashfn func(unsafe.Pointer, uintptr) uintptr // hash function
+ equalfn func(unsafe.Pointer, unsafe.Pointer, uintptr) bool // equality function
+
+ string *string // string form; unnecessary but undeniably useful
+ *uncommonType // (relatively) uncommon fields
+}
+
+// Values for commonType.kind.
+const (
+ kindBool = 1 + iota
+ kindInt
+ kindInt8
+ kindInt16
+ kindInt32
+ kindInt64
+ kindUint
+ kindUint8
+ kindUint16
+ kindUint32
+ kindUint64
+ kindUintptr
+ kindFloat32
+ kindFloat64
+ kindComplex64
+ kindComplex128
+ kindArray
+ kindChan
+ kindFunc
+ kindInterface
+ kindMap
+ kindPtr
+ kindSlice
+ kindString
+ kindStruct
+ kindUnsafePointer
+
+ // Not currently generated by gccgo.
+ // kindNoPointers = 1 << 7 // OR'ed into kind
+)
+
+// Externally visible name.
+type Type commonType
+
+// Method on non-interface type
+type method struct {
+ name *string // name of method
+ pkgPath *string // nil for exported Names; otherwise import path
+ mtyp *Type // method type (without receiver)
+ typ *Type // .(*FuncType) underneath (with receiver)
+ tfn unsafe.Pointer // fn used for normal method call
+}
+
+// uncommonType is present only for types with names or methods
+// (if T is a named type, the uncommonTypes for T and *T have methods).
+// Using a pointer to this struct reduces the overall size required
+// to describe an unnamed type with no methods.
+type uncommonType struct {
+ name *string // name of type
+ pkgPath *string // import path; nil for built-in types like int, string
+ methods []method // methods associated with type
+}
+
+// BoolType represents a boolean type.
+type BoolType commonType
+
+// FloatType represents a float type.
+type FloatType commonType
+
+// ComplexType represents a complex type.
+type ComplexType commonType
+
+// IntType represents an int type.
+type IntType commonType
+
+// UintType represents a uint type.
+type UintType commonType
+
+// StringType represents a string type.
+type StringType commonType
+
+// UintptrType represents a uintptr type.
+type UintptrType commonType
+
+// UnsafePointerType represents an unsafe.Pointer type.
+type UnsafePointerType commonType
+
+// ArrayType represents a fixed array type.
+type ArrayType struct {
+ commonType
+ elem *Type // array element type
+ len uintptr
+}
+
+// SliceType represents a slice type.
+type SliceType struct {
+ commonType
+ elem *Type // slice element type
+}
+
+// ChanDir represents a channel type's direction.
+type ChanDir int
+
+const (
+ RecvDir ChanDir = 1 << iota // <-chan
+ SendDir // chan<-
+ BothDir = RecvDir | SendDir // chan
+)
+
+// ChanType represents a channel type.
+type ChanType struct {
+ commonType
+ elem *Type // channel element type
+ dir uintptr // channel direction (ChanDir)
+}
+
+// FuncType represents a function type.
+type FuncType struct {
+ commonType
+ dotdotdot bool // last input parameter is ...
+ in []*Type // input parameter types
+ out []*Type // output parameter types
+}
+
+// Method on interface type
+type imethod struct {
+ name *string // name of method
+ pkgPath *string // nil for exported Names; otherwise import path
+ typ *Type // .(*FuncType) underneath
+}
+
+// InterfaceType represents an interface type.
+type InterfaceType struct {
+ commonType
+ methods []imethod // sorted by hash
+}
+
+// MapType represents a map type.
+type MapType struct {
+ commonType
+ key *Type // map key type
+ elem *Type // map element (value) type
+}
+
+// PtrType represents a pointer type.
+type PtrType struct {
+ commonType
+ elem *Type // pointer element (pointed at) type
+}
+
+// Struct field
+type structField struct {
+ name *string // nil for embedded fields
+ pkgPath *string // nil for exported Names; otherwise import path
+ typ *Type // type of field
+ tag *string // nil if no tag
+ offset uintptr // byte offset of field within struct
+}
+
+// StructType represents a struct type.
+type StructType struct {
+ commonType
+ fields []structField // sorted by offset
+}
+
+/*
+ * Must match iface.c:/Itab and compilers.
+ * NOTE: this is the version used by the reflection code, there is another
+ * one in iface_defs.go that is closer to the original C version.
+ */
+type Itable struct {
+ Itype *Type // (*tab.inter).(*InterfaceType) is the interface type
+ Type *Type
+ link *Itable
+ bad int32
+ unused int32
+ Fn [100000]uintptr // bigger than we'll ever see
+}
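
For concreteness, the ChanDir constants above work out to RecvDir = 1, SendDir = 2, and BothDir = RecvDir|SendDir = 3, so a bidirectional channel carries both direction bits; a trivial sketch of the same iota pattern:

package main

func main() {
	const (
		recvDir = 1 << iota // 1, like RecvDir: <-chan
		sendDir             // 2, like SendDir: chan<-
	)
	const bothDir = recvDir | sendDir // 3, like BothDir: chan
	println(recvDir, sendDir, bothDir)
}
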