From 554fd8c5195424bdbcabf5de30fdc183aba391bd Mon Sep 17 00:00:00 2001
From: upstream source tree <ports@midipix.org>
Date: Sun, 15 Mar 2015 20:14:05 -0400
Subject: obtained gcc-4.6.4.tar.bz2 from upstream website; verified
 gcc-4.6.4.tar.bz2.sig; imported gcc-4.6.4 source tree from verified upstream
 tarball.

downloading a git-generated archive based on the 'upstream' tag
should provide you with a source tree that is binary identical
to the one extracted from the above tarball.

if you have obtained the source via the command 'git clone',
however, do note that line-endings of files in your working
directory might differ from line-endings of the respective
files in the upstream repository.
---
 libgo/runtime/array.h                      |  28 ++
 libgo/runtime/chan.goc                     |  39 ++
 libgo/runtime/channel.h                    | 147 ++++++
 libgo/runtime/defs.h                       |  12 +
 libgo/runtime/go-alloc.h                   |  11 +
 libgo/runtime/go-append.c                  |  67 +++
 libgo/runtime/go-assert-interface.c        |  49 ++
 libgo/runtime/go-assert.c                  |  18 +
 libgo/runtime/go-assert.h                  |  18 +
 libgo/runtime/go-breakpoint.c              |  15 +
 libgo/runtime/go-byte-array-to-string.c    |  24 +
 libgo/runtime/go-caller.c                  |  51 ++
 libgo/runtime/go-can-convert-interface.c   |  76 +++
 libgo/runtime/go-cgo.c                     |  42 ++
 libgo/runtime/go-chan-cap.c                |  41 ++
 libgo/runtime/go-chan-len.c                |  41 ++
 libgo/runtime/go-check-interface.c         |  46 ++
 libgo/runtime/go-close.c                   |  33 ++
 libgo/runtime/go-closed.c                  |  34 ++
 libgo/runtime/go-construct-map.c           |  32 ++
 libgo/runtime/go-convert-interface.c       | 138 ++++++
 libgo/runtime/go-copy.c                    |  21 +
 libgo/runtime/go-defer.c                   |  69 +++
 libgo/runtime/go-defer.h                   |  36 ++
 libgo/runtime/go-deferred-recover.c        |  92 ++++
 libgo/runtime/go-eface-compare.c           |  32 ++
 libgo/runtime/go-eface-val-compare.c       |  32 ++
 libgo/runtime/go-getgoroot.c               |  26 +
 libgo/runtime/go-go.c                      | 656 +++++++++++++++++++++++++
 libgo/runtime/go-gomaxprocs.c              |  15 +
 libgo/runtime/go-int-array-to-string.c     |  85 ++++
 libgo/runtime/go-int-to-string.c           |  60 +++
 libgo/runtime/go-interface-compare.c       |  31 ++
 libgo/runtime/go-interface-eface-compare.c |  32 ++
 libgo/runtime/go-interface-val-compare.c   |  32 ++
 libgo/runtime/go-lock-os-thread.c          |  24 +
 libgo/runtime/go-main.c                    |  89 ++++
 libgo/runtime/go-map-delete.c              |  52 ++
 libgo/runtime/go-map-index.c               | 127 +++++
 libgo/runtime/go-map-len.c                 |  21 +
 libgo/runtime/go-map-range.c               | 102 ++++
 libgo/runtime/go-nanotime.c                |  22 +
 libgo/runtime/go-new-channel.c             |  57 +++
 libgo/runtime/go-new-map.c                 | 125 +++++
 libgo/runtime/go-new.c                     |  21 +
 libgo/runtime/go-note.c                    |  74 +++
 libgo/runtime/go-panic-defer.c             |  13 +
 libgo/runtime/go-panic.c                   | 121 +++++
 libgo/runtime/go-panic.h                   |  94 ++++
 libgo/runtime/go-print.c                   |  93 ++++
 libgo/runtime/go-rec-big.c                 |  34 ++
 libgo/runtime/go-rec-nb-big.c              |  39 ++
 libgo/runtime/go-rec-nb-small.c            | 127 +++++
 libgo/runtime/go-rec-small.c               | 289 +++++++++++
 libgo/runtime/go-recover.c                 |  69 +++
 libgo/runtime/go-reflect-call.c            | 375 ++++++++++++++
 libgo/runtime/go-reflect-chan.c            | 148 ++++++
 libgo/runtime/go-reflect-map.c             | 139 ++++++
 libgo/runtime/go-reflect.c                 | 186 +++++++
 libgo/runtime/go-rune.c                    |  77 +++
 libgo/runtime/go-runtime-error.c           |  84 ++++
 libgo/runtime/go-sched.c                   |  15 +
 libgo/runtime/go-select.c                  | 758 +++++++++++++++++++++++++++++
 libgo/runtime/go-semacquire.c              | 151 ++++++
 libgo/runtime/go-send-big.c                |  31 ++
 libgo/runtime/go-send-nb-big.c             |  30 ++
 libgo/runtime/go-send-nb-small.c           | 112 +++++
 libgo/runtime/go-send-small.c              | 165 +++++++
 libgo/runtime/go-signal.c                  | 200 ++++++++
 libgo/runtime/go-signal.h                  |   7 +
 libgo/runtime/go-strcmp.c                  |  27 +
 libgo/runtime/go-string-to-byte-array.c    |  24 +
 libgo/runtime/go-string-to-int-array.c     |  50 ++
 libgo/runtime/go-string.h                  |  42 ++
 libgo/runtime/go-strplus.c                 |  30 ++
 libgo/runtime/go-strslice.c                |  26 +
 libgo/runtime/go-trampoline.c              |  53 ++
 libgo/runtime/go-type-eface.c              |  55 +++
 libgo/runtime/go-type-error.c              |  28 ++
 libgo/runtime/go-type-identity.c           |  50 ++
 libgo/runtime/go-type-interface.c          |  55 +++
 libgo/runtime/go-type-string.c             |  45 ++
 libgo/runtime/go-type.h                    | 309 ++++++++++++
 libgo/runtime/go-typedesc-equal.c          |  38 ++
 libgo/runtime/go-typestring.c              |  18 +
 libgo/runtime/go-unreflect.c               |  30 ++
 libgo/runtime/go-unsafe-new.c              |  27 +
 libgo/runtime/go-unsafe-newarray.c         |  28 ++
 libgo/runtime/go-unsafe-pointer.c          |  97 ++++
 libgo/runtime/go-unwind.c                  | 426 ++++++++++++++++
 libgo/runtime/goc2c.c                      | 735 ++++++++++++++++++++++++++++
 libgo/runtime/iface.goc                    | 131 +++++
 libgo/runtime/interface.h                  |  57 +++
 libgo/runtime/malloc.goc                   | 357 ++++++++++++++
 libgo/runtime/malloc.h                     | 399 +++++++++++++++
 libgo/runtime/map.goc                      |  69 +++
 libgo/runtime/map.h                        |  86 ++++
 libgo/runtime/mcache.c                     | 131 +++++
 libgo/runtime/mcentral.c                   | 209 ++++++++
 libgo/runtime/mem.c                        |  76 +++
 libgo/runtime/mem_posix_memalign.c         |  38 ++
 libgo/runtime/mfinal.c                     | 217 +++++++++
 libgo/runtime/mfixalloc.c                  |  62 +++
 libgo/runtime/mgc0.c                       | 392 +++++++++++++++
 libgo/runtime/mheap.c                      | 350 +++++++++++++
 libgo/runtime/mheapmap32.c                 |  99 ++++
 libgo/runtime/mheapmap32.h                 |  41 ++
 libgo/runtime/mheapmap64.c                 | 120 +++++
 libgo/runtime/mheapmap64.h                 |  60 +++
 libgo/runtime/mprof.goc                    | 305 ++++++++++++
 libgo/runtime/msize.c                      | 169 +++++++
 libgo/runtime/proc.c                       |  16 +
 libgo/runtime/reflect.goc                  |  35 ++
 libgo/runtime/rtems-task-variable-add.c    |  24 +
 libgo/runtime/runtime.h                    | 196 ++++++++
 libgo/runtime/sigqueue.goc                 | 113 +++++
 libgo/runtime/string.goc                   |  57 +++
 libgo/runtime/thread.c                     | 118 +++++
 118 files changed, 12372 insertions(+)
 create mode 100644 libgo/runtime/array.h
 create mode 100644 libgo/runtime/chan.goc
 create mode 100644 libgo/runtime/channel.h
 create mode 100644 libgo/runtime/defs.h
 create mode 100644 libgo/runtime/go-alloc.h
 create mode 100644 libgo/runtime/go-append.c
 create mode 100644 libgo/runtime/go-assert-interface.c
 create mode 100644 libgo/runtime/go-assert.c
 create mode 100644 libgo/runtime/go-assert.h
 create mode 100644 libgo/runtime/go-breakpoint.c
 create mode 100644 libgo/runtime/go-byte-array-to-string.c
 create mode 100644 libgo/runtime/go-caller.c
 create mode 100644 libgo/runtime/go-can-convert-interface.c
 create mode 100644 libgo/runtime/go-cgo.c
 create mode 100644 libgo/runtime/go-chan-cap.c
 create mode 100644 libgo/runtime/go-chan-len.c
 create mode 100644 libgo/runtime/go-check-interface.c
 create mode 100644 libgo/runtime/go-close.c
 create mode 100644 libgo/runtime/go-closed.c
 create mode 100644 libgo/runtime/go-construct-map.c
 create mode 100644 libgo/runtime/go-convert-interface.c
 create mode 100644 libgo/runtime/go-copy.c
 create mode 100644 libgo/runtime/go-defer.c
 create mode 100644 libgo/runtime/go-defer.h
 create mode 100644 libgo/runtime/go-deferred-recover.c
 create mode 100644 libgo/runtime/go-eface-compare.c
 create mode 100644 libgo/runtime/go-eface-val-compare.c
 create mode 100644 libgo/runtime/go-getgoroot.c
 create mode 100644 libgo/runtime/go-go.c
 create mode 100644 libgo/runtime/go-gomaxprocs.c
 create mode 100644 libgo/runtime/go-int-array-to-string.c
 create mode 100644 libgo/runtime/go-int-to-string.c
 create mode 100644 libgo/runtime/go-interface-compare.c
 create mode 100644 libgo/runtime/go-interface-eface-compare.c
 create mode 100644 libgo/runtime/go-interface-val-compare.c
 create mode 100644 libgo/runtime/go-lock-os-thread.c
 create mode 100644 libgo/runtime/go-main.c
 create mode 100644 libgo/runtime/go-map-delete.c
 create mode 100644 libgo/runtime/go-map-index.c
 create mode 100644 libgo/runtime/go-map-len.c
 create mode 100644 libgo/runtime/go-map-range.c
 create mode 100644 libgo/runtime/go-nanotime.c
 create mode 100644 libgo/runtime/go-new-channel.c
 create mode 100644 libgo/runtime/go-new-map.c
 create mode 100644 libgo/runtime/go-new.c
 create mode 100644 libgo/runtime/go-note.c
 create mode 100644 libgo/runtime/go-panic-defer.c
 create mode 100644 libgo/runtime/go-panic.c
 create mode 100644 libgo/runtime/go-panic.h
 create mode 100644 libgo/runtime/go-print.c
 create mode 100644 libgo/runtime/go-rec-big.c
 create mode 100644 libgo/runtime/go-rec-nb-big.c
 create mode 100644 libgo/runtime/go-rec-nb-small.c
 create mode 100644 libgo/runtime/go-rec-small.c
 create mode 100644 libgo/runtime/go-recover.c
 create mode 100644 libgo/runtime/go-reflect-call.c
 create mode 100644 libgo/runtime/go-reflect-chan.c
 create mode 100644 libgo/runtime/go-reflect-map.c
 create mode 100644 libgo/runtime/go-reflect.c
 create mode 100644 libgo/runtime/go-rune.c
 create mode 100644 libgo/runtime/go-runtime-error.c
 create mode 100644 libgo/runtime/go-sched.c
 create mode 100644 libgo/runtime/go-select.c
 create mode 100644 libgo/runtime/go-semacquire.c
 create mode 100644 libgo/runtime/go-send-big.c
 create mode 100644 libgo/runtime/go-send-nb-big.c
 create mode 100644 libgo/runtime/go-send-nb-small.c
 create mode 100644 libgo/runtime/go-send-small.c
 create mode 100644 libgo/runtime/go-signal.c
 create mode 100644 libgo/runtime/go-signal.h
 create mode 100644 libgo/runtime/go-strcmp.c
 create mode 100644 libgo/runtime/go-string-to-byte-array.c
 create mode 100644 libgo/runtime/go-string-to-int-array.c
 create mode 100644 libgo/runtime/go-string.h
 create mode 100644 libgo/runtime/go-strplus.c
 create mode 100644 libgo/runtime/go-strslice.c
 create mode 100644 libgo/runtime/go-trampoline.c
 create mode 100644 libgo/runtime/go-type-eface.c
 create mode 100644 libgo/runtime/go-type-error.c
 create mode 100644 libgo/runtime/go-type-identity.c
 create mode 100644 libgo/runtime/go-type-interface.c
 create mode 100644 libgo/runtime/go-type-string.c
 create mode 100644 libgo/runtime/go-type.h
 create mode 100644 libgo/runtime/go-typedesc-equal.c
 create mode 100644 libgo/runtime/go-typestring.c
 create mode 100644 libgo/runtime/go-unreflect.c
 create mode 100644 libgo/runtime/go-unsafe-new.c
 create mode 100644 libgo/runtime/go-unsafe-newarray.c
 create mode 100644 libgo/runtime/go-unsafe-pointer.c
 create mode 100644 libgo/runtime/go-unwind.c
 create mode 100644 libgo/runtime/goc2c.c
 create mode 100644 libgo/runtime/iface.goc
 create mode 100644 libgo/runtime/interface.h
 create mode 100644 libgo/runtime/malloc.goc
 create mode 100644 libgo/runtime/malloc.h
 create mode 100644 libgo/runtime/map.goc
 create mode 100644 libgo/runtime/map.h
 create mode 100644 libgo/runtime/mcache.c
 create mode 100644 libgo/runtime/mcentral.c
 create mode 100644 libgo/runtime/mem.c
 create mode 100644 libgo/runtime/mem_posix_memalign.c
 create mode 100644 libgo/runtime/mfinal.c
 create mode 100644 libgo/runtime/mfixalloc.c
 create mode 100644 libgo/runtime/mgc0.c
 create mode 100644 libgo/runtime/mheap.c
 create mode 100644 libgo/runtime/mheapmap32.c
 create mode 100644 libgo/runtime/mheapmap32.h
 create mode 100644 libgo/runtime/mheapmap64.c
 create mode 100644 libgo/runtime/mheapmap64.h
 create mode 100644 libgo/runtime/mprof.goc
 create mode 100644 libgo/runtime/msize.c
 create mode 100644 libgo/runtime/proc.c
 create mode 100644 libgo/runtime/reflect.goc
 create mode 100644 libgo/runtime/rtems-task-variable-add.c
 create mode 100644 libgo/runtime/runtime.h
 create mode 100644 libgo/runtime/sigqueue.goc
 create mode 100644 libgo/runtime/string.goc
 create mode 100644 libgo/runtime/thread.c

diff --git a/libgo/runtime/array.h b/libgo/runtime/array.h
new file mode 100644
index 000000000..f6d0261df
--- /dev/null
+++ b/libgo/runtime/array.h
@@ -0,0 +1,28 @@
+/* array.h -- the open array type for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#ifndef LIBGO_ARRAY_H
+#define LIBGO_ARRAY_H
+
+/* An open array is an instance of this structure.  */
+
+struct __go_open_array
+{
+  /* The elements of the array.  In use in the compiler this is a
+     pointer to the element type.  */
+  void* __values;
+  /* The number of elements in the array.  Note that this is "int",
+     not "size_t".  The language definition says that "int" is large
+     enough to hold the size of any allocated object.  Using "int"
+     saves 8 bytes per slice header on a 64-bit system with 32-bit
+     ints.  */
+  int __count;
+  /* The capacity of the array--the number of elements that can fit in
+     the __VALUES field.  */
+  int __capacity;
+};
+
+#endif /* !defined(LIBGO_ARRAY_H) */
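
For reference, the three fields above map directly onto Go's slice
header.  A minimal illustrative sketch (not part of the patch; the
backing buffer and function are hypothetical) of a slice described
over a C array:

    #include "array.h"

    static int backing[8];

    /* Build a slice header covering the first LEN elements of
       BACKING: len(s) == LEN, cap(s) == 8.  */
    struct __go_open_array
    make_int_slice (int len)
    {
      struct __go_open_array a;

      a.__values = backing;  /* element data */
      a.__count = len;       /* len(s) */
      a.__capacity = 8;      /* cap(s) */
      return a;
    }
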
diff --git a/libgo/runtime/chan.goc b/libgo/runtime/chan.goc
new file mode 100644
index 000000000..da0bbfccb
--- /dev/null
+++ b/libgo/runtime/chan.goc
@@ -0,0 +1,39 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+#include "config.h"
+#include "channel.h"
+
+typedef _Bool bool;
+typedef unsigned char byte;
+typedef struct __go_channel chan;
+
+/* Do a nonblocking channel receive.  */
+
+func chanrecv2(c *chan, val *byte) (pres bool) {
+	if (c->element_size > 8) {
+		return __go_receive_nonblocking_big(c, val);
+	} else {
+		struct __go_receive_nonblocking_small rs;
+		union {
+			char b[8];
+			uint64_t v;
+		} u;
+
+		rs = __go_receive_nonblocking_small (c);
+		if (!rs.__success) {
+			__builtin_memset(val, 0, c->element_size);
+			return 0;
+		}
+		u.v = rs.__val;
+#ifndef WORDS_BIGENDIAN
+		__builtin_memcpy(val, u.b, c->element_size);
+#else
+		__builtin_memcpy(val, u.b + 8 - c->element_size,
+				 c->element_size);
+#endif
+		return 1;
+	}
+}
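
The union copy above is needed because values of eight bytes or less
travel through the channel packed in a uint64_t.  A standalone sketch
of the same unpacking step (not part of the patch; it assumes the
WORDS_BIGENDIAN macro from config.h): a two-byte element occupies
u.b[0..1] on a little-endian target but u.b[6..7] on a big-endian one.

    #include <stdint.h>
    #include <string.h>

    /* Copy the ELEMENT_SIZE significant bytes of V into VAL.  */
    static void
    unpack_small_value (uint64_t v, void *val, size_t element_size)
    {
      union { char b[8]; uint64_t v; } u;

      u.v = v;
    #ifndef WORDS_BIGENDIAN
      memcpy (val, u.b, element_size);
    #else
      memcpy (val, u.b + 8 - element_size, element_size);
    #endif
    }
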
diff --git a/libgo/runtime/channel.h b/libgo/runtime/channel.h
new file mode 100644
index 000000000..b0d13477a
--- /dev/null
+++ b/libgo/runtime/channel.h
@@ -0,0 +1,147 @@
+/* channel.h -- the channel type for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+#include <pthread.h>
+
+/* This structure is used when a select is waiting for a synchronous
+   channel.  */
+
+struct __go_channel_select
+{
+  /* A pointer to the next select waiting for this channel.  */
+  struct __go_channel_select *next;
+  /* A pointer to the channel which this select will use.  This starts
+     out as NULL and is set to the first channel which synchs up with
+     this one.  The variable to which this points may only be
+     accessed when __go_select_data_mutex is held.  */
+  struct __go_channel **selected;
+  /* A pointer to a variable which must be set to true if the
+     goroutine which sets *SELECTED wants to read from the channel,
+     false if it wants to write to it.  */
+  _Bool *is_read;
+};
+
+/* A channel is a pointer to this structure.  */
+
+struct __go_channel
+{
+  /* A mutex to control access to the channel.  */
+  pthread_mutex_t lock;
+  /* A condition variable.  This is signalled when data is added to
+     the channel and when data is removed from the channel.  */
+  pthread_cond_t cond;
+  /* The size of elements on this channel.  */
+  size_t element_size;
+  /* Number of operations on closed channel.  */
+  unsigned short closed_op_count;
+  /* True if a goroutine is waiting to send on a synchronous
+     channel.  */
+  _Bool waiting_to_send;
+  /* True if a goroutine is waiting to receive on a synchronous
+     channel.  */
+  _Bool waiting_to_receive;
+  /* True if this channel was selected for send in a select statement.
+     This locks out all other sends.  */
+  _Bool selected_for_send;
+  /* True if this channel was selected for receive in a select
+     statement.  This locks out all other receives.  */
+  _Bool selected_for_receive;
+  /* True if this channel has been closed.  */
+  _Bool is_closed;
+  /* True if at least one null value has been read from a closed
+     channel.  */
+  _Bool saw_close;
+  /* The list of select statements waiting to send on a synchronous
+     channel.  */
+  struct __go_channel_select *select_send_queue;
+  /* The list of select statements waiting to receive on a synchronous
+     channel.  */
+  struct __go_channel_select *select_receive_queue;
+  /* If a select statement is waiting for this channel, it sets these
+     pointers.  When something happens on the channel, the channel
+     locks the mutex, signals the condition, and unlocks the
+     mutex.  */
+  pthread_mutex_t *select_mutex;
+  pthread_cond_t *select_cond;
+  /* The number of entries in the circular buffer.  */
+  unsigned int num_entries;
+  /* Where to store the next value.  */
+  unsigned int next_store;
+  /* Where to fetch the next value.  If next_fetch == next_store, the
+     buffer is empty.  If next_store + 1 == next_fetch, the buffer is
+     full.  */
+  unsigned int next_fetch;
+  /* The circular buffer.  */
+  uint64_t data[];
+};
+
+/* The mutex used to control access to the value pointed to by the
+   __go_channel_select selected field.  No additional mutexes may be
+   acquired while this mutex is held.  */
+extern pthread_mutex_t __go_select_data_mutex;
+
+/* Maximum permitted number of operations on a closed channel.  */
+#define MAX_CLOSED_OPERATIONS (0x100)
+
+extern struct __go_channel *__go_new_channel (size_t, size_t);
+
+extern _Bool __go_synch_with_select (struct __go_channel *, _Bool);
+
+extern void __go_broadcast_to_select (struct __go_channel *);
+
+extern _Bool __go_send_acquire (struct __go_channel *, _Bool);
+
+#define SEND_NONBLOCKING_ACQUIRE_SPACE 0
+#define SEND_NONBLOCKING_ACQUIRE_NOSPACE 1
+#define SEND_NONBLOCKING_ACQUIRE_CLOSED 2
+
+extern int __go_send_nonblocking_acquire (struct __go_channel *);
+
+extern void __go_send_release (struct __go_channel *);
+
+extern void __go_send_small (struct __go_channel *, uint64_t, _Bool);
+
+extern _Bool __go_send_nonblocking_small (struct __go_channel *, uint64_t);
+
+extern void __go_send_big (struct __go_channel *, const void *, _Bool);
+
+extern _Bool __go_send_nonblocking_big (struct __go_channel *, const void *);
+
+extern _Bool __go_receive_acquire (struct __go_channel *, _Bool);
+
+#define RECEIVE_NONBLOCKING_ACQUIRE_DATA 0
+#define RECEIVE_NONBLOCKING_ACQUIRE_NODATA 1
+#define RECEIVE_NONBLOCKING_ACQUIRE_CLOSED 2
+
+extern int __go_receive_nonblocking_acquire (struct __go_channel *);
+
+extern uint64_t __go_receive_small (struct __go_channel *, _Bool);
+
+extern void __go_receive_release (struct __go_channel *);
+
+struct __go_receive_nonblocking_small
+{
+  uint64_t __val;
+  _Bool __success;
+};
+
+extern struct __go_receive_nonblocking_small
+__go_receive_nonblocking_small (struct __go_channel *);
+
+extern void __go_receive_big (struct __go_channel *, void *, _Bool);
+
+extern _Bool __go_receive_nonblocking_big (struct __go_channel *, void *);
+
+extern void __go_unlock_and_notify_selects (struct __go_channel *);
+
+extern _Bool __go_builtin_closed (struct __go_channel *);
+
+extern void __go_builtin_close (struct __go_channel *);
+
+extern size_t __go_chan_len (struct __go_channel *);
+
+extern size_t __go_chan_cap (struct __go_channel *);
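
The next_store/next_fetch comments above imply the classic
one-slot-open circular buffer: the buffer is empty when the two
indices are equal and full when the store index is one behind the
fetch index, so one of the num_entries slots is always unused.  A
sketch (not part of the patch) of the resulting length computation,
which go-chan-len.c and go-chan-cap.c below perform under the
channel lock:

    #include <stddef.h>

    /* Number of buffered values; num_entries == 0 means the channel
       is unbuffered.  Capacity is num_entries - 1 because one slot
       stays unused to distinguish full from empty.  */
    static size_t
    buffered_len (unsigned int next_store, unsigned int next_fetch,
                  unsigned int num_entries)
    {
      if (num_entries == 0 || next_fetch == next_store)
        return 0;
      return (next_store + num_entries - next_fetch) % num_entries;
    }
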
diff --git a/libgo/runtime/defs.h b/libgo/runtime/defs.h
new file mode 100644
index 000000000..67ad212b8
--- /dev/null
+++ b/libgo/runtime/defs.h
@@ -0,0 +1,12 @@
+/* defs.h -- runtime definitions for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+/* The gc library uses this file for system defines, and generates it
+   automatically using the godefs program.  The logical thing to put
+   here for gccgo would be #include statements for system header
+   files.  We can't do that, though, because runtime.h #define's the
+   standard types.  So we #include the system headers from runtime.h
+   instead.  */
diff --git a/libgo/runtime/go-alloc.h b/libgo/runtime/go-alloc.h
new file mode 100644
index 000000000..c880a043e
--- /dev/null
+++ b/libgo/runtime/go-alloc.h
@@ -0,0 +1,11 @@
+/* go-alloc.h -- allocate memory for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+#include <stdint.h>
+
+extern void *__go_alloc (unsigned int __attribute__ ((mode (pointer))));
+extern void __go_free (void *);
diff --git a/libgo/runtime/go-append.c b/libgo/runtime/go-append.c
new file mode 100644
index 000000000..91493b1b7
--- /dev/null
+++ b/libgo/runtime/go-append.c
@@ -0,0 +1,67 @@
+/* go-append.c -- the go builtin append function.
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-type.h"
+#include "go-panic.h"
+#include "array.h"
+#include "runtime.h"
+#include "malloc.h"
+
+/* We should be OK if we don't split the stack here, since the only
+   libc functions we call are memcpy and memmove.  If we don't do
+   this, we will always split the stack, because of memcpy and
+   memmove.  */
+extern struct __go_open_array
+__go_append (struct __go_open_array, void *, size_t, size_t)
+  __attribute__ ((no_split_stack));
+
+struct __go_open_array
+__go_append (struct __go_open_array a, void *bvalues, size_t bcount,
+	     size_t element_size)
+{
+  size_t ucount;
+  int count;
+
+  if (bvalues == NULL || bcount == 0)
+    return a;
+
+  ucount = (size_t) a.__count + bcount;
+  count = (int) ucount;
+  if ((size_t) count != ucount || count <= a.__count)
+    __go_panic_msg ("append: slice overflow");
+
+  if (count > a.__capacity)
+    {
+      int m;
+      void *n;
+
+      m = a.__capacity;
+      if (m == 0)
+	m = (int) bcount;
+      else
+	{
+	  do
+	    {
+	      if (a.__count < 1024)
+		m += m;
+	      else
+		m += m / 4;
+	    }
+	  while (m < count);
+	}
+
+      n = __go_alloc (m * element_size);
+      __builtin_memcpy (n, a.__values, a.__count * element_size);
+
+      a.__values = n;
+      a.__capacity = m;
+    }
+
+  __builtin_memmove ((char *) a.__values + a.__count * element_size,
+		     bvalues, bcount * element_size);
+  a.__count = count;
+  return a;
+}
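
The loop above encodes the growth policy: capacity doubles while the
slice holds fewer than 1024 elements and then grows by a quarter per
step, amortizing reallocation cost without overcommitting large
slices.  A standalone sketch of the same computation (not part of the
patch; the function name is illustrative):

    /* Return a capacity large enough for NEEDED elements, given the
       old capacity and count and the number of appended elements
       BCOUNT.  Mirrors the sizing logic of __go_append above.  */
    static int
    grow_capacity (int old_capacity, int old_count, int needed, int bcount)
    {
      int m = old_capacity;

      if (m == 0)
        return bcount;
      do
        {
          if (old_count < 1024)
            m += m;      /* double small slices */
          else
            m += m / 4;  /* grow large slices by 25% */
        }
      while (m < needed);
      return m;
    }
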
diff --git a/libgo/runtime/go-assert-interface.c b/libgo/runtime/go-assert-interface.c
new file mode 100644
index 000000000..57a092d59
--- /dev/null
+++ b/libgo/runtime/go-assert-interface.c
@@ -0,0 +1,49 @@
+/* go-assert-interface.c -- interface type assertion for Go.
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-alloc.h"
+#include "go-assert.h"
+#include "go-panic.h"
+#include "interface.h"
+
+/* This is called by the compiler to implement a type assertion from
+   one interface type to another.  This returns the value that should
+   go in the first field of the result tuple.  The result may be an
+   empty or a non-empty interface.  */
+
+const void *
+__go_assert_interface (const struct __go_type_descriptor *lhs_descriptor,
+		       const struct __go_type_descriptor *rhs_descriptor)
+{
+  const struct __go_interface_type *lhs_interface;
+
+  if (rhs_descriptor == NULL)
+    {
+      struct __go_empty_interface panic_arg;
+
+      /* A type assertion is not permitted with a nil interface.  */
+
+      newTypeAssertionError (NULL,
+			     NULL,
+			     lhs_descriptor,
+			     NULL,
+			     NULL,
+			     lhs_descriptor->__reflection,
+			     NULL,
+			     &panic_arg);
+      __go_panic (panic_arg);
+    }
+
+  /* A type assertion to an empty interface just returns the object
+     descriptor.  */
+
+  __go_assert (lhs_descriptor->__code == GO_INTERFACE);
+  lhs_interface = (const struct __go_interface_type *) lhs_descriptor;
+  if (lhs_interface->__methods.__count == 0)
+    return rhs_descriptor;
+
+  return __go_convert_interface_2 (lhs_descriptor, rhs_descriptor, 0);
+}
diff --git a/libgo/runtime/go-assert.c b/libgo/runtime/go-assert.c
new file mode 100644
index 000000000..48aa0725c
--- /dev/null
+++ b/libgo/runtime/go-assert.c
@@ -0,0 +1,18 @@
+/* go-assert.c -- libgo specific assertions
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "go-assert.h"
+
+void
+__go_assert_fail (const char *file, unsigned int lineno)
+{
+  /* FIXME: Eventually we should dump a stack trace here.  */
+  fprintf (stderr, "%s:%u: libgo assertion failure\n", file, lineno);
+  abort ();
+}
diff --git a/libgo/runtime/go-assert.h b/libgo/runtime/go-assert.h
new file mode 100644
index 000000000..636559597
--- /dev/null
+++ b/libgo/runtime/go-assert.h
@@ -0,0 +1,18 @@
+/* go-assert.h -- libgo specific assertions
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#ifndef LIBGO_GO_ASSERT_H
+#define LIBGO_GO_ASSERT_H
+
+/* We use a Go specific assert function so that functions which call
+   assert aren't required to always split the stack.  */
+
+extern void __go_assert_fail (const char *file, unsigned int lineno)
+  __attribute__ ((noreturn));
+
+#define __go_assert(e) ((e) ? (void) 0 : __go_assert_fail (__FILE__, __LINE__))
+
+#endif /* !defined(LIBGO_GO_ASSERT_H) */
diff --git a/libgo/runtime/go-breakpoint.c b/libgo/runtime/go-breakpoint.c
new file mode 100644
index 000000000..bb6eddc36
--- /dev/null
+++ b/libgo/runtime/go-breakpoint.c
@@ -0,0 +1,15 @@
+/* go-breakpoint.c -- the runtime.Breakpoint function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <sched.h>
+
+void Breakpoint (void) asm ("libgo_runtime.runtime.Breakpoint");
+
+void
+Breakpoint (void)
+{
+  __builtin_trap ();
+}
diff --git a/libgo/runtime/go-byte-array-to-string.c b/libgo/runtime/go-byte-array-to-string.c
new file mode 100644
index 000000000..531730654
--- /dev/null
+++ b/libgo/runtime/go-byte-array-to-string.c
@@ -0,0 +1,24 @@
+/* go-byte-array-to-string.c -- convert an array of bytes to a string in Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-string.h"
+#include "runtime.h"
+#include "malloc.h"
+
+struct __go_string
+__go_byte_array_to_string (const void* p, size_t len)
+{
+  const unsigned char *bytes;
+  unsigned char *retdata;
+  struct __go_string ret;
+
+  bytes = (const unsigned char *) p;
+  retdata = runtime_mallocgc (len, RefNoPointers, 1, 0);
+  __builtin_memcpy (retdata, bytes, len);
+  ret.__data = retdata;
+  ret.__length = len;
+  return ret;
+}
diff --git a/libgo/runtime/go-caller.c b/libgo/runtime/go-caller.c
new file mode 100644
index 000000000..b18759f2f
--- /dev/null
+++ b/libgo/runtime/go-caller.c
@@ -0,0 +1,51 @@
+/* go-caller.c -- runtime.Caller and runtime.FuncForPC for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+/* Implement runtime.Caller.  */
+
+#include <stdint.h>
+
+#include "go-string.h"
+
+/* The values returned by runtime.Caller.  */
+
+struct caller_ret
+{
+  uintptr_t pc;
+  struct __go_string file;
+  int line;
+  _Bool ok;
+};
+
+/* Implement runtime.Caller.  */
+
+struct caller_ret Caller (int n) asm ("libgo_runtime.runtime.Caller");
+
+struct caller_ret
+Caller (int n __attribute__ ((unused)))
+{
+  struct caller_ret ret;
+
+  /* A proper implementation needs to dig through the debugging
+     information.  */
+  ret.pc = (uint64_t) (uintptr_t) __builtin_return_address (0);
+  ret.file.__data = NULL;
+  ret.file.__length = 0;
+  ret.line = 0;
+  ret.ok = 0;
+
+  return ret;
+}
+
+/* Implement runtime.FuncForPC.  */
+
+void *FuncForPC (uintptr_t) asm ("libgo_runtime.runtime.FuncForPC");
+
+void *
+FuncForPC(uintptr_t pc __attribute__ ((unused)))
+{
+  return NULL;
+}
diff --git a/libgo/runtime/go-can-convert-interface.c b/libgo/runtime/go-can-convert-interface.c
new file mode 100644
index 000000000..83217ab95
--- /dev/null
+++ b/libgo/runtime/go-can-convert-interface.c
@@ -0,0 +1,76 @@
+/* go-can-convert-interface.c -- can we convert to an interface?
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-assert.h"
+#include "go-type.h"
+#include "interface.h"
+
+/* Return whether we can convert from the type in FROM_DESCRIPTOR to
+   the interface in TO_DESCRIPTOR.  This is used for type
+   switches.  */
+
+_Bool
+__go_can_convert_to_interface (
+    const struct __go_type_descriptor *to_descriptor,
+    const struct __go_type_descriptor *from_descriptor)
+{
+  const struct __go_interface_type *to_interface;
+  int to_method_count;
+  const struct __go_interface_method *to_method;
+  const struct __go_uncommon_type *from_uncommon;
+  int from_method_count;
+  const struct __go_method *from_method;
+  int i;
+
+  /* In a type switch FROM_DESCRIPTOR can be NULL.  */
+  if (from_descriptor == NULL)
+    return 0;
+
+  __go_assert (to_descriptor->__code == GO_INTERFACE);
+  to_interface = (const struct __go_interface_type *) to_descriptor;
+  to_method_count = to_interface->__methods.__count;
+  to_method = ((const struct __go_interface_method *)
+	       to_interface->__methods.__values);
+
+  from_uncommon = from_descriptor->__uncommon;
+  if (from_uncommon == NULL)
+    {
+      from_method_count = 0;
+      from_method = NULL;
+    }
+  else
+    {
+      from_method_count = from_uncommon->__methods.__count;
+      from_method = ((const struct __go_method *)
+		     from_uncommon->__methods.__values);
+    }
+
+  for (i = 0; i < to_method_count; ++i)
+    {
+      while (from_method_count > 0
+	     && (!__go_ptr_strings_equal (from_method->__name,
+					  to_method->__name)
+		 || !__go_ptr_strings_equal (from_method->__pkg_path,
+					     to_method->__pkg_path)))
+	{
+	  ++from_method;
+	  --from_method_count;
+	}
+
+      if (from_method_count == 0)
+	return 0;
+
+      if (!__go_type_descriptors_equal (from_method->__mtype,
+					to_method->__type))
+	return 0;
+
+      ++to_method;
+      ++from_method;
+      --from_method_count;
+    }
+
+  return 1;
+}
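
Note that the scan above is a single linear merge: FROM_METHOD is
never rewound, which only works because both method tables are
sorted by name.  A simplified sketch of the same pattern (not part
of the patch; it matches names only, where the real code also
compares package paths and method types):

    #include <string.h>

    /* Return 1 if every name in WANT (sorted, NWANT entries) also
       appears in HAVE (sorted, NHAVE entries).  */
    static int
    sorted_names_subset (const char *const *want, int nwant,
                         const char *const *have, int nhave)
    {
      int i;

      for (i = 0; i < nwant; ++i)
        {
          while (nhave > 0 && strcmp (*have, want[i]) != 0)
            {
              ++have;
              --nhave;
            }
          if (nhave == 0)
            return 0;
          ++have;
          --nhave;
        }
      return 1;
    }
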
diff --git a/libgo/runtime/go-cgo.c b/libgo/runtime/go-cgo.c
new file mode 100644
index 000000000..94917bca0
--- /dev/null
+++ b/libgo/runtime/go-cgo.c
@@ -0,0 +1,42 @@
+/* go-cgo.c -- SWIG support routines for libgo.
+
+   Copyright 2011 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-alloc.h"
+#include "interface.h"
+#include "go-panic.h"
+#include "go-string.h"
+
+/* These are routines used by SWIG.  The gc runtime library provides
+   the same routines under the same name, though in that case the code
+   is required to import runtime/cgo.  */
+
+void *
+_cgo_allocate (size_t n)
+{
+  return __go_alloc (n);
+}
+
+extern const struct __go_type_descriptor string_type_descriptor
+  asm ("__go_tdn_string");
+
+void
+_cgo_panic (const char *p)
+{
+  int len;
+  unsigned char *data;
+  struct __go_string *ps;
+  struct __go_empty_interface e;
+
+  len = __builtin_strlen (p);
+  data = __go_alloc (len);
+  __builtin_memcpy (data, p, len);
+  ps = __go_alloc (sizeof *ps);
+  ps->__data = data;
+  ps->__length = len;
+  e.__type_descriptor = &string_type_descriptor;
+  e.__object = ps;
+  __go_panic (e);
+}
diff --git a/libgo/runtime/go-chan-cap.c b/libgo/runtime/go-chan-cap.c
new file mode 100644
index 000000000..df603bf10
--- /dev/null
+++ b/libgo/runtime/go-chan-cap.c
@@ -0,0 +1,41 @@
+/* go-chan-cap.c -- the cap function applied to a channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+
+#include "go-assert.h"
+#include "channel.h"
+
+/* Return the cap function applied to a channel--the size of the
+   buffer.  This could be done inline but I'm doing it as a function
+   for now to make it easy to change the channel structure.  */
+
+size_t
+__go_chan_cap (struct __go_channel *channel)
+{
+  int i;
+  size_t ret;
+
+  if (channel == NULL)
+    return 0;
+
+  i = pthread_mutex_lock (&channel->lock);
+  __go_assert (i == 0);
+
+  if (channel->num_entries == 0)
+    ret = 0;
+  else
+    {
+      /* One slot is always unused.  We added 1 when we created the
+	 channel.  */
+      ret = channel->num_entries - 1;
+    }
+
+  i = pthread_mutex_unlock (&channel->lock);
+  __go_assert (i == 0);
+
+  return ret;
+}
diff --git a/libgo/runtime/go-chan-len.c b/libgo/runtime/go-chan-len.c
new file mode 100644
index 000000000..5aebae141
--- /dev/null
+++ b/libgo/runtime/go-chan-len.c
@@ -0,0 +1,41 @@
+/* go-chan-len.c -- the len function applied to a channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+
+#include "go-assert.h"
+#include "channel.h"
+
+/* Return the len function applied to a channel--the number of
+   elements in the buffer.  This could be done inline but I'm doing it
+   as a function for now to make it easy to change the channel
+   structure.  */
+
+size_t
+__go_chan_len (struct __go_channel *channel)
+{
+  int i;
+  size_t ret;
+
+  if (channel == NULL)
+    return 0;
+
+  i = pthread_mutex_lock (&channel->lock);
+  __go_assert (i == 0);
+
+  if (channel->num_entries == 0)
+    ret = 0;
+  else if (channel->next_fetch == channel->next_store)
+    ret = 0;
+  else
+    ret = ((channel->next_store + channel->num_entries - channel->next_fetch)
+	   % channel->num_entries);
+
+  i = pthread_mutex_unlock (&channel->lock);
+  __go_assert (i == 0);
+
+  return ret;
+}
diff --git a/libgo/runtime/go-check-interface.c b/libgo/runtime/go-check-interface.c
new file mode 100644
index 000000000..d2258a854
--- /dev/null
+++ b/libgo/runtime/go-check-interface.c
@@ -0,0 +1,46 @@
+/* go-check-interface.c -- check an interface type for a conversion
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-panic.h"
+#include "interface.h"
+
+/* Check that an interface type matches for a conversion to a
+   non-interface type.  This panics if the types are bad.  The actual
+   extraction of the object is inlined.  */
+
+void
+__go_check_interface_type (
+    const struct __go_type_descriptor *lhs_descriptor,
+    const struct __go_type_descriptor *rhs_descriptor,
+    const struct __go_type_descriptor *rhs_inter_descriptor)
+{
+  if (rhs_descriptor == NULL)
+    {
+      struct __go_empty_interface panic_arg;
+
+      newTypeAssertionError(NULL, NULL, lhs_descriptor, NULL, NULL,
+			    lhs_descriptor->__reflection, NULL, &panic_arg);
+      __go_panic(panic_arg);
+    }
+
+  if (lhs_descriptor != rhs_descriptor
+      && !__go_type_descriptors_equal (lhs_descriptor, rhs_descriptor)
+      && (lhs_descriptor->__code != GO_UNSAFE_POINTER
+	  || !__go_is_pointer_type (rhs_descriptor))
+      && (rhs_descriptor->__code != GO_UNSAFE_POINTER
+	  || !__go_is_pointer_type (lhs_descriptor)))
+    {
+      struct __go_empty_interface panic_arg;
+
+      newTypeAssertionError(rhs_inter_descriptor, rhs_descriptor,
+			    lhs_descriptor,
+			    rhs_inter_descriptor->__reflection,
+			    rhs_descriptor->__reflection,
+			    lhs_descriptor->__reflection,
+			    NULL, &panic_arg);
+      __go_panic(panic_arg);
+    }
+}
diff --git a/libgo/runtime/go-close.c b/libgo/runtime/go-close.c
new file mode 100644
index 000000000..ced742985
--- /dev/null
+++ b/libgo/runtime/go-close.c
@@ -0,0 +1,33 @@
+/* go-close.c -- the builtin close function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-assert.h"
+#include "channel.h"
+
+/* Close a channel.  After a channel is closed, sends are no longer
+   permitted.  Receives always return zero.  */
+
+void
+__go_builtin_close (struct __go_channel *channel)
+{
+  int i;
+
+  i = pthread_mutex_lock (&channel->lock);
+  __go_assert (i == 0);
+
+  while (channel->selected_for_send)
+    {
+      i = pthread_cond_wait (&channel->cond, &channel->lock);
+      __go_assert (i == 0);
+    }
+
+  channel->is_closed = 1;
+
+  i = pthread_cond_broadcast (&channel->cond);
+  __go_assert (i == 0);
+
+  __go_unlock_and_notify_selects (channel);
+}
diff --git a/libgo/runtime/go-closed.c b/libgo/runtime/go-closed.c
new file mode 100644
index 000000000..bfa9cd6f9
--- /dev/null
+++ b/libgo/runtime/go-closed.c
@@ -0,0 +1,34 @@
+/* go-closed.c -- the builtin closed function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-assert.h"
+#include "channel.h"
+
+/* Return whether a channel is closed.  We only return true after at
+   least one nil value has been read from the channel.  */
+
+_Bool
+__go_builtin_closed (struct __go_channel *channel)
+{
+  int i;
+  _Bool ret;
+
+  i = pthread_mutex_lock (&channel->lock);
+  __go_assert (i == 0);
+
+  while (channel->selected_for_receive)
+    {
+      i = pthread_cond_wait (&channel->cond, &channel->lock);
+      __go_assert (i == 0);
+    }
+
+  ret = channel->saw_close;
+
+  i = pthread_mutex_unlock (&channel->lock);
+  __go_assert (i == 0);
+
+  return ret;
+}
diff --git a/libgo/runtime/go-construct-map.c b/libgo/runtime/go-construct-map.c
new file mode 100644
index 000000000..15497eadb
--- /dev/null
+++ b/libgo/runtime/go-construct-map.c
@@ -0,0 +1,32 @@
+/* go-construct-map.c -- construct a map from an initializer.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+#include <stdlib.h>
+
+#include "map.h"
+
+struct __go_map *
+__go_construct_map (const struct __go_map_descriptor *descriptor,
+		    size_t count, size_t entry_size, size_t val_offset,
+		    size_t val_size, const void *ventries)
+{
+  struct __go_map *ret;
+  const unsigned char *entries;
+  size_t i;
+
+  ret = __go_new_map (descriptor, count);
+
+  entries = (const unsigned char *) ventries;
+  for (i = 0; i < count; ++i)
+    {
+      void *val = __go_map_index (ret, entries, 1);
+      __builtin_memcpy (val, entries + val_offset, val_size);
+      entries += entry_size;
+    }
+
+  return ret;
+}
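
The initializer is a flat array of COUNT fixed-size entries, each
holding a key at offset 0 (where __go_map_index reads it) followed by
its value at VAL_OFFSET.  An illustrative layout (not part of the
patch; the concrete types and offsets are an assumption about what a
compiler might emit for a map[int32]int64):

    #include <stdint.h>

    struct example_map_entry
    {
      int32_t key;    /* at offset 0; used to index the map */
      int64_t value;  /* at val_offset == 8 after padding */
    };
    /* entry_size == sizeof (struct example_map_entry) == 16 */
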
diff --git a/libgo/runtime/go-convert-interface.c b/libgo/runtime/go-convert-interface.c
new file mode 100644
index 000000000..259456cda
--- /dev/null
+++ b/libgo/runtime/go-convert-interface.c
@@ -0,0 +1,138 @@
+/* go-convert-interface.c -- convert interfaces for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-alloc.h"
+#include "go-assert.h"
+#include "go-panic.h"
+#include "interface.h"
+
+/* This is called when converting one interface type into another
+   interface type.  LHS_DESCRIPTOR is the type descriptor of the
+   resulting interface.  RHS_DESCRIPTOR is the type descriptor of the
+   object being converted.  This builds and returns a new interface
+   method table.  If any method in the LHS_DESCRIPTOR interface is not
+   implemented by the object, the conversion fails.  If the conversion
+   fails, then if MAY_FAIL is true this returns NULL; otherwise, it
+   panics.  */
+
+void *
+__go_convert_interface_2 (const struct __go_type_descriptor *lhs_descriptor,
+			  const struct __go_type_descriptor *rhs_descriptor,
+			  _Bool may_fail)
+{
+  const struct __go_interface_type *lhs_interface;
+  int lhs_method_count;
+  const struct __go_interface_method* lhs_methods;
+  const void **methods;
+  const struct __go_uncommon_type *rhs_uncommon;
+  int rhs_method_count;
+  const struct __go_method *p_rhs_method;
+  int i;
+
+  if (rhs_descriptor == NULL)
+    {
+      /* A nil value always converts to nil.  */
+      return NULL;
+    }
+
+  __go_assert (lhs_descriptor->__code == GO_INTERFACE);
+  lhs_interface = (const struct __go_interface_type *) lhs_descriptor;
+  lhs_method_count = lhs_interface->__methods.__count;
+  lhs_methods = ((const struct __go_interface_method *)
+		 lhs_interface->__methods.__values);
+
+  /* This should not be called for an empty interface.  */
+  __go_assert (lhs_method_count > 0);
+
+  rhs_uncommon = rhs_descriptor->__uncommon;
+  if (rhs_uncommon == NULL || rhs_uncommon->__methods.__count == 0)
+    {
+      struct __go_empty_interface panic_arg;
+
+      if (may_fail)
+	return NULL;
+
+      newTypeAssertionError (NULL,
+			     rhs_descriptor,
+			     lhs_descriptor,
+			     NULL,
+			     rhs_descriptor->__reflection,
+			     lhs_descriptor->__reflection,
+			     lhs_methods[0].__name,
+			     &panic_arg);
+      __go_panic (panic_arg);
+    }
+
+  rhs_method_count = rhs_uncommon->__methods.__count;
+  p_rhs_method = ((const struct __go_method *)
+		  rhs_uncommon->__methods.__values);
+
+  methods = NULL;
+
+  for (i = 0; i < lhs_method_count; ++i)
+    {
+      const struct __go_interface_method *p_lhs_method;
+
+      p_lhs_method = &lhs_methods[i];
+
+      while (rhs_method_count > 0
+	     && (!__go_ptr_strings_equal (p_lhs_method->__name,
+					  p_rhs_method->__name)
+		 || !__go_ptr_strings_equal (p_lhs_method->__pkg_path,
+					     p_rhs_method->__pkg_path)))
+	{
+	  ++p_rhs_method;
+	  --rhs_method_count;
+	}
+
+      if (rhs_method_count == 0
+	  || !__go_type_descriptors_equal (p_lhs_method->__type,
+					   p_rhs_method->__mtype))
+	{
+	  struct __go_empty_interface panic_arg;
+
+	  if (methods != NULL)
+	    __go_free (methods);
+
+	  if (may_fail)
+	    return NULL;
+
+	  newTypeAssertionError (NULL,
+				 rhs_descriptor,
+				 lhs_descriptor,
+				 NULL,
+				 rhs_descriptor->__reflection,
+				 lhs_descriptor->__reflection,
+				 p_lhs_method->__name,
+				 &panic_arg);
+	  __go_panic (panic_arg);
+	}
+
+      if (methods == NULL)
+	{
+	  methods = (const void **) __go_alloc ((lhs_method_count + 1)
+						* sizeof (void *));
+
+	  /* The first field in the method table is always the type of
+	     the object.  */
+	  methods[0] = rhs_descriptor;
+	}
+
+      methods[i + 1] = p_rhs_method->__function;
+    }
+
+  return methods;
+}
+
+/* This is called by the compiler to convert a value from one
+   interface type to another.  */
+
+void *
+__go_convert_interface (const struct __go_type_descriptor *lhs_descriptor,
+			const struct __go_type_descriptor *rhs_descriptor)
+{
+  return __go_convert_interface_2 (lhs_descriptor, rhs_descriptor, 0);
+}
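
Per the comments in the loop above, the table built here has
LHS_METHOD_COUNT + 1 slots: slot 0 holds the object's type descriptor
and slots 1..n hold the matched method implementations in the
interface's method order.  An illustrative picture for a two-method
interface (not part of the patch):

    const void *example_itable[3] = {
      0,  /* [0] type descriptor of the converted object */
      0,  /* [1] function pointer for the interface's first method */
      0,  /* [2] function pointer for the second method */
    };
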
diff --git a/libgo/runtime/go-copy.c b/libgo/runtime/go-copy.c
new file mode 100644
index 000000000..998aeb927
--- /dev/null
+++ b/libgo/runtime/go-copy.c
@@ -0,0 +1,21 @@
+/* go-copy.c -- the go builtin copy function.
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+
+/* We should be OK if we don't split the stack here, since we are just
+   calling memmove which shouldn't need much stack.  If we don't do
+   this we will always split the stack, because of memmove.  */
+
+extern void
+__go_copy (void *, void *, size_t)
+  __attribute__ ((no_split_stack));
+
+void
+__go_copy (void *a, void *b, size_t len)
+{
+  __builtin_memmove (a, b, len);
+}
diff --git a/libgo/runtime/go-defer.c b/libgo/runtime/go-defer.c
new file mode 100644
index 000000000..6425f0586
--- /dev/null
+++ b/libgo/runtime/go-defer.c
@@ -0,0 +1,69 @@
+/* go-defer.c -- manage the defer stack.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+
+#include "go-alloc.h"
+#include "go-panic.h"
+#include "go-defer.h"
+
+/* This function is called each time we need to defer a call.  */
+
+void
+__go_defer (void *frame, void (*pfn) (void *), void *arg)
+{
+  struct __go_defer_stack *n;
+
+  if (__go_panic_defer == NULL)
+    __go_panic_defer = ((struct __go_panic_defer_struct *)
+			__go_alloc (sizeof (struct __go_panic_defer_struct)));
+
+  n = (struct __go_defer_stack *) __go_alloc (sizeof (struct __go_defer_stack));
+  n->__next = __go_panic_defer->__defer;
+  n->__frame = frame;
+  n->__panic = __go_panic_defer->__panic;
+  n->__pfn = pfn;
+  n->__arg = arg;
+  n->__retaddr = NULL;
+  __go_panic_defer->__defer = n;
+}
+
+/* This function is called when we want to undefer the stack.  */
+
+void
+__go_undefer (void *frame)
+{
+  if (__go_panic_defer == NULL)
+    return;
+  while (__go_panic_defer->__defer != NULL
+	 && __go_panic_defer->__defer->__frame == frame)
+    {
+      struct __go_defer_stack *d;
+      void (*pfn) (void *);
+
+      d = __go_panic_defer->__defer;
+      pfn = d->__pfn;
+      d->__pfn = NULL;
+
+      if (pfn != NULL)
+	(*pfn) (d->__arg);
+
+      __go_panic_defer->__defer = d->__next;
+      __go_free (d);
+    }
+}
+
+/* This function is called to record the address to which the deferred
+   function returns.  This may in turn be checked by __go_can_recover.
+   The frontend relies on this function returning false.  */
+
+_Bool
+__go_set_defer_retaddr (void *retaddr)
+{
+  if (__go_panic_defer != NULL && __go_panic_defer->__defer != NULL)
+    __go_panic_defer->__defer->__retaddr = retaddr;
+  return 0;
+}
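
A sketch of how compiler-generated code is expected to drive these
entry points (not part of the patch; the thunk and the use of a
local's address as the frame key are assumptions for illustration,
since FRAME only has to identify the caller's stack frame
consistently):

    extern void __go_defer (void *, void (*) (void *), void *);
    extern void __go_undefer (void *);

    static void
    deferred_thunk (void *arg)
    {
      (void) arg;  /* body of the deferred call */
    }

    static void
    compiled_function (void)
    {
      char frame;

      __go_defer (&frame, deferred_thunk, 0);
      /* ... function body ... */
      __go_undefer (&frame);  /* pops and runs this frame's defers */
    }
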
diff --git a/libgo/runtime/go-defer.h b/libgo/runtime/go-defer.h
new file mode 100644
index 000000000..f8924f3b6
--- /dev/null
+++ b/libgo/runtime/go-defer.h
@@ -0,0 +1,36 @@
+/* go-defer.h -- the defer stack.
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+struct __go_panic_stack;
+
+/* The defer stack is a list of these structures.  */
+
+struct __go_defer_stack
+{
+  /* The next entry in the stack.  */
+  struct __go_defer_stack *__next;
+
+  /* The frame pointer for the function which called this defer
+     statement.  */
+  void *__frame;
+
+  /* The value of the panic stack when this function is deferred.
+     This function can not recover this value from the panic stack.
+     This can happen if a deferred function uses its own defer
+     statement.  */
+  struct __go_panic_stack *__panic;
+
+  /* The function to call.  */
+  void (*__pfn) (void *);
+
+  /* The argument to pass to the function.  */
+  void *__arg;
+
+  /* The return address that a recover thunk matches against.  This is
+     set by __go_set_defer_retaddr which is called by the thunks
+     created by defer statements.  */
+  const void *__retaddr;
+};
diff --git a/libgo/runtime/go-deferred-recover.c b/libgo/runtime/go-deferred-recover.c
new file mode 100644
index 000000000..2d9ca1442
--- /dev/null
+++ b/libgo/runtime/go-deferred-recover.c
@@ -0,0 +1,92 @@
+/* go-deferred-recover.c -- support for a deferred recover function.
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+
+#include "go-panic.h"
+#include "go-defer.h"
+
+/* This is called when a call to recover is deferred.  That is,
+   something like
+     defer recover()
+
+   We need to handle this specially.  In 6g/8g, the recover function
+   looks up the stack frame.  In particular, that means that a
+   deferred recover will not recover a panic thrown in the same
+   function that defers the recover.  It will only recover a panic
+   thrown in a function that defers the deferred call to recover.
+
+   In other words:
+
+   func f1() {
+	defer recover()	// does not stop panic
+	panic(0)
+   }
+
+   func f2() {
+	defer func() {
+		defer recover()	// stops panic(0)
+	}()
+	panic(0)
+   }
+
+   func f3() {
+	defer func() {
+		defer recover()	// does not stop panic
+		panic(0)
+	}()
+	panic(1)
+   }
+
+   func f4() {
+	defer func() {
+		defer func() {
+			defer recover()	// stops panic(0)
+		}()
+		panic(0)
+	}()
+	panic(1)
+   }
+
+   The interesting case here is f3.  As can be seen from f2, the
+   deferred recover could pick up panic(1).  However, this does not
+   happen because it is blocked by the panic(0).
+
+   When a function calls recover, then when we invoke it we pass a
+   hidden parameter indicating whether it should recover something.
+   This parameter is set based on whether the function is being
+   invoked directly from defer.  The parameter winds up determining
+   whether __go_recover or __go_deferred_recover is called at all.
+
+   In the case of a deferred recover, the hidden parameter which
+   controls the call is actually the one set up for the function which
+   runs the defer recover() statement.  That is the right thing in all
+   the cases above except for f3.  In f3 the function is permitted to
+   call recover, but the deferred recover call is not.  We address
+   that here by checking for that specific case before calling
+   recover.  If this function was deferred when there is already a
+   panic on the panic stack, then we can only recover that panic, not
+   any other.
+
+   Note that we can get away with using a special function here
+   because you are not permitted to take the address of a predeclared
+   function like recover.  */
+
+struct __go_empty_interface
+__go_deferred_recover ()
+{
+  if (__go_panic_defer == NULL
+      || __go_panic_defer->__defer == NULL
+      || __go_panic_defer->__defer->__panic != __go_panic_defer->__panic)
+    {
+      struct __go_empty_interface ret;
+
+      ret.__type_descriptor = NULL;
+      ret.__object = NULL;
+      return ret;
+    }
+  return __go_recover();
+}
diff --git a/libgo/runtime/go-eface-compare.c b/libgo/runtime/go-eface-compare.c
new file mode 100644
index 000000000..c90177e20
--- /dev/null
+++ b/libgo/runtime/go-eface-compare.c
@@ -0,0 +1,32 @@
+/* go-eface-compare.c -- compare two empty values.
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "interface.h"
+
+/* Compare two interface values.  Return 0 for equal, not zero for not
+   equal (return value is like strcmp).  */
+
+int
+__go_empty_interface_compare (struct __go_empty_interface left,
+			      struct __go_empty_interface right)
+{
+  const struct __go_type_descriptor *left_descriptor;
+
+  left_descriptor = left.__type_descriptor;
+  if (left_descriptor == NULL && right.__type_descriptor == NULL)
+    return 0;
+  if (left_descriptor == NULL || right.__type_descriptor == NULL)
+    return 1;
+  if (!__go_type_descriptors_equal (left_descriptor,
+				    right.__type_descriptor))
+    return 1;
+  if (__go_is_pointer_type (left_descriptor))
+    return left.__object == right.__object ? 0 : 1;
+  if (!left_descriptor->__equalfn (left.__object, right.__object,
+				   left_descriptor->__size))
+    return 1;
+  return 0;
+}
diff --git a/libgo/runtime/go-eface-val-compare.c b/libgo/runtime/go-eface-val-compare.c
new file mode 100644
index 000000000..319ede243
--- /dev/null
+++ b/libgo/runtime/go-eface-val-compare.c
@@ -0,0 +1,32 @@
+/* go-eface-val-compare.c -- compare an empty interface with a value.
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-type.h"
+#include "interface.h"
+
+/* Compare an empty interface with a value.  Return 0 for equal, not
+   zero for not equal (return value is like strcmp).  */
+
+int
+__go_empty_interface_value_compare (
+    struct __go_empty_interface left,
+    const struct __go_type_descriptor *right_descriptor,
+    const void *val)
+{
+  const struct __go_type_descriptor *left_descriptor;
+
+  left_descriptor = left.__type_descriptor;
+  if (left_descriptor == NULL)
+    return 1;
+  if (!__go_type_descriptors_equal (left_descriptor, right_descriptor))
+    return 1;
+  if (__go_is_pointer_type (left_descriptor))
+    return left.__object == val ? 0 : 1;
+  if (!left_descriptor->__equalfn (left.__object, val,
+				   left_descriptor->__size))
+    return 1;
+  return 0;
+}
diff --git a/libgo/runtime/go-getgoroot.c b/libgo/runtime/go-getgoroot.c
new file mode 100644
index 000000000..e74fee886
--- /dev/null
+++ b/libgo/runtime/go-getgoroot.c
@@ -0,0 +1,26 @@
+/* go-getgoroot.c -- getgoroot function for runtime package.
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdlib.h>
+
+#include "go-string.h"
+
+struct __go_string getgoroot (void) asm ("libgo_runtime.runtime.getgoroot");
+
+struct __go_string
+getgoroot ()
+{
+  const char *p;
+  struct __go_string ret;
+
+  p = getenv ("GOROOT");
+  ret.__data = (const unsigned char *) p;
+  if (ret.__data == NULL)
+    ret.__length = 0;
+  else
+    ret.__length = __builtin_strlen (p);
+  return ret;
+}
diff --git a/libgo/runtime/go-go.c b/libgo/runtime/go-go.c
new file mode 100644
index 000000000..3d8e9e629
--- /dev/null
+++ b/libgo/runtime/go-go.c
@@ -0,0 +1,656 @@
+/* go-go.c -- the go function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <errno.h>
+#include <limits.h>
+#include <signal.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <semaphore.h>
+
+#include "config.h"
+#include "go-assert.h"
+#include "go-panic.h"
+#include "go-alloc.h"
+#include "runtime.h"
+#include "malloc.h"
+
+#ifdef USING_SPLIT_STACK
+/* FIXME: This is not declared anywhere.  */
+extern void *__splitstack_find (void *, void *, size_t *, void **, void **,
+				void **);
+#endif
+
+/* We stop the threads by sending them the signal GO_SIG_STOP and we
+   start them by sending them the signal GO_SIG_START.  */
+
+#define GO_SIG_START (SIGRTMIN + 1)
+#define GO_SIG_STOP (SIGRTMIN + 2)
+
+#ifndef SA_RESTART
+  #define SA_RESTART 0
+#endif
+
+/* A doubly linked list of the threads we have started.  */
+
+struct __go_thread_id
+{
+  /* Links.  */
+  struct __go_thread_id *prev;
+  struct __go_thread_id *next;
+  /* True if the thread ID has not yet been filled in.  */
+  _Bool tentative;
+  /* Thread ID.  */
+  pthread_t id;
+  /* Thread's M structure.  */
+  struct M *m;
+  /* If the thread ID has not been filled in, the function we are
+     running.  */
+  void (*pfn) (void *);
+  /* If the thread ID has not been filled in, the argument to the
+     function.  */
+  void *arg;
+};
+
+static struct __go_thread_id *__go_all_thread_ids;
+
+/* A lock to control access to ALL_THREAD_IDS.  */
+
+static pthread_mutex_t __go_thread_ids_lock = PTHREAD_MUTEX_INITIALIZER;
+
+/* A semaphore used to wait until all the threads have stopped.  */
+
+static sem_t __go_thread_ready_sem;
+
+/* A signal set used to wait until garbage collection is complete.  */
+
+static sigset_t __go_thread_wait_sigset;
+
+/* Remove the current thread from the list of threads.  */
+
+static void
+remove_current_thread (void)
+{
+  struct __go_thread_id *list_entry;
+  MCache *mcache;
+  int i;
+  
+  list_entry = m->list_entry;
+  mcache = m->mcache;
+
+  i = pthread_mutex_lock (&__go_thread_ids_lock);
+  __go_assert (i == 0);
+
+  if (list_entry->prev != NULL)
+    list_entry->prev->next = list_entry->next;
+  else
+    __go_all_thread_ids = list_entry->next;
+  if (list_entry->next != NULL)
+    list_entry->next->prev = list_entry->prev;
+
+  /* This will lock runtime_mheap as needed.  */
+  runtime_MCache_ReleaseAll (mcache);
+
+  /* This should never deadlock--there shouldn't be any code that
+     holds the runtime_mheap lock when locking __go_thread_ids_lock.
+     We don't want to do this after releasing __go_thread_ids_lock
+     because it will mean that the garbage collector might run, and
+     the garbage collector does not try to lock runtime_mheap in all
+     cases since it knows it is running single-threaded.  */
+  runtime_lock (&runtime_mheap);
+  mstats.heap_alloc += mcache->local_alloc;
+  mstats.heap_objects += mcache->local_objects;
+  __builtin_memset (mcache, 0, sizeof (struct MCache));
+  runtime_FixAlloc_Free (&runtime_mheap.cachealloc, mcache);
+  runtime_unlock (&runtime_mheap);
+
+  /* As soon as we release this lock, a GC could run.  Since this
+     thread is no longer on the list, the GC will not find our M
+     structure, so it could get freed at any time.  That means that
+     any code from here to thread exit must not assume that m is
+     valid.  */
+  m = NULL;
+
+  i = pthread_mutex_unlock (&__go_thread_ids_lock);
+  __go_assert (i == 0);
+
+  free (list_entry);
+}
+
+/* Start the thread.  */
+
+static void *
+start_go_thread (void *thread_arg)
+{
+  struct M *newm = (struct M *) thread_arg;
+  void (*pfn) (void *);
+  void *arg;
+  struct __go_thread_id *list_entry;
+  int i;
+
+#ifdef __rtems__
+  __wrap_rtems_task_variable_add ((void **) &m);
+  __wrap_rtems_task_variable_add ((void **) &__go_panic_defer);
+#endif
+
+  m = newm;
+
+  list_entry = newm->list_entry;
+
+  pfn = list_entry->pfn;
+  arg = list_entry->arg;
+
+#ifndef USING_SPLIT_STACK
+  /* If we don't support split stack, record the current stack as the
+     top of the stack.  There shouldn't be anything relevant to the
+     garbage collector above this point.  */
+  m->gc_sp = (void *) &arg;
+#endif
+
+  /* Finish up the entry on the thread list.  */
+
+  i = pthread_mutex_lock (&__go_thread_ids_lock);
+  __go_assert (i == 0);
+
+  list_entry->id = pthread_self ();
+  list_entry->pfn = NULL;
+  list_entry->arg = NULL;
+  list_entry->tentative = 0;
+
+  i = pthread_mutex_unlock (&__go_thread_ids_lock);
+  __go_assert (i == 0);
+
+  (*pfn) (arg);
+
+  remove_current_thread ();
+
+  return NULL;
+}
+
+/* The runtime.Goexit function.  */
+
+void Goexit (void) asm ("libgo_runtime.runtime.Goexit");
+
+void
+Goexit (void)
+{
+  remove_current_thread ();
+  pthread_exit (NULL);
+  abort ();
+}
+
+/* Implement the go statement.  */
+
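+/* A sketch of the sequence below: allocate an M and a tentative
+   thread-list entry, publish the entry under __go_thread_ids_lock so
+   that the collector can scan the new goroutine's function and
+   argument before its thread runs, then create a detached thread
+   running start_go_thread, which fills in the real thread id.  */
+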
+void
+__go_go (void (*pfn) (void*), void *arg)
+{
+  int i;
+  pthread_attr_t attr;
+  struct M *newm;
+  struct __go_thread_id *list_entry;
+  pthread_t tid;
+
+  i = pthread_attr_init (&attr);
+  __go_assert (i == 0);
+  i = pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
+  __go_assert (i == 0);
+
+#ifdef LINKER_SUPPORTS_SPLIT_STACK
+  /* The linker knows how to handle calls between code which uses
+     -fsplit-stack and code which does not.  That means that we can
+     run with a smaller stack and rely on the -fsplit-stack support to
+     save us.  The GNU/Linux glibc library won't let us have a very
+     small stack, but we make it as small as we can.  */
+#ifndef PTHREAD_STACK_MIN
+#define PTHREAD_STACK_MIN 8192
+#endif
+  i = pthread_attr_setstacksize (&attr, PTHREAD_STACK_MIN);
+  __go_assert (i == 0);
+#endif
+
+  newm = __go_alloc (sizeof (M));
+
+  list_entry = malloc (sizeof (struct __go_thread_id));
+  list_entry->prev = NULL;
+  list_entry->next = NULL;
+  list_entry->tentative = 1;
+  list_entry->m = newm;
+  list_entry->pfn = pfn;
+  list_entry->arg = arg;
+
+  newm->list_entry = list_entry;
+
+  newm->mcache = runtime_allocmcache ();
+
+  /* Add the thread to the list of all threads, marked as tentative
+     since it is not yet ready to go.  */
+  i = pthread_mutex_lock (&__go_thread_ids_lock);
+  __go_assert (i == 0);
+
+  if (__go_all_thread_ids != NULL)
+    __go_all_thread_ids->prev = list_entry;
+  list_entry->next = __go_all_thread_ids;
+  __go_all_thread_ids = list_entry;
+
+  i = pthread_mutex_unlock (&__go_thread_ids_lock);
+  __go_assert (i == 0);
+
+  /* Start the thread.  */
+  i = pthread_create (&tid, &attr, start_go_thread, newm);
+  __go_assert (i == 0);
+
+  i = pthread_attr_destroy (&attr);
+  __go_assert (i == 0);
+}
+
+/* This is the signal handler for GO_SIG_START.  The garbage collector
+   will send this signal to a thread when it wants the thread to
+   start.  We don't have to actually do anything here, but we need a
+   signal handler since ignoring the signal will mean that the
+   sigsuspend will never see it.  */
+
+static void
+gc_start_handler (int sig __attribute__ ((unused)))
+{
+}
+
+/* Tell the garbage collector that we are ready, and wait for the
+   garbage collector to tell us that it is done.  This may be called
+   by a signal handler, so it is restricted to using functions which
+   are async cancel safe.  */
+
+static void
+stop_for_gc (void)
+{
+  int i;
+
+  /* Tell the garbage collector about our stack.  */
+#ifdef USING_SPLIT_STACK
+  m->gc_sp = __splitstack_find (NULL, NULL, &m->gc_len,
+				&m->gc_next_segment, &m->gc_next_sp,
+				&m->gc_initial_sp);
+#else
+  {
+    uintptr_t top = (uintptr_t) m->gc_sp;
+    uintptr_t bottom = (uintptr_t) &top;
+    if (top < bottom)
+      {
+	m->gc_next_sp = m->gc_sp;
+	m->gc_len = bottom - top;
+      }
+    else
+      {
+	m->gc_next_sp = (void *) bottom;
+	m->gc_len = top - bottom;
+      }
+  }
+#endif
+
+  /* FIXME: Perhaps we should just move __go_panic_defer into M.  */
+  m->gc_panic_defer = __go_panic_defer;
+
+  /* Tell the garbage collector that we are ready by posting to the
+     semaphore.  */
+  i = sem_post (&__go_thread_ready_sem);
+  __go_assert (i == 0);
+
+  /* Wait for the garbage collector to tell us to continue.  */
+  sigsuspend (&__go_thread_wait_sigset);
+}
+
+/* This is the signal handler for GO_SIG_STOP.  The garbage collector
+   will send this signal to a thread when it wants the thread to
+   stop.  */
+
+static void
+gc_stop_handler (int sig __attribute__ ((unused)))
+{
+  struct M *pm = m;
+
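+  /* Note that __sync_bool_compare_and_swap (p, 1, 1) is used below as
+     an atomic load: it succeeds exactly when *p is already 1 and
+     never changes the value.  */
+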
+  if (__sync_bool_compare_and_swap (&pm->holds_finlock, 1, 1))
+    {
+      /* We can't interrupt the thread while it holds the finalizer
+	 lock.  Otherwise we can get into a deadlock when mark calls
+	 runtime_walkfintab.  */
+      __sync_bool_compare_and_swap (&pm->gcing_for_finlock, 0, 1);
+      return;
+    }
+
+  if (__sync_bool_compare_and_swap (&pm->mallocing, 1, 1))
+    {
+      /* m->mallocing was already non-zero.  We can't interrupt the
+	 thread while it is running a malloc.  Instead, tell it to
+	 call back to us when done.  */
+      __sync_bool_compare_and_swap (&pm->gcing, 0, 1);
+      return;
+    }
+
+  if (__sync_bool_compare_and_swap (&pm->nomemprof, 1, 1))
+    {
+      /* Similarly, we can't interrupt the thread while it is building
+	 profiling information.  Otherwise we can get into a deadlock
+	 when sweepspan calls MProf_Free.  */
+      __sync_bool_compare_and_swap (&pm->gcing_for_prof, 0, 1);
+      return;
+    }
+
+  stop_for_gc ();
+}
+
+/* This is called by malloc when it gets a signal during the malloc
+   call itself.  */
+
+int
+__go_run_goroutine_gc (int r)
+{
+  /* Force callee-saved registers to be saved on the stack.  This is
+     not needed if we are invoked from the signal handler, but it is
+     needed if we are called directly, since otherwise we might miss
+     something that a function somewhere up the call stack is holding
+     in a register.  */
+  __builtin_unwind_init ();
+
+  stop_for_gc ();
+
+  /* This avoids tail recursion, to make sure that the saved registers
+     are on the stack.  */
+  return r;
+}
+
+/* Stop all the other threads for garbage collection.  */
+
+void
+runtime_stoptheworld (void)
+{
+  int i;
+  pthread_t me;
+  int c;
+  struct __go_thread_id *p;
+
+  i = pthread_mutex_lock (&__go_thread_ids_lock);
+  __go_assert (i == 0);
+
+  me = pthread_self ();
+  c = 0;
+  p = __go_all_thread_ids;
+  while (p != NULL)
+    {
+      if (p->tentative || pthread_equal (me, p->id))
+	p = p->next;
+      else
+	{
+	  i = pthread_kill (p->id, GO_SIG_STOP);
+	  if (i == 0)
+	    {
+	      ++c;
+	      p = p->next;
+	    }
+	  else if (i == ESRCH)
+	    {
+	      struct __go_thread_id *next;
+
+	      /* This thread died somehow.  Remove it from the
+		 list.  */
+	      next = p->next;
+	      if (p->prev != NULL)
+		p->prev->next = next;
+	      else
+		__go_all_thread_ids = next;
+	      if (next != NULL)
+		next->prev = p->prev;
+	      free (p);
+	      p = next;
+	    }
+	  else
+	    abort ();
+	}
+    }
+
+  /* Wait for each thread to receive the signal and post to the
+     semaphore.  If a thread receives the signal but contrives to die
+     before it posts to the semaphore, then we will hang forever
+     here.  */
+
+  while (c > 0)
+    {
+      i = sem_wait (&__go_thread_ready_sem);
+      if (i < 0 && errno == EINTR)
+	continue;
+      __go_assert (i == 0);
+      --c;
+    }
+
+  /* The gc_panic_defer field should now be set for all M's except the
+     one in this thread.  Set this one now.  */
+  m->gc_panic_defer = __go_panic_defer;
+
+  /* Leave with __go_thread_ids_lock held.  */
+}
+
+/* Scan all the stacks for garbage collection.  This should be called
+   with __go_thread_ids_lock held.  */
+
+void
+__go_scanstacks (void (*scan) (byte *, int64))
+{
+  pthread_t me;
+  struct __go_thread_id *p;
+
+  /* Make sure all the registers for this thread are on the stack.  */
+  __builtin_unwind_init ();
+
+  me = pthread_self ();
+  for (p = __go_all_thread_ids; p != NULL; p = p->next)
+    {
+      if (p->tentative)
+	{
+	  /* The goroutine function and argument can be allocated on
+	     the heap, so we have to scan them for a thread that has
+	     not yet started.  */
+	  scan ((void *) &p->pfn, sizeof (void *));
+	  scan ((void *) &p->arg, sizeof (void *));
+	  scan ((void *) &p->m, sizeof (void *));
+	  continue;
+	}
+
+#ifdef USING_SPLIT_STACK
+
+      void *sp;
+      size_t len;
+      void *next_segment;
+      void *next_sp;
+      void *initial_sp;
+
+      if (pthread_equal (me, p->id))
+	{
+	  next_segment = NULL;
+	  next_sp = NULL;
+	  initial_sp = NULL;
+	  sp = __splitstack_find (NULL, NULL, &len, &next_segment,
+				  &next_sp, &initial_sp);
+	}
+      else
+	{
+	  sp = p->m->gc_sp;
+	  len = p->m->gc_len;
+	  next_segment = p->m->gc_next_segment;
+	  next_sp = p->m->gc_next_sp;
+	  initial_sp = p->m->gc_initial_sp;
+	}
+
+      while (sp != NULL)
+	{
+	  scan (sp, len);
+	  sp = __splitstack_find (next_segment, next_sp, &len,
+				  &next_segment, &next_sp, &initial_sp);
+	}
+
+#else /* !defined(USING_SPLIT_STACK) */
+
+      if (pthread_equal (me, p->id))
+	{
+	  uintptr_t top = (uintptr_t) m->gc_sp;
+	  uintptr_t bottom = (uintptr_t) &top;
+	  if (top < bottom)
+	    scan (m->gc_sp, bottom - top);
+	  else
+	    scan ((void *) bottom, top - bottom);
+	}
+      else
+	{
+	  scan (p->m->gc_next_sp, p->m->gc_len);
+	}
+	
+#endif /* !defined(USING_SPLIT_STACK) */
+
+      /* Also scan the M structure while we're at it.  */
+
+      scan ((void *) &p->m, sizeof (void *));
+    }
+}
+
+/* Release all the memory caches.  This is called with
+   __go_thread_ids_lock held.  */
+
+void
+__go_stealcache (void)
+{
+  struct __go_thread_id *p;
+
+  for (p = __go_all_thread_ids; p != NULL; p = p->next)
+    runtime_MCache_ReleaseAll (p->m->mcache);
+}
+
+/* Gather memory cache statistics.  This is called with
+   __go_thread_ids_lock held.  */
+
+void
+__go_cachestats (void)
+{
+  struct __go_thread_id *p;
+
+  for (p = __go_all_thread_ids; p != NULL; p = p->next)
+    {
+      MCache *c;
+
+      c = p->m->mcache;
+      mstats.heap_alloc += c->local_alloc;
+      c->local_alloc = 0;
+      mstats.heap_objects += c->local_objects;
+      c->local_objects = 0;
+    }
+}
+
+/* Start the other threads after garbage collection.  */
+
+void
+runtime_starttheworld (void)
+{
+  int i;
+  pthread_t me;
+  struct __go_thread_id *p;
+
+  /* Here __go_thread_ids_lock should be held.  */
+
+  me = pthread_self ();
+  p = __go_all_thread_ids;
+  while (p != NULL)
+    {
+      if (p->tentative || pthread_equal (me, p->id))
+	p = p->next;
+      else
+	{
+	  i = pthread_kill (p->id, GO_SIG_START);
+	  if (i == 0)
+	    p = p->next;
+	  else
+	    abort ();
+	}
+    }
+
+  i = pthread_mutex_unlock (&__go_thread_ids_lock);
+  __go_assert (i == 0);
+}
+
+/* Initialize the interaction between goroutines and the garbage
+   collector.  */
+
+void
+__go_gc_goroutine_init (void *sp __attribute__ ((unused)))
+{
+  struct __go_thread_id *list_entry;
+  int i;
+  sigset_t sset;
+  struct sigaction act;
+
+  /* Add the initial thread to the list of all threads.  */
+
+  list_entry = malloc (sizeof (struct __go_thread_id));
+  list_entry->prev = NULL;
+  list_entry->next = NULL;
+  list_entry->tentative = 0;
+  list_entry->id = pthread_self ();
+  list_entry->m = m;
+  list_entry->pfn = NULL;
+  list_entry->arg = NULL;
+  __go_all_thread_ids = list_entry;
+
+  /* Initialize the semaphore which signals when threads are ready for
+     GC.  */
+
+  i = sem_init (&__go_thread_ready_sem, 0, 0);
+  __go_assert (i == 0);
+
+  /* Fetch the current signal mask.  */
+
+  i = sigemptyset (&sset);
+  __go_assert (i == 0);
+  i = sigprocmask (SIG_BLOCK, NULL, &sset);
+  __go_assert (i == 0);
+
+  /* Make sure that GO_SIG_START is not blocked and GO_SIG_STOP is
+     blocked, and save that set for use with later calls to sigsuspend
+     while waiting for GC to complete.  */
+
+  i = sigdelset (&sset, GO_SIG_START);
+  __go_assert (i == 0);
+  i = sigaddset (&sset, GO_SIG_STOP);
+  __go_assert (i == 0);
+  __go_thread_wait_sigset = sset;
+
+  /* Block GO_SIG_START and unblock GO_SIG_STOP, and use that for
+     the process signal mask.  */
+
+  i = sigaddset (&sset, GO_SIG_START);
+  __go_assert (i == 0);
+  i = sigdelset (&sset, GO_SIG_STOP);
+  __go_assert (i == 0);
+  i = sigprocmask (SIG_SETMASK, &sset, NULL);
+  __go_assert (i == 0);
+
+  /* Install the signal handlers.  */
+  memset (&act, 0, sizeof act);
+  i = sigemptyset (&act.sa_mask);
+  __go_assert (i == 0);
+
+  act.sa_handler = gc_start_handler;
+  act.sa_flags = SA_RESTART;
+  i = sigaction (GO_SIG_START, &act, NULL);
+  __go_assert (i == 0);
+
+  /* We could consider using an alternate signal stack for this.  The
+     function does not use much stack space, so it may be OK.  */
+  act.sa_handler = gc_stop_handler;
+  i = sigaction (GO_SIG_STOP, &act, NULL);
+  __go_assert (i == 0);
+
+#ifndef USING_SPLIT_STACK
+  /* If we don't support split stack, record the current stack as the
+     top of the stack.  */
+  m->gc_sp = sp;
+#endif
+}
diff --git a/libgo/runtime/go-gomaxprocs.c b/libgo/runtime/go-gomaxprocs.c
new file mode 100644
index 000000000..04dc448b8
--- /dev/null
+++ b/libgo/runtime/go-gomaxprocs.c
@@ -0,0 +1,15 @@
+/* go-gomaxprocs.c -- runtime.GOMAXPROCS.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+/* This is the runtime.GOMAXPROCS function.  This currently does
+   nothing, since each goroutine runs in a separate thread anyhow.  */
+
+void GOMAXPROCS (int) asm ("libgo_runtime.runtime.GOMAXPROCS");
+
+void
+GOMAXPROCS (int n __attribute__ ((unused)))
+{
+}
diff --git a/libgo/runtime/go-int-array-to-string.c b/libgo/runtime/go-int-array-to-string.c
new file mode 100644
index 000000000..46a33dafc
--- /dev/null
+++ b/libgo/runtime/go-int-array-to-string.c
@@ -0,0 +1,85 @@
+/* go-int-array-to-string.c -- convert an array of ints to a string in Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-assert.h"
+#include "go-string.h"
+#include "runtime.h"
+#include "malloc.h"
+
+struct __go_string
+__go_int_array_to_string (const void* p, size_t len)
+{
+  const int *ints;
+  size_t slen;
+  size_t i;
+  unsigned char *retdata;
+  struct __go_string ret;
+  unsigned char *s;
+
+  ints = (const int *) p;
+
+  slen = 0;
+  for (i = 0; i < len; ++i)
+    {
+      int v;
+
+      v = ints[i];
+
+      if (v > 0x10ffff)
+	v = 0xfffd;
+
+      if (v <= 0x7f)
+	slen += 1;
+      else if (v <= 0x7ff)
+	slen += 2;
+      else if (v <= 0xffff)
+	slen += 3;
+      else
+	slen += 4;
+    }
+
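+  /* Worked example (illustrative): the values { 'A', 0xe9, 0x20ac }
+     need 1 + 2 + 3 == 6 bytes, since U+00E9 encodes as 0xC3 0xA9 and
+     U+20AC as 0xE2 0x82 0xAC.  */
+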
+  retdata = runtime_mallocgc (slen, RefNoPointers, 1, 0);
+  ret.__data = retdata;
+  ret.__length = slen;
+
+  s = retdata;
+  for (i = 0; i < len; ++i)
+    {
+      int v;
+
+      v = ints[i];
+
+      /* If V is out of range for UTF-8, substitute the replacement
+	 character.  */
+      if (v > 0x10ffff)
+	v = 0xfffd;
+
+      if (v <= 0x7f)
+	*s++ = v;
+      else if (v <= 0x7ff)
+	{
+	  *s++ = 0xc0 | ((v >> 6) & 0x1f);
+	  *s++ = 0x80 | (v & 0x3f);
+	}
+      else if (v <= 0xffff)
+	{
+	  *s++ = 0xe0 | ((v >> 12) & 0xf);
+	  *s++ = 0x80 | ((v >> 6) & 0x3f);
+	  *s++ = 0x80 | (v & 0x3f);
+	}
+      else
+	{
+	  *s++ = 0xf0 | ((v >> 18) & 0x7);
+	  *s++ = 0x80 | ((v >> 12) & 0x3f);
+	  *s++ = 0x80 | ((v >> 6) & 0x3f);
+	  *s++ = 0x80 | (v & 0x3f);
+	}
+    }
+
+  __go_assert ((size_t) (s - retdata) == slen);
+
+  return ret;
+}
diff --git a/libgo/runtime/go-int-to-string.c b/libgo/runtime/go-int-to-string.c
new file mode 100644
index 000000000..24d729cf8
--- /dev/null
+++ b/libgo/runtime/go-int-to-string.c
@@ -0,0 +1,60 @@
+/* go-int-to-string.c -- convert an integer to a string in Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-string.h"
+#include "runtime.h"
+#include "malloc.h"
+
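+/* Example (illustrative): __go_int_to_string (0x20ac) returns the
+   three-byte string "\xe2\x82\xac", the UTF-8 encoding of the euro
+   sign, and any value above 0x10ffff is encoded as the replacement
+   character U+FFFD.  */
+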
+struct __go_string
+__go_int_to_string (int v)
+{
+  char buf[4];
+  int len;
+  unsigned char *retdata;
+  struct __go_string ret;
+
+  if (v <= 0x7f)
+    {
+      buf[0] = v;
+      len = 1;
+    }
+  else if (v <= 0x7ff)
+    {
+      buf[0] = 0xc0 + (v >> 6);
+      buf[1] = 0x80 + (v & 0x3f);
+      len = 2;
+    }
+  else
+    {
+      /* If the value is out of range for UTF-8, turn it into the
+	 "replacement character".  */
+      if (v > 0x10ffff)
+	v = 0xfffd;
+
+      if (v <= 0xffff)
+	{
+	  buf[0] = 0xe0 + (v >> 12);
+	  buf[1] = 0x80 + ((v >> 6) & 0x3f);
+	  buf[2] = 0x80 + (v & 0x3f);
+	  len = 3;
+	}
+      else
+	{
+	  buf[0] = 0xf0 + (v >> 18);
+	  buf[1] = 0x80 + ((v >> 12) & 0x3f);
+	  buf[2] = 0x80 + ((v >> 6) & 0x3f);
+	  buf[3] = 0x80 + (v & 0x3f);
+	  len = 4;
+	}
+    }
+
+  retdata = runtime_mallocgc (len, RefNoPointers, 1, 0);
+  __builtin_memcpy (retdata, buf, len);
+  ret.__data = retdata;
+  ret.__length = len;
+
+  return ret;
+}
diff --git a/libgo/runtime/go-interface-compare.c b/libgo/runtime/go-interface-compare.c
new file mode 100644
index 000000000..11c75e812
--- /dev/null
+++ b/libgo/runtime/go-interface-compare.c
@@ -0,0 +1,31 @@
+/* go-interface-compare.c -- compare two interface values.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "interface.h"
+
+/* Compare two interface values.  Return 0 for equal, not zero for not
+   equal (return value is like strcmp).  */
+
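+/* Illustrative example: two interfaces holding the same pointer value
+   of a pointer type compare equal by identity below, while interfaces
+   holding struct values are compared with the dynamic type's
+   __equalfn over __size bytes.  */
+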
+int
+__go_interface_compare (struct __go_interface left,
+			struct __go_interface right)
+{
+  const struct __go_type_descriptor *left_descriptor;
+
+  if (left.__methods == NULL && right.__methods == NULL)
+    return 0;
+  if (left.__methods == NULL || right.__methods == NULL)
+    return 1;
+  left_descriptor = left.__methods[0];
+  if (!__go_type_descriptors_equal (left_descriptor, right.__methods[0]))
+    return 1;
+  if (__go_is_pointer_type (left_descriptor))
+    return left.__object == right.__object ? 0 : 1;
+  if (!left_descriptor->__equalfn (left.__object, right.__object,
+				   left_descriptor->__size))
+    return 1;
+  return 0;
+}
diff --git a/libgo/runtime/go-interface-eface-compare.c b/libgo/runtime/go-interface-eface-compare.c
new file mode 100644
index 000000000..9de8424ac
--- /dev/null
+++ b/libgo/runtime/go-interface-eface-compare.c
@@ -0,0 +1,32 @@
+/* go-interface-eface-compare.c -- compare non-empty and empty interface.
+
+   Copyright 2011 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "interface.h"
+
+/* Compare a non-empty interface value with an empty interface value.
+   Return 0 for equal, not zero for not equal (return value is like
+   strcmp).  */
+
+int
+__go_interface_empty_compare (struct __go_interface left,
+			      struct __go_empty_interface right)
+{
+  const struct __go_type_descriptor *left_descriptor;
+
+  if (left.__methods == NULL && right.__type_descriptor == NULL)
+    return 0;
+  if (left.__methods == NULL || right.__type_descriptor == NULL)
+    return 1;
+  left_descriptor = left.__methods[0];
+  if (!__go_type_descriptors_equal (left_descriptor, right.__type_descriptor))
+    return 1;
+  if (__go_is_pointer_type (left_descriptor))
+    return left.__object == right.__object ? 0 : 1;
+  if (!left_descriptor->__equalfn (left.__object, right.__object,
+				   left_descriptor->__size))
+    return 1;
+  return 0;
+}
diff --git a/libgo/runtime/go-interface-val-compare.c b/libgo/runtime/go-interface-val-compare.c
new file mode 100644
index 000000000..15898924a
--- /dev/null
+++ b/libgo/runtime/go-interface-val-compare.c
@@ -0,0 +1,32 @@
+/* go-interface-val-compare.c -- compare an interface to a value.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-type.h"
+#include "interface.h"
+
+/* Compare two interface values.  Return 0 for equal, not zero for not
+   equal (return value is like strcmp).  */
+
+int
+__go_interface_value_compare (
+    struct __go_interface left,
+    const struct __go_type_descriptor *right_descriptor,
+    const void *val)
+{
+  const struct __go_type_descriptor *left_descriptor;
+
+  if (left.__methods == NULL)
+    return 1;
+  left_descriptor = left.__methods[0];
+  if (!__go_type_descriptors_equal (left_descriptor, right_descriptor))
+    return 1;
+  if (__go_is_pointer_type (left_descriptor))
+    return left.__object == val ? 0 : 1;
+  if (!left_descriptor->__equalfn (left.__object, val,
+				   left_descriptor->__size))
+    return 1;
+  return 0;
+}
diff --git a/libgo/runtime/go-lock-os-thread.c b/libgo/runtime/go-lock-os-thread.c
new file mode 100644
index 000000000..204f11dce
--- /dev/null
+++ b/libgo/runtime/go-lock-os-thread.c
@@ -0,0 +1,24 @@
+/* go-lock-os-thread.c -- the LockOSThread and UnlockOSThread functions.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+/* The runtime.LockOSThread and runtime.UnlockOSThread functions are
+   meaningless in the current implementation, since for us a goroutine
+   always stays on a single OS thread.  */
+
+extern void LockOSThread (void) __asm__ ("libgo_runtime.runtime.LockOSThread");
+
+void
+LockOSThread (void)
+{
+}
+
+extern void UnlockOSThread (void)
+  __asm__ ("libgo_runtime.runtime.UnlockOSThread");
+
+void
+UnlockOSThread (void)
+{
+}
diff --git a/libgo/runtime/go-main.c b/libgo/runtime/go-main.c
new file mode 100644
index 000000000..a6dbf347f
--- /dev/null
+++ b/libgo/runtime/go-main.c
@@ -0,0 +1,89 @@
+/* go-main.c -- the main function for a Go program.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "config.h"
+
+#include <stdlib.h>
+#include <time.h>
+
+#ifdef HAVE_FPU_CONTROL_H
+#include <fpu_control.h>
+#endif
+
+#include "go-alloc.h"
+#include "array.h"
+#include "go-signal.h"
+#include "go-string.h"
+
+#include "runtime.h"
+#include "malloc.h"
+
+#undef int
+#undef char
+#undef unsigned
+
+/* The main function for a Go program.  This records the command line
+   parameters, calls the real main function, and returns a zero status
+   if the real main function returns.  */
+
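+/* Startup order implemented below: initialize the allocator, register
+   the initial thread with the garbage collector, record the command
+   line and environment, install the signal handlers, seed the C
+   random number generator, run the package initializers, enable the
+   collector, and finally call main.main.  */
+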
+extern char **environ;
+
+extern struct __go_open_array Args asm ("libgo_os.os.Args");
+
+extern struct __go_open_array Envs asm ("libgo_os.os.Envs");
+
+/* These functions are created for the main package.  */
+extern void __go_init_main (void);
+extern void real_main (void) asm ("main.main");
+
+/* The main function.  */
+
+int
+main (int argc, char **argv)
+{
+  int i;
+  struct __go_string *values;
+
+  runtime_mallocinit ();
+  __go_gc_goroutine_init (&argc);
+
+  Args.__count = argc;
+  Args.__capacity = argc;
+  values = __go_alloc (argc * sizeof (struct __go_string));
+  for (i = 0; i < argc; ++i)
+    {
+      values[i].__data = (unsigned char *) argv[i];
+      values[i].__length = __builtin_strlen (argv[i]);
+    }
+  Args.__values = values;
+
+  for (i = 0; environ[i] != NULL; ++i)
+    ;
+  Envs.__count = i;
+  Envs.__capacity = i;
+  values = __go_alloc (i * sizeof (struct __go_string));
+  for (i = 0; environ[i] != NULL; ++i)
+    {
+      values[i].__data = (unsigned char *) environ[i];
+      values[i].__length = __builtin_strlen (environ[i]);
+    }
+  Envs.__values = values;
+
+  __initsig ();
+
+#if defined(HAVE_SRANDOM)
+  srandom ((unsigned int) time (NULL));
+#else
+  srand ((unsigned int) time (NULL));
+#endif
+  __go_init_main ();
+
+  __go_enable_gc ();
+
+  real_main ();
+
+  return 0;
+}
diff --git a/libgo/runtime/go-map-delete.c b/libgo/runtime/go-map-delete.c
new file mode 100644
index 000000000..ec851e531
--- /dev/null
+++ b/libgo/runtime/go-map-delete.c
@@ -0,0 +1,52 @@
+/* go-map-delete.c -- delete an entry from a map.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+#include <stdlib.h>
+
+#include "go-alloc.h"
+#include "go-assert.h"
+#include "map.h"
+
+/* Delete the entry matching KEY from MAP.  */
+
+void
+__go_map_delete (struct __go_map *map, const void *key)
+{
+  const struct __go_map_descriptor *descriptor;
+  const struct __go_type_descriptor *key_descriptor;
+  size_t key_offset;
+  _Bool (*equalfn) (const void*, const void*, size_t);
+  size_t key_hash;
+  size_t key_size;
+  size_t bucket_index;
+  void **pentry;
+
+  descriptor = map->__descriptor;
+
+  key_descriptor = descriptor->__map_descriptor->__key_type;
+  key_offset = descriptor->__key_offset;
+  key_size = key_descriptor->__size;
+  __go_assert (key_size != 0 && key_size != -1UL);
+  equalfn = key_descriptor->__equalfn;
+
+  key_hash = key_descriptor->__hashfn (key, key_size);
+  bucket_index = key_hash % map->__bucket_count;
+
+  pentry = map->__buckets + bucket_index;
+  while (*pentry != NULL)
+    {
+      char *entry = (char *) *pentry;
+      if (equalfn (key, entry + key_offset, key_size))
+	{
+	  *pentry = *(void **) entry;
+	  __go_free (entry);
+	  map->__element_count -= 1;
+	  break;
+	}
+      pentry = (void **) entry;
+    }
+}
diff --git a/libgo/runtime/go-map-index.c b/libgo/runtime/go-map-index.c
new file mode 100644
index 000000000..1561c97a6
--- /dev/null
+++ b/libgo/runtime/go-map-index.c
@@ -0,0 +1,127 @@
+/* go-map-index.c -- find or insert an entry in a map.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+#include <stdlib.h>
+
+#include "go-alloc.h"
+#include "go-assert.h"
+#include "map.h"
+
+/* Rehash MAP to a larger size.  */
+
+static void
+__go_map_rehash (struct __go_map *map)
+{
+  const struct __go_map_descriptor *descriptor;
+  const struct __go_type_descriptor *key_descriptor;
+  size_t key_offset;
+  size_t key_size;
+  size_t (*hashfn) (const void *, size_t);
+  size_t old_bucket_count;
+  void **old_buckets;
+  size_t new_bucket_count;
+  void **new_buckets;
+  size_t i;
+
+  descriptor = map->__descriptor;
+
+  key_descriptor = descriptor->__map_descriptor->__key_type;
+  key_offset = descriptor->__key_offset;
+  key_size = key_descriptor->__size;
+  hashfn = key_descriptor->__hashfn;
+
+  old_bucket_count = map->__bucket_count;
+  old_buckets = map->__buckets;
+
+  new_bucket_count = __go_map_next_prime (old_bucket_count * 2);
+  new_buckets = (void **) __go_alloc (new_bucket_count * sizeof (void *));
+  __builtin_memset (new_buckets, 0, new_bucket_count * sizeof (void *));
+
+  for (i = 0; i < old_bucket_count; ++i)
+    {
+      char* entry;
+      char* next;
+
+      for (entry = old_buckets[i]; entry != NULL; entry = next)
+	{
+	  size_t key_hash;
+	  size_t new_bucket_index;
+
+	  /* We could speed up rehashing at the cost of memory space
+	     by caching the hash code.  */
+	  key_hash = hashfn (entry + key_offset, key_size);
+	  new_bucket_index = key_hash % new_bucket_count;
+
+	  next = *(char **) entry;
+	  *(char **) entry = new_buckets[new_bucket_index];
+	  new_buckets[new_bucket_index] = entry;
+	}
+    }
+
+  __go_free (old_buckets);
+
+  map->__bucket_count = new_bucket_count;
+  map->__buckets = new_buckets;
+}
+
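+/* Illustrative note: __go_map_index below grows the table whenever
+   the element count has reached the bucket count, keeping the load
+   factor at or below one.  For example, once a 5-bucket map holds 5
+   entries, the next inserting lookup rehashes into
+   __go_map_next_prime (10) == 11 buckets.  */
+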
+/* Find KEY in MAP, return a pointer to the value.  If KEY is not
+   present, then if INSERT is false, return NULL, and if INSERT is
+   true, insert a new value and zero-initialize it before returning a
+   pointer to it.  */
+
+void *
+__go_map_index (struct __go_map *map, const void *key, _Bool insert)
+{
+  const struct __go_map_descriptor *descriptor;
+  const struct __go_type_descriptor *key_descriptor;
+  size_t key_offset;
+  _Bool (*equalfn) (const void*, const void*, size_t);
+  size_t key_hash;
+  size_t key_size;
+  size_t bucket_index;
+  char *entry;
+
+  descriptor = map->__descriptor;
+
+  key_descriptor = descriptor->__map_descriptor->__key_type;
+  key_offset = descriptor->__key_offset;
+  key_size = key_descriptor->__size;
+  __go_assert (key_size != 0 && key_size != -1UL);
+  equalfn = key_descriptor->__equalfn;
+
+  key_hash = key_descriptor->__hashfn (key, key_size);
+  bucket_index = key_hash % map->__bucket_count;
+
+  entry = (char *) map->__buckets[bucket_index];
+  while (entry != NULL)
+    {
+      if (equalfn (key, entry + key_offset, key_size))
+	return entry + descriptor->__val_offset;
+      entry = *(char **) entry;
+    }
+
+  if (!insert)
+    return NULL;
+
+  if (map->__element_count >= map->__bucket_count)
+    {
+      __go_map_rehash (map);
+      bucket_index = key_hash % map->__bucket_count;
+    }
+
+  entry = (char *) __go_alloc (descriptor->__entry_size);
+  __builtin_memset (entry, 0, descriptor->__entry_size);
+
+  __builtin_memcpy (entry + key_offset, key, key_size);
+
+  *(char **) entry = map->__buckets[bucket_index];
+  map->__buckets[bucket_index] = entry;
+
+  map->__element_count += 1;
+
+  return entry + descriptor->__val_offset;
+}
diff --git a/libgo/runtime/go-map-len.c b/libgo/runtime/go-map-len.c
new file mode 100644
index 000000000..75b747339
--- /dev/null
+++ b/libgo/runtime/go-map-len.c
@@ -0,0 +1,21 @@
+/* go-map-len.c -- return the length of a map.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+
+#include "map.h"
+
+/* Return the length of a map.  This could be done inline, of course,
+   but I'm doing it as a function for now to make it easy to change the
+   map structure.  */
+
+size_t
+__go_map_len (struct __go_map *map)
+{
+  if (map == NULL)
+    return 0;
+  return map->__element_count;
+}
diff --git a/libgo/runtime/go-map-range.c b/libgo/runtime/go-map-range.c
new file mode 100644
index 000000000..364cda9b6
--- /dev/null
+++ b/libgo/runtime/go-map-range.c
@@ -0,0 +1,102 @@
+/* go-map-range.c -- implement a range clause over a map.
+
+   Copyright 2009, 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-assert.h"
+#include "map.h"
+
+/* Initialize a range over a map.  */
+
+void
+__go_mapiterinit (const struct __go_map *h, struct __go_hash_iter *it)
+{
+  it->entry = NULL;
+  if (h != NULL)
+    {
+      it->map = h;
+      it->next_entry = NULL;
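+      /* Start the bucket index at (size_t) -1 so that the first
+	 increment in __go_mapiternext lands on bucket 0.  */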
+      it->bucket = 0;
+      --it->bucket;
+      __go_mapiternext(it);
+    }
+}
+
+/* Move to the next entry in the iteration, updating *IT.  */
+
+void
+__go_mapiternext (struct __go_hash_iter *it)
+{
+  const void *entry;
+
+  entry = it->next_entry;
+  if (entry == NULL)
+    {
+      const struct __go_map *map;
+      size_t bucket;
+
+      map = it->map;
+      bucket = it->bucket;
+      while (1)
+	{
+	  ++bucket;
+	  if (bucket >= map->__bucket_count)
+	    {
+	      /* Map iteration is complete.  */
+	      it->entry = NULL;
+	      return;
+	    }
+	  entry = map->__buckets[bucket];
+	  if (entry != NULL)
+	    break;
+	}
+      it->bucket = bucket;
+    }
+  it->entry = entry;
+  it->next_entry = *(const void * const *) entry;
+}
+
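+/* Illustrative sketch (an assumption about the compiler, not anything
+   defined here): a statement "for k, v := range m" is lowered to one
+   call to __go_mapiterinit, then a loop that stops when it.entry ==
+   NULL, calling __go_mapiter2 (or __go_mapiter1 when only the key is
+   wanted) and then __go_mapiternext on each pass.  */
+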
+/* Get the key of the current iteration.  */
+
+void
+__go_mapiter1 (struct __go_hash_iter *it, unsigned char *key)
+{
+  const struct __go_map *map;
+  const struct __go_map_descriptor *descriptor;
+  const struct __go_type_descriptor *key_descriptor;
+  const char *p;
+
+  map = it->map;
+  descriptor = map->__descriptor;
+  key_descriptor = descriptor->__map_descriptor->__key_type;
+  p = it->entry;
+  __go_assert (p != NULL);
+  __builtin_memcpy (key, p + descriptor->__key_offset, key_descriptor->__size);
+}
+
+/* Get the key and value of the current iteration.  */
+
+void
+__go_mapiter2 (struct __go_hash_iter *it, unsigned char *key,
+	       unsigned char *val)
+{
+  const struct __go_map *map;
+  const struct __go_map_descriptor *descriptor;
+  const struct __go_map_type *map_descriptor;
+  const struct __go_type_descriptor *key_descriptor;
+  const struct __go_type_descriptor *val_descriptor;
+  const char *p;
+
+  map = it->map;
+  descriptor = map->__descriptor;
+  map_descriptor = descriptor->__map_descriptor;
+  key_descriptor = map_descriptor->__key_type;
+  val_descriptor = map_descriptor->__val_type;
+  p = it->entry;
+  __go_assert (p != NULL);
+  __builtin_memcpy (key, p + descriptor->__key_offset,
+		    key_descriptor->__size);
+  __builtin_memcpy (val, p + descriptor->__val_offset,
+		    val_descriptor->__size);
+}
diff --git a/libgo/runtime/go-nanotime.c b/libgo/runtime/go-nanotime.c
new file mode 100644
index 000000000..8cd423010
--- /dev/null
+++ b/libgo/runtime/go-nanotime.c
@@ -0,0 +1,22 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Return the current time in nanoseconds.  This is only used for
+// computing elapsed run time.
+
+#include <sys/time.h>
+
+#include "go-assert.h"
+#include "runtime.h"
+
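+/* Example (illustrative): a timeval of { tv_sec = 2, tv_usec = 500000 }
+   yields 2 * 1000000000 + 500000 * 1000 == 2500000000 nanoseconds.  */
+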
+int64
+runtime_nanotime (void)
+{
+  int i;
+  struct timeval tv;
+
+  i = gettimeofday (&tv, NULL);
+  __go_assert (i == 0);
+
+  return (int64) tv.tv_sec * 1000000000 + (int64) tv.tv_usec * 1000;
+}
diff --git a/libgo/runtime/go-new-channel.c b/libgo/runtime/go-new-channel.c
new file mode 100644
index 000000000..d57f52c6c
--- /dev/null
+++ b/libgo/runtime/go-new-channel.c
@@ -0,0 +1,57 @@
+/* go-new-channel.c -- allocate a new channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+
+#include "go-alloc.h"
+#include "go-assert.h"
+#include "go-panic.h"
+#include "channel.h"
+
+struct __go_channel*
+__go_new_channel (size_t element_size, size_t entries)
+{
+  struct __go_channel* ret;
+  size_t alloc_size;
+  int i;
+
+  if ((size_t) (int) entries != entries || entries > (size_t) -1 / element_size)
+    __go_panic_msg ("chan size out of range");
+
+  alloc_size = (element_size + sizeof (uint64_t) - 1) / sizeof (uint64_t);
+
+  /* We use a circular buffer, which means that when next_fetch ==
+     next_store we don't know whether the buffer is empty or full.  So
+     we allocate an extra slot and always leave one slot open.
+     FIXME.  */
+  if (entries != 0)
+    ++entries;
+
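+  /* Illustrative example: a channel created with a capacity of 4
+     reaches this point with ENTRIES == 5, so four elements can be
+     buffered while next_fetch == next_store still always means
+     "empty".  */
+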
+  ret = (struct __go_channel*) __go_alloc (sizeof (struct __go_channel)
+					   + ((entries == 0 ? 1 : entries)
+					      * alloc_size
+					      * sizeof (uint64_t)));
+  i = pthread_mutex_init (&ret->lock, NULL);
+  __go_assert (i == 0);
+  i = pthread_cond_init (&ret->cond, NULL);
+  __go_assert (i == 0);
+  ret->element_size = element_size;
+  ret->closed_op_count = 0;
+  ret->waiting_to_send = 0;
+  ret->waiting_to_receive = 0;
+  ret->selected_for_send = 0;
+  ret->selected_for_receive = 0;
+  ret->is_closed = 0;
+  ret->saw_close = 0;
+  ret->select_send_queue = NULL;
+  ret->select_receive_queue = NULL;
+  ret->select_mutex = NULL;
+  ret->select_cond = NULL;
+  ret->num_entries = entries;
+  ret->next_store = 0;
+  ret->next_fetch = 0;
+  return ret;
+}
diff --git a/libgo/runtime/go-new-map.c b/libgo/runtime/go-new-map.c
new file mode 100644
index 000000000..519f38f78
--- /dev/null
+++ b/libgo/runtime/go-new-map.c
@@ -0,0 +1,125 @@
+/* go-new-map.c -- allocate a new map.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-alloc.h"
+#include "go-panic.h"
+#include "map.h"
+
+/* List of prime numbers, copied from libstdc++/src/hashtable.c.  */
+
+static const unsigned long prime_list[] = /* 256 + 1 or 256 + 48 + 1 */
+{
+  2ul, 3ul, 5ul, 7ul, 11ul, 13ul, 17ul, 19ul, 23ul, 29ul, 31ul,
+  37ul, 41ul, 43ul, 47ul, 53ul, 59ul, 61ul, 67ul, 71ul, 73ul, 79ul,
+  83ul, 89ul, 97ul, 103ul, 109ul, 113ul, 127ul, 137ul, 139ul, 149ul,
+  157ul, 167ul, 179ul, 193ul, 199ul, 211ul, 227ul, 241ul, 257ul,
+  277ul, 293ul, 313ul, 337ul, 359ul, 383ul, 409ul, 439ul, 467ul,
+  503ul, 541ul, 577ul, 619ul, 661ul, 709ul, 761ul, 823ul, 887ul,
+  953ul, 1031ul, 1109ul, 1193ul, 1289ul, 1381ul, 1493ul, 1613ul,
+  1741ul, 1879ul, 2029ul, 2179ul, 2357ul, 2549ul, 2753ul, 2971ul,
+  3209ul, 3469ul, 3739ul, 4027ul, 4349ul, 4703ul, 5087ul, 5503ul,
+  5953ul, 6427ul, 6949ul, 7517ul, 8123ul, 8783ul, 9497ul, 10273ul,
+  11113ul, 12011ul, 12983ul, 14033ul, 15173ul, 16411ul, 17749ul,
+  19183ul, 20753ul, 22447ul, 24281ul, 26267ul, 28411ul, 30727ul,
+  33223ul, 35933ul, 38873ul, 42043ul, 45481ul, 49201ul, 53201ul,
+  57557ul, 62233ul, 67307ul, 72817ul, 78779ul, 85229ul, 92203ul,
+  99733ul, 107897ul, 116731ul, 126271ul, 136607ul, 147793ul,
+  159871ul, 172933ul, 187091ul, 202409ul, 218971ul, 236897ul,
+  256279ul, 277261ul, 299951ul, 324503ul, 351061ul, 379787ul,
+  410857ul, 444487ul, 480881ul, 520241ul, 562841ul, 608903ul,
+  658753ul, 712697ul, 771049ul, 834181ul, 902483ul, 976369ul,
+  1056323ul, 1142821ul, 1236397ul, 1337629ul, 1447153ul, 1565659ul,
+  1693859ul, 1832561ul, 1982627ul, 2144977ul, 2320627ul, 2510653ul,
+  2716249ul, 2938679ul, 3179303ul, 3439651ul, 3721303ul, 4026031ul,
+  4355707ul, 4712381ul, 5098259ul, 5515729ul, 5967347ul, 6456007ul,
+  6984629ul, 7556579ul, 8175383ul, 8844859ul, 9569143ul, 10352717ul,
+  11200489ul, 12117689ul, 13109983ul, 14183539ul, 15345007ul,
+  16601593ul, 17961079ul, 19431899ul, 21023161ul, 22744717ul,
+  24607243ul, 26622317ul, 28802401ul, 31160981ul, 33712729ul,
+  36473443ul, 39460231ul, 42691603ul, 46187573ul, 49969847ul,
+  54061849ul, 58488943ul, 63278561ul, 68460391ul, 74066549ul,
+  80131819ul, 86693767ul, 93793069ul, 101473717ul, 109783337ul,
+  118773397ul, 128499677ul, 139022417ul, 150406843ul, 162723577ul,
+  176048909ul, 190465427ul, 206062531ul, 222936881ul, 241193053ul,
+  260944219ul, 282312799ul, 305431229ul, 330442829ul, 357502601ul,
+  386778277ul, 418451333ul, 452718089ul, 489790921ul, 529899637ul,
+  573292817ul, 620239453ul, 671030513ul, 725980837ul, 785430967ul,
+  849749479ul, 919334987ul, 994618837ul, 1076067617ul, 1164186217ul,
+  1259520799ul, 1362662261ul, 1474249943ul, 1594975441ul, 1725587117ul,
+  1866894511ul, 2019773507ul, 2185171673ul, 2364114217ul, 2557710269ul,
+  2767159799ul, 2993761039ul, 3238918481ul, 3504151727ul, 3791104843ul,
+  4101556399ul, 4294967291ul,
+#if __SIZEOF_LONG__ >= 8
+  6442450933ul, 8589934583ul, 12884901857ul, 17179869143ul,
+  25769803693ul, 34359738337ul, 51539607367ul, 68719476731ul,
+  103079215087ul, 137438953447ul, 206158430123ul, 274877906899ul,
+  412316860387ul, 549755813881ul, 824633720731ul, 1099511627689ul,
+  1649267441579ul, 2199023255531ul, 3298534883309ul, 4398046511093ul,
+  6597069766607ul, 8796093022151ul, 13194139533241ul, 17592186044399ul,
+  26388279066581ul, 35184372088777ul, 52776558133177ul, 70368744177643ul,
+  105553116266399ul, 140737488355213ul, 211106232532861ul, 281474976710597ul,
+  562949953421231ul, 1125899906842597ul, 2251799813685119ul,
+  4503599627370449ul, 9007199254740881ul, 18014398509481951ul,
+  36028797018963913ul, 72057594037927931ul, 144115188075855859ul,
+  288230376151711717ul, 576460752303423433ul,
+  1152921504606846883ul, 2305843009213693951ul,
+  4611686018427387847ul, 9223372036854775783ul,
+  18446744073709551557ul
+#endif
+};
+
+/* Return the smallest entry in PRIME_LIST that is >= N, or N itself
+   if N is larger than every entry.  */
+
+unsigned long
+__go_map_next_prime (unsigned long n)
+{
+  size_t low;
+  size_t high;
+
+  low = 0;
+  high = sizeof prime_list / sizeof prime_list[0];
+  while (low < high)
+    {
+      size_t mid;
+
+      mid = (low + high) / 2;
+
+      /* Here LOW <= MID < HIGH.  */
+
+      if (prime_list[mid] < n)
+	low = mid + 1;
+      else if (prime_list[mid] > n)
+	high = mid;
+      else
+	return n;
+    }
+  if (low >= sizeof prime_list / sizeof prime_list[0])
+    return n;
+  return prime_list[low];
+}
+
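+/* Examples (illustrative): __go_map_next_prime (10) returns 11,
+   __go_map_next_prime (13) returns 13, and any N larger than the last
+   table entry is returned unchanged.  */
+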
+/* Allocate a new map.  */
+
+struct __go_map *
+__go_new_map (const struct __go_map_descriptor *descriptor, size_t entries)
+{
+  struct __go_map *ret;
+
+  if ((size_t) (int) entries != entries)
+    __go_panic_msg ("map size out of range");
+
+  if (entries == 0)
+    entries = 5;
+  else
+    entries = __go_map_next_prime (entries);
+  ret = (struct __go_map *) __go_alloc (sizeof (struct __go_map));
+  ret->__descriptor = descriptor;
+  ret->__element_count = 0;
+  ret->__bucket_count = entries;
+  ret->__buckets = (void **) __go_alloc (entries * sizeof (void *));
+  __builtin_memset (ret->__buckets, 0, entries * sizeof (void *));
+  return ret;
+}
diff --git a/libgo/runtime/go-new.c b/libgo/runtime/go-new.c
new file mode 100644
index 000000000..a592174e5
--- /dev/null
+++ b/libgo/runtime/go-new.c
@@ -0,0 +1,21 @@
+/* go-new.c -- the generic go new() function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-alloc.h"
+#include "runtime.h"
+#include "malloc.h"
+
+void *
+__go_new (size_t size)
+{
+  return runtime_mallocgc (size, 0, 1, 1);
+}
+
+void *
+__go_new_nopointers (size_t size)
+{
+  return runtime_mallocgc (size, RefNoPointers, 1, 1);
+}
diff --git a/libgo/runtime/go-note.c b/libgo/runtime/go-note.c
new file mode 100644
index 000000000..3b750f30e
--- /dev/null
+++ b/libgo/runtime/go-note.c
@@ -0,0 +1,74 @@
+/* go-note.c -- implement notesleep, notewakeup and noteclear.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+/* A note is a one-time notification.  noteclear clears the note.
+   notesleep waits for a call to notewakeup.  notewakeup wakes up
+   every thread waiting on the note.  */
+
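+/* Illustrative usage: one thread calls noteclear (&n) and hands N to
+   a worker; the waiter calls notesleep (&n), and when the worker
+   calls notewakeup (&n) every sleeper sees n->woken set and
+   returns.  */
+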
+#include "go-assert.h"
+#include "runtime.h"
+
+/* We use a single global lock and condition variable.  It would be
+   better to use a futex on Linux.  */
+
+static pthread_mutex_t note_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t note_cond = PTHREAD_COND_INITIALIZER;
+
+/* noteclear is called before any calls to notesleep or
+   notewakeup.  */
+
+void
+noteclear (Note* n)
+{
+  int32 i;
+
+  i = pthread_mutex_lock (&note_lock);
+  __go_assert (i == 0);
+
+  n->woken = 0;
+
+  i = pthread_mutex_unlock (&note_lock);
+  __go_assert (i == 0);
+}
+
+/* Wait until notewakeup is called.  */
+
+void
+notesleep (Note* n)
+{
+  int32 i;
+
+  i = pthread_mutex_lock (&note_lock);
+  __go_assert (i == 0);
+
+  while (!n->woken)
+    {
+      i = pthread_cond_wait (&note_cond, &note_lock);
+      __go_assert (i == 0);
+    }
+
+  i = pthread_mutex_unlock (&note_lock);
+  __go_assert (i == 0);
+}
+
+/* Wake up every thread sleeping on the note.  */
+
+void
+notewakeup (Note *n)
+{
+  int32 i;
+
+  i = pthread_mutex_lock (&note_lock);
+  __go_assert (i == 0);
+
+  n->woken = 1;
+
+  i = pthread_cond_broadcast (&note_cond);
+  __go_assert (i == 0);
+
+  i = pthread_mutex_unlock (&note_lock);
+  __go_assert (i == 0);
+}
diff --git a/libgo/runtime/go-panic-defer.c b/libgo/runtime/go-panic-defer.c
new file mode 100644
index 000000000..64773bb5e
--- /dev/null
+++ b/libgo/runtime/go-panic-defer.c
@@ -0,0 +1,13 @@
+/* go-panic-defer.c -- the panic/defer stack.
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-panic.h"
+
+#ifdef __rtems__
+#define __thread
+#endif
+
+__thread struct __go_panic_defer_struct *__go_panic_defer;
diff --git a/libgo/runtime/go-panic.c b/libgo/runtime/go-panic.c
new file mode 100644
index 000000000..48d644162
--- /dev/null
+++ b/libgo/runtime/go-panic.c
@@ -0,0 +1,121 @@
+/* go-panic.c -- support for the go panic function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "runtime.h"
+#include "malloc.h"
+#include "go-alloc.h"
+#include "go-defer.h"
+#include "go-panic.h"
+#include "go-string.h"
+#include "interface.h"
+
+/* Print the panic stack.  This is used when there is no recover.  */
+
+static void
+__printpanics (struct __go_panic_stack *p)
+{
+  if (p->__next != NULL)
+    {
+      __printpanics (p->__next);
+      printf ("\t");
+    }
+  printf ("panic: ");
+  printany (p->__arg);
+  if (p->__was_recovered)
+    printf (" [recovered]");
+  putchar ('\n');
+}
+
+/* This implements __go_panic which is used for the panic
+   function.  */
+
+void
+__go_panic (struct __go_empty_interface arg)
+{
+  struct __go_panic_stack *n;
+
+  if (__go_panic_defer == NULL)
+    __go_panic_defer = ((struct __go_panic_defer_struct *)
+			__go_alloc (sizeof (struct __go_panic_defer_struct)));
+
+  n = (struct __go_panic_stack *) __go_alloc (sizeof (struct __go_panic_stack));
+  n->__arg = arg;
+  n->__next = __go_panic_defer->__panic;
+  __go_panic_defer->__panic = n;
+
+  /* Run all the defer functions.  */
+
+  while (1)
+    {
+      struct __go_defer_stack *d;
+      void (*pfn) (void *);
+
+      d = __go_panic_defer->__defer;
+      if (d == NULL)
+	break;
+
+      pfn = d->__pfn;
+      d->__pfn = NULL;
+
+      if (pfn != NULL)
+	{
+	  (*pfn) (d->__arg);
+
+	  if (n->__was_recovered)
+	    {
+	      /* Some defer function called recover.  That means that
+		 we should stop running this panic.  */
+
+	      __go_panic_defer->__panic = n->__next;
+	      __go_free (n);
+
+	      /* Now unwind the stack by throwing an exception.  The
+		 compiler has arranged to create exception handlers in
+		 each function which uses a defer statement.  These
+		 exception handlers will check whether the entry on
+		 the top of the defer stack is from the current
+		 function.  If it is, we have unwound the stack far
+		 enough.  */
+	      __go_unwind_stack ();
+
+	      /* __go_unwind_stack should not return.  */
+	      abort ();
+	    }
+	}
+
+      __go_panic_defer->__defer = d->__next;
+      __go_free (d);
+    }
+
+  /* The panic was not recovered.  */
+
+  __printpanics (__go_panic_defer->__panic);
+
+  /* FIXME: We should dump a call stack here.  */
+  abort ();
+}
+
+/* This is used by the runtime library.  */
+
+void
+__go_panic_msg (const char* msg)
+{
+  size_t len;
+  unsigned char *sdata;
+  struct __go_string s;
+  struct __go_empty_interface arg;
+
+  len = __builtin_strlen (msg);
+  sdata = runtime_mallocgc (len, RefNoPointers, 0, 0);
+  __builtin_memcpy (sdata, msg, len);
+  s.__data = sdata;
+  s.__length = len;
+  newErrorString(s, &arg);
+  __go_panic (arg);
+}
diff --git a/libgo/runtime/go-panic.h b/libgo/runtime/go-panic.h
new file mode 100644
index 000000000..2836c4681
--- /dev/null
+++ b/libgo/runtime/go-panic.h
@@ -0,0 +1,94 @@
+/* go-panic.h -- declare the go panic functions.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#ifndef LIBGO_GO_PANIC_H
+#define LIBGO_GO_PANIC_H
+
+#include "interface.h"
+
+struct __go_string;
+struct __go_type_descriptor;
+struct __go_defer_stack;
+
+/* The stack of panic calls.  */
+
+struct __go_panic_stack
+{
+  /* The next entry in the stack.  */
+  struct __go_panic_stack *__next;
+
+  /* The value associated with this panic.  */
+  struct __go_empty_interface __arg;
+
+  /* Whether this panic has been recovered.  */
+  _Bool __was_recovered;
+
+  /* Whether this panic was pushed on the stack because of an
+     exception thrown in some other language.  */
+  _Bool __is_foreign;
+};
+
+/* The panic and defer stacks, grouped together into a single thread
+   local variable for convenience for systems without TLS.  */
+
+struct __go_panic_defer_struct
+{
+  /* The list of defers to execute.  */
+  struct __go_defer_stack *__defer;
+
+  /* The list of currently active panics.  There will be more than one
+     if a deferred function calls panic.  */
+  struct __go_panic_stack *__panic;
+
+  /* The current exception being thrown when unwinding after a call to
+     panic.  This is really a struct _Unwind_Exception *.  */
+  void *__exception;
+
+  /* Whether the current exception is from some other language.  */
+  _Bool __is_foreign;
+};
+
+#ifdef __rtems__
+#define __thread
+#endif
+
+extern __thread struct __go_panic_defer_struct *__go_panic_defer;
+
+#ifdef __rtems__
+#undef __thread
+#endif
+
+extern void __go_panic (struct __go_empty_interface)
+  __attribute__ ((noreturn));
+
+extern void __go_panic_msg (const char* msg)
+  __attribute__ ((noreturn));
+
+extern void __go_print_string (struct __go_string);
+
+extern struct __go_empty_interface __go_recover (void);
+
+extern void __go_unwind_stack (void);
+
+/* Functions defined in libgo/go/runtime/error.go.  */
+
+extern void newTypeAssertionError(const struct __go_type_descriptor *pt1,
+				  const struct __go_type_descriptor *pt2,
+				  const struct __go_type_descriptor *pt3,
+				  const struct __go_string *ps1,
+				  const struct __go_string *ps2,
+				  const struct __go_string *ps3,
+				  const struct __go_string *pmeth,
+				  struct __go_empty_interface *ret)
+  __asm__ ("libgo_runtime.runtime.NewTypeAssertionError");
+
+extern void newErrorString(struct __go_string, struct __go_empty_interface *)
+  __asm__ ("libgo_runtime.runtime.NewErrorString");
+
+extern void printany(struct __go_empty_interface)
+  __asm__ ("libgo_runtime.runtime.Printany");
+
+#endif /* !defined(LIBGO_GO_PANIC_H) */
diff --git a/libgo/runtime/go-print.c b/libgo/runtime/go-print.c
new file mode 100644
index 000000000..095909de2
--- /dev/null
+++ b/libgo/runtime/go-print.c
@@ -0,0 +1,93 @@
+/* go-print.c -- support for the go print statement.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+#include <stdio.h>
+
+#include "array.h"
+#include "go-panic.h"
+#include "go-string.h"
+#include "interface.h"
+
+/* This implements the various little functions which are called by
+   the predeclared functions print/println/panic/panicln.  */
+
+void
+__go_print_space ()
+{
+  putchar (' ');
+}
+
+void
+__go_print_nl ()
+{
+  putchar ('\n');
+}
+
+void
+__go_print_string (struct __go_string val)
+{
+  printf ("%.*s", (int) val.__length, (const char *) val.__data);
+}
+
+void
+__go_print_uint64 (uint64_t val)
+{
+  printf ("%llu", (unsigned long long) val);
+}
+
+void
+__go_print_int64 (int64_t val)
+{
+  printf ("%lld", (long long) val);
+}
+
+void
+__go_print_double (double val)
+{
+  printf ("%.24g", val);
+}
+
+void
+__go_print_complex (__complex double val)
+{
+  printf ("(%.24g%s%.24gi)",
+	  __builtin_creal (val),
+	  (__builtin_cimag (val) >= 0 || __builtin_isnan (__builtin_cimag(val))
+	   ? "+"
+	   : ""),
+	  __builtin_cimag (val));
+}
+
+void
+__go_print_bool (_Bool val)
+{
+  fputs (val ? "true" : "false", stdout);
+}
+
+void
+__go_print_pointer (void *val)
+{
+  printf ("%p", val);
+}
+
+void
+__go_print_empty_interface (struct __go_empty_interface e)
+{
+  printf ("(%p,%p)", e.__type_descriptor, e.__object);
+}
+
+void
+__go_print_interface (struct __go_interface i)
+{
+  printf ("(%p,%p)", i.__methods, i.__object);
+}
+
+void
+__go_print_slice (struct __go_open_array val)
+{
+  printf ("[%d/%d]%p", val.__count, val.__capacity, val.__values);
+}
diff --git a/libgo/runtime/go-rec-big.c b/libgo/runtime/go-rec-big.c
new file mode 100644
index 000000000..23d65296a
--- /dev/null
+++ b/libgo/runtime/go-rec-big.c
@@ -0,0 +1,34 @@
+/* go-rec-big.c -- receive something larger than 64 bits on a channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+
+#include "go-panic.h"
+#include "channel.h"
+
+void
+__go_receive_big (struct __go_channel *channel, void *val, _Bool for_select)
+{
+  size_t alloc_size;
+  size_t offset;
+
+  if (channel == NULL)
+    __go_panic_msg ("receive from nil channel");
+
+  alloc_size = ((channel->element_size + sizeof (uint64_t) - 1)
+		/ sizeof (uint64_t));
+
+  if (!__go_receive_acquire (channel, for_select))
+    {
+      __builtin_memset (val, 0, channel->element_size);
+      return;
+    }
+
+  offset = channel->next_fetch * alloc_size;
+  __builtin_memcpy (val, &channel->data[offset], channel->element_size);
+
+  __go_receive_release (channel);
+}
diff --git a/libgo/runtime/go-rec-nb-big.c b/libgo/runtime/go-rec-nb-big.c
new file mode 100644
index 000000000..53ffe48ab
--- /dev/null
+++ b/libgo/runtime/go-rec-nb-big.c
@@ -0,0 +1,39 @@
+/* go-rec-nb-big.c -- nonblocking receive of something big on a channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+
+#include "channel.h"
+
+_Bool
+__go_receive_nonblocking_big (struct __go_channel* channel, void *val)
+{
+  size_t alloc_size;
+  size_t offset;
+
+  alloc_size = ((channel->element_size + sizeof (uint64_t) - 1)
+		/ sizeof (uint64_t));
+
+  int data = __go_receive_nonblocking_acquire (channel);
+  if (data != RECEIVE_NONBLOCKING_ACQUIRE_DATA)
+    {
+      __builtin_memset (val, 0, channel->element_size);
+      if (data == RECEIVE_NONBLOCKING_ACQUIRE_NODATA)
+	return 0;
+      else
+	{
+	  /* Channel is closed.  */
+	  return 1;
+	}
+    }
+
+  offset = channel->next_fetch * alloc_size;
+  __builtin_memcpy (val, &channel->data[offset], channel->element_size);
+
+  __go_receive_release (channel);
+
+  return 1;
+}
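+
+/* The return convention mirrors a nonblocking comma-ok receive: a
+   return of 1 means VAL now holds either a received value or the
+   zero value of a closed channel, while 0 means no data was
+   available and VAL was zeroed so the caller can fall through to a
+   default case.  */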
diff --git a/libgo/runtime/go-rec-nb-small.c b/libgo/runtime/go-rec-nb-small.c
new file mode 100644
index 000000000..9983d3464
--- /dev/null
+++ b/libgo/runtime/go-rec-nb-small.c
@@ -0,0 +1,127 @@
+/* go-rec-nb-small.c -- nonblocking receive of something small on a channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+
+#include "go-assert.h"
+#include "go-panic.h"
+#include "channel.h"
+
+/* Prepare to receive something on a nonblocking channel.  */
+
+int
+__go_receive_nonblocking_acquire (struct __go_channel *channel)
+{
+  int i;
+  _Bool has_data;
+
+  i = pthread_mutex_lock (&channel->lock);
+  __go_assert (i == 0);
+
+  while (channel->selected_for_receive)
+    {
+      i = pthread_cond_wait (&channel->cond, &channel->lock);
+      __go_assert (i == 0);
+    }
+
+  if (channel->is_closed
+      && (channel->num_entries == 0
+	  ? channel->next_store == 0
+	  : channel->next_fetch == channel->next_store))
+    {
+      if (channel->saw_close)
+	{
+	  ++channel->closed_op_count;
+	  if (channel->closed_op_count >= MAX_CLOSED_OPERATIONS)
+	    {
+	      i = pthread_mutex_unlock (&channel->lock);
+	      __go_assert (i == 0);
+	      __go_panic_msg ("too many operations on closed channel");
+	    }
+	}
+      channel->saw_close = 1;
+      __go_unlock_and_notify_selects (channel);
+      return RECEIVE_NONBLOCKING_ACQUIRE_CLOSED;
+    }
+
+  if (channel->num_entries > 0)
+    has_data = channel->next_fetch != channel->next_store;
+  else
+    {
+      if (channel->waiting_to_receive)
+	{
+	  /* Some other goroutine is already waiting for data on this
+	     channel, so we can't pick it up.  */
+	  has_data = 0;
+	}
+      else if (channel->next_store > 0)
+	{
+	  /* There is data on the channel.  */
+	  has_data = 1;
+	}
+      else if (__go_synch_with_select (channel, 0))
+	{
+	  /* We synched up with a select sending data, so there will
+	     be data for us shortly.  Tell the select to go, and then
+	     wait for the data.  */
+	  __go_broadcast_to_select (channel);
+
+	  while (channel->next_store == 0)
+	    {
+	      i = pthread_cond_wait (&channel->cond, &channel->lock);
+	      __go_assert (i == 0);
+	    }
+
+	  has_data = 1;
+	}
+      else
+	{
+	  /* Otherwise there is no data.  */
+	  has_data = 0;
+	}
+
+      if (has_data)
+	{
+	  channel->waiting_to_receive = 1;
+	  __go_assert (channel->next_store == 1);
+	}
+    }
+
+  if (!has_data)
+    {
+      i = pthread_mutex_unlock (&channel->lock);
+      __go_assert (i == 0);
+      return RECEIVE_NONBLOCKING_ACQUIRE_NODATA;
+    }
+
+  return RECEIVE_NONBLOCKING_ACQUIRE_DATA;
+}
+
+/* Receive something 64 bits or smaller on a nonblocking channel.  */
+
+struct __go_receive_nonblocking_small
+__go_receive_nonblocking_small (struct __go_channel *channel)
+{
+  struct __go_receive_nonblocking_small ret;
+
+  __go_assert (channel->element_size <= sizeof (uint64_t));
+
+  int data = __go_receive_nonblocking_acquire (channel);
+  if (data != RECEIVE_NONBLOCKING_ACQUIRE_DATA)
+    {
+      ret.__val = 0;
+      ret.__success = data == RECEIVE_NONBLOCKING_ACQUIRE_CLOSED;
+      return ret;
+    }
+
+  ret.__val = channel->data[channel->next_fetch];
+
+  __go_receive_release (channel);
+
+  ret.__success = 1;
+
+  return ret;
+}
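+
+/* A minimal usage sketch, assuming CH is a hypothetical channel of
+   4-byte elements:
+
+     struct __go_receive_nonblocking_small s;
+
+     s = __go_receive_nonblocking_small (ch);
+     if (s.__success)
+       {
+         uint32_t v = (uint32_t) s.__val;  /* value rides in the low bits */
+         ...
+       }
+
+   The value always travels in a uint64_t slot; narrowing it back to
+   the element size is the caller's job.  */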
diff --git a/libgo/runtime/go-rec-small.c b/libgo/runtime/go-rec-small.c
new file mode 100644
index 000000000..765e8d310
--- /dev/null
+++ b/libgo/runtime/go-rec-small.c
@@ -0,0 +1,289 @@
+/* go-rec-small.c -- receive something 64 bits or smaller on a channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+
+#include "go-assert.h"
+#include "go-panic.h"
+#include "channel.h"
+
+/* This mutex controls access to the selected field of struct
+   __go_channel_select.  While this mutex is held, no other mutexes
+   may be acquired.  */
+
+pthread_mutex_t __go_select_data_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/* Try to synchronize with a select waiting on a synchronous channel.
+   This is used by a send or receive.  The channel is locked.  This
+   returns true if it was able to synch.  */
+
+_Bool
+__go_synch_with_select (struct __go_channel *channel, _Bool is_send)
+{
+  struct __go_channel_select *p;
+  int i;
+
+  __go_assert (channel->num_entries == 0);
+
+  i = pthread_mutex_lock (&__go_select_data_mutex);
+  __go_assert (i == 0);
+
+  for (p = (is_send
+	    ? channel->select_receive_queue
+	    : channel->select_send_queue);
+       p != NULL;
+       p = p->next)
+    {
+      if (*p->selected == NULL)
+	{
+	  *p->selected = channel;
+	  *p->is_read = !is_send;
+	  if (is_send)
+	    channel->selected_for_receive = 1;
+	  else
+	    channel->selected_for_send = 1;
+	  break;
+	}
+    }
+
+  i = pthread_mutex_unlock (&__go_select_data_mutex);
+  __go_assert (i == 0);
+
+  /* The caller is responsible for signalling the select condition
+     variable so that the other select knows that something has
+     changed.  We can't signal it here because we can't acquire the
+     select mutex while we hold a channel lock.  */
+
+  return p != NULL;
+}
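+
+/* In effect this is half of a rendezvous: the sender or receiver
+   scans the opposing select queue for an entry whose *SELECTED slot
+   is still NULL, claims it by storing the channel there, and leaves
+   the actual wakeup to __go_broadcast_to_select below.  */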
+
+/* If we synch with a select, then we need to signal the select that
+   something has changed.  This requires grabbing the select mutex,
+   which can only be done when the channel is unlocked.  This routine
+   does the signalling.  It is called with the channel locked.  It
+   unlocks the channel, broadcasts the signal and relocks the
+   channel.  */
+
+void
+__go_broadcast_to_select (struct __go_channel *channel)
+{
+  pthread_mutex_t *select_mutex;
+  pthread_cond_t *select_cond;
+  int i;
+
+  select_mutex = channel->select_mutex;
+  select_cond = channel->select_cond;
+
+  i = pthread_mutex_unlock (&channel->lock);
+  __go_assert (i == 0);
+
+  __go_assert (select_mutex != NULL && select_cond != NULL);
+
+  i = pthread_mutex_lock (select_mutex);
+  __go_assert (i == 0);
+
+  i = pthread_cond_broadcast (select_cond);
+  __go_assert (i == 0);
+
+  i = pthread_mutex_unlock (select_mutex);
+  __go_assert (i == 0);
+
+  i = pthread_mutex_lock (&channel->lock);
+  __go_assert (i == 0);
+}
+
+/* Prepare to receive something on a channel.  Return true if the
+   channel is acquired, false if it is closed.  */
+
+_Bool
+__go_receive_acquire (struct __go_channel *channel, _Bool for_select)
+{
+  int i;
+  _Bool my_wait_lock;
+  _Bool synched_with_select;
+
+  my_wait_lock = 0;
+  synched_with_select = 0;
+
+  i = pthread_mutex_lock (&channel->lock);
+  __go_assert (i == 0);
+
+  while (1)
+    {
+      _Bool need_broadcast;
+
+      need_broadcast = 0;
+
+      /* Check whether the channel is closed.  */
+      if (channel->is_closed
+	  && (channel->num_entries == 0
+	      ? channel->next_store == 0
+	      : channel->next_fetch == channel->next_store))
+	{
+	  if (channel->saw_close)
+	    {
+	      ++channel->closed_op_count;
+	      if (channel->closed_op_count >= MAX_CLOSED_OPERATIONS)
+		__go_panic_msg ("too many operations on closed channel");
+	    }
+	  channel->saw_close = 1;
+	  channel->selected_for_receive = 0;
+	  __go_unlock_and_notify_selects (channel);
+	  return 0;
+	}
+
+      /* If somebody else has the channel locked for receiving, we
+	 have to wait.  If FOR_SELECT is true, then we are the one
+	 with the lock.  */
+      if (!channel->selected_for_receive || for_select)
+	{
+	  if (channel->num_entries == 0)
+	    {
+	      /* If somebody else is waiting to receive, we have to
+		 wait.  */
+	      if (!channel->waiting_to_receive || my_wait_lock)
+		{
+		  _Bool was_marked;
+
+		  /* Lock the channel so that we get to receive
+		     next.  */
+		  was_marked = channel->waiting_to_receive;
+		  channel->waiting_to_receive = 1;
+		  my_wait_lock = 1;
+
+		  /* See if there is a value to receive.  */
+		  if (channel->next_store > 0)
+		    return 1;
+
+		  /* If we haven't already done so, try to synch with
+		     a select waiting to send on this channel.  If we
+		     have already synched with a select, we are just
+		     looping until the select eventually causes
+		     something to be sent.  */
+		  if (!synched_with_select && !for_select)
+		    {
+		      if (__go_synch_with_select (channel, 0))
+			{
+			  synched_with_select = 1;
+			  need_broadcast = 1;
+			}
+		    }
+
+		  /* If we marked the channel as waiting, we need to
+		     signal, because something changed.  It needs to
+		     be a broadcast since there might be other
+		     receivers waiting.  */
+		  if (!was_marked)
+		    {
+		      i = pthread_cond_broadcast (&channel->cond);
+		      __go_assert (i == 0);
+		    }
+		}
+	    }
+	  else
+	    {
+	      /* If there is a value on the channel, we are OK.  */
+	      if (channel->next_fetch != channel->next_store)
+		return 1;
+	    }
+	}
+
+      /* If we just synched with a select, then we need to signal the
+	 select condition variable.  We can only do that if we unlock
+	 the channel.  So we need to unlock, signal, lock, and go
+	 around the loop again without waiting.  */
+      if (need_broadcast)
+	{
+	  __go_broadcast_to_select (channel);
+	  continue;
+	}
+
+      /* Wait for something to change, then loop around and try
+	 again.  */
+
+      i = pthread_cond_wait (&channel->cond, &channel->lock);
+      __go_assert (i == 0);
+    }
+}
+
+/* Finished receiving something on a channel.  */
+
+void
+__go_receive_release (struct __go_channel *channel)
+{
+  int i;
+
+  if (channel->num_entries != 0)
+    channel->next_fetch = (channel->next_fetch + 1) % channel->num_entries;
+  else
+    {
+      /* For a synchronous receiver, we tell the sender that we picked
+	 up the value by setting the next_store field back to 0.
+	 Using the mutexes should implement a memory barrier.  */
+      __go_assert (channel->next_store == 1);
+      channel->next_store = 0;
+
+      channel->waiting_to_receive = 0;
+    }
+
+  channel->selected_for_receive = 0;
+
+  /* This is a broadcast to make sure that a synchronous sender sees
+     it.  */
+  i = pthread_cond_broadcast (&channel->cond);
+  __go_assert (i == 0);
+
+  __go_unlock_and_notify_selects (channel);
+}
+
+/* Unlock a channel and notify any waiting selects that something
+   happened.  */
+
+void
+__go_unlock_and_notify_selects (struct __go_channel *channel)
+{
+  pthread_mutex_t* select_mutex;
+  pthread_cond_t* select_cond;
+  int i;
+
+  select_mutex = channel->select_mutex;
+  select_cond = channel->select_cond;
+
+  i = pthread_mutex_unlock (&channel->lock);
+  __go_assert (i == 0);
+
+  if (select_mutex != NULL)
+    {
+      i = pthread_mutex_lock (select_mutex);
+      __go_assert (i == 0);
+      i = pthread_cond_broadcast (select_cond);
+      __go_assert (i == 0);
+      i = pthread_mutex_unlock (select_mutex);
+      __go_assert (i == 0);
+    }
+}
+
+/* Receive something 64 bits or smaller on a channel.  */
+
+uint64_t
+__go_receive_small (struct __go_channel *channel, _Bool for_select)
+{
+  uint64_t ret;
+
+  if (channel == NULL)
+    __go_panic_msg ("receive from nil channel");
+
+  __go_assert (channel->element_size <= sizeof (uint64_t));
+
+  if (!__go_receive_acquire (channel, for_select))
+    return 0;
+
+  ret = channel->data[channel->next_fetch];
+
+  __go_receive_release (channel);
+
+  return ret;
+}
diff --git a/libgo/runtime/go-recover.c b/libgo/runtime/go-recover.c
new file mode 100644
index 000000000..4de122e3b
--- /dev/null
+++ b/libgo/runtime/go-recover.c
@@ -0,0 +1,69 @@
+/* go-recover.c -- support for the go recover function.
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "interface.h"
+#include "go-panic.h"
+#include "go-defer.h"
+
+/* This is called by a thunk to see if the real function should be
+   permitted to recover a panic value.  Recovering a value is
+   permitted if the thunk was called directly by defer.  RETADDR is
+   the return address of the function which is calling
+   __go_can_recover--that is, the thunk.  */
+
+_Bool
+__go_can_recover (const void* retaddr)
+{
+  struct __go_defer_stack *d;
+  const char* ret;
+  const char* dret;
+
+  if (__go_panic_defer == NULL)
+    return 0;
+  d = __go_panic_defer->__defer;
+  if (d == NULL)
+    return 0;
+
+  /* The panic which this function would recover is the one on the top
+     of the panic stack.  We do not want to recover it if that panic
+     was on the top of the panic stack when this function was
+     deferred.  */
+  if (d->__panic == __go_panic_defer->__panic)
+    return 0;
+
+  /* D->__RETADDR is the address of a label immediately following the
+     call to the thunk.  We can recover a panic if that is the same as
+     the return address of the thunk.  We permit a bit of slack in
+     case there is any code between the function return and the label,
+     such as an instruction to adjust the stack pointer.  */
+
+  ret = (const char *) retaddr;
+  dret = (const char *) d->__retaddr;
+  return ret <= dret && ret + 16 >= dret;
+}
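+
+/* As a rough sketch of how this is meant to be used (the real thunk
+   is emitted by the compiler), a thunk generated for "defer f()"
+   would test
+
+     if (__go_can_recover (__builtin_return_address (0)))
+       ...
+
+   so RETADDR is the point just after the call to the thunk, and the
+   16-byte window absorbs any stack-adjustment code between that
+   point and the recorded label.  */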
+
+/* This is only called when it is valid for the caller to recover the
+   value on top of the panic stack, if there is one.  */
+
+struct __go_empty_interface
+__go_recover ()
+{
+  struct __go_panic_stack *p;
+
+  if (__go_panic_defer == NULL
+      || __go_panic_defer->__panic == NULL
+      || __go_panic_defer->__panic->__was_recovered)
+    {
+      struct __go_empty_interface ret;
+
+      ret.__type_descriptor = NULL;
+      ret.__object = NULL;
+      return ret;
+    }
+  p = __go_panic_defer->__panic;
+  p->__was_recovered = 1;
+  return p->__arg;
+}
diff --git a/libgo/runtime/go-reflect-call.c b/libgo/runtime/go-reflect-call.c
new file mode 100644
index 000000000..6ae749f9a
--- /dev/null
+++ b/libgo/runtime/go-reflect-call.c
@@ -0,0 +1,375 @@
+/* go-reflect-call.c -- call reflection support for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "ffi.h"
+
+#include "go-alloc.h"
+#include "go-assert.h"
+#include "go-type.h"
+#include "runtime.h"
+
+/* Forward declaration.  */
+
+static ffi_type *go_type_to_ffi (const struct __go_type_descriptor *);
+
+/* Return an ffi_type for a Go array type.  The libffi library does
+   not have any builtin support for passing arrays as values.  We work
+   around this by pretending that the array is a struct.  */
+
+static ffi_type *
+go_array_to_ffi (const struct __go_array_type *descriptor)
+{
+  ffi_type *ret;
+  uintptr_t len;
+  ffi_type *element;
+  uintptr_t i;
+
+  ret = (ffi_type *) __go_alloc (sizeof (ffi_type));
+  __builtin_memset (ret, 0, sizeof (ffi_type));
+  ret->type = FFI_TYPE_STRUCT;
+  len = descriptor->__len;
+  ret->elements = (ffi_type **) __go_alloc ((len + 1) * sizeof (ffi_type *));
+  element = go_type_to_ffi (descriptor->__element_type);
+  for (i = 0; i < len; ++i)
+    ret->elements[i] = element;
+  ret->elements[len] = NULL;
+  return ret;
+}
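+
+/* For instance, a Go [4]int32 is presented to libffi as a structure
+   with four sint32 elements; libffi then derives the same size and
+   alignment the array itself has, which is all the call interface
+   needs.  */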
+
+/* Return an ffi_type for a Go slice type.  This describes the
+   __go_open_array type defined in array.h.  */
+
+static ffi_type *
+go_slice_to_ffi (const struct __go_slice_type *descriptor
+		 __attribute__ ((unused)))
+{
+  ffi_type *ret;
+
+  ret = (ffi_type *) __go_alloc (sizeof (ffi_type));
+  __builtin_memset (ret, 0, sizeof (ffi_type));
+  ret->type = FFI_TYPE_STRUCT;
+  ret->elements = (ffi_type **) __go_alloc (4 * sizeof (ffi_type *));
+  ret->elements[0] = &ffi_type_pointer;
+  ret->elements[1] = &ffi_type_sint;
+  ret->elements[2] = &ffi_type_sint;
+  ret->elements[3] = NULL;
+  return ret;
+}
+
+/* Return an ffi_type for a Go struct type.  */
+
+static ffi_type *
+go_struct_to_ffi (const struct __go_struct_type *descriptor)
+{
+  ffi_type *ret;
+  int field_count;
+  const struct __go_struct_field *fields;
+  int i;
+
+  ret = (ffi_type *) __go_alloc (sizeof (ffi_type));
+  __builtin_memset (ret, 0, sizeof (ffi_type));
+  ret->type = FFI_TYPE_STRUCT;
+  field_count = descriptor->__fields.__count;
+  fields = (const struct __go_struct_field *) descriptor->__fields.__values;
+  ret->elements = (ffi_type **) __go_alloc ((field_count + 1)
+					    * sizeof (ffi_type *));
+  for (i = 0; i < field_count; ++i)
+    ret->elements[i] = go_type_to_ffi (fields[i].__type);
+  ret->elements[field_count] = NULL;
+  return ret;
+}
+
+/* Return an ffi_type for a Go string type.  This describes the
+   __go_string struct.  */
+
+static ffi_type *
+go_string_to_ffi (void)
+{
+  ffi_type *ret;
+
+  ret = (ffi_type *) __go_alloc (sizeof (ffi_type));
+  ret->type = FFI_TYPE_STRUCT;
+  ret->elements = (ffi_type **) __go_alloc (3 * sizeof (ffi_type *));
+  ret->elements[0] = &ffi_type_pointer;
+  ret->elements[1] = &ffi_type_sint;
+  ret->elements[2] = NULL;
+  return ret;
+}
+
+/* Return an ffi_type for a Go interface type.  This describes the
+   __go_interface and __go_empty_interface structs.  */
+
+static ffi_type *
+go_interface_to_ffi (void)
+{
+  ffi_type *ret;
+
+  ret = (ffi_type *) __go_alloc (sizeof (ffi_type));
+  ret->type = FFI_TYPE_STRUCT;
+  ret->elements = (ffi_type **) __go_alloc (3 * sizeof (ffi_type *));
+  ret->elements[0] = &ffi_type_pointer;
+  ret->elements[1] = &ffi_type_pointer;
+  ret->elements[2] = NULL;
+  return ret;
+}
+
+/* Return an ffi_type for a Go complex type.  */
+
+static ffi_type *
+go_complex_to_ffi (ffi_type *float_type)
+{
+  ffi_type *ret;
+
+  ret = (ffi_type *) __go_alloc (sizeof (ffi_type));
+  ret->type = FFI_TYPE_STRUCT;
+  ret->elements = (ffi_type **) __go_alloc (3 * sizeof (ffi_type *));
+  ret->elements[0] = float_type;
+  ret->elements[1] = float_type;
+  ret->elements[2] = NULL;
+  return ret;
+}
+
+/* Return an ffi_type for a type described by a
+   __go_type_descriptor.  */
+
+static ffi_type *
+go_type_to_ffi (const struct __go_type_descriptor *descriptor)
+{
+  switch (descriptor->__code)
+    {
+    case GO_BOOL:
+      if (sizeof (_Bool) == 1)
+	return &ffi_type_uint8;
+      else if (sizeof (_Bool) == sizeof (int))
+	return &ffi_type_uint;
+      abort ();
+    case GO_FLOAT32:
+      if (sizeof (float) == 4)
+	return &ffi_type_float;
+      abort ();
+    case GO_FLOAT64:
+      if (sizeof (double) == 8)
+	return &ffi_type_double;
+      abort ();
+    case GO_COMPLEX64:
+      if (sizeof (float) == 4)
+	return go_complex_to_ffi (&ffi_type_float);
+      abort ();
+    case GO_COMPLEX128:
+      if (sizeof (double) == 8)
+	return go_complex_to_ffi (&ffi_type_double);
+      abort ();
+    case GO_INT16:
+      return &ffi_type_sint16;
+    case GO_INT32:
+      return &ffi_type_sint32;
+    case GO_INT64:
+      return &ffi_type_sint64;
+    case GO_INT8:
+      return &ffi_type_sint8;
+    case GO_INT:
+      return &ffi_type_sint;
+    case GO_UINT16:
+      return &ffi_type_uint16;
+    case GO_UINT32:
+      return &ffi_type_uint32;
+    case GO_UINT64:
+      return &ffi_type_uint64;
+    case GO_UINT8:
+      return &ffi_type_uint8;
+    case GO_UINT:
+      return &ffi_type_uint;
+    case GO_UINTPTR:
+      if (sizeof (void *) == 2)
+	return &ffi_type_uint16;
+      else if (sizeof (void *) == 4)
+	return &ffi_type_uint32;
+      else if (sizeof (void *) == 8)
+	return &ffi_type_uint64;
+      abort ();
+    case GO_ARRAY:
+      return go_array_to_ffi ((const struct __go_array_type *) descriptor);
+    case GO_SLICE:
+      return go_slice_to_ffi ((const struct __go_slice_type *) descriptor);
+    case GO_STRUCT:
+      return go_struct_to_ffi ((const struct __go_struct_type *) descriptor);
+    case GO_STRING:
+      return go_string_to_ffi ();
+    case GO_INTERFACE:
+      return go_interface_to_ffi ();
+    case GO_CHAN:
+    case GO_FUNC:
+    case GO_MAP:
+    case GO_PTR:
+    case GO_UNSAFE_POINTER:
+      /* These types are always pointers, and for FFI purposes nothing
+	 else matters.  */
+      return &ffi_type_pointer;
+    default:
+      abort ();
+    }
+}
+
+/* Return the return type for a function, given the number of out
+   parameters and their types.  */
+
+static ffi_type *
+go_func_return_ffi (const struct __go_func_type *func)
+{
+  int count;
+  const struct __go_type_descriptor **types;
+  ffi_type *ret;
+  int i;
+
+  count = func->__out.__count;
+  if (count == 0)
+    return &ffi_type_void;
+
+  types = (const struct __go_type_descriptor **) func->__out.__values;
+
+  if (count == 1)
+    return go_type_to_ffi (types[0]);
+
+  ret = (ffi_type *) __go_alloc (sizeof (ffi_type));
+  __builtin_memset (ret, 0, sizeof (ffi_type));
+  ret->type = FFI_TYPE_STRUCT;
+  ret->elements = (ffi_type **) __go_alloc ((count + 1) * sizeof (ffi_type *));
+  for (i = 0; i < count; ++i)
+    ret->elements[i] = go_type_to_ffi (types[i]);
+  ret->elements[count] = NULL;
+  return ret;
+}
+
+/* Build an ffi_cif structure for a function described by a
+   __go_func_type structure.  */
+
+static void
+go_func_to_cif (const struct __go_func_type *func, _Bool is_interface,
+		ffi_cif *cif)
+{
+  int num_params;
+  const struct __go_type_descriptor **in_types;
+  size_t num_args;
+  ffi_type **args;
+  int off;
+  int i;
+  ffi_type *rettype;
+  ffi_status status;
+
+  num_params = func->__in.__count;
+  in_types = ((const struct __go_type_descriptor **)
+	      func->__in.__values);
+
+  num_args = num_params + (is_interface ? 1 : 0);
+  args = (ffi_type **) __go_alloc (num_args * sizeof (ffi_type *));
+  if (is_interface)
+    args[0] = &ffi_type_pointer;
+  off = is_interface ? 1 : 0;
+  for (i = 0; i < num_params; ++i)
+    args[i + off] = go_type_to_ffi (in_types[i]);
+
+  rettype = go_func_return_ffi (func);
+
+  status = ffi_prep_cif (cif, FFI_DEFAULT_ABI, num_args, rettype, args);
+  __go_assert (status == FFI_OK);
+}
+
+/* Get the total size required for the result parameters of a
+   function.  */
+
+static size_t
+go_results_size (const struct __go_func_type *func)
+{
+  int count;
+  const struct __go_type_descriptor **types;
+  size_t off;
+  size_t maxalign;
+  int i;
+
+  count = func->__out.__count;
+  if (count == 0)
+    return 0;
+
+  types = (const struct __go_type_descriptor **) func->__out.__values;
+
+  off = 0;
+  maxalign = 0;
+  for (i = 0; i < count; ++i)
+    {
+      size_t align;
+
+      align = types[i]->__field_align;
+      if (align > maxalign)
+	maxalign = align;
+      off = (off + align - 1) & ~ (align - 1);
+      off += types[i]->__size;
+    }
+
+  off = (off + maxalign - 1) & ~ (maxalign - 1);
+
+  return off;
+}
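+
+/* A worked example of the arithmetic above, assuming a 64-bit target
+   and results (string, bool): the string is 16 bytes with 8-byte
+   alignment, so OFF becomes 16; the bool adds one byte at alignment
+   1, giving 17; the final round-up to MAXALIGN (8) yields a 24-byte
+   result area.  */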
+
+/* Copy the results of calling a function via FFI from CALL_RESULT
+   into the addresses in RESULTS.  */
+
+static void
+go_set_results (const struct __go_func_type *func, unsigned char *call_result,
+		void **results)
+{
+  int count;
+  const struct __go_type_descriptor **types;
+  size_t off;
+  int i;
+
+  count = func->__out.__count;
+  if (count == 0)
+    return;
+
+  types = (const struct __go_type_descriptor **) func->__out.__values;
+
+  off = 0;
+  for (i = 0; i < count; ++i)
+    {
+      size_t align;
+      size_t size;
+
+      align = types[i]->__field_align;
+      size = types[i]->__size;
+      off = (off + align - 1) & ~ (align - 1);
+      __builtin_memcpy (results[i], call_result + off, size);
+      off += size;
+    }
+}
+
+/* Call a function.  The type of the function is FUNC_TYPE, and the
+   address is FUNC_ADDR.  PARAMS is an array of parameter addresses.
+   RESULTS is an array of result addresses.  */
+
+void
+reflect_call (const struct __go_func_type *func_type, const void *func_addr,
+	      _Bool is_interface, void **params, void **results)
+{
+  ffi_cif cif;
+  unsigned char *call_result;
+
+  __go_assert (func_type->__common.__code == GO_FUNC);
+  go_func_to_cif (func_type, is_interface, &cif);
+
+  call_result = (unsigned char *) malloc (go_results_size (func_type));
+
+  ffi_call (&cif, func_addr, call_result, params);
+
+  /* Some day we may need to free result values if RESULTS is
+     NULL.  */
+  if (results != NULL)
+    go_set_results (func_type, call_result, results);
+
+  free (call_result);
+}
diff --git a/libgo/runtime/go-reflect-chan.c b/libgo/runtime/go-reflect-chan.c
new file mode 100644
index 000000000..412cfeedf
--- /dev/null
+++ b/libgo/runtime/go-reflect-chan.c
@@ -0,0 +1,148 @@
+/* go-reflect-chan.c -- channel reflection support for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdlib.h>
+#include <stdint.h>
+
+#include "config.h"
+#include "go-type.h"
+#include "channel.h"
+
+/* This file implements support for reflection on channels.  These
+   functions are called from reflect/value.go.  */
+
+extern unsigned char *makechan (const struct __go_type_descriptor *, uint32_t)
+  asm ("libgo_reflect.reflect.makechan");
+
+unsigned char *
+makechan (const struct __go_type_descriptor *typ, uint32_t size)
+{
+  return (unsigned char *) __go_new_channel (typ->__size, size);
+}
+
+extern void chansend (unsigned char *, unsigned char *, _Bool *)
+  asm ("libgo_reflect.reflect.chansend");
+
+void
+chansend (unsigned char *ch, unsigned char *val, _Bool *pres)
+{
+  struct __go_channel *channel = (struct __go_channel *) ch;
+
+  if (channel->element_size <= sizeof (uint64_t))
+    {
+      union
+      {
+	char b[sizeof (uint64_t)];
+	uint64_t v;
+      } u;
+
+      __builtin_memset (u.b, 0, sizeof (uint64_t));
+#ifndef WORDS_BIGENDIAN
+      __builtin_memcpy (u.b, val, channel->element_size);
+#else
+      __builtin_memcpy (u.b + sizeof (uint64_t) - channel->element_size, val,
+			channel->element_size);
+#endif
+      if (pres == NULL)
+	__go_send_small (channel, u.v, 0);
+      else
+	*pres = __go_send_nonblocking_small (channel, u.v);
+    }
+  else
+    {
+      if (pres == NULL)
+	__go_send_big (channel, val, 0);
+      else
+	*pres = __go_send_nonblocking_big (channel, val);
+    }
+}
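+
+/* The union above widens small elements into the uint64_t slot the
+   channel code expects.  For a hypothetical 2-byte element with
+   value 0x0102, the bytes are copied to the front of U.B on a
+   little-endian target and to the back on a big-endian one, so U.V
+   holds the value in its low-order bits either way and chanrecv
+   below can copy the same bytes back out.  */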
+
+extern void chanrecv (unsigned char *, unsigned char *, _Bool *)
+  asm ("libgo_reflect.reflect.chanrecv");
+
+void
+chanrecv (unsigned char *ch, unsigned char *val, _Bool *pres)
+{
+  struct __go_channel *channel = (struct __go_channel *) ch;
+
+  if (channel->element_size <= sizeof (uint64_t))
+    {
+      union
+      {
+	char b[sizeof (uint64_t)];
+	uint64_t v;
+      } u;
+
+      if (pres == NULL)
+	u.v = __go_receive_small (channel, 0);
+      else
+	{
+	  struct __go_receive_nonblocking_small s;
+
+	  s = __go_receive_nonblocking_small (channel);
+	  *pres = s.__success;
+	  if (!s.__success)
+	    return;
+	  u.v = s.__val;
+	}
+
+#ifndef WORDS_BIGENDIAN
+      __builtin_memcpy (val, u.b, channel->element_size);
+#else
+      __builtin_memcpy (val, u.b + sizeof (uint64_t) - channel->element_size,
+			channel->element_size);
+#endif
+    }
+  else
+    {
+      if (pres == NULL)
+	__go_receive_big (channel, val, 0);
+      else
+	*pres = __go_receive_nonblocking_big (channel, val);
+    }
+}
+
+extern _Bool chanclosed (unsigned char *)
+  asm ("libgo_reflect.reflect.chanclosed");
+
+_Bool
+chanclosed (unsigned char *ch)
+{
+  struct __go_channel *channel = (struct __go_channel *) ch;
+
+  return __go_builtin_closed (channel);
+}
+
+extern void chanclose (unsigned char *)
+  asm ("libgo_reflect.reflect.chanclose");
+
+void
+chanclose (unsigned char *ch)
+{
+  struct __go_channel *channel = (struct __go_channel *) ch;
+
+  __go_builtin_close (channel);
+}
+
+extern int32_t chanlen (unsigned char *) asm ("libgo_reflect.reflect.chanlen");
+
+int32_t
+chanlen (unsigned char *ch)
+{
+  struct __go_channel *channel = (struct __go_channel *) ch;
+
+  return (int32_t) __go_chan_len (channel);
+}
+
+extern int32_t chancap (unsigned char *) asm ("libgo_reflect.reflect.chancap");
+
+int32_t
+chancap (unsigned char *ch)
+{
+  struct __go_channel *channel = (struct __go_channel *) ch;
+
+  return (int32_t) __go_chan_cap (channel);
+}
diff --git a/libgo/runtime/go-reflect-map.c b/libgo/runtime/go-reflect-map.c
new file mode 100644
index 000000000..67960dee4
--- /dev/null
+++ b/libgo/runtime/go-reflect-map.c
@@ -0,0 +1,139 @@
+/* go-reflect-map.c -- map reflection support for Go.
+
+   Copyright 2009, 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdlib.h>
+#include <stdint.h>
+
+#include "go-alloc.h"
+#include "go-type.h"
+#include "map.h"
+
+/* This file implements support for reflection on maps.  These
+   functions are called from reflect/value.go.  */
+
+extern _Bool mapaccess (unsigned char *, unsigned char *, unsigned char *)
+  asm ("libgo_reflect.reflect.mapaccess");
+
+_Bool
+mapaccess (unsigned char *m, unsigned char *key, unsigned char *val)
+{
+  struct __go_map *map = (struct __go_map *) m;
+  void *p;
+  const struct __go_type_descriptor *val_descriptor;
+
+  p = __go_map_index (map, key, 0);
+  if (p == NULL)
+    return 0;
+  else
+    {
+      val_descriptor = map->__descriptor->__map_descriptor->__val_type;
+      __builtin_memcpy (val, p, val_descriptor->__size);
+      return 1;
+    }
+}
+
+extern void mapassign (unsigned char *, unsigned char *, unsigned char *)
+  asm ("libgo_reflect.reflect.mapassign");
+
+void
+mapassign (unsigned char *m, unsigned char *key, unsigned char *val)
+{
+  struct __go_map *map = (struct __go_map *) m;
+
+  if (val == NULL)
+    __go_map_delete (map, key);
+  else
+    {
+      void *p;
+      const struct __go_type_descriptor *val_descriptor;
+
+      p = __go_map_index (map, key, 1);
+      val_descriptor = map->__descriptor->__map_descriptor->__val_type;
+      __builtin_memcpy (p, val, val_descriptor->__size);
+    }
+}
+
+extern int32_t maplen (unsigned char *)
+  asm ("libgo_reflect.reflect.maplen");
+
+int32_t
+maplen (unsigned char *m)
+{
+  struct __go_map *map = (struct __go_map *) m;
+  return (int32_t) map->__element_count;
+}
+
+extern unsigned char *mapiterinit (unsigned char *)
+  asm ("libgo_reflect.reflect.mapiterinit");
+
+unsigned char *
+mapiterinit (unsigned char *m)
+{
+  struct __go_hash_iter *it;
+
+  it = __go_alloc (sizeof (struct __go_hash_iter));
+  __go_mapiterinit ((struct __go_map *) m, it);
+  return (unsigned char *) it;
+}
+
+extern void mapiternext (unsigned char *)
+  asm ("libgo_reflect.reflect.mapiternext");
+
+void
+mapiternext (unsigned char *it)
+{
+  __go_mapiternext ((struct __go_hash_iter *) it);
+}
+
+extern _Bool mapiterkey (unsigned char *, unsigned char *)
+  asm ("libgo_reflect.reflect.mapiterkey");
+
+_Bool
+mapiterkey (unsigned char *ita, unsigned char *key)
+{
+  struct __go_hash_iter *it = (struct __go_hash_iter *) ita;
+
+  if (it->entry == NULL)
+    return 0;
+  else
+    {
+      __go_mapiter1 (it, key);
+      return 1;
+    }
+}
+
+/* Make a new map.  We have to build our own map descriptor.  */
+
+extern unsigned char *makemap (const struct __go_map_type *)
+  asm ("libgo_reflect.reflect.makemap");
+
+unsigned char *
+makemap (const struct __go_map_type *t)
+{
+  struct __go_map_descriptor *md;
+  unsigned int o;
+  const struct __go_type_descriptor *kt;
+  const struct __go_type_descriptor *vt;
+
+  /* FIXME: Reference count.  */
+  md = (struct __go_map_descriptor *) __go_alloc (sizeof (*md));
+  md->__map_descriptor = t;
+  o = sizeof (void *);
+  kt = t->__key_type;
+  o = (o + kt->__field_align - 1) & ~ (kt->__field_align - 1);
+  md->__key_offset = o;
+  o += kt->__size;
+  vt = t->__val_type;
+  o = (o + vt->__field_align - 1) & ~ (vt->__field_align - 1);
+  md->__val_offset = o;
+  o += vt->__size;
+  o = (o + sizeof (void *) - 1) & ~ (sizeof (void *) - 1);
+  o = (o + kt->__field_align - 1) & ~ (kt->__field_align - 1);
+  o = (o + vt->__field_align - 1) & ~ (vt->__field_align - 1);
+  md->__entry_size = o;
+
+  return (unsigned char *) __go_new_map (md, 0);
+}
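+
+/* A worked example of the layout above, assuming a 64-bit target
+   with int64 keys and bool values: O starts at 8 for the chaining
+   pointer, the 8-aligned key gives __key_offset 8 and O = 16, the
+   1-aligned value gives __val_offset 16 and O = 17, and rounding to
+   pointer, key, and value alignment yields an __entry_size of 24.  */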
diff --git a/libgo/runtime/go-reflect.c b/libgo/runtime/go-reflect.c
new file mode 100644
index 000000000..9485c0979
--- /dev/null
+++ b/libgo/runtime/go-reflect.c
@@ -0,0 +1,186 @@
+/* go-reflect.c -- implement unsafe.Reflect and unsafe.Typeof for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdlib.h>
+#include <stdint.h>
+
+#include "interface.h"
+#include "go-alloc.h"
+#include "go-panic.h"
+#include "go-string.h"
+#include "go-type.h"
+
+/* For field alignment.  */
+
+struct field_align
+{
+  char c;
+  struct __go_type_descriptor *p;
+};
+
+/* The type descriptors in the runtime package.  */
+
+extern const struct __go_type_descriptor ptr_bool_descriptor
+  asm ("__go_td_pN30_libgo_runtime.runtime.BoolType");
+extern const struct __go_type_descriptor ptr_float_descriptor
+  asm ("__go_td_pN31_libgo_runtime.runtime.FloatType");
+extern const struct __go_type_descriptor ptr_complex_descriptor
+  asm ("__go_td_pN33_libgo_runtime.runtime.ComplexType");
+extern const struct __go_type_descriptor ptr_int_descriptor
+  asm ("__go_td_pN29_libgo_runtime.runtime.IntType");
+extern const struct __go_type_descriptor ptr_uint_descriptor
+  asm ("__go_td_pN30_libgo_runtime.runtime.UintType");
+extern const struct __go_type_descriptor ptr_string_descriptor
+  asm ("__go_td_pN32_libgo_runtime.runtime.StringType");
+extern const struct __go_type_descriptor ptr_unsafe_pointer_descriptor
+  asm ("__go_td_pN39_libgo_runtime.runtime.UnsafePointerType");
+extern const struct __go_type_descriptor ptr_array_descriptor
+  asm ("__go_td_pN31_libgo_runtime.runtime.ArrayType");
+extern const struct __go_type_descriptor ptr_slice_descriptor
+  asm ("__go_td_pN31_libgo_runtime.runtime.SliceType");
+extern const struct __go_type_descriptor ptr_chan_descriptor
+  asm ("__go_td_pN30_libgo_runtime.runtime.ChanType");
+extern const struct __go_type_descriptor ptr_func_descriptor
+  asm ("__go_td_pN30_libgo_runtime.runtime.FuncType");
+extern const struct __go_type_descriptor ptr_interface_descriptor
+  asm ("__go_td_pN35_libgo_runtime.runtime.InterfaceType");
+extern const struct __go_type_descriptor ptr_map_descriptor
+  asm ("__go_td_pN29_libgo_runtime.runtime.MapType");
+extern const struct __go_type_descriptor ptr_ptr_descriptor
+  asm ("__go_td_pN29_libgo_runtime.runtime.PtrType");
+extern const struct __go_type_descriptor ptr_struct_descriptor
+  asm ("__go_td_pN32_libgo_runtime.runtime.StructType");
+
+const struct __go_type_descriptor *
+get_descriptor (int code)
+{
+  switch (code)
+    {
+    case GO_BOOL:
+      return &ptr_bool_descriptor;
+    case GO_FLOAT32:
+    case GO_FLOAT64:
+      return &ptr_float_descriptor;
+    case GO_COMPLEX64:
+    case GO_COMPLEX128:
+      return &ptr_complex_descriptor;
+    case GO_INT16:
+    case GO_INT32:
+    case GO_INT64:
+    case GO_INT8:
+    case GO_INT:
+      return &ptr_int_descriptor;
+    case GO_UINT16:
+    case GO_UINT32:
+    case GO_UINT64:
+    case GO_UINT8:
+    case GO_UINTPTR:
+    case GO_UINT:
+      return &ptr_uint_descriptor;
+    case GO_STRING:
+      return &ptr_string_descriptor;
+    case GO_UNSAFE_POINTER:
+      return &ptr_unsafe_pointer_descriptor;
+    case GO_ARRAY:
+      return &ptr_array_descriptor;
+    case GO_SLICE:
+      return &ptr_slice_descriptor;
+    case GO_CHAN:
+      return &ptr_chan_descriptor;
+    case GO_FUNC:
+      return &ptr_func_descriptor;
+    case GO_INTERFACE:
+      return &ptr_interface_descriptor;
+    case GO_MAP:
+      return &ptr_map_descriptor;
+    case GO_PTR:
+      return &ptr_ptr_descriptor;
+    case GO_STRUCT:
+      return &ptr_struct_descriptor;
+    default:
+      abort ();
+    }
+}
+
+/* Implement unsafe.Reflect.  */
+
+struct reflect_ret
+{
+  struct __go_empty_interface rettype;
+  void *addr;
+};
+
+struct reflect_ret Reflect (struct __go_empty_interface)
+  asm ("libgo_unsafe.unsafe.Reflect");
+
+struct reflect_ret
+Reflect (struct __go_empty_interface e)
+{
+  struct reflect_ret ret;
+
+  if (e.__type_descriptor == NULL)
+    {
+      ret.rettype.__type_descriptor = NULL;
+      ret.rettype.__object = NULL;
+      ret.addr = NULL;
+    }
+  else
+    {
+      size_t size;
+
+      ret.rettype.__type_descriptor =
+	get_descriptor (e.__type_descriptor->__code);
+
+      /* This memcpy is really just an assignment of a const pointer
+	 to a non-const pointer.  FIXME: We should canonicalize this
+	 pointer, so that for a given type we always return the same
+	 pointer.  */
+      __builtin_memcpy (&ret.rettype.__object, &e.__type_descriptor,
+			sizeof (void *));
+
+      /* Make a copy of the value.  */
+      size = e.__type_descriptor->__size;
+      if (size <= sizeof (uint64_t))
+	ret.addr = __go_alloc (sizeof (uint64_t));
+      else
+	ret.addr = __go_alloc (size);
+      if (__go_is_pointer_type (e.__type_descriptor))
+	*(void **) ret.addr = e.__object;
+      else
+	__builtin_memcpy (ret.addr, e.__object, size);
+    }
+
+  return ret;
+}
+
+/* Implement unsafe.Typeof.  */
+
+struct __go_empty_interface Typeof (struct __go_empty_interface)
+  asm ("libgo_unsafe.unsafe.Typeof");
+
+struct __go_empty_interface
+Typeof (const struct __go_empty_interface e)
+{
+  struct __go_empty_interface ret;
+
+  if (e.__type_descriptor == NULL)
+    {
+      ret.__type_descriptor = NULL;
+      ret.__object = NULL;
+    }
+  else
+    {
+      ret.__type_descriptor = get_descriptor (e.__type_descriptor->__code);
+
+      /* This memcpy is really just an assignment of a const pointer
+	 to a non-const pointer.  FIXME: We should canonicalize this
+	 pointer, so that for a given type we always return the same
+	 pointer.  */
+      __builtin_memcpy (&ret.__object, &e.__type_descriptor, sizeof (void *));
+    }
+
+  return ret;
+}
diff --git a/libgo/runtime/go-rune.c b/libgo/runtime/go-rune.c
new file mode 100644
index 000000000..7e31eb8d6
--- /dev/null
+++ b/libgo/runtime/go-rune.c
@@ -0,0 +1,77 @@
+/* go-rune.c -- rune functions for Go.
+
+   Copyright 2009, 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+
+#include "go-string.h"
+
+/* Get a character from the UTF-8 string STR, of length LEN.  Store
+   the Unicode character, if any, in *RUNE.  Return the number of
+   bytes used from STR.  */
+
+int
+__go_get_rune (const unsigned char *str, size_t len, int *rune)
+{
+  int c, c1, c2, c3;
+
+  /* Default to the "replacement character".  */
+  *rune = 0xfffd;
+
+  if (len <= 0)
+    return 1;
+
+  c = *str;
+  if (c <= 0x7f)
+    {
+      *rune = c;
+      return 1;
+    }
+
+  if (len <= 1)
+    return 1;
+
+  c1 = str[1];
+  if ((c & 0xe0) == 0xc0
+      && (c1 & 0xc0) == 0x80)
+    {
+      *rune = (((c & 0x1f) << 6)
+	       + (c1 & 0x3f));
+      return 2;
+    }
+
+  if (len <= 2)
+    return 1;
+
+  c2 = str[2];
+  if ((c & 0xf0) == 0xe0
+      && (c1 & 0xc0) == 0x80
+      && (c2 & 0xc0) == 0x80)
+    {
+      *rune = (((c & 0xf) << 12)
+	       + ((c1 & 0x3f) << 6)
+	       + (c2 & 0x3f));
+      return 3;
+    }
+
+  if (len <= 3)
+    return 1;
+
+  c3 = str[3];
+  if ((c & 0xf8) == 0xf0
+      && (c1 & 0xc0) == 0x80
+      && (c2 & 0xc0) == 0x80
+      && (c3 & 0xc0) == 0x80)
+    {
+      *rune = (((c & 0x7) << 18)
+	       + ((c1 & 0x3f) << 12)
+	       + ((c2 & 0x3f) << 6)
+	       + (c3 & 0x3f));
+      return 4;
+    }
+
+  /* Invalid encoding.  Return 1 so that we advance.  */
+  return 1;
+}
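+
+/* A worked example: the three-byte sequence 0xE4 0xB8 0xAD takes the
+   third branch above and decodes as (0x4 << 12) + (0x38 << 6) + 0x2D
+   = 0x4E2D, i.e. U+4E2D.  Any malformed prefix falls through,
+   consuming one byte and leaving the replacement character in
+   *RUNE.  */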
diff --git a/libgo/runtime/go-runtime-error.c b/libgo/runtime/go-runtime-error.c
new file mode 100644
index 000000000..ceba2d673
--- /dev/null
+++ b/libgo/runtime/go-runtime-error.c
@@ -0,0 +1,84 @@
+/* go-runtime-error.c -- Go runtime error.
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-panic.h"
+
+/* The compiler generates calls to this function.  These enum values
+   are known to the compiler and used by compiled code.  Any change
+   here must be reflected in the compiler.  */
+
+enum
+{
+  /* Slice index out of bounds: negative or larger than the length of
+     the slice.  */
+  SLICE_INDEX_OUT_OF_BOUNDS = 0,
+
+  /* Array index out of bounds.  */
+  ARRAY_INDEX_OUT_OF_BOUNDS = 1,
+
+  /* String index out of bounds.  */
+  STRING_INDEX_OUT_OF_BOUNDS = 2,
+
+  /* Slice slice out of bounds: negative or larger than the length of
+     the slice or high bound less than low bound.  */
+  SLICE_SLICE_OUT_OF_BOUNDS = 3,
+
+  /* Array slice out of bounds.  */
+  ARRAY_SLICE_OUT_OF_BOUNDS = 4,
+
+  /* String slice out of bounds.  */
+  STRING_SLICE_OUT_OF_BOUNDS = 5,
+
+  /* Dereference of nil pointer.  This is used when there is a
+     dereference of a pointer to a very large struct or array, to
+     ensure that a gigantic array is not used as a proxy to access random
+     memory locations.  */
+  NIL_DEREFERENCE = 6,
+
+  /* Slice length or capacity out of bounds in make: negative or
+     overflow or length greater than capacity.  */
+  MAKE_SLICE_OUT_OF_BOUNDS = 7,
+
+  /* Map capacity out of bounds in make: negative or overflow.  */
+  MAKE_MAP_OUT_OF_BOUNDS = 8,
+
+  /* Channel capacity out of bounds in make: negative or overflow.  */
+  MAKE_CHAN_OUT_OF_BOUNDS = 9
+};
+
+extern void __go_runtime_error (int) __attribute__ ((noreturn));
+
+void
+__go_runtime_error (int i)
+{
+  switch (i)
+    {
+    case SLICE_INDEX_OUT_OF_BOUNDS:
+    case ARRAY_INDEX_OUT_OF_BOUNDS:
+    case STRING_INDEX_OUT_OF_BOUNDS:
+      __go_panic_msg ("index out of range");
+
+    case SLICE_SLICE_OUT_OF_BOUNDS:
+    case ARRAY_SLICE_OUT_OF_BOUNDS:
+    case STRING_SLICE_OUT_OF_BOUNDS:
+      __go_panic_msg ("slice bounds out of range");
+
+    case NIL_DEREFERENCE:
+      __go_panic_msg ("nil pointer dereference");
+
+    case MAKE_SLICE_OUT_OF_BOUNDS:
+      __go_panic_msg ("make slice len or cap out of range");
+
+    case MAKE_MAP_OUT_OF_BOUNDS:
+      __go_panic_msg ("make map len out of range");
+
+    case MAKE_CHAN_OUT_OF_BOUNDS:
+      __go_panic_msg ("make chan len out of range");
+
+    default:
+      __go_panic_msg ("unknown runtime error");
+    }
+}
diff --git a/libgo/runtime/go-sched.c b/libgo/runtime/go-sched.c
new file mode 100644
index 000000000..2e36d31a5
--- /dev/null
+++ b/libgo/runtime/go-sched.c
@@ -0,0 +1,15 @@
+/* go-sched.c -- the runtime.Gosched function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <sched.h>
+
+void Gosched (void) asm ("libgo_runtime.runtime.Gosched");
+
+void
+Gosched (void)
+{
+  sched_yield ();
+}
diff --git a/libgo/runtime/go-select.c b/libgo/runtime/go-select.c
new file mode 100644
index 000000000..9d9f728f2
--- /dev/null
+++ b/libgo/runtime/go-select.c
@@ -0,0 +1,758 @@
+/* go-select.c -- implement select.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <pthread.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "config.h"
+#include "go-assert.h"
+#include "channel.h"
+
+/* __go_select builds an array of these structures.  */
+
+struct select_channel
+{
+  /* The channel being selected.  */
+  struct __go_channel* channel;
+  /* If this channel is selected, the value to return.  */
+  size_t retval;
+  /* If this channel is a duplicate of one which appears earlier in
+     the array, this is the array index of the earlier channel.  This
+     is -1UL if this is not a dup.  */
+  size_t dup_index;
+  /* An entry to put on the send or receive queue.  */
+  struct __go_channel_select queue_entry;
+  /* True if selected for send.  */
+  _Bool is_send;
+  /* True if channel is ready--it has data to receive or space to
+     send.  */
+  _Bool is_ready;
+};
+
+/* This mutex controls access to __go_select_cond.  This mutex may not
+   be acquired if any channel locks are held.  */
+
+static pthread_mutex_t __go_select_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/* When we have to wait for channels, we tell them to trigger this
+   condition variable when they send or receive something.  */
+
+static pthread_cond_t __go_select_cond = PTHREAD_COND_INITIALIZER;
+
+/* Sort the channels by address.  This avoids deadlock when multiple
+   selects are running on overlapping sets of channels.  */
+
+static int
+channel_sort (const void *p1, const void *p2)
+{
+  const struct select_channel *c1 = (const struct select_channel *) p1;
+  const struct select_channel *c2 = (const struct select_channel *) p2;
+
+  if ((uintptr_t) c1->channel < (uintptr_t) c2->channel)
+    return -1;
+  else if ((uintptr_t) c1->channel > (uintptr_t) c2->channel)
+    return 1;
+  else
+    return 0;
+}
+
+/* Return whether there is an entry on QUEUE which can be used for a
+   synchronous send or receive.  */
+
+static _Bool
+is_queue_ready (struct __go_channel_select *queue)
+{
+  int x;
+
+  if (queue == NULL)
+    return 0;
+
+  x = pthread_mutex_lock (&__go_select_data_mutex);
+  __go_assert (x == 0);
+
+  while (queue != NULL)
+    {
+      if (*queue->selected == NULL)
+	break;
+      queue = queue->next;
+    }
+
+  x = pthread_mutex_unlock (&__go_select_data_mutex);
+  __go_assert (x == 0);
+
+  return queue != NULL;
+}
+
+/* Return whether CHANNEL is ready.  If IS_SEND is true check whether it
+   has space to send, otherwise check whether it has a value to
+   receive.  */
+
+static _Bool
+is_channel_ready (struct __go_channel* channel, _Bool is_send)
+{
+  if (is_send)
+    {
+      if (channel->selected_for_send)
+	return 0;
+      if (channel->is_closed)
+	return 1;
+      if (channel->num_entries > 0)
+	{
+	  /* An asynchronous channel is ready for sending if there is
+	     room in the buffer.  */
+	  return ((channel->next_store + 1) % channel->num_entries
+		  != channel->next_fetch);
+	}
+      else
+	{
+	  if (channel->waiting_to_send)
+	    {
+	      /* Some other goroutine is waiting to send on this
+		 channel, so we can't.  */
+	      return 0;
+	    }
+	  if (channel->waiting_to_receive)
+	    {
+	      /* Some other goroutine is waiting to receive a value,
+		 so we can send one.  */
+	      return 1;
+	    }
+	  if (is_queue_ready (channel->select_receive_queue))
+	    {
+	      /* There is a select statement waiting to synchronize
+		 with this one.  */
+	      return 1;
+	    }
+	  return 0;
+	}
+    }
+  else
+    {
+      if (channel->selected_for_receive)
+	return 0;
+      if (channel->is_closed)
+	return 1;
+      if (channel->num_entries > 0)
+	{
+	  /* An asynchronous channel is ready for receiving if there
+	     is a value in the buffer.  */
+	  return channel->next_fetch != channel->next_store;
+	}
+      else
+	{
+	  if (channel->waiting_to_receive)
+	    {
+	      /* Some other goroutine is waiting to receive from this
+		 channel, so it is not ready for us to receive.  */
+	      return 0;
+	    }
+	  if (channel->next_store > 0)
+	    {
+	      /* There is data on the channel.  */
+	      return 1;
+	    }
+	  if (is_queue_ready (channel->select_send_queue))
+	    {
+	      /* There is a select statement waiting to synchronize
+		 with this one.  */
+	      return 1;
+	    }
+	  return 0;
+	}
+    }
+}
+
+/* Mark a channel as selected.  The channel is locked.  IS_SELECTED is
+   true if the channel was selected for us by another goroutine.  We
+   set *NEEDS_BROADCAST if we need to broadcast on the select
+   condition variable.  Return true if we successfully claimed the
+   channel for this select.  */
+
+static _Bool
+mark_channel_selected (struct __go_channel *channel, _Bool is_send,
+		       _Bool is_selected, _Bool *needs_broadcast)
+{
+  if (channel->num_entries == 0)
+    {
+      /* This is a synchronous channel.  If there is no goroutine
+	 currently waiting, but there is another select waiting, then
+	 we need to tell that select to use this channel.  That may
+	 fail--there may be no other goroutines currently waiting--as
+	 a third goroutine may already have claimed the select.  */
+      if (!is_selected
+	  && !channel->is_closed
+	  && (is_send
+	      ? !channel->waiting_to_receive
+	      : channel->next_store == 0))
+	{
+	  int x;
+	  struct __go_channel_select *queue;
+
+	  x = pthread_mutex_lock (&__go_select_data_mutex);
+	  __go_assert (x == 0);
+
+	  queue = (is_send
+		   ? channel->select_receive_queue
+		   : channel->select_send_queue);
+	  __go_assert (queue != NULL);
+
+	  while (queue != NULL)
+	    {
+	      if (*queue->selected == NULL)
+		{
+		  *queue->selected = channel;
+		  *queue->is_read = !is_send;
+		  break;
+		}
+	      queue = queue->next;
+	    }
+
+	  x = pthread_mutex_unlock (&__go_select_data_mutex);
+	  __go_assert (x == 0);
+
+	  if (queue == NULL)
+	    return 0;
+
+	  if (is_send)
+	    channel->selected_for_receive = 1;
+	  else
+	    channel->selected_for_send = 1;
+
+	  /* We are going to have to tell the other select that there
+	     is something to do.  */
+	  *needs_broadcast = 1;
+	}
+    }
+
+  if (is_send)
+    channel->selected_for_send = 1;
+  else
+    channel->selected_for_receive = 1;
+
+  return 1;
+}
+
+/* Mark a channel to indicate that a select is waiting.  The channel
+   is locked.  */
+
+static void
+mark_select_waiting (struct select_channel *sc,
+		     struct __go_channel **selected_pointer,
+		     _Bool *selected_for_read_pointer)
+{
+  struct __go_channel *channel = sc->channel;
+  _Bool is_send = sc->is_send;
+
+  if (channel->num_entries == 0)
+    {
+      struct __go_channel_select **pp;
+
+      pp = (is_send
+	    ? &channel->select_send_queue
+	    : &channel->select_receive_queue);
+
+      /* Add an entry to the queue of selects on this channel.  */
+      sc->queue_entry.next = *pp;
+      sc->queue_entry.selected = selected_pointer;
+      sc->queue_entry.is_read = selected_for_read_pointer;
+
+      *pp = &sc->queue_entry;
+    }
+
+  channel->select_mutex = &__go_select_mutex;
+  channel->select_cond = &__go_select_cond;
+
+  /* We never actually clear the select_mutex and select_cond fields.
+     In order to clear them safely, we would need to have some way of
+     knowing when no select is waiting for the channel.  Thus we
+     introduce a bit of inefficiency for every channel that select
+     needs to wait for.  This is harmless other than the performance
+     cost.  */
+}
+
+/* Remove the entry for this select waiting on this channel.  The
+   channel is locked.  We check both queues, because the channel may
+   be selected for both reading and writing.  */
+
+static void
+clear_select_waiting (struct select_channel *sc,
+		      struct __go_channel **selected_pointer)
+{
+  struct __go_channel *channel = sc->channel;
+
+  if (channel->num_entries == 0)
+    {
+      _Bool found;
+      struct __go_channel_select **pp;
+
+      found = 0;
+
+      for (pp = &channel->select_send_queue; *pp != NULL; pp = &(*pp)->next)
+	{
+	  if ((*pp)->selected == selected_pointer)
+	    {
+	      *pp = (*pp)->next;
+	      found = 1;
+	      break;
+	    }
+	}
+
+      for (pp = &channel->select_receive_queue; *pp != NULL; pp = &(*pp)->next)
+	{
+	  if ((*pp)->selected == selected_pointer)
+	    {
+	      *pp = (*pp)->next;
+	      found = 1;
+	      break;
+	    }
+	}
+
+      __go_assert (found);
+    }
+}
+
+/* Look through the list of channels to see which ones are ready.
+   Lock each channel, and set the is_ready flag.  Return the number
+   of ready channels.  */
+
+static size_t
+lock_channels_find_ready (struct select_channel *channels, size_t count)
+{
+  size_t ready_count;
+  size_t i;
+
+  ready_count = 0;
+  for (i = 0; i < count; ++i)
+    {
+      struct __go_channel *channel = channels[i].channel;
+      _Bool is_send = channels[i].is_send;
+      size_t dup_index = channels[i].dup_index;
+      int x;
+
+      if (channel == NULL)
+	continue;
+
+      if (dup_index != (size_t) -1UL)
+	{
+	  if (channels[dup_index].is_ready)
+	    {
+	      channels[i].is_ready = 1;
+	      ++ready_count;
+	    }
+	  continue;
+	}
+
+      x = pthread_mutex_lock (&channel->lock);
+      __go_assert (x == 0);
+
+      if (is_channel_ready (channel, is_send))
+	{
+	  channels[i].is_ready = 1;
+	  ++ready_count;
+	}
+    }
+
+  return ready_count;
+}
+
+/* The channel we are going to select has been forced by some other
+   goroutine.  SELECTED_CHANNEL is the channel we will use,
+   SELECTED_FOR_READ is whether the other goroutine wants to read from
+   the channel.  Note that the channel could be specified multiple
+   times in this select, so we must mark each appropriate entry for
+   this channel as ready.  Every other channel is marked as not ready.
+   All the channels are locked before this routine is called.  This
+   returns the number of ready channels.  */
+
+static size_t
+force_selected_channel_ready (struct select_channel *channels, size_t count,
+			      struct __go_channel *selected_channel,
+			      _Bool selected_for_read)
+{
+  size_t ready_count;
+  size_t i;
+
+  ready_count = 0;
+  for (i = 0; i < count; ++i)
+    {
+      struct __go_channel *channel = channels[i].channel;
+      _Bool is_send = channels[i].is_send;
+
+      if (channel == NULL)
+	continue;
+
+      if (channel != selected_channel
+	  || (is_send ? !selected_for_read : selected_for_read))
+	channels[i].is_ready = 0;
+      else
+	{
+	  channels[i].is_ready = 1;
+	  ++ready_count;
+	}
+    }
+  __go_assert (ready_count > 0);
+  return ready_count;
+}
+
+/* Unlock all the channels.  */
+
+static void
+unlock_channels (struct select_channel *channels, size_t count)
+{
+  size_t i;
+  int x;
+
+  for (i = 0; i < count; ++i)
+    {
+      struct __go_channel *channel = channels[i].channel;
+
+      if (channel == NULL)
+	continue;
+
+      if (channels[i].dup_index != (size_t) -1UL)
+	continue;
+
+      x = pthread_mutex_unlock (&channel->lock);
+      __go_assert (x == 0);
+    }
+}
+
+/* At least one channel is ready.  Randomly pick a channel to return.
+   Unlock all the channels.  IS_SELECTED is true if the channel was
+   picked for us by some other goroutine.  If SELECTED_POINTER is not
+   NULL, remove it from the queue for all the channels.  Return the
+   retval field of the selected channel.  This will return 0 if we
+   can't use the selected channel, because it relied on synchronizing
+   with some other select, and that select already synchronized with a
+   different channel.  */
+
+static size_t
+unlock_channels_and_select (struct select_channel *channels,
+			    size_t count, size_t ready_count,
+			    _Bool is_selected,
+			    struct __go_channel **selected_pointer)
+{
+  size_t selected;
+  size_t ret;
+  _Bool needs_broadcast;
+  size_t i;
+  int x;
+
+  /* Pick which channel we are going to return.  */
+#if defined(HAVE_RANDOM)
+  selected = (size_t) random () % ready_count;
+#else
+  selected = (size_t) rand () % ready_count;
+#endif
+  ret = 0;
+  needs_broadcast = 0;
+
+  /* Look at the channels in reverse order so that we don't unlock a
+     duplicated channel until we have seen all its dups.  */
+  for (i = 0; i < count; ++i)
+    {
+      size_t j = count - i - 1;
+      struct __go_channel *channel = channels[j].channel;
+      _Bool is_send = channels[j].is_send;
+
+      if (channel == NULL)
+	continue;
+
+      if (channels[j].is_ready)
+	{
+	  if (selected == 0)
+	    {
+	      if (mark_channel_selected (channel, is_send, is_selected,
+					 &needs_broadcast))
+		ret = channels[j].retval;
+	    }
+
+	  --selected;
+	}
+
+      if (channels[j].dup_index == (size_t) -1UL)
+	{
+	  if (selected_pointer != NULL)
+	    clear_select_waiting (&channels[j], selected_pointer);
+
+	  x = pthread_mutex_unlock (&channel->lock);
+	  __go_assert (x == 0);
+	}
+    }
+
+  /* The NEEDS_BROADCAST variable is set if we are synchronizing with
+     some other select statement.  We can't do the actual broadcast
+     until we have unlocked all the channels.  */
+
+  if (needs_broadcast)
+    {
+      x = pthread_mutex_lock (&__go_select_mutex);
+      __go_assert (x == 0);
+
+      x = pthread_cond_broadcast (&__go_select_cond);
+      __go_assert (x == 0);
+
+      x = pthread_mutex_unlock (&__go_select_mutex);
+      __go_assert (x == 0);
+    }
+
+  return ret;
+}
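+
+/* Picking the winner at random gives every ready case an equal
+   chance, matching the language rule that select chooses uniformly
+   among the communications that can proceed.  */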
+
+/* Mark all channels to show that we are waiting for them.  This is
+   called with the select mutex held, but none of the channels are
+   locked.  This returns true if some channel was found to be
+   ready.  */
+
+static _Bool
+mark_all_channels_waiting (struct select_channel* channels, size_t count,
+			   struct __go_channel **selected_pointer,
+			   _Bool *selected_for_read_pointer)
+{
+  _Bool ret;
+  int x;
+  size_t i;
+
+  ret = 0;
+  for (i = 0; i < count; ++i)
+    {
+      struct __go_channel *channel = channels[i].channel;
+      _Bool is_send = channels[i].is_send;
+
+      if (channel == NULL)
+	continue;
+
+      if (channels[i].dup_index != (size_t) -1UL)
+	{
+	  size_t j;
+
+	  /* A channel may be selected for both read and write.  */
+	  if (channels[channels[i].dup_index].is_send != is_send)
+	    {
+	      for (j = channels[i].dup_index + 1; j < i; ++j)
+		{
+		  if (channels[j].channel == channel
+		      && channels[j].is_send == is_send)
+		    break;
+		}
+	      if (j < i)
+		continue;
+	    }
+	}
+
+      x = pthread_mutex_lock (&channel->lock);
+      __go_assert (x == 0);
+
+      /* To avoid a race condition, we have to check again whether the
+	 channel is ready.  It may have become ready since we did the
+	 first set of checks but before we acquired the select mutex.
+	 If we don't check here, we could sleep forever on the select
+	 condition variable.  */
+      if (is_channel_ready (channel, is_send))
+	ret = 1;
+
+      /* If SELECTED_POINTER is NULL, then we have already marked the
+	 channel as waiting.  */
+      if (selected_pointer != NULL)
+	mark_select_waiting (&channels[i], selected_pointer,
+			     selected_for_read_pointer);
+
+      x = pthread_mutex_unlock (&channel->lock);
+      __go_assert (x == 0);
+    }
+
+  return ret;
+}
+
+/* Implement select.  This is called by the compiler-generated code
+   with a count, a flag indicating whether the select has a default
+   clause, and two parallel arrays giving, for each case, a pointer to
+   a channel and a _Bool which is true for send, false for receive.  */
+
+size_t
+__go_select (size_t count, _Bool has_default,
+	     struct __go_channel **channel_args, _Bool *is_send_args)
+{
+  struct select_channel stack_buffer[16];
+  struct select_channel *allocated_buffer;
+  struct select_channel *channels;
+  size_t i;
+  int x;
+  struct __go_channel *selected_channel;
+  _Bool selected_for_read;
+  _Bool is_queued;
+
+  if (count < sizeof stack_buffer / sizeof stack_buffer[0])
+    {
+      channels = &stack_buffer[0];
+      allocated_buffer = NULL;
+    }
+  else
+    {
+      allocated_buffer = ((struct select_channel *)
+			  malloc (count * sizeof (struct select_channel)));
+      channels = allocated_buffer;
+    }
+
+  for (i = 0; i < count; ++i)
+    {
+      struct __go_channel *channel_arg = channel_args[i];
+      _Bool is_send = is_send_args[i];
+
+      channels[i].channel = (struct __go_channel*) channel_arg;
+      channels[i].retval = i + 1;
+      channels[i].dup_index = (size_t) -1UL;
+      channels[i].queue_entry.next = NULL;
+      channels[i].queue_entry.selected = NULL;
+      channels[i].is_send = is_send;
+      channels[i].is_ready = 0;
+    }
+
+  qsort (channels, count, sizeof (struct select_channel), channel_sort);
+
+  for (i = 0; i < count; ++i)
+    {
+      size_t j;
+
+      for (j = 0; j < i; ++j)
+	{
+	  if (channels[j].channel == channels[i].channel)
+	    {
+	      channels[i].dup_index = j;
+	      break;
+	    }
+	}
+    }
+
+  /* SELECT_CHANNEL is used to select synchronized channels.  If no
+     channels are ready, we store a pointer to this variable on the
+     select queue for each synchronized channel.  Because the variable
+     may be set by channel operations running in other goroutines,
+     SELECT_CHANNEL may only be accessed when all the channels are
+     locked and/or when the select_data_mutex is locked.  */
+  selected_channel = NULL;
+
+  /* SELECTED_FOR_READ is set to true if SELECTED_CHANNEL was set by a
+     goroutine which wants to read from the channel.  The access
+     restrictions for this are like those for SELECTED_CHANNEL.  */
+  selected_for_read = 0;
+
+  /* IS_QUEUED is true if we have queued up this select on the queues
+     for any associated synchronous channels.  We only do this if no
+     channels are ready the first time around the loop.  */
+  is_queued = 0;
+
+  while (1)
+    {
+      int ready_count;
+      _Bool is_selected;
+
+      /* Lock all channels, identify which ones are ready.  */
+      ready_count = lock_channels_find_ready (channels, count);
+
+      /* All the channels are locked, so we can look at
+	 SELECTED_CHANNEL.  If it is not NULL, then our choice has
+	 been forced by some other goroutine.  This can only happen
+	 after the first time through the loop.  */
+      is_selected = selected_channel != NULL;
+      if (is_selected)
+	ready_count = force_selected_channel_ready (channels, count,
+						    selected_channel,
+						    selected_for_read);
+
+      if (ready_count > 0)
+	{
+	  size_t ret;
+
+	  ret = unlock_channels_and_select (channels, count, ready_count,
+					    is_selected,
+					    (is_queued
+					     ? &selected_channel
+					     : NULL));
+
+	  /* If RET is zero, it means that the channel we picked
+	     turned out not to be ready, because some other select
+	     grabbed it during our traversal.  Loop around and try
+	     again.  */
+	  if (ret == 0)
+	    {
+	      is_queued = 0;
+	      /* We are no longer on any channel queues, so it is safe
+		 to touch SELECTED_CHANNEL here.  It must be NULL,
+		 because otherwise that would mean somebody has promised
+		 to synch up with us and then failed to do so.  */
+	      __go_assert (selected_channel == NULL);
+	      continue;
+	    }
+
+	  if (allocated_buffer != NULL)
+	    free (allocated_buffer);
+
+	  return ret;
+	}
+
+      /* No channels were ready.  */
+
+      unlock_channels (channels, count);
+
+      if (has_default)
+	{
+	  /* Use the default clause.  */
+	  if (allocated_buffer != NULL)
+	    free (allocated_buffer);
+	  return 0;
+	}
+
+      /* This is a blocking select.  Grab the select lock, tell all
+	 the channels to notify us when something happens, and wait
+	 for something to happen.  */
+
+      x = pthread_mutex_lock (&__go_select_mutex);
+      __go_assert (x == 0);
+
+      /* Check whether SELECTED_CHANNEL was set while the channels
+	 were unlocked.  If it was set, then we can simply loop around
+	 again.  We need to check this while the select mutex is held.
+	 It is possible that something will set SELECTED_CHANNEL while
+	 we mark the channels as waiting.  If this happens, that
+	 goroutine is required to signal the select condition
+	 variable, which means acquiring the select mutex.  Since we
+	 have the select mutex locked ourselves, we can not miss that
+	 signal.  */
+
+      x = pthread_mutex_lock (&__go_select_data_mutex);
+      __go_assert (x == 0);
+
+      is_selected = selected_channel != NULL;
+
+      x = pthread_mutex_unlock (&__go_select_data_mutex);
+      __go_assert (x == 0);
+
+      if (!is_selected)
+	{
+	  /* Mark the channels as waiting, and check whether they have
+	     become ready.  */
+	  if (!mark_all_channels_waiting (channels, count,
+					  (is_queued
+					   ? NULL
+					   : &selected_channel),
+					  (is_queued
+					   ? NULL
+					   : &selected_for_read)))
+	    {
+	      x = pthread_cond_wait (&__go_select_cond, &__go_select_mutex);
+	      __go_assert (x == 0);
+	    }
+
+	  is_queued = 1;
+	}
+
+      x = pthread_mutex_unlock (&__go_select_mutex);
+      __go_assert (x == 0);
+    }
+}
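
For reference, a minimal sketch of how a caller might drive __go_select, based only on the signature and return convention documented above; this is a hypothetical helper, not the actual compiler-generated code, and assumes ch1 and ch2 are valid channels created elsewhere:

/* Hypothetical caller of __go_select; case 0 is a send on CH1,
   case 1 a receive from CH2.  */
#include <stddef.h>

struct __go_channel;

extern size_t __go_select (size_t, _Bool,
			   struct __go_channel **, _Bool *);

static size_t
select_two (struct __go_channel *ch1, struct __go_channel *ch2)
{
  struct __go_channel *chans[2] = { ch1, ch2 };
  _Bool is_send[2] = { 1, 0 };

  /* With HAS_DEFAULT true this returns 0 when no case is ready,
     otherwise the 1-based index of the chosen case.  */
  return __go_select (2, 1, chans, is_send);
}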
diff --git a/libgo/runtime/go-semacquire.c b/libgo/runtime/go-semacquire.c
new file mode 100644
index 000000000..24c6a7388
--- /dev/null
+++ b/libgo/runtime/go-semacquire.c
@@ -0,0 +1,151 @@
+/* go-semacquire.c -- implement runtime.Semacquire and runtime.Semrelease.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+
+#include <pthread.h>
+
+#include "go-assert.h"
+#include "runtime.h"
+
+/* We use a single global lock and condition variable.  This is
+   painful, since it will cause unnecessary contention, but is hard to
+   avoid in a portable manner.  On Linux we can use futexes, but they
+   are unfortunately not exposed by libc and are thus also hard to use
+   portably.  */
+
+static pthread_mutex_t sem_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t sem_cond = PTHREAD_COND_INITIALIZER;
+
+/* If the value in *ADDR is positive, and we are able to atomically
+   decrement it, return true.  Otherwise do nothing and return
+   false.  */
+
+static _Bool
+acquire (uint32 *addr)
+{
+  while (1)
+    {
+      uint32 val;
+
+      val = *addr;
+      if (val == 0)
+	return 0;
+      if (__sync_bool_compare_and_swap (addr, val, val - 1))
+	return 1;
+    }
+}
+
+/* Implement runtime.Semacquire.  ADDR points to a semaphore count.
+   We have acquired the semaphore when we have decremented the count
+   and it remains nonnegative.  */
+
+void
+semacquire (uint32 *addr)
+{
+  while (1)
+    {
+      int i;
+
+      /* If the current count is positive, and we are able to atomically
+	 decrement it, then we have acquired the semaphore.  */
+      if (acquire (addr))
+	return;
+
+      /* Lock the mutex.  */
+      i = pthread_mutex_lock (&sem_lock);
+      __go_assert (i == 0);
+
+      /* Check the count again with the mutex locked.  */
+      if (acquire (addr))
+	{
+	  i = pthread_mutex_unlock (&sem_lock);
+	  __go_assert (i == 0);
+	  return;
+	}
+
+      /* The count is zero.  Even if a call to runtime.Semrelease
+	 increments it to become positive, that call will try to
+	 acquire the mutex and block, so we are sure to see the signal
+	 of the condition variable.  */
+      i = pthread_cond_wait (&sem_cond, &sem_lock);
+      __go_assert (i == 0);
+
+      /* Unlock the mutex and try again.  */
+      i = pthread_mutex_unlock (&sem_lock);
+      __go_assert (i == 0);
+    }
+}
+
+/* Implement runtime.Semrelease.  ADDR points to a semaphore count.  We
+   must atomically increment the count.  If the count becomes
+   positive, we signal the condition variable to wake up another
+   process.  */
+
+void
+semrelease (uint32 *addr)
+{
+  int32_t val;
+
+  val = __sync_fetch_and_add (addr, 1);
+
+  /* VAL is the old value.  It should never be negative.  If it is
+     negative, that implies that Semacquire somehow decremented a zero
+     value, or that the count has overflowed.  */
+  __go_assert (val >= 0);
+
+  /* If the old value was zero, then we have now released a count, and
+     we signal the condition variable.  If the old value was positive,
+     then nobody can be waiting.  We have to use
+     pthread_cond_broadcast, not pthread_cond_signal, because
+     otherwise there would be a race condition when the count is
+     incremented twice before any locker manages to decrement it.  */
+  if (val == 0)
+    {
+      int i;
+
+      i = pthread_mutex_lock (&sem_lock);
+      __go_assert (i == 0);
+
+      i = pthread_cond_broadcast (&sem_cond);
+      __go_assert (i == 0);
+
+      i = pthread_mutex_unlock (&sem_lock);
+      __go_assert (i == 0);
+    }
+}
+
+
+#ifndef HAVE_SYNC_FETCH_AND_ADD_4
+
+/* For targets which don't have the required sync support.  Really
+   this should be provided by gcc itself.  FIXME.  */
+
+static pthread_mutex_t sync_lock = PTHREAD_MUTEX_INITIALIZER;
+
+uint32
+__sync_fetch_and_add_4(uint32*, uint32)
+  __attribute__((visibility("hidden")));
+
+uint32
+__sync_fetch_and_add_4(uint32* ptr, uint32 add)
+{
+  int i;
+  uint32 ret;
+
+  i = pthread_mutex_lock(&sync_lock);
+  __go_assert(i == 0);
+
+  ret = *ptr;
+  *ptr += add;
+
+  i = pthread_mutex_unlock(&sync_lock);
+  __go_assert(i == 0);
+
+  return ret;
+}
+
+#endif
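
The acquire function above is the classic decrement-if-positive idiom built on compare-and-swap. The same pattern in isolation, using GCC's __sync builtins and plain uint32_t in place of libgo's uint32 typedef:

/* Standalone illustration of the decrement-if-positive loop used by
   acquire() above.  */
#include <stdint.h>

static int
try_acquire (uint32_t *count)
{
  for (;;)
    {
      uint32_t val = *count;
      if (val == 0)
	return 0;			/* nothing to take */
      if (__sync_bool_compare_and_swap (count, val, val - 1))
	return 1;			/* took one unit */
      /* Lost the race; reload and retry.  */
    }
}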
diff --git a/libgo/runtime/go-send-big.c b/libgo/runtime/go-send-big.c
new file mode 100644
index 000000000..f58ffb6c8
--- /dev/null
+++ b/libgo/runtime/go-send-big.c
@@ -0,0 +1,31 @@
+/* go-send-big.c -- send something bigger than uint64_t on a channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+
+#include "go-panic.h"
+#include "channel.h"
+
+void
+__go_send_big (struct __go_channel* channel, const void *val, _Bool for_select)
+{
+  size_t alloc_size;
+  size_t offset;
+
+  if (channel == NULL)
+    __go_panic_msg ("send to nil channel");
+
+  alloc_size = ((channel->element_size + sizeof (uint64_t) - 1)
+		/ sizeof (uint64_t));
+
+  if (!__go_send_acquire (channel, for_select))
+    return;
+
+  offset = channel->next_store * alloc_size;
+  __builtin_memcpy (&channel->data[offset], val, channel->element_size);
+
+  __go_send_release (channel);
+}
diff --git a/libgo/runtime/go-send-nb-big.c b/libgo/runtime/go-send-nb-big.c
new file mode 100644
index 000000000..288ce7f44
--- /dev/null
+++ b/libgo/runtime/go-send-nb-big.c
@@ -0,0 +1,30 @@
+/* go-send-nb-big.c -- nonblocking send of something big on a channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+
+#include "channel.h"
+
+_Bool
+__go_send_nonblocking_big (struct __go_channel* channel, const void *val)
+{
+  size_t alloc_size;
+  size_t offset;
+
+  alloc_size = ((channel->element_size + sizeof (uint64_t) - 1)
+		/ sizeof (uint64_t));
+
+  int data = __go_send_nonblocking_acquire (channel);
+  if (data != SEND_NONBLOCKING_ACQUIRE_SPACE)
+    return data == SEND_NONBLOCKING_ACQUIRE_CLOSED;
+
+  offset = channel->next_store * alloc_size;
+  __builtin_memcpy (&channel->data[offset], val, channel->element_size);
+
+  __go_send_release (channel);
+
+  return 1;
+}
diff --git a/libgo/runtime/go-send-nb-small.c b/libgo/runtime/go-send-nb-small.c
new file mode 100644
index 000000000..f23ae0164
--- /dev/null
+++ b/libgo/runtime/go-send-nb-small.c
@@ -0,0 +1,112 @@
+/* go-send-nb-small.c -- nonblocking send of something small on a channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+
+#include "go-assert.h"
+#include "go-panic.h"
+#include "channel.h"
+
+/* Prepare to send something on a nonblocking channel.  */
+
+int
+__go_send_nonblocking_acquire (struct __go_channel *channel)
+{
+  int i;
+  _Bool has_space;
+
+  i = pthread_mutex_lock (&channel->lock);
+  __go_assert (i == 0);
+
+  while (channel->selected_for_send)
+    {
+      i = pthread_cond_wait (&channel->cond, &channel->lock);
+      __go_assert (i == 0);
+    }
+
+  if (channel->is_closed)
+    {
+      ++channel->closed_op_count;
+      if (channel->closed_op_count >= MAX_CLOSED_OPERATIONS)
+	{
+	  i = pthread_mutex_unlock (&channel->lock);
+	  __go_assert (i == 0);
+	  __go_panic_msg ("too many operations on closed channel");
+	}
+      i = pthread_mutex_unlock (&channel->lock);
+      __go_assert (i == 0);
+      return SEND_NONBLOCKING_ACQUIRE_CLOSED;
+    }
+
+  if (channel->num_entries > 0)
+    has_space = ((channel->next_store + 1) % channel->num_entries
+		 != channel->next_fetch);
+  else
+    {
+      /* This is a synchronous channel.  If somebody is currently
+	 sending, then we can't send.  Otherwise, see if somebody is
+	 waiting to receive, or see if we can synch with a select.  */
+      if (channel->waiting_to_send)
+	{
+	  /* Some other goroutine is currently sending on this
+	     channel, which means that we can't.  */
+	  has_space = 0;
+	}
+      else if (channel->waiting_to_receive)
+	{
+	  /* Some other goroutine is waiting to receive a value, so we
+	     can send directly to them.  */
+	  has_space = 1;
+	}
+      else if (__go_synch_with_select (channel, 1))
+	{
+	  /* We found a select waiting to receive data, so we can send
+	     to that.  */
+	  __go_broadcast_to_select (channel);
+	  has_space = 1;
+	}
+      else
+	{
+	  /* Otherwise, we can't send, because nobody is waiting to
+	     receive.  */
+	  has_space = 0;
+	}
+
+      if (has_space)
+	{
+	  channel->waiting_to_send = 1;
+	  __go_assert (channel->next_store == 0);
+	}
+    }
+
+  if (!has_space)
+    {
+      i = pthread_mutex_unlock (&channel->lock);
+      __go_assert (i == 0);
+
+      return SEND_NONBLOCKING_ACQUIRE_NOSPACE;
+    }
+
+  return SEND_NONBLOCKING_ACQUIRE_SPACE;
+}
+
+/* Send something 64 bits or smaller on a channel.  */
+
+_Bool
+__go_send_nonblocking_small (struct __go_channel *channel, uint64_t val)
+{
+  __go_assert (channel->element_size <= sizeof (uint64_t));
+
+  int data = __go_send_nonblocking_acquire (channel);
+  if (data != SEND_NONBLOCKING_ACQUIRE_SPACE)
+    return data == SEND_NONBLOCKING_ACQUIRE_CLOSED;
+
+  channel->data[channel->next_store] = val;
+
+  __go_send_release (channel);
+
+  return 1;
+}
diff --git a/libgo/runtime/go-send-small.c b/libgo/runtime/go-send-small.c
new file mode 100644
index 000000000..506c90e64
--- /dev/null
+++ b/libgo/runtime/go-send-small.c
@@ -0,0 +1,165 @@
+/* go-send-small.c -- send something 64 bits or smaller on a channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+
+#include "go-assert.h"
+#include "go-panic.h"
+#include "channel.h"
+
+/* Prepare to send something on a channel.  Return true if the channel
+   is acquired, false if it is closed.  FOR_SELECT is true if this
+   call is being made after a select statement returned with this
+   channel selected.  */
+
+_Bool
+__go_send_acquire (struct __go_channel *channel, _Bool for_select)
+{
+  int i;
+
+  i = pthread_mutex_lock (&channel->lock);
+  __go_assert (i == 0);
+
+  while (1)
+    {
+      /* Check whether the channel is closed.  */
+      if (channel->is_closed)
+	{
+	  ++channel->closed_op_count;
+	  if (channel->closed_op_count >= MAX_CLOSED_OPERATIONS)
+	    {
+	      i = pthread_mutex_unlock (&channel->lock);
+	      __go_assert (i == 0);
+	      __go_panic_msg ("too many operations on closed channel");
+	    }
+	  channel->selected_for_send = 0;
+	  __go_unlock_and_notify_selects (channel);
+	  return 0;
+	}
+
+      /* If somebody else has the channel locked for sending, we have
+	 to wait.  If FOR_SELECT is true, then we are the one with the
+	 lock.  */
+      if (!channel->selected_for_send || for_select)
+	{
+	  if (channel->num_entries == 0)
+	    {
+	      /* This is a synchronous channel.  If nobody else is
+		 waiting to send, we grab the channel and tell the
+		 caller to send the data.  We will then wait for a
+		 receiver.  */
+	      if (!channel->waiting_to_send)
+		{
+		  __go_assert (channel->next_store == 0);
+		  return 1;
+		}
+	    }
+	  else
+	    {
+	      /* If there is room on the channel, we are OK.  */
+	      if ((channel->next_store + 1) % channel->num_entries
+		  != channel->next_fetch)
+		return 1;
+	    }
+	}
+
+      /* Wait for something to change, then loop around and try
+	 again.  */
+
+      i = pthread_cond_wait (&channel->cond, &channel->lock);
+      __go_assert (i == 0);
+    }
+}
+
+/* Finished sending something on a channel.  */
+
+void
+__go_send_release (struct __go_channel *channel)
+{
+  int i;
+
+  if (channel->num_entries != 0)
+    {
+      /* This is a buffered channel.  Bump the store count and signal
+	 the condition variable.  */
+      channel->next_store = (channel->next_store + 1) % channel->num_entries;
+
+      i = pthread_cond_signal (&channel->cond);
+      __go_assert (i == 0);
+    }
+  else
+    {
+      _Bool synched_with_select;
+
+      /* This is a synchronous channel.  Indicate that we have a value
+	 waiting.  */
+      channel->next_store = 1;
+      channel->waiting_to_send = 1;
+
+      /* Tell everybody else to do something.  This has to be a
+	 broadcast because we might have both senders and receivers
+	 waiting on the condition, but senders won't send another
+	 signal.  */
+      i = pthread_cond_broadcast (&channel->cond);
+      __go_assert (i == 0);
+
+      /* Wait until the value is received.  */
+      synched_with_select = 0;
+      while (1)
+	{
+	  if (channel->next_store == 0)
+	    break;
+
+	  /* If nobody is currently waiting to receive, try to synch
+	     up with a select.  */
+	  if (!channel->waiting_to_receive && !synched_with_select)
+	    {
+	      if (__go_synch_with_select (channel, 1))
+		{
+		  synched_with_select = 1;
+		  __go_broadcast_to_select (channel);
+		  continue;
+		}
+	    }
+
+	  i = pthread_cond_wait (&channel->cond, &channel->lock);
+	  __go_assert (i == 0);
+	}
+
+      channel->waiting_to_send = 0;
+
+      /* Using the mutexes should implement a memory barrier.  */
+
+      /* We have to signal again since we cleared the waiting_to_send
+	 field.  This has to be a broadcast because both senders and
+	 receivers might be waiting, but only senders will be able to
+	 act.  */
+      i = pthread_cond_broadcast (&channel->cond);
+      __go_assert (i == 0);
+    }
+
+  channel->selected_for_send = 0;
+
+  __go_unlock_and_notify_selects (channel);
+}
+
+/* Send something 64 bits or smaller on a channel.  */
+
+void
+__go_send_small (struct __go_channel *channel, uint64_t val, _Bool for_select)
+{
+  if (channel == NULL)
+    __go_panic_msg ("send to nil channel");
+
+  __go_assert (channel->element_size <= sizeof (uint64_t));
+
+  if (!__go_send_acquire (channel, for_select))
+    return;
+
+  channel->data[channel->next_store] = val;
+
+  __go_send_release (channel);
+}
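
For buffered channels, the acquire and release paths above treat the data array as a ring in which one slot is always kept free: next_store == next_fetch means empty, and advancing next_store onto next_fetch would mean full. The occupancy tests in isolation (hypothetical helpers, not libgo functions):

/* Ring-buffer occupancy tests matching the index arithmetic used by
   __go_send_acquire and __go_send_release above.  */
#include <stddef.h>

static int
ring_full (size_t next_store, size_t next_fetch, size_t num_entries)
{
  return (next_store + 1) % num_entries == next_fetch;
}

static int
ring_empty (size_t next_store, size_t next_fetch)
{
  return next_store == next_fetch;
}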
diff --git a/libgo/runtime/go-signal.c b/libgo/runtime/go-signal.c
new file mode 100644
index 000000000..3838ab988
--- /dev/null
+++ b/libgo/runtime/go-signal.c
@@ -0,0 +1,200 @@
+/* go-signal.c -- signal handling for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <signal.h>
+#include <stdlib.h>
+
+#include "go-assert.h"
+#include "go-panic.h"
+#include "go-signal.h"
+
+#include "runtime.h"
+
+#undef int
+
+#ifndef SA_ONSTACK
+#define SA_ONSTACK 0
+#endif
+
+/* What to do for a signal.  */
+
+struct sigtab
+{
+  /* Signal number.  */
+  int sig;
+  /* Nonzero if the signal should be ignored.  */
+  _Bool ignore;
+};
+
+/* What to do for signals.  */
+
+static struct sigtab signals[] =
+{
+  { SIGHUP, 0 },
+  { SIGINT, 0 },
+  { SIGALRM, 1 },
+  { SIGTERM, 0 },
+#ifdef SIGBUS
+  { SIGBUS, 0 },
+#endif
+#ifdef SIGFPE
+  { SIGFPE, 0 },
+#endif
+#ifdef SIGUSR1
+  { SIGUSR1, 1 },
+#endif
+#ifdef SIGSEGV
+  { SIGSEGV, 0 },
+#endif
+#ifdef SIGUSR2
+  { SIGUSR2, 1 },
+#endif
+#ifdef SIGPIPE
+  { SIGPIPE, 1 },
+#endif
+#ifdef SIGCHLD
+  { SIGCHLD, 1 },
+#endif
+#ifdef SIGTSTP
+  { SIGTSTP, 1 },
+#endif
+#ifdef SIGTTIN
+  { SIGTTIN, 1 },
+#endif
+#ifdef SIGTTOU
+  { SIGTTOU, 1 },
+#endif
+#ifdef SIGURG
+  { SIGURG, 1 },
+#endif
+#ifdef SIGXCPU
+  { SIGXCPU, 1 },
+#endif
+#ifdef SIGXFSZ
+  { SIGXFSZ, 1 },
+#endif
+#ifdef SIGVTALRM
+  { SIGVTALRM, 1 },
+#endif
+#ifdef SIGPROF
+  { SIGPROF, 1 },
+#endif
+#ifdef SIGWINCH
+  { SIGWINCH, 1 },
+#endif
+#ifdef SIGIO
+  { SIGIO, 1 },
+#endif
+#ifdef SIGPWR
+  { SIGPWR, 1 },
+#endif
+  { -1, 0 }
+};
+
+/* The Go signal handler.  */
+
+static void
+sighandler (int sig)
+{
+  const char *msg;
+  int i;
+
+  /* FIXME: Should check siginfo for more information when
+     available.  */
+  msg = NULL;
+  switch (sig)
+    {
+#ifdef SIGBUS
+    case SIGBUS:
+      msg = "invalid memory address or nil pointer dereference";
+      break;
+#endif
+
+#ifdef SIGFPE
+    case SIGFPE:
+      msg = "integer divide by zero or floating point error";
+      break;
+#endif
+
+#ifdef SIGSEGV
+    case SIGSEGV:
+      msg = "invalid memory address or nil pointer dereference";
+      break;
+#endif
+
+    default:
+      break;
+    }
+
+  if (msg != NULL)
+    {
+      sigset_t clear;
+
+      if (__sync_bool_compare_and_swap (&m->mallocing, 1, 1))
+	{
+	  fprintf (stderr, "caught signal while mallocing: %s\n", msg);
+	  __go_assert (0);
+	}
+
+      /* The signal handler blocked signals; unblock them.  */
+      i = sigfillset (&clear);
+      __go_assert (i == 0);
+      i = sigprocmask (SIG_UNBLOCK, &clear, NULL);
+      __go_assert (i == 0);
+
+      __go_panic_msg (msg);
+    }
+
+  if (__go_sigsend (sig))
+    return;
+  for (i = 0; signals[i].sig != -1; ++i)
+    {
+      if (signals[i].sig == sig)
+	{
+	  struct sigaction sa;
+
+	  if (signals[i].ignore)
+	    return;
+
+	  memset (&sa, 0, sizeof sa);
+
+	  sa.sa_handler = SIG_DFL;
+
+	  i = sigemptyset (&sa.sa_mask);
+	  __go_assert (i == 0);
+
+	  if (sigaction (sig, &sa, NULL) != 0)
+	    abort ();
+
+	  raise (sig);
+	  exit (2);
+	}
+    }
+  abort ();
+}
+
+/* Initialize signal handling for Go.  This is called when the program
+   starts.  */
+
+void
+__initsig ()
+{
+  struct sigaction sa;
+  int i;
+
+  siginit ();
+
+  memset (&sa, 0, sizeof sa);
+
+  sa.sa_handler = sighandler;
+
+  i = sigfillset (&sa.sa_mask);
+  __go_assert (i == 0);
+
+  for (i = 0; signals[i].sig != -1; ++i)
+    if (sigaction (signals[i].sig, &sa, NULL) != 0)
+      __go_assert (0);
+}
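
For fatal signals that the Go runtime does not consume, sighandler above restores the default disposition and re-raises the signal so the process terminates with the correct wait status. The same reset-and-reraise pattern in isolation:

/* Standalone sketch of the reset-and-reraise idiom used by
   sighandler above.  */
#include <signal.h>
#include <string.h>

static void
die_with_signal (int sig)
{
  struct sigaction sa;

  memset (&sa, 0, sizeof sa);
  sa.sa_handler = SIG_DFL;
  sigemptyset (&sa.sa_mask);
  sigaction (sig, &sa, NULL);
  raise (sig);
}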
diff --git a/libgo/runtime/go-signal.h b/libgo/runtime/go-signal.h
new file mode 100644
index 000000000..a30173a34
--- /dev/null
+++ b/libgo/runtime/go-signal.h
@@ -0,0 +1,7 @@
+/* go-signal.h -- signal handling for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+extern void __initsig (void);
diff --git a/libgo/runtime/go-strcmp.c b/libgo/runtime/go-strcmp.c
new file mode 100644
index 000000000..8e6cb1834
--- /dev/null
+++ b/libgo/runtime/go-strcmp.c
@@ -0,0 +1,27 @@
+/* go-strcmp.c -- the go string comparison function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-string.h"
+
+int
+__go_strcmp(struct __go_string s1, struct __go_string s2)
+{
+  int i;
+
+  i = __builtin_memcmp(s1.__data, s2.__data,
+		       (s1.__length < s2.__length
+			? s1.__length
+			: s2.__length));
+  if (i != 0)
+    return i;
+
+  if (s1.__length < s2.__length)
+    return -1;
+  else if (s1.__length > s2.__length)
+    return 1;
+  else
+    return 0;
+}
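
__go_strcmp orders strings by their common prefix and breaks ties by length, so a string sorts before any of its proper extensions. A small self-contained check of that rule, with illustrative values only:

/* Illustration: "go" < "golang" and "goz" > "golang" under the
   prefix-then-length rule implemented by __go_strcmp above.  */
#include <assert.h>
#include <stddef.h>
#include <string.h>

static int
strcmp_rule (const char *d1, size_t l1, const char *d2, size_t l2)
{
  int i = memcmp (d1, d2, l1 < l2 ? l1 : l2);
  if (i != 0)
    return i;
  return l1 < l2 ? -1 : l1 > l2 ? 1 : 0;
}

int
main (void)
{
  assert (strcmp_rule ("go", 2, "golang", 6) < 0);
  assert (strcmp_rule ("goz", 3, "golang", 6) > 0);
  return 0;
}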
diff --git a/libgo/runtime/go-string-to-byte-array.c b/libgo/runtime/go-string-to-byte-array.c
new file mode 100644
index 000000000..3b646c81a
--- /dev/null
+++ b/libgo/runtime/go-string-to-byte-array.c
@@ -0,0 +1,24 @@
+/* go-string-to-byte-array.c -- convert a string to an array of bytes in Go.
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-string.h"
+#include "array.h"
+#include "runtime.h"
+#include "malloc.h"
+
+struct __go_open_array
+__go_string_to_byte_array (struct __go_string str)
+{
+  unsigned char *data;
+  struct __go_open_array ret;
+
+  data = (unsigned char *) runtime_mallocgc (str.__length, RefNoPointers, 1, 0);
+  __builtin_memcpy (data, str.__data, str.__length);
+  ret.__values = (void *) data;
+  ret.__count = str.__length;
+  ret.__capacity = str.__length;
+  return ret;
+}
diff --git a/libgo/runtime/go-string-to-int-array.c b/libgo/runtime/go-string-to-int-array.c
new file mode 100644
index 000000000..8d7f94f93
--- /dev/null
+++ b/libgo/runtime/go-string-to-int-array.c
@@ -0,0 +1,50 @@
+/* go-string-to-int-array.c -- convert a string to an array of ints in Go.
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-alloc.h"
+#include "go-string.h"
+#include "array.h"
+#include "runtime.h"
+#include "malloc.h"
+
+struct __go_open_array
+__go_string_to_int_array (struct __go_string str)
+{
+  size_t c;
+  const unsigned char *p;
+  const unsigned char *pend;
+  uint32_t *data;
+  uint32_t *pd;
+  struct __go_open_array ret;
+
+  c = 0;
+  p = str.__data;
+  pend = p + str.__length;
+  while (p < pend)
+    {
+      int rune;
+
+      ++c;
+      p += __go_get_rune (p, pend - p, &rune);
+    }
+
+  data = (uint32_t *) runtime_mallocgc (c * sizeof (uint32_t), RefNoPointers,
+					1, 0);
+  p = str.__data;
+  pd = data;
+  while (p < pend)
+    {
+      int rune;
+
+      p += __go_get_rune (p, pend - p, &rune);
+      *pd++ = rune;
+    }
+
+  ret.__values = (void *) data;
+  ret.__count = c;
+  ret.__capacity = c;
+  return ret;
+}
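
The conversion above makes two passes over the UTF-8 data: one with __go_get_rune to count the runes and size the allocation, and a second to fill it. The same count-then-fill shape in isolation; decode_one is a hypothetical stand-in decoder, assumed (like __go_get_rune) to consume at least one byte per call:

/* Count-then-fill sketch; decode_one is a hypothetical stand-in for
   __go_get_rune.  */
#include <stdint.h>
#include <stdlib.h>

extern size_t decode_one (const unsigned char *, size_t, int *);

static uint32_t *
decode_all (const unsigned char *p, size_t len, size_t *count)
{
  const unsigned char *pend = p + len;
  const unsigned char *q;
  uint32_t *data, *pd;
  size_t c;
  int rune;

  c = 0;
  for (q = p; q < pend; ++c)		/* pass 1: count runes */
    q += decode_one (q, pend - q, &rune);

  data = (uint32_t *) malloc (c * sizeof (uint32_t));
  pd = data;
  for (q = p; q < pend; )		/* pass 2: fill */
    {
      q += decode_one (q, pend - q, &rune);
      *pd++ = rune;
    }
  *count = c;
  return data;
}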
diff --git a/libgo/runtime/go-string.h b/libgo/runtime/go-string.h
new file mode 100644
index 000000000..2c8e1acd3
--- /dev/null
+++ b/libgo/runtime/go-string.h
@@ -0,0 +1,42 @@
+/* go-string.h -- the string type for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#ifndef LIBGO_GO_STRING_H
+#define LIBGO_GO_STRING_H
+
+#include <stddef.h>
+
+/* A string is an instance of this structure.  */
+
+struct __go_string
+{
+  /* The bytes.  */
+  const unsigned char *__data;
+  /* The length.  */
+  int __length;
+};
+
+static inline _Bool
+__go_strings_equal (struct __go_string s1, struct __go_string s2)
+{
+  return (s1.__length == s2.__length
+	  && __builtin_memcmp (s1.__data, s2.__data, s1.__length) == 0);
+}
+
+static inline _Bool
+__go_ptr_strings_equal (const struct __go_string *ps1,
+			const struct __go_string *ps2)
+{
+  if (ps1 == NULL)
+    return ps2 == NULL;
+  if (ps2 == NULL)
+    return 0;
+  return __go_strings_equal (*ps1, *ps2);
+}
+
+extern int __go_get_rune (const unsigned char *, size_t, int *);
+
+#endif /* !defined(LIBGO_GO_STRING_H) */
diff --git a/libgo/runtime/go-strplus.c b/libgo/runtime/go-strplus.c
new file mode 100644
index 000000000..c0cd356ca
--- /dev/null
+++ b/libgo/runtime/go-strplus.c
@@ -0,0 +1,30 @@
+/* go-strplus.c -- the go string append function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-string.h"
+#include "runtime.h"
+#include "malloc.h"
+
+struct __go_string
+__go_string_plus (struct __go_string s1, struct __go_string s2)
+{
+  int len;
+  unsigned char *retdata;
+  struct __go_string ret;
+
+  if (s1.__length == 0)
+    return s2;
+  else if (s2.__length == 0)
+    return s1;
+
+  len = s1.__length + s2.__length;
+  retdata = runtime_mallocgc (len, RefNoPointers, 1, 0);
+  __builtin_memcpy (retdata, s1.__data, s1.__length);
+  __builtin_memcpy (retdata + s1.__length, s2.__data, s2.__length);
+  ret.__data = retdata;
+  ret.__length = len;
+  return ret;
+}
diff --git a/libgo/runtime/go-strslice.c b/libgo/runtime/go-strslice.c
new file mode 100644
index 000000000..94ecee92e
--- /dev/null
+++ b/libgo/runtime/go-strslice.c
@@ -0,0 +1,26 @@
+/* go-strslice.c -- the go string slice function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-string.h"
+#include "go-panic.h"
+#include "runtime.h"
+#include "malloc.h"
+
+struct __go_string
+__go_string_slice (struct __go_string s, int start, int end)
+{
+  int len;
+  struct __go_string ret;
+
+  len = s.__length;
+  if (end == -1)
+    end = len;
+  if (start > len || end < start || end > len)
+    __go_panic_msg ("string index out of bounds");
+  ret.__data = s.__data + start;
+  ret.__length = end - start;
+  return ret;
+}
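
Note that __go_string_slice returns a view: the result points into the original bytes and no copy is made. A usage sketch with illustrative values:

/* Usage sketch: the slice aliases S's bytes rather than copying.  */
#include "go-string.h"

extern struct __go_string __go_string_slice (struct __go_string, int, int);

static struct __go_string
middle_two (void)
{
  struct __go_string s = { (const unsigned char *) "golang", 6 };

  return __go_string_slice (s, 2, 4);	/* "la" */
}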
diff --git a/libgo/runtime/go-trampoline.c b/libgo/runtime/go-trampoline.c
new file mode 100644
index 000000000..43003e81c
--- /dev/null
+++ b/libgo/runtime/go-trampoline.c
@@ -0,0 +1,53 @@
+/* go-trampoline.c -- allocate a trampoline for a nested function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "config.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#ifdef HAVE_SYS_MMAN_H
+#include <sys/mman.h>
+#endif
+
+#include "go-alloc.h"
+#include "go-assert.h"
+
+/* In order to build a trampoline we need space which is both writable
+   and executable.  We currently just allocate a whole page.  This
+   needs to be more system dependent.  */
+
+void *
+__go_allocate_trampoline (size_t size, void *closure)
+{
+  unsigned int page_size;
+  void *ret;
+  size_t off;
+
+  page_size = getpagesize ();
+  __go_assert (page_size >= size);
+  ret = __go_alloc (2 * page_size - 1);
+  ret = (void *) (((uintptr_t) ret + page_size - 1)
+		  & ~ ((uintptr_t) page_size - 1));
+
+  /* Because the garbage collector only looks at correct address
+     offsets, we need to ensure that it will see the closure
+     address.  */
+  off = ((size + sizeof (void *) - 1) / sizeof (void *)) * sizeof (void *);
+  __go_assert (size + off + sizeof (void *) <= page_size);
+  __builtin_memcpy (ret + off, &closure, sizeof (void *));
+
+#ifdef HAVE_SYS_MMAN_H
+  {
+    int i;
+    i = mprotect (ret, size, PROT_READ | PROT_WRITE | PROT_EXEC);
+    __go_assert (i == 0);
+  }
+#endif
+
+  return ret;
+}
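
The allocator above over-allocates and then rounds the pointer up to a page boundary with the usual power-of-two mask trick. The arithmetic in isolation:

/* Standalone sketch of the round-up-to-alignment arithmetic used
   above; ALIGN must be a power of two.  */
#include <stdint.h>

static uintptr_t
align_up (uintptr_t p, uintptr_t align)
{
  return (p + align - 1) & ~(align - 1);
}

/* e.g. align_up (4097, 4096) == 8192; align_up (4096, 4096) == 4096.  */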
diff --git a/libgo/runtime/go-type-eface.c b/libgo/runtime/go-type-eface.c
new file mode 100644
index 000000000..84ca05ee1
--- /dev/null
+++ b/libgo/runtime/go-type-eface.c
@@ -0,0 +1,55 @@
+/* go-type-eface.c -- hash and equality empty interface functions.
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "interface.h"
+#include "go-type.h"
+
+/* A hash function for an empty interface.  */
+
+size_t
+__go_type_hash_empty_interface (const void *vval,
+				size_t key_size __attribute__ ((unused)))
+{
+  const struct __go_empty_interface *val;
+  const struct __go_type_descriptor *descriptor;
+  size_t size;
+
+  val = (const struct __go_empty_interface *) vval;
+  descriptor = val->__type_descriptor;
+  if (descriptor == NULL)
+    return 0;
+  size = descriptor->__size;
+  if (__go_is_pointer_type (descriptor))
+    return descriptor->__hashfn (&val->__object, size);
+  else
+    return descriptor->__hashfn (val->__object, size);
+}
+
+/* An equality function for an empty interface.  */
+
+_Bool
+__go_type_equal_empty_interface (const void *vv1, const void *vv2,
+				 size_t key_size __attribute__ ((unused)))
+{
+  const struct __go_empty_interface *v1;
+  const struct __go_empty_interface *v2;
+  const struct __go_type_descriptor* v1_descriptor;
+  const struct __go_type_descriptor* v2_descriptor;
+
+  v1 = (const struct __go_empty_interface *) vv1;
+  v2 = (const struct __go_empty_interface *) vv2;
+  v1_descriptor = v1->__type_descriptor;
+  v2_descriptor = v2->__type_descriptor;
+  if (v1_descriptor == NULL || v2_descriptor == NULL)
+    return v1_descriptor == v2_descriptor;
+  if (!__go_type_descriptors_equal (v1_descriptor, v2_descriptor))
+    return 0;
+  if (__go_is_pointer_type (v1_descriptor))
+    return v1->__object == v2->__object;
+  else
+    return v1_descriptor->__equalfn (v1->__object, v2->__object,
+				     v1_descriptor->__size);
+}
diff --git a/libgo/runtime/go-type-error.c b/libgo/runtime/go-type-error.c
new file mode 100644
index 000000000..865850c9c
--- /dev/null
+++ b/libgo/runtime/go-type-error.c
@@ -0,0 +1,28 @@
+/* go-type-error.c -- invalid hash and equality functions.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-type.h"
+#include "go-panic.h"
+
+/* A hash function used for a type which does not support hash
+   functions.  */
+
+size_t
+__go_type_hash_error (const void *val __attribute__ ((unused)),
+		      size_t key_size __attribute__ ((unused)))
+{
+  __go_panic_msg ("hash of unhashable type");
+}
+
+/* An equality function for an interface.  */
+
+_Bool
+__go_type_equal_error (const void *v1 __attribute__ ((unused)),
+		       const void *v2 __attribute__ ((unused)),
+		       size_t key_size __attribute__ ((unused)))
+{
+  __go_panic_msg ("comparing uncomparable types");
+}
diff --git a/libgo/runtime/go-type-identity.c b/libgo/runtime/go-type-identity.c
new file mode 100644
index 000000000..f1de3c28a
--- /dev/null
+++ b/libgo/runtime/go-type-identity.c
@@ -0,0 +1,50 @@
+/* go-type-identity.c -- hash and equality identity functions.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+
+#include "go-type.h"
+
+/* Typedefs for accesses of different sizes.  */
+
+typedef int QItype __attribute__ ((mode (QI)));
+typedef int HItype __attribute__ ((mode (HI)));
+typedef int SItype __attribute__ ((mode (SI)));
+typedef int DItype __attribute__ ((mode (DI)));
+
+/* An identity hash function for a type.  This is used for types where
+   we can simply use the type value itself as a hash code.  This is
+   true of, e.g., integers and pointers.  */
+
+size_t
+__go_type_hash_identity (const void *key, size_t key_size)
+{
+  switch (key_size)
+    {
+    case 1:
+      return *(const QItype *) key;
+    case 2:
+      return *(const HItype *) key;
+    case 3:
+    case 4:
+    case 5:
+    case 6:
+    case 7:
+      return *(const SItype *) key;
+    default:
+      return *(const DItype *) key;
+    }
+}
+
+/* An identity equality function for a type.  This is used for types
+   where we can check for equality by checking that the values have
+   the same bits.  */
+
+_Bool
+__go_type_equal_identity (const void *k1, const void *k2, size_t key_size)
+{
+  return __builtin_memcmp (k1, k2, key_size) == 0;
+}
diff --git a/libgo/runtime/go-type-interface.c b/libgo/runtime/go-type-interface.c
new file mode 100644
index 000000000..9750b843c
--- /dev/null
+++ b/libgo/runtime/go-type-interface.c
@@ -0,0 +1,55 @@
+/* go-type-interface.c -- hash and equality interface functions.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "interface.h"
+#include "go-type.h"
+
+/* A hash function for an interface.  */
+
+size_t
+__go_type_hash_interface (const void *vval,
+			  size_t key_size __attribute__ ((unused)))
+{
+  const struct __go_interface *val;
+  const struct __go_type_descriptor *descriptor;
+  size_t size;
+
+  val = (const struct __go_interface *) vval;
+  if (val->__methods == NULL)
+    return 0;
+  descriptor = (const struct __go_type_descriptor *) val->__methods[0];
+  size = descriptor->__size;
+  if (__go_is_pointer_type (descriptor))
+    return descriptor->__hashfn (&val->__object, size);
+  else
+    return descriptor->__hashfn (val->__object, size);
+}
+
+/* An equality function for an interface.  */
+
+_Bool
+__go_type_equal_interface (const void *vv1, const void *vv2,
+			   size_t key_size __attribute__ ((unused)))
+{
+  const struct __go_interface *v1;
+  const struct __go_interface *v2;
+  const struct __go_type_descriptor* v1_descriptor;
+  const struct __go_type_descriptor* v2_descriptor;
+
+  v1 = (const struct __go_interface *) vv1;
+  v2 = (const struct __go_interface *) vv2;
+  if (v1->__methods == NULL || v2->__methods == NULL)
+    return v1->__methods == v2->__methods;
+  v1_descriptor = (const struct __go_type_descriptor *) v1->__methods[0];
+  v2_descriptor = (const struct __go_type_descriptor *) v2->__methods[0];
+  if (!__go_type_descriptors_equal (v1_descriptor, v2_descriptor))
+    return 0;
+  if (__go_is_pointer_type (v1_descriptor))
+    return v1->__object == v2->__object;
+  else
+    return v1_descriptor->__equalfn (v1->__object, v2->__object,
+				     v1_descriptor->__size);
+}
diff --git a/libgo/runtime/go-type-string.c b/libgo/runtime/go-type-string.c
new file mode 100644
index 000000000..998955d62
--- /dev/null
+++ b/libgo/runtime/go-type-string.c
@@ -0,0 +1,45 @@
+/* go-type-string.c -- hash and equality string functions.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+
+#include "go-string.h"
+#include "go-type.h"
+
+/* A string hash function for a map.  */
+
+size_t
+__go_type_hash_string (const void *vkey,
+		       size_t key_size __attribute__ ((unused)))
+{
+  size_t ret;
+  const struct __go_string *key;
+  size_t len;
+  size_t i;
+  const unsigned char *p;
+
+  ret = 5381;
+  key = (const struct __go_string *) vkey;
+  len = key->__length;
+  for (i = 0, p = key->__data; i < len; i++, p++)
+    ret = ret * 33 + *p;
+  return ret;
+}
+
+/* A string equality function for a map.  */
+
+_Bool
+__go_type_equal_string (const void *vk1, const void *vk2,
+			size_t key_size __attribute__ ((unused)))
+{
+  const struct __go_string *k1;
+  const struct __go_string *k2;
+
+  k1 = (const struct __go_string *) vk1;
+  k2 = (const struct __go_string *) vk2;
+  return (k1->__length == k2->__length
+	  && __builtin_memcmp (k1->__data, k2->__data, k1->__length) == 0);
+}
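
The string hash above is the well-known djb2-style multiply-by-33 hash seeded with 5381, applied to the raw bytes. In isolation:

/* Standalone sketch of the multiply-by-33 hash loop used by
   __go_type_hash_string above.  */
#include <stddef.h>

static size_t
hash_bytes (const unsigned char *p, size_t len)
{
  size_t ret = 5381;
  size_t i;

  for (i = 0; i < len; ++i)
    ret = ret * 33 + p[i];
  return ret;
}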
diff --git a/libgo/runtime/go-type.h b/libgo/runtime/go-type.h
new file mode 100644
index 000000000..b1f32850a
--- /dev/null
+++ b/libgo/runtime/go-type.h
@@ -0,0 +1,309 @@
+/* go-type.h -- basic information for a Go type.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#ifndef LIBGO_GO_TYPE_H
+#define LIBGO_GO_TYPE_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "go-string.h"
+#include "array.h"
+
+/* Many of the types in this file must match the data structures
+   generated by the compiler, and must also match the Go types which
+   appear in go/runtime/type.go and go/reflect/type.go.  */
+
+/* Type kinds.  These are used to get the type descriptor to use for
+   the type itself, when using unsafe.Typeof or unsafe.Reflect.  The
+   values here must match the values generated by the compiler (the
+   RUNTIME_TYPE_KIND_xxx values in gcc/go/types.h).  These are macros
+   rather than an enum to make it easy to change values in the future
+   and hard to get confused about it.
+
+   These correspond to the kind values used by the gc compiler.  */
+
+#define GO_BOOL 1
+#define GO_INT 2
+#define GO_INT8 3
+#define GO_INT16 4
+#define GO_INT32 5
+#define GO_INT64 6
+#define GO_UINT 7
+#define GO_UINT8 8
+#define GO_UINT16 9
+#define GO_UINT32 10
+#define GO_UINT64 11
+#define GO_UINTPTR 12
+#define GO_FLOAT32 13
+#define GO_FLOAT64 14
+#define GO_COMPLEX64 15
+#define GO_COMPLEX128 16
+#define GO_ARRAY 17
+#define GO_CHAN 18
+#define GO_FUNC 19
+#define GO_INTERFACE 20
+#define GO_MAP 21
+#define GO_PTR 22
+#define GO_SLICE 23
+#define GO_STRING 24
+#define GO_STRUCT 25
+#define GO_UNSAFE_POINTER 26
+
+/* For each Go type the compiler constructs one of these structures.
+   This is used for type reflection, interfaces, maps, and reference
+   counting.  */
+
+struct __go_type_descriptor
+{
+  /* The type code for this type, one of the GO_xxx values defined above.
+     This is used by unsafe.Reflect and unsafe.Typeof to determine the
+     type descriptor to return for this type itself.  It is also used
+     by reflect.toType when mapping to a reflect Type structure.  */
+  unsigned char __code;
+
+  /* The alignment in bytes of a variable with this type.  */
+  unsigned char __align;
+
+  /* The alignment in bytes of a struct field with this type.  */
+  unsigned char __field_align;
+
+  /* The size in bytes of a value of this type.  Note that all types
+     in Go have a fixed size.  */
+  uintptr_t __size;
+
+  /* The type's hash code.  */
+  uint32_t __hash;
+
+  /* This function takes a pointer to a value of this type, and the
+     size of this type, and returns a hash code.  We pass the size
+     explicitly because it means that we can share a single instance
+     of this function for various different types.  */
+  size_t (*__hashfn) (const void *, size_t);
+
+  /* This function takes two pointers to values of this type, and the
+     size of this type, and returns whether the values are equal.  */
+  _Bool (*__equalfn) (const void *, const void *, size_t);
+
+  /* A string describing this type.  This is only used for
+     debugging.  */
+  const struct __go_string *__reflection;
+
+  /* A pointer to fields which are only used for some types.  */
+  const struct __go_uncommon_type *__uncommon;
+};
+
+/* The information we store for each method of a type.  */
+
+struct __go_method
+{
+  /* The name of the method.  */
+  const struct __go_string *__name;
+
+  /* This is NULL for an exported method, or the name of the package
+     where it lives.  */
+  const struct __go_string *__pkg_path;
+
+  /* The type of the method, without the receiver.  This will be a
+     function type.  */
+  const struct __go_type_descriptor *__mtype;
+
+  /* The type of the method, with the receiver.  This will be a
+     function type.  */
+  const struct __go_type_descriptor *__type;
+
+  /* A pointer to the code which implements the method.  This is
+     really a function pointer.  */
+  const void *__function;
+};
+
+/* Additional information that we keep for named types and for types
+   with methods.  */
+
+struct __go_uncommon_type
+{
+  /* The name of the type.  */
+  const struct __go_string *__name;
+
+  /* The type's package.  This is NULL for builtin types.  */
+  const struct __go_string *__pkg_path;
+
+  /* The type's methods.  This is an array of struct __go_method.  */
+  struct __go_open_array __methods;
+};
+
+/* The type descriptor for a fixed array type.  */
+
+struct __go_array_type
+{
+  /* Starts like all type descriptors.  */
+  struct __go_type_descriptor __common;
+
+  /* The element type.  */
+  struct __go_type_descriptor *__element_type;
+
+  /* The length of the array.  */
+  uintptr_t __len;
+};
+
+/* The type descriptor for a slice.  */
+
+struct __go_slice_type
+{
+  /* Starts like all other type descriptors.  */
+  struct __go_type_descriptor __common;
+
+  /* The element type.  */
+  struct __go_type_descriptor *__element_type;
+};
+
+/* The direction of a channel.  */
+#define CHANNEL_RECV_DIR 1
+#define CHANNEL_SEND_DIR 2
+#define CHANNEL_BOTH_DIR (CHANNEL_RECV_DIR | CHANNEL_SEND_DIR)
+
+/* The type descriptor for a channel.  */
+
+struct __go_channel_type
+{
+  /* Starts like all other type descriptors.  */
+  struct __go_type_descriptor __common;
+
+  /* The element type.  */
+  const struct __go_type_descriptor *__element_type;
+
+  /* The direction.  */
+  uintptr_t __dir;
+};
+
+/* The type descriptor for a function.  */
+
+struct __go_func_type
+{
+  /* Starts like all other type descriptors.  */
+  struct __go_type_descriptor __common;
+
+  /* Whether this is a varargs function.  If this is true, there will
+     be at least one parameter.  For "..." the last parameter type is
+     "interface{}".  For "... T" the last parameter type is "[]T".  */
+  _Bool __dotdotdot;
+
+  /* The input parameter types.  This is an array of pointers to
+     struct __go_type_descriptor.  */
+  struct __go_open_array __in;
+
+  /* The output parameter types.  This is an array of pointers to
+     struct __go_type_descriptor.  */
+  struct __go_open_array __out;
+};
+
+/* A method on an interface type.  */
+
+struct __go_interface_method
+{
+  /* The name of the method.  */
+  const struct __go_string *__name;
+
+  /* This is NULL for an exported method, or the name of the package
+     where it lives.  */
+  const struct __go_string *__pkg_path;
+
+  /* The real type of the method.  */
+  struct __go_type_descriptor *__type;
+};
+
+/* An interface type.  */
+
+struct __go_interface_type
+{
+  /* Starts like all other type descriptors.  */
+  struct __go_type_descriptor __common;
+
+  /* An array of struct __go_interface_method.  The methods are sorted in the
+     same order that they appear in the definition of the
+     interface.  */
+  struct __go_open_array __methods;
+};
+
+/* A map type.  */
+
+struct __go_map_type
+{
+  /* Starts like all other type descriptors.  */
+  struct __go_type_descriptor __common;
+
+  /* The map key type.  */
+  const struct __go_type_descriptor *__key_type;
+
+  /* The map value type.  */
+  const struct __go_type_descriptor *__val_type;
+};
+
+/* A pointer type.  */
+
+struct __go_ptr_type
+{
+  /* Starts like all other type descriptors.  */
+  struct __go_type_descriptor __common;
+
+  /* The type to which this points.  */
+  const struct __go_type_descriptor *__element_type;
+};
+
+/* A field in a structure.  */
+
+struct __go_struct_field
+{
+  /* The name of the field--NULL for an anonymous field.  */
+  const struct __go_string *__name;
+
+  /* This is NULL for an exported method, or the name of the package
+     where it lives.  */
+  const struct __go_string *__pkg_path;
+
+  /* The type of the field.  */
+  const struct __go_type_descriptor *__type;
+
+  /* The field tag, or NULL.  */
+  const struct __go_string *__tag;
+
+  /* The offset of the field in the struct.  */
+  uintptr_t __offset;
+};
+
+/* A struct type.  */
+
+struct __go_struct_type
+{
+  /* Starts like all other type descriptors.  */
+  struct __go_type_descriptor __common;
+
+  /* An array of struct __go_struct_field.  */
+  struct __go_open_array __fields;
+};
+
+/* Whether a type descriptor is a pointer.  */
+
+static inline _Bool
+__go_is_pointer_type (const struct __go_type_descriptor *td)
+{
+  return td->__code == GO_PTR || td->__code == GO_UNSAFE_POINTER;
+}
+
+extern _Bool
+__go_type_descriptors_equal(const struct __go_type_descriptor*,
+			    const struct __go_type_descriptor*);
+
+extern size_t __go_type_hash_identity (const void *, size_t);
+extern _Bool __go_type_equal_identity (const void *, const void *, size_t);
+extern size_t __go_type_hash_string (const void *, size_t);
+extern _Bool __go_type_equal_string (const void *, const void *, size_t);
+extern size_t __go_type_hash_interface (const void *, size_t);
+extern _Bool __go_type_equal_interface (const void *, const void *, size_t);
+extern size_t __go_type_hash_error (const void *, size_t);
+extern _Bool __go_type_equal_error (const void *, const void *, size_t);
+
+#endif /* !defined(LIBGO_GO_TYPE_H) */
diff --git a/libgo/runtime/go-typedesc-equal.c b/libgo/runtime/go-typedesc-equal.c
new file mode 100644
index 000000000..932519aab
--- /dev/null
+++ b/libgo/runtime/go-typedesc-equal.c
@@ -0,0 +1,38 @@
+/* go-typedesc-equal.c -- return whether two type descriptors are equal.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-string.h"
+#include "go-type.h"
+
+/* Compare type descriptors for equality.  This is necessary because
+   types may have different descriptors in different shared libraries.
+   Also, unnamed types may have multiple type descriptors even in a
+   single shared library.  */
+
+_Bool
+__go_type_descriptors_equal (const struct __go_type_descriptor *td1,
+			     const struct __go_type_descriptor *td2)
+{
+  if (td1 == td2)
+    return 1;
+  /* In a type switch we can get a NULL descriptor.  */
+  if (td1 == NULL || td2 == NULL)
+    return 0;
+  if (td1->__code != td2->__code || td1->__hash != td2->__hash)
+    return 0;
+  if (td1->__uncommon != NULL && td1->__uncommon->__name != NULL)
+    {
+      if (td2->__uncommon == NULL || td2->__uncommon->__name == NULL)
+	return 0;
+      return (__go_ptr_strings_equal (td1->__uncommon->__name,
+				      td2->__uncommon->__name)
+	      && __go_ptr_strings_equal (td1->__uncommon->__pkg_path,
+					 td2->__uncommon->__pkg_path));
+    }
+  if (td2->__uncommon != NULL && td2->__uncommon->__name != NULL)
+    return 0;
+  return __go_ptr_strings_equal (td1->__reflection, td2->__reflection);
+}
diff --git a/libgo/runtime/go-typestring.c b/libgo/runtime/go-typestring.c
new file mode 100644
index 000000000..dcbbc6575
--- /dev/null
+++ b/libgo/runtime/go-typestring.c
@@ -0,0 +1,18 @@
+/* go-typestring.c -- the runtime.typestring function.
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "interface.h"
+#include "go-type.h"
+#include "go-string.h"
+
+struct __go_string typestring(struct __go_empty_interface)
+  asm ("libgo_runtime.runtime.typestring");
+
+struct __go_string
+typestring (struct __go_empty_interface e)
+{
+  return *e.__type_descriptor->__reflection;
+}
diff --git a/libgo/runtime/go-unreflect.c b/libgo/runtime/go-unreflect.c
new file mode 100644
index 000000000..886048548
--- /dev/null
+++ b/libgo/runtime/go-unreflect.c
@@ -0,0 +1,30 @@
+/* go-unreflect.c -- implement unsafe.Unreflect for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-alloc.h"
+#include "go-type.h"
+#include "interface.h"
+
+/* Implement unsafe.Unreflect.  */
+
+struct __go_empty_interface Unreflect (struct __go_empty_interface type,
+				       void *object)
+  asm ("libgo_unsafe.unsafe.Unreflect");
+
+struct __go_empty_interface
+Unreflect (struct __go_empty_interface type, void *object)
+{
+  struct __go_empty_interface ret;
+
+  /* FIXME: We should check __type_descriptor to verify that this is
+     really a type descriptor.  */
+  ret.__type_descriptor = type.__object;
+  if (__go_is_pointer_type (ret.__type_descriptor))
+    ret.__object = *(void **) object;
+  else
+    ret.__object = object;
+  return ret;
+}
diff --git a/libgo/runtime/go-unsafe-new.c b/libgo/runtime/go-unsafe-new.c
new file mode 100644
index 000000000..e55d415be
--- /dev/null
+++ b/libgo/runtime/go-unsafe-new.c
@@ -0,0 +1,27 @@
+/* go-unsafe-new.c -- unsafe.New function for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-alloc.h"
+#include "go-type.h"
+#include "interface.h"
+
+/* Implement unsafe.New.  */
+
+void *New (struct __go_empty_interface type) asm ("libgo_unsafe.unsafe.New");
+
+/* The dynamic type of the argument will be a pointer to a type
+   descriptor.  */
+
+void *
+New (struct __go_empty_interface type)
+{
+  const struct __go_type_descriptor *descriptor;
+
+  /* FIXME: We should check __type_descriptor to verify that this is
+     really a type descriptor.  */
+  descriptor = (const struct __go_type_descriptor *) type.__object;
+  return __go_alloc (descriptor->__size);
+}
diff --git a/libgo/runtime/go-unsafe-newarray.c b/libgo/runtime/go-unsafe-newarray.c
new file mode 100644
index 000000000..3bea2829f
--- /dev/null
+++ b/libgo/runtime/go-unsafe-newarray.c
@@ -0,0 +1,28 @@
+/* go-unsafe-newarray.c -- unsafe.NewArray function for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-alloc.h"
+#include "go-type.h"
+#include "interface.h"
+
+/* Implement unsafe.NewArray.  */
+
+void *NewArray (struct __go_empty_interface type, int n)
+  asm ("libgo_unsafe.unsafe.NewArray");
+
+/* The dynamic type of the argument will be a pointer to a type
+   descriptor.  */
+
+void *
+NewArray (struct __go_empty_interface type, int n)
+{
+  const struct __go_type_descriptor *descriptor;
+
+  /* FIXME: We should check __type_descriptor to verify that this is
+     really a type descriptor.  */
+  descriptor = (const struct __go_type_descriptor *) type.__object;
+  return __go_alloc (descriptor->__size * n);
+}
diff --git a/libgo/runtime/go-unsafe-pointer.c b/libgo/runtime/go-unsafe-pointer.c
new file mode 100644
index 000000000..804360f8a
--- /dev/null
+++ b/libgo/runtime/go-unsafe-pointer.c
@@ -0,0 +1,97 @@
+/* go-unsafe-pointer.c -- unsafe.Pointer type descriptor for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+
+#include "go-string.h"
+#include "go-type.h"
+
+/* This file provides the type descriptor for the unsafe.Pointer type.
+   The unsafe package is defined by the compiler itself, which means
+   that there is no package to compile to define the type
+   descriptor.  */
+
+extern const struct __go_type_descriptor unsafe_Pointer
+  asm ("__go_tdn_libgo_unsafe.unsafe.Pointer");
+
+/* Used to determine the field alignment.  */
+struct field_align
+{
+  char c;
+  void *p;
+};
+
+/* The reflection string.  */
+#define REFLECTION "unsafe.Pointer"
+static const struct __go_string reflection_string =
+{
+  (const unsigned char *) REFLECTION,
+  sizeof REFLECTION - 1
+};
+
+const struct __go_type_descriptor unsafe_Pointer =
+{
+  /* __code */
+  GO_UNSAFE_POINTER,
+  /* __align */
+  __alignof (void *),
+  /* __field_align */
+  offsetof (struct field_align, p) - 1,
+  /* __size */
+  sizeof (void *),
+  /* __hash */
+  78501163U,
+  /* __hashfn */
+  __go_type_hash_identity,
+  /* __equalfn */
+  __go_type_equal_identity,
+  /* __reflection */
+  &reflection_string,
+  /* __uncommon */
+  NULL
+};
+
+/* We also need the type descriptor for the pointer to unsafe.Pointer,
+   since any package which refers to that type descriptor will expect
+   it to be defined elsewhere.  */
+
+extern const struct __go_ptr_type pointer_unsafe_Pointer
+  asm ("__go_td_pN27_libgo_unsafe.unsafe.Pointer");
+
+/* The reflection string.  */
+#define PREFLECTION "*unsafe.Pointer"
+static const struct __go_string preflection_string =
+{
+  (const unsigned char *) PREFLECTION,
+  sizeof PREFLECTION - 1,
+};
+
+const struct __go_ptr_type pointer_unsafe_Pointer =
+{
+  /* __common */
+  {
+    /* __code */
+    GO_PTR,
+    /* __align */
+    __alignof (void *),
+    /* __field_align */
+    offsetof (struct field_align, p) - 1,
+    /* __size */
+    sizeof (void *),
+    /* __hash */
+    1256018616U,
+    /* __hashfn */
+    __go_type_hash_identity,
+    /* __equalfn */
+    __go_type_equal_identity,
+    /* __reflection */
+    &preflection_string,
+    /* __uncommon */
+    NULL
+  },
+  /* __element_type */
+  &unsafe_Pointer
+};
diff --git a/libgo/runtime/go-unwind.c b/libgo/runtime/go-unwind.c
new file mode 100644
index 000000000..c0fc59cef
--- /dev/null
+++ b/libgo/runtime/go-unwind.c
@@ -0,0 +1,426 @@
+/* go-unwind.c -- unwind the stack for panic/recover.
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "config.h"
+
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "unwind.h"
+#define NO_SIZE_OF_ENCODED_VALUE
+#include "unwind-pe.h"
+
+#include "go-alloc.h"
+#include "go-defer.h"
+#include "go-panic.h"
+
+/* The code for a Go exception.  */
+
+#ifdef __ARM_EABI_UNWINDER__
+static const _Unwind_Exception_Class __go_exception_class =
+  { 'G', 'N', 'U', 'C', 'G', 'O', '\0', '\0' };
+#else
+static const _Unwind_Exception_Class __go_exception_class =
+  ((((((((_Unwind_Exception_Class) 'G' 
+         << 8 | (_Unwind_Exception_Class) 'N')
+        << 8 | (_Unwind_Exception_Class) 'U')
+       << 8 | (_Unwind_Exception_Class) 'C')
+      << 8 | (_Unwind_Exception_Class) 'G')
+     << 8 | (_Unwind_Exception_Class) 'O')
+    << 8 | (_Unwind_Exception_Class) '\0')
+   << 8 | (_Unwind_Exception_Class) '\0');
+#endif
+
+
+/* This function is called by exception handlers used when unwinding
+   the stack after a recovered panic.  The exception handler looks
+   like this:
+     __go_check_defer (frame);
+     return;
+   If we have not yet reached the frame we are looking for, we
+   continue unwinding.  */
+
+void
+__go_check_defer (void *frame)
+{
+  struct _Unwind_Exception *hdr;
+
+  if (__go_panic_defer == NULL)
+    {
+      /* Some other language has thrown an exception.  We know there
+	 are no defer handlers, so there is nothing to do.  */
+    }
+  else if (__go_panic_defer->__is_foreign)
+    {
+      struct __go_panic_stack *n;
+      _Bool was_recovered;
+
+      /* Some other language has thrown an exception.  We need to run
+	 the local defer handlers.  If they call recover, we stop
+	 unwinding the stack here.  */
+
+      n = ((struct __go_panic_stack *)
+	   __go_alloc (sizeof (struct __go_panic_stack)));
+
+      n->__arg.__type_descriptor = NULL;
+      n->__arg.__object = NULL;
+      n->__was_recovered = 0;
+      n->__is_foreign = 1;
+      n->__next = __go_panic_defer->__panic;
+      __go_panic_defer->__panic = n;
+
+      while (1)
+	{
+	  struct __go_defer_stack *d;
+	  void (*pfn) (void *);
+
+	  d = __go_panic_defer->__defer;
+	  if (d == NULL || d->__frame != frame || d->__pfn == NULL)
+	    break;
+
+	  pfn = d->__pfn;
+	  __go_panic_defer->__defer = d->__next;
+
+	  (*pfn) (d->__arg);
+
+	  __go_free (d);
+
+	  if (n->__was_recovered)
+	    {
+	      /* The recover function caught the panic thrown by some
+		 other language.  */
+	      break;
+	    }
+	}
+
+      was_recovered = n->__was_recovered;
+      __go_panic_defer->__panic = n->__next;
+      __go_free (n);
+
+      if (was_recovered)
+	{
+	  /* Just return and continue executing Go code.  */
+	  return;
+	}
+    }
+  else if (__go_panic_defer->__defer != NULL
+	   && __go_panic_defer->__defer->__pfn == NULL
+	   && __go_panic_defer->__defer->__frame == frame)
+    {
+      struct __go_defer_stack *d;
+
+      /* This is the defer function which called recover.  Simply
+	 return to stop the stack unwind, and let the Go code continue
+	 to execute.  */
+      d = __go_panic_defer->__defer;
+      __go_panic_defer->__defer = d->__next;
+      __go_free (d);
+      return;
+    }
+
+  /* This is some other defer function.  It was already run by the
+     call to panic, or just above.  Rethrow the exception.  */
+
+  hdr = (struct _Unwind_Exception *) __go_panic_defer->__exception;
+
+#ifdef LIBGO_SJLJ_EXCEPTIONS
+  _Unwind_SjLj_Resume_or_Rethrow (hdr);
+#else
+#if defined(_LIBUNWIND_STD_ABI)
+  _Unwind_RaiseException (hdr);
+#else
+  _Unwind_Resume_or_Rethrow (hdr);
+#endif
+#endif
+
+  /* Rethrowing the exception should not return.  */
+  abort();
+}
+
+/* Unwind function calls until we reach the one which used a defer
+   function which called recover.  Each function which uses a defer
+   statement will have an exception handler, as shown above.  */
+
+void
+__go_unwind_stack ()
+{
+  struct _Unwind_Exception *hdr;
+
+  hdr = ((struct _Unwind_Exception *)
+	 __go_alloc (sizeof (struct _Unwind_Exception)));
+  __builtin_memcpy (&hdr->exception_class, &__go_exception_class,
+		    sizeof hdr->exception_class);
+  hdr->exception_cleanup = NULL;
+
+  __go_panic_defer->__exception = hdr;
+
+#ifdef __USING_SJLJ_EXCEPTIONS__
+  _Unwind_SjLj_RaiseException (hdr);
+#else
+  _Unwind_RaiseException (hdr);
+#endif
+
+  /* Raising an exception should not return.  */
+  abort ();
+}
+
+/* The rest of this code is really similar to gcc/unwind-c.c and
+   libjava/exception.cc.  */
+
+typedef struct
+{
+  _Unwind_Ptr Start;
+  _Unwind_Ptr LPStart;
+  _Unwind_Ptr ttype_base;
+  const unsigned char *TType;
+  const unsigned char *action_table;
+  unsigned char ttype_encoding;
+  unsigned char call_site_encoding;
+} lsda_header_info;
+
+static const unsigned char *
+parse_lsda_header (struct _Unwind_Context *context, const unsigned char *p,
+		   lsda_header_info *info)
+{
+  _uleb128_t tmp;
+  unsigned char lpstart_encoding;
+
+  info->Start = (context ? _Unwind_GetRegionStart (context) : 0);
+
+  /* Find @LPStart, the base to which landing pad offsets are relative.  */
+  lpstart_encoding = *p++;
+  if (lpstart_encoding != DW_EH_PE_omit)
+    p = read_encoded_value (context, lpstart_encoding, p, &info->LPStart);
+  else
+    info->LPStart = info->Start;
+
+  /* Find @TType, the base of the handler and exception spec type data.  */
+  info->ttype_encoding = *p++;
+  if (info->ttype_encoding != DW_EH_PE_omit)
+    {
+      p = read_uleb128 (p, &tmp);
+      info->TType = p + tmp;
+    }
+  else
+    info->TType = 0;
+
+  /* The encoding and length of the call-site table; the action table
+     immediately follows.  */
+  info->call_site_encoding = *p++;
+  p = read_uleb128 (p, &tmp);
+  info->action_table = p + tmp;
+
+  return p;
+}
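+
+/* The tables above store lengths and offsets as DWARF unsigned LEB128
+   values.  read_uleb128 comes from unwind-pe.h; the function below is
+   only an illustrative sketch of the encoding it decodes: seven
+   payload bits per byte, least-significant group first, with the high
+   bit set on every byte except the last.  */
+
+static const unsigned char *
+uleb128_sketch (const unsigned char *p, unsigned long *val)
+{
+  unsigned long result = 0;
+  unsigned int shift = 0;
+  unsigned char b;
+
+  do
+    {
+      b = *p++;
+      result |= (unsigned long) (b & 0x7f) << shift;
+      shift += 7;
+    }
+  while (b & 0x80);
+
+  *val = result;
+  return p;
+}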
+
+/* The personality function is invoked when unwinding the stack due to
+   a panic.  Its job is to find the cleanup and exception handlers to
+   run.  We can't split the stack here, because we won't be able to
+   unwind from that split.  */
+
+#ifdef __ARM_EABI_UNWINDER__
+/* ARM EABI personality routines must also unwind the stack.  */
+#define CONTINUE_UNWINDING \
+  do								\
+    {								\
+      if (__gnu_unwind_frame (ue_header, context) != _URC_OK)	\
+	return _URC_FAILURE;					\
+      return _URC_CONTINUE_UNWIND;				\
+    }								\
+  while (0)
+#else
+#define CONTINUE_UNWINDING return _URC_CONTINUE_UNWIND
+#endif
+
+#ifdef __USING_SJLJ_EXCEPTIONS__
+#define PERSONALITY_FUNCTION    __gccgo_personality_sj0
+#define __builtin_eh_return_data_regno(x) x
+#else
+#define PERSONALITY_FUNCTION    __gccgo_personality_v0
+#endif
+
+#ifdef __ARM_EABI_UNWINDER__
+_Unwind_Reason_Code
+PERSONALITY_FUNCTION (_Unwind_State, struct _Unwind_Exception *,
+		      struct _Unwind_Context *)
+  __attribute__ ((no_split_stack, flatten));
+
+_Unwind_Reason_Code
+PERSONALITY_FUNCTION (_Unwind_State state,
+		      struct _Unwind_Exception * ue_header,
+		      struct _Unwind_Context * context)
+#else
+_Unwind_Reason_Code
+PERSONALITY_FUNCTION (int, _Unwind_Action, _Unwind_Exception_Class,
+		      struct _Unwind_Exception *, struct _Unwind_Context *)
+  __attribute__ ((no_split_stack, flatten));
+
+_Unwind_Reason_Code
+PERSONALITY_FUNCTION (int version,
+		      _Unwind_Action actions,
+		      _Unwind_Exception_Class exception_class,
+		      struct _Unwind_Exception *ue_header,
+		      struct _Unwind_Context *context)
+#endif
+{
+  lsda_header_info info;
+  const unsigned char *language_specific_data, *p, *action_record;
+  _Unwind_Ptr landing_pad, ip;
+  int ip_before_insn = 0;
+  _Bool is_foreign;
+
+#ifdef __ARM_EABI_UNWINDER__
+  _Unwind_Action actions;
+
+  switch (state & _US_ACTION_MASK)
+    {
+    case _US_VIRTUAL_UNWIND_FRAME:
+      actions = _UA_SEARCH_PHASE;
+      break;
+
+    case _US_UNWIND_FRAME_STARTING:
+      actions = _UA_CLEANUP_PHASE;
+      if (!(state & _US_FORCE_UNWIND)
+	  && ue_header->barrier_cache.sp == _Unwind_GetGR(context, 13))
+	actions |= _UA_HANDLER_FRAME;
+      break;
+
+    case _US_UNWIND_FRAME_RESUME:
+      CONTINUE_UNWINDING;
+      break;
+
+    default:
+      abort ();
+    }
+  actions |= state & _US_FORCE_UNWIND;
+
+  is_foreign = 0;
+
+  /* The dwarf unwinder assumes the context structure holds things like the
+     function and LSDA pointers.  The ARM implementation caches these in
+     the exception header (UCB).  To avoid rewriting everything we make the
+     virtual IP register point at the UCB.  */
+  ip = (_Unwind_Ptr) ue_header;
+  _Unwind_SetGR (context, 12, ip);
+#else
+  if (version != 1)
+    return _URC_FATAL_PHASE1_ERROR;
+
+  is_foreign = exception_class != __go_exception_class;
+#endif
+
+  language_specific_data = (const unsigned char *)
+    _Unwind_GetLanguageSpecificData (context);
+
+  /* If no LSDA, then there are no handlers or cleanups.  */
+  if (! language_specific_data)
+    CONTINUE_UNWINDING;
+
+  /* Parse the LSDA header.  */
+  p = parse_lsda_header (context, language_specific_data, &info);
+#ifdef HAVE_GETIPINFO
+  ip = _Unwind_GetIPInfo (context, &ip_before_insn);
+#else
+  ip = _Unwind_GetIP (context);
+#endif
+  if (! ip_before_insn)
+    --ip;
+  landing_pad = 0;
+  action_record = NULL;
+
+#ifdef __USING_SJLJ_EXCEPTIONS__
+  /* The given "IP" is an index into the call-site table, with two
+     exceptions -- -1 means no-action, and 0 means terminate.  But
+     since we're using uleb128 values, we've not got random access
+     to the array.  */
+  if ((int) ip <= 0)
+    return _URC_CONTINUE_UNWIND;
+  else
+    {
+      _uleb128_t cs_lp, cs_action;
+      do
+	{
+	  p = read_uleb128 (p, &cs_lp);
+	  p = read_uleb128 (p, &cs_action);
+	}
+      while (--ip);
+
+      /* Can never have null landing pad for sjlj -- that would have
+	 been indicated by a -1 call site index.  */
+      landing_pad = (_Unwind_Ptr)cs_lp + 1;
+      if (cs_action)
+	action_record = info.action_table + cs_action - 1;
+      goto found_something;
+    }
+#else
+  /* Search the call-site table for the action associated with this IP.  */
+  while (p < info.action_table)
+    {
+      _Unwind_Ptr cs_start, cs_len, cs_lp;
+      _uleb128_t cs_action;
+
+      /* Note that all call-site encodings are "absolute" displacements.  */
+      p = read_encoded_value (0, info.call_site_encoding, p, &cs_start);
+      p = read_encoded_value (0, info.call_site_encoding, p, &cs_len);
+      p = read_encoded_value (0, info.call_site_encoding, p, &cs_lp);
+      p = read_uleb128 (p, &cs_action);
+
+      /* The table is sorted, so if we've passed the ip, stop.  */
+      if (ip < info.Start + cs_start)
+	p = info.action_table;
+      else if (ip < info.Start + cs_start + cs_len)
+	{
+	  if (cs_lp)
+	    landing_pad = info.LPStart + cs_lp;
+	  if (cs_action)
+	    action_record = info.action_table + cs_action - 1;
+	  goto found_something;
+	}
+    }
+#endif
+
+  /* IP is not in table.  No associated cleanups.  */
+  CONTINUE_UNWINDING;
+
+ found_something:
+  if (landing_pad == 0)
+    {
+      /* IP is present, but has a null landing pad.
+	 No handler to be run.  */
+      CONTINUE_UNWINDING;
+    }
+
+  if (actions & _UA_SEARCH_PHASE)
+    {
+      if (action_record == 0)
+	{
+	  /* This indicates a cleanup rather than an exception
+	     handler.  */
+	  CONTINUE_UNWINDING;
+	}
+
+      return _URC_HANDLER_FOUND;
+    }
+
+  /* It's possible for __go_panic_defer to be NULL here for an
+     exception thrown by a language other than Go.  */
+  if (__go_panic_defer == NULL)
+    {
+      if (!is_foreign)
+	abort ();
+    }
+  else
+    {
+      __go_panic_defer->__exception = ue_header;
+      __go_panic_defer->__is_foreign = is_foreign;
+    }
+
+  _Unwind_SetGR (context, __builtin_eh_return_data_regno (0),
+		 (_Unwind_Ptr) ue_header);
+  _Unwind_SetGR (context, __builtin_eh_return_data_regno (1), 0);
+  _Unwind_SetIP (context, landing_pad);
+  return _URC_INSTALL_CONTEXT;
+}
diff --git a/libgo/runtime/goc2c.c b/libgo/runtime/goc2c.c
new file mode 100644
index 000000000..bf7483309
--- /dev/null
+++ b/libgo/runtime/goc2c.c
@@ -0,0 +1,735 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/* Translate a .goc file into a .c file.  A .goc file is a combination
+   of a limited form of Go with C.  */
+
+/*
+   package PACKAGENAME
+   {# line}
+   func NAME([NAME TYPE { , NAME TYPE }]) [(NAME TYPE { , NAME TYPE })] \{
+     C code with proper brace nesting
+   \}
+*/
+
+/* We generate C code which implements the function such that it can
+   be called from Go and executes the C code.  */
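+
+/* For example (a sketch; the function and file names are made up),
+   given this .goc input:
+
+       package runtime
+       func Add(a int32, b int32) (sum int32) {
+               sum = a + b;
+       }
+
+   in gcc mode with no --go-prefix, the translator emits roughly:
+
+       int32 runtime_Add(int32 a, int32 b) asm ("runtime.Add");
+       int32 runtime_Add(int32 a, int32 b)
+       {
+         int32 sum;
+       #line 3 "add.goc"
+               sum = a + b;
+       return sum;
+       }
+
+   (derived from write_gcc_func_header and write_gcc_func_trailer
+   below; exact spacing and #line contents will differ).  */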
+
+#include <assert.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+/* Whether we're emitting for gcc */
+static int gcc;
+
+/* Package prefix to use; only meaningful for gcc */
+static const char *prefix;
+
+/* File and line number */
+static const char *file;
+static unsigned int lineno = 1;
+
+/* List of names and types.  */
+struct params {
+	struct params *next;
+	char *name;
+	char *type;
+};
+
+/* index into type_table */
+enum {
+	Bool,
+	Float,
+	Int,
+	Uint,
+	Uintptr,
+	String,
+	Slice,
+	Eface,
+};
+
+static struct {
+	char *name;
+	int size;
+} type_table[] = {
+	/* variable sized first, for easy replacement */
+	/* order matches enum above */
+	/* default is 32-bit architecture sizes */
+	"bool",		1,
+	"float",	4,
+	"int",		4,
+	"uint",		4,
+	"uintptr",	4,
+	"String",	8,
+	"Slice",	12,
+	"Eface",	8,
+
+	/* fixed size */
+	"float32",	4,
+	"float64",	8,
+	"byte",		1,
+	"int8",		1,
+	"uint8",	1,
+	"int16",	2,
+	"uint16",	2,
+	"int32",	4,
+	"uint32",	4,
+	"int64",	8,
+	"uint64",	8,
+
+	NULL,
+};
+
+/* Fixed structure alignment (non-gcc only) */
+int structround = 4;
+
+/* Unexpected EOF.  */
+static void
+bad_eof(void)
+{
+	fprintf(stderr, "%s:%u: unexpected EOF\n", file, lineno);
+	exit(1);
+}
+
+/* Out of memory.  */
+static void
+bad_mem(void)
+{
+	fprintf(stderr, "%s:%u: out of memory\n", file, lineno);
+	exit(1);
+}
+
+/* Allocate memory without fail.  */
+static void *
+xmalloc(unsigned int size)
+{
+	void *ret = malloc(size);
+	if (ret == NULL)
+		bad_mem();
+	return ret;
+}
+
+/* Reallocate memory without fail.  */
+static void*
+xrealloc(void *buf, unsigned int size)
+{
+	void *ret = realloc(buf, size);
+	if (ret == NULL)
+		bad_mem();
+	return ret;
+}
+
+/* Free a list of parameters.  */
+static void
+free_params(struct params *p)
+{
+	while (p != NULL) {
+		struct params *next;
+
+		next = p->next;
+		free(p->name);
+		free(p->type);
+		free(p);
+		p = next;
+	}
+}
+
+/* Read a character, tracking lineno.  */
+static int
+getchar_update_lineno(void)
+{
+	int c;
+
+	c = getchar();
+	if (c == '\n')
+		++lineno;
+	return c;
+}
+
+/* Read a character, giving an error on EOF, tracking lineno.  */
+static int
+getchar_no_eof(void)
+{
+	int c;
+
+	c = getchar_update_lineno();
+	if (c == EOF)
+		bad_eof();
+	return c;
+}
+
+/* Read a character, skipping comments.  */
+static int
+getchar_skipping_comments(void)
+{
+	int c;
+
+	while (1) {
+		c = getchar_update_lineno();
+		if (c != '/')
+			return c;
+
+		c = getchar();
+		if (c == '/') {
+			do {
+				c = getchar_update_lineno();
+			} while (c != EOF && c != '\n');
+			return c;
+		} else if (c == '*') {
+			while (1) {
+				c = getchar_update_lineno();
+				if (c == EOF)
+					return EOF;
+				if (c == '*') {
+					do {
+						c = getchar_update_lineno();
+					} while (c == '*');
+					if (c == '/')
+						break;
+				}
+			}
+		} else {
+			ungetc(c, stdin);
+			return '/';
+		}
+	}
+}
+
+/* Read and return a token.  Tokens are delimited by whitespace or by
+   [(),{}].  The latter are all returned as single characters.  */
+static char *
+read_token(void)
+{
+	int c;
+	char *buf;
+	unsigned int alc, off;
+	const char* delims = "(),{}";
+
+	while (1) {
+		c = getchar_skipping_comments();
+		if (c == EOF)
+			return NULL;
+		if (!isspace(c))
+			break;
+	}
+	alc = 16;
+	buf = xmalloc(alc + 1);
+	off = 0;
+	if (strchr(delims, c) != NULL) {
+		buf[off] = c;
+		++off;
+	} else {
+		while (1) {
+			if (off >= alc) {
+				alc *= 2;
+				buf = xrealloc(buf, alc + 1);
+			}
+			buf[off] = c;
+			++off;
+			c = getchar_skipping_comments();
+			if (c == EOF)
+				break;
+			if (isspace(c) || strchr(delims, c) != NULL) {
+				if (c == '\n')
+					lineno--;
+				ungetc(c, stdin);
+				break;
+			}
+		}
+	}
+	buf[off] = '\0';
+	return buf;
+}
+
+/* Read a token, giving an error on EOF.  */
+static char *
+read_token_no_eof(void)
+{
+	char *token = read_token();
+	if (token == NULL)
+		bad_eof();
+	return token;
+}
+
+/* Read the package clause, and return the package name.  */
+static char *
+read_package(void)
+{
+	char *token;
+
+	token = read_token_no_eof();
+	if (strcmp(token, "package") != 0) {
+		fprintf(stderr,
+			"%s:%u: expected \"package\", got \"%s\"\n",
+			file, lineno, token);
+		exit(1);
+	}
+	return read_token_no_eof();
+}
+
+/* Read and copy preprocessor lines.  */
+static void
+read_preprocessor_lines(void)
+{
+	while (1) {
+		int c;
+
+		do {
+			c = getchar_skipping_comments();
+		} while (isspace(c));
+		if (c != '#') {
+			ungetc(c, stdin);
+			break;
+		}
+		putchar(c);
+		do {
+			c = getchar_update_lineno();
+			putchar(c);
+		} while (c != '\n');
+	}
+}
+
+/* Read a type in Go syntax and return a type in C syntax.  We only
+   permit basic types and pointers.  */
+static char *
+read_type(void)
+{
+	char *p, *op, *q;
+	int pointer_count;
+	unsigned int len;
+
+	p = read_token_no_eof();
+	if (*p != '*')
+		return p;
+	op = p;
+	pointer_count = 0;
+	while (*p == '*') {
+		++pointer_count;
+		++p;
+	}
+	len = strlen(p);
+	q = xmalloc(len + pointer_count + 1);
+	memcpy(q, p, len);
+	while (pointer_count > 0) {
+		q[len] = '*';
+		++len;
+		--pointer_count;
+	}
+	q[len] = '\0';
+	free(op);
+	return q;
+}
+
+/* Return the size of the given type. */
+static int
+type_size(char *p)
+{
+	int i;
+
+	if(p[strlen(p)-1] == '*')
+		return type_table[Uintptr].size;
+
+	for(i=0; type_table[i].name; i++)
+		if(strcmp(type_table[i].name, p) == 0)
+			return type_table[i].size;
+	if(!gcc) {
+		fprintf(stderr, "%s:%u: unknown type %s\n", file, lineno, p);
+		exit(1);
+	}
+	return 1;
+}
+
+/* Read a list of parameters.  Each parameter is a name and a type.
+   The list ends with a ')'.  We have already read the '('.  */
+static struct params *
+read_params(int *poffset)
+{
+	char *token;
+	struct params *ret, **pp, *p;
+	int offset, size, rnd;
+
+	ret = NULL;
+	pp = &ret;
+	token = read_token_no_eof();
+	offset = 0;
+	if (strcmp(token, ")") != 0) {
+		while (1) {
+			p = xmalloc(sizeof(struct params));
+			p->name = token;
+			p->type = read_type();
+			p->next = NULL;
+			*pp = p;
+			pp = &p->next;
+
+			size = type_size(p->type);
+			rnd = size;
+			if(rnd > structround)
+				rnd = structround;
+			if(offset%rnd)
+				offset += rnd - offset%rnd;
+			offset += size;
+
+			token = read_token_no_eof();
+			if (strcmp(token, ",") != 0)
+				break;
+			token = read_token_no_eof();
+		}
+	}
+	if (strcmp(token, ")") != 0) {
+		fprintf(stderr, "%s:%u: expected ')'\n",
+			file, lineno);
+		exit(1);
+	}
+	if (poffset != NULL)
+		*poffset = offset;
+	return ret;
+}
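+
+/* A worked example of the offset arithmetic above: for parameters
+   (v *byte, n int32) with amd64 sizes (pointer 8, structround 8),
+   v lands at offset 0 and advances offset to 8; for n, size is 4,
+   rnd = min(4, structround) = 4, and 8 % 4 == 0, so n lands at
+   offset 8 and the total parameter width is 12.  In 6g mode,
+   write_6g_func_header below then pads 12 up to a multiple of
+   structround (here by emitting a trailing uint32) before the
+   result slots.  */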
+
+/* Read a function header.  This reads up to and including the initial
+   '{' character.  Returns 1 if it read a header, 0 at EOF.  */
+static int
+read_func_header(char **name, struct params **params, int *paramwid, struct params **rets)
+{
+	int lastline;
+	char *token;
+
+	lastline = -1;
+	while (1) {
+		token = read_token();
+		if (token == NULL)
+			return 0;
+		if (strcmp(token, "func") == 0) {
+			if(lastline != -1)
+				printf("\n");
+			break;
+		}
+		if (lastline != lineno) {
+			if (lastline == lineno-1)
+				printf("\n");
+			else
+				printf("\n#line %d \"%s\"\n", lineno, file);
+			lastline = lineno;
+		}
+		printf("%s ", token);
+	}
+
+	*name = read_token_no_eof();
+
+	token = read_token();
+	if (token == NULL || strcmp(token, "(") != 0) {
+		fprintf(stderr, "%s:%u: expected \"(\"\n",
+			file, lineno);
+		exit(1);
+	}
+	*params = read_params(paramwid);
+
+	token = read_token();
+	if (token == NULL || strcmp(token, "(") != 0)
+		*rets = NULL;
+	else {
+		*rets = read_params(NULL);
+		token = read_token();
+	}
+	if (token == NULL || strcmp(token, "{") != 0) {
+		fprintf(stderr, "%s:%u: expected \"{\"\n",
+			file, lineno);
+		exit(1);
+	}
+	return 1;
+}
+
+/* Write out parameters.  */
+static void
+write_params(struct params *params, int *first)
+{
+	struct params *p;
+
+	for (p = params; p != NULL; p = p->next) {
+		if (*first)
+			*first = 0;
+		else
+			printf(", ");
+		printf("%s %s", p->type, p->name);
+	}
+}
+
+/* Write a 6g function header.  */
+static void
+write_6g_func_header(char *package, char *name, struct params *params,
+		     int paramwid, struct params *rets)
+{
+	int first, n;
+
+	printf("void\n%s·%s(", package, name);
+	first = 1;
+	write_params(params, &first);
+
+	/* insert padding to align output struct */
+	if(rets != NULL && paramwid%structround != 0) {
+		n = structround - paramwid%structround;
+		if(n & 1)
+			printf(", uint8");
+		if(n & 2)
+			printf(", uint16");
+		if(n & 4)
+			printf(", uint32");
+	}
+
+	write_params(rets, &first);
+	printf(")\n{\n");
+}
+
+/* Write a 6g function trailer.  */
+static void
+write_6g_func_trailer(struct params *rets)
+{
+	struct params *p;
+
+	for (p = rets; p != NULL; p = p->next)
+		printf("\tFLUSH(&%s);\n", p->name);
+	printf("}\n");
+}
+
+/* Define the gcc function return type if necessary.  */
+static void
+define_gcc_return_type(char *package, char *name, struct params *rets)
+{
+	struct params *p;
+
+	if (rets == NULL || rets->next == NULL)
+		return;
+	printf("struct %s_%s_ret {\n", package, name);
+	for (p = rets; p != NULL; p = p->next)
+		printf("  %s %s;\n", p->type, p->name);
+	printf("};\n");
+}
+
+/* Write out the gcc function return type.  */
+static void
+write_gcc_return_type(char *package, char *name, struct params *rets)
+{
+	if (rets == NULL)
+		printf("void");
+	else if (rets->next == NULL)
+		printf("%s", rets->type);
+	else
+		printf("struct %s_%s_ret", package, name);
+}
+
+/* Write out a gcc function header.  */
+static void
+write_gcc_func_header(char *package, char *name, struct params *params,
+		      struct params *rets)
+{
+	int first;
+	struct params *p;
+
+	define_gcc_return_type(package, name, rets);
+	write_gcc_return_type(package, name, rets);
+	printf(" %s_%s(", package, name);
+	first = 1;
+	write_params(params, &first);
+	printf(") asm (\"");
+	if (prefix != NULL)
+	  printf("%s.", prefix);
+	printf("%s.%s\");\n", package, name);
+	write_gcc_return_type(package, name, rets);
+	printf(" %s_%s(", package, name);
+	first = 1;
+	write_params(params, &first);
+	printf(")\n{\n");
+	for (p = rets; p != NULL; p = p->next)
+		printf("  %s %s;\n", p->type, p->name);
+}
+
+/* Write out a gcc function trailer.  */
+static void
+write_gcc_func_trailer(char *package, char *name, struct params *rets)
+{
+	if (rets == NULL)
+		;
+	else if (rets->next == NULL)
+		printf("return %s;\n", rets->name);
+	else {
+		struct params *p;
+
+		printf("  {\n    struct %s_%s_ret __ret;\n", package, name);
+		for (p = rets; p != NULL; p = p->next)
+			printf("    __ret.%s = %s;\n", p->name, p->name);
+		printf("    return __ret;\n  }\n");
+	}
+	printf("}\n");
+}
+
+/* Write out a function header.  */
+static void
+write_func_header(char *package, char *name,
+		  struct params *params, int paramwid,
+		  struct params *rets)
+{
+	if (gcc)
+		write_gcc_func_header(package, name, params, rets);
+	else
+		write_6g_func_header(package, name, params, paramwid, rets);
+	printf("#line %d \"%s\"\n", lineno, file);
+}
+
+/* Write out a function trailer.  */
+static void
+write_func_trailer(char *package, char *name,
+		   struct params *rets)
+{
+	if (gcc)
+		write_gcc_func_trailer(package, name, rets);
+	else
+		write_6g_func_trailer(rets);
+}
+
+/* Read and write the body of the function, ending in an unnested }
+   (which is read but not written).  */
+static void
+copy_body(void)
+{
+	int nesting = 0;
+	while (1) {
+		int c;
+
+		c = getchar_no_eof();
+		if (c == '}' && nesting == 0)
+			return;
+		putchar(c);
+		switch (c) {
+		default:
+			break;
+		case '{':
+			++nesting;
+			break;
+		case '}':
+			--nesting;
+			break;
+		case '/':
+			c = getchar_update_lineno();
+			putchar(c);
+			if (c == '/') {
+				do {
+					c = getchar_no_eof();
+					putchar(c);
+				} while (c != '\n');
+			} else if (c == '*') {
+				while (1) {
+					c = getchar_no_eof();
+					putchar(c);
+					if (c == '*') {
+						do {
+							c = getchar_no_eof();
+							putchar(c);
+						} while (c == '*');
+						if (c == '/')
+							break;
+					}
+				}
+			}
+			break;
+		case '"':
+		case '\'':
+			{
+				int delim = c;
+				do {
+					c = getchar_no_eof();
+					putchar(c);
+					if (c == '\\') {
+						c = getchar_no_eof();
+						putchar(c);
+						c = '\0';
+					}
+				} while (c != delim);
+			}
+			break;
+		}
+	}
+}
+
+/* Process the entire file.  */
+static void
+process_file(void)
+{
+	char *package, *name;
+	struct params *params, *rets;
+	int paramwid;
+
+	package = read_package();
+	read_preprocessor_lines();
+	while (read_func_header(&name, &params, &paramwid, &rets)) {
+		write_func_header(package, name, params, paramwid, rets);
+		copy_body();
+		write_func_trailer(package, name, rets);
+		free(name);
+		free_params(params);
+		free_params(rets);
+	}
+	free(package);
+}
+
+static void
+usage(void)
+{
+	fprintf(stderr, "Usage: goc2c [--6g | --gc] [--go-prefix PREFIX] [file]\n");
+	exit(1);
+}
+
+int
+main(int argc, char **argv)
+{
+	char *goarch;
+
+	while(argc > 1 && argv[1][0] == '-') {
+		if(strcmp(argv[1], "-") == 0)
+			break;
+		if(strcmp(argv[1], "--6g") == 0)
+			gcc = 0;
+		else if(strcmp(argv[1], "--gcc") == 0)
+			gcc = 1;
+		else if (strcmp(argv[1], "--go-prefix") == 0 && argc > 2) {
+			prefix = argv[2];
+			argc--;
+			argv++;
+		} else
+			usage();
+		argc--;
+		argv++;
+	}
+
+	if(argc <= 1 || strcmp(argv[1], "-") == 0) {
+		file = "<stdin>";
+		process_file();
+		return 0;
+	}
+
+	if(argc > 2)
+		usage();
+
+	file = argv[1];
+	if(freopen(file, "r", stdin) == 0) {
+		fprintf(stderr, "open %s: %s\n", file, strerror(errno));
+		exit(1);
+	}
+
+	if(!gcc) {
+		// 6g etc; update size table
+		goarch = getenv("GOARCH");
+		if(goarch != NULL && strcmp(goarch, "amd64") == 0) {
+			type_table[Uintptr].size = 8;
+			type_table[String].size = 16;
+			type_table[Slice].size = 8+4+4;
+			type_table[Eface].size = 8+8;
+			structround = 8;
+		}
+	}
+
+	process_file();
+	return 0;
+}
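+
+/* A typical invocation in gcc mode (a sketch; the file name is
+   hypothetical, and the prefix matches the asm names used elsewhere
+   in this runtime):
+
+       goc2c --gcc --go-prefix libgo_runtime malloc.goc > malloc.c
+
+   Output always goes to stdout; with no file argument, or with "-",
+   input is read from stdin.  */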
diff --git a/libgo/runtime/iface.goc b/libgo/runtime/iface.goc
new file mode 100644
index 000000000..356b318cb
--- /dev/null
+++ b/libgo/runtime/iface.goc
@@ -0,0 +1,131 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+#include "go-type.h"
+#include "interface.h"
+#define nil NULL
+
+typedef _Bool bool;
+typedef struct __go_type_descriptor descriptor;
+typedef const struct __go_type_descriptor const_descriptor;
+typedef struct __go_interface interface;
+typedef struct __go_empty_interface empty_interface;
+
+// Compare two type descriptors.
+func ifacetypeeq(a *descriptor, b *descriptor) (eq bool) {
+	eq = __go_type_descriptors_equal(a, b);
+}
+
+// Return the descriptor for an empty interface type.
+func efacetype(e empty_interface) (d *const_descriptor) {
+	return e.__type_descriptor;
+}
+
+// Return the descriptor for a non-empty interface type.
+func ifacetype(i interface) (d *const_descriptor) {
+	if (i.__methods == nil) {
+		return nil;
+	}
+	d = i.__methods[0];
+}
+
+// Convert an empty interface to an empty interface.
+func ifaceE2E2(e empty_interface) (ret empty_interface, ok bool) {
+	ret = e;
+	ok = ret.__type_descriptor != nil;
+}
+
+// Convert a non-empty interface to an empty interface.
+func ifaceI2E2(i interface) (ret empty_interface, ok bool) {
+	if (i.__methods == nil) {
+		ret.__type_descriptor = nil;
+		ret.__object = nil;
+		ok = 0;
+	} else {
+		ret.__type_descriptor = i.__methods[0];
+		ret.__object = i.__object;
+		ok = 1;
+	}
+}
+
+// Convert an empty interface to a non-empty interface.
+func ifaceE2I2(inter *descriptor, e empty_interface) (ret interface, ok bool) {
+	if (e.__type_descriptor == nil) {
+		ret.__methods = nil;
+		ret.__object = nil;
+		ok = 0;
+	} else {
+		ret.__methods = __go_convert_interface_2(inter,
+							 e.__type_descriptor,
+							 1);
+		ret.__object = e.__object;
+		ok = ret.__methods != nil;
+	}
+}
+
+// Convert a non-empty interface to a non-empty interface.
+func ifaceI2I2(inter *descriptor, i interface) (ret interface, ok bool) {
+	if (i.__methods == nil) {
+		ret.__methods = nil;
+		ret.__object = nil;
+		ok = 0;
+	} else {
+		ret.__methods = __go_convert_interface_2(inter,
+							 i.__methods[0], 1);
+		ret.__object = i.__object;
+		ok = ret.__methods != nil;
+	}
+}
+
+// Convert an empty interface to a pointer type.
+func ifaceE2T2P(inter *descriptor, e empty_interface) (ret *void, ok bool) {
+	if (!__go_type_descriptors_equal(inter, e.__type_descriptor)) {
+		ret = nil;
+		ok = 0;
+	} else {
+		ret = e.__object;
+		ok = 1;
+	}
+}
+
+// Convert a non-empty interface to a pointer type.
+func ifaceI2T2P(inter *descriptor, i interface) (ret *void, ok bool) {
+	if (i.__methods == nil
+	    || !__go_type_descriptors_equal(inter, i.__methods[0])) {
+		ret = nil;
+		ok = 0;
+	} else {
+		ret = i.__object;
+		ok = 1;
+	}
+}
+
+// Convert an empty interface to a non-pointer type.
+func ifaceE2T2(inter *descriptor, e empty_interface, ret *void) (ok bool) {
+	if (!__go_type_descriptors_equal(inter, e.__type_descriptor)) {
+		__builtin_memset(ret, 0, inter->__size);
+		ok = 0;
+	} else {
+		__builtin_memcpy(ret, e.__object, inter->__size);
+		ok = 1;
+	}
+}
+
+// Convert a non-empty interface to a non-pointer type.
+func ifaceI2T2(inter *descriptor, i interface, ret *void) (ok bool) {
+	if (i.__methods == nil
+	    || !__go_type_descriptors_equal(inter, i.__methods[0])) {
+		__builtin_memset(ret, 0, inter->__size);
+		ok = 0;
+	} else {
+		__builtin_memcpy(ret, i.__object, inter->__size);
+		ok = 1;
+	}
+}
+
+// Return whether we can convert an interface to a type.
+func ifaceI2Tp(to *descriptor, from *descriptor) (ok bool) {
+	ok = __go_can_convert_to_interface(to, from);
+}
diff --git a/libgo/runtime/interface.h b/libgo/runtime/interface.h
new file mode 100644
index 000000000..610f20890
--- /dev/null
+++ b/libgo/runtime/interface.h
@@ -0,0 +1,57 @@
+/* interface.h -- the interface type for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#ifndef LIBGO_INTERFACE_H
+#define LIBGO_INTERFACE_H
+
+#include "go-type.h"
+
+/* A variable of interface type is an instance of this struct, if the
+   interface has any methods.  */
+
+struct __go_interface
+{
+  /* A pointer to the interface method table.  The first pointer is
+     the type descriptor of the object.  Subsequent pointers are
+     pointers to functions.  This is effectively the vtable for this
+     interface.  The function pointers are in the same order as the
+     list in the internal representation of the interface, which sorts
+     them by name.  */
+  const void **__methods;
+
+  /* The object.  If the object is a pointer--if the type descriptor
+     code is GO_PTR or GO_UNSAFE_POINTER--then this field is the value
+     of the object itself.  Otherwise this is a pointer to memory
+     which holds the value.  */
+  void *__object;
+};
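+
+/* Given this layout, a compiled method call fetches a function
+   pointer from __methods and passes __object as the receiver.  A
+   hand-written sketch follows; the signature and receiver-passing
+   convention here are assumptions for illustration, not the
+   compiler's actual calling convention.  Slot 0 holds the type
+   descriptor, so the first method pointer lives at index 1:
+
+     int
+     call_first_method (struct __go_interface i, int arg)
+     {
+       int (*fn) (void *, int) = (int (*) (void *, int)) i.__methods[1];
+       return fn (i.__object, arg);
+     }
+*/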
+
+/* A variable of an empty interface type is an instance of this
+   struct.  */
+
+struct __go_empty_interface
+{
+  /* The type descriptor of the object.  */
+  const struct __go_type_descriptor *__type_descriptor;
+
+  /* The object.  This is the same as __go_interface above.  */
+  void *__object;
+};
+
+extern void *
+__go_convert_interface (const struct __go_type_descriptor *,
+			const struct __go_type_descriptor *);
+
+extern void *
+__go_convert_interface_2 (const struct __go_type_descriptor *,
+			  const struct __go_type_descriptor *,
+			  _Bool may_fail);
+
+extern _Bool
+__go_can_convert_to_interface(const struct __go_type_descriptor *,
+			      const struct __go_type_descriptor *);
+
+#endif /* !defined(LIBGO_INTERFACE_H) */
diff --git a/libgo/runtime/malloc.goc b/libgo/runtime/malloc.goc
new file mode 100644
index 000000000..d826d479f
--- /dev/null
+++ b/libgo/runtime/malloc.goc
@@ -0,0 +1,357 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// See malloc.h for overview.
+//
+// TODO(rsc): double-check stats.
+
+package runtime
+#include <stddef.h>
+#include <errno.h>
+#include <stdlib.h>
+#include "go-alloc.h"
+#include "runtime.h"
+#include "malloc.h"
+#include "go-string.h"
+#include "interface.h"
+#include "go-type.h"
+typedef struct __go_empty_interface Eface;
+typedef struct __go_type_descriptor Type;
+typedef struct __go_func_type FuncType;
+
+MHeap runtime_mheap;
+extern MStats mstats;	// defined in extern.go
+
+extern volatile int32 runtime_MemProfileRate
+  __asm__ ("libgo_runtime.runtime.MemProfileRate");
+
+// Same algorithm from chan.c, but a different
+// instance of the static uint32 x.
+// Not protected by a lock - let the threads use
+// the same random number if they like.
+static uint32
+fastrand1(void)
+{
+	static uint32 x = 0x49f6428aUL;
+
+	x += x;
+	if(x & 0x80000000L)
+		x ^= 0x88888eefUL;
+	return x;
+}
+
+// Allocate an object of at least size bytes.
+// Small objects are allocated from the per-thread cache's free lists.
+// Large objects (> 32 kB) are allocated straight from the heap.
+void*
+runtime_mallocgc(uintptr size, uint32 refflag, int32 dogc, int32 zeroed)
+{
+	int32 sizeclass, rate;
+	MCache *c;
+	uintptr npages;
+	MSpan *s;
+	void *v;
+	uint32 *ref;
+
+	if(!__sync_bool_compare_and_swap(&m->mallocing, 0, 1))
+		runtime_throw("malloc/free - deadlock");
+	if(size == 0)
+		size = 1;
+
+	mstats.nmalloc++;
+	if(size <= MaxSmallSize) {
+		// Allocate from mcache free lists.
+		sizeclass = runtime_SizeToClass(size);
+		size = runtime_class_to_size[sizeclass];
+		c = m->mcache;
+		v = runtime_MCache_Alloc(c, sizeclass, size, zeroed);
+		if(v == nil)
+			runtime_throw("out of memory");
+		mstats.alloc += size;
+		mstats.total_alloc += size;
+		mstats.by_size[sizeclass].nmalloc++;
+
+		if(!runtime_mlookup(v, nil, nil, nil, &ref)) {
+			// runtime_printf("malloc %D; runtime_mlookup failed\n", (uint64)size);
+			runtime_throw("malloc runtime_mlookup");
+		}
+		*ref = RefNone | refflag;
+	} else {
+		// TODO(rsc): Report tracebacks for very large allocations.
+
+		// Allocate directly from heap.
+		npages = size >> PageShift;
+		if((size & PageMask) != 0)
+			npages++;
+		s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, 1);
+		if(s == nil)
+			runtime_throw("out of memory");
+		size = npages<<PageShift;
+		mstats.alloc += size;
+		mstats.total_alloc += size;
+		v = (void*)(s->start << PageShift);
+
+		// setup for mark sweep
+		s->gcref0 = RefNone | refflag;
+		ref = &s->gcref0;
+	}
+
+	__sync_bool_compare_and_swap(&m->mallocing, 1, 0);
+
+	if(__sync_bool_compare_and_swap(&m->gcing, 1, 0)) {
+		if(!(refflag & RefNoProfiling))
+			__go_run_goroutine_gc(0);
+		else {
+			// We are being called from the profiler.  Tell it
+			// to invoke the garbage collector when it is
+			// done.  No need to use a sync function here.
+			m->gcing_for_prof = 1;
+		}
+	}
+
+	if(!(refflag & RefNoProfiling) && (rate = runtime_MemProfileRate) > 0) {
+		if(size >= (uint32) rate)
+			goto profile;
+		if((uint32) m->mcache->next_sample > size)
+			m->mcache->next_sample -= size;
+		else {
+			// pick next profile time
+			if(rate > 0x3fffffff)	// make 2*rate not overflow
+				rate = 0x3fffffff;
+			m->mcache->next_sample = fastrand1() % (2*rate);
+		profile:
+			*ref |= RefProfiled;
+			runtime_MProf_Malloc(v, size);
+		}
+	}
+
+	if(dogc && mstats.heap_alloc >= mstats.next_gc)
+		runtime_gc(0);
+	return v;
+}
+
+void*
+__go_alloc(uintptr size)
+{
+	return runtime_mallocgc(size, 0, 0, 1);
+}
+
+// Free the object whose base pointer is v.
+void
+__go_free(void *v)
+{
+	int32 sizeclass, size;
+	MSpan *s;
+	MCache *c;
+	uint32 prof, *ref;
+
+	if(v == nil)
+		return;
+
+	if(!__sync_bool_compare_and_swap(&m->mallocing, 0, 1))
+		runtime_throw("malloc/free - deadlock");
+
+	if(!runtime_mlookup(v, nil, nil, &s, &ref)) {
+		// runtime_printf("free %p: not an allocated block\n", v);
+		runtime_throw("free runtime_mlookup");
+	}
+	prof = *ref & RefProfiled;
+	*ref = RefFree;
+
+	// Find size class for v.
+	sizeclass = s->sizeclass;
+	if(sizeclass == 0) {
+		// Large object.
+		if(prof)
+			runtime_MProf_Free(v, s->npages<<PageShift);
+		mstats.alloc -= s->npages<<PageShift;
+		runtime_memclr(v, s->npages<<PageShift);
+		runtime_MHeap_Free(&runtime_mheap, s, 1);
+	} else {
+		// Small object.
+		c = m->mcache;
+		size = runtime_class_to_size[sizeclass];
+		if(size > (int32)sizeof(uintptr))
+			((uintptr*)v)[1] = 1;	// mark as "needs to be zeroed"
+		if(prof)
+			runtime_MProf_Free(v, size);
+		mstats.alloc -= size;
+		mstats.by_size[sizeclass].nfree++;
+		runtime_MCache_Free(c, v, sizeclass, size);
+	}
+	__sync_bool_compare_and_swap(&m->mallocing, 1, 0);
+
+	if(__sync_bool_compare_and_swap(&m->gcing, 1, 0))
+		__go_run_goroutine_gc(1);
+}
+
+int32
+runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **sp, uint32 **ref)
+{
+	uintptr n, nobj, i;
+	byte *p;
+	MSpan *s;
+
+	mstats.nlookup++;
+	s = runtime_MHeap_LookupMaybe(&runtime_mheap, (uintptr)v>>PageShift);
+	if(sp)
+		*sp = s;
+	if(s == nil) {
+		if(base)
+			*base = nil;
+		if(size)
+			*size = 0;
+		if(ref)
+			*ref = 0;
+		return 0;
+	}
+
+	p = (byte*)((uintptr)s->start<<PageShift);
+	if(s->sizeclass == 0) {
+		// Large object.
+		if(base)
+			*base = p;
+		if(size)
+			*size = s->npages<<PageShift;
+		if(ref)
+			*ref = &s->gcref0;
+		return 1;
+	}
+
+	if((byte*)v >= (byte*)s->gcref) {
+		// pointers into the gc ref counts
+		// do not count as pointers.
+		return 0;
+	}
+
+	n = runtime_class_to_size[s->sizeclass];
+	i = ((byte*)v - p)/n;
+	if(base)
+		*base = p + i*n;
+	if(size)
+		*size = n;
+
+	// good for error checking, but expensive
+	if(0) {
+		nobj = (s->npages << PageShift) / (n + RefcountOverhead);
+		if((byte*)s->gcref < p || (byte*)(s->gcref+nobj) > p+(s->npages<<PageShift)) {
+			// runtime_printf("odd span state=%d span=%p base=%p sizeclass=%d n=%D size=%D npages=%D\n",
+			//	s->state, s, p, s->sizeclass, (uint64)nobj, (uint64)n, (uint64)s->npages);
+			// runtime_printf("s->base sizeclass %d v=%p base=%p gcref=%p blocksize=%D nobj=%D size=%D end=%p end=%p\n",
+			//	s->sizeclass, v, p, s->gcref, (uint64)s->npages<<PageShift,
+			//	(uint64)nobj, (uint64)n, s->gcref + nobj, p+(s->npages<<PageShift));
+			runtime_throw("bad gcref");
+		}
+	}
+	if(ref)
+		*ref = &s->gcref[i];
+
+	return 1;
+}
+
+MCache*
+runtime_allocmcache(void)
+{
+	MCache *c;
+
+	if(!__sync_bool_compare_and_swap(&m->mallocing, 0, 1))
+		runtime_throw("allocmcache - deadlock");
+
+	runtime_lock(&runtime_mheap);
+	c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc);
+
+	// Clear the free list used by FixAlloc; assume the rest is zeroed.
+	c->list[0].list = nil;
+
+	mstats.mcache_inuse = runtime_mheap.cachealloc.inuse;
+	mstats.mcache_sys = runtime_mheap.cachealloc.sys;
+	runtime_unlock(&runtime_mheap);
+
+	__sync_bool_compare_and_swap(&m->mallocing, 1, 0);
+	if(__sync_bool_compare_and_swap(&m->gcing, 1, 0))
+		__go_run_goroutine_gc(2);
+
+	return c;
+}
+
+extern int32 runtime_sizeof_C_MStats
+  __asm__ ("libgo_runtime.runtime.Sizeof_C_MStats");
+
+void
+runtime_mallocinit(void)
+{
+	runtime_sizeof_C_MStats = sizeof(MStats);
+
+	runtime_initfintab();
+	runtime_Mprof_Init();
+
+	runtime_SysMemInit();
+	runtime_InitSizes();
+	runtime_MHeap_Init(&runtime_mheap, runtime_SysAlloc);
+	m->mcache = runtime_allocmcache();
+
+	// See if it works.
+	runtime_free(runtime_malloc(1));
+}
+
+// Runtime stubs.
+
+void*
+runtime_mal(uintptr n)
+{
+	return runtime_mallocgc(n, 0, 1, 1);
+}
+
+func Alloc(n uintptr) (p *byte) {
+	p = runtime_malloc(n);
+}
+
+func Free(p *byte) {
+	runtime_free(p);
+}
+
+func Lookup(p *byte) (base *byte, size uintptr) {
+	runtime_mlookup(p, &base, &size, nil, nil);
+}
+
+func GC() {
+	runtime_gc(1);
+}
+
+func SetFinalizer(obj Eface, finalizer Eface) {
+	byte *base;
+	uintptr size;
+	const FuncType *ft;
+
+	if(obj.__type_descriptor == nil) {
+		// runtime_printf("runtime.SetFinalizer: first argument is nil interface\n");
+	throw:
+		runtime_throw("runtime.SetFinalizer");
+	}
+	if(obj.__type_descriptor->__code != GO_PTR) {
+		// runtime_printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
+		goto throw;
+	}
+	if(!runtime_mlookup(obj.__object, &base, &size, nil, nil) || obj.__object != base) {
+		// runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
+		goto throw;
+	}
+	ft = nil;
+	if(finalizer.__type_descriptor != nil) {
+		if(finalizer.__type_descriptor->__code != GO_FUNC) {
+		badfunc:
+			// runtime_printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string);
+			goto throw;
+		}
+		ft = (const FuncType*)finalizer.__type_descriptor;
+		if(ft->__dotdotdot || ft->__in.__count != 1 || !__go_type_descriptors_equal(*(Type**)ft->__in.__values, obj.__type_descriptor))
+			goto badfunc;
+
+		if(runtime_getfinalizer(obj.__object, 0)) {
+			// runtime_printf("runtime.SetFinalizer: finalizer already set");
+			goto throw;
+		}
+	}
+	runtime_addfinalizer(obj.__object, finalizer.__type_descriptor != nil ? *(void**)finalizer.__object : nil, ft);
+}
diff --git a/libgo/runtime/malloc.h b/libgo/runtime/malloc.h
new file mode 100644
index 000000000..369f9b8e7
--- /dev/null
+++ b/libgo/runtime/malloc.h
@@ -0,0 +1,399 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Memory allocator, based on tcmalloc.
+// http://goog-perftools.sourceforge.net/doc/tcmalloc.html
+
+// The main allocator works in runs of pages.
+// Small allocation sizes (up to and including 32 kB) are
+// rounded to one of about 100 size classes, each of which
+// has its own free list of objects of exactly that size.
+// Any free page of memory can be split into a set of objects
+// of one size class, which are then managed using free list
+// allocators.
+//
+// The allocator's data structures are:
+//
+//	FixAlloc: a free-list allocator for fixed-size objects,
+//		used to manage storage used by the allocator.
+//	MHeap: the malloc heap, managed at page (4096-byte) granularity.
+//	MSpan: a run of pages managed by the MHeap.
+//	MHeapMap: a mapping from page IDs to MSpans.
+//	MCentral: a shared free list for a given size class.
+//	MCache: a per-thread (in Go, per-M) cache for small objects.
+//	MStats: allocation statistics.
+//
+// Allocating a small object proceeds up a hierarchy of caches:
+//
+//	1. Round the size up to one of the small size classes
+//	   and look in the corresponding MCache free list.
+//	   If the list is not empty, allocate an object from it.
+//	   This can all be done without acquiring a lock.
+//
+//	2. If the MCache free list is empty, replenish it by
+//	   taking a bunch of objects from the MCentral free list.
+//	   Moving a bunch amortizes the cost of acquiring the MCentral lock.
+//
+//	3. If the MCentral free list is empty, replenish it by
+//	   allocating a run of pages from the MHeap and then
+//	   chopping that memory into objects of the given size.
+//	   Allocating many objects amortizes the cost of locking
+//	   the heap.
+//
+//	4. If the MHeap is empty or has no page runs large enough,
+//	   allocate a new group of pages (at least 1MB) from the
+//	   operating system.  Allocating a large run of pages
+//	   amortizes the cost of talking to the operating system.
+//
+// Freeing a small object proceeds up the same hierarchy:
+//
+//	1. Look up the size class for the object and add it to
+//	   the MCache free list.
+//
+//	2. If the MCache free list is too long or the MCache has
+//	   too much memory, return some to the MCentral free lists.
+//
+//	3. If all the objects in a given span have returned to
+//	   the MCentral list, return that span to the page heap.
+//
+//	4. If the heap has too much memory, return some to the
+//	   operating system.
+//
+//	TODO(rsc): Step 4 is not implemented.
+//
+// Allocating and freeing a large object uses the page heap
+// directly, bypassing the MCache and MCentral free lists.
+//
+// The small objects on the MCache and MCentral free lists
+// may or may not be zeroed.  They are zeroed if and only if
+// the second word of the object is zero.  The spans in the
+// page heap are always zeroed.  When a span full of objects
+// is returned to the page heap, the objects that still need
+// zeroing are zeroed first.  There are two main benefits to delaying the
+// zeroing this way:
+//
+//	1. stack frames allocated from the small object lists
+//	   can avoid zeroing altogether.
+//	2. the cost of zeroing when reusing a small object is
+//	   charged to the mutator, not the garbage collector.
+//
+// This C code was written with an eye toward translating to Go
+// in the future.  Methods have the form Type_Method(Type *t, ...).
+
+typedef struct FixAlloc	FixAlloc;
+typedef struct MCentral	MCentral;
+typedef struct MHeap	MHeap;
+typedef struct MHeapMap	MHeapMap;
+typedef struct MSpan	MSpan;
+typedef struct MStats	MStats;
+typedef struct MLink	MLink;
+
+enum
+{
+	PageShift	= 12,
+	PageSize	= 1<<PageShift,
+	PageMask	= PageSize - 1,
+};
+typedef	uintptr	PageID;		// address >> PageShift
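+
+// For example, with PageShift = 12 a large request of 50000 bytes
+// gives npages = 50000 >> PageShift = 12; since 50000 & PageMask is
+// nonzero, it is rounded up to 13 pages (53248 bytes).  This is the
+// computation runtime_mallocgc performs for objects above
+// MaxSmallSize.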
+
+enum
+{
+	// Tunable constants.
+	NumSizeClasses = 67,		// Number of size classes (must match msize.c)
+	MaxSmallSize = 32<<10,
+
+	FixAllocChunk = 128<<10,	// Chunk size for FixAlloc
+	MaxMCacheListLen = 256,		// Maximum objects on MCacheList
+	MaxMCacheSize = 2<<20,		// Maximum bytes in one MCache
+	MaxMHeapList = 1<<(20 - PageShift),	// Maximum page length for fixed-size list in MHeap.
+	HeapAllocChunk = 1<<20,		// Chunk size for heap growth
+};
+
+#if __SIZEOF_POINTER__ == 8
+#include "mheapmap64.h"
+#else
+#include "mheapmap32.h"
+#endif
+
+// A generic linked list of blocks.  (Typically the block is bigger than sizeof(MLink).)
+struct MLink
+{
+	MLink *next;
+};
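+
+// The free lists described in the comment at the top of this file are
+// all chains of MLink nodes threaded through the free memory itself;
+// pushing and popping are plain pointer swaps.  The helpers below are
+// an illustrative sketch only, not used by the allocator, which
+// open-codes these operations:
+
+static inline void
+MLink_Push(MLink **list, MLink *v)
+{
+	v->next = *list;
+	*list = v;
+}
+
+static inline MLink*
+MLink_Pop(MLink **list)
+{
+	MLink *v;
+
+	v = *list;
+	if(v != nil)
+		*list = v->next;
+	return v;
+}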
+
+// SysAlloc obtains a large chunk of zeroed memory from the
+// operating system, typically on the order of a hundred kilobytes
+// or a megabyte.
+//
+// SysUnused notifies the operating system that the contents
+// of the memory region are no longer needed and can be reused
+// for other purposes.  The program reserves the right to start
+// accessing those pages in the future.
+//
+// SysFree returns the memory to the system unconditionally; this is only used if
+// an out-of-memory error has been detected midway through
+// an allocation.  It is okay if SysFree is a no-op.
+
+void*	runtime_SysAlloc(uintptr nbytes);
+void	runtime_SysFree(void *v, uintptr nbytes);
+void	runtime_SysUnused(void *v, uintptr nbytes);
+void	runtime_SysMemInit(void);
+
+// FixAlloc is a simple free-list allocator for fixed size objects.
+// Malloc uses a FixAlloc wrapped around SysAlloc to manage its
+// MCache and MSpan objects.
+//
+// Memory returned by FixAlloc_Alloc is not zeroed.
+// The caller is responsible for locking around FixAlloc calls.
+// Callers can keep state in the object but the first word is
+// smashed by freeing and reallocating.
+struct FixAlloc
+{
+	uintptr size;
+	void *(*alloc)(uintptr);
+	void (*first)(void *arg, byte *p);	// called first time p is returned
+	void *arg;
+	MLink *list;
+	byte *chunk;
+	uint32 nchunk;
+	uintptr inuse;	// in-use bytes now
+	uintptr sys;	// bytes obtained from system
+};
+
+void	runtime_FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg);
+void*	runtime_FixAlloc_Alloc(FixAlloc *f);
+void	runtime_FixAlloc_Free(FixAlloc *f, void *p);
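+
+// An illustrative sketch of wiring one up (the call site and names
+// are hypothetical; the real spanalloc/cachealloc initialization
+// lives elsewhere in the runtime, and whether a nil first-use hook
+// is accepted is an assumption here):
+//
+//	static FixAlloc examplealloc;
+//	runtime_FixAlloc_Init(&examplealloc, sizeof(MSpan),
+//		runtime_SysAlloc, nil, nil);
+//	MSpan *s = runtime_FixAlloc_Alloc(&examplealloc);
+//	// ... use s; memory is not zeroed ...
+//	runtime_FixAlloc_Free(&examplealloc, s);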
+
+
+// Statistics.
+// Shared with Go: if you edit this structure, also edit extern.go.
+struct MStats
+{
+	// General statistics.  No locking; approximate.
+	uint64	alloc;		// bytes allocated and still in use
+	uint64	total_alloc;	// bytes allocated (even if freed)
+	uint64	sys;		// bytes obtained from system (should be sum of xxx_sys below)
+	uint64	nlookup;	// number of pointer lookups
+	uint64	nmalloc;	// number of mallocs
+	uint64	nfree;  // number of frees
+	
+	// Statistics about malloc heap.
+	// protected by mheap.Lock
+	uint64	heap_alloc;	// bytes allocated and still in use
+	uint64	heap_sys;	// bytes obtained from system
+	uint64	heap_idle;	// bytes in idle spans
+	uint64	heap_inuse;	// bytes in non-idle spans
+	uint64	heap_objects;	// total number of allocated objects
+
+	// Statistics about allocation of low-level fixed-size structures.
+	// Protected by FixAlloc locks.
+	uint64	stacks_inuse;	// bootstrap stacks
+	uint64	stacks_sys;
+	uint64	mspan_inuse;	// MSpan structures
+	uint64	mspan_sys;
+	uint64	mcache_inuse;	// MCache structures
+	uint64	mcache_sys;
+	uint64	heapmap_sys;	// heap map
+	uint64	buckhash_sys;	// profiling bucket hash table
+	
+	// Statistics about garbage collector.
+	// Protected by stopping the world during GC.
+	uint64	next_gc;	// next GC (in heap_alloc time)
+	uint64	pause_total_ns;
+	uint64	pause_ns[256];
+	uint32	numgc;
+	bool	enablegc;
+	bool	debuggc;
+	
+	// Statistics about allocation size classes.
+	// No locking; approximate.
+	struct {
+		uint32 size;
+		uint64 nmalloc;
+		uint64 nfree;
+	} by_size[NumSizeClasses];
+};
+
+extern MStats mstats
+  __asm__ ("libgo_runtime.runtime.MemStats");
+
+
+// Size classes.  Computed and initialized by InitSizes.
+//
+// SizeToClass(0 <= n <= MaxSmallSize) returns the size class,
+//	1 <= sizeclass < NumSizeClasses, for n.
+//	Size class 0 is reserved to mean "not small".
+//
+// class_to_size[i] = largest size in class i
+// class_to_allocnpages[i] = number of pages to allocate when
+// 	making new objects in class i
+// class_to_transfercount[i] = number of objects to move when
+//	taking a bunch of objects out of the central lists
+//	and putting them in the thread free list.
+
+int32	runtime_SizeToClass(int32);
+extern	int32	runtime_class_to_size[NumSizeClasses];
+extern	int32	runtime_class_to_allocnpages[NumSizeClasses];
+extern	int32	runtime_class_to_transfercount[NumSizeClasses];
+extern	void	runtime_InitSizes(void);
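+
+// A sketch of how the small-object path uses these tables (mirroring
+// runtime_mallocgc in malloc.goc):
+//
+//	sizeclass = runtime_SizeToClass(size);		// smallest class holding size
+//	size = runtime_class_to_size[sizeclass];	// rounded allocation size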
+
+
+// Per-thread (in Go, per-M) cache for small objects.
+// No locking needed because it is per-thread (per-M).
+typedef struct MCacheList MCacheList;
+struct MCacheList
+{
+	MLink *list;
+	uint32 nlist;
+	uint32 nlistmin;
+};
+
+struct MCache
+{
+	MCacheList list[NumSizeClasses];
+	uint64 size;
+	int64 local_alloc;	// bytes allocated (or freed) since last lock of heap
+	int64 local_objects;	// objects allocated (or freed) since last lock of heap
+	int32 next_sample;	// trigger heap sample after allocating this many bytes
+};
+
+void*	runtime_MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed);
+void	runtime_MCache_Free(MCache *c, void *p, int32 sizeclass, uintptr size);
+void	runtime_MCache_ReleaseAll(MCache *c);
+
+// An MSpan is a run of pages.
+enum
+{
+	MSpanInUse = 0,
+	MSpanFree,
+	MSpanListHead,
+	MSpanDead,
+};
+struct MSpan
+{
+	MSpan	*next;		// in a span linked list
+	MSpan	*prev;		// in a span linked list
+	MSpan	*allnext;		// in the list of all spans
+	PageID	start;		// starting page number
+	uintptr	npages;		// number of pages in span
+	MLink	*freelist;	// list of free objects
+	uint32	ref;		// number of allocated objects in this span
+	uint32	sizeclass;	// size class
+	uint32	state;		// MSpanInUse etc
+	union {
+		uint32	*gcref;	// sizeclass > 0
+		uint32	gcref0;	// sizeclass == 0
+	};
+};
+
+void	runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages);
+
+// Every MSpan is in one doubly-linked list,
+// either one of the MHeap's free lists or one of the
+// MCentral's span lists.  We use empty MSpan structures as list heads.
+void	runtime_MSpanList_Init(MSpan *list);
+bool	runtime_MSpanList_IsEmpty(MSpan *list);
+void	runtime_MSpanList_Insert(MSpan *list, MSpan *span);
+void	runtime_MSpanList_Remove(MSpan *span);	// from whatever list it is in
+
+
+// Central list of free objects of a given size.
+struct MCentral
+{
+	Lock;
+	int32 sizeclass;
+	MSpan nonempty;
+	MSpan empty;
+	int32 nfree;
+};
+
+void	runtime_MCentral_Init(MCentral *c, int32 sizeclass);
+int32	runtime_MCentral_AllocList(MCentral *c, int32 n, MLink **first);
+void	runtime_MCentral_FreeList(MCentral *c, int32 n, MLink *first);
+
+// Main malloc heap.
+// The heap itself is the "free[]" and "large" arrays,
+// but all the other global data is here too.
+struct MHeap
+{
+	Lock;
+	MSpan free[MaxMHeapList];	// free lists of given length
+	MSpan large;			// free lists length >= MaxMHeapList
+	MSpan *allspans;
+
+	// span lookup
+	MHeapMap map;
+
+	// range of addresses we might see in the heap
+	byte *min;
+	byte *max;
+	
+	// central free lists for small size classes.
+	// the union makes sure that the MCentrals are
+	// spaced 64 bytes apart, so that each MCentral.Lock
+	// gets its own cache line.
+	union {
+		MCentral;
+		byte pad[64];
+	} central[NumSizeClasses];
+
+	FixAlloc spanalloc;	// allocator for Span*
+	FixAlloc cachealloc;	// allocator for MCache*
+};
+extern MHeap runtime_mheap;
+
+void	runtime_MHeap_Init(MHeap *h, void *(*allocator)(uintptr));
+MSpan*	runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct);
+void	runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct);
+MSpan*	runtime_MHeap_Lookup(MHeap *h, PageID p);
+MSpan*	runtime_MHeap_LookupMaybe(MHeap *h, PageID p);
+void	runtime_MGetSizeClassInfo(int32 sizeclass, int32 *size, int32 *npages, int32 *nobj);
+
+void*	runtime_mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed);
+int32	runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **s, uint32 **ref);
+void	runtime_gc(int32 force);
+
+void*	runtime_SysAlloc(uintptr);
+void	runtime_SysUnused(void*, uintptr);
+void	runtime_SysFree(void*, uintptr);
+
+enum
+{
+	RefcountOverhead = 4,	// one uint32 per object
+
+	RefFree = 0,	// must be zero
+	RefStack,		// stack segment - don't free and don't scan for pointers
+	RefNone,		// no references
+	RefSome,		// some references
+	RefNoPointers = 0x80000000U,	// flag - no pointers here
+	RefHasFinalizer = 0x40000000U,	// flag - has finalizer
+	RefProfiled = 0x20000000U,	// flag - is in profiling table
+	RefNoProfiling = 0x10000000U,	// flag - must not profile
+	RefFlags = 0xFFFF0000U,
+};
+
+void	runtime_Mprof_Init(void);
+void	runtime_MProf_Malloc(void*, uintptr);
+void	runtime_MProf_Free(void*, uintptr);
+void	runtime_MProf_Mark(void (*scan)(byte *, int64));
+
+// Malloc profiling settings.
+// Must match definition in extern.go.
+enum {
+	MProf_None = 0,
+	MProf_Sample = 1,
+	MProf_All = 2,
+};
+extern int32 runtime_malloc_profile;
+
+typedef struct Finalizer Finalizer;
+struct Finalizer
+{
+	Finalizer *next;	// for use by caller of getfinalizer
+	void (*fn)(void*);
+	void *arg;
+	const struct __go_func_type *ft;
+};
+
+Finalizer*	runtime_getfinalizer(void*, bool);
diff --git a/libgo/runtime/map.goc b/libgo/runtime/map.goc
new file mode 100644
index 000000000..d6308cbd3
--- /dev/null
+++ b/libgo/runtime/map.goc
@@ -0,0 +1,69 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+#include "map.h"
+#define nil NULL
+
+typedef unsigned char byte;
+typedef _Bool bool;
+
+typedef struct __go_map hmap;
+typedef struct __go_hash_iter hiter;
+
+/* Access a value in a map, returning a value and a presence indicator.  */
+
+func mapaccess2(h *hmap, key *byte, val *byte) (present bool) {
+	byte *mapval;
+	size_t valsize;
+
+	mapval = __go_map_index(h, key, 0);
+	valsize = h->__descriptor->__map_descriptor->__val_type->__size;
+	if (mapval == nil) {
+		__builtin_memset(val, 0, valsize);
+		present = 0;
+	} else {
+		__builtin_memcpy(val, mapval, valsize);
+		present = 1;
+	}
+}
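+
+/* Illustrative sketch (editor's addition, not upstream code): this is
+   the runtime half of the two-result map read, roughly
+
+	v, ok := m[k]   =>   ok = mapaccess2(m, &k, &v)
+
+   so a missing key zero-fills *val and reports present == 0.  */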
+
+/* Optionally assign a value to a map (m[k] = v, p).  */
+
+func mapassign2(h *hmap, key *byte, val *byte, p bool) {
+	if (!p) {
+		__go_map_delete(h, key);
+	} else {
+		byte *mapval;
+		size_t valsize;
+
+		mapval = __go_map_index(h, key, 1);
+		valsize = h->__descriptor->__map_descriptor->__val_type->__size;
+		__builtin_memcpy(mapval, val, valsize);
+	}
+}
+
+/* Initialize a range over a map.  */
+
+func mapiterinit(h *hmap, it *hiter) {
+	__go_mapiterinit(h, it);
+}
+
+/* Move to the next iteration, updating *HITER.  */
+
+func mapiternext(it *hiter) {
+	__go_mapiternext(it);
+}
+
+/* Get the key of the current iteration.  */
+
+func mapiter1(it *hiter, key *byte) {
+	__go_mapiter1(it, key);
+}
+
+/* Get the key and value of the current iteration.  */
+
+func mapiter2(it *hiter, key *byte, val *byte) {
+	__go_mapiter2(it, key, val);
+}
diff --git a/libgo/runtime/map.h b/libgo/runtime/map.h
new file mode 100644
index 000000000..a0c834a54
--- /dev/null
+++ b/libgo/runtime/map.h
@@ -0,0 +1,86 @@
+/* map.h -- the map type for Go.
+
+   Copyright 2009, 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+
+#include "go-type.h"
+
+/* A map descriptor is what we need to manipulate the map.  This is
+   constant for a given map type.  */
+
+struct __go_map_descriptor
+{
+  /* A pointer to the type descriptor for the type of the map itself.  */
+  const struct __go_map_type *__map_descriptor;
+
+  /* A map entry is a struct with three fields:
+       map_entry_type *next_entry;
+       key_type key;
+       value_type value;
+     This is the size of that struct.  */
+  size_t __entry_size;
+
+  /* The offset of the key field in a map entry struct.  */
+  size_t __key_offset;
+
+  /* The offset of the value field in a map entry struct (the value
+     field immediately follows the key field, but there may be some
+     bytes inserted for alignment).  */
+  size_t __val_offset;
+};
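+
+/* Illustrative sketch (editor's addition, not upstream code): given an
+   entry pointer E and a descriptor D, the fields live at fixed offsets:
+
+	void *next = *(void **) E;
+	void *key  = (char *) E + D->__key_offset;
+	void *val  = (char *) E + D->__val_offset;
+*/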
+
+struct __go_map
+{
+  /* The constant descriptor for this map.  */
+  const struct __go_map_descriptor *__descriptor;
+
+  /* The number of elements in the hash table.  */
+  size_t __element_count;
+
+  /* The number of entries in the __buckets array.  */
+  size_t __bucket_count;
+
+  /* Each bucket is a pointer to a linked list of map entries.  */
+  void **__buckets;
+};
+
+/* For a map iteration the compiled code will use a pointer to an
+   iteration structure.  The iteration structure will be allocated on
+   the stack.  The Go code must allocate at least enough space.  */
+
+struct __go_hash_iter
+{
+  /* A pointer to the current entry.  This will be set to NULL when
+     the range has completed.  The Go code will test this field, so
+     it must be the first one in the structure.  */
+  const void *entry;
+  /* The map we are iterating over.  */
+  const struct __go_map *map;
+  /* A pointer to the next entry in the current bucket.  This permits
+     deleting the current entry.  This will be NULL when we have seen
+     all the entries in the current bucket.  */
+  const void *next_entry;
+  /* The bucket index of the current and next entry.  */
+  size_t bucket;
+};
+
+extern struct __go_map *__go_new_map (const struct __go_map_descriptor *,
+				      size_t);
+
+extern unsigned long __go_map_next_prime (unsigned long);
+
+extern void *__go_map_index (struct __go_map *, const void *, _Bool);
+
+extern void __go_map_delete (struct __go_map *, const void *);
+
+extern void __go_mapiterinit (const struct __go_map *, struct __go_hash_iter *);
+
+extern void __go_mapiternext (struct __go_hash_iter *);
+
+extern void __go_mapiter1 (struct __go_hash_iter *it, unsigned char *key);
+
+extern void __go_mapiter2 (struct __go_hash_iter *it, unsigned char *key,
+			   unsigned char *val);
diff --git a/libgo/runtime/mcache.c b/libgo/runtime/mcache.c
new file mode 100644
index 000000000..ce6575758
--- /dev/null
+++ b/libgo/runtime/mcache.c
@@ -0,0 +1,131 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Per-thread (in Go, per-M) malloc cache for small objects.
+//
+// See malloc.h for an overview.
+
+#include "runtime.h"
+#include "malloc.h"
+
+void*
+runtime_MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed)
+{
+	MCacheList *l;
+	MLink *first, *v;
+	int32 n;
+
+	// Allocate from list.
+	l = &c->list[sizeclass];
+	if(l->list == nil) {
+		// Replenish using central lists.
+		n = runtime_MCentral_AllocList(&runtime_mheap.central[sizeclass],
+			runtime_class_to_transfercount[sizeclass], &first);
+		l->list = first;
+		l->nlist = n;
+		c->size += n*size;
+	}
+	v = l->list;
+	l->list = v->next;
+	l->nlist--;
+	if(l->nlist < l->nlistmin)
+		l->nlistmin = l->nlist;
+	c->size -= size;
+
+	// v is zeroed except for the link pointer
+	// that we used above; zero that.
+	v->next = nil;
+	if(zeroed) {
+		// block is zeroed iff second word is zero ...
+		if(size > sizeof(uintptr) && ((uintptr*)v)[1] != 0)
+			runtime_memclr((byte*)v, size);
+		else {
+			// ... except for the link pointer
+			// that we used above; zero that.
+			v->next = nil;
+		}
+	}
+	c->local_alloc += size;
+	c->local_objects++;
+	return v;
+}
+
+// Take n elements off l and return them to the central free list.
+static void
+ReleaseN(MCache *c, MCacheList *l, int32 n, int32 sizeclass)
+{
+	MLink *first, **lp;
+	int32 i;
+
+	// Cut off first n elements.
+	first = l->list;
+	lp = &l->list;
+	for(i=0; i<n; i++)
+		lp = &(*lp)->next;
+	l->list = *lp;
+	*lp = nil;
+	l->nlist -= n;
+	if(l->nlist < l->nlistmin)
+		l->nlistmin = l->nlist;
+	c->size -= n*runtime_class_to_size[sizeclass];
+
+	// Return them to central free list.
+	runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass], n, first);
+}
+
+void
+runtime_MCache_Free(MCache *c, void *v, int32 sizeclass, uintptr size)
+{
+	int32 i, n;
+	MCacheList *l;
+	MLink *p;
+
+	// Put back on list.
+	l = &c->list[sizeclass];
+	p = v;
+	p->next = l->list;
+	l->list = p;
+	l->nlist++;
+	c->size += size;
+	c->local_alloc -= size;
+	c->local_objects--;
+
+	if(l->nlist >= MaxMCacheListLen) {
+		// Release a chunk back.
+		ReleaseN(c, l, runtime_class_to_transfercount[sizeclass], sizeclass);
+	}
+
+	if(c->size >= MaxMCacheSize) {
+		// Scavenge.
+		for(i=0; i<NumSizeClasses; i++) {
+			l = &c->list[i];
+			n = l->nlistmin;
+
+			// n is the minimum number of elements we've seen on
+			// the list since the last scavenge.  If n > 0, it means that
+			// we could have gotten by with n fewer elements
+			// without needing to consult the central free list.
+			// Move toward that situation by releasing n/2 of them.
+			if(n > 0) {
+				if(n > 1)
+					n /= 2;
+				ReleaseN(c, l, n, i);
+			}
+			l->nlistmin = l->nlist;
+		}
+	}
+}
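+
+// Worked example (editor's addition): if l->nlistmin settled at 8, the
+// last window of allocations never dipped into the central list even
+// with 8 spare elements; releasing n/2 == 4 of them shrinks the cache
+// gradually instead of giving back the whole margin at once.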
+
+void
+runtime_MCache_ReleaseAll(MCache *c)
+{
+	int32 i;
+	MCacheList *l;
+
+	for(i=0; i<NumSizeClasses; i++) {
+		l = &c->list[i];
+		ReleaseN(c, l, l->nlist, i);
+		l->nlistmin = 0;
+	}
+}
diff --git a/libgo/runtime/mcentral.c b/libgo/runtime/mcentral.c
new file mode 100644
index 000000000..81e54b07d
--- /dev/null
+++ b/libgo/runtime/mcentral.c
@@ -0,0 +1,209 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Central free lists.
+//
+// See malloc.h for an overview.
+//
+// The MCentral doesn't actually contain the list of free objects; the MSpan does.
+// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
+// and those that are completely allocated (c->empty).
+//
+// TODO(rsc): tcmalloc uses a "transfer cache" to split the list
+// into sections of class_to_transfercount[sizeclass] objects
+// so that it is faster to move those lists between MCaches and MCentrals.
+
+#include "runtime.h"
+#include "malloc.h"
+
+static bool MCentral_Grow(MCentral *c);
+static void* MCentral_Alloc(MCentral *c);
+static void MCentral_Free(MCentral *c, void *v);
+
+// Initialize a single central free list.
+void
+runtime_MCentral_Init(MCentral *c, int32 sizeclass)
+{
+	runtime_initlock(c);
+	c->sizeclass = sizeclass;
+	runtime_MSpanList_Init(&c->nonempty);
+	runtime_MSpanList_Init(&c->empty);
+}
+
+// Allocate up to n objects from the central free list.
+// Return the number of objects allocated.
+// The objects are linked together by their first words.
+// On return, *pfirst points at the first object in the list.
+int32
+runtime_MCentral_AllocList(MCentral *c, int32 n, MLink **pfirst)
+{
+	MLink *first, *last, *v;
+	int32 i;
+
+	runtime_lock(c);
+	// Replenish central list if empty.
+	if(runtime_MSpanList_IsEmpty(&c->nonempty)) {
+		if(!MCentral_Grow(c)) {
+			runtime_unlock(c);
+			*pfirst = nil;
+			return 0;
+		}
+	}
+
+	// Copy from list, up to n.
+	// First one is guaranteed to work, because we just grew the list.
+	first = MCentral_Alloc(c);
+	last = first;
+	for(i=1; i<n && (v = MCentral_Alloc(c)) != nil; i++) {
+		last->next = v;
+		last = v;
+	}
+	last->next = nil;
+	c->nfree -= i;
+
+	runtime_unlock(c);
+	*pfirst = first;
+	return i;
+}
+
+// Helper: allocate one object from the central free list.
+static void*
+MCentral_Alloc(MCentral *c)
+{
+	MSpan *s;
+	MLink *v;
+
+	if(runtime_MSpanList_IsEmpty(&c->nonempty))
+		return nil;
+	s = c->nonempty.next;
+	s->ref++;
+	v = s->freelist;
+	s->freelist = v->next;
+	if(s->freelist == nil) {
+		runtime_MSpanList_Remove(s);
+		runtime_MSpanList_Insert(&c->empty, s);
+	}
+	return v;
+}
+
+// Free n objects back into the central free list.
+// The objects must be linked together by their first words,
+// with start pointing at the first object in the list.
+void
+runtime_MCentral_FreeList(MCentral *c, int32 n, MLink *start)
+{
+	MLink *v, *next;
+
+	// Assume next == nil marks end of list.
+	// n and end would be useful if we implemented
+	// the transfer cache optimization in the TODO above.
+	USED(n);
+
+	runtime_lock(c);
+	for(v=start; v; v=next) {
+		next = v->next;
+		MCentral_Free(c, v);
+	}
+	runtime_unlock(c);
+}
+
+// Helper: free one object back into the central free list.
+static void
+MCentral_Free(MCentral *c, void *v)
+{
+	MSpan *s;
+	PageID page;
+	MLink *p, *next;
+	int32 size;
+
+	// Find span for v.
+	page = (uintptr)v >> PageShift;
+	s = runtime_MHeap_Lookup(&runtime_mheap, page);
+	if(s == nil || s->ref == 0)
+		runtime_throw("invalid free");
+
+	// Move to nonempty if necessary.
+	if(s->freelist == nil) {
+		runtime_MSpanList_Remove(s);
+		runtime_MSpanList_Insert(&c->nonempty, s);
+	}
+
+	// Add v back to s's free list.
+	p = v;
+	p->next = s->freelist;
+	s->freelist = p;
+	c->nfree++;
+
+	// If s is completely freed, return it to the heap.
+	if(--s->ref == 0) {
+		size = runtime_class_to_size[c->sizeclass];
+		runtime_MSpanList_Remove(s);
+		// The second word of each freed block indicates
+		// whether it needs to be zeroed.  The first word
+		// is the link pointer and must always be cleared.
+		for(p=s->freelist; p; p=next) {
+			next = p->next;
+			if(size > (int32)sizeof(uintptr) && ((uintptr*)p)[1] != 0)
+				runtime_memclr((byte*)p, size);
+			else
+				p->next = nil;
+		}
+		s->freelist = nil;
+		c->nfree -= (s->npages << PageShift) / size;
+		runtime_unlock(c);
+		runtime_MHeap_Free(&runtime_mheap, s, 0);
+		runtime_lock(c);
+	}
+}
+
+void
+runtime_MGetSizeClassInfo(int32 sizeclass, int32 *sizep, int32 *npagesp, int32 *nobj)
+{
+	int32 size;
+	int32 npages;
+
+	npages = runtime_class_to_allocnpages[sizeclass];
+	size = runtime_class_to_size[sizeclass];
+	*npagesp = npages;
+	*sizep = size;
+	*nobj = (npages << PageShift) / (size + RefcountOverhead);
+}
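+
+// Worked example (editor's addition): for a hypothetical class with
+// size 512 and npages 1 (PageShift 12),
+//	nobj = 4096 / (512 + RefcountOverhead) = 4096 / 516 = 7
+// so each such span carves into 7 objects plus their 7 gcref words.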
+
+// Fetch a new span from the heap and
+// carve into objects for the free list.
+static bool
+MCentral_Grow(MCentral *c)
+{
+	int32 i, n, npages, size;
+	MLink **tailp, *v;
+	byte *p;
+	MSpan *s;
+
+	runtime_unlock(c);
+	runtime_MGetSizeClassInfo(c->sizeclass, &size, &npages, &n);
+	s = runtime_MHeap_Alloc(&runtime_mheap, npages, c->sizeclass, 0);
+	if(s == nil) {
+		// TODO(rsc): Log out of memory
+		runtime_lock(c);
+		return false;
+	}
+
+	// Carve span into sequence of blocks.
+	tailp = &s->freelist;
+	p = (byte*)(s->start << PageShift);
+	s->gcref = (uint32*)(p + size*n);
+	for(i=0; i<n; i++) {
+		v = (MLink*)p;
+		*tailp = v;
+		tailp = &v->next;
+		p += size;
+	}
+	*tailp = nil;
+
+	runtime_lock(c);
+	c->nfree += n;
+	runtime_MSpanList_Insert(&c->nonempty, s);
+	return true;
+}
diff --git a/libgo/runtime/mem.c b/libgo/runtime/mem.c
new file mode 100644
index 000000000..4d6c74209
--- /dev/null
+++ b/libgo/runtime/mem.c
@@ -0,0 +1,76 @@
+#include <errno.h>
+
+#include "runtime.h"
+#include "malloc.h"
+
+#ifndef MAP_ANON
+#ifdef MAP_ANONYMOUS
+#define MAP_ANON MAP_ANONYMOUS
+#else
+#define USE_DEV_ZERO
+#define MAP_ANON 0
+#endif
+#endif
+
+#ifdef USE_DEV_ZERO
+static int dev_zero = -1;
+#endif
+
+void*
+runtime_SysAlloc(uintptr n)
+{
+	void *p;
+	int fd = -1;
+
+	mstats.sys += n;
+
+#ifdef USE_DEV_ZERO
+	if (dev_zero == -1) {
+		dev_zero = open("/dev/zero", O_RDONLY);
+		if (dev_zero < 0) {
+			printf("open /dev/zero: errno=%d\n", errno);
+			exit(2);
+		}
+	}
+	fd = dev_zero;
+#endif
+
+	p = runtime_mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, fd, 0);
+	if (p == MAP_FAILED) {
+		if(errno == EACCES) {
+			printf("mmap: access denied\n");
+			printf("If you're running SELinux, enable execmem for this process.\n");
+		} else {
+			printf("mmap: errno=%d\n", errno);
+		}
+		exit(2);
+	}
+	return p;
+}
+
+void
+runtime_SysUnused(void *v, uintptr n)
+{
+	USED(v);
+	USED(n);
+	// TODO(rsc): call madvise MADV_DONTNEED
+}
+
+void
+runtime_SysFree(void *v, uintptr n)
+{
+	mstats.sys -= n;
+	runtime_munmap(v, n);
+}
+
+void
+runtime_SysMemInit(void)
+{
+	// Code generators assume that references to addresses
+	// on the first page will fault.  Map the page explicitly with
+	// no permissions, to head off possible bugs like the system
+	// allocating that page as the virtual address space fills.
+	// Ignore any error, since other systems might be smart
+	// enough to never allow anything there.
+	runtime_mmap(nil, 4096, PROT_NONE, MAP_FIXED|MAP_ANON|MAP_PRIVATE, -1, 0);
+}
diff --git a/libgo/runtime/mem_posix_memalign.c b/libgo/runtime/mem_posix_memalign.c
new file mode 100644
index 000000000..3855dfcf1
--- /dev/null
+++ b/libgo/runtime/mem_posix_memalign.c
@@ -0,0 +1,38 @@
+#include <errno.h>
+
+#include "runtime.h"
+#include "malloc.h"
+
+void*
+runtime_SysAlloc(uintptr n)
+{
+	void *p;
+
+	mstats.sys += n;
+	errno = posix_memalign(&p, PageSize, n);
+	if (errno > 0) {
+		perror("posix_memalign");
+		exit(2);
+	}
+	return p;
+}
+
+void
+runtime_SysUnused(void *v, uintptr n)
+{
+	USED(v);
+	USED(n);
+	// TODO(rsc): call madvise MADV_DONTNEED
+}
+
+void
+runtime_SysFree(void *v, uintptr n)
+{
+	mstats.sys -= n;
+	free(v);
+}
+
+void
+runtime_SysMemInit(void)
+{
+}
diff --git a/libgo/runtime/mfinal.c b/libgo/runtime/mfinal.c
new file mode 100644
index 000000000..23c0d7a16
--- /dev/null
+++ b/libgo/runtime/mfinal.c
@@ -0,0 +1,217 @@
+// Copyright 2010 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "malloc.h"
+
+static Lock finlock;
+
+void
+runtime_initfintab()
+{
+	runtime_initlock(&finlock);
+}
+
+// Finalizer hash table.  Direct hash, linear scan, at most 3/4 full.
+// Table size is a power of 3 so that the hash can be key % max.
+// Key[i] == (void*)-1 denotes free but formerly occupied entry
+// (doesn't stop the linear scan).
+// Key and val are separate tables because the garbage collector
+// must be instructed to ignore the pointers in key but follow the
+// pointers in val.
+typedef struct Fintab Fintab;
+struct Fintab
+{
+	void **key;
+	Finalizer **val;
+	int32 nkey;	// number of non-nil entries in key
+	int32 ndead;	// number of dead (-1) entries in key
+	int32 max;	// size of key, val allocations
+};
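+
+// Illustrative sketch (editor's addition): a lookup for key k probes
+// slots (uintptr)k % max, +1, +2, ... until it hits k or nil; a
+// (void*)-1 tombstone keeps the probe chain alive for lookups while
+// remaining reusable by addfintab.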
+
+static void
+addfintab(Fintab *t, void *k, Finalizer *v)
+{
+	int32 i, j;
+
+	i = (uintptr)k % (uintptr)t->max;
+	for(j=0; j<t->max; j++) {
+		if(t->key[i] == nil) {
+			t->nkey++;
+			goto ret;
+		}
+		if(t->key[i] == (void*)-1) {
+			t->ndead--;
+			goto ret;
+		}
+		if(++i == t->max)
+			i = 0;
+	}
+
+	// cannot happen - table is known to be non-full
+	runtime_throw("finalizer table inconsistent");
+
+ret:
+	t->key[i] = k;
+	t->val[i] = v;
+}
+
+static Finalizer*
+lookfintab(Fintab *t, void *k, bool del)
+{
+	int32 i, j;
+	Finalizer *v;
+
+	if(t->max == 0)
+		return nil;
+	i = (uintptr)k % (uintptr)t->max;
+	for(j=0; j<t->max; j++) {
+		if(t->key[i] == nil)
+			return nil;
+		if(t->key[i] == k) {
+			v = t->val[i];
+			if(del) {
+				t->key[i] = (void*)-1;
+				t->val[i] = nil;
+				t->ndead++;
+			}
+			return v;
+		}
+		if(++i == t->max)
+			i = 0;
+	}
+
+	// cannot happen - table is known to be non-full
+	runtime_throw("finalizer table inconsistent");
+	return nil;
+}
+
+static Fintab fintab;
+
+// add finalizer; caller is responsible for making sure not already in table
+void
+runtime_addfinalizer(void *p, void (*f)(void*), const struct __go_func_type *ft)
+{
+	Fintab newtab;
+	int32 i;
+	uint32 *ref;
+	byte *base;
+	Finalizer *e;
+	
+	e = nil;
+	if(f != nil) {
+		e = runtime_mal(sizeof *e);
+		e->fn = f;
+		e->ft = ft;
+	}
+
+	if(!__sync_bool_compare_and_swap(&m->holds_finlock, 0, 1))
+		runtime_throw("finalizer deadlock");
+
+	runtime_lock(&finlock);
+	if(!runtime_mlookup(p, &base, nil, nil, &ref) || p != base) {
+		runtime_unlock(&finlock);
+		__sync_bool_compare_and_swap(&m->holds_finlock, 1, 0);
+		runtime_throw("addfinalizer on invalid pointer");
+	}
+	if(f == nil) {
+		if(*ref & RefHasFinalizer) {
+			lookfintab(&fintab, p, 1);
+			*ref &= ~RefHasFinalizer;
+		}
+		goto unlock;
+	}
+
+	if(*ref & RefHasFinalizer) {
+		runtime_unlock(&finlock);
+		__sync_bool_compare_and_swap(&m->holds_finlock, 1, 0);
+		runtime_throw("double finalizer");
+	}
+	*ref |= RefHasFinalizer;
+
+	if(fintab.nkey >= fintab.max/2+fintab.max/4) {
+		// keep table at most 3/4 full:
+		// allocate new table and rehash.
+
+		runtime_memclr((byte*)&newtab, sizeof newtab);
+		newtab.max = fintab.max;
+		if(newtab.max == 0)
+			newtab.max = 3*3*3;
+		else if(fintab.ndead < fintab.nkey/2) {
+			// grow table if not many dead values.
+			// otherwise just rehash into table of same size.
+			newtab.max *= 3;
+		}
+
+		newtab.key = runtime_mallocgc(newtab.max*sizeof newtab.key[0], RefNoPointers, 0, 1);
+		newtab.val = runtime_mallocgc(newtab.max*sizeof newtab.val[0], 0, 0, 1);
+
+		for(i=0; i<fintab.max; i++) {
+			void *k;
+
+			k = fintab.key[i];
+			if(k != nil && k != (void*)-1)
+				addfintab(&newtab, k, fintab.val[i]);
+		}
+		runtime_free(fintab.key);
+		runtime_free(fintab.val);
+		fintab = newtab;
+	}
+
+	addfintab(&fintab, p, e);
+ unlock:
+	runtime_unlock(&finlock);
+
+	__sync_bool_compare_and_swap(&m->holds_finlock, 1, 0);
+
+	if(__sync_bool_compare_and_swap(&m->gcing_for_finlock, 1, 0)) {
+		__go_run_goroutine_gc(200);
+	}
+}
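+
+// Worked example (editor's addition): the table sizes run 27, 81, 243,
+// ...; with max == 27 the 3/4 rule fires once nkey reaches
+// 27/2 + 27/4 == 13 + 6 == 19 entries (integer division), at which
+// point the table is rehashed and possibly tripled.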
+
+// get finalizer; if del, delete finalizer.
+// caller is responsible for updating RefHasFinalizer bit.
+Finalizer*
+runtime_getfinalizer(void *p, bool del)
+{
+	Finalizer *f;
+	
+	if(!__sync_bool_compare_and_swap(&m->holds_finlock, 0, 1))
+		runtime_throw("finalizer deadlock");
+
+	runtime_lock(&finlock);
+	f = lookfintab(&fintab, p, del);
+	runtime_unlock(&finlock);
+
+	__sync_bool_compare_and_swap(&m->holds_finlock, 1, 0);
+	if(__sync_bool_compare_and_swap(&m->gcing_for_finlock, 1, 0)) {
+		__go_run_goroutine_gc(201);
+	}
+
+	return f;
+}
+
+void
+runtime_walkfintab(void (*fn)(void*), void (*scan)(byte *, int64))
+{
+	void **key;
+	void **ekey;
+
+	if(!__sync_bool_compare_and_swap(&m->holds_finlock, 0, 1))
+		runtime_throw("finalizer deadlock");
+
+	scan((byte*)&fintab, sizeof fintab);
+	runtime_lock(&finlock);
+	key = fintab.key;
+	ekey = key + fintab.max;
+	for(; key < ekey; key++)
+		if(*key != nil && *key != ((void*)-1))
+			fn(*key);
+	runtime_unlock(&finlock);
+
+	__sync_bool_compare_and_swap(&m->holds_finlock, 1, 0);
+	if(__sync_bool_compare_and_swap(&m->gcing_for_finlock, 1, 0)) {
+		runtime_throw("walkfintab not called from gc");
+	}
+}
diff --git a/libgo/runtime/mfixalloc.c b/libgo/runtime/mfixalloc.c
new file mode 100644
index 000000000..c05583dc2
--- /dev/null
+++ b/libgo/runtime/mfixalloc.c
@@ -0,0 +1,62 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Fixed-size object allocator.  Returned memory is not zeroed.
+//
+// See malloc.h for overview.
+
+#include "runtime.h"
+#include "malloc.h"
+
+// Initialize f to allocate objects of the given size,
+// using the allocator to obtain chunks of memory.
+void
+runtime_FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg)
+{
+	f->size = size;
+	f->alloc = alloc;
+	f->first = first;
+	f->arg = arg;
+	f->list = nil;
+	f->chunk = nil;
+	f->nchunk = 0;
+	f->inuse = 0;
+	f->sys = 0;
+}
+
+void*
+runtime_FixAlloc_Alloc(FixAlloc *f)
+{
+	void *v;
+
+	if(f->list) {
+		v = f->list;
+		f->list = *(void**)f->list;
+		f->inuse += f->size;
+		return v;
+	}
+	if(f->nchunk < f->size) {
+		f->sys += FixAllocChunk;
+		f->chunk = f->alloc(FixAllocChunk);
+		if(f->chunk == nil)
+			runtime_throw("out of memory (FixAlloc)");
+		f->nchunk = FixAllocChunk;
+	}
+	v = f->chunk;
+	if(f->first)
+		f->first(f->arg, v);
+	f->chunk += f->size;
+	f->nchunk -= f->size;
+	f->inuse += f->size;
+	return v;
+}
+
+void
+runtime_FixAlloc_Free(FixAlloc *f, void *p)
+{
+	f->inuse -= f->size;
+	*(void**)p = f->list;
+	f->list = p;
+}
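+
+// Illustrative note (editor's addition): the free list threads through
+// the freed objects themselves -- Free stores the old list head in the
+// first word of p -- so FixAlloc needs size >= sizeof(void*) and no
+// bookkeeping allocations of its own.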
+
diff --git a/libgo/runtime/mgc0.c b/libgo/runtime/mgc0.c
new file mode 100644
index 000000000..f2703ab02
--- /dev/null
+++ b/libgo/runtime/mgc0.c
@@ -0,0 +1,392 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Garbage collector -- step 0.
+//
+// Stop the world, mark and sweep garbage collector.
+// NOT INTENDED FOR PRODUCTION USE.
+//
+// A mark and sweep collector provides a way to exercise
+// and test the memory allocator and the stack walking machinery
+// without also needing to get reference counting
+// exactly right.
+
+#include "runtime.h"
+#include "malloc.h"
+
+enum {
+	Debug = 0
+};
+
+typedef struct BlockList BlockList;
+struct BlockList
+{
+	byte *obj;
+	uintptr size;
+};
+
+static bool finstarted;
+static pthread_mutex_t finqlock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t finqcond = PTHREAD_COND_INITIALIZER;
+static Finalizer *finq;
+static int32 fingwait;
+static BlockList *bl, *ebl;
+
+static void runfinq(void*);
+
+enum {
+	PtrSize = sizeof(void*)
+};
+
+static void
+scanblock(byte *b, int64 n)
+{
+	int32 off;
+	void *obj;
+	uintptr size;
+	uint32 *refp, ref;
+	void **vp;
+	int64 i;
+	BlockList *w;
+
+	w = bl;
+	w->obj = b;
+	w->size = n;
+	w++;
+
+	while(w > bl) {
+		w--;
+		b = w->obj;
+		n = w->size;
+
+		if(Debug > 1)
+			runtime_printf("scanblock %p %lld\n", b, (long long) n);
+		off = (uint32)(uintptr)b & (PtrSize-1);
+		if(off) {
+			b += PtrSize - off;
+			n -= PtrSize - off;
+		}
+	
+		vp = (void**)b;
+		n /= PtrSize;
+		for(i=0; i<n; i++) {
+			obj = vp[i];
+			if(obj == nil)
+				continue;
+			if(runtime_mheap.min <= (byte*)obj && (byte*)obj < runtime_mheap.max) {
+				if(runtime_mlookup(obj, (byte**)&obj, &size, nil, &refp)) {
+					ref = *refp;
+					switch(ref & ~RefFlags) {
+					case RefNone:
+						if(Debug > 1)
+							runtime_printf("found at %p: ", &vp[i]);
+						*refp = RefSome | (ref & RefFlags);
+						if(!(ref & RefNoPointers)) {
+							if(w >= ebl)
+								runtime_throw("scanblock: garbage collection stack overflow");
+							w->obj = obj;
+							w->size = size;
+							w++;
+						}
+						break;
+					}
+				}
+			}
+		}
+	}
+}
+
+static void
+markfin(void *v)
+{
+	uintptr size;
+	uint32 *refp;
+
+	size = 0;
+	refp = nil;
+	if(!runtime_mlookup(v, (byte**)&v, &size, nil, &refp) || !(*refp & RefHasFinalizer))
+		runtime_throw("mark - finalizer inconsistency");
+	
+	// do not mark the finalizer block itself.  just mark the things it points at.
+	scanblock(v, size);
+}
+
+struct root_list {
+	struct root_list *next;
+	struct root {
+		void *decl;
+		size_t size;
+	} roots[];
+};
+
+static struct root_list* roots;
+
+void
+__go_register_gc_roots (struct root_list* r)
+{
+	// FIXME: This needs locking if multiple goroutines can call
+	// dlopen simultaneously.
+	r->next = roots;
+	roots = r;
+}
+
+static void
+mark(void)
+{
+	uintptr blsize, nobj;
+	struct root_list *pl;
+
+	// Figure out how big an object stack we need.
+	// Get a new one if we need more than we have
+	// or we need significantly less than we have.
+	nobj = mstats.heap_objects;
+	if(nobj > (uintptr)(ebl - bl) || nobj < (uintptr)(ebl-bl)/4) {
+		if(bl != nil)
+			runtime_SysFree(bl, (byte*)ebl - (byte*)bl);
+		
+		// While we're allocating a new object stack,
+		// add 20% headroom and also round up to
+		// the nearest page boundary, since mmap
+		// will anyway.
+		nobj = nobj * 12/10;
+		blsize = nobj * sizeof *bl;
+		blsize = (blsize + 4095) & ~4095;
+		nobj = blsize / sizeof *bl;
+		bl = runtime_SysAlloc(blsize);
+		ebl = bl + nobj;
+	}
+
+	for(pl = roots; pl != nil; pl = pl->next) {
+		struct root* pr = &pl->roots[0];
+		while(1) {
+			void *decl = pr->decl;
+			if(decl == nil)
+				break;
+			scanblock(decl, pr->size);
+			pr++;
+		}
+	}
+
+	scanblock((byte*)&m0, sizeof m0);
+	scanblock((byte*)&finq, sizeof finq);
+	runtime_MProf_Mark(scanblock);
+
+	// mark stacks
+	__go_scanstacks(scanblock);
+
+	// mark things pointed at by objects with finalizers
+	runtime_walkfintab(markfin, scanblock);
+}
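+
+// Worked example (editor's addition): with 10000 live objects and a
+// 16-byte BlockList, mark() sizes the stack at 12000 entries (20%
+// headroom), i.e. 192000 bytes, rounded up to 47 pages == 192512
+// bytes == 12032 entries.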
+
+// free RefNone, free & queue finalizers for RefNone|RefHasFinalizer, reset RefSome
+static void
+sweepspan(MSpan *s)
+{
+	int32 n, npages, size;
+	byte *p;
+	uint32 ref, *gcrefp, *gcrefep;
+	MCache *c;
+	Finalizer *f;
+
+	p = (byte*)(s->start << PageShift);
+	if(s->sizeclass == 0) {
+		// Large block.
+		ref = s->gcref0;
+		switch(ref & ~(RefFlags^RefHasFinalizer)) {
+		case RefNone:
+			// Free large object.
+			mstats.alloc -= s->npages<<PageShift;
+			mstats.nfree++;
+			runtime_memclr(p, s->npages<<PageShift);
+			if(ref & RefProfiled)
+				runtime_MProf_Free(p, s->npages<<PageShift);
+			s->gcref0 = RefFree;
+			runtime_MHeap_Free(&runtime_mheap, s, 1);
+			break;
+		case RefNone|RefHasFinalizer:
+			f = runtime_getfinalizer(p, 1);
+			if(f == nil)
+				runtime_throw("finalizer inconsistency");
+			f->arg = p;
+			f->next = finq;
+			finq = f;
+			ref &= ~RefHasFinalizer;
+			// fall through
+		case RefSome:
+		case RefSome|RefHasFinalizer:
+			s->gcref0 = RefNone | (ref&RefFlags);
+			break;
+		}
+		return;
+	}
+
+	// Chunk full of small blocks.
+	runtime_MGetSizeClassInfo(s->sizeclass, &size, &npages, &n);
+	gcrefp = s->gcref;
+	gcrefep = s->gcref + n;
+	for(; gcrefp < gcrefep; gcrefp++, p += size) {
+		ref = *gcrefp;
+		if(ref < RefNone)	// RefFree or RefStack
+			continue;
+		switch(ref & ~(RefFlags^RefHasFinalizer)) {
+		case RefNone:
+			// Free small object.
+			if(ref & RefProfiled)
+				runtime_MProf_Free(p, size);
+			*gcrefp = RefFree;
+			c = m->mcache;
+			if(size > (int32)sizeof(uintptr))
+				((uintptr*)p)[1] = 1;	// mark as "needs to be zeroed"
+			mstats.alloc -= size;
+			mstats.nfree++;
+			mstats.by_size[s->sizeclass].nfree++;
+			runtime_MCache_Free(c, p, s->sizeclass, size);
+			break;
+		case RefNone|RefHasFinalizer:
+			f = runtime_getfinalizer(p, 1);
+			if(f == nil)
+				runtime_throw("finalizer inconsistency");
+			f->arg = p;
+			f->next = finq;
+			finq = f;
+			ref &= ~RefHasFinalizer;
+			// fall through
+		case RefSome:
+		case RefSome|RefHasFinalizer:
+			*gcrefp = RefNone | (ref&RefFlags);
+			break;
+		}
+	}
+}
+
+static void
+sweep(void)
+{
+	MSpan *s;
+
+	for(s = runtime_mheap.allspans; s != nil; s = s->allnext)
+		if(s->state == MSpanInUse)
+			sweepspan(s);
+}
+
+static pthread_mutex_t gcsema = PTHREAD_MUTEX_INITIALIZER;
+
+// Initialized from $GOGC.  GOGC=off means no gc.
+//
+// Next gc is after we've allocated an extra amount of
+// memory proportional to the amount already in use.
+// If gcpercent=100 and we're using 4M, we'll gc again
+// when we get to 8M.  This keeps the gc cost in linear
+// proportion to the allocation cost.  Adjusting gcpercent
+// just changes the linear constant (and also the amount of
+// extra memory used).
+static int32 gcpercent = -2;
+
+void
+runtime_gc(int32 force __attribute__ ((unused)))
+{
+	int64 t0, t1;
+	char *p;
+	Finalizer *fp;
+
+	// The gc is turned off (via enablegc) until
+	// the bootstrap has completed.
+	// Also, malloc gets called in the guts
+	// of a number of libraries that might be
+	// holding locks.  To avoid priority inversion
+	// problems, don't bother trying to run gc
+	// while holding a lock.  The next mallocgc
+	// without a lock will do the gc instead.
+	if(!mstats.enablegc || m->locks > 0 /* || runtime_panicking */)
+		return;
+
+	if(gcpercent == -2) {	// first time through
+		p = runtime_getenv("GOGC");
+		if(p == nil || p[0] == '\0')
+			gcpercent = 100;
+		else if(runtime_strcmp(p, "off") == 0)
+			gcpercent = -1;
+		else
+			gcpercent = runtime_atoi(p);
+	}
+	if(gcpercent < 0)
+		return;
+
+	pthread_mutex_lock(&finqlock);
+	pthread_mutex_lock(&gcsema);
+	m->locks++;	// disable gc during the mallocs in newproc
+	t0 = runtime_nanotime();
+	runtime_stoptheworld();
+	if(force || mstats.heap_alloc >= mstats.next_gc) {
+		__go_cachestats();
+		mark();
+		sweep();
+		__go_stealcache();
+		mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*gcpercent/100;
+	}
+
+	t1 = runtime_nanotime();
+	mstats.numgc++;
+	mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t1 - t0;
+	mstats.pause_total_ns += t1 - t0;
+	if(mstats.debuggc)
+		runtime_printf("pause %llu\n", (unsigned long long)t1-t0);
+	pthread_mutex_unlock(&gcsema);
+	runtime_starttheworld();
+
+	// finqlock is still held.
+	fp = finq;
+	if(fp != nil) {
+		// kick off or wake up goroutine to run queued finalizers
+		if(!finstarted) {
+			__go_go(runfinq, nil);
+			finstarted = 1;
+		}
+		else if(fingwait) {
+			fingwait = 0;
+			pthread_cond_signal(&finqcond);
+		}
+	}
+	m->locks--;
+	pthread_mutex_unlock(&finqlock);
+}
+
+static void
+runfinq(void* dummy)
+{
+	Finalizer *f, *next;
+
+	USED(dummy);
+
+	for(;;) {
+		pthread_mutex_lock(&finqlock);
+		f = finq;
+		finq = nil;
+		if(f == nil) {
+			fingwait = 1;
+			pthread_cond_wait(&finqcond, &finqlock);
+			pthread_mutex_unlock(&finqlock);
+			continue;
+		}
+		pthread_mutex_unlock(&finqlock);
+		for(; f; f=next) {
+			void *params[1];
+
+			next = f->next;
+			params[0] = &f->arg;
+			reflect_call(f->ft, (void*)f->fn, 0, params, nil);
+			f->fn = nil;
+			f->arg = nil;
+			f->next = nil;
+			runtime_free(f);
+		}
+		runtime_gc(1);	// trigger another gc to clean up the finalized objects, if possible
+	}
+}
+
+void
+__go_enable_gc()
+{
+  mstats.enablegc = 1;
+}
diff --git a/libgo/runtime/mheap.c b/libgo/runtime/mheap.c
new file mode 100644
index 000000000..52c6d8c1b
--- /dev/null
+++ b/libgo/runtime/mheap.c
@@ -0,0 +1,350 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Page heap.
+//
+// See malloc.h for overview.
+//
+// When a MSpan is in the heap free list, state == MSpanFree
+// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
+//
+// When a MSpan is allocated, state == MSpanInUse
+// and heapmap(i) == span for all s->start <= i < s->start+s->npages.
+
+#include "runtime.h"
+#include "malloc.h"
+
+static MSpan *MHeap_AllocLocked(MHeap*, uintptr, int32);
+static bool MHeap_Grow(MHeap*, uintptr);
+static void MHeap_FreeLocked(MHeap*, MSpan*);
+static MSpan *MHeap_AllocLarge(MHeap*, uintptr);
+static MSpan *BestFit(MSpan*, uintptr, MSpan*);
+
+static void
+RecordSpan(void *vh, byte *p)
+{
+	MHeap *h;
+	MSpan *s;
+
+	h = vh;
+	s = (MSpan*)p;
+	s->allnext = h->allspans;
+	h->allspans = s;
+}
+
+// Initialize the heap; fetch memory using alloc.
+void
+runtime_MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
+{
+	uint32 i;
+
+	runtime_initlock(h);
+	runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h);
+	runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil);
+	runtime_MHeapMap_Init(&h->map, alloc);
+	// h->mapcache needs no init
+	for(i=0; i<nelem(h->free); i++)
+		runtime_MSpanList_Init(&h->free[i]);
+	runtime_MSpanList_Init(&h->large);
+	for(i=0; i<nelem(h->central); i++)
+		runtime_MCentral_Init(&h->central[i], i);
+}
+
+// Allocate a new span of npage pages from the heap
+// and record its size class in the HeapMap and HeapMapCache.
+MSpan*
+runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct)
+{
+	MSpan *s;
+
+	runtime_lock(h);
+	mstats.heap_alloc += m->mcache->local_alloc;
+	m->mcache->local_alloc = 0;
+	mstats.heap_objects += m->mcache->local_objects;
+	m->mcache->local_objects = 0;
+	s = MHeap_AllocLocked(h, npage, sizeclass);
+	if(s != nil) {
+		mstats.heap_inuse += npage<<PageShift;
+		if(acct) {
+			mstats.heap_objects++;
+			mstats.heap_alloc += npage<<PageShift;
+		}
+	}
+	runtime_unlock(h);
+	return s;
+}
+
+static MSpan*
+MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
+{
+	uintptr n;
+	MSpan *s, *t;
+
+	// Try in fixed-size lists up to max.
+	for(n=npage; n < nelem(h->free); n++) {
+		if(!runtime_MSpanList_IsEmpty(&h->free[n])) {
+			s = h->free[n].next;
+			goto HaveSpan;
+		}
+	}
+
+	// Best fit in list of large spans.
+	if((s = MHeap_AllocLarge(h, npage)) == nil) {
+		if(!MHeap_Grow(h, npage))
+			return nil;
+		if((s = MHeap_AllocLarge(h, npage)) == nil)
+			return nil;
+	}
+
+HaveSpan:
+	// Mark span in use.
+	if(s->state != MSpanFree)
+		runtime_throw("MHeap_AllocLocked - MSpan not free");
+	if(s->npages < npage)
+		runtime_throw("MHeap_AllocLocked - bad npages");
+	runtime_MSpanList_Remove(s);
+	s->state = MSpanInUse;
+
+	if(s->npages > npage) {
+		// Trim extra and put it back in the heap.
+		t = runtime_FixAlloc_Alloc(&h->spanalloc);
+		mstats.mspan_inuse = h->spanalloc.inuse;
+		mstats.mspan_sys = h->spanalloc.sys;
+		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
+		s->npages = npage;
+		runtime_MHeapMap_Set(&h->map, t->start - 1, s);
+		runtime_MHeapMap_Set(&h->map, t->start, t);
+		runtime_MHeapMap_Set(&h->map, t->start + t->npages - 1, t);
+		t->state = MSpanInUse;
+		MHeap_FreeLocked(h, t);
+	}
+
+	// Record span info, because gc needs to be
+	// able to map interior pointer to containing span.
+	s->sizeclass = sizeclass;
+	for(n=0; n<npage; n++)
+		runtime_MHeapMap_Set(&h->map, s->start+n, s);
+	return s;
+}
+
+// Allocate a span of exactly npage pages from the list of large spans.
+static MSpan*
+MHeap_AllocLarge(MHeap *h, uintptr npage)
+{
+	return BestFit(&h->large, npage, nil);
+}
+
+// Search list for smallest span with >= npage pages.
+// If there are multiple smallest spans, take the one
+// with the earliest starting address.
+static MSpan*
+BestFit(MSpan *list, uintptr npage, MSpan *best)
+{
+	MSpan *s;
+
+	for(s=list->next; s != list; s=s->next) {
+		if(s->npages < npage)
+			continue;
+		if(best == nil
+		|| s->npages < best->npages
+		|| (s->npages == best->npages && s->start < best->start))
+			best = s;
+	}
+	return best;
+}
+
+// Try to add at least npage pages of memory to the heap,
+// returning whether it worked.
+static bool
+MHeap_Grow(MHeap *h, uintptr npage)
+{
+	uintptr ask;
+	void *v;
+	MSpan *s;
+
+	// Ask for a big chunk, to reduce the number of mappings
+	// the operating system needs to track; also amortizes
+	// the overhead of an operating system mapping.
+	// Allocate a multiple of 64kB (16 pages).
+	npage = (npage+15)&~15;
+	ask = npage<<PageShift;
+	if(ask < HeapAllocChunk)
+		ask = HeapAllocChunk;
+
+	v = runtime_SysAlloc(ask);
+	if(v == nil) {
+		if(ask > (npage<<PageShift)) {
+			ask = npage<<PageShift;
+			v = runtime_SysAlloc(ask);
+		}
+		if(v == nil)
+			return false;
+	}
+	mstats.heap_sys += ask;
+
+	if((byte*)v < h->min || h->min == nil)
+		h->min = v;
+	if((byte*)v+ask > h->max)
+		h->max = (byte*)v+ask;
+
+	// NOTE(rsc): In tcmalloc, if we've accumulated enough
+	// system allocations, the heap map gets entirely allocated
+	// in 32-bit mode.  (In 64-bit mode that's not practical.)
+	if(!runtime_MHeapMap_Preallocate(&h->map, ((uintptr)v>>PageShift) - 1, (ask>>PageShift) + 2)) {
+		runtime_SysFree(v, ask);
+		return false;
+	}
+
+	// Create a fake "in use" span and free it, so that the
+	// right coalescing happens.
+	s = runtime_FixAlloc_Alloc(&h->spanalloc);
+	mstats.mspan_inuse = h->spanalloc.inuse;
+	mstats.mspan_sys = h->spanalloc.sys;
+	runtime_MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
+	runtime_MHeapMap_Set(&h->map, s->start, s);
+	runtime_MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
+	s->state = MSpanInUse;
+	MHeap_FreeLocked(h, s);
+	return true;
+}
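+
+// Worked example (editor's addition): a request for 3 pages rounds up
+// to (3+15)&~15 == 16 pages == 64kB, then to HeapAllocChunk if that is
+// larger, so even small growths map big, coalescible chunks.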
+
+// Look up the span at the given page number.
+// Page number is guaranteed to be in map
+// and is guaranteed to be start or end of span.
+MSpan*
+runtime_MHeap_Lookup(MHeap *h, PageID p)
+{
+	return runtime_MHeapMap_Get(&h->map, p);
+}
+
+// Look up the span at the given page number.
+// Page number is *not* guaranteed to be in map
+// and may be anywhere in the span.
+// Map entries for the middle of a span are only
+// valid for allocated spans.  Free spans may have
+// other garbage in their middles, so we have to
+// check for that.
+MSpan*
+runtime_MHeap_LookupMaybe(MHeap *h, PageID p)
+{
+	MSpan *s;
+
+	s = runtime_MHeapMap_GetMaybe(&h->map, p);
+	if(s == nil || p < s->start || p - s->start >= s->npages)
+		return nil;
+	if(s->state != MSpanInUse)
+		return nil;
+	return s;
+}
+
+// Free the span back into the heap.
+void
+runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
+{
+	runtime_lock(h);
+	mstats.heap_alloc += m->mcache->local_alloc;
+	m->mcache->local_alloc = 0;
+	mstats.heap_objects += m->mcache->local_objects;
+	m->mcache->local_objects = 0;
+	mstats.heap_inuse -= s->npages<<PageShift;
+	if(acct) {
+		mstats.heap_alloc -= s->npages<<PageShift;
+		mstats.heap_objects--;
+	}
+	MHeap_FreeLocked(h, s);
+	runtime_unlock(h);
+}
+
+static void
+MHeap_FreeLocked(MHeap *h, MSpan *s)
+{
+	MSpan *t;
+
+	if(s->state != MSpanInUse || s->ref != 0) {
+		// runtime_printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
+		runtime_throw("MHeap_FreeLocked - invalid free");
+	}
+	s->state = MSpanFree;
+	runtime_MSpanList_Remove(s);
+
+	// Coalesce with earlier, later spans.
+	if((t = runtime_MHeapMap_Get(&h->map, s->start - 1)) != nil && t->state != MSpanInUse) {
+		s->start = t->start;
+		s->npages += t->npages;
+		runtime_MHeapMap_Set(&h->map, s->start, s);
+		runtime_MSpanList_Remove(t);
+		t->state = MSpanDead;
+		runtime_FixAlloc_Free(&h->spanalloc, t);
+		mstats.mspan_inuse = h->spanalloc.inuse;
+		mstats.mspan_sys = h->spanalloc.sys;
+	}
+	if((t = runtime_MHeapMap_Get(&h->map, s->start + s->npages)) != nil && t->state != MSpanInUse) {
+		s->npages += t->npages;
+		runtime_MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
+		runtime_MSpanList_Remove(t);
+		t->state = MSpanDead;
+		runtime_FixAlloc_Free(&h->spanalloc, t);
+		mstats.mspan_inuse = h->spanalloc.inuse;
+		mstats.mspan_sys = h->spanalloc.sys;
+	}
+
+	// Insert s into appropriate list.
+	if(s->npages < nelem(h->free))
+		runtime_MSpanList_Insert(&h->free[s->npages], s);
+	else
+		runtime_MSpanList_Insert(&h->large, s);
+
+	// TODO(rsc): IncrementalScavenge() to return memory to OS.
+}
+
+// Initialize a new span with the given start and npages.
+void
+runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages)
+{
+	span->next = nil;
+	span->prev = nil;
+	span->start = start;
+	span->npages = npages;
+	span->freelist = nil;
+	span->ref = 0;
+	span->sizeclass = 0;
+	span->state = 0;
+}
+
+// Initialize an empty doubly-linked list.
+void
+runtime_MSpanList_Init(MSpan *list)
+{
+	list->state = MSpanListHead;
+	list->next = list;
+	list->prev = list;
+}
+
+void
+runtime_MSpanList_Remove(MSpan *span)
+{
+	if(span->prev == nil && span->next == nil)
+		return;
+	span->prev->next = span->next;
+	span->next->prev = span->prev;
+	span->prev = nil;
+	span->next = nil;
+}
+
+bool
+runtime_MSpanList_IsEmpty(MSpan *list)
+{
+	return list->next == list;
+}
+
+void
+runtime_MSpanList_Insert(MSpan *list, MSpan *span)
+{
+	if(span->next != nil || span->prev != nil)
+		runtime_throw("MSpanList_Insert");
+	span->next = list->next;
+	span->prev = list;
+	span->next->prev = span;
+	span->prev->next = span;
+}
diff --git a/libgo/runtime/mheapmap32.c b/libgo/runtime/mheapmap32.c
new file mode 100644
index 000000000..547c602fe
--- /dev/null
+++ b/libgo/runtime/mheapmap32.c
@@ -0,0 +1,99 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Heap map, 32-bit version
+// See malloc.h and mheap.c for overview.
+
+#include "runtime.h"
+#include "malloc.h"
+
+#if __SIZEOF_POINTER__ == 4
+
+// 2-level radix tree mapping page ids to Span*.
+void
+runtime_MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr))
+{
+	m->allocator = allocator;
+}
+
+MSpan*
+runtime_MHeapMap_Get(MHeapMap *m, PageID k)
+{
+	int32 i1, i2;
+
+	i2 = k & MHeapMap_Level2Mask;
+	k >>= MHeapMap_Level2Bits;
+	i1 = k & MHeapMap_Level1Mask;
+	k >>= MHeapMap_Level1Bits;
+	if(k != 0)
+		runtime_throw("MHeapMap_Get");
+
+	return m->p[i1]->s[i2];
+}
+
+MSpan*
+runtime_MHeapMap_GetMaybe(MHeapMap *m, PageID k)
+{
+	int32 i1, i2;
+	MHeapMapNode2 *p2;
+
+	i2 = k & MHeapMap_Level2Mask;
+	k >>= MHeapMap_Level2Bits;
+	i1 = k & MHeapMap_Level1Mask;
+	k >>= MHeapMap_Level1Bits;
+	if(k != 0)
+		runtime_throw("MHeapMap_Get");
+
+	p2 = m->p[i1];
+	if(p2 == nil)
+		return nil;
+	return p2->s[i2];
+}
+
+void
+runtime_MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s)
+{
+	int32 i1, i2;
+
+	i2 = k & MHeapMap_Level2Mask;
+	k >>= MHeapMap_Level2Bits;
+	i1 = k & MHeapMap_Level1Mask;
+	k >>= MHeapMap_Level1Bits;
+	if(k != 0)
+		runtime_throw("MHeapMap_Set");
+
+	m->p[i1]->s[i2] = s;
+}
+
+// Allocate the storage required for entries [k, k+1, ..., k+len-1]
+// so that Get and Set calls need not check for nil pointers.
+bool
+runtime_MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr len)
+{
+	uintptr end;
+	int32 i1;
+	MHeapMapNode2 *p2;
+
+	end = k+len;
+	while(k < end) {
+		if((k >> MHeapMap_TotalBits) != 0)
+			return false;
+		i1 = (k >> MHeapMap_Level2Bits) & MHeapMap_Level1Mask;
+
+		// first-level pointer
+		if(m->p[i1] == nil) {
+			p2 = m->allocator(sizeof *p2);
+			if(p2 == nil)
+				return false;
+			mstats.heapmap_sys += sizeof *p2;
+			m->p[i1] = p2;
+		}
+
+		// advance key past this leaf node
+		k = ((k >> MHeapMap_Level2Bits) + 1) << MHeapMap_Level2Bits;
+	}
+	return true;
+}
+
+#endif /* __SIZEOF_POINTER__ == 4 */
diff --git a/libgo/runtime/mheapmap32.h b/libgo/runtime/mheapmap32.h
new file mode 100644
index 000000000..286162469
--- /dev/null
+++ b/libgo/runtime/mheapmap32.h
@@ -0,0 +1,41 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Free(v) must be able to determine the MSpan containing v.
+// The MHeapMap is a 2-level radix tree mapping page numbers to MSpans.
+
+typedef struct MHeapMapNode2 MHeapMapNode2;
+
+enum
+{
+	// 32 bit address - 12 bit page size = 20 bits to map
+	MHeapMap_Level1Bits = 10,
+	MHeapMap_Level2Bits = 10,
+
+	MHeapMap_TotalBits =
+		MHeapMap_Level1Bits +
+		MHeapMap_Level2Bits,
+
+	MHeapMap_Level1Mask = (1<<MHeapMap_Level1Bits) - 1,
+	MHeapMap_Level2Mask = (1<<MHeapMap_Level2Bits) - 1,
+};
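+
+// Worked example (editor's addition): with PageShift == 12, the address
+// 0xdeadb000 is page id k == 0xdeadb, which splits into
+//	i2 = k & MHeapMap_Level2Mask == 0x2db		// leaf index
+//	i1 = (k >> 10) & MHeapMap_Level1Mask == 0x37a	// root index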
+
+struct MHeapMap
+{
+	void *(*allocator)(uintptr);
+	MHeapMapNode2 *p[1<<MHeapMap_Level1Bits];
+};
+
+struct MHeapMapNode2
+{
+	MSpan *s[1<<MHeapMap_Level2Bits];
+};
+
+void	runtime_MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr));
+bool	runtime_MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr npages);
+MSpan*	runtime_MHeapMap_Get(MHeapMap *m, PageID k);
+MSpan*	runtime_MHeapMap_GetMaybe(MHeapMap *m, PageID k);
+void	runtime_MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
+
+
diff --git a/libgo/runtime/mheapmap64.c b/libgo/runtime/mheapmap64.c
new file mode 100644
index 000000000..d6305953a
--- /dev/null
+++ b/libgo/runtime/mheapmap64.c
@@ -0,0 +1,120 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Heap map, 64-bit version
+// See malloc.h and mheap.c for overview.
+
+#include "runtime.h"
+#include "malloc.h"
+
+#if __SIZEOF_POINTER__ == 8
+
+// 3-level radix tree mapping page ids to Span*.
+void
+runtime_MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr))
+{
+	m->allocator = allocator;
+}
+
+MSpan*
+runtime_MHeapMap_Get(MHeapMap *m, PageID k)
+{
+	int32 i1, i2, i3;
+
+	i3 = k & MHeapMap_Level3Mask;
+	k >>= MHeapMap_Level3Bits;
+	i2 = k & MHeapMap_Level2Mask;
+	k >>= MHeapMap_Level2Bits;
+	i1 = k & MHeapMap_Level1Mask;
+	k >>= MHeapMap_Level1Bits;
+	if(k != 0)
+		runtime_throw("MHeapMap_Get");
+
+	return m->p[i1]->p[i2]->s[i3];
+}
+
+MSpan*
+runtime_MHeapMap_GetMaybe(MHeapMap *m, PageID k)
+{
+	int32 i1, i2, i3;
+	MHeapMapNode2 *p2;
+	MHeapMapNode3 *p3;
+
+	i3 = k & MHeapMap_Level3Mask;
+	k >>= MHeapMap_Level3Bits;
+	i2 = k & MHeapMap_Level2Mask;
+	k >>= MHeapMap_Level2Bits;
+	i1 = k & MHeapMap_Level1Mask;
+	k >>= MHeapMap_Level1Bits;
+	if(k != 0)
+		runtime_throw("MHeapMap_Get");
+
+	p2 = m->p[i1];
+	if(p2 == nil)
+		return nil;
+	p3 = p2->p[i2];
+	if(p3 == nil)
+		return nil;
+	return p3->s[i3];
+}
+
+void
+runtime_MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s)
+{
+	int32 i1, i2, i3;
+
+	i3 = k & MHeapMap_Level3Mask;
+	k >>= MHeapMap_Level3Bits;
+	i2 = k & MHeapMap_Level2Mask;
+	k >>= MHeapMap_Level2Bits;
+	i1 = k & MHeapMap_Level1Mask;
+	k >>= MHeapMap_Level1Bits;
+	if(k != 0)
+		runtime_throw("MHeapMap_Set");
+
+	m->p[i1]->p[i2]->s[i3] = s;
+}
+
+// Allocate the storage required for entries [k, k+1, ..., k+len-1]
+// so that Get and Set calls need not check for nil pointers.
+bool
+runtime_MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr len)
+{
+	uintptr end;
+	int32 i1, i2;
+	MHeapMapNode2 *p2;
+	MHeapMapNode3 *p3;
+
+	end = k+len;
+	while(k < end) {
+		if((k >> MHeapMap_TotalBits) != 0)
+			return false;
+		i2 = (k >> MHeapMap_Level3Bits) & MHeapMap_Level2Mask;
+		i1 = (k >> (MHeapMap_Level3Bits + MHeapMap_Level2Bits)) & MHeapMap_Level1Mask;
+
+		// first-level pointer
+		if((p2 = m->p[i1]) == nil) {
+			p2 = m->allocator(sizeof *p2);
+			if(p2 == nil)
+				return false;
+			mstats.heapmap_sys += sizeof *p2;
+			m->p[i1] = p2;
+		}
+
+		// second-level pointer
+		if(p2->p[i2] == nil) {
+			p3 = m->allocator(sizeof *p3);
+			if(p3 == nil)
+				return false;
+			mstats.heapmap_sys += sizeof *p3;
+			p2->p[i2] = p3;
+		}
+
+		// advance key past this leaf node
+		k = ((k >> MHeapMap_Level3Bits) + 1) << MHeapMap_Level3Bits;
+	}
+	return true;
+}
+
+#endif /* __SIZEOF_POINTER__ == 8 */
diff --git a/libgo/runtime/mheapmap64.h b/libgo/runtime/mheapmap64.h
new file mode 100644
index 000000000..be304cb2e
--- /dev/null
+++ b/libgo/runtime/mheapmap64.h
@@ -0,0 +1,60 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Free(v) must be able to determine the MSpan containing v.
+// The MHeapMap is a 3-level radix tree mapping page numbers to MSpans.
+//
+// NOTE(rsc): On a 32-bit platform (= 20-bit page numbers),
+// we can swap in a 2-level radix tree.
+//
+// NOTE(rsc): We use a 3-level tree because tcmalloc does, but
+// having only three levels requires approximately 1 MB per node
+// in the tree, making the minimum map footprint 3 MB.
+// Using a 4-level tree would cut the minimum footprint to 256 kB.
+// On the other hand, it's just virtual address space: most of
+// the memory is never going to be touched, thus never paged in.
+
+typedef struct MHeapMapNode2 MHeapMapNode2;
+typedef struct MHeapMapNode3 MHeapMapNode3;
+
+enum
+{
+	// 64 bit address - 12 bit page size = 52 bits to map
+	MHeapMap_Level1Bits = 18,
+	MHeapMap_Level2Bits = 18,
+	MHeapMap_Level3Bits = 16,
+
+	MHeapMap_TotalBits =
+		MHeapMap_Level1Bits +
+		MHeapMap_Level2Bits +
+		MHeapMap_Level3Bits,
+
+	MHeapMap_Level1Mask = (1<<MHeapMap_Level1Bits) - 1,
+	MHeapMap_Level2Mask = (1<<MHeapMap_Level2Bits) - 1,
+	MHeapMap_Level3Mask = (1<<MHeapMap_Level3Bits) - 1,
+};
+
+struct MHeapMap
+{
+	void *(*allocator)(uintptr);
+	MHeapMapNode2 *p[1<<MHeapMap_Level1Bits];
+};
+
+struct MHeapMapNode2
+{
+	MHeapMapNode3 *p[1<<MHeapMap_Level2Bits];
+};
+
+struct MHeapMapNode3
+{
+	MSpan *s[1<<MHeapMap_Level3Bits];
+};
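+
+// Worked sizes (editor's addition): with 8-byte pointers, MHeapMap and
+// MHeapMapNode2 are each (1<<18)*8 == 2MB and MHeapMapNode3 is
+// (1<<16)*8 == 512kB, so mapping the first page commits one node per
+// level of mostly untouched virtual address space.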
+
+void	runtime_MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr));
+bool	runtime_MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr npages);
+MSpan*	runtime_MHeapMap_Get(MHeapMap *m, PageID k);
+MSpan*	runtime_MHeapMap_GetMaybe(MHeapMap *m, PageID k);
+void	runtime_MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
+
+
diff --git a/libgo/runtime/mprof.goc b/libgo/runtime/mprof.goc
new file mode 100644
index 000000000..6bd4ef727
--- /dev/null
+++ b/libgo/runtime/mprof.goc
@@ -0,0 +1,305 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Malloc profiling.
+// Patterned after tcmalloc's algorithms; shorter code.
+
+package runtime
+#include "runtime.h"
+#include "malloc.h"
+#include "defs.h"
+#include "go-type.h"
+
+typedef struct __go_open_array Slice;
+
+// NOTE(rsc): Everything here could use cas if contention became an issue.
+static Lock proflock;
+
+// Per-call-stack allocation information.
+// Lookup by hashing call stack into a linked-list hash table.
+typedef struct Bucket Bucket;
+struct Bucket
+{
+	Bucket	*next;	// next in hash list
+	Bucket	*allnext;	// next in list of all buckets
+	uintptr	allocs;
+	uintptr	frees;
+	uintptr	alloc_bytes;
+	uintptr	free_bytes;
+	uintptr	hash;
+	uintptr	nstk;
+	uintptr	stk[1];
+};
+enum {
+	BuckHashSize = 179999,
+};
+static Bucket **buckhash;
+static Bucket *buckets;
+static uintptr bucketmem;
+
+// Return the bucket for stk[0:nstk], allocating new bucket if needed.
+static Bucket*
+stkbucket(uintptr *stk, int32 nstk)
+{
+	int32 i;
+	uintptr h;
+	Bucket *b;
+
+	if(buckhash == nil) {
+		buckhash = runtime_SysAlloc(BuckHashSize*sizeof buckhash[0]);
+		mstats.buckhash_sys += BuckHashSize*sizeof buckhash[0];
+	}
+
+	// Hash stack.
+	h = 0;
+	for(i=0; i<nstk; i++) {
+		h += stk[i];
+		h += h<<10;
+		h ^= h>>6;
+	}
+	h += h<<3;
+	h ^= h>>11;
+
+	i = h%BuckHashSize;
+	for(b = buckhash[i]; b; b=b->next)
+		if(b->hash == h && b->nstk == (uintptr)nstk &&
+		   runtime_mcmp((byte*)b->stk, (byte*)stk, nstk*sizeof stk[0]) == 0)
+			return b;
+
+	b = runtime_mallocgc(sizeof *b + nstk*sizeof stk[0], RefNoProfiling, 0, 1);
+	bucketmem += sizeof *b + nstk*sizeof stk[0];
+	runtime_memmove(b->stk, stk, nstk*sizeof stk[0]);
+	b->hash = h;
+	b->nstk = nstk;
+	b->next = buckhash[i];
+	buckhash[i] = b;
+	b->allnext = buckets;
+	buckets = b;
+	return b;
+}
+
+// Map from pointer to Bucket* that allocated it.
+// Three levels:
+//	Linked-list hash table for top N-20 bits.
+//	Array index for next 13 bits.
+//	Linked list for next 7 bits.
+// This is more efficient than using a general map,
+// because of the typical clustering of the pointer keys.
+
+typedef struct AddrHash AddrHash;
+typedef struct AddrEntry AddrEntry;
+
+struct AddrHash
+{
+	AddrHash *next;	// next in top-level hash table linked list
+	uintptr addr;	// addr>>20
+	AddrEntry *dense[1<<13];
+};
+
+struct AddrEntry
+{
+	AddrEntry *next;	// next in bottom-level linked list
+	uint32 addr;
+	Bucket *b;
+};
+
+enum {
+	AddrHashBits = 12	// 1MB per entry, so good for 4GB of used address space
+};
+static AddrHash *addrhash[1<<AddrHashBits];
+static AddrEntry *addrfree;
+static uintptr addrmem;
+
+// Multiplicative hash function:
+// hashMultiplier is the bottom 32 bits of int((sqrt(5)-1)/2 * (1<<32)).
+// This is a good multiplier as suggested in CLR, Knuth.  The hash
+// value is taken to be the top AddrHashBits bits of the bottom 32 bits
+// of the multiplied value.
+enum {
+	HashMultiplier = 2654435769U
+};
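+
+// Illustrative sketch (editor's addition): for an address a, the head
+// slot is computed from the megabyte number a>>20, e.g.
+//	h = (uint32)((a>>20) * HashMultiplier) >> (32 - AddrHashBits);
+// so adjacent 1MB regions scatter across the 1<<12 head slots.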
+
+// Set the bucket associated with addr to b.
+static void
+setaddrbucket(uintptr addr, Bucket *b)
+{
+	int32 i;
+	uint32 h;
+	AddrHash *ah;
+	AddrEntry *e;
+
+	h = (uint32)((addr>>20)*HashMultiplier) >> (32-AddrHashBits);
+	for(ah=addrhash[h]; ah; ah=ah->next)
+		if(ah->addr == (addr>>20))
+			goto found;
+
+	ah = runtime_mallocgc(sizeof *ah, RefNoProfiling, 0, 1);
+	addrmem += sizeof *ah;
+	ah->next = addrhash[h];
+	ah->addr = addr>>20;
+	addrhash[h] = ah;
+
+found:
+	if((e = addrfree) == nil) {
+		e = runtime_mallocgc(64*sizeof *e, RefNoProfiling, 0, 0);
+		addrmem += 64*sizeof *e;
+		for(i=0; i+1<64; i++)
+			e[i].next = &e[i+1];
+		e[63].next = nil;
+	}
+	addrfree = e->next;
+	e->addr = (uint32)~(addr & ((1<<20)-1));
+	e->b = b;
+	h = (addr>>7)&(nelem(ah->dense)-1);	// entry in dense is top 13 bits of low 20.
+	e->next = ah->dense[h];
+	ah->dense[h] = e;
+}
+
+// Get the bucket associated with addr and clear the association.
+static Bucket*
+getaddrbucket(uintptr addr)
+{
+	uint32 h;
+	AddrHash *ah;
+	AddrEntry *e, **l;
+	Bucket *b;
+
+	h = (uint32)((addr>>20)*HashMultiplier) >> (32-AddrHashBits);
+	for(ah=addrhash[h]; ah; ah=ah->next)
+		if(ah->addr == (addr>>20))
+			goto found;
+	return nil;
+
+found:
+	h = (addr>>7)&(nelem(ah->dense)-1);	// entry in dense is top 13 bits of low 20.
+	for(l=&ah->dense[h]; (e=*l) != nil; l=&e->next) {
+		if(e->addr == (uint32)~(addr & ((1<<20)-1))) {
+			*l = e->next;
+			b = e->b;
+			e->next = addrfree;
+			addrfree = e;
+			return b;
+		}
+	}
+	return nil;
+}
+
+void
+runtime_Mprof_Init()
+{
+	runtime_initlock(&proflock);
+}
+
+// Called by malloc to record a profiled block.
+void
+runtime_MProf_Malloc(void *p, uintptr size)
+{
+	int32 nstk;
+	uintptr stk[32];
+	Bucket *b;
+
+	if(!__sync_bool_compare_and_swap(&m->nomemprof, 0, 1))
+		return;
+#if 0
+	nstk = runtime_callers(1, stk, 32);
+#else
+	nstk = 0;
+#endif
+	runtime_lock(&proflock);
+	b = stkbucket(stk, nstk);
+	b->allocs++;
+	b->alloc_bytes += size;
+	setaddrbucket((uintptr)p, b);
+	runtime_unlock(&proflock);
+	__sync_bool_compare_and_swap(&m->nomemprof, 1, 0);
+
+	if(__sync_bool_compare_and_swap(&m->gcing_for_prof, 1, 0))
+		__go_run_goroutine_gc(100);
+}
+
+// Called when freeing a profiled block.
+void
+runtime_MProf_Free(void *p, uintptr size)
+{
+	Bucket *b;
+
+	if(!__sync_bool_compare_and_swap(&m->nomemprof, 0, 1))
+		return;
+
+	runtime_lock(&proflock);
+	b = getaddrbucket((uintptr)p);
+	if(b != nil) {
+		b->frees++;
+		b->free_bytes += size;
+	}
+	runtime_unlock(&proflock);
+	__sync_bool_compare_and_swap(&m->nomemprof, 1, 0);
+
+	if(__sync_bool_compare_and_swap(&m->gcing_for_prof, 1, 0))
+		__go_run_goroutine_gc(101);
+}
+
+// Go interface to profile data.  (Declared in extern.go)
+// Assumes Go sizeof(int) == sizeof(int32)
+
+// Must match MemProfileRecord in extern.go.
+typedef struct Record Record;
+struct Record {
+	int64 alloc_bytes, free_bytes;
+	int64 alloc_objects, free_objects;
+	uintptr stk[32];
+};
+
+// Write b's data to r.
+static void
+record(Record *r, Bucket *b)
+{
+	uint32 i;
+
+	r->alloc_bytes = b->alloc_bytes;
+	r->free_bytes = b->free_bytes;
+	r->alloc_objects = b->allocs;
+	r->free_objects = b->frees;
+	for(i=0; i<b->nstk && i<nelem(r->stk); i++)
+		r->stk[i] = b->stk[i];
+	for(; i<nelem(r->stk); i++)
+		r->stk[i] = 0;
+}
+
+func MemProfile(p Slice, include_inuse_zero bool) (n int32, ok bool) {
+	Bucket *b;
+	Record *r;
+
+	__sync_bool_compare_and_swap(&m->nomemprof, 0, 1);
+
+	runtime_lock(&proflock);
+	n = 0;
+	for(b=buckets; b; b=b->allnext)
+		if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
+			n++;
+	ok = false;
+	if(n <= p.__count) {
+		ok = true;
+		r = (Record*)p.__values;
+		for(b=buckets; b; b=b->allnext)
+			if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
+				record(r++, b);
+	}
+	runtime_unlock(&proflock);
+
+	__sync_bool_compare_and_swap(&m->nomemprof, 1, 0);
+
+	if(__sync_bool_compare_and_swap(&m->gcing_for_prof, 1, 0))
+		__go_run_goroutine_gc(102);
+}
+
+void
+runtime_MProf_Mark(void (*scan)(byte *, int64))
+{
+	// buckhash is not allocated via mallocgc.
+	scan((byte*)&buckets, sizeof buckets);
+	scan((byte*)&addrhash, sizeof addrhash);
+	scan((byte*)&addrfree, sizeof addrfree);
+}
diff --git a/libgo/runtime/msize.c b/libgo/runtime/msize.c
new file mode 100644
index 000000000..8b021a2b6
--- /dev/null
+++ b/libgo/runtime/msize.c
@@ -0,0 +1,169 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Malloc small size classes.
+//
+// See malloc.h for overview.
+//
+// The size classes are chosen so that rounding an allocation
+// request up to the next size class wastes at most 12.5% (1.125x).
+//
+// Each size class has its own page count that gets allocated
+// and chopped up when new objects of the size class are needed.
+// That page count is chosen so that chopping up the run of
+// pages into objects of the given size wastes at most 12.5% (1.125x)
+// of the memory.  It is not necessary that the cutoff here be
+// the same as above.
+//
+// The two sources of waste multiply, so the worst possible case
+// for the above constraints would be that allocations of some
+// size might have a 26.6% (1.266x) overhead.
+// In practice, only one of the wastes comes into play for a
+// given size (sizes < 512 waste mainly on the round-up,
+// sizes > 512 waste mainly on the page chopping).
+//
+// TODO(rsc): Compute max waste for any given size.
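+//
+// A hedged worked example of the combined bound: rounding a 129-byte
+// request up to its size class loses at most 1/8 (class size at most
+// 129*1.125), and chopping the class's page run loses at most another
+// 1/8, so the stacked worst case is 1.125*1.125 = 1.265625, the 26.6%
+// quoted above.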
+
+#include "runtime.h"
+#include "malloc.h"
+
+int32 runtime_class_to_size[NumSizeClasses];
+int32 runtime_class_to_allocnpages[NumSizeClasses];
+int32 runtime_class_to_transfercount[NumSizeClasses];
+
+// The SizeToClass lookup is implemented using two arrays,
+// one mapping sizes <= 1024 to their class and one mapping
+// sizes >= 1024 and <= MaxSmallSize to their class.
+// All objects are 8-aligned, so the first array is indexed by
+// the size divided by 8 (rounded up).  Objects >= 1024 bytes
+// are 128-aligned, so the second array is indexed by the
+// size divided by 128 (rounded up).  The arrays are filled in
+// by InitSizes.
+
+static int32 size_to_class8[1024/8 + 1];
+static int32 size_to_class128[(MaxSmallSize-1024)/128 + 1];
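+
+// A worked example (editorial) of the two-array lookup: size 200 is
+// served by the first array as size_to_class8[(200+7)>>3], i.e. index
+// 25, while size 2000 is served by the second array as
+// size_to_class128[(2000-1024+127)>>7], i.e. index 8.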
+
+int32
+runtime_SizeToClass(int32 size)
+{
+	if(size > MaxSmallSize)
+		runtime_throw("SizeToClass - invalid size");
+	if(size > 1024-8)
+		return size_to_class128[(size-1024+127) >> 7];
+	return size_to_class8[(size+7)>>3];
+}
+
+void
+runtime_InitSizes(void)
+{
+	int32 align, sizeclass, size, osize, nextsize, n;
+	uint32 i;
+	uintptr allocsize, npages;
+
+	// Initialize the runtime_class_to_size table (and choose class sizes in the process).
+	runtime_class_to_size[0] = 0;
+	sizeclass = 1;	// 0 means no class
+	align = 8;
+	for(size = align; size <= MaxSmallSize; size += align) {
+		if((size&(size-1)) == 0) {	// bump alignment once in a while
+			if(size >= 2048)
+				align = 256;
+			else if(size >= 128)
+				align = size / 8;
+			else if(size >= 16)
+				align = 16;	// required for x86 SSE instructions, if we want to use them
+		}
+		if((align&(align-1)) != 0)
+			runtime_throw("InitSizes - bug");
+
+		// Make the allocnpages big enough that
+		// the leftover is less than 1/8 of the total,
+		// so wasted space is at most 12.5%.
+		allocsize = PageSize;
+		osize = size + RefcountOverhead;
+		while(allocsize%osize > (allocsize/8))
+			allocsize += PageSize;
+		npages = allocsize >> PageShift;
+
+		// If the previous sizeclass chose the same
+		// allocation size and fit the same number of
+		// objects into the page, we might as well
+		// use just this size instead of having two
+		// different sizes.
+		if(sizeclass > 1
+		&& (int32)npages == runtime_class_to_allocnpages[sizeclass-1]
+		&& allocsize/osize == allocsize/(runtime_class_to_size[sizeclass-1]+RefcountOverhead)) {
+			runtime_class_to_size[sizeclass-1] = size;
+			continue;
+		}
+
+		runtime_class_to_allocnpages[sizeclass] = npages;
+		runtime_class_to_size[sizeclass] = size;
+		sizeclass++;
+	}
+	if(sizeclass != NumSizeClasses) {
+		// runtime_printf("sizeclass=%d NumSizeClasses=%d\n", sizeclass, NumSizeClasses);
+		runtime_throw("InitSizes - bad NumSizeClasses");
+	}
+
+	// Initialize the size_to_class tables.
+	nextsize = 0;
+	for (sizeclass = 1; sizeclass < NumSizeClasses; sizeclass++) {
+		for(; nextsize < 1024 && nextsize <= runtime_class_to_size[sizeclass]; nextsize+=8)
+			size_to_class8[nextsize/8] = sizeclass;
+		if(nextsize >= 1024)
+			for(; nextsize <= runtime_class_to_size[sizeclass]; nextsize += 128)
+				size_to_class128[(nextsize-1024)/128] = sizeclass;
+	}
+
+	// Double-check SizeToClass.
+	if(0) {
+		for(n=0; n < MaxSmallSize; n++) {
+			sizeclass = runtime_SizeToClass(n);
+			if(sizeclass < 1 || sizeclass >= NumSizeClasses || runtime_class_to_size[sizeclass] < n) {
+				// runtime_printf("size=%d sizeclass=%d runtime_class_to_size=%d\n", n, sizeclass, runtime_class_to_size[sizeclass]);
+				// runtime_printf("incorrect SizeToClass");
+				goto dump;
+			}
+			if(sizeclass > 1 && runtime_class_to_size[sizeclass-1] >= n) {
+				// runtime_printf("size=%d sizeclass=%d runtime_class_to_size=%d\n", n, sizeclass, runtime_class_to_size[sizeclass]);
+				// runtime_printf("SizeToClass too big");
+				goto dump;
+			}
+		}
+	}
+
+	// Copy out for statistics table.
+	for(i=0; i<nelem(runtime_class_to_size); i++)
+		mstats.by_size[i].size = runtime_class_to_size[i];
+
+	// Initialize the runtime_class_to_transfercount table.
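+	// For example (editorial sketch): a 1024-byte class transfers
+	// 64*1024/1024 = 64, clamped down to the 32 maximum; a 32768-byte
+	// class transfers 64*1024/32768 = 2, the minimum.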
+	for(sizeclass = 1; sizeclass < NumSizeClasses; sizeclass++) {
+		n = 64*1024 / runtime_class_to_size[sizeclass];
+		if(n < 2)
+			n = 2;
+		if(n > 32)
+			n = 32;
+		runtime_class_to_transfercount[sizeclass] = n;
+	}
+	return;
+
+dump:
+	if(1){
+		runtime_printf("NumSizeClasses=%d\n", NumSizeClasses);
+		runtime_printf("runtime_class_to_size:");
+		for(sizeclass=0; sizeclass<NumSizeClasses; sizeclass++)
+			runtime_printf(" %d", runtime_class_to_size[sizeclass]);
+		runtime_printf("\n\n");
+		runtime_printf("size_to_class8:");
+		for(i=0; i<nelem(size_to_class8); i++)
+			runtime_printf(" %d=>%d(%d)\n", i*8, size_to_class8[i], runtime_class_to_size[size_to_class8[i]]);
+		runtime_printf("\n");
+		runtime_printf("size_to_class128:");
+		for(i=0; i<nelem(size_to_class128); i++)
+			runtime_printf(" %d=>%d(%d)\n", i*128, size_to_class128[i], runtime_class_to_size[size_to_class128[i]]);
+		runtime_printf("\n");
+	}
+	runtime_throw("InitSizes failed");
+}
diff --git a/libgo/runtime/proc.c b/libgo/runtime/proc.c
new file mode 100644
index 000000000..191fac613
--- /dev/null
+++ b/libgo/runtime/proc.c
@@ -0,0 +1,16 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "malloc.h"	/* so that acid generated from proc.c includes malloc data structures */
+
+typedef struct Sched Sched;
+
+M	m0;
+
+#ifdef __rtems__
+#define __thread
+#endif
+
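+/* Editorial note: on RTEMS the #define above strips __thread, so m is
+   a plain global; each task presumably gets its own copy via
+   __wrap_rtems_task_variable_add (see rtems-task-variable-add.c).  */
+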
+__thread M *m = &m0;
diff --git a/libgo/runtime/reflect.goc b/libgo/runtime/reflect.goc
new file mode 100644
index 000000000..01d218adb
--- /dev/null
+++ b/libgo/runtime/reflect.goc
@@ -0,0 +1,35 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect
+#include "go-type.h"
+#include "interface.h"
+#define nil NULL
+typedef unsigned char byte;
+
+typedef struct __go_interface Iface;
+typedef struct __go_empty_interface Eface;
+
+func setiface(typ *byte, x *byte, ret *byte) {
+	struct __go_interface_type *t;
+	const struct __go_type_descriptor* xt;
+
+	/* FIXME: We should check __type_descriptor to verify that
+	   this is really a type descriptor.  */
+	t = (struct __go_interface_type *)typ;
+	if(t->__methods.__count == 0) {
+		// already an empty interface
+		*(Eface*)ret = *(Eface*)x;
+		return;
+	}
+	xt = ((Eface*)x)->__type_descriptor;
+	if(xt == nil) {
+		// can assign nil to any interface
+		((Iface*)ret)->__methods = nil;
+		((Iface*)ret)->__object = nil;
+		return;
+	}
+	((Iface*)ret)->__methods = __go_convert_interface(&t->__common, xt);
+	((Iface*)ret)->__object = ((Eface*)x)->__object;
+}
diff --git a/libgo/runtime/rtems-task-variable-add.c b/libgo/runtime/rtems-task-variable-add.c
new file mode 100644
index 000000000..89dbb007a
--- /dev/null
+++ b/libgo/runtime/rtems-task-variable-add.c
@@ -0,0 +1,24 @@
+/* rtems-task-variable-add.c -- adding a task specific variable in RTEMS OS.
+
+   Copyright 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <rtems/error.h>
+#include <rtems/system.h>
+#include <rtems/rtems/tasks.h>
+
+#include "go-assert.h"
+
+/* RTEMS does not support GNU TLS extension __thread.  */
+void
+__wrap_rtems_task_variable_add (void **var)
+{
+  rtems_status_code sc = rtems_task_variable_add (RTEMS_SELF, var, NULL);
+  if (sc != RTEMS_SUCCESSFUL)
+    {
+      rtems_error (sc, "rtems_task_variable_add failed");
+      __go_assert (0);
+    }
+}
+
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
new file mode 100644
index 000000000..95216e4a5
--- /dev/null
+++ b/libgo/runtime/runtime.h
@@ -0,0 +1,196 @@
+/* runtime.h -- runtime support for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "config.h"
+
+#define _GNU_SOURCE
+#include "go-assert.h"
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <semaphore.h>
+
+#ifdef HAVE_SYS_MMAN_H
+#include <sys/mman.h>
+#endif
+
+#include "go-alloc.h"
+#include "go-panic.h"
+#include "go-string.h"
+
+typedef struct __go_string String;
+
+/* This file supports C files copied from the 6g runtime library.
+   It is a version of the 6g runtime.h rewritten for gccgo.  */
+
+typedef signed int   int8    __attribute__ ((mode (QI)));
+typedef unsigned int uint8   __attribute__ ((mode (QI)));
+typedef signed int   int16   __attribute__ ((mode (HI)));
+typedef unsigned int uint16  __attribute__ ((mode (HI)));
+typedef signed int   int32   __attribute__ ((mode (SI)));
+typedef unsigned int uint32  __attribute__ ((mode (SI)));
+typedef signed int   int64   __attribute__ ((mode (DI)));
+typedef unsigned int uint64  __attribute__ ((mode (DI)));
+typedef float        float32 __attribute__ ((mode (SF)));
+typedef double       float64 __attribute__ ((mode (DF)));
+typedef unsigned int uintptr __attribute__ ((mode (pointer)));
+
+/* Defined types.  */
+
+typedef	uint8			bool;
+typedef	uint8			byte;
+typedef	struct	M		M;
+typedef	struct	MCache		MCache;
+typedef	struct	Lock		Lock;
+
+/* We use mutexes for locks.  6g uses futexes directly, and perhaps
+   someday we will do that too.  */
+
+struct	Lock
+{
+	uint32 key;
+	sem_t sem;
+};
+
+/* A Note.  */
+
+typedef	struct	Note		Note;
+
+struct Note {
+	int32 woken;
+};
+
+/* Per CPU declarations.  */
+
+#ifdef __rtems__
+#define __thread
+#endif
+
+extern __thread		M* 	m;
+
+extern M	m0;
+
+#ifdef __rtems__
+#undef __thread
+#endif
+
+/* Constants.  */
+
+enum
+{
+	true	= 1,
+	false	= 0,
+};
+
+/* Structures.  */
+
+struct	M
+{
+	int32	mallocing;
+	int32	gcing;
+	int32	locks;
+	int32	nomemprof;
+	int32	gcing_for_prof;
+	int32	holds_finlock;
+	int32	gcing_for_finlock;
+	MCache	*mcache;
+
+	/* For the list of all threads.  */
+	struct __go_thread_id *list_entry;
+
+	/* For the garbage collector.  */
+	void	*gc_sp;
+	size_t	gc_len;
+	void	*gc_next_segment;
+	void	*gc_next_sp;
+	void	*gc_initial_sp;
+	struct __go_panic_defer_struct *gc_panic_defer;
+};
+
+/* Macros.  */
+#define	nelem(x)	(sizeof(x)/sizeof((x)[0]))
+#define	nil		((void*)0)
+#define USED(v)		((void) v)
+
+/* We map throw to assert.  */
+#define runtime_throw(s) __go_assert(s == 0)
+
+void*	runtime_mal(uintptr);
+void	runtime_mallocinit(void);
+void	runtime_initfintab(void);
+void	siginit(void);
+bool	__go_sigsend(int32 sig);
+int64	runtime_nanotime(void);
+
+void	runtime_stoptheworld(void);
+void	runtime_starttheworld(void);
+void	__go_go(void (*pfn)(void*), void*);
+void	__go_gc_goroutine_init(void*);
+void	__go_enable_gc(void);
+int	__go_run_goroutine_gc(int);
+void	__go_scanstacks(void (*scan)(byte *, int64));
+void	__go_stealcache(void);
+void	__go_cachestats(void);
+
+/*
+ * mutual exclusion locks.  in the uncontended case,
+ * as fast as spin locks (just a few user-level instructions),
+ * but on the contention path they sleep in the kernel.
+ */
+void	runtime_initlock(Lock*);
+void	runtime_lock(Lock*);
+void	runtime_unlock(Lock*);
+void	runtime_destroylock(Lock*);
+
+void semacquire (uint32 *) asm ("libgo_runtime.runtime.Semacquire");
+void semrelease (uint32 *) asm ("libgo_runtime.runtime.Semrelease");
+
+/*
+ * sleep and wakeup on one-time events.
+ * before any calls to notesleep or notewakeup,
+ * must call noteclear to initialize the Note.
+ * then, any number of threads can call notesleep
+ * and exactly one thread can call notewakeup (once).
+ * once notewakeup has been called, all the notesleeps
+ * will return.  future notesleeps will return immediately.
+ */
+void	noteclear(Note*);
+void	notesleep(Note*);
+void	notewakeup(Note*);
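+
+/* A minimal usage sketch (editorial, not part of the interface):
+
+	static Note done;
+	noteclear(&done);	// once, before any sleeper or waker
+	// waiter(s):
+	notesleep(&done);	// block until the event fires
+	// exactly one waker:
+	notewakeup(&done);	// all current and future sleepers return
+*/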
+
+/* Functions.  */
+#define runtime_printf printf
+#define runtime_malloc(s) __go_alloc(s)
+#define runtime_free(p) __go_free(p)
+#define runtime_memclr(buf, size) __builtin_memset((buf), 0, (size))
+#define runtime_strcmp(s1, s2) __builtin_strcmp((s1), (s2))
+#define runtime_getenv(s) getenv(s)
+#define runtime_atoi(s) atoi(s)
+#define runtime_mcmp(a, b, s) __builtin_memcmp((a), (b), (s))
+#define runtime_memmove(a, b, s) __builtin_memmove((a), (b), (s))
+MCache*	runtime_allocmcache(void);
+void	free(void *v);
+struct __go_func_type;
+void	runtime_addfinalizer(void*, void(*fn)(void*), const struct __go_func_type *);
+void	runtime_walkfintab(void (*fn)(void*), void (*scan)(byte *, int64));
+#define runtime_mmap mmap
+#define runtime_munmap(p, s) munmap((p), (s))
+#define runtime_cas(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
+
+struct __go_func_type;
+void reflect_call(const struct __go_func_type *, const void *, _Bool, void **,
+		  void **)
+  asm ("libgo_reflect.reflect.call");
+
+#ifdef __rtems__
+void __wrap_rtems_task_variable_add(void **);
+#endif
diff --git a/libgo/runtime/sigqueue.goc b/libgo/runtime/sigqueue.goc
new file mode 100644
index 000000000..b5f2954bc
--- /dev/null
+++ b/libgo/runtime/sigqueue.goc
@@ -0,0 +1,113 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements runtime support for signal handling.
+//
+// Most synchronization primitives are not available from
+// the signal handler (it cannot block and cannot use locks)
+// so the handler communicates with a processing goroutine
+// via struct sig, below.
+//
+// Ownership for sig.Note passes back and forth between
+// the signal handler and the signal goroutine in rounds.
+// The initial state is that sig.note is cleared (set up by siginit).
+// At the beginning of each round, mask == 0.
+// The round goes through three stages:
+//
+// (In parallel)
+// 1a) One or more signals arrive and are handled
+// by sigsend using cas to set bits in sig.mask.
+// The handler that changes sig.mask from zero to non-zero
+// calls notewakeup(&sig).
+// 1b) Sigrecv calls notesleep(&sig) to wait for the wakeup.
+//
+// 2) Having received the wakeup, sigrecv knows that sigsend
+// will not send another wakeup, so it can noteclear(&sig)
+// to prepare for the next round. (Sigsend may still be adding
+// signals to sig.mask at this point, which is fine.)
+//
+// 3) Sigrecv uses cas to grab the current sig.mask and zero it,
+// triggering the next round.
+//
+// The signal handler takes ownership of the note by atomically
+// changing mask from a zero to non-zero value. It gives up
+// ownership by calling notewakeup. The signal goroutine takes
+// ownership by returning from notesleep (caused by the notewakeup)
+// and gives up ownership by clearing mask.
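+//
+// A hedged walk-through of one round: suppose SIGINT (signal 2, bit
+// 1<<2) arrives.  Sigsend cas-flips sig.mask from 0 to 4 and, since
+// the old mask was zero, calls notewakeup.  A second SIGINT arriving
+// before Sigrecv runs finds the bit already set and is coalesced.
+// Sigrecv wakes from notesleep, noteclears for the next round, then
+// cas-grabs the mask (4) and zeroes it.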
+
+package runtime
+#include "config.h"
+#include "runtime.h"
+#include "malloc.h"
+#include "defs.h"
+
+static struct {
+	Note;
+	uint32 mask;
+	bool inuse;
+} sig;
+
+void
+siginit(void)
+{
+	noteclear(&sig);
+}
+
+// Called from sighandler to send a signal back out of the signal handling thread.
+bool
+__go_sigsend(int32 s)
+{
+	uint32 bit, mask;
+
+	if(!sig.inuse)
+		return false;
+	bit = 1 << s;
+	for(;;) {
+		mask = sig.mask;
+		if(mask & bit)
+			break;		// signal already in queue
+		if(runtime_cas(&sig.mask, mask, mask|bit)) {
+			// Added to queue.
+			// Only send a wakeup for the first signal in each round.
+			if(mask == 0)
+				notewakeup(&sig);
+			break;
+		}
+	}
+	return true;
+}
+
+// Called to receive a bitmask of queued signals.
+func Sigrecv() (m uint32) {
+	// runtime·entersyscall();
+	notesleep(&sig);
+	// runtime·exitsyscall();
+	noteclear(&sig);
+	for(;;) {
+		m = sig.mask;
+		if(runtime_cas(&sig.mask, m, 0))
+			break;
+	}
+}
+
+func Signame(sig int32) (name String) {
+	const char* s = NULL;
+	char buf[100];
+#if defined(HAVE_STRSIGNAL)
+	s = strsignal(sig);
+#endif
+	if (s == NULL) {
+		snprintf(buf, sizeof buf, "signal %d", sig);
+		s = buf;
+	}
+	int32 len = __builtin_strlen(s);
+	unsigned char *data = runtime_mallocgc(len, RefNoPointers, 0, 0);
+	__builtin_memcpy(data, s, len);
+	name.__data = data;
+	name.__length = len;
+}
+
+func Siginit() {
+	sig.inuse = true;	// enable reception of signals; cannot disable
+}
diff --git a/libgo/runtime/string.goc b/libgo/runtime/string.goc
new file mode 100644
index 000000000..332277c52
--- /dev/null
+++ b/libgo/runtime/string.goc
@@ -0,0 +1,57 @@
+// Copyright 2009, 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+#include "runtime.h"
+#define charntorune(pv, str, len) __go_get_rune(str, len, pv)
+
+enum
+{
+	Runeself	= 0x80,
+};
+
+func stringiter(s String, k int32) (retk int32) {
+	int32 l, n;
+
+	if(k >= s.__length) {
+		// retk=0 is end of iteration
+		retk = 0;
+		goto out;
+	}
+
+	l = s.__data[k];
+	if(l < Runeself) {
+		retk = k+1;
+		goto out;
+	}
+
+	// multi-char rune
+	n = charntorune(&l, s.__data+k, s.__length-k);
+	retk = k + (n ? n : 1);
+
+out:
+}
+
+func stringiter2(s String, k int32) (retk int32, retv int32) {
+	int32 n;
+
+	if(k >= s.__length) {
+		// retk=0 is end of iteration
+		retk = 0;
+		retv = 0;
+		goto out;
+	}
+
+	retv = s.__data[k];
+	if(retv < Runeself) {
+		retk = k+1;
+		goto out;
+	}
+
+	// multi-char rune
+	n = charntorune(&retv, s.__data+k, s.__length-k);
+	retk = k + (n ? n : 1);
+
+out:
+}
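+
+// A hedged sketch of the intended Go-side loop (hypothetical caller;
+// use() stands in for whatever consumes each rune):
+//
+//	k := 0
+//	for {
+//		nk, r := stringiter2(s, k)	// r is the rune at byte offset k
+//		if nk == 0 {
+//			break			// end of string
+//		}
+//		use(r)
+//		k = nk
+//	}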
diff --git a/libgo/runtime/thread.c b/libgo/runtime/thread.c
new file mode 100644
index 000000000..bac3f7dfd
--- /dev/null
+++ b/libgo/runtime/thread.c
@@ -0,0 +1,118 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include <errno.h>
+#include "runtime.h"
+#include "go-assert.h"
+
+void
+runtime_initlock(Lock *l)
+{
+	l->key = 0;
+	if(sem_init(&l->sem, 0, 0) != 0)
+		runtime_throw("sem_init failed");
+}
+
+static uint32
+runtime_xadd(uint32 volatile *val, int32 delta)
+{
+	uint32 oval, nval;
+
+	for(;;){
+		oval = *val;
+		nval = oval + delta;
+		if(runtime_cas(val, oval, nval))
+			return nval;
+	}
+}
+
+// noinline so that runtime_lock doesn't have to split the stack.
+static void runtime_lock_full(Lock *l) __attribute__ ((noinline));
+
+static void
+runtime_lock_full(Lock *l)
+{
+	for(;;){
+		if(sem_wait(&l->sem) == 0)
+			return;
+		if(errno != EINTR)
+			runtime_throw("sem_wait failed");
+	}
+}
+
+void
+runtime_lock(Lock *l)
+{
+	if(m != nil) {
+		if(m->locks < 0)
+			runtime_throw("lock count");
+		m->locks++;
+	}
+
+	if(runtime_xadd(&l->key, 1) > 1)	// someone else has it; wait
+		runtime_lock_full(l);
+}
+
+static void runtime_unlock_full(Lock *l) __attribute__ ((noinline));
+
+static void
+runtime_unlock_full(Lock *l)
+{
+	if(sem_post(&l->sem) != 0)
+		runtime_throw("sem_post failed");
+}
+
+void
+runtime_unlock(Lock *l)
+{
+	if(m != nil) {
+		m->locks--;
+		if(m->locks < 0)
+			runtime_throw("lock count");
+	}
+
+	if(runtime_xadd(&l->key, -1) > 0)	// someone else is waiting
+		runtime_unlock_full(l);
+}
+
+void
+runtime_destroylock(Lock *l)
+{
+	sem_destroy(&l->sem);
+}
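+
+// Editorial note: this is the classic "benaphore" pattern.  l->key
+// atomically counts lockers, so an uncontended lock (xadd to 1) and
+// unlock (xadd back to 0) never enter the kernel; only the contended
+// paths touch the semaphore.  A minimal usage sketch:
+//
+//	static Lock l;
+//	runtime_initlock(&l);
+//	runtime_lock(&l);
+//	/* critical section */
+//	runtime_unlock(&l);
+//	runtime_destroylock(&l);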
+
+#ifndef HAVE_SYNC_BOOL_COMPARE_AND_SWAP_4
+
+// For targets which don't have the required sync support.  Really
+// this should be provided by gcc itself.  FIXME.
+
+static pthread_mutex_t sync_lock = PTHREAD_MUTEX_INITIALIZER;
+
+_Bool
+__sync_bool_compare_and_swap_4(uint32*, uint32, uint32)
+  __attribute__((visibility("hidden")));
+
+_Bool
+__sync_bool_compare_and_swap_4(uint32* ptr, uint32 old, uint32 new)
+{
+  int i;
+  _Bool ret;
+
+  i = pthread_mutex_lock(&sync_lock);
+  __go_assert(i == 0);
+
+  if(*ptr != old) {
+    ret = 0;
+  } else {
+    *ptr = new;
+    ret = 1;
+  }
+
+  i = pthread_mutex_unlock(&sync_lock);
+  __go_assert(i == 0);
+
+  return ret;
+}
+
+#endif
-- 
cgit v1.2.3