summaryrefslogtreecommitdiff
path: root/libgo/runtime/go-append.c
diff options
context:
space:
mode:
Diffstat (limited to 'libgo/runtime/go-append.c')
-rw-r--r--  libgo/runtime/go-append.c  67
1 file changed, 67 insertions, 0 deletions
diff --git a/libgo/runtime/go-append.c b/libgo/runtime/go-append.c
new file mode 100644
index 000000000..91493b1b7
--- /dev/null
+++ b/libgo/runtime/go-append.c
@@ -0,0 +1,67 @@
+/* go-append.c -- the go builtin append function.
+
+ Copyright 2010 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file. */
+
#include <limits.h>

#include "go-type.h"
#include "go-panic.h"
#include "array.h"
#include "runtime.h"
#include "malloc.h"
+
+/* We should be OK if we don't split the stack here, since the only
+ libc functions we call are memcpy and memmove. If we don't do
+ this, we will always split the stack, because of memcpy and
+ memmove. */
+extern struct __go_open_array
+__go_append (struct __go_open_array, void *, size_t, size_t)
+ __attribute__ ((no_split_stack));
+
+struct __go_open_array
+__go_append (struct __go_open_array a, void *bvalues, size_t bcount,
+ size_t element_size)
+{
+ size_t ucount;
+ int count;
+
+ if (bvalues == NULL || bcount == 0)
+ return a;
+
+ ucount = (size_t) a.__count + bcount;
+ count = (int) ucount;
+ if ((size_t) count != ucount || count <= a.__count)
+ __go_panic_msg ("append: slice overflow");
+
+ if (count > a.__capacity)
+ {
+ int m;
+ void *n;
+
+ m = a.__capacity;
+ if (m == 0)
+ m = (int) bcount;
+ else
+ {
+ do
+ {
+ if (a.__count < 1024)
+ m += m;
+ else
+ m += m / 4;
+ }
+ while (m < count);
+ }
+
+ n = __go_alloc (m * element_size);
+ __builtin_memcpy (n, a.__values, a.__count * element_size);
+
+ a.__values = n;
+ a.__capacity = m;
+ }
+
+ __builtin_memmove ((char *) a.__values + a.__count * element_size,
+ bvalues, bcount * element_size);
+ a.__count = count;
+ return a;
+}