author     upstream source tree <ports@midipix.org>  2015-03-15 20:14:05 -0400
committer  upstream source tree <ports@midipix.org>  2015-03-15 20:14:05 -0400
commit     554fd8c5195424bdbcabf5de30fdc183aba391bd
tree       976dc5ab7fddf506dadce60ae936f43f58787092 /libgo/go/bytes
obtained gcc-4.6.4.tar.bz2 from upstream website;
verified gcc-4.6.4.tar.bz2.sig; imported gcc-4.6.4 source tree from verified upstream tarball. downloading a git-generated archive based on the 'upstream' tag should provide you with a source tree that is binary identical to the one extracted from the above tarball. if you have obtained the source via the command 'git clone', however, do note that line-endings of files in your working directory might differ from line-endings of the respective files in the upstream repository.
Diffstat (limited to 'libgo/go/bytes')
-rw-r--r--  libgo/go/bytes/buffer.go        315
-rw-r--r--  libgo/go/bytes/buffer_test.go   349
-rw-r--r--  libgo/go/bytes/bytes.go         602
-rw-r--r--  libgo/go/bytes/bytes_decl.go      8
-rw-r--r--  libgo/go/bytes/bytes_test.go    844
-rw-r--r--  libgo/go/bytes/export_test.go     8
-rw-r--r--  libgo/go/bytes/indexbyte.c       28
7 files changed, 2154 insertions, 0 deletions
diff --git a/libgo/go/bytes/buffer.go b/libgo/go/bytes/buffer.go
new file mode 100644
index 000000000..62cf82810
--- /dev/null
+++ b/libgo/go/bytes/buffer.go
@@ -0,0 +1,315 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bytes
+
+// Simple byte buffer for marshaling data.
+
+import (
+ "io"
+ "os"
+ "utf8"
+)
+
+// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
+// The zero value for Buffer is an empty buffer ready to use.
+type Buffer struct {
+ buf []byte // contents are the bytes buf[off : len(buf)]
+ off int // read at &buf[off], write at &buf[len(buf)]
+ runeBytes [utf8.UTFMax]byte // avoid allocation of slice on each WriteByte or Rune
+ bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation.
+ lastRead readOp // last read operation, so that Unread* can work correctly.
+}
+
+// The readOp constants describe the last action performed on
+// the buffer, so that UnreadRune and UnreadByte can
+// check for invalid usage.
+type readOp int
+
+const (
+ opInvalid readOp = iota // Non-read operation.
+ opReadRune // Read rune.
+ opRead // Any other read operation.
+)
+
+// Bytes returns a slice of the contents of the unread portion of the buffer;
+// len(b.Bytes()) == b.Len(). If the caller changes the contents of the
+// returned slice, the contents of the buffer will change provided there
+// are no intervening method calls on the Buffer.
+func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
+
+// String returns the contents of the unread portion of the buffer
+// as a string. If the Buffer is a nil pointer, it returns "<nil>".
+func (b *Buffer) String() string {
+ if b == nil {
+ // Special case, useful in debugging.
+ return "<nil>"
+ }
+ return string(b.buf[b.off:])
+}
+
+// Len returns the number of bytes of the unread portion of the buffer;
+// b.Len() == len(b.Bytes()).
+func (b *Buffer) Len() int { return len(b.buf) - b.off }
+
+// Truncate discards all but the first n unread bytes from the buffer.
+// It is an error to call b.Truncate(n) with n > b.Len().
+func (b *Buffer) Truncate(n int) {
+ b.lastRead = opInvalid
+ if n == 0 {
+ // Reuse buffer space.
+ b.off = 0
+ }
+ b.buf = b.buf[0 : b.off+n]
+}
+
+// Reset resets the buffer so it has no content.
+// b.Reset() is the same as b.Truncate(0).
+func (b *Buffer) Reset() { b.Truncate(0) }
+
+// Grow buffer to guarantee space for n more bytes.
+// Return index where bytes should be written.
+func (b *Buffer) grow(n int) int {
+ m := b.Len()
+ // If buffer is empty, reset to recover space.
+ if m == 0 && b.off != 0 {
+ b.Truncate(0)
+ }
+ if len(b.buf)+n > cap(b.buf) {
+ var buf []byte
+ if b.buf == nil && n <= len(b.bootstrap) {
+ buf = b.bootstrap[0:]
+ } else {
+ // not enough space anywhere
+ buf = make([]byte, 2*cap(b.buf)+n)
+ copy(buf, b.buf[b.off:])
+ }
+ b.buf = buf
+ b.off = 0
+ }
+ b.buf = b.buf[0 : b.off+m+n]
+ return b.off + m
+}
+
+// Write appends the contents of p to the buffer. The return
+// value n is the length of p; err is always nil.
+func (b *Buffer) Write(p []byte) (n int, err os.Error) {
+ b.lastRead = opInvalid
+ m := b.grow(len(p))
+ copy(b.buf[m:], p)
+ return len(p), nil
+}
+
+// WriteString appends the contents of s to the buffer. The return
+// value n is the length of s; err is always nil.
+func (b *Buffer) WriteString(s string) (n int, err os.Error) {
+ b.lastRead = opInvalid
+ m := b.grow(len(s))
+ return copy(b.buf[m:], s), nil
+}
+
+// MinRead is the minimum slice size passed to a Read call by
+// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
+// what is required to hold the contents of r, ReadFrom will not grow the
+// underlying buffer.
+const MinRead = 512
+
+// ReadFrom reads data from r until EOF and appends it to the buffer.
+// The return value n is the number of bytes read.
+// Any error except os.EOF encountered during the read
+// is also returned.
+func (b *Buffer) ReadFrom(r io.Reader) (n int64, err os.Error) {
+ b.lastRead = opInvalid
+ // If buffer is empty, reset to recover space.
+ if b.off >= len(b.buf) {
+ b.Truncate(0)
+ }
+ for {
+ if cap(b.buf)-len(b.buf) < MinRead {
+ var newBuf []byte
+ // can we get space without allocation?
+ if b.off+cap(b.buf)-len(b.buf) >= MinRead {
+ // reuse beginning of buffer
+ newBuf = b.buf[0 : len(b.buf)-b.off]
+ } else {
+ // not enough space at end; put space on end
+ newBuf = make([]byte, len(b.buf)-b.off, 2*(cap(b.buf)-b.off)+MinRead)
+ }
+ copy(newBuf, b.buf[b.off:])
+ b.buf = newBuf
+ b.off = 0
+ }
+ m, e := r.Read(b.buf[len(b.buf):cap(b.buf)])
+ b.buf = b.buf[0 : len(b.buf)+m]
+ n += int64(m)
+ if e == os.EOF {
+ break
+ }
+ if e != nil {
+ return n, e
+ }
+ }
+ return n, nil // err is EOF, so return nil explicitly
+}
+
+// WriteTo writes data to w until the buffer is drained or an error
+// occurs. The return value n is the number of bytes written.
+// Any error encountered during the write is also returned.
+func (b *Buffer) WriteTo(w io.Writer) (n int64, err os.Error) {
+ b.lastRead = opInvalid
+ for b.off < len(b.buf) {
+ m, e := w.Write(b.buf[b.off:])
+ n += int64(m)
+ b.off += m
+ if e != nil {
+ return n, e
+ }
+ }
+ // Buffer is now empty; reset.
+ b.Truncate(0)
+ return
+}
+
+// WriteByte appends the byte c to the buffer.
+// The returned error is always nil, but is included
+// to match bufio.Writer's WriteByte.
+func (b *Buffer) WriteByte(c byte) os.Error {
+ b.lastRead = opInvalid
+ m := b.grow(1)
+ b.buf[m] = c
+ return nil
+}
+
+// WriteRune appends the UTF-8 encoding of Unicode
+// code point r to the buffer, returning its length and
+// an error, which is always nil but is included
+// to match bufio.Writer's WriteRune.
+func (b *Buffer) WriteRune(r int) (n int, err os.Error) {
+ if r < utf8.RuneSelf {
+ b.WriteByte(byte(r))
+ return 1, nil
+ }
+ n = utf8.EncodeRune(b.runeBytes[0:], r)
+ b.Write(b.runeBytes[0:n])
+ return n, nil
+}
+
+// Read reads the next len(p) bytes from the buffer or until the buffer
+// is drained. The return value n is the number of bytes read. If the
+// buffer has no data to return, err is os.EOF even if len(p) is zero;
+// otherwise it is nil.
+func (b *Buffer) Read(p []byte) (n int, err os.Error) {
+ b.lastRead = opInvalid
+ if b.off >= len(b.buf) {
+ // Buffer is empty, reset to recover space.
+ b.Truncate(0)
+ return 0, os.EOF
+ }
+ n = copy(p, b.buf[b.off:])
+ b.off += n
+ if n > 0 {
+ b.lastRead = opRead
+ }
+ return
+}
+
+// Next returns a slice containing the next n bytes from the buffer,
+// advancing the buffer as if the bytes had been returned by Read.
+// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
+// The slice is only valid until the next call to a read or write method.
+func (b *Buffer) Next(n int) []byte {
+ b.lastRead = opInvalid
+ m := b.Len()
+ if n > m {
+ n = m
+ }
+ data := b.buf[b.off : b.off+n]
+ b.off += n
+ if n > 0 {
+ b.lastRead = opRead
+ }
+ return data
+}
+
+// ReadByte reads and returns the next byte from the buffer.
+// If no byte is available, it returns error os.EOF.
+func (b *Buffer) ReadByte() (c byte, err os.Error) {
+ b.lastRead = opInvalid
+ if b.off >= len(b.buf) {
+ // Buffer is empty, reset to recover space.
+ b.Truncate(0)
+ return 0, os.EOF
+ }
+ c = b.buf[b.off]
+ b.off++
+ b.lastRead = opRead
+ return c, nil
+}
+
+// ReadRune reads and returns the next UTF-8-encoded
+// Unicode code point from the buffer.
+// If no bytes are available, the error returned is os.EOF.
+// If the bytes are an erroneous UTF-8 encoding, it
+// consumes one byte and returns U+FFFD, 1.
+func (b *Buffer) ReadRune() (r int, size int, err os.Error) {
+ b.lastRead = opInvalid
+ if b.off >= len(b.buf) {
+ // Buffer is empty, reset to recover space.
+ b.Truncate(0)
+ return 0, 0, os.EOF
+ }
+ b.lastRead = opReadRune
+ c := b.buf[b.off]
+ if c < utf8.RuneSelf {
+ b.off++
+ return int(c), 1, nil
+ }
+ r, n := utf8.DecodeRune(b.buf[b.off:])
+ b.off += n
+ return r, n, nil
+}
+
+// UnreadRune unreads the last rune returned by ReadRune.
+// If the most recent read or write operation on the buffer was
+// not a ReadRune, UnreadRune returns an error. (In this regard
+// it is stricter than UnreadByte, which will unread the last byte
+// from any read operation.)
+func (b *Buffer) UnreadRune() os.Error {
+ if b.lastRead != opReadRune {
+ return os.ErrorString("bytes.Buffer: UnreadRune: previous operation was not ReadRune")
+ }
+ b.lastRead = opInvalid
+ if b.off > 0 {
+ _, n := utf8.DecodeLastRune(b.buf[0:b.off])
+ b.off -= n
+ }
+ return nil
+}
+
+// UnreadByte unreads the last byte returned by the most recent
+// read operation. If write has happened since the last read, UnreadByte
+// returns an error.
+func (b *Buffer) UnreadByte() os.Error {
+ if b.lastRead != opReadRune && b.lastRead != opRead {
+ return os.ErrorString("bytes.Buffer: UnreadByte: previous operation was not a read")
+ }
+ b.lastRead = opInvalid
+ if b.off > 0 {
+ b.off--
+ }
+ return nil
+}
+
+// NewBuffer creates and initializes a new Buffer using buf as its initial
+// contents. It is intended to prepare a Buffer to read existing data. It
+// can also be used to size the internal buffer for writing. To do that,
+// buf should have the desired capacity but a length of zero.
+func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
+
+// NewBufferString creates and initializes a new Buffer using string s as its
+// initial contents. It is intended to prepare a buffer to read an existing
+// string.
+func NewBufferString(s string) *Buffer {
+ return &Buffer{buf: []byte(s)}
+}
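
A minimal usage sketch of the Buffer type defined above, assuming the pre-Go 1 API used throughout this import (errors are os.Error values and end of input is reported as os.EOF); it is an illustration, not part of the imported tree:

package main

import (
	"bytes"
	"fmt"
	"os"
)

func main() {
	var b bytes.Buffer // the zero value is an empty buffer ready to use
	b.WriteString("hello, ")
	b.Write([]byte("world"))
	fmt.Println(b.String()) // hello, world

	// Drain the buffer; ReadByte reports os.EOF once nothing is left.
	for {
		c, err := b.ReadByte()
		if err == os.EOF {
			break
		}
		fmt.Printf("%c", c)
	}
	fmt.Println()
}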
diff --git a/libgo/go/bytes/buffer_test.go b/libgo/go/bytes/buffer_test.go
new file mode 100644
index 000000000..509793d24
--- /dev/null
+++ b/libgo/go/bytes/buffer_test.go
@@ -0,0 +1,349 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bytes_test
+
+import (
+ . "bytes"
+ "rand"
+ "testing"
+ "utf8"
+)
+
+
+const N = 10000 // make this bigger for a larger (and slower) test
+var data string // test data for write tests
+var bytes []byte // test data; same as data but as a slice.
+
+
+func init() {
+ bytes = make([]byte, N)
+ for i := 0; i < N; i++ {
+ bytes[i] = 'a' + byte(i%26)
+ }
+ data = string(bytes)
+}
+
+// Verify that contents of buf match the string s.
+func check(t *testing.T, testname string, buf *Buffer, s string) {
+ bytes := buf.Bytes()
+ str := buf.String()
+ if buf.Len() != len(bytes) {
+ t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes))
+ }
+
+ if buf.Len() != len(str) {
+ t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str))
+ }
+
+ if buf.Len() != len(s) {
+ t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s))
+ }
+
+ if string(bytes) != s {
+ t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s)
+ }
+}
+
+
+// Fill buf through n writes of string fus.
+// The initial contents of buf corresponds to the string s;
+// the result is the final contents of buf returned as a string.
+func fillString(t *testing.T, testname string, buf *Buffer, s string, n int, fus string) string {
+ check(t, testname+" (fill 1)", buf, s)
+ for ; n > 0; n-- {
+ m, err := buf.WriteString(fus)
+ if m != len(fus) {
+ t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fus))
+ }
+ if err != nil {
+ t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err)
+ }
+ s += fus
+ check(t, testname+" (fill 4)", buf, s)
+ }
+ return s
+}
+
+
+// Fill buf through n writes of byte slice fub.
+// The initial contents of buf corresponds to the string s;
+// the result is the final contents of buf returned as a string.
+func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string {
+ check(t, testname+" (fill 1)", buf, s)
+ for ; n > 0; n-- {
+ m, err := buf.Write(fub)
+ if m != len(fub) {
+ t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub))
+ }
+ if err != nil {
+ t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err)
+ }
+ s += string(fub)
+ check(t, testname+" (fill 4)", buf, s)
+ }
+ return s
+}
+
+
+func TestNewBuffer(t *testing.T) {
+ buf := NewBuffer(bytes)
+ check(t, "NewBuffer", buf, data)
+}
+
+
+func TestNewBufferString(t *testing.T) {
+ buf := NewBufferString(data)
+ check(t, "NewBufferString", buf, data)
+}
+
+
+// Empty buf through repeated reads into fub.
+// The initial contents of buf corresponds to the string s.
+func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) {
+ check(t, testname+" (empty 1)", buf, s)
+
+ for {
+ n, err := buf.Read(fub)
+ if n == 0 {
+ break
+ }
+ if err != nil {
+ t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err)
+ }
+ s = s[n:]
+ check(t, testname+" (empty 3)", buf, s)
+ }
+
+ check(t, testname+" (empty 4)", buf, "")
+}
+
+
+func TestBasicOperations(t *testing.T) {
+ var buf Buffer
+
+ for i := 0; i < 5; i++ {
+ check(t, "TestBasicOperations (1)", &buf, "")
+
+ buf.Reset()
+ check(t, "TestBasicOperations (2)", &buf, "")
+
+ buf.Truncate(0)
+ check(t, "TestBasicOperations (3)", &buf, "")
+
+ n, err := buf.Write([]byte(data[0:1]))
+ if n != 1 {
+ t.Errorf("wrote 1 byte, but n == %d", n)
+ }
+ if err != nil {
+ t.Errorf("err should always be nil, but err == %s", err)
+ }
+ check(t, "TestBasicOperations (4)", &buf, "a")
+
+ buf.WriteByte(data[1])
+ check(t, "TestBasicOperations (5)", &buf, "ab")
+
+ n, err = buf.Write([]byte(data[2:26]))
+ if n != 24 {
+ t.Errorf("wrote 25 bytes, but n == %d", n)
+ }
+ check(t, "TestBasicOperations (6)", &buf, string(data[0:26]))
+
+ buf.Truncate(26)
+ check(t, "TestBasicOperations (7)", &buf, string(data[0:26]))
+
+ buf.Truncate(20)
+ check(t, "TestBasicOperations (8)", &buf, string(data[0:20]))
+
+ empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5))
+ empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100))
+
+ buf.WriteByte(data[1])
+ c, err := buf.ReadByte()
+ if err != nil {
+ t.Error("ReadByte unexpected eof")
+ }
+ if c != data[1] {
+ t.Errorf("ReadByte wrong value c=%v", c)
+ }
+ c, err = buf.ReadByte()
+ if err == nil {
+ t.Error("ReadByte unexpected not eof")
+ }
+ }
+}
+
+
+func TestLargeStringWrites(t *testing.T) {
+ var buf Buffer
+ for i := 3; i < 30; i += 3 {
+ s := fillString(t, "TestLargeWrites (1)", &buf, "", 5, data)
+ empty(t, "TestLargeStringWrites (2)", &buf, s, make([]byte, len(data)/i))
+ }
+ check(t, "TestLargeStringWrites (3)", &buf, "")
+}
+
+
+func TestLargeByteWrites(t *testing.T) {
+ var buf Buffer
+ for i := 3; i < 30; i += 3 {
+ s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, bytes)
+ empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i))
+ }
+ check(t, "TestLargeByteWrites (3)", &buf, "")
+}
+
+
+func TestLargeStringReads(t *testing.T) {
+ var buf Buffer
+ for i := 3; i < 30; i += 3 {
+ s := fillString(t, "TestLargeReads (1)", &buf, "", 5, data[0:len(data)/i])
+ empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data)))
+ }
+ check(t, "TestLargeStringReads (3)", &buf, "")
+}
+
+
+func TestLargeByteReads(t *testing.T) {
+ var buf Buffer
+ for i := 3; i < 30; i += 3 {
+ s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, bytes[0:len(bytes)/i])
+ empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data)))
+ }
+ check(t, "TestLargeByteReads (3)", &buf, "")
+}
+
+
+func TestMixedReadsAndWrites(t *testing.T) {
+ var buf Buffer
+ s := ""
+ for i := 0; i < 50; i++ {
+ wlen := rand.Intn(len(data))
+ if i%2 == 0 {
+ s = fillString(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, data[0:wlen])
+ } else {
+ s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, bytes[0:wlen])
+ }
+
+ rlen := rand.Intn(len(data))
+ fub := make([]byte, rlen)
+ n, _ := buf.Read(fub)
+ s = s[n:]
+ }
+ empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len()))
+}
+
+
+func TestNil(t *testing.T) {
+ var b *Buffer
+ if b.String() != "<nil>" {
+ t.Errorf("expcted <nil>; got %q", b.String())
+ }
+}
+
+
+func TestReadFrom(t *testing.T) {
+ var buf Buffer
+ for i := 3; i < 30; i += 3 {
+ s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, bytes[0:len(bytes)/i])
+ var b Buffer
+ b.ReadFrom(&buf)
+ empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data)))
+ }
+}
+
+
+func TestWriteTo(t *testing.T) {
+ var buf Buffer
+ for i := 3; i < 30; i += 3 {
+ s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, bytes[0:len(bytes)/i])
+ var b Buffer
+ buf.WriteTo(&b)
+ empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data)))
+ }
+}
+
+
+func TestRuneIO(t *testing.T) {
+ const NRune = 1000
+ // Build a test array while we write the data
+ b := make([]byte, utf8.UTFMax*NRune)
+ var buf Buffer
+ n := 0
+ for r := 0; r < NRune; r++ {
+ size := utf8.EncodeRune(b[n:], r)
+ nbytes, err := buf.WriteRune(r)
+ if err != nil {
+ t.Fatalf("WriteRune(%U) error: %s", r, err)
+ }
+ if nbytes != size {
+ t.Fatalf("WriteRune(%U) expected %d, got %d", r, size, nbytes)
+ }
+ n += size
+ }
+ b = b[0:n]
+
+ // Check the resulting bytes
+ if !Equal(buf.Bytes(), b) {
+ t.Fatalf("incorrect result from WriteRune: %q not %q", buf.Bytes(), b)
+ }
+
+ p := make([]byte, utf8.UTFMax)
+ // Read it back with ReadRune
+ for r := 0; r < NRune; r++ {
+ size := utf8.EncodeRune(p, r)
+ nr, nbytes, err := buf.ReadRune()
+ if nr != r || nbytes != size || err != nil {
+ t.Fatalf("ReadRune(%U) got %U,%d not %U,%d (err=%s)", r, nr, nbytes, r, size, err)
+ }
+ }
+
+ // Check that UnreadRune works
+ buf.Reset()
+ buf.Write(b)
+ for r := 0; r < NRune; r++ {
+ r1, size, _ := buf.ReadRune()
+ if err := buf.UnreadRune(); err != nil {
+ t.Fatalf("UnreadRune(%U) got error %q", r, err)
+ }
+ r2, nbytes, err := buf.ReadRune()
+ if r1 != r2 || r1 != r || nbytes != size || err != nil {
+ t.Fatalf("ReadRune(%U) after UnreadRune got %U,%d not %U,%d (err=%s)", r, r2, nbytes, r, size, err)
+ }
+ }
+}
+
+
+func TestNext(t *testing.T) {
+ b := []byte{0, 1, 2, 3, 4}
+ tmp := make([]byte, 5)
+ for i := 0; i <= 5; i++ {
+ for j := i; j <= 5; j++ {
+ for k := 0; k <= 6; k++ {
+ // 0 <= i <= j <= 5; 0 <= k <= 6
+ // Check that if we start with a buffer
+ // of length j at offset i and ask for
+ // Next(k), we get the right bytes.
+ buf := NewBuffer(b[0:j])
+ n, _ := buf.Read(tmp[0:i])
+ if n != i {
+ t.Fatalf("Read %d returned %d", i, n)
+ }
+ bb := buf.Next(k)
+ want := k
+ if want > j-i {
+ want = j - i
+ }
+ if len(bb) != want {
+ t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb))
+ }
+ for l, v := range bb {
+ if v != byte(l+i) {
+ t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i)
+ }
+ }
+ }
+ }
+ }
+}
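
A small sketch of the rune round trip exercised by TestRuneIO above, again assuming the pre-Go 1 API (a rune is a plain int in this era, so WriteRune and ReadRune traffic in ints):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	var b bytes.Buffer
	b.WriteRune(0x4e16) // '世', three bytes of UTF-8
	r, size, _ := b.ReadRune()
	fmt.Println(r, size) // 19990 3

	// UnreadRune is only valid immediately after a successful ReadRune.
	if err := b.UnreadRune(); err != nil {
		fmt.Println("unexpected:", err)
	}
	r2, _, _ := b.ReadRune()
	fmt.Println(r2 == r) // true
}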
diff --git a/libgo/go/bytes/bytes.go b/libgo/go/bytes/bytes.go
new file mode 100644
index 000000000..bfe2ef39d
--- /dev/null
+++ b/libgo/go/bytes/bytes.go
@@ -0,0 +1,602 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The bytes package implements functions for the manipulation of byte slices.
+// Analogous to the facilities of the strings package.
+package bytes
+
+import (
+ "unicode"
+ "utf8"
+)
+
+// Compare returns an integer comparing the two byte arrays lexicographically.
+// The result will be 0 if a==b, -1 if a < b, and +1 if a > b.
+func Compare(a, b []byte) int {
+ m := len(a)
+ if m > len(b) {
+ m = len(b)
+ }
+ for i, ac := range a[0:m] {
+ bc := b[i]
+ switch {
+ case ac > bc:
+ return 1
+ case ac < bc:
+ return -1
+ }
+ }
+ switch {
+ case len(a) < len(b):
+ return -1
+ case len(a) > len(b):
+ return 1
+ }
+ return 0
+}
+
+// Equal returns a boolean reporting whether a == b.
+func Equal(a, b []byte) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i, c := range a {
+ if c != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// explode splits s into an array of UTF-8 sequences, one per Unicode character (still arrays of bytes),
+// up to a maximum of n byte arrays. Invalid UTF-8 sequences are chopped into individual bytes.
+func explode(s []byte, n int) [][]byte {
+ if n <= 0 {
+ n = len(s)
+ }
+ a := make([][]byte, n)
+ var size int
+ na := 0
+ for len(s) > 0 {
+ if na+1 >= n {
+ a[na] = s
+ na++
+ break
+ }
+ _, size = utf8.DecodeRune(s)
+ a[na] = s[0:size]
+ s = s[size:]
+ na++
+ }
+ return a[0:na]
+}
+
+// Count counts the number of non-overlapping instances of sep in s.
+func Count(s, sep []byte) int {
+ if len(sep) == 0 {
+ return utf8.RuneCount(s) + 1
+ }
+ c := sep[0]
+ n := 0
+ for i := 0; i+len(sep) <= len(s); i++ {
+ if s[i] == c && (len(sep) == 1 || Equal(s[i:i+len(sep)], sep)) {
+ n++
+ i += len(sep) - 1
+ }
+ }
+ return n
+}
+
+// Index returns the index of the first instance of sep in s, or -1 if sep is not present in s.
+func Index(s, sep []byte) int {
+ n := len(sep)
+ if n == 0 {
+ return 0
+ }
+ c := sep[0]
+ for i := 0; i+n <= len(s); i++ {
+ if s[i] == c && (n == 1 || Equal(s[i:i+n], sep)) {
+ return i
+ }
+ }
+ return -1
+}
+
+func indexBytePortable(s []byte, c byte) int {
+ for i, b := range s {
+ if b == c {
+ return i
+ }
+ }
+ return -1
+}
+
+// LastIndex returns the index of the last instance of sep in s, or -1 if sep is not present in s.
+func LastIndex(s, sep []byte) int {
+ n := len(sep)
+ if n == 0 {
+ return len(s)
+ }
+ c := sep[0]
+ for i := len(s) - n; i >= 0; i-- {
+ if s[i] == c && (n == 1 || Equal(s[i:i+n], sep)) {
+ return i
+ }
+ }
+ return -1
+}
+
+// IndexRune interprets s as a sequence of UTF-8-encoded Unicode code points.
+// It returns the byte index of the first occurrence in s of the given rune.
+// It returns -1 if rune is not present in s.
+func IndexRune(s []byte, rune int) int {
+ for i := 0; i < len(s); {
+ r, size := utf8.DecodeRune(s[i:])
+ if r == rune {
+ return i
+ }
+ i += size
+ }
+ return -1
+}
+
+// IndexAny interprets s as a sequence of UTF-8-encoded Unicode code points.
+// It returns the byte index of the first occurrence in s of any of the Unicode
+// code points in chars. It returns -1 if chars is empty or if there is no code
+// point in common.
+func IndexAny(s []byte, chars string) int {
+ if len(chars) > 0 {
+ var rune, width int
+ for i := 0; i < len(s); i += width {
+ rune = int(s[i])
+ if rune < utf8.RuneSelf {
+ width = 1
+ } else {
+ rune, width = utf8.DecodeRune(s[i:])
+ }
+ for _, r := range chars {
+ if rune == r {
+ return i
+ }
+ }
+ }
+ }
+ return -1
+}
+
+// LastIndexAny interprets s as a sequence of UTF-8-encoded Unicode code
+// points. It returns the byte index of the last occurrence in s of any of
+// the Unicode code points in chars. It returns -1 if chars is empty or if
+// there is no code point in common.
+func LastIndexAny(s []byte, chars string) int {
+ if len(chars) > 0 {
+ for i := len(s); i > 0; {
+ rune, size := utf8.DecodeLastRune(s[0:i])
+ i -= size
+ for _, m := range chars {
+ if rune == m {
+ return i
+ }
+ }
+ }
+ }
+ return -1
+}
+
+// Generic split: splits after each instance of sep,
+// including sepSave bytes of sep in the subarrays.
+func genSplit(s, sep []byte, sepSave, n int) [][]byte {
+ if n == 0 {
+ return nil
+ }
+ if len(sep) == 0 {
+ return explode(s, n)
+ }
+ if n < 0 {
+ n = Count(s, sep) + 1
+ }
+ c := sep[0]
+ start := 0
+ a := make([][]byte, n)
+ na := 0
+ for i := 0; i+len(sep) <= len(s) && na+1 < n; i++ {
+ if s[i] == c && (len(sep) == 1 || Equal(s[i:i+len(sep)], sep)) {
+ a[na] = s[start : i+sepSave]
+ na++
+ start = i + len(sep)
+ i += len(sep) - 1
+ }
+ }
+ a[na] = s[start:]
+ return a[0 : na+1]
+}
+
+// Split slices s into subslices separated by sep and returns a slice of
+// the subslices between those separators.
+// If sep is empty, Split splits after each UTF-8 sequence.
+// The count determines the number of subslices to return:
+// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
+// n == 0: the result is nil (zero subslices)
+// n < 0: all subslices
+func Split(s, sep []byte, n int) [][]byte { return genSplit(s, sep, 0, n) }
+
+// SplitAfter slices s into subslices after each instance of sep and
+// returns a slice of those subslices.
+// If sep is empty, Split splits after each UTF-8 sequence.
+// The count determines the number of subslices to return:
+// n > 0: at most n subslices; the last subslice will be the unsplit remainder.
+// n == 0: the result is nil (zero subslices)
+// n < 0: all subslices
+func SplitAfter(s, sep []byte, n int) [][]byte {
+ return genSplit(s, sep, len(sep), n)
+}
+
+// Fields splits the array s around each instance of one or more consecutive white space
+// characters, returning a slice of subarrays of s or an empty list if s contains only white space.
+func Fields(s []byte) [][]byte {
+ return FieldsFunc(s, unicode.IsSpace)
+}
+
+// FieldsFunc interprets s as a sequence of UTF-8-encoded Unicode code points.
+// It splits the array s at each run of code points c satisfying f(c) and
+// returns a slice of subarrays of s. If no code points in s satisfy f(c), an
+// empty slice is returned.
+func FieldsFunc(s []byte, f func(int) bool) [][]byte {
+ n := 0
+ inField := false
+ for i := 0; i < len(s); {
+ rune, size := utf8.DecodeRune(s[i:])
+ wasInField := inField
+ inField = !f(rune)
+ if inField && !wasInField {
+ n++
+ }
+ i += size
+ }
+
+ a := make([][]byte, n)
+ na := 0
+ fieldStart := -1
+ for i := 0; i <= len(s) && na < n; {
+ rune, size := utf8.DecodeRune(s[i:])
+ if fieldStart < 0 && size > 0 && !f(rune) {
+ fieldStart = i
+ i += size
+ continue
+ }
+ if fieldStart >= 0 && (size == 0 || f(rune)) {
+ a[na] = s[fieldStart:i]
+ na++
+ fieldStart = -1
+ }
+ if size == 0 {
+ break
+ }
+ i += size
+ }
+ return a[0:na]
+}
+
+// Join concatenates the elements of a to create a single byte array. The separator
+// sep is placed between elements in the resulting array.
+func Join(a [][]byte, sep []byte) []byte {
+ if len(a) == 0 {
+ return []byte{}
+ }
+ if len(a) == 1 {
+ return a[0]
+ }
+ n := len(sep) * (len(a) - 1)
+ for i := 0; i < len(a); i++ {
+ n += len(a[i])
+ }
+
+ b := make([]byte, n)
+ bp := 0
+ for i := 0; i < len(a); i++ {
+ s := a[i]
+ for j := 0; j < len(s); j++ {
+ b[bp] = s[j]
+ bp++
+ }
+ if i+1 < len(a) {
+ s = sep
+ for j := 0; j < len(s); j++ {
+ b[bp] = s[j]
+ bp++
+ }
+ }
+ }
+ return b
+}
+
+// HasPrefix tests whether the byte array s begins with prefix.
+func HasPrefix(s, prefix []byte) bool {
+ return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix)
+}
+
+// HasSuffix tests whether the byte array s ends with suffix.
+func HasSuffix(s, suffix []byte) bool {
+ return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix)
+}
+
+// Map returns a copy of the byte array s with all its characters modified
+// according to the mapping function. If mapping returns a negative value, the character is
+// dropped from the byte array with no replacement. The characters in s and the
+// output are interpreted as UTF-8-encoded Unicode code points.
+func Map(mapping func(rune int) int, s []byte) []byte {
+ // In the worst case, the array can grow when mapped, making
+ // things unpleasant. But it's so rare we barge in assuming it's
+ // fine. It could also shrink but that falls out naturally.
+ maxbytes := len(s) // length of b
+ nbytes := 0 // number of bytes encoded in b
+ b := make([]byte, maxbytes)
+ for i := 0; i < len(s); {
+ wid := 1
+ rune := int(s[i])
+ if rune >= utf8.RuneSelf {
+ rune, wid = utf8.DecodeRune(s[i:])
+ }
+ rune = mapping(rune)
+ if rune >= 0 {
+ if nbytes+utf8.RuneLen(rune) > maxbytes {
+ // Grow the buffer.
+ maxbytes = maxbytes*2 + utf8.UTFMax
+ nb := make([]byte, maxbytes)
+ copy(nb, b[0:nbytes])
+ b = nb
+ }
+ nbytes += utf8.EncodeRune(b[nbytes:maxbytes], rune)
+ }
+ i += wid
+ }
+ return b[0:nbytes]
+}
+
+// Repeat returns a new byte slice consisting of count copies of b.
+func Repeat(b []byte, count int) []byte {
+ nb := make([]byte, len(b)*count)
+ bp := 0
+ for i := 0; i < count; i++ {
+ for j := 0; j < len(b); j++ {
+ nb[bp] = b[j]
+ bp++
+ }
+ }
+ return nb
+}
+
+// ToUpper returns a copy of the byte array s with all Unicode letters mapped to their upper case.
+func ToUpper(s []byte) []byte { return Map(unicode.ToUpper, s) }
+
+// ToLower returns a copy of the byte array s with all Unicode letters mapped to their lower case.
+func ToLower(s []byte) []byte { return Map(unicode.ToLower, s) }
+
+// ToTitle returns a copy of the byte array s with all Unicode letters mapped to their title case.
+func ToTitle(s []byte) []byte { return Map(unicode.ToTitle, s) }
+
+// ToUpperSpecial returns a copy of the byte array s with all Unicode letters mapped to their
+// upper case, giving priority to the special casing rules.
+func ToUpperSpecial(_case unicode.SpecialCase, s []byte) []byte {
+ return Map(func(r int) int { return _case.ToUpper(r) }, s)
+}
+
+// ToLowerSpecial returns a copy of the byte array s with all Unicode letters mapped to their
+// lower case, giving priority to the special casing rules.
+func ToLowerSpecial(_case unicode.SpecialCase, s []byte) []byte {
+ return Map(func(r int) int { return _case.ToLower(r) }, s)
+}
+
+// ToTitleSpecial returns a copy of the byte array s with all Unicode letters mapped to their
+// title case, giving priority to the special casing rules.
+func ToTitleSpecial(_case unicode.SpecialCase, s []byte) []byte {
+ return Map(func(r int) int { return _case.ToTitle(r) }, s)
+}
+
+
+// isSeparator reports whether the rune could mark a word boundary.
+// TODO: update when package unicode captures more of the properties.
+func isSeparator(rune int) bool {
+ // ASCII alphanumerics and underscore are not separators
+ if rune <= 0x7F {
+ switch {
+ case '0' <= rune && rune <= '9':
+ return false
+ case 'a' <= rune && rune <= 'z':
+ return false
+ case 'A' <= rune && rune <= 'Z':
+ return false
+ case rune == '_':
+ return false
+ }
+ return true
+ }
+ // Letters and digits are not separators
+ if unicode.IsLetter(rune) || unicode.IsDigit(rune) {
+ return false
+ }
+ // Otherwise, all we can do for now is treat spaces as separators.
+ return unicode.IsSpace(rune)
+}
+
+// BUG(r): The rule Title uses for word boundaries does not handle Unicode punctuation properly.
+
+// Title returns a copy of s with all Unicode letters that begin words
+// mapped to their title case.
+func Title(s []byte) []byte {
+ // Use a closure here to remember state.
+ // Hackish but effective. Depends on Map scanning in order and calling
+ // the closure once per rune.
+ prev := ' '
+ return Map(
+ func(r int) int {
+ if isSeparator(prev) {
+ prev = r
+ return unicode.ToTitle(r)
+ }
+ prev = r
+ return r
+ },
+ s)
+}
+
+// TrimLeftFunc returns a subslice of s by slicing off all leading UTF-8-encoded
+// Unicode code points c that satisfy f(c).
+func TrimLeftFunc(s []byte, f func(r int) bool) []byte {
+ i := indexFunc(s, f, false)
+ if i == -1 {
+ return nil
+ }
+ return s[i:]
+}
+
+// TrimRightFunc returns a subslice of s by slicing off all trailing UTF-8
+// encoded Unicode code points c that satisfy f(c).
+func TrimRightFunc(s []byte, f func(r int) bool) []byte {
+ i := lastIndexFunc(s, f, false)
+ if i >= 0 && s[i] >= utf8.RuneSelf {
+ _, wid := utf8.DecodeRune(s[i:])
+ i += wid
+ } else {
+ i++
+ }
+ return s[0:i]
+}
+
+// TrimFunc returns a subslice of s by slicing off all leading and trailing
+// UTF-8-encoded Unicode code points c that satisfy f(c).
+func TrimFunc(s []byte, f func(r int) bool) []byte {
+ return TrimRightFunc(TrimLeftFunc(s, f), f)
+}
+
+// IndexFunc interprets s as a sequence of UTF-8-encoded Unicode code points.
+// It returns the byte index in s of the first Unicode
+// code point satisfying f(c), or -1 if none do.
+func IndexFunc(s []byte, f func(r int) bool) int {
+ return indexFunc(s, f, true)
+}
+
+// LastIndexFunc interprets s as a sequence of UTF-8-encoded Unicode code points.
+// It returns the byte index in s of the last Unicode
+// code point satisfying f(c), or -1 if none do.
+func LastIndexFunc(s []byte, f func(r int) bool) int {
+ return lastIndexFunc(s, f, true)
+}
+
+// indexFunc is the same as IndexFunc except that if
+// truth==false, the sense of the predicate function is
+// inverted.
+func indexFunc(s []byte, f func(r int) bool, truth bool) int {
+ start := 0
+ for start < len(s) {
+ wid := 1
+ rune := int(s[start])
+ if rune >= utf8.RuneSelf {
+ rune, wid = utf8.DecodeRune(s[start:])
+ }
+ if f(rune) == truth {
+ return start
+ }
+ start += wid
+ }
+ return -1
+}
+
+// lastIndexFunc is the same as LastIndexFunc except that if
+// truth==false, the sense of the predicate function is
+// inverted.
+func lastIndexFunc(s []byte, f func(r int) bool, truth bool) int {
+ for i := len(s); i > 0; {
+ rune, size := utf8.DecodeLastRune(s[0:i])
+ i -= size
+ if f(rune) == truth {
+ return i
+ }
+ }
+ return -1
+}
+
+func makeCutsetFunc(cutset string) func(rune int) bool {
+ return func(rune int) bool {
+ for _, c := range cutset {
+ if c == rune {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+// Trim returns a subslice of s by slicing off all leading and
+// trailing UTF-8-encoded Unicode code points contained in cutset.
+func Trim(s []byte, cutset string) []byte {
+ return TrimFunc(s, makeCutsetFunc(cutset))
+}
+
+// TrimLeft returns a subslice of s by slicing off all leading
+// UTF-8-encoded Unicode code points contained in cutset.
+func TrimLeft(s []byte, cutset string) []byte {
+ return TrimLeftFunc(s, makeCutsetFunc(cutset))
+}
+
+// TrimRight returns a subslice of s by slicing off all trailing
+// UTF-8-encoded Unicode code points that are contained in cutset.
+func TrimRight(s []byte, cutset string) []byte {
+ return TrimRightFunc(s, makeCutsetFunc(cutset))
+}
+
+// TrimSpace returns a subslice of s by slicing off all leading and
+// trailing white space, as defined by Unicode.
+func TrimSpace(s []byte) []byte {
+ return TrimFunc(s, unicode.IsSpace)
+}
+
+// Runes returns a slice of runes (Unicode code points) equivalent to s.
+func Runes(s []byte) []int {
+ t := make([]int, utf8.RuneCount(s))
+ i := 0
+ for len(s) > 0 {
+ r, l := utf8.DecodeRune(s)
+ t[i] = r
+ i++
+ s = s[l:]
+ }
+ return t
+}
+
+// Replace returns a copy of the slice s with the first n
+// non-overlapping instances of old replaced by new.
+// If n < 0, there is no limit on the number of replacements.
+func Replace(s, old, new []byte, n int) []byte {
+ if n == 0 {
+ return s // avoid allocation
+ }
+ // Compute number of replacements.
+ if m := Count(s, old); m == 0 {
+ return s // avoid allocation
+ } else if n <= 0 || m < n {
+ n = m
+ }
+
+ // Apply replacements to buffer.
+ t := make([]byte, len(s)+n*(len(new)-len(old)))
+ w := 0
+ start := 0
+ for i := 0; i < n; i++ {
+ j := start
+ if len(old) == 0 {
+ if i > 0 {
+ _, wid := utf8.DecodeRune(s[start:])
+ j += wid
+ }
+ } else {
+ j += Index(s[start:], old)
+ }
+ w += copy(t[w:], s[start:j])
+ w += copy(t[w:], new)
+ start = j + len(old)
+ }
+ w += copy(t[w:], s[start:])
+ return t[0:w]
+}
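
A brief sketch exercising a few of the functions defined above, assuming the pre-Go 1 signatures shown here (Split, SplitAfter, and Replace take an explicit count); not part of the imported tree:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	fmt.Printf("%q\n", bytes.Split([]byte("a,b,,c"), []byte(","), -1))    // ["a" "b" "" "c"]
	fmt.Printf("%q\n", bytes.SplitAfter([]byte("a,b,c"), []byte(","), 2)) // ["a," "b,c"]
	fmt.Printf("%q\n", bytes.Fields([]byte("  lots\tof  space ")))        // ["lots" "of" "space"]
	fmt.Printf("%q\n", bytes.TrimSpace([]byte("  padded  ")))             // "padded"
	fmt.Printf("%q\n", bytes.Replace([]byte("banana"), []byte("an"), []byte("AN"), 1)) // "bANana"
	fmt.Printf("%q\n", bytes.Title([]byte("double-blind")))               // "Double-Blind"
}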
diff --git a/libgo/go/bytes/bytes_decl.go b/libgo/go/bytes/bytes_decl.go
new file mode 100644
index 000000000..5d2b9e639
--- /dev/null
+++ b/libgo/go/bytes/bytes_decl.go
@@ -0,0 +1,8 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bytes
+
+// IndexByte returns the index of the first instance of c in s, or -1 if c is not present in s.
+func IndexByte(s []byte, c byte) int // asm_$GOARCH.s
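
The declaration above has no Go body: in this libgo tree it is satisfied by the C implementation in indexbyte.c at the end of this diff (bound through the asm name libgo_bytes.bytes.IndexByte), with indexBytePortable in bytes.go as the pure-Go reference. Calling it is ordinary Go; a minimal sketch:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	fmt.Println(bytes.IndexByte([]byte("barfoo"), 'f')) // 3
	fmt.Println(bytes.IndexByte([]byte("barfoo"), 'z')) // -1
}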
diff --git a/libgo/go/bytes/bytes_test.go b/libgo/go/bytes/bytes_test.go
new file mode 100644
index 000000000..063686ec5
--- /dev/null
+++ b/libgo/go/bytes/bytes_test.go
@@ -0,0 +1,844 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bytes_test
+
+import (
+ . "bytes"
+ "testing"
+ "unicode"
+ "utf8"
+)
+
+func eq(a, b []string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := 0; i < len(a); i++ {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func arrayOfString(a [][]byte) []string {
+ result := make([]string, len(a))
+ for j := 0; j < len(a); j++ {
+ result[j] = string(a[j])
+ }
+ return result
+}
+
+// For ease of reading, the test cases use strings that are converted to byte
+// arrays before invoking the functions.
+
+var abcd = "abcd"
+var faces = "☺☻☹"
+var commas = "1,2,3,4"
+var dots = "1....2....3....4"
+
+type BinOpTest struct {
+ a string
+ b string
+ i int
+}
+
+var comparetests = []BinOpTest{
+ {"", "", 0},
+ {"a", "", 1},
+ {"", "a", -1},
+ {"abc", "abc", 0},
+ {"ab", "abc", -1},
+ {"abc", "ab", 1},
+ {"x", "ab", 1},
+ {"ab", "x", -1},
+ {"x", "a", 1},
+ {"b", "x", -1},
+}
+
+func TestCompare(t *testing.T) {
+ for _, tt := range comparetests {
+ a := []byte(tt.a)
+ b := []byte(tt.b)
+ cmp := Compare(a, b)
+ eql := Equal(a, b)
+ if cmp != tt.i {
+ t.Errorf(`Compare(%q, %q) = %v`, tt.a, tt.b, cmp)
+ }
+ if eql != (tt.i == 0) {
+ t.Errorf(`Equal(%q, %q) = %v`, tt.a, tt.b, eql)
+ }
+ }
+}
+
+var indexTests = []BinOpTest{
+ {"", "", 0},
+ {"", "a", -1},
+ {"", "foo", -1},
+ {"fo", "foo", -1},
+ {"foo", "foo", 0},
+ {"oofofoofooo", "f", 2},
+ {"oofofoofooo", "foo", 4},
+ {"barfoobarfoo", "foo", 3},
+ {"foo", "", 0},
+ {"foo", "o", 1},
+ {"abcABCabc", "A", 3},
+ // cases with one byte strings - test IndexByte and special case in Index()
+ {"", "a", -1},
+ {"x", "a", -1},
+ {"x", "x", 0},
+ {"abc", "a", 0},
+ {"abc", "b", 1},
+ {"abc", "c", 2},
+ {"abc", "x", -1},
+ {"barfoobarfooyyyzzzyyyzzzyyyzzzyyyxxxzzzyyy", "x", 33},
+ {"foofyfoobarfoobar", "y", 4},
+ {"oooooooooooooooooooooo", "r", -1},
+}
+
+var lastIndexTests = []BinOpTest{
+ {"", "", 0},
+ {"", "a", -1},
+ {"", "foo", -1},
+ {"fo", "foo", -1},
+ {"foo", "foo", 0},
+ {"foo", "f", 0},
+ {"oofofoofooo", "f", 7},
+ {"oofofoofooo", "foo", 7},
+ {"barfoobarfoo", "foo", 9},
+ {"foo", "", 3},
+ {"foo", "o", 2},
+ {"abcABCabc", "A", 3},
+ {"abcABCabc", "a", 6},
+}
+
+var indexAnyTests = []BinOpTest{
+ {"", "", -1},
+ {"", "a", -1},
+ {"", "abc", -1},
+ {"a", "", -1},
+ {"a", "a", 0},
+ {"aaa", "a", 0},
+ {"abc", "xyz", -1},
+ {"abc", "xcz", 2},
+ {"ab☺c", "x☺yz", 2},
+ {"aRegExp*", ".(|)*+?^$[]", 7},
+ {dots + dots + dots, " ", -1},
+}
+
+var lastIndexAnyTests = []BinOpTest{
+ {"", "", -1},
+ {"", "a", -1},
+ {"", "abc", -1},
+ {"a", "", -1},
+ {"a", "a", 0},
+ {"aaa", "a", 2},
+ {"abc", "xyz", -1},
+ {"abc", "ab", 1},
+ {"a☺b☻c☹d", "uvw☻xyz", 2 + len("☺")},
+ {"a.RegExp*", ".(|)*+?^$[]", 8},
+ {dots + dots + dots, " ", -1},
+}
+
+var indexRuneTests = []BinOpTest{
+ {"", "a", -1},
+ {"", "☺", -1},
+ {"foo", "☹", -1},
+ {"foo", "o", 1},
+ {"foo☺bar", "☺", 3},
+ {"foo☺☻☹bar", "☹", 9},
+}
+
+// Execute f on each test case. funcName should be the name of f; it's used
+// in failure reports.
+func runIndexTests(t *testing.T, f func(s, sep []byte) int, funcName string, testCases []BinOpTest) {
+ for _, test := range testCases {
+ a := []byte(test.a)
+ b := []byte(test.b)
+ actual := f(a, b)
+ if actual != test.i {
+ t.Errorf("%s(%q,%q) = %v; want %v", funcName, a, b, actual, test.i)
+ }
+ }
+}
+
+func runIndexAnyTests(t *testing.T, f func(s []byte, chars string) int, funcName string, testCases []BinOpTest) {
+ for _, test := range testCases {
+ a := []byte(test.a)
+ actual := f(a, test.b)
+ if actual != test.i {
+ t.Errorf("%s(%q,%q) = %v; want %v", funcName, a, test.b, actual, test.i)
+ }
+ }
+}
+
+func TestIndex(t *testing.T) { runIndexTests(t, Index, "Index", indexTests) }
+func TestLastIndex(t *testing.T) { runIndexTests(t, LastIndex, "LastIndex", lastIndexTests) }
+func TestIndexAny(t *testing.T) { runIndexAnyTests(t, IndexAny, "IndexAny", indexAnyTests) }
+func TestLastIndexAny(t *testing.T) {
+ runIndexAnyTests(t, LastIndexAny, "LastIndexAny", lastIndexAnyTests)
+}
+
+func TestIndexByte(t *testing.T) {
+ for _, tt := range indexTests {
+ if len(tt.b) != 1 {
+ continue
+ }
+ a := []byte(tt.a)
+ b := tt.b[0]
+ pos := IndexByte(a, b)
+ if pos != tt.i {
+ t.Errorf(`IndexByte(%q, '%c') = %v`, tt.a, b, pos)
+ }
+ posp := IndexBytePortable(a, b)
+ if posp != tt.i {
+ t.Errorf(`indexBytePortable(%q, '%c') = %v`, tt.a, b, posp)
+ }
+ }
+}
+
+// test a larger buffer with different sizes and alignments
+func TestIndexByteBig(t *testing.T) {
+ const n = 1024
+ b := make([]byte, n)
+ for i := 0; i < n; i++ {
+ // different start alignments
+ b1 := b[i:]
+ for j := 0; j < len(b1); j++ {
+ b1[j] = 'x'
+ pos := IndexByte(b1, 'x')
+ if pos != j {
+ t.Errorf("IndexByte(%q, 'x') = %v", b1, pos)
+ }
+ b1[j] = 0
+ pos = IndexByte(b1, 'x')
+ if pos != -1 {
+ t.Errorf("IndexByte(%q, 'x') = %v", b1, pos)
+ }
+ }
+ // different end alignments
+ b1 = b[:i]
+ for j := 0; j < len(b1); j++ {
+ b1[j] = 'x'
+ pos := IndexByte(b1, 'x')
+ if pos != j {
+ t.Errorf("IndexByte(%q, 'x') = %v", b1, pos)
+ }
+ b1[j] = 0
+ pos = IndexByte(b1, 'x')
+ if pos != -1 {
+ t.Errorf("IndexByte(%q, 'x') = %v", b1, pos)
+ }
+ }
+ // different start and end alignments
+ b1 = b[i/2 : n-(i+1)/2]
+ for j := 0; j < len(b1); j++ {
+ b1[j] = 'x'
+ pos := IndexByte(b1, 'x')
+ if pos != j {
+ t.Errorf("IndexByte(%q, 'x') = %v", b1, pos)
+ }
+ b1[j] = 0
+ pos = IndexByte(b1, 'x')
+ if pos != -1 {
+ t.Errorf("IndexByte(%q, 'x') = %v", b1, pos)
+ }
+ }
+ }
+}
+
+func TestIndexRune(t *testing.T) {
+ for _, tt := range indexRuneTests {
+ a := []byte(tt.a)
+ r, _ := utf8.DecodeRuneInString(tt.b)
+ pos := IndexRune(a, r)
+ if pos != tt.i {
+ t.Errorf(`IndexRune(%q, '%c') = %v`, tt.a, r, pos)
+ }
+ }
+}
+
+func BenchmarkIndexByte4K(b *testing.B) { bmIndex(b, IndexByte, 4<<10) }
+
+func BenchmarkIndexByte4M(b *testing.B) { bmIndex(b, IndexByte, 4<<20) }
+
+func BenchmarkIndexByte64M(b *testing.B) { bmIndex(b, IndexByte, 64<<20) }
+
+func BenchmarkIndexBytePortable4K(b *testing.B) {
+ bmIndex(b, IndexBytePortable, 4<<10)
+}
+
+func BenchmarkIndexBytePortable4M(b *testing.B) {
+ bmIndex(b, IndexBytePortable, 4<<20)
+}
+
+func BenchmarkIndexBytePortable64M(b *testing.B) {
+ bmIndex(b, IndexBytePortable, 64<<20)
+}
+
+var bmbuf []byte
+
+func bmIndex(b *testing.B, index func([]byte, byte) int, n int) {
+ if len(bmbuf) < n {
+ bmbuf = make([]byte, n)
+ }
+ b.SetBytes(int64(n))
+ buf := bmbuf[0:n]
+ buf[n-1] = 'x'
+ for i := 0; i < b.N; i++ {
+ j := index(buf, 'x')
+ if j != n-1 {
+ println("bad index", j)
+ panic("bad index")
+ }
+ }
+ buf[n-1] = '0'
+}
+
+type ExplodeTest struct {
+ s string
+ n int
+ a []string
+}
+
+var explodetests = []ExplodeTest{
+ {"", -1, []string{}},
+ {abcd, -1, []string{"a", "b", "c", "d"}},
+ {faces, -1, []string{"☺", "☻", "☹"}},
+ {abcd, 2, []string{"a", "bcd"}},
+}
+
+func TestExplode(t *testing.T) {
+ for _, tt := range explodetests {
+ a := Split([]byte(tt.s), nil, tt.n)
+ result := arrayOfString(a)
+ if !eq(result, tt.a) {
+ t.Errorf(`Explode("%s", %d) = %v; want %v`, tt.s, tt.n, result, tt.a)
+ continue
+ }
+ s := Join(a, []byte{})
+ if string(s) != tt.s {
+ t.Errorf(`Join(Explode("%s", %d), "") = "%s"`, tt.s, tt.n, s)
+ }
+ }
+}
+
+
+type SplitTest struct {
+ s string
+ sep string
+ n int
+ a []string
+}
+
+var splittests = []SplitTest{
+ {abcd, "a", 0, nil},
+ {abcd, "a", -1, []string{"", "bcd"}},
+ {abcd, "z", -1, []string{"abcd"}},
+ {abcd, "", -1, []string{"a", "b", "c", "d"}},
+ {commas, ",", -1, []string{"1", "2", "3", "4"}},
+ {dots, "...", -1, []string{"1", ".2", ".3", ".4"}},
+ {faces, "☹", -1, []string{"☺☻", ""}},
+ {faces, "~", -1, []string{faces}},
+ {faces, "", -1, []string{"☺", "☻", "☹"}},
+ {"1 2 3 4", " ", 3, []string{"1", "2", "3 4"}},
+ {"1 2", " ", 3, []string{"1", "2"}},
+ {"123", "", 2, []string{"1", "23"}},
+ {"123", "", 17, []string{"1", "2", "3"}},
+}
+
+func TestSplit(t *testing.T) {
+ for _, tt := range splittests {
+ a := Split([]byte(tt.s), []byte(tt.sep), tt.n)
+ result := arrayOfString(a)
+ if !eq(result, tt.a) {
+ t.Errorf(`Split(%q, %q, %d) = %v; want %v`, tt.s, tt.sep, tt.n, result, tt.a)
+ continue
+ }
+ if tt.n == 0 {
+ continue
+ }
+ s := Join(a, []byte(tt.sep))
+ if string(s) != tt.s {
+ t.Errorf(`Join(Split(%q, %q, %d), %q) = %q`, tt.s, tt.sep, tt.n, tt.sep, s)
+ }
+ }
+}
+
+var splitaftertests = []SplitTest{
+ {abcd, "a", -1, []string{"a", "bcd"}},
+ {abcd, "z", -1, []string{"abcd"}},
+ {abcd, "", -1, []string{"a", "b", "c", "d"}},
+ {commas, ",", -1, []string{"1,", "2,", "3,", "4"}},
+ {dots, "...", -1, []string{"1...", ".2...", ".3...", ".4"}},
+ {faces, "☹", -1, []string{"☺☻☹", ""}},
+ {faces, "~", -1, []string{faces}},
+ {faces, "", -1, []string{"☺", "☻", "☹"}},
+ {"1 2 3 4", " ", 3, []string{"1 ", "2 ", "3 4"}},
+ {"1 2 3", " ", 3, []string{"1 ", "2 ", "3"}},
+ {"1 2", " ", 3, []string{"1 ", "2"}},
+ {"123", "", 2, []string{"1", "23"}},
+ {"123", "", 17, []string{"1", "2", "3"}},
+}
+
+func TestSplitAfter(t *testing.T) {
+ for _, tt := range splitaftertests {
+ a := SplitAfter([]byte(tt.s), []byte(tt.sep), tt.n)
+ result := arrayOfString(a)
+ if !eq(result, tt.a) {
+ t.Errorf(`Split(%q, %q, %d) = %v; want %v`, tt.s, tt.sep, tt.n, result, tt.a)
+ continue
+ }
+ s := Join(a, nil)
+ if string(s) != tt.s {
+ t.Errorf(`Join(Split(%q, %q, %d), %q) = %q`, tt.s, tt.sep, tt.n, tt.sep, s)
+ }
+ }
+}
+
+type FieldsTest struct {
+ s string
+ a []string
+}
+
+var fieldstests = []FieldsTest{
+ {"", []string{}},
+ {" ", []string{}},
+ {" \t ", []string{}},
+ {" abc ", []string{"abc"}},
+ {"1 2 3 4", []string{"1", "2", "3", "4"}},
+ {"1 2 3 4", []string{"1", "2", "3", "4"}},
+ {"1\t\t2\t\t3\t4", []string{"1", "2", "3", "4"}},
+ {"1\u20002\u20013\u20024", []string{"1", "2", "3", "4"}},
+ {"\u2000\u2001\u2002", []string{}},
+ {"\n™\t™\n", []string{"™", "™"}},
+ {faces, []string{faces}},
+}
+
+func TestFields(t *testing.T) {
+ for _, tt := range fieldstests {
+ a := Fields([]byte(tt.s))
+ result := arrayOfString(a)
+ if !eq(result, tt.a) {
+ t.Errorf("Fields(%q) = %v; want %v", tt.s, a, tt.a)
+ continue
+ }
+ }
+}
+
+func TestFieldsFunc(t *testing.T) {
+ pred := func(c int) bool { return c == 'X' }
+ var fieldsFuncTests = []FieldsTest{
+ {"", []string{}},
+ {"XX", []string{}},
+ {"XXhiXXX", []string{"hi"}},
+ {"aXXbXXXcX", []string{"a", "b", "c"}},
+ }
+ for _, tt := range fieldsFuncTests {
+ a := FieldsFunc([]byte(tt.s), pred)
+ result := arrayOfString(a)
+ if !eq(result, tt.a) {
+ t.Errorf("FieldsFunc(%q) = %v, want %v", tt.s, a, tt.a)
+ }
+ }
+}
+
+// Test case for any function which accepts and returns a byte array.
+// For ease of creation, we write the byte arrays as strings.
+type StringTest struct {
+ in, out string
+}
+
+var upperTests = []StringTest{
+ {"", ""},
+ {"abc", "ABC"},
+ {"AbC123", "ABC123"},
+ {"azAZ09_", "AZAZ09_"},
+ {"\u0250\u0250\u0250\u0250\u0250", "\u2C6F\u2C6F\u2C6F\u2C6F\u2C6F"}, // grows one byte per char
+}
+
+var lowerTests = []StringTest{
+ {"", ""},
+ {"abc", "abc"},
+ {"AbC123", "abc123"},
+ {"azAZ09_", "azaz09_"},
+ {"\u2C6D\u2C6D\u2C6D\u2C6D\u2C6D", "\u0251\u0251\u0251\u0251\u0251"}, // shrinks one byte per char
+}
+
+const space = "\t\v\r\f\n\u0085\u00a0\u2000\u3000"
+
+var trimSpaceTests = []StringTest{
+ {"", ""},
+ {"abc", "abc"},
+ {space + "abc" + space, "abc"},
+ {" ", ""},
+ {" \t\r\n \t\t\r\r\n\n ", ""},
+ {" \t\r\n x\t\t\r\r\n\n ", "x"},
+ {" \u2000\t\r\n x\t\t\r\r\ny\n \u3000", "x\t\t\r\r\ny"},
+ {"1 \t\r\n2", "1 \t\r\n2"},
+ {" x\x80", "x\x80"},
+ {" x\xc0", "x\xc0"},
+ {"x \xc0\xc0 ", "x \xc0\xc0"},
+ {"x \xc0", "x \xc0"},
+ {"x \xc0 ", "x \xc0"},
+ {"x \xc0\xc0 ", "x \xc0\xc0"},
+ {"x ☺\xc0\xc0 ", "x ☺\xc0\xc0"},
+ {"x ☺ ", "x ☺"},
+}
+
+// Execute f on each test case. funcName should be the name of f; it's used
+// in failure reports.
+func runStringTests(t *testing.T, f func([]byte) []byte, funcName string, testCases []StringTest) {
+ for _, tc := range testCases {
+ actual := string(f([]byte(tc.in)))
+ if actual != tc.out {
+ t.Errorf("%s(%q) = %q; want %q", funcName, tc.in, actual, tc.out)
+ }
+ }
+}
+
+func tenRunes(rune int) string {
+ r := make([]int, 10)
+ for i := range r {
+ r[i] = rune
+ }
+ return string(r)
+}
+
+// User-defined self-inverse mapping function
+func rot13(rune int) int {
+ step := 13
+ if rune >= 'a' && rune <= 'z' {
+ return ((rune - 'a' + step) % 26) + 'a'
+ }
+ if rune >= 'A' && rune <= 'Z' {
+ return ((rune - 'A' + step) % 26) + 'A'
+ }
+ return rune
+}
+
+func TestMap(t *testing.T) {
+ // Run a couple of awful growth/shrinkage tests
+ a := tenRunes('a')
+
+ // 1. Grow. This triggers two reallocations in Map.
+ maxRune := func(rune int) int { return unicode.MaxRune }
+ m := Map(maxRune, []byte(a))
+ expect := tenRunes(unicode.MaxRune)
+ if string(m) != expect {
+ t.Errorf("growing: expected %q got %q", expect, m)
+ }
+
+ // 2. Shrink
+ minRune := func(rune int) int { return 'a' }
+ m = Map(minRune, []byte(tenRunes(unicode.MaxRune)))
+ expect = a
+ if string(m) != expect {
+ t.Errorf("shrinking: expected %q got %q", expect, m)
+ }
+
+ // 3. Rot13
+ m = Map(rot13, []byte("a to zed"))
+ expect = "n gb mrq"
+ if string(m) != expect {
+ t.Errorf("rot13: expected %q got %q", expect, m)
+ }
+
+ // 4. Rot13^2
+ m = Map(rot13, Map(rot13, []byte("a to zed")))
+ expect = "a to zed"
+ if string(m) != expect {
+ t.Errorf("rot13: expected %q got %q", expect, m)
+ }
+
+ // 5. Drop
+ dropNotLatin := func(rune int) int {
+ if unicode.Is(unicode.Latin, rune) {
+ return rune
+ }
+ return -1
+ }
+ m = Map(dropNotLatin, []byte("Hello, 세계"))
+ expect = "Hello"
+ if string(m) != expect {
+ t.Errorf("drop: expected %q got %q", expect, m)
+ }
+}
+
+func TestToUpper(t *testing.T) { runStringTests(t, ToUpper, "ToUpper", upperTests) }
+
+func TestToLower(t *testing.T) { runStringTests(t, ToLower, "ToLower", lowerTests) }
+
+func TestTrimSpace(t *testing.T) { runStringTests(t, TrimSpace, "TrimSpace", trimSpaceTests) }
+
+type RepeatTest struct {
+ in, out string
+ count int
+}
+
+var RepeatTests = []RepeatTest{
+ {"", "", 0},
+ {"", "", 1},
+ {"", "", 2},
+ {"-", "", 0},
+ {"-", "-", 1},
+ {"-", "----------", 10},
+ {"abc ", "abc abc abc ", 3},
+}
+
+func TestRepeat(t *testing.T) {
+ for _, tt := range RepeatTests {
+ tin := []byte(tt.in)
+ tout := []byte(tt.out)
+ a := Repeat(tin, tt.count)
+ if !Equal(a, tout) {
+ t.Errorf("Repeat(%q, %d) = %q; want %q", tin, tt.count, a, tout)
+ continue
+ }
+ }
+}
+
+func runesEqual(a, b []int) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i, r := range a {
+ if r != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+type RunesTest struct {
+ in string
+ out []int
+ lossy bool
+}
+
+var RunesTests = []RunesTest{
+ {"", []int{}, false},
+ {" ", []int{32}, false},
+ {"ABC", []int{65, 66, 67}, false},
+ {"abc", []int{97, 98, 99}, false},
+ {"\u65e5\u672c\u8a9e", []int{26085, 26412, 35486}, false},
+ {"ab\x80c", []int{97, 98, 0xFFFD, 99}, true},
+ {"ab\xc0c", []int{97, 98, 0xFFFD, 99}, true},
+}
+
+func TestRunes(t *testing.T) {
+ for _, tt := range RunesTests {
+ tin := []byte(tt.in)
+ a := Runes(tin)
+ if !runesEqual(a, tt.out) {
+ t.Errorf("Runes(%q) = %v; want %v", tin, a, tt.out)
+ continue
+ }
+ if !tt.lossy {
+ // can only test reassembly if we didn't lose information
+ s := string(a)
+ if s != tt.in {
+ t.Errorf("string(Runes(%q)) = %x; want %x", tin, s, tin)
+ }
+ }
+ }
+}
+
+
+type TrimTest struct {
+ f func([]byte, string) []byte
+ in, cutset, out string
+}
+
+var trimTests = []TrimTest{
+ {Trim, "abba", "a", "bb"},
+ {Trim, "abba", "ab", ""},
+ {TrimLeft, "abba", "ab", ""},
+ {TrimRight, "abba", "ab", ""},
+ {TrimLeft, "abba", "a", "bba"},
+ {TrimRight, "abba", "a", "abb"},
+ {Trim, "<tag>", "<>", "tag"},
+ {Trim, "* listitem", " *", "listitem"},
+ {Trim, `"quote"`, `"`, "quote"},
+ {Trim, "\u2C6F\u2C6F\u0250\u0250\u2C6F\u2C6F", "\u2C6F", "\u0250\u0250"},
+ //empty string tests
+ {Trim, "abba", "", "abba"},
+ {Trim, "", "123", ""},
+ {Trim, "", "", ""},
+ {TrimLeft, "abba", "", "abba"},
+ {TrimLeft, "", "123", ""},
+ {TrimLeft, "", "", ""},
+ {TrimRight, "abba", "", "abba"},
+ {TrimRight, "", "123", ""},
+ {TrimRight, "", "", ""},
+ {TrimRight, "☺\xc0", "☺", "☺\xc0"},
+}
+
+func TestTrim(t *testing.T) {
+ for _, tc := range trimTests {
+ actual := string(tc.f([]byte(tc.in), tc.cutset))
+ var name string
+ switch tc.f {
+ case Trim:
+ name = "Trim"
+ case TrimLeft:
+ name = "TrimLeft"
+ case TrimRight:
+ name = "TrimRight"
+ default:
+ t.Error("Undefined trim function")
+ }
+ if actual != tc.out {
+ t.Errorf("%s(%q, %q) = %q; want %q", name, tc.in, tc.cutset, actual, tc.out)
+ }
+ }
+}
+
+type predicate struct {
+ f func(r int) bool
+ name string
+}
+
+var isSpace = predicate{unicode.IsSpace, "IsSpace"}
+var isDigit = predicate{unicode.IsDigit, "IsDigit"}
+var isUpper = predicate{unicode.IsUpper, "IsUpper"}
+var isValidRune = predicate{
+ func(r int) bool {
+ return r != utf8.RuneError
+ },
+ "IsValidRune",
+}
+
+type TrimFuncTest struct {
+ f predicate
+ in, out string
+}
+
+func not(p predicate) predicate {
+ return predicate{
+ func(r int) bool {
+ return !p.f(r)
+ },
+ "not " + p.name,
+ }
+}
+
+var trimFuncTests = []TrimFuncTest{
+ {isSpace, space + " hello " + space, "hello"},
+ {isDigit, "\u0e50\u0e5212hello34\u0e50\u0e51", "hello"},
+ {isUpper, "\u2C6F\u2C6F\u2C6F\u2C6FABCDhelloEF\u2C6F\u2C6FGH\u2C6F\u2C6F", "hello"},
+ {not(isSpace), "hello" + space + "hello", space},
+ {not(isDigit), "hello\u0e50\u0e521234\u0e50\u0e51helo", "\u0e50\u0e521234\u0e50\u0e51"},
+ {isValidRune, "ab\xc0a\xc0cd", "\xc0a\xc0"},
+ {not(isValidRune), "\xc0a\xc0", "a"},
+}
+
+func TestTrimFunc(t *testing.T) {
+ for _, tc := range trimFuncTests {
+ actual := string(TrimFunc([]byte(tc.in), tc.f.f))
+ if actual != tc.out {
+ t.Errorf("TrimFunc(%q, %q) = %q; want %q", tc.in, tc.f.name, actual, tc.out)
+ }
+ }
+}
+
+type IndexFuncTest struct {
+ in string
+ f predicate
+ first, last int
+}
+
+var indexFuncTests = []IndexFuncTest{
+ {"", isValidRune, -1, -1},
+ {"abc", isDigit, -1, -1},
+ {"0123", isDigit, 0, 3},
+ {"a1b", isDigit, 1, 1},
+ {space, isSpace, 0, len(space) - 3}, // last rune in space is 3 bytes
+ {"\u0e50\u0e5212hello34\u0e50\u0e51", isDigit, 0, 18},
+ {"\u2C6F\u2C6F\u2C6F\u2C6FABCDhelloEF\u2C6F\u2C6FGH\u2C6F\u2C6F", isUpper, 0, 34},
+ {"12\u0e50\u0e52hello34\u0e50\u0e51", not(isDigit), 8, 12},
+
+ // tests of invalid UTF-8
+ {"\x801", isDigit, 1, 1},
+ {"\x80abc", isDigit, -1, -1},
+ {"\xc0a\xc0", isValidRune, 1, 1},
+ {"\xc0a\xc0", not(isValidRune), 0, 2},
+ {"\xc0☺\xc0", not(isValidRune), 0, 4},
+ {"\xc0☺\xc0\xc0", not(isValidRune), 0, 5},
+ {"ab\xc0a\xc0cd", not(isValidRune), 2, 4},
+ {"a\xe0\x80cd", not(isValidRune), 1, 2},
+}
+
+func TestIndexFunc(t *testing.T) {
+ for _, tc := range indexFuncTests {
+ first := IndexFunc([]byte(tc.in), tc.f.f)
+ if first != tc.first {
+ t.Errorf("IndexFunc(%q, %s) = %d; want %d", tc.in, tc.f.name, first, tc.first)
+ }
+ last := LastIndexFunc([]byte(tc.in), tc.f.f)
+ if last != tc.last {
+ t.Errorf("LastIndexFunc(%q, %s) = %d; want %d", tc.in, tc.f.name, last, tc.last)
+ }
+ }
+}
+
+type ReplaceTest struct {
+ in string
+ old, new string
+ n int
+ out string
+}
+
+var ReplaceTests = []ReplaceTest{
+ {"hello", "l", "L", 0, "hello"},
+ {"hello", "l", "L", -1, "heLLo"},
+ {"hello", "x", "X", -1, "hello"},
+ {"", "x", "X", -1, ""},
+ {"radar", "r", "<r>", -1, "<r>ada<r>"},
+ {"", "", "<>", -1, "<>"},
+ {"banana", "a", "<>", -1, "b<>n<>n<>"},
+ {"banana", "a", "<>", 1, "b<>nana"},
+ {"banana", "a", "<>", 1000, "b<>n<>n<>"},
+ {"banana", "an", "<>", -1, "b<><>a"},
+ {"banana", "ana", "<>", -1, "b<>na"},
+ {"banana", "", "<>", -1, "<>b<>a<>n<>a<>n<>a<>"},
+ {"banana", "", "<>", 10, "<>b<>a<>n<>a<>n<>a<>"},
+ {"banana", "", "<>", 6, "<>b<>a<>n<>a<>n<>a"},
+ {"banana", "", "<>", 5, "<>b<>a<>n<>a<>na"},
+ {"banana", "", "<>", 1, "<>banana"},
+ {"banana", "a", "a", -1, "banana"},
+ {"banana", "a", "a", 1, "banana"},
+ {"☺☻☹", "", "<>", -1, "<>☺<>☻<>☹<>"},
+}
+
+func TestReplace(t *testing.T) {
+ for _, tt := range ReplaceTests {
+ if s := string(Replace([]byte(tt.in), []byte(tt.old), []byte(tt.new), tt.n)); s != tt.out {
+ t.Errorf("Replace(%q, %q, %q, %d) = %q, want %q", tt.in, tt.old, tt.new, tt.n, s, tt.out)
+ }
+ }
+}
+
+type TitleTest struct {
+ in, out string
+}
+
+var TitleTests = []TitleTest{
+ {"", ""},
+ {"a", "A"},
+ {" aaa aaa aaa ", " Aaa Aaa Aaa "},
+ {" Aaa Aaa Aaa ", " Aaa Aaa Aaa "},
+ {"123a456", "123a456"},
+ {"double-blind", "Double-Blind"},
+ {"ÿøû", "Ÿøû"},
+}
+
+func TestTitle(t *testing.T) {
+ for _, tt := range TitleTests {
+ if s := string(Title([]byte(tt.in))); s != tt.out {
+ t.Errorf("Title(%q) = %q, want %q", tt.in, s, tt.out)
+ }
+ }
+}
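
The predicate-driven helpers covered by the tests above (TrimFunc, IndexFunc, LastIndexFunc) all take a func(r int) bool in this era, so the unicode predicates can be passed directly; a minimal sketch using the same digit-heavy input as trimFuncTests and indexFuncTests:

package main

import (
	"bytes"
	"fmt"
	"unicode"
)

func main() {
	s := []byte("\u0e50\u0e5212hello34\u0e50\u0e51")
	fmt.Printf("%q\n", bytes.TrimFunc(s, unicode.IsDigit)) // "hello"
	fmt.Println(bytes.IndexFunc(s, unicode.IsDigit))       // 0
	fmt.Println(bytes.LastIndexFunc(s, unicode.IsDigit))   // 18
}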
diff --git a/libgo/go/bytes/export_test.go b/libgo/go/bytes/export_test.go
new file mode 100644
index 000000000..b65428d9c
--- /dev/null
+++ b/libgo/go/bytes/export_test.go
@@ -0,0 +1,8 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bytes
+
+// Export func for testing
+var IndexBytePortable = indexBytePortable
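
export_test.go is compiled into package bytes only when the package's own tests are built, which is what lets the external bytes_test package above reach the unexported indexBytePortable as IndexBytePortable. A sketch of the same pattern with hypothetical names (mylib, lower, and Lower are illustrative only):

// mylib/mylib.go
package mylib

// lower is an unexported helper we want to exercise from external tests.
func lower(b byte) byte {
	if 'A' <= b && b <= 'Z' {
		return b + 'a' - 'A'
	}
	return b
}

// mylib/export_test.go -- built only alongside the package's tests
package mylib

var Lower = lower

// mylib/lower_test.go
package mylib_test

import (
	"mylib"
	"testing"
)

func TestLower(t *testing.T) {
	if mylib.Lower('G') != 'g' {
		t.Error("Lower('G') != 'g'")
	}
}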
diff --git a/libgo/go/bytes/indexbyte.c b/libgo/go/bytes/indexbyte.c
new file mode 100644
index 000000000..a0a963e93
--- /dev/null
+++ b/libgo/go/bytes/indexbyte.c
@@ -0,0 +1,28 @@
+/* indexbyte.c -- implement bytes.IndexByte for Go.
+
+ Copyright 2009 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file. */
+
+#include <stddef.h>
+
+#include "array.h"
+
+/* This is in C so that the compiler can optimize it appropriately.
+ We deliberately don't split the stack in case it does call the
+ library function, which shouldn't need much stack space. */
+
+int IndexByte (struct __go_open_array, char)
+ asm ("libgo_bytes.bytes.IndexByte")
+ __attribute__ ((no_split_stack));
+
+int
+IndexByte (struct __go_open_array s, char b)
+{
+ char *p;
+
+ p = __builtin_memchr (s.__values, b, s.__count);
+ if (p == NULL)
+ return -1;
+ return p - (char *) s.__values;
+}