path: root/libgo/runtime/go-go.c
/* go-go.c -- the go function.

   Copyright 2009 The Go Authors. All rights reserved.
   Use of this source code is governed by a BSD-style
   license that can be found in the LICENSE file.  */

#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdint.h>
#include <stdlib.h>
#include <pthread.h>
#include <semaphore.h>

#include "config.h"
#include "go-assert.h"
#include "go-panic.h"
#include "go-alloc.h"
#include "runtime.h"
#include "malloc.h"

#ifdef USING_SPLIT_STACK
/* FIXME: This is not declared anywhere.  */
extern void *__splitstack_find (void *, void *, size_t *, void **, void **,
				void **);
#endif

/* We stop the threads by sending them the signal GO_SIG_STOP and we
   start them by sending them the signal GO_SIG_START.  */

#define GO_SIG_START (SIGRTMIN + 1)
#define GO_SIG_STOP (SIGRTMIN + 2)
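/* The collector delivers these signals with pthread_kill (see
   runtime_stoptheworld and runtime_starttheworld below): GO_SIG_STOP
   asks a thread to park in stop_for_gc, and GO_SIG_START wakes it
   from the sigsuspend there.  */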

#ifndef SA_RESTART
  #define SA_RESTART 0
#endif

/* A doubly linked list of the threads we have started.  */

struct __go_thread_id
{
  /* Links.  */
  struct __go_thread_id *prev;
  struct __go_thread_id *next;
  /* True if the thread ID has not yet been filled in.  */
  _Bool tentative;
  /* Thread ID.  */
  pthread_t id;
  /* Thread's M structure.  */
  struct M *m;
  /* If the thread ID has not been filled in, the function we are
     running.  */
  void (*pfn) (void *);
  /* If the thread ID has not been filled in, the argument to the
     function.  */
  void *arg;
};

static struct __go_thread_id *__go_all_thread_ids;

/* A lock to control access to ALL_THREAD_IDS.  */

static pthread_mutex_t __go_thread_ids_lock = PTHREAD_MUTEX_INITIALIZER;

/* A semaphore used to wait until all the threads have stopped.  */

static sem_t __go_thread_ready_sem;

/* A signal set used to wait until garbage collection is complete.  */

static sigset_t __go_thread_wait_sigset;
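/* The stop-the-world handshake, as implemented below: the collector
   sends GO_SIG_STOP to every other thread, each thread records its
   stack bounds in its M and posts __go_thread_ready_sem, then blocks
   in sigsuspend on __go_thread_wait_sigset until the collector sends
   GO_SIG_START.  */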

/* Remove the current thread from the list of threads.  */

static void
remove_current_thread (void)
{
  struct __go_thread_id *list_entry;
  MCache *mcache;
  int i;
  
  list_entry = m->list_entry;
  mcache = m->mcache;

  i = pthread_mutex_lock (&__go_thread_ids_lock);
  __go_assert (i == 0);

  if (list_entry->prev != NULL)
    list_entry->prev->next = list_entry->next;
  else
    __go_all_thread_ids = list_entry->next;
  if (list_entry->next != NULL)
    list_entry->next->prev = list_entry->prev;

  /* This will lock runtime_mheap as needed.  */
  runtime_MCache_ReleaseAll (mcache);

  /* This should never deadlock--there shouldn't be any code that
     holds the runtime_mheap lock when locking __go_thread_ids_lock.
     We don't want to do this after releasing __go_thread_ids_lock
     because it will mean that the garbage collector might run, and
     the garbage collector does not try to lock runtime_mheap in all
     cases since it knows it is running single-threaded.  */
  runtime_lock (&runtime_mheap);
  mstats.heap_alloc += mcache->local_alloc;
  mstats.heap_objects += mcache->local_objects;
  __builtin_memset (mcache, 0, sizeof (struct MCache));
  runtime_FixAlloc_Free (&runtime_mheap.cachealloc, mcache);
  runtime_unlock (&runtime_mheap);

  /* As soon as we release this lock, a GC could run.  Since this
     thread is no longer on the list, the GC will not find our M
     structure, so it could get freed at any time.  That means that
     any code from here to thread exit must not assume that m is
     valid.  */
  m = NULL;

  i = pthread_mutex_unlock (&__go_thread_ids_lock);
  __go_assert (i == 0);

  free (list_entry);
}

/* Start the thread.  */

static void *
start_go_thread (void *thread_arg)
{
  struct M *newm = (struct M *) thread_arg;
  void (*pfn) (void *);
  void *arg;
  struct __go_thread_id *list_entry;
  int i;

#ifdef __rtems__
  __wrap_rtems_task_variable_add ((void **) &m);
  __wrap_rtems_task_variable_add ((void **) &__go_panic_defer);
#endif

  m = newm;

  list_entry = newm->list_entry;

  pfn = list_entry->pfn;
  arg = list_entry->arg;

#ifndef USING_SPLIT_STACK
  /* If we don't support split stack, record the current stack as the
     top of the stack.  There shouldn't be anything relevant to the
     garbage collector above this point.  */
  m->gc_sp = (void *) &arg;
#endif

  /* Finish up the entry on the thread list.  */

  i = pthread_mutex_lock (&__go_thread_ids_lock);
  __go_assert (i == 0);

  list_entry->id = pthread_self ();
  list_entry->pfn = NULL;
  list_entry->arg = NULL;
  list_entry->tentative = 0;
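  /* Once tentative is cleared, __go_scanstacks no longer scans the
     pfn and arg fields of this entry; those values were copied above
     into locals on this thread's stack, which the collector scans
     instead.  */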

  i = pthread_mutex_unlock (&__go_thread_ids_lock);
  __go_assert (i == 0);

  (*pfn) (arg);

  remove_current_thread ();

  return NULL;
}

/* The runtime.Goexit function.  */

void Goexit (void) asm ("libgo_runtime.runtime.Goexit");

void
Goexit (void)
{
  remove_current_thread ();
  pthread_exit (NULL);
  abort ();
}
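/* Note that in this thread-per-goroutine runtime, Goexit simply
   removes the current thread from the runtime's bookkeeping and exits
   the underlying pthread; the abort call should never be reached,
   since pthread_exit does not return.  */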

/* Implement the go statement.  */

void
__go_go (void (*pfn) (void*), void *arg)
{
  int i;
  pthread_attr_t attr;
  struct M *newm;
  struct __go_thread_id *list_entry;
  pthread_t tid;

  i = pthread_attr_init (&attr);
  __go_assert (i == 0);
  i = pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
  __go_assert (i == 0);

#ifdef LINKER_SUPPORTS_SPLIT_STACK
  /* The linker knows how to handle calls between code which uses
     -fsplit-stack and code which does not.  That means that we can
     run with a smaller stack and rely on the -fsplit-stack support to
     save us.  The GNU/Linux glibc library won't let us have a very
     small stack, but we make it as small as we can.  */
#ifndef PTHREAD_STACK_MIN
#define PTHREAD_STACK_MIN 8192
#endif
  i = pthread_attr_setstacksize (&attr, PTHREAD_STACK_MIN);
  __go_assert (i == 0);
#endif

  newm = __go_alloc (sizeof (M));

  list_entry = malloc (sizeof (struct __go_thread_id));
  list_entry->prev = NULL;
  list_entry->next = NULL;
  list_entry->tentative = 1;
  list_entry->m = newm;
  list_entry->pfn = pfn;
  list_entry->arg = arg;

  newm->list_entry = list_entry;

  newm->mcache = runtime_allocmcache ();

  /* Add the thread to the list of all threads, marked as tentative
     since it is not yet ready to go.  */
  i = pthread_mutex_lock (&__go_thread_ids_lock);
  __go_assert (i == 0);

  if (__go_all_thread_ids != NULL)
    __go_all_thread_ids->prev = list_entry;
  list_entry->next = __go_all_thread_ids;
  __go_all_thread_ids = list_entry;

  i = pthread_mutex_unlock (&__go_thread_ids_lock);
  __go_assert (i == 0);

  /* Start the thread.  */
  i = pthread_create (&tid, &attr, start_go_thread, newm);
  __go_assert (i == 0);

  i = pthread_attr_destroy (&attr);
  __go_assert (i == 0);
}
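/* For illustration only (not part of this file): the gccgo front end
   lowers a statement such as

       go f (x)

   into roughly

       a = heap-allocate a small struct holding x;
       __go_go (thunk, a);

   where `thunk' is a compiler-generated function that unpacks the
   argument block and calls f.  The exact thunk and argument layout
   are produced by the compiler; the names here are placeholders.  */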

/* This is the signal handler for GO_SIG_START.  The garbage collector
   will send this signal to a thread when it wants the thread to
   start.  We don't have to actually do anything here, but we need a
   signal handler since ignoring the signal will mean that the
   sigsuspend will never see it.  */

static void
gc_start_handler (int sig __attribute__ ((unused)))
{
}

/* Tell the garbage collector that we are ready, and wait for the
   garbage collector to tell us that it is done.  This may be called
   by a signal handler, so it is restricted to using functions which
   are async-signal-safe.  */

static void
stop_for_gc (void)
{
  int i;

  /* Tell the garbage collector about our stack.  */
#ifdef USING_SPLIT_STACK
  m->gc_sp = __splitstack_find (NULL, NULL, &m->gc_len,
				&m->gc_next_segment, &m->gc_next_sp,
				&m->gc_initial_sp);
#else
  {
    uintptr_t top = (uintptr_t) m->gc_sp;
    uintptr_t bottom = (uintptr_t) &top;
    if (top < bottom)
      {
	m->gc_next_sp = m->gc_sp;
	m->gc_len = bottom - top;
      }
    else
      {
	m->gc_next_sp = (void *) bottom;
	m->gc_len = top - bottom;
      }
  }
#endif

  /* FIXME: Perhaps we should just move __go_panic_defer into M.  */
  m->gc_panic_defer = __go_panic_defer;

  /* Tell the garbage collector that we are ready by posting to the
     semaphore.  */
  i = sem_post (&__go_thread_ready_sem);
  __go_assert (i == 0);

  /* Wait for the garbage collector to tell us to continue.  */
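  /* __go_thread_wait_sigset was set up in __go_gc_goroutine_init with
     GO_SIG_STOP blocked and GO_SIG_START unblocked, so this
     sigsuspend returns only when the collector sends GO_SIG_START.  */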
  sigsuspend (&__go_thread_wait_sigset);
}

/* This is the signal handler for GO_SIG_STOP.  The garbage collector
   will send this signal to a thread when it wants the thread to
   stop.  */

static void
gc_stop_handler (int sig __attribute__ ((unused)))
{
  struct M *pm = m;

  if (__sync_bool_compare_and_swap (&pm->holds_finlock, 1, 1))
    {
      /* We can't interrupt the thread while it holds the finalizer
	 lock.  Otherwise we can get into a deadlock when mark calls
	 runtime_walkfintab.  */
      __sync_bool_compare_and_swap (&pm->gcing_for_finlock, 0, 1);
      return;
    }

  if (__sync_bool_compare_and_swap (&pm->mallocing, 1, 1))
    {
      /* m->mallocing was already non-zero.  We can't interrupt the
	 thread while it is running a malloc.  Instead, tell it to
	 call back to us when done.  */
      __sync_bool_compare_and_swap (&pm->gcing, 0, 1);
      return;
    }

  if (__sync_bool_compare_and_swap (&pm->nomemprof, 1, 1))
    {
      /* Similarly, we can't interrupt the thread while it is building
	 profiling information.  Otherwise we can get into a deadlock
	 when sweepspan calls MProf_Free.  */
      __sync_bool_compare_and_swap (&pm->gcing_for_prof, 0, 1);
      return;
    }

  stop_for_gc ();
}

/* This is called by malloc when it gets a signal during the malloc
   call itself.  */

int
__go_run_goroutine_gc (int r)
{
  /* Force callee-saved registers to be saved on the stack.  This is
     not needed if we are invoked from the signal handler, but it is
     needed if we are called directly, since otherwise we might miss
     something that a function somewhere up the call stack is holding
     in a register.  */
  __builtin_unwind_init ();

  stop_for_gc ();

  /* This avoids tail recursion, to make sure that the saved registers
     are on the stack.  */
  return r;
}
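/* For illustration only (not part of this file): a code path that
   deferred the stop, such as the allocator, is expected to notice the
   flag set by gc_stop_handler and call back in roughly this way once
   its critical section is over:

       if (__sync_bool_compare_and_swap (&m->gcing, 1, 0))
         __go_run_goroutine_gc (0);

   The real call sites are in the allocator, finalizer, and profiling
   code elsewhere in libgo.  */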

/* Stop all the other threads for garbage collection.  */

void
runtime_stoptheworld (void)
{
  int i;
  pthread_t me;
  int c;
  struct __go_thread_id *p;

  i = pthread_mutex_lock (&__go_thread_ids_lock);
  __go_assert (i == 0);

  me = pthread_self ();
  c = 0;
  p = __go_all_thread_ids;
  while (p != NULL)
    {
      if (p->tentative || pthread_equal (me, p->id))
	p = p->next;
      else
	{
	  i = pthread_kill (p->id, GO_SIG_STOP);
	  if (i == 0)
	    {
	      ++c;
	      p = p->next;
	    }
	  else if (i == ESRCH)
	    {
	      struct __go_thread_id *next;

	      /* This thread died somehow.  Remove it from the
		 list.  */
	      next = p->next;
	      if (p->prev != NULL)
		p->prev->next = next;
	      else
		__go_all_thread_ids = next;
	      if (next != NULL)
		next->prev = p->prev;
	      free (p);
	      p = next;
	    }
	  else
	    abort ();
	}
    }

  /* Wait for each thread to receive the signal and post to the
     semaphore.  If a thread receives the signal but contrives to die
     before it posts to the semaphore, then we will hang forever
     here.  */

  while (c > 0)
    {
      i = sem_wait (&__go_thread_ready_sem);
      if (i < 0 && errno == EINTR)
	continue;
      __go_assert (i == 0);
      --c;
    }

  /* The gc_panic_defer field should now be set for all M's except the
     one in this thread.  Set this one now.  */
  m->gc_panic_defer = __go_panic_defer;

  /* Leave with __go_thread_ids_lock held.  */
}

/* Scan all the stacks for garbage collection.  This should be called
   with __go_thread_ids_lock held.  */

void
__go_scanstacks (void (*scan) (byte *, int64))
{
  pthread_t me;
  struct __go_thread_id *p;

  /* Make sure all the registers for this thread are on the stack.  */
  __builtin_unwind_init ();

  me = pthread_self ();
  for (p = __go_all_thread_ids; p != NULL; p = p->next)
    {
      if (p->tentative)
	{
	  /* The goroutine function and argument can be allocated on
	     the heap, so we have to scan them for a thread that has
	     not yet started.  */
	  scan ((void *) &p->pfn, sizeof (void *));
	  scan ((void *) &p->arg, sizeof (void *));
	  scan ((void *) &p->m, sizeof (void *));
	  continue;
	}

#ifdef USING_SPLIT_STACK

      void *sp;
      size_t len;
      void *next_segment;
      void *next_sp;
      void *initial_sp;

      if (pthread_equal (me, p->id))
	{
	  next_segment = NULL;
	  next_sp = NULL;
	  initial_sp = NULL;
	  sp = __splitstack_find (NULL, NULL, &len, &next_segment,
				  &next_sp, &initial_sp);
	}
      else
	{
	  sp = p->m->gc_sp;
	  len = p->m->gc_len;
	  next_segment = p->m->gc_next_segment;
	  next_sp = p->m->gc_next_sp;
	  initial_sp = p->m->gc_initial_sp;
	}

      while (sp != NULL)
	{
	  scan (sp, len);
	  sp = __splitstack_find (next_segment, next_sp, &len,
				  &next_segment, &next_sp, &initial_sp);
	}

#else /* !defined(USING_SPLIT_STACK) */

      if (pthread_equal (me, p->id))
	{
	  uintptr_t top = (uintptr_t) m->gc_sp;
	  uintptr_t bottom = (uintptr_t) &top;
	  if (top < bottom)
	    scan (m->gc_sp, bottom - top);
	  else
	    scan ((void *) bottom, top - bottom);
	}
      else
	{
	  scan (p->m->gc_next_sp, p->m->gc_len);
	}
	
#endif /* !defined(USING_SPLIT_STACK) */

      /* Also scan the M structure while we're at it.  */

      scan ((void *) &p->m, sizeof (void *));
    }
}

/* Release all the memory caches.  This is called with
   __go_thread_ids_lock held.  */

void
__go_stealcache (void)
{
  struct __go_thread_id *p;

  for (p = __go_all_thread_ids; p != NULL; p = p->next)
    runtime_MCache_ReleaseAll (p->m->mcache);
}

/* Gather memory cache statistics.  This is called with
   __go_thread_ids_lock held.  */

void
__go_cachestats (void)
{
  struct __go_thread_id *p;

  for (p = __go_all_thread_ids; p != NULL; p = p->next)
    {
      MCache *c;

      c = p->m->mcache;
      mstats.heap_alloc += c->local_alloc;
      c->local_alloc = 0;
      mstats.heap_objects += c->local_objects;
      c->local_objects = 0;
    }
}

/* Start the other threads after garbage collection.  */

void
runtime_starttheworld (void)
{
  int i;
  pthread_t me;
  struct __go_thread_id *p;

  /* Here __go_thread_ids_lock should be held.  */

  me = pthread_self ();
  p = __go_all_thread_ids;
  while (p != NULL)
    {
      if (p->tentative || pthread_equal (me, p->id))
	p = p->next;
      else
	{
	  i = pthread_kill (p->id, GO_SIG_START);
	  if (i == 0)
	    p = p->next;
	  else
	    abort ();
	}
    }

  i = pthread_mutex_unlock (&__go_thread_ids_lock);
  __go_assert (i == 0);
}

/* Initialize the interaction between goroutines and the garbage
   collector.  */

void
__go_gc_goroutine_init (void *sp __attribute__ ((unused)))
{
  struct __go_thread_id *list_entry;
  int i;
  sigset_t sset;
  struct sigaction act;

  /* Add the initial thread to the list of all threads.  */

  list_entry = malloc (sizeof (struct __go_thread_id));
  list_entry->prev = NULL;
  list_entry->next = NULL;
  list_entry->tentative = 0;
  list_entry->id = pthread_self ();
  list_entry->m = m;
  list_entry->pfn = NULL;
  list_entry->arg = NULL;
  __go_all_thread_ids = list_entry;

  /* Initialize the semaphore which signals when threads are ready for
     GC.  */

  i = sem_init (&__go_thread_ready_sem, 0, 0);
  __go_assert (i == 0);

  /* Fetch the current signal mask.  */

  i = sigemptyset (&sset);
  __go_assert (i == 0);
  i = sigprocmask (SIG_BLOCK, NULL, &sset);
  __go_assert (i == 0);

  /* Make sure that GO_SIG_START is not blocked and GO_SIG_STOP is
     blocked, and save that set for use with later calls to sigsuspend
     while waiting for GC to complete.  */

  i = sigdelset (&sset, GO_SIG_START);
  __go_assert (i == 0);
  i = sigaddset (&sset, GO_SIG_STOP);
  __go_assert (i == 0);
  __go_thread_wait_sigset = sset;

  /* Block GO_SIG_START and unblock GO_SIG_STOP, and use that for
     the process signal mask.  */

  i = sigaddset (&sset, GO_SIG_START);
  __go_assert (i == 0);
  i = sigdelset (&sset, GO_SIG_STOP);
  __go_assert (i == 0);
  i = sigprocmask (SIG_SETMASK, &sset, NULL);
  __go_assert (i == 0);

  /* Install the signal handlers.  */
  memset (&act, 0, sizeof act);
  i = sigemptyset (&act.sa_mask);
  __go_assert (i == 0);

  act.sa_handler = gc_start_handler;
  act.sa_flags = SA_RESTART;
  i = sigaction (GO_SIG_START, &act, NULL);
  __go_assert (i == 0);

  /* We could consider using an alternate signal stack for this.  The
     function does not use much stack space, so it may be OK.  */
  act.sa_handler = gc_stop_handler;
  i = sigaction (GO_SIG_STOP, &act, NULL);
  __go_assert (i == 0);

#ifndef USING_SPLIT_STACK
  /* If we don't support split stack, record the current stack as the
     top of the stack.  */
  m->gc_sp = sp;
#endif
}
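
/* The collector is expected to use the functions above roughly in
   this order: runtime_stoptheworld (which returns with
   __go_thread_ids_lock held), then __go_cachestats, __go_scanstacks,
   and __go_stealcache as needed while the world is stopped, and
   finally runtime_starttheworld, which releases the lock.  The actual
   driver lives in the garbage collector proper, not in this file.  */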