From 554fd8c5195424bdbcabf5de30fdc183aba391bd Mon Sep 17 00:00:00 2001
From: upstream source tree
Date: Sun, 15 Mar 2015 20:14:05 -0400
Subject: obtained gcc-4.6.4.tar.bz2 from upstream website; verified
 gcc-4.6.4.tar.bz2.sig; imported gcc-4.6.4 source tree from verified
 upstream tarball.

downloading a git-generated archive based on the 'upstream' tag should
provide you with a source tree that is binary identical to the one
extracted from the above tarball.

if you have obtained the source via the command 'git clone', however,
do note that line-endings of files in your working directory might
differ from line-endings of the respective files in the upstream
repository.
---
 libjava/sysdep/powerpc/locks.h | 97 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 97 insertions(+)
 create mode 100644 libjava/sysdep/powerpc/locks.h

diff --git a/libjava/sysdep/powerpc/locks.h b/libjava/sysdep/powerpc/locks.h
new file mode 100644
index 000000000..2e9eb0eb3
--- /dev/null
+++ b/libjava/sysdep/powerpc/locks.h
@@ -0,0 +1,97 @@
+// locks.h - Thread synchronization primitives. PowerPC implementation.
+
+/* Copyright (C) 2002,2008  Free Software Foundation
+
+   This file is part of libgcj.
+
+This software is copyrighted work licensed under the terms of the
+Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
+details.  */
+
+#ifndef __SYSDEP_LOCKS_H__
+#define __SYSDEP_LOCKS_H__
+
+#ifdef __LP64__
+#define _LARX "ldarx "
+#define _STCX "stdcx. "
+#else
+#define _LARX "lwarx "
+#ifdef __PPC405__
+#define _STCX "sync; stwcx. "
+#else
+#define _STCX "stwcx. "
+#endif
+#endif
+
+typedef size_t obj_addr_t;      /* Integer type big enough for object  */
+                                /* address.                            */
+
+inline static bool
+compare_and_swap (volatile obj_addr_t *addr, obj_addr_t old,
+                  obj_addr_t new_val)
+{
+  obj_addr_t ret;
+
+  __asm__ __volatile__ (
+           "    " _LARX "%0,0,%1 \n"
+           "    xor. %0,%3,%0\n"
+           "    bne $+12\n"
+           "    " _STCX "%2,0,%1\n"
+           "    bne- $-16\n"
+        : "=&r" (ret)
+        : "r" (addr), "r" (new_val), "r" (old)
+        : "cr0", "memory");
+
+  /* This version of __compare_and_swap is to be used when acquiring
+     a lock, so we don't need to worry about whether other memory
+     operations have completed, but we do need to be sure that any loads
+     after this point really occur after we have acquired the lock.  */
+  __asm__ __volatile__ ("isync" : : : "memory");
+  return ret == 0;
+}
+
+inline static void
+release_set (volatile obj_addr_t *addr, obj_addr_t new_val)
+{
+  __asm__ __volatile__ ("sync" : : : "memory");
+  *addr = new_val;
+}
+
+inline static bool
+compare_and_swap_release (volatile obj_addr_t *addr, obj_addr_t old,
+                          obj_addr_t new_val)
+{
+  obj_addr_t ret;
+
+  __asm__ __volatile__ ("sync" : : : "memory");
+
+  __asm__ __volatile__ (
+           "    " _LARX "%0,0,%1 \n"
+           "    xor. %0,%3,%0\n"
+           "    bne $+12\n"
+           "    " _STCX "%2,0,%1\n"
+           "    bne- $-16\n"
+        : "=&r" (ret)
+        : "r" (addr), "r" (new_val), "r" (old)
+        : "cr0", "memory");
+
+  return ret == 0;
+}
+
+// Ensure that subsequent instructions do not execute on stale
+// data that was loaded from memory before the barrier.
+inline static void
+read_barrier ()
+{
+  __asm__ __volatile__ ("isync" : : : "memory");
+}
+
+// Ensure that prior stores to memory are completed with respect to other
+// processors.
+inline static void
+write_barrier ()
+{
+  __asm__ __volatile__ ("sync" : : : "memory");
+}
+
+#endif
-- 
cgit v1.2.3
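
Usage note (not part of the patch): the primitives added above are meant to be
paired, with compare_and_swap taking a lock (its trailing "isync" keeps later
loads from reading data fetched before the lock was acquired) and release_set
dropping it (its leading "sync" makes earlier stores visible before the lock
appears free). The sketch below is a minimal illustration under stated
assumptions: the spin_lock/spin_unlock helpers are hypothetical names, and the
include path assumes the header is reachable as "sysdep/locks.h", as libjava's
configure machinery arranges inside the gcj tree.

    // Illustrative only: a tiny spin lock built on the primitives above.
    #include <stddef.h>          // size_t, the base of obj_addr_t
    #include "sysdep/locks.h"    // compare_and_swap, release_set (assumed path)

    static volatile obj_addr_t lock_word = 0;   // 0 = free, 1 = held

    static void
    spin_lock ()
    {
      // Acquire: retry the atomic 0 -> 1 transition until it succeeds.
      // compare_and_swap ends with "isync", so loads issued after a
      // successful swap cannot see pre-lock values.
      while (!compare_and_swap (&lock_word, 0, 1))
        ;  // spin; a real lock would back off or yield here
    }

    static void
    spin_unlock ()
    {
      // Release: release_set issues "sync" before the plain store, so all
      // writes made while holding the lock are visible to other processors
      // before the lock word reads as free.
      release_set (&lock_word, 0);
    }

In practice the only difference between compare_and_swap and
compare_and_swap_release is which side of the exchange carries the barrier:
the former orders against later loads with "isync", the latter orders against
earlier accesses with "sync".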