From f1a76632596dd69a233d09ed278467c5fed7b13d Mon Sep 17 00:00:00 2001
From: midipix
Date: Thu, 9 Aug 2018 04:15:25 -0400
Subject: library: atomics: simplify and-or-xor interfaces.

---
 include/ntapi/bits/nt32/nt_atomic_i386_asm__gcc.h | 60 ++++++------------------
 1 file changed, 15 insertions(+), 45 deletions(-)

(limited to 'include/ntapi/bits/nt32/nt_atomic_i386_asm__gcc.h')

diff --git a/include/ntapi/bits/nt32/nt_atomic_i386_asm__gcc.h b/include/ntapi/bits/nt32/nt_atomic_i386_asm__gcc.h
index adf000f..002a62a 100644
--- a/include/ntapi/bits/nt32/nt_atomic_i386_asm__gcc.h
+++ b/include/ntapi/bits/nt32/nt_atomic_i386_asm__gcc.h
@@ -273,41 +273,33 @@ static __inline__ int64_t at_locked_cas_64(
 }
 
 
-static __inline__ intptr_t at_locked_and(
+static __inline__ void at_locked_and(
 	intptr_t volatile *	dst,
 	intptr_t		mask)
 {
-	intptr_t ret;
-
 	__asm__(
 		"lock;"
 		"andl %1, %0"
-		: "=m" (*dst), "=a" (ret)
+		: "=m" (*dst)
 		: "r" (mask)
 		: "memory");
-
-	return ret;
 }
 
 
-static __inline__ int32_t at_locked_and_32(
+static __inline__ void at_locked_and_32(
 	int32_t volatile *	dst,
 	int32_t			mask)
 {
-	int32_t ret;
-
 	__asm__(
 		"lock;"
 		"andl %1, %0"
-		: "=m" (*dst), "=a" (ret)
+		: "=m" (*dst)
 		: "r" (mask)
 		: "memory");
-
-	return ret;
 }
 
 
-static __inline__ int64_t at_locked_and_64(
+static __inline__ void at_locked_and_64(
 	int64_t volatile *	dst,
 	int64_t			mask)
 {
@@ -320,46 +312,36 @@ static __inline__ int64_t at_locked_and_64(
 		xchg = cmp & mask;
 		ret = at_locked_cas_64(dst,cmp,xchg);
 	} while (ret != cmp);
-
-	return ret;
 }
 
 
-static __inline__ intptr_t at_locked_or(
+static __inline__ void at_locked_or(
 	intptr_t volatile *	dst,
 	intptr_t		mask)
 {
-	intptr_t ret;
-
 	__asm__(
 		"lock;"
 		"orl %1, %0"
-		: "=m" (*dst), "=a" (ret)
+		: "=m" (*dst)
 		: "r" (mask)
 		: "memory");
-
-	return ret;
 }
 
 
-static __inline__ int32_t at_locked_or_32(
+static __inline__ void at_locked_or_32(
 	int32_t volatile *	dst,
 	int32_t			mask)
 {
-	int32_t ret;
-
 	__asm__(
 		"lock;"
 		"orl %1, %0"
-		: "=m" (*dst), "=a" (ret)
+		: "=m" (*dst)
 		: "r" (mask)
 		: "memory");
-
-	return ret;
 }
 
 
-static __inline__ int64_t at_locked_or_64(
+static __inline__ void at_locked_or_64(
 	int64_t volatile *	dst,
 	int64_t			mask)
 {
@@ -372,46 +354,36 @@ static __inline__ int64_t at_locked_or_64(
 		xchg = cmp | mask;
 		ret = at_locked_cas_64(dst,cmp,xchg);
 	} while (ret != cmp);
-
-	return ret;
 }
 
 
-static __inline__ intptr_t at_locked_xor(
+static __inline__ void at_locked_xor(
 	intptr_t volatile *	dst,
 	intptr_t		mask)
 {
-	intptr_t ret;
-
 	__asm__(
 		"lock;"
 		"xorl %1, %0"
-		: "=m" (*dst), "=a" (ret)
+		: "=m" (*dst)
 		: "r" (mask)
 		: "memory");
-
-	return ret;
 }
 
 
-static __inline__ int32_t at_locked_xor_32(
+static __inline__ void at_locked_xor_32(
 	int32_t volatile *	dst,
 	int32_t			mask)
 {
-	int32_t ret;
-
 	__asm__(
 		"lock;"
 		"xorl %1, %0"
-		: "=m" (*dst), "=a" (ret)
+		: "=m" (*dst)
 		: "r" (mask)
 		: "memory");
-
-	return ret;
 }
 
 
-static __inline__ int64_t at_locked_xor_64(
+static __inline__ void at_locked_xor_64(
 	int64_t volatile *	dst,
 	int64_t			mask)
 {
@@ -424,8 +396,6 @@ static __inline__ int64_t at_locked_xor_64(
 		xchg = cmp ^ mask;
 		ret = at_locked_cas_64(dst,cmp,xchg);
 	} while (ret != cmp);
-
-	return ret;
 }
 
 
--
cgit v1.2.3
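
Usage sketch (illustrative only, not part of the patch): after this change, at_locked_and/or/xor and their _32/_64 variants return void and are called purely for their effect on *dst; a caller that still needs the previous value would instead use the cas primitives such as at_locked_cas_64 shown above. The sketch assumes the ntapi atomic header providing these inlines is already included; the flag constants and the g_flags variable are hypothetical names introduced here for illustration.

#include <stdint.h>

/* hypothetical flag bits, for illustration only */
#define FLAG_BUSY	0x01
#define FLAG_DIRTY	0x04

static int32_t volatile g_flags;

static void flags_mark_dirty(void)
{
	/* set a bit; the simplified interface returns nothing */
	at_locked_or_32(&g_flags, FLAG_DIRTY);
}

static void flags_clear_busy(void)
{
	/* clear a bit by and'ing with the complemented mask */
	at_locked_and_32(&g_flags, ~FLAG_BUSY);
}

static void flags_toggle_dirty(void)
{
	/* flip a bit */
	at_locked_xor_32(&g_flags, FLAG_DIRTY);
}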