-rw-r--r--  include/ntapi/bits/nt32/nt_atomic_i386_asm__gcc.h      60
-rw-r--r--  include/ntapi/bits/nt32/nt_atomic_i386_asm__msvc.h     36
-rw-r--r--  include/ntapi/bits/nt64/nt_atomic_x86_64_asm__gcc.h    72
-rw-r--r--  include/ntapi/bits/nt64/nt_atomic_x86_64_asm__msvc.h   36
-rw-r--r--  include/ntapi/nt_atomic.h                              18
5 files changed, 78 insertions(+), 144 deletions(-)
diff --git a/include/ntapi/bits/nt32/nt_atomic_i386_asm__gcc.h b/include/ntapi/bits/nt32/nt_atomic_i386_asm__gcc.h
index adf000f..002a62a 100644
--- a/include/ntapi/bits/nt32/nt_atomic_i386_asm__gcc.h
+++ b/include/ntapi/bits/nt32/nt_atomic_i386_asm__gcc.h
@@ -273,41 +273,33 @@ static __inline__ int64_t at_locked_cas_64(
}
-static __inline__ intptr_t at_locked_and(
+static __inline__ void at_locked_and(
intptr_t volatile * dst,
intptr_t mask)
{
- intptr_t ret;
-
__asm__(
"lock;"
"andl %1, %0"
- : "=m" (*dst), "=a" (ret)
+ : "=m" (*dst)
: "r" (mask)
: "memory");
-
- return ret;
}
-static __inline__ int32_t at_locked_and_32(
+static __inline__ void at_locked_and_32(
int32_t volatile * dst,
int32_t mask)
{
- int32_t ret;
-
__asm__(
"lock;"
"andl %1, %0"
- : "=m" (*dst), "=a" (ret)
+ : "=m" (*dst)
: "r" (mask)
: "memory");
-
- return ret;
}
-static __inline__ int64_t at_locked_and_64(
+static __inline__ void at_locked_and_64(
int64_t volatile * dst,
int64_t mask)
{
@@ -320,46 +312,36 @@ static __inline__ int64_t at_locked_and_64(
xchg = cmp & mask;
ret = at_locked_cas_64(dst,cmp,xchg);
} while (ret != cmp);
-
- return ret;
}
-static __inline__ intptr_t at_locked_or(
+static __inline__ void at_locked_or(
intptr_t volatile * dst,
intptr_t mask)
{
- intptr_t ret;
-
__asm__(
"lock;"
"orl %1, %0"
- : "=m" (*dst), "=a" (ret)
+ : "=m" (*dst)
: "r" (mask)
: "memory");
-
- return ret;
}
-static __inline__ int32_t at_locked_or_32(
+static __inline__ void at_locked_or_32(
int32_t volatile * dst,
int32_t mask)
{
- int32_t ret;
-
__asm__(
"lock;"
"orl %1, %0"
- : "=m" (*dst), "=a" (ret)
+ : "=m" (*dst)
: "r" (mask)
: "memory");
-
- return ret;
}
-static __inline__ int64_t at_locked_or_64(
+static __inline__ void at_locked_or_64(
int64_t volatile * dst,
int64_t mask)
{
@@ -372,46 +354,36 @@ static __inline__ int64_t at_locked_or_64(
xchg = cmp | mask;
ret = at_locked_cas_64(dst,cmp,xchg);
} while (ret != cmp);
-
- return ret;
}
-static __inline__ intptr_t at_locked_xor(
+static __inline__ void at_locked_xor(
intptr_t volatile * dst,
intptr_t mask)
{
- intptr_t ret;
-
__asm__(
"lock;"
"xorl %1, %0"
- : "=m" (*dst), "=a" (ret)
+ : "=m" (*dst)
: "r" (mask)
: "memory");
-
- return ret;
}
-static __inline__ int32_t at_locked_xor_32(
+static __inline__ void at_locked_xor_32(
int32_t volatile * dst,
int32_t mask)
{
- int32_t ret;
-
__asm__(
"lock;"
"xorl %1, %0"
- : "=m" (*dst), "=a" (ret)
+ : "=m" (*dst)
: "r" (mask)
: "memory");
-
- return ret;
}
-static __inline__ int64_t at_locked_xor_64(
+static __inline__ void at_locked_xor_64(
int64_t volatile * dst,
int64_t mask)
{
@@ -424,8 +396,6 @@ static __inline__ int64_t at_locked_xor_64(
xchg = cmp ^ mask;
ret = at_locked_cas_64(dst,cmp,xchg);
} while (ret != cmp);
-
- return ret;
}
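
Note: the discarded "=a" (ret) outputs above were declared but never written by the single lock-prefixed instruction, so the old return values were indeterminate; returning void loses nothing. The 64-bit variants keep their compare-and-swap loop because 32-bit x86 has no lock-prefixed and/or/xor on a 64-bit operand. A caller that still needs the previous value can rebuild a fetch-and-op on top of the CAS primitives; a minimal sketch (hypothetical helper, not part of this patch, assuming the at_locked_cas primitive declared alongside at_locked_cas_64 in nt_atomic.h):

/* hypothetical fetch-and-and built on at_locked_cas; returns
 * the value observed in *dst before the mask was applied    */
static __inline__ intptr_t at_fetch_and(
	intptr_t volatile * dst,
	intptr_t mask)
{
	intptr_t cmp;
	intptr_t ret;

	do {
		cmp = *dst;
		ret = at_locked_cas(dst,cmp,cmp & mask);
	} while (ret != cmp);

	return cmp;
}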
diff --git a/include/ntapi/bits/nt32/nt_atomic_i386_asm__msvc.h b/include/ntapi/bits/nt32/nt_atomic_i386_asm__msvc.h
index c0a0ba8..37dc496 100644
--- a/include/ntapi/bits/nt32/nt_atomic_i386_asm__msvc.h
+++ b/include/ntapi/bits/nt32/nt_atomic_i386_asm__msvc.h
@@ -198,75 +198,75 @@ static __inline__ int64_t at_locked_cas_64(
}
-static __inline__ intptr_t at_locked_and(
+static __inline__ void at_locked_and(
intptr_t volatile * dst,
intptr_t mask)
{
- return _InterlockedAnd(dst,mask);
+ _InterlockedAnd(dst,mask);
}
-static __inline__ int32_t at_locked_and_32(
+static __inline__ void at_locked_and_32(
int32_t volatile * dst,
int32_t mask)
{
- return _InterlockedAnd((long *)dst,mask);
+ _InterlockedAnd((long *)dst,mask);
}
-static __inline__ int64_t at_locked_and_64(
+static __inline__ void at_locked_and_64(
int64_t volatile * dst,
int64_t mask)
{
- return _InterlockedAnd64(dst,mask);
+ _InterlockedAnd64(dst,mask);
}
-static __inline__ intptr_t at_locked_or(
+static __inline__ void at_locked_or(
intptr_t volatile * dst,
intptr_t mask)
{
- return _InterlockedOr(dst,mask);
+ _InterlockedOr(dst,mask);
}
-static __inline__ int32_t at_locked_or_32(
+static __inline__ void at_locked_or_32(
int32_t volatile * dst,
int32_t mask)
{
- return _InterlockedOr((long *)dst,mask);
+ _InterlockedOr((long *)dst,mask);
}
-static __inline__ int64_t at_locked_or_64(
+static __inline__ void at_locked_or_64(
int64_t volatile * dst,
int64_t mask)
{
- return _InterlockedOr64(dst,mask);
+ _InterlockedOr64(dst,mask);
}
-static __inline__ intptr_t at_locked_xor(
+static __inline__ void at_locked_xor(
intptr_t volatile * dst,
intptr_t mask)
{
- return _InterlockedXor(dst,mask);
+ _InterlockedXor(dst,mask);
}
-static __inline__ int32_t at_locked_xor_32(
+static __inline__ void at_locked_xor_32(
int32_t volatile * dst,
int32_t mask)
{
- return _InterlockedXor((long *)dst,mask);
+ _InterlockedXor((long *)dst,mask);
}
-static __inline__ int64_t at_locked_xor_64(
+static __inline__ void at_locked_xor_64(
int64_t volatile * dst,
int64_t mask)
{
- return _InterlockedXor64(dst,mask);
+ _InterlockedXor64(dst,mask);
}
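
The MSVC intrinsics (_InterlockedAnd, _InterlockedOr, _InterlockedXor and their 64-bit forms) still return the previous value; the wrappers now simply discard it to match the new void signatures. Under MSVC a caller that needs the old bits can invoke the intrinsic directly; a hedged sketch (hypothetical helper, not part of this patch):

/* hypothetical helper: atomically clear a flag and report
 * whether it was set beforehand, using the prior value
 * returned by the intrinsic                               */
static __inline__ int at_test_and_clear(
	long volatile * dst,
	long flag)
{
	return (_InterlockedAnd(dst,~flag) & flag) ? 1 : 0;
}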
diff --git a/include/ntapi/bits/nt64/nt_atomic_x86_64_asm__gcc.h b/include/ntapi/bits/nt64/nt_atomic_x86_64_asm__gcc.h
index 1e3f10d..2162b1c 100644
--- a/include/ntapi/bits/nt64/nt_atomic_x86_64_asm__gcc.h
+++ b/include/ntapi/bits/nt64/nt_atomic_x86_64_asm__gcc.h
@@ -300,156 +300,120 @@ static __inline__ int64_t at_locked_cas_64(
}
-static __inline__ intptr_t at_locked_and(
+static __inline__ void at_locked_and(
intptr_t volatile * dst,
intptr_t mask)
{
- intptr_t ret;
-
__asm__(
"lock;"
"andq %1, %0"
- : "=m" (*dst), "=a" (ret)
+ : "=m" (*dst)
: "r" (mask)
: "memory");
-
- return ret;
}
-static __inline__ int32_t at_locked_and_32(
+static __inline__ void at_locked_and_32(
int32_t volatile * dst,
int32_t mask)
{
- int32_t ret;
-
__asm__(
"lock;"
"andl %1, %0"
- : "=m" (*dst), "=a" (ret)
+ : "=m" (*dst)
: "r" (mask)
: "memory");
-
- return ret;
}
-static __inline__ int64_t at_locked_and_64(
+static __inline__ void at_locked_and_64(
int64_t volatile * dst,
int64_t mask)
{
- int64_t ret;
-
__asm__(
"lock;"
"andq %1, %0"
- : "=m" (*dst), "=a" (ret)
+ : "=m" (*dst)
: "r" (mask)
: "memory");
-
- return ret;
}
-static __inline__ intptr_t at_locked_or(
+static __inline__ void at_locked_or(
intptr_t volatile * dst,
intptr_t mask)
{
- intptr_t ret;
-
__asm__(
"lock;"
"orq %1, %0"
- : "=m" (*dst), "=a" (ret)
+ : "=m" (*dst)
: "r" (mask)
: "memory");
-
- return ret;
}
-static __inline__ int32_t at_locked_or_32(
+static __inline__ void at_locked_or_32(
int32_t volatile * dst,
int32_t mask)
{
- int32_t ret;
-
__asm__(
"lock;"
"orl %1, %0"
- : "=m" (*dst), "=a" (ret)
+ : "=m" (*dst)
: "r" (mask)
: "memory");
-
- return ret;
}
-static __inline__ int64_t at_locked_or_64(
+static __inline__ void at_locked_or_64(
int64_t volatile * dst,
int64_t mask)
{
- int64_t ret;
-
__asm__(
"lock;"
"orq %1, %0"
- : "=m" (*dst), "=a" (ret)
+ : "=m" (*dst)
: "r" (mask)
: "memory");
-
- return ret;
}
-static __inline__ intptr_t at_locked_xor(
+static __inline__ void at_locked_xor(
intptr_t volatile * dst,
intptr_t mask)
{
- intptr_t ret;
-
__asm__(
"lock;"
"xorq %1, %0"
- : "=m" (*dst), "=a" (ret)
+ : "=m" (*dst)
: "r" (mask)
: "memory");
-
- return ret;
}
-static __inline__ int32_t at_locked_xor_32(
+static __inline__ void at_locked_xor_32(
int32_t volatile * dst,
int32_t mask)
{
- int32_t ret;
-
__asm__(
"lock;"
"xorl %1, %0"
- : "=m" (*dst), "=a" (ret)
+ : "=m" (*dst)
: "r" (mask)
: "memory");
-
- return ret;
}
-static __inline__ int64_t at_locked_xor_64(
+static __inline__ void at_locked_xor_64(
int64_t volatile * dst,
int64_t mask)
{
- int64_t ret;
-
__asm__(
"lock;"
"xorq %1, %0"
- : "=m" (*dst), "=a" (ret)
+ : "=m" (*dst)
: "r" (mask)
: "memory");
-
- return ret;
}
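
On x86_64 every width has a native lock-prefixed form (andl/orl/xorl for the 32-bit variants, andq/orq/xorq for the 64-bit and pointer-sized ones), so none of these functions needs the CAS loop that the nt32 header uses for its 64-bit cases. For comparison only, the same void semantics expressed with the GCC __atomic builtins (a sketch, not part of this patch; the hand-written asm presumably avoids depending on builtin availability):

/* equivalent formulation via a GCC builtin; the fetched prior
 * value is deliberately discarded, matching the void wrappers */
static __inline__ void at_locked_and_alt(
	intptr_t volatile * dst,
	intptr_t mask)
{
	(void)__atomic_fetch_and(dst,mask,__ATOMIC_SEQ_CST);
}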
diff --git a/include/ntapi/bits/nt64/nt_atomic_x86_64_asm__msvc.h b/include/ntapi/bits/nt64/nt_atomic_x86_64_asm__msvc.h
index a52bfd4..e202187 100644
--- a/include/ntapi/bits/nt64/nt_atomic_x86_64_asm__msvc.h
+++ b/include/ntapi/bits/nt64/nt_atomic_x86_64_asm__msvc.h
@@ -198,75 +198,75 @@ static __inline__ int64_t at_locked_cas_64(
}
-static __inline__ intptr_t at_locked_and(
+static __inline__ void at_locked_and(
intptr_t volatile * dst,
intptr_t mask)
{
- return _InterlockedAnd64(dst,mask);
+ _InterlockedAnd64(dst,mask);
}
-static __inline__ int32_t at_locked_and_32(
+static __inline__ void at_locked_and_32(
int32_t volatile * dst,
int32_t mask)
{
- return _InterlockedAnd(dst,mask);
+ _InterlockedAnd(dst,mask);
}
-static __inline__ int64_t at_locked_and_64(
+static __inline__ void at_locked_and_64(
int64_t volatile * dst,
int64_t mask)
{
- return _InterlockedAnd64(dst,mask);
+ _InterlockedAnd64(dst,mask);
}
-static __inline__ intptr_t at_locked_or(
+static __inline__ void at_locked_or(
intptr_t volatile * dst,
intptr_t mask)
{
- return _InterlockedOr64(dst,mask);
+ _InterlockedOr64(dst,mask);
}
-static __inline__ int32_t at_locked_or_32(
+static __inline__ void at_locked_or_32(
int32_t volatile * dst,
int32_t mask)
{
- return _InterlockedOr(dst,mask);
+ _InterlockedOr(dst,mask);
}
-static __inline__ int64_t at_locked_or_64(
+static __inline__ void at_locked_or_64(
int64_t volatile * dst,
int64_t mask)
{
- return _InterlockedOr64(dst,mask);
+ _InterlockedOr64(dst,mask);
}
-static __inline__ intptr_t at_locked_xor(
+static __inline__ void at_locked_xor(
intptr_t volatile * dst,
intptr_t mask)
{
- return _InterlockedXor64(dst,mask);
+ _InterlockedXor64(dst,mask);
}
-static __inline__ int32_t at_locked_xor_32(
+static __inline__ void at_locked_xor_32(
int32_t volatile * dst,
int32_t mask)
{
- return _InterlockedXor(dst,mask);
+ _InterlockedXor(dst,mask);
}
-static __inline__ int64_t at_locked_xor_64(
+static __inline__ void at_locked_xor_64(
int64_t volatile * dst,
int64_t mask)
{
- return _InterlockedXor64(dst,mask);
+ _InterlockedXor64(dst,mask);
}
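
On nt64 the pointer-sized wrappers dispatch to the 64-bit intrinsics, which is only sound because intptr_t is 64 bits wide there. A compile-time check of that assumption (hypothetical, not part of this patch):

/* hypothetical compile-time guard: fails to compile if the
 * nt64 intptr_t/int64_t size assumption is ever violated   */
typedef char at_nt64_size_check[(sizeof(intptr_t) == sizeof(int64_t)) ? 1 : -1];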
diff --git a/include/ntapi/nt_atomic.h b/include/ntapi/nt_atomic.h
index 45a4018..b03fa19 100644
--- a/include/ntapi/nt_atomic.h
+++ b/include/ntapi/nt_atomic.h
@@ -84,47 +84,47 @@ static __inline__ int64_t at_locked_cas_64(
int64_t cmp,
int64_t xchg);
-static __inline__ intptr_t at_locked_and(
+static __inline__ void at_locked_and(
intptr_t volatile * dst,
intptr_t mask);
-static __inline__ int32_t at_locked_and_32(
+static __inline__ void at_locked_and_32(
int32_t volatile * dst,
int32_t mask);
-static __inline__ int64_t at_locked_and_64(
+static __inline__ void at_locked_and_64(
int64_t volatile * dst,
int64_t mask);
-static __inline__ intptr_t at_locked_or(
+static __inline__ void at_locked_or(
intptr_t volatile * dst,
intptr_t mask);
-static __inline__ int32_t at_locked_or_32(
+static __inline__ void at_locked_or_32(
int32_t volatile * dst,
int32_t mask);
-static __inline__ int64_t at_locked_or_64(
+static __inline__ void at_locked_or_64(
int64_t volatile * dst,
int64_t mask);
-static __inline__ intptr_t at_locked_xor(
+static __inline__ void at_locked_xor(
intptr_t volatile * dst,
intptr_t mask);
-static __inline__ int32_t at_locked_xor_32(
+static __inline__ void at_locked_xor_32(
int32_t volatile * dst,
int32_t mask);
-static __inline__ int64_t at_locked_xor_64(
+static __inline__ void at_locked_xor_64(
int64_t volatile * dst,
int64_t mask);
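
With the declarations void across all four backends, callers use these functions purely for their atomic read-modify-write side effect; a minimal usage sketch (hypothetical flags word, not part of this patch):

/* hypothetical usage of the void interface */
#define AT_FLAG_READY 0x1
#define AT_FLAG_BUSY  0x2

static intptr_t volatile at_flags;

static void at_flags_example(void)
{
	at_locked_or(&at_flags,AT_FLAG_READY);   /* set bit    */
	at_locked_and(&at_flags,~AT_FLAG_BUSY);  /* clear bit  */
	at_locked_xor(&at_flags,AT_FLAG_READY);  /* toggle bit */
}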