/*
* atomic.h: Atomic operations
*
* Author:
* Dick Porter (dick@ximian.com)
*
* (C) 2002 Ximian, Inc.
*/
#ifndef _WAPI_ATOMIC_H_
#define _WAPI_ATOMIC_H_
#include <glib.h>
#include "mono/io-layer/wapi.h"
#ifdef __i386__
#define WAPI_ATOMIC_ASM
/*
* NB: The *Pointer() functions here assume that
* sizeof(pointer)==sizeof(gint32)
*
* NB2: These asm functions assume 486+ (some of the opcodes dont
* exist on 386). If this becomes an issue, we can get configure to
* fall back to the non-atomic C versions of these calls.
*/
/*
 * Atomically: if *dest == comp, store exch into *dest.  Returns the
 * previous value of *dest (callers compare it against comp to learn
 * whether the exchange took place).
 *
 * "+m" marks *dest as read-write in one constraint (instead of the
 * old duplicated "=m"/"m" pair), and the "memory" clobber makes this
 * a compiler barrier so cached values of other memory locations are
 * not carried across the atomic operation.
 */
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp)
{
	gint32 old;

	__asm__ __volatile__ ("lock; cmpxchgl %2, %0"
			      : "+m" (*dest), "=a" (old)
			      : "r" (exch), "a" (comp)
			      : "memory");
	return(old);
}
/*
 * Pointer-sized compare-and-exchange.  NB: uses cmpxchgl, so it
 * relies on the file-level assumption sizeof(gpointer)==sizeof(gint32)
 * (this whole branch is guarded by __i386__).
 *
 * The "memory" clobber makes the operation a compiler barrier.
 */
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
	gpointer old;

	__asm__ __volatile__ ("lock; cmpxchgl %2, %0"
			      : "+m" (*dest), "=a" (old)
			      : "r" (exch), "a" (comp)
			      : "memory");
	return(old);
}
/*
 * Atomically increments *val and returns the NEW value.
 *
 * xadd leaves the old value of *val in tmp, hence the +1 on return.
 * The "memory" clobber makes the operation a compiler barrier.
 */
static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	gint32 tmp = 1;

	__asm__ __volatile__ ("lock; xaddl %0, %1"
			      : "+r" (tmp), "+m" (*val)
			      :
			      : "memory");
	return(tmp+1);
}
/*
 * Atomically decrements *val and returns the NEW value.
 *
 * xadd leaves the old value of *val in tmp, hence the -1 on return.
 * The "memory" clobber makes the operation a compiler barrier.
 */
static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	gint32 tmp = -1;

	__asm__ __volatile__ ("lock; xaddl %0, %1"
			      : "+r" (tmp), "+m" (*val)
			      :
			      : "memory");
	return(tmp-1);
}
/*
* See
* http://msdn.microsoft.com/library/en-us/dnmag00/html/win320700.asp?frame=true
* for the reasons for using cmpxchg and a loop here.
*/
/*
 * Atomically stores new_value into *val and returns the previous
 * value (cmpxchg loop per the MSDN article referenced above; on
 * failure cmpxchg reloads eax with the current *val, so the retry
 * uses a fresh comparand).
 *
 * The parameter was renamed from "new" (a C++ keyword) so the header
 * can be included from C++ translation units.  The "memory" clobber
 * makes the operation a compiler barrier.
 */
static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_value)
{
	gint32 ret;

	__asm__ __volatile__ ("1:; lock; cmpxchgl %2, %0; jne 1b"
			      : "+m" (*val), "=a" (ret)
			      : "r" (new_value), "a" (*val)
			      : "memory");
	return(ret);
}
/*
 * Pointer-sized exchange: atomically stores new_value into *val and
 * returns the previous value.  Uses cmpxchgl, so it relies on the
 * file-level sizeof(gpointer)==sizeof(gint32) assumption (__i386__
 * only).
 *
 * Parameter renamed from "new" (C++ keyword) so the header remains
 * usable from C++.  The "memory" clobber makes the operation a
 * compiler barrier.
 */
static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
						  gpointer new_value)
{
	gpointer ret;

	__asm__ __volatile__ ("1:; lock; cmpxchgl %2, %0; jne 1b"
			      : "+m" (*val), "=a" (ret)
			      : "r" (new_value), "a" (*val)
			      : "memory");
	return(ret);
}
/*
 * Atomically adds add to *val and returns the PREVIOUS value of *val
 * (xadd writes the old value back into its register operand).
 *
 * The "memory" clobber makes the operation a compiler barrier.
 */
static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
	gint32 ret = add;

	__asm__ __volatile__ ("lock; xaddl %0, %1"
			      : "+r" (ret), "+m" (*val)
			      :
			      : "memory");
	return(ret);
}
#else
/* Non-i386 targets: out-of-line C fallbacks with the same contracts
 * as the inline-asm versions above — presumably defined in a .c file
 * elsewhere in the io-layer (TODO confirm); these are not guaranteed
 * to be lock-free. */
extern gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp);
extern gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp);
extern gint32 InterlockedIncrement(volatile gint32 *dest);
extern gint32 InterlockedDecrement(volatile gint32 *dest);
extern gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch);
extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);
#endif
#endif /* _WAPI_ATOMIC_H_ */