Commit 894b188f authored by Alexandre Julliard

Moved __ASM_GLOBAL_FUNC macros and interlocked functions to port.[ch]

parent af16c98f
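A minimal sketch of the intended call pattern once a file includes wine/port.h; the claim_slot helper below is purely illustrative (it is not part of this commit), but interlocked_cmpxchg_ptr is declared with exactly this signature in the port.h hunk further down:

#include "config.h"
#include "wine/port.h"
#include <stddef.h>

/* Illustrative helper, not in the commit: publish a pointer atomically.
 * interlocked_cmpxchg_ptr() stores new_value into *slot only if *slot is
 * still NULL, and always returns the previous contents of *slot. */
static void *claim_slot( void **slot, void *new_value )
{
    return interlocked_cmpxchg_ptr( slot, new_value, NULL );
}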
......@@ -24,7 +24,9 @@
*
* FIXME: Incomplete support for nested exceptions/try block cleanup.
*/
#include "config.h"
#include "wine/port.h"
#include "ntddk.h"
#include "wine/exception.h"
......
......@@ -18,6 +18,9 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "config.h"
#include "wine/port.h"
#include <assert.h>
#include <errno.h>
#include <stdio.h>
......@@ -29,107 +32,15 @@
WINE_DEFAULT_DEBUG_CHANNEL(ntdll);
WINE_DECLARE_DEBUG_CHANNEL(relay);
/* Define the atomic exchange/inc/dec functions.
* These are available in kernel32.dll already,
* but we don't want to import kernel32 from ntdll.
*/
#ifdef __i386__
# ifdef __GNUC__
inline static PVOID interlocked_cmpxchg( PVOID *dest, PVOID xchg, PVOID compare )
{
PVOID ret;
__asm__ __volatile__( "lock; cmpxchgl %2,(%1)"
: "=a" (ret) : "r" (dest), "r" (xchg), "0" (compare) : "memory" );
return ret;
}
inline static LONG interlocked_inc( PLONG dest )
{
LONG ret;
__asm__ __volatile__( "lock; xaddl %0,(%1)"
: "=r" (ret) : "r" (dest), "0" (1) : "memory" );
return ret + 1;
}
inline static LONG interlocked_dec( PLONG dest )
{
LONG ret;
__asm__ __volatile__( "lock; xaddl %0,(%1)"
: "=r" (ret) : "r" (dest), "0" (-1) : "memory" );
return ret - 1;
}
# else /* __GNUC__ */
PVOID WINAPI interlocked_cmpxchg( PVOID *dest, PVOID xchg, PVOID compare );
__ASM_GLOBAL_FUNC(interlocked_cmpxchg,
"movl 12(%esp),%eax\n\t"
"movl 8(%esp),%ecx\n\t"
"movl 4(%esp),%edx\n\t"
"lock; cmpxchgl %ecx,(%edx)\n\t"
"ret $12");
LONG WINAPI interlocked_inc( PLONG dest );
__ASM_GLOBAL_FUNC(interlocked_inc,
"movl 4(%esp),%edx\n\t"
"movl $1,%eax\n\t"
"lock; xaddl %eax,(%edx)\n\t"
"incl %eax\n\t"
"ret $4");
LONG WINAPI interlocked_dec( PLONG dest );
__ASM_GLOBAL_FUNC(interlocked_dec,
"movl 4(%esp),%edx\n\t"
"movl $-1,%eax\n\t"
"lock; xaddl %eax,(%edx)\n\t"
"decl %eax\n\t"
"ret $4");
# endif /* __GNUC__ */
#elif defined(__sparc__) && defined(__sun__)
/*
* As the earlier Sparc processors lack necessary atomic instructions,
* I'm simply falling back to the library-provided _lwp_mutex routines
* to ensure mutual exclusion in a way appropriate for the current
* architecture.
*
* FIXME: If we have the compare-and-swap instruction (Sparc v9 and above)
* we could use this to speed up the Interlocked operations ...
*/
#include <synch.h>
static lwp_mutex_t interlocked_mutex = DEFAULTMUTEX;
static PVOID interlocked_cmpxchg( PVOID *dest, PVOID xchg, PVOID compare )
{
_lwp_mutex_lock( &interlocked_mutex );
if ( *dest == compare )
*dest = xchg;
else
compare = *dest;
_lwp_mutex_unlock( &interlocked_mutex );
return compare;
}
static LONG interlocked_inc( PLONG dest )
{
- LONG retv;
- _lwp_mutex_lock( &interlocked_mutex );
- retv = ++*dest;
- _lwp_mutex_unlock( &interlocked_mutex );
- return retv;
+ return interlocked_xchg_add( dest, 1 ) + 1;
}
- static LONG interlocked_dec( PLONG dest )
+ inline static LONG interlocked_dec( PLONG dest )
{
- LONG retv;
- _lwp_mutex_lock( &interlocked_mutex );
- retv = --*dest;
- _lwp_mutex_unlock( &interlocked_mutex );
- return retv;
+ return interlocked_xchg_add( dest, -1 ) - 1;
}
#else
# error You must implement the interlocked* functions for your CPU
#endif
/***********************************************************************
* get_semaphore
......@@ -141,7 +52,7 @@ static inline HANDLE get_semaphore( RTL_CRITICAL_SECTION *crit )
{
HANDLE sem;
if (NtCreateSemaphore( &sem, SEMAPHORE_ALL_ACCESS, NULL, 0, 1 )) return 0;
- if (!(ret = (HANDLE)interlocked_cmpxchg( (PVOID *)&crit->LockSemaphore,
+ if (!(ret = (HANDLE)interlocked_cmpxchg_ptr( (PVOID *)&crit->LockSemaphore,
(PVOID)sem, 0 )))
ret = sem;
else
......@@ -270,7 +181,7 @@ NTSTATUS WINAPI RtlEnterCriticalSection( RTL_CRITICAL_SECTION *crit )
BOOL WINAPI RtlTryEnterCriticalSection( RTL_CRITICAL_SECTION *crit )
{
BOOL ret = FALSE;
- if (interlocked_cmpxchg( (PVOID *)&crit->LockCount, (PVOID)0L, (PVOID)-1L ) == (PVOID)-1L)
+ if (interlocked_cmpxchg( &crit->LockCount, 0L, -1 ) == -1)
{
crit->OwningThread = GetCurrentThreadId();
crit->RecursionCount = 1;
......
......@@ -31,7 +31,6 @@
#include "winbase.h"
#include "winnt.h"
#include "ntddk.h"
#include "wtypes.h"
#include "msvcrt/excpt.h"
WINE_DECLARE_DEBUG_CHANNEL(tid);
......
......@@ -20,6 +20,7 @@
*/
#include "config.h"
#include "wine/port.h"
#include <assert.h>
#include <signal.h>
......
......@@ -21,6 +21,7 @@
#ifdef __i386__
#include "config.h"
#include "wine/port.h"
#include <errno.h>
#include <signal.h>
......
......@@ -19,6 +19,7 @@
*/
#include "config.h"
#include "wine/port.h"
#include "windef.h"
#include "wine/winbase16.h"
......
......@@ -200,6 +200,92 @@ extern int wine_dlclose( void *handle, char *error, int errorsize );
#define RTLD_GLOBAL 0x100
#endif
/* Interlocked functions */
#if defined(__i386__) && defined(__GNUC__)
inline static long interlocked_cmpxchg( long *dest, long xchg, long compare )
{
long ret;
__asm__ __volatile__( "lock; cmpxchgl %2,(%1)"
: "=a" (ret) : "r" (dest), "r" (xchg), "0" (compare) : "memory" );
return ret;
}
inline static void *interlocked_cmpxchg_ptr( void **dest, void *xchg, void *compare )
{
void *ret;
__asm__ __volatile__( "lock; cmpxchgl %2,(%1)"
: "=a" (ret) : "r" (dest), "r" (xchg), "0" (compare) : "memory" );
return ret;
}
inline static long interlocked_xchg( long *dest, long val )
{
long ret;
__asm__ __volatile__( "lock; xchgl %0,(%1)"
: "=r" (ret) : "r" (dest), "0" (val) : "memory" );
return ret;
}
inline static void *interlocked_xchg_ptr( void **dest, void *val )
{
void *ret;
__asm__ __volatile__( "lock; xchgl %0,(%1)"
: "=r" (ret) : "r" (dest), "0" (val) : "memory" );
return ret;
}
inline static long interlocked_xchg_add( long *dest, long incr )
{
long ret;
__asm__ __volatile__( "lock; xaddl %0,(%1)"
: "=r" (ret) : "r" (dest), "0" (incr) : "memory" );
return ret;
}
#else /* __i386__ && __GNUC__ */
extern long interlocked_cmpxchg( long *dest, long xchg, long compare );
extern void *interlocked_cmpxchg_ptr( void **dest, void *xchg, void *compare );
extern long interlocked_xchg( long *dest, long val );
extern void *interlocked_xchg_ptr( void **dest, void *val );
extern long interlocked_xchg_add( long *dest, long incr );
#endif /* __i386__ && __GNUC__ */
/* Macros to define assembler functions somewhat portably */
#ifdef NEED_UNDERSCORE_PREFIX
# define __ASM_NAME(name) "_" name
#else
# define __ASM_NAME(name) name
#endif
#ifdef NEED_TYPE_IN_DEF
# define __ASM_FUNC(name) ".def " __ASM_NAME(name) "; .scl 2; .type 32; .endef"
#else
# define __ASM_FUNC(name) ".type " __ASM_NAME(name) ",@function"
#endif
#ifdef __GNUC__
# define __ASM_GLOBAL_FUNC(name,code) \
__asm__( ".align 4\n\t" \
".globl " __ASM_NAME(#name) "\n\t" \
__ASM_FUNC(#name) "\n" \
__ASM_NAME(#name) ":\n\t" \
code );
#else /* __GNUC__ */
# define __ASM_GLOBAL_FUNC(name,code) \
void __asm_dummy_##name(void) { \
asm( ".align 4\n\t" \
".globl " __ASM_NAME(#name) "\n\t" \
__ASM_FUNC(#name) "\n" \
__ASM_NAME(#name) ":\n\t" \
code ); \
}
#endif /* __GNUC__ */
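For reference, a hypothetical use of the macro (not part of this commit; the real uses are the interlocked_* definitions added to port.c below). On i386 it emits the .globl directive, the symbol type, the label, and then the hand-written body:

/* Hypothetical example: a cdecl function written entirely in assembly,
 * returning its first stack argument in %eax. */
extern int asm_identity( int x );
__ASM_GLOBAL_FUNC( asm_identity,
                   "movl 4(%esp),%eax\n\t"
                   "ret" );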
/* Macros to access unaligned or wrong-endian WORDs and DWORDs. */
......
......@@ -1120,36 +1120,6 @@ typedef CONTEXT *PCONTEXT;
/* Macros to retrieve the current context */
#ifdef NEED_UNDERSCORE_PREFIX
# define __ASM_NAME(name) "_" name
#else
# define __ASM_NAME(name) name
#endif
#ifdef NEED_TYPE_IN_DEF
# define __ASM_FUNC(name) ".def " __ASM_NAME(name) "; .scl 2; .type 32; .endef"
#else
# define __ASM_FUNC(name) ".type " __ASM_NAME(name) ",@function"
#endif
#ifdef __GNUC__
# define __ASM_GLOBAL_FUNC(name,code) \
__asm__( ".align 4\n\t" \
".globl " __ASM_NAME(#name) "\n\t" \
__ASM_FUNC(#name) "\n" \
__ASM_NAME(#name) ":\n\t" \
code );
#else /* __GNUC__ */
# define __ASM_GLOBAL_FUNC(name,code) \
void __asm_dummy_##name(void) { \
asm( ".align 4\n\t" \
".globl " __ASM_NAME(#name) "\n\t" \
__ASM_FUNC(#name) "\n" \
__ASM_NAME(#name) ":\n\t" \
code ); \
}
#endif /* __GNUC__ */
#ifdef __i386__
#define _DEFINE_REGS_ENTRYPOINT( name, fn, args ) \
......
......@@ -738,3 +738,103 @@ char *gcvt (double number, size_t ndigit, char *buff)
return buff;
}
#endif /* HAVE_ECVT */
/***********************************************************************
* interlocked functions
*/
#ifdef __i386__
__ASM_GLOBAL_FUNC(interlocked_cmpxchg,
"movl 12(%esp),%eax\n\t"
"movl 8(%esp),%ecx\n\t"
"movl 4(%esp),%edx\n\t"
"lock; cmpxchgl %ecx,(%edx)\n\t"
"ret");
__ASM_GLOBAL_FUNC(interlocked_cmpxchg_ptr,
"movl 12(%esp),%eax\n\t"
"movl 8(%esp),%ecx\n\t"
"movl 4(%esp),%edx\n\t"
"lock; cmpxchgl %ecx,(%edx)\n\t"
"ret");
__ASM_GLOBAL_FUNC(interlocked_xchg,
"movl 8(%esp),%eax\n\t"
"movl 4(%esp),%edx\n\t"
"lock; xchgl %eax,(%edx)\n\t"
"ret");
__ASM_GLOBAL_FUNC(interlocked_xchg_ptr,
"movl 8(%esp),%eax\n\t"
"movl 4(%esp),%edx\n\t"
"lock; xchgl %eax,(%edx)\n\t"
"ret");
__ASM_GLOBAL_FUNC(interlocked_xchg_add,
"movl 8(%esp),%eax\n\t"
"movl 4(%esp),%edx\n\t"
"lock; xaddl %eax,(%edx)\n\t"
"ret");
#elif defined(__sparc__) && defined(__sun__)
/*
* As the earlier Sparc processors lack necessary atomic instructions,
* I'm simply falling back to the library-provided _lwp_mutex routines
* to ensure mutual exclusion in a way appropriate for the current
* architecture.
*
* FIXME: If we have the compare-and-swap instruction (Sparc v9 and above)
* we could use this to speed up the Interlocked operations ...
*/
#include <synch.h>
static lwp_mutex_t interlocked_mutex = DEFAULTMUTEX;
long interlocked_cmpxchg( long *dest, long xchg, long compare )
{
_lwp_mutex_lock( &interlocked_mutex );
if (*dest == compare) *dest = xchg;
else compare = *dest;
_lwp_mutex_unlock( &interlocked_mutex );
return compare;
}
void *interlocked_cmpxchg_ptr( void **dest, void *xchg, void *compare )
{
_lwp_mutex_lock( &interlocked_mutex );
if (*dest == compare) *dest = xchg;
else compare = *dest;
_lwp_mutex_unlock( &interlocked_mutex );
return compare;
}
long interlocked_xchg( long *dest, long val )
{
long retv;
_lwp_mutex_lock( &interlocked_mutex );
retv = *dest;
*dest = val;
_lwp_mutex_unlock( &interlocked_mutex );
return retv;
}
void *interlocked_xchg_ptr( void **dest, void *val )
{
void *retv;
_lwp_mutex_lock( &interlocked_mutex );
retv = *dest;
*dest = val;
_lwp_mutex_unlock( &interlocked_mutex );
return retv;
}
long interlocked_xchg_add( long *dest, long incr )
{
long retv;
_lwp_mutex_lock( &interlocked_mutex );
retv = *dest;
*dest += incr;
_lwp_mutex_unlock( &interlocked_mutex );
return retv;
}
#else
# error You must implement the interlocked* functions for your CPU
#endif
......@@ -19,6 +19,7 @@
*/
#include "config.h"
#include "wine/port.h"
#include <string.h>
......
......@@ -18,8 +18,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "config.h"
#include "wine/port.h"
#include <assert.h>
#include <string.h>
......
......@@ -19,6 +19,7 @@
*/
#include "config.h"
#include "wine/port.h"
#include <assert.h>
#include <stdio.h>
......
......@@ -19,6 +19,7 @@
*/
#include "config.h"
#include "wine/port.h"
#include <assert.h>
#include <errno.h>
......@@ -150,35 +151,14 @@ __ASM_GLOBAL_FUNC(InterlockedDecrement,
"decl %eax\n\t"
"ret $4");
- #elif defined(__sparc__) && defined(__sun__)
- /*
- * As the earlier Sparc processors lack necessary atomic instructions,
- * I'm simply falling back to the library-provided _lwp_mutex routines
- * to ensure mutual exclusion in a way appropriate for the current
- * architecture.
- *
- * FIXME: If we have the compare-and-swap instruction (Sparc v9 and above)
- * we could use this to speed up the Interlocked operations ...
- */
- #include <synch.h>
- static lwp_mutex_t interlocked_mutex = DEFAULTMUTEX;
+ #else /* __i386__ */
/***********************************************************************
* InterlockedCompareExchange (KERNEL32.@)
*/
LONG WINAPI InterlockedCompareExchange( PLONG dest, LONG xchg, LONG compare )
{
- _lwp_mutex_lock( &interlocked_mutex );
- if ( *dest == compare )
- *dest = xchg;
- else
- compare = *dest;
- _lwp_mutex_unlock( &interlocked_mutex );
- return compare;
+ return interlocked_cmpxchg( dest, xchg, compare );
}
/***********************************************************************
......@@ -186,14 +166,7 @@ LONG WINAPI InterlockedCompareExchange( PLONG dest, LONG xchg, LONG compare )
*/
LONG WINAPI InterlockedExchange( PLONG dest, LONG val )
{
- LONG retv;
- _lwp_mutex_lock( &interlocked_mutex );
- retv = *dest;
- *dest = val;
- _lwp_mutex_unlock( &interlocked_mutex );
- return retv;
+ return interlocked_xchg( dest, val );
}
/***********************************************************************
......@@ -201,14 +174,7 @@ LONG WINAPI InterlockedExchange( PLONG dest, LONG val )
*/
LONG WINAPI InterlockedExchangeAdd( PLONG dest, LONG incr )
{
- LONG retv;
- _lwp_mutex_lock( &interlocked_mutex );
- retv = *dest;
- *dest += incr;
- _lwp_mutex_unlock( &interlocked_mutex );
- return retv;
+ return interlocked_xchg_add( dest, incr );
}
/***********************************************************************
......@@ -216,13 +182,7 @@ LONG WINAPI InterlockedExchangeAdd( PLONG dest, LONG incr )
*/
LONG WINAPI InterlockedIncrement( PLONG dest )
{
- LONG retv;
- _lwp_mutex_lock( &interlocked_mutex );
- retv = ++*dest;
- _lwp_mutex_unlock( &interlocked_mutex );
- return retv;
+ return interlocked_xchg_add( dest, 1 ) + 1;
}
/***********************************************************************
......@@ -230,15 +190,7 @@ LONG WINAPI InterlockedIncrement( PLONG dest )
*/
LONG WINAPI InterlockedDecrement( PLONG dest )
{
- LONG retv;
- _lwp_mutex_lock( &interlocked_mutex );
- retv = --*dest;
- _lwp_mutex_unlock( &interlocked_mutex );
- return retv;
+ return interlocked_xchg_add( dest, -1 ) - 1;
}
- #else
- #error You must implement the Interlocked* functions for your CPU
- #endif
+ #endif /* __i386__ */
......@@ -30,59 +30,6 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_DIRECT_H
# include <direct.h>
#endif
#ifdef HAVE_IO_H
# include <io.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#if !defined(HAVE_POPEN) && defined(HAVE__POPEN)
#define popen _popen
#endif
#if !defined(HAVE_PCLOSE) && defined(HAVE__PCLOSE)
#define pclose _pclose
#endif
#if !defined(HAVE_STRNCASECMP) && defined(HAVE__STRNICMP)
# define strncasecmp _strnicmp
#endif
#if !defined(HAVE_STRCASECMP) && defined(HAVE__STRICMP)
# define strcasecmp _stricmp
#endif
#define PUT_WORD(ptr, w) (*(WORD *)(ptr) = (w))
#define PUT_LE_WORD(ptr, w) \
do { ((BYTE *)(ptr))[0] = LOBYTE(w); \
((BYTE *)(ptr))[1] = HIBYTE(w); } while (0)
#define PUT_BE_WORD(ptr, w) \
do { ((BYTE *)(ptr))[1] = LOBYTE(w); \
((BYTE *)(ptr))[0] = HIBYTE(w); } while (0)
#if defined(ALLOW_UNALIGNED_ACCESS)
#define PUT_UA_WORD(ptr, w) PUT_WORD(ptr, w)
#elif defined(WORDS_BIGENDIAN)
#define PUT_UA_WORD(ptr, w) PUT_BE_WORD(ptr, w)
#else
#define PUT_UA_WORD(ptr, w) PUT_LE_WORD(ptr, w)
#endif
#ifdef NEED_UNDERSCORE_PREFIX
# define __ASM_NAME(name) "_" name
#else
# define __ASM_NAME(name) name
#endif
#ifdef NEED_TYPE_IN_DEF
# define __ASM_FUNC(name) ".def " __ASM_NAME(name) "; .scl 2; .type 32; .endef"
#else
# define __ASM_FUNC(name) ".type " __ASM_NAME(name) ",@function"
#endif
#ifdef NEED_UNDERSCORE_PREFIX
# define PREFIX "_"
......
......@@ -20,6 +20,7 @@
*/
#include "config.h"
#include "wine/port.h"
#include <fcntl.h>
#include <stdio.h>
......
......@@ -23,6 +23,7 @@
*/
#include "config.h"
#include "wine/port.h"
#include <assert.h>
#include <stdio.h>
......
......@@ -23,6 +23,7 @@
*/
#include "config.h"
#include "wine/port.h"
#include <assert.h>
#include <ctype.h>
......
......@@ -23,6 +23,7 @@
*/
#include "config.h"
#include "wine/port.h"
#include <ctype.h>
......
......@@ -19,6 +19,7 @@
*/
#include "config.h"
#include "wine/port.h"
#include <ctype.h>
#include <stdlib.h>
......
......@@ -19,6 +19,7 @@
*/
#include "config.h"
#include "wine/port.h"
#include <ctype.h>
#include <stdlib.h>
......
......@@ -23,6 +23,7 @@
*/
#include "config.h"
#include "wine/port.h"
#include <assert.h>
#include <ctype.h>
......
......@@ -23,6 +23,7 @@
*/
#include "config.h"
#include "wine/port.h"
#include <assert.h>
#include <ctype.h>
......
......@@ -19,6 +19,7 @@
*/
#include "config.h"
#include "wine/port.h"
#include <ctype.h>
#include <stdarg.h>
......
......@@ -20,6 +20,7 @@
*/
#include "config.h"
#include "wine/port.h"
#include <string.h>
......