Commit d7ee5f3f authored by Jukka Heinonen's avatar Jukka Heinonen Committed by Alexandre Julliard

Fix race in real mode event handling.

Merge real mode pending event checking routines. Remove some obsolete code.
parent 63c1093f
...@@ -532,6 +532,71 @@ static void restore_vm86_context( const CONTEXT *context, struct vm86plus_struct ...@@ -532,6 +532,71 @@ static void restore_vm86_context( const CONTEXT *context, struct vm86plus_struct
vm86->regs.ss = context->SegSs; vm86->regs.ss = context->SegSs;
vm86->regs.eflags = context->EFlags; vm86->regs.eflags = context->EFlags;
} }
/**********************************************************************
 *         merge_vm86_pending_flags
 *
 * Merges TEB.vm86_ptr and TEB.vm86_pending VIP flags and
 * raises exception if there are pending events and VIF flag
 * has been turned on.
 *
 * Called from __wine_enter_vm86 because vm86_enter
 * doesn't check for pending events.
 *
 * Called from raise_vm86_sti_exception to check for
 * pending events in a signal safe way.
 *
 * NOTE(review): this routine deliberately avoids sigprocmask for
 * performance; its correctness relies on the exact statement order
 * below (see inline comments) and on the SIGUSR2 handler only
 * touching vm86->regs.eflags while TEB.vm86_ptr is non-NULL.
 * Do not reorder statements here.
 */
static void merge_vm86_pending_flags( EXCEPTION_RECORD *rec )
{
    BOOL check_pending = TRUE;
    struct vm86plus_struct *vm86 =
        (struct vm86plus_struct*)(NtCurrentTeb()->vm86_ptr);

    /*
     * In order to prevent a race when SIGUSR2 occurs while
     * we are returning from exception handler, pending events
     * will be rechecked after each raised exception.
     */
    while (check_pending && NtCurrentTeb()->vm86_pending)
    {
        check_pending = FALSE;
        /* Clearing vm86_ptr tells the signal handler to park new events
         * in TEB.vm86_pending instead of touching vm86->regs directly. */
        NtCurrentTeb()->vm86_ptr = NULL;

        /*
         * If VIF is set, throw exception.
         * Note that SIGUSR2 may turn VIF flag off so
         * VIF check must occur only when TEB.vm86_ptr is NULL.
         */
        if (vm86->regs.eflags & VIF_MASK)
        {
            CONTEXT vcontext;
            save_vm86_context( &vcontext, vm86 );

            rec->ExceptionCode    = EXCEPTION_VM86_STI;
            rec->ExceptionFlags   = EXCEPTION_CONTINUABLE;
            rec->ExceptionRecord  = NULL;
            rec->NumberParameters = 0;
            rec->ExceptionAddress = (LPVOID)vcontext.Eip;

            /* The event being delivered now is no longer pending:
             * drop VIP from the context handed to the handler and
             * reset the pending counter before raising. */
            vcontext.EFlags &= ~VIP_MASK;
            NtCurrentTeb()->vm86_pending = 0;
            EXC_RtlRaiseException( rec, &vcontext );

            restore_vm86_context( &vcontext, vm86 );
            /* An exception was raised; loop again in case SIGUSR2
             * queued another event while the handler was running. */
            check_pending = TRUE;
        }

        /* Re-arm direct event delivery for the signal handler. */
        NtCurrentTeb()->vm86_ptr = vm86;
    }

    /*
     * Merge VIP flags in a signal safe way. This requires
     * that the following operation compiles into atomic
     * instruction.
     */
    vm86->regs.eflags |= NtCurrentTeb()->vm86_pending;
}
#endif /* __HAVE_VM86 */ #endif /* __HAVE_VM86 */
...@@ -845,38 +910,21 @@ static void WINAPI raise_fpu_exception( EXCEPTION_RECORD *rec, CONTEXT *context ...@@ -845,38 +910,21 @@ static void WINAPI raise_fpu_exception( EXCEPTION_RECORD *rec, CONTEXT *context
#ifdef __HAVE_VM86 #ifdef __HAVE_VM86
/********************************************************************** /**********************************************************************
* raise_vm86_sti_exception * raise_vm86_sti_exception
*
* FIXME: this is most likely broken.
*/ */
static void WINAPI raise_vm86_sti_exception( EXCEPTION_RECORD *rec, CONTEXT *context ) static void WINAPI raise_vm86_sti_exception( EXCEPTION_RECORD *rec, CONTEXT *context )
{ {
struct vm86plus_struct *vm86; /* merge_vm86_pending_flags merges the vm86_pending flag in safely */
/* __wine_enter_vm86() merges the vm86_pending flag in safely */
NtCurrentTeb()->vm86_pending |= VIP_MASK; NtCurrentTeb()->vm86_pending |= VIP_MASK;
vm86 = (struct vm86plus_struct*)(NtCurrentTeb()->vm86_ptr); if (NtCurrentTeb()->vm86_ptr)
if (vm86)
{ {
if (vm86->regs.eflags & VIP_MASK) return;
vm86->regs.eflags |= VIP_MASK;
if (((char*)context->Eip >= (char*)vm86_return) && if (((char*)context->Eip >= (char*)vm86_return) &&
((char*)context->Eip <= (char*)vm86_return_end) && ((char*)context->Eip <= (char*)vm86_return_end) &&
(VM86_TYPE(context->Eax) != VM86_SIGNAL)) { (VM86_TYPE(context->Eax) != VM86_SIGNAL)) {
/* exiting from VM86, can't throw */ /* exiting from VM86, can't throw */
return; return;
} }
if (vm86->regs.eflags & VIF_MASK) { merge_vm86_pending_flags( rec );
/* VIF is set, throw exception */
CONTEXT vcontext;
NtCurrentTeb()->vm86_pending = 0;
NtCurrentTeb()->vm86_ptr = NULL;
save_vm86_context( &vcontext, vm86 );
rec->ExceptionAddress = (LPVOID)vcontext.Eip;
EXC_RtlRaiseException( rec, &vcontext );
restore_vm86_context( &vcontext, vm86 );
NtCurrentTeb()->vm86_ptr = vm86;
}
} }
else if (NtCurrentTeb()->dpmi_vif && else if (NtCurrentTeb()->dpmi_vif &&
!IS_SELECTOR_SYSTEM(context->SegCs) && !IS_SELECTOR_SYSTEM(context->SegCs) &&
...@@ -1195,40 +1243,18 @@ void __wine_enter_vm86( CONTEXT *context ) ...@@ -1195,40 +1243,18 @@ void __wine_enter_vm86( CONTEXT *context )
for (;;) for (;;)
{ {
restore_vm86_context( context, &vm86 ); restore_vm86_context( context, &vm86 );
/* Linux doesn't preserve pending flag (VIP_MASK) on return,
* so save it on entry, just in case */
teb->vm86_pending |= (context->EFlags & VIP_MASK);
/* Work around race conditions with signal handler
* (avoiding sigprocmask for performance reasons) */
teb->vm86_ptr = &vm86;
vm86.regs.eflags |= teb->vm86_pending;
/* Check for VIF|VIP here, since vm86_enter doesn't */ teb->vm86_ptr = &vm86;
if ((vm86.regs.eflags & (VIF_MASK|VIP_MASK)) == (VIF_MASK|VIP_MASK)) { merge_vm86_pending_flags( &rec );
teb->vm86_ptr = NULL;
teb->vm86_pending = 0;
context->EFlags |= VIP_MASK;
rec.ExceptionCode = EXCEPTION_VM86_STI;
rec.ExceptionFlags = EXCEPTION_CONTINUABLE;
rec.ExceptionRecord = NULL;
rec.ExceptionAddress = (LPVOID)context->Eip;
rec.NumberParameters = 0;
EXC_RtlRaiseException( &rec, context );
continue;
}
do
{
res = vm86_enter( &teb->vm86_ptr ); /* uses and clears teb->vm86_ptr */ res = vm86_enter( &teb->vm86_ptr ); /* uses and clears teb->vm86_ptr */
if (res < 0) if (res < 0)
{ {
errno = -res; errno = -res;
return; return;
} }
} while (VM86_TYPE(res) == VM86_SIGNAL);
save_vm86_context( context, &vm86 ); save_vm86_context( context, &vm86 );
context->EFlags |= teb->vm86_pending;
rec.ExceptionFlags = EXCEPTION_CONTINUABLE; rec.ExceptionFlags = EXCEPTION_CONTINUABLE;
rec.ExceptionRecord = NULL; rec.ExceptionRecord = NULL;
...@@ -1263,12 +1289,15 @@ void __wine_enter_vm86( CONTEXT *context ) ...@@ -1263,12 +1289,15 @@ void __wine_enter_vm86( CONTEXT *context )
rec.ExceptionInformation[0] = VM86_ARG(res); rec.ExceptionInformation[0] = VM86_ARG(res);
break; break;
case VM86_STI: /* sti/popf/iret instruction enabled virtual interrupts */ case VM86_STI: /* sti/popf/iret instruction enabled virtual interrupts */
context->EFlags |= VIF_MASK;
context->EFlags &= ~VIP_MASK;
teb->vm86_pending = 0; teb->vm86_pending = 0;
rec.ExceptionCode = EXCEPTION_VM86_STI; rec.ExceptionCode = EXCEPTION_VM86_STI;
break; break;
case VM86_PICRETURN: /* return due to pending PIC request */ case VM86_PICRETURN: /* return due to pending PIC request */
rec.ExceptionCode = EXCEPTION_VM86_PICRETURN; rec.ExceptionCode = EXCEPTION_VM86_PICRETURN;
break; break;
case VM86_SIGNAL: /* cannot happen because vm86_enter handles this case */
default: default:
ERR( "unhandled result from vm86 mode %x\n", res ); ERR( "unhandled result from vm86 mode %x\n", res );
continue; continue;
......
...@@ -557,9 +557,6 @@ static WINE_EXCEPTION_FILTER(exception_handler) ...@@ -557,9 +557,6 @@ static WINE_EXCEPTION_FILTER(exception_handler)
/* case EXCEPTION_VM86_PICRETURN: */ /* case EXCEPTION_VM86_PICRETURN: */
if (!ISV86(context)) if (!ISV86(context))
ERR( "Protected mode STI caught by real mode handler!\n" ); ERR( "Protected mode STI caught by real mode handler!\n" );
context->EFlags |= VIF_MASK;
context->EFlags &= ~VIP_MASK;
DOSVM_SendQueuedEvents(context); DOSVM_SendQueuedEvents(context);
return EXCEPTION_CONTINUE_EXECUTION; return EXCEPTION_CONTINUE_EXECUTION;
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment