Commit 32347fcf authored by Zebediah Figura's avatar Zebediah Figura Committed by Alexandre Julliard

server: Queue a cancel IRP in the device file cancel_async callback.

parent 4f1c7ba5
......@@ -349,6 +349,8 @@ static void test_v1_server(void)
ret = CancelIo(queue);
ok(ret, "Failed to close queue handle, error %u.\n", GetLastError());
ret = WaitForSingleObject(ovl.hEvent, 100);
ok(!ret, "Got %u.\n", ret);
ret_size = 0xdeadbeef;
ret = GetOverlappedResult(queue, &ovl, &ret_size, FALSE);
ok(!ret, "Expected failure.\n");
......
......@@ -419,17 +419,6 @@ static void *create_file_object( HANDLE handle )
DECLARE_CRITICAL_SECTION(irp_completion_cs);
/* Cancel routine installed (via IoSetCancelRoutine) on an IRP whose result the
 * server has already received but which must stay alive until a pending cancel
 * call frees it.  Completes the IRP with STATUS_CANCELLED.
 * NOTE(review): per the NT cancel-routine contract the I/O manager invokes
 * this with the cancel spin lock held -- confirm against WDK docs. */
static void WINAPI cancel_completed_irp( DEVICE_OBJECT *device, IRP *irp )
{
TRACE( "(%p %p)\n", device, irp );
/* release the cancel spin lock at the IRQL saved in the IRP before
 * completing the request */
IoReleaseCancelSpinLock(irp->CancelIrql);
/* report cancellation with no data transferred */
irp->IoStatus.u.Status = STATUS_CANCELLED;
irp->IoStatus.Information = 0;
IoCompleteRequest(irp, IO_NO_INCREMENT);
}
/* transfer result of IRP back to wineserver */
static NTSTATUS WINAPI dispatch_irp_completion( DEVICE_OBJECT *device, IRP *irp, void *context )
{
......@@ -455,17 +444,6 @@ static NTSTATUS WINAPI dispatch_irp_completion( DEVICE_OBJECT *device, IRP *irp,
}
SERVER_END_REQ;
if (status == STATUS_MORE_PROCESSING_REQUIRED)
{
/* IRP is complete, but server may have already ordered cancel call. In such case,
* it will return STATUS_MORE_PROCESSING_REQUIRED, leaving the IRP alive until
* cancel frees it. */
if (irp->Cancel)
status = STATUS_SUCCESS;
else
IoSetCancelRoutine( irp, cancel_completed_irp );
}
if (irp->UserBuffer != irp->AssociatedIrp.SystemBuffer)
{
HeapFree( GetProcessHeap(), 0, irp->UserBuffer );
......
......@@ -53,6 +53,7 @@ struct async
unsigned int direct_result :1;/* a flag if we're passing result directly from request instead of APC */
unsigned int alerted :1; /* fd is signaled, but we are waiting for client-side I/O */
unsigned int terminated :1; /* async has been terminated */
unsigned int canceled :1; /* have we already queued cancellation for this async? */
unsigned int unknown_status :1; /* initial status is not known yet */
struct completion *completion; /* completion associated with fd */
apc_param_t comp_key; /* completion key associated with fd */
......@@ -259,6 +260,7 @@ struct async *create_async( struct fd *fd, struct thread *thread, const async_da
async->direct_result = 0;
async->alerted = 0;
async->terminated = 0;
async->canceled = 0;
async->unknown_status = 0;
async->completion = fd_get_completion( fd, &async->comp_key );
async->comp_flags = 0;
......@@ -494,14 +496,19 @@ static int cancel_async( struct process *process, struct object *obj, struct thr
struct async *async;
int woken = 0;
/* FIXME: it would probably be nice to replace the "canceled" flag with a
* single LIST_FOR_EACH_ENTRY_SAFE, but currently cancelling an async can
* cause other asyncs to be removed via async_reselect() */
restart:
LIST_FOR_EACH_ENTRY( async, &process->asyncs, struct async, process_entry )
{
if (async->terminated) continue;
if (async->terminated || async->canceled) continue;
if ((!obj || (get_fd_user( async->fd ) == obj)) &&
(!thread || async->thread == thread) &&
(!iosb || async->data.iosb == iosb))
{
async->canceled = 1;
fd_cancel_async( async->fd, async );
woken++;
goto restart;
......
......@@ -205,7 +205,7 @@ static void device_file_read( struct fd *fd, struct async *async, file_pos_t pos
static void device_file_write( struct fd *fd, struct async *async, file_pos_t pos );
static void device_file_flush( struct fd *fd, struct async *async );
static void device_file_ioctl( struct fd *fd, ioctl_code_t code, struct async *async );
static void device_file_reselect_async( struct fd *fd, struct async_queue *queue );
static void device_file_cancel_async( struct fd *fd, struct async *async );
static void device_file_get_volume_info( struct fd *fd, struct async *async, unsigned int info_class );
static const struct object_ops device_file_ops =
......@@ -243,9 +243,9 @@ static const struct fd_ops device_file_fd_ops =
default_fd_get_file_info, /* get_file_info */
device_file_get_volume_info, /* get_volume_info */
device_file_ioctl, /* ioctl */
default_fd_cancel_async, /* cancel_async */
device_file_cancel_async, /* cancel_async */
default_fd_queue_async, /* queue_async */
device_file_reselect_async /* reselect_async */
default_fd_reselect_async, /* reselect_async */
};
......@@ -687,21 +687,21 @@ static void cancel_irp_call( struct irp_call *irp )
add_irp_to_queue( irp->file->device->manager, cancel_irp, NULL );
release_object( cancel_irp );
}
set_irp_result( irp, STATUS_CANCELLED, NULL, 0, 0 );
}
static void device_file_reselect_async( struct fd *fd, struct async_queue *queue )
static void device_file_cancel_async( struct fd *fd, struct async *async )
{
struct device_file *file = get_fd_user( fd );
struct irp_call *irp;
LIST_FOR_EACH_ENTRY( irp, &file->requests, struct irp_call, dev_entry )
if (irp->iosb->status != STATUS_PENDING)
{
if (irp->async == async)
{
cancel_irp_call( irp );
return;
}
}
}
static struct device *create_device( struct object *root, const struct unicode_str *name,
......@@ -1018,12 +1018,9 @@ DECL_HANDLER(set_irp_result)
if ((irp = (struct irp_call *)get_handle_obj( current->process, req->handle, 0, &irp_call_ops )))
{
if (!irp->canceled)
set_irp_result( irp, req->status, get_req_data(), get_req_data_size(), req->size );
else if(irp->user_ptr) /* cancel already queued */
set_error( STATUS_MORE_PROCESSING_REQUIRED );
else /* we may be still dispatching the IRP. don't bother queuing cancel if it's already complete */
irp->canceled = 0;
set_irp_result( irp, req->status, get_req_data(), get_req_data_size(), req->size );
/* we may be still dispatching the IRP. don't bother queuing cancel if it's already complete */
irp->canceled = 0;
close_handle( current->process, req->handle ); /* avoid an extra round-trip for close */
release_object( irp );
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment