Commit aa09031c authored by Robert Shearman's avatar Robert Shearman Committed by Alexandre Julliard

ole32: Reuse event handles used for COM calls.

This reduces the number of server calls during a COM call by half.
parent af994f4e
...@@ -100,6 +100,7 @@ typedef struct ...@@ -100,6 +100,7 @@ typedef struct
OXID oxid; /* apartment in which the channel is valid */ OXID oxid; /* apartment in which the channel is valid */
DWORD dest_context; /* returned from GetDestCtx */ DWORD dest_context; /* returned from GetDestCtx */
LPVOID dest_context_data; /* returned from GetDestCtx */ LPVOID dest_context_data; /* returned from GetDestCtx */
HANDLE event; /* cached event handle */
} ClientRpcChannelBuffer; } ClientRpcChannelBuffer;
struct dispatch_params struct dispatch_params
...@@ -152,6 +153,7 @@ static ULONG WINAPI ClientRpcChannelBuffer_Release(LPRPCCHANNELBUFFER iface) ...@@ -152,6 +153,7 @@ static ULONG WINAPI ClientRpcChannelBuffer_Release(LPRPCCHANNELBUFFER iface)
if (ref) if (ref)
return ref; return ref;
if (This->event) CloseHandle(This->event);
RpcBindingFree(&This->bind); RpcBindingFree(&This->bind);
HeapFree(GetProcessHeap(), 0, This); HeapFree(GetProcessHeap(), 0, This);
return 0; return 0;
...@@ -207,6 +209,24 @@ static HRESULT WINAPI ServerRpcChannelBuffer_SendReceive(LPRPCCHANNELBUFFER ifac ...@@ -207,6 +209,24 @@ static HRESULT WINAPI ServerRpcChannelBuffer_SendReceive(LPRPCCHANNELBUFFER ifac
return E_NOTIMPL; return E_NOTIMPL;
} }
/* Obtain an event handle for an outgoing COM call, preferring the one
 * cached on the channel to avoid a server round-trip per call. */
static HANDLE ClientRpcChannelBuffer_GetEventHandle(ClientRpcChannelBuffer *This)
{
    /* atomically claim the cached handle so two concurrent calls
     * can't both use it */
    HANDLE cached = InterlockedExchangePointer(&This->event, NULL);
    if (cached)
        return cached;
    /* Note: must be auto-reset event so we can reuse it without a call
     * to ResetEvent */
    return CreateEventW(NULL, FALSE, FALSE, NULL);
}
/* Return an event handle after a COM call: cache it on the channel for
 * reuse, or close it if another thread already re-cached one. */
static void ClientRpcChannelBuffer_ReleaseEventHandle(ClientRpcChannelBuffer *This, HANDLE event)
{
    HANDLE previous = InterlockedCompareExchangePointer(&This->event, event, NULL);
    if (previous != NULL)
    {
        /* cache slot was already occupied, so this handle is surplus */
        CloseHandle(event);
    }
}
/* this thread runs an outgoing RPC */ /* this thread runs an outgoing RPC */
static DWORD WINAPI rpc_sendreceive_thread(LPVOID param) static DWORD WINAPI rpc_sendreceive_thread(LPVOID param)
{ {
...@@ -271,7 +291,7 @@ static HRESULT WINAPI ClientRpcChannelBuffer_SendReceive(LPRPCCHANNELBUFFER ifac ...@@ -271,7 +291,7 @@ static HRESULT WINAPI ClientRpcChannelBuffer_SendReceive(LPRPCCHANNELBUFFER ifac
RpcBindingInqObject(msg->Handle, &ipid); RpcBindingInqObject(msg->Handle, &ipid);
hr = ipid_get_dispatch_params(&ipid, &apt, &params->stub, &params->chan); hr = ipid_get_dispatch_params(&ipid, &apt, &params->stub, &params->chan);
params->handle = CreateEventW(NULL, FALSE, FALSE, NULL); params->handle = ClientRpcChannelBuffer_GetEventHandle(This);
if ((hr == S_OK) && !apt->multi_threaded) if ((hr == S_OK) && !apt->multi_threaded)
{ {
TRACE("Calling apartment thread 0x%08lx...\n", apt->tid); TRACE("Calling apartment thread 0x%08lx...\n", apt->tid);
...@@ -315,7 +335,7 @@ static HRESULT WINAPI ClientRpcChannelBuffer_SendReceive(LPRPCCHANNELBUFFER ifac ...@@ -315,7 +335,7 @@ static HRESULT WINAPI ClientRpcChannelBuffer_SendReceive(LPRPCCHANNELBUFFER ifac
if (WaitForSingleObject(params->handle, 0)) if (WaitForSingleObject(params->handle, 0))
hr = CoWaitForMultipleHandles(0, INFINITE, 1, &params->handle, &index); hr = CoWaitForMultipleHandles(0, INFINITE, 1, &params->handle, &index);
} }
CloseHandle(params->handle); ClientRpcChannelBuffer_ReleaseEventHandle(This, params->handle);
if (hr == S_OK) hr = params->hr; if (hr == S_OK) hr = params->hr;
...@@ -478,6 +498,7 @@ HRESULT RPC_CreateClientChannel(const OXID *oxid, const IPID *ipid, ...@@ -478,6 +498,7 @@ HRESULT RPC_CreateClientChannel(const OXID *oxid, const IPID *ipid,
apartment_getoxid(COM_CurrentApt(), &This->oxid); apartment_getoxid(COM_CurrentApt(), &This->oxid);
This->dest_context = dest_context; This->dest_context = dest_context;
This->dest_context_data = dest_context_data; This->dest_context_data = dest_context_data;
This->event = NULL;
*chan = (IRpcChannelBuffer*)This; *chan = (IRpcChannelBuffer*)This;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment