Commit b04acc81 authored by Connor McAdams, committed by Alexandre Julliard

uiautomationcore: Retrieve runtime ID on UiaReturnRawElementProvider thread to prevent a deadlock.

If we pass a node to the provider thread that contains a provider that was created in an STA with the ProviderOptions_UseComThreading flag set, we can deadlock when attempting to get a runtime ID from the proxy due to the message queue not being pumped. To avoid this, retrieve the runtime ID before passing the node to the provider thread.

Signed-off-by: Connor McAdams <cmcadams@codeweavers.com>
parent 5bd40907
...@@ -6305,6 +6305,16 @@ static const struct prov_method_sequence node_from_hwnd9[] = { ...@@ -6305,6 +6305,16 @@ static const struct prov_method_sequence node_from_hwnd9[] = {
{ 0 } { 0 }
}; };
static const struct prov_method_sequence node_from_hwnd10[] = {
NODE_CREATE_SEQ(&Provider),
/* Next two only done on Windows 8+. */
{ &Provider, FRAG_GET_RUNTIME_ID, METHOD_OPTIONAL },
{ &Provider, FRAG_GET_RUNTIME_ID, METHOD_OPTIONAL },
{ &Provider, PROV_GET_PROVIDER_OPTIONS },
{ &Provider, FRAG_GET_RUNTIME_ID, METHOD_OPTIONAL }, /* Only done on Win11+. */
{ 0 }
};
static const struct prov_method_sequence disconnect_prov1[] = { static const struct prov_method_sequence disconnect_prov1[] = {
{ &Provider_child, PROV_GET_PROVIDER_OPTIONS }, { &Provider_child, PROV_GET_PROVIDER_OPTIONS },
/* Win10v1507 and below call this. */ /* Win10v1507 and below call this. */
...@@ -6576,6 +6586,28 @@ static DWORD WINAPI uia_node_from_handle_test_thread(LPVOID param) ...@@ -6576,6 +6586,28 @@ static DWORD WINAPI uia_node_from_handle_test_thread(LPVOID param)
Sleep(50); Sleep(50);
ok(Provider.ref == 1, "Unexpected refcnt %ld\n", Provider.ref); ok(Provider.ref == 1, "Unexpected refcnt %ld\n", Provider.ref);
/* ProviderOptions_UseComThreading test from a separate thread. */
SET_EXPECT(winproc_GETOBJECT_UiaRoot);
/* Only sent on Win7. */
SET_EXPECT(winproc_GETOBJECT_CLIENT);
prov_root = &Provider.IRawElementProviderSimple_iface;
initialize_provider(&Provider, ProviderOptions_ServerSideProvider | ProviderOptions_UseComThreading, NULL, FALSE);
Provider.frag_root = NULL;
Provider.runtime_id[0] = Provider.runtime_id[1] = 0xdeadbeef;
hr = UiaNodeFromHandle(hwnd, &node);
ok(hr == S_OK, "Unexpected hr %#lx.\n", hr);
ok(Provider.ref == 2, "Unexpected refcnt %ld\n", Provider.ref);
CHECK_CALLED(winproc_GETOBJECT_UiaRoot);
called_winproc_GETOBJECT_CLIENT = expect_winproc_GETOBJECT_CLIENT = 0;
ok_method_sequence(node_from_hwnd10, "node_from_hwnd10");
ok(UiaNodeRelease(node), "UiaNodeRelease returned FALSE\n");
/* Win10v1809 can be slow to call Release on Provider. */
if (Provider.ref != 1)
Sleep(50);
ok(Provider.ref == 1, "Unexpected refcnt %ld\n", Provider.ref);
if (!pUiaDisconnectProvider) if (!pUiaDisconnectProvider)
{ {
win_skip("UiaDisconnectProvider not exported by uiautomationcore.dll\n"); win_skip("UiaDisconnectProvider not exported by uiautomationcore.dll\n");
......
...@@ -1744,19 +1744,15 @@ exit: ...@@ -1744,19 +1744,15 @@ exit:
LeaveCriticalSection(&provider_thread_cs); LeaveCriticalSection(&provider_thread_cs);
} }
static HRESULT uia_provider_thread_add_node(HUIANODE node) static HRESULT uia_provider_thread_add_node(HUIANODE node, SAFEARRAY *rt_id)
{ {
struct uia_node *node_data = impl_from_IWineUiaNode((IWineUiaNode *)node); struct uia_node *node_data = impl_from_IWineUiaNode((IWineUiaNode *)node);
int prov_type = get_node_provider_type_at_idx(node_data, 0); int prov_type = get_node_provider_type_at_idx(node_data, 0);
struct uia_provider *prov_data; struct uia_provider *prov_data;
SAFEARRAY *sa; HRESULT hr = S_OK;
HRESULT hr;
prov_data = impl_from_IWineUiaProvider(node_data->prov[prov_type]); prov_data = impl_from_IWineUiaProvider(node_data->prov[prov_type]);
node_data->nested_node = prov_data->return_nested_node = TRUE; node_data->nested_node = prov_data->return_nested_node = TRUE;
hr = UiaGetRuntimeId(node, &sa);
if (FAILED(hr))
return hr;
TRACE("Adding node %p\n", node); TRACE("Adding node %p\n", node);
...@@ -1764,38 +1760,40 @@ static HRESULT uia_provider_thread_add_node(HUIANODE node) ...@@ -1764,38 +1760,40 @@ static HRESULT uia_provider_thread_add_node(HUIANODE node)
list_add_tail(&provider_thread.nodes_list, &node_data->prov_thread_list_entry); list_add_tail(&provider_thread.nodes_list, &node_data->prov_thread_list_entry);
/* If we have a runtime ID, create an entry in the rb tree. */ /* If we have a runtime ID, create an entry in the rb tree. */
if (sa) if (rt_id)
{ {
struct uia_provider_thread_map_entry *prov_map; struct uia_provider_thread_map_entry *prov_map;
struct rb_entry *rb_entry; struct rb_entry *rb_entry;
if ((rb_entry = rb_get(&provider_thread.node_map, sa))) if ((rb_entry = rb_get(&provider_thread.node_map, rt_id)))
{
prov_map = RB_ENTRY_VALUE(rb_entry, struct uia_provider_thread_map_entry, entry); prov_map = RB_ENTRY_VALUE(rb_entry, struct uia_provider_thread_map_entry, entry);
SafeArrayDestroy(sa);
}
else else
{ {
prov_map = heap_alloc_zero(sizeof(*prov_map)); prov_map = heap_alloc_zero(sizeof(*prov_map));
if (!prov_map) if (!prov_map)
{ {
SafeArrayDestroy(sa); hr = E_OUTOFMEMORY;
LeaveCriticalSection(&provider_thread_cs); goto exit;
return E_OUTOFMEMORY;
} }
prov_map->runtime_id = sa; hr = SafeArrayCopy(rt_id, &prov_map->runtime_id);
if (FAILED(hr))
{
heap_free(prov_map);
goto exit;
}
list_init(&prov_map->nodes_list); list_init(&prov_map->nodes_list);
rb_put(&provider_thread.node_map, sa, &prov_map->entry); rb_put(&provider_thread.node_map, prov_map->runtime_id, &prov_map->entry);
} }
list_add_tail(&prov_map->nodes_list, &node_data->node_map_list_entry); list_add_tail(&prov_map->nodes_list, &node_data->node_map_list_entry);
node_data->map = prov_map; node_data->map = prov_map;
} }
exit:
LeaveCriticalSection(&provider_thread_cs); LeaveCriticalSection(&provider_thread_cs);
return S_OK; return hr;
} }
#define WM_GET_OBJECT_UIA_NODE (WM_USER + 1) #define WM_GET_OBJECT_UIA_NODE (WM_USER + 1)
...@@ -1807,13 +1805,13 @@ static LRESULT CALLBACK uia_provider_thread_msg_proc(HWND hwnd, UINT msg, WPARAM ...@@ -1807,13 +1805,13 @@ static LRESULT CALLBACK uia_provider_thread_msg_proc(HWND hwnd, UINT msg, WPARAM
{ {
case WM_GET_OBJECT_UIA_NODE: case WM_GET_OBJECT_UIA_NODE:
{ {
SAFEARRAY *rt_id = (SAFEARRAY *)wparam;
HUIANODE node = (HUIANODE)lparam; HUIANODE node = (HUIANODE)lparam;
LRESULT lr; LRESULT lr;
if (FAILED(uia_provider_thread_add_node(node))) if (FAILED(uia_provider_thread_add_node(node, rt_id)))
{ {
WARN("Failed to add node %p to provider thread list.\n", node); WARN("Failed to add node %p to provider thread list.\n", node);
UiaNodeRelease(node);
return 0; return 0;
} }
...@@ -1828,11 +1826,6 @@ static LRESULT CALLBACK uia_provider_thread_msg_proc(HWND hwnd, UINT msg, WPARAM ...@@ -1828,11 +1826,6 @@ static LRESULT CALLBACK uia_provider_thread_msg_proc(HWND hwnd, UINT msg, WPARAM
lr = 0; lr = 0;
} }
/*
* LresultFromObject increases refcnt by 1. If LresultFromObject
* failed, this is expected to release the node.
*/
UiaNodeRelease(node);
return lr; return lr;
} }
...@@ -1948,13 +1941,24 @@ void uia_stop_provider_thread(void) ...@@ -1948,13 +1941,24 @@ void uia_stop_provider_thread(void)
*/ */
LRESULT uia_lresult_from_node(HUIANODE huianode) LRESULT uia_lresult_from_node(HUIANODE huianode)
{ {
if (!uia_start_provider_thread()) SAFEARRAY *rt_id;
{ LRESULT lr = 0;
UiaNodeRelease(huianode); HRESULT hr;
return 0;
} hr = UiaGetRuntimeId(huianode, &rt_id);
if (SUCCEEDED(hr) && uia_start_provider_thread())
lr = SendMessageW(provider_thread.hwnd, WM_GET_OBJECT_UIA_NODE, (WPARAM)rt_id, (LPARAM)huianode);
if (FAILED(hr))
WARN("UiaGetRuntimeId failed with hr %#lx\n", hr);
return SendMessageW(provider_thread.hwnd, WM_GET_OBJECT_UIA_NODE, 0, (LPARAM)huianode); /*
* LresultFromObject increases refcnt by 1. If LresultFromObject
* failed or wasn't called, this is expected to release the node.
*/
UiaNodeRelease(huianode);
SafeArrayDestroy(rt_id);
return lr;
} }
/*********************************************************************** /***********************************************************************
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.