Commit cdce50f5 authored by Alexandre Julliard

ntdll: Store the per-page committed status in the server for anonymous file mappings.

parent a19ff5f0
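The case this targets is a pagefile-backed (anonymous) section created without VPROT_COMMITTED, i.e. the Win32 SEC_RESERVE style: pages start out reserved and are committed later with VirtualAlloc, and since several processes may map the same section, the per-page commit state has to live in the wineserver (the new get_mapping_committed_range / add_mapping_committed_range requests below) rather than only in one process's view bookkeeping. A minimal sketch of the user-level scenario, illustrative only and not part of this commit, with error handling omitted:

#include <windows.h>

int main(void)
{
    /* anonymous, pagefile-backed section; reserved but not committed */
    HANDLE map = CreateFileMappingW( INVALID_HANDLE_VALUE, NULL,
                                     PAGE_READWRITE | SEC_RESERVE, 0, 0x10000, NULL );
    char *view = (char *)MapViewOfFile( map, FILE_MAP_WRITE, 0, 0, 0 );

    /* commit a single page inside the reserved view; another process that
     * maps the same section should now also see that page as MEM_COMMIT */
    VirtualAlloc( view + 0x1000, 0x1000, MEM_COMMIT, PAGE_READWRITE );

    MEMORY_BASIC_INFORMATION info;
    VirtualQuery( view + 0x1000, &info, sizeof(info) );  /* info.State == MEM_COMMIT */

    UnmapViewOfFile( view );
    CloseHandle( map );
    return 0;
}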
@@ -863,6 +863,45 @@ done:
/***********************************************************************
* get_committed_size
*
* Get the size of the committed range starting at base.
* Also return the protections for the first page.
*/
static SIZE_T get_committed_size( struct file_view *view, void *base, BYTE *vprot )
{
SIZE_T i, start;
start = ((char *)base - (char *)view->base) >> page_shift;
*vprot = view->prot[start];
if (view->mapping && !(view->protect & VPROT_COMMITTED))
{
SIZE_T ret = 0;
SERVER_START_REQ( get_mapping_committed_range )
{
req->handle = view->mapping;
req->offset = start << page_shift;
if (!wine_server_call( req ))
{
ret = reply->size;
if (reply->committed)
{
*vprot |= VPROT_COMMITTED;
for (i = 0; i < ret >> page_shift; i++) view->prot[start+i] |= VPROT_COMMITTED;
}
}
}
SERVER_END_REQ;
return ret;
}
for (i = start + 1; i < view->size >> page_shift; i++)
if ((*vprot ^ view->prot[i]) & VPROT_COMMITTED) break;
return (i - start) << page_shift;
}
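(Editorial note, not part of the diff: when the server reports the queried range as committed, the answer is cached on the client side by OR-ing VPROT_COMMITTED into view->prot for the affected pages, so repeated queries over the same pages avoid further server round trips.)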
/***********************************************************************
* decommit_view
*
* Decommit some pages of a given view.
@@ -1541,6 +1580,17 @@ NTSTATUS WINAPI NtAllocateVirtualMemory( HANDLE process, PVOID *ret, ULONG zero_
if (!(view = VIRTUAL_FindView( base )) ||
((char *)base + size > (char *)view->base + view->size)) status = STATUS_NOT_MAPPED_VIEW;
else if (!VIRTUAL_SetProt( view, base, size, vprot )) status = STATUS_ACCESS_DENIED;
else if (view->mapping && !(view->protect & VPROT_COMMITTED))
{
SERVER_START_REQ( add_mapping_committed_range )
{
req->handle = view->mapping;
req->offset = (char *)base - (char *)view->base;
req->size = size;
wine_server_call( req );
}
SERVER_END_REQ;
}
}
if (use_locks) server_leave_uninterrupted_section( &csVirtual, &sigset );
@@ -1659,9 +1709,7 @@ NTSTATUS WINAPI NtProtectVirtualMemory( HANDLE process, PVOID *addr_ptr, SIZE_T
sigset_t sigset;
NTSTATUS status = STATUS_SUCCESS;
char *base;
- UINT i;
- BYTE vprot, *p;
- ULONG prot;
+ BYTE vprot;
SIZE_T size = *size_ptr;
LPVOID addr = *addr_ptr;
@@ -1704,23 +1752,13 @@ NTSTATUS WINAPI NtProtectVirtualMemory( HANDLE process, PVOID *addr_ptr, SIZE_T
else
{
/* Make sure all the pages are committed */
- p = view->prot + ((base - (char *)view->base) >> page_shift);
- prot = VIRTUAL_GetWin32Prot( *p );
- for (i = size >> page_shift; i; i--, p++)
+ if (get_committed_size( view, base, &vprot ) >= size && (vprot & VPROT_COMMITTED))
{
- if (!(*p & VPROT_COMMITTED))
- {
- status = STATUS_NOT_COMMITTED;
- break;
- }
- }
- if (!i)
- {
- if (old_prot) *old_prot = prot;
+ if (old_prot) *old_prot = VIRTUAL_GetWin32Prot( vprot );
vprot = VIRTUAL_GetProt( new_prot ) | VPROT_COMMITTED;
if (!VIRTUAL_SetProt( view, base, size, vprot )) status = STATUS_ACCESS_DENIED;
}
+ else status = STATUS_NOT_COMMITTED;
}
server_leave_uninterrupted_section( &csVirtual, &sigset );
@@ -1850,15 +1888,17 @@ NTSTATUS WINAPI NtQueryVirtualMemory( HANDLE process, LPCVOID addr,
}
else
{
- BYTE vprot = view->prot[(base - alloc_base) >> page_shift];
+ BYTE vprot;
+ SIZE_T range_size = get_committed_size( view, base, &vprot );
info->State = (vprot & VPROT_COMMITTED) ? MEM_COMMIT : MEM_RESERVE;
- info->Protect = VIRTUAL_GetWin32Prot( vprot );
+ info->Protect = (vprot & VPROT_COMMITTED) ? VIRTUAL_GetWin32Prot( vprot ) : 0;
info->AllocationBase = alloc_base;
info->AllocationProtect = VIRTUAL_GetWin32Prot( view->protect );
if (view->protect & VPROT_IMAGE) info->Type = MEM_IMAGE;
else if (view->protect & VPROT_VALLOC) info->Type = MEM_PRIVATE;
else info->Type = MEM_MAPPED;
- for (size = base - alloc_base; size < view->size; size += page_size)
+ for (size = base - alloc_base; size < base + range_size - alloc_base; size += page_size)
if (view->prot[size >> page_shift] != vprot) break;
}
server_leave_uninterrupted_section( &csVirtual, &sigset );
...
@@ -1729,6 +1729,35 @@ struct get_mapping_info_reply
};
struct get_mapping_committed_range_request
{
struct request_header __header;
obj_handle_t handle;
file_pos_t offset;
};
struct get_mapping_committed_range_reply
{
struct reply_header __header;
file_pos_t size;
int committed;
};
struct add_mapping_committed_range_request
{
struct request_header __header;
obj_handle_t handle;
file_pos_t offset;
file_pos_t size;
};
struct add_mapping_committed_range_reply
{
struct reply_header __header;
};
#define SNAP_HEAPLIST 0x00000001
#define SNAP_PROCESS 0x00000002
#define SNAP_THREAD 0x00000004
@@ -4389,6 +4418,8 @@ enum request
REQ_create_mapping,
REQ_open_mapping,
REQ_get_mapping_info,
REQ_get_mapping_committed_range,
REQ_add_mapping_committed_range,
REQ_create_snapshot,
REQ_next_process,
REQ_next_thread,
@@ -4631,6 +4662,8 @@ union generic_request
struct create_mapping_request create_mapping_request;
struct open_mapping_request open_mapping_request;
struct get_mapping_info_request get_mapping_info_request;
struct get_mapping_committed_range_request get_mapping_committed_range_request;
struct add_mapping_committed_range_request add_mapping_committed_range_request;
struct create_snapshot_request create_snapshot_request;
struct next_process_request next_process_request;
struct next_thread_request next_thread_request;
@@ -4871,6 +4904,8 @@ union generic_reply
struct create_mapping_reply create_mapping_reply;
struct open_mapping_reply open_mapping_reply;
struct get_mapping_info_reply get_mapping_info_reply;
struct get_mapping_committed_range_reply get_mapping_committed_range_reply;
struct add_mapping_committed_range_reply add_mapping_committed_range_reply;
struct create_snapshot_reply create_snapshot_reply;
struct next_process_reply next_process_reply;
struct next_thread_reply next_thread_reply;
@@ -5035,6 +5070,6 @@ union generic_reply
struct set_window_layered_info_reply set_window_layered_info_reply;
};
- #define SERVER_PROTOCOL_VERSION 344
+ #define SERVER_PROTOCOL_VERSION 345
#endif /* __WINE_WINE_SERVER_PROTOCOL_H */
@@ -39,6 +39,18 @@
#include "request.h"
#include "security.h"
/* list of memory ranges, used to store committed info */
struct ranges
{
unsigned int count;
unsigned int max;
struct range
{
file_pos_t start;
file_pos_t end;
} ranges[1];
};
struct mapping
{
struct object obj; /* object header */
@@ -47,6 +59,7 @@ struct mapping
struct file *file; /* file mapped */
int header_size; /* size of headers (for PE image mapping) */
void *base; /* default base addr (for PE image mapping) */
struct ranges *committed; /* list of committed ranges in this mapping */
struct file *shared_file; /* temp file for shared PE mapping */
struct list shared_entry; /* entry in global shared PE mappings list */
};
@@ -138,6 +151,81 @@ static inline void get_section_sizes( const IMAGE_SECTION_HEADER *sec, size_t *m
if (*file_size > *map_size) *file_size = *map_size;
}
/* add a range to the committed list */
static void add_committed_range( struct mapping *mapping, file_pos_t start, file_pos_t end )
{
unsigned int i, j;
struct range *ranges;
if (!mapping->committed) return; /* everything committed already */
for (i = 0, ranges = mapping->committed->ranges; i < mapping->committed->count; i++)
{
if (ranges[i].start > end) break;
if (ranges[i].end < start) continue;
if (ranges[i].start > start) ranges[i].start = start; /* extend downwards */
if (ranges[i].end < end) /* extend upwards and maybe merge with next */
{
for (j = i + 1; j < mapping->committed->count; j++)
{
if (ranges[j].start > end) break;
if (ranges[j].end > end) end = ranges[j].end;
}
if (j > i + 1)
{
memmove( &ranges[i + 1], &ranges[j], (mapping->committed->count - j) * sizeof(*ranges) );
mapping->committed->count -= j - (i + 1);
}
ranges[i].end = end;
}
return;
}
/* now add a new range */
if (mapping->committed->count == mapping->committed->max)
{
unsigned int new_size = mapping->committed->max * 2;
struct ranges *new_ptr = realloc( mapping->committed, offsetof( struct ranges, ranges[new_size] ));
if (!new_ptr) return;
new_ptr->max = new_size;
ranges = new_ptr->ranges;
mapping->committed = new_ptr;
}
memmove( &ranges[i + 1], &ranges[i], (mapping->committed->count - i) * sizeof(*ranges) );
ranges[i].start = start;
ranges[i].end = end;
mapping->committed->count++;
}
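(Worked example, not part of the diff: starting from the ranges [0x1000,0x3000) and [0x5000,0x6000), a call add_committed_range( mapping, 0x2000, 0x5000 ) takes the overlap branch at i == 0, extends that range upward, absorbs the second range, and leaves the single range [0x1000,0x6000).)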
/* find the range containing start and return whether it's committed */
static int find_committed_range( struct mapping *mapping, file_pos_t start, file_pos_t *size )
{
unsigned int i;
struct range *ranges;
if (!mapping->committed) /* everything is committed */
{
*size = mapping->size - start;
return 1;
}
for (i = 0, ranges = mapping->committed->ranges; i < mapping->committed->count; i++)
{
if (ranges[i].start > start)
{
*size = ranges[i].start - start;
return 0;
}
if (ranges[i].end > start)
{
*size = ranges[i].end - start;
return 1;
}
}
*size = mapping->size - start;
return 0;
}
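(Worked example, not part of the diff: with the single committed range [0x1000,0x6000), find_committed_range( mapping, 0, &size ) returns 0 with size = 0x1000, while find_committed_range( mapping, 0x2000, &size ) returns 1 with size = 0x4000.)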
/* allocate and fill the temp file for a shared PE image mapping */
static int build_shared_mapping( struct mapping *mapping, int fd,
IMAGE_SECTION_HEADER *sec, unsigned int nb_sec )
@@ -302,13 +390,20 @@ static struct object *create_mapping( struct directory *root, const struct unico
SACL_SECURITY_INFORMATION );
mapping->header_size = 0;
mapping->base = NULL;
mapping->file = NULL;
mapping->shared_file = NULL;
mapping->committed = NULL;
if (protect & VPROT_READ) access |= FILE_READ_DATA;
if (protect & VPROT_WRITE) access |= FILE_WRITE_DATA;
if (handle)
{
if (!(protect & VPROT_COMMITTED))
{
set_error( STATUS_INVALID_PARAMETER );
goto error;
}
if (!(mapping->file = get_file_obj( current->process, handle, access ))) goto error;
if (protect & VPROT_IMAGE)
{
@@ -334,9 +429,14 @@ static struct object *create_mapping( struct directory *root, const struct unico
if (!size || (protect & VPROT_IMAGE))
{
set_error( STATUS_INVALID_PARAMETER );
- mapping->file = NULL;
goto error;
}
+ if (!(protect & VPROT_COMMITTED))
+ {
+ if (!(mapping->committed = mem_alloc( offsetof(struct ranges, ranges[8]) ))) goto error;
+ mapping->committed->count = 0;
+ mapping->committed->max = 8;
+ }
if (!(mapping->file = create_temp_file( access ))) goto error;
if (!grow_file( mapping->file, size )) goto error;
}
@@ -394,6 +494,7 @@ static void mapping_destroy( struct object *obj )
release_object( mapping->shared_file );
list_remove( &mapping->shared_entry );
}
free( mapping->committed );
}
int get_page_size(void)
@@ -485,3 +586,39 @@ DECL_HANDLER(get_mapping_info)
release_object( mapping );
}
}
/* get a range of committed pages in a file mapping */
DECL_HANDLER(get_mapping_committed_range)
{
struct mapping *mapping;
if ((mapping = (struct mapping *)get_handle_obj( current->process, req->handle, 0, &mapping_ops )))
{
if (!(req->offset & page_mask) && req->offset < mapping->size)
reply->committed = find_committed_range( mapping, req->offset, &reply->size );
else
set_error( STATUS_INVALID_PARAMETER );
release_object( mapping );
}
}
/* add a range to the committed pages in a file mapping */
DECL_HANDLER(add_mapping_committed_range)
{
struct mapping *mapping;
if ((mapping = (struct mapping *)get_handle_obj( current->process, req->handle, 0, &mapping_ops )))
{
if (!(req->size & page_mask) &&
!(req->offset & page_mask) &&
req->offset < mapping->size &&
req->size > 0 &&
req->size <= mapping->size - req->offset)
add_committed_range( mapping, req->offset, req->offset + req->size );
else
set_error( STATUS_INVALID_PARAMETER );
release_object( mapping );
}
}
@@ -1361,6 +1361,24 @@ enum char_info_mode
@END
/* Get a range of committed pages in a file mapping */
@REQ(get_mapping_committed_range)
obj_handle_t handle; /* handle to the mapping */
file_pos_t offset; /* starting offset (page-aligned, in bytes) */
@REPLY
file_pos_t size; /* size of range starting at offset (page-aligned, in bytes) */
int committed; /* whether it is a committed range */
@END
/* Add a range to the committed pages in a file mapping */
@REQ(add_mapping_committed_range)
obj_handle_t handle; /* handle to the mapping */
file_pos_t offset; /* starting offset (page-aligned, in bytes) */
file_pos_t size; /* size to set (page-aligned, in bytes) or 0 if only retrieving */
@END
#define SNAP_HEAPLIST 0x00000001
#define SNAP_PROCESS 0x00000002
#define SNAP_THREAD 0x00000004
...
@@ -184,6 +184,8 @@ DECL_HANDLER(read_change);
DECL_HANDLER(create_mapping);
DECL_HANDLER(open_mapping);
DECL_HANDLER(get_mapping_info);
DECL_HANDLER(get_mapping_committed_range);
DECL_HANDLER(add_mapping_committed_range);
DECL_HANDLER(create_snapshot);
DECL_HANDLER(next_process);
DECL_HANDLER(next_thread);
@@ -425,6 +427,8 @@ static const req_handler req_handlers[REQ_NB_REQUESTS] =
(req_handler)req_create_mapping,
(req_handler)req_open_mapping,
(req_handler)req_get_mapping_info,
(req_handler)req_get_mapping_committed_range,
(req_handler)req_add_mapping_committed_range,
(req_handler)req_create_snapshot,
(req_handler)req_next_process,
(req_handler)req_next_thread,
...