wine / wine-winehq

Commit 5b4b8774 authored Sep 02, 2020 by Alexandre Julliard
ntdll: Move the reserved area functions to avoid forward declarations.
Signed-off-by: Alexandre Julliard <julliard@winehq.org>
parent 7b96e82f
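The motivation is worth spelling out. reserve_area() and mmap_init() both call mmap_add_reserved_area(), which was defined further down in virtual.c, so the file carried a forward declaration of it. Moving the two functions below that definition makes the declaration unnecessary, which is also why the diff removes 141 lines but adds back only 139: the forward declaration and its trailing blank line are gone for good. A minimal compilable sketch of the pattern, with hypothetical names that do not appear in virtual.c:

/* before the move: the callee is defined after its caller,
   so a forward declaration is required */
static void helper1( void );                 /* forward declaration */
static void caller1( void ) { helper1(); }
static void helper1( void ) { }

/* after the move: the callee is defined before its first caller,
   so no separate declaration is needed */
static void helper2( void ) { }
static void caller2( void ) { helper2(); }

int main( void ) { caller1(); caller2(); return 0; }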
Showing 1 changed file with 139 additions and 141 deletions:

    dlls/ntdll/unix/virtual.c  (+139, -141)
--- a/dlls/ntdll/unix/virtual.c
+++ b/dlls/ntdll/unix/virtual.c
@@ -212,147 +212,6 @@ static inline BOOL is_inside_signal_stack( void *ptr )
 }
 
-static void mmap_add_reserved_area( void *addr, SIZE_T size );
-
-static void reserve_area( void *addr, void *end )
-{
-#ifdef __APPLE__
-
-#ifdef __i386__
-    static const mach_vm_address_t max_address = VM_MAX_ADDRESS;
-#else
-    static const mach_vm_address_t max_address = MACH_VM_MAX_ADDRESS;
-#endif
-    mach_vm_address_t address = (mach_vm_address_t)addr;
-    mach_vm_address_t end_address = (mach_vm_address_t)end;
-
-    if (!end_address || max_address < end_address) end_address = max_address;
-
-    while (address < end_address)
-    {
-        mach_vm_address_t hole_address = address;
-        kern_return_t ret;
-        mach_vm_size_t size;
-        vm_region_basic_info_data_64_t info;
-        mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
-        mach_port_t dummy_object_name = MACH_PORT_NULL;
-
-        /* find the mapped region at or above the current address. */
-        ret = mach_vm_region( mach_task_self(), &address, &size, VM_REGION_BASIC_INFO_64,
-                              (vm_region_info_t)&info, &count, &dummy_object_name );
-        if (ret != KERN_SUCCESS)
-        {
-            address = max_address;
-            size = 0;
-        }
-
-        if (end_address < address) address = end_address;
-
-        if (hole_address < address)
-        {
-            /* found a hole, attempt to reserve it. */
-            size_t hole_size = address - hole_address;
-            mach_vm_address_t alloc_address = hole_address;
-
-            ret = mach_vm_map( mach_task_self(), &alloc_address, hole_size, 0, VM_FLAGS_FIXED,
-                               MEMORY_OBJECT_NULL, 0, 0, PROT_NONE, VM_PROT_ALL, VM_INHERIT_COPY );
-            if (!ret) mmap_add_reserved_area( (void *)hole_address, hole_size );
-            else if (ret == KERN_NO_SPACE)
-            {
-                /* something filled (part of) the hole before we could.
-                   go back and look again. */
-                address = hole_address;
-                continue;
-            }
-        }
-
-        address += size;
-    }
-#else
-    void *ptr;
-    int flags = MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_TRYFIXED;
-    size_t size = (char *)end - (char *)addr;
-
-    if (!size) return;
-
-#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
-    ptr = mmap( addr, size, PROT_NONE, flags | MAP_FIXED | MAP_EXCL, -1, 0 );
-#else
-    ptr = mmap( addr, size, PROT_NONE, flags, -1, 0 );
-#endif
-    if (ptr == addr)
-    {
-        mmap_add_reserved_area( addr, size );
-        return;
-    }
-    if (ptr != (void *)-1) munmap( ptr, size );
-
-    size = (size / 2) & ~granularity_mask;
-    if (size)
-    {
-        reserve_area( addr, (char *)addr + size );
-        reserve_area( (char *)addr + size, end );
-    }
-#endif  /* __APPLE__ */
-}
-
-
-static void mmap_init( const struct preload_info *preload_info )
-{
-#ifndef _WIN64
-#ifndef __APPLE__
-    char stack;
-    char * const stack_ptr = &stack;
-#endif
-    char *user_space_limit = (char *)0x7ffe0000;
-    int i;
-
-    if (preload_info)
-    {
-        /* check for a reserved area starting at the user space limit */
-        /* to avoid wasting time trying to allocate it again */
-        for (i = 0; preload_info[i].size; i++)
-        {
-            if ((char *)preload_info[i].addr > user_space_limit) break;
-            if ((char *)preload_info[i].addr + preload_info[i].size > user_space_limit)
-            {
-                user_space_limit = (char *)preload_info[i].addr + preload_info[i].size;
-                break;
-            }
-        }
-    }
-    else reserve_area( (void *)0x00010000, (void *)0x40000000 );
-
-#ifndef __APPLE__
-    if (stack_ptr >= user_space_limit)
-    {
-        char *end = 0;
-        char *base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) - (granularity_mask + 1);
-
-        if (base > user_space_limit) reserve_area( user_space_limit, base );
-        base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) + (granularity_mask + 1);
-#if defined(linux) || defined(__FreeBSD__) || defined (__FreeBSD_kernel__) || defined(__DragonFly__)
-        /* Heuristic: assume the stack is near the end of the address */
-        /* space, this avoids a lot of futile allocation attempts */
-        end = (char *)(((unsigned long)base + 0x0fffffff) & 0xf0000000);
-#endif
-        reserve_area( base, end );
-    }
-    else
-#endif
-        reserve_area( user_space_limit, 0 );
-
-#else
-
-    if (preload_info) return;
-
-    /* if we don't have a preloader, try to reserve the space now */
-    reserve_area( (void *)0x000000010000, (void *)0x000068000000 );
-    reserve_area( (void *)0x00007ff00000, (void *)0x00007fff0000 );
-    reserve_area( (void *)0x7ffffe000000, (void *)0x7fffffff0000 );
-
-#endif
-}
-
-
 static void mmap_add_reserved_area( void *addr, SIZE_T size )
 {
     struct reserved_area *area;
@@ -505,6 +364,145 @@ static int mmap_enum_reserved_areas( int (CDECL *enum_func)(void *base, SIZE_T s
 }
 
+
+static void reserve_area( void *addr, void *end )
+{
+#ifdef __APPLE__
+
+#ifdef __i386__
+    static const mach_vm_address_t max_address = VM_MAX_ADDRESS;
+#else
+    static const mach_vm_address_t max_address = MACH_VM_MAX_ADDRESS;
+#endif
+    mach_vm_address_t address = (mach_vm_address_t)addr;
+    mach_vm_address_t end_address = (mach_vm_address_t)end;
+
+    if (!end_address || max_address < end_address) end_address = max_address;
+
+    while (address < end_address)
+    {
+        mach_vm_address_t hole_address = address;
+        kern_return_t ret;
+        mach_vm_size_t size;
+        vm_region_basic_info_data_64_t info;
+        mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
+        mach_port_t dummy_object_name = MACH_PORT_NULL;
+
+        /* find the mapped region at or above the current address. */
+        ret = mach_vm_region( mach_task_self(), &address, &size, VM_REGION_BASIC_INFO_64,
+                              (vm_region_info_t)&info, &count, &dummy_object_name );
+        if (ret != KERN_SUCCESS)
+        {
+            address = max_address;
+            size = 0;
+        }
+
+        if (end_address < address) address = end_address;
+
+        if (hole_address < address)
+        {
+            /* found a hole, attempt to reserve it. */
+            size_t hole_size = address - hole_address;
+            mach_vm_address_t alloc_address = hole_address;
+
+            ret = mach_vm_map( mach_task_self(), &alloc_address, hole_size, 0, VM_FLAGS_FIXED,
+                               MEMORY_OBJECT_NULL, 0, 0, PROT_NONE, VM_PROT_ALL, VM_INHERIT_COPY );
+            if (!ret) mmap_add_reserved_area( (void *)hole_address, hole_size );
+            else if (ret == KERN_NO_SPACE)
+            {
+                /* something filled (part of) the hole before we could.
+                   go back and look again. */
+                address = hole_address;
+                continue;
+            }
+        }
+
+        address += size;
+    }
+#else
+    void *ptr;
+    int flags = MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_TRYFIXED;
+    size_t size = (char *)end - (char *)addr;
+
+    if (!size) return;
+
+#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+    ptr = mmap( addr, size, PROT_NONE, flags | MAP_FIXED | MAP_EXCL, -1, 0 );
+#else
+    ptr = mmap( addr, size, PROT_NONE, flags, -1, 0 );
+#endif
+    if (ptr == addr)
+    {
+        mmap_add_reserved_area( addr, size );
+        return;
+    }
+    if (ptr != (void *)-1) munmap( ptr, size );
+
+    size = (size / 2) & ~granularity_mask;
+    if (size)
+    {
+        reserve_area( addr, (char *)addr + size );
+        reserve_area( (char *)addr + size, end );
+    }
+#endif  /* __APPLE__ */
+}
+
+
+static void mmap_init( const struct preload_info *preload_info )
+{
+#ifndef _WIN64
+#ifndef __APPLE__
+    char stack;
+    char * const stack_ptr = &stack;
+#endif
+    char *user_space_limit = (char *)0x7ffe0000;
+    int i;
+
+    if (preload_info)
+    {
+        /* check for a reserved area starting at the user space limit */
+        /* to avoid wasting time trying to allocate it again */
+        for (i = 0; preload_info[i].size; i++)
+        {
+            if ((char *)preload_info[i].addr > user_space_limit) break;
+            if ((char *)preload_info[i].addr + preload_info[i].size > user_space_limit)
+            {
+                user_space_limit = (char *)preload_info[i].addr + preload_info[i].size;
+                break;
+            }
+        }
+    }
+    else reserve_area( (void *)0x00010000, (void *)0x40000000 );
+
+#ifndef __APPLE__
+    if (stack_ptr >= user_space_limit)
+    {
+        char *end = 0;
+        char *base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) - (granularity_mask + 1);
+
+        if (base > user_space_limit) reserve_area( user_space_limit, base );
+        base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) + (granularity_mask + 1);
+#if defined(linux) || defined(__FreeBSD__) || defined (__FreeBSD_kernel__) || defined(__DragonFly__)
+        /* Heuristic: assume the stack is near the end of the address */
+        /* space, this avoids a lot of futile allocation attempts */
+        end = (char *)(((unsigned long)base + 0x0fffffff) & 0xf0000000);
+#endif
+        reserve_area( base, end );
+    }
+    else
+#endif
+        reserve_area( user_space_limit, 0 );
+
+#else
+
+    if (preload_info) return;
+
+    /* if we don't have a preloader, try to reserve the space now */
+    reserve_area( (void *)0x000000010000, (void *)0x000068000000 );
+    reserve_area( (void *)0x00007ff00000, (void *)0x00007fff0000 );
+    reserve_area( (void *)0x7ffffe000000, (void *)0x7fffffff0000 );
+
+#endif
+}
+
+
 /***********************************************************************
  *           free_ranges_lower_bound
  *
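For readers unfamiliar with the Mach calls in the __APPLE__ branch above: reserve_area() walks the address space with mach_vm_region(), which returns the mapped region at or above the queried address, so any gap between the end of the previous region and the start of the next one is a hole; each hole is then claimed with mach_vm_map() using VM_FLAGS_FIXED and PROT_NONE. Below is a standalone macOS sketch of just the enumeration half, printing the holes instead of reserving them; it is illustrative only and not part of the commit:

#include <stdio.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>

int main( void )
{
    mach_vm_address_t address = 0, prev_end = 0;

    for (;;)
    {
        mach_vm_size_t size;
        vm_region_basic_info_data_64_t info;
        mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
        mach_port_t object_name = MACH_PORT_NULL;

        /* find the mapped region at or above the current address */
        kern_return_t ret = mach_vm_region( mach_task_self(), &address, &size,
                                            VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info,
                                            &count, &object_name );
        if (ret != KERN_SUCCESS) break;  /* no more mapped regions */

        if (prev_end && address > prev_end)  /* the gap between regions is a hole */
            printf( "hole: %#llx-%#llx (%llu bytes)\n", prev_end, address, address - prev_end );

        prev_end = address + size;
        address = prev_end;  /* continue the walk past this region */
    }
    return 0;
}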
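The non-Apple branch takes the opposite approach: optimistically mmap() the whole range anonymously with PROT_NONE, and if the kernel places the mapping somewhere else, unmap it and recurse on the two halves of the range until every piece either fits or shrinks below the reservation granularity. A minimal Linux-flavored sketch of that strategy, again illustrative only (reserve_range() is a hypothetical stand-in for reserve_area(), and the 0xffff mask stands in for virtual.c's granularity_mask, assumed here to mean 64k granularity):

#include <stdio.h>
#include <sys/mman.h>

#define GRANULARITY_MASK 0xffffUL  /* assumed 64k reservation granularity */

static void reserve_range( char *addr, char *end )
{
    size_t size = end - addr;
    void *ptr;

    if (!size) return;

    /* addr is only a hint here: without MAP_FIXED the kernel may map elsewhere */
    ptr = mmap( addr, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0 );
    if (ptr == (void *)addr)
    {
        /* got the range; Wine would record it via mmap_add_reserved_area() */
        printf( "reserved %p-%p\n", (void *)addr, (void *)end );
        return;
    }
    if (ptr != MAP_FAILED) munmap( ptr, size );  /* landed in the wrong place, undo it */

    /* halve the range, rounded down to the granularity, and retry each part */
    size = (size / 2) & ~GRANULARITY_MASK;
    if (size)
    {
        reserve_range( addr, addr + size );
        reserve_range( addr + size, end );
    }
}

int main( void )
{
    reserve_range( (char *)0x10000000, (char *)0x20000000 );  /* try to reserve 256MB */
    return 0;
}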