wine / wine-winehq · Commits

Commit 09dd8012
Authored Sep 03, 2021 by Zebediah Figura
Committed by Alexandre Julliard, Sep 06, 2021
server: Add helper functions to perform atomic stores.
Signed-off-by: Zebediah Figura <zfigura@codeweavers.com>
Signed-off-by: Alexandre Julliard <julliard@winehq.org>
parent ec6ce2ce
Showing 1 changed file with 33 additions and 30 deletions: server/fd.c (+33, -30)
server/fd.c @ 09dd8012
@@ -385,43 +385,46 @@ timeout_t monotonic_time;
 struct _KUSER_SHARED_DATA *user_shared_data = NULL;
 static const int user_shared_data_timeout = 16;
 
-static void set_user_shared_data_time(void)
+static void atomic_store_ulong(volatile ULONG *ptr, ULONG value)
 {
-    timeout_t tick_count = monotonic_time / 10000;
+    /* on x86 there should be total store order guarantees, so volatile is
+     * enough to ensure the stores aren't reordered by the compiler, and then
+     * they will always be seen in-order from other CPUs. On other archs, we
+     * need atomic intrinsics to guarantee that. */
+#if defined(__i386__) || defined(__x86_64__)
+    *ptr = value;
+#else
+    __atomic_store_n(ptr, value, __ATOMIC_SEQ_CST);
+#endif
+}
 
-    /* on X86 there should be total store order guarantees, so volatile is enough
-     * to ensure the stores aren't reordered by the compiler, and then they will
-     * always be seen in-order from other CPUs. On other archs, we need atomic
-     * intrinsics to guarantee that. */
+static void atomic_store_long(volatile LONG *ptr, LONG value)
+{
 #if defined(__i386__) || defined(__x86_64__)
-    user_shared_data->SystemTime.High2Time = current_time >> 32;
-    user_shared_data->SystemTime.LowPart   = current_time;
-    user_shared_data->SystemTime.High1Time = current_time >> 32;
-
-    user_shared_data->InterruptTime.High2Time = monotonic_time >> 32;
-    user_shared_data->InterruptTime.LowPart   = monotonic_time;
-    user_shared_data->InterruptTime.High1Time = monotonic_time >> 32;
-
-    user_shared_data->TickCount.High2Time = tick_count >> 32;
-    user_shared_data->TickCount.LowPart   = tick_count;
-    user_shared_data->TickCount.High1Time = tick_count >> 32;
-    *(volatile ULONG *)&user_shared_data->TickCountLowDeprecated = tick_count;
+    *ptr = value;
 #else
-    __atomic_store_n(&user_shared_data->SystemTime.High2Time, current_time >> 32, __ATOMIC_SEQ_CST);
-    __atomic_store_n(&user_shared_data->SystemTime.LowPart, current_time, __ATOMIC_SEQ_CST);
-    __atomic_store_n(&user_shared_data->SystemTime.High1Time, current_time >> 32, __ATOMIC_SEQ_CST);
-
-    __atomic_store_n(&user_shared_data->InterruptTime.High2Time, monotonic_time >> 32, __ATOMIC_SEQ_CST);
-    __atomic_store_n(&user_shared_data->InterruptTime.LowPart, monotonic_time, __ATOMIC_SEQ_CST);
-    __atomic_store_n(&user_shared_data->InterruptTime.High1Time, monotonic_time >> 32, __ATOMIC_SEQ_CST);
-
-    __atomic_store_n(&user_shared_data->TickCount.High2Time, tick_count >> 32, __ATOMIC_SEQ_CST);
-    __atomic_store_n(&user_shared_data->TickCount.LowPart, tick_count, __ATOMIC_SEQ_CST);
-    __atomic_store_n(&user_shared_data->TickCount.High1Time, tick_count >> 32, __ATOMIC_SEQ_CST);
-    __atomic_store_n(&user_shared_data->TickCountLowDeprecated, tick_count, __ATOMIC_SEQ_CST);
+    __atomic_store_n(ptr, value, __ATOMIC_SEQ_CST);
 #endif
 }
 
+static void set_user_shared_data_time(void)
+{
+    timeout_t tick_count = monotonic_time / 10000;
+
+    atomic_store_long(&user_shared_data->SystemTime.High2Time, current_time >> 32);
+    atomic_store_ulong(&user_shared_data->SystemTime.LowPart, current_time);
+    atomic_store_long(&user_shared_data->SystemTime.High1Time, current_time >> 32);
+
+    atomic_store_long(&user_shared_data->InterruptTime.High2Time, monotonic_time >> 32);
+    atomic_store_ulong(&user_shared_data->InterruptTime.LowPart, monotonic_time);
+    atomic_store_long(&user_shared_data->InterruptTime.High1Time, monotonic_time >> 32);
+
+    atomic_store_long(&user_shared_data->TickCount.High2Time, tick_count >> 32);
+    atomic_store_ulong(&user_shared_data->TickCount.LowPart, tick_count);
+    atomic_store_long(&user_shared_data->TickCount.High1Time, tick_count >> 32);
+    atomic_store_ulong(&user_shared_data->TickCountLowDeprecated, tick_count);
+}
+
 void set_current_time(void)
 {
     static const timeout_t ticks_1601_to_1970 = (timeout_t)86400 * (369 * 365 + 89) * TICKS_PER_SEC;
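The store order in set_user_shared_data_time is deliberate: each 64-bit time value is published High2Time first, then LowPart, then High1Time, so a lock-free reader can tell when it raced with the writer. A minimal sketch of the matching reader side, assuming the usual KSYSTEM_TIME layout (ULONG LowPart; LONG High1Time; LONG High2Time); the function name is hypothetical and not part of this commit:

    /* Hypothetical reader: retry until the two high words agree, which
     * means LowPart was not overwritten mid-read. */
    static ULONGLONG read_ksystem_time( const volatile KSYSTEM_TIME *time )
    {
        ULONG low;
        LONG high;

        do
        {
            high = time->High1Time;
            low  = time->LowPart;
        } while (high != time->High2Time);

        return ((ULONGLONG)(ULONG)high << 32) | low;
    }

If the writer is interrupted mid-update, High1Time and High2Time disagree and the reader retries; once they match, the LowPart read between them belongs to the same update.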
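For readers without the Wine headers at hand, here is a self-contained sketch of the same helper pattern, using a plain unsigned int so it builds with any GCC or Clang; the names are hypothetical, but the architecture split and the __atomic_store_n builtin mirror the helpers added above:

    #include <stdio.h>

    /* Hypothetical standalone version of the commit's store helpers. */
    static void atomic_store_uint( volatile unsigned int *ptr, unsigned int value )
    {
    #if defined(__i386__) || defined(__x86_64__)
        *ptr = value;   /* x86 is total-store-order: volatile alone keeps stores in order */
    #else
        __atomic_store_n( ptr, value, __ATOMIC_SEQ_CST );
    #endif
    }

    int main( void )
    {
        volatile unsigned int published = 0;
        atomic_store_uint( &published, 42 );
        printf( "%u\n", published );
        return 0;
    }

The helpers use __ATOMIC_SEQ_CST, the strongest ordering GCC offers; that keeps them simple, though it may emit stronger fences than this write-only publication pattern strictly needs.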