Skip to content
Projects
Groups
Snippets
Help
This project
Loading...
Sign in / Register
Toggle navigation
W
wine-winehq
Project
Project
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Registry
Registry
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
wine
wine-winehq
Commits
c3025582
Commit
c3025582
authored
Sep 26, 2022
by
Alexandre Julliard
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
vkd3d: Import upstream release 1.5.
parent
c738be12
Hide whitespace changes
Inline
Side-by-side
Showing
17 changed files
with
2070 additions
and
685 deletions
+2070
-685
AUTHORS
libs/vkd3d/AUTHORS
+1
-0
vkd3d_version.h
libs/vkd3d/include/private/vkd3d_version.h
+1
-1
vkd3d.h
libs/vkd3d/include/vkd3d.h
+3
-0
vkd3d_shader.h
libs/vkd3d/include/vkd3d_shader.h
+22
-0
hlsl.c
libs/vkd3d/libs/vkd3d-shader/hlsl.c
+488
-155
hlsl.h
libs/vkd3d/libs/vkd3d-shader/hlsl.h
+55
-20
hlsl.y
libs/vkd3d/libs/vkd3d-shader/hlsl.y
+550
-258
hlsl_codegen.c
libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c
+537
-203
hlsl_constant_ops.c
libs/vkd3d/libs/vkd3d-shader/hlsl_constant_ops.c
+186
-1
hlsl_sm1.c
libs/vkd3d/libs/vkd3d-shader/hlsl_sm1.c
+9
-4
hlsl_sm4.c
libs/vkd3d/libs/vkd3d-shader/hlsl_sm4.c
+122
-8
spirv.c
libs/vkd3d/libs/vkd3d-shader/spirv.c
+17
-3
vkd3d_shader_private.h
libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h
+1
-0
command.c
libs/vkd3d/libs/vkd3d/command.c
+18
-23
device.c
libs/vkd3d/libs/vkd3d/device.c
+42
-1
state.c
libs/vkd3d/libs/vkd3d/state.c
+15
-6
vkd3d_private.h
libs/vkd3d/libs/vkd3d/vkd3d_private.h
+3
-2
No files found.
libs/vkd3d/AUTHORS
View file @
c3025582
...
...
@@ -16,6 +16,7 @@ Isabella Bosia
Jactry Zeng
Joshua Ashton
Józef Kucia
Martin Storsjö
Matteo Bruni
Nikolay Sivov
Philip Rebohle
...
...
libs/vkd3d/include/private/vkd3d_version.h
View file @
c3025582
#define VKD3D_VCS_ID " (git
9d4df5e70468
)"
#define VKD3D_VCS_ID " (git
56b2f56b8631
)"
libs/vkd3d/include/vkd3d.h
View file @
c3025582
...
...
@@ -61,6 +61,9 @@ enum vkd3d_api_version
VKD3D_API_VERSION_1_2
,
VKD3D_API_VERSION_1_3
,
VKD3D_API_VERSION_1_4
,
VKD3D_API_VERSION_1_5
,
VKD3D_FORCE_32_BIT_ENUM
(
VKD3D_API_VERSION
),
};
typedef
HRESULT
(
*
PFN_vkd3d_signal_event
)(
HANDLE
event
);
...
...
libs/vkd3d/include/vkd3d_shader.h
View file @
c3025582
...
...
@@ -46,6 +46,9 @@ enum vkd3d_shader_api_version
VKD3D_SHADER_API_VERSION_1_2
,
VKD3D_SHADER_API_VERSION_1_3
,
VKD3D_SHADER_API_VERSION_1_4
,
VKD3D_SHADER_API_VERSION_1_5
,
VKD3D_FORCE_32_BIT_ENUM
(
VKD3D_SHADER_API_VERSION
),
};
/** The type of a chained structure. */
...
...
@@ -99,6 +102,23 @@ enum vkd3d_shader_compile_option_buffer_uav
VKD3D_FORCE_32_BIT_ENUM
(
VKD3D_SHADER_COMPILE_OPTION_BUFFER_UAV
),
};
/**
* Determines how typed UAVs are declared.
* \since 1.5
*/
enum
vkd3d_shader_compile_option_typed_uav
{
/** Use R32(u)i/R32f format for UAVs which are read from. This is the default value. */
VKD3D_SHADER_COMPILE_OPTION_TYPED_UAV_READ_FORMAT_R32
=
0x00000000
,
/**
* Use Unknown format for UAVs which are read from. This should only be set if
* shaderStorageImageReadWithoutFormat is enabled in the target environment.
*/
VKD3D_SHADER_COMPILE_OPTION_TYPED_UAV_READ_FORMAT_UNKNOWN
=
0x00000001
,
VKD3D_FORCE_32_BIT_ENUM
(
VKD3D_SHADER_COMPILE_OPTION_TYPED_UAV
),
};
enum
vkd3d_shader_compile_option_formatting_flags
{
VKD3D_SHADER_COMPILE_OPTION_FORMATTING_NONE
=
0x00000000
,
...
...
@@ -127,6 +147,8 @@ enum vkd3d_shader_compile_option_name
VKD3D_SHADER_COMPILE_OPTION_FORMATTING
=
0x00000003
,
/** \a value is a member of enum vkd3d_shader_api_version. \since 1.3 */
VKD3D_SHADER_COMPILE_OPTION_API_VERSION
=
0x00000004
,
/** \a value is a member of enum vkd3d_shader_compile_option_typed_uav. \since 1.5 */
VKD3D_SHADER_COMPILE_OPTION_TYPED_UAV
=
0x00000005
,
VKD3D_FORCE_32_BIT_ENUM
(
VKD3D_SHADER_COMPILE_OPTION_NAME
),
};
...
...
libs/vkd3d/libs/vkd3d-shader/hlsl.c
View file @
c3025582
...
...
@@ -117,13 +117,46 @@ void hlsl_free_var(struct hlsl_ir_var *decl)
vkd3d_free
(
decl
);
}
static
bool
hlsl_type_is_row_major
(
const
struct
hlsl_type
*
type
)
bool
hlsl_type_is_row_major
(
const
struct
hlsl_type
*
type
)
{
/* Default to column-major if the majority isn't explicitly set, which can
* happen for anonymous nodes. */
return
!!
(
type
->
modifiers
&
HLSL_MODIFIER_ROW_MAJOR
);
}
unsigned
int
hlsl_type_minor_size
(
const
struct
hlsl_type
*
type
)
{
if
(
type
->
type
!=
HLSL_CLASS_MATRIX
||
hlsl_type_is_row_major
(
type
))
return
type
->
dimx
;
else
return
type
->
dimy
;
}
unsigned
int
hlsl_type_major_size
(
const
struct
hlsl_type
*
type
)
{
if
(
type
->
type
!=
HLSL_CLASS_MATRIX
||
hlsl_type_is_row_major
(
type
))
return
type
->
dimy
;
else
return
type
->
dimx
;
}
unsigned
int
hlsl_type_element_count
(
const
struct
hlsl_type
*
type
)
{
switch
(
type
->
type
)
{
case
HLSL_CLASS_VECTOR
:
return
type
->
dimx
;
case
HLSL_CLASS_MATRIX
:
return
hlsl_type_major_size
(
type
);
case
HLSL_CLASS_ARRAY
:
return
type
->
e
.
array
.
elements_count
;
case
HLSL_CLASS_STRUCT
:
return
type
->
e
.
record
.
field_count
;
default:
return
0
;
}
}
static
unsigned
int
get_array_size
(
const
struct
hlsl_type
*
type
)
{
if
(
type
->
type
==
HLSL_CLASS_ARRAY
)
...
...
@@ -165,8 +198,9 @@ static void hlsl_type_calculate_reg_size(struct hlsl_ctx *ctx, struct hlsl_type
{
unsigned
int
element_size
=
type
->
e
.
array
.
type
->
reg_size
;
assert
(
element_size
);
if
(
is_sm4
)
if
(
type
->
e
.
array
.
elements_count
==
HLSL_ARRAY_ELEMENTS_COUNT_IMPLICIT
)
type
->
reg_size
=
0
;
else
if
(
is_sm4
)
type
->
reg_size
=
(
type
->
e
.
array
.
elements_count
-
1
)
*
align
(
element_size
,
4
)
+
element_size
;
else
type
->
reg_size
=
type
->
e
.
array
.
elements_count
*
element_size
;
...
...
@@ -175,17 +209,16 @@ static void hlsl_type_calculate_reg_size(struct hlsl_ctx *ctx, struct hlsl_type
case
HLSL_CLASS_STRUCT
:
{
struct
hlsl_struct_field
*
field
;
unsigned
int
i
;
type
->
dimx
=
0
;
type
->
reg_size
=
0
;
LIST_FOR_EACH_ENTRY
(
field
,
type
->
e
.
elements
,
struct
hlsl_struct_field
,
entry
)
for
(
i
=
0
;
i
<
type
->
e
.
record
.
field_count
;
++
i
)
{
struct
hlsl_struct_field
*
field
=
&
type
->
e
.
record
.
fields
[
i
];
unsigned
int
field_size
=
field
->
type
->
reg_size
;
assert
(
field_size
);
type
->
reg_size
=
hlsl_type_get_sm4_offset
(
field
->
type
,
type
->
reg_size
);
field
->
reg_offset
=
type
->
reg_size
;
type
->
reg_size
+=
field_size
;
...
...
@@ -196,8 +229,7 @@ static void hlsl_type_calculate_reg_size(struct hlsl_ctx *ctx, struct hlsl_type
}
case
HLSL_CLASS_OBJECT
:
/* For convenience when performing copy propagation. */
type
->
reg_size
=
1
;
type
->
reg_size
=
0
;
break
;
}
}
...
...
@@ -232,83 +264,211 @@ static struct hlsl_type *hlsl_new_type(struct hlsl_ctx *ctx, const char *name, e
return
type
;
}
/* Returns the register offset of a given component within a type, given its index.
* *comp_type will be set to the type of the component. */
unsigned
int
hlsl_compute_component_offset
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_type
*
type
,
unsigned
int
idx
,
struct
hlsl_type
**
comp_type
)
static
bool
type_is_single_component
(
const
struct
hlsl_type
*
type
)
{
return
type
->
type
==
HLSL_CLASS_SCALAR
||
type
->
type
==
HLSL_CLASS_OBJECT
;
}
/* Given a type and a component index, this function moves one step through the path required to
* reach that component within the type.
* It returns the first index of this path.
* It sets *type_ptr to the (outermost) type within the original type that contains the component.
* It sets *index_ptr to the index of the component within *type_ptr.
* So, this function can be called several times in sequence to obtain all the path's indexes until
* the component is finally reached. */
static
unsigned
int
traverse_path_from_component_index
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_type
**
type_ptr
,
unsigned
int
*
index_ptr
)
{
struct
hlsl_type
*
type
=
*
type_ptr
;
unsigned
int
index
=
*
index_ptr
;
assert
(
!
type_is_single_component
(
type
));
assert
(
index
<
hlsl_type_component_count
(
type
));
switch
(
type
->
type
)
{
case
HLSL_CLASS_SCALAR
:
case
HLSL_CLASS_VECTOR
:
{
assert
(
idx
<
type
->
dimx
*
type
->
dimy
);
*
comp_type
=
hlsl_get_scalar_type
(
ctx
,
type
->
base_type
)
;
return
i
d
x
;
}
assert
(
index
<
type
->
dimx
);
*
type_ptr
=
hlsl_get_scalar_type
(
ctx
,
type
->
base_type
);
*
index_ptr
=
0
;
return
i
nde
x
;
case
HLSL_CLASS_MATRIX
:
{
unsigned
int
minor
,
major
,
x
=
idx
%
type
->
dimx
,
y
=
idx
/
type
->
dimx
;
assert
(
idx
<
type
->
dimx
*
type
->
dimy
);
unsigned
int
y
=
index
/
type
->
dimx
,
x
=
index
%
type
->
dimx
;
bool
row_major
=
hlsl_type_is_row_major
(
type
);
if
(
hlsl_type_is_row_major
(
type
))
{
minor
=
x
;
major
=
y
;
}
else
{
minor
=
y
;
major
=
x
;
}
*
comp_type
=
hlsl_get_scalar_type
(
ctx
,
type
->
base_type
);
return
4
*
major
+
minor
;
assert
(
index
<
type
->
dimx
*
type
->
dimy
);
*
type_ptr
=
hlsl_get_vector_type
(
ctx
,
type
->
base_type
,
row_major
?
type
->
dimx
:
type
->
dimy
);
*
index_ptr
=
row_major
?
x
:
y
;
return
row_major
?
y
:
x
;
}
case
HLSL_CLASS_ARRAY
:
{
unsigned
int
elem_comp_count
=
hlsl_type_component_count
(
type
->
e
.
array
.
type
);
unsigned
int
array_idx
=
idx
/
elem_comp_count
;
unsigned
int
idx_in_elem
=
idx
%
elem_comp_count
;
unsigned
int
array_index
;
assert
(
array_idx
<
type
->
e
.
array
.
elements_count
);
return
array_idx
*
hlsl_type_get_array_element_reg_size
(
type
->
e
.
array
.
type
)
+
hlsl_compute_component_offset
(
ctx
,
type
->
e
.
array
.
type
,
idx_in_elem
,
comp_type
);
*
type_ptr
=
type
->
e
.
array
.
type
;
*
index_ptr
=
index
%
elem_comp_count
;
array_index
=
index
/
elem_comp_count
;
assert
(
array_index
<
type
->
e
.
array
.
elements_count
);
return
array_index
;
}
case
HLSL_CLASS_STRUCT
:
{
struct
hlsl_struct_field
*
field
;
unsigned
int
field_comp_count
,
i
;
LIST_FOR_EACH_ENTRY
(
field
,
type
->
e
.
elements
,
struct
hlsl_struct_field
,
entry
)
for
(
i
=
0
;
i
<
type
->
e
.
record
.
field_count
;
++
i
)
{
unsigned
int
elem_comp_count
=
hlsl_type_component_count
(
field
->
type
)
;
if
(
i
dx
<
elem
_comp_count
)
field
=
&
type
->
e
.
record
.
fields
[
i
]
;
field_comp_count
=
hlsl_type_component_count
(
field
->
type
);
if
(
i
ndex
<
field
_comp_count
)
{
return
field
->
reg_offset
+
hlsl_compute_component_offset
(
ctx
,
field
->
type
,
idx
,
comp_type
);
*
type_ptr
=
field
->
type
;
*
index_ptr
=
index
;
return
i
;
}
i
dx
-=
elem
_comp_count
;
i
ndex
-=
field
_comp_count
;
}
assert
(
0
);
return
0
;
}
case
HLSL_CLASS_OBJECT
:
{
assert
(
idx
==
0
);
*
comp_type
=
type
;
default:
assert
(
0
);
return
0
;
}
}
struct
hlsl_type
*
hlsl_type_get_component_type
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_type
*
type
,
unsigned
int
index
)
{
while
(
!
type_is_single_component
(
type
))
traverse_path_from_component_index
(
ctx
,
&
type
,
&
index
);
return
type
;
}
static
bool
init_deref
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_deref
*
deref
,
struct
hlsl_ir_var
*
var
,
unsigned
int
path_len
)
{
deref
->
var
=
var
;
deref
->
path_len
=
path_len
;
deref
->
offset
.
node
=
NULL
;
if
(
path_len
==
0
)
{
deref
->
path
=
NULL
;
return
true
;
}
if
(
!
(
deref
->
path
=
hlsl_alloc
(
ctx
,
sizeof
(
*
deref
->
path
)
*
deref
->
path_len
)))
{
deref
->
var
=
NULL
;
deref
->
path_len
=
0
;
return
false
;
}
return
true
;
}
static
struct
hlsl_type
*
get_type_from_deref
(
struct
hlsl_ctx
*
ctx
,
const
struct
hlsl_deref
*
deref
)
{
struct
hlsl_type
*
type
;
unsigned
int
i
;
assert
(
deref
);
assert
(
!
deref
->
offset
.
node
);
type
=
deref
->
var
->
data_type
;
for
(
i
=
0
;
i
<
deref
->
path_len
;
++
i
)
type
=
hlsl_get_element_type_from_path_index
(
ctx
,
type
,
deref
->
path
[
i
].
node
);
return
type
;
}
/* Initializes a deref from another deref (prefix) and a component index.
* *block is initialized to contain the new constant node instructions used by the deref's path. */
static
bool
init_deref_from_component_index
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_block
*
block
,
struct
hlsl_deref
*
deref
,
const
struct
hlsl_deref
*
prefix
,
unsigned
int
index
,
const
struct
vkd3d_shader_location
*
loc
)
{
unsigned
int
path_len
,
path_index
,
deref_path_len
,
i
;
struct
hlsl_type
*
path_type
;
struct
hlsl_ir_constant
*
c
;
list_init
(
&
block
->
instrs
);
path_len
=
0
;
path_type
=
get_type_from_deref
(
ctx
,
prefix
);
path_index
=
index
;
while
(
!
type_is_single_component
(
path_type
))
{
traverse_path_from_component_index
(
ctx
,
&
path_type
,
&
path_index
);
++
path_len
;
}
if
(
!
init_deref
(
ctx
,
deref
,
prefix
->
var
,
prefix
->
path_len
+
path_len
))
return
false
;
deref_path_len
=
0
;
for
(
i
=
0
;
i
<
prefix
->
path_len
;
++
i
)
hlsl_src_from_node
(
&
deref
->
path
[
deref_path_len
++
],
prefix
->
path
[
i
].
node
);
path_type
=
get_type_from_deref
(
ctx
,
prefix
);
path_index
=
index
;
while
(
!
type_is_single_component
(
path_type
))
{
unsigned
int
next_index
=
traverse_path_from_component_index
(
ctx
,
&
path_type
,
&
path_index
);
if
(
!
(
c
=
hlsl_new_uint_constant
(
ctx
,
next_index
,
loc
)))
{
hlsl_free_instr_list
(
&
block
->
instrs
);
return
false
;
}
list_add_tail
(
&
block
->
instrs
,
&
c
->
node
.
entry
);
hlsl_src_from_node
(
&
deref
->
path
[
deref_path_len
++
],
&
c
->
node
);
}
assert
(
0
);
return
0
;
assert
(
deref_path_len
==
deref
->
path_len
);
return
true
;
}
struct
hlsl_type
*
hlsl_get_element_type_from_path_index
(
struct
hlsl_ctx
*
ctx
,
const
struct
hlsl_type
*
type
,
struct
hlsl_ir_node
*
idx
)
{
assert
(
idx
);
switch
(
type
->
type
)
{
case
HLSL_CLASS_VECTOR
:
return
hlsl_get_scalar_type
(
ctx
,
type
->
base_type
);
case
HLSL_CLASS_MATRIX
:
if
(
hlsl_type_is_row_major
(
type
))
return
hlsl_get_vector_type
(
ctx
,
type
->
base_type
,
type
->
dimx
);
else
return
hlsl_get_vector_type
(
ctx
,
type
->
base_type
,
type
->
dimy
);
case
HLSL_CLASS_ARRAY
:
return
type
->
e
.
array
.
type
;
case
HLSL_CLASS_STRUCT
:
{
struct
hlsl_ir_constant
*
c
=
hlsl_ir_constant
(
idx
);
assert
(
c
->
value
[
0
].
u
<
type
->
e
.
record
.
field_count
);
return
type
->
e
.
record
.
fields
[
c
->
value
[
0
].
u
].
type
;
}
default:
assert
(
0
);
return
NULL
;
}
}
struct
hlsl_type
*
hlsl_new_array_type
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_type
*
basic_type
,
unsigned
int
array_size
)
...
...
@@ -331,7 +491,8 @@ struct hlsl_type *hlsl_new_array_type(struct hlsl_ctx *ctx, struct hlsl_type *ba
return
type
;
}
struct
hlsl_type
*
hlsl_new_struct_type
(
struct
hlsl_ctx
*
ctx
,
const
char
*
name
,
struct
list
*
fields
)
struct
hlsl_type
*
hlsl_new_struct_type
(
struct
hlsl_ctx
*
ctx
,
const
char
*
name
,
struct
hlsl_struct_field
*
fields
,
size_t
field_count
)
{
struct
hlsl_type
*
type
;
...
...
@@ -341,7 +502,8 @@ struct hlsl_type *hlsl_new_struct_type(struct hlsl_ctx *ctx, const char *name, s
type
->
base_type
=
HLSL_TYPE_VOID
;
type
->
name
=
name
;
type
->
dimy
=
1
;
type
->
e
.
elements
=
fields
;
type
->
e
.
record
.
fields
=
fields
;
type
->
e
.
record
.
field_count
=
field_count
;
hlsl_type_calculate_reg_size
(
ctx
,
type
);
list_add_tail
(
&
ctx
->
types
,
&
type
->
entry
);
...
...
@@ -399,30 +561,34 @@ struct hlsl_ir_function_decl *hlsl_get_func_decl(struct hlsl_ctx *ctx, const cha
return
NULL
;
}
unsigned
int
hlsl_type_component_count
(
struct
hlsl_type
*
type
)
unsigned
int
hlsl_type_component_count
(
const
struct
hlsl_type
*
type
)
{
struct
hlsl_struct_field
*
field
;
unsigned
int
count
=
0
;
if
(
type
->
type
<=
HLSL_CLASS_LAST_NUMERIC
)
{
return
type
->
dimx
*
type
->
dimy
;
}
if
(
type
->
type
==
HLSL_CLASS_ARRAY
)
{
return
hlsl_type_component_count
(
type
->
e
.
array
.
type
)
*
type
->
e
.
array
.
elements_count
;
}
if
(
type
->
type
!=
HLSL_CLASS_STRUCT
)
switch
(
type
->
type
)
{
ERR
(
"Unexpected data type %#x.
\n
"
,
type
->
type
);
return
0
;
}
case
HLSL_CLASS_SCALAR
:
case
HLSL_CLASS_VECTOR
:
case
HLSL_CLASS_MATRIX
:
return
type
->
dimx
*
type
->
dimy
;
LIST_FOR_EACH_ENTRY
(
field
,
type
->
e
.
elements
,
struct
hlsl_struct_field
,
entry
)
{
count
+=
hlsl_type_component_count
(
field
->
type
);
case
HLSL_CLASS_STRUCT
:
{
unsigned
int
count
=
0
,
i
;
for
(
i
=
0
;
i
<
type
->
e
.
record
.
field_count
;
++
i
)
count
+=
hlsl_type_component_count
(
type
->
e
.
record
.
fields
[
i
].
type
);
return
count
;
}
case
HLSL_CLASS_ARRAY
:
return
hlsl_type_component_count
(
type
->
e
.
array
.
type
)
*
type
->
e
.
array
.
elements_count
;
case
HLSL_CLASS_OBJECT
:
return
1
;
default:
assert
(
0
);
return
0
;
}
return
count
;
}
bool
hlsl_types_are_equal
(
const
struct
hlsl_type
*
t1
,
const
struct
hlsl_type
*
t2
)
...
...
@@ -451,24 +617,22 @@ bool hlsl_types_are_equal(const struct hlsl_type *t1, const struct hlsl_type *t2
return
false
;
if
(
t1
->
type
==
HLSL_CLASS_STRUCT
)
{
struct
list
*
t1cur
,
*
t2cur
;
struct
hlsl_struct_field
*
t1field
,
*
t2field
;
size_t
i
;
t1cur
=
list_head
(
t1
->
e
.
elements
);
t2cur
=
list_head
(
t2
->
e
.
elements
);
while
(
t1cur
&&
t2cur
)
if
(
t1
->
e
.
record
.
field_count
!=
t2
->
e
.
record
.
field_count
)
return
false
;
for
(
i
=
0
;
i
<
t1
->
e
.
record
.
field_count
;
++
i
)
{
t1field
=
LIST_ENTRY
(
t1cur
,
struct
hlsl_struct_field
,
entry
);
t2field
=
LIST_ENTRY
(
t2cur
,
struct
hlsl_struct_field
,
entry
);
if
(
!
hlsl_types_are_equal
(
t1field
->
type
,
t2field
->
type
))
const
struct
hlsl_struct_field
*
field1
=
&
t1
->
e
.
record
.
fields
[
i
];
const
struct
hlsl_struct_field
*
field2
=
&
t2
->
e
.
record
.
fields
[
i
];
if
(
!
hlsl_types_are_equal
(
field1
->
type
,
field2
->
type
))
return
false
;
if
(
strcmp
(
t1field
->
name
,
t2field
->
name
))
if
(
strcmp
(
field1
->
name
,
field2
->
name
))
return
false
;
t1cur
=
list_next
(
t1
->
e
.
elements
,
t1cur
);
t2cur
=
list_next
(
t2
->
e
.
elements
,
t2cur
);
}
if
(
t1cur
!=
t2cur
)
return
false
;
}
if
(
t1
->
type
==
HLSL_CLASS_ARRAY
)
return
t1
->
e
.
array
.
elements_count
==
t2
->
e
.
array
.
elements_count
...
...
@@ -480,7 +644,6 @@ bool hlsl_types_are_equal(const struct hlsl_type *t1, const struct hlsl_type *t2
struct
hlsl_type
*
hlsl_type_clone
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_type
*
old
,
unsigned
int
default_majority
,
unsigned
int
modifiers
)
{
struct
hlsl_struct_field
*
old_field
,
*
field
;
struct
hlsl_type
*
type
;
if
(
!
(
type
=
hlsl_alloc
(
ctx
,
sizeof
(
*
type
))))
...
...
@@ -506,43 +669,47 @@ struct hlsl_type *hlsl_type_clone(struct hlsl_ctx *ctx, struct hlsl_type *old,
switch
(
old
->
type
)
{
case
HLSL_CLASS_ARRAY
:
type
->
e
.
array
.
type
=
hlsl_type_clone
(
ctx
,
old
->
e
.
array
.
type
,
default_majority
,
modifiers
);
if
(
!
(
type
->
e
.
array
.
type
=
hlsl_type_clone
(
ctx
,
old
->
e
.
array
.
type
,
default_majority
,
modifiers
)))
{
vkd3d_free
((
void
*
)
type
->
name
);
vkd3d_free
(
type
);
return
NULL
;
}
type
->
e
.
array
.
elements_count
=
old
->
e
.
array
.
elements_count
;
break
;
case
HLSL_CLASS_STRUCT
:
{
if
(
!
(
type
->
e
.
elements
=
hlsl_alloc
(
ctx
,
sizeof
(
*
type
->
e
.
elements
))))
size_t
field_count
=
old
->
e
.
record
.
field_count
,
i
;
type
->
e
.
record
.
field_count
=
field_count
;
if
(
!
(
type
->
e
.
record
.
fields
=
hlsl_alloc
(
ctx
,
field_count
*
sizeof
(
*
type
->
e
.
record
.
fields
))))
{
vkd3d_free
((
void
*
)
type
->
name
);
vkd3d_free
(
type
);
return
NULL
;
}
list_init
(
type
->
e
.
elements
);
LIST_FOR_EACH_ENTRY
(
old_field
,
old
->
e
.
elements
,
struct
hlsl_struct_field
,
entry
)
for
(
i
=
0
;
i
<
field_count
;
++
i
)
{
if
(
!
(
field
=
hlsl_alloc
(
ctx
,
sizeof
(
*
field
))))
const
struct
hlsl_struct_field
*
src_field
=
&
old
->
e
.
record
.
fields
[
i
];
struct
hlsl_struct_field
*
dst_field
=
&
type
->
e
.
record
.
fields
[
i
];
dst_field
->
loc
=
src_field
->
loc
;
if
(
!
(
dst_field
->
type
=
hlsl_type_clone
(
ctx
,
src_field
->
type
,
default_majority
,
modifiers
)))
{
LIST_FOR_EACH_ENTRY_SAFE
(
field
,
old_field
,
type
->
e
.
elements
,
struct
hlsl_struct_field
,
entry
)
{
vkd3d_free
((
void
*
)
field
->
semantic
.
name
);
vkd3d_free
((
void
*
)
field
->
name
);
vkd3d_free
(
field
);
}
vkd3d_free
(
type
->
e
.
elements
);
vkd3d_free
(
type
->
e
.
record
.
fields
);
vkd3d_free
((
void
*
)
type
->
name
);
vkd3d_free
(
type
);
return
NULL
;
}
field
->
loc
=
old_field
->
loc
;
field
->
type
=
hlsl_type_clone
(
ctx
,
old_field
->
type
,
default_majority
,
modifiers
);
field
->
name
=
hlsl_strdup
(
ctx
,
old_field
->
name
);
if
(
old_field
->
semantic
.
name
)
dst_field
->
name
=
hlsl_strdup
(
ctx
,
src_field
->
name
);
if
(
src_field
->
semantic
.
name
)
{
field
->
semantic
.
name
=
hlsl_strdup
(
ctx
,
old
_field
->
semantic
.
name
);
field
->
semantic
.
index
=
old
_field
->
semantic
.
index
;
dst_field
->
semantic
.
name
=
hlsl_strdup
(
ctx
,
src
_field
->
semantic
.
name
);
dst_field
->
semantic
.
index
=
src
_field
->
semantic
.
index
;
}
list_add_tail
(
type
->
e
.
elements
,
&
field
->
entry
);
}
break
;
}
...
...
@@ -618,28 +785,110 @@ static bool type_is_single_reg(const struct hlsl_type *type)
return
type
->
type
==
HLSL_CLASS_SCALAR
||
type
->
type
==
HLSL_CLASS_VECTOR
;
}
struct
hlsl_ir_store
*
hlsl_new_store
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_var
*
var
,
struct
hlsl_ir_node
*
offset
,
struct
hlsl_ir_node
*
rhs
,
unsigned
int
writemask
,
struct
vkd3d_shader_location
loc
)
bool
hlsl_copy_deref
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_deref
*
deref
,
struct
hlsl_deref
*
other
)
{
unsigned
int
i
;
memset
(
deref
,
0
,
sizeof
(
*
deref
));
if
(
!
other
)
return
true
;
assert
(
!
other
->
offset
.
node
);
if
(
!
init_deref
(
ctx
,
deref
,
other
->
var
,
other
->
path_len
))
return
false
;
for
(
i
=
0
;
i
<
deref
->
path_len
;
++
i
)
hlsl_src_from_node
(
&
deref
->
path
[
i
],
other
->
path
[
i
].
node
);
return
true
;
}
void
hlsl_cleanup_deref
(
struct
hlsl_deref
*
deref
)
{
unsigned
int
i
;
for
(
i
=
0
;
i
<
deref
->
path_len
;
++
i
)
hlsl_src_remove
(
&
deref
->
path
[
i
]);
vkd3d_free
(
deref
->
path
);
deref
->
path
=
NULL
;
deref
->
path_len
=
0
;
hlsl_src_remove
(
&
deref
->
offset
);
}
/* Initializes a simple variable derefence, so that it can be passed to load/store functions. */
void
hlsl_init_simple_deref_from_var
(
struct
hlsl_deref
*
deref
,
struct
hlsl_ir_var
*
var
)
{
memset
(
deref
,
0
,
sizeof
(
*
deref
));
deref
->
var
=
var
;
}
struct
hlsl_ir_store
*
hlsl_new_simple_store
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_var
*
lhs
,
struct
hlsl_ir_node
*
rhs
)
{
struct
hlsl_deref
lhs_deref
;
hlsl_init_simple_deref_from_var
(
&
lhs_deref
,
lhs
);
return
hlsl_new_store_index
(
ctx
,
&
lhs_deref
,
NULL
,
rhs
,
0
,
&
rhs
->
loc
);
}
struct
hlsl_ir_store
*
hlsl_new_store_index
(
struct
hlsl_ctx
*
ctx
,
const
struct
hlsl_deref
*
lhs
,
struct
hlsl_ir_node
*
idx
,
struct
hlsl_ir_node
*
rhs
,
unsigned
int
writemask
,
const
struct
vkd3d_shader_location
*
loc
)
{
struct
hlsl_ir_store
*
store
;
unsigned
int
i
;
if
(
!
writemask
&&
type_is_single_reg
(
rhs
->
data_type
))
writemask
=
(
1
<<
rhs
->
data_type
->
dimx
)
-
1
;
assert
(
lhs
);
assert
(
!
lhs
->
offset
.
node
)
;
if
(
!
(
store
=
hlsl_alloc
(
ctx
,
sizeof
(
*
store
))))
return
NULL
;
init_node
(
&
store
->
node
,
HLSL_IR_STORE
,
NULL
,
*
loc
);
if
(
!
init_deref
(
ctx
,
&
store
->
lhs
,
lhs
->
var
,
lhs
->
path_len
+
!!
idx
))
return
NULL
;
for
(
i
=
0
;
i
<
lhs
->
path_len
;
++
i
)
hlsl_src_from_node
(
&
store
->
lhs
.
path
[
i
],
lhs
->
path
[
i
].
node
);
if
(
idx
)
hlsl_src_from_node
(
&
store
->
lhs
.
path
[
lhs
->
path_len
],
idx
);
init_node
(
&
store
->
node
,
HLSL_IR_STORE
,
NULL
,
loc
);
store
->
lhs
.
var
=
var
;
hlsl_src_from_node
(
&
store
->
lhs
.
offset
,
offset
);
hlsl_src_from_node
(
&
store
->
rhs
,
rhs
);
if
(
!
writemask
&&
type_is_single_reg
(
rhs
->
data_type
))
writemask
=
(
1
<<
rhs
->
data_type
->
dimx
)
-
1
;
store
->
writemask
=
writemask
;
return
store
;
}
struct
hlsl_ir_store
*
hlsl_new_simple_store
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_var
*
lhs
,
struct
hlsl_ir_node
*
rhs
)
struct
hlsl_ir_store
*
hlsl_new_store_component
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_block
*
block
,
const
struct
hlsl_deref
*
lhs
,
unsigned
int
comp
,
struct
hlsl_ir_node
*
rhs
)
{
return
hlsl_new_store
(
ctx
,
lhs
,
NULL
,
rhs
,
0
,
rhs
->
loc
);
struct
hlsl_block
comp_path_block
;
struct
hlsl_ir_store
*
store
;
list_init
(
&
block
->
instrs
);
if
(
!
(
store
=
hlsl_alloc
(
ctx
,
sizeof
(
*
store
))))
return
NULL
;
init_node
(
&
store
->
node
,
HLSL_IR_STORE
,
NULL
,
rhs
->
loc
);
if
(
!
init_deref_from_component_index
(
ctx
,
&
comp_path_block
,
&
store
->
lhs
,
lhs
,
comp
,
&
rhs
->
loc
))
{
vkd3d_free
(
store
);
return
NULL
;
}
list_move_tail
(
&
block
->
instrs
,
&
comp_path_block
.
instrs
);
hlsl_src_from_node
(
&
store
->
rhs
,
rhs
);
if
(
type_is_single_reg
(
rhs
->
data_type
))
store
->
writemask
=
(
1
<<
rhs
->
data_type
->
dimx
)
-
1
;
list_add_tail
(
&
block
->
instrs
,
&
store
->
node
.
entry
);
return
store
;
}
struct
hlsl_ir_constant
*
hlsl_new_constant
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_type
*
type
,
...
...
@@ -725,29 +974,76 @@ struct hlsl_ir_if *hlsl_new_if(struct hlsl_ctx *ctx, struct hlsl_ir_node *condit
return
iff
;
}
struct
hlsl_ir_load
*
hlsl_new_load
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_var
*
var
,
struct
hlsl_ir_node
*
offset
,
struct
hlsl_
type
*
type
,
const
struct
vkd3d_shader_location
loc
)
struct
hlsl_ir_load
*
hlsl_new_load
_index
(
struct
hlsl_ctx
*
ctx
,
const
struct
hlsl_deref
*
deref
,
struct
hlsl_
ir_node
*
idx
,
const
struct
vkd3d_shader_location
*
loc
)
{
struct
hlsl_ir_load
*
load
;
struct
hlsl_type
*
type
;
unsigned
int
i
;
assert
(
!
deref
->
offset
.
node
);
type
=
get_type_from_deref
(
ctx
,
deref
);
if
(
idx
)
type
=
hlsl_get_element_type_from_path_index
(
ctx
,
type
,
idx
);
if
(
!
(
load
=
hlsl_alloc
(
ctx
,
sizeof
(
*
load
))))
return
NULL
;
init_node
(
&
load
->
node
,
HLSL_IR_LOAD
,
type
,
loc
);
load
->
src
.
var
=
var
;
hlsl_src_from_node
(
&
load
->
src
.
offset
,
offset
);
init_node
(
&
load
->
node
,
HLSL_IR_LOAD
,
type
,
*
loc
);
if
(
!
init_deref
(
ctx
,
&
load
->
src
,
deref
->
var
,
deref
->
path_len
+
!!
idx
))
{
vkd3d_free
(
load
);
return
NULL
;
}
for
(
i
=
0
;
i
<
deref
->
path_len
;
++
i
)
hlsl_src_from_node
(
&
load
->
src
.
path
[
i
],
deref
->
path
[
i
].
node
);
if
(
idx
)
hlsl_src_from_node
(
&
load
->
src
.
path
[
deref
->
path_len
],
idx
);
return
load
;
}
struct
hlsl_ir_load
*
hlsl_new_var_load
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_var
*
var
,
const
struct
vkd3d_shader_location
loc
)
struct
vkd3d_shader_location
loc
)
{
return
hlsl_new_load
(
ctx
,
var
,
NULL
,
var
->
data_type
,
loc
);
struct
hlsl_deref
var_deref
;
hlsl_init_simple_deref_from_var
(
&
var_deref
,
var
);
return
hlsl_new_load_index
(
ctx
,
&
var_deref
,
NULL
,
&
loc
);
}
struct
hlsl_ir_load
*
hlsl_new_load_component
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_block
*
block
,
const
struct
hlsl_deref
*
deref
,
unsigned
int
comp
,
const
struct
vkd3d_shader_location
*
loc
)
{
struct
hlsl_type
*
type
,
*
comp_type
;
struct
hlsl_block
comp_path_block
;
struct
hlsl_ir_load
*
load
;
list_init
(
&
block
->
instrs
);
if
(
!
(
load
=
hlsl_alloc
(
ctx
,
sizeof
(
*
load
))))
return
NULL
;
type
=
get_type_from_deref
(
ctx
,
deref
);
comp_type
=
hlsl_type_get_component_type
(
ctx
,
type
,
comp
);
init_node
(
&
load
->
node
,
HLSL_IR_LOAD
,
comp_type
,
*
loc
);
if
(
!
init_deref_from_component_index
(
ctx
,
&
comp_path_block
,
&
load
->
src
,
deref
,
comp
,
loc
))
{
vkd3d_free
(
load
);
return
NULL
;
}
list_move_tail
(
&
block
->
instrs
,
&
comp_path_block
.
instrs
);
list_add_tail
(
&
block
->
instrs
,
&
load
->
node
.
entry
);
return
load
;
}
struct
hlsl_ir_resource_load
*
hlsl_new_resource_load
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_type
*
data_type
,
enum
hlsl_resource_load_type
type
,
struct
hlsl_ir_var
*
resource
,
struct
hlsl_ir_node
*
resource_offset
,
struct
hlsl_ir_var
*
sampler
,
struct
hlsl_ir_node
*
sampler_offset
,
struct
hlsl_ir_node
*
coords
,
struct
hlsl_ir_node
*
texel_offset
,
const
struct
vkd3d_shader_location
*
loc
)
enum
hlsl_resource_load_type
type
,
struct
hlsl_deref
*
resource
,
struct
hlsl_deref
*
sampler
,
struct
hlsl_ir_node
*
coords
,
struct
hlsl_ir_node
*
texel_offset
,
const
struct
vkd3d_shader_location
*
loc
)
{
struct
hlsl_ir_resource_load
*
load
;
...
...
@@ -755,15 +1051,25 @@ struct hlsl_ir_resource_load *hlsl_new_resource_load(struct hlsl_ctx *ctx, struc
return
NULL
;
init_node
(
&
load
->
node
,
HLSL_IR_RESOURCE_LOAD
,
data_type
,
*
loc
);
load
->
load_type
=
type
;
load
->
resource
.
var
=
resource
;
hlsl_src_from_node
(
&
load
->
resource
.
offset
,
resource_offset
);
load
->
sampler
.
var
=
sampler
;
hlsl_src_from_node
(
&
load
->
sampler
.
offset
,
sampler_offset
);
hlsl_copy_deref
(
ctx
,
&
load
->
resource
,
resource
);
hlsl_copy_deref
(
ctx
,
&
load
->
sampler
,
sampler
);
hlsl_src_from_node
(
&
load
->
coords
,
coords
);
hlsl_src_from_node
(
&
load
->
texel_offset
,
texel_offset
);
return
load
;
}
struct
hlsl_ir_resource_load
*
hlsl_new_sample_lod
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_type
*
data_type
,
struct
hlsl_deref
*
resource
,
struct
hlsl_deref
*
sampler
,
struct
hlsl_ir_node
*
coords
,
struct
hlsl_ir_node
*
texel_offset
,
struct
hlsl_ir_node
*
lod
,
const
struct
vkd3d_shader_location
*
loc
)
{
struct
hlsl_ir_resource_load
*
load
;
if
((
load
=
hlsl_new_resource_load
(
ctx
,
data_type
,
HLSL_RESOURCE_SAMPLE_LOD
,
resource
,
sampler
,
coords
,
texel_offset
,
loc
)))
hlsl_src_from_node
(
&
load
->
lod
,
lod
);
return
load
;
}
struct
hlsl_ir_swizzle
*
hlsl_new_swizzle
(
struct
hlsl_ctx
*
ctx
,
DWORD
s
,
unsigned
int
components
,
struct
hlsl_ir_node
*
val
,
const
struct
vkd3d_shader_location
*
loc
)
{
...
...
@@ -911,24 +1217,22 @@ static int compare_param_hlsl_types(const struct hlsl_type *t1, const struct hls
return
r
;
if
(
t1
->
type
==
HLSL_CLASS_STRUCT
)
{
struct
list
*
t1cur
,
*
t2cur
;
struct
hlsl_struct_field
*
t1field
,
*
t2field
;
size_t
i
;
if
(
t1
->
e
.
record
.
field_count
!=
t2
->
e
.
record
.
field_count
)
return
t1
->
e
.
record
.
field_count
-
t2
->
e
.
record
.
field_count
;
t1cur
=
list_head
(
t1
->
e
.
elements
);
t2cur
=
list_head
(
t2
->
e
.
elements
);
while
(
t1cur
&&
t2cur
)
for
(
i
=
0
;
i
<
t1
->
e
.
record
.
field_count
;
++
i
)
{
t1field
=
LIST_ENTRY
(
t1cur
,
struct
hlsl_struct_field
,
entry
);
t2field
=
LIST_ENTRY
(
t2cur
,
struct
hlsl_struct_field
,
entry
);
if
((
r
=
compare_param_hlsl_types
(
t1field
->
type
,
t2field
->
type
)))
const
struct
hlsl_struct_field
*
field1
=
&
t1
->
e
.
record
.
fields
[
i
];
const
struct
hlsl_struct_field
*
field2
=
&
t2
->
e
.
record
.
fields
[
i
];
if
((
r
=
compare_param_hlsl_types
(
field1
->
type
,
field2
->
type
)))
return
r
;
if
((
r
=
strcmp
(
t1field
->
name
,
t2field
->
name
)))
if
((
r
=
strcmp
(
field1
->
name
,
field2
->
name
)))
return
r
;
t1cur
=
list_next
(
t1
->
e
.
elements
,
t1cur
);
t2cur
=
list_next
(
t2
->
e
.
elements
,
t2cur
);
}
if
(
t1cur
!=
t2cur
)
return
t1cur
?
1
:
-
1
;
return
0
;
}
if
(
t1
->
type
==
HLSL_CLASS_ARRAY
)
...
...
@@ -1023,7 +1327,12 @@ struct vkd3d_string_buffer *hlsl_type_to_string(struct hlsl_ctx *ctx, const stru
}
for
(
t
=
type
;
t
->
type
==
HLSL_CLASS_ARRAY
;
t
=
t
->
e
.
array
.
type
)
vkd3d_string_buffer_printf
(
string
,
"[%u]"
,
t
->
e
.
array
.
elements_count
);
{
if
(
t
->
e
.
array
.
elements_count
==
HLSL_ARRAY_ELEMENTS_COUNT_IMPLICIT
)
vkd3d_string_buffer_printf
(
string
,
"[]"
);
else
vkd3d_string_buffer_printf
(
string
,
"[%u]"
,
t
->
e
.
array
.
elements_count
);
}
return
string
;
}
...
...
@@ -1185,10 +1494,23 @@ static void dump_ir_var(struct hlsl_ctx *ctx, struct vkd3d_string_buffer *buffer
static
void
dump_deref
(
struct
vkd3d_string_buffer
*
buffer
,
const
struct
hlsl_deref
*
deref
)
{
unsigned
int
i
;
if
(
deref
->
var
)
{
vkd3d_string_buffer_printf
(
buffer
,
"%s"
,
deref
->
var
->
name
);
if
(
deref
->
offset
.
node
)
if
(
deref
->
path_len
)
{
vkd3d_string_buffer_printf
(
buffer
,
"["
);
for
(
i
=
0
;
i
<
deref
->
path_len
;
++
i
)
{
vkd3d_string_buffer_printf
(
buffer
,
"["
);
dump_src
(
buffer
,
&
deref
->
path
[
i
]);
vkd3d_string_buffer_printf
(
buffer
,
"]"
);
}
vkd3d_string_buffer_printf
(
buffer
,
"]"
);
}
else
if
(
deref
->
offset
.
node
)
{
vkd3d_string_buffer_printf
(
buffer
,
"["
);
dump_src
(
buffer
,
&
deref
->
offset
);
...
...
@@ -1255,6 +1577,7 @@ static void dump_ir_constant(struct vkd3d_string_buffer *buffer, const struct hl
break
;
case
HLSL_TYPE_FLOAT
:
case
HLSL_TYPE_HALF
:
vkd3d_string_buffer_printf
(
buffer
,
"%.8e "
,
value
->
f
);
break
;
...
...
@@ -1385,6 +1708,7 @@ static void dump_ir_resource_load(struct vkd3d_string_buffer *buffer, const stru
{
[
HLSL_RESOURCE_LOAD
]
=
"load_resource"
,
[
HLSL_RESOURCE_SAMPLE
]
=
"sample"
,
[
HLSL_RESOURCE_SAMPLE_LOD
]
=
"sample_lod"
,
[
HLSL_RESOURCE_GATHER_RED
]
=
"gather_red"
,
[
HLSL_RESOURCE_GATHER_GREEN
]
=
"gather_green"
,
[
HLSL_RESOURCE_GATHER_BLUE
]
=
"gather_blue"
,
...
...
@@ -1403,6 +1727,11 @@ static void dump_ir_resource_load(struct vkd3d_string_buffer *buffer, const stru
vkd3d_string_buffer_printf
(
buffer
,
", offset = "
);
dump_src
(
buffer
,
&
load
->
texel_offset
);
}
if
(
load
->
lod
.
node
)
{
vkd3d_string_buffer_printf
(
buffer
,
", lod = "
);
dump_src
(
buffer
,
&
load
->
lod
);
}
vkd3d_string_buffer_printf
(
buffer
,
")"
);
}
...
...
@@ -1519,17 +1848,20 @@ void hlsl_replace_node(struct hlsl_ir_node *old, struct hlsl_ir_node *new)
void
hlsl_free_type
(
struct
hlsl_type
*
type
)
{
struct
hlsl_struct_field
*
field
,
*
next_field
;
struct
hlsl_struct_field
*
field
;
size_t
i
;
vkd3d_free
((
void
*
)
type
->
name
);
if
(
type
->
type
==
HLSL_CLASS_STRUCT
)
{
LIST_FOR_EACH_ENTRY_SAFE
(
field
,
next_field
,
type
->
e
.
elements
,
struct
hlsl_struct_field
,
entry
)
for
(
i
=
0
;
i
<
type
->
e
.
record
.
field_count
;
++
i
)
{
field
=
&
type
->
e
.
record
.
fields
[
i
];
vkd3d_free
((
void
*
)
field
->
name
);
vkd3d_free
((
void
*
)
field
->
semantic
.
name
);
vkd3d_free
(
field
);
}
vkd3d_free
((
void
*
)
type
->
e
.
record
.
fields
);
}
vkd3d_free
(
type
);
}
...
...
@@ -1575,7 +1907,7 @@ static void free_ir_jump(struct hlsl_ir_jump *jump)
static
void
free_ir_load
(
struct
hlsl_ir_load
*
load
)
{
hlsl_
src_remove
(
&
load
->
src
.
offset
);
hlsl_
cleanup_deref
(
&
load
->
src
);
vkd3d_free
(
load
);
}
...
...
@@ -1587,9 +1919,10 @@ static void free_ir_loop(struct hlsl_ir_loop *loop)
static
void
free_ir_resource_load
(
struct
hlsl_ir_resource_load
*
load
)
{
hlsl_cleanup_deref
(
&
load
->
sampler
);
hlsl_cleanup_deref
(
&
load
->
resource
);
hlsl_src_remove
(
&
load
->
coords
);
hlsl_src_remove
(
&
load
->
sampler
.
offset
);
hlsl_src_remove
(
&
load
->
resource
.
offset
);
hlsl_src_remove
(
&
load
->
lod
);
hlsl_src_remove
(
&
load
->
texel_offset
);
vkd3d_free
(
load
);
}
...
...
@@ -1597,7 +1930,7 @@ static void free_ir_resource_load(struct hlsl_ir_resource_load *load)
static
void
free_ir_store
(
struct
hlsl_ir_store
*
store
)
{
hlsl_src_remove
(
&
store
->
rhs
);
hlsl_
src_remove
(
&
store
->
lhs
.
offset
);
hlsl_
cleanup_deref
(
&
store
->
lhs
);
vkd3d_free
(
store
);
}
...
...
libs/vkd3d/libs/vkd3d-shader/hlsl.h
View file @
c3025582
...
...
@@ -126,7 +126,11 @@ struct hlsl_type
unsigned
int
dimy
;
union
{
struct
list
*
elements
;
struct
{
struct
hlsl_struct_field
*
fields
;
size_t
field_count
;
}
record
;
struct
{
struct
hlsl_type
*
type
;
...
...
@@ -147,7 +151,6 @@ struct hlsl_semantic
struct
hlsl_struct_field
{
struct
list
entry
;
struct
vkd3d_shader_location
loc
;
struct
hlsl_type
*
type
;
const
char
*
name
;
...
...
@@ -227,6 +230,8 @@ struct hlsl_src
#define HLSL_MODIFIERS_MAJORITY_MASK (HLSL_MODIFIER_ROW_MAJOR | HLSL_MODIFIER_COLUMN_MAJOR)
#define HLSL_ARRAY_ELEMENTS_COUNT_IMPLICIT 0
struct
hlsl_reg_reservation
{
char
type
;
...
...
@@ -371,6 +376,10 @@ struct hlsl_ir_swizzle
struct
hlsl_deref
{
struct
hlsl_ir_var
*
var
;
unsigned
int
path_len
;
struct
hlsl_src
*
path
;
struct
hlsl_src
offset
;
};
...
...
@@ -384,6 +393,7 @@ enum hlsl_resource_load_type
{
HLSL_RESOURCE_LOAD
,
HLSL_RESOURCE_SAMPLE
,
HLSL_RESOURCE_SAMPLE_LOD
,
HLSL_RESOURCE_GATHER_RED
,
HLSL_RESOURCE_GATHER_GREEN
,
HLSL_RESOURCE_GATHER_BLUE
,
...
...
@@ -395,8 +405,7 @@ struct hlsl_ir_resource_load
struct
hlsl_ir_node
node
;
enum
hlsl_resource_load_type
load_type
;
struct
hlsl_deref
resource
,
sampler
;
struct
hlsl_src
coords
;
struct
hlsl_src
texel_offset
;
struct
hlsl_src
coords
,
lod
,
texel_offset
;
};
struct
hlsl_ir_store
...
...
@@ -714,6 +723,9 @@ void hlsl_dump_function(struct hlsl_ctx *ctx, const struct hlsl_ir_function_decl
int
hlsl_emit_bytecode
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_function_decl
*
entry_func
,
enum
vkd3d_shader_target_type
target_type
,
struct
vkd3d_shader_code
*
out
);
bool
hlsl_copy_deref
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_deref
*
deref
,
struct
hlsl_deref
*
other
);
void
hlsl_cleanup_deref
(
struct
hlsl_deref
*
deref
);
void
hlsl_replace_node
(
struct
hlsl_ir_node
*
old
,
struct
hlsl_ir_node
*
new
);
void
hlsl_free_instr
(
struct
hlsl_ir_node
*
node
);
...
...
@@ -726,6 +738,9 @@ struct hlsl_ir_function_decl *hlsl_get_func_decl(struct hlsl_ctx *ctx, const cha
struct
hlsl_type
*
hlsl_get_type
(
struct
hlsl_scope
*
scope
,
const
char
*
name
,
bool
recursive
);
struct
hlsl_ir_var
*
hlsl_get_var
(
struct
hlsl_scope
*
scope
,
const
char
*
name
);
struct
hlsl_type
*
hlsl_get_element_type_from_path_index
(
struct
hlsl_ctx
*
ctx
,
const
struct
hlsl_type
*
type
,
struct
hlsl_ir_node
*
idx
);
struct
hlsl_type
*
hlsl_new_array_type
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_type
*
basic_type
,
unsigned
int
array_size
);
struct
hlsl_ir_node
*
hlsl_new_binary_expr
(
struct
hlsl_ctx
*
ctx
,
enum
hlsl_ir_expr_op
op
,
struct
hlsl_ir_node
*
arg1
,
struct
hlsl_ir_node
*
arg2
);
...
...
@@ -742,17 +757,32 @@ struct hlsl_ir_if *hlsl_new_if(struct hlsl_ctx *ctx, struct hlsl_ir_node *condit
struct
hlsl_ir_constant
*
hlsl_new_int_constant
(
struct
hlsl_ctx
*
ctx
,
int
n
,
const
struct
vkd3d_shader_location
*
loc
);
struct
hlsl_ir_jump
*
hlsl_new_jump
(
struct
hlsl_ctx
*
ctx
,
enum
hlsl_ir_jump_type
type
,
struct
vkd3d_shader_location
loc
);
struct
hlsl_ir_load
*
hlsl_new_load
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_var
*
var
,
struct
hlsl_ir_node
*
offset
,
struct
hlsl_type
*
type
,
struct
vkd3d_shader_location
loc
);
struct
hlsl_ir_loop
*
hlsl_new_loop
(
struct
hlsl_ctx
*
ctx
,
struct
vkd3d_shader_location
loc
);
struct
hlsl_ir_resource_load
*
hlsl_new_resource_load
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_type
*
data_type
,
enum
hlsl_resource_load_type
type
,
struct
hlsl_ir_var
*
resource
,
struct
hlsl_ir_node
*
resource_offset
,
struct
hlsl_ir_var
*
sampler
,
struct
hlsl_ir_node
*
sampler_offset
,
struct
hlsl_ir_node
*
coords
,
struct
hlsl_ir_node
*
texel_offset
,
const
struct
vkd3d_shader_location
*
loc
);
void
hlsl_init_simple_deref_from_var
(
struct
hlsl_deref
*
deref
,
struct
hlsl_ir_var
*
var
);
struct
hlsl_ir_load
*
hlsl_new_var_load
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_var
*
var
,
struct
vkd3d_shader_location
loc
);
struct
hlsl_ir_load
*
hlsl_new_load_index
(
struct
hlsl_ctx
*
ctx
,
const
struct
hlsl_deref
*
deref
,
struct
hlsl_ir_node
*
idx
,
const
struct
vkd3d_shader_location
*
loc
);
struct
hlsl_ir_load
*
hlsl_new_load_component
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_block
*
block
,
const
struct
hlsl_deref
*
deref
,
unsigned
int
comp
,
const
struct
vkd3d_shader_location
*
loc
);
struct
hlsl_ir_store
*
hlsl_new_simple_store
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_var
*
lhs
,
struct
hlsl_ir_node
*
rhs
);
struct
hlsl_ir_store
*
hlsl_new_store
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_var
*
var
,
struct
hlsl_ir_node
*
offset
,
struct
hlsl_ir_node
*
rhs
,
unsigned
int
writemask
,
struct
vkd3d_shader_location
loc
);
struct
hlsl_type
*
hlsl_new_struct_type
(
struct
hlsl_ctx
*
ctx
,
const
char
*
name
,
struct
list
*
fields
);
struct
hlsl_ir_store
*
hlsl_new_store_index
(
struct
hlsl_ctx
*
ctx
,
const
struct
hlsl_deref
*
lhs
,
struct
hlsl_ir_node
*
idx
,
struct
hlsl_ir_node
*
rhs
,
unsigned
int
writemask
,
const
struct
vkd3d_shader_location
*
loc
);
struct
hlsl_ir_store
*
hlsl_new_store_component
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_block
*
block
,
const
struct
hlsl_deref
*
lhs
,
unsigned
int
comp
,
struct
hlsl_ir_node
*
rhs
);
struct
hlsl_ir_resource_load
*
hlsl_new_resource_load
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_type
*
data_type
,
enum
hlsl_resource_load_type
type
,
struct
hlsl_deref
*
resource
,
struct
hlsl_deref
*
sampler
,
struct
hlsl_ir_node
*
coords
,
struct
hlsl_ir_node
*
texel_offset
,
const
struct
vkd3d_shader_location
*
loc
);
struct
hlsl_ir_resource_load
*
hlsl_new_sample_lod
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_type
*
data_type
,
struct
hlsl_deref
*
resource
,
struct
hlsl_deref
*
sampler
,
struct
hlsl_ir_node
*
coords
,
struct
hlsl_ir_node
*
texel_offset
,
struct
hlsl_ir_node
*
lod
,
const
struct
vkd3d_shader_location
*
loc
);
struct
hlsl_ir_loop
*
hlsl_new_loop
(
struct
hlsl_ctx
*
ctx
,
struct
vkd3d_shader_location
loc
);
struct
hlsl_type
*
hlsl_new_struct_type
(
struct
hlsl_ctx
*
ctx
,
const
char
*
name
,
struct
hlsl_struct_field
*
fields
,
size_t
field_count
);
struct
hlsl_ir_swizzle
*
hlsl_new_swizzle
(
struct
hlsl_ctx
*
ctx
,
DWORD
s
,
unsigned
int
components
,
struct
hlsl_ir_node
*
val
,
const
struct
vkd3d_shader_location
*
loc
);
struct
hlsl_ir_var
*
hlsl_new_synthetic_var
(
struct
hlsl_ctx
*
ctx
,
const
char
*
name
,
struct
hlsl_type
*
type
,
...
...
@@ -765,8 +795,6 @@ struct hlsl_ir_node *hlsl_new_unary_expr(struct hlsl_ctx *ctx, enum hlsl_ir_expr
struct
hlsl_ir_var
*
hlsl_new_var
(
struct
hlsl_ctx
*
ctx
,
const
char
*
name
,
struct
hlsl_type
*
type
,
const
struct
vkd3d_shader_location
loc
,
const
struct
hlsl_semantic
*
semantic
,
unsigned
int
modifiers
,
const
struct
hlsl_reg_reservation
*
reg_reservation
);
struct
hlsl_ir_load
*
hlsl_new_var_load
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_var
*
var
,
const
struct
vkd3d_shader_location
loc
);
void
hlsl_error
(
struct
hlsl_ctx
*
ctx
,
const
struct
vkd3d_shader_location
*
loc
,
enum
vkd3d_shader_error
error
,
const
char
*
fmt
,
...)
VKD3D_PRINTF_FUNC
(
4
,
5
);
...
...
@@ -784,10 +812,14 @@ bool hlsl_scope_add_type(struct hlsl_scope *scope, struct hlsl_type *type);
struct
hlsl_type
*
hlsl_type_clone
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_type
*
old
,
unsigned
int
default_majority
,
unsigned
int
modifiers
);
unsigned
int
hlsl_type_component_count
(
struct
hlsl_type
*
type
);
unsigned
int
hlsl_type_component_count
(
const
struct
hlsl_type
*
type
);
unsigned
int
hlsl_type_get_array_element_reg_size
(
const
struct
hlsl_type
*
type
);
unsigned
int
hlsl_compute_component_offset
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_type
*
type
,
unsigned
int
idx
,
struct
hlsl_type
**
comp_type
);
struct
hlsl_type
*
hlsl_type_get_component_type
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_type
*
type
,
unsigned
int
index
);
bool
hlsl_type_is_row_major
(
const
struct
hlsl_type
*
type
);
unsigned
int
hlsl_type_minor_size
(
const
struct
hlsl_type
*
type
);
unsigned
int
hlsl_type_major_size
(
const
struct
hlsl_type
*
type
);
unsigned
int
hlsl_type_element_count
(
const
struct
hlsl_type
*
type
);
unsigned
int
hlsl_type_get_sm4_offset
(
const
struct
hlsl_type
*
type
,
unsigned
int
offset
);
bool
hlsl_types_are_equal
(
const
struct
hlsl_type
*
t1
,
const
struct
hlsl_type
*
t2
);
...
...
@@ -796,11 +828,14 @@ unsigned int hlsl_combine_writemasks(unsigned int first, unsigned int second);
unsigned
int
hlsl_map_swizzle
(
unsigned
int
swizzle
,
unsigned
int
writemask
);
unsigned
int
hlsl_swizzle_from_writemask
(
unsigned
int
writemask
);
bool
hlsl_component_index_range_from_deref
(
struct
hlsl_ctx
*
ctx
,
const
struct
hlsl_deref
*
deref
,
unsigned
int
*
start
,
unsigned
int
*
count
);
bool
hlsl_offset_from_deref
(
struct
hlsl_ctx
*
ctx
,
const
struct
hlsl_deref
*
deref
,
unsigned
int
*
offset
);
unsigned
int
hlsl_offset_from_deref_safe
(
struct
hlsl_ctx
*
ctx
,
const
struct
hlsl_deref
*
deref
);
struct
hlsl_reg
hlsl_reg_from_deref
(
struct
hlsl_ctx
*
ctx
,
const
struct
hlsl_deref
*
deref
);
bool
hlsl_fold_constants
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_node
*
instr
,
void
*
context
);
bool
hlsl_fold_constant_exprs
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_node
*
instr
,
void
*
context
);
bool
hlsl_fold_constant_swizzles
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_node
*
instr
,
void
*
context
);
bool
hlsl_sm1_register_from_semantic
(
struct
hlsl_ctx
*
ctx
,
const
struct
hlsl_semantic
*
semantic
,
bool
output
,
D3DSHADER_PARAM_REGISTER_TYPE
*
type
,
unsigned
int
*
reg
);
...
...
libs/vkd3d/libs/vkd3d-shader/hlsl.y
View file @
c3025582
...
...
@@ -28,6 +28,12 @@
#define HLSL_YYLTYPE struct vkd3d_shader_location
struct parse_fields
{
struct hlsl_struct_field *fields;
size_t count, capacity;
};
struct parse_parameter
{
struct hlsl_type *type;
...
...
@@ -266,8 +272,8 @@ static bool implicit_compatible_data_types(struct hlsl_type *t1, struct hlsl_typ
return false;
}
static struct hlsl_ir_load *add_load
(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *var_node
,
struct hlsl_ir_node *offset, struct hlsl_type *data_type, const struct vkd3d_shader_location
loc);
static struct hlsl_ir_load *add_load
_component(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *var_instr
,
unsigned int comp, const struct vkd3d_shader_location *
loc);
static struct hlsl_ir_node *add_cast(struct hlsl_ctx *ctx, struct list *instrs,
struct hlsl_ir_node *node, struct hlsl_type *dst_type, const struct vkd3d_shader_location *loc)
...
...
@@ -283,6 +289,7 @@ static struct hlsl_ir_node *add_cast(struct hlsl_ctx *ctx, struct list *instrs,
{
struct vkd3d_string_buffer *name;
static unsigned int counter = 0;
struct hlsl_deref var_deref;
struct hlsl_ir_load *load;
struct hlsl_ir_var *var;
unsigned int dst_idx;
...
...
@@ -302,13 +309,14 @@ static struct hlsl_ir_node *add_cast(struct hlsl_ctx *ctx, struct list *instrs,
vkd3d_string_buffer_release(&ctx->string_buffers, name);
if (!var)
return NULL;
hlsl_init_simple_deref_from_var(&var_deref, var);
for (dst_idx = 0; dst_idx < dst_type->dimx * dst_type->dimy; ++dst_idx)
{
struct hlsl_type *src_scalar_type, *dst_scalar_type;
unsigned int src_idx, src_offset, dst_offset;
struct hlsl_type *dst_scalar_type;
struct hlsl_ir_store *store;
struct hlsl_ir_constant *c;
struct hlsl_block block;
unsigned int src_idx;
if (broadcast)
{
...
...
@@ -328,30 +336,21 @@ static struct hlsl_ir_node *add_cast(struct hlsl_ctx *ctx, struct list *instrs,
}
}
dst_offset = hlsl_compute_component_offset(ctx, dst_type, dst_idx, &dst_scalar_type);
src_offset = hlsl_compute_component_offset(ctx, src_type, src_idx, &src_scalar_type);
dst_scalar_type = hlsl_type_get_component_type(ctx, dst_type, dst_idx);
if (!(c = hlsl_new_uint_constant(ctx, src_offset, loc)))
return NULL;
list_add_tail(instrs, &c->node.entry);
if (!(load = add_load(ctx, instrs, node, &c->node, src_scalar_type, *loc)))
if (!(load = add_load_component(ctx, instrs, node, src_idx, loc)))
return NULL;
if (!(cast = hlsl_new_cast(ctx, &load->node, dst_scalar_type, loc)))
return NULL;
list_add_tail(instrs, &cast->node.entry);
if (!(
c = hlsl_new_uint_constant(ctx, dst_offset, loc
)))
if (!(
store = hlsl_new_store_component(ctx, &block, &var_deref, dst_idx, &cast->node
)))
return NULL;
list_add_tail(instrs, &c->node.entry);
if (!(store = hlsl_new_store(ctx, var, &c->node, &cast->node, 0, *loc)))
return NULL;
list_add_tail(instrs, &store->node.entry);
list_move_tail(instrs, &block.instrs);
}
if (!(load = hlsl_new_
load(ctx, var, NULL, dst_type
, *loc)))
if (!(load = hlsl_new_
var_load(ctx, var
, *loc)))
return NULL;
list_add_tail(instrs, &load->node.entry);
...
...
@@ -371,6 +370,9 @@ static struct hlsl_ir_node *add_implicit_conversion(struct hlsl_ctx *ctx, struct
{
struct hlsl_type *src_type = node->data_type;
if (hlsl_types_are_equal(src_type, dst_type))
return node;
if (!implicit_compatible_data_types(src_type, dst_type))
{
struct vkd3d_string_buffer *src_string, *dst_string;
...
...
@@ -621,111 +623,115 @@ static struct hlsl_ir_jump *add_return(struct hlsl_ctx *ctx, struct list *instrs
return jump;
}
static struct hlsl_ir_load *add_load
(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *var_node
,
struct hlsl_ir_node *
offset, struct hlsl_type *data_type, const struct vkd3d_shader_location
loc)
static struct hlsl_ir_load *add_load
_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *var_instr
,
struct hlsl_ir_node *
idx, const struct vkd3d_shader_location *
loc)
{
struct hlsl_ir_node *add = NULL
;
const struct hlsl_deref *src
;
struct hlsl_ir_load *load;
struct hlsl_ir_var *var;
if (var_
node
->type == HLSL_IR_LOAD)
if (var_
instr
->type == HLSL_IR_LOAD)
{
const struct hlsl_deref *src = &hlsl_ir_load(var_node)->src;
var = src->var;
if (src->offset.node)
{
if (!(add = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, src->offset.node, offset)))
return NULL;
list_add_tail(instrs, &add->entry);
offset = add;
}
src = &hlsl_ir_load(var_instr)->src;
}
else
{
struct vkd3d_string_buffer *name;
struct hlsl_ir_store *store;
char name[27]
;
struct hlsl_ir_var *var
;
sprintf(name, "<deref-%p>", var_node);
if (!(var = hlsl_new_synthetic_var(ctx, name, var_node->data_type, var_node->loc)))
if (!(name = hlsl_get_string_buffer(ctx)))
return NULL;
if (!(store = hlsl_new_simple_store(ctx, var, var_node)))
vkd3d_string_buffer_printf(name, "<deref-%p>", var_instr);
var = hlsl_new_synthetic_var(ctx, name->buffer, var_instr->data_type, var_instr->loc);
hlsl_release_string_buffer(ctx, name);
if (!var)
return NULL;
if (!(store = hlsl_new_simple_store(ctx, var, var_instr)))
return NULL;
list_add_tail(instrs, &store->node.entry);
src = &store->lhs;
}
if (!(load = hlsl_new_load
(ctx, var, offset, data_type
, loc)))
if (!(load = hlsl_new_load
_index(ctx, src, idx
, loc)))
return NULL;
list_add_tail(instrs, &load->node.entry);
return load;
}
static bool add_record_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *record,
const struct hlsl_struct_field *field, const struct vkd3d_shader_location loc)
{
struct hlsl_ir_constant *c;
if (!(c = hlsl_new_uint_constant(ctx, field->reg_offset, &loc)))
return false;
list_add_tail(instrs, &c->node.entry);
return !!add_load(ctx, instrs, record, &c->node, field->type, loc);
return load;
}
static struct hlsl_ir_node *add_binary_arithmetic_expr(struct hlsl_ctx *ctx, struct list *instrs,
enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2,
const struct vkd3d_shader_location *loc);
static struct hlsl_ir_node *add_matrix_scalar_load(struct hlsl_ctx *ctx, struct list *instrs,
struct hlsl_ir_node *matrix, struct hlsl_ir_node *x, struct hlsl_ir_node *y,
const struct vkd3d_shader_location *loc)
static struct hlsl_ir_load *add_load_component(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *var_instr,
unsigned int comp, const struct vkd3d_shader_location *loc)
{
struct hlsl_ir_node *major, *minor, *mul, *add;
struct hlsl_ir_constant *four;
const struct hlsl_deref *src;
struct hlsl_ir_load *load;
struct hlsl_type *type = matrix->data_type, *scalar_type;
scalar_type = hlsl_get_scalar_type(ctx, type->base_type);
struct hlsl_block block;
if (
type->modifiers & HLSL_MODIFIER_ROW_MAJOR
)
if (
var_instr->type == HLSL_IR_LOAD
)
{
minor = x;
major = y;
src = &hlsl_ir_load(var_instr)->src;
}
else
{
minor = y;
major = x;
struct vkd3d_string_buffer *name;
struct hlsl_ir_store *store;
struct hlsl_ir_var *var;
if (!(name = hlsl_get_string_buffer(ctx)))
return NULL;
vkd3d_string_buffer_printf(name, "<deref-%p>", var_instr);
var = hlsl_new_synthetic_var(ctx, name->buffer, var_instr->data_type, var_instr->loc);
hlsl_release_string_buffer(ctx, name);
if (!var)
return NULL;
if (!(store = hlsl_new_simple_store(ctx, var, var_instr)))
return NULL;
list_add_tail(instrs, &store->node.entry);
src = &store->lhs;
}
if (!(
four = hlsl_new_uint_constant(ctx, 4
, loc)))
if (!(
load = hlsl_new_load_component(ctx, &block, src, comp
, loc)))
return NULL;
list_
add_tail(instrs, &four->node.entry
);
list_
move_tail(instrs, &block.instrs
);
if (!(mul = add_binary_arithmetic_expr(ctx, instrs, HLSL_OP2_MUL, &four->node, major, loc)))
return NULL;
return load;
}
if (!(add = add_binary_arithmetic_expr(ctx, instrs, HLSL_OP2_ADD, mul, minor, loc)))
return NULL;
static bool add_record_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *record,
unsigned int idx, const struct vkd3d_shader_location loc)
{
struct hlsl_ir_constant *c;
if (!(load = add_load(ctx, instrs, matrix, add, scalar_type, *loc)))
return NULL;
assert(idx < record->data_type->e.record.field_count);
return &load->node;
if (!(c = hlsl_new_uint_constant(ctx, idx, &loc)))
return false;
list_add_tail(instrs, &c->node.entry);
return !!add_load_index(ctx, instrs, record, &c->node, &loc);
}
static struct hlsl_ir_node *add_binary_arithmetic_expr(struct hlsl_ctx *ctx, struct list *instrs,
enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2,
const struct vkd3d_shader_location *loc);
static bool add_matrix_index(struct hlsl_ctx *ctx, struct list *instrs,
struct hlsl_ir_node *matrix, struct hlsl_ir_node *index, const struct vkd3d_shader_location *loc)
{
struct hlsl_type *mat_type = matrix->data_type, *ret_type;
struct vkd3d_string_buffer *name;
static unsigned int counter = 0;
struct hlsl_deref var_deref;
struct hlsl_ir_load *load;
struct hlsl_ir_var *var;
unsigned int i;
if (hlsl_type_is_row_major(mat_type))
return add_load_index(ctx, instrs, matrix, index, loc);
ret_type = hlsl_get_vector_type(ctx, mat_type->base_type, mat_type->dimx);
name = vkd3d_string_buffer_get(&ctx->string_buffers);
...
...
@@ -734,23 +740,28 @@ static bool add_matrix_index(struct hlsl_ctx *ctx, struct list *instrs,
vkd3d_string_buffer_release(&ctx->string_buffers, name);
if (!var)
return false;
hlsl_init_simple_deref_from_var(&var_deref, var);
for (i = 0; i < mat_type->dimx; ++i)
{
struct hlsl_ir_load *column, *value;
struct hlsl_ir_store *store;
struct hlsl_ir_node *value;
struct hlsl_ir_constant *c;
struct hlsl_block block;
if (!(c = hlsl_new_uint_constant(ctx, i, loc)))
return false;
list_add_tail(instrs, &c->node.entry);
if (!(
value = add_matrix_scalar_load(ctx, instrs, matrix, &c->node, index
, loc)))
if (!(
column = add_load_index(ctx, instrs, matrix, &c->node
, loc)))
return false;
if (!(
store = hlsl_new_store(ctx, var, &c->node, value, 0, *
loc)))
if (!(
value = add_load_index(ctx, instrs, &column->node, index,
loc)))
return false;
list_add_tail(instrs, &store->node.entry);
if (!(store = hlsl_new_store_component(ctx, &block, &var_deref, i, &value->node)))
return false;
list_move_tail(instrs, &block.instrs);
}
if (!(load = hlsl_new_var_load(ctx, var, *loc)))
...
...
@@ -764,30 +775,11 @@ static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hls
struct hlsl_ir_node *index, const struct vkd3d_shader_location loc)
{
const struct hlsl_type *expr_type = array->data_type;
struct hlsl_type *data_type;
struct hlsl_ir_constant *c;
if (expr_type->type == HLSL_CLASS_ARRAY)
{
data_type = expr_type->e.array.type;
if (!(c = hlsl_new_uint_constant(ctx, hlsl_type_get_array_element_reg_size(data_type), &loc)))
return false;
list_add_tail(instrs, &c->node.entry);
if (!(index = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, index, &c->node)))
return false;
list_add_tail(instrs, &index->entry);
}
else if (expr_type->type == HLSL_CLASS_MATRIX)
{
if (expr_type->type == HLSL_CLASS_MATRIX)
return add_matrix_index(ctx, instrs, array, index, &loc);
}
else if (expr_type->type == HLSL_CLASS_VECTOR)
{
data_type = hlsl_get_scalar_type(ctx, expr_type->base_type);
}
else
if (expr_type->type != HLSL_CLASS_ARRAY && expr_type->type != HLSL_CLASS_VECTOR)
{
if (expr_type->type == HLSL_CLASS_SCALAR)
hlsl_error(ctx, &loc, VKD3D_SHADER_ERROR_HLSL_INVALID_INDEX, "Scalar expressions cannot be array-indexed.");
...
...
@@ -796,17 +788,21 @@ static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hls
return false;
}
return !!add_load(ctx, instrs, array, index, data_type, loc);
if (!add_load_index(ctx, instrs, array, index, &loc))
return false;
return true;
}
static struct hlsl_struct_field *get_struct_field(struct list *fields, const char *name)
static const struct hlsl_struct_field *get_struct_field(const struct hlsl_struct_field *fields,
size_t count, const char *name)
{
s
truct hlsl_struct_field *f
;
s
ize_t i
;
LIST_FOR_EACH_ENTRY(f, fields, struct hlsl_struct_field, entry
)
for (i = 0; i < count; ++i
)
{
if (!strcmp(f
->
name, name))
return
f
;
if (!strcmp(f
ields[i].
name, name))
return
&fields[i]
;
}
return NULL;
}
...
...
@@ -857,31 +853,67 @@ static void free_parse_variable_def(struct parse_variable_def *v)
vkd3d_free(v);
}
static struct list *gen_struct_fields(struct hlsl_ctx *ctx,
struct hlsl_type *type, unsigned int modifiers, struct list *fields)
static bool shader_is_sm_5_1(const struct hlsl_ctx *ctx)
{
return ctx->profile->major_version == 5 && ctx->profile->minor_version >= 1;
}
static bool gen_struct_fields(struct hlsl_ctx *ctx, struct parse_fields *fields,
struct hlsl_type *type, unsigned int modifiers, struct list *defs)
{
struct parse_variable_def *v, *v_next;
struct hlsl_struct_field *field;
struct list *list;
size_t i = 0;
if (type->type == HLSL_CLASS_MATRIX)
assert(type->modifiers & HLSL_MODIFIERS_MAJORITY_MASK);
if (!(list = make_empty_list(ctx)))
return NULL;
LIST_FOR_EACH_ENTRY_SAFE(v, v_next, fields, struct parse_variable_def, entry)
memset(fields, 0, sizeof(*fields));
fields->count = list_count(defs);
if (!hlsl_array_reserve(ctx, (void **)&fields->fields, &fields->capacity, fields->count, sizeof(*fields->fields)))
return false;
LIST_FOR_EACH_ENTRY_SAFE(v, v_next, defs, struct parse_variable_def, entry)
{
unsigned int i;
struct hlsl_struct_field *field = &fields->fields[i++];
bool unbounded_res_array = false;
unsigned int k;
field->type = type;
if (
!(field = hlsl_alloc(ctx, sizeof(*field)))
)
if (
shader_is_sm_5_1(ctx) && type->type == HLSL_CLASS_OBJECT
)
{
f
ree_parse_variable_def(v);
continue
;
f
or (k = 0; k < v->arrays.count; ++k)
unbounded_res_array |= (v->arrays.sizes[k] == HLSL_ARRAY_ELEMENTS_COUNT_IMPLICIT)
;
}
field->type = type;
for (i = 0; i < v->arrays.count; ++i)
field->type = hlsl_new_array_type(ctx, field->type, v->arrays.sizes[i]);
if (unbounded_res_array)
{
if (v->arrays.count == 1)
{
hlsl_fixme(ctx, &v->loc, "Unbounded resource arrays as struct fields.");
free_parse_variable_def(v);
vkd3d_free(field);
continue;
}
else
{
hlsl_error(ctx, &v->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
"Unbounded resource arrays cannot be multi-dimensional.");
}
}
else
{
for (k = 0; k < v->arrays.count; ++k)
{
if (v->arrays.sizes[k] == HLSL_ARRAY_ELEMENTS_COUNT_IMPLICIT)
{
hlsl_error(ctx, &v->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
"Implicit size arrays not allowed in struct fields.");
}
field->type = hlsl_new_array_type(ctx, field->type, v->arrays.sizes[k]);
}
}
vkd3d_free(v->arrays.sizes);
field->loc = v->loc;
field->name = v->name;
...
...
@@ -892,11 +924,10 @@ static struct list *gen_struct_fields(struct hlsl_ctx *ctx,
hlsl_error(ctx, &v->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_SYNTAX, "Illegal initializer on a struct field.");
free_parse_initializer(&v->initializer);
}
list_add_tail(list, &field->entry);
vkd3d_free(v);
}
vkd3d_free(
field
s);
return
list
;
vkd3d_free(
def
s);
return
true
;
}
static bool add_typedef(struct hlsl_ctx *ctx, DWORD modifiers, struct hlsl_type *orig_type, struct list *list)
...
...
@@ -924,6 +955,12 @@ static bool add_typedef(struct hlsl_ctx *ctx, DWORD modifiers, struct hlsl_type
ret = true;
for (i = 0; i < v->arrays.count; ++i)
{
if (v->arrays.sizes[i] == HLSL_ARRAY_ELEMENTS_COUNT_IMPLICIT)
{
hlsl_error(ctx, &v->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
"Implicit size arrays not allowed in typedefs.");
}
if (!(type = hlsl_new_array_type(ctx, type, v->arrays.sizes[i])))
{
free_parse_variable_def(v);
...
...
@@ -1201,22 +1238,6 @@ static bool expr_common_shape(struct hlsl_ctx *ctx, struct hlsl_type *t1, struct
return true;
}
static unsigned int minor_size(const struct hlsl_type *type)
{
if (type->modifiers & HLSL_MODIFIER_ROW_MAJOR)
return type->dimx;
else
return type->dimy;
}
static unsigned int major_size(const struct hlsl_type *type)
{
if (type->modifiers & HLSL_MODIFIER_ROW_MAJOR)
return type->dimy;
else
return type->dimx;
}
static struct hlsl_ir_node *add_expr(struct hlsl_ctx *ctx, struct list *instrs,
enum hlsl_ir_expr_op op, struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS],
struct hlsl_type *type, const struct vkd3d_shader_location *loc)
...
...
@@ -1229,10 +1250,11 @@ static struct hlsl_ir_node *add_expr(struct hlsl_ctx *ctx, struct list *instrs,
struct vkd3d_string_buffer *name;
static unsigned int counter = 0;
struct hlsl_type *vector_type;
struct hlsl_deref var_deref;
struct hlsl_ir_load *load;
struct hlsl_ir_var *var;
vector_type = hlsl_get_vector_type(ctx, type->base_type, minor_size(type));
vector_type = hlsl_get_vector_type(ctx, type->base_type,
hlsl_type_
minor_size(type));
name = vkd3d_string_buffer_get(&ctx->string_buffers);
vkd3d_string_buffer_printf(name, "<split_op-%u>", counter++);
...
...
@@ -1240,15 +1262,16 @@ static struct hlsl_ir_node *add_expr(struct hlsl_ctx *ctx, struct list *instrs,
vkd3d_string_buffer_release(&ctx->string_buffers, name);
if (!var)
return NULL;
hlsl_init_simple_deref_from_var(&var_deref, var);
for (i = 0; i <
major_size(type); i++
)
for (i = 0; i <
hlsl_type_major_size(type); ++i
)
{
struct hlsl_ir_node *value, *vector_operands[HLSL_MAX_OPERANDS] = { NULL };
struct hlsl_ir_store *store;
struct hlsl_ir_constant *c;
unsigned int j;
if (!(c = hlsl_new_uint_constant(ctx,
4 *
i, loc)))
if (!(c = hlsl_new_uint_constant(ctx, i, loc)))
return NULL;
list_add_tail(instrs, &c->node.entry);
...
...
@@ -1256,12 +1279,9 @@ static struct hlsl_ir_node *add_expr(struct hlsl_ctx *ctx, struct list *instrs,
{
if (operands[j])
{
struct hlsl_type *vector_arg_type;
struct hlsl_ir_load *load;
vector_arg_type = hlsl_get_vector_type(ctx, operands[j]->data_type->base_type, minor_size(type));
if (!(load = add_load(ctx, instrs, operands[j], &c->node, vector_arg_type, *loc)))
if (!(load = add_load_index(ctx, instrs, operands[j], &c->node, loc)))
return NULL;
vector_operands[j] = &load->node;
}
...
...
@@ -1270,12 +1290,12 @@ static struct hlsl_ir_node *add_expr(struct hlsl_ctx *ctx, struct list *instrs,
if (!(value = add_expr(ctx, instrs, op, vector_operands, vector_type, loc)))
return NULL;
if (!(store = hlsl_new_store
(ctx, var, &c->node, value, 0, *
loc)))
if (!(store = hlsl_new_store
_index(ctx, &var_deref, &c->node, value, 0,
loc)))
return NULL;
list_add_tail(instrs, &store->node.entry);
}
if (!(load = hlsl_new_
load(ctx, var, NULL, type
, *loc)))
if (!(load = hlsl_new_
var_load(ctx, var
, *loc)))
return NULL;
list_add_tail(instrs, &load->node.entry);
...
...
@@ -1516,6 +1536,61 @@ static struct list *add_binary_shift_expr_merge(struct hlsl_ctx *ctx, struct lis
return list1;
}
static struct hlsl_ir_node *add_binary_dot_expr(struct hlsl_ctx *ctx, struct list *instrs,
struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2, const struct vkd3d_shader_location *loc)
{
enum hlsl_base_type base = expr_common_base_type(arg1->data_type->base_type, arg2->data_type->base_type);
struct hlsl_ir_node *args[HLSL_MAX_OPERANDS] = {0};
struct hlsl_type *common_type, *ret_type;
enum hlsl_ir_expr_op op;
unsigned dim;
if (arg1->data_type->type == HLSL_CLASS_MATRIX)
{
struct vkd3d_string_buffer *string;
if ((string = hlsl_type_to_string(ctx, arg1->data_type)))
hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
"Invalid type %s.\n", string->buffer);
hlsl_release_string_buffer(ctx, string);
return NULL;
}
if (arg2->data_type->type == HLSL_CLASS_MATRIX)
{
struct vkd3d_string_buffer *string;
if ((string = hlsl_type_to_string(ctx, arg2->data_type)))
hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
"Invalid type %s.\n", string->buffer);
hlsl_release_string_buffer(ctx, string);
return NULL;
}
if (arg1->data_type->type == HLSL_CLASS_SCALAR)
dim = arg2->data_type->dimx;
else if (arg1->data_type->type == HLSL_CLASS_SCALAR)
dim = arg1->data_type->dimx;
else
dim = min(arg1->data_type->dimx, arg2->data_type->dimx);
if (dim == 1)
op = HLSL_OP2_MUL;
else
op = HLSL_OP2_DOT;
common_type = hlsl_get_vector_type(ctx, base, dim);
ret_type = hlsl_get_scalar_type(ctx, base);
if (!(args[0] = add_implicit_conversion(ctx, instrs, arg1, common_type, loc)))
return NULL;
if (!(args[1] = add_implicit_conversion(ctx, instrs, arg2, common_type, loc)))
return NULL;
return add_expr(ctx, instrs, op, args, ret_type, loc);
}
static enum hlsl_ir_expr_op op_from_assignment(enum parse_assign_op op)
{
static const enum hlsl_ir_expr_op ops[] =
...
...
@@ -1603,15 +1678,11 @@ static struct hlsl_ir_node *add_assignment(struct hlsl_ctx *ctx, struct list *in
return NULL;
}
if (!(store = hlsl_alloc(ctx, sizeof(*store))))
return NULL;
while (lhs->type != HLSL_IR_LOAD)
{
if (lhs->type == HLSL_IR_EXPR && hlsl_ir_expr(lhs)->op == HLSL_OP1_CAST)
{
hlsl_fixme(ctx, &lhs->loc, "Cast on the LHS.");
vkd3d_free(store);
return NULL;
}
else if (lhs->type == HLSL_IR_SWIZZLE)
...
...
@@ -1625,13 +1696,11 @@ static struct hlsl_ir_node *add_assignment(struct hlsl_ctx *ctx, struct list *in
if (!invert_swizzle(&s, &writemask, &width))
{
hlsl_error(ctx, &lhs->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_WRITEMASK, "Invalid writemask.");
vkd3d_free(store);
return NULL;
}
if (!(new_swizzle = hlsl_new_swizzle(ctx, s, width, rhs, &swizzle->node.loc)))
{
vkd3d_free(store);
return NULL;
}
list_add_tail(instrs, &new_swizzle->node.entry);
...
...
@@ -1642,16 +1711,12 @@ static struct hlsl_ir_node *add_assignment(struct hlsl_ctx *ctx, struct list *in
else
{
hlsl_error(ctx, &lhs->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_LVALUE, "Invalid lvalue.");
vkd3d_free(store);
return NULL;
}
}
init_node(&store->node, HLSL_IR_STORE, NULL, lhs->loc);
store->writemask = writemask;
store->lhs.var = hlsl_ir_load(lhs)->src.var;
hlsl_src_from_node(&store->lhs.offset, hlsl_ir_load(lhs)->src.offset.node);
hlsl_src_from_node(&store->rhs, rhs);
if (!(store = hlsl_new_store_index(ctx, &hlsl_ir_load(lhs)->src, NULL, rhs, writemask, &rhs->loc)))
return NULL;
list_add_tail(instrs, &store->node.entry);
/* Don't use the instruction itself as a source, as this makes structure
...
...
@@ -1700,37 +1765,30 @@ static void initialize_var_components(struct hlsl_ctx *ctx, struct list *instrs,
struct hlsl_ir_var *dst, unsigned int *store_index, struct hlsl_ir_node *src)
{
unsigned int src_comp_count = hlsl_type_component_count(src->data_type);
struct hlsl_deref dst_deref;
unsigned int k;
hlsl_init_simple_deref_from_var(&dst_deref, dst);
for (k = 0; k < src_comp_count; ++k)
{
struct hlsl_type *dst_comp_type, *src_comp_type;
unsigned int dst_reg_offset, src_reg_offset;
struct hlsl_type *dst_comp_type;
struct hlsl_ir_store *store;
struct hlsl_ir_constant *c;
struct hlsl_ir_load *load;
struct hlsl_ir_node *conv;
struct hlsl_block block;
dst_reg_offset = hlsl_compute_component_offset(ctx, dst->data_type, *store_index, &dst_comp_type);
src_reg_offset = hlsl_compute_component_offset(ctx, src->data_type, k, &src_comp_type);
if (!(c = hlsl_new_uint_constant(ctx, src_reg_offset, &src->loc)))
if (!(load = add_load_component(ctx, instrs, src, k, &src->loc)))
return;
list_add_tail(instrs, &c->node.entry);
if (!(load = add_load(ctx, instrs, src, &c->node, src_comp_type, src->loc)))
return;
dst_comp_type = hlsl_type_get_component_type(ctx, dst->data_type, *store_index);
if (!(conv = add_implicit_conversion(ctx, instrs, &load->node, dst_comp_type, &src->loc)))
return;
if (!(
c = hlsl_new_uint_constant(ctx, dst_reg_offset, &src->loc
)))
if (!(
store = hlsl_new_store_component(ctx, &block, &dst_deref, *store_index, conv
)))
return;
list_add_tail(instrs, &c->node.entry);
if (!(store = hlsl_new_store(ctx, dst, &c->node, conv, 0, src->loc)))
return;
list_add_tail(instrs, &store->node.entry);
list_move_tail(instrs, &block.instrs);
++*store_index;
}
...
...
@@ -1762,11 +1820,78 @@ static struct list *declare_vars(struct hlsl_ctx *ctx, struct hlsl_type *basic_t
LIST_FOR_EACH_ENTRY_SAFE(v, v_next, var_list, struct parse_variable_def, entry)
{
bool unbounded_res_array = false;
unsigned int i;
type = basic_type;
for (i = 0; i < v->arrays.count; ++i)
type = hlsl_new_array_type(ctx, type, v->arrays.sizes[i]);
if (shader_is_sm_5_1(ctx) && type->type == HLSL_CLASS_OBJECT)
{
for (i = 0; i < v->arrays.count; ++i)
unbounded_res_array |= (v->arrays.sizes[i] == HLSL_ARRAY_ELEMENTS_COUNT_IMPLICIT);
}
if (unbounded_res_array)
{
if (v->arrays.count == 1)
{
hlsl_fixme(ctx, &v->loc, "Unbounded resource arrays.");
free_parse_variable_def(v);
continue;
}
else
{
hlsl_error(ctx, &v->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
"Unbounded resource arrays cannot be multi-dimensional.");
}
}
else
{
for (i = 0; i < v->arrays.count; ++i)
{
if (v->arrays.sizes[i] == HLSL_ARRAY_ELEMENTS_COUNT_IMPLICIT)
{
unsigned int size = initializer_size(&v->initializer);
unsigned int elem_components = hlsl_type_component_count(type);
if (i < v->arrays.count - 1)
{
hlsl_error(ctx, &v->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
"Only innermost array size can be implicit.");
free_parse_initializer(&v->initializer);
v->initializer.args_count = 0;
}
else if (elem_components == 0)
{
hlsl_error(ctx, &v->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
"Cannot declare an implicit size array of a size 0 type.");
free_parse_initializer(&v->initializer);
v->initializer.args_count = 0;
}
else if (size == 0)
{
hlsl_error(ctx, &v->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
"Implicit size arrays need to be initialized.");
free_parse_initializer(&v->initializer);
v->initializer.args_count = 0;
}
else if (size % elem_components != 0)
{
hlsl_error(ctx, &v->loc, VKD3D_SHADER_ERROR_HLSL_WRONG_PARAMETER_COUNT,
"Cannot initialize implicit size array with %u components, expected a multiple of %u.",
size, elem_components);
free_parse_initializer(&v->initializer);
v->initializer.args_count = 0;
}
else
{
v->arrays.sizes[i] = size / elem_components;
}
}
type = hlsl_new_array_type(ctx, type, v->arrays.sizes[i]);
}
}
vkd3d_free(v->arrays.sizes);
if (type->type != HLSL_CLASS_MATRIX)
...
...
@@ -2023,6 +2148,12 @@ static bool intrinsic_cross(struct hlsl_ctx *ctx,
return !!add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_ADD, mul2, mul1_neg, loc);
}
static bool intrinsic_dot(struct hlsl_ctx *ctx,
const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
{
return !!add_binary_dot_expr(ctx, params->instrs, params->args[0], params->args[1], loc);
}
static bool intrinsic_floor(struct hlsl_ctx *ctx,
const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
{
...
...
@@ -2034,6 +2165,40 @@ static bool intrinsic_floor(struct hlsl_ctx *ctx,
return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_FLOOR, arg, loc);
}
static bool intrinsic_ldexp(struct hlsl_ctx *ctx,
const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
{
struct hlsl_ir_node *arg;
if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[1], loc)))
return false;
if (!(arg = add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_EXP2, arg, loc)))
return false;
return !!add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MUL, params->args[0], arg, loc);
}
static bool intrinsic_lerp(struct hlsl_ctx *ctx,
const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
{
struct hlsl_ir_node *arg, *neg, *add, *mul;
if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
return false;
if (!(neg = add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_NEG, arg, loc)))
return false;
if (!(add = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_ADD, params->args[1], neg, loc)))
return false;
if (!(mul = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MUL, params->args[2], add, loc)))
return false;
return !!add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_ADD, arg, mul, loc);
}
static bool intrinsic_max(struct hlsl_ctx *ctx,
const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
{
...
...
@@ -2055,6 +2220,7 @@ static bool intrinsic_mul(struct hlsl_ctx *ctx,
unsigned int i, j, k, vect_count = 0;
struct vkd3d_string_buffer *name;
static unsigned int counter = 0;
struct hlsl_deref var_deref;
struct hlsl_ir_load *load;
struct hlsl_ir_var *var;
...
...
@@ -2101,62 +2267,48 @@ static bool intrinsic_mul(struct hlsl_ctx *ctx,
vkd3d_string_buffer_release(&ctx->string_buffers, name);
if (!var)
return false;
hlsl_init_simple_deref_from_var(&var_deref, var);
for (i = 0; i < matrix_type->dimx; ++i)
{
for (j = 0; j < matrix_type->dimy; ++j)
{
struct hlsl_ir_node *node = NULL;
struct hlsl_type *scalar_type;
struct hlsl_ir_node *instr = NULL;
struct hlsl_ir_store *store;
struct hlsl_ir_constant *c;
unsigned int offset;
struct hlsl_block block;
for (k = 0; k < cast_type1->dimx && k < cast_type2->dimy; ++k)
{
struct hlsl_ir_load *value1, *value2;
struct hlsl_ir_node *mul;
offset = hlsl_compute_component_offset(ctx, cast_type1, j * cast_type1->dimx + k, &scalar_type);
if (!(c = hlsl_new_uint_constant(ctx, offset, loc)))
return false;
list_add_tail(params->instrs, &c->node.entry);
if (!(value1 = add_load(ctx, params->instrs, cast1, &c->node, scalar_type, *loc)))
return false;
offset = hlsl_compute_component_offset(ctx, cast_type2, k * cast_type2->dimx + i, &scalar_type);
if (!(c = hlsl_new_uint_constant(ctx, offset, loc)))
if (!(value1 = add_load_component(ctx, params->instrs, cast1, j * cast1->data_type->dimx + k, loc)))
return false;
list_add_tail(params->instrs, &c->node.entry);
if (!(value2 = add_load
(ctx, params->instrs, cast2, &c->node, scalar_type, *
loc)))
if (!(value2 = add_load
_component(ctx, params->instrs, cast2, k * cast2->data_type->dimx + i,
loc)))
return false;
if (!(mul = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MUL, &value1->node, &value2->node, loc)))
return false;
if (
node
)
if (
instr
)
{
if (!(
node = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_ADD, node
, mul, loc)))
if (!(
instr = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_ADD, instr
, mul, loc)))
return false;
}
else
{
node
= mul;
instr
= mul;
}
}
offset = hlsl_compute_component_offset(ctx, matrix_type, j * matrix_type->dimx + i, &scalar_type);
if (!(c = hlsl_new_uint_constant(ctx, offset, loc)))
return false;
list_add_tail(params->instrs, &c->node.entry);
if (!(store = hlsl_new_store(ctx, var, &c->node, node, 0, *loc)))
if (!(store = hlsl_new_store_component(ctx, &block, &var_deref, j * matrix_type->dimx + i, instr)))
return false;
list_
add_tail(params->instrs, &store->node.entry
);
list_
move_tail(params->instrs, &block.instrs
);
}
}
if (!(load = hlsl_new_
load(ctx, var, NULL, matrix_type
, *loc)))
if (!(load = hlsl_new_
var_load(ctx, var
, *loc)))
return false;
list_add_tail(params->instrs, &load->node.entry);
...
...
@@ -2220,7 +2372,10 @@ intrinsic_functions[] =
{"abs", 1, true, intrinsic_abs},
{"clamp", 3, true, intrinsic_clamp},
{"cross", 2, true, intrinsic_cross},
{"dot", 2, true, intrinsic_dot},
{"floor", 1, true, intrinsic_floor},
{"ldexp", 2, true, intrinsic_ldexp},
{"lerp", 3, true, intrinsic_lerp},
{"max", 2, true, intrinsic_max},
{"min", 2, true, intrinsic_min},
{"mul", 2, true, intrinsic_mul},
...
...
@@ -2336,6 +2491,30 @@ static struct list *add_constructor(struct hlsl_ctx *ctx, struct hlsl_type *type
return params->instrs;
}
static unsigned int hlsl_offset_dim_count(enum hlsl_sampler_dim dim)
{
switch (dim)
{
case HLSL_SAMPLER_DIM_1D:
case HLSL_SAMPLER_DIM_1DARRAY:
return 1;
case HLSL_SAMPLER_DIM_2D:
case HLSL_SAMPLER_DIM_2DMS:
case HLSL_SAMPLER_DIM_2DARRAY:
case HLSL_SAMPLER_DIM_2DMSARRAY:
return 2;
case HLSL_SAMPLER_DIM_3D:
return 3;
case HLSL_SAMPLER_DIM_CUBE:
case HLSL_SAMPLER_DIM_CUBEARRAY:
/* Offset parameters not supported for these types. */
return 0;
default:
assert(0);
return 0;
}
}
static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *object,
const char *name, const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
{
...
...
@@ -2362,26 +2541,38 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
&& object_type->sampler_dim != HLSL_SAMPLER_DIM_CUBEARRAY)
{
const unsigned int sampler_dim = hlsl_sampler_dim_count(object_type->sampler_dim);
const unsigned int offset_dim = hlsl_offset_dim_count(object_type->sampler_dim);
struct hlsl_ir_resource_load *load;
struct hlsl_ir_node *offset = NULL;
struct hlsl_ir_node *coords;
bool multisampled;
if (object_type->sampler_dim == HLSL_SAMPLER_DIM_2DMS
|| object_type->sampler_dim == HLSL_SAMPLER_DIM_2DMSARRAY)
multisampled = object_type->sampler_dim == HLSL_SAMPLER_DIM_2DMS
|| object_type->sampler_dim == HLSL_SAMPLER_DIM_2DMSARRAY;
if (params->args_count < 1 + multisampled || params->args_count > 3 + multisampled)
{
FIXME("'Load' method for multi-sample textures.\n");
hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_WRONG_PARAMETER_COUNT,
"Wrong number of arguments to method 'Load': expected between %u and %u, but got %u.",
1 + multisampled, 3 + multisampled, params->args_count);
return false;
}
if (multisampled)
{
hlsl_fixme(ctx, loc, "Load() sampling index parameter.");
}
if (params->args_count < 1 || params->args_count > 3)
assert(offset_dim);
if (params->args_count > 1 + multisampled)
{
hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_WRONG_PARAMETER_COUNT
,
"Wrong number of arguments to method 'Load': expected 1, 2, or 3, but got %u.", params->args_count);
return false;
if (!(offset = add_implicit_conversion(ctx, instrs, params->args[1 + multisampled]
,
hlsl_get_vector_type(ctx, HLSL_TYPE_INT, offset_dim), loc)))
return false;
}
if (params->args_count >= 2)
hlsl_fixme(ctx, loc, "Offset parameter.");
if (params->args_count == 3)
if (params->args_count > 2 + multisampled)
{
hlsl_fixme(ctx, loc, "Tiled resource status argument.");
}
/* +1 for the mipmap level */
if (!(coords = add_implicit_conversion(ctx, instrs, params->args[0],
...
...
@@ -2389,7 +2580,7 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
return false;
if (!(load = hlsl_new_resource_load(ctx, object_type->e.resource_format, HLSL_RESOURCE_LOAD,
object_load->src.var, object_load->src.offset.node, NULL, NULL, coords, NULL
, loc)))
&object_load->src, NULL, coords, offset
, loc)))
return false;
list_add_tail(instrs, &load->node.entry);
return true;
...
...
@@ -2399,16 +2590,18 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
&& object_type->sampler_dim != HLSL_SAMPLER_DIM_2DMSARRAY)
{
const unsigned int sampler_dim = hlsl_sampler_dim_count(object_type->sampler_dim);
const unsigned int offset_dim = hlsl_offset_dim_count(object_type->sampler_dim);
const struct hlsl_type *sampler_type;
struct hlsl_ir_resource_load *load;
struct hlsl_ir_node *offset = NULL;
struct hlsl_ir_load *sampler_load;
struct hlsl_ir_node *coords;
if (params->args_count
!= 2 && params->args_count != 3
)
if (params->args_count
< 2 || params->args_count > 4 + !!offset_dim
)
{
hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_WRONG_PARAMETER_COUNT,
"Wrong number of arguments to method 'Sample': expected 2 or 3, but got %u.", params->args_count);
"Wrong number of arguments to method 'Sample': expected from 2 to %u, but got %u.",
4 + !!offset_dim, params->args_count);
return false;
}
...
...
@@ -2432,18 +2625,23 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc)))
return false;
if (
params->args_count == 3
)
if (
offset_dim && params->args_count > 2
)
{
if (!(offset = add_implicit_conversion(ctx, instrs, params->args[2],
hlsl_get_vector_type(ctx, HLSL_TYPE_INT,
sampler
_dim), loc)))
hlsl_get_vector_type(ctx, HLSL_TYPE_INT,
offset
_dim), loc)))
return false;
}
if (params->args_count > 2 + !!offset_dim)
hlsl_fixme(ctx, loc, "Sample() clamp parameter.");
if (params->args_count > 3 + !!offset_dim)
hlsl_fixme(ctx, loc, "Tiled resource status argument.");
if (!(load = hlsl_new_resource_load(ctx, object_type->e.resource_format,
HLSL_RESOURCE_SAMPLE, object_load->src.var, object_load->src.offset.node,
sampler_load->src.var, sampler_load->src.offset.node, coords, offset, loc)))
HLSL_RESOURCE_SAMPLE, &object_load->src, &sampler_load->src, coords, offset, loc)))
return false;
list_add_tail(instrs, &load->node.entry);
return true;
}
else if ((!strcmp(name, "Gather") || !strcmp(name, "GatherRed") || !strcmp(name, "GatherBlue")
...
...
@@ -2454,6 +2652,7 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
|| object_type->sampler_dim == HLSL_SAMPLER_DIM_CUBEARRAY))
{
const unsigned int sampler_dim = hlsl_sampler_dim_count(object_type->sampler_dim);
const unsigned int offset_dim = hlsl_offset_dim_count(object_type->sampler_dim);
enum hlsl_resource_load_type load_type;
const struct hlsl_type *sampler_type;
struct hlsl_ir_resource_load *load;
...
...
@@ -2484,12 +2683,13 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
read_channel = 0;
}
if (!strcmp(name, "Gather"))
if (!strcmp(name, "Gather")
|| !offset_dim
)
{
if (params->args_count
!= 2 && params->args_count != 3
)
if (params->args_count
< 2 && params->args_count > 3 + !!offset_dim
)
{
hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_WRONG_PARAMETER_COUNT,
"Wrong number of arguments to method 'Gather': expected 2 or 3, but got %u.", params->args_count);
"Wrong number of arguments to method '%s': expected from 2 to %u, but got %u.",
name, 3 + !!offset_dim, params->args_count);
return false;
}
}
...
...
@@ -2501,16 +2701,17 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
return false;
}
if (params->args_count ==
4
|| params->args_count == 7)
if (params->args_count ==
3 + !!offset_dim
|| params->args_count == 7)
hlsl_fixme(ctx, loc, "Tiled resource status argument.");
if (params->args_count == 6 || params->args_count == 7)
hlsl_fixme(ctx, loc, "Multiple Gather() offset parameters.");
if (params->args_count == 3 || params->args_count == 4)
{
hlsl_fixme(ctx, loc, "Multiple %s() offset parameters.", name);
}
else if (offset_dim && params->args_count > 2)
{
if (!(offset = add_implicit_conversion(ctx, instrs, params->args[2],
hlsl_get_vector_type(ctx, HLSL_TYPE_INT,
sampler
_dim), loc)))
hlsl_get_vector_type(ctx, HLSL_TYPE_INT,
offset
_dim), loc)))
return false;
}
...
...
@@ -2543,9 +2744,68 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc)))
return false;
if (!(load = hlsl_new_resource_load(ctx, result_type,
load_type, object_load->src.var, object_load->src.offset.node,
sampler_load->src.var, sampler_load->src.offset.node, coords, offset, loc)))
if (!(load = hlsl_new_resource_load(ctx, result_type, load_type, &object_load->src,
&sampler_load->src, coords, offset, loc)))
return false;
list_add_tail(instrs, &load->node.entry);
return true;
}
else if (!strcmp(name, "SampleLevel")
&& object_type->sampler_dim != HLSL_SAMPLER_DIM_2DMS
&& object_type->sampler_dim != HLSL_SAMPLER_DIM_2DMSARRAY)
{
const unsigned int sampler_dim = hlsl_sampler_dim_count(object_type->sampler_dim);
const unsigned int offset_dim = hlsl_offset_dim_count(object_type->sampler_dim);
const struct hlsl_type *sampler_type;
struct hlsl_ir_resource_load *load;
struct hlsl_ir_node *offset = NULL;
struct hlsl_ir_load *sampler_load;
struct hlsl_ir_node *coords, *lod;
if (params->args_count < 3 || params->args_count > 4 + !!offset_dim)
{
hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_WRONG_PARAMETER_COUNT,
"Wrong number of arguments to method 'SampleLevel': expected from 3 to %u, but got %u.",
4 + !!offset_dim, params->args_count);
return false;
}
sampler_type = params->args[0]->data_type;
if (sampler_type->type != HLSL_CLASS_OBJECT || sampler_type->base_type != HLSL_TYPE_SAMPLER
|| sampler_type->sampler_dim != HLSL_SAMPLER_DIM_GENERIC)
{
struct vkd3d_string_buffer *string;
if ((string = hlsl_type_to_string(ctx, sampler_type)))
hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
"Wrong type for argument 0 of SampleLevel(): expected 'sampler', but got '%s'.", string->buffer);
hlsl_release_string_buffer(ctx, string);
return false;
}
/* Only HLSL_IR_LOAD can return an object. */
sampler_load = hlsl_ir_load(params->args[0]);
if (!(coords = add_implicit_conversion(ctx, instrs, params->args[1],
hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc)))
coords = params->args[1];
if (!(lod = add_implicit_conversion(ctx, instrs, params->args[2],
hlsl_get_scalar_type(ctx, HLSL_TYPE_FLOAT), loc)))
lod = params->args[2];
if (offset_dim && params->args_count > 3)
{
if (!(offset = add_implicit_conversion(ctx, instrs, params->args[3],
hlsl_get_vector_type(ctx, HLSL_TYPE_INT, offset_dim), loc)))
return false;
}
if (params->args_count > 3 + !!offset_dim)
hlsl_fixme(ctx, loc, "Tiled resource status argument.");
if (!(load = hlsl_new_sample_lod(ctx, object_type->e.resource_format,
&object_load->src, &sampler_load->src, coords, offset, lod, loc)))
return false;
list_add_tail(instrs, &load->node.entry);
return true;
...
...
@@ -2583,6 +2843,7 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
DWORD modifiers;
struct hlsl_ir_node *instr;
struct list *list;
struct parse_fields fields;
struct parse_function function;
struct parse_parameter parameter;
struct parse_initializer initializer;
...
...
@@ -2711,8 +2972,6 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
%type <list> equality_expr
%type <list> expr
%type <list> expr_statement
%type <list> field
%type <list> fields_list
%type <list> initializer_expr
%type <list> jump_statement
%type <list> logicand_expr
...
...
@@ -2749,6 +3008,9 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
%type <colon_attribute> colon_attribute
%type <fields> field
%type <fields> fields_list
%type <function> func_declaration
%type <function> func_prototype
...
...
@@ -2904,7 +3166,7 @@ named_struct_spec:
{
bool ret;
$$ = hlsl_new_struct_type(ctx, $2, $4);
$$ = hlsl_new_struct_type(ctx, $2, $4
.fields, $4.count
);
if (hlsl_get_var(ctx->cur_scope, $2))
{
...
...
@@ -2923,7 +3185,7 @@ named_struct_spec:
unnamed_struct_spec:
KW_STRUCT '{' fields_list '}'
{
$$ = hlsl_new_struct_type(ctx, NULL, $3);
$$ = hlsl_new_struct_type(ctx, NULL, $3
.fields, $3.count
);
}
any_identifier:
...
...
@@ -2934,30 +3196,35 @@ any_identifier:
fields_list:
%empty
{
if (!($$ = make_empty_list(ctx)))
YYABORT;
$$.fields = NULL;
$$.count = 0;
$$.capacity = 0;
}
| fields_list field
{
s
truct hlsl_struct_field *field, *next, *existing
;
s
ize_t i
;
$$ = $1;
LIST_FOR_EACH_ENTRY_SAFE(field, next, $2, struct hlsl_struct_field, entry)
for (i = 0; i < $2.count; ++i)
{
if ((existing = get_struct_field($$, field->name)))
const struct hlsl_struct_field *field = &$2.fields[i];
const struct hlsl_struct_field *existing;
if ((existing = get_struct_field($1.fields, $1.count, field->name)))
{
hlsl_error(ctx, &
@2
, VKD3D_SHADER_ERROR_HLSL_REDEFINED,
hlsl_error(ctx, &
field->loc
, VKD3D_SHADER_ERROR_HLSL_REDEFINED,
"Field \"%s\" is already defined.", field->name);
hlsl_note(ctx, &existing->loc, VKD3D_SHADER_LOG_ERROR,
"'%s' was previously defined here.", field->name);
vkd3d_free(field);
}
else
{
list_add_tail($$, &field->entry);
}
}
vkd3d_free($2);
if (!hlsl_array_reserve(ctx, (void **)&$1.fields, &$1.capacity, $1.count + $2.count, sizeof(*$1.fields)))
YYABORT;
memcpy($1.fields + $1.count, $2.fields, $2.count * sizeof(*$2.fields));
$1.count += $2.count;
vkd3d_free($2.fields);
$$ = $1;
}
field_type:
...
...
@@ -2981,7 +3248,8 @@ field:
"Modifiers '%s' are not allowed on struct fields.", string->buffer);
hlsl_release_string_buffer(ctx, string);
}
$$ = gen_struct_fields(ctx, type, modifiers, $3);
if (!gen_struct_fields(ctx, &$$, type, modifiers, $3))
YYABORT;
}
func_declaration:
...
...
@@ -3467,6 +3735,21 @@ arrays:
$$.sizes = new_array;
$$.sizes[$$.count++] = size;
}
| '[' ']' arrays
{
uint32_t *new_array;
$$ = $3;
if (!(new_array = hlsl_realloc(ctx, $$.sizes, ($$.count + 1) * sizeof(*new_array))))
{
vkd3d_free($$.sizes);
YYABORT;
}
$$.sizes = new_array;
$$.sizes[$$.count++] = HLSL_ARRAY_ELEMENTS_COUNT_IMPLICIT;
}
var_modifiers:
%empty
...
...
@@ -3838,15 +4121,17 @@ postfix_expr:
if (node->data_type->type == HLSL_CLASS_STRUCT)
{
struct hlsl_type *type = node->data_type;
struct hlsl_struct_field *field;
const struct hlsl_struct_field *field;
unsigned int field_idx = 0;
if (!(field = get_struct_field(type->e.
elements
, $3)))
if (!(field = get_struct_field(type->e.
record.fields, type->e.record.field_count
, $3)))
{
hlsl_error(ctx, &@3, VKD3D_SHADER_ERROR_HLSL_NOT_DEFINED, "Field \"%s\" is not defined.", $3);
YYABORT;
}
if (!add_record_load(ctx, $1, node, field, @2))
field_idx = field - type->e.record.fields;
if (!add_record_load(ctx, $1, node, field_idx, @2))
YYABORT;
$$ = $1;
}
...
...
@@ -4006,7 +4291,14 @@ unary_expr:
dst_type = $3;
for (i = 0; i < $4.count; ++i)
{
if ($4.sizes[i] == HLSL_ARRAY_ELEMENTS_COUNT_IMPLICIT)
{
hlsl_error(ctx, &@3, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
"Implicit size arrays not allowed in casts.");
}
dst_type = hlsl_new_array_type(ctx, dst_type, $4.sizes[i]);
}
if (!compatible_data_types(src_type, dst_type))
{
...
...
libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c
View file @
c3025582
...
...
@@ -21,20 +21,150 @@
#include "hlsl.h"
#include <stdio.h>
static
unsigned
int
minor_size
(
const
struct
hlsl_type
*
type
)
/* TODO: remove when no longer needed, only used for new_offset_instr_from_deref() */
static
struct
hlsl_ir_node
*
new_offset_from_path_index
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_block
*
block
,
struct
hlsl_type
*
type
,
struct
hlsl_ir_node
*
offset
,
struct
hlsl_ir_node
*
idx
,
const
struct
vkd3d_shader_location
*
loc
)
{
if
(
type
->
modifiers
&
HLSL_MODIFIER_ROW_MAJOR
)
return
type
->
dimx
;
else
return
type
->
dimy
;
struct
hlsl_ir_node
*
idx_offset
=
NULL
;
struct
hlsl_ir_constant
*
c
;
list_init
(
&
block
->
instrs
);
switch
(
type
->
type
)
{
case
HLSL_CLASS_VECTOR
:
idx_offset
=
idx
;
break
;
case
HLSL_CLASS_MATRIX
:
{
if
(
!
(
c
=
hlsl_new_uint_constant
(
ctx
,
4
,
loc
)))
return
NULL
;
list_add_tail
(
&
block
->
instrs
,
&
c
->
node
.
entry
);
if
(
!
(
idx_offset
=
hlsl_new_binary_expr
(
ctx
,
HLSL_OP2_MUL
,
&
c
->
node
,
idx
)))
return
NULL
;
list_add_tail
(
&
block
->
instrs
,
&
idx_offset
->
entry
);
break
;
}
case
HLSL_CLASS_ARRAY
:
{
unsigned
int
size
=
hlsl_type_get_array_element_reg_size
(
type
->
e
.
array
.
type
);
if
(
!
(
c
=
hlsl_new_uint_constant
(
ctx
,
size
,
loc
)))
return
NULL
;
list_add_tail
(
&
block
->
instrs
,
&
c
->
node
.
entry
);
if
(
!
(
idx_offset
=
hlsl_new_binary_expr
(
ctx
,
HLSL_OP2_MUL
,
&
c
->
node
,
idx
)))
return
NULL
;
list_add_tail
(
&
block
->
instrs
,
&
idx_offset
->
entry
);
break
;
}
case
HLSL_CLASS_STRUCT
:
{
unsigned
int
field_idx
=
hlsl_ir_constant
(
idx
)
->
value
[
0
].
u
;
struct
hlsl_struct_field
*
field
=
&
type
->
e
.
record
.
fields
[
field_idx
];
if
(
!
(
c
=
hlsl_new_uint_constant
(
ctx
,
field
->
reg_offset
,
loc
)))
return
NULL
;
list_add_tail
(
&
block
->
instrs
,
&
c
->
node
.
entry
);
idx_offset
=
&
c
->
node
;
break
;
}
default:
assert
(
0
);
return
NULL
;
}
if
(
offset
)
{
if
(
!
(
idx_offset
=
hlsl_new_binary_expr
(
ctx
,
HLSL_OP2_ADD
,
offset
,
idx_offset
)))
return
NULL
;
list_add_tail
(
&
block
->
instrs
,
&
idx_offset
->
entry
);
}
return
idx_offset
;
}
static
unsigned
int
major_size
(
const
struct
hlsl_type
*
type
)
/* TODO: remove when no longer needed, only used for replace_deref_path_with_offset() */
static
struct
hlsl_ir_node
*
new_offset_instr_from_deref
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_block
*
block
,
const
struct
hlsl_deref
*
deref
,
const
struct
vkd3d_shader_location
*
loc
)
{
if
(
type
->
modifiers
&
HLSL_MODIFIER_ROW_MAJOR
)
return
type
->
dimy
;
else
return
type
->
dimx
;
struct
hlsl_ir_node
*
offset
=
NULL
;
struct
hlsl_type
*
type
;
unsigned
int
i
;
list_init
(
&
block
->
instrs
);
assert
(
deref
->
var
);
type
=
deref
->
var
->
data_type
;
for
(
i
=
0
;
i
<
deref
->
path_len
;
++
i
)
{
struct
hlsl_block
idx_block
;
if
(
!
(
offset
=
new_offset_from_path_index
(
ctx
,
&
idx_block
,
type
,
offset
,
deref
->
path
[
i
].
node
,
loc
)))
return
NULL
;
list_move_tail
(
&
block
->
instrs
,
&
idx_block
.
instrs
);
type
=
hlsl_get_element_type_from_path_index
(
ctx
,
type
,
deref
->
path
[
i
].
node
);
}
return
offset
;
}
/* TODO: remove when no longer needed, only used for transform_deref_paths_into_offsets() */
static
void
replace_deref_path_with_offset
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_deref
*
deref
,
struct
hlsl_ir_node
*
instr
)
{
struct
hlsl_ir_node
*
offset
;
struct
hlsl_block
block
;
if
(
!
deref
->
var
)
return
;
/* register offsets shouldn't be used before this point is reached. */
assert
(
!
deref
->
offset
.
node
);
if
(
!
(
offset
=
new_offset_instr_from_deref
(
ctx
,
&
block
,
deref
,
&
instr
->
loc
)))
return
;
list_move_before
(
&
instr
->
entry
,
&
block
.
instrs
);
hlsl_cleanup_deref
(
deref
);
hlsl_src_from_node
(
&
deref
->
offset
,
offset
);
}
/* TODO: remove when no longer needed. */
static
bool
transform_deref_paths_into_offsets
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_node
*
instr
,
void
*
context
)
{
switch
(
instr
->
type
)
{
case
HLSL_IR_LOAD
:
replace_deref_path_with_offset
(
ctx
,
&
hlsl_ir_load
(
instr
)
->
src
,
instr
);
return
true
;
case
HLSL_IR_STORE
:
replace_deref_path_with_offset
(
ctx
,
&
hlsl_ir_store
(
instr
)
->
lhs
,
instr
);
return
true
;
case
HLSL_IR_RESOURCE_LOAD
:
replace_deref_path_with_offset
(
ctx
,
&
hlsl_ir_resource_load
(
instr
)
->
resource
,
instr
);
replace_deref_path_with_offset
(
ctx
,
&
hlsl_ir_resource_load
(
instr
)
->
sampler
,
instr
);
return
true
;
default:
return
false
;
}
return
false
;
}
/* Split uniforms into two variables representing the constant and temp
...
...
@@ -74,78 +204,113 @@ static void prepend_uniform_copy(struct hlsl_ctx *ctx, struct list *instrs, stru
list_add_after
(
&
load
->
node
.
entry
,
&
store
->
node
.
entry
);
}
static
void
prepend_input_copy
(
struct
hlsl_ctx
*
ctx
,
struct
list
*
instrs
,
struct
hlsl_ir_var
*
var
,
struct
hlsl_type
*
type
,
unsigned
int
field_offset
,
unsigned
int
modifiers
,
const
struct
hlsl_semantic
*
semantic
)
static
struct
hlsl_ir_var
*
add_semantic_var
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_var
*
var
,
struct
hlsl_type
*
type
,
unsigned
int
modifiers
,
const
struct
hlsl_semantic
*
semantic
,
bool
output
)
{
struct
vkd3d_string_buffer
*
name
;
struct
hlsl_semantic
new_semantic
;
struct
hlsl_ir_constant
*
offset
;
struct
hlsl_ir_store
*
store
;
struct
hlsl_ir_load
*
load
;
struct
hlsl_ir_var
*
input
;
if
(
type
->
type
==
HLSL_CLASS_MATRIX
)
{
struct
hlsl_type
*
vector_type
=
hlsl_get_vector_type
(
ctx
,
type
->
base_type
,
minor_size
(
type
));
struct
hlsl_semantic
vector_semantic
=
*
semantic
;
unsigned
int
i
;
for
(
i
=
0
;
i
<
major_size
(
type
);
++
i
)
{
prepend_input_copy
(
ctx
,
instrs
,
var
,
vector_type
,
4
*
i
,
modifiers
,
&
vector_semantic
);
++
vector_semantic
.
index
;
}
return
;
}
struct
vkd3d_string_buffer
*
name
;
struct
hlsl_ir_var
*
ext_var
;
if
(
!
(
name
=
hlsl_get_string_buffer
(
ctx
)))
return
;
vkd3d_string_buffer_printf
(
name
,
"<
input-%s%u>
"
,
semantic
->
name
,
semantic
->
index
);
return
NULL
;
vkd3d_string_buffer_printf
(
name
,
"<
%s-%s%u>"
,
output
?
"output"
:
"input
"
,
semantic
->
name
,
semantic
->
index
);
if
(
!
(
new_semantic
.
name
=
hlsl_strdup
(
ctx
,
semantic
->
name
)))
{
hlsl_release_string_buffer
(
ctx
,
name
);
return
;
return
NULL
;
}
new_semantic
.
index
=
semantic
->
index
;
if
(
!
(
input
=
hlsl_new_var
(
ctx
,
hlsl_strdup
(
ctx
,
name
->
buffer
),
if
(
!
(
ext_var
=
hlsl_new_var
(
ctx
,
hlsl_strdup
(
ctx
,
name
->
buffer
),
type
,
var
->
loc
,
&
new_semantic
,
modifiers
,
NULL
)))
{
hlsl_release_string_buffer
(
ctx
,
name
);
vkd3d_free
((
void
*
)
new_semantic
.
name
);
return
;
return
NULL
;
}
hlsl_release_string_buffer
(
ctx
,
name
);
input
->
is_input_semantic
=
1
;
input
->
is_param
=
var
->
is_param
;
list_add_before
(
&
var
->
scope_entry
,
&
input
->
scope_entry
);
list_add_tail
(
&
ctx
->
extern_vars
,
&
input
->
extern_entry
);
if
(
output
)
ext_var
->
is_output_semantic
=
1
;
else
ext_var
->
is_input_semantic
=
1
;
ext_var
->
is_param
=
var
->
is_param
;
list_add_before
(
&
var
->
scope_entry
,
&
ext_var
->
scope_entry
);
list_add_tail
(
&
ctx
->
extern_vars
,
&
ext_var
->
extern_entry
);
if
(
!
(
load
=
hlsl_new_var_load
(
ctx
,
input
,
var
->
loc
)))
return
;
list_add_head
(
instrs
,
&
load
->
node
.
entry
);
return
ext_var
;
}
if
(
!
(
offset
=
hlsl_new_uint_constant
(
ctx
,
field_offset
,
&
var
->
loc
)))
return
;
list_add_after
(
&
load
->
node
.
entry
,
&
offset
->
node
.
entry
);
static
void
prepend_input_copy
(
struct
hlsl_ctx
*
ctx
,
struct
list
*
instrs
,
struct
hlsl_ir_load
*
lhs
,
unsigned
int
modifiers
,
const
struct
hlsl_semantic
*
semantic
)
{
struct
hlsl_type
*
type
=
lhs
->
node
.
data_type
,
*
vector_type
;
struct
hlsl_ir_var
*
var
=
lhs
->
src
.
var
;
unsigned
int
i
;
if
(
!
(
store
=
hlsl_new_store
(
ctx
,
var
,
&
offset
->
node
,
&
load
->
node
,
0
,
var
->
loc
)))
return
;
list_add_after
(
&
offset
->
node
.
entry
,
&
store
->
node
.
entry
);
vector_type
=
hlsl_get_vector_type
(
ctx
,
type
->
base_type
,
hlsl_type_minor_size
(
type
));
for
(
i
=
0
;
i
<
hlsl_type_major_size
(
type
);
++
i
)
{
struct
hlsl_semantic
semantic_copy
=
*
semantic
;
struct
hlsl_ir_store
*
store
;
struct
hlsl_ir_constant
*
c
;
struct
hlsl_ir_var
*
input
;
struct
hlsl_ir_load
*
load
;
semantic_copy
.
index
=
semantic
->
index
+
i
;
if
(
!
(
input
=
add_semantic_var
(
ctx
,
var
,
vector_type
,
modifiers
,
&
semantic_copy
,
false
)))
return
;
if
(
!
(
load
=
hlsl_new_var_load
(
ctx
,
input
,
var
->
loc
)))
return
;
list_add_after
(
&
lhs
->
node
.
entry
,
&
load
->
node
.
entry
);
if
(
type
->
type
==
HLSL_CLASS_MATRIX
)
{
if
(
!
(
c
=
hlsl_new_uint_constant
(
ctx
,
i
,
&
var
->
loc
)))
return
;
list_add_after
(
&
load
->
node
.
entry
,
&
c
->
node
.
entry
);
if
(
!
(
store
=
hlsl_new_store_index
(
ctx
,
&
lhs
->
src
,
&
c
->
node
,
&
load
->
node
,
0
,
&
var
->
loc
)))
return
;
list_add_after
(
&
c
->
node
.
entry
,
&
store
->
node
.
entry
);
}
else
{
assert
(
i
==
0
);
if
(
!
(
store
=
hlsl_new_store_index
(
ctx
,
&
lhs
->
src
,
NULL
,
&
load
->
node
,
0
,
&
var
->
loc
)))
return
;
list_add_after
(
&
load
->
node
.
entry
,
&
store
->
node
.
entry
);
}
}
}
static
void
prepend_input_struct_copy
(
struct
hlsl_ctx
*
ctx
,
struct
list
*
instrs
,
struct
hlsl_ir_var
*
var
,
struct
hlsl_type
*
type
,
unsigned
int
field_offset
)
static
void
prepend_input_struct_copy
(
struct
hlsl_ctx
*
ctx
,
struct
list
*
instrs
,
struct
hlsl_ir_load
*
lhs
)
{
struct
hlsl_struct_field
*
field
;
struct
hlsl_type
*
type
=
lhs
->
node
.
data_type
;
struct
hlsl_ir_var
*
var
=
lhs
->
src
.
var
;
size_t
i
;
LIST_FOR_EACH_ENTRY
(
field
,
type
->
e
.
elements
,
struct
hlsl_struct_field
,
entry
)
for
(
i
=
0
;
i
<
type
->
e
.
record
.
field_count
;
++
i
)
{
const
struct
hlsl_struct_field
*
field
=
&
type
->
e
.
record
.
fields
[
i
];
struct
hlsl_ir_load
*
field_load
;
struct
hlsl_ir_constant
*
c
;
if
(
!
(
c
=
hlsl_new_uint_constant
(
ctx
,
i
,
&
var
->
loc
)))
return
;
list_add_after
(
&
lhs
->
node
.
entry
,
&
c
->
node
.
entry
);
/* This redudant load is expected to be deleted later by DCE */
if
(
!
(
field_load
=
hlsl_new_load_index
(
ctx
,
&
lhs
->
src
,
&
c
->
node
,
&
var
->
loc
)))
return
;
list_add_after
(
&
c
->
node
.
entry
,
&
field_load
->
node
.
entry
);
if
(
field
->
type
->
type
==
HLSL_CLASS_STRUCT
)
prepend_input_struct_copy
(
ctx
,
instrs
,
var
,
field
->
type
,
field_offset
+
field
->
reg_offset
);
prepend_input_struct_copy
(
ctx
,
instrs
,
field_load
);
else
if
(
field
->
semantic
.
name
)
prepend_input_copy
(
ctx
,
instrs
,
var
,
field
->
type
,
field_offset
+
field
->
reg_offset
,
field
->
modifiers
,
&
field
->
semantic
);
prepend_input_copy
(
ctx
,
instrs
,
field_load
,
field
->
modifiers
,
&
field
->
semantic
);
else
hlsl_error
(
ctx
,
&
field
->
loc
,
VKD3D_SHADER_ERROR_HLSL_MISSING_SEMANTIC
,
"Field '%s' is missing a semantic."
,
field
->
name
);
...
...
@@ -156,84 +321,91 @@ static void prepend_input_struct_copy(struct hlsl_ctx *ctx, struct list *instrs,
* and copy the former to the latter, so that writes to input variables work. */
static
void
prepend_input_var_copy
(
struct
hlsl_ctx
*
ctx
,
struct
list
*
instrs
,
struct
hlsl_ir_var
*
var
)
{
struct
hlsl_ir_load
*
load
;
/* This redudant load is expected to be deleted later by DCE */
if
(
!
(
load
=
hlsl_new_var_load
(
ctx
,
var
,
var
->
loc
)))
return
;
list_add_head
(
instrs
,
&
load
->
node
.
entry
);
if
(
var
->
data_type
->
type
==
HLSL_CLASS_STRUCT
)
prepend_input_struct_copy
(
ctx
,
instrs
,
var
,
var
->
data_type
,
0
);
prepend_input_struct_copy
(
ctx
,
instrs
,
load
);
else
if
(
var
->
semantic
.
name
)
prepend_input_copy
(
ctx
,
instrs
,
var
,
var
->
data_type
,
0
,
var
->
modifiers
,
&
var
->
semantic
);
prepend_input_copy
(
ctx
,
instrs
,
load
,
var
->
modifiers
,
&
var
->
semantic
);
}
static
void
append_output_copy
(
struct
hlsl_ctx
*
ctx
,
struct
list
*
instrs
,
struct
hlsl_ir_
var
*
var
,
struct
hlsl_type
*
type
,
unsigned
int
field_offset
,
unsigned
int
modifiers
,
const
struct
hlsl_semantic
*
semantic
)
static
void
append_output_copy
(
struct
hlsl_ctx
*
ctx
,
struct
list
*
instrs
,
struct
hlsl_ir_
load
*
rhs
,
unsigned
int
modifiers
,
const
struct
hlsl_semantic
*
semantic
)
{
struct
vkd3d_string_buffer
*
name
;
struct
hlsl_semantic
new_semantic
;
struct
hlsl_ir_constant
*
offset
;
struct
hlsl_ir_store
*
store
;
struct
hlsl_ir_var
*
output
;
struct
hlsl_ir_load
*
load
;
struct
hlsl_type
*
type
=
rhs
->
node
.
data_type
,
*
vector_type
;
struct
hlsl_ir_var
*
var
=
rhs
->
src
.
var
;
unsigned
int
i
;
if
(
type
->
type
==
HLSL_CLASS_MATRIX
)
vector_type
=
hlsl_get_vector_type
(
ctx
,
type
->
base_type
,
hlsl_type_minor_size
(
type
));
for
(
i
=
0
;
i
<
hlsl_type_major_size
(
type
);
++
i
)
{
struct
hlsl_type
*
vector_type
=
hlsl_get_vector_type
(
ctx
,
type
->
base_type
,
minor_size
(
type
));
struct
hlsl_semantic
vector_semantic
=
*
semantic
;
unsigned
int
i
;
struct
hlsl_semantic
semantic_copy
=
*
semantic
;
struct
hlsl_ir_store
*
store
;
struct
hlsl_ir_constant
*
c
;
struct
hlsl_ir_var
*
output
;
struct
hlsl_ir_load
*
load
;
for
(
i
=
0
;
i
<
major_size
(
type
);
++
i
)
{
append_output_copy
(
ctx
,
instrs
,
var
,
vector_type
,
4
*
i
,
modifiers
,
&
vector_semantic
);
++
vector_semantic
.
index
;
}
semantic_copy
.
index
=
semantic
->
index
+
i
;
return
;
}
if
(
!
(
output
=
add_semantic_var
(
ctx
,
var
,
vector_type
,
modifiers
,
&
semantic_copy
,
true
)))
return
;
if
(
!
(
name
=
hlsl_get_string_buffer
(
ctx
)))
return
;
vkd3d_string_buffer_printf
(
name
,
"<output-%s%u>"
,
semantic
->
name
,
semantic
->
index
);
if
(
!
(
new_semantic
.
name
=
hlsl_strdup
(
ctx
,
semantic
->
name
)))
{
hlsl_release_string_buffer
(
ctx
,
name
);
return
;
}
new_semantic
.
index
=
semantic
->
index
;
if
(
!
(
output
=
hlsl_new_var
(
ctx
,
hlsl_strdup
(
ctx
,
name
->
buffer
),
type
,
var
->
loc
,
&
new_semantic
,
modifiers
,
NULL
)))
{
vkd3d_free
((
void
*
)
new_semantic
.
name
);
hlsl_release_string_buffer
(
ctx
,
name
);
return
;
}
hlsl_release_string_buffer
(
ctx
,
name
);
output
->
is_output_semantic
=
1
;
output
->
is_param
=
var
->
is_param
;
list_add_before
(
&
var
->
scope_entry
,
&
output
->
scope_entry
);
list_add_tail
(
&
ctx
->
extern_vars
,
&
output
->
extern_entry
);
if
(
type
->
type
==
HLSL_CLASS_MATRIX
)
{
if
(
!
(
c
=
hlsl_new_uint_constant
(
ctx
,
i
,
&
var
->
loc
)))
return
;
list_add_tail
(
instrs
,
&
c
->
node
.
entry
);
if
(
!
(
offset
=
hlsl_new_uint_constant
(
ctx
,
field_offset
,
&
var
->
loc
)))
return
;
list_add_tail
(
instrs
,
&
offset
->
node
.
entry
);
if
(
!
(
load
=
hlsl_new_load_index
(
ctx
,
&
rhs
->
src
,
&
c
->
node
,
&
var
->
loc
)))
return
;
list_add_tail
(
instrs
,
&
load
->
node
.
entry
);
}
else
{
assert
(
i
==
0
);
if
(
!
(
load
=
hlsl_new_load
(
ctx
,
var
,
&
offset
->
node
,
type
,
var
->
loc
)))
return
;
list_add_after
(
&
offset
->
node
.
entry
,
&
load
->
node
.
entry
);
if
(
!
(
load
=
hlsl_new_load_index
(
ctx
,
&
rhs
->
src
,
NULL
,
&
var
->
loc
)))
return
;
list_add_tail
(
instrs
,
&
load
->
node
.
entry
);
}
if
(
!
(
store
=
hlsl_new_store
(
ctx
,
output
,
NULL
,
&
load
->
node
,
0
,
var
->
loc
)))
return
;
list_add_after
(
&
load
->
node
.
entry
,
&
store
->
node
.
entry
);
if
(
!
(
store
=
hlsl_new_simple_store
(
ctx
,
output
,
&
load
->
node
)))
return
;
list_add_tail
(
instrs
,
&
store
->
node
.
entry
);
}
}
static
void
append_output_struct_copy
(
struct
hlsl_ctx
*
ctx
,
struct
list
*
instrs
,
struct
hlsl_ir_var
*
var
,
struct
hlsl_type
*
type
,
unsigned
int
field_offset
)
static
void
append_output_struct_copy
(
struct
hlsl_ctx
*
ctx
,
struct
list
*
instrs
,
struct
hlsl_ir_load
*
rhs
)
{
struct
hlsl_struct_field
*
field
;
struct
hlsl_type
*
type
=
rhs
->
node
.
data_type
;
struct
hlsl_ir_var
*
var
=
rhs
->
src
.
var
;
size_t
i
;
LIST_FOR_EACH_ENTRY
(
field
,
type
->
e
.
elements
,
struct
hlsl_struct_field
,
entry
)
for
(
i
=
0
;
i
<
type
->
e
.
record
.
field_count
;
++
i
)
{
const
struct
hlsl_struct_field
*
field
=
&
type
->
e
.
record
.
fields
[
i
];
struct
hlsl_ir_load
*
field_load
;
struct
hlsl_ir_constant
*
c
;
if
(
!
(
c
=
hlsl_new_uint_constant
(
ctx
,
i
,
&
var
->
loc
)))
return
;
list_add_tail
(
instrs
,
&
c
->
node
.
entry
);
/* This redudant load is expected to be deleted later by DCE */
if
(
!
(
field_load
=
hlsl_new_load_index
(
ctx
,
&
rhs
->
src
,
&
c
->
node
,
&
var
->
loc
)))
return
;
list_add_tail
(
instrs
,
&
field_load
->
node
.
entry
);
if
(
field
->
type
->
type
==
HLSL_CLASS_STRUCT
)
append_output_struct_copy
(
ctx
,
instrs
,
var
,
field
->
type
,
field_offset
+
field
->
reg_offset
);
append_output_struct_copy
(
ctx
,
instrs
,
field_load
);
else
if
(
field
->
semantic
.
name
)
append_output_copy
(
ctx
,
instrs
,
var
,
field
->
type
,
field_offset
+
field
->
reg_offset
,
field
->
modifiers
,
&
field
->
semantic
);
append_output_copy
(
ctx
,
instrs
,
field_load
,
field
->
modifiers
,
&
field
->
semantic
);
else
hlsl_error
(
ctx
,
&
field
->
loc
,
VKD3D_SHADER_ERROR_HLSL_MISSING_SEMANTIC
,
"Field '%s' is missing a semantic."
,
field
->
name
);
...
...
@@ -245,10 +417,17 @@ static void append_output_struct_copy(struct hlsl_ctx *ctx, struct list *instrs,
* variables work. */
static
void
append_output_var_copy
(
struct
hlsl_ctx
*
ctx
,
struct
list
*
instrs
,
struct
hlsl_ir_var
*
var
)
{
struct
hlsl_ir_load
*
load
;
/* This redudant load is expected to be deleted later by DCE */
if
(
!
(
load
=
hlsl_new_var_load
(
ctx
,
var
,
var
->
loc
)))
return
;
list_add_tail
(
instrs
,
&
load
->
node
.
entry
);
if
(
var
->
data_type
->
type
==
HLSL_CLASS_STRUCT
)
append_output_struct_copy
(
ctx
,
instrs
,
var
,
var
->
data_type
,
0
);
append_output_struct_copy
(
ctx
,
instrs
,
load
);
else
if
(
var
->
semantic
.
name
)
append_output_copy
(
ctx
,
instrs
,
var
,
var
->
data_type
,
0
,
var
->
modifiers
,
&
var
->
semantic
);
append_output_copy
(
ctx
,
instrs
,
load
,
var
->
modifiers
,
&
var
->
semantic
);
}
static
bool
transform_ir
(
struct
hlsl_ctx
*
ctx
,
bool
(
*
func
)(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_node
*
,
void
*
),
...
...
@@ -362,7 +541,7 @@ static void copy_propagation_var_def_destroy(struct rb_entry *entry, void *conte
}
static
struct
copy_propagation_value
*
copy_propagation_get_value
(
const
struct
copy_propagation_state
*
state
,
const
struct
hlsl_ir_var
*
var
,
unsigned
component
)
const
struct
hlsl_ir_var
*
var
,
unsigned
int
component
)
{
for
(;
state
;
state
=
state
->
parent
)
{
...
...
@@ -370,9 +549,11 @@ static struct copy_propagation_value *copy_propagation_get_value(const struct co
if
(
entry
)
{
struct
copy_propagation_var_def
*
var_def
=
RB_ENTRY_VALUE
(
entry
,
struct
copy_propagation_var_def
,
entry
);
enum
copy_propagation_value_state
state
=
var_def
->
values
[
component
].
state
;
unsigned
int
component_count
=
hlsl_type_component_count
(
var
->
data_type
);
enum
copy_propagation_value_state
state
;
assert
(
component
<
var_def
->
var
->
data_type
->
reg_size
);
assert
(
component
<
component_count
);
state
=
var_def
->
values
[
component
].
state
;
switch
(
state
)
{
...
...
@@ -394,12 +575,13 @@ static struct copy_propagation_var_def *copy_propagation_create_var_def(struct h
{
struct
rb_entry
*
entry
=
rb_get
(
&
state
->
var_defs
,
var
);
struct
copy_propagation_var_def
*
var_def
;
unsigned
int
component_count
=
hlsl_type_component_count
(
var
->
data_type
);
int
res
;
if
(
entry
)
return
RB_ENTRY_VALUE
(
entry
,
struct
copy_propagation_var_def
,
entry
);
if
(
!
(
var_def
=
hlsl_alloc
(
ctx
,
offsetof
(
struct
copy_propagation_var_def
,
values
[
var
->
data_type
->
reg_size
]))))
if
(
!
(
var_def
=
hlsl_alloc
(
ctx
,
offsetof
(
struct
copy_propagation_var_def
,
values
[
component_count
]))))
return
NULL
;
var_def
->
var
=
var
;
...
...
@@ -411,31 +593,75 @@ static struct copy_propagation_var_def *copy_propagation_create_var_def(struct h
}
static
void
copy_propagation_invalidate_variable
(
struct
copy_propagation_var_def
*
var_def
,
unsigned
offset
,
unsigned
char
writemask
)
unsigned
int
comp
,
unsigned
char
writemask
)
{
unsigned
i
;
TRACE
(
"Invalidate variable %s[%u]%s.
\n
"
,
var_def
->
var
->
name
,
offset
,
debug_hlsl_writemask
(
writemask
));
TRACE
(
"Invalidate variable %s[%u]%s.
\n
"
,
var_def
->
var
->
name
,
comp
,
debug_hlsl_writemask
(
writemask
));
for
(
i
=
0
;
i
<
4
;
++
i
)
{
if
(
writemask
&
(
1u
<<
i
))
var_def
->
values
[
offset
+
i
].
state
=
VALUE_STATE_DYNAMICALLY_WRITTEN
;
var_def
->
values
[
comp
+
i
].
state
=
VALUE_STATE_DYNAMICALLY_WRITTEN
;
}
}
static
void
copy_propagation_invalidate_whole_variable
(
struct
copy_propagation_var_def
*
var_def
)
static
void
copy_propagation_invalidate_variable_from_deref_recurse
(
struct
hlsl_ctx
*
ctx
,
struct
copy_propagation_var_def
*
var_def
,
const
struct
hlsl_deref
*
deref
,
struct
hlsl_type
*
type
,
unsigned
int
depth
,
unsigned
int
comp_start
,
unsigned
char
writemask
)
{
unsigned
i
;
unsigned
int
i
,
subtype_comp_count
;
struct
hlsl_ir_node
*
path_node
;
struct
hlsl_type
*
subtype
;
if
(
depth
==
deref
->
path_len
)
{
copy_propagation_invalidate_variable
(
var_def
,
comp_start
,
writemask
);
return
;
}
path_node
=
deref
->
path
[
depth
].
node
;
subtype
=
hlsl_get_element_type_from_path_index
(
ctx
,
type
,
path_node
);
if
(
type
->
type
==
HLSL_CLASS_STRUCT
)
{
unsigned
int
idx
=
hlsl_ir_constant
(
path_node
)
->
value
[
0
].
u
;
for
(
i
=
0
;
i
<
idx
;
++
i
)
comp_start
+=
hlsl_type_component_count
(
type
->
e
.
record
.
fields
[
i
].
type
);
copy_propagation_invalidate_variable_from_deref_recurse
(
ctx
,
var_def
,
deref
,
subtype
,
depth
+
1
,
comp_start
,
writemask
);
}
else
{
subtype_comp_count
=
hlsl_type_component_count
(
subtype
);
TRACE
(
"Invalidate variable %s.
\n
"
,
var_def
->
var
->
name
);
if
(
path_node
->
type
==
HLSL_IR_CONSTANT
)
{
copy_propagation_invalidate_variable_from_deref_recurse
(
ctx
,
var_def
,
deref
,
subtype
,
depth
+
1
,
hlsl_ir_constant
(
path_node
)
->
value
[
0
].
u
*
subtype_comp_count
,
writemask
);
}
else
{
for
(
i
=
0
;
i
<
hlsl_type_element_count
(
type
);
++
i
)
{
copy_propagation_invalidate_variable_from_deref_recurse
(
ctx
,
var_def
,
deref
,
subtype
,
depth
+
1
,
i
*
subtype_comp_count
,
writemask
);
}
}
}
}
for
(
i
=
0
;
i
<
var_def
->
var
->
data_type
->
reg_size
;
++
i
)
var_def
->
values
[
i
].
state
=
VALUE_STATE_DYNAMICALLY_WRITTEN
;
static
void
copy_propagation_invalidate_variable_from_deref
(
struct
hlsl_ctx
*
ctx
,
struct
copy_propagation_var_def
*
var_def
,
const
struct
hlsl_deref
*
deref
,
unsigned
char
writemask
)
{
copy_propagation_invalidate_variable_from_deref_recurse
(
ctx
,
var_def
,
deref
,
deref
->
var
->
data_type
,
0
,
0
,
writemask
);
}
static
void
copy_propagation_set_value
(
struct
copy_propagation_var_def
*
var_def
,
unsigned
int
offset
,
unsigned
char
writemask
,
struct
hlsl_ir_node
*
node
)
static
void
copy_propagation_set_value
(
struct
copy_propagation_var_def
*
var_def
,
unsigned
int
comp
,
unsigned
char
writemask
,
struct
hlsl_ir_node
*
instr
)
{
unsigned
int
i
,
j
=
0
;
...
...
@@ -444,59 +670,56 @@ static void copy_propagation_set_value(struct copy_propagation_var_def *var_def,
if
(
writemask
&
(
1u
<<
i
))
{
TRACE
(
"Variable %s[%u] is written by instruction %p%s.
\n
"
,
var_def
->
var
->
name
,
offset
+
i
,
node
,
debug_hlsl_writemask
(
1u
<<
i
));
var_def
->
values
[
offset
+
i
].
state
=
VALUE_STATE_STATICALLY_WRITTEN
;
var_def
->
values
[
offset
+
i
].
node
=
node
;
var_def
->
values
[
offset
+
i
].
component
=
j
++
;
var_def
->
var
->
name
,
comp
+
i
,
instr
,
debug_hlsl_writemask
(
1u
<<
i
));
var_def
->
values
[
comp
+
i
].
state
=
VALUE_STATE_STATICALLY_WRITTEN
;
var_def
->
values
[
comp
+
i
].
node
=
instr
;
var_def
->
values
[
comp
+
i
].
component
=
j
++
;
}
}
}
static
struct
hlsl_ir_node
*
copy_propagation_compute_replacement
(
struct
hlsl_ctx
*
ctx
,
const
struct
copy_propagation_state
*
state
,
const
struct
hlsl_deref
*
deref
,
unsigned
int
count
,
unsigned
int
*
swizzle
)
unsigned
int
*
swizzle
)
{
const
struct
hlsl_ir_var
*
var
=
deref
->
var
;
struct
hlsl_ir_node
*
node
=
NULL
;
unsigned
int
offse
t
,
i
;
struct
hlsl_ir_node
*
instr
=
NULL
;
unsigned
int
start
,
coun
t
,
i
;
if
(
!
hlsl_
offset_from_deref
(
ctx
,
deref
,
&
offse
t
))
if
(
!
hlsl_
component_index_range_from_deref
(
ctx
,
deref
,
&
start
,
&
coun
t
))
return
NULL
;
if
(
var
->
data_type
->
type
!=
HLSL_CLASS_OBJECT
)
assert
(
offset
+
count
<=
var
->
data_type
->
reg_size
);
*
swizzle
=
0
;
for
(
i
=
0
;
i
<
count
;
++
i
)
{
struct
copy_propagation_value
*
value
=
copy_propagation_get_value
(
state
,
var
,
offse
t
+
i
);
struct
copy_propagation_value
*
value
=
copy_propagation_get_value
(
state
,
var
,
star
t
+
i
);
if
(
!
value
)
return
NULL
;
if
(
!
node
)
if
(
!
instr
)
{
node
=
value
->
node
;
instr
=
value
->
node
;
}
else
if
(
node
!=
value
->
node
)
else
if
(
instr
!=
value
->
node
)
{
TRACE
(
"No single source for propagating load from %s[%u-%u].
\n
"
,
var
->
name
,
offset
,
offse
t
+
count
);
TRACE
(
"No single source for propagating load from %s[%u-%u].
\n
"
,
var
->
name
,
start
,
star
t
+
count
);
return
NULL
;
}
*
swizzle
|=
value
->
component
<<
i
*
2
;
}
TRACE
(
"Load from %s[%u-%u] propagated as instruction %p%s.
\n
"
,
var
->
name
,
offset
,
offset
+
count
,
node
,
debug_hlsl_swizzle
(
*
swizzle
,
count
));
return
node
;
var
->
name
,
start
,
start
+
count
,
instr
,
debug_hlsl_swizzle
(
*
swizzle
,
count
));
return
instr
;
}
static
bool
copy_propagation_transform_load
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_load
*
load
,
struct
copy_propagation_state
*
state
)
{
struct
hlsl_ir_node
*
node
=
&
load
->
node
,
*
new_node
;
struct
hlsl_type
*
type
=
node
->
data_type
;
struct
hlsl_ir_node
*
instr
=
&
load
->
node
,
*
new_instr
;
struct
hlsl_type
*
type
=
instr
->
data_type
;
struct
hlsl_ir_swizzle
*
swizzle_node
;
unsigned
int
dimx
=
0
;
unsigned
int
swizzle
;
...
...
@@ -520,17 +743,17 @@ static bool copy_propagation_transform_load(struct hlsl_ctx *ctx,
return
false
;
}
if
(
!
(
new_
node
=
copy_propagation_compute_replacement
(
ctx
,
state
,
&
load
->
src
,
dimx
,
&
swizzle
)))
if
(
!
(
new_
instr
=
copy_propagation_compute_replacement
(
ctx
,
state
,
&
load
->
src
,
&
swizzle
)))
return
false
;
if
(
type
->
type
!=
HLSL_CLASS_OBJECT
)
{
if
(
!
(
swizzle_node
=
hlsl_new_swizzle
(
ctx
,
swizzle
,
dimx
,
new_
node
,
&
node
->
loc
)))
if
(
!
(
swizzle_node
=
hlsl_new_swizzle
(
ctx
,
swizzle
,
dimx
,
new_
instr
,
&
instr
->
loc
)))
return
false
;
list_add_before
(
&
node
->
entry
,
&
swizzle_node
->
node
.
entry
);
new_
node
=
&
swizzle_node
->
node
;
list_add_before
(
&
instr
->
entry
,
&
swizzle_node
->
node
.
entry
);
new_
instr
=
&
swizzle_node
->
node
;
}
hlsl_replace_node
(
node
,
new_node
);
hlsl_replace_node
(
instr
,
new_instr
);
return
true
;
}
...
...
@@ -538,17 +761,18 @@ static bool copy_propagation_transform_object_load(struct hlsl_ctx *ctx,
struct
hlsl_deref
*
deref
,
struct
copy_propagation_state
*
state
)
{
struct
hlsl_ir_load
*
load
;
struct
hlsl_ir_node
*
node
;
struct
hlsl_ir_node
*
instr
;
unsigned
int
swizzle
;
if
(
!
(
node
=
copy_propagation_compute_replacement
(
ctx
,
state
,
deref
,
1
,
&
swizzle
)))
if
(
!
(
instr
=
copy_propagation_compute_replacement
(
ctx
,
state
,
deref
,
&
swizzle
)))
return
false
;
/* Only HLSL_IR_LOAD can produce an object. */
load
=
hlsl_ir_load
(
node
);
deref
->
var
=
load
->
src
.
var
;
hlsl_src_remove
(
&
deref
->
offset
);
hlsl_src_from_node
(
&
deref
->
offset
,
load
->
src
.
offset
.
node
);
load
=
hlsl_ir_load
(
instr
);
hlsl_cleanup_deref
(
deref
);
hlsl_copy_deref
(
ctx
,
deref
,
&
load
->
src
);
return
true
;
}
...
...
@@ -569,22 +793,22 @@ static void copy_propagation_record_store(struct hlsl_ctx *ctx, struct hlsl_ir_s
struct
copy_propagation_var_def
*
var_def
;
struct
hlsl_deref
*
lhs
=
&
store
->
lhs
;
struct
hlsl_ir_var
*
var
=
lhs
->
var
;
unsigned
int
offse
t
;
unsigned
int
start
,
coun
t
;
if
(
!
(
var_def
=
copy_propagation_create_var_def
(
ctx
,
state
,
var
)))
return
;
if
(
hlsl_
offset_from_deref
(
ctx
,
lhs
,
&
offse
t
))
if
(
hlsl_
component_index_range_from_deref
(
ctx
,
lhs
,
&
start
,
&
coun
t
))
{
unsigned
int
writemask
=
store
->
writemask
;
if
(
store
->
rhs
.
node
->
data_type
->
type
==
HLSL_CLASS_OBJECT
)
writemask
=
VKD3DSP_WRITEMASK_0
;
copy_propagation_set_value
(
var_def
,
offse
t
,
writemask
,
store
->
rhs
.
node
);
copy_propagation_set_value
(
var_def
,
star
t
,
writemask
,
store
->
rhs
.
node
);
}
else
{
copy_propagation_invalidate_
whole_variable
(
var_def
);
copy_propagation_invalidate_
variable_from_deref
(
ctx
,
var_def
,
lhs
,
store
->
writemask
);
}
}
...
...
@@ -615,15 +839,11 @@ static void copy_propagation_invalidate_from_block(struct hlsl_ctx *ctx, struct
struct
copy_propagation_var_def
*
var_def
;
struct
hlsl_deref
*
lhs
=
&
store
->
lhs
;
struct
hlsl_ir_var
*
var
=
lhs
->
var
;
unsigned
int
offset
;
if
(
!
(
var_def
=
copy_propagation_create_var_def
(
ctx
,
state
,
var
)))
continue
;
if
(
hlsl_offset_from_deref
(
ctx
,
lhs
,
&
offset
))
copy_propagation_invalidate_variable
(
var_def
,
offset
,
store
->
writemask
);
else
copy_propagation_invalidate_whole_variable
(
var_def
);
copy_propagation_invalidate_variable_from_deref
(
ctx
,
var_def
,
lhs
,
store
->
writemask
);
break
;
}
...
...
@@ -747,6 +967,51 @@ static bool copy_propagation_execute(struct hlsl_ctx *ctx, struct hlsl_block *bl
return
progress
;
}
/* Emit a compiler note for every component of a deref path that is not a
 * compile-time constant, to explain why the deref on "usage" within the
 * variable could not be resolved statically. */
static void note_non_static_deref_expressions(struct hlsl_ctx *ctx, const struct hlsl_deref *deref,
        const char *usage)
{
    unsigned int i;

    for (i = 0; i < deref->path_len; ++i)
    {
        struct hlsl_ir_node *node = deref->path[i].node;

        assert(node);
        /* Constant path components are statically resolvable; anything else
         * is worth pointing out to the user. */
        if (node->type != HLSL_IR_CONSTANT)
            hlsl_note(ctx, &node->loc, VKD3D_SHADER_LOG_ERROR,
                    "Expression for %s within \"%s\" cannot be resolved statically.",
                    usage, deref->var->name);
    }
}
static
bool
validate_static_object_references
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_node
*
instr
,
void
*
context
)
{
unsigned
int
start
,
count
;
if
(
instr
->
type
==
HLSL_IR_RESOURCE_LOAD
)
{
struct
hlsl_ir_resource_load
*
load
=
hlsl_ir_resource_load
(
instr
);
if
(
!
hlsl_component_index_range_from_deref
(
ctx
,
&
load
->
resource
,
&
start
,
&
count
))
{
hlsl_error
(
ctx
,
&
instr
->
loc
,
VKD3D_SHADER_ERROR_HLSL_NON_STATIC_OBJECT_REF
,
"Loaded resource from
\"
%s
\"
must be determinable at compile time."
,
load
->
resource
.
var
->
name
);
note_non_static_deref_expressions
(
ctx
,
&
load
->
resource
,
"loaded resource"
);
}
if
(
load
->
sampler
.
var
&&
!
hlsl_component_index_range_from_deref
(
ctx
,
&
load
->
sampler
,
&
start
,
&
count
))
{
hlsl_error
(
ctx
,
&
instr
->
loc
,
VKD3D_SHADER_ERROR_HLSL_NON_STATIC_OBJECT_REF
,
"Resource load sampler from
\"
%s
\"
must be determinable at compile time."
,
load
->
sampler
.
var
->
name
);
note_non_static_deref_expressions
(
ctx
,
&
load
->
sampler
,
"resource load sampler"
);
}
}
return
false
;
}
static
bool
is_vec1
(
const
struct
hlsl_type
*
type
)
{
return
(
type
->
type
==
HLSL_CLASS_SCALAR
)
||
(
type
->
type
==
HLSL_CLASS_VECTOR
&&
type
->
dimx
==
1
);
...
...
@@ -779,39 +1044,21 @@ static bool fold_redundant_casts(struct hlsl_ctx *ctx, struct hlsl_ir_node *inst
* split_matrix_copies(). Inserts new instructions right before
* "store". */
static
bool
split_copy
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_store
*
store
,
const
struct
hlsl_ir_load
*
load
,
const
unsigned
int
offset
,
struct
hlsl_type
*
type
)
const
struct
hlsl_ir_load
*
load
,
const
unsigned
int
idx
,
struct
hlsl_type
*
type
)
{
struct
hlsl_ir_node
*
offset_instr
,
*
add
;
struct
hlsl_ir_store
*
split_store
;
struct
hlsl_ir_load
*
split_load
;
struct
hlsl_ir_constant
*
c
;
if
(
!
(
c
=
hlsl_new_uint_constant
(
ctx
,
offset
,
&
store
->
node
.
loc
)))
if
(
!
(
c
=
hlsl_new_uint_constant
(
ctx
,
idx
,
&
store
->
node
.
loc
)))
return
false
;
list_add_before
(
&
store
->
node
.
entry
,
&
c
->
node
.
entry
);
offset_instr
=
&
c
->
node
;
if
(
load
->
src
.
offset
.
node
)
{
if
(
!
(
add
=
hlsl_new_binary_expr
(
ctx
,
HLSL_OP2_ADD
,
load
->
src
.
offset
.
node
,
&
c
->
node
)))
return
false
;
list_add_before
(
&
store
->
node
.
entry
,
&
add
->
entry
);
offset_instr
=
add
;
}
if
(
!
(
split_load
=
hlsl_new_load
(
ctx
,
load
->
src
.
var
,
offset_instr
,
type
,
store
->
node
.
loc
)))
if
(
!
(
split_load
=
hlsl_new_load_index
(
ctx
,
&
load
->
src
,
&
c
->
node
,
&
store
->
node
.
loc
)))
return
false
;
list_add_before
(
&
store
->
node
.
entry
,
&
split_load
->
node
.
entry
);
offset_instr
=
&
c
->
node
;
if
(
store
->
lhs
.
offset
.
node
)
{
if
(
!
(
add
=
hlsl_new_binary_expr
(
ctx
,
HLSL_OP2_ADD
,
store
->
lhs
.
offset
.
node
,
&
c
->
node
)))
return
false
;
list_add_before
(
&
store
->
node
.
entry
,
&
add
->
entry
);
offset_instr
=
add
;
}
if
(
!
(
split_store
=
hlsl_new_store
(
ctx
,
store
->
lhs
.
var
,
offset_instr
,
&
split_load
->
node
,
0
,
store
->
node
.
loc
)))
if
(
!
(
split_store
=
hlsl_new_store_index
(
ctx
,
&
store
->
lhs
,
&
c
->
node
,
&
split_load
->
node
,
0
,
&
store
->
node
.
loc
)))
return
false
;
list_add_before
(
&
store
->
node
.
entry
,
&
split_store
->
node
.
entry
);
...
...
@@ -823,8 +1070,8 @@ static bool split_array_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
const
struct
hlsl_ir_node
*
rhs
;
struct
hlsl_type
*
element_type
;
const
struct
hlsl_type
*
type
;
unsigned
int
element_size
,
i
;
struct
hlsl_ir_store
*
store
;
unsigned
int
i
;
if
(
instr
->
type
!=
HLSL_IR_STORE
)
return
false
;
...
...
@@ -835,7 +1082,6 @@ static bool split_array_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
if
(
type
->
type
!=
HLSL_CLASS_ARRAY
)
return
false
;
element_type
=
type
->
e
.
array
.
type
;
element_size
=
hlsl_type_get_array_element_reg_size
(
element_type
);
if
(
rhs
->
type
!=
HLSL_IR_LOAD
)
{
...
...
@@ -845,7 +1091,7 @@ static bool split_array_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
for
(
i
=
0
;
i
<
type
->
e
.
array
.
elements_count
;
++
i
)
{
if
(
!
split_copy
(
ctx
,
store
,
hlsl_ir_load
(
rhs
),
i
*
element_size
,
element_type
))
if
(
!
split_copy
(
ctx
,
store
,
hlsl_ir_load
(
rhs
),
i
,
element_type
))
return
false
;
}
...
...
@@ -859,10 +1105,10 @@ static bool split_array_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
static
bool
split_struct_copies
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_node
*
instr
,
void
*
context
)
{
const
struct
hlsl_struct_field
*
field
;
const
struct
hlsl_ir_node
*
rhs
;
const
struct
hlsl_type
*
type
;
struct
hlsl_ir_store
*
store
;
size_t
i
;
if
(
instr
->
type
!=
HLSL_IR_STORE
)
return
false
;
...
...
@@ -879,9 +1125,11 @@ static bool split_struct_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr
return
false
;
}
LIST_FOR_EACH_ENTRY
(
field
,
type
->
e
.
elements
,
struct
hlsl_struct_field
,
entry
)
for
(
i
=
0
;
i
<
type
->
e
.
record
.
field_count
;
++
i
)
{
if
(
!
split_copy
(
ctx
,
store
,
hlsl_ir_load
(
rhs
),
field
->
reg_offset
,
field
->
type
))
const
struct
hlsl_struct_field
*
field
=
&
type
->
e
.
record
.
fields
[
i
];
if
(
!
split_copy
(
ctx
,
store
,
hlsl_ir_load
(
rhs
),
i
,
field
->
type
))
return
false
;
}
...
...
@@ -909,7 +1157,7 @@ static bool split_matrix_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr
type
=
rhs
->
data_type
;
if
(
type
->
type
!=
HLSL_CLASS_MATRIX
)
return
false
;
element_type
=
hlsl_get_vector_type
(
ctx
,
type
->
base_type
,
minor_size
(
type
));
element_type
=
hlsl_get_vector_type
(
ctx
,
type
->
base_type
,
hlsl_type_
minor_size
(
type
));
if
(
rhs
->
type
!=
HLSL_IR_LOAD
)
{
...
...
@@ -917,9 +1165,9 @@ static bool split_matrix_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr
return
false
;
}
for
(
i
=
0
;
i
<
major_size
(
type
);
++
i
)
for
(
i
=
0
;
i
<
hlsl_type_
major_size
(
type
);
++
i
)
{
if
(
!
split_copy
(
ctx
,
store
,
hlsl_ir_load
(
rhs
),
4
*
i
,
element_type
))
if
(
!
split_copy
(
ctx
,
store
,
hlsl_ir_load
(
rhs
),
i
,
element_type
))
return
false
;
}
...
...
@@ -1203,6 +1451,8 @@ static void compute_liveness_recurse(struct hlsl_block *block, unsigned int loop
load
->
coords
.
node
->
last_read
=
instr
->
index
;
if
(
load
->
texel_offset
.
node
)
load
->
texel_offset
.
node
->
last_read
=
instr
->
index
;
if
(
load
->
lod
.
node
)
load
->
lod
.
node
->
last_read
=
instr
->
index
;
break
;
}
case
HLSL_IR_SWIZZLE
:
...
...
@@ -1834,6 +2084,82 @@ static void allocate_objects(struct hlsl_ctx *ctx, enum hlsl_base_type type)
}
}
bool
hlsl_component_index_range_from_deref
(
struct
hlsl_ctx
*
ctx
,
const
struct
hlsl_deref
*
deref
,
unsigned
int
*
start
,
unsigned
int
*
count
)
{
struct
hlsl_type
*
type
=
deref
->
var
->
data_type
;
unsigned
int
i
,
k
;
*
start
=
0
;
*
count
=
0
;
for
(
i
=
0
;
i
<
deref
->
path_len
;
++
i
)
{
struct
hlsl_ir_node
*
path_node
=
deref
->
path
[
i
].
node
;
unsigned
int
idx
=
0
;
assert
(
path_node
);
if
(
path_node
->
type
!=
HLSL_IR_CONSTANT
)
return
false
;
/* We should always have generated a cast to UINT. */
assert
(
path_node
->
data_type
->
type
==
HLSL_CLASS_SCALAR
&&
path_node
->
data_type
->
base_type
==
HLSL_TYPE_UINT
);
idx
=
hlsl_ir_constant
(
path_node
)
->
value
[
0
].
u
;
switch
(
type
->
type
)
{
case
HLSL_CLASS_VECTOR
:
if
(
idx
>=
type
->
dimx
)
{
hlsl_error
(
ctx
,
&
path_node
->
loc
,
VKD3D_SHADER_ERROR_HLSL_OFFSET_OUT_OF_BOUNDS
,
"Vector index is out of bounds. %u/%u"
,
idx
,
type
->
dimx
);
return
false
;
}
*
start
+=
idx
;
break
;
case
HLSL_CLASS_MATRIX
:
if
(
idx
>=
hlsl_type_major_size
(
type
))
{
hlsl_error
(
ctx
,
&
path_node
->
loc
,
VKD3D_SHADER_ERROR_HLSL_OFFSET_OUT_OF_BOUNDS
,
"Matrix index is out of bounds. %u/%u"
,
idx
,
hlsl_type_major_size
(
type
));
return
false
;
}
if
(
hlsl_type_is_row_major
(
type
))
*
start
+=
idx
*
type
->
dimx
;
else
*
start
+=
idx
*
type
->
dimy
;
break
;
case
HLSL_CLASS_ARRAY
:
if
(
idx
>=
type
->
e
.
array
.
elements_count
)
{
hlsl_error
(
ctx
,
&
path_node
->
loc
,
VKD3D_SHADER_ERROR_HLSL_OFFSET_OUT_OF_BOUNDS
,
"Array index is out of bounds. %u/%u"
,
idx
,
type
->
e
.
array
.
elements_count
);
return
false
;
}
*
start
+=
idx
*
hlsl_type_component_count
(
type
->
e
.
array
.
type
);
break
;
case
HLSL_CLASS_STRUCT
:
for
(
k
=
0
;
k
<
idx
;
++
k
)
*
start
+=
hlsl_type_component_count
(
type
->
e
.
record
.
fields
[
k
].
type
);
break
;
default:
assert
(
0
);
break
;
}
type
=
hlsl_get_element_type_from_path_index
(
ctx
,
type
,
path_node
);
}
*
count
=
hlsl_type_component_count
(
type
);
return
true
;
}
bool
hlsl_offset_from_deref
(
struct
hlsl_ctx
*
ctx
,
const
struct
hlsl_deref
*
deref
,
unsigned
int
*
offset
)
{
struct
hlsl_ir_node
*
offset_node
=
deref
->
offset
.
node
;
...
...
@@ -1942,11 +2268,13 @@ int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry
}
while
(
progress
);
transform_ir
(
ctx
,
split_matrix_copies
,
body
,
NULL
);
transform_ir
(
ctx
,
lower_narrowing_casts
,
body
,
NULL
);
transform_ir
(
ctx
,
lower_casts_to_bool
,
body
,
NULL
);
do
{
progress
=
transform_ir
(
ctx
,
hlsl_fold_constants
,
body
,
NULL
);
progress
=
transform_ir
(
ctx
,
hlsl_fold_constant_exprs
,
body
,
NULL
);
progress
|=
transform_ir
(
ctx
,
hlsl_fold_constant_swizzles
,
body
,
NULL
);
progress
|=
copy_propagation_execute
(
ctx
,
body
);
progress
|=
transform_ir
(
ctx
,
remove_trivial_swizzles
,
body
,
NULL
);
}
...
...
@@ -1955,6 +2283,12 @@ int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry
if
(
ctx
->
profile
->
major_version
<
4
)
transform_ir
(
ctx
,
lower_division
,
body
,
NULL
);
transform_ir
(
ctx
,
validate_static_object_references
,
body
,
NULL
);
/* TODO: move forward, remove when no longer needed */
transform_ir
(
ctx
,
transform_deref_paths_into_offsets
,
body
,
NULL
);
while
(
transform_ir
(
ctx
,
hlsl_fold_constant_exprs
,
body
,
NULL
));
do
compute_liveness
(
ctx
,
entry_func
);
while
(
transform_ir
(
ctx
,
dce
,
body
,
NULL
));
...
...
libs/vkd3d/libs/vkd3d-shader/hlsl_constant_ops.c
View file @
c3025582
...
...
@@ -369,7 +369,143 @@ static bool fold_mod(struct hlsl_ctx *ctx, struct hlsl_ir_constant *dst,
return
true
;
}
bool
hlsl_fold_constants
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_node
*
instr
,
void
*
context
)
static
bool
fold_max
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_constant
*
dst
,
struct
hlsl_ir_constant
*
src1
,
struct
hlsl_ir_constant
*
src2
)
{
enum
hlsl_base_type
type
=
dst
->
node
.
data_type
->
base_type
;
unsigned
int
k
;
assert
(
type
==
src1
->
node
.
data_type
->
base_type
);
assert
(
type
==
src2
->
node
.
data_type
->
base_type
);
for
(
k
=
0
;
k
<
dst
->
node
.
data_type
->
dimx
;
++
k
)
{
switch
(
type
)
{
case
HLSL_TYPE_INT
:
dst
->
value
[
k
].
i
=
max
(
src1
->
value
[
k
].
i
,
src2
->
value
[
k
].
i
);
break
;
case
HLSL_TYPE_UINT
:
dst
->
value
[
k
].
u
=
max
(
src1
->
value
[
k
].
u
,
src2
->
value
[
k
].
u
);
break
;
default:
FIXME
(
"Fold max for type %s.
\n
"
,
debug_hlsl_type
(
ctx
,
dst
->
node
.
data_type
));
return
false
;
}
}
return
true
;
}
static
bool
fold_min
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_constant
*
dst
,
struct
hlsl_ir_constant
*
src1
,
struct
hlsl_ir_constant
*
src2
)
{
enum
hlsl_base_type
type
=
dst
->
node
.
data_type
->
base_type
;
unsigned
int
k
;
assert
(
type
==
src1
->
node
.
data_type
->
base_type
);
assert
(
type
==
src2
->
node
.
data_type
->
base_type
);
for
(
k
=
0
;
k
<
dst
->
node
.
data_type
->
dimx
;
++
k
)
{
switch
(
type
)
{
case
HLSL_TYPE_INT
:
dst
->
value
[
k
].
i
=
min
(
src1
->
value
[
k
].
i
,
src2
->
value
[
k
].
i
);
break
;
case
HLSL_TYPE_UINT
:
dst
->
value
[
k
].
u
=
min
(
src1
->
value
[
k
].
u
,
src2
->
value
[
k
].
u
);
break
;
default:
FIXME
(
"Fold min for type %s.
\n
"
,
debug_hlsl_type
(
ctx
,
dst
->
node
.
data_type
));
return
false
;
}
}
return
true
;
}
static
bool
fold_bit_xor
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_constant
*
dst
,
struct
hlsl_ir_constant
*
src1
,
struct
hlsl_ir_constant
*
src2
)
{
enum
hlsl_base_type
type
=
dst
->
node
.
data_type
->
base_type
;
unsigned
int
k
;
assert
(
type
==
src1
->
node
.
data_type
->
base_type
);
assert
(
type
==
src2
->
node
.
data_type
->
base_type
);
for
(
k
=
0
;
k
<
dst
->
node
.
data_type
->
dimx
;
++
k
)
{
switch
(
type
)
{
case
HLSL_TYPE_INT
:
case
HLSL_TYPE_UINT
:
dst
->
value
[
k
].
u
=
src1
->
value
[
k
].
u
^
src2
->
value
[
k
].
u
;
break
;
default:
FIXME
(
"Fold bit xor for type %s.
\n
"
,
debug_hlsl_type
(
ctx
,
dst
->
node
.
data_type
));
return
false
;
}
}
return
true
;
}
static
bool
fold_bit_and
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_constant
*
dst
,
struct
hlsl_ir_constant
*
src1
,
struct
hlsl_ir_constant
*
src2
)
{
enum
hlsl_base_type
type
=
dst
->
node
.
data_type
->
base_type
;
unsigned
int
k
;
assert
(
type
==
src1
->
node
.
data_type
->
base_type
);
assert
(
type
==
src2
->
node
.
data_type
->
base_type
);
for
(
k
=
0
;
k
<
dst
->
node
.
data_type
->
dimx
;
++
k
)
{
switch
(
type
)
{
case
HLSL_TYPE_INT
:
case
HLSL_TYPE_UINT
:
dst
->
value
[
k
].
u
=
src1
->
value
[
k
].
u
&
src2
->
value
[
k
].
u
;
break
;
default:
FIXME
(
"Fold bit and for type %s.
\n
"
,
debug_hlsl_type
(
ctx
,
dst
->
node
.
data_type
));
return
false
;
}
}
return
true
;
}
static
bool
fold_bit_or
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_constant
*
dst
,
struct
hlsl_ir_constant
*
src1
,
struct
hlsl_ir_constant
*
src2
)
{
enum
hlsl_base_type
type
=
dst
->
node
.
data_type
->
base_type
;
unsigned
int
k
;
assert
(
type
==
src1
->
node
.
data_type
->
base_type
);
assert
(
type
==
src2
->
node
.
data_type
->
base_type
);
for
(
k
=
0
;
k
<
dst
->
node
.
data_type
->
dimx
;
++
k
)
{
switch
(
type
)
{
case
HLSL_TYPE_INT
:
case
HLSL_TYPE_UINT
:
dst
->
value
[
k
].
u
=
src1
->
value
[
k
].
u
|
src2
->
value
[
k
].
u
;
break
;
default:
FIXME
(
"Fold bit or for type %s.
\n
"
,
debug_hlsl_type
(
ctx
,
dst
->
node
.
data_type
));
return
false
;
}
}
return
true
;
}
bool
hlsl_fold_constant_exprs
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_node
*
instr
,
void
*
context
)
{
struct
hlsl_ir_constant
*
arg1
,
*
arg2
=
NULL
,
*
res
;
struct
hlsl_ir_expr
*
expr
;
...
...
@@ -430,6 +566,26 @@ bool hlsl_fold_constants(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void
success
=
fold_mod
(
ctx
,
res
,
arg1
,
arg2
);
break
;
case
HLSL_OP2_MAX
:
success
=
fold_max
(
ctx
,
res
,
arg1
,
arg2
);
break
;
case
HLSL_OP2_MIN
:
success
=
fold_min
(
ctx
,
res
,
arg1
,
arg2
);
break
;
case
HLSL_OP2_BIT_XOR
:
success
=
fold_bit_xor
(
ctx
,
res
,
arg1
,
arg2
);
break
;
case
HLSL_OP2_BIT_AND
:
success
=
fold_bit_and
(
ctx
,
res
,
arg1
,
arg2
);
break
;
case
HLSL_OP2_BIT_OR
:
success
=
fold_bit_or
(
ctx
,
res
,
arg1
,
arg2
);
break
;
default:
FIXME
(
"Fold
\"
%s
\"
expression.
\n
"
,
debug_hlsl_expr_op
(
expr
->
op
));
success
=
false
;
...
...
@@ -447,3 +603,32 @@ bool hlsl_fold_constants(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void
}
return
success
;
}
bool
hlsl_fold_constant_swizzles
(
struct
hlsl_ctx
*
ctx
,
struct
hlsl_ir_node
*
instr
,
void
*
context
)
{
struct
hlsl_ir_constant
*
value
,
*
res
;
struct
hlsl_ir_swizzle
*
swizzle
;
unsigned
int
i
,
swizzle_bits
;
if
(
instr
->
type
!=
HLSL_IR_SWIZZLE
)
return
false
;
swizzle
=
hlsl_ir_swizzle
(
instr
);
if
(
swizzle
->
val
.
node
->
type
!=
HLSL_IR_CONSTANT
)
return
false
;
value
=
hlsl_ir_constant
(
swizzle
->
val
.
node
);
if
(
!
(
res
=
hlsl_alloc
(
ctx
,
sizeof
(
*
res
))))
return
false
;
init_node
(
&
res
->
node
,
HLSL_IR_CONSTANT
,
instr
->
data_type
,
instr
->
loc
);
swizzle_bits
=
swizzle
->
swizzle
;
for
(
i
=
0
;
i
<
swizzle
->
node
.
data_type
->
dimx
;
++
i
)
{
res
->
value
[
i
]
=
value
->
value
[
swizzle_bits
&
3
];
swizzle_bits
>>=
2
;
}
list_add_before
(
&
swizzle
->
node
.
entry
,
&
res
->
node
.
entry
);
hlsl_replace_node
(
&
swizzle
->
node
,
&
res
->
node
);
return
true
;
}
libs/vkd3d/libs/vkd3d-shader/hlsl_sm1.c
View file @
c3025582
...
...
@@ -243,28 +243,33 @@ static void write_sm1_type(struct vkd3d_bytecode_buffer *buffer, struct hlsl_typ
{
const
struct
hlsl_type
*
array_type
=
get_array_type
(
type
);
unsigned
int
array_size
=
get_array_size
(
type
);
struct
hlsl_struct_field
*
field
;
unsigned
int
field_count
=
0
;
size_t
fields_offset
=
0
;
size_t
i
;
if
(
type
->
bytecode_offset
)
return
;
if
(
array_type
->
type
==
HLSL_CLASS_STRUCT
)
{
LIST_FOR_EACH_ENTRY
(
field
,
array_type
->
e
.
elements
,
struct
hlsl_struct_field
,
entry
)
field_count
=
array_type
->
e
.
record
.
field_count
;
for
(
i
=
0
;
i
<
field_count
;
++
i
)
{
struct
hlsl_struct_field
*
field
=
&
array_type
->
e
.
record
.
fields
[
i
];
field
->
name_bytecode_offset
=
put_string
(
buffer
,
field
->
name
);
write_sm1_type
(
buffer
,
field
->
type
,
ctab_start
);
}
fields_offset
=
bytecode_get_size
(
buffer
)
-
ctab_start
;
LIST_FOR_EACH_ENTRY
(
field
,
array_type
->
e
.
elements
,
struct
hlsl_struct_field
,
entry
)
for
(
i
=
0
;
i
<
field_count
;
++
i
)
{
struct
hlsl_struct_field
*
field
=
&
array_type
->
e
.
record
.
fields
[
i
];
put_u32
(
buffer
,
field
->
name_bytecode_offset
-
ctab_start
);
put_u32
(
buffer
,
field
->
type
->
bytecode_offset
-
ctab_start
);
++
field_count
;
}
}
...
...
libs/vkd3d/libs/vkd3d-shader/hlsl_sm4.c
View file @
c3025582
...
...
@@ -355,7 +355,7 @@ static void write_sm4_type(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *b
const
struct
hlsl_profile_info
*
profile
=
ctx
->
profile
;
unsigned
int
field_count
=
0
,
array_size
=
0
;
size_t
fields_offset
=
0
,
name_offset
=
0
;
s
truct
hlsl_struct_field
*
field
;
s
ize_t
i
;
if
(
type
->
bytecode_offset
)
return
;
...
...
@@ -368,20 +368,25 @@ static void write_sm4_type(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *b
if
(
array_type
->
type
==
HLSL_CLASS_STRUCT
)
{
LIST_FOR_EACH_ENTRY
(
field
,
array_type
->
e
.
elements
,
struct
hlsl_struct_field
,
entry
)
field_count
=
array_type
->
e
.
record
.
field_count
;
for
(
i
=
0
;
i
<
field_count
;
++
i
)
{
struct
hlsl_struct_field
*
field
=
&
array_type
->
e
.
record
.
fields
[
i
];
field
->
name_bytecode_offset
=
put_string
(
buffer
,
field
->
name
);
write_sm4_type
(
ctx
,
buffer
,
field
->
type
);
}
fields_offset
=
bytecode_get_size
(
buffer
);
LIST_FOR_EACH_ENTRY
(
field
,
array_type
->
e
.
elements
,
struct
hlsl_struct_field
,
entry
)
for
(
i
=
0
;
i
<
field_count
;
++
i
)
{
struct
hlsl_struct_field
*
field
=
&
array_type
->
e
.
record
.
fields
[
i
];
put_u32
(
buffer
,
field
->
name_bytecode_offset
);
put_u32
(
buffer
,
field
->
type
->
bytecode_offset
);
put_u32
(
buffer
,
field
->
reg_offset
);
++
field_count
;
}
}
...
...
@@ -1314,6 +1319,25 @@ static void write_sm4_binary_op(struct vkd3d_bytecode_buffer *buffer, enum vkd3d
write_sm4_instruction
(
buffer
,
&
instr
);
}
/* dp# instructions don't map the swizzle. */
static
void
write_sm4_binary_op_dot
(
struct
vkd3d_bytecode_buffer
*
buffer
,
enum
vkd3d_sm4_opcode
opcode
,
const
struct
hlsl_ir_node
*
dst
,
const
struct
hlsl_ir_node
*
src1
,
const
struct
hlsl_ir_node
*
src2
)
{
struct
sm4_instruction
instr
;
memset
(
&
instr
,
0
,
sizeof
(
instr
));
instr
.
opcode
=
opcode
;
sm4_dst_from_node
(
&
instr
.
dsts
[
0
],
dst
);
instr
.
dst_count
=
1
;
sm4_src_from_node
(
&
instr
.
srcs
[
0
],
src1
,
VKD3DSP_WRITEMASK_ALL
);
sm4_src_from_node
(
&
instr
.
srcs
[
1
],
src2
,
VKD3DSP_WRITEMASK_ALL
);
instr
.
src_count
=
2
;
write_sm4_instruction
(
buffer
,
&
instr
);
}
static
void
write_sm4_binary_op_with_two_destinations
(
struct
vkd3d_bytecode_buffer
*
buffer
,
enum
vkd3d_sm4_opcode
opcode
,
const
struct
hlsl_ir_node
*
dst
,
unsigned
dst_idx
,
const
struct
hlsl_ir_node
*
src1
,
const
struct
hlsl_ir_node
*
src2
)
...
...
@@ -1440,9 +1464,36 @@ static bool type_is_float(const struct hlsl_type *type)
return
type
->
base_type
==
HLSL_TYPE_FLOAT
||
type
->
base_type
==
HLSL_TYPE_HALF
;
}
static
void
write_sm4_cast_from_bool
(
struct
hlsl_ctx
*
ctx
,
struct
vkd3d_bytecode_buffer
*
buffer
,
const
struct
hlsl_ir_expr
*
expr
,
const
struct
hlsl_ir_node
*
arg
,
uint32_t
mask
)
{
struct
sm4_instruction
instr
;
memset
(
&
instr
,
0
,
sizeof
(
instr
));
instr
.
opcode
=
VKD3D_SM4_OP_AND
;
sm4_dst_from_node
(
&
instr
.
dsts
[
0
],
&
expr
->
node
);
instr
.
dst_count
=
1
;
sm4_src_from_node
(
&
instr
.
srcs
[
0
],
arg
,
instr
.
dsts
[
0
].
writemask
);
instr
.
srcs
[
1
].
swizzle_type
=
VKD3D_SM4_SWIZZLE_NONE
;
instr
.
srcs
[
1
].
reg
.
type
=
VKD3D_SM4_RT_IMMCONST
;
instr
.
srcs
[
1
].
reg
.
dim
=
VKD3D_SM4_DIMENSION_SCALAR
;
instr
.
srcs
[
1
].
reg
.
immconst_uint
[
0
]
=
mask
;
instr
.
src_count
=
2
;
write_sm4_instruction
(
buffer
,
&
instr
);
}
static
void
write_sm4_cast
(
struct
hlsl_ctx
*
ctx
,
struct
vkd3d_bytecode_buffer
*
buffer
,
const
struct
hlsl_ir_expr
*
expr
)
{
static
const
union
{
uint32_t
u
;
float
f
;
}
one
=
{
.
f
=
1
.
0
};
const
struct
hlsl_ir_node
*
arg1
=
expr
->
operands
[
0
].
node
;
const
struct
hlsl_type
*
dst_type
=
expr
->
node
.
data_type
;
const
struct
hlsl_type
*
src_type
=
arg1
->
data_type
;
...
...
@@ -1469,7 +1520,7 @@ static void write_sm4_cast(struct hlsl_ctx *ctx,
break
;
case
HLSL_TYPE_BOOL
:
hlsl_fixme
(
ctx
,
&
expr
->
node
.
loc
,
"SM4 cast from bool to float."
);
write_sm4_cast_from_bool
(
ctx
,
buffer
,
expr
,
arg1
,
one
.
u
);
break
;
case
HLSL_TYPE_DOUBLE
:
...
...
@@ -1495,7 +1546,7 @@ static void write_sm4_cast(struct hlsl_ctx *ctx,
break
;
case
HLSL_TYPE_BOOL
:
hlsl_fixme
(
ctx
,
&
expr
->
node
.
loc
,
"SM4 cast from bool to int."
);
write_sm4_cast_from_bool
(
ctx
,
buffer
,
expr
,
arg1
,
1
);
break
;
case
HLSL_TYPE_DOUBLE
:
...
...
@@ -1521,7 +1572,7 @@ static void write_sm4_cast(struct hlsl_ctx *ctx,
break
;
case
HLSL_TYPE_BOOL
:
hlsl_fixme
(
ctx
,
&
expr
->
node
.
loc
,
"SM4 cast from bool to uint."
);
write_sm4_cast_from_bool
(
ctx
,
buffer
,
expr
,
arg1
,
1
);
break
;
case
HLSL_TYPE_DOUBLE
:
...
...
@@ -1602,6 +1653,11 @@ static void write_sm4_expr(struct hlsl_ctx *ctx,
write_sm4_unary_op
(
buffer
,
VKD3D_SM4_OP_LOG
,
&
expr
->
node
,
arg1
,
0
);
break
;
case
HLSL_OP1_LOGIC_NOT
:
assert
(
dst_type
->
base_type
==
HLSL_TYPE_BOOL
);
write_sm4_unary_op
(
buffer
,
VKD3D_SM4_OP_NOT
,
&
expr
->
node
,
arg1
,
0
);
break
;
case
HLSL_OP1_NEG
:
switch
(
dst_type
->
base_type
)
{
...
...
@@ -1679,6 +1735,38 @@ static void write_sm4_expr(struct hlsl_ctx *ctx,
}
break
;
case
HLSL_OP2_DOT
:
switch
(
dst_type
->
base_type
)
{
case
HLSL_TYPE_FLOAT
:
switch
(
arg1
->
data_type
->
dimx
)
{
case
4
:
write_sm4_binary_op_dot
(
buffer
,
VKD3D_SM4_OP_DP4
,
&
expr
->
node
,
arg1
,
arg2
);
break
;
case
3
:
write_sm4_binary_op_dot
(
buffer
,
VKD3D_SM4_OP_DP3
,
&
expr
->
node
,
arg1
,
arg2
);
break
;
case
2
:
write_sm4_binary_op_dot
(
buffer
,
VKD3D_SM4_OP_DP2
,
&
expr
->
node
,
arg1
,
arg2
);
break
;
case
1
:
assert
(
0
);
break
;
default:
assert
(
0
);
}
break
;
default:
hlsl_fixme
(
ctx
,
&
expr
->
node
.
loc
,
"SM4 %s dot expression."
,
dst_type_string
->
buffer
);
}
break
;
case
HLSL_OP2_EQUAL
:
{
const
struct
hlsl_type
*
src_type
=
arg1
->
data_type
;
...
...
@@ -1763,6 +1851,16 @@ static void write_sm4_expr(struct hlsl_ctx *ctx,
break
;
}
case
HLSL_OP2_LOGIC_AND
:
assert
(
dst_type
->
base_type
==
HLSL_TYPE_BOOL
);
write_sm4_binary_op
(
buffer
,
VKD3D_SM4_OP_AND
,
&
expr
->
node
,
arg1
,
arg2
);
break
;
case
HLSL_OP2_LOGIC_OR
:
assert
(
dst_type
->
base_type
==
HLSL_TYPE_BOOL
);
write_sm4_binary_op
(
buffer
,
VKD3D_SM4_OP_OR
,
&
expr
->
node
,
arg1
,
arg2
);
break
;
case
HLSL_OP2_LSHIFT
:
assert
(
type_is_integer
(
dst_type
));
assert
(
dst_type
->
base_type
!=
HLSL_TYPE_BOOL
);
...
...
@@ -1984,11 +2082,23 @@ static void write_sm4_resource_load(struct hlsl_ctx *ctx,
const
struct
hlsl_ir_node
*
texel_offset
=
load
->
texel_offset
.
node
;
const
struct
hlsl_ir_node
*
coords
=
load
->
coords
.
node
;
if
(
resource_type
->
type
!=
HLSL_CLASS_OBJECT
)
{
assert
(
resource_type
->
type
==
HLSL_CLASS_ARRAY
||
resource_type
->
type
==
HLSL_CLASS_STRUCT
);
hlsl_fixme
(
ctx
,
&
load
->
node
.
loc
,
"Resource being a component of another variable."
);
return
;
}
if
(
load
->
sampler
.
var
)
{
const
struct
hlsl_type
*
sampler_type
=
load
->
sampler
.
var
->
data_type
;
assert
(
sampler_type
->
type
==
HLSL_CLASS_OBJECT
);
if
(
sampler_type
->
type
!=
HLSL_CLASS_OBJECT
)
{
assert
(
sampler_type
->
type
==
HLSL_CLASS_ARRAY
||
sampler_type
->
type
==
HLSL_CLASS_STRUCT
);
hlsl_fixme
(
ctx
,
&
load
->
node
.
loc
,
"Sampler being a component of another variable."
);
return
;
}
assert
(
sampler_type
->
base_type
==
HLSL_TYPE_SAMPLER
);
assert
(
sampler_type
->
sampler_dim
==
HLSL_SAMPLER_DIM_GENERIC
);
...
...
@@ -2037,6 +2147,10 @@ static void write_sm4_resource_load(struct hlsl_ctx *ctx,
write_sm4_gather
(
ctx
,
buffer
,
resource_type
,
&
load
->
node
,
&
load
->
resource
,
&
load
->
sampler
,
coords
,
HLSL_SWIZZLE
(
W
,
W
,
W
,
W
),
texel_offset
);
break
;
case
HLSL_RESOURCE_SAMPLE_LOD
:
hlsl_fixme
(
ctx
,
&
load
->
node
.
loc
,
"SM4 sample-LOD expression."
);
break
;
}
}
...
...
libs/vkd3d/libs/vkd3d-shader/spirv.c
View file @
c3025582
...
...
@@ -206,7 +206,7 @@ static enum vkd3d_shader_input_sysval_semantic vkd3d_siv_from_sysval(enum vkd3d_
#define VKD3D_SPIRV_VERSION 0x00010000
#define VKD3D_SPIRV_GENERATOR_ID 18
#define VKD3D_SPIRV_GENERATOR_VERSION
4
#define VKD3D_SPIRV_GENERATOR_VERSION
5
#define VKD3D_SPIRV_GENERATOR_MAGIC vkd3d_make_u32(VKD3D_SPIRV_GENERATOR_VERSION, VKD3D_SPIRV_GENERATOR_ID)
struct
vkd3d_spirv_stream
...
...
@@ -2228,6 +2228,7 @@ struct vkd3d_dxbc_compiler
bool
strip_debug
;
bool
ssbo_uavs
;
bool
uav_read_without_format
;
struct
rb_tree
symbol_table
;
uint32_t
temp_id
;
...
...
@@ -2379,6 +2380,15 @@ struct vkd3d_dxbc_compiler *vkd3d_dxbc_compiler_create(const struct vkd3d_shader
case
VKD3D_SHADER_COMPILE_OPTION_API_VERSION
:
break
;
case
VKD3D_SHADER_COMPILE_OPTION_TYPED_UAV
:
if
(
option
->
value
==
VKD3D_SHADER_COMPILE_OPTION_TYPED_UAV_READ_FORMAT_R32
)
compiler
->
uav_read_without_format
=
false
;
else
if
(
option
->
value
==
VKD3D_SHADER_COMPILE_OPTION_TYPED_UAV_READ_FORMAT_UNKNOWN
)
compiler
->
uav_read_without_format
=
true
;
else
WARN
(
"Ignoring unrecognised value %#x for option %#x.
\n
"
,
option
->
value
,
option
->
name
);
break
;
}
}
...
...
@@ -5856,14 +5866,18 @@ static uint32_t vkd3d_dxbc_compiler_get_image_type_id(struct vkd3d_dxbc_compiler
const
struct
vkd3d_shader_descriptor_info
*
d
;
uint32_t
sampled_type_id
;
SpvImageFormat
format
;
bool
uav_read
;
format
=
SpvImageFormatUnknown
;
if
(
reg
->
type
==
VKD3DSPR_UAV
)
{
d
=
vkd3d_dxbc_compiler_get_descriptor_info
(
compiler
,
VKD3D_SHADER_DESCRIPTOR_TYPE_UAV
,
range
);
if
(
raw_structured
||
(
d
->
flags
&
VKD3D_SHADER_DESCRIPTOR_INFO_FLAG_UAV_READ
))
uav_read
=
!!
(
d
->
flags
&
VKD3D_SHADER_DESCRIPTOR_INFO_FLAG_UAV_READ
);
if
(
raw_structured
||
(
uav_read
&&
!
compiler
->
uav_read_without_format
))
format
=
image_format_for_image_read
(
data_type
);
else
if
(
uav_read
)
vkd3d_spirv_enable_capability
(
builder
,
SpvCapabilityStorageImageReadWithoutFormat
);
}
sampled_type_id
=
vkd3d_spirv_get_type_id
(
builder
,
data_type
,
1
);
...
...
@@ -5962,7 +5976,7 @@ static void vkd3d_dxbc_compiler_emit_resource_declaration(struct vkd3d_dxbc_comp
const
struct
vkd3d_shader_resource
*
resource
,
enum
vkd3d_shader_resource_type
resource_type
,
enum
vkd3d_data_type
resource_data_type
,
unsigned
int
structure_stride
,
bool
raw
)
{
struct
vkd3d_descriptor_variable_info
var_info
,
counter_var_info
;
struct
vkd3d_descriptor_variable_info
var_info
,
counter_var_info
=
{
0
}
;
struct
vkd3d_spirv_builder
*
builder
=
&
compiler
->
spirv_builder
;
SpvStorageClass
storage_class
=
SpvStorageClassUniformConstant
;
uint32_t
counter_type_id
,
type_id
,
var_id
,
counter_var_id
=
0
;
...
...
libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h
View file @
c3025582
...
...
@@ -118,6 +118,7 @@ enum vkd3d_shader_error
VKD3D_SHADER_ERROR_HLSL_OFFSET_OUT_OF_BOUNDS
=
5019
,
VKD3D_SHADER_ERROR_HLSL_INCOMPATIBLE_PROFILE
=
5020
,
VKD3D_SHADER_ERROR_HLSL_DIVISION_BY_ZERO
=
5021
,
VKD3D_SHADER_ERROR_HLSL_NON_STATIC_OBJECT_REF
=
5022
,
VKD3D_SHADER_WARNING_HLSL_IMPLICIT_TRUNCATION
=
5300
,
VKD3D_SHADER_WARNING_HLSL_DIVISION_BY_ZERO
=
5301
,
...
...
libs/vkd3d/libs/vkd3d/command.c
View file @
c3025582
...
...
@@ -369,7 +369,10 @@ static void *vkd3d_fence_worker_main(void *arg)
}
if
(
worker
->
should_exit
)
{
vkd3d_mutex_unlock
(
&
worker
->
mutex
);
break
;
}
old_fences_size
=
cur_fences_size
;
old_fences
=
cur_fences
;
...
...
@@ -424,20 +427,11 @@ static HRESULT vkd3d_fence_worker_start(struct vkd3d_fence_worker *worker,
return
hresult_from_errno
(
rc
);
}
if
((
rc
=
vkd3d_cond_init
(
&
worker
->
fence_destruction_cond
)))
{
ERR
(
"Failed to initialize condition variable, error %d.
\n
"
,
rc
);
vkd3d_mutex_destroy
(
&
worker
->
mutex
);
vkd3d_cond_destroy
(
&
worker
->
cond
);
return
hresult_from_errno
(
rc
);
}
if
(
FAILED
(
hr
=
vkd3d_create_thread
(
device
->
vkd3d_instance
,
vkd3d_fence_worker_main
,
worker
,
&
worker
->
thread
)))
{
vkd3d_mutex_destroy
(
&
worker
->
mutex
);
vkd3d_cond_destroy
(
&
worker
->
cond
);
vkd3d_cond_destroy
(
&
worker
->
fence_destruction_cond
);
}
return
hr
;
...
...
@@ -467,7 +461,6 @@ static HRESULT vkd3d_fence_worker_stop(struct vkd3d_fence_worker *worker,
vkd3d_mutex_destroy
(
&
worker
->
mutex
);
vkd3d_cond_destroy
(
&
worker
->
cond
);
vkd3d_cond_destroy
(
&
worker
->
fence_destruction_cond
);
vkd3d_free
(
worker
->
fences
);
...
...
@@ -858,7 +851,7 @@ static void d3d12_fence_signal_external_events_locked(struct d3d12_fence *fence)
}
else
{
current
->
latch
=
true
;
*
current
->
latch
=
true
;
signal_null_event_cond
=
true
;
}
}
...
...
@@ -1162,7 +1155,7 @@ static HRESULT STDMETHODCALLTYPE d3d12_fence_SetEventOnCompletion(ID3D12Fence *i
{
struct
d3d12_fence
*
fence
=
impl_from_ID3D12Fence
(
iface
);
unsigned
int
i
;
bool
*
latch
;
bool
latch
=
false
;
int
rc
;
TRACE
(
"iface %p, value %#"
PRIx64
", event %p.
\n
"
,
iface
,
value
,
event
);
...
...
@@ -1203,8 +1196,7 @@ static HRESULT STDMETHODCALLTYPE d3d12_fence_SetEventOnCompletion(ID3D12Fence *i
fence
->
events
[
fence
->
event_count
].
value
=
value
;
fence
->
events
[
fence
->
event_count
].
event
=
event
;
fence
->
events
[
fence
->
event_count
].
latch
=
false
;
latch
=
&
fence
->
events
[
fence
->
event_count
].
latch
;
fence
->
events
[
fence
->
event_count
].
latch
=
&
latch
;
++
fence
->
event_count
;
/* If event is NULL, we need to block until the fence value completes.
...
...
@@ -1213,7 +1205,7 @@ static HRESULT STDMETHODCALLTYPE d3d12_fence_SetEventOnCompletion(ID3D12Fence *i
* and signal a condition variable instead of calling external signal_event callback. */
if
(
!
event
)
{
while
(
!
*
latch
)
while
(
!
latch
)
vkd3d_cond_wait
(
&
fence
->
null_event_cond
,
&
fence
->
mutex
);
}
...
...
@@ -6804,22 +6796,15 @@ static HRESULT STDMETHODCALLTYPE d3d12_command_queue_Wait(ID3D12CommandQueue *if
goto
done
;
}
vkd3d_mutex_unlock
(
&
fence
->
mutex
);
/* This is the critical part required to support out-of-order signal.
* Normally we would be able to submit waits and signals out of order, but
* we don't have virtualized queues in Vulkan, so we need to handle the case
* where multiple queues alias over the same physical queue, so effectively,
* we need to manage out-of-order submits ourselves. */
if
(
!
command_queue
->
ops_count
)
hr
=
d3d12_device_add_blocked_command_queues
(
command_queue
->
device
,
&
command_queue
,
1
);
if
(
FAILED
(
hr
))
goto
done
;
if
(
!
(
op
=
d3d12_command_queue_require_space_locked
(
command_queue
)))
{
vkd3d_mutex_unlock
(
&
fence
->
mutex
);
hr
=
E_OUTOFMEMORY
;
goto
done
;
}
...
...
@@ -6829,6 +6814,16 @@ static HRESULT STDMETHODCALLTYPE d3d12_command_queue_Wait(ID3D12CommandQueue *if
d3d12_fence_incref
(
fence
);
/* Add the queue to the blocked list after writing the op to ensure the queue isn't
* removed again in another thread because it has no ops. */
if
(
command_queue
->
ops_count
==
1
)
hr
=
d3d12_device_add_blocked_command_queues
(
command_queue
->
device
,
&
command_queue
,
1
);
/* The fence must remain locked until the op is created and the queue is added to the blocked list,
* because if an unblocking d3d12_fence_Signal() call occurs on another thread before the above
* work is done, flushing will be delayed until the next signal, if one occurs at all. */
vkd3d_mutex_unlock
(
&
fence
->
mutex
);
done:
vkd3d_mutex_unlock
(
&
command_queue
->
op_mutex
);
return
hr
;
...
...
libs/vkd3d/libs/vkd3d/device.c
View file @
c3025582
...
...
@@ -1391,6 +1391,45 @@ static void vkd3d_device_vk_heaps_descriptor_limits_init(struct vkd3d_device_des
limits
->
sampler_max_descriptors
=
min
(
limits
->
sampler_max_descriptors
,
VKD3D_MAX_DESCRIPTOR_SET_SAMPLERS
);
}
static
bool
d3d12_device_supports_typed_uav_load_additional_formats
(
const
struct
d3d12_device
*
device
)
{
const
struct
vkd3d_vk_instance_procs
*
vk_procs
=
&
device
->
vkd3d_instance
->
vk_procs
;
const
struct
vkd3d_format
*
format
;
VkFormatProperties
properties
;
unsigned
int
i
;
static
const
DXGI_FORMAT
additional_formats
[]
=
{
DXGI_FORMAT_R32G32B32A32_FLOAT
,
DXGI_FORMAT_R32G32B32A32_UINT
,
DXGI_FORMAT_R32G32B32A32_SINT
,
DXGI_FORMAT_R16G16B16A16_FLOAT
,
DXGI_FORMAT_R16G16B16A16_UINT
,
DXGI_FORMAT_R16G16B16A16_SINT
,
DXGI_FORMAT_R8G8B8A8_UNORM
,
DXGI_FORMAT_R8G8B8A8_UINT
,
DXGI_FORMAT_R8G8B8A8_SINT
,
DXGI_FORMAT_R16_FLOAT
,
DXGI_FORMAT_R16_UINT
,
DXGI_FORMAT_R16_SINT
,
DXGI_FORMAT_R8_UNORM
,
DXGI_FORMAT_R8_UINT
,
DXGI_FORMAT_R8_SINT
,
};
for
(
i
=
0
;
i
<
ARRAY_SIZE
(
additional_formats
);
++
i
)
{
format
=
vkd3d_get_format
(
device
,
additional_formats
[
i
],
false
);
assert
(
format
);
VK_CALL
(
vkGetPhysicalDeviceFormatProperties
(
device
->
vk_physical_device
,
format
->
vk_format
,
&
properties
));
if
(
!
((
properties
.
linearTilingFeatures
|
properties
.
optimalTilingFeatures
)
&
VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT
))
return
false
;
}
return
true
;
}
static
HRESULT
vkd3d_init_device_caps
(
struct
d3d12_device
*
device
,
const
struct
vkd3d_device_create_info
*
create_info
,
struct
vkd3d_physical_device_info
*
physical_device_info
,
...
...
@@ -1425,6 +1464,7 @@ static HRESULT vkd3d_init_device_caps(struct d3d12_device *device,
vulkan_info
->
sparse_properties
=
physical_device_info
->
properties2
.
properties
.
sparseProperties
;
vulkan_info
->
rasterization_stream
=
physical_device_info
->
xfb_properties
.
transformFeedbackRasterizationStreamSelect
;
vulkan_info
->
transform_feedback_queries
=
physical_device_info
->
xfb_properties
.
transformFeedbackQueries
;
vulkan_info
->
uav_read_without_format
=
features
->
shaderStorageImageReadWithoutFormat
;
vulkan_info
->
max_vertex_attrib_divisor
=
max
(
physical_device_info
->
vertex_divisor_properties
.
maxVertexAttribDivisor
,
1
);
device
->
feature_options
.
DoublePrecisionFloatShaderOps
=
features
->
shaderFloat64
;
...
...
@@ -1455,7 +1495,8 @@ static HRESULT vkd3d_init_device_caps(struct d3d12_device *device,
else
device
->
feature_options
.
ResourceBindingTier
=
D3D12_RESOURCE_BINDING_TIER_3
;
device
->
feature_options
.
TypedUAVLoadAdditionalFormats
=
features
->
shaderStorageImageExtendedFormats
;
device
->
feature_options
.
TypedUAVLoadAdditionalFormats
=
features
->
shaderStorageImageReadWithoutFormat
&&
d3d12_device_supports_typed_uav_load_additional_formats
(
device
);
/* GL_INTEL_fragment_shader_ordering, no Vulkan equivalent. */
device
->
feature_options
.
ROVsSupported
=
FALSE
;
/* GL_INTEL_conservative_rasterization, no Vulkan equivalent. */
...
...
libs/vkd3d/libs/vkd3d/state.c
View file @
c3025582
...
...
@@ -1944,6 +1944,13 @@ struct d3d12_pipeline_state *unsafe_impl_from_ID3D12PipelineState(ID3D12Pipeline
return
impl_from_ID3D12PipelineState
(
iface
);
}
static
inline
unsigned
int
typed_uav_compile_option
(
const
struct
d3d12_device
*
device
)
{
return
device
->
vk_info
.
uav_read_without_format
?
VKD3D_SHADER_COMPILE_OPTION_TYPED_UAV_READ_FORMAT_UNKNOWN
:
VKD3D_SHADER_COMPILE_OPTION_TYPED_UAV_READ_FORMAT_R32
;
}
static
HRESULT
create_shader_stage
(
struct
d3d12_device
*
device
,
struct
VkPipelineShaderStageCreateInfo
*
stage_desc
,
enum
VkShaderStageFlagBits
stage
,
const
D3D12_SHADER_BYTECODE
*
code
,
const
struct
vkd3d_shader_interface_info
*
shader_interface
)
...
...
@@ -1955,9 +1962,10 @@ static HRESULT create_shader_stage(struct d3d12_device *device,
VkResult
vr
;
int
ret
;
static
const
struct
vkd3d_shader_compile_option
options
[]
=
const
struct
vkd3d_shader_compile_option
options
[]
=
{
{
VKD3D_SHADER_COMPILE_OPTION_API_VERSION
,
VKD3D_SHADER_API_VERSION_1_4
},
{
VKD3D_SHADER_COMPILE_OPTION_API_VERSION
,
VKD3D_SHADER_API_VERSION_1_5
},
{
VKD3D_SHADER_COMPILE_OPTION_TYPED_UAV
,
typed_uav_compile_option
(
device
)},
};
stage_desc
->
sType
=
VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
;
...
...
@@ -2001,14 +2009,15 @@ static HRESULT create_shader_stage(struct d3d12_device *device,
return
S_OK
;
}
static
int
vkd3d_scan_dxbc
(
const
D3D12_SHADER_BYTECODE
*
code
,
static
int
vkd3d_scan_dxbc
(
const
struct
d3d12_device
*
device
,
const
D3D12_SHADER_BYTECODE
*
code
,
struct
vkd3d_shader_scan_descriptor_info
*
descriptor_info
)
{
struct
vkd3d_shader_compile_info
compile_info
;
static
const
struct
vkd3d_shader_compile_option
options
[]
=
const
struct
vkd3d_shader_compile_option
options
[]
=
{
{
VKD3D_SHADER_COMPILE_OPTION_API_VERSION
,
VKD3D_SHADER_API_VERSION_1_4
},
{
VKD3D_SHADER_COMPILE_OPTION_API_VERSION
,
VKD3D_SHADER_API_VERSION_1_5
},
{
VKD3D_SHADER_COMPILE_OPTION_TYPED_UAV
,
typed_uav_compile_option
(
device
)},
};
compile_info
.
type
=
VKD3D_SHADER_STRUCTURE_TYPE_COMPILE_INFO
;
...
...
@@ -2170,7 +2179,7 @@ static HRESULT d3d12_pipeline_state_find_and_init_uav_counters(struct d3d12_pipe
shader_info
.
type
=
VKD3D_SHADER_STRUCTURE_TYPE_SCAN_DESCRIPTOR_INFO
;
shader_info
.
next
=
NULL
;
if
((
ret
=
vkd3d_scan_dxbc
(
code
,
&
shader_info
))
<
0
)
if
((
ret
=
vkd3d_scan_dxbc
(
device
,
code
,
&
shader_info
))
<
0
)
{
WARN
(
"Failed to scan shader bytecode, stage %#x, vkd3d result %d.
\n
"
,
stage_flags
,
ret
);
return
hresult_from_vkd3d_result
(
ret
);
...
...
libs/vkd3d/libs/vkd3d/vkd3d_private.h
View file @
c3025582
...
...
@@ -143,6 +143,8 @@ struct vkd3d_vulkan_info
bool
rasterization_stream
;
bool
transform_feedback_queries
;
bool
uav_read_without_format
;
bool
vertex_attrib_zero_divisor
;
unsigned
int
max_vertex_attrib_divisor
;
...
...
@@ -346,7 +348,6 @@ struct vkd3d_fence_worker
union
vkd3d_thread_handle
thread
;
struct
vkd3d_mutex
mutex
;
struct
vkd3d_cond
cond
;
struct
vkd3d_cond
fence_destruction_cond
;
bool
should_exit
;
size_t
fence_count
;
...
...
@@ -529,7 +530,7 @@ struct d3d12_fence
{
uint64_t
value
;
HANDLE
event
;
bool
latch
;
bool
*
latch
;
}
*
events
;
size_t
events_size
;
size_t
event_count
;
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment