[spirv-reader][ir] Fix up mixed-sign binary instructions.
Several of the SPIR-V binary instructions allow their operand and result
types to differ in signedness, which WGSL does not allow. Introduce SPIR-V
dialect builtins for these instructions (add, sub, mul, s_div, s_mod) and
insert bitcasts as needed when lowering them to the core IR.
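
For example (taken from the lowering tests below), a mixed-sign add with a
signed result

    %2:i32 = spirv.add<i32> 10u, 50i

is lowered by operating in the type of the first operand and bitcasting the
other operand and the result:

    %2:u32 = bitcast 50i
    %3:u32 = add 10u, %2
    %4:i32 = bitcast %3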
Bug: 42250952
Change-Id: I417b4407dc46dfcd0969e6c024d7606eb3b1256a
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/225955
Auto-Submit: dan sinclair <dsinclair@chromium.org>
Reviewed-by: James Price <jrprice@google.com>
diff --git a/src/tint/lang/spirv/builtin_fn.cc b/src/tint/lang/spirv/builtin_fn.cc
index 3147999..f06f2ec 100644
--- a/src/tint/lang/spirv/builtin_fn.cc
+++ b/src/tint/lang/spirv/builtin_fn.cc
@@ -154,6 +154,16 @@
return "bit_field_s_extract";
case BuiltinFn::kBitFieldUExtract:
return "bit_field_u_extract";
+ case BuiltinFn::kAdd:
+ return "add";
+ case BuiltinFn::kSub:
+ return "sub";
+ case BuiltinFn::kMul:
+ return "mul";
+ case BuiltinFn::kSDiv:
+ return "s_div";
+ case BuiltinFn::kSMod:
+ return "s_mod";
case BuiltinFn::kSdot:
return "sdot";
case BuiltinFn::kUdot:
@@ -239,6 +249,11 @@
case BuiltinFn::kBitFieldInsert:
case BuiltinFn::kBitFieldSExtract:
case BuiltinFn::kBitFieldUExtract:
+ case BuiltinFn::kAdd:
+ case BuiltinFn::kSub:
+ case BuiltinFn::kMul:
+ case BuiltinFn::kSDiv:
+ case BuiltinFn::kSMod:
break;
}
return core::ir::Instruction::Accesses{};
diff --git a/src/tint/lang/spirv/builtin_fn.cc.tmpl b/src/tint/lang/spirv/builtin_fn.cc.tmpl
index 03873a2..20de8cc 100644
--- a/src/tint/lang/spirv/builtin_fn.cc.tmpl
+++ b/src/tint/lang/spirv/builtin_fn.cc.tmpl
@@ -98,6 +98,11 @@
case BuiltinFn::kBitFieldInsert:
case BuiltinFn::kBitFieldSExtract:
case BuiltinFn::kBitFieldUExtract:
+ case BuiltinFn::kAdd:
+ case BuiltinFn::kSub:
+ case BuiltinFn::kMul:
+ case BuiltinFn::kSDiv:
+ case BuiltinFn::kSMod:
break;
}
return core::ir::Instruction::Accesses{};
diff --git a/src/tint/lang/spirv/builtin_fn.h b/src/tint/lang/spirv/builtin_fn.h
index c7160bf..d6f9f03 100644
--- a/src/tint/lang/spirv/builtin_fn.h
+++ b/src/tint/lang/spirv/builtin_fn.h
@@ -104,6 +104,11 @@
kBitFieldInsert,
kBitFieldSExtract,
kBitFieldUExtract,
+ kAdd,
+ kSub,
+ kMul,
+ kSDiv,
+ kSMod,
kSdot,
kUdot,
kCooperativeMatrixLoad,
diff --git a/src/tint/lang/spirv/intrinsic/data.cc b/src/tint/lang/spirv/intrinsic/data.cc
index 386073b..27d3c1d 100644
--- a/src/tint/lang/spirv/intrinsic/data.cc
+++ b/src/tint/lang/spirv/intrinsic/data.cc
@@ -3724,104 +3724,104 @@
},
{
/* [51] */
- /* name */ "T",
+ /* name */ "R",
/* matcher_indices */ MatcherIndicesIndex(225),
/* kind */ TemplateInfo::Kind::kType,
},
{
/* [52] */
- /* name */ "U",
- /* matcher_indices */ MatcherIndicesIndex(114),
+ /* name */ "A",
+ /* matcher_indices */ MatcherIndicesIndex(225),
/* kind */ TemplateInfo::Kind::kType,
},
{
/* [53] */
- /* name */ "S",
- /* matcher_indices */ MatcherIndicesIndex(72),
- /* kind */ TemplateInfo::Kind::kNumber,
- },
- {
- /* [54] */
- /* name */ "A",
- /* matcher_indices */ MatcherIndicesIndex(44),
- /* kind */ TemplateInfo::Kind::kType,
- },
- {
- /* [55] */
/* name */ "B",
/* matcher_indices */ MatcherIndicesIndex(225),
/* kind */ TemplateInfo::Kind::kType,
},
{
- /* [56] */
- /* name */ "C",
- /* matcher_indices */ MatcherIndicesIndex(225),
- /* kind */ TemplateInfo::Kind::kType,
- },
- {
- /* [57] */
- /* name */ "I",
- /* matcher_indices */ MatcherIndicesIndex(225),
- /* kind */ TemplateInfo::Kind::kType,
- },
- {
- /* [58] */
- /* name */ "C",
- /* matcher_indices */ MatcherIndicesIndex(225),
- /* kind */ TemplateInfo::Kind::kType,
- },
- {
- /* [59] */
- /* name */ "S",
- /* matcher_indices */ MatcherIndicesIndex(225),
- /* kind */ TemplateInfo::Kind::kType,
- },
- {
- /* [60] */
- /* name */ "T",
- /* matcher_indices */ MatcherIndicesIndex(227),
- /* kind */ TemplateInfo::Kind::kType,
- },
- {
- /* [61] */
- /* name */ "C",
- /* matcher_indices */ MatcherIndicesIndex(225),
- /* kind */ TemplateInfo::Kind::kType,
- },
- {
- /* [62] */
- /* name */ "D",
- /* matcher_indices */ MatcherIndicesIndex(225),
- /* kind */ TemplateInfo::Kind::kType,
- },
- {
- /* [63] */
- /* name */ "T",
- /* matcher_indices */ MatcherIndicesIndex(226),
- /* kind */ TemplateInfo::Kind::kType,
- },
- {
- /* [64] */
+ /* [54] */
/* name */ "N",
/* matcher_indices */ MatcherIndicesIndex(/* invalid */),
/* kind */ TemplateInfo::Kind::kNumber,
},
{
- /* [65] */
- /* name */ "M",
- /* matcher_indices */ MatcherIndicesIndex(/* invalid */),
+ /* [55] */
+ /* name */ "T",
+ /* matcher_indices */ MatcherIndicesIndex(225),
+ /* kind */ TemplateInfo::Kind::kType,
+ },
+ {
+ /* [56] */
+ /* name */ "U",
+ /* matcher_indices */ MatcherIndicesIndex(114),
+ /* kind */ TemplateInfo::Kind::kType,
+ },
+ {
+ /* [57] */
+ /* name */ "S",
+ /* matcher_indices */ MatcherIndicesIndex(72),
/* kind */ TemplateInfo::Kind::kNumber,
},
{
+ /* [58] */
+ /* name */ "A",
+ /* matcher_indices */ MatcherIndicesIndex(44),
+ /* kind */ TemplateInfo::Kind::kType,
+ },
+ {
+ /* [59] */
+ /* name */ "B",
+ /* matcher_indices */ MatcherIndicesIndex(225),
+ /* kind */ TemplateInfo::Kind::kType,
+ },
+ {
+ /* [60] */
+ /* name */ "C",
+ /* matcher_indices */ MatcherIndicesIndex(225),
+ /* kind */ TemplateInfo::Kind::kType,
+ },
+ {
+ /* [61] */
+ /* name */ "I",
+ /* matcher_indices */ MatcherIndicesIndex(225),
+ /* kind */ TemplateInfo::Kind::kType,
+ },
+ {
+ /* [62] */
+ /* name */ "C",
+ /* matcher_indices */ MatcherIndicesIndex(225),
+ /* kind */ TemplateInfo::Kind::kType,
+ },
+ {
+ /* [63] */
+ /* name */ "S",
+ /* matcher_indices */ MatcherIndicesIndex(225),
+ /* kind */ TemplateInfo::Kind::kType,
+ },
+ {
+ /* [64] */
+ /* name */ "T",
+ /* matcher_indices */ MatcherIndicesIndex(227),
+ /* kind */ TemplateInfo::Kind::kType,
+ },
+ {
+ /* [65] */
+ /* name */ "C",
+ /* matcher_indices */ MatcherIndicesIndex(225),
+ /* kind */ TemplateInfo::Kind::kType,
+ },
+ {
/* [66] */
- /* name */ "R",
+ /* name */ "D",
/* matcher_indices */ MatcherIndicesIndex(225),
/* kind */ TemplateInfo::Kind::kType,
},
{
/* [67] */
/* name */ "T",
- /* matcher_indices */ MatcherIndicesIndex(225),
+ /* matcher_indices */ MatcherIndicesIndex(226),
/* kind */ TemplateInfo::Kind::kType,
},
{
@@ -3832,84 +3832,108 @@
},
{
/* [69] */
- /* name */ "T",
- /* matcher_indices */ MatcherIndicesIndex(226),
- /* kind */ TemplateInfo::Kind::kType,
- },
- {
- /* [70] */
- /* name */ "N",
+ /* name */ "M",
/* matcher_indices */ MatcherIndicesIndex(/* invalid */),
/* kind */ TemplateInfo::Kind::kNumber,
},
{
- /* [71] */
- /* name */ "S",
- /* matcher_indices */ MatcherIndicesIndex(202),
- /* kind */ TemplateInfo::Kind::kNumber,
- },
- {
- /* [72] */
- /* name */ "T",
- /* matcher_indices */ MatcherIndicesIndex(226),
- /* kind */ TemplateInfo::Kind::kType,
- },
- {
- /* [73] */
+ /* [70] */
/* name */ "R",
/* matcher_indices */ MatcherIndicesIndex(225),
/* kind */ TemplateInfo::Kind::kType,
},
{
- /* [74] */
- /* name */ "S",
- /* matcher_indices */ MatcherIndicesIndex(202),
- /* kind */ TemplateInfo::Kind::kNumber,
- },
- {
- /* [75] */
- /* name */ "I",
- /* matcher_indices */ MatcherIndicesIndex(114),
- /* kind */ TemplateInfo::Kind::kType,
- },
- {
- /* [76] */
- /* name */ "A",
- /* matcher_indices */ MatcherIndicesIndex(/* invalid */),
- /* kind */ TemplateInfo::Kind::kNumber,
- },
- {
- /* [77] */
- /* name */ "F",
- /* matcher_indices */ MatcherIndicesIndex(/* invalid */),
- /* kind */ TemplateInfo::Kind::kNumber,
- },
- {
- /* [78] */
- /* name */ "A",
- /* matcher_indices */ MatcherIndicesIndex(/* invalid */),
- /* kind */ TemplateInfo::Kind::kNumber,
- },
- {
- /* [79] */
+ /* [71] */
/* name */ "T",
- /* matcher_indices */ MatcherIndicesIndex(227),
+ /* matcher_indices */ MatcherIndicesIndex(225),
/* kind */ TemplateInfo::Kind::kType,
},
{
- /* [80] */
- /* name */ "S",
- /* matcher_indices */ MatcherIndicesIndex(229),
- /* kind */ TemplateInfo::Kind::kType,
+ /* [72] */
+ /* name */ "N",
+ /* matcher_indices */ MatcherIndicesIndex(/* invalid */),
+ /* kind */ TemplateInfo::Kind::kNumber,
},
{
- /* [81] */
+ /* [73] */
/* name */ "T",
/* matcher_indices */ MatcherIndicesIndex(226),
/* kind */ TemplateInfo::Kind::kType,
},
{
+ /* [74] */
+ /* name */ "N",
+ /* matcher_indices */ MatcherIndicesIndex(/* invalid */),
+ /* kind */ TemplateInfo::Kind::kNumber,
+ },
+ {
+ /* [75] */
+ /* name */ "S",
+ /* matcher_indices */ MatcherIndicesIndex(202),
+ /* kind */ TemplateInfo::Kind::kNumber,
+ },
+ {
+ /* [76] */
+ /* name */ "T",
+ /* matcher_indices */ MatcherIndicesIndex(226),
+ /* kind */ TemplateInfo::Kind::kType,
+ },
+ {
+ /* [77] */
+ /* name */ "R",
+ /* matcher_indices */ MatcherIndicesIndex(225),
+ /* kind */ TemplateInfo::Kind::kType,
+ },
+ {
+ /* [78] */
+ /* name */ "S",
+ /* matcher_indices */ MatcherIndicesIndex(202),
+ /* kind */ TemplateInfo::Kind::kNumber,
+ },
+ {
+ /* [79] */
+ /* name */ "I",
+ /* matcher_indices */ MatcherIndicesIndex(114),
+ /* kind */ TemplateInfo::Kind::kType,
+ },
+ {
+ /* [80] */
+ /* name */ "A",
+ /* matcher_indices */ MatcherIndicesIndex(/* invalid */),
+ /* kind */ TemplateInfo::Kind::kNumber,
+ },
+ {
+ /* [81] */
+ /* name */ "F",
+ /* matcher_indices */ MatcherIndicesIndex(/* invalid */),
+ /* kind */ TemplateInfo::Kind::kNumber,
+ },
+ {
/* [82] */
+ /* name */ "A",
+ /* matcher_indices */ MatcherIndicesIndex(/* invalid */),
+ /* kind */ TemplateInfo::Kind::kNumber,
+ },
+ {
+ /* [83] */
+ /* name */ "T",
+ /* matcher_indices */ MatcherIndicesIndex(227),
+ /* kind */ TemplateInfo::Kind::kType,
+ },
+ {
+ /* [84] */
+ /* name */ "S",
+ /* matcher_indices */ MatcherIndicesIndex(229),
+ /* kind */ TemplateInfo::Kind::kType,
+ },
+ {
+ /* [85] */
+ /* name */ "T",
+ /* matcher_indices */ MatcherIndicesIndex(226),
+ /* kind */ TemplateInfo::Kind::kType,
+ },
+ {
+ /* [86] */
/* name */ "S",
/* matcher_indices */ MatcherIndicesIndex(202),
/* kind */ TemplateInfo::Kind::kNumber,
@@ -3959,7 +3983,7 @@
/* num_parameters */ 5,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(60),
+ /* templates */ TemplateIndex(64),
/* parameters */ ParameterIndex(60),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -3970,7 +3994,7 @@
/* num_parameters */ 4,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(60),
+ /* templates */ TemplateIndex(64),
/* parameters */ ParameterIndex(185),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -4003,7 +4027,7 @@
/* num_parameters */ 4,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(60),
+ /* templates */ TemplateIndex(64),
/* parameters */ ParameterIndex(189),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -4014,7 +4038,7 @@
/* num_parameters */ 5,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(60),
+ /* templates */ TemplateIndex(64),
/* parameters */ ParameterIndex(65),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -4047,7 +4071,7 @@
/* num_parameters */ 5,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(60),
+ /* templates */ TemplateIndex(64),
/* parameters */ ParameterIndex(70),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -4058,7 +4082,7 @@
/* num_parameters */ 4,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(60),
+ /* templates */ TemplateIndex(64),
/* parameters */ ParameterIndex(193),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -4201,7 +4225,7 @@
/* num_parameters */ 5,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(60),
+ /* templates */ TemplateIndex(64),
/* parameters */ ParameterIndex(60),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -4212,7 +4236,7 @@
/* num_parameters */ 5,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(60),
+ /* templates */ TemplateIndex(64),
/* parameters */ ParameterIndex(75),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -4223,7 +4247,7 @@
/* num_parameters */ 6,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(60),
+ /* templates */ TemplateIndex(64),
/* parameters */ ParameterIndex(10),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -4256,7 +4280,7 @@
/* num_parameters */ 5,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(60),
+ /* templates */ TemplateIndex(64),
/* parameters */ ParameterIndex(65),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -4267,7 +4291,7 @@
/* num_parameters */ 6,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(60),
+ /* templates */ TemplateIndex(64),
/* parameters */ ParameterIndex(16),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -4300,7 +4324,7 @@
/* num_parameters */ 5,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(60),
+ /* templates */ TemplateIndex(64),
/* parameters */ ParameterIndex(70),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -4311,7 +4335,7 @@
/* num_parameters */ 6,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(60),
+ /* templates */ TemplateIndex(64),
/* parameters */ ParameterIndex(22),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -4564,7 +4588,7 @@
/* num_parameters */ 1,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(77),
+ /* templates */ TemplateIndex(81),
/* parameters */ ParameterIndex(285),
/* return_matcher_indices */ MatcherIndicesIndex(114),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -4575,7 +4599,7 @@
/* num_parameters */ 1,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(77),
+ /* templates */ TemplateIndex(81),
/* parameters */ ParameterIndex(288),
/* return_matcher_indices */ MatcherIndicesIndex(205),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -4586,7 +4610,7 @@
/* num_parameters */ 1,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(77),
+ /* templates */ TemplateIndex(81),
/* parameters */ ParameterIndex(291),
/* return_matcher_indices */ MatcherIndicesIndex(207),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -4597,7 +4621,7 @@
/* num_parameters */ 1,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(77),
+ /* templates */ TemplateIndex(81),
/* parameters */ ParameterIndex(294),
/* return_matcher_indices */ MatcherIndicesIndex(207),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5147,7 +5171,7 @@
/* num_parameters */ 2,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(79),
+ /* templates */ TemplateIndex(83),
/* parameters */ ParameterIndex(137),
/* return_matcher_indices */ MatcherIndicesIndex(83),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5158,7 +5182,7 @@
/* num_parameters */ 2,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(79),
+ /* templates */ TemplateIndex(83),
/* parameters */ ParameterIndex(325),
/* return_matcher_indices */ MatcherIndicesIndex(86),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5169,7 +5193,7 @@
/* num_parameters */ 2,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(79),
+ /* templates */ TemplateIndex(83),
/* parameters */ ParameterIndex(327),
/* return_matcher_indices */ MatcherIndicesIndex(89),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5180,7 +5204,7 @@
/* num_parameters */ 2,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(79),
+ /* templates */ TemplateIndex(83),
/* parameters */ ParameterIndex(329),
/* return_matcher_indices */ MatcherIndicesIndex(92),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5191,7 +5215,7 @@
/* num_parameters */ 2,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(79),
+ /* templates */ TemplateIndex(83),
/* parameters */ ParameterIndex(331),
/* return_matcher_indices */ MatcherIndicesIndex(95),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5202,7 +5226,7 @@
/* num_parameters */ 2,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(79),
+ /* templates */ TemplateIndex(83),
/* parameters */ ParameterIndex(333),
/* return_matcher_indices */ MatcherIndicesIndex(98),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5213,7 +5237,7 @@
/* num_parameters */ 2,
/* num_explicit_templates */ 0,
/* num_templates */ 1,
- /* templates */ TemplateIndex(80),
+ /* templates */ TemplateIndex(84),
/* parameters */ ParameterIndex(335),
/* return_matcher_indices */ MatcherIndicesIndex(179),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5224,7 +5248,7 @@
/* num_parameters */ 2,
/* num_explicit_templates */ 0,
/* num_templates */ 1,
- /* templates */ TemplateIndex(80),
+ /* templates */ TemplateIndex(84),
/* parameters */ ParameterIndex(337),
/* return_matcher_indices */ MatcherIndicesIndex(185),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5235,7 +5259,7 @@
/* num_parameters */ 2,
/* num_explicit_templates */ 0,
/* num_templates */ 1,
- /* templates */ TemplateIndex(80),
+ /* templates */ TemplateIndex(84),
/* parameters */ ParameterIndex(339),
/* return_matcher_indices */ MatcherIndicesIndex(189),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5246,7 +5270,7 @@
/* num_parameters */ 2,
/* num_explicit_templates */ 0,
/* num_templates */ 1,
- /* templates */ TemplateIndex(80),
+ /* templates */ TemplateIndex(84),
/* parameters */ ParameterIndex(341),
/* return_matcher_indices */ MatcherIndicesIndex(191),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5312,7 +5336,7 @@
/* num_parameters */ 4,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(57),
+ /* templates */ TemplateIndex(61),
/* parameters */ ParameterIndex(157),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5323,7 +5347,7 @@
/* num_parameters */ 4,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(57),
+ /* templates */ TemplateIndex(61),
/* parameters */ ParameterIndex(161),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5334,7 +5358,7 @@
/* num_parameters */ 4,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(57),
+ /* templates */ TemplateIndex(61),
/* parameters */ ParameterIndex(165),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5345,7 +5369,7 @@
/* num_parameters */ 4,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(54),
+ /* templates */ TemplateIndex(58),
/* parameters */ ParameterIndex(40),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5356,7 +5380,7 @@
/* num_parameters */ 5,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(54),
+ /* templates */ TemplateIndex(58),
/* parameters */ ParameterIndex(40),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5367,7 +5391,7 @@
/* num_parameters */ 4,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(54),
+ /* templates */ TemplateIndex(58),
/* parameters */ ParameterIndex(45),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5378,7 +5402,7 @@
/* num_parameters */ 5,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(54),
+ /* templates */ TemplateIndex(58),
/* parameters */ ParameterIndex(45),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5389,7 +5413,7 @@
/* num_parameters */ 4,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(54),
+ /* templates */ TemplateIndex(58),
/* parameters */ ParameterIndex(129),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5400,7 +5424,7 @@
/* num_parameters */ 4,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(54),
+ /* templates */ TemplateIndex(58),
/* parameters */ ParameterIndex(133),
/* return_matcher_indices */ MatcherIndicesIndex(74),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5631,7 +5655,7 @@
/* num_parameters */ 1,
/* num_explicit_templates */ 1,
/* num_templates */ 3,
- /* templates */ TemplateIndex(66),
+ /* templates */ TemplateIndex(70),
/* parameters */ ParameterIndex(364),
/* return_matcher_indices */ MatcherIndicesIndex(149),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5697,7 +5721,7 @@
/* num_parameters */ 3,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(63),
+ /* templates */ TemplateIndex(67),
/* parameters */ ParameterIndex(310),
/* return_matcher_indices */ MatcherIndicesIndex(2),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5719,7 +5743,7 @@
/* num_parameters */ 2,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(63),
+ /* templates */ TemplateIndex(67),
/* parameters */ ParameterIndex(310),
/* return_matcher_indices */ MatcherIndicesIndex(2),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5741,7 +5765,7 @@
/* num_parameters */ 3,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(63),
+ /* templates */ TemplateIndex(67),
/* parameters */ ParameterIndex(313),
/* return_matcher_indices */ MatcherIndicesIndex(2),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5774,7 +5798,7 @@
/* num_parameters */ 2,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(81),
+ /* templates */ TemplateIndex(85),
/* parameters */ ParameterIndex(353),
/* return_matcher_indices */ MatcherIndicesIndex(4),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5785,7 +5809,7 @@
/* num_parameters */ 2,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(69),
+ /* templates */ TemplateIndex(73),
/* parameters */ ParameterIndex(355),
/* return_matcher_indices */ MatcherIndicesIndex(2),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5796,7 +5820,7 @@
/* num_parameters */ 2,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(72),
+ /* templates */ TemplateIndex(76),
/* parameters */ ParameterIndex(357),
/* return_matcher_indices */ MatcherIndicesIndex(4),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5829,7 +5853,7 @@
/* num_parameters */ 4,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(67),
+ /* templates */ TemplateIndex(71),
/* parameters */ ParameterIndex(273),
/* return_matcher_indices */ MatcherIndicesIndex(2),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5851,7 +5875,7 @@
/* num_parameters */ 3,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(67),
+ /* templates */ TemplateIndex(71),
/* parameters */ ParameterIndex(320),
/* return_matcher_indices */ MatcherIndicesIndex(2),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
@@ -5860,59 +5884,81 @@
/* [176] */
/* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
/* num_parameters */ 2,
+ /* num_explicit_templates */ 1,
+ /* num_templates */ 3,
+ /* templates */ TemplateIndex(51),
+ /* parameters */ ParameterIndex(7),
+ /* return_matcher_indices */ MatcherIndicesIndex(4),
+ /* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
+ },
+ {
+ /* [177] */
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* num_parameters */ 2,
+ /* num_explicit_templates */ 1,
+ /* num_templates */ 4,
+ /* templates */ TemplateIndex(51),
+ /* parameters */ ParameterIndex(351),
+ /* return_matcher_indices */ MatcherIndicesIndex(155),
+ /* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
+ },
+ {
+ /* [178] */
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* num_parameters */ 2,
/* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(75),
+ /* templates */ TemplateIndex(79),
/* parameters */ ParameterIndex(323),
/* return_matcher_indices */ MatcherIndicesIndex(114),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
},
{
- /* [177] */
+ /* [179] */
/* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
/* num_parameters */ 4,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(51),
+ /* templates */ TemplateIndex(55),
/* parameters */ ParameterIndex(125),
/* return_matcher_indices */ MatcherIndicesIndex(4),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
},
{
- /* [178] */
- /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
- /* num_parameters */ 6,
- /* num_explicit_templates */ 0,
- /* num_templates */ 3,
- /* templates */ TemplateIndex(51),
- /* parameters */ ParameterIndex(0),
- /* return_matcher_indices */ MatcherIndicesIndex(4),
- /* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
- },
- {
- /* [179] */
- /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
- /* num_parameters */ 3,
- /* num_explicit_templates */ 0,
- /* num_templates */ 3,
- /* templates */ TemplateIndex(51),
- /* parameters */ ParameterIndex(0),
- /* return_matcher_indices */ MatcherIndicesIndex(4),
- /* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
- },
- {
/* [180] */
/* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* num_parameters */ 6,
+ /* num_explicit_templates */ 0,
+ /* num_templates */ 3,
+ /* templates */ TemplateIndex(55),
+ /* parameters */ ParameterIndex(0),
+ /* return_matcher_indices */ MatcherIndicesIndex(4),
+ /* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
+ },
+ {
+ /* [181] */
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* num_parameters */ 3,
+ /* num_explicit_templates */ 0,
+ /* num_templates */ 3,
+ /* templates */ TemplateIndex(55),
+ /* parameters */ ParameterIndex(0),
+ /* return_matcher_indices */ MatcherIndicesIndex(4),
+ /* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
+ },
+ {
+ /* [182] */
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
/* num_parameters */ 4,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(51),
+ /* templates */ TemplateIndex(55),
/* parameters */ ParameterIndex(125),
/* return_matcher_indices */ MatcherIndicesIndex(/* invalid */),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
},
{
- /* [181] */
+ /* [183] */
/* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
/* num_parameters */ 2,
/* num_explicit_templates */ 0,
@@ -5923,7 +5969,7 @@
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
},
{
- /* [182] */
+ /* [184] */
/* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
/* num_parameters */ 2,
/* num_explicit_templates */ 0,
@@ -5934,36 +5980,14 @@
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
},
{
- /* [183] */
- /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
- /* num_parameters */ 2,
- /* num_explicit_templates */ 0,
- /* num_templates */ 3,
- /* templates */ TemplateIndex(63),
- /* parameters */ ParameterIndex(347),
- /* return_matcher_indices */ MatcherIndicesIndex(59),
- /* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
- },
- {
- /* [184] */
- /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
- /* num_parameters */ 2,
- /* num_explicit_templates */ 0,
- /* num_templates */ 3,
- /* templates */ TemplateIndex(63),
- /* parameters */ ParameterIndex(349),
- /* return_matcher_indices */ MatcherIndicesIndex(149),
- /* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
- },
- {
/* [185] */
/* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
/* num_parameters */ 2,
/* num_explicit_templates */ 0,
/* num_templates */ 3,
- /* templates */ TemplateIndex(63),
- /* parameters */ ParameterIndex(315),
- /* return_matcher_indices */ MatcherIndicesIndex(149),
+ /* templates */ TemplateIndex(67),
+ /* parameters */ ParameterIndex(347),
+ /* return_matcher_indices */ MatcherIndicesIndex(59),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
},
{
@@ -5971,14 +5995,36 @@
/* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
/* num_parameters */ 2,
/* num_explicit_templates */ 0,
+ /* num_templates */ 3,
+ /* templates */ TemplateIndex(67),
+ /* parameters */ ParameterIndex(349),
+ /* return_matcher_indices */ MatcherIndicesIndex(149),
+ /* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
+ },
+ {
+ /* [187] */
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* num_parameters */ 2,
+ /* num_explicit_templates */ 0,
+ /* num_templates */ 3,
+ /* templates */ TemplateIndex(67),
+ /* parameters */ ParameterIndex(315),
+ /* return_matcher_indices */ MatcherIndicesIndex(149),
+ /* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
+ },
+ {
+ /* [188] */
+ /* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
+ /* num_parameters */ 2,
+ /* num_explicit_templates */ 0,
/* num_templates */ 2,
- /* templates */ TemplateIndex(63),
+ /* templates */ TemplateIndex(67),
/* parameters */ ParameterIndex(311),
/* return_matcher_indices */ MatcherIndicesIndex(2),
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
},
{
- /* [187] */
+ /* [189] */
/* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
/* num_parameters */ 3,
/* num_explicit_templates */ 0,
@@ -5989,7 +6035,7 @@
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
},
{
- /* [188] */
+ /* [190] */
/* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsVertexPipeline, OverloadFlag::kSupportsFragmentPipeline, OverloadFlag::kSupportsComputePipeline),
/* num_parameters */ 3,
/* num_explicit_templates */ 0,
@@ -6000,7 +6046,7 @@
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
},
{
- /* [189] */
+ /* [191] */
/* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsComputePipeline),
/* num_parameters */ 4,
/* num_explicit_templates */ 1,
@@ -6011,7 +6057,7 @@
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
},
{
- /* [190] */
+ /* [192] */
/* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsComputePipeline),
/* num_parameters */ 5,
/* num_explicit_templates */ 0,
@@ -6022,7 +6068,7 @@
/* const_eval_fn */ ConstEvalFunctionIndex(/* invalid */),
},
{
- /* [191] */
+ /* [193] */
/* flags */ OverloadFlags(OverloadFlag::kIsBuiltin, OverloadFlag::kSupportsComputePipeline, OverloadFlag::kMustUse),
/* num_parameters */ 4,
/* num_explicit_templates */ 0,
@@ -6042,91 +6088,91 @@
/* [0] */
/* fn array_length[I : u32, A : access](ptr<storage, struct_with_runtime_array, A>, I) -> u32 */
/* num overloads */ 1,
- /* overloads */ OverloadIndex(176),
+ /* overloads */ OverloadIndex(178),
},
{
/* [1] */
/* fn atomic_and[T : iu32, U : u32, S : workgroup_or_storage](ptr<S, atomic<T>, read_write>, U, U, T) -> T */
/* num overloads */ 1,
- /* overloads */ OverloadIndex(177),
+ /* overloads */ OverloadIndex(179),
},
{
/* [2] */
/* fn atomic_compare_exchange[T : iu32, U : u32, S : workgroup_or_storage](ptr<S, atomic<T>, read_write>, U, U, U, T, T) -> T */
/* num overloads */ 1,
- /* overloads */ OverloadIndex(178),
+ /* overloads */ OverloadIndex(180),
},
{
/* [3] */
/* fn atomic_exchange[T : iu32, U : u32, S : workgroup_or_storage](ptr<S, atomic<T>, read_write>, U, U, T) -> T */
/* num overloads */ 1,
- /* overloads */ OverloadIndex(177),
+ /* overloads */ OverloadIndex(179),
},
{
/* [4] */
/* fn atomic_iadd[T : iu32, U : u32, S : workgroup_or_storage](ptr<S, atomic<T>, read_write>, U, U, T) -> T */
/* num overloads */ 1,
- /* overloads */ OverloadIndex(177),
+ /* overloads */ OverloadIndex(179),
},
{
/* [5] */
/* fn atomic_isub[T : iu32, U : u32, S : workgroup_or_storage](ptr<S, atomic<T>, read_write>, U, U, T) -> T */
/* num overloads */ 1,
- /* overloads */ OverloadIndex(177),
+ /* overloads */ OverloadIndex(179),
},
{
/* [6] */
/* fn atomic_load[T : iu32, U : u32, S : workgroup_or_storage](ptr<S, atomic<T>, read_write>, U, U) -> T */
/* num overloads */ 1,
- /* overloads */ OverloadIndex(179),
+ /* overloads */ OverloadIndex(181),
},
{
/* [7] */
/* fn atomic_or[T : iu32, U : u32, S : workgroup_or_storage](ptr<S, atomic<T>, read_write>, U, U, T) -> T */
/* num overloads */ 1,
- /* overloads */ OverloadIndex(177),
+ /* overloads */ OverloadIndex(179),
},
{
/* [8] */
/* fn atomic_smax[T : iu32, U : u32, S : workgroup_or_storage](ptr<S, atomic<T>, read_write>, U, U, T) -> T */
/* num overloads */ 1,
- /* overloads */ OverloadIndex(177),
+ /* overloads */ OverloadIndex(179),
},
{
/* [9] */
/* fn atomic_smin[T : iu32, U : u32, S : workgroup_or_storage](ptr<S, atomic<T>, read_write>, U, U, T) -> T */
/* num overloads */ 1,
- /* overloads */ OverloadIndex(177),
+ /* overloads */ OverloadIndex(179),
},
{
/* [10] */
/* fn atomic_store[T : iu32, U : u32, S : workgroup_or_storage](ptr<S, atomic<T>, read_write>, U, U, T) */
/* num overloads */ 1,
- /* overloads */ OverloadIndex(180),
+ /* overloads */ OverloadIndex(182),
},
{
/* [11] */
/* fn atomic_umax[T : iu32, U : u32, S : workgroup_or_storage](ptr<S, atomic<T>, read_write>, U, U, T) -> T */
/* num overloads */ 1,
- /* overloads */ OverloadIndex(177),
+ /* overloads */ OverloadIndex(179),
},
{
/* [12] */
/* fn atomic_umin[T : iu32, U : u32, S : workgroup_or_storage](ptr<S, atomic<T>, read_write>, U, U, T) -> T */
/* num overloads */ 1,
- /* overloads */ OverloadIndex(177),
+ /* overloads */ OverloadIndex(179),
},
{
/* [13] */
/* fn atomic_xor[T : iu32, U : u32, S : workgroup_or_storage](ptr<S, atomic<T>, read_write>, U, U, T) -> T */
/* num overloads */ 1,
- /* overloads */ OverloadIndex(177),
+ /* overloads */ OverloadIndex(179),
},
{
/* [14] */
/* fn dot[N : num, T : f32_f16](vec<N, T>, vec<N, T>) -> T */
/* num overloads */ 1,
- /* overloads */ OverloadIndex(181),
+ /* overloads */ OverloadIndex(183),
},
{
/* [15] */
@@ -6324,19 +6370,19 @@
/* [26] */
/* fn matrix_times_matrix[T : f32_f16, K : num, C : num, R : num](mat<K, R, T>, mat<C, K, T>) -> mat<C, R, T> */
/* num overloads */ 1,
- /* overloads */ OverloadIndex(182),
+ /* overloads */ OverloadIndex(184),
},
{
/* [27] */
/* fn matrix_times_scalar[T : f32_f16, N : num, M : num](mat<N, M, T>, T) -> mat<N, M, T> */
/* num overloads */ 1,
- /* overloads */ OverloadIndex(183),
+ /* overloads */ OverloadIndex(185),
},
{
/* [28] */
/* fn matrix_times_vector[T : f32_f16, N : num, M : num](mat<N, M, T>, vec<N, T>) -> vec<M, T> */
/* num overloads */ 1,
- /* overloads */ OverloadIndex(184),
+ /* overloads */ OverloadIndex(186),
},
{
/* [29] */
@@ -6364,13 +6410,13 @@
/* [31] */
/* fn vector_times_matrix[T : f32_f16, N : num, M : num](vec<N, T>, mat<M, N, T>) -> vec<M, T> */
/* num overloads */ 1,
- /* overloads */ OverloadIndex(185),
+ /* overloads */ OverloadIndex(187),
},
{
/* [32] */
/* fn vector_times_scalar[T : f32_f16, N : num](vec<N, T>, T) -> vec<N, T> */
/* num overloads */ 1,
- /* overloads */ OverloadIndex(186),
+ /* overloads */ OverloadIndex(188),
},
{
/* [33] */
@@ -6536,34 +6582,69 @@
},
{
/* [56] */
- /* fn sdot(u32, u32, u32) -> i32 */
- /* num overloads */ 1,
- /* overloads */ OverloadIndex(187),
+ /* fn add<R : iu32>[A : iu32, B : iu32](A, B) -> R */
+ /* fn add<R : iu32>[A : iu32, B : iu32, N : num](vec<N, A>, vec<N, B>) -> vec<N, R> */
+ /* num overloads */ 2,
+ /* overloads */ OverloadIndex(176),
},
{
/* [57] */
- /* fn udot(u32, u32, u32) -> u32 */
- /* num overloads */ 1,
- /* overloads */ OverloadIndex(188),
+ /* fn sub<R : iu32>[A : iu32, B : iu32](A, B) -> R */
+ /* fn sub<R : iu32>[A : iu32, B : iu32, N : num](vec<N, A>, vec<N, B>) -> vec<N, R> */
+ /* num overloads */ 2,
+ /* overloads */ OverloadIndex(176),
},
{
/* [58] */
- /* fn cooperative_matrix_load<T : subgroup_matrix<K, S, C, R>>[K : subgroup_matrix_kind, S : fiu32_f16, C : num, R : num](ptr<workgroup_or_storage, S, readable>, u32, u32, u32) -> T */
+ /* fn mul<R : iu32>[A : iu32, B : iu32](A, B) -> R */
+ /* fn mul<R : iu32>[A : iu32, B : iu32, N : num](vec<N, A>, vec<N, B>) -> vec<N, R> */
+ /* num overloads */ 2,
+ /* overloads */ OverloadIndex(176),
+ },
+ {
+ /* [59] */
+ /* fn s_div<R : iu32>[A : iu32, B : iu32](A, B) -> R */
+ /* fn s_div<R : iu32>[A : iu32, B : iu32, N : num](vec<N, A>, vec<N, B>) -> vec<N, R> */
+ /* num overloads */ 2,
+ /* overloads */ OverloadIndex(176),
+ },
+ {
+ /* [60] */
+ /* fn s_mod<R : iu32>[A : iu32, B : iu32](A, B) -> R */
+ /* fn s_mod<R : iu32>[A : iu32, B : iu32, N : num](vec<N, A>, vec<N, B>) -> vec<N, R> */
+ /* num overloads */ 2,
+ /* overloads */ OverloadIndex(176),
+ },
+ {
+ /* [61] */
+ /* fn sdot(u32, u32, u32) -> i32 */
/* num overloads */ 1,
/* overloads */ OverloadIndex(189),
},
{
- /* [59] */
- /* fn cooperative_matrix_store[K : subgroup_matrix_kind, S : fiu32_f16, C : num, R : num](ptr<workgroup_or_storage, S, writable>, subgroup_matrix<K, S, C, R>, u32, u32, u32) */
+ /* [62] */
+ /* fn udot(u32, u32, u32) -> u32 */
/* num overloads */ 1,
/* overloads */ OverloadIndex(190),
},
{
- /* [60] */
- /* fn cooperative_matrix_mul_add[T : subgroup_matrix_elements, TR : subgroup_matrix_elements, C : num, R : num, K : num](subgroup_matrix<subgroup_matrix_kind_left, T, K, R>, subgroup_matrix<subgroup_matrix_kind_right, T, C, K>, subgroup_matrix<subgroup_matrix_kind_result, TR, C, R>, u32) -> subgroup_matrix<subgroup_matrix_kind_result, TR, C, R> */
+ /* [63] */
+ /* fn cooperative_matrix_load<T : subgroup_matrix<K, S, C, R>>[K : subgroup_matrix_kind, S : fiu32_f16, C : num, R : num](ptr<workgroup_or_storage, S, readable>, u32, u32, u32) -> T */
/* num overloads */ 1,
/* overloads */ OverloadIndex(191),
},
+ {
+ /* [64] */
+ /* fn cooperative_matrix_store[K : subgroup_matrix_kind, S : fiu32_f16, C : num, R : num](ptr<workgroup_or_storage, S, writable>, subgroup_matrix<K, S, C, R>, u32, u32, u32) */
+ /* num overloads */ 1,
+ /* overloads */ OverloadIndex(192),
+ },
+ {
+ /* [65] */
+ /* fn cooperative_matrix_mul_add[T : subgroup_matrix_elements, TR : subgroup_matrix_elements, C : num, R : num, K : num](subgroup_matrix<subgroup_matrix_kind_left, T, K, R>, subgroup_matrix<subgroup_matrix_kind_right, T, C, K>, subgroup_matrix<subgroup_matrix_kind_result, TR, C, R>, u32) -> subgroup_matrix<subgroup_matrix_kind_result, TR, C, R> */
+ /* num overloads */ 1,
+ /* overloads */ OverloadIndex(193),
+ },
};
// clang-format on
diff --git a/src/tint/lang/spirv/reader/lower/builtins.cc b/src/tint/lang/spirv/reader/lower/builtins.cc
index 466acd1..1cf706e 100644
--- a/src/tint/lang/spirv/reader/lower/builtins.cc
+++ b/src/tint/lang/spirv/reader/lower/builtins.cc
@@ -131,12 +131,91 @@
case spirv::BuiltinFn::kBitFieldUExtract:
BitFieldUExtract(builtin);
break;
+ case spirv::BuiltinFn::kAdd:
+ Add(builtin);
+ break;
+ case spirv::BuiltinFn::kSub:
+ Sub(builtin);
+ break;
+ case spirv::BuiltinFn::kMul:
+ Mul(builtin);
+ break;
+ case spirv::BuiltinFn::kSDiv:
+ SDiv(builtin);
+ break;
+ case spirv::BuiltinFn::kSMod:
+ SMod(builtin);
+ break;
default:
TINT_UNREACHABLE() << "unknown spirv builtin: " << builtin->Func();
}
}
}
+ void EmitBinaryWrappedAsFirstArg(spirv::ir::BuiltinCall* call, core::BinaryOp op) {
+ const auto& args = call->Args();
+ auto* lhs = args[0];
+ auto* rhs = args[1];
+
+ auto* op_ty = lhs->Type();
+ auto* res_ty = call->Result(0)->Type();
+
+ b.InsertBefore(call, [&] {
+ if (rhs->Type() != op_ty) {
+ rhs = b.Bitcast(op_ty, rhs)->Result(0);
+ }
+
+ auto* c = b.Binary(op, op_ty, lhs, rhs)->Result(0);
+ if (res_ty != op_ty) {
+ c = b.Bitcast(res_ty, c)->Result(0);
+ }
+ call->Result(0)->ReplaceAllUsesWith(c);
+ });
+ call->Destroy();
+ }
+
+ void Add(spirv::ir::BuiltinCall* call) {
+ EmitBinaryWrappedAsFirstArg(call, core::BinaryOp::kAdd);
+ }
+ void Sub(spirv::ir::BuiltinCall* call) {
+ EmitBinaryWrappedAsFirstArg(call, core::BinaryOp::kSubtract);
+ }
+ void Mul(spirv::ir::BuiltinCall* call) {
+ EmitBinaryWrappedAsFirstArg(call, core::BinaryOp::kMultiply);
+ }
+
+ void EmitBinaryWrappedSignedSpirvMethods(spirv::ir::BuiltinCall* call, core::BinaryOp op) {
+ const auto& args = call->Args();
+ auto* lhs = args[0];
+ auto* rhs = args[1];
+
+ auto* res_ty = call->Result(0)->Type();
+ auto* op_ty = ty.MatchWidth(ty.i32(), res_ty);
+
+ b.InsertBefore(call, [&] {
+ if (lhs->Type() != op_ty) {
+ lhs = b.Bitcast(op_ty, lhs)->Result(0);
+ }
+ if (rhs->Type() != op_ty) {
+ rhs = b.Bitcast(op_ty, rhs)->Result(0);
+ }
+
+ auto* c = b.Binary(op, op_ty, lhs, rhs)->Result(0);
+ if (res_ty != op_ty) {
+ c = b.Bitcast(res_ty, c)->Result(0);
+ }
+ call->Result(0)->ReplaceAllUsesWith(c);
+ });
+ call->Destroy();
+ }
+
+ void SDiv(spirv::ir::BuiltinCall* call) {
+ EmitBinaryWrappedSignedSpirvMethods(call, core::BinaryOp::kDivide);
+ }
+ void SMod(spirv::ir::BuiltinCall* call) {
+ EmitBinaryWrappedSignedSpirvMethods(call, core::BinaryOp::kModulo);
+ }
+
// The SPIR-V Signed methods all interpret their arguments as signed (regardless of the type of
// the argument). In order to satisfy this, we must bitcast any unsigned argument to a signed
// type before calling the WGSL equivalent method.
diff --git a/src/tint/lang/spirv/reader/lower/builtins_test.cc b/src/tint/lang/spirv/reader/lower/builtins_test.cc
index ef644dc..fe08291 100644
--- a/src/tint/lang/spirv/reader/lower/builtins_test.cc
+++ b/src/tint/lang/spirv/reader/lower/builtins_test.cc
@@ -4139,5 +4139,955 @@
EXPECT_EQ(expect, str());
}
+struct BinaryCase {
+ spirv::BuiltinFn fn;
+ std::string ir;
+};
+
+using SpirvParser_BuiltinsMixedSignTest = core::ir::transform::TransformTestWithParam<BinaryCase>;
+
+TEST_P(SpirvParser_BuiltinsMixedSignTest, Scalar_Signed_SignedUnsigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(
+ ty.i32(), params.fn, Vector<const core::type::Type*, 1>{ty.i32()}, 50_i, 10_u);
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:i32 = spirv.)" +
+ params.ir + R"(<i32> 50i, 10u
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:i32 = bitcast 10u
+ %3:i32 = )" + params.ir +
+ R"( 50i, %2
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsMixedSignTest, Scalar_Signed_UnsignedSigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(
+ ty.i32(), params.fn, Vector<const core::type::Type*, 1>{ty.i32()}, 10_u, 50_i);
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:i32 = spirv.)" +
+ params.ir + R"(<i32> 10u, 50i
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:u32 = bitcast 50i
+ %3:u32 = )" + params.ir +
+ R"( 10u, %2
+ %4:i32 = bitcast %3
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsMixedSignTest, Scalar_Signed_UnsignedUnsigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(
+ ty.i32(), params.fn, Vector<const core::type::Type*, 1>{ty.i32()}, 10_u, 20_u);
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:i32 = spirv.)" +
+ params.ir + R"(<i32> 10u, 20u
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:u32 = )" + params.ir +
+ R"( 10u, 20u
+ %3:i32 = bitcast %2
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsMixedSignTest, Scalar_Unsigned_SignedUnsigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(
+ ty.u32(), params.fn, Vector<const core::type::Type*, 1>{ty.u32()}, 50_i, 10_u);
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:u32 = spirv.)" +
+ params.ir + R"(<u32> 50i, 10u
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:i32 = bitcast 10u
+ %3:i32 = )" + params.ir +
+ R"( 50i, %2
+ %4:u32 = bitcast %3
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsMixedSignTest, Scalar_Unsigned_UnsignedSigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(
+ ty.u32(), params.fn, Vector<const core::type::Type*, 1>{ty.u32()}, 10_u, 50_i);
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:u32 = spirv.)" +
+ params.ir + R"(<u32> 10u, 50i
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:u32 = bitcast 50i
+ %3:u32 = )" + params.ir +
+ R"( 10u, %2
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsMixedSignTest, Scalar_Unsigned_SignedSigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(
+ ty.u32(), params.fn, Vector<const core::type::Type*, 1>{ty.u32()}, 50_i, 60_i);
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:u32 = spirv.)" +
+ params.ir + R"(<u32> 50i, 60i
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:i32 = )" + params.ir +
+ R"( 50i, 60i
+ %3:u32 = bitcast %2
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsMixedSignTest, Vector_Signed_SignedUnsigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(ty.vec2<i32>(), params.fn,
+ Vector<const core::type::Type*, 1>{ty.i32()},
+ b.Splat<vec2<i32>>(50_i), b.Splat<vec2<u32>>(10_u));
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<i32> = spirv.)" +
+ params.ir + R"(<i32> vec2<i32>(50i), vec2<u32>(10u)
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<i32> = bitcast vec2<u32>(10u)
+ %3:vec2<i32> = )" +
+ params.ir +
+ R"( vec2<i32>(50i), %2
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsMixedSignTest, Vector_Signed_UnsignedSigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(ty.vec2<i32>(), params.fn,
+ Vector<const core::type::Type*, 1>{ty.i32()},
+ b.Splat<vec2<u32>>(10_u), b.Splat<vec2<i32>>(50_i));
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<i32> = spirv.)" +
+ params.ir + R"(<i32> vec2<u32>(10u), vec2<i32>(50i)
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<u32> = bitcast vec2<i32>(50i)
+ %3:vec2<u32> = )" +
+ params.ir +
+ R"( vec2<u32>(10u), %2
+ %4:vec2<i32> = bitcast %3
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsMixedSignTest, Vector_Signed_UnsignedUnsigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(ty.vec2<i32>(), params.fn,
+ Vector<const core::type::Type*, 1>{ty.i32()},
+ b.Splat<vec2<u32>>(10_u), b.Splat<vec2<u32>>(20_u));
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<i32> = spirv.)" +
+ params.ir + R"(<i32> vec2<u32>(10u), vec2<u32>(20u)
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<u32> = )" +
+ params.ir +
+ R"( vec2<u32>(10u), vec2<u32>(20u)
+ %3:vec2<i32> = bitcast %2
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsMixedSignTest, Vector_Unsigned_SignedUnsigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(ty.vec2<u32>(), params.fn,
+ Vector<const core::type::Type*, 1>{ty.u32()},
+ b.Splat<vec2<i32>>(50_i), b.Splat<vec2<u32>>(10_u));
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<u32> = spirv.)" +
+ params.ir + R"(<u32> vec2<i32>(50i), vec2<u32>(10u)
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<i32> = bitcast vec2<u32>(10u)
+ %3:vec2<i32> = )" +
+ params.ir +
+ R"( vec2<i32>(50i), %2
+ %4:vec2<u32> = bitcast %3
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsMixedSignTest, Vector_Unsigned_UnsignedSigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(ty.vec2<u32>(), params.fn,
+ Vector<const core::type::Type*, 1>{ty.u32()},
+ b.Splat<vec2<u32>>(10_u), b.Splat<vec2<i32>>(50_i));
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<u32> = spirv.)" +
+ params.ir + R"(<u32> vec2<u32>(10u), vec2<i32>(50i)
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<u32> = bitcast vec2<i32>(50i)
+ %3:vec2<u32> = )" +
+ params.ir +
+ R"( vec2<u32>(10u), %2
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsMixedSignTest, Vector_Unsigned_SignedSigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(ty.vec2<u32>(), params.fn,
+ Vector<const core::type::Type*, 1>{ty.u32()},
+ b.Splat<vec2<i32>>(50_i), b.Splat<vec2<i32>>(60_i));
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<u32> = spirv.)" +
+ params.ir + R"(<u32> vec2<i32>(50i), vec2<i32>(60i)
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<i32> = )" +
+ params.ir +
+ R"( vec2<i32>(50i), vec2<i32>(60i)
+ %3:vec2<u32> = bitcast %2
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+INSTANTIATE_TEST_SUITE_P(SpirvReader,
+ SpirvParser_BuiltinsMixedSignTest,
+ testing::Values(BinaryCase{spirv::BuiltinFn::kAdd, "add"},
+ BinaryCase{spirv::BuiltinFn::kSub, "sub"},
+ BinaryCase{spirv::BuiltinFn::kMul, "mul"}));
+
+struct SignedBinaryCase {
+ spirv::BuiltinFn fn;
+ std::string ir;
+ std::string wgsl;
+};
+
+using SpirvParser_BuiltinsSignedTest =
+ core::ir::transform::TransformTestWithParam<SignedBinaryCase>;
+
+TEST_P(SpirvParser_BuiltinsSignedTest, Scalar_Signed_SignedUnsigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(
+ ty.i32(), params.fn, Vector<const core::type::Type*, 1>{ty.i32()}, 50_i, 10_u);
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:i32 = spirv.)" +
+ params.ir + R"(<i32> 50i, 10u
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:i32 = bitcast 10u
+ %3:i32 = )" + params.wgsl +
+ R"( 50i, %2
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsSignedTest, Scalar_Signed_UnsignedSigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(
+ ty.i32(), params.fn, Vector<const core::type::Type*, 1>{ty.i32()}, 10_u, 50_i);
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:i32 = spirv.)" +
+ params.ir + R"(<i32> 10u, 50i
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:i32 = bitcast 10u
+ %3:i32 = )" + params.wgsl +
+ R"( %2, 50i
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsSignedTest, Scalar_Signed_UnsignedUnsigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(
+ ty.i32(), params.fn, Vector<const core::type::Type*, 1>{ty.i32()}, 10_u, 20_u);
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:i32 = spirv.)" +
+ params.ir + R"(<i32> 10u, 20u
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:i32 = bitcast 10u
+ %3:i32 = bitcast 20u
+ %4:i32 = )" + params.wgsl +
+ R"( %2, %3
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsSignedTest, Scalar_Unsigned_SignedUnsigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(
+ ty.u32(), params.fn, Vector<const core::type::Type*, 1>{ty.u32()}, 50_i, 10_u);
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:u32 = spirv.)" +
+ params.ir + R"(<u32> 50i, 10u
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:i32 = bitcast 10u
+ %3:i32 = )" + params.wgsl +
+ R"( 50i, %2
+ %4:u32 = bitcast %3
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsSignedTest, Scalar_Unsigned_UnsignedSigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(
+ ty.u32(), params.fn, Vector<const core::type::Type*, 1>{ty.u32()}, 10_u, 50_i);
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:u32 = spirv.)" +
+ params.ir + R"(<u32> 10u, 50i
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:i32 = bitcast 10u
+ %3:i32 = )" + params.wgsl +
+ R"( %2, 50i
+ %4:u32 = bitcast %3
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsSignedTest, Scalar_Unsigned_SignedSigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(
+ ty.u32(), params.fn, Vector<const core::type::Type*, 1>{ty.u32()}, 50_i, 60_i);
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:u32 = spirv.)" +
+ params.ir + R"(<u32> 50i, 60i
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:i32 = )" + params.wgsl +
+ R"( 50i, 60i
+ %3:u32 = bitcast %2
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsSignedTest, Vector_Signed_SignedUnsigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(ty.vec2<i32>(), params.fn,
+ Vector<const core::type::Type*, 1>{ty.i32()},
+ b.Splat<vec2<i32>>(50_i), b.Splat<vec2<u32>>(10_u));
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<i32> = spirv.)" +
+ params.ir + R"(<i32> vec2<i32>(50i), vec2<u32>(10u)
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<i32> = bitcast vec2<u32>(10u)
+ %3:vec2<i32> = )" +
+ params.wgsl +
+ R"( vec2<i32>(50i), %2
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsSignedTest, Vector_Signed_UnsignedSigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(ty.vec2<i32>(), params.fn,
+ Vector<const core::type::Type*, 1>{ty.i32()},
+ b.Splat<vec2<u32>>(10_u), b.Splat<vec2<i32>>(50_i));
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<i32> = spirv.)" +
+ params.ir + R"(<i32> vec2<u32>(10u), vec2<i32>(50i)
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<i32> = bitcast vec2<u32>(10u)
+ %3:vec2<i32> = )" +
+ params.wgsl +
+ R"( %2, vec2<i32>(50i)
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsSignedTest, Vector_Signed_UnsignedUnsigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(ty.vec2<i32>(), params.fn,
+ Vector<const core::type::Type*, 1>{ty.i32()},
+ b.Splat<vec2<u32>>(10_u), b.Splat<vec2<u32>>(20_u));
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<i32> = spirv.)" +
+ params.ir + R"(<i32> vec2<u32>(10u), vec2<u32>(20u)
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<i32> = bitcast vec2<u32>(10u)
+ %3:vec2<i32> = bitcast vec2<u32>(20u)
+ %4:vec2<i32> = )" +
+ params.wgsl +
+ R"( %2, %3
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsSignedTest, Vector_Unsigned_SignedUnsigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(ty.vec2<u32>(), params.fn,
+ Vector<const core::type::Type*, 1>{ty.u32()},
+ b.Splat<vec2<i32>>(50_i), b.Splat<vec2<u32>>(10_u));
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<u32> = spirv.)" +
+ params.ir + R"(<u32> vec2<i32>(50i), vec2<u32>(10u)
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<i32> = bitcast vec2<u32>(10u)
+ %3:vec2<i32> = )" +
+ params.wgsl +
+ R"( vec2<i32>(50i), %2
+ %4:vec2<u32> = bitcast %3
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsSignedTest, Vector_Unsigned_UnsignedSigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(ty.vec2<u32>(), params.fn,
+ Vector<const core::type::Type*, 1>{ty.u32()},
+ b.Splat<vec2<u32>>(10_u), b.Splat<vec2<i32>>(50_i));
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<u32> = spirv.)" +
+ params.ir + R"(<u32> vec2<u32>(10u), vec2<i32>(50i)
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<i32> = bitcast vec2<u32>(10u)
+ %3:vec2<i32> = )" +
+ params.wgsl +
+ R"( %2, vec2<i32>(50i)
+ %4:vec2<u32> = bitcast %3
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+TEST_P(SpirvParser_BuiltinsSignedTest, Vector_Unsigned_SignedSigned) {
+ auto params = GetParam();
+
+ auto* ep = b.ComputeFunction("foo");
+
+ b.Append(ep->Block(), [&] { //
+ b.CallExplicit<spirv::ir::BuiltinCall>(ty.vec2<u32>(), params.fn,
+ Vector<const core::type::Type*, 1>{ty.u32()},
+ b.Splat<vec2<i32>>(50_i), b.Splat<vec2<i32>>(60_i));
+ b.Return(ep);
+ });
+
+ auto src = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<u32> = spirv.)" +
+ params.ir + R"(<u32> vec2<i32>(50i), vec2<i32>(60i)
+ ret
+ }
+}
+)";
+
+ EXPECT_EQ(src, str());
+ Run(Builtins);
+
+ auto expect = R"(
+%foo = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<i32> = )" +
+ params.wgsl +
+ R"( vec2<i32>(50i), vec2<i32>(60i)
+ %3:vec2<u32> = bitcast %2
+ ret
+ }
+}
+)";
+ EXPECT_EQ(expect, str());
+}
+
+INSTANTIATE_TEST_SUITE_P(SpirvReader,
+ SpirvParser_BuiltinsSignedTest,
+ testing::Values(SignedBinaryCase{spirv::BuiltinFn::kSDiv, "s_div", "div"},
+ SignedBinaryCase{spirv::BuiltinFn::kSMod, "s_mod",
+ "mod"}));
+
} // namespace
} // namespace tint::spirv::reader::lower
diff --git a/src/tint/lang/spirv/reader/parser/binary_test.cc b/src/tint/lang/spirv/reader/parser/binary_test.cc
index dfe3c6d..6ed458b 100644
--- a/src/tint/lang/spirv/reader/parser/binary_test.cc
+++ b/src/tint/lang/spirv/reader/parser/binary_test.cc
@@ -33,6 +33,7 @@
struct BinaryCase {
std::string spirv_type;
std::string spirv_opcode;
+ std::string ir_type;
std::string ir;
};
std::string PrintBuiltinCase(testing::TestParamInfo<BinaryCase> bc) {
@@ -79,283 +80,100 @@
OpFunctionEnd
)",
R"(
+%main = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ ret
+ }
+}
+%2 = func(%3:)" + params.ir_type +
+ ", %4:" + params.ir_type + "):" + params.ir_type + R"( {
$B2: {
)" + params.ir +
R"(
ret %5
}
+}
)");
}
-INSTANTIATE_TEST_SUITE_P(SpirvParser,
- BinaryTest,
- testing::Values(
- // OpFAdd
- BinaryCase{
- "f16",
- "OpFAdd",
- "%5:f16 = add %3, %4",
- },
- BinaryCase{
- "f32",
- "OpFAdd",
- "%5:f32 = add %3, %4",
- },
- BinaryCase{
- "vec3h",
- "OpFAdd",
- "%5:vec3<f16> = add %3, %4",
- },
- BinaryCase{
- "vec4f",
- "OpFAdd",
- "%5:vec4<f32> = add %3, %4",
- },
+INSTANTIATE_TEST_SUITE_P(
+ SpirvParser,
+ BinaryTest,
+ testing::Values(
+ // OpFAdd
+ BinaryCase{"f16", "OpFAdd", "f16", "%5:f16 = add %3, %4"},
+ BinaryCase{"f32", "OpFAdd", "f32", "%5:f32 = add %3, %4"},
+ BinaryCase{"vec3h", "OpFAdd", "vec3<f16>", "%5:vec3<f16> = add %3, %4"},
+ BinaryCase{"vec4f", "OpFAdd", "vec4<f32>", "%5:vec4<f32> = add %3, %4"},
- // OpFSub
- BinaryCase{
- "f16",
- "OpFSub",
- "%5:f16 = sub %3, %4",
- },
- BinaryCase{
- "f32",
- "OpFSub",
- "%5:f32 = sub %3, %4",
- },
- BinaryCase{
- "vec3h",
- "OpFSub",
- "%5:vec3<f16> = sub %3, %4",
- },
- BinaryCase{
- "vec4f",
- "OpFSub",
- "%5:vec4<f32> = sub %3, %4",
- },
+ // OpFSub
+ BinaryCase{"f16", "OpFSub", "f16", "%5:f16 = sub %3, %4"},
+ BinaryCase{"f32", "OpFSub", "f32", "%5:f32 = sub %3, %4"},
+ BinaryCase{"vec3h", "OpFSub", "vec3<f16>", "%5:vec3<f16> = sub %3, %4"},
+ BinaryCase{"vec4f", "OpFSub", "vec4<f32>", "%5:vec4<f32> = sub %3, %4"},
- // OpFMul
- BinaryCase{
- "f16",
- "OpFMul",
- "%5:f16 = mul %3, %4",
- },
- BinaryCase{
- "f32",
- "OpFMul",
- "%5:f32 = mul %3, %4",
- },
- BinaryCase{
- "vec3h",
- "OpFMul",
- "%5:vec3<f16> = mul %3, %4",
- },
- BinaryCase{
- "vec4f",
- "OpFMul",
- "%5:vec4<f32> = mul %3, %4",
- },
+ // OpFMul
+ BinaryCase{"f16", "OpFMul", "f16", "%5:f16 = mul %3, %4"},
+ BinaryCase{"f32", "OpFMul", "f32", "%5:f32 = mul %3, %4"},
+ BinaryCase{"vec3h", "OpFMul", "vec3<f16>", "%5:vec3<f16> = mul %3, %4"},
+ BinaryCase{"vec4f", "OpFMul", "vec4<f32>", "%5:vec4<f32> = mul %3, %4"},
- // OpFDiv
- BinaryCase{
- "f16",
- "OpFDiv",
- "%5:f16 = div %3, %4",
- },
- BinaryCase{
- "f32",
- "OpFDiv",
- "%5:f32 = div %3, %4",
- },
- BinaryCase{
- "vec3h",
- "OpFDiv",
- "%5:vec3<f16> = div %3, %4",
- },
- BinaryCase{
- "vec4f",
- "OpFDiv",
- "%5:vec4<f32> = div %3, %4",
- },
+ // OpFDiv
+ BinaryCase{"f16", "OpFDiv", "f16", "%5:f16 = div %3, %4"},
+ BinaryCase{"f32", "OpFDiv", "f32", "%5:f32 = div %3, %4"},
+ BinaryCase{"vec3h", "OpFDiv", "vec3<f16>", "%5:vec3<f16> = div %3, %4"},
+ BinaryCase{"vec4f", "OpFDiv", "vec4<f32>", "%5:vec4<f32> = div %3, %4"},
- // OpFRem
- BinaryCase{
- "f16",
- "OpFRem",
- "%5:f16 = mod %3, %4",
- },
- BinaryCase{
- "f32",
- "OpFRem",
- "%5:f32 = mod %3, %4",
- },
- BinaryCase{
- "vec3h",
- "OpFRem",
- "%5:vec3<f16> = mod %3, %4",
- },
- BinaryCase{
- "vec4f",
- "OpFRem",
- "%5:vec4<f32> = mod %3, %4",
- },
+ // OpFRem
+ BinaryCase{"f16", "OpFRem", "f16", "%5:f16 = mod %3, %4"},
+ BinaryCase{"f32", "OpFRem", "f32", "%5:f32 = mod %3, %4"},
+ BinaryCase{"vec3h", "OpFRem", "vec3<f16>", "%5:vec3<f16> = mod %3, %4"},
+ BinaryCase{"vec4f", "OpFRem", "vec4<f32>", "%5:vec4<f32> = mod %3, %4"},
- // OpIAdd
- BinaryCase{
- "i32",
- "OpIAdd",
- "%5:i32 = add %3, %4",
- },
- BinaryCase{
- "u32",
- "OpIAdd",
- "%5:u32 = add %3, %4",
- },
- BinaryCase{
- "vec3i",
- "OpIAdd",
- "%5:vec3<i32> = add %3, %4",
- },
- BinaryCase{
- "vec4u",
- "OpIAdd",
- "%5:vec4<u32> = add %3, %4",
- },
+ // OpIAdd
+ BinaryCase{"i32", "OpIAdd", "i32", "%5:i32 = spirv.add<i32> %3, %4"},
+ BinaryCase{"u32", "OpIAdd", "u32", "%5:u32 = spirv.add<u32> %3, %4"},
+ BinaryCase{"vec3i", "OpIAdd", "vec3<i32>", "%5:vec3<i32> = spirv.add<i32> %3, %4"},
+ BinaryCase{"vec4u", "OpIAdd", "vec4<u32>", "%5:vec4<u32> = spirv.add<u32> %3, %4"},
- // OpISub
- BinaryCase{
- "i32",
- "OpISub",
- "%5:i32 = sub %3, %4",
- },
- BinaryCase{
- "u32",
- "OpISub",
- "%5:u32 = sub %3, %4",
- },
- BinaryCase{
- "vec3i",
- "OpISub",
- "%5:vec3<i32> = sub %3, %4",
- },
- BinaryCase{
- "vec4u",
- "OpISub",
- "%5:vec4<u32> = sub %3, %4",
- },
+ // OpISub
+ BinaryCase{"i32", "OpISub", "i32", "%5:i32 = spirv.sub<i32> %3, %4"},
+ BinaryCase{"u32", "OpISub", "u32", "%5:u32 = spirv.sub<u32> %3, %4"},
+ BinaryCase{"vec3i", "OpISub", "vec3<i32>", "%5:vec3<i32> = spirv.sub<i32> %3, %4"},
+ BinaryCase{"vec4u", "OpISub", "vec4<u32>", "%5:vec4<u32> = spirv.sub<u32> %3, %4"},
- // OpIMul
- BinaryCase{
- "i32",
- "OpIMul",
- "%5:i32 = mul %3, %4",
- },
- BinaryCase{
- "u32",
- "OpIMul",
- "%5:u32 = mul %3, %4",
- },
- BinaryCase{
- "vec3i",
- "OpIMul",
- "%5:vec3<i32> = mul %3, %4",
- },
- BinaryCase{
- "vec4u",
- "OpIMul",
- "%5:vec4<u32> = mul %3, %4",
- },
+ // OpIMul
+ BinaryCase{"i32", "OpIMul", "i32", "%5:i32 = spirv.mul<i32> %3, %4"},
+ BinaryCase{"u32", "OpIMul", "u32", "%5:u32 = spirv.mul<u32> %3, %4"},
+ BinaryCase{"vec3i", "OpIMul", "vec3<i32>", "%5:vec3<i32> = spirv.mul<i32> %3, %4"},
+ BinaryCase{"vec4u", "OpIMul", "vec4<u32>", "%5:vec4<u32> = spirv.mul<u32> %3, %4"},
- // OpSDiv
- BinaryCase{
- "i32",
- "OpSDiv",
- "%5:i32 = div %3, %4",
- },
- BinaryCase{
- "u32",
- "OpSDiv",
- "%5:u32 = div %3, %4",
- },
- BinaryCase{
- "vec3i",
- "OpSDiv",
- "%5:vec3<i32> = div %3, %4",
- },
- BinaryCase{
- "vec4u",
- "OpSDiv",
- "%5:vec4<u32> = div %3, %4",
- },
+ // OpSDiv
+ BinaryCase{"i32", "OpSDiv", "i32", "%5:i32 = spirv.s_div<i32> %3, %4"},
+ BinaryCase{"u32", "OpSDiv", "u32", "%5:u32 = spirv.s_div<u32> %3, %4"},
+ BinaryCase{"vec3i", "OpSDiv", "vec3<i32>", "%5:vec3<i32> = spirv.s_div<i32> %3, %4"},
+ BinaryCase{"vec4u", "OpSDiv", "vec4<u32>", "%5:vec4<u32> = spirv.s_div<u32> %3, %4"},
- // OpSMod
- BinaryCase{
- "i32",
- "OpSMod",
- "%5:i32 = mod %3, %4",
- },
- BinaryCase{
- "u32",
- "OpSMod",
- "%5:u32 = mod %3, %4",
- },
- BinaryCase{
- "vec3i",
- "OpSMod",
- "%5:vec3<i32> = mod %3, %4",
- },
- BinaryCase{
- "vec4u",
- "OpSMod",
- "%5:vec4<u32> = mod %3, %4",
- },
+ // OpSMod
+ BinaryCase{"i32", "OpSMod", "i32", "%5:i32 = spirv.s_mod<i32> %3, %4"},
+ BinaryCase{"u32", "OpSMod", "u32", "%5:u32 = spirv.s_mod<u32> %3, %4"},
+ BinaryCase{"vec3i", "OpSMod", "vec3<i32>", "%5:vec3<i32> = spirv.s_mod<i32> %3, %4"},
+ BinaryCase{"vec4u", "OpSMod", "vec4<u32>", "%5:vec4<u32> = spirv.s_mod<u32> %3, %4"},
- // OpSRem
- BinaryCase{
- "i32",
- "OpSRem",
- "%5:i32 = mod %3, %4",
- },
- BinaryCase{
- "u32",
- "OpSRem",
- "%5:u32 = mod %3, %4",
- },
- BinaryCase{
- "vec3i",
- "OpSRem",
- "%5:vec3<i32> = mod %3, %4",
- },
- BinaryCase{
- "vec4u",
- "OpSRem",
- "%5:vec4<u32> = mod %3, %4",
- },
+ // OpSRem
+ BinaryCase{"i32", "OpSRem", "i32", "%5:i32 = spirv.s_mod<i32> %3, %4"},
+ BinaryCase{"u32", "OpSRem", "u32", "%5:u32 = spirv.s_mod<u32> %3, %4"},
+ BinaryCase{"vec3i", "OpSRem", "vec3<i32>", "%5:vec3<i32> = spirv.s_mod<i32> %3, %4"},
+ BinaryCase{"vec4u", "OpSRem", "vec4<u32>", "%5:vec4<u32> = spirv.s_mod<u32> %3, %4"},
- // OpUDiv
- BinaryCase{
- "u32",
- "OpUDiv",
- "%5:u32 = div %3, %4",
- },
- BinaryCase{
- "vec4u",
- "OpUDiv",
- "%5:vec4<u32> = div %3, %4",
- },
+ // OpUDiv
+ BinaryCase{"u32", "OpUDiv", "u32", "%5:u32 = div %3, %4"},
+ BinaryCase{"vec4u", "OpUDiv", "vec4<u32>", "%5:vec4<u32> = div %3, %4"},
- // OpUMod
- BinaryCase{
- "u32",
- "OpUMod",
- "%5:u32 = mod %3, %4",
- },
- BinaryCase{
- "vec4u",
- "OpUMod",
- "%5:vec4<u32> = mod %3, %4",
- }),
- PrintBuiltinCase);
+ // OpUMod
+ BinaryCase{"u32", "OpUMod", "u32", "%5:u32 = mod %3, %4"},
+ BinaryCase{"vec4u", "OpUMod", "vec4<u32>", "%5:vec4<u32> = mod %3, %4"}),
+ PrintBuiltinCase);
struct VectorMatTimesCase {
std::string lhs_type;
@@ -414,89 +232,491 @@
)");
}
+INSTANTIATE_TEST_SUITE_P(
+ SpirvParser,
+ VectorScalarTest,
+ testing::Values(
+ // OpVectorTimesScalar
+ VectorMatTimesCase{"vec3h", "f16", "vec3h", "OpVectorTimesScalar",
+ "%5:vec3<f16> = mul %3, %4"},
+ VectorMatTimesCase{"vec3f", "f32", "vec3f", "OpVectorTimesScalar",
+ "%5:vec3<f32> = mul %3, %4"},
+
+ // OpMatrixTimesScalar
+ VectorMatTimesCase{"mat3x3h", "f16", "mat3x3h", "OpMatrixTimesScalar",
+ "%5:mat3x3<f16> = mul %3, %4"},
+ VectorMatTimesCase{"mat3x3f", "f32", "mat3x3f", "OpMatrixTimesScalar",
+ "%5:mat3x3<f32> = mul %3, %4"},
+
+ // OpMatrixTimesVector
+ VectorMatTimesCase{"mat3x3h", "vec3h", "vec3h", "OpMatrixTimesVector",
+ "%5:vec3<f16> = mul %3, %4"},
+ VectorMatTimesCase{"mat3x3f", "vec3f", "vec3f", "OpMatrixTimesVector",
+ "%5:vec3<f32> = mul %3, %4"},
+
+ // OpVectorTimesMatrix
+ VectorMatTimesCase{"vec3h", "mat3x3h", "vec3h", "OpVectorTimesMatrix",
+ "%5:vec3<f16> = mul %3, %4"},
+ VectorMatTimesCase{"vec3f", "mat3x3f", "vec3f", "OpVectorTimesMatrix",
+ "%5:vec3<f32> = mul %3, %4"},
+
+ // OpMatrixTimesMatrix
+ VectorMatTimesCase{"mat3x3h", "mat3x3h", "mat3x3h", "OpMatrixTimesMatrix",
+ "%5:mat3x3<f16> = mul %3, %4"},
+ VectorMatTimesCase{"mat3x3f", "mat3x3f", "mat3x3f", "OpMatrixTimesMatrix",
+ "%5:mat3x3<f32> = mul %3, %4"}),
+ PrintVectorMatTimesCase);
+
+using BinaryMixedSignTest = SpirvParserTestWithParam<BinaryCase>;
+
+TEST_P(BinaryMixedSignTest, Scalar_Signed_SignedUnsigned) {
+ auto params = GetParam();
+ EXPECT_IR(R"(
+ OpCapability Shader
+ OpCapability Float16
+ OpMemoryModel Logical GLSL450
+ OpEntryPoint GLCompute %main "main"
+ OpExecutionMode %main LocalSize 1 1 1
+ %void = OpTypeVoid
+ %i32 = OpTypeInt 32 1
+ %u32 = OpTypeInt 32 0
+ %u32_10 = OpConstant %u32 10
+ %i32_20 = OpConstant %i32 20
+ %ep_type = OpTypeFunction %void
+ %main = OpFunction %void None %ep_type
+ %main_start = OpLabel
+ %1 = )" +
+ params.spirv_opcode +
+ R"( %i32 %i32_20 %u32_10
+ OpReturn
+ OpFunctionEnd
+)",
+ R"(
+%main = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:i32 = spirv.)" +
+ params.ir +
+ R"(<i32> 20i, 10u
+ ret
+ }
+}
+)");
+}
+
+TEST_P(BinaryMixedSignTest, Scalar_Signed_UnsignedSigned) {
+ auto params = GetParam();
+ EXPECT_IR(R"(
+ OpCapability Shader
+ OpCapability Float16
+ OpMemoryModel Logical GLSL450
+ OpEntryPoint GLCompute %main "main"
+ OpExecutionMode %main LocalSize 1 1 1
+ %void = OpTypeVoid
+ %i32 = OpTypeInt 32 1
+ %u32 = OpTypeInt 32 0
+ %u32_10 = OpConstant %u32 10
+ %i32_20 = OpConstant %i32 20
+ %ep_type = OpTypeFunction %void
+ %main = OpFunction %void None %ep_type
+ %main_start = OpLabel
+ %1 = )" +
+ params.spirv_opcode +
+ R"( %i32 %u32_10 %i32_20
+ OpReturn
+ OpFunctionEnd
+)",
+ R"(
+%main = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:i32 = spirv.)" +
+ params.ir +
+ R"(<i32> 10u, 20i
+ ret
+ }
+}
+)");
+}
+
+TEST_P(BinaryMixedSignTest, Scalar_Signed_UnsignedUnsigned) {
+ auto params = GetParam();
+ EXPECT_IR(R"(
+ OpCapability Shader
+ OpCapability Float16
+ OpMemoryModel Logical GLSL450
+ OpEntryPoint GLCompute %main "main"
+ OpExecutionMode %main LocalSize 1 1 1
+ %void = OpTypeVoid
+ %i32 = OpTypeInt 32 1
+ %u32 = OpTypeInt 32 0
+ %u32_10 = OpConstant %u32 10
+ %u32_30 = OpConstant %u32 30
+ %ep_type = OpTypeFunction %void
+ %main = OpFunction %void None %ep_type
+ %main_start = OpLabel
+ %1 = )" +
+ params.spirv_opcode +
+ R"( %i32 %u32_10 %u32_30
+ OpReturn
+ OpFunctionEnd
+)",
+ R"(
+%main = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:i32 = spirv.)" +
+ params.ir +
+ R"(<i32> 10u, 30u
+ ret
+ }
+}
+)");
+}
+
+TEST_P(BinaryMixedSignTest, Scalar_Unsigned_SignedUnsigned) {
+ auto params = GetParam();
+ EXPECT_IR(R"(
+ OpCapability Shader
+ OpCapability Float16
+ OpMemoryModel Logical GLSL450
+ OpEntryPoint GLCompute %main "main"
+ OpExecutionMode %main LocalSize 1 1 1
+ %void = OpTypeVoid
+ %i32 = OpTypeInt 32 1
+ %u32 = OpTypeInt 32 0
+ %u32_10 = OpConstant %u32 10
+ %i32_20 = OpConstant %i32 20
+ %ep_type = OpTypeFunction %void
+ %main = OpFunction %void None %ep_type
+ %main_start = OpLabel
+ %1 = )" +
+ params.spirv_opcode +
+ R"( %u32 %i32_20 %u32_10
+ OpReturn
+ OpFunctionEnd
+)",
+ R"(
+%main = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:u32 = spirv.)" +
+ params.ir +
+ R"(<u32> 20i, 10u
+ ret
+ }
+}
+)");
+}
+
+TEST_P(BinaryMixedSignTest, Scalar_Unsigned_UnsignedSigned) {
+ auto params = GetParam();
+ EXPECT_IR(R"(
+ OpCapability Shader
+ OpCapability Float16
+ OpMemoryModel Logical GLSL450
+ OpEntryPoint GLCompute %main "main"
+ OpExecutionMode %main LocalSize 1 1 1
+ %void = OpTypeVoid
+ %i32 = OpTypeInt 32 1
+ %u32 = OpTypeInt 32 0
+ %u32_10 = OpConstant %u32 10
+ %i32_20 = OpConstant %i32 20
+ %ep_type = OpTypeFunction %void
+ %main = OpFunction %void None %ep_type
+ %main_start = OpLabel
+ %1 = )" +
+ params.spirv_opcode +
+ R"( %u32 %u32_10 %i32_20
+ OpReturn
+ OpFunctionEnd
+)",
+ R"(
+%main = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:u32 = spirv.)" +
+ params.ir +
+ R"(<u32> 10u, 20i
+ ret
+ }
+}
+)");
+}
+
+TEST_P(BinaryMixedSignTest, Scalar_Unsigned_SignedSigned) {
+ auto params = GetParam();
+ EXPECT_IR(R"(
+ OpCapability Shader
+ OpCapability Float16
+ OpMemoryModel Logical GLSL450
+ OpEntryPoint GLCompute %main "main"
+ OpExecutionMode %main LocalSize 1 1 1
+ %void = OpTypeVoid
+ %i32 = OpTypeInt 32 1
+ %u32 = OpTypeInt 32 0
+ %i32_10 = OpConstant %i32 10
+ %i32_30 = OpConstant %i32 30
+ %ep_type = OpTypeFunction %void
+ %main = OpFunction %void None %ep_type
+ %main_start = OpLabel
+ %1 = )" +
+ params.spirv_opcode +
+ R"( %u32 %i32_10 %i32_30
+ OpReturn
+ OpFunctionEnd
+)",
+ R"(
+%main = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:u32 = spirv.)" +
+ params.ir +
+ R"(<u32> 10i, 30i
+ ret
+ }
+}
+)");
+}
+
+TEST_P(BinaryMixedSignTest, Vector_Signed_SignedUnsigned) {
+ auto params = GetParam();
+ EXPECT_IR(R"(
+ OpCapability Shader
+ OpCapability Float16
+ OpMemoryModel Logical GLSL450
+ OpEntryPoint GLCompute %main "main"
+ OpExecutionMode %main LocalSize 1 1 1
+ %void = OpTypeVoid
+ %i32 = OpTypeInt 32 1
+ %u32 = OpTypeInt 32 0
+ %v2i32 = OpTypeVector %i32 2
+ %v2u32 = OpTypeVector %u32 2
+ %u32_10 = OpConstant %u32 10
+ %u32_20 = OpConstant %u32 20
+ %i32_50 = OpConstant %i32 50
+ %i32_60 = OpConstant %i32 60
+ %v2u32_10_20 = OpConstantComposite %v2u32 %u32_10 %u32_20
+ %v2i32_50_60 = OpConstantComposite %v2i32 %i32_50 %i32_60
+ %ep_type = OpTypeFunction %void
+ %main = OpFunction %void None %ep_type
+ %main_start = OpLabel
+ %1 = )" +
+ params.spirv_opcode +
+ R"( %v2i32 %v2i32_50_60 %v2u32_10_20
+ OpReturn
+ OpFunctionEnd
+)",
+ R"(
+%main = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<i32> = spirv.)" +
+ params.ir +
+ R"(<i32> vec2<i32>(50i, 60i), vec2<u32>(10u, 20u)
+ ret
+ }
+}
+)");
+}
+
+TEST_P(BinaryMixedSignTest, Vector_Signed_UnsignedSigned) {
+ auto params = GetParam();
+ EXPECT_IR(R"(
+ OpCapability Shader
+ OpCapability Float16
+ OpMemoryModel Logical GLSL450
+ OpEntryPoint GLCompute %main "main"
+ OpExecutionMode %main LocalSize 1 1 1
+ %void = OpTypeVoid
+ %i32 = OpTypeInt 32 1
+ %u32 = OpTypeInt 32 0
+ %v2i32 = OpTypeVector %i32 2
+ %v2u32 = OpTypeVector %u32 2
+ %u32_10 = OpConstant %u32 10
+ %u32_20 = OpConstant %u32 20
+ %i32_50 = OpConstant %i32 50
+ %i32_60 = OpConstant %i32 60
+ %v2u32_10_20 = OpConstantComposite %v2u32 %u32_10 %u32_20
+ %v2i32_50_60 = OpConstantComposite %v2i32 %i32_50 %i32_60
+ %ep_type = OpTypeFunction %void
+ %main = OpFunction %void None %ep_type
+ %main_start = OpLabel
+ %1 = )" +
+ params.spirv_opcode +
+ R"( %v2i32 %v2u32_10_20 %v2i32_50_60
+ OpReturn
+ OpFunctionEnd
+)",
+ R"(
+%main = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<i32> = spirv.)" +
+ params.ir +
+ R"(<i32> vec2<u32>(10u, 20u), vec2<i32>(50i, 60i)
+ ret
+ }
+}
+)");
+}
+
+TEST_P(BinaryMixedSignTest, Vector_Signed_UnsignedUnsigned) {
+ auto params = GetParam();
+ EXPECT_IR(R"(
+ OpCapability Shader
+ OpCapability Float16
+ OpMemoryModel Logical GLSL450
+ OpEntryPoint GLCompute %main "main"
+ OpExecutionMode %main LocalSize 1 1 1
+ %void = OpTypeVoid
+ %i32 = OpTypeInt 32 1
+ %u32 = OpTypeInt 32 0
+ %v2i32 = OpTypeVector %i32 2
+ %v2u32 = OpTypeVector %u32 2
+ %u32_10 = OpConstant %u32 10
+ %u32_20 = OpConstant %u32 20
+ %v2u32_10_20 = OpConstantComposite %v2u32 %u32_10 %u32_20
+ %v2u32_20_10 = OpConstantComposite %v2u32 %u32_20 %u32_10
+ %ep_type = OpTypeFunction %void
+ %main = OpFunction %void None %ep_type
+ %main_start = OpLabel
+ %1 = )" +
+ params.spirv_opcode +
+ R"( %v2i32 %v2u32_10_20 %v2u32_20_10
+ OpReturn
+ OpFunctionEnd
+)",
+ R"(
+%main = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<i32> = spirv.)" +
+ params.ir +
+ R"(<i32> vec2<u32>(10u, 20u), vec2<u32>(20u, 10u)
+ ret
+ }
+}
+)");
+}
+
+TEST_P(BinaryMixedSignTest, Vector_Unsigned_SignedUnsigned) {
+ auto params = GetParam();
+ EXPECT_IR(R"(
+ OpCapability Shader
+ OpCapability Float16
+ OpMemoryModel Logical GLSL450
+ OpEntryPoint GLCompute %main "main"
+ OpExecutionMode %main LocalSize 1 1 1
+ %void = OpTypeVoid
+ %i32 = OpTypeInt 32 1
+ %u32 = OpTypeInt 32 0
+ %v2i32 = OpTypeVector %i32 2
+ %v2u32 = OpTypeVector %u32 2
+ %u32_10 = OpConstant %u32 10
+ %u32_20 = OpConstant %u32 20
+ %i32_50 = OpConstant %i32 50
+ %i32_60 = OpConstant %i32 60
+ %v2u32_10_20 = OpConstantComposite %v2u32 %u32_10 %u32_20
+ %v2i32_50_60 = OpConstantComposite %v2i32 %i32_50 %i32_60
+ %ep_type = OpTypeFunction %void
+ %main = OpFunction %void None %ep_type
+ %main_start = OpLabel
+ %1 = )" +
+ params.spirv_opcode +
+ R"( %v2u32 %v2i32_50_60 %v2u32_10_20
+ OpReturn
+ OpFunctionEnd
+)",
+ R"(
+%main = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<u32> = spirv.)" +
+ params.ir +
+ R"(<u32> vec2<i32>(50i, 60i), vec2<u32>(10u, 20u)
+ ret
+ }
+}
+)");
+}
+
+TEST_P(BinaryMixedSignTest, Vector_Unsigned_UnsignedSigned) {
+ auto params = GetParam();
+ EXPECT_IR(R"(
+ OpCapability Shader
+ OpCapability Float16
+ OpMemoryModel Logical GLSL450
+ OpEntryPoint GLCompute %main "main"
+ OpExecutionMode %main LocalSize 1 1 1
+ %void = OpTypeVoid
+ %i32 = OpTypeInt 32 1
+ %u32 = OpTypeInt 32 0
+ %v2i32 = OpTypeVector %i32 2
+ %v2u32 = OpTypeVector %u32 2
+ %u32_10 = OpConstant %u32 10
+ %u32_20 = OpConstant %u32 20
+ %i32_50 = OpConstant %i32 50
+ %i32_60 = OpConstant %i32 60
+ %v2u32_10_20 = OpConstantComposite %v2u32 %u32_10 %u32_20
+ %v2i32_50_60 = OpConstantComposite %v2i32 %i32_50 %i32_60
+ %ep_type = OpTypeFunction %void
+ %main = OpFunction %void None %ep_type
+ %main_start = OpLabel
+ %1 = )" +
+ params.spirv_opcode +
+ R"( %v2u32 %v2u32_10_20 %v2i32_50_60
+ OpReturn
+ OpFunctionEnd
+)",
+ R"(
+%main = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<u32> = spirv.)" +
+ params.ir +
+ R"(<u32> vec2<u32>(10u, 20u), vec2<i32>(50i, 60i)
+ ret
+ }
+}
+)");
+}
+
+TEST_P(BinaryMixedSignTest, Vector_Unsigned_SignedSigned) {
+ auto params = GetParam();
+ EXPECT_IR(R"(
+ OpCapability Shader
+ OpCapability Float16
+ OpMemoryModel Logical GLSL450
+ OpEntryPoint GLCompute %main "main"
+ OpExecutionMode %main LocalSize 1 1 1
+ %void = OpTypeVoid
+ %i32 = OpTypeInt 32 1
+ %u32 = OpTypeInt 32 0
+ %v2i32 = OpTypeVector %i32 2
+ %v2u32 = OpTypeVector %u32 2
+ %i32_50 = OpConstant %i32 50
+ %i32_60 = OpConstant %i32 60
+ %v2i32_50_60 = OpConstantComposite %v2i32 %i32_50 %i32_60
+ %v2i32_60_50 = OpConstantComposite %v2i32 %i32_60 %i32_50
+ %ep_type = OpTypeFunction %void
+ %main = OpFunction %void None %ep_type
+ %main_start = OpLabel
+ %1 = )" +
+ params.spirv_opcode +
+ R"( %v2u32 %v2i32_50_60 %v2i32_60_50
+ OpReturn
+ OpFunctionEnd
+)",
+ R"(
+%main = @compute @workgroup_size(1u, 1u, 1u) func():void {
+ $B1: {
+ %2:vec2<u32> = spirv.)" +
+ params.ir +
+ R"(<u32> vec2<i32>(50i, 60i), vec2<i32>(60i, 50i)
+ ret
+ }
+}
+)");
+}
+
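+// The spirv_type and ir_type fields are unused here; the mixed-sign tests declare their own
+// operand and result types inline.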
INSTANTIATE_TEST_SUITE_P(SpirvParser,
- VectorScalarTest,
- testing::Values(
- // OpVectorTimesScalar
- VectorMatTimesCase{
- "vec3h",
- "f16",
- "vec3h",
- "OpVectorTimesScalar",
- "%5:vec3<f16> = mul %3, %4",
- },
- VectorMatTimesCase{
- "vec3f",
- "f32",
- "vec3f",
- "OpVectorTimesScalar",
- "%5:vec3<f32> = mul %3, %4",
- },
-
- // OpMatrixTimesScalar
- VectorMatTimesCase{
- "mat3x3h",
- "f16",
- "mat3x3h",
- "OpMatrixTimesScalar",
- "%5:mat3x3<f16> = mul %3, %4",
- },
- VectorMatTimesCase{
- "mat3x3f",
- "f32",
- "mat3x3f",
- "OpMatrixTimesScalar",
- "%5:mat3x3<f32> = mul %3, %4",
- },
-
- // OpMatrixTimesVector
- VectorMatTimesCase{
- "mat3x3h",
- "vec3h",
- "vec3h",
- "OpMatrixTimesVector",
- "%5:vec3<f16> = mul %3, %4",
- },
- VectorMatTimesCase{
- "mat3x3f",
- "vec3f",
- "vec3f",
- "OpMatrixTimesVector",
- "%5:vec3<f32> = mul %3, %4",
- },
-
- // OpVectorTimesMatrix
- VectorMatTimesCase{
- "vec3h",
- "mat3x3h",
- "vec3h",
- "OpVectorTimesMatrix",
- "%5:vec3<f16> = mul %3, %4",
- },
- VectorMatTimesCase{
- "vec3f",
- "mat3x3f",
- "vec3f",
- "OpVectorTimesMatrix",
- "%5:vec3<f32> = mul %3, %4",
- },
-
- // OpMatrixTimesMatrix
- VectorMatTimesCase{
- "mat3x3h",
- "mat3x3h",
- "mat3x3h",
- "OpMatrixTimesMatrix",
- "%5:mat3x3<f16> = mul %3, %4",
- },
- VectorMatTimesCase{
- "mat3x3f",
- "mat3x3f",
- "mat3x3f",
- "OpMatrixTimesMatrix",
- "%5:mat3x3<f32> = mul %3, %4",
- }),
- PrintVectorMatTimesCase);
+ BinaryMixedSignTest,
+ testing::Values(BinaryCase{"", "OpIAdd", "", "add"},
+ BinaryCase{"", "OpISub", "", "sub"},
+ BinaryCase{"", "OpIMul", "", "mul"},
+ BinaryCase{"", "OpSDiv", "", "s_div"},
+ BinaryCase{"", "OpSMod", "", "s_mod"},
+ BinaryCase{"", "OpSRem", "", "s_mod"}));
} // namespace
} // namespace tint::spirv::reader
diff --git a/src/tint/lang/spirv/reader/parser/parser.cc b/src/tint/lang/spirv/reader/parser/parser.cc
index 7ed79c7..3323527 100644
--- a/src/tint/lang/spirv/reader/parser/parser.cc
+++ b/src/tint/lang/spirv/reader/parser/parser.cc
@@ -566,16 +566,22 @@
EmitCompositeExtract(inst);
break;
case spv::Op::OpFAdd:
- case spv::Op::OpIAdd:
EmitBinary(inst, core::BinaryOp::kAdd);
break;
- case spv::Op::OpFDiv:
+ case spv::Op::OpIAdd:
+ EmitSpirvExplicitBuiltinCall(inst, spirv::BuiltinFn::kAdd);
+ break;
case spv::Op::OpSDiv:
+ EmitSpirvExplicitBuiltinCall(inst, spirv::BuiltinFn::kSDiv);
+ break;
+ case spv::Op::OpFDiv:
case spv::Op::OpUDiv:
EmitBinary(inst, core::BinaryOp::kDivide);
break;
- case spv::Op::OpFMul:
case spv::Op::OpIMul:
+ EmitSpirvExplicitBuiltinCall(inst, spirv::BuiltinFn::kMul);
+ break;
+ case spv::Op::OpFMul:
case spv::Op::OpVectorTimesScalar:
case spv::Op::OpMatrixTimesScalar:
case spv::Op::OpVectorTimesMatrix:
@@ -585,14 +591,18 @@
break;
case spv::Op::OpFRem:
case spv::Op::OpUMod:
- case spv::Op::OpSMod:
- case spv::Op::OpSRem:
EmitBinary(inst, core::BinaryOp::kModulo);
break;
+ case spv::Op::OpSMod:
+ case spv::Op::OpSRem:
+ EmitSpirvExplicitBuiltinCall(inst, spirv::BuiltinFn::kSMod);
+ break;
case spv::Op::OpFSub:
- case spv::Op::OpISub:
EmitBinary(inst, core::BinaryOp::kSubtract);
break;
+ case spv::Op::OpISub:
+ EmitSpirvExplicitBuiltinCall(inst, spirv::BuiltinFn::kSub);
+ break;
case spv::Op::OpFunctionCall:
EmitFunctionCall(inst);
break;
@@ -689,6 +699,13 @@
Emit(b_.Call(Type(inst.type_id()), fn, Args(inst, 2)), inst.result_id());
}
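+    /// Emits a SPIR-V dialect builtin call with an explicit template argument set to the
+    /// deepest element of the instruction's result type.
+    /// @param inst the SPIR-V instruction
+    /// @param fn the SPIR-V builtin function to call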
+ void EmitSpirvExplicitBuiltinCall(const spvtools::opt::Instruction& inst, spirv::BuiltinFn fn) {
+ Emit(b_.CallExplicit<spirv::ir::BuiltinCall>(Type(inst.type_id()), fn,
+ Vector{Type(inst.type_id())->DeepestElement()},
+ Args(inst, 2)),
+ inst.result_id());
+ }
+
void EmitSpirvBuiltinCall(const spvtools::opt::Instruction& inst, spirv::BuiltinFn fn) {
Emit(b_.Call<spirv::ir::BuiltinCall>(Type(inst.type_id()), fn, Args(inst, 2)),
inst.result_id());
@@ -696,9 +713,9 @@
void EmitBitCount(const spvtools::opt::Instruction& inst) {
auto* res_ty = Type(inst.type_id());
- Emit(b_.CallExplicit<spirv::ir::BuiltinCall>(
- res_ty, spirv::BuiltinFn::kBitCount,
- Vector<const core::type::Type*, 1>{res_ty->DeepestElement()}, Args(inst, 2)),
+ Emit(b_.CallExplicit<spirv::ir::BuiltinCall>(res_ty, spirv::BuiltinFn::kBitCount,
+ Vector{res_ty->DeepestElement()},
+ Args(inst, 2)),
inst.result_id());
}
@@ -1010,7 +1027,7 @@
/// @param inst the SPIR-V instruction for OpAccessChain
void EmitAccess(const spvtools::opt::Instruction& inst) {
- Vector<core::ir::Value*, 4> indices = Args(inst, 3);
+ Vector indices = Args(inst, 3);
auto* base = Value(inst.GetSingleWordOperand(2));
if (indices.IsEmpty()) {
diff --git a/src/tint/lang/spirv/spirv.def b/src/tint/lang/spirv/spirv.def
index 854cbcc..a5a8299 100644
--- a/src/tint/lang/spirv/spirv.def
+++ b/src/tint/lang/spirv/spirv.def
@@ -403,6 +403,17 @@
implicit(T: iu32, N: num)
fn bit_field_u_extract(base: vec<N, T>, offset: iu32, count: iu32) -> vec<N, T>
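+// Integer arithmetic where operand signedness may differ from the result type. The SPIR-V
+// reader emits these as dialect builtins; a later lowering adds bitcasts as needed.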
+implicit(A: iu32, B: iu32) fn add<R: iu32>(A, B) -> R
+implicit(A: iu32, B: iu32) fn sub<R: iu32>(A, B) -> R
+implicit(A: iu32, B: iu32) fn mul<R: iu32>(A, B) -> R
+implicit(A: iu32, B: iu32) fn s_div<R: iu32>(A, B) -> R
+implicit(A: iu32, B: iu32) fn s_mod<R: iu32>(A, B) -> R
+implicit(A: iu32, B: iu32, N: num) fn add<R: iu32>(vec<N, A>, vec<N, B>) -> vec<N, R>
+implicit(A: iu32, B: iu32, N: num) fn sub<R: iu32>(vec<N, A>, vec<N, B>) -> vec<N, R>
+implicit(A: iu32, B: iu32, N: num) fn mul<R: iu32>(vec<N, A>, vec<N, B>) -> vec<N, R>
+implicit(A: iu32, B: iu32, N: num) fn s_div<R: iu32>(vec<N, A>, vec<N, B>) -> vec<N, R>
+implicit(A: iu32, B: iu32, N: num) fn s_mod<R: iu32>(vec<N, A>, vec<N, B>) -> vec<N, R>
+
////////////////////////////////////////////////////////////////////////////////
// SPV_KHR_integer_dot_product instructions
////////////////////////////////////////////////////////////////////////////////
diff --git a/src/tint/lang/spirv/writer/printer/printer.cc b/src/tint/lang/spirv/writer/printer/printer.cc
index 37ef6d8..b68586f 100644
--- a/src/tint/lang/spirv/writer/printer/printer.cc
+++ b/src/tint/lang/spirv/writer/printer/printer.cc
@@ -1509,6 +1509,21 @@
case spirv::BuiltinFn::kBitFieldUExtract:
op = spv::Op::OpBitFieldUExtract;
break;
+ case BuiltinFn::kAdd:
+ op = spv::Op::OpIAdd;
+ break;
+ case BuiltinFn::kSub:
+ op = spv::Op::OpISub;
+ break;
+ case BuiltinFn::kMul:
+ op = spv::Op::OpIMul;
+ break;
+ case BuiltinFn::kSDiv:
+ op = spv::Op::OpSDiv;
+ break;
+ case BuiltinFn::kSMod:
+ op = spv::Op::OpSMod;
+ break;
case spirv::BuiltinFn::kNone:
TINT_ICE() << "undefined spirv ir function";
}