// Copyright 2024 The Dawn & Tint Authors
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "src/tint/lang/core/fluent_types.h"
#include "src/tint/lang/core/ir/function.h"
#include "src/tint/lang/core/number.h"
#include "src/tint/lang/core/type/builtin_structs.h"
#include "src/tint/lang/core/type/depth_multisampled_texture.h"
#include "src/tint/lang/core/type/depth_texture.h"
#include "src/tint/lang/core/type/multisampled_texture.h"
#include "src/tint/lang/core/type/sampled_texture.h"
#include "src/tint/lang/core/type/sampler.h"
#include "src/tint/lang/core/type/sampler_kind.h"
#include "src/tint/lang/core/type/storage_texture.h"
#include "src/tint/lang/core/type/texture_dimension.h"
#include "src/tint/lang/glsl/writer/helper_test.h"
#include "gtest/gtest.h"
using namespace tint::core::fluent_types; // NOLINT
using namespace tint::core::number_suffixes; // NOLINT
namespace tint::glsl::writer {
namespace {
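
// abs() maps one-to-one onto the GLSL builtin of the same name, so the call is
// emitted directly.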
TEST_F(GlslWriterTest, BuiltinGeneric) {
    auto* func = b.Function("foo", ty.void_(), core::ir::Function::PipelineStage::kCompute);
    func->SetWorkgroupSize(1, 1, 1);
    b.Append(func->Block(), [&] {
        auto* x = b.Let("x", 1_i);
        auto* c = b.Call(ty.i32(), core::BuiltinFn::kAbs, x);
        b.Let("w", c);
        b.Return(func);
    });

    ASSERT_TRUE(Generate()) << err_ << output_.glsl;
    EXPECT_EQ(output_.glsl, GlslHeader() + R"(
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
void main() {
  int x = 1;
  int w = abs(x);
}
)");
}
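
// select() on scalars is emitted as a ternary expression.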
TEST_F(GlslWriterTest, BuiltinSelectScalar) {
    auto* func = b.Function("foo", ty.void_(), core::ir::Function::PipelineStage::kFragment);
    b.Append(func->Block(), [&] {
        auto* x = b.Let("x", 1_i);
        auto* y = b.Let("y", 2_i);
        auto* c = b.Call(ty.i32(), core::BuiltinFn::kSelect, x, y, true);
        b.Let("w", c);
        b.Return(func);
    });

    ASSERT_TRUE(Generate()) << err_ << output_.glsl;
    EXPECT_EQ(output_.glsl, GlslHeader() + R"(precision highp float;
precision highp int;
void main() {
  int x = 1;
  int y = 2;
  int w = ((true) ? (y) : (x));
}
)");
}
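
// select() on vectors is decomposed into per-component ternaries that are then
// recombined into a vector.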
TEST_F(GlslWriterTest, BuiltinSelectVector) {
    auto* func = b.Function("foo", ty.void_(), core::ir::Function::PipelineStage::kFragment);
    b.Append(func->Block(), [&] {
        auto* x = b.Let("x", b.Construct<vec2<i32>>(1_i, 2_i));
        auto* y = b.Let("y", b.Construct<vec2<i32>>(3_i, 4_i));
        auto* cmp = b.Construct<vec2<bool>>(true, false);
        auto* c = b.Call(ty.vec2<i32>(), core::BuiltinFn::kSelect, x, y, cmp);
        b.Let("w", c);
        b.Return(func);
    });

    ASSERT_TRUE(Generate()) << err_ << output_.glsl;
    EXPECT_EQ(output_.glsl, GlslHeader() + R"(precision highp float;
precision highp int;
void main() {
  ivec2 x = ivec2(1, 2);
  ivec2 y = ivec2(3, 4);
  bvec2 v = bvec2(true, false);
  int v_1 = ((v.x) ? (y.x) : (x.x));
  ivec2 w = ivec2(v_1, ((v.y) ? (y.y) : (x.y)));
}
)");
}
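
// storageBarrier() lowers to barrier() followed by memoryBarrierBuffer().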
TEST_F(GlslWriterTest, BuiltinStorageBarrier) {
    auto* func = b.Function("foo", ty.void_(), core::ir::Function::PipelineStage::kCompute);
    func->SetWorkgroupSize(1, 1, 1);
    b.Append(func->Block(), [&] {
        b.Call(ty.void_(), core::BuiltinFn::kStorageBarrier);
        b.Return(func);
    });

    ASSERT_TRUE(Generate()) << err_ << output_.glsl;
    EXPECT_EQ(output_.glsl, GlslHeader() + R"(
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
void main() {
  barrier();
  memoryBarrierBuffer();
}
)");
}
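
// textureBarrier() lowers to barrier() followed by memoryBarrierImage().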
TEST_F(GlslWriterTest, BuiltinTextureBarrier) {
    auto* func = b.Function("foo", ty.void_(), core::ir::Function::PipelineStage::kCompute);
    func->SetWorkgroupSize(1, 1, 1);
    b.Append(func->Block(), [&] {
        b.Call(ty.void_(), core::BuiltinFn::kTextureBarrier);
        b.Return(func);
    });

    ASSERT_TRUE(Generate()) << err_ << output_.glsl;
    EXPECT_EQ(output_.glsl, GlslHeader() + R"(
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
void main() {
  barrier();
  memoryBarrierImage();
}
)");
}
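
// workgroupBarrier() lowers to a plain barrier().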
TEST_F(GlslWriterTest, BuiltinWorkgroupBarrier) {
    auto* func = b.Function("foo", ty.void_(), core::ir::Function::PipelineStage::kCompute);
    func->SetWorkgroupSize(1, 1, 1);
    b.Append(func->Block(), [&] {
        b.Call(ty.void_(), core::BuiltinFn::kWorkgroupBarrier);
        b.Return(func);
    });

    ASSERT_TRUE(Generate()) << err_ << output_.glsl;
    EXPECT_EQ(output_.glsl, GlslHeader() + R"(
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
void main() {
  barrier();
}
)");
}
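
// Disabled until the GLSL writer supports bitcast; the expectation below still
// appears to be HLSL output kept as a placeholder.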
// TODO(dsinclair): Needs bitcast
TEST_F(GlslWriterTest, DISABLED_BuiltinStorageAtomicCompareExchangeWeak) {
    auto* sb = ty.Struct(mod.symbols.New("SB"), {
                                                    {mod.symbols.New("padding"), ty.vec4<f32>()},
                                                    {mod.symbols.New("a"), ty.atomic<i32>()},
                                                    {mod.symbols.New("b"), ty.atomic<u32>()},
                                                });
    auto* var = b.Var("v", storage, sb, core::Access::kReadWrite);
    var->SetBindingPoint(0, 0);
    b.ir.root_block->Append(var);

    auto* func = b.Function("foo", ty.void_(), core::ir::Function::PipelineStage::kFragment);
    b.Append(func->Block(), [&] {
        b.Let("x",
              b.Call(core::type::CreateAtomicCompareExchangeResult(ty, mod.symbols, ty.i32()),
                     core::BuiltinFn::kAtomicCompareExchangeWeak,
                     b.Access(ty.ptr<storage, atomic<i32>, read_write>(), var, 1_u), 123_i, 345_i));
        b.Return(func);
    });

    ASSERT_TRUE(Generate()) << err_ << output_.glsl;
    EXPECT_EQ(output_.glsl, GlslHeader() + R"(struct atomic_compare_exchange_result_i32 {
  int old_value;
  bool exchanged;
};
RWByteAddressBuffer v : register(u0);
void foo() {
  int v_1 = 0;
  v.InterlockedCompareExchange(int(16u), 123, 345, v_1);
  int v_2 = v_1;
  atomic_compare_exchange_result_i32 x = {v_2, (v_2 == 123)};
}
)");
}
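
// Disabled for the same reason as above; the expectation is likewise still in
// its HLSL placeholder form.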
// TODO(dsinclair): Needs bitcast
TEST_F(GlslWriterTest, DISABLED_BuiltinStorageAtomicCompareExchangeWeakDirect) {
    auto* var = b.Var("v", storage, ty.atomic<i32>(), core::Access::kReadWrite);
    var->SetBindingPoint(0, 0);
    b.ir.root_block->Append(var);

    auto* func = b.Function("foo", ty.void_(), core::ir::Function::PipelineStage::kFragment);
    b.Append(func->Block(), [&] {
        b.Let("x", b.Call(core::type::CreateAtomicCompareExchangeResult(ty, mod.symbols, ty.i32()),
                          core::BuiltinFn::kAtomicCompareExchangeWeak, var, 123_i, 345_i));
        b.Return(func);
    });

    ASSERT_TRUE(Generate()) << err_ << output_.glsl;
    EXPECT_EQ(output_.glsl, GlslHeader() + R"(struct atomic_compare_exchange_result_i32 {
  int old_value;
  bool exchanged;
};
RWByteAddressBuffer v : register(u0);
void foo() {
  int v_1 = 0;
  v.InterlockedCompareExchange(int(0u), 123, 345, v_1);
  int v_2 = v_1;
  atomic_compare_exchange_result_i32 x = {v_2, (v_2 == 123)};
}
)");
}
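
// atomicLoad() has no direct GLSL builtin, so it is polyfilled with
// atomicOr(p, 0). The workgroup variable is zero-initialized by the first
// invocation before the barrier.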
TEST_F(GlslWriterTest, BuiltinWorkgroupAtomicLoad) {
    auto* sb = ty.Struct(mod.symbols.New("SB"), {
                                                    {mod.symbols.New("padding"), ty.vec4<f32>()},
                                                    {mod.symbols.New("a"), ty.atomic<i32>()},
                                                    {mod.symbols.New("b"), ty.atomic<u32>()},
                                                });
    auto* var = b.Var("v", workgroup, sb, core::Access::kReadWrite);
    b.ir.root_block->Append(var);

    auto* func = b.Function("foo", ty.void_(), core::ir::Function::PipelineStage::kCompute);
    func->SetWorkgroupSize(1, 1, 1);
    b.Append(func->Block(), [&] {
        b.Let("x", b.Call(ty.i32(), core::BuiltinFn::kAtomicLoad,
                          b.Access(ty.ptr<workgroup, atomic<i32>, read_write>(), var, 1_u)));
        b.Return(func);
    });

    ASSERT_TRUE(Generate()) << err_ << output_.glsl;
    EXPECT_EQ(output_.glsl, GlslHeader() + R"(
struct SB {
  vec4 padding;
  int a;
  uint b;
};
shared SB v;
void foo_inner(uint tint_local_index) {
  if ((tint_local_index == 0u)) {
    v.padding = vec4(0.0f);
    atomicExchange(v.a, 0);
    atomicExchange(v.b, 0u);
  }
  barrier();
  int x = atomicOr(v.a, 0);
}
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
void main() {
  foo_inner(gl_LocalInvocationIndex);
}
)");
}
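
// atomicSub() is emitted as atomicAdd() with a negated operand.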
TEST_F(GlslWriterTest, BuiltinWorkgroupAtomicSub) {
    auto* sb = ty.Struct(mod.symbols.New("SB"), {
                                                    {mod.symbols.New("padding"), ty.vec4<f32>()},
                                                    {mod.symbols.New("a"), ty.atomic<i32>()},
                                                    {mod.symbols.New("b"), ty.atomic<u32>()},
                                                });
    auto* var = b.Var("v", workgroup, sb, core::Access::kReadWrite);
    b.ir.root_block->Append(var);

    auto* func = b.Function("foo", ty.void_(), core::ir::Function::PipelineStage::kCompute);
    func->SetWorkgroupSize(1, 1, 1);
    b.Append(func->Block(), [&] {
        b.Let("x", b.Call(ty.i32(), core::BuiltinFn::kAtomicSub,
                          b.Access(ty.ptr<workgroup, atomic<i32>, read_write>(), var, 1_u), 123_i));
        b.Let("y", b.Call(ty.u32(), core::BuiltinFn::kAtomicSub,
                          b.Access(ty.ptr<workgroup, atomic<u32>, read_write>(), var, 2_u), 123_u));
        b.Return(func);
    });

    ASSERT_TRUE(Generate()) << err_ << output_.glsl;
    EXPECT_EQ(output_.glsl, GlslHeader() + R"(
struct SB {
  vec4 padding;
  int a;
  uint b;
};
shared SB v;
void foo_inner(uint tint_local_index) {
  if ((tint_local_index == 0u)) {
    v.padding = vec4(0.0f);
    atomicExchange(v.a, 0);
    atomicExchange(v.b, 0u);
  }
  barrier();
  int x = atomicAdd(v.a, -(123));
  uint y = atomicAdd(v.b, -(123u));
}
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
void main() {
  foo_inner(gl_LocalInvocationIndex);
}
)");
}
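
// Disabled until bitcast is supported; as with the storage-buffer variants
// above, the expectation is still the HLSL placeholder output.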
// TODO(dsinclair): Needs bitcast
TEST_F(GlslWriterTest, DISABLED_BuiltinWorkgroupAtomicCompareExchangeWeak) {
    auto* sb = ty.Struct(mod.symbols.New("SB"), {
                                                    {mod.symbols.New("padding"), ty.vec4<f32>()},
                                                    {mod.symbols.New("a"), ty.atomic<i32>()},
                                                    {mod.symbols.New("b"), ty.atomic<u32>()},
                                                });
    auto* var = b.Var("v", workgroup, sb, core::Access::kReadWrite);
    b.ir.root_block->Append(var);

    auto* func = b.Function("foo", ty.void_(), core::ir::Function::PipelineStage::kCompute);
    func->SetWorkgroupSize(1, 1, 1);
    b.Append(func->Block(), [&] {
        b.Let("x", b.Call(core::type::CreateAtomicCompareExchangeResult(ty, mod.symbols, ty.i32()),
                          core::BuiltinFn::kAtomicCompareExchangeWeak,
                          b.Access(ty.ptr<workgroup, atomic<i32>, read_write>(), var, 1_u), 123_i,
                          345_i));
        b.Return(func);
    });

    ASSERT_TRUE(Generate()) << err_ << output_.glsl;
    EXPECT_EQ(output_.glsl, GlslHeader() + R"(struct SB {
  float4 padding;
  int a;
  uint b;
};
struct atomic_compare_exchange_result_i32 {
  int old_value;
  bool exchanged;
};
struct foo_inputs {
  uint tint_local_index : SV_GroupIndex;
};
groupshared SB v;
void foo_inner(uint tint_local_index) {
  if ((tint_local_index == 0u)) {
    v.padding = (0.0f).xxxx;
    int v_1 = 0;
    InterlockedExchange(v.a, 0, v_1);
    uint v_2 = 0u;
    InterlockedExchange(v.b, 0u, v_2);
  }
  GroupMemoryBarrierWithGroupSync();
  int v_3 = 0;
  InterlockedCompareExchange(v.a, 123, 345, v_3);
  int v_4 = v_3;
  atomic_compare_exchange_result_i32 x = {v_4, (v_4 == 123)};
}
[numthreads(1, 1, 1)]
void foo(foo_inputs inputs) {
  foo_inner(inputs.tint_local_index);
}
)");
}

}  // namespace
}  // namespace tint::glsl::writer