[ir] Move ir to `tint::core::ir` namespace

Move the contents of the `tint::ir` namespace to the new `tint::core::ir` location.
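
The change is a mechanical rename of qualified names; a minimal illustrative
sketch of the pattern (not part of this patch, identifiers shown only as an
example of the move):

    // Before: IR types were declared under tint::ir.
    namespace tint::ir {
    class Module;
    }  // namespace tint::ir
    tint::ir::Module old_mod;

    // After: the same types are declared under tint::core::ir.
    namespace tint::core::ir {
    class Module;
    }  // namespace tint::core::ir
    tint::core::ir::Module new_mod;

All uses in the SPIR-V writer (tests, printer, raise transforms) are updated
accordingly.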

Bug: tint:1718
Change-Id: Id64e070328732302b64f75e7c73449a12528e824
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/147020
Commit-Queue: Dan Sinclair <dsinclair@chromium.org>
Reviewed-by: Ben Clayton <bclayton@google.com>
Kokoro: Kokoro <noreply+kokoro@google.com>
diff --git a/src/tint/lang/spirv/writer/binary_test.cc b/src/tint/lang/spirv/writer/binary_test.cc
index b9fdc92..d15d9bc 100644
--- a/src/tint/lang/spirv/writer/binary_test.cc
+++ b/src/tint/lang/spirv/writer/binary_test.cc
@@ -30,7 +30,7 @@
     /// The element type to test.
     TestElementType type;
     /// The binary operation.
-    enum ir::Binary::Kind kind;
+    enum core::ir::Binary::Kind kind;
     /// The expected SPIR-V instruction.
     std::string spirv_inst;
     /// The expected SPIR-V result type name.
@@ -71,52 +71,53 @@
 INSTANTIATE_TEST_SUITE_P(
     SpirvWriterTest_Binary_I32,
     Arithmetic_Bitwise,
-    testing::Values(BinaryTestCase{kI32, ir::Binary::Kind::kAdd, "OpIAdd", "int"},
-                    BinaryTestCase{kI32, ir::Binary::Kind::kSubtract, "OpISub", "int"},
-                    BinaryTestCase{kI32, ir::Binary::Kind::kMultiply, "OpIMul", "int"},
-                    BinaryTestCase{kI32, ir::Binary::Kind::kDivide, "OpSDiv", "int"},
-                    BinaryTestCase{kI32, ir::Binary::Kind::kModulo, "OpSRem", "int"},
-                    BinaryTestCase{kI32, ir::Binary::Kind::kAnd, "OpBitwiseAnd", "int"},
-                    BinaryTestCase{kI32, ir::Binary::Kind::kOr, "OpBitwiseOr", "int"},
-                    BinaryTestCase{kI32, ir::Binary::Kind::kXor, "OpBitwiseXor", "int"},
-                    BinaryTestCase{kI32, ir::Binary::Kind::kShiftLeft, "OpShiftLeftLogical", "int"},
-                    BinaryTestCase{kI32, ir::Binary::Kind::kShiftRight, "OpShiftRightArithmetic",
-                                   "int"}));
+    testing::Values(BinaryTestCase{kI32, core::ir::Binary::Kind::kAdd, "OpIAdd", "int"},
+                    BinaryTestCase{kI32, core::ir::Binary::Kind::kSubtract, "OpISub", "int"},
+                    BinaryTestCase{kI32, core::ir::Binary::Kind::kMultiply, "OpIMul", "int"},
+                    BinaryTestCase{kI32, core::ir::Binary::Kind::kDivide, "OpSDiv", "int"},
+                    BinaryTestCase{kI32, core::ir::Binary::Kind::kModulo, "OpSRem", "int"},
+                    BinaryTestCase{kI32, core::ir::Binary::Kind::kAnd, "OpBitwiseAnd", "int"},
+                    BinaryTestCase{kI32, core::ir::Binary::Kind::kOr, "OpBitwiseOr", "int"},
+                    BinaryTestCase{kI32, core::ir::Binary::Kind::kXor, "OpBitwiseXor", "int"},
+                    BinaryTestCase{kI32, core::ir::Binary::Kind::kShiftLeft, "OpShiftLeftLogical",
+                                   "int"},
+                    BinaryTestCase{kI32, core::ir::Binary::Kind::kShiftRight,
+                                   "OpShiftRightArithmetic", "int"}));
 INSTANTIATE_TEST_SUITE_P(
     SpirvWriterTest_Binary_U32,
     Arithmetic_Bitwise,
     testing::Values(
-        BinaryTestCase{kU32, ir::Binary::Kind::kAdd, "OpIAdd", "uint"},
-        BinaryTestCase{kU32, ir::Binary::Kind::kSubtract, "OpISub", "uint"},
-        BinaryTestCase{kU32, ir::Binary::Kind::kMultiply, "OpIMul", "uint"},
-        BinaryTestCase{kU32, ir::Binary::Kind::kDivide, "OpUDiv", "uint"},
-        BinaryTestCase{kU32, ir::Binary::Kind::kModulo, "OpUMod", "uint"},
-        BinaryTestCase{kU32, ir::Binary::Kind::kAnd, "OpBitwiseAnd", "uint"},
-        BinaryTestCase{kU32, ir::Binary::Kind::kOr, "OpBitwiseOr", "uint"},
-        BinaryTestCase{kU32, ir::Binary::Kind::kXor, "OpBitwiseXor", "uint"},
-        BinaryTestCase{kU32, ir::Binary::Kind::kShiftLeft, "OpShiftLeftLogical", "uint"},
-        BinaryTestCase{kU32, ir::Binary::Kind::kShiftRight, "OpShiftRightLogical", "uint"}));
+        BinaryTestCase{kU32, core::ir::Binary::Kind::kAdd, "OpIAdd", "uint"},
+        BinaryTestCase{kU32, core::ir::Binary::Kind::kSubtract, "OpISub", "uint"},
+        BinaryTestCase{kU32, core::ir::Binary::Kind::kMultiply, "OpIMul", "uint"},
+        BinaryTestCase{kU32, core::ir::Binary::Kind::kDivide, "OpUDiv", "uint"},
+        BinaryTestCase{kU32, core::ir::Binary::Kind::kModulo, "OpUMod", "uint"},
+        BinaryTestCase{kU32, core::ir::Binary::Kind::kAnd, "OpBitwiseAnd", "uint"},
+        BinaryTestCase{kU32, core::ir::Binary::Kind::kOr, "OpBitwiseOr", "uint"},
+        BinaryTestCase{kU32, core::ir::Binary::Kind::kXor, "OpBitwiseXor", "uint"},
+        BinaryTestCase{kU32, core::ir::Binary::Kind::kShiftLeft, "OpShiftLeftLogical", "uint"},
+        BinaryTestCase{kU32, core::ir::Binary::Kind::kShiftRight, "OpShiftRightLogical", "uint"}));
 INSTANTIATE_TEST_SUITE_P(
     SpirvWriterTest_Binary_F32,
     Arithmetic_Bitwise,
-    testing::Values(BinaryTestCase{kF32, ir::Binary::Kind::kAdd, "OpFAdd", "float"},
-                    BinaryTestCase{kF32, ir::Binary::Kind::kSubtract, "OpFSub", "float"},
-                    BinaryTestCase{kF32, ir::Binary::Kind::kMultiply, "OpFMul", "float"},
-                    BinaryTestCase{kF32, ir::Binary::Kind::kDivide, "OpFDiv", "float"},
-                    BinaryTestCase{kF32, ir::Binary::Kind::kModulo, "OpFRem", "float"}));
+    testing::Values(BinaryTestCase{kF32, core::ir::Binary::Kind::kAdd, "OpFAdd", "float"},
+                    BinaryTestCase{kF32, core::ir::Binary::Kind::kSubtract, "OpFSub", "float"},
+                    BinaryTestCase{kF32, core::ir::Binary::Kind::kMultiply, "OpFMul", "float"},
+                    BinaryTestCase{kF32, core::ir::Binary::Kind::kDivide, "OpFDiv", "float"},
+                    BinaryTestCase{kF32, core::ir::Binary::Kind::kModulo, "OpFRem", "float"}));
 INSTANTIATE_TEST_SUITE_P(
     SpirvWriterTest_Binary_F16,
     Arithmetic_Bitwise,
-    testing::Values(BinaryTestCase{kF16, ir::Binary::Kind::kAdd, "OpFAdd", "half"},
-                    BinaryTestCase{kF16, ir::Binary::Kind::kSubtract, "OpFSub", "half"},
-                    BinaryTestCase{kF16, ir::Binary::Kind::kMultiply, "OpFMul", "half"},
-                    BinaryTestCase{kF16, ir::Binary::Kind::kDivide, "OpFDiv", "half"},
-                    BinaryTestCase{kF16, ir::Binary::Kind::kModulo, "OpFRem", "half"}));
+    testing::Values(BinaryTestCase{kF16, core::ir::Binary::Kind::kAdd, "OpFAdd", "half"},
+                    BinaryTestCase{kF16, core::ir::Binary::Kind::kSubtract, "OpFSub", "half"},
+                    BinaryTestCase{kF16, core::ir::Binary::Kind::kMultiply, "OpFMul", "half"},
+                    BinaryTestCase{kF16, core::ir::Binary::Kind::kDivide, "OpFDiv", "half"},
+                    BinaryTestCase{kF16, core::ir::Binary::Kind::kModulo, "OpFRem", "half"}));
 INSTANTIATE_TEST_SUITE_P(
     SpirvWriterTest_Binary_Bool,
     Arithmetic_Bitwise,
-    testing::Values(BinaryTestCase{kBool, ir::Binary::Kind::kAnd, "OpLogicalAnd", "bool"},
-                    BinaryTestCase{kBool, ir::Binary::Kind::kOr, "OpLogicalOr", "bool"}));
+    testing::Values(BinaryTestCase{kBool, core::ir::Binary::Kind::kAnd, "OpLogicalAnd", "bool"},
+                    BinaryTestCase{kBool, core::ir::Binary::Kind::kOr, "OpLogicalOr", "bool"}));
 
 TEST_F(SpirvWriterTest, Binary_ScalarTimesVector_F32) {
     auto* scalar = b.FunctionParam("scalar", ty.f32());
@@ -259,47 +260,53 @@
     SpirvWriterTest_Binary_I32,
     Comparison,
     testing::Values(
-        BinaryTestCase{kI32, ir::Binary::Kind::kEqual, "OpIEqual", "bool"},
-        BinaryTestCase{kI32, ir::Binary::Kind::kNotEqual, "OpINotEqual", "bool"},
-        BinaryTestCase{kI32, ir::Binary::Kind::kGreaterThan, "OpSGreaterThan", "bool"},
-        BinaryTestCase{kI32, ir::Binary::Kind::kGreaterThanEqual, "OpSGreaterThanEqual", "bool"},
-        BinaryTestCase{kI32, ir::Binary::Kind::kLessThan, "OpSLessThan", "bool"},
-        BinaryTestCase{kI32, ir::Binary::Kind::kLessThanEqual, "OpSLessThanEqual", "bool"}));
+        BinaryTestCase{kI32, core::ir::Binary::Kind::kEqual, "OpIEqual", "bool"},
+        BinaryTestCase{kI32, core::ir::Binary::Kind::kNotEqual, "OpINotEqual", "bool"},
+        BinaryTestCase{kI32, core::ir::Binary::Kind::kGreaterThan, "OpSGreaterThan", "bool"},
+        BinaryTestCase{kI32, core::ir::Binary::Kind::kGreaterThanEqual, "OpSGreaterThanEqual",
+                       "bool"},
+        BinaryTestCase{kI32, core::ir::Binary::Kind::kLessThan, "OpSLessThan", "bool"},
+        BinaryTestCase{kI32, core::ir::Binary::Kind::kLessThanEqual, "OpSLessThanEqual", "bool"}));
 INSTANTIATE_TEST_SUITE_P(
     SpirvWriterTest_Binary_U32,
     Comparison,
     testing::Values(
-        BinaryTestCase{kU32, ir::Binary::Kind::kEqual, "OpIEqual", "bool"},
-        BinaryTestCase{kU32, ir::Binary::Kind::kNotEqual, "OpINotEqual", "bool"},
-        BinaryTestCase{kU32, ir::Binary::Kind::kGreaterThan, "OpUGreaterThan", "bool"},
-        BinaryTestCase{kU32, ir::Binary::Kind::kGreaterThanEqual, "OpUGreaterThanEqual", "bool"},
-        BinaryTestCase{kU32, ir::Binary::Kind::kLessThan, "OpULessThan", "bool"},
-        BinaryTestCase{kU32, ir::Binary::Kind::kLessThanEqual, "OpULessThanEqual", "bool"}));
+        BinaryTestCase{kU32, core::ir::Binary::Kind::kEqual, "OpIEqual", "bool"},
+        BinaryTestCase{kU32, core::ir::Binary::Kind::kNotEqual, "OpINotEqual", "bool"},
+        BinaryTestCase{kU32, core::ir::Binary::Kind::kGreaterThan, "OpUGreaterThan", "bool"},
+        BinaryTestCase{kU32, core::ir::Binary::Kind::kGreaterThanEqual, "OpUGreaterThanEqual",
+                       "bool"},
+        BinaryTestCase{kU32, core::ir::Binary::Kind::kLessThan, "OpULessThan", "bool"},
+        BinaryTestCase{kU32, core::ir::Binary::Kind::kLessThanEqual, "OpULessThanEqual", "bool"}));
 INSTANTIATE_TEST_SUITE_P(
     SpirvWriterTest_Binary_F32,
     Comparison,
     testing::Values(
-        BinaryTestCase{kF32, ir::Binary::Kind::kEqual, "OpFOrdEqual", "bool"},
-        BinaryTestCase{kF32, ir::Binary::Kind::kNotEqual, "OpFOrdNotEqual", "bool"},
-        BinaryTestCase{kF32, ir::Binary::Kind::kGreaterThan, "OpFOrdGreaterThan", "bool"},
-        BinaryTestCase{kF32, ir::Binary::Kind::kGreaterThanEqual, "OpFOrdGreaterThanEqual", "bool"},
-        BinaryTestCase{kF32, ir::Binary::Kind::kLessThan, "OpFOrdLessThan", "bool"},
-        BinaryTestCase{kF32, ir::Binary::Kind::kLessThanEqual, "OpFOrdLessThanEqual", "bool"}));
+        BinaryTestCase{kF32, core::ir::Binary::Kind::kEqual, "OpFOrdEqual", "bool"},
+        BinaryTestCase{kF32, core::ir::Binary::Kind::kNotEqual, "OpFOrdNotEqual", "bool"},
+        BinaryTestCase{kF32, core::ir::Binary::Kind::kGreaterThan, "OpFOrdGreaterThan", "bool"},
+        BinaryTestCase{kF32, core::ir::Binary::Kind::kGreaterThanEqual, "OpFOrdGreaterThanEqual",
+                       "bool"},
+        BinaryTestCase{kF32, core::ir::Binary::Kind::kLessThan, "OpFOrdLessThan", "bool"},
+        BinaryTestCase{kF32, core::ir::Binary::Kind::kLessThanEqual, "OpFOrdLessThanEqual",
+                       "bool"}));
 INSTANTIATE_TEST_SUITE_P(
     SpirvWriterTest_Binary_F16,
     Comparison,
     testing::Values(
-        BinaryTestCase{kF16, ir::Binary::Kind::kEqual, "OpFOrdEqual", "bool"},
-        BinaryTestCase{kF16, ir::Binary::Kind::kNotEqual, "OpFOrdNotEqual", "bool"},
-        BinaryTestCase{kF16, ir::Binary::Kind::kGreaterThan, "OpFOrdGreaterThan", "bool"},
-        BinaryTestCase{kF16, ir::Binary::Kind::kGreaterThanEqual, "OpFOrdGreaterThanEqual", "bool"},
-        BinaryTestCase{kF16, ir::Binary::Kind::kLessThan, "OpFOrdLessThan", "bool"},
-        BinaryTestCase{kF16, ir::Binary::Kind::kLessThanEqual, "OpFOrdLessThanEqual", "bool"}));
+        BinaryTestCase{kF16, core::ir::Binary::Kind::kEqual, "OpFOrdEqual", "bool"},
+        BinaryTestCase{kF16, core::ir::Binary::Kind::kNotEqual, "OpFOrdNotEqual", "bool"},
+        BinaryTestCase{kF16, core::ir::Binary::Kind::kGreaterThan, "OpFOrdGreaterThan", "bool"},
+        BinaryTestCase{kF16, core::ir::Binary::Kind::kGreaterThanEqual, "OpFOrdGreaterThanEqual",
+                       "bool"},
+        BinaryTestCase{kF16, core::ir::Binary::Kind::kLessThan, "OpFOrdLessThan", "bool"},
+        BinaryTestCase{kF16, core::ir::Binary::Kind::kLessThanEqual, "OpFOrdLessThanEqual",
+                       "bool"}));
 INSTANTIATE_TEST_SUITE_P(SpirvWriterTest_Binary_Bool,
                          Comparison,
-                         testing::Values(BinaryTestCase{kBool, ir::Binary::Kind::kEqual,
+                         testing::Values(BinaryTestCase{kBool, core::ir::Binary::Kind::kEqual,
                                                         "OpLogicalEqual", "bool"},
-                                         BinaryTestCase{kBool, ir::Binary::Kind::kNotEqual,
+                                         BinaryTestCase{kBool, core::ir::Binary::Kind::kNotEqual,
                                                         "OpLogicalNotEqual", "bool"}));
 
 TEST_F(SpirvWriterTest, Binary_Chain) {
diff --git a/src/tint/lang/spirv/writer/common/helper_test.h b/src/tint/lang/spirv/writer/common/helper_test.h
index 62e811c..e534c6a 100644
--- a/src/tint/lang/spirv/writer/common/helper_test.h
+++ b/src/tint/lang/spirv/writer/common/helper_test.h
@@ -81,9 +81,9 @@
     SpirvWriterTestHelperBase() : writer_(&mod, false) {}
 
     /// The test module.
-    ir::Module mod;
+    core::ir::Module mod;
     /// The test builder.
-    ir::Builder b{mod};
+    core::ir::Builder b{mod};
     /// The type manager.
     core::type::Manager& ty{mod.Types()};
 
@@ -199,7 +199,7 @@
     /// @param type the element type
     /// @param value the optional value to use
     /// @returns the scalar value
-    ir::Constant* MakeScalarValue(TestElementType type, uint32_t value = 1) {
+    core::ir::Constant* MakeScalarValue(TestElementType type, uint32_t value = 1) {
         switch (type) {
             case kBool:
                 return b.Constant(true);
@@ -218,7 +218,7 @@
     /// Helper to make a vector value with an element type of `type`.
     /// @param type the element type
     /// @returns the vector value
-    ir::Constant* MakeVectorValue(TestElementType type) {
+    core::ir::Constant* MakeVectorValue(TestElementType type) {
         switch (type) {
             case kBool:
                 return b.Composite(MakeVectorType(type), true, false);
diff --git a/src/tint/lang/spirv/writer/discard_test.cc b/src/tint/lang/spirv/writer/discard_test.cc
index ea3281a..74d383d 100644
--- a/src/tint/lang/spirv/writer/discard_test.cc
+++ b/src/tint/lang/spirv/writer/discard_test.cc
@@ -28,8 +28,8 @@
     b.RootBlock()->Append(buffer);
 
     auto* front_facing = b.FunctionParam("front_facing", ty.bool_());
-    front_facing->SetBuiltin(ir::FunctionParam::Builtin::kFrontFacing);
-    auto* ep = b.Function("ep", ty.f32(), ir::Function::PipelineStage::kFragment);
+    front_facing->SetBuiltin(core::ir::FunctionParam::Builtin::kFrontFacing);
+    auto* ep = b.Function("ep", ty.f32(), core::ir::Function::PipelineStage::kFragment);
     ep->SetParams({front_facing});
     ep->SetReturnLocation(0_u, {});
 
diff --git a/src/tint/lang/spirv/writer/function_test.cc b/src/tint/lang/spirv/writer/function_test.cc
index 971cc30..6ab2c34 100644
--- a/src/tint/lang/spirv/writer/function_test.cc
+++ b/src/tint/lang/spirv/writer/function_test.cc
@@ -80,7 +80,7 @@
 
 TEST_F(SpirvWriterTest, Function_EntryPoint_Compute) {
     auto* func =
-        b.Function("main", ty.void_(), ir::Function::PipelineStage::kCompute, {{32, 4, 1}});
+        b.Function("main", ty.void_(), core::ir::Function::PipelineStage::kCompute, {{32, 4, 1}});
     b.Append(func->Block(), [&] {  //
         b.Return(func);
     });
@@ -106,7 +106,7 @@
 }
 
 TEST_F(SpirvWriterTest, Function_EntryPoint_Fragment) {
-    auto* func = b.Function("main", ty.void_(), ir::Function::PipelineStage::kFragment);
+    auto* func = b.Function("main", ty.void_(), core::ir::Function::PipelineStage::kFragment);
     b.Append(func->Block(), [&] {  //
         b.Return(func);
     });
@@ -132,7 +132,7 @@
 }
 
 TEST_F(SpirvWriterTest, Function_EntryPoint_Vertex) {
-    auto* func = b.Function("main", ty.void_(), ir::Function::PipelineStage::kVertex);
+    auto* func = b.Function("main", ty.void_(), core::ir::Function::PipelineStage::kVertex);
     b.Append(func->Block(), [&] {  //
         b.Return(func);
     });
@@ -157,17 +157,19 @@
 }
 
 TEST_F(SpirvWriterTest, Function_EntryPoint_Multiple) {
-    auto* f1 = b.Function("main1", ty.void_(), ir::Function::PipelineStage::kCompute, {{32, 4, 1}});
+    auto* f1 =
+        b.Function("main1", ty.void_(), core::ir::Function::PipelineStage::kCompute, {{32, 4, 1}});
     b.Append(f1->Block(), [&] {  //
         b.Return(f1);
     });
 
-    auto* f2 = b.Function("main2", ty.void_(), ir::Function::PipelineStage::kCompute, {{8, 2, 16}});
+    auto* f2 =
+        b.Function("main2", ty.void_(), core::ir::Function::PipelineStage::kCompute, {{8, 2, 16}});
     b.Append(f2->Block(), [&] {  //
         b.Return(f2);
     });
 
-    auto* f3 = b.Function("main3", ty.void_(), ir::Function::PipelineStage::kFragment);
+    auto* f3 = b.Function("main3", ty.void_(), core::ir::Function::PipelineStage::kFragment);
     b.Append(f3->Block(), [&] {  //
         b.Return(f3);
     });
diff --git a/src/tint/lang/spirv/writer/printer/printer.cc b/src/tint/lang/spirv/writer/printer/printer.cc
index 827ee1e..ca7e390 100644
--- a/src/tint/lang/spirv/writer/printer/printer.cc
+++ b/src/tint/lang/spirv/writer/printer/printer.cc
@@ -150,11 +150,11 @@
 
 }  // namespace
 
-Printer::Printer(ir::Module* module, bool zero_init_workgroup_mem)
+Printer::Printer(core::ir::Module* module, bool zero_init_workgroup_mem)
     : ir_(module), b_(*module), zero_init_workgroup_memory_(zero_init_workgroup_mem) {}
 
 Result<std::vector<uint32_t>, std::string> Printer::Generate() {
-    auto valid = ir::ValidateAndDumpIfNeeded(*ir_, "SPIR-V writer");
+    auto valid = core::ir::ValidateAndDumpIfNeeded(*ir_, "SPIR-V writer");
     if (!valid) {
         return std::move(valid.Failure());
     }
@@ -231,7 +231,7 @@
     return SpvBuiltInMax;
 }
 
-uint32_t Printer::Constant(ir::Constant* constant) {
+uint32_t Printer::Constant(core::ir::Constant* constant) {
     // If it is a literal operand, just return the value.
     if (auto* literal = constant->As<raise::LiteralOperand>()) {
         return literal->Value()->ValueAs<uint32_t>();
@@ -383,18 +383,20 @@
     });
 }
 
-uint32_t Printer::Value(ir::Instruction* inst) {
+uint32_t Printer::Value(core::ir::Instruction* inst) {
     return Value(inst->Result());
 }
 
-uint32_t Printer::Value(ir::Value* value) {
+uint32_t Printer::Value(core::ir::Value* value) {
     return Switch(
         value,  //
-        [&](ir::Constant* constant) { return Constant(constant); },
-        [&](ir::Value*) { return values_.GetOrCreate(value, [&] { return module_.NextId(); }); });
+        [&](core::ir::Constant* constant) { return Constant(constant); },
+        [&](core::ir::Value*) {
+            return values_.GetOrCreate(value, [&] { return module_.NextId(); });
+        });
 }
 
-uint32_t Printer::Label(ir::Block* block) {
+uint32_t Printer::Label(core::ir::Block* block) {
     return block_labels_.GetOrCreate(block, [&] { return module_.NextId(); });
 }
 
@@ -573,14 +575,14 @@
                      {id, sampled_type, dim, depth, array, ms, sampled, format});
 }
 
-void Printer::EmitFunction(ir::Function* func) {
+void Printer::EmitFunction(core::ir::Function* func) {
     auto id = Value(func);
 
     // Emit the function name.
     module_.PushDebug(spv::Op::OpName, {id, Operand(ir_->NameOf(func).Name())});
 
     // Emit OpEntryPoint and OpExecutionMode declarations if needed.
-    if (func->Stage() != ir::Function::PipelineStage::kUndefined) {
+    if (func->Stage() != core::ir::Function::PipelineStage::kUndefined) {
         EmitEntryPoint(func, id);
     }
 
@@ -628,10 +630,10 @@
     module_.PushFunction(current_function_);
 }
 
-void Printer::EmitEntryPoint(ir::Function* func, uint32_t id) {
+void Printer::EmitEntryPoint(core::ir::Function* func, uint32_t id) {
     SpvExecutionModel stage = SpvExecutionModelMax;
     switch (func->Stage()) {
-        case ir::Function::PipelineStage::kCompute: {
+        case core::ir::Function::PipelineStage::kCompute: {
             stage = SpvExecutionModelGLCompute;
             module_.PushExecutionMode(
                 spv::Op::OpExecutionMode,
@@ -639,17 +641,17 @@
                  func->WorkgroupSize()->at(1), func->WorkgroupSize()->at(2)});
             break;
         }
-        case ir::Function::PipelineStage::kFragment: {
+        case core::ir::Function::PipelineStage::kFragment: {
             stage = SpvExecutionModelFragment;
             module_.PushExecutionMode(spv::Op::OpExecutionMode,
                                       {id, U32Operand(SpvExecutionModeOriginUpperLeft)});
             break;
         }
-        case ir::Function::PipelineStage::kVertex: {
+        case core::ir::Function::PipelineStage::kVertex: {
             stage = SpvExecutionModelVertex;
             break;
         }
-        case ir::Function::PipelineStage::kUndefined:
+        case core::ir::Function::PipelineStage::kUndefined:
             TINT_ICE() << "undefined pipeline stage for entry point";
             return;
     }
@@ -659,7 +661,7 @@
     // Add the list of all referenced shader IO variables.
     if (ir_->root_block) {
         for (auto* global : *ir_->root_block) {
-            auto* var = global->As<ir::Var>();
+            auto* var = global->As<core::ir::Var>();
             if (!var) {
                 continue;
             }
@@ -702,18 +704,18 @@
     module_.PushEntryPoint(spv::Op::OpEntryPoint, operands);
 }
 
-void Printer::EmitRootBlock(ir::Block* root_block) {
+void Printer::EmitRootBlock(core::ir::Block* root_block) {
     for (auto* inst : *root_block) {
         Switch(
             inst,  //
-            [&](ir::Var* v) { return EmitVar(v); },
+            [&](core::ir::Var* v) { return EmitVar(v); },
             [&](Default) {
                 TINT_ICE() << "unimplemented root block instruction: " << inst->TypeInfo().name;
             });
     }
 }
 
-void Printer::EmitBlock(ir::Block* block) {
+void Printer::EmitBlock(core::ir::Block* block) {
     // Emit the label.
     // Skip if this is the function's entry block, as it will be emitted by the function object.
     if (!current_function_.instructions().empty()) {
@@ -727,7 +729,7 @@
         return;
     }
 
-    if (auto* mib = block->As<ir::MultiInBlock>()) {
+    if (auto* mib = block->As<core::ir::MultiInBlock>()) {
         // Emit all OpPhi nodes for incoming branches to block.
         EmitIncomingPhis(mib);
     }
@@ -736,7 +738,7 @@
     EmitBlockInstructions(block);
 }
 
-void Printer::EmitIncomingPhis(ir::MultiInBlock* block) {
+void Printer::EmitIncomingPhis(core::ir::MultiInBlock* block) {
     // Emit Phi nodes for all the incoming block parameters
     for (size_t param_idx = 0; param_idx < block->Params().Length(); param_idx++) {
         auto* param = block->Params()[param_idx];
@@ -752,34 +754,34 @@
     }
 }
 
-void Printer::EmitBlockInstructions(ir::Block* block) {
+void Printer::EmitBlockInstructions(core::ir::Block* block) {
     for (auto* inst : *block) {
         Switch(
-            inst,                                                           //
-            [&](ir::Access* a) { EmitAccess(a); },                          //
-            [&](ir::Binary* b) { EmitBinary(b); },                          //
-            [&](ir::Bitcast* b) { EmitBitcast(b); },                        //
-            [&](ir::CoreBuiltinCall* b) { EmitCoreBuiltinCall(b); },        //
-            [&](ir::Construct* c) { EmitConstruct(c); },                    //
-            [&](ir::Convert* c) { EmitConvert(c); },                        //
-            [&](ir::IntrinsicCall* i) { EmitIntrinsicCall(i); },            //
-            [&](ir::Load* l) { EmitLoad(l); },                              //
-            [&](ir::LoadVectorElement* l) { EmitLoadVectorElement(l); },    //
-            [&](ir::Loop* l) { EmitLoop(l); },                              //
-            [&](ir::Switch* sw) { EmitSwitch(sw); },                        //
-            [&](ir::Swizzle* s) { EmitSwizzle(s); },                        //
-            [&](ir::Store* s) { EmitStore(s); },                            //
-            [&](ir::StoreVectorElement* s) { EmitStoreVectorElement(s); },  //
-            [&](ir::UserCall* c) { EmitUserCall(c); },                      //
-            [&](ir::Unary* u) { EmitUnary(u); },                            //
-            [&](ir::Var* v) { EmitVar(v); },                                //
-            [&](ir::Let* l) { EmitLet(l); },                                //
-            [&](ir::If* i) { EmitIf(i); },                                  //
-            [&](ir::Terminator* t) { EmitTerminator(t); },                  //
+            inst,                                                                 //
+            [&](core::ir::Access* a) { EmitAccess(a); },                          //
+            [&](core::ir::Binary* b) { EmitBinary(b); },                          //
+            [&](core::ir::Bitcast* b) { EmitBitcast(b); },                        //
+            [&](core::ir::CoreBuiltinCall* b) { EmitCoreBuiltinCall(b); },        //
+            [&](core::ir::Construct* c) { EmitConstruct(c); },                    //
+            [&](core::ir::Convert* c) { EmitConvert(c); },                        //
+            [&](core::ir::IntrinsicCall* i) { EmitIntrinsicCall(i); },            //
+            [&](core::ir::Load* l) { EmitLoad(l); },                              //
+            [&](core::ir::LoadVectorElement* l) { EmitLoadVectorElement(l); },    //
+            [&](core::ir::Loop* l) { EmitLoop(l); },                              //
+            [&](core::ir::Switch* sw) { EmitSwitch(sw); },                        //
+            [&](core::ir::Swizzle* s) { EmitSwizzle(s); },                        //
+            [&](core::ir::Store* s) { EmitStore(s); },                            //
+            [&](core::ir::StoreVectorElement* s) { EmitStoreVectorElement(s); },  //
+            [&](core::ir::UserCall* c) { EmitUserCall(c); },                      //
+            [&](core::ir::Unary* u) { EmitUnary(u); },                            //
+            [&](core::ir::Var* v) { EmitVar(v); },                                //
+            [&](core::ir::Let* l) { EmitLet(l); },                                //
+            [&](core::ir::If* i) { EmitIf(i); },                                  //
+            [&](core::ir::Terminator* t) { EmitTerminator(t); },                  //
             [&](Default) { TINT_ICE() << "unimplemented instruction: " << inst->TypeInfo().name; });
 
         // Set the name for the SPIR-V result ID if provided in the module.
-        if (inst->Result() && !inst->Is<ir::Var>()) {
+        if (inst->Result() && !inst->Is<core::ir::Var>()) {
             if (auto name = ir_->NameOf(inst)) {
                 module_.PushDebug(spv::Op::OpName, {Value(inst), Operand(name.Name())});
             }
@@ -792,10 +794,10 @@
     }
 }
 
-void Printer::EmitTerminator(ir::Terminator* t) {
+void Printer::EmitTerminator(core::ir::Terminator* t) {
     tint::Switch(  //
         t,         //
-        [&](ir::Return*) {
+        [&](core::ir::Return*) {
             if (!t->Args().IsEmpty()) {
                 TINT_ASSERT(t->Args().Length() == 1u);
                 OperandList operands;
@@ -806,7 +808,7 @@
             }
             return;
         },
-        [&](ir::BreakIf* breakif) {
+        [&](core::ir::BreakIf* breakif) {
             current_function_.push_inst(spv::Op::OpBranchConditional,
                                         {
                                             Value(breakif->Condition()),
@@ -814,24 +816,28 @@
                                             loop_header_label_,
                                         });
         },
-        [&](ir::Continue* cont) {
+        [&](core::ir::Continue* cont) {
             current_function_.push_inst(spv::Op::OpBranch, {Label(cont->Loop()->Continuing())});
         },
-        [&](ir::ExitIf*) { current_function_.push_inst(spv::Op::OpBranch, {if_merge_label_}); },
-        [&](ir::ExitLoop*) { current_function_.push_inst(spv::Op::OpBranch, {loop_merge_label_}); },
-        [&](ir::ExitSwitch*) {
+        [&](core::ir::ExitIf*) {
+            current_function_.push_inst(spv::Op::OpBranch, {if_merge_label_});
+        },
+        [&](core::ir::ExitLoop*) {
+            current_function_.push_inst(spv::Op::OpBranch, {loop_merge_label_});
+        },
+        [&](core::ir::ExitSwitch*) {
             current_function_.push_inst(spv::Op::OpBranch, {switch_merge_label_});
         },
-        [&](ir::NextIteration*) {
+        [&](core::ir::NextIteration*) {
             current_function_.push_inst(spv::Op::OpBranch, {loop_header_label_});
         },
-        [&](ir::TerminateInvocation*) { current_function_.push_inst(spv::Op::OpKill, {}); },
-        [&](ir::Unreachable*) { current_function_.push_inst(spv::Op::OpUnreachable, {}); },
+        [&](core::ir::TerminateInvocation*) { current_function_.push_inst(spv::Op::OpKill, {}); },
+        [&](core::ir::Unreachable*) { current_function_.push_inst(spv::Op::OpUnreachable, {}); },
 
         [&](Default) { TINT_ICE() << "unimplemented branch: " << t->TypeInfo().name; });
 }
 
-void Printer::EmitIf(ir::If* i) {
+void Printer::EmitIf(core::ir::If* i) {
     auto* true_block = i->True();
     auto* false_block = i->False();
 
@@ -846,11 +852,11 @@
     uint32_t true_label = merge_label;
     uint32_t false_label = merge_label;
     if (true_block->Length() > 1 || i->HasResults() ||
-        (true_block->HasTerminator() && !true_block->Terminator()->Is<ir::ExitIf>())) {
+        (true_block->HasTerminator() && !true_block->Terminator()->Is<core::ir::ExitIf>())) {
         true_label = Label(true_block);
     }
     if (false_block->Length() > 1 || i->HasResults() ||
-        (false_block->HasTerminator() && !false_block->Terminator()->Is<ir::ExitIf>())) {
+        (false_block->HasTerminator() && !false_block->Terminator()->Is<core::ir::ExitIf>())) {
         false_label = Label(false_block);
     }
 
@@ -874,7 +880,7 @@
     EmitExitPhis(i);
 }
 
-void Printer::EmitAccess(ir::Access* access) {
+void Printer::EmitAccess(core::ir::Access* access) {
     auto* ty = access->Result()->Type();
 
     auto id = Value(access);
@@ -893,7 +899,7 @@
     // If we hit a non-constant index into a vector type, use OpVectorExtractDynamic for it.
     auto* source_ty = access->Object()->Type();
     for (auto* idx : access->Indices()) {
-        if (auto* constant = idx->As<ir::Constant>()) {
+        if (auto* constant = idx->As<core::ir::Constant>()) {
             // Push the index to the chain and update the current type.
             auto i = constant->Value()->ValueAs<u32>();
             operands.push_back(i);
@@ -922,7 +928,7 @@
     current_function_.push_inst(spv::Op::OpCompositeExtract, std::move(operands));
 }
 
-void Printer::EmitBinary(ir::Binary* binary) {
+void Printer::EmitBinary(core::ir::Binary* binary) {
     auto id = Value(binary);
     auto lhs = Value(binary->LHS());
     auto rhs = Value(binary->RHS());
@@ -932,11 +938,11 @@
     // Determine the opcode.
     spv::Op op = spv::Op::Max;
     switch (binary->Kind()) {
-        case ir::Binary::Kind::kAdd: {
+        case core::ir::Binary::Kind::kAdd: {
             op = ty->is_integer_scalar_or_vector() ? spv::Op::OpIAdd : spv::Op::OpFAdd;
             break;
         }
-        case ir::Binary::Kind::kDivide: {
+        case core::ir::Binary::Kind::kDivide: {
             if (ty->is_signed_integer_scalar_or_vector()) {
                 op = spv::Op::OpSDiv;
             } else if (ty->is_unsigned_integer_scalar_or_vector()) {
@@ -946,7 +952,7 @@
             }
             break;
         }
-        case ir::Binary::Kind::kMultiply: {
+        case core::ir::Binary::Kind::kMultiply: {
             if (ty->is_integer_scalar_or_vector()) {
                 op = spv::Op::OpIMul;
             } else if (ty->is_float_scalar_or_vector()) {
@@ -954,11 +960,11 @@
             }
             break;
         }
-        case ir::Binary::Kind::kSubtract: {
+        case core::ir::Binary::Kind::kSubtract: {
             op = ty->is_integer_scalar_or_vector() ? spv::Op::OpISub : spv::Op::OpFSub;
             break;
         }
-        case ir::Binary::Kind::kModulo: {
+        case core::ir::Binary::Kind::kModulo: {
             if (ty->is_signed_integer_scalar_or_vector()) {
                 op = spv::Op::OpSRem;
             } else if (ty->is_unsigned_integer_scalar_or_vector()) {
@@ -969,7 +975,7 @@
             break;
         }
 
-        case ir::Binary::Kind::kAnd: {
+        case core::ir::Binary::Kind::kAnd: {
             if (ty->is_integer_scalar_or_vector()) {
                 op = spv::Op::OpBitwiseAnd;
             } else if (ty->is_bool_scalar_or_vector()) {
@@ -977,7 +983,7 @@
             }
             break;
         }
-        case ir::Binary::Kind::kOr: {
+        case core::ir::Binary::Kind::kOr: {
             if (ty->is_integer_scalar_or_vector()) {
                 op = spv::Op::OpBitwiseOr;
             } else if (ty->is_bool_scalar_or_vector()) {
@@ -985,16 +991,16 @@
             }
             break;
         }
-        case ir::Binary::Kind::kXor: {
+        case core::ir::Binary::Kind::kXor: {
             op = spv::Op::OpBitwiseXor;
             break;
         }
 
-        case ir::Binary::Kind::kShiftLeft: {
+        case core::ir::Binary::Kind::kShiftLeft: {
             op = spv::Op::OpShiftLeftLogical;
             break;
         }
-        case ir::Binary::Kind::kShiftRight: {
+        case core::ir::Binary::Kind::kShiftRight: {
             if (ty->is_signed_integer_scalar_or_vector()) {
                 op = spv::Op::OpShiftRightArithmetic;
             } else if (ty->is_unsigned_integer_scalar_or_vector()) {
@@ -1003,7 +1009,7 @@
             break;
         }
 
-        case ir::Binary::Kind::kEqual: {
+        case core::ir::Binary::Kind::kEqual: {
             if (lhs_ty->is_bool_scalar_or_vector()) {
                 op = spv::Op::OpLogicalEqual;
             } else if (lhs_ty->is_float_scalar_or_vector()) {
@@ -1013,7 +1019,7 @@
             }
             break;
         }
-        case ir::Binary::Kind::kNotEqual: {
+        case core::ir::Binary::Kind::kNotEqual: {
             if (lhs_ty->is_bool_scalar_or_vector()) {
                 op = spv::Op::OpLogicalNotEqual;
             } else if (lhs_ty->is_float_scalar_or_vector()) {
@@ -1023,7 +1029,7 @@
             }
             break;
         }
-        case ir::Binary::Kind::kGreaterThan: {
+        case core::ir::Binary::Kind::kGreaterThan: {
             if (lhs_ty->is_float_scalar_or_vector()) {
                 op = spv::Op::OpFOrdGreaterThan;
             } else if (lhs_ty->is_signed_integer_scalar_or_vector()) {
@@ -1033,7 +1039,7 @@
             }
             break;
         }
-        case ir::Binary::Kind::kGreaterThanEqual: {
+        case core::ir::Binary::Kind::kGreaterThanEqual: {
             if (lhs_ty->is_float_scalar_or_vector()) {
                 op = spv::Op::OpFOrdGreaterThanEqual;
             } else if (lhs_ty->is_signed_integer_scalar_or_vector()) {
@@ -1043,7 +1049,7 @@
             }
             break;
         }
-        case ir::Binary::Kind::kLessThan: {
+        case core::ir::Binary::Kind::kLessThan: {
             if (lhs_ty->is_float_scalar_or_vector()) {
                 op = spv::Op::OpFOrdLessThan;
             } else if (lhs_ty->is_signed_integer_scalar_or_vector()) {
@@ -1053,7 +1059,7 @@
             }
             break;
         }
-        case ir::Binary::Kind::kLessThanEqual: {
+        case core::ir::Binary::Kind::kLessThanEqual: {
             if (lhs_ty->is_float_scalar_or_vector()) {
                 op = spv::Op::OpFOrdLessThanEqual;
             } else if (lhs_ty->is_signed_integer_scalar_or_vector()) {
@@ -1069,7 +1075,7 @@
     current_function_.push_inst(op, {Type(ty), id, lhs, rhs});
 }
 
-void Printer::EmitBitcast(ir::Bitcast* bitcast) {
+void Printer::EmitBitcast(core::ir::Bitcast* bitcast) {
     auto* ty = bitcast->Result()->Type();
     if (ty == bitcast->Val()->Type()) {
         values_.Add(bitcast->Result(), Value(bitcast->Val()));
@@ -1079,7 +1085,7 @@
                                 {Type(ty), Value(bitcast), Value(bitcast->Val())});
 }
 
-void Printer::EmitCoreBuiltinCall(ir::CoreBuiltinCall* builtin) {
+void Printer::EmitCoreBuiltinCall(core::ir::CoreBuiltinCall* builtin) {
     auto* result_ty = builtin->Result()->Type();
 
     if (builtin->Func() == core::Function::kAbs &&
@@ -1425,7 +1431,7 @@
     current_function_.push_inst(op, operands);
 }
 
-void Printer::EmitConstruct(ir::Construct* construct) {
+void Printer::EmitConstruct(core::ir::Construct* construct) {
     // If there is just a single argument with the same type as the result, this is an identity
     // constructor and we can just pass through the ID of the argument.
     if (construct->Args().Length() == 1 &&
@@ -1441,7 +1447,7 @@
     current_function_.push_inst(spv::Op::OpCompositeConstruct, std::move(operands));
 }
 
-void Printer::EmitConvert(ir::Convert* convert) {
+void Printer::EmitConvert(core::ir::Convert* convert) {
     auto* res_ty = convert->Result()->Type();
     auto* arg_ty = convert->Args()[0]->Type();
 
@@ -1485,8 +1491,8 @@
         operands.push_back(ConstantNull(arg_ty));
     } else if (arg_ty->is_bool_scalar_or_vector()) {
         // Select between constant one and zero, splatting them to vectors if necessary.
-        ir::Constant* one = nullptr;
-        ir::Constant* zero = nullptr;
+        core::ir::Constant* one = nullptr;
+        core::ir::Constant* zero = nullptr;
         Switch(
             res_ty->DeepestElement(),  //
             [&](const core::type::F32*) {
@@ -1523,107 +1529,107 @@
     current_function_.push_inst(op, std::move(operands));
 }
 
-void Printer::EmitIntrinsicCall(ir::IntrinsicCall* call) {
+void Printer::EmitIntrinsicCall(core::ir::IntrinsicCall* call) {
     auto id = Value(call);
 
     spv::Op op = spv::Op::Max;
     switch (call->Kind()) {
-        case ir::IntrinsicCall::Kind::kSpirvArrayLength:
+        case core::ir::IntrinsicCall::Kind::kSpirvArrayLength:
             op = spv::Op::OpArrayLength;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvAtomicIAdd:
+        case core::ir::IntrinsicCall::Kind::kSpirvAtomicIAdd:
             op = spv::Op::OpAtomicIAdd;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvAtomicISub:
+        case core::ir::IntrinsicCall::Kind::kSpirvAtomicISub:
             op = spv::Op::OpAtomicISub;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvAtomicAnd:
+        case core::ir::IntrinsicCall::Kind::kSpirvAtomicAnd:
             op = spv::Op::OpAtomicAnd;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvAtomicCompareExchange:
+        case core::ir::IntrinsicCall::Kind::kSpirvAtomicCompareExchange:
             op = spv::Op::OpAtomicCompareExchange;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvAtomicExchange:
+        case core::ir::IntrinsicCall::Kind::kSpirvAtomicExchange:
             op = spv::Op::OpAtomicExchange;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvAtomicLoad:
+        case core::ir::IntrinsicCall::Kind::kSpirvAtomicLoad:
             op = spv::Op::OpAtomicLoad;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvAtomicOr:
+        case core::ir::IntrinsicCall::Kind::kSpirvAtomicOr:
             op = spv::Op::OpAtomicOr;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvAtomicSMax:
+        case core::ir::IntrinsicCall::Kind::kSpirvAtomicSMax:
             op = spv::Op::OpAtomicSMax;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvAtomicSMin:
+        case core::ir::IntrinsicCall::Kind::kSpirvAtomicSMin:
             op = spv::Op::OpAtomicSMin;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvAtomicStore:
+        case core::ir::IntrinsicCall::Kind::kSpirvAtomicStore:
             op = spv::Op::OpAtomicStore;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvAtomicUMax:
+        case core::ir::IntrinsicCall::Kind::kSpirvAtomicUMax:
             op = spv::Op::OpAtomicUMax;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvAtomicUMin:
+        case core::ir::IntrinsicCall::Kind::kSpirvAtomicUMin:
             op = spv::Op::OpAtomicUMin;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvAtomicXor:
+        case core::ir::IntrinsicCall::Kind::kSpirvAtomicXor:
             op = spv::Op::OpAtomicXor;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvDot:
+        case core::ir::IntrinsicCall::Kind::kSpirvDot:
             op = spv::Op::OpDot;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvImageFetch:
+        case core::ir::IntrinsicCall::Kind::kSpirvImageFetch:
             op = spv::Op::OpImageFetch;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvImageGather:
+        case core::ir::IntrinsicCall::Kind::kSpirvImageGather:
             op = spv::Op::OpImageGather;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvImageDrefGather:
+        case core::ir::IntrinsicCall::Kind::kSpirvImageDrefGather:
             op = spv::Op::OpImageDrefGather;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvImageQuerySize:
+        case core::ir::IntrinsicCall::Kind::kSpirvImageQuerySize:
             module_.PushCapability(SpvCapabilityImageQuery);
             op = spv::Op::OpImageQuerySize;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvImageQuerySizeLod:
+        case core::ir::IntrinsicCall::Kind::kSpirvImageQuerySizeLod:
             module_.PushCapability(SpvCapabilityImageQuery);
             op = spv::Op::OpImageQuerySizeLod;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvImageSampleImplicitLod:
+        case core::ir::IntrinsicCall::Kind::kSpirvImageSampleImplicitLod:
             op = spv::Op::OpImageSampleImplicitLod;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvImageSampleExplicitLod:
+        case core::ir::IntrinsicCall::Kind::kSpirvImageSampleExplicitLod:
             op = spv::Op::OpImageSampleExplicitLod;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvImageSampleDrefImplicitLod:
+        case core::ir::IntrinsicCall::Kind::kSpirvImageSampleDrefImplicitLod:
             op = spv::Op::OpImageSampleDrefImplicitLod;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvImageSampleDrefExplicitLod:
+        case core::ir::IntrinsicCall::Kind::kSpirvImageSampleDrefExplicitLod:
             op = spv::Op::OpImageSampleDrefExplicitLod;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvImageWrite:
+        case core::ir::IntrinsicCall::Kind::kSpirvImageWrite:
             op = spv::Op::OpImageWrite;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvMatrixTimesMatrix:
+        case core::ir::IntrinsicCall::Kind::kSpirvMatrixTimesMatrix:
             op = spv::Op::OpMatrixTimesMatrix;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvMatrixTimesScalar:
+        case core::ir::IntrinsicCall::Kind::kSpirvMatrixTimesScalar:
             op = spv::Op::OpMatrixTimesScalar;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvMatrixTimesVector:
+        case core::ir::IntrinsicCall::Kind::kSpirvMatrixTimesVector:
             op = spv::Op::OpMatrixTimesVector;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvSampledImage:
+        case core::ir::IntrinsicCall::Kind::kSpirvSampledImage:
             op = spv::Op::OpSampledImage;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvSelect:
+        case core::ir::IntrinsicCall::Kind::kSpirvSelect:
             op = spv::Op::OpSelect;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvVectorTimesMatrix:
+        case core::ir::IntrinsicCall::Kind::kSpirvVectorTimesMatrix:
             op = spv::Op::OpVectorTimesMatrix;
             break;
-        case ir::IntrinsicCall::Kind::kSpirvVectorTimesScalar:
+        case core::ir::IntrinsicCall::Kind::kSpirvVectorTimesScalar:
             op = spv::Op::OpVectorTimesScalar;
             break;
     }
@@ -1638,12 +1644,12 @@
     current_function_.push_inst(op, operands);
 }
 
-void Printer::EmitLoad(ir::Load* load) {
+void Printer::EmitLoad(core::ir::Load* load) {
     current_function_.push_inst(spv::Op::OpLoad,
                                 {Type(load->Result()->Type()), Value(load), Value(load->From())});
 }
 
-void Printer::EmitLoadVectorElement(ir::LoadVectorElement* load) {
+void Printer::EmitLoadVectorElement(core::ir::LoadVectorElement* load) {
     auto* vec_ptr_ty = load->From()->Type()->As<core::type::Pointer>();
     auto* el_ty = load->Result()->Type();
     auto* el_ptr_ty = ir_->Types().ptr(vec_ptr_ty->AddressSpace(), el_ty, vec_ptr_ty->Access());
@@ -1655,7 +1661,7 @@
                                 {Type(load->Result()->Type()), Value(load), el_ptr_id});
 }
 
-void Printer::EmitLoop(ir::Loop* loop) {
+void Printer::EmitLoop(core::ir::Loop* loop) {
     auto init_label = loop->HasInitializer() ? Label(loop->Initializer()) : 0;
     auto body_label = Label(loop->Body());
     auto continuing_label = Label(loop->Continuing());
@@ -1703,7 +1709,7 @@
     EmitExitPhis(loop);
 }
 
-void Printer::EmitSwitch(ir::Switch* swtch) {
+void Printer::EmitSwitch(core::ir::Switch* swtch) {
     // Find the default selector. There must be exactly one.
     uint32_t default_label = 0u;
     for (auto& c : swtch->Cases()) {
@@ -1748,7 +1754,7 @@
     EmitExitPhis(swtch);
 }
 
-void Printer::EmitSwizzle(ir::Swizzle* swizzle) {
+void Printer::EmitSwizzle(core::ir::Swizzle* swizzle) {
     auto id = Value(swizzle);
     auto obj = Value(swizzle->Object());
     OperandList operands = {Type(swizzle->Result()->Type()), id, obj, obj};
@@ -1758,11 +1764,11 @@
     current_function_.push_inst(spv::Op::OpVectorShuffle, operands);
 }
 
-void Printer::EmitStore(ir::Store* store) {
+void Printer::EmitStore(core::ir::Store* store) {
     current_function_.push_inst(spv::Op::OpStore, {Value(store->To()), Value(store->From())});
 }
 
-void Printer::EmitStoreVectorElement(ir::StoreVectorElement* store) {
+void Printer::EmitStoreVectorElement(core::ir::StoreVectorElement* store) {
     auto* vec_ptr_ty = store->To()->Type()->As<core::type::Pointer>();
     auto* el_ty = store->Value()->Type();
     auto* el_ptr_ty = ir_->Types().ptr(vec_ptr_ty->AddressSpace(), el_ty, vec_ptr_ty->Access());
@@ -1773,15 +1779,15 @@
     current_function_.push_inst(spv::Op::OpStore, {el_ptr_id, Value(store->Value())});
 }
 
-void Printer::EmitUnary(ir::Unary* unary) {
+void Printer::EmitUnary(core::ir::Unary* unary) {
     auto id = Value(unary);
     auto* ty = unary->Result()->Type();
     spv::Op op = spv::Op::Max;
     switch (unary->Kind()) {
-        case ir::Unary::Kind::kComplement:
+        case core::ir::Unary::Kind::kComplement:
             op = spv::Op::OpNot;
             break;
-        case ir::Unary::Kind::kNegation:
+        case core::ir::Unary::Kind::kNegation:
             if (ty->is_float_scalar_or_vector()) {
                 op = spv::Op::OpFNegate;
             } else if (ty->is_signed_integer_scalar_or_vector()) {
@@ -1792,7 +1798,7 @@
     current_function_.push_inst(op, {Type(ty), id, Value(unary->Val())});
 }
 
-void Printer::EmitUserCall(ir::UserCall* call) {
+void Printer::EmitUserCall(core::ir::UserCall* call) {
     auto id = Value(call);
     OperandList operands = {Type(call->Result()->Type()), id, Value(call->Func())};
     for (auto* arg : call->Args()) {
@@ -1801,7 +1807,7 @@
     current_function_.push_inst(spv::Op::OpFunctionCall, operands);
 }
 
-void Printer::EmitVar(ir::Var* var) {
+void Printer::EmitVar(core::ir::Var* var) {
     auto id = Value(var);
     auto* ptr = var->Result()->Type()->As<core::type::Pointer>();
     auto ty = Type(ptr);
@@ -1824,7 +1830,7 @@
             TINT_ASSERT(!current_function_);
             OperandList operands = {ty, id, U32Operand(SpvStorageClassPrivate)};
             if (var->Initializer()) {
-                TINT_ASSERT(var->Initializer()->Is<ir::Constant>());
+                TINT_ASSERT(var->Initializer()->Is<core::ir::Constant>());
                 operands.push_back(Value(var->Initializer()));
             }
             module_.PushType(spv::Op::OpVariable, operands);
@@ -1876,15 +1882,15 @@
     }
 }
 
-void Printer::EmitLet(ir::Let* let) {
+void Printer::EmitLet(core::ir::Let* let) {
     auto id = Value(let->Value());
     values_.Add(let->Result(), id);
 }
 
-void Printer::EmitExitPhis(ir::ControlInstruction* inst) {
+void Printer::EmitExitPhis(core::ir::ControlInstruction* inst) {
     struct Branch {
         uint32_t label = 0;
-        ir::Value* value = nullptr;
+        core::ir::Value* value = nullptr;
         bool operator<(const Branch& other) const { return label < other.label; }
     };
 
diff --git a/src/tint/lang/spirv/writer/printer/printer.h b/src/tint/lang/spirv/writer/printer/printer.h
index dceafb0..4218ade 100644
--- a/src/tint/lang/spirv/writer/printer/printer.h
+++ b/src/tint/lang/spirv/writer/printer/printer.h
@@ -34,7 +34,7 @@
 #include "src/tint/utils/symbol/symbol.h"
 
 // Forward declarations
-namespace tint::ir {
+namespace tint::core::ir {
 class Access;
 class Binary;
 class Bitcast;
@@ -65,7 +65,7 @@
 class UserCall;
 class Value;
 class Var;
-}  // namespace tint::ir
+}  // namespace tint::core::ir
 namespace tint::core::type {
 class Struct;
 class Texture;
@@ -81,7 +81,7 @@
     /// @param module the Tint IR module to generate
     /// @param zero_init_workgroup_memory `true` to initialize all the variables in the Workgroup
     ///                                   storage class with OpConstantNull
-    Printer(ir::Module* module, bool zero_init_workgroup_memory);
+    Printer(core::ir::Module* module, bool zero_init_workgroup_memory);
 
     /// @returns the generated SPIR-V binary on success, or an error string on failure
     tint::Result<std::vector<uint32_t>, std::string> Generate();
@@ -92,7 +92,7 @@
     /// Get the result ID of the constant `constant`, emitting its instruction if necessary.
     /// @param constant the constant to get the ID for
     /// @returns the result ID of the constant
-    uint32_t Constant(ir::Constant* constant);
+    uint32_t Constant(core::ir::Constant* constant);
 
     /// Get the result ID of the type `ty`, emitting a type declaration instruction if necessary.
     /// @param ty the type to get the ID for
@@ -127,17 +127,17 @@
     /// Get the ID of the label for `block`.
     /// @param block the block to get the label ID for
     /// @returns the ID of the block's label
-    uint32_t Label(ir::Block* block);
+    uint32_t Label(core::ir::Block* block);
 
     /// Get the result ID of the value `value`, emitting its instruction if necessary.
     /// @param value the value to get the ID for
     /// @returns the result ID of the value
-    uint32_t Value(ir::Value* value);
+    uint32_t Value(core::ir::Value* value);
 
     /// Get the result ID of the instruction result `value`, emitting its instruction if necessary.
     /// @param inst the instruction to get the ID for
     /// @returns the result ID of the instruction
-    uint32_t Value(ir::Instruction* inst);
+    uint32_t Value(core::ir::Instruction* inst);
 
     /// Get the result ID of the OpUndef instruction with type `ty`, emitting it if necessary.
     /// @param ty the type of the undef value
@@ -159,115 +159,115 @@
 
     /// Emit a function.
     /// @param func the function to emit
-    void EmitFunction(ir::Function* func);
+    void EmitFunction(core::ir::Function* func);
 
     /// Emit entry point declarations for a function.
     /// @param func the function to emit entry point declarations for
     /// @param id the result ID of the function declaration
-    void EmitEntryPoint(ir::Function* func, uint32_t id);
+    void EmitEntryPoint(core::ir::Function* func, uint32_t id);
 
     /// Emit a block, including the initial OpLabel, OpPhis and instructions.
     /// @param block the block to emit
-    void EmitBlock(ir::Block* block);
+    void EmitBlock(core::ir::Block* block);
 
     /// Emit all OpPhi nodes for incoming branches to @p block.
     /// @param block the block to emit the OpPhis for
-    void EmitIncomingPhis(ir::MultiInBlock* block);
+    void EmitIncomingPhis(core::ir::MultiInBlock* block);
 
     /// Emit all instructions of @p block.
     /// @param block the block's instructions to emit
-    void EmitBlockInstructions(ir::Block* block);
+    void EmitBlockInstructions(core::ir::Block* block);
 
     /// Emit the root block.
     /// @param root_block the root block to emit
-    void EmitRootBlock(ir::Block* root_block);
+    void EmitRootBlock(core::ir::Block* root_block);
 
     /// Emit an `if` flow node.
     /// @param i the if node to emit
-    void EmitIf(ir::If* i);
+    void EmitIf(core::ir::If* i);
 
     /// Emit an access instruction
     /// @param access the access instruction to emit
-    void EmitAccess(ir::Access* access);
+    void EmitAccess(core::ir::Access* access);
 
     /// Emit a binary instruction.
     /// @param binary the binary instruction to emit
-    void EmitBinary(ir::Binary* binary);
+    void EmitBinary(core::ir::Binary* binary);
 
     /// Emit a bitcast instruction.
     /// @param bitcast the bitcast instruction to emit
-    void EmitBitcast(ir::Bitcast* bitcast);
+    void EmitBitcast(core::ir::Bitcast* bitcast);
 
     /// Emit a builtin function call instruction.
     /// @param call the builtin call instruction to emit
-    void EmitCoreBuiltinCall(ir::CoreBuiltinCall* call);
+    void EmitCoreBuiltinCall(core::ir::CoreBuiltinCall* call);
 
     /// Emit a construct instruction.
     /// @param construct the construct instruction to emit
-    void EmitConstruct(ir::Construct* construct);
+    void EmitConstruct(core::ir::Construct* construct);
 
     /// Emit a convert instruction.
     /// @param convert the convert instruction to emit
-    void EmitConvert(ir::Convert* convert);
+    void EmitConvert(core::ir::Convert* convert);
 
     /// Emit an intrinsic call instruction.
     /// @param call the intrinsic call instruction to emit
-    void EmitIntrinsicCall(ir::IntrinsicCall* call);
+    void EmitIntrinsicCall(core::ir::IntrinsicCall* call);
 
     /// Emit a load instruction.
     /// @param load the load instruction to emit
-    void EmitLoad(ir::Load* load);
+    void EmitLoad(core::ir::Load* load);
 
     /// Emit a load vector element instruction.
     /// @param load the load vector element instruction to emit
-    void EmitLoadVectorElement(ir::LoadVectorElement* load);
+    void EmitLoadVectorElement(core::ir::LoadVectorElement* load);
 
     /// Emit a loop instruction.
     /// @param loop the loop instruction to emit
-    void EmitLoop(ir::Loop* loop);
+    void EmitLoop(core::ir::Loop* loop);
 
     /// Emit a store instruction.
     /// @param store the store instruction to emit
-    void EmitStore(ir::Store* store);
+    void EmitStore(core::ir::Store* store);
 
     /// Emit a store vector element instruction.
     /// @param store the store vector element instruction to emit
-    void EmitStoreVectorElement(ir::StoreVectorElement* store);
+    void EmitStoreVectorElement(core::ir::StoreVectorElement* store);
 
     /// Emit a switch instruction.
     /// @param swtch the switch instruction to emit
-    void EmitSwitch(ir::Switch* swtch);
+    void EmitSwitch(core::ir::Switch* swtch);
 
     /// Emit a swizzle instruction.
     /// @param swizzle the swizzle instruction to emit
-    void EmitSwizzle(ir::Swizzle* swizzle);
+    void EmitSwizzle(core::ir::Swizzle* swizzle);
 
     /// Emit a unary instruction.
     /// @param unary the unary instruction to emit
-    void EmitUnary(ir::Unary* unary);
+    void EmitUnary(core::ir::Unary* unary);
 
     /// Emit a user call instruction.
     /// @param call the user call instruction to emit
-    void EmitUserCall(ir::UserCall* call);
+    void EmitUserCall(core::ir::UserCall* call);
 
     /// Emit a var instruction.
     /// @param var the var instruction to emit
-    void EmitVar(ir::Var* var);
+    void EmitVar(core::ir::Var* var);
 
     /// Emit a let instruction.
     /// @param let the let instruction to emit
-    void EmitLet(ir::Let* let);
+    void EmitLet(core::ir::Let* let);
 
     /// Emit a terminator instruction.
     /// @param term the terminator instruction to emit
-    void EmitTerminator(ir::Terminator* term);
+    void EmitTerminator(core::ir::Terminator* term);
 
     /// Emit the OpPhis for the given flow control instruction.
     /// @param inst the flow control instruction
-    void EmitExitPhis(ir::ControlInstruction* inst);
+    void EmitExitPhis(core::ir::ControlInstruction* inst);
 
-    ir::Module* ir_;
-    ir::Builder b_;
+    core::ir::Module* ir_;
+    core::ir::Builder b_;
     writer::Module module_;
     BinaryWriter writer_;
 
@@ -312,10 +312,10 @@
     Hashmap<const core::type::Type*, uint32_t, 4> undef_values_;
 
     /// The map of non-constant values to their result IDs.
-    Hashmap<ir::Value*, uint32_t, 8> values_;
+    Hashmap<core::ir::Value*, uint32_t, 8> values_;
 
     /// The map of blocks to the IDs of their label instructions.
-    Hashmap<ir::Block*, uint32_t, 8> block_labels_;
+    Hashmap<core::ir::Block*, uint32_t, 8> block_labels_;
 
     /// The map of extended instruction set names to their result IDs.
     Hashmap<std::string_view, uint32_t, 2> imports_;
diff --git a/src/tint/lang/spirv/writer/raise/builtin_polyfill.cc b/src/tint/lang/spirv/writer/raise/builtin_polyfill.cc
index 65289a3..bd73280 100644
--- a/src/tint/lang/spirv/writer/raise/builtin_polyfill.cc
+++ b/src/tint/lang/spirv/writer/raise/builtin_polyfill.cc
@@ -43,10 +43,10 @@
 /// PIMPL state for the transform.
 struct State {
     /// The IR module.
-    ir::Module* ir = nullptr;
+    core::ir::Module* ir = nullptr;
 
     /// The IR builder.
-    ir::Builder b{*ir};
+    core::ir::Builder b{*ir};
 
     /// The type manager.
     core::type::Manager& ty{ir->Types()};
@@ -54,12 +54,12 @@
     /// Process the module.
     void Process() {
         // Find the builtins that need replacing.
-        Vector<ir::CoreBuiltinCall*, 4> worklist;
+        Vector<core::ir::CoreBuiltinCall*, 4> worklist;
         for (auto* inst : ir->instructions.Objects()) {
             if (!inst->Alive()) {
                 continue;
             }
-            if (auto* builtin = inst->As<ir::CoreBuiltinCall>()) {
+            if (auto* builtin = inst->As<core::ir::CoreBuiltinCall>()) {
                 switch (builtin->Func()) {
                     case core::Function::kArrayLength:
                     case core::Function::kAtomicAdd:
@@ -97,7 +97,7 @@
 
         // Replace the builtins that we found.
         for (auto* builtin : worklist) {
-            ir::Value* replacement = nullptr;
+            core::ir::Value* replacement = nullptr;
             switch (builtin->Func()) {
                 case core::Function::kArrayLength:
                     replacement = ArrayLength(builtin);
@@ -169,24 +169,24 @@
     /// Handle an `arrayLength()` builtin.
     /// @param builtin the builtin call instruction
     /// @returns the replacement value
-    ir::Value* ArrayLength(ir::CoreBuiltinCall* builtin) {
+    core::ir::Value* ArrayLength(core::ir::CoreBuiltinCall* builtin) {
         // Strip away any let instructions to get to the original struct member access instruction.
-        auto* ptr = builtin->Args()[0]->As<ir::InstructionResult>();
-        while (auto* let = tint::As<ir::Let>(ptr->Source())) {
-            ptr = let->Value()->As<ir::InstructionResult>();
+        auto* ptr = builtin->Args()[0]->As<core::ir::InstructionResult>();
+        while (auto* let = tint::As<core::ir::Let>(ptr->Source())) {
+            ptr = let->Value()->As<core::ir::InstructionResult>();
         }
         TINT_ASSERT_OR_RETURN_VALUE(ptr, nullptr);
 
-        auto* access = ptr->Source()->As<ir::Access>();
+        auto* access = ptr->Source()->As<core::ir::Access>();
         TINT_ASSERT_OR_RETURN_VALUE(access, nullptr);
         TINT_ASSERT_OR_RETURN_VALUE(access->Indices().Length() == 1u, nullptr);
         TINT_ASSERT_OR_RETURN_VALUE(access->Object()->Type()->UnwrapPtr()->Is<core::type::Struct>(),
                                     nullptr);
-        auto* const_idx = access->Indices()[0]->As<ir::Constant>();
+        auto* const_idx = access->Indices()[0]->As<core::ir::Constant>();
 
         // Replace the builtin call with a call to the spirv.array_length intrinsic.
         auto* call =
-            b.Call(builtin->Result()->Type(), ir::IntrinsicCall::Kind::kSpirvArrayLength,
+            b.Call(builtin->Result()->Type(), core::ir::IntrinsicCall::Kind::kSpirvArrayLength,
                    Vector{access->Object(), Literal(u32(const_idx->Value()->ValueAs<uint32_t>()))});
         call->InsertBefore(builtin);
         return call->Result();
@@ -195,11 +195,11 @@
     /// Handle an atomic*() builtin.
     /// @param builtin the builtin call instruction
     /// @returns the replacement value
-    ir::Value* Atomic(ir::CoreBuiltinCall* builtin) {
+    core::ir::Value* Atomic(core::ir::CoreBuiltinCall* builtin) {
         auto* result_ty = builtin->Result()->Type();
 
         auto* pointer = builtin->Args()[0];
-        auto* memory = [&]() -> ir::Value* {
+        auto* memory = [&]() -> core::ir::Value* {
             switch (pointer->Type()->As<core::type::Pointer>()->AddressSpace()) {
                 case core::AddressSpace::kWorkgroup:
                     return b.Constant(u32(SpvScopeWorkgroup));
@@ -213,26 +213,27 @@
         auto* memory_semantics = b.Constant(u32(SpvMemorySemanticsMaskNone));
 
         // Helper to build the intrinsic call with the common operands.
-        auto build = [&](const core::type::Type* type, enum ir::IntrinsicCall::Kind intrinsic) {
+        auto build = [&](const core::type::Type* type,
+                         enum core::ir::IntrinsicCall::Kind intrinsic) {
             return b.Call(type, intrinsic, pointer, memory, memory_semantics);
         };
 
         // Create the replacement call instruction.
-        ir::Call* call = nullptr;
+        core::ir::Call* call = nullptr;
         switch (builtin->Func()) {
             case core::Function::kAtomicAdd:
-                call = build(result_ty, ir::IntrinsicCall::Kind::kSpirvAtomicIAdd);
+                call = build(result_ty, core::ir::IntrinsicCall::Kind::kSpirvAtomicIAdd);
                 call->AppendArg(builtin->Args()[1]);
                 break;
             case core::Function::kAtomicAnd:
-                call = build(result_ty, ir::IntrinsicCall::Kind::kSpirvAtomicAnd);
+                call = build(result_ty, core::ir::IntrinsicCall::Kind::kSpirvAtomicAnd);
                 call->AppendArg(builtin->Args()[1]);
                 break;
             case core::Function::kAtomicCompareExchangeWeak: {
                 auto* cmp = builtin->Args()[1];
                 auto* value = builtin->Args()[2];
                 auto* int_ty = value->Type();
-                call = build(int_ty, ir::IntrinsicCall::Kind::kSpirvAtomicCompareExchange);
+                call = build(int_ty, core::ir::IntrinsicCall::Kind::kSpirvAtomicCompareExchange);
                 call->AppendArg(memory_semantics);
                 call->AppendArg(value);
                 call->AppendArg(cmp);
@@ -250,42 +251,42 @@
                 break;
             }
             case core::Function::kAtomicExchange:
-                call = build(result_ty, ir::IntrinsicCall::Kind::kSpirvAtomicExchange);
+                call = build(result_ty, core::ir::IntrinsicCall::Kind::kSpirvAtomicExchange);
                 call->AppendArg(builtin->Args()[1]);
                 break;
             case core::Function::kAtomicLoad:
-                call = build(result_ty, ir::IntrinsicCall::Kind::kSpirvAtomicLoad);
+                call = build(result_ty, core::ir::IntrinsicCall::Kind::kSpirvAtomicLoad);
                 break;
             case core::Function::kAtomicOr:
-                call = build(result_ty, ir::IntrinsicCall::Kind::kSpirvAtomicOr);
+                call = build(result_ty, core::ir::IntrinsicCall::Kind::kSpirvAtomicOr);
                 call->AppendArg(builtin->Args()[1]);
                 break;
             case core::Function::kAtomicMax:
                 if (result_ty->is_signed_integer_scalar()) {
-                    call = build(result_ty, ir::IntrinsicCall::Kind::kSpirvAtomicSMax);
+                    call = build(result_ty, core::ir::IntrinsicCall::Kind::kSpirvAtomicSMax);
                 } else {
-                    call = build(result_ty, ir::IntrinsicCall::Kind::kSpirvAtomicUMax);
+                    call = build(result_ty, core::ir::IntrinsicCall::Kind::kSpirvAtomicUMax);
                 }
                 call->AppendArg(builtin->Args()[1]);
                 break;
             case core::Function::kAtomicMin:
                 if (result_ty->is_signed_integer_scalar()) {
-                    call = build(result_ty, ir::IntrinsicCall::Kind::kSpirvAtomicSMin);
+                    call = build(result_ty, core::ir::IntrinsicCall::Kind::kSpirvAtomicSMin);
                 } else {
-                    call = build(result_ty, ir::IntrinsicCall::Kind::kSpirvAtomicUMin);
+                    call = build(result_ty, core::ir::IntrinsicCall::Kind::kSpirvAtomicUMin);
                 }
                 call->AppendArg(builtin->Args()[1]);
                 break;
             case core::Function::kAtomicStore:
-                call = build(result_ty, ir::IntrinsicCall::Kind::kSpirvAtomicStore);
+                call = build(result_ty, core::ir::IntrinsicCall::Kind::kSpirvAtomicStore);
                 call->AppendArg(builtin->Args()[1]);
                 break;
             case core::Function::kAtomicSub:
-                call = build(result_ty, ir::IntrinsicCall::Kind::kSpirvAtomicISub);
+                call = build(result_ty, core::ir::IntrinsicCall::Kind::kSpirvAtomicISub);
                 call->AppendArg(builtin->Args()[1]);
                 break;
             case core::Function::kAtomicXor:
-                call = build(result_ty, ir::IntrinsicCall::Kind::kSpirvAtomicXor);
+                call = build(result_ty, core::ir::IntrinsicCall::Kind::kSpirvAtomicXor);
                 call->AppendArg(builtin->Args()[1]);
                 break;
             default:
@@ -299,11 +300,11 @@
     /// Handle a `dot()` builtin.
     /// @param builtin the builtin call instruction
     /// @returns the replacement value
-    ir::Value* Dot(ir::CoreBuiltinCall* builtin) {
+    core::ir::Value* Dot(core::ir::CoreBuiltinCall* builtin) {
         // OpDot only supports floating point operands, so we need to polyfill the integer case.
         // TODO(crbug.com/tint/1267): If SPV_KHR_integer_dot_product is supported, use that instead.
         if (builtin->Result()->Type()->is_integer_scalar()) {
-            ir::Instruction* sum = nullptr;
+            core::ir::Instruction* sum = nullptr;
 
             auto* v1 = builtin->Args()[0];
             auto* v2 = builtin->Args()[1];
@@ -325,9 +326,9 @@
         }
 
         // Replace the builtin call with a call to the spirv.dot intrinsic.
-        auto args = Vector<ir::Value*, 4>(builtin->Args());
-        auto* call =
-            b.Call(builtin->Result()->Type(), ir::IntrinsicCall::Kind::kSpirvDot, std::move(args));
+        auto args = Vector<core::ir::Value*, 4>(builtin->Args());
+        auto* call = b.Call(builtin->Result()->Type(), core::ir::IntrinsicCall::Kind::kSpirvDot,
+                            std::move(args));
         call->InsertBefore(builtin);
         return call->Result();
     }
@@ -335,9 +336,9 @@
     /// Handle a `select()` builtin.
     /// @param builtin the builtin call instruction
     /// @returns the replacement value
-    ir::Value* Select(ir::CoreBuiltinCall* builtin) {
+    core::ir::Value* Select(core::ir::CoreBuiltinCall* builtin) {
         // Argument order is different in SPIR-V: (condition, true_operand, false_operand).
-        Vector<ir::Value*, 4> args = {
+        Vector<core::ir::Value*, 4> args = {
             builtin->Args()[2],
             builtin->Args()[1],
             builtin->Args()[0],
@@ -348,7 +349,7 @@
         // TODO(jrprice): We don't need to do this if we're targeting SPIR-V 1.4 or newer.
         auto* vec = builtin->Result()->Type()->As<core::type::Vector>();
         if (vec && args[0]->Type()->Is<core::type::Scalar>()) {
-            Vector<ir::Value*, 4> elements;
+            Vector<core::ir::Value*, 4> elements;
             elements.Resize(vec->Width(), args[0]);
 
             auto* construct = b.Construct(ty.vec(ty.bool_(), vec->Width()), std::move(elements));
@@ -357,7 +358,7 @@
         }
 
         // Replace the builtin call with a call to the spirv.select intrinsic.
-        auto* call = b.Call(builtin->Result()->Type(), ir::IntrinsicCall::Kind::kSpirvSelect,
+        auto* call = b.Call(builtin->Result()->Type(), core::ir::IntrinsicCall::Kind::kSpirvSelect,
                             std::move(args));
         call->InsertBefore(builtin);
         return call->Result();
@@ -366,17 +367,17 @@
     /// ImageOperands represents the optional image operands for an image instruction.
     struct ImageOperands {
         /// Bias
-        ir::Value* bias = nullptr;
+        core::ir::Value* bias = nullptr;
         /// Lod
-        ir::Value* lod = nullptr;
+        core::ir::Value* lod = nullptr;
         /// Grad (dx)
-        ir::Value* ddx = nullptr;
+        core::ir::Value* ddx = nullptr;
         /// Grad (dy)
-        ir::Value* ddy = nullptr;
+        core::ir::Value* ddy = nullptr;
         /// ConstOffset
-        ir::Value* offset = nullptr;
+        core::ir::Value* offset = nullptr;
         /// Sample
-        ir::Value* sample = nullptr;
+        core::ir::Value* sample = nullptr;
     };
 
     /// Append optional image operands to an image intrinsic argument list.
@@ -385,8 +386,8 @@
     /// @param insertion_point the insertion point for new instructions
     /// @param requires_float_lod true if the lod needs to be a floating point value
     void AppendImageOperands(ImageOperands& operands,
-                             Vector<ir::Value*, 8>& args,
-                             ir::Instruction* insertion_point,
+                             Vector<core::ir::Value*, 8>& args,
+                             core::ir::Instruction* insertion_point,
                              bool requires_float_lod) {
         // Add a placeholder argument for the image operand mask, which we will fill in when we have
         // processed the image operands.
@@ -431,9 +432,9 @@
     /// @param array_idx the array index
     /// @param insertion_point the insertion point for new instructions
     /// @returns the modified coordinate vector
-    ir::Value* AppendArrayIndex(ir::Value* coords,
-                                ir::Value* array_idx,
-                                ir::Instruction* insertion_point) {
+    core::ir::Value* AppendArrayIndex(core::ir::Value* coords,
+                                      core::ir::Value* array_idx,
+                                      core::ir::Instruction* insertion_point) {
         auto* vec = coords->Type()->As<core::type::Vector>();
         auto* element_ty = vec->type();
 
@@ -455,7 +456,7 @@
     /// Handle a textureSample*() builtin.
     /// @param builtin the builtin call instruction
     /// @returns the replacement value
-    ir::Value* TextureSample(ir::CoreBuiltinCall* builtin) {
+    core::ir::Value* TextureSample(core::ir::CoreBuiltinCall* builtin) {
         // Helper to get the next argument from the call, or nullptr if there are no more arguments.
         uint32_t arg_idx = 0;
         auto next_arg = [&]() {
@@ -469,8 +470,8 @@
 
         // Use OpSampledImage to create an OpTypeSampledImage object.
         auto* sampled_image =
-            b.Call(ty.Get<SampledImage>(texture_ty), ir::IntrinsicCall::Kind::kSpirvSampledImage,
-                   Vector{texture, sampler});
+            b.Call(ty.Get<SampledImage>(texture_ty),
+                   core::ir::IntrinsicCall::Kind::kSpirvSampledImage, Vector{texture, sampler});
         sampled_image->InsertBefore(builtin);
 
         // Append the array index to the coordinates if provided.
@@ -480,38 +481,38 @@
         }
 
         // Determine which SPIR-V intrinsic to use and which optional image operands are needed.
-        enum ir::IntrinsicCall::Kind intrinsic;
-        ir::Value* depth = nullptr;
+        enum core::ir::IntrinsicCall::Kind intrinsic;
+        core::ir::Value* depth = nullptr;
         ImageOperands operands;
         switch (builtin->Func()) {
             case core::Function::kTextureSample:
-                intrinsic = ir::IntrinsicCall::Kind::kSpirvImageSampleImplicitLod;
+                intrinsic = core::ir::IntrinsicCall::Kind::kSpirvImageSampleImplicitLod;
                 operands.offset = next_arg();
                 break;
             case core::Function::kTextureSampleBias:
-                intrinsic = ir::IntrinsicCall::Kind::kSpirvImageSampleImplicitLod;
+                intrinsic = core::ir::IntrinsicCall::Kind::kSpirvImageSampleImplicitLod;
                 operands.bias = next_arg();
                 operands.offset = next_arg();
                 break;
             case core::Function::kTextureSampleCompare:
-                intrinsic = ir::IntrinsicCall::Kind::kSpirvImageSampleDrefImplicitLod;
+                intrinsic = core::ir::IntrinsicCall::Kind::kSpirvImageSampleDrefImplicitLod;
                 depth = next_arg();
                 operands.offset = next_arg();
                 break;
             case core::Function::kTextureSampleCompareLevel:
-                intrinsic = ir::IntrinsicCall::Kind::kSpirvImageSampleDrefExplicitLod;
+                intrinsic = core::ir::IntrinsicCall::Kind::kSpirvImageSampleDrefExplicitLod;
                 depth = next_arg();
                 operands.lod = b.Constant(0_f);
                 operands.offset = next_arg();
                 break;
             case core::Function::kTextureSampleGrad:
-                intrinsic = ir::IntrinsicCall::Kind::kSpirvImageSampleExplicitLod;
+                intrinsic = core::ir::IntrinsicCall::Kind::kSpirvImageSampleExplicitLod;
                 operands.ddx = next_arg();
                 operands.ddy = next_arg();
                 operands.offset = next_arg();
                 break;
             case core::Function::kTextureSampleLevel:
-                intrinsic = ir::IntrinsicCall::Kind::kSpirvImageSampleExplicitLod;
+                intrinsic = core::ir::IntrinsicCall::Kind::kSpirvImageSampleExplicitLod;
                 operands.lod = next_arg();
                 operands.offset = next_arg();
                 break;
@@ -522,7 +523,7 @@
         // Start building the argument list for the intrinsic.
         // The first two operands are always the sampled image and then the coordinates, followed by
         // the depth reference if used.
-        Vector<ir::Value*, 8> intrinsic_args;
+        Vector<core::ir::Value*, 8> intrinsic_args;
         intrinsic_args.Push(sampled_image->Result());
         intrinsic_args.Push(coords);
         if (depth) {
@@ -555,7 +556,7 @@
     /// Handle a textureGather*() builtin.
     /// @param builtin the builtin call instruction
     /// @returns the replacement value
-    ir::Value* TextureGather(ir::CoreBuiltinCall* builtin) {
+    core::ir::Value* TextureGather(core::ir::CoreBuiltinCall* builtin) {
         // Helper to get the next argument from the call, or nullptr if there are no more arguments.
         uint32_t arg_idx = 0;
         auto next_arg = [&]() {
@@ -576,8 +577,8 @@
 
         // Use OpSampledImage to create an OpTypeSampledImage object.
         auto* sampled_image =
-            b.Call(ty.Get<SampledImage>(texture_ty), ir::IntrinsicCall::Kind::kSpirvSampledImage,
-                   Vector{texture, sampler});
+            b.Call(ty.Get<SampledImage>(texture_ty),
+                   core::ir::IntrinsicCall::Kind::kSpirvSampledImage, Vector{texture, sampler});
         sampled_image->InsertBefore(builtin);
 
         // Append the array index to the coordinates if provided.
@@ -587,16 +588,16 @@
         }
 
         // Determine which SPIR-V intrinsic to use and which optional image operands are needed.
-        enum ir::IntrinsicCall::Kind intrinsic;
-        ir::Value* depth = nullptr;
+        enum core::ir::IntrinsicCall::Kind intrinsic;
+        core::ir::Value* depth = nullptr;
         ImageOperands operands;
         switch (builtin->Func()) {
             case core::Function::kTextureGather:
-                intrinsic = ir::IntrinsicCall::Kind::kSpirvImageGather;
+                intrinsic = core::ir::IntrinsicCall::Kind::kSpirvImageGather;
                 operands.offset = next_arg();
                 break;
             case core::Function::kTextureGatherCompare:
-                intrinsic = ir::IntrinsicCall::Kind::kSpirvImageDrefGather;
+                intrinsic = core::ir::IntrinsicCall::Kind::kSpirvImageDrefGather;
                 depth = next_arg();
                 operands.offset = next_arg();
                 break;
@@ -607,7 +608,7 @@
         // Start building the argument list for the intrinsic.
         // The first two operands are always the sampled image and then the coordinates, followed by
         // either the depth reference or the component.
-        Vector<ir::Value*, 8> intrinsic_args;
+        Vector<core::ir::Value*, 8> intrinsic_args;
         intrinsic_args.Push(sampled_image->Result());
         intrinsic_args.Push(coords);
         if (depth) {
@@ -629,7 +630,7 @@
     /// Handle a textureLoad() builtin.
     /// @param builtin the builtin call instruction
     /// @returns the replacement value
-    ir::Value* TextureLoad(ir::CoreBuiltinCall* builtin) {
+    core::ir::Value* TextureLoad(core::ir::CoreBuiltinCall* builtin) {
         // Helper to get the next argument from the call, or nullptr if there are no more arguments.
         uint32_t arg_idx = 0;
         auto next_arg = [&]() {
@@ -648,7 +649,7 @@
 
         // Start building the argument list for the intrinsic.
         // The first two operands are always the texture and then the coordinates.
-        Vector<ir::Value*, 8> intrinsic_args;
+        Vector<core::ir::Value*, 8> intrinsic_args;
         intrinsic_args.Push(texture);
         intrinsic_args.Push(coords);
 
@@ -669,8 +670,8 @@
         if (expects_scalar_result) {
             result_ty = ty.vec4(result_ty);
         }
-        auto* texture_call =
-            b.Call(result_ty, ir::IntrinsicCall::Kind::kSpirvImageFetch, std::move(intrinsic_args));
+        auto* texture_call = b.Call(result_ty, core::ir::IntrinsicCall::Kind::kSpirvImageFetch,
+                                    std::move(intrinsic_args));
         texture_call->InsertBefore(builtin);
         auto* result = texture_call->Result();
 
@@ -687,7 +688,7 @@
     /// Handle a textureStore() builtin.
     /// @param builtin the builtin call instruction
     /// @returns the replacement value
-    ir::Value* TextureStore(ir::CoreBuiltinCall* builtin) {
+    core::ir::Value* TextureStore(core::ir::CoreBuiltinCall* builtin) {
         // Helper to get the next argument from the call, or nullptr if there are no more arguments.
         uint32_t arg_idx = 0;
         auto next_arg = [&]() {
@@ -708,7 +709,7 @@
 
         // Start building the argument list for the intrinsic.
         // The first two operands are always the texture and then the coordinates.
-        Vector<ir::Value*, 8> intrinsic_args;
+        Vector<core::ir::Value*, 8> intrinsic_args;
         intrinsic_args.Push(texture);
         intrinsic_args.Push(coords);
         intrinsic_args.Push(texel);
@@ -717,7 +718,7 @@
         AppendImageOperands(operands, intrinsic_args, builtin, /* requires_float_lod */ false);
 
         // Call the intrinsic.
-        auto* texture_call = b.Call(ty.void_(), ir::IntrinsicCall::Kind::kSpirvImageWrite,
+        auto* texture_call = b.Call(ty.void_(), core::ir::IntrinsicCall::Kind::kSpirvImageWrite,
                                     std::move(intrinsic_args));
         texture_call->InsertBefore(builtin);
         return texture_call->Result();
@@ -726,7 +727,7 @@
     /// Handle a textureDimensions() builtin.
     /// @param builtin the builtin call instruction
     /// @returns the replacement value
-    ir::Value* TextureDimensions(ir::CoreBuiltinCall* builtin) {
+    core::ir::Value* TextureDimensions(core::ir::CoreBuiltinCall* builtin) {
         // Helper to get the next argument from the call, or nullptr if there are no more arguments.
         uint32_t arg_idx = 0;
         auto next_arg = [&]() {
@@ -736,17 +737,17 @@
         auto* texture = next_arg();
         auto* texture_ty = texture->Type()->As<core::type::Texture>();
 
-        Vector<ir::Value*, 8> intrinsic_args;
+        Vector<core::ir::Value*, 8> intrinsic_args;
         intrinsic_args.Push(texture);
 
         // Determine which SPIR-V intrinsic to use, and add the Lod argument if needed.
-        enum ir::IntrinsicCall::Kind intrinsic;
+        enum core::ir::IntrinsicCall::Kind intrinsic;
         if (texture_ty
                 ->IsAnyOf<core::type::MultisampledTexture, core::type::DepthMultisampledTexture,
                           core::type::StorageTexture>()) {
-            intrinsic = ir::IntrinsicCall::Kind::kSpirvImageQuerySize;
+            intrinsic = core::ir::IntrinsicCall::Kind::kSpirvImageQuerySize;
         } else {
-            intrinsic = ir::IntrinsicCall::Kind::kSpirvImageQuerySizeLod;
+            intrinsic = core::ir::IntrinsicCall::Kind::kSpirvImageQuerySizeLod;
             if (auto* lod = next_arg()) {
                 intrinsic_args.Push(lod);
             } else {
@@ -781,21 +782,21 @@
     /// Handle a textureNumLayers() builtin.
     /// @param builtin the builtin call instruction
     /// @returns the replacement value
-    ir::Value* TextureNumLayers(ir::CoreBuiltinCall* builtin) {
+    core::ir::Value* TextureNumLayers(core::ir::CoreBuiltinCall* builtin) {
         auto* texture = builtin->Args()[0];
         auto* texture_ty = texture->Type()->As<core::type::Texture>();
 
-        Vector<ir::Value*, 2> intrinsic_args;
+        Vector<core::ir::Value*, 2> intrinsic_args;
         intrinsic_args.Push(texture);
 
         // Determine which SPIR-V intrinsic to use, and add the Lod argument if needed.
-        enum ir::IntrinsicCall::Kind intrinsic;
+        enum core::ir::IntrinsicCall::Kind intrinsic;
         if (texture_ty
                 ->IsAnyOf<core::type::MultisampledTexture, core::type::DepthMultisampledTexture,
                           core::type::StorageTexture>()) {
-            intrinsic = ir::IntrinsicCall::Kind::kSpirvImageQuerySize;
+            intrinsic = core::ir::IntrinsicCall::Kind::kSpirvImageQuerySize;
         } else {
-            intrinsic = ir::IntrinsicCall::Kind::kSpirvImageQuerySizeLod;
+            intrinsic = core::ir::IntrinsicCall::Kind::kSpirvImageQuerySizeLod;
             intrinsic_args.Push(b.Constant(0_u));
         }
 
@@ -812,7 +813,7 @@
 
 }  // namespace
 
-Result<SuccessType, std::string> BuiltinPolyfill(ir::Module* ir) {
+Result<SuccessType, std::string> BuiltinPolyfill(core::ir::Module* ir) {
     auto result = ValidateAndDumpIfNeeded(*ir, "BuiltinPolyfill transform");
     if (!result) {
         return result;
diff --git a/src/tint/lang/spirv/writer/raise/builtin_polyfill.h b/src/tint/lang/spirv/writer/raise/builtin_polyfill.h
index 343caf6..a99adb9 100644
--- a/src/tint/lang/spirv/writer/raise/builtin_polyfill.h
+++ b/src/tint/lang/spirv/writer/raise/builtin_polyfill.h
@@ -22,10 +22,10 @@
 #include "src/tint/utils/result/result.h"
 
 // Forward declarations.
-namespace tint::ir {
+namespace tint::core::ir {
 class Module;
 class Texture;
-}  // namespace tint::ir
+}  // namespace tint::core::ir
 
 namespace tint::spirv::writer::raise {
 
@@ -33,12 +33,12 @@
 /// SPIR-V backend intrinsic functions.
 /// @param module the module to transform
 /// @returns an error string on failure
-Result<SuccessType, std::string> BuiltinPolyfill(ir::Module* module);
+Result<SuccessType, std::string> BuiltinPolyfill(core::ir::Module* module);
 
 /// LiteralOperand is a type of constant value that is intended to be emitted as a literal in
 /// the SPIR-V instruction stream.
 /// TODO(jrprice): Move this to lang/spirv.
-class LiteralOperand final : public Castable<LiteralOperand, ir::Constant> {
+class LiteralOperand final : public Castable<LiteralOperand, core::ir::Constant> {
   public:
     /// Constructor
     /// @param value the operand value
diff --git a/src/tint/lang/spirv/writer/raise/builtin_polyfill_test.cc b/src/tint/lang/spirv/writer/raise/builtin_polyfill_test.cc
index a19982a..586b90d 100644
--- a/src/tint/lang/spirv/writer/raise/builtin_polyfill_test.cc
+++ b/src/tint/lang/spirv/writer/raise/builtin_polyfill_test.cc
@@ -31,7 +31,7 @@
 using namespace tint::core::fluent_types;     // NOLINT
 using namespace tint::core::number_suffixes;  // NOLINT
 
-using SpirvWriter_BuiltinPolyfillTest = ir::transform::TransformTest;
+using SpirvWriter_BuiltinPolyfillTest = core::ir::transform::TransformTest;
 
 TEST_F(SpirvWriter_BuiltinPolyfillTest, ArrayLength) {
     auto* arr = ty.runtime_array(ty.i32());
diff --git a/src/tint/lang/spirv/writer/raise/expand_implicit_splats.cc b/src/tint/lang/spirv/writer/raise/expand_implicit_splats.cc
index 19dc647..fbf1ee1 100644
--- a/src/tint/lang/spirv/writer/raise/expand_implicit_splats.cc
+++ b/src/tint/lang/spirv/writer/raise/expand_implicit_splats.cc
@@ -26,18 +26,18 @@
 
 namespace {
 
-void Run(ir::Module* ir) {
-    ir::Builder b(*ir);
+void Run(core::ir::Module* ir) {
+    core::ir::Builder b(*ir);
 
     // Find the instructions that use implicit splats and either modify them in place or record them
     // to be replaced in a second pass.
-    Vector<ir::Binary*, 4> binary_worklist;
-    Vector<ir::CoreBuiltinCall*, 4> builtin_worklist;
+    Vector<core::ir::Binary*, 4> binary_worklist;
+    Vector<core::ir::CoreBuiltinCall*, 4> builtin_worklist;
     for (auto* inst : ir->instructions.Objects()) {
         if (!inst->Alive()) {
             continue;
         }
-        if (auto* construct = inst->As<ir::Construct>()) {
+        if (auto* construct = inst->As<core::ir::Construct>()) {
             // A vector constructor with a single scalar argument needs to be modified to replicate
             // the argument N times.
             auto* vec = construct->Result()->Type()->As<core::type::Vector>();
@@ -48,7 +48,7 @@
                     construct->AppendArg(construct->Args()[0]);
                 }
             }
-        } else if (auto* binary = inst->As<ir::Binary>()) {
+        } else if (auto* binary = inst->As<core::ir::Binary>()) {
             // A binary instruction that mixes vector and scalar operands needs to have the scalar
             // operand replaced with an explicit vector constructor.
             if (binary->Result()->Type()->Is<core::type::Vector>()) {
@@ -57,7 +57,7 @@
                     binary_worklist.Push(binary);
                 }
             }
-        } else if (auto* builtin = inst->As<ir::CoreBuiltinCall>()) {
+        } else if (auto* builtin = inst->As<core::ir::CoreBuiltinCall>()) {
             // A mix builtin call that mixes vector and scalar operands needs to have the scalar
             // operand replaced with an explicit vector constructor.
             if (builtin->Func() == core::Function::kMix) {
@@ -72,10 +72,10 @@
 
     // Helper to expand a scalar operand of an instruction by replacing it with an explicitly
     // constructed vector that matches the result type.
-    auto expand_operand = [&](ir::Instruction* inst, size_t operand_idx) {
+    auto expand_operand = [&](core::ir::Instruction* inst, size_t operand_idx) {
         auto* vec = inst->Result()->Type()->As<core::type::Vector>();
 
-        Vector<ir::Value*, 4> args;
+        Vector<core::ir::Value*, 4> args;
         args.Resize(vec->Width(), inst->Operands()[operand_idx]);
 
         auto* construct = b.Construct(vec, std::move(args));
@@ -86,9 +86,9 @@
     // Replace scalar operands to binary instructions that produce vectors.
     for (auto* binary : binary_worklist) {
         auto* result_ty = binary->Result()->Type();
-        if (result_ty->is_float_vector() && binary->Kind() == ir::Binary::Kind::kMultiply) {
+        if (result_ty->is_float_vector() && binary->Kind() == core::ir::Binary::Kind::kMultiply) {
             // Use OpVectorTimesScalar for floating point multiply.
-            auto* vts = b.Call(result_ty, ir::IntrinsicCall::Kind::kSpirvVectorTimesScalar);
+            auto* vts = b.Call(result_ty, core::ir::IntrinsicCall::Kind::kSpirvVectorTimesScalar);
             if (binary->LHS()->Type()->Is<core::type::Scalar>()) {
                 vts->AppendArg(binary->RHS());
                 vts->AppendArg(binary->LHS());
@@ -105,9 +105,9 @@
         } else {
             // Expand the scalar argument into an explicitly constructed vector.
             if (binary->LHS()->Type()->Is<core::type::Scalar>()) {
-                expand_operand(binary, ir::Binary::kLhsOperandOffset);
+                expand_operand(binary, core::ir::Binary::kLhsOperandOffset);
             } else if (binary->RHS()->Type()->Is<core::type::Scalar>()) {
-                expand_operand(binary, ir::Binary::kRhsOperandOffset);
+                expand_operand(binary, core::ir::Binary::kRhsOperandOffset);
             }
         }
     }
@@ -117,7 +117,7 @@
         switch (builtin->Func()) {
             case core::Function::kMix:
                 // Expand the scalar argument into an explicitly constructed vector.
-                expand_operand(builtin, ir::CoreBuiltinCall::kArgsOperandOffset + 2);
+                expand_operand(builtin, core::ir::CoreBuiltinCall::kArgsOperandOffset + 2);
                 break;
             default:
                 TINT_UNREACHABLE() << "unhandled builtin call";
@@ -128,7 +128,7 @@
 
 }  // namespace
 
-Result<SuccessType, std::string> ExpandImplicitSplats(ir::Module* ir) {
+Result<SuccessType, std::string> ExpandImplicitSplats(core::ir::Module* ir) {
     auto result = ValidateAndDumpIfNeeded(*ir, "ExpandImplicitSplats transform");
     if (!result) {
         return result;
diff --git a/src/tint/lang/spirv/writer/raise/expand_implicit_splats.h b/src/tint/lang/spirv/writer/raise/expand_implicit_splats.h
index 9efef18..18bf1d8 100644
--- a/src/tint/lang/spirv/writer/raise/expand_implicit_splats.h
+++ b/src/tint/lang/spirv/writer/raise/expand_implicit_splats.h
@@ -20,7 +20,7 @@
 #include "src/tint/utils/result/result.h"
 
 // Forward declarations.
-namespace tint::ir {
+namespace tint::core::ir {
 class Module;
 }
 
@@ -30,7 +30,7 @@
 /// instructions and binary instructions where not supported by SPIR-V.
 /// @param module the module to transform
 /// @returns an error string on failure
-Result<SuccessType, std::string> ExpandImplicitSplats(ir::Module* module);
+Result<SuccessType, std::string> ExpandImplicitSplats(core::ir::Module* module);
 
 }  // namespace tint::spirv::writer::raise
 
diff --git a/src/tint/lang/spirv/writer/raise/expand_implicit_splats_test.cc b/src/tint/lang/spirv/writer/raise/expand_implicit_splats_test.cc
index 0c1b919..9111271 100644
--- a/src/tint/lang/spirv/writer/raise/expand_implicit_splats_test.cc
+++ b/src/tint/lang/spirv/writer/raise/expand_implicit_splats_test.cc
@@ -24,7 +24,7 @@
 using namespace tint::core::fluent_types;     // NOLINT
 using namespace tint::core::number_suffixes;  // NOLINT
 
-using SpirvWriter_ExpandImplicitSplatsTest = ir::transform::TransformTest;
+using SpirvWriter_ExpandImplicitSplatsTest = core::ir::transform::TransformTest;
 
 TEST_F(SpirvWriter_ExpandImplicitSplatsTest, NoModify_Construct_VectorIdentity) {
     auto* vector = b.FunctionParam("vector", ty.vec2<i32>());
diff --git a/src/tint/lang/spirv/writer/raise/handle_matrix_arithmetic.cc b/src/tint/lang/spirv/writer/raise/handle_matrix_arithmetic.cc
index fbcc95b..7c321a8 100644
--- a/src/tint/lang/spirv/writer/raise/handle_matrix_arithmetic.cc
+++ b/src/tint/lang/spirv/writer/raise/handle_matrix_arithmetic.cc
@@ -30,23 +30,23 @@
 
 namespace {
 
-void Run(ir::Module* ir) {
-    ir::Builder b(*ir);
+void Run(core::ir::Module* ir) {
+    core::ir::Builder b(*ir);
 
     // Find the instructions that need to be modified.
-    Vector<ir::Binary*, 4> binary_worklist;
-    Vector<ir::Convert*, 4> convert_worklist;
+    Vector<core::ir::Binary*, 4> binary_worklist;
+    Vector<core::ir::Convert*, 4> convert_worklist;
     for (auto* inst : ir->instructions.Objects()) {
         if (!inst->Alive()) {
             continue;
         }
-        if (auto* binary = inst->As<ir::Binary>()) {
+        if (auto* binary = inst->As<core::ir::Binary>()) {
             TINT_ASSERT(binary->Operands().Length() == 2);
             if (binary->LHS()->Type()->Is<core::type::Matrix>() ||
                 binary->RHS()->Type()->Is<core::type::Matrix>()) {
                 binary_worklist.Push(binary);
             }
-        } else if (auto* convert = inst->As<ir::Convert>()) {
+        } else if (auto* convert = inst->As<core::ir::Convert>()) {
             if (convert->Result()->Type()->Is<core::type::Matrix>()) {
                 convert_worklist.Push(convert);
             }
@@ -62,7 +62,7 @@
         auto* ty = binary->Result()->Type();
 
         // Helper to replace the instruction with a new one.
-        auto replace = [&](ir::Instruction* inst) {
+        auto replace = [&](core::ir::Instruction* inst) {
             if (auto name = ir->NameOf(binary)) {
                 ir->SetName(inst->Result(), name);
             }
@@ -72,9 +72,9 @@
         };
 
         // Helper to replace the instruction with a column-wise operation.
-        auto column_wise = [&](enum ir::Binary::Kind op) {
+        auto column_wise = [&](enum core::ir::Binary::Kind op) {
             auto* mat = ty->As<core::type::Matrix>();
-            Vector<ir::Value*, 4> args;
+            Vector<core::ir::Value*, 4> args;
             for (uint32_t col = 0; col < mat->columns(); col++) {
                 b.InsertBefore(binary, [&] {
                     auto* lhs_col = b.Access(mat->ColumnType(), lhs, u32(col));
@@ -87,32 +87,32 @@
         };
 
         switch (binary->Kind()) {
-            case ir::Binary::Kind::kAdd:
-                column_wise(ir::Binary::Kind::kAdd);
+            case core::ir::Binary::Kind::kAdd:
+                column_wise(core::ir::Binary::Kind::kAdd);
                 break;
-            case ir::Binary::Kind::kSubtract:
-                column_wise(ir::Binary::Kind::kSubtract);
+            case core::ir::Binary::Kind::kSubtract:
+                column_wise(core::ir::Binary::Kind::kSubtract);
                 break;
-            case ir::Binary::Kind::kMultiply:
+            case core::ir::Binary::Kind::kMultiply:
                 // Select the SPIR-V intrinsic that corresponds to the operation being performed.
                 if (lhs_ty->Is<core::type::Matrix>()) {
                     if (rhs_ty->Is<core::type::Scalar>()) {
-                        replace(
-                            b.Call(ty, ir::IntrinsicCall::Kind::kSpirvMatrixTimesScalar, lhs, rhs));
+                        replace(b.Call(ty, core::ir::IntrinsicCall::Kind::kSpirvMatrixTimesScalar,
+                                       lhs, rhs));
                     } else if (rhs_ty->Is<core::type::Vector>()) {
-                        replace(
-                            b.Call(ty, ir::IntrinsicCall::Kind::kSpirvMatrixTimesVector, lhs, rhs));
+                        replace(b.Call(ty, core::ir::IntrinsicCall::Kind::kSpirvMatrixTimesVector,
+                                       lhs, rhs));
                     } else if (rhs_ty->Is<core::type::Matrix>()) {
-                        replace(
-                            b.Call(ty, ir::IntrinsicCall::Kind::kSpirvMatrixTimesMatrix, lhs, rhs));
+                        replace(b.Call(ty, core::ir::IntrinsicCall::Kind::kSpirvMatrixTimesMatrix,
+                                       lhs, rhs));
                     }
                 } else {
                     if (lhs_ty->Is<core::type::Scalar>()) {
-                        replace(
-                            b.Call(ty, ir::IntrinsicCall::Kind::kSpirvMatrixTimesScalar, rhs, lhs));
+                        replace(b.Call(ty, core::ir::IntrinsicCall::Kind::kSpirvMatrixTimesScalar,
+                                       rhs, lhs));
                     } else if (lhs_ty->Is<core::type::Vector>()) {
-                        replace(
-                            b.Call(ty, ir::IntrinsicCall::Kind::kSpirvVectorTimesMatrix, lhs, rhs));
+                        replace(b.Call(ty, core::ir::IntrinsicCall::Kind::kSpirvVectorTimesMatrix,
+                                       lhs, rhs));
                     }
                 }
                 break;
@@ -125,12 +125,12 @@
 
     // Replace the matrix convert instructions that we found.
     for (auto* convert : convert_worklist) {
-        auto* arg = convert->Args()[ir::Convert::kValueOperandOffset];
+        auto* arg = convert->Args()[core::ir::Convert::kValueOperandOffset];
         auto* in_mat = arg->Type()->As<core::type::Matrix>();
         auto* out_mat = convert->Result()->Type()->As<core::type::Matrix>();
 
         // Extract and convert each column separately.
-        Vector<ir::Value*, 4> args;
+        Vector<core::ir::Value*, 4> args;
         for (uint32_t c = 0; c < out_mat->columns(); c++) {
             b.InsertBefore(convert, [&] {
                 auto* col = b.Access(in_mat->ColumnType(), arg, u32(c));
@@ -152,7 +152,7 @@
 
 }  // namespace
 
-Result<SuccessType, std::string> HandleMatrixArithmetic(ir::Module* ir) {
+Result<SuccessType, std::string> HandleMatrixArithmetic(core::ir::Module* ir) {
     auto result = ValidateAndDumpIfNeeded(*ir, "HandleMatrixArithmetic transform");
     if (!result) {
         return result;
diff --git a/src/tint/lang/spirv/writer/raise/handle_matrix_arithmetic.h b/src/tint/lang/spirv/writer/raise/handle_matrix_arithmetic.h
index 8590f37..41b4a72 100644
--- a/src/tint/lang/spirv/writer/raise/handle_matrix_arithmetic.h
+++ b/src/tint/lang/spirv/writer/raise/handle_matrix_arithmetic.h
@@ -20,7 +20,7 @@
 #include "src/tint/utils/result/result.h"
 
 // Forward declarations.
-namespace tint::ir {
+namespace tint::core::ir {
 class Module;
 }
 
@@ -30,7 +30,7 @@
 /// SPIR-V intrinsics or polyfills.
 /// @param module the module to transform
 /// @returns an error string on failure
-Result<SuccessType, std::string> HandleMatrixArithmetic(ir::Module* module);
+Result<SuccessType, std::string> HandleMatrixArithmetic(core::ir::Module* module);
 
 }  // namespace tint::spirv::writer::raise
 
diff --git a/src/tint/lang/spirv/writer/raise/handle_matrix_arithmetic_test.cc b/src/tint/lang/spirv/writer/raise/handle_matrix_arithmetic_test.cc
index 264bd54..bd31efd 100644
--- a/src/tint/lang/spirv/writer/raise/handle_matrix_arithmetic_test.cc
+++ b/src/tint/lang/spirv/writer/raise/handle_matrix_arithmetic_test.cc
@@ -25,7 +25,7 @@
 using namespace tint::core::fluent_types;     // NOLINT
 using namespace tint::core::number_suffixes;  // NOLINT
 
-using SpirvWriter_HandleMatrixArithmeticTest = ir::transform::TransformTest;
+using SpirvWriter_HandleMatrixArithmeticTest = core::ir::transform::TransformTest;
 
 TEST_F(SpirvWriter_HandleMatrixArithmeticTest, Add_Mat2x3f) {
     auto* arg1 = b.FunctionParam("arg1", ty.mat2x3<f32>());
diff --git a/src/tint/lang/spirv/writer/raise/merge_return.cc b/src/tint/lang/spirv/writer/raise/merge_return.cc
index 6f657f2..c09fdc6 100644
--- a/src/tint/lang/spirv/writer/raise/merge_return.cc
+++ b/src/tint/lang/spirv/writer/raise/merge_return.cc
@@ -33,38 +33,38 @@
 /// PIMPL state for the transform, for a single function.
 struct State {
     /// The IR module.
-    ir::Module* ir = nullptr;
+    core::ir::Module* ir = nullptr;
 
     /// The IR builder.
-    ir::Builder b{*ir};
+    core::ir::Builder b{*ir};
 
     /// The type manager.
     core::type::Manager& ty{ir->Types()};
 
     /// The "has not returned" flag.
-    ir::Var* continue_execution = nullptr;
+    core::ir::Var* continue_execution = nullptr;
 
     /// The variable that holds the return value.
     /// Null when the function does not return a value.
-    ir::Var* return_val = nullptr;
+    core::ir::Var* return_val = nullptr;
 
     /// The final return at the end of the function block.
     /// May be null when the function returns in all blocks of a control instruction.
-    ir::Return* fn_return = nullptr;
+    core::ir::Return* fn_return = nullptr;
 
     /// A set of control instructions that transitively hold a return instruction
-    Hashset<ir::ControlInstruction*, 8> holds_return_;
+    Hashset<core::ir::ControlInstruction*, 8> holds_return_;
 
     /// Constructor
     /// @param mod the module
-    explicit State(ir::Module* mod) : ir(mod) {}
+    explicit State(core::ir::Module* mod) : ir(mod) {}
 
     /// Process the function.
     /// @param fn the function to process
-    void Process(ir::Function* fn) {
+    void Process(core::ir::Function* fn) {
         // Find all of the nested return instructions in the function.
         for (const auto& usage : fn->Usages()) {
-            if (auto* ret = usage.instruction->As<ir::Return>()) {
+            if (auto* ret = usage.instruction->As<core::ir::Return>()) {
                 TransitivelyMarkAsReturning(ret->Block()->Parent());
             }
         }
@@ -86,7 +86,7 @@
         }
 
         // Look to see if the function ends with a return
-        fn_return = tint::As<ir::Return>(fn->Block()->Terminator());
+        fn_return = tint::As<core::ir::Return>(fn->Block()->Terminator());
 
         // Process the function's block.
         // This will traverse into control instructions that hold returns, and apply the necessary
@@ -105,7 +105,7 @@
     /// Marks all the control instructions from ctrl to the function as holding a return.
     /// @param ctrl the control instruction to mark as returning, along with all ancestor control
     /// instructions.
-    void TransitivelyMarkAsReturning(ir::ControlInstruction* ctrl) {
+    void TransitivelyMarkAsReturning(core::ir::ControlInstruction* ctrl) {
         for (; ctrl; ctrl = ctrl->Block()->Parent()) {
             if (!holds_return_.Add(ctrl)) {
                 return;
@@ -118,21 +118,21 @@
     /// instructions following the control instruction will be wrapped in a 'if' that only executes
     /// if a return was not reached.
     /// @param block the block to process
-    void ProcessBlock(ir::Block* block) {
-        ir::If* inner_if = nullptr;
+    void ProcessBlock(core::ir::Block* block) {
+        core::ir::If* inner_if = nullptr;
         for (auto* inst = *block->begin(); inst;) {  // For each instruction in 'block'
             // As we're modifying the block that we're iterating over, grab the pointer to the next
             // instruction before (potentially) moving 'inst' to another block.
             auto* next = inst->next;
             TINT_DEFER(inst = next);
 
-            if (auto* ret = inst->As<ir::Return>()) {
+            if (auto* ret = inst->As<core::ir::Return>()) {
                 // Note: Return instructions are processed without being moved into the 'if' block.
                 ProcessReturn(ret, inner_if);
                 break;  // All instructions processed.
             }
 
-            if (inst->Is<ir::Unreachable>()) {
+            if (inst->Is<core::ir::Unreachable>()) {
                 // Unreachable can become reachable once returns are turned into exits.
                 // As this is the terminator for the block, simply stop processing the
                 // instructions. An appropriate terminator will be created for this block below.
@@ -150,12 +150,13 @@
             // Control instructions holding a return need to be processed, and then a new 'if' needs
             // to be created to hold the instructions that are between the control instruction and
             // the block's terminating instruction.
-            if (auto* ctrl = inst->As<ir::ControlInstruction>()) {
+            if (auto* ctrl = inst->As<core::ir::ControlInstruction>()) {
                 if (holds_return_.Contains(ctrl)) {
                     // Control instruction transitively holds a return.
-                    ctrl->ForeachBlock([&](ir::Block* ctrl_block) { ProcessBlock(ctrl_block); });
+                    ctrl->ForeachBlock(
+                        [&](core::ir::Block* ctrl_block) { ProcessBlock(ctrl_block); });
                     if (next && (next != fn_return || fn_return->Value()) &&
-                        !tint::IsAnyOf<ir::Exit, ir::Unreachable>(next)) {
+                        !tint::IsAnyOf<core::ir::Exit, core::ir::Unreachable>(next)) {
                         inner_if = CreateIfContinueExecution(ctrl);
                     }
                 }
@@ -164,10 +165,12 @@
 
         if (inner_if) {
             // new_value_with_type returns a new RuntimeValue with the same type as 'v'
-            auto new_value_with_type = [&](ir::Value* v) { return b.InstructionResult(v->Type()); };
+            auto new_value_with_type = [&](core::ir::Value* v) {
+                return b.InstructionResult(v->Type());
+            };
 
             if (inner_if->True()->HasTerminator()) {
-                if (auto* exit_if = inner_if->True()->Terminator()->As<ir::ExitIf>()) {
+                if (auto* exit_if = inner_if->True()->Terminator()->As<core::ir::ExitIf>()) {
                     // Ensure the associated 'if' is updated.
                     exit_if->SetIf(inner_if);
 
@@ -185,10 +188,10 @@
 
             // Loop over the 'if' instructions, starting with the inner-most, and add any missing
             // terminating instructions to the blocks holding the 'if'.
-            for (auto* i = inner_if; i; i = tint::As<ir::If>(i->Block()->Parent())) {
+            for (auto* i = inner_if; i; i = tint::As<core::ir::If>(i->Block()->Parent())) {
                 if (!i->Block()->HasTerminator()) {
                     // Append the exit instruction to the block holding the 'if'.
-                    Vector<ir::InstructionResult*, 8> exit_args = i->Results();
+                    Vector<core::ir::InstructionResult*, 8> exit_args = i->Results();
                     if (!i->HasResults()) {
                         i->SetResults(tint::Transform(exit_args, new_value_with_type));
                     }
@@ -204,7 +207,7 @@
     /// @param cond the possibly null 'if(continue_execution)' instruction for the current block.
     /// @note unlike other instructions, return instructions are not automatically moved into the
     /// 'if(continue_execution)' block.
-    void ProcessReturn(ir::Return* ret, ir::If* cond) {
+    void ProcessReturn(core::ir::Return* ret, core::ir::If* cond) {
         if (ret == fn_return) {
             // 'ret' is the final instruction for the function.
             ProcessFunctionBlockReturn(ret, cond);
@@ -217,7 +220,7 @@
     /// Transforms the return instruction that is the last instruction in the function's block.
     /// @param ret the return instruction
     /// @param cond the possibly null 'if(continue_execution)' instruction for the current block.
-    void ProcessFunctionBlockReturn(ir::Return* ret, ir::If* cond) {
+    void ProcessFunctionBlockReturn(core::ir::Return* ret, core::ir::If* cond) {
         if (!return_val) {
             return;  // No need to transform non-value, end-of-function returns
         }
@@ -238,7 +241,7 @@
     /// Transforms the return instruction that is found in a control instruction.
     /// @param ret the return instruction
     /// @param cond the possibly null 'if(continue_execution)' instruction for the current block.
-    void ProcessNestedReturn(ir::Return* ret, ir::If* cond) {
+    void ProcessNestedReturn(core::ir::Return* ret, core::ir::If* cond) {
         // If we have an 'if(continue_execution)' block, then insert instructions into that,
         // otherwise insert into the block holding the return.
         auto* block = cond ? cond->True() : ret->Block();
@@ -253,7 +256,7 @@
         // If the outermost control instruction is expecting exit values, then return them as
         // 'undef' values.
         auto* ctrl = block->Parent();
-        Vector<ir::Value*, 8> exit_args;
+        Vector<core::ir::Value*, 8> exit_args;
         exit_args.Resize(ctrl->Results().Length());
 
         // Replace the return instruction with an exit instruction.
@@ -264,7 +267,7 @@
     /// Builds instructions to create an 'if(continue_execution)' conditional.
     /// @param after new instructions will be inserted after this instruction
     /// @return the 'If' control instruction
-    ir::If* CreateIfContinueExecution(ir::Instruction* after) {
+    core::ir::If* CreateIfContinueExecution(core::ir::Instruction* after) {
         auto* load = b.Load(continue_execution);
         auto* cond = b.If(load);
         load->InsertAfter(after);
@@ -274,7 +277,7 @@
 
     /// Adds a final return instruction to the end of @p fn
     /// @param fn the function
-    void AppendFinalReturn(ir::Function* fn) {
+    void AppendFinalReturn(core::ir::Function* fn) {
         b.Append(fn->Block(), [&] {
             if (return_val) {
                 b.Return(fn, b.Load(return_val));
@@ -287,7 +290,7 @@
 
 }  // namespace
 
-Result<SuccessType, std::string> MergeReturn(ir::Module* ir) {
+Result<SuccessType, std::string> MergeReturn(core::ir::Module* ir) {
     auto result = ValidateAndDumpIfNeeded(*ir, "MergeReturn transform");
     if (!result) {
         return result;
diff --git a/src/tint/lang/spirv/writer/raise/merge_return.h b/src/tint/lang/spirv/writer/raise/merge_return.h
index 88ba2ce..1ad6d87 100644
--- a/src/tint/lang/spirv/writer/raise/merge_return.h
+++ b/src/tint/lang/spirv/writer/raise/merge_return.h
@@ -20,7 +20,7 @@
 #include "src/tint/utils/result/result.h"
 
 // Forward declarations.
-namespace tint::ir {
+namespace tint::core::ir {
 class Module;
 }
 
@@ -30,7 +30,7 @@
 /// at the end of the function.
 /// @param module the module to transform
 /// @returns an error string on failure
-Result<SuccessType, std::string> MergeReturn(ir::Module* module);
+Result<SuccessType, std::string> MergeReturn(core::ir::Module* module);
 
 }  // namespace tint::spirv::writer::raise
 
diff --git a/src/tint/lang/spirv/writer/raise/merge_return_test.cc b/src/tint/lang/spirv/writer/raise/merge_return_test.cc
index f6554c7..a7a53a5 100644
--- a/src/tint/lang/spirv/writer/raise/merge_return_test.cc
+++ b/src/tint/lang/spirv/writer/raise/merge_return_test.cc
@@ -24,7 +24,7 @@
 using namespace tint::core::fluent_types;     // NOLINT
 using namespace tint::core::number_suffixes;  // NOLINT
 
-using SpirvWriter_MergeReturnTest = ir::transform::TransformTest;
+using SpirvWriter_MergeReturnTest = core::ir::transform::TransformTest;
 
 TEST_F(SpirvWriter_MergeReturnTest, NoModify_SingleReturnInRootBlock) {
     auto* in = b.FunctionParam(ty.i32());
@@ -98,7 +98,7 @@
 
     b.Append(func->Block(), [&] {
         auto* swtch = b.Switch(in);
-        b.Append(b.Case(swtch, {ir::Switch::CaseSelector{}}), [&] { b.ExitSwitch(swtch); });
+        b.Append(b.Case(swtch, {core::ir::Switch::CaseSelector{}}), [&] { b.ExitSwitch(swtch); });
 
         auto* l = b.Loop();
         b.Append(l->Body(), [&] { b.ExitLoop(l); });
@@ -1551,9 +1551,9 @@
 
     b.Append(func->Block(), [&] {
         auto* sw = b.Switch(cond);
-        b.Append(b.Case(sw, {ir::Switch::CaseSelector{b.Constant(1_i)}}),
+        b.Append(b.Case(sw, {core::ir::Switch::CaseSelector{b.Constant(1_i)}}),
                  [&] { b.Return(func, 42_i); });
-        b.Append(b.Case(sw, {ir::Switch::CaseSelector{}}), [&] { b.ExitSwitch(sw); });
+        b.Append(b.Case(sw, {core::ir::Switch::CaseSelector{}}), [&] { b.ExitSwitch(sw); });
 
         b.Return(func, 0_i);
     });
@@ -1618,7 +1618,7 @@
 
     b.Append(func->Block(), [&] {
         auto* sw = b.Switch(cond);
-        b.Append(b.Case(sw, {ir::Switch::CaseSelector{b.Constant(1_i)}}), [&] {
+        b.Append(b.Case(sw, {core::ir::Switch::CaseSelector{b.Constant(1_i)}}), [&] {
             auto* ifcond = b.Equal(ty.bool_(), cond, 1_i);
             auto* ifelse = b.If(ifcond);
             b.Append(ifelse->True(), [&] { b.Return(func, 42_i); });
@@ -1628,7 +1628,7 @@
             b.ExitSwitch(sw);
         });
 
-        b.Append(b.Case(sw, {ir::Switch::CaseSelector{}}), [&] { b.ExitSwitch(sw); });
+        b.Append(b.Case(sw, {core::ir::Switch::CaseSelector{}}), [&] { b.ExitSwitch(sw); });
 
         b.Return(func, 0_i);
     });
@@ -1725,13 +1725,13 @@
     b.Append(func->Block(), [&] {
         auto* sw = b.Switch(cond);
         sw->SetResults(b.InstructionResult(ty.i32()));  // NOLINT: false detection of std::tuple
-        b.Append(b.Case(sw, {ir::Switch::CaseSelector{b.Constant(1_i)}}),
+        b.Append(b.Case(sw, {core::ir::Switch::CaseSelector{b.Constant(1_i)}}),
                  [&] { b.Return(func, 42_i); });
-        b.Append(b.Case(sw, {ir::Switch::CaseSelector{b.Constant(2_i)}}),
+        b.Append(b.Case(sw, {core::ir::Switch::CaseSelector{b.Constant(2_i)}}),
                  [&] { b.Return(func, 99_i); });
-        b.Append(b.Case(sw, {ir::Switch::CaseSelector{b.Constant(3_i)}}),
+        b.Append(b.Case(sw, {core::ir::Switch::CaseSelector{b.Constant(3_i)}}),
                  [&] { b.ExitSwitch(sw, 1_i); });
-        b.Append(b.Case(sw, {ir::Switch::CaseSelector{}}), [&] { b.ExitSwitch(sw, 0_i); });
+        b.Append(b.Case(sw, {core::ir::Switch::CaseSelector{}}), [&] { b.ExitSwitch(sw, 0_i); });
 
         b.Return(func, sw->Result(0));
     });
diff --git a/src/tint/lang/spirv/writer/raise/raise.cc b/src/tint/lang/spirv/writer/raise/raise.cc
index 842c799..2d510db 100644
--- a/src/tint/lang/spirv/writer/raise/raise.cc
+++ b/src/tint/lang/spirv/writer/raise/raise.cc
@@ -32,7 +32,7 @@
 
 namespace tint::spirv::writer::raise {
 
-Result<SuccessType, std::string> Raise(ir::Module* module, const Options& options) {
+Result<SuccessType, std::string> Raise(core::ir::Module* module, const Options& options) {
 #define RUN_TRANSFORM(name, ...)         \
     do {                                 \
         auto result = name(__VA_ARGS__); \
@@ -41,28 +41,28 @@
         }                                \
     } while (false)
 
-    ir::transform::BuiltinPolyfillConfig core_polyfills;
+    core::ir::transform::BuiltinPolyfillConfig core_polyfills;
     core_polyfills.count_leading_zeros = true;
     core_polyfills.count_trailing_zeros = true;
     core_polyfills.first_leading_bit = true;
     core_polyfills.first_trailing_bit = true;
     core_polyfills.saturate = true;
     core_polyfills.texture_sample_base_clamp_to_edge_2d_f32 = true;
-    RUN_TRANSFORM(ir::transform::BuiltinPolyfill, module, core_polyfills);
+    RUN_TRANSFORM(core::ir::transform::BuiltinPolyfill, module, core_polyfills);
 
-    RUN_TRANSFORM(ir::transform::MultiplanarExternalTexture, module,
+    RUN_TRANSFORM(core::ir::transform::MultiplanarExternalTexture, module,
                   options.external_texture_options);
 
-    RUN_TRANSFORM(ir::transform::AddEmptyEntryPoint, module);
-    RUN_TRANSFORM(ir::transform::Bgra8UnormPolyfill, module);
-    RUN_TRANSFORM(ir::transform::BlockDecoratedStructs, module);
+    RUN_TRANSFORM(core::ir::transform::AddEmptyEntryPoint, module);
+    RUN_TRANSFORM(core::ir::transform::Bgra8UnormPolyfill, module);
+    RUN_TRANSFORM(core::ir::transform::BlockDecoratedStructs, module);
     RUN_TRANSFORM(BuiltinPolyfill, module);
-    RUN_TRANSFORM(ir::transform::DemoteToHelper, module);
+    RUN_TRANSFORM(core::ir::transform::DemoteToHelper, module);
     RUN_TRANSFORM(ExpandImplicitSplats, module);
     RUN_TRANSFORM(HandleMatrixArithmetic, module);
     RUN_TRANSFORM(MergeReturn, module);
     RUN_TRANSFORM(ShaderIO, module, ShaderIOConfig{options.clamp_frag_depth});
-    RUN_TRANSFORM(ir::transform::Std140, module);
+    RUN_TRANSFORM(core::ir::transform::Std140, module);
     RUN_TRANSFORM(VarForDynamicIndex, module);
 
     return Success;
diff --git a/src/tint/lang/spirv/writer/raise/raise.h b/src/tint/lang/spirv/writer/raise/raise.h
index 50d47bd..adaae92 100644
--- a/src/tint/lang/spirv/writer/raise/raise.h
+++ b/src/tint/lang/spirv/writer/raise/raise.h
@@ -21,7 +21,7 @@
 #include "src/tint/utils/result/result.h"
 
 // Forward declarations
-namespace tint::ir {
+namespace tint::core::ir {
 class Module;
 }
 
@@ -31,7 +31,7 @@
 /// @param module the core IR module to raise to SPIR-V dialect
 /// @param options the SPIR-V writer options
 /// @returns success or an error string
-Result<SuccessType, std::string> Raise(ir::Module* module, const Options& options);
+Result<SuccessType, std::string> Raise(core::ir::Module* module, const Options& options);
 
 }  // namespace tint::spirv::writer::raise
 
diff --git a/src/tint/lang/spirv/writer/raise/shader_io.cc b/src/tint/lang/spirv/writer/raise/shader_io.cc
index ebdfc2a..2716b67 100644
--- a/src/tint/lang/spirv/writer/raise/shader_io.cc
+++ b/src/tint/lang/spirv/writer/raise/shader_io.cc
@@ -36,15 +36,15 @@
 /// output, and declare global variables for them. The wrapper entry point then loads from and
 /// stores to these variables.
 /// We also modify the type of the SampleMask builtin to be an array, as required by Vulkan.
-struct StateImpl : ir::transform::ShaderIOBackendState {
+struct StateImpl : core::ir::transform::ShaderIOBackendState {
     /// The global variable for input builtins.
-    ir::Var* builtin_input_var = nullptr;
+    core::ir::Var* builtin_input_var = nullptr;
     /// The global variable for input locations.
-    ir::Var* location_input_var = nullptr;
+    core::ir::Var* location_input_var = nullptr;
     /// The global variable for output builtins.
-    ir::Var* builtin_output_var = nullptr;
+    core::ir::Var* builtin_output_var = nullptr;
     /// The global variable for output locations.
-    ir::Var* location_output_var = nullptr;
+    core::ir::Var* location_output_var = nullptr;
     /// The member indices for inputs.
     Vector<uint32_t, 4> input_indices;
     /// The member indices for outputs.
@@ -54,10 +54,10 @@
     const ShaderIOConfig& config;
 
     /// The frag_depth clamp arguments.
-    ir::Value* frag_depth_clamp_args = nullptr;
+    core::ir::Value* frag_depth_clamp_args = nullptr;
 
     /// Constructor
-    StateImpl(ir::Module* mod, ir::Function* f, const ShaderIOConfig& cfg)
+    StateImpl(core::ir::Module* mod, core::ir::Function* f, const ShaderIOConfig& cfg)
         : ShaderIOBackendState(mod, f), config(cfg) {}
 
     /// Destructor
@@ -72,8 +72,8 @@
     /// @param addrspace the address space to use for the global variables
     /// @param access the access mode to use for the global variables
     /// @param name_suffix the suffix to add to struct and variable names
-    void MakeStructs(ir::Var*& builtin_var,
-                     ir::Var*& location_var,
+    void MakeStructs(core::ir::Var*& builtin_var,
+                     core::ir::Var*& location_var,
                      Vector<uint32_t, 4>* indices,
                      Vector<core::type::Manager::StructMemberDesc, 4>& entries,
                      core::AddressSpace addrspace,
@@ -116,24 +116,24 @@
     }
 
     /// @copydoc ShaderIO::BackendState::FinalizeInputs
-    Vector<ir::FunctionParam*, 4> FinalizeInputs() override {
+    Vector<core::ir::FunctionParam*, 4> FinalizeInputs() override {
         MakeStructs(builtin_input_var, location_input_var, &input_indices, inputs,
                     core::AddressSpace::kIn, core::Access::kRead, "Inputs");
         return tint::Empty;
     }
 
     /// @copydoc ShaderIO::BackendState::FinalizeOutputs
-    ir::Value* FinalizeOutputs() override {
+    core::ir::Value* FinalizeOutputs() override {
         MakeStructs(builtin_output_var, location_output_var, &output_indices, outputs,
                     core::AddressSpace::kOut, core::Access::kWrite, "Outputs");
         return nullptr;
     }
 
     /// @copydoc ShaderIO::BackendState::GetInput
-    ir::Value* GetInput(ir::Builder& builder, uint32_t idx) override {
+    core::ir::Value* GetInput(core::ir::Builder& builder, uint32_t idx) override {
         // Load the input from the global variable declared earlier.
         auto* ptr = ty.ptr(core::AddressSpace::kIn, inputs[idx].type, core::Access::kRead);
-        ir::Access* from = nullptr;
+        core::ir::Access* from = nullptr;
         if (inputs[idx].attributes.builtin) {
             if (inputs[idx].attributes.builtin.value() == core::BuiltinValue::kSampleMask) {
                 // SampleMask becomes an array for SPIR-V, so load from the first element.
@@ -148,10 +148,10 @@
     }
 
     /// @copydoc ShaderIO::BackendState::SetOutput
-    void SetOutput(ir::Builder& builder, uint32_t idx, ir::Value* value) override {
+    void SetOutput(core::ir::Builder& builder, uint32_t idx, core::ir::Value* value) override {
         // Store the output to the global variable declared earlier.
         auto* ptr = ty.ptr(core::AddressSpace::kOut, outputs[idx].type, core::Access::kWrite);
-        ir::Access* to = nullptr;
+        core::ir::Access* to = nullptr;
         if (outputs[idx].attributes.builtin) {
             if (outputs[idx].attributes.builtin.value() == core::BuiltinValue::kSampleMask) {
                 // SampleMask becomes an array for SPIR-V, so store to the first element.
@@ -174,7 +174,7 @@
     /// @param builder the builder to use for new instructions
     /// @param frag_depth the incoming frag_depth value
     /// @returns the clamped value
-    ir::Value* ClampFragDepth(ir::Builder& builder, ir::Value* frag_depth) {
+    core::ir::Value* ClampFragDepth(core::ir::Builder& builder, core::ir::Value* frag_depth) {
         if (!config.clamp_frag_depth) {
             return frag_depth;
         }
@@ -183,7 +183,7 @@
         if (!frag_depth_clamp_args) {
             // Check that there are no push constants in the module already.
             for (auto* inst : *b.RootBlock()) {
-                if (auto* var = inst->As<ir::Var>()) {
+                if (auto* var = inst->As<core::ir::Var>()) {
                     auto* ptr = var->Result()->Type()->As<core::type::Pointer>();
                     if (ptr->AddressSpace() == core::AddressSpace::kPushConstant) {
                         TINT_ICE() << "cannot clamp frag_depth with pre-existing push constants";
@@ -216,13 +216,13 @@
 };
 }  // namespace
 
-Result<SuccessType, std::string> ShaderIO(ir::Module* ir, const ShaderIOConfig& config) {
+Result<SuccessType, std::string> ShaderIO(core::ir::Module* ir, const ShaderIOConfig& config) {
     auto result = ValidateAndDumpIfNeeded(*ir, "ShaderIO transform");
     if (!result) {
         return result;
     }
 
-    ir::transform::RunShaderIOBase(ir, [&](ir::Module* mod, ir::Function* func) {
+    core::ir::transform::RunShaderIOBase(ir, [&](core::ir::Module* mod, core::ir::Function* func) {
         return std::make_unique<StateImpl>(mod, func, config);
     });
 
diff --git a/src/tint/lang/spirv/writer/raise/shader_io.h b/src/tint/lang/spirv/writer/raise/shader_io.h
index 633339f..5924c12 100644
--- a/src/tint/lang/spirv/writer/raise/shader_io.h
+++ b/src/tint/lang/spirv/writer/raise/shader_io.h
@@ -20,7 +20,7 @@
 #include "src/tint/utils/result/result.h"
 
 // Forward declarations.
-namespace tint::ir {
+namespace tint::core::ir {
 class Module;
 }
 
@@ -37,7 +37,7 @@
 /// @param module the module to transform
 /// @param config the configuration
 /// @returns an error string on failure
-Result<SuccessType, std::string> ShaderIO(ir::Module* module, const ShaderIOConfig& config);
+Result<SuccessType, std::string> ShaderIO(core::ir::Module* module, const ShaderIOConfig& config);
 
 }  // namespace tint::spirv::writer::raise
 
diff --git a/src/tint/lang/spirv/writer/raise/shader_io_test.cc b/src/tint/lang/spirv/writer/raise/shader_io_test.cc
index 63229e6..0638d50 100644
--- a/src/tint/lang/spirv/writer/raise/shader_io_test.cc
+++ b/src/tint/lang/spirv/writer/raise/shader_io_test.cc
@@ -24,11 +24,11 @@
 using namespace tint::core::fluent_types;     // NOLINT
 using namespace tint::core::number_suffixes;  // NOLINT
 
-using SpirvWriter_ShaderIOTest = ir::transform::TransformTest;
+using SpirvWriter_ShaderIOTest = core::ir::transform::TransformTest;
 
 TEST_F(SpirvWriter_ShaderIOTest, NoInputsOrOutputs) {
     auto* ep = b.Function("foo", ty.void_());
-    ep->SetStage(ir::Function::PipelineStage::kCompute);
+    ep->SetStage(core::ir::Function::PipelineStage::kCompute);
 
     b.Append(ep->Block(), [&] {  //
         b.Return(ep);
@@ -55,9 +55,9 @@
 TEST_F(SpirvWriter_ShaderIOTest, Parameters_NonStruct) {
     auto* ep = b.Function("foo", ty.void_());
     auto* front_facing = b.FunctionParam("front_facing", ty.bool_());
-    front_facing->SetBuiltin(ir::FunctionParam::Builtin::kFrontFacing);
+    front_facing->SetBuiltin(core::ir::FunctionParam::Builtin::kFrontFacing);
     auto* position = b.FunctionParam("position", ty.vec4<f32>());
-    position->SetBuiltin(ir::FunctionParam::Builtin::kPosition);
+    position->SetBuiltin(core::ir::FunctionParam::Builtin::kPosition);
     position->SetInvariant(true);
     auto* color1 = b.FunctionParam("color1", ty.f32());
     color1->SetLocation(0, {});
@@ -66,7 +66,7 @@
                                                core::InterpolationSampling::kSample});
 
     ep->SetParams({front_facing, position, color1, color2});
-    ep->SetStage(ir::Function::PipelineStage::kFragment);
+    ep->SetStage(core::ir::Function::PipelineStage::kFragment);
 
     b.Append(ep->Block(), [&] {
         auto* ifelse = b.If(front_facing);
@@ -177,7 +177,7 @@
     auto* ep = b.Function("foo", ty.void_());
     auto* str_param = b.FunctionParam("inputs", str_ty);
     ep->SetParams({str_param});
-    ep->SetStage(ir::Function::PipelineStage::kFragment);
+    ep->SetStage(core::ir::Function::PipelineStage::kFragment);
 
     b.Append(ep->Block(), [&] {
         auto* ifelse = b.If(b.Access(ty.bool_(), str_param, 0_i));
@@ -298,14 +298,14 @@
 
     auto* ep = b.Function("foo", ty.void_());
     auto* front_facing = b.FunctionParam("front_facing", ty.bool_());
-    front_facing->SetBuiltin(ir::FunctionParam::Builtin::kFrontFacing);
+    front_facing->SetBuiltin(core::ir::FunctionParam::Builtin::kFrontFacing);
     auto* str_param = b.FunctionParam("inputs", str_ty);
     auto* color2 = b.FunctionParam("color2", ty.f32());
     color2->SetLocation(1, core::Interpolation{core::InterpolationType::kLinear,
                                                core::InterpolationSampling::kSample});
 
     ep->SetParams({front_facing, str_param, color2});
-    ep->SetStage(ir::Function::PipelineStage::kFragment);
+    ep->SetStage(core::ir::Function::PipelineStage::kFragment);
 
     b.Append(ep->Block(), [&] {
         auto* ifelse = b.If(front_facing);
@@ -402,9 +402,9 @@
 
 TEST_F(SpirvWriter_ShaderIOTest, ReturnValue_NonStructBuiltin) {
     auto* ep = b.Function("foo", ty.vec4<f32>());
-    ep->SetReturnBuiltin(ir::Function::ReturnBuiltin::kPosition);
+    ep->SetReturnBuiltin(core::ir::Function::ReturnBuiltin::kPosition);
     ep->SetReturnInvariant(true);
-    ep->SetStage(ir::Function::PipelineStage::kVertex);
+    ep->SetStage(core::ir::Function::PipelineStage::kVertex);
 
     b.Append(ep->Block(), [&] {  //
         b.Return(ep, b.Construct(ty.vec4<f32>(), 0.5_f));
@@ -455,7 +455,7 @@
 TEST_F(SpirvWriter_ShaderIOTest, ReturnValue_NonStructLocation) {
     auto* ep = b.Function("foo", ty.vec4<f32>());
     ep->SetReturnLocation(1u, {});
-    ep->SetStage(ir::Function::PipelineStage::kFragment);
+    ep->SetStage(core::ir::Function::PipelineStage::kFragment);
 
     b.Append(ep->Block(), [&] {  //
         b.Return(ep, b.Construct(ty.vec4<f32>(), 0.5_f));
@@ -529,7 +529,7 @@
                              });
 
     auto* ep = b.Function("foo", str_ty);
-    ep->SetStage(ir::Function::PipelineStage::kVertex);
+    ep->SetStage(core::ir::Function::PipelineStage::kVertex);
 
     b.Append(ep->Block(), [&] {  //
         b.Return(ep, b.Construct(str_ty, b.Construct(ty.vec4<f32>(), 0_f), 0.25_f, 0.75_f));
@@ -623,7 +623,7 @@
     // Vertex shader.
     {
         auto* ep = b.Function("vert", str_ty);
-        ep->SetStage(ir::Function::PipelineStage::kVertex);
+        ep->SetStage(core::ir::Function::PipelineStage::kVertex);
 
         b.Append(ep->Block(), [&] {  //
             auto* position = b.Construct(vec4f, 0_f);
@@ -636,7 +636,7 @@
     {
         auto* ep = b.Function("frag", vec4f);
         auto* inputs = b.FunctionParam("inputs", str_ty);
-        ep->SetStage(ir::Function::PipelineStage::kFragment);
+        ep->SetStage(core::ir::Function::PipelineStage::kFragment);
         ep->SetParams({inputs});
 
         b.Append(ep->Block(), [&] {  //
@@ -774,7 +774,7 @@
     auto* buffer = b.RootBlock()->Append(b.Var(ty.ptr(storage, str_ty, read)));
 
     auto* ep = b.Function("vert", str_ty);
-    ep->SetStage(ir::Function::PipelineStage::kVertex);
+    ep->SetStage(core::ir::Function::PipelineStage::kVertex);
 
     b.Append(ep->Block(), [&] {  //
         b.Return(ep, b.Load(buffer));
@@ -863,10 +863,10 @@
                              });
 
     auto* mask_in = b.FunctionParam("mask_in", ty.u32());
-    mask_in->SetBuiltin(ir::FunctionParam::Builtin::kSampleMask);
+    mask_in->SetBuiltin(core::ir::FunctionParam::Builtin::kSampleMask);
 
     auto* ep = b.Function("foo", str_ty);
-    ep->SetStage(ir::Function::PipelineStage::kFragment);
+    ep->SetStage(core::ir::Function::PipelineStage::kFragment);
     ep->SetParams({mask_in});
 
     b.Append(ep->Block(), [&] {  //
@@ -957,7 +957,7 @@
                              });
 
     auto* ep = b.Function("foo", str_ty);
-    ep->SetStage(ir::Function::PipelineStage::kFragment);
+    ep->SetStage(core::ir::Function::PipelineStage::kFragment);
 
     b.Append(ep->Block(), [&] {  //
         b.Return(ep, b.Construct(str_ty, 0.5_f, 2_f));
diff --git a/src/tint/lang/spirv/writer/raise/var_for_dynamic_index.cc b/src/tint/lang/spirv/writer/raise/var_for_dynamic_index.cc
index c66d8de7..e749a28 100644
--- a/src/tint/lang/spirv/writer/raise/var_for_dynamic_index.cc
+++ b/src/tint/lang/spirv/writer/raise/var_for_dynamic_index.cc
@@ -35,7 +35,7 @@
 // An access that needs replacing.
 struct AccessToReplace {
     // The access instruction.
-    ir::Access* access = nullptr;
+    core::ir::Access* access = nullptr;
     // The index of the first dynamic index.
     size_t first_dynamic_index = 0;
     // The object type that corresponds to the source of the first dynamic index.
@@ -48,9 +48,9 @@
 // dynamically indexed.
 struct PartialAccess {
     // The base object.
-    ir::Value* base = nullptr;
+    core::ir::Value* base = nullptr;
     // The list of constant indices to get from the base to the source object.
-    Vector<ir::Value*, 4> indices;
+    Vector<core::ir::Value*, 4> indices;
 
     // A specialization of Hasher for PartialAccess.
     struct Hasher {
@@ -68,26 +68,26 @@
 enum class Action { kStop, kContinue };
 
 template <typename CALLBACK>
-void WalkAccessChain(ir::Access* access, CALLBACK&& callback) {
+void WalkAccessChain(core::ir::Access* access, CALLBACK&& callback) {
     auto indices = access->Indices();
     auto* ty = access->Object()->Type();
     for (size_t i = 0; i < indices.Length(); i++) {
         if (callback(i, indices[i], ty) == Action::kStop) {
             break;
         }
-        auto* const_idx = indices[i]->As<ir::Constant>();
+        auto* const_idx = indices[i]->As<core::ir::Constant>();
         ty = const_idx ? ty->Element(const_idx->Value()->ValueAs<u32>()) : ty->Elements().type;
     }
 }
 
-std::optional<AccessToReplace> ShouldReplace(ir::Access* access) {
+std::optional<AccessToReplace> ShouldReplace(core::ir::Access* access) {
     if (access->Result()->Type()->Is<core::type::Pointer>()) {
         // No need to modify accesses into pointer types.
         return {};
     }
 
     std::optional<AccessToReplace> result;
-    WalkAccessChain(access, [&](size_t i, ir::Value* index, const core::type::Type* ty) {
+    WalkAccessChain(access, [&](size_t i, core::ir::Value* index, const core::type::Type* ty) {
         if (auto* vec = ty->As<core::type::Vector>()) {
             // If we haven't found a dynamic index before the vector, then the transform doesn't
             // need to hoist the access into a var as a vector value can be dynamically indexed.
@@ -101,7 +101,7 @@
         }
 
         // Check if this is the first dynamic index.
-        if (!result && !index->Is<ir::Constant>()) {
+        if (!result && !index->Is<core::ir::Constant>()) {
             result = AccessToReplace{access, i, ty};
         }
 
@@ -111,13 +111,13 @@
     return result;
 }
 
-void Run(ir::Module* ir) {
-    ir::Builder builder(*ir);
+void Run(core::ir::Module* ir) {
+    core::ir::Builder builder(*ir);
 
     // Find the access instructions that need replacing.
     Vector<AccessToReplace, 4> worklist;
     for (auto* inst : ir->instructions.Objects()) {
-        if (auto* access = inst->As<ir::Access>()) {
+        if (auto* access = inst->As<core::ir::Access>()) {
             if (auto to_replace = ShouldReplace(access)) {
                 worklist.Push(to_replace.value());
             }
@@ -125,8 +125,8 @@
     }
 
     // Replace each access instruction that we recorded.
-    Hashmap<ir::Value*, ir::Value*, 4> object_to_local;
-    Hashmap<PartialAccess, ir::Value*, 4, PartialAccess::Hasher> source_object_to_value;
+    Hashmap<core::ir::Value*, core::ir::Value*, 4> object_to_local;
+    Hashmap<PartialAccess, core::ir::Value*, 4, PartialAccess::Hasher> source_object_to_value;
     for (const auto& to_replace : worklist) {
         auto* access = to_replace.access;
         auto* source_object = access->Object();
@@ -154,9 +154,10 @@
         });
 
         // Create a new access instruction using the local variable as the source.
-        Vector<ir::Value*, 4> indices{access->Indices().Offset(to_replace.first_dynamic_index)};
+        Vector<core::ir::Value*, 4> indices{
+            access->Indices().Offset(to_replace.first_dynamic_index)};
         const core::type::Type* access_type = access->Result()->Type();
-        ir::Value* vector_index = nullptr;
+        core::ir::Value* vector_index = nullptr;
         if (to_replace.vector_access_type) {
             // The old access indexed the element of a vector.
             // It's not valid to obtain the address of an element of a vector, so we need to access
@@ -168,12 +169,12 @@
             vector_index = indices.Pop();
         }
 
-        ir::Instruction* new_access = builder.Access(
+        core::ir::Instruction* new_access = builder.Access(
             ir->Types().ptr(core::AddressSpace::kFunction, access_type, core::Access::kReadWrite),
             local, indices);
         new_access->InsertBefore(access);
 
-        ir::Instruction* load = nullptr;
+        core::ir::Instruction* load = nullptr;
         if (to_replace.vector_access_type) {
             load = builder.LoadVectorElement(new_access->Result(), vector_index);
         } else {
@@ -188,7 +189,7 @@
 
 }  // namespace
 
-Result<SuccessType, std::string> VarForDynamicIndex(ir::Module* ir) {
+Result<SuccessType, std::string> VarForDynamicIndex(core::ir::Module* ir) {
     auto result = ValidateAndDumpIfNeeded(*ir, "VarForDynamicIndex transform");
     if (!result) {
         return result;
diff --git a/src/tint/lang/spirv/writer/raise/var_for_dynamic_index.h b/src/tint/lang/spirv/writer/raise/var_for_dynamic_index.h
index e8c1387..6878db4 100644
--- a/src/tint/lang/spirv/writer/raise/var_for_dynamic_index.h
+++ b/src/tint/lang/spirv/writer/raise/var_for_dynamic_index.h
@@ -20,7 +20,7 @@
 #include "src/tint/utils/result/result.h"
 
 // Forward declarations.
-namespace tint::ir {
+namespace tint::core::ir {
 class Module;
 }
 
@@ -32,7 +32,7 @@
 /// composite.
 /// @param module the module to transform
 /// @returns an error string on failure
-Result<SuccessType, std::string> VarForDynamicIndex(ir::Module* module);
+Result<SuccessType, std::string> VarForDynamicIndex(core::ir::Module* module);
 
 }  // namespace tint::spirv::writer::raise
 
diff --git a/src/tint/lang/spirv/writer/raise/var_for_dynamic_index_test.cc b/src/tint/lang/spirv/writer/raise/var_for_dynamic_index_test.cc
index d71a10c..7daf9b2 100644
--- a/src/tint/lang/spirv/writer/raise/var_for_dynamic_index_test.cc
+++ b/src/tint/lang/spirv/writer/raise/var_for_dynamic_index_test.cc
@@ -27,7 +27,7 @@
 using namespace tint::core::fluent_types;     // NOLINT
 using namespace tint::core::number_suffixes;  // NOLINT
 
-using SpirvWriter_VarForDynamicIndexTest = ir::transform::TransformTest;
+using SpirvWriter_VarForDynamicIndexTest = core::ir::transform::TransformTest;
 
 TEST_F(SpirvWriter_VarForDynamicIndexTest, NoModify_ConstantIndex_ArrayValue) {
     auto* arr = b.FunctionParam(ty.array<i32, 4u>());
diff --git a/src/tint/lang/spirv/writer/switch_test.cc b/src/tint/lang/spirv/writer/switch_test.cc
index 7accfd2..da931bb 100644
--- a/src/tint/lang/spirv/writer/switch_test.cc
+++ b/src/tint/lang/spirv/writer/switch_test.cc
@@ -26,7 +26,7 @@
     b.Append(func->Block(), [&] {
         auto* swtch = b.Switch(42_i);
 
-        auto* def_case = b.Case(swtch, Vector{ir::Switch::CaseSelector()});
+        auto* def_case = b.Case(swtch, Vector{core::ir::Switch::CaseSelector()});
         b.Append(def_case, [&] {  //
             b.ExitSwitch(swtch);
         });
@@ -52,17 +52,17 @@
     b.Append(func->Block(), [&] {
         auto* swtch = b.Switch(42_i);
 
-        auto* case_a = b.Case(swtch, Vector{ir::Switch::CaseSelector{b.Constant(1_i)}});
+        auto* case_a = b.Case(swtch, Vector{core::ir::Switch::CaseSelector{b.Constant(1_i)}});
         b.Append(case_a, [&] {  //
             b.ExitSwitch(swtch);
         });
 
-        auto* case_b = b.Case(swtch, Vector{ir::Switch::CaseSelector{b.Constant(2_i)}});
+        auto* case_b = b.Case(swtch, Vector{core::ir::Switch::CaseSelector{b.Constant(2_i)}});
         b.Append(case_b, [&] {  //
             b.ExitSwitch(swtch);
         });
 
-        auto* def_case = b.Case(swtch, Vector{ir::Switch::CaseSelector()});
+        auto* def_case = b.Case(swtch, Vector{core::ir::Switch::CaseSelector()});
         b.Append(def_case, [&] {  //
             b.ExitSwitch(swtch);
         });
@@ -92,20 +92,20 @@
     b.Append(func->Block(), [&] {
         auto* swtch = b.Switch(42_i);
 
-        auto* case_a = b.Case(swtch, Vector{ir::Switch::CaseSelector{b.Constant(1_i)},
-                                            ir::Switch::CaseSelector{b.Constant(3_i)}});
+        auto* case_a = b.Case(swtch, Vector{core::ir::Switch::CaseSelector{b.Constant(1_i)},
+                                            core::ir::Switch::CaseSelector{b.Constant(3_i)}});
         b.Append(case_a, [&] {  //
             b.ExitSwitch(swtch);
         });
 
-        auto* case_b = b.Case(swtch, Vector{ir::Switch::CaseSelector{b.Constant(2_i)},
-                                            ir::Switch::CaseSelector{b.Constant(4_i)}});
+        auto* case_b = b.Case(swtch, Vector{core::ir::Switch::CaseSelector{b.Constant(2_i)},
+                                            core::ir::Switch::CaseSelector{b.Constant(4_i)}});
         b.Append(case_b, [&] {  //
             b.ExitSwitch(swtch);
         });
 
-        auto* def_case = b.Case(
-            swtch, Vector{ir::Switch::CaseSelector{b.Constant(5_i)}, ir::Switch::CaseSelector()});
+        auto* def_case = b.Case(swtch, Vector{core::ir::Switch::CaseSelector{b.Constant(5_i)},
+                                              core::ir::Switch::CaseSelector()});
         b.Append(def_case, [&] {  //
             b.ExitSwitch(swtch);
         });
@@ -135,17 +135,17 @@
     b.Append(func->Block(), [&] {
         auto* swtch = b.Switch(42_i);
 
-        auto* case_a = b.Case(swtch, Vector{ir::Switch::CaseSelector{b.Constant(1_i)}});
+        auto* case_a = b.Case(swtch, Vector{core::ir::Switch::CaseSelector{b.Constant(1_i)}});
         b.Append(case_a, [&] {  //
             b.Return(func);
         });
 
-        auto* case_b = b.Case(swtch, Vector{ir::Switch::CaseSelector{b.Constant(2_i)}});
+        auto* case_b = b.Case(swtch, Vector{core::ir::Switch::CaseSelector{b.Constant(2_i)}});
         b.Append(case_b, [&] {  //
             b.Return(func);
         });
 
-        auto* def_case = b.Case(swtch, Vector{ir::Switch::CaseSelector()});
+        auto* def_case = b.Case(swtch, Vector{core::ir::Switch::CaseSelector()});
         b.Append(def_case, [&] {  //
             b.Return(func);
         });
@@ -175,7 +175,7 @@
     b.Append(func->Block(), [&] {
         auto* swtch = b.Switch(42_i);
 
-        auto* case_a = b.Case(swtch, Vector{ir::Switch::CaseSelector{b.Constant(1_i)}});
+        auto* case_a = b.Case(swtch, Vector{core::ir::Switch::CaseSelector{b.Constant(1_i)}});
         b.Append(case_a, [&] {
             auto* cond_break = b.If(true);
             b.Append(cond_break->True(), [&] {  //
@@ -188,7 +188,7 @@
             b.Return(func);
         });
 
-        auto* def_case = b.Case(swtch, Vector{ir::Switch::CaseSelector()});
+        auto* def_case = b.Case(swtch, Vector{core::ir::Switch::CaseSelector()});
         b.Append(def_case, [&] {  //
             b.ExitSwitch(swtch);
         });
@@ -221,13 +221,13 @@
     b.Append(func->Block(), [&] {
         auto* s = b.Switch(42_i);
         s->SetResults(b.InstructionResult(ty.i32()));
-        auto* case_a = b.Case(s, Vector{ir::Switch::CaseSelector{b.Constant(1_i)},
-                                        ir::Switch::CaseSelector{nullptr}});
+        auto* case_a = b.Case(s, Vector{core::ir::Switch::CaseSelector{b.Constant(1_i)},
+                                        core::ir::Switch::CaseSelector{nullptr}});
         b.Append(case_a, [&] {  //
             b.ExitSwitch(s, 10_i);
         });
 
-        auto* case_b = b.Case(s, Vector{ir::Switch::CaseSelector{b.Constant(2_i)}});
+        auto* case_b = b.Case(s, Vector{core::ir::Switch::CaseSelector{b.Constant(2_i)}});
         b.Append(case_b, [&] {  //
             b.ExitSwitch(s, 20_i);
         });
@@ -256,13 +256,13 @@
     b.Append(func->Block(), [&] {
         auto* s = b.Switch(42_i);
         s->SetResults(b.InstructionResult(ty.i32()));
-        auto* case_a = b.Case(s, Vector{ir::Switch::CaseSelector{b.Constant(1_i)},
-                                        ir::Switch::CaseSelector{nullptr}});
+        auto* case_a = b.Case(s, Vector{core::ir::Switch::CaseSelector{b.Constant(1_i)},
+                                        core::ir::Switch::CaseSelector{nullptr}});
         b.Append(case_a, [&] {  //
             b.Return(func, 10_i);
         });
 
-        auto* case_b = b.Case(s, Vector{ir::Switch::CaseSelector{b.Constant(2_i)}});
+        auto* case_b = b.Case(s, Vector{core::ir::Switch::CaseSelector{b.Constant(2_i)}});
         b.Append(case_b, [&] {  //
             b.ExitSwitch(s, 20_i);
         });
@@ -304,13 +304,13 @@
     b.Append(func->Block(), [&] {
         auto* s = b.Switch(42_i);
         s->SetResults(b.InstructionResult(ty.i32()), b.InstructionResult(ty.bool_()));
-        auto* case_a = b.Case(s, Vector{ir::Switch::CaseSelector{b.Constant(1_i)},
-                                        ir::Switch::CaseSelector{nullptr}});
+        auto* case_a = b.Case(s, Vector{core::ir::Switch::CaseSelector{b.Constant(1_i)},
+                                        core::ir::Switch::CaseSelector{nullptr}});
         b.Append(case_a, [&] {  //
             b.ExitSwitch(s, 10_i, true);
         });
 
-        auto* case_b = b.Case(s, Vector{ir::Switch::CaseSelector{b.Constant(2_i)}});
+        auto* case_b = b.Case(s, Vector{core::ir::Switch::CaseSelector{b.Constant(2_i)}});
         b.Append(case_b, [&] {  //
             b.ExitSwitch(s, 20_i, false);
         });
@@ -340,13 +340,13 @@
     b.Append(func->Block(), [&] {
         auto* s = b.Switch(b.Constant(42_i));
         s->SetResults(b.InstructionResult(ty.i32()), b.InstructionResult(ty.bool_()));
-        auto* case_a = b.Case(s, Vector{ir::Switch::CaseSelector{b.Constant(1_i)},
-                                        ir::Switch::CaseSelector{nullptr}});
+        auto* case_a = b.Case(s, Vector{core::ir::Switch::CaseSelector{b.Constant(1_i)},
+                                        core::ir::Switch::CaseSelector{nullptr}});
         b.Append(case_a, [&] {  //
             b.ExitSwitch(s, 10_i, true);
         });
 
-        auto* case_b = b.Case(s, Vector{ir::Switch::CaseSelector{b.Constant(2_i)}});
+        auto* case_b = b.Case(s, Vector{core::ir::Switch::CaseSelector{b.Constant(2_i)}});
         b.Append(case_b, [&] {  //
             b.ExitSwitch(s, 20_i, false);
         });
diff --git a/src/tint/lang/spirv/writer/texture_builtin_test.cc b/src/tint/lang/spirv/writer/texture_builtin_test.cc
index 4b91601..baa3e04 100644
--- a/src/tint/lang/spirv/writer/texture_builtin_test.cc
+++ b/src/tint/lang/spirv/writer/texture_builtin_test.cc
@@ -143,12 +143,12 @@
             result_ty = ty.vec(result_ty, params.result.width);
         }
 
-        Vector<ir::FunctionParam*, 4> func_params;
+        Vector<core::ir::FunctionParam*, 4> func_params;
 
         auto* t = b.FunctionParam(
             "t", MakeTextureType(params.texture_type, params.dim, params.texel_type));
         func_params.Push(t);
-        ir::FunctionParam* s = nullptr;
+        core::ir::FunctionParam* s = nullptr;
         if (sampler == kSampler) {
             s = b.FunctionParam("s", ty.sampler());
             func_params.Push(s);
@@ -163,7 +163,7 @@
         b.Append(func->Block(), [&] {
             uint32_t arg_value = 1;
 
-            Vector<ir::Value*, 4> args;
+            Vector<core::ir::Value*, 4> args;
             if (function == core::Function::kTextureGather &&
                 params.texture_type != kDepthTexture) {
                 // Special case for textureGather, which has a component argument first.
@@ -1883,7 +1883,7 @@
     auto* texture_ty =
         ty.Get<core::type::SampledTexture>(core::type::TextureDimension::k2d, ty.f32());
 
-    Vector<ir::FunctionParam*, 4> args;
+    Vector<core::ir::FunctionParam*, 4> args;
     args.Push(b.FunctionParam("texture", texture_ty));
     args.Push(b.FunctionParam("sampler", ty.sampler()));
     args.Push(b.FunctionParam("coords", ty.vec2<f32>()));
diff --git a/src/tint/lang/spirv/writer/unary_test.cc b/src/tint/lang/spirv/writer/unary_test.cc
index 0384b39..b991a61 100644
--- a/src/tint/lang/spirv/writer/unary_test.cc
+++ b/src/tint/lang/spirv/writer/unary_test.cc
@@ -28,7 +28,7 @@
     /// The element type to test.
     TestElementType type;
     /// The unary operation.
-    enum ir::Unary::Kind kind;
+    enum core::ir::Unary::Kind kind;
     /// The expected SPIR-V instruction.
     std::string spirv_inst;
     /// The expected SPIR-V result type name.
@@ -69,11 +69,11 @@
 INSTANTIATE_TEST_SUITE_P(
     SpirvWriterTest_Unary,
     Arithmetic,
-    testing::Values(UnaryTestCase{kI32, ir::Unary::Kind::kComplement, "OpNot", "int"},
-                    UnaryTestCase{kU32, ir::Unary::Kind::kComplement, "OpNot", "uint"},
-                    UnaryTestCase{kI32, ir::Unary::Kind::kNegation, "OpSNegate", "int"},
-                    UnaryTestCase{kF32, ir::Unary::Kind::kNegation, "OpFNegate", "float"},
-                    UnaryTestCase{kF16, ir::Unary::Kind::kNegation, "OpFNegate", "half"}));
+    testing::Values(UnaryTestCase{kI32, core::ir::Unary::Kind::kComplement, "OpNot", "int"},
+                    UnaryTestCase{kU32, core::ir::Unary::Kind::kComplement, "OpNot", "uint"},
+                    UnaryTestCase{kI32, core::ir::Unary::Kind::kNegation, "OpSNegate", "int"},
+                    UnaryTestCase{kF32, core::ir::Unary::Kind::kNegation, "OpFNegate", "float"},
+                    UnaryTestCase{kF16, core::ir::Unary::Kind::kNegation, "OpFNegate", "half"}));
 
 }  // namespace
 }  // namespace tint::spirv::writer
diff --git a/src/tint/lang/spirv/writer/var_test.cc b/src/tint/lang/spirv/writer/var_test.cc
index ffe1437..6581e3d 100644
--- a/src/tint/lang/spirv/writer/var_test.cc
+++ b/src/tint/lang/spirv/writer/var_test.cc
@@ -124,7 +124,7 @@
     v->SetInitializer(b.Constant(42_i));
     b.RootBlock()->Append(v);
 
-    auto* func = b.Function("foo", ty.void_(), ir::Function::PipelineStage::kFragment);
+    auto* func = b.Function("foo", ty.void_(), core::ir::Function::PipelineStage::kFragment);
     b.Append(func->Block(), [&] {
         auto* load = b.Load(v);
         auto* add = b.Add(ty.i32(), load, 1_i);
@@ -150,7 +150,7 @@
 TEST_F(SpirvWriterTest, WorkgroupVar_LoadAndStore) {
     auto* v = b.RootBlock()->Append(b.Var("v", ty.ptr<workgroup, i32>()));
 
-    auto* func = b.Function("foo", ty.void_(), ir::Function::PipelineStage::kCompute,
+    auto* func = b.Function("foo", ty.void_(), core::ir::Function::PipelineStage::kCompute,
                             std::array{1u, 1u, 1u});
     b.Append(func->Block(), [&] {
         auto* load = b.Load(v);
@@ -200,7 +200,7 @@
     v->SetBindingPoint(0, 0);
     b.RootBlock()->Append(v);
 
-    auto* func = b.Function("foo", ty.void_(), ir::Function::PipelineStage::kCompute,
+    auto* func = b.Function("foo", ty.void_(), core::ir::Function::PipelineStage::kCompute,
                             std::array{1u, 1u, 1u});
     b.Append(func->Block(), [&] {
         auto* load = b.Load(v);
@@ -244,7 +244,7 @@
     v->SetBindingPoint(0, 0);
     b.RootBlock()->Append(v);
 
-    auto* func = b.Function("foo", ty.void_(), ir::Function::PipelineStage::kCompute,
+    auto* func = b.Function("foo", ty.void_(), core::ir::Function::PipelineStage::kCompute,
                             std::array{1u, 1u, 1u});
     b.Append(func->Block(), [&] {
         auto* load = b.Load(v);
diff --git a/src/tint/lang/spirv/writer/writer.cc b/src/tint/lang/spirv/writer/writer.cc
index 02443c2..42fe4c2 100644
--- a/src/tint/lang/spirv/writer/writer.cc
+++ b/src/tint/lang/spirv/writer/writer.cc
@@ -55,7 +55,7 @@
         auto ir = converted.Move();
 
         // Apply transforms as required by writer options.
-        auto remapper = ir::transform::BindingRemapper(&ir, options.binding_remapper_options);
+        auto remapper = core::ir::transform::BindingRemapper(&ir, options.binding_remapper_options);
         if (!remapper) {
             return remapper.Failure();
         }